hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ed0b14431574b5a7ba0b7e48c2aa2880eb28eaa7 | 2,125 | py | Python | xdeep/xlocal/gradient/backprop/base.py | Sm0ckingBird/xdeep | 01a877122a17121c41ebc9bf81faa7c7d61e61bb | [
"MIT"
] | 38 | 2019-09-05T21:29:02.000Z | 2022-03-12T14:03:52.000Z | xdeep/xlocal/gradient/backprop/base.py | DATA-Hu-Lab/xpytorch | c2f0323b0ec55d684ee24dbe35a6046fe0074663 | [
"MIT"
] | 4 | 2019-11-19T02:51:30.000Z | 2020-06-30T13:29:25.000Z | xdeep/xlocal/gradient/backprop/base.py | DATA-Hu-Lab/xpytorch | c2f0323b0ec55d684ee24dbe35a6046fe0074663 | [
"MIT"
] | 8 | 2019-10-15T02:05:13.000Z | 2021-06-16T05:43:15.000Z | import torch
import torch.nn as nn
class BaseProp(object):
    """
    Common plumbing shared by gradient-based backpropagation explainers.

    Holds the wrapped model plus the bookkeeping needed to attach hooks
    and to stash intermediate ReLU activations for guided backprop.
    """

    def __init__(self, model):
        """Store the model and initialise hook bookkeeping.

        # Arguments:
            model: torchvision.models. A pretrained model.
        """
        self.model = model
        # Hook handles registered so far (kept so they can be removed later).
        self.handle = []
        # Forward-pass outputs of each ReLU layer, filled by the hooks.
        self.relu_outputs = []

    def _register_conv_hook(self):
        """
        Register hook function to save gradient w.r.t input image.
        """
        def _record_gradients(module, grad_in, grad_out):
            # grad_in[0] is the gradient with respect to the layer's input.
            self.gradients = grad_in[0]

        for _, layer in self.model.named_modules():
            # Target conv layers consuming the 3-channel (RGB) input image.
            if isinstance(layer, nn.modules.conv.Conv2d) and layer.in_channels == 3:
                self.handle.append(layer.register_backward_hook(_record_gradients))

    def _register_relu_hooks(self):
        """
        Register hook function to save forward and backward relu result.
        """
        def _record_output(module, input_, output):
            # Remember the forward activation; popped again on backward.
            self.relu_outputs.append(output)

        def _clip_gradients(module, grad_in, grad_out):
            # Mask of positions whose forward activation was positive.
            relu_output = self.relu_outputs.pop()
            relu_output[relu_output > 0] = 1
            # Keep only positive incoming gradients ...
            positive_grad_out = torch.clamp(grad_out[0], min=0.0)
            # ... and combine both masks into the guided gradient.
            return (positive_grad_out * relu_output, )

        for _, layer in self.model.named_modules():
            if isinstance(layer, nn.ReLU):
                # Forward handle is appended before the backward handle.
                self.handle.append(layer.register_forward_hook(_record_output))
                self.handle.append(layer.register_backward_hook(_clip_gradients))
| 32.19697 | 86 | 0.616941 |
294723ed68f040e1c1271821296fe459d288e097 | 1,282 | py | Python | main2.py | jonesnil/E01b-Smiles | c06b220e4cc45d02206cbf9b5386864713497f18 | [
"MIT"
] | null | null | null | main2.py | jonesnil/E01b-Smiles | c06b220e4cc45d02206cbf9b5386864713497f18 | [
"MIT"
] | null | null | null | main2.py | jonesnil/E01b-Smiles | c06b220e4cc45d02206cbf9b5386864713497f18 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import utils, open_color, arcade
utils.check_version((3,7))
# Open the window. Set the window title and dimensions (width and height)
arcade.open_window(800, 600, "Smiley Face Example")
arcade.set_background_color(open_color.white)
# Start the render process. This must be done before any drawing commands.
arcade.start_render()
face_x,face_y = (400,400)
# Draw the smiley face:
# (x,y,radius,color)
arcade.draw_circle_filled(face_x, face_y, 100, open_color.yellow_3)
# (x,y,radius,color,border_thickness)
arcade.draw_circle_outline(face_x + 0, face_y + 0, 100, open_color.black, 4)
#(x,y,width,height,color)
arcade.draw_ellipse_filled(face_x - 25, face_y + 25, 15, 25, open_color.black)
arcade.draw_ellipse_filled(face_x + 25, face_y + 25, 15, 25, open_color.black)
arcade.draw_circle_filled(face_x - 23, face_y + 30, 3, open_color.gray_2)
arcade.draw_circle_filled(face_x + 27, face_y + 30, 3, open_color.gray_2)
#(x,y,width,height,color,start_degrees,end_degrees,border_thickness)
arcade.draw_arc_outline(face_x + 0, face_y - 5, 60, 50, open_color.black, 190, 350, 4)
# Finish the render
# Nothing will be drawn without this.
# Must happen after all draw commands
arcade.finish_render()
# Keep the window up until someone closes it.
arcade.run()
| 33.736842 | 86 | 0.763651 |
8067fb9214a72acab777648d3813d29085aea8cb | 368 | py | Python | python/using_sqlalchemy01/main.py | garyelephant/snippets | df2fd6b71603a16e1cc5338f10ad55451378cfae | [
"MIT"
] | 1 | 2017-12-19T05:36:49.000Z | 2017-12-19T05:36:49.000Z | python/using_sqlalchemy01/main.py | garyelephant/snippets | df2fd6b71603a16e1cc5338f10ad55451378cfae | [
"MIT"
] | null | null | null | python/using_sqlalchemy01/main.py | garyelephant/snippets | df2fd6b71603a16e1cc5338f10ad55451378cfae | [
"MIT"
] | null | null | null | from .models import User
from .database import session_scope
if __name__ == '__main__':
with session_scope() as session:
users = session.query( User ).order_by( User.id )
# Remove all object instances from this Session to make them available to accessed by outside
users.expunge_all()
for u in users:
print u
| 28.307692 | 101 | 0.652174 |
27a703d0eda1e91c7ccf839f6ed363b573dbd20d | 55,112 | py | Python | rollbar/__init__.py | mikhail-iurkov/pyrollbar | 968bb51fb320302b0e309a3babc71de29a51933e | [
"MIT"
] | 177 | 2015-02-02T19:22:15.000Z | 2022-01-24T07:20:04.000Z | rollbar/__init__.py | mikhail-iurkov/pyrollbar | 968bb51fb320302b0e309a3babc71de29a51933e | [
"MIT"
] | 293 | 2015-01-04T23:24:56.000Z | 2022-02-14T18:23:02.000Z | rollbar/__init__.py | mikhail-iurkov/pyrollbar | 968bb51fb320302b0e309a3babc71de29a51933e | [
"MIT"
] | 121 | 2015-02-06T21:43:51.000Z | 2022-02-14T11:13:33.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import functools
import inspect
import json
import logging
import os
import socket
import sys
import threading
import time
import traceback
import types
import uuid
import wsgiref.util
import warnings
import requests
import six
from rollbar.lib import events, filters, dict_merge, parse_qs, text, transport, urljoin, iteritems, defaultJSONEncode
__version__ = '0.16.2'
__log_name__ = 'rollbar'
log = logging.getLogger(__log_name__)
try:
# 2.x
import Queue as queue
except ImportError:
# 3.x
import queue
# import request objects from various frameworks, if available
try:
from webob import BaseRequest as WebobBaseRequest
except ImportError:
WebobBaseRequest = None
try:
from django.core.exceptions import ImproperlyConfigured
except ImportError:
DjangoHttpRequest = None
RestFrameworkRequest = None
else:
try:
from django.http import HttpRequest as DjangoHttpRequest
except (ImportError, ImproperlyConfigured):
DjangoHttpRequest = None
try:
from rest_framework.request import Request as RestFrameworkRequest
except (ImportError, ImproperlyConfigured):
RestFrameworkRequest = None
del ImproperlyConfigured
try:
from werkzeug.wrappers import BaseRequest as WerkzeugRequest
except (ImportError, SyntaxError):
WerkzeugRequest = None
try:
from werkzeug.local import LocalProxy as WerkzeugLocalProxy
except (ImportError, SyntaxError):
WerkzeugLocalProxy = None
try:
from tornado.httpserver import HTTPRequest as TornadoRequest
except ImportError:
TornadoRequest = None
try:
from bottle import BaseRequest as BottleRequest
except ImportError:
BottleRequest = None
try:
from sanic.request import Request as SanicRequest
except ImportError:
SanicRequest = None
try:
from google.appengine.api.urlfetch import fetch as AppEngineFetch
except ImportError:
AppEngineFetch = None
try:
from starlette.requests import Request as StarletteRequest
except ImportError:
StarletteRequest = None
try:
from fastapi.requests import Request as FastAPIRequest
except ImportError:
FastAPIRequest = None
try:
import httpx
except ImportError:
httpx = None
AsyncHTTPClient = httpx
def passthrough_decorator(func):
    """
    No-op decorator: the wrapper simply forwards all positional and keyword
    arguments to ``func``. Used as a stand-in when an optional framework's
    decorator is unavailable.
    """
    # functools.wraps preserves func's __name__/__doc__ on the wrapper so the
    # decorated function stays introspectable (the bare wrapper did not).
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        return func(*args, **kwargs)
    return wrap
try:
from tornado.httpclient import AsyncHTTPClient as TornadoAsyncHTTPClient
except ImportError:
TornadoAsyncHTTPClient = None
# Optional Twisted/treq support: defines the HTTPS policy and hooks Rollbar
# into Twisted's log observer chain. If Twisted/treq is missing, treq is set
# to None and the 'twisted' handler is disabled.
try:
    import treq
    from twisted.python import log as twisted_log
    from twisted.web.iweb import IPolicyForHTTPS
    from twisted.web.client import BrowserLikePolicyForHTTPS, Agent
    from twisted.internet.ssl import CertificateOptions
    from twisted.internet import task, defer, ssl, reactor
    from zope.interface import implementer

    @implementer(IPolicyForHTTPS)
    class VerifyHTTPS(object):
        """TLS policy that can disable certificate verification via SETTINGS['verify_https']."""

        def __init__(self):
            # by default, handle requests like a browser would
            self.default_policy = BrowserLikePolicyForHTTPS()

        def creatorForNetloc(self, hostname, port):
            # When verification is globally disabled, skip cert checks;
            # otherwise fall back to the default browser-like policy.
            if not SETTINGS['verify_https']:
                return ssl.CertificateOptions(verify=False)
            return self.default_policy.creatorForNetloc(hostname, port)

    def log_handler(event):
        """
        Default uncaught error handler

        Twisted log observer: reports failures carried by log events to
        Rollbar, skipping pyrollbar's own ApiException errors.
        """
        try:
            if not event.get('isError') or 'failure' not in event:
                return
            err = event['failure']

            # Don't report Rollbar internal errors to ourselves
            if issubclass(err.type, ApiException):
                log.error('Rollbar internal error: %s', err.value)
            else:
                report_exc_info((err.type, err.value, err.getTracebackObject()))
        except:
            # Never let the error reporter itself crash the observer chain.
            log.exception('Error while reporting to Rollbar')

    # Add Rollbar as a log handler which will report uncaught errors
    twisted_log.addObserver(log_handler)

except ImportError:
    treq = None
try:
from falcon import Request as FalconRequest
except ImportError:
FalconRequest = None
def get_request():
    """
    Return the current framework request object, or None if unavailable.

    Tries each supported framework's accessor in turn; accessors whose
    framework is not installed, or which raise, are silently skipped.
    """
    # TODO(cory): add in a generic _get_locals_request() which
    # will iterate up through the call stack and look for a variable
    # that appears to be valid request object.
    candidates = (_get_fastapi_request,
                  _get_starlette_request,
                  _get_bottle_request,
                  _get_flask_request,
                  _get_pyramid_request,
                  _get_pylons_request)
    for candidate in candidates:
        try:
            request = candidate()
        except:
            continue
        if request is not None:
            return request

    return None
def _get_bottle_request():
    """Return bottle's thread-local request, or None when bottle is absent."""
    if BottleRequest is not None:
        from bottle import request
        return request
    return None
def _get_flask_request():
    """Return Flask's request proxy, or None when Werkzeug is absent."""
    if WerkzeugRequest is not None:
        from flask import request
        return request
    return None
def _get_pyramid_request():
    """Return Pyramid's current request, or None when WebOb is absent."""
    if WebobBaseRequest is not None:
        from pyramid.threadlocal import get_current_request
        return get_current_request()
    return None
def _get_pylons_request():
    """Return Pylons' request proxy, or None when WebOb is absent."""
    if WebobBaseRequest is not None:
        from pylons import request
        return request
    return None
def _get_starlette_request():
    """Return the current Starlette request, or None when Starlette is absent."""
    # Do not modify the returned object
    if StarletteRequest is not None:
        from rollbar.contrib.starlette import get_current_request
        return get_current_request()
    return None
def _get_fastapi_request():
    """Return the current FastAPI request, or None when FastAPI is absent."""
    # Do not modify the returned object
    if FastAPIRequest is not None:
        from rollbar.contrib.fastapi import get_current_request
        return get_current_request()
    return None
BASE_DATA_HOOK = None
agent_log = None
VERSION = __version__
DEFAULT_ENDPOINT = 'https://api.rollbar.com/api/1/'
DEFAULT_TIMEOUT = 3
ANONYMIZE = 'anonymize'
DEFAULT_LOCALS_SIZES = {
'maxlevel': 5,
'maxdict': 10,
'maxlist': 10,
'maxtuple': 10,
'maxset': 10,
'maxfrozenset': 10,
'maxdeque': 10,
'maxarray': 10,
'maxstring': 100,
'maxlong': 40,
'maxother': 100,
}
# configuration settings
# configure by calling init() or overriding directly
SETTINGS = {
'access_token': None,
'enabled': True,
'environment': 'production',
'exception_level_filters': [],
'root': None, # root path to your code
'branch': None, # git branch name
'code_version': None,
'handler': 'default', # 'blocking', 'thread' (default), 'async', 'agent', 'tornado', 'gae', 'twisted' or 'httpx'
'endpoint': DEFAULT_ENDPOINT,
'timeout': DEFAULT_TIMEOUT,
'agent.log_file': 'log.rollbar',
'scrub_fields': [
'pw',
'passwd',
'password',
'secret',
'confirm_password',
'confirmPassword',
'password_confirmation',
'passwordConfirmation',
'access_token',
'accessToken',
'auth',
'authentication',
'authorization',
],
'url_fields': ['url', 'link', 'href'],
'notifier': {
'name': 'pyrollbar',
'version': VERSION
},
'allow_logging_basic_config': True, # set to False to avoid a call to logging.basicConfig()
'locals': {
'enabled': True,
'safe_repr': True,
'scrub_varargs': True,
'sizes': DEFAULT_LOCALS_SIZES,
'safelisted_types': [],
'whitelisted_types': []
},
'verify_https': True,
'shortener_keys': [],
'suppress_reinit_warning': False,
'capture_email': False,
'capture_username': False,
'capture_ip': True,
'log_all_rate_limited_items': True,
'http_proxy': None,
'http_proxy_user': None,
'http_proxy_password': None,
'include_request_body': False,
'request_pool_connections': None,
'request_pool_maxsize': None,
'request_max_retries': None,
}
_CURRENT_LAMBDA_CONTEXT = None
_LAST_RESPONSE_STATUS = None
# Set in init()
_transforms = []
_serialize_transform = None
_initialized = False
from rollbar.lib.transforms.scrub_redact import REDACT_REF
from rollbar.lib import transforms
from rollbar.lib.transforms.scrub import ScrubTransform
from rollbar.lib.transforms.scruburl import ScrubUrlTransform
from rollbar.lib.transforms.scrub_redact import ScrubRedactTransform
from rollbar.lib.transforms.serializable import SerializableTransform
from rollbar.lib.transforms.shortener import ShortenerTransform
## public api
def init(access_token, environment='production', scrub_fields=None, url_fields=None, **kw):
    """
    Saves configuration variables in this module's SETTINGS.

    access_token: project access token. Get this from the Rollbar UI:
                      - click "Settings" in the top nav
                      - click "Projects" in the left nav
                      - copy-paste the appropriate token.
    environment: environment name. Can be any string; suggestions: 'production', 'development',
    'staging', 'yourname'
    scrub_fields: optional iterable of field names to redact (replaces the default list).
    url_fields: optional iterable of key names whose values are treated as URLs to scrub.
    **kw: provided keyword arguments will override keys in SETTINGS.

    Safe to call only once; subsequent calls merge **kw into SETTINGS but
    otherwise log a warning and return.
    """
    global SETTINGS, agent_log, _initialized, _transforms, _serialize_transform, _threads

    if scrub_fields is not None:
        SETTINGS['scrub_fields'] = list(scrub_fields)
    if url_fields is not None:
        SETTINGS['url_fields'] = list(url_fields)

    # Merge the extra config settings into SETTINGS
    SETTINGS = dict_merge(SETTINGS, kw)
    if _initialized:
        # NOTE: Temp solution to not being able to re-init.
        # New versions of pyrollbar will support re-initialization
        # via the (not-yet-implemented) configure() method.
        if not SETTINGS.get('suppress_reinit_warning'):
            log.warning('Rollbar already initialized. Ignoring re-init.')
        return

    SETTINGS['access_token'] = access_token
    SETTINGS['environment'] = environment
    # Set up the shared HTTP connection pool used by the blocking/thread handlers.
    _configure_transport(**SETTINGS)

    if SETTINGS.get('allow_logging_basic_config'):
        logging.basicConfig()

    if SETTINGS.get('handler') == 'agent':
        agent_log = _create_agent_log()

    # Back-compat: honor the deprecated 'whitelisted_types' setting name.
    if not SETTINGS['locals']['safelisted_types'] and SETTINGS['locals']['whitelisted_types']:
        warnings.warn('whitelisted_types deprecated use safelisted_types instead', DeprecationWarning)
        SETTINGS['locals']['safelisted_types'] = SETTINGS['locals']['whitelisted_types']

    # We will perform these transforms in order:
    # 1. Serialize the payload to be all python built-in objects
    # 2. Scrub the payloads based on the key suffixes in SETTINGS['scrub_fields']
    # 3. Scrub URLs in the payload for keys that end with 'url'
    # 4. Optional - If local variable gathering is enabled, transform the
    #    trace frame values using the ShortReprTransform.
    _serialize_transform = SerializableTransform(safe_repr=SETTINGS['locals']['safe_repr'],
                                                 safelist_types=SETTINGS['locals']['safelisted_types'])
    _transforms = [
        ScrubRedactTransform(),
        _serialize_transform,
        ScrubTransform(suffixes=[(field,) for field in SETTINGS['scrub_fields']], redact_char='*'),
        ScrubUrlTransform(suffixes=[(field,) for field in SETTINGS['url_fields']], params_to_scrub=SETTINGS['scrub_fields'])
    ]

    # A list of key prefixes to apply our shortener transform to. The request
    # being included in the body key is old behavior and is being retained for
    # backwards compatibility.
    shortener_keys = [
        ('request', 'POST'),
        ('request', 'json'),
        ('body', 'request', 'POST'),
        ('body', 'request', 'json'),
    ]

    if SETTINGS['locals']['enabled']:
        shortener_keys.append(('body', 'trace', 'frames', '*', 'code'))
        shortener_keys.append(('body', 'trace', 'frames', '*', 'args', '*'))
        shortener_keys.append(('body', 'trace', 'frames', '*', 'kwargs', '*'))
        shortener_keys.append(('body', 'trace', 'frames', '*', 'locals', '*'))

    shortener_keys.extend(SETTINGS['shortener_keys'])

    shortener = ShortenerTransform(safe_repr=SETTINGS['locals']['safe_repr'],
                                   keys=shortener_keys,
                                   **SETTINGS['locals']['sizes'])
    _transforms.append(shortener)
    # Queue tracking in-flight sender threads; wait() joins on it.
    _threads = queue.Queue()

    events.reset()
    filters.add_builtin_filters(SETTINGS)

    _initialized = True
def _configure_transport(**kw):
    """Initialise the shared HTTP connection pool from pyrollbar settings."""
    transport.configure_pool(**_requests_configuration(**kw))
def _requests_configuration(**kw):
keys = {
'request_pool_connections': 'pool_connections',
'request_pool_maxsize': 'pool_maxsize',
'request_max_retries': 'max_retries',
}
return {keys[k]: kw.get(k, None) for k in keys}
def lambda_function(f):
    """
    Decorator for making error handling on AWS Lambda easier

    Stores the Lambda context so it can be attached to reported payloads,
    reports any uncaught exception to Rollbar, flushes queued payloads, and
    re-raises the original exception.
    """
    @functools.wraps(f)
    def wrapper(event, context):
        global _CURRENT_LAMBDA_CONTEXT
        _CURRENT_LAMBDA_CONTEXT = context
        try:
            result = f(event, context)
            # Flush pending payload threads before handing the result back.
            return wait(lambda: result)
        except:
            cls, exc, trace = sys.exc_info()
            # tb_next skips this wrapper's own frame in the reported traceback.
            report_exc_info((cls, exc, trace.tb_next))
            wait()
            raise
    return wrapper
def report_exc_info(exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw):
    """
    Reports an exception to Rollbar, using exc_info (from calling sys.exc_info())

    exc_info: optional, should be the result of calling sys.exc_info(). If omitted, sys.exc_info() will be called here.
    request: optional, a WebOb, Werkzeug-based or Sanic request object.
    extra_data: optional, will be included in the 'custom' section of the payload
    payload_data: optional, dict that will override values in the final payload
                  (e.g. 'level' or 'fingerprint')
    kw: provided for legacy purposes; unused.

    Example usage:

    rollbar.init(access_token='YOUR_PROJECT_ACCESS_TOKEN')
    try:
        do_something()
    except:
        rollbar.report_exc_info(sys.exc_info(), request, {'foo': 'bar'}, {'level': 'warning'})
    """
    info = sys.exc_info() if exc_info is None else exc_info

    try:
        return _report_exc_info(info, request, extra_data, payload_data, level=level)
    except Exception as e:
        # Never let the reporter itself propagate an error to the caller.
        log.exception("Exception while reporting exc_info to Rollbar. %r", e)
def report_message(message, level='error', request=None, extra_data=None, payload_data=None):
    """
    Reports an arbitrary string message to Rollbar.

    message: the string body of the message
    level: level to report at. One of: 'critical', 'error', 'warning', 'info', 'debug'
    request: the request object for the context of the message
    extra_data: dictionary of params to include with the message. 'body' is reserved.
    payload_data: param names to pass in the 'data' level of the payload; overrides defaults.

    Returns the reported item's uuid on success, None otherwise.
    """
    try:
        return _report_message(message, level, request, extra_data, payload_data)
    except Exception as e:
        # Swallow internal failures so reporting never breaks the caller.
        log.exception("Exception while reporting message to Rollbar. %r", e)
def send_payload(payload, access_token):
    """
    Sends a payload object, (the result of calling _build_payload() + _serialize_payload()).
    Uses the configured handler from SETTINGS['handler']

    Available handlers:
    - 'blocking': calls _send_payload() (which makes an HTTP request) immediately, blocks on it
    - 'thread': starts a single-use thread that will call _send_payload(). returns immediately.
    - 'async': calls _send_payload_async() (which makes an async HTTP request using default async handler)
    - 'agent': writes to a log file to be processed by rollbar-agent
    - 'tornado': calls _send_payload_tornado() (which makes an async HTTP request using tornado's AsyncHTTPClient)
    - 'gae': calls _send_payload_appengine() (which makes a blocking call to Google App Engine)
    - 'twisted': calls _send_payload_twisted() (which makes an async HTTP request using Twisted and Treq)
    - 'httpx': calls _send_payload_httpx() (which makes an async HTTP request using HTTPX)
    """
    # Event filters may veto the payload entirely by returning False.
    payload = events.on_payload(payload)
    if payload is False:
        return

    if sys.version_info >= (3, 6):
        # On 3.6+ the handler may be overridden per-context (async support).
        from rollbar.lib._async import get_current_handler
        handler = get_current_handler()
    else:
        handler = SETTINGS.get('handler')

    if handler == 'twisted':
        payload['data']['framework'] = 'twisted'

    # Serialize once; every handler below receives the JSON string.
    payload_str = _serialize_payload(payload)
    if handler == 'blocking':
        _send_payload(payload_str, access_token)
    elif handler == 'agent':
        # rollbar-agent tails this log file and forwards entries itself.
        agent_log.error(payload_str)
    elif handler == 'tornado':
        if TornadoAsyncHTTPClient is None:
            log.error('Unable to find tornado')
            return
        _send_payload_tornado(payload_str, access_token)
    elif handler == 'gae':
        if AppEngineFetch is None:
            log.error('Unable to find AppEngine URLFetch module')
            return
        _send_payload_appengine(payload_str, access_token)
    elif handler == 'twisted':
        if treq is None:
            log.error('Unable to find Treq')
            return
        _send_payload_twisted(payload_str, access_token)
    elif handler == 'httpx':
        if httpx is None:
            log.error('Unable to find HTTPX')
            return
        _send_payload_httpx(payload_str, access_token)
    elif handler == 'async':
        if AsyncHTTPClient is None:
            log.error('Unable to find async handler')
            return
        _send_payload_async(payload_str, access_token)
    elif handler == 'thread':
        _send_payload_thread(payload_str, access_token)
    else:
        # default to 'thread'
        _send_payload_thread(payload_str, access_token)
def search_items(title, return_fields=None, access_token=None, endpoint=None, **search_fields):
    """
    Searches a project for items that match the input criteria.

    title: all or part of the item's title to search for.
    return_fields: the fields that should be returned for each item.
                   e.g. ['id', 'project_id', 'status'] will return a dict containing
                   only those fields for each item.
    access_token: a project access token. If this is not provided,
                  the one provided to init() will be used instead.
    search_fields: additional fields to include in the search.
                   currently supported: status, level, environment
    """
    # An empty title means nothing to search for.
    if not title:
        return []

    fields_param = None if return_fields is None else ','.join(return_fields)

    return _get_api('search/',
                    title=title,
                    fields=fields_param,
                    access_token=access_token,
                    endpoint=endpoint,
                    **search_fields)
def wait(f=None):
    """Block until all queued payload threads finish; then call f() if given."""
    _threads.join()
    if f is None:
        return None
    return f()
class ApiException(Exception):
    """
    This exception will be raised if there was a problem decoding the
    response from an API call.

    Base class for all pyrollbar API errors.
    """
    pass
class ApiError(ApiException):
    """
    This exception will be raised if the API response contains an 'err'
    field, denoting there was a problem fulfilling the api request.
    """
    pass
class Result(object):
    """
    Wrapper around the response of a single Rollbar API call.

    Attributes:
        access_token: token the request was made with.
        path: API path that was requested.
        params: query parameters sent with the request.
        data: decoded response payload.

    Usage:

        result = search_items(title='foo', fields=['id'])
        print result.data
    """
    def __init__(self, access_token, path, params, data):
        self.access_token = access_token
        self.path = path
        self.params = params
        self.data = data

    def __str__(self):
        # Delegate string form to the payload itself.
        return str(self.data)
class PagedResult(Result):
    """
    A Result for endpoints that page their responses.

    next_page()/prev_page() re-issue the same request with an adjusted
    'page' parameter and return a fresh result.

    Usage:

        result = search_items(title='foo', fields=['id'])
        print 'First page: %d, data: %s' % (result.page, result.data)
        result = result.next_page()
        print 'Second page: %d, data: %s' % (result.page, result.data)
    """
    def __init__(self, access_token, path, page_num, params, data, endpoint=None):
        super(PagedResult, self).__init__(access_token, path, params, data)
        self.page = page_num
        self.endpoint = endpoint

    def next_page(self):
        query = copy.copy(self.params)
        query['page'] = self.page + 1
        return _get_api(self.path, endpoint=self.endpoint, **query)

    def prev_page(self):
        # Already at (or before) the first page; nothing earlier to fetch.
        if self.page <= 1:
            return self
        query = copy.copy(self.params)
        query['page'] = self.page - 1
        return _get_api(self.path, endpoint=self.endpoint, **query)
## internal functions
def _resolve_exception_class(idx, filter):
    """
    Resolve one entry of SETTINGS['exception_level_filters'].

    filter is a (class_or_dotted_name, level) pair. When the class is given
    as a dotted-path string, it is looked up lazily in sys.modules; on
    success the resolved class is cached back into SETTINGS at position idx.
    Returns (cls, level) where cls may be None when resolution failed.
    """
    cls, level = filter
    if isinstance(cls, six.string_types):
        # Lazily resolve class name
        parts = cls.split('.')
        module = '.'.join(parts[:-1])
        if module in sys.modules and hasattr(sys.modules[module], parts[-1]):
            cls = getattr(sys.modules[module], parts[-1])
            # Cache the resolved class so later lookups skip this work.
            SETTINGS['exception_level_filters'][idx] = (cls, level)
        else:
            cls = None

    return cls, level
def _filtered_level(exception):
    """Return the configured reporting level for this exception, or None."""
    for idx, entry in enumerate(SETTINGS['exception_level_filters']):
        cls, level = _resolve_exception_class(idx, entry)
        if cls is not None and isinstance(exception, cls):
            return level

    return None
def _is_ignored(exception):
    # True when an exception_level_filters entry maps this exception to 'ignored'.
    return _filtered_level(exception) == 'ignored'
def _create_agent_log():
    """
    Creates .rollbar log file for use with rollbar-agent

    Returns a logger that appends each payload as one UTF-8 line to
    SETTINGS['agent.log_file'], which must end with '.rollbar'.
    """
    log_file = SETTINGS['agent.log_file']
    if not log_file.endswith('.rollbar'):
        log.error("Provided agent log file does not end with .rollbar, which it must. "
                  "Using default instead.")
        # BUG FIX: this branch referenced an undefined DEFAULTS dict, raising
        # NameError; fall back to the documented default filename instead.
        log_file = 'log.rollbar'

    retval = logging.getLogger('rollbar_agent')
    handler = logging.FileHandler(log_file, 'a', 'utf-8')
    formatter = logging.Formatter('%(message)s')
    handler.setFormatter(formatter)
    retval.addHandler(handler)
    retval.setLevel(logging.WARNING)
    return retval
def _report_exc_info(exc_info, request, extra_data, payload_data, level=None):
    """
    Called by report_exc_info() wrapper

    Builds and sends the full payload for an exception: resolves the level
    from exception_level_filters, runs registered event filters, collects
    the trace chain plus request/person/server context, and enqueues the
    payload. Returns the payload uuid, or None when reporting was skipped.
    """
    if not _check_config():
        return

    filtered_level = _filtered_level(exc_info[1])
    if level is None:
        level = filtered_level

    # Event filters may mutate or veto (return False) the exception info.
    filtered_exc_info = events.on_exception_info(exc_info,
                                                 request=request,
                                                 extra_data=extra_data,
                                                 payload_data=payload_data,
                                                 level=level)

    if filtered_exc_info is False:
        return

    cls, exc, trace = filtered_exc_info

    data = _build_base_data(request)
    if level is not None:
        data['level'] = level

    # walk the trace chain to collect cause and context exceptions
    trace_chain = _walk_trace_chain(cls, exc, trace)

    extra_trace_data = None
    if len(trace_chain) > 1:
        data['body'] = {
            'trace_chain': trace_chain
        }
        # A caller-supplied 'trace' override cannot be merged into a chain
        # directly; pull it out and fold it into 'custom' below instead.
        if payload_data and ('body' in payload_data) and ('trace' in payload_data['body']):
            extra_trace_data = payload_data['body']['trace']
            del payload_data['body']['trace']
    else:
        data['body'] = {
            'trace': trace_chain[0]
        }

    if extra_data:
        extra_data = extra_data  # NOTE(review): no-op self-assignment left from a refactor
        if not isinstance(extra_data, dict):
            extra_data = {'value': extra_data}
        if extra_trace_data:
            extra_data = dict_merge(extra_data, extra_trace_data, silence_errors=True)
        data['custom'] = extra_data
    if extra_trace_data and not extra_data:
        data['custom'] = extra_trace_data

    # Unwrap framework proxies (e.g. Werkzeug LocalProxy) before inspecting.
    request = _get_actual_request(request)
    _add_request_data(data, request)
    _add_person_data(data, request)
    _add_lambda_context_data(data)
    data['server'] = _build_server_data()

    if payload_data:
        data = dict_merge(data, payload_data, silence_errors=True)

    payload = _build_payload(data)
    send_payload(payload, payload.get('access_token'))

    return data['uuid']
def _walk_trace_chain(cls, exc, trace):
    """
    Collect trace data for `exc` and every chained __cause__/__context__
    exception, most-recent first.

    Tracks already-visited exceptions to guard against cycles in the chain.
    BUG FIX: the seen-check previously ran *after* appending, so a cyclic
    chain produced a duplicate entry for the repeated exception.
    """
    trace_chain = [_trace_data(cls, exc, trace)]

    seen_exceptions = {exc}

    while True:
        exc = getattr(exc, '__cause__', None) or getattr(exc, '__context__', None)
        # Stop at the end of the chain or when a cycle is detected.
        if not exc or exc in seen_exceptions:
            break
        trace_chain.append(_trace_data(type(exc), exc, getattr(exc, '__traceback__', None)))
        seen_exceptions.add(exc)

    return trace_chain
def _trace_data(cls, exc, trace):
    """Build the Rollbar 'trace' element (frames + exception) for one exception."""
    # Frames are ordered most recent call last, mirroring traceback output.
    frames = []
    for filename, lineno, method, code in traceback.extract_tb(trace):
        frames.append({
            'filename': filename,
            'lineno': lineno,
            'method': method,
            'code': code,
        })

    trace_data = {
        'frames': frames,
        'exception': {
            'class': getattr(cls, '__name__', cls.__class__.__name__),
            'message': text(exc),
        }
    }

    _add_locals_data(trace_data, (cls, exc, trace))

    return trace_data
def _report_message(message, level, request, extra_data, payload_data):
    """
    Called by report_message() wrapper

    Builds and sends the payload for a string message, attaching request,
    person, lambda-context and server data. Returns the payload uuid, or
    None when reporting was skipped/filtered.
    """
    if not _check_config():
        return

    # Event filters may mutate or veto (return False) the message.
    filtered_message = events.on_message(message,
                                         request=request,
                                         extra_data=extra_data,
                                         payload_data=payload_data,
                                         level=level)

    if filtered_message is False:
        return

    data = _build_base_data(request, level=level)

    # message
    data['body'] = {
        'message': {
            'body': filtered_message
        }
    }

    if extra_data:
        # Merge caller-supplied fields alongside 'body' in the message dict.
        # (Removed a redundant `extra_data = extra_data` self-assignment.)
        data['body']['message'].update(extra_data)

    # Unwrap framework proxies before inspecting the request.
    request = _get_actual_request(request)
    _add_request_data(data, request)
    _add_person_data(data, request)
    _add_lambda_context_data(data)
    data['server'] = _build_server_data()

    if payload_data:
        data = dict_merge(data, payload_data, silence_errors=True)

    payload = _build_payload(data)
    send_payload(payload, payload.get('access_token'))

    return data['uuid']
def _check_config():
    """Return True when pyrollbar is enabled and configured well enough to send."""
    if not SETTINGS.get('enabled'):
        log.info("pyrollbar: Not reporting because rollbar is disabled.")
        return False

    # The agent handler only writes to a local log file, so no token needed.
    if SETTINGS.get('handler') == 'agent':
        return True

    if SETTINGS.get('access_token'):
        return True

    log.warning("pyrollbar: No access_token provided. Please configure by calling rollbar.init() with your access token.")
    return False
def _build_base_data(request, level='error'):
    """Assemble the common fields present in every Rollbar payload."""
    python_version = '.'.join(str(part) for part in sys.version_info[:3])
    data = {
        'timestamp': int(time.time()),
        'environment': SETTINGS['environment'],
        'level': level,
        'language': 'python %s' % python_version,
        'notifier': SETTINGS['notifier'],
        'uuid': text(uuid.uuid4()),
    }

    if SETTINGS.get('code_version'):
        data['code_version'] = SETTINGS['code_version']

    # Give the application a chance to post-process the base payload.
    if BASE_DATA_HOOK:
        BASE_DATA_HOOK(request, data)

    return data
def _add_person_data(data, request):
    """Attach person data from the request, honouring the capture_* settings."""
    try:
        person_data = _build_person_data(request)
    except Exception as e:
        log.exception("Exception while building person data for Rollbar payload: %r", e)
        return

    if not person_data:
        return

    # Blank out PII fields unless capturing them is explicitly enabled.
    if not SETTINGS['capture_username'] and 'username' in person_data:
        person_data['username'] = None
    if not SETTINGS['capture_email'] and 'email' in person_data:
        person_data['email'] = None

    data['person'] = person_data
def _build_person_data(request):
    """
    Returns a dictionary describing the logged-in user using data from `request`.

    Try request.rollbar_person first, then 'user', then 'user_id'

    Each attribute may also be a callable; it is invoked to obtain the value.
    Returns None when no usable person information is found.
    """
    if hasattr(request, 'rollbar_person'):
        rollbar_person_prop = request.rollbar_person
        person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop
        if person and isinstance(person, dict):
            return person
        else:
            # rollbar_person was present but unusable; do not fall through.
            return None

    if StarletteRequest:
        # Starlette raises if .user is accessed without auth middleware;
        # hasuser() guards that access.
        from rollbar.contrib.starlette.requests import hasuser
    else:
        def hasuser(request): return True

    if hasuser(request) and hasattr(request, 'user'):
        user_prop = request.user
        user = user_prop() if callable(user_prop) else user_prop
        if not user:
            return None
        elif isinstance(user, dict):
            return user
        else:
            retval = {}
            if getattr(user, 'id', None):
                retval['id'] = text(user.id)
            elif getattr(user, 'user_id', None):
                retval['id'] = text(user.user_id)

            # id is required, so only include username/email if we have an id
            if retval.get('id'):
                username = getattr(user, 'username', None)
                email = getattr(user, 'email', None)
                retval.update({
                    'username': username,
                    'email': email
                })
            return retval

    if hasattr(request, 'user_id'):
        user_id_prop = request.user_id
        user_id = user_id_prop() if callable(user_id_prop) else user_id_prop
        if not user_id:
            return None
        return {'id': text(user_id)}
def _get_func_from_frame(frame):
func_name = inspect.getframeinfo(frame).function
caller = frame.f_back
if caller:
func = caller.f_locals.get(func_name,
caller.f_globals.get(func_name))
else:
func = None
return func
def _flatten_nested_lists(l):
ret = []
for x in l:
if isinstance(x, list):
ret.extend(_flatten_nested_lists(x))
else:
ret.append(x)
return ret
def _add_locals_data(trace_data, exc_info):
    """Walk the traceback in exc_info and attach argspec/varargs/kwargs/locals
    to the corresponding entries of trace_data['frames'], in place.

    Assumes trace_data['frames'] is parallel to the traceback chain.
    No-op unless locals capture is enabled in SETTINGS.
    """
    if not SETTINGS['locals']['enabled']:
        return
    frames = trace_data['frames']
    cur_tb = exc_info[2]
    frame_num = 0
    num_frames = len(frames)
    while cur_tb:
        cur_frame = frames[frame_num]
        tb_frame = cur_tb.tb_frame
        cur_tb = cur_tb.tb_next
        if not isinstance(tb_frame, types.FrameType):
            # this can happen if the traceback or frame is wrapped in some way,
            # for example by `ExceptionInfo` in
            # https://github.com/celery/billiard/blob/master/billiard/einfo.py
            log.warning('Traceback frame not a types.FrameType. Ignoring.')
            frame_num += 1
            continue
        # Create placeholders for argspec/varargspec/keywordspec/locals
        argspec = None
        varargspec = None
        keywordspec = None
        _locals = {}
        try:
            arginfo = inspect.getargvalues(tb_frame)
            # Optionally fill in locals for this frame
            if arginfo.locals and _check_add_locals(cur_frame, frame_num, num_frames):
                # Get all of the named args
                #
                # args can be a nested list of args in the case where there
                # are anonymous tuple args provided.
                # e.g. in Python 2 you can:
                #   def func((x, (a, b), z)):
                #       return x + a + b + z
                #
                #   func((1, (1, 2), 3))
                argspec = _flatten_nested_lists(arginfo.args)
                if arginfo.varargs is not None:
                    varargspec = arginfo.varargs
                    # Replace each vararg value with the redaction sentinel
                    # when scrub_varargs is configured.
                    if SETTINGS['locals']['scrub_varargs']:
                        temp_varargs = list(arginfo.locals[varargspec])
                        for i, arg in enumerate(temp_varargs):
                            temp_varargs[i] = REDACT_REF
                        arginfo.locals[varargspec] = tuple(temp_varargs)
                if arginfo.keywords is not None:
                    keywordspec = arginfo.keywords
                _locals.update(arginfo.locals.items())
        except Exception:
            log.exception('Error while extracting arguments from frame. Ignoring.')
        # Finally, serialize each arg/kwarg/local separately so that we only report
        # CircularReferences for each variable, instead of for the entire payload
        # as would be the case if we serialized that payload in one-shot.
        if argspec:
            cur_frame['argspec'] = argspec
        if varargspec:
            cur_frame['varargspec'] = varargspec
        if keywordspec:
            cur_frame['keywordspec'] = keywordspec
        if _locals:
            try:
                cur_frame['locals'] = dict((k, _serialize_frame_data(v)) for k, v in iteritems(_locals))
            except Exception:
                log.exception('Error while serializing frame data.')
        frame_num += 1
def _serialize_frame_data(data):
    """Scrub (redact) and then serialize one frame variable for the payload."""
    transformed = data
    for xform in (ScrubRedactTransform(), _serialize_transform):
        transformed = transforms.transform(transformed, xform)
    return transformed
def _add_lambda_context_data(data):
    """
    Attempts to add information from the lambda context if it exists.

    Reads and then clears the module-level _CURRENT_LAMBDA_CONTEXT, so
    each captured context is reported at most once.
    """
    global _CURRENT_LAMBDA_CONTEXT
    context = _CURRENT_LAMBDA_CONTEXT
    if context is None:
        return
    try:
        lambda_data = {
            'lambda': {
                'remaining_time_in_millis': context.get_remaining_time_in_millis(),
                'function_name': context.function_name,
                'function_version': context.function_version,
                'arn': context.invoked_function_arn,
                'request_id': context.aws_request_id,
            }
        }
        # Merge under 'custom' without clobbering existing custom data.
        if 'custom' in data:
            data['custom'] = dict_merge(data['custom'], lambda_data, silence_errors=True)
        else:
            data['custom'] = lambda_data
    except Exception as e:
        log.exception("Exception while adding lambda context data: %r", e)
    finally:
        # Single-use: clear the stored context even when merging failed.
        _CURRENT_LAMBDA_CONTEXT = None
def _add_request_data(data, request):
    """
    Attempts to build request data; if successful, sets the 'request' key on `data`.

    Errors are logged and otherwise ignored so payload construction never
    fails because of a problematic request object.
    """
    try:
        request_data = _build_request_data(request)
    except Exception as e:
        log.exception("Exception while building request_data for Rollbar payload: %r", e)
        return
    if not request_data:
        return
    _filter_ip(request_data, SETTINGS['capture_ip'])
    data['request'] = request_data
def _check_add_locals(frame, frame_num, total_frames):
"""
Returns True if we should record local variables for the given frame.
"""
# Include the last frames locals
# Include any frame locals that came from a file in the project's root
return any(((frame_num == total_frames - 1),
('root' in SETTINGS and (frame.get('filename') or '').lower().startswith((SETTINGS['root'] or '').lower()))))
def _get_actual_request(request):
    """Unwrap a werkzeug LocalProxy to the underlying request object.

    Returns None when the proxy is unbound (no request context); any
    non-proxy object is returned unchanged.
    """
    if not (WerkzeugLocalProxy and isinstance(request, WerkzeugLocalProxy)):
        return request
    try:
        return request._get_current_object()
    except RuntimeError:
        return None
def _build_request_data(request):
    """
    Returns a dictionary containing data from the request.
    Returns None when the request type is not recognized.
    """
    # Each branch is guarded on the framework's request class having been
    # successfully imported at module load time.
    # webob (pyramid)
    if WebobBaseRequest and isinstance(request, WebobBaseRequest):
        return _build_webob_request_data(request)
    # django
    if DjangoHttpRequest and isinstance(request, DjangoHttpRequest):
        return _build_django_request_data(request)
    # django rest framework
    if RestFrameworkRequest and isinstance(request, RestFrameworkRequest):
        return _build_django_request_data(request)
    # werkzeug (flask)
    if WerkzeugRequest and isinstance(request, WerkzeugRequest):
        return _build_werkzeug_request_data(request)
    # tornado
    if TornadoRequest and isinstance(request, TornadoRequest):
        return _build_tornado_request_data(request)
    # bottle
    if BottleRequest and isinstance(request, BottleRequest):
        return _build_bottle_request_data(request)
    # Sanic
    if SanicRequest and isinstance(request, SanicRequest):
        return _build_sanic_request_data(request)
    # falcon
    if FalconRequest and isinstance(request, FalconRequest):
        return _build_falcon_request_data(request)
    # Plain wsgi (should be last)
    # NOTE(review): two more checks follow this one; harmless since a dict
    # cannot be a Starlette/FastAPI request object, but the comment is stale.
    if isinstance(request, dict) and 'wsgi.version' in request:
        return _build_wsgi_request_data(request)
    # FastAPI (built on top of Starlette, so keep the order)
    if FastAPIRequest and isinstance(request, FastAPIRequest):
        return _build_fastapi_request_data(request)
    # Starlette (should be the last one for Starlette based frameworks)
    if StarletteRequest and isinstance(request, StarletteRequest):
        return _build_starlette_request_data(request)
    return None
def _build_webob_request_data(request):
    """Build the Rollbar request payload from a webob/pyramid request."""
    request_data = {
        'url': request.url,
        'GET': dict(request.GET),
        'user_ip': _extract_user_ip(request),
        'headers': dict(request.headers),
        'method': request.method,
    }
    # Accessing request.json raises when the body is not valid JSON; in
    # that case the payload simply omits 'json'. (Narrowed from a bare
    # `except:` so KeyboardInterrupt/SystemExit are not swallowed.)
    try:
        if request.json:
            request_data['json'] = request.json
    except Exception:
        pass
    # pyramid matchdict
    if getattr(request, 'matchdict', None):
        request_data['params'] = request.matchdict
    # workaround for webob bug when the request body contains binary data but has a text
    # content-type
    try:
        request_data['POST'] = dict(request.POST)
    except UnicodeDecodeError:
        request_data['body'] = request.body
    return request_data
def _extract_wsgi_headers(items):
headers = {}
for k, v in items:
if k.startswith('HTTP_'):
header_name = '-'.join(k[len('HTTP_'):].replace('_', ' ').title().split(' '))
headers[header_name] = v
return headers
def _build_django_request_data(request):
    """Build the Rollbar request payload from a django (or DRF) request."""
    try:
        url = request.get_raw_uri()
    except AttributeError:
        # get_raw_uri() is not available on all Django versions; fall back.
        url = request.build_absolute_uri()
    request_data = {
        'url': url,
        'method': request.method,
        'GET': dict(request.GET),
        'POST': dict(request.POST),
        'user_ip': _wsgi_extract_user_ip(request.META),
    }
    if SETTINGS['include_request_body']:
        # Reading the body can fail (e.g. stream already consumed); omit
        # it in that case. (Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit are not swallowed.)
        try:
            request_data['body'] = request.body
        except Exception:
            pass
    request_data['headers'] = _extract_wsgi_headers(request.META.items())
    return request_data
def _build_werkzeug_request_data(request):
    """Build the Rollbar request payload from a werkzeug/flask request."""
    request_data = {
        'url': request.url,
        'GET': dict(request.args),
        'POST': dict(request.form),
        'user_ip': _extract_user_ip(request),
        'headers': dict(request.headers),
        'method': request.method,
        'files_keys': list(request.files.keys()),
    }
    # Accessing request.json raises when the body is not valid JSON; in
    # that case the payload simply omits 'body'.
    try:
        if request.json:
            request_data['body'] = request.json
    except Exception:
        pass
    return request_data
def _build_tornado_request_data(request):
request_data = {
'url': request.full_url(),
'user_ip': request.remote_ip,
'headers': dict(request.headers),
'method': request.method,
'files_keys': request.files.keys(),
'start_time': getattr(request, '_start_time', None),
}
request_data[request.method] = request.arguments
return request_data
def _build_bottle_request_data(request):
request_data = {
'url': request.url,
'user_ip': request.remote_addr,
'headers': dict(request.headers),
'method': request.method,
'GET': dict(request.query)
}
if request.json:
try:
request_data['body'] = request.body.getvalue()
except:
pass
else:
request_data['POST'] = dict(request.forms)
return request_data
def _build_sanic_request_data(request):
request_data = {
'url': request.url,
'user_ip': request.remote_addr,
'headers': request.headers,
'method': request.method,
'GET': dict(request.args)
}
if request.json:
try:
request_data['body'] = request.json
except:
pass
else:
request_data['POST'] = request.form
return request_data
def _build_falcon_request_data(request):
    """Build the Rollbar request payload from a falcon request."""
    return {
        'url': request.url,
        # falcon exposes the raw WSGI environ as request.env.
        'user_ip': _wsgi_extract_user_ip(request.env),
        'headers': dict(request.headers),
        'method': request.method,
        'GET': dict(request.params),
        'context': dict(request.context),
    }
def _build_wsgi_request_data(request):
    """Build the Rollbar request payload from a raw WSGI environ dict."""
    request_data = {
        'url': wsgiref.util.request_uri(request),
        'user_ip': _wsgi_extract_user_ip(request),
        'method': request.get('REQUEST_METHOD'),
    }
    if 'QUERY_STRING' in request:
        request_data['GET'] = parse_qs(request['QUERY_STRING'], keep_blank_values=True)
        # Collapse single item arrays
        request_data['GET'] = dict((k, v[0] if len(v) == 1 else v) for k, v in request_data['GET'].items())
    request_data['headers'] = _extract_wsgi_headers(request.items())
    try:
        length = int(request.get('CONTENT_LENGTH', 0))
    except ValueError:
        length = 0
    # Renamed from `input` to avoid shadowing the builtin. Only read the
    # body when the stream position can be saved and restored afterwards.
    body_stream = request.get('wsgi.input')
    if length and body_stream and hasattr(body_stream, 'seek') and hasattr(body_stream, 'tell'):
        pos = body_stream.tell()
        body_stream.seek(0, 0)
        request_data['body'] = body_stream.read(length)
        body_stream.seek(pos, 0)
    return request_data
def _build_starlette_request_data(request):
    """Build the Rollbar request payload from a Starlette request.

    Only data the app has already parsed and cached on the request
    (the private ``_form``, ``_body`` and ``_json`` attributes) is used;
    nothing is awaited here.
    """
    from starlette.datastructures import UploadFile
    request_data = {
        'url': str(request.url),
        'GET': dict(request.query_params),
        'headers': dict(request.headers),
        'method': request.method,
        'user_ip': _starlette_extract_user_ip(request),
        'params': dict(request.path_params),
    }
    # Form data: report file uploads by filename only, never contents.
    if hasattr(request, '_form'):
        request_data['POST'] = {
            k: v.filename if isinstance(v, UploadFile) else v
            for k, v in request._form.items()
        }
        request_data['files_keys'] = [
            field.filename
            for field in request._form.values()
            if isinstance(field, UploadFile)
        ]
    if hasattr(request, '_body'):
        body = request._body.decode()
    else:
        body = None
    if body and SETTINGS['include_request_body']:
        request_data['body'] = body
    # Prefer already-parsed JSON; otherwise try to parse the raw body.
    if hasattr(request, '_json'):
        request_data['json'] = request._json
    elif body:
        try:
            request_data['json'] = json.loads(body)
        except json.JSONDecodeError:
            pass
    # Filter out empty values
    request_data = {k: v for k, v in request_data.items() if v}
    return request_data
def _build_fastapi_request_data(request):
    # FastAPI requests are Starlette requests, so reuse that builder.
    return _build_starlette_request_data(request)
def _filter_ip(request_data, capture_ip):
if 'user_ip' not in request_data or capture_ip == True:
return
current_ip = request_data['user_ip']
if not current_ip:
return
new_ip = current_ip
if not capture_ip:
new_ip = None
elif capture_ip == ANONYMIZE:
try:
if '.' in current_ip:
new_ip = '.'.join(current_ip.split('.')[0:3]) + '.0'
elif ':' in current_ip:
parts = current_ip.split(':')
if len(parts) > 2:
terminal = '0000:0000:0000:0000:0000'
new_ip = ':'.join(parts[0:3] + [terminal])
else:
new_ip = None
except:
new_ip = None
request_data['user_ip'] = new_ip
def _build_server_data():
    """
    Returns a dictionary containing information about the server environment.
    """
    server_data = {'host': socket.gethostname(), 'pid': os.getpid()}
    # argv does not always exist in embedded python environments
    argv = getattr(sys, 'argv', None)
    if argv:
        server_data['argv'] = argv
    # Propagate configured branch/root only when set to a truthy value.
    server_data.update(
        (key, SETTINGS[key]) for key in ('branch', 'root') if SETTINGS.get(key)
    )
    return server_data
def _transform(obj, key=None):
    """Run `obj` through every globally-configured transform, in order.
    `key` is the payload path tuple passed through to the transformer."""
    for transform in _transforms:
        obj = transforms.transform(obj, transform, key=key)
    return obj
def _build_payload(data):
    """
    Returns the full payload as a dict (serialization to a string happens
    separately in _serialize_payload).

    Note: transforms the values of `data` in place before wrapping it.
    """
    for k, v in iteritems(data):
        data[k] = _transform(v, key=(k,))
    payload = {
        'access_token': SETTINGS['access_token'],
        'data': data
    }
    return payload
def _serialize_payload(payload):
    # defaultJSONEncode handles objects the stock JSON encoder cannot.
    return json.dumps(payload, default=defaultJSONEncode)
def _send_payload(payload_str, access_token):
    """Synchronously POST one serialized item; also the thread target
    used by _send_payload_thread. Errors are logged, never raised."""
    try:
        _post_api('item/', payload_str, access_token=access_token)
    except Exception as e:
        log.exception('Exception while posting item %r', e)
    # Remove one entry from the _threads tracking queue (ignoring if
    # empty), marking the corresponding tracked thread as done.
    try:
        _threads.get_nowait()
        _threads.task_done()
    except queue.Empty:
        pass
def _send_payload_thread(payload_str, access_token):
    """Send one item on a background thread, tracked in _threads."""
    thread = threading.Thread(target=_send_payload, args=(payload_str, access_token))
    _threads.put(thread)
    thread.start()
def _send_payload_appengine(payload_str, access_token):
    """Send one item using App Engine's URL Fetch service; errors are logged."""
    try:
        _post_api_appengine('item/', payload_str, access_token=access_token)
    except Exception as e:
        log.exception('Exception while posting item %r', e)
def _post_api_appengine(path, payload_str, access_token=None):
    """POST `payload_str` to the Rollbar API via AppEngineFetch."""
    headers = {'Content-Type': 'application/json'}
    if access_token is not None:
        headers['X-Rollbar-Access-Token'] = access_token
    url = urljoin(SETTINGS['endpoint'], path)
    resp = AppEngineFetch(url,
                          method="POST",
                          payload=payload_str,
                          headers=headers,
                          allow_truncated=False,
                          deadline=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
                          validate_certificate=SETTINGS.get('verify_https', True))
    # Note: the response is parsed against the globally-configured token,
    # even when a per-call access_token was supplied.
    return _parse_response(path, SETTINGS['access_token'], payload_str, resp)
def _post_api(path, payload_str, access_token=None):
    """POST `payload_str` to the Rollbar API using the configured transport."""
    headers = {'Content-Type': 'application/json'}
    if access_token is not None:
        headers['X-Rollbar-Access-Token'] = access_token
    url = urljoin(SETTINGS['endpoint'], path)
    resp = transport.post(url,
                          data=payload_str,
                          headers=headers,
                          timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
                          verify=SETTINGS.get('verify_https', True),
                          proxy=SETTINGS.get('http_proxy'),
                          proxy_user=SETTINGS.get('http_proxy_user'),
                          proxy_password=SETTINGS.get('http_proxy_password'))
    # Note: the response is parsed against the globally-configured token,
    # even when a per-call access_token was supplied.
    return _parse_response(path, SETTINGS['access_token'], payload_str, resp)
def _get_api(path, access_token=None, endpoint=None, **params):
    """GET from the Rollbar API; returns a Result/PagedResult via _parse_response.
    Extra keyword arguments are sent as query parameters."""
    access_token = access_token or SETTINGS['access_token']
    url = urljoin(endpoint or SETTINGS['endpoint'], path)
    params['access_token'] = access_token
    resp = transport.get(url,
                         params=params,
                         verify=SETTINGS.get('verify_https', True),
                         proxy=SETTINGS.get('http_proxy'),
                         proxy_user=SETTINGS.get('http_proxy_user'),
                         proxy_password=SETTINGS.get('http_proxy_password'))
    return _parse_response(path, access_token, params, resp, endpoint=endpoint)
def _send_payload_tornado(payload_str, access_token):
    """Send one item using tornado's async HTTP client; errors are logged."""
    try:
        _post_api_tornado('item/', payload_str, access_token=access_token)
    except Exception as e:
        log.exception('Exception while posting item %r', e)
def _post_api_tornado(path, payload_str, access_token=None):
    """POST `payload_str` via TornadoAsyncHTTPClient; the response is
    handled asynchronously in the fetch callback."""
    headers = {'Content-Type': 'application/json'}
    if access_token is not None:
        headers['X-Rollbar-Access-Token'] = access_token
    else:
        access_token = SETTINGS['access_token']
    url = urljoin(SETTINGS['endpoint'], path)
    def post_tornado_cb(resp):
        # Adapt the tornado response into a requests.Response so the
        # shared _parse_response logic can be reused.
        r = requests.Response()
        r._content = resp.body
        r.status_code = resp.code
        r.headers.update(resp.headers)
        try:
            _parse_response(path, access_token, payload_str, r)
        except Exception as e:
            log.exception('Exception while posting item %r', e)
    TornadoAsyncHTTPClient().fetch(url,
                                   callback=post_tornado_cb,
                                   raise_error=False,
                                   body=payload_str,
                                   method='POST',
                                   connect_timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
                                   request_timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
def _send_payload_twisted(payload_str, access_token):
    """Send one item using twisted/treq; errors are logged."""
    try:
        _post_api_twisted('item/', payload_str, access_token=access_token)
    except Exception as e:
        log.exception('Exception while posting item %r', e)
def _post_api_twisted(path, payload_str, access_token=None):
    """POST `payload_str` via treq; the response is parsed in Deferred
    callbacks (the Deferred itself is not returned)."""
    def post_data_cb(data, resp):
        # Body arrives separately from the response object; attach it
        # before handing over to the shared parser.
        resp._content = data
        _parse_response(path, SETTINGS['access_token'], payload_str, resp)
    def post_cb(resp):
        # Adapt the twisted response into a requests.Response for reuse
        # of the shared parsing logic.
        r = requests.Response()
        r.status_code = resp.code
        r.headers.update(resp.headers.getAllRawHeaders())
        return treq.content(resp).addCallback(post_data_cb, r)
    # treq expects header values as lists.
    headers = {'Content-Type': ['application/json; charset=utf-8']}
    if access_token is not None:
        headers['X-Rollbar-Access-Token'] = [access_token]
    url = urljoin(SETTINGS['endpoint'], path)
    try:
        encoded_payload = payload_str.encode('utf8')
    except (UnicodeDecodeError, UnicodeEncodeError):
        encoded_payload = payload_str
    treq_client = treq.client.HTTPClient(Agent(reactor, contextFactory=VerifyHTTPS()))
    d = treq_client.post(url, encoded_payload, headers=headers,
                         timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
    d.addCallback(post_cb)
def _send_payload_httpx(payload_str, access_token):
    """Schedule an async POST of one item via the httpx-based transport."""
    # Imported lazily so the async machinery is only required when used.
    from rollbar.lib._async import call_later, _post_api_httpx
    try:
        call_later(_post_api_httpx('item/', payload_str,
                                   access_token=access_token))
    except Exception as e:
        log.exception('Exception while posting item %r', e)
def _send_payload_async(payload_str, access_token):
    """Async handler entry point; delegates to the httpx sender."""
    try:
        _send_payload_httpx(payload_str, access_token=access_token)
    except Exception as e:
        log.exception('Exception while posting item %r', e)
def _send_failsafe(message, uuid, host):
    """Send a minimal hand-built item when the original payload could not
    be delivered (e.g. rejected as too large).

    The original item's uuid and host are attached under 'custom' so it
    can be correlated with server logs.
    """
    body_message = ('Failsafe from pyrollbar: {0}. Original payload may be found '
                    'in your server logs by searching for the UUID.').format(message)
    data = {
        'level': 'error',
        'environment': SETTINGS['environment'],
        'body': {
            'message': {
                'body': body_message
            }
        },
        'notifier': SETTINGS['notifier'],
        'custom': {
            'orig_uuid': uuid,
            'orig_host': host
        },
        'failsafe': True,
        'internal': True,
    }
    payload = _build_payload(data)
    try:
        send_payload(payload, SETTINGS['access_token'])
    except Exception:
        log.exception('Rollbar: Error sending failsafe.')
def _parse_response(path, access_token, params, resp, endpoint=None):
    """Interpret an API response.

    `params` holds the request payload/params (used for logging and for
    the 413 failsafe). Returns a Result or PagedResult on success, None
    for dropped/failed items; raises ApiException on undecodable bodies
    and ApiError when the API reports an error.
    """
    if isinstance(resp, requests.Response):
        try:
            data = resp.text
        except Exception:
            data = resp.content
            log.error('resp.text is undefined, resp.content is %r', resp.content)
    else:
        data = resp.content
    # Track the previous status so that only the first in a run of 429s
    # is logged (unless configured to log them all).
    global _LAST_RESPONSE_STATUS
    last_response_was_429 = _LAST_RESPONSE_STATUS == 429
    _LAST_RESPONSE_STATUS = resp.status_code
    if resp.status_code == 429:
        if SETTINGS['log_all_rate_limited_items'] or not last_response_was_429:
            log.warning("Rollbar: over rate limit, data was dropped. Payload was: %r", params)
        return
    elif resp.status_code == 502:
        # NOTE(review): log.exception outside an except block records
        # "NoneType: None" as the traceback -- log.error may be intended.
        log.exception('Rollbar api returned a 502')
        return
    elif resp.status_code == 413:
        # Payload too large: try to report a small failsafe item instead,
        # tagged with the original uuid/host when they can be recovered.
        uuid = None
        host = None
        try:
            payload = json.loads(params)
            uuid = payload['data']['uuid']
            host = payload['data']['server']['host']
            log.error("Rollbar: request entity too large for UUID %r\n. Payload:\n%r", uuid, payload)
        except (TypeError, ValueError):
            log.exception('Unable to decode JSON for failsafe.')
        except KeyError:
            log.exception('Unable to find payload parameters for failsafe.')
        _send_failsafe('payload too large', uuid, host)
        # TODO: Should we return here?
    elif resp.status_code != 200:
        log.warning("Got unexpected status code from Rollbar api: %s\nResponse:\n%s",
                    resp.status_code, data)
        # TODO: Should we also return here?
    try:
        json_data = json.loads(data)
    except (TypeError, ValueError):
        log.exception('Could not decode Rollbar api response:\n%s', data)
        raise ApiException('Request to %s returned invalid JSON response', path)
    else:
        if json_data.get('err'):
            raise ApiError(json_data.get('message') or 'Unknown error')
        result = json_data.get('result', {})
        # A 'page' key marks a paginated listing response.
        if 'page' in result:
            return PagedResult(access_token, path, result['page'], params, result, endpoint=endpoint)
        else:
            return Result(access_token, path, params, result)
def _extract_user_ip_from_headers(request):
forwarded_for = request.headers.get('X-Forwarded-For')
if forwarded_for:
return forwarded_for
real_ip = request.headers.get('X-Real-Ip')
if real_ip:
return real_ip
return None
def _extract_user_ip(request):
    # Proxy headers take precedence over the socket-level remote address.
    return _extract_user_ip_from_headers(request) or request.remote_addr
def _wsgi_extract_user_ip(environ):
forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
if forwarded_for:
return forwarded_for
real_ip = environ.get('HTTP_X_REAL_IP')
if real_ip:
return real_ip
return environ['REMOTE_ADDR']
def _starlette_extract_user_ip(request):
    # NOTE(review): unlike the other extractors, this prefers the direct
    # client address over forwarded proxy headers -- confirm intended.
    return request.client.host or _extract_user_ip_from_headers(request)
| 31.154324 | 126 | 0.634544 |
a724b73e257ab7854ee3917187e5b5b3c4891657 | 4,427 | py | Python | caserec/recommenders/rating_prediction/random_rec.py | caserec2018/CaseRecommender | 1b63fe79aa26786c99f35e6b8f0a0dd9e591811b | [
"MIT"
] | 16 | 2018-09-19T07:29:24.000Z | 2022-03-30T07:32:36.000Z | caserec/recommenders/rating_prediction/random_rec.py | caserec2018/CaseRecommender | 1b63fe79aa26786c99f35e6b8f0a0dd9e591811b | [
"MIT"
] | 1 | 2018-09-10T17:43:56.000Z | 2018-09-10T17:43:56.000Z | caserec/recommenders/rating_prediction/random_rec.py | caserec2018/CaseRecommender | 1b63fe79aa26786c99f35e6b8f0a0dd9e591811b | [
"MIT"
] | 2 | 2019-07-11T10:13:24.000Z | 2020-03-12T10:09:39.000Z | # coding=utf-8
""""
Random Collaborative Filtering Recommender
[Rating Prediction (Rating)]
Random predicts a user’s ratings based on random distributions of rates.
"""
# © 2018. Case Recommender (MIT License)
import numpy as np
from caserec.recommenders.rating_prediction.base_rating_prediction import BaseRatingPrediction
from caserec.utils.extra_functions import timed
__author__ = 'removed for double-blind-review'
class RandomRec(BaseRatingPrediction):
def __init__(self, train_file, test_file, uniform=True, output_file=None, sep='\t', output_sep='\t', random_seed=None):
"""
Random recommendation for Rating Prediction
This algorithm predicts ratings for each user-item
Usage::
>> RandomRec(train, test).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param uniform: Indicates whether the ratings are drawn from a uniform sample or not
if False, the ratings are drawn from a normal distribution with the same mean and standard deviation
as the feedback provided in train
:type uniform: bool, default True
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
:type random_seed: int, default None
"""
super(RandomRec, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
sep=sep, output_sep=output_sep)
if random_seed is not None:
np.random.seed(random_seed)
self.uniform = uniform
self.recommender_name = 'Random Recommender'
def predict(self):
if not self.uniform:
feedbacks = []
for user in self.train_set["users"]:
for item in self.train_set['items_seen_by_user'][user]:
feedbacks.append(self.train_set['feedback'][user][item])
std = np.std(feedbacks)
if self.test_file is not None:
for user in self.test_set['users']:
for item in self.test_set['feedback'][user]:
if self.uniform:
feedback_value = np.random.uniform(self.train_set['min_value'],self.train_set['max_value'])
else:
feedback_value = np.random.normal(self.train_set['mean_value'], std)
self.predictions.append((user, item, feedback_value))
else:
raise NotImplemented
def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
"""
Extends compute method from BaseRatingPrediction. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation measures
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
:type table_sep: str, default '\t'
"""
super(RandomRec, self).compute(verbose=verbose)
if verbose:
print("prediction_time:: %4f sec" % timed(self.predict))
print('\n')
else:
self.predict()
self.write_predictions()
if self.test_file is not None:
self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep) | 35.701613 | 124 | 0.626835 |
68c54692a4fd74feda292830452318117449c8c5 | 2,110 | py | Python | src/android/toga_android/widgets/label.py | simonw/toga | 8b52479c5d9960c5f3af960b5837ecc467c0bc95 | [
"BSD-3-Clause"
] | 3 | 2020-12-09T02:13:55.000Z | 2021-02-18T00:41:36.000Z | src/android/toga_android/widgets/label.py | simonw/toga | 8b52479c5d9960c5f3af960b5837ecc467c0bc95 | [
"BSD-3-Clause"
] | 1 | 2021-05-23T04:04:58.000Z | 2021-05-25T22:08:14.000Z | src/android/toga_android/widgets/label.py | simonw/toga | 8b52479c5d9960c5f3af960b5837ecc467c0bc95 | [
"BSD-3-Clause"
] | null | null | null | from travertino.size import at_least
from ..libs.android_widgets import Gravity, TextView, TypedValue, View__MeasureSpec
from .base import Widget, align
class Label(Widget):
def create(self):
self.native = TextView(self._native_activity)
self.native.setSingleLine()
def set_text(self, value):
self.native.setText(value)
def set_font(self, font):
if font:
font_impl = font.bind(self.interface.factory)
self.native.setTextSize(TypedValue.COMPLEX_UNIT_SP, font_impl.get_size())
self.native.setTypeface(font_impl.get_typeface(), font_impl.get_style())
def rehint(self):
# Refuse to rehint an Android TextView if it has no LayoutParams yet.
# Calling measure() on an Android TextView w/o LayoutParams raises NullPointerException.
if self.native.getLayoutParams() is None:
return
# Ask the Android TextView first for the height it would use in its
# wildest dreams. This is the height of one line of text.
self.native.measure(
View__MeasureSpec.UNSPECIFIED, View__MeasureSpec.UNSPECIFIED
)
one_line_height = self.native.getMeasuredHeight()
self.interface.intrinsic.height = one_line_height
# Ask it how wide it would be if it had to be just one line tall.
self.native.measure(
View__MeasureSpec.UNSPECIFIED,
View__MeasureSpec.makeMeasureSpec(
one_line_height, View__MeasureSpec.AT_MOST
),
)
self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())
def set_alignment(self, value):
# Refuse to set alignment if create() has not been called.
if self.native is None:
return
# Refuse to set alignment if widget has no container.
# On Android, calling setGravity() when the widget has no LayoutParams
# results in a NullPointerException.
if self.native.getLayoutParams() is None:
return
self.native.setGravity(Gravity.CENTER_VERTICAL | align(value))
| 40.576923 | 96 | 0.670616 |
123405d96fd9b955610f3c27e0bb1fd85ccd05a3 | 796 | py | Python | examples/job.py | aerospaceresearch/the-ground-segment-api | 4dac2d7a744b64f9061868cab02e3a594ded1b5b | [
"MIT"
] | 1 | 2021-06-08T03:44:28.000Z | 2021-06-08T03:44:28.000Z | examples/job.py | aerospaceresearch/the-ground-segment-api | 4dac2d7a744b64f9061868cab02e3a594ded1b5b | [
"MIT"
] | null | null | null | examples/job.py | aerospaceresearch/the-ground-segment-api | 4dac2d7a744b64f9061868cab02e3a594ded1b5b | [
"MIT"
] | 1 | 2021-05-15T02:27:54.000Z | 2021-05-15T02:27:54.000Z | import requests
## requirements: python (tested windows, linux)
## modify:
NODE_UUID = "f5dba253-c0ea-475e-9f8b-3733168dc42e"
NODE_TOKEN = "1e4478a5b0f7d622a5d4e60ee694fc7188c649d5"
url = 'http://localhost:8023/api/v1/nodes/' + NODE_UUID + '/job/'
headers = {'Authorization': 'Token ' + NODE_TOKEN}
payload = {'start': '2018-08-04 18:00:00+0200',
'stop': '2018-08-04 20:00:01+0200',
'description': 'no description',
'task': [
{
'mode': 'rx',
'frequency': '145825000.0',
'samplerate': '2048000',
'gain': '0',
'chunksize': '300000000',
}]}
r = requests.post(url, json=payload, headers=headers)
assert r.status_code == 201
print(r.text)
| 30.615385 | 65 | 0.55402 |
ac4e7f60eec2190e6b08cf941cbd432caae10545 | 2,701 | py | Python | colearning/tournament.py | A-Malone/genetic-neural-nets | e8284cc820e6f67a52b4064d7e7320eb29629791 | [
"MIT"
] | null | null | null | colearning/tournament.py | A-Malone/genetic-neural-nets | e8284cc820e6f67a52b4064d7e7320eb29629791 | [
"MIT"
] | null | null | null | colearning/tournament.py | A-Malone/genetic-neural-nets | e8284cc820e6f67a52b4064d7e7320eb29629791 | [
"MIT"
] | null | null | null | import itertools
import numpy as np
from colearning.game import CoopGame
class TeamTournament(object):
"""
Takes a set of populations of neural nets and has them compete in a
tournament to determine fitness
"""
results = None
def __init__(self, game_format):
self.format = game_format
self.game = CoopGame(
render=False,
max_moves=1000
)
def setup(self, populations, players):
self.rounds = 0
self.numGames = 0
self.players = players
self.populations = populations
if(self.results == None):
self.results = np.zeros((populations.shape[0], populations.shape[1], 2), dtype=np.float32)
else:
self.results.fill(0.0)
def _generateMatchups(self, min_matches=10):
"""
Generate a list of matchups, ensures all players get at least min_match
matches
"""
shape = self.populations.shape
teams = np.zeros((shape[0], self.format[1]))
for t_index in range(shape[0]):
for p_index in range(shape[1]):
for match in range(min_matches):
#Assemble teams for each player
for t in range(shape[0]):
teams[t, :] = np.random.choice(range(shape[1]), self.format[1], replace=False)
#Ensure that player is in the game!
if(p_index not in teams[t_index,:]):
teams[t_index, 0] = p_index
yield teams
def _oneGame(self):
""" play one game between teams of agents"""
self.numGames += 1
return self.game.play(self.players, self.results)
def play_tournament(self, populations, players, repeat=1):
""" Play agents against one another """
self.setup(populations, players)
for dummy in range(repeat):
self.rounds += 1
for teams in self._generateMatchups():
# Prepare the players
for t in range(teams.shape[0]):
for p in range(teams.shape[1]):
self.players[t*teams.shape[1] + p].initialize_player(t,teams[t][p])
self.players[t*teams.shape[1] + p].set_params(self.populations[t,teams[t][p],:])
self._oneGame()
# Retrieve the players
for t in range(teams.shape[0]):
for p in range(teams.shape[1]):
self.populations[t,teams[t][p],:] = self.players[t*teams.shape[1] + p].get_params()
self.results[:,:,0] /= self.results[:,:,1]
return self.results[:,:,0]
| 32.154762 | 107 | 0.548686 |
6ebb6f79526585cde2ac3c8a1ac33ed0d6bebeb9 | 1,144 | py | Python | Wikipediapagescrapper/section.py | VisheshBansal/Wikipedia-scraper | 3146c6f79fa82d5596bc9d6c300fb3caeb3403d9 | [
"MIT"
] | null | null | null | Wikipediapagescrapper/section.py | VisheshBansal/Wikipedia-scraper | 3146c6f79fa82d5596bc9d6c300fb3caeb3403d9 | [
"MIT"
] | null | null | null | Wikipediapagescrapper/section.py | VisheshBansal/Wikipedia-scraper | 3146c6f79fa82d5596bc9d6c300fb3caeb3403d9 | [
"MIT"
] | null | null | null |
class Section ():
    """A node in a document tree.

    Each section owns a body of text plus an ordered collection of named
    child sections, so a document can be navigated top-down by name.
    """
    def __init__(self, name, parent, headinglevel):
        """Create an empty section.

        Args:
            name: title of this section.
            parent: enclosing Section, or None for the document root.
            headinglevel: heading depth of this section in the document.
        """
        self.name = name
        self.parent = parent
        self.headinglevel = headinglevel
        self.text = ""           # body text accumulated by addText()
        self.section_names = []  # child names in insertion order
        self.sections = dict()   # child name -> Section
    def __str__(self):
        # Compact summary: own text plus the names of direct children.
        return str({"text": self.text, "sections": self.section_names})
    def addText(self, text):
        """Append ``text`` to this section's body.

        Raises:
            TypeError: if ``text`` is not a string.
        """
        if isinstance(text, str):
            self.text += text
        else:
            raise TypeError("Only Strings are allowed")
    def addSection(self, sect):
        """Register ``sect`` as a child of this section.

        Uses isinstance() so subclasses of Section are accepted too; the
        previous ``type(sect) == type(self)`` check wrongly rejected them.

        Raises:
            TypeError: if ``sect`` is not a Section.
        """
        if isinstance(sect, Section):
            self.sections.update({sect.name: sect})
            self.section_names += [sect.name]
        else:
            raise TypeError("You can only add other sections as a child to another section.")
    def getSection(self, name):
        """Return the child section called ``name``, or -1 if absent."""
        return self.sections.get(name, -1)
| 33.647059 | 108 | 0.601399 |
fc45ca845742ad126d7631c5b845b06133f0da98 | 235 | py | Python | cpdb/activity_log/constants.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 25 | 2018-07-20T22:31:40.000Z | 2021-07-15T16:58:41.000Z | cpdb/activity_log/constants.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 13 | 2018-06-18T23:08:47.000Z | 2022-02-10T07:38:25.000Z | cpdb/activity_log/constants.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 6 | 2018-05-17T21:59:43.000Z | 2020-11-17T00:30:26.000Z | ADD_TAG_TO_DOCUMENT = 'ADD_TAG_TO_DOCUMENT'
# Action-type identifier recorded on activity-log entries (its counterpart
# ADD_TAG_TO_DOCUMENT is defined immediately above).
REMOVE_TAG_FROM_DOCUMENT = 'REMOVE_TAG_FROM_DOCUMENT'
# Django-style choices: (stored value, human-readable label) pairs.
ACTION_TYPE_CHOICES = (
    (ADD_TAG_TO_DOCUMENT, 'Add tag to document'),
    (REMOVE_TAG_FROM_DOCUMENT, 'Remove tag from document'),
)
| 29.375 | 59 | 0.787234 |
421897fec254625a332f4f4c98d9da6db2c517dd | 2,493 | py | Python | diagram_autobuild/tool.py | momijiame/diagram-autobuild | 740ec9521dcb012484dc7155784a34f088fa667e | [
"Apache-2.0"
] | 7 | 2015-12-12T02:38:55.000Z | 2021-05-08T20:15:28.000Z | diagram_autobuild/tool.py | momijiame/diagram-autobuild | 740ec9521dcb012484dc7155784a34f088fa667e | [
"Apache-2.0"
] | null | null | null | diagram_autobuild/tool.py | momijiame/diagram-autobuild | 740ec9521dcb012484dc7155784a34f088fa667e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import abc
import os
from future.utils import with_metaclass
class BuildCommand(with_metaclass(abc.ABCMeta)):
    """Abstract base for diagram build commands.

    A subclass knows how to render one source file with a specific external
    tool; ``str(instance)`` yields the full shell command line to run.
    (``with_metaclass`` keeps the ABC working on both Python 2 and 3.)
    """
    def __init__(self, src_file, dst_dir, opts=None):
        """Store the source path, output directory, and extra CLI options."""
        self.src_file = src_file
        self.dst_dir = dst_dir
        self.opts = opts or ''
    @abc.abstractproperty
    def destination(self):
        """Path of the file the tool will produce."""
        pass
    @abc.abstractmethod
    def __str__(self):
        """Shell command line that performs the build."""
        pass
class GraphvizBuild(BuildCommand):
    """Render a Graphviz ``dot`` source file to PNG."""
    @property
    def destination(self):
        """Output image path inside the destination directory."""
        return os.path.join(self.dst_dir, 'out.png')
    def __str__(self):
        # Keyword substitution means the argument order here is free even
        # though the rendered command shape is fixed by the template.
        template = 'dot {opts} -T png -o {destination} {src_file}'
        return template.format(
            opts=self.opts,
            destination=self.destination,
            src_file=self.src_file,
        )
class ERAlchemyBuild(BuildCommand):
    """Render an entity-relationship diagram with the ``eralchemy`` tool."""
    @property
    def destination(self):
        """Output image path inside the destination directory."""
        return os.path.join(self.dst_dir, 'out.png')
    def __str__(self):
        # Build and return the shell command in one step.
        template = 'eralchemy {opts} -i {src_file} -o {destination}'
        return template.format(
            opts=self.opts,
            src_file=self.src_file,
            destination=self.destination,
        )
class BlockdiagSeriesBuild(BuildCommand):
    """Shared implementation for the blockdiag family of tools.

    The family members (blockdiag, nwdiag, seqdiag, actdiag) accept the
    same CLI shape and only differ in the executable name, which each
    concrete subclass supplies via ``command``.
    """
    @abc.abstractproperty
    def command(self):
        """Name of the executable to invoke."""
        pass
    @property
    def destination(self):
        # Every tool in this family emits a PNG into the destination dir.
        return os.path.join(self.dst_dir, 'out.png')
    def __str__(self):
        command = '{command} {opts} -o {destination} {src_file}'.format(
            command=self.command,
            destination=self.destination,
            src_file=self.src_file,
            opts=self.opts,
        )
        return command
class BlockdiagBuild(BlockdiagSeriesBuild):
    """Render block diagrams with the ``blockdiag`` executable."""
    @property
    def command(self):
        return 'blockdiag'
class NwdiagBuild(BlockdiagSeriesBuild):
    """Render network diagrams with the ``nwdiag`` executable."""
    @property
    def command(self):
        return 'nwdiag'
class SeqdiagBuild(BlockdiagSeriesBuild):
    """Render sequence diagrams with the ``seqdiag`` executable."""
    @property
    def command(self):
        return 'seqdiag'
class ActdiagBuild(BlockdiagSeriesBuild):
    """Render activity diagrams with the ``actdiag`` executable."""
    @property
    def command(self):
        return 'actdiag'
# Registry: tool name (as accepted by get_command) -> BuildCommand subclass.
_MAPPINGS = {
    'graphviz': GraphvizBuild,
    'blockdiag': BlockdiagBuild,
    'nwdiag': NwdiagBuild,
    'seqdiag': SeqdiagBuild,
    'actdiag': ActdiagBuild,
    'eralchemy': ERAlchemyBuild,
}
def get_tools():
    """Return the names of the registered diagram tools.

    Note: on Python 3 this is a ``dict_keys`` view, not a list.
    """
    return _MAPPINGS.keys()
def get_command(tool_name, src_file, dst_dir, opts=None):
    """Instantiate the build command registered for ``tool_name``.

    Args:
        tool_name: key of the tool in the registry (see ``get_tools``).
        src_file: path of the diagram source file.
        dst_dir: directory that receives the rendered image.
        opts: extra command-line options forwarded to the tool (optional).

    Returns:
        BuildCommand: object whose ``str()`` is the shell command to run.

    Raises:
        ValueError: if ``tool_name`` is not a registered tool.
    """
    class_ = _MAPPINGS.get(tool_name)
    if class_ is None:
        # Fail with a clear message instead of the cryptic
        # "'NoneType' object is not callable" TypeError.
        raise ValueError('unknown tool: {0}'.format(tool_name))
    instance = class_(src_file, dst_dir, opts)
    return instance
| 20.434426 | 75 | 0.63217 |
9f31c5ce4982303bdadbb41c4c30c411d5b3376c | 3,933 | py | Python | experiments/exp_adult_2and4_PGRR_gr.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | experiments/exp_adult_2and4_PGRR_gr.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | experiments/exp_adult_2and4_PGRR_gr.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | import group_table4PGRR as freq
import linecache
import random
def query_on_adult_dim1(tablePath,tableInterval,queryPath,trueOraclePath,aggregation="count"):
    """Evaluate 500 range queries against the PGRR-grouped adult table.

    Each query is answered 10 times with a randomly chosen oracle and the
    answers are averaged, then compared against the true oracle values.

    Args:
        tablePath: path of the PGRR result table read by group_table().
        tableInterval: grouping interval passed to group_table().
        queryPath: file whose first line eval()s to a list of 500 ranges.
        trueOraclePath: file whose first line eval()s to the oracle dict.
        aggregation: "count" or "sum" -- which aggregate to evaluate.

    Returns:
        (answers, true answers, mean relative error, mean average error).
    """
    #adult_1 range query 2th & 4th
    # NOTE(review): eval() on file contents -- only safe for trusted input.
    queriesStr=linecache.getline(queryPath,1)
    queries=eval(queriesStr)
    answer=[0]*500
    trueOracleStr=linecache.getline(trueOraclePath,1)
    trueOracle= eval(trueOracleStr)
    # n: total population size summed over the whole true oracle.
    n=sum([sum(trueOracle[k].values()) for k in trueOracle.keys()])
    TrueAnswer=[0]*500
    relativeError = 0
    averageError=0
    for i in range(1,501):
        # Repeat each query 10 times with a random oracle, then average.
        for _ in range(10):
            kthoracle = random.randint(1, 500)
            table = freq.group_table(tablePath, tableInterval, k_th_oracle=kthoracle)
            if aggregation=="count":
                count_value=0
                true_count_value=0
                # print(i)
                # print(queries[i-1])
                for k in range(queries[i - 1][0], queries[i - 1][1] + 1):
                    for j in table.keys():
                        for entry in table[j]:
                            if entry[0]==k:
                                count_value+=1
                        true_count_value += trueOracle[j][k]
                # Scale the sampled count (32390 samples -- TODO confirm
                # this magic sample size) up to the population total n.
                count_value=(count_value/32390)*n
                answer[i - 1] += count_value
                TrueAnswer[i - 1] = true_count_value
                # answer.append(count_value)
                # TrueAnswer.append(true_count_value)
                # relativeError+= (abs(count_value - true_count_value))/max(0.001*n,float(true_count_value))
                # averageError+=count_value - true_count_value
            elif aggregation=="sum":
                sum_value = 0
                true_sum_value = 0
                for k in range(queries[i-1][0], queries[i-1][1] + 1):
                    for j in table.keys():
                        for entry in table[j]:
                            if entry[0]==k:
                                sum_value +=j
                        true_sum_value += j*trueOracle[j][k]
                sum_value = (sum_value / 32390) * n
                answer[i - 1] += sum_value
                TrueAnswer[i - 1] = true_sum_value
                # answer.append(sum_value)
                # TrueAnswer.append(true_sum_value)
                # averageError += sum_value - true_sum_value
                # relativeError += (abs(sum_value - true_sum_value)) / max(0.001*n,float(true_sum_value))
        answer[i - 1] /= 10.0
        # NOTE(review): unlike the commented-out code above this relative
        # error is signed (no abs()) -- confirm that is intentional.
        relativeError += (answer[i - 1] - TrueAnswer[i - 1]) / max(0.001 * n, float(TrueAnswer[i - 1]))
        averageError += answer[i - 1] - TrueAnswer[i - 1]
    return answer,TrueAnswer,relativeError/500,averageError/500
if __name__ == '__main__':
    # Input locations for the adult_1 PGRR experiment.
    tablePath="experiments//adult_1_gr_PGRR_results.txt"
    tableInterval=23
    queryPath="experiments//adult_query_2_4_12.txt"
    trueOraclePath="adult/adult4.txt"
    #
    # file_1 = open("experiments//adult_1_gr_PGRR_results.txt", "r")
    # a = file_1.readline()
    # print(a)
    # Run and persist the COUNT-aggregation experiment.
    ans,trueAns,relativeError,averageError=query_on_adult_dim1(tablePath,tableInterval,queryPath,trueOraclePath,aggregation="count")
    print(relativeError)
    with open("experiments//final_adult_1_count_gr_PGRR.txt","w+") as f:
        f.write(str(ans)+"\n")
        f.write("true ans" + str(trueAns) + "\n")
        f.write("relativeError:"+str(relativeError)+"\n")
        f.write("averageError:" + str(averageError) + "\n")
    # Run and persist the SUM-aggregation experiment.
    ans,trueAns,relativeError,averageError=query_on_adult_dim1(tablePath,tableInterval,queryPath,trueOraclePath,aggregation="sum")
    print(relativeError)
    with open("experiments//final_adult_1_sum_gr_PGRR.txt","w+") as f:
        f.write(str(ans)+"\n")
        f.write("true ans" + str(trueAns) + "\n")
        f.write("relativeError:"+str(relativeError)+"\n")
        f.write("averageError:"+str(averageError)+"\n")
| 41.840426 | 133 | 0.570557 |
2d4f7cbe53be4b73db3793b4dbee8b2d37a110ce | 8,435 | py | Python | https_everywhere/_fixme.py | KOLANICH-tools/https-everywhere-py | b25084a2edd875689e4da8876cde611dc7f61a7c | [
"Apache-2.0"
] | 7 | 2020-02-26T00:40:44.000Z | 2022-02-11T01:24:55.000Z | https_everywhere/_fixme.py | KOLANICH-tools/https-everywhere-py | b25084a2edd875689e4da8876cde611dc7f61a7c | [
"Apache-2.0"
] | 39 | 2020-02-25T18:04:22.000Z | 2021-04-06T09:57:57.000Z | https_everywhere/_fixme.py | KOLANICH-tools/https-everywhere-py | b25084a2edd875689e4da8876cde611dc7f61a7c | [
"Apache-2.0"
] | 3 | 2021-04-06T09:50:42.000Z | 2021-12-11T22:51:27.000Z | # These cause basic assumptions needed very early in analysis to fail,
# or bad data not yet detected and rejected.
# Entries here may also need an entry in _FIXME_INCORRECT_TEST_URLS below
# to account for the rejection.
_FIXME_REJECT_PATTERNS = [
r"^http://demo\.neobookings\.com/",
r"^http://www\.svenskaspel\.se/",
# https://github.com/EFForg/https-everywhere/issues/18886 :
r"^http://support\.pickaweb\.co\.uk/(assets/)",
# PR submitted
r"^http://((?:a[lt]|s|sca)\d*|www)\.listrakbi\.com/",
r"^http://(www\.)?partners\.peer1\.ca/",
r"^http://(?:dashboard(?:-cdn)?|g-pixel|pixel|segment-pixel)\.invitemedia\.com/",
# merged
r"^http://ww2\.epeat\.com/",
r"^http://cdn\.therepublic\.com/",
]
_FIXME_BROKEN_REGEX_MATCHES = [
"affili.de",
"www.belgium.indymedia.org",
"m.aljazeera.com",
"atms00.alicdn.com",
"i06.c.aliimg.com",
"allianz-fuercybersicherheit.de",
"statics0.beauteprivee.fr",
"support.bulletproofexec.com",
"wwwimage0.cbsstatic.com",
"cdn0.colocationamerica.com",
"www.login.dtcc.edu",
"ejunkie.com",
"e-rewards.com",
"member.eurexchange.com",
"4exhale.org",
"na0.www.gartner.com",
"blog.girlscouts.org",
"lh0.google.*", # fixme
"nardikt.org",
".instellaplatform.com",
"m.w.kuruc.org",
"search.microsoft.com",
"static.millenniumseating.com",
"watchdog.mycomputer.com",
"a0.ec-images.myspacecdn.com",
"a0.mzstatic.com",
"my.netline.com",
"img.e-nls.com",
"x.discover.oceaniacruises.com",
"www.data.phishtank.com",
"p00.qhimg.com",
"webassetsk.scea.com",
"s00.sinaimg.cn",
"mosr.sk",
"sofurryfiles.com",
"asset-g.soupcdn.com",
"cdn00.sure-assist.com",
"www.svenskaspel.se",
"mail.telecom.sk",
"s4.thejournal.ie",
"my.wpi.edu",
"stec-t*.xhcdn.com", # fixme
"www.*.yandex.st", # fixme
"s4.jrnl.ie",
"b2.raptrcdn.com",
"admin.neobookings.com",
"webmail.vipserv.org",
"ak0.polyvoreimg.com",
"cdn.fora.tv",
"cdn.vbseo.com",
"edge.alluremedia.com",
"secure.trustedreviews.com",
"icmail.net",
"www.myftp.utechsoft.com",
"research-store.com",
"app.sirportly.com",
"ec7.images-amazon.com",
"help.npo.nl",
"css.palcdn.com",
"legacy.pgi.com",
"my.btwifi.co.uk",
"orders.gigenetcloud.com",
"owa.space2u.com",
"payment-solutions.entropay.com",
"static.vce.com",
"itpol.dk",
"orionmagazine.com",
# fix merged, not distributed
"citymail.com",
"mvg-mobile.de",
"inchinashop.com",
"www.whispergifts",
# already merged?
"css.bzimages.com",
"cdn0.spiegel.de",
]
_FIXME_LEADING_STAR_GLOBS = [
"*-d.openxenterprise.com",
"*sfx.hosted.exlibrisgroup.com",
"*-async.olark.com",
]
_FIXME_ODD_STARS = [
"go*.imgsmail.ru",
"img*.imgsmail.ru",
"my*.imgsmail.ru",
"secure*.inmotionhosting.com",
"www.secure*.inmotionhosting.com",
"whm*.louhi.net",
"*.r*.cf*.rackcdn.com",
"*.ssl.cf*.rackcdn.com",
"*.la*.salesforceliveagent.com",
"di*.shoppingshadow.com",
"img*.www.spoki.lv",
"mysql*.tal.de",
"clk*.tradedoubler.com",
"imp*.tradedoubler.com",
"star*.tradedoubler.com",
"web.facilities!*.nss.udel.edu", # fixme
"vg*.met.vgwort.de",
"ssl-vg*.met.vgwort.de",
# PR sent
"stec-t*.xhcdn.com",
"p*.qhimg.com",
"webassets*.scea.com",
"s*.sinaimg.cn",
"cdn*.sure-assist.com",
# '(i|j|m|si|t)*.dpfile.com', # see below
]
_FIXME_VERY_BAD_EXPANSION = set(["*.dpfile.com"])
_FIXME_MULTIPLE_RULEST_PREFIXES = [
".actionkit.com",
".bbb.org",
".blogspot.com",
".bloxcms.com",
".cachefly.net",
".force.com",
".google.com",
".icann.org",
".lumension.com",
".maine.edu",
".my.com",
".ou.edu",
".pbworks.com",
".photobucket.com",
".sec.s-msft.com",
".topcoder.com",
".utwente.nl",
".uva.nl",
".vo.msecnd.net",
".which.co.uk",
".wpengine.com",
]
# These should all end with .
_FIXME_SUBDOMAIN_PREFIXES = (
r"([^.]+)\.",
r"([A-Za-z]+\.)?",
r"([.]+)\.",
r"([\w]+)\.",
r"(?:\w+\.)?",
r"([\w-]+)\.",
r"([\w-]+\.)?",
r"([\w-]+\.)",
r"([\w\-])\.", # rsys.net, matches single char, probably broken
r"([\w\-]+)\.",
r"([\w.-]+)\.", # Allows a.b.
r"([^@:/]*)\.", # adverticum.net
r"([^/:@]+\.)?", # atlantic.net
r"([^/:@]+)\.", # 16personalities; single char
r"([^/:@\.]+)\.", # cibc.com, stevens.edu
r"([^/:@\.]+\.)?", # dkb.de
r"([^/:@]*)\.", # pszaf.hu
r"([^/:@]+)?\.", # axa-winterthur
r"([^/@:\.]+)\.", # alliedmods
r"(?:\d\.)?([^@:/\.]+)\.", # cachefly
r"([^@:/]+)?", # domaintank
r"([^\.]+)\.", # iapplicants
r"(?:\w\w\.){0,4}", # List.ru
r"(?:\w\w\.){4}", # Mail.ru
r"(?:[\w-]+\.)?", # ace.advertising.com
r"(?:[\\w-]+\.)?", # uberspace.de
r"\S+\.", # baidu, bdimg.com
r"(?:[\w-]+\.)", # kintera.com
r"([\w\.-]+\.)?",
r"\w{3}\.", # sitemeter.com
r"\w\d+\.", # adspeed.biz
r"(\w+)\.",
r"(\w+\.)?",
# r'(-\w\w\d)?\.', # givex
r"([\w-])\.", # hitbox
# very broad patterns
r"(\d{3}-\w{3}-\d{3})\.", # Mktoresp
)
assert len(set(_FIXME_SUBDOMAIN_PREFIXES)) == len(_FIXME_SUBDOMAIN_PREFIXES)
# These should all begin with .
_FIXME_SUBDOMAIN_SUFFIXES = (r"\.([^/:@]+)",) # dbs
_FIXME_EXTRA_REPLACEMENTS = [
# foo.*.bar
(r"\.\w\w\.", r"\.~~"), # kernel.org
(r"\.\w+\.", r"\.~~"), # emailsrvr.com
# Other
(r"(eu|[\da-f]{12})", "~"), # Yottaa.net
(r"(\d\d)\.", "~~"), # xg4ken.com
(r"([a-z][a-z]\.)?", "~~"), # wikifur
(
r"(?:\w+\.cdn-ec|cdn-static(?:-\d\d)?|",
r"(?:~~cdn-ec|cdn-static(?:-~)?|",
), # viddler.com
(r"vg\d\d\.met", "vg~~met"), # vgwort.de
(r"|imp\w\w|", r"|imp~|"), # tradedoubler.com
(r"|clk\w\w|", r"|clk~|"), # tradedoubler.com
(r"|star\w\w|", r"|star~|"), # tradedoubler.com
(r"|\w\w)\.thesims3", r"|~)\.thesims3"), # thesims3.com country
(r"|mysql\d\d|", r"|mysql~|"), # tal.de
(r"(\w+)-d\.", r"~-d\."), # openxenterprise.com
(r"(?:img\d\w\.)?www", r"(?:img~~)?www"), # spoki.lv
(r"di(\d-?\d?)\.", r"di~~"), # shoppingshadow.com
(r"asp-(\w\w)\.", r"asp-~~"), # secure-zone.net
(r"(\w\w|", r"(~|"), # OpenSUSE
(r"\w\w\.khronos", r"~~khronos"), # khronos
(r"|hal-\w+|", r"|hal-~|"), # archives-ouvertes.fr
(r"((?:[\w-]+|", r"((?:~|"), # bbb
(r"(\w+\.(", r"(~~("), # byteark
(r"(?:\w+sfx\.|sfx)", r"(?:~sfx\.|sfx)"), # exlibrisgroup.com
(r"([\w-]+)(?:\.secure)?\.", r"~(?:\.secure)?\."), # ipcdigital
(
r"|go\d*|hi-tech|img\d*|limg|minigames|my\d+|proxy|\w+\.radar)",
r"|go~|hi-tech|img~|limg|minigames|my~|proxy|~~radar)",
), # mailimg
(r"secure\d{1,3}|", r"secure~|"), # inmotionhosting
(r"|\w+\.i|", r"|~~i|"), #
(r"|whm\d\d)\.", r"|whm~)\."),
(r"|\w+-async|", r"|~-async|"),
(r"(?:r\d+|ssl)\.cf(\d)\.", r"(?:r~|ssl)\.cf~~"), # rackcdn
(r"wwws(-\w\w\d)?\.", r"wwws(-*)?\."), # givex
(r"(?:\.\w+)?|xs|xs2010)", r"(?:\.~)?|xs|xs2010)"),
(r"|[\w-]+\.", "|~~"), # uberspace
(r"|\w+\.", "!~~"), # udel.edu
(r"|\w\.", "|~~"), # wso2.com
(r"la(\w{3})\.", "la~~"), # salesforceliveagent.com
(r"(\w+\.api|ssl-[\w\-]+)\.", r"(~~api\.|ssl-~~)"),
(r"\.((com?\.)?\w{2,3})", r"\.((com?\.)?~)"), # google
# PR sent
(r"p\d+\.qhimg", "p~~qhimg"), # qhimg
(r"webassets\w\.scea", "webassets~~scea"), # scea
(r"s(\d+)\.sinaimg", "s~~sinaimg"), # sinaimg
(r"(\w)\.asset\.soup\.io", r"~~asset\.soup\.io"),
(r"asset-(\w)\.soupcdn", "asset-~~soupcdn"),
(r"cdn\d+\.sure-assist", "cdn~~sure-assist"),
(r"(i|j|m|si|t)(\d+)\.", "(i|j|m|si|t)~~"),
(r"(\w\w\.|www\.)?bitstamp", r"(~~|www\.)?bitstamp"),
]
_FIXME_INCORRECT_TEST_URLS = [
"http://canadapost.ca/",
"http://cibc.com/",
# https://github.com/EFForg/https-everywhere/issues/18891 :
"http://hub.rother.gov.uk/RotherPortal/ServiceForms/BrownBinForGardenWasteFrm.aspx",
# https://github.com/EFForg/https-everywhere/issues/18893 :
"http://es.about.aegeanair.com/Css/fonts/Roboto-Bold/Roboto-Bold.ttf",
"http://ru.about.aegeanair.com/Css/fonts/Roboto-Bold/Roboto-Bold.ttf",
"http://www.belgium.indymedia.org/",
]
| 31.356877 | 88 | 0.51476 |
2d66caec973809697408016455d552929baaa9bd | 485 | py | Python | tests/commands/ddtrace_run_sitecustomize.py | tophatmonocle/dd-trace-py | 7db12f1c398c07cd5baf91c571aed672dbb6496d | [
"BSD-3-Clause"
] | null | null | null | tests/commands/ddtrace_run_sitecustomize.py | tophatmonocle/dd-trace-py | 7db12f1c398c07cd5baf91c571aed672dbb6496d | [
"BSD-3-Clause"
] | null | null | null | tests/commands/ddtrace_run_sitecustomize.py | tophatmonocle/dd-trace-py | 7db12f1c398c07cd5baf91c571aed672dbb6496d | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import sys
from ddtrace import tracer
from nose.tools import ok_
if __name__ == '__main__':
# detect if `-S` is used
suppress = len(sys.argv) == 2 and sys.argv[1] is '-S'
if suppress:
ok_('sitecustomize' not in sys.modules)
else:
ok_('sitecustomize' in sys.modules)
# ensure the right `sitecustomize` will be imported
import sitecustomize
ok_(sitecustomize.CORRECT_IMPORT)
print('Test success')
| 24.25 | 57 | 0.686598 |
503bc236889177b4febbc9dc5c8cb8c0eaf2392e | 872 | py | Python | tests/unit/modules/test_uwsgi.py | velom/salt | f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e | [
"Apache-2.0"
] | 1 | 2021-04-05T19:46:35.000Z | 2021-04-05T19:46:35.000Z | tests/unit/modules/test_uwsgi.py | dv-trading/salt | f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_uwsgi.py | dv-trading/salt | f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, Mock, patch
# Import salt libs
from salt.modules import uwsgi
uwsgi.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
@patch('salt.utils.which', Mock(return_value='/usr/bin/uwsgi'))
class UwsgiTestCase(TestCase):
def test_uwsgi_stats(self):
socket = "127.0.0.1:5050"
mock = MagicMock(return_value='{"a": 1, "b": 2}')
with patch.dict(uwsgi.__salt__, {'cmd.run': mock}):
result = uwsgi.stats(socket)
mock.assert_called_once_with(
['uwsgi', '--connect-and-read', '{0}'.format(socket)],
python_shell=False)
self.assertEqual(result, {'a': 1, 'b': 2})
| 30.068966 | 78 | 0.649083 |
61903541e53b6307eec4862cb9a17350f221877f | 2,244 | py | Python | py 1/Assignment1/src/Solver.py | JamesSatherley/Semester1-2021 | 55e2815530b8ca941670a07b0668d06c3cabd5d9 | [
"MIT"
] | null | null | null | py 1/Assignment1/src/Solver.py | JamesSatherley/Semester1-2021 | 55e2815530b8ca941670a07b0668d06c3cabd5d9 | [
"MIT"
] | null | null | null | py 1/Assignment1/src/Solver.py | JamesSatherley/Semester1-2021 | 55e2815530b8ca941670a07b0668d06c3cabd5d9 | [
"MIT"
] | null | null | null | import pygame, Sudoku_IO
def solve(snapshot, screen):
    """Solve the sudoku ``snapshot`` by recursive backtracking.

    The board is redrawn on ``screen`` (after a short delay) at every
    step so the search can be watched live.  Returns True once a complete
    assignment is reached, False when this branch is a dead end.
    """
    pygame.time.delay(20)
    Sudoku_IO.displayPuzzle(snapshot, screen)
    pygame.display.flip()
    if isComplete(snapshot): # tests if tree is complete (base case)
        return True
    else:
        clone = snapshot.clone()
        unsolved = clone.unsolvedCells() # if tree is not complete, will clone snapshot and set the current cell to the first item
        to_use = unsolved[0]
        unsolved.remove(to_use)
        clone.countPossibles(unsolved) # also calls all needed functions for pruning
        checkSingletons(clone, to_use)
        # Try every digit for the chosen cell, recursing on each legal one.
        for i in range(1,10):
            if checkConsistency(clone, to_use, i): # if the cell is consistent clone is set to the number in range 1,10
                clone.setCellVal(to_use.getRow(), to_use.getCol(), i)
                if solve(clone, screen): # if tree is complete will return, otherwise will return false after setting to_use back to 0
                    return True
                # Backtrack: clear the trial value before the next digit.
                clone.setCellVal(to_use.getRow(), to_use.getCol(), 0)
        return False
def checkSingletons(snapshot, x):
    """Assign every unsolved cell (other than ``x``) that has exactly one
    remaining candidate value.

    ``x`` is the cell the solver is currently branching on; it is excluded
    so its value is not set here and confuse the ongoing search.
    """
    pending = snapshot.unsolvedCells()
    pending.remove(x)
    for candidate in pending:
        if len(candidate.getCombos()) == 1:
            candidate.setVal(candidate.getCombos()[0])
def checkConsistency(snipshot, cell, num):
    """Return True when ``num`` may legally be placed in ``cell``.

    A placement is legal when ``num`` does not already appear anywhere in
    the cell's column, row, or block.
    """
    col = cell.getCol()
    row = cell.getRow()
    if any(snipshot.cellsByCol(col)[i].getVal() == num for i in range(9)):
        return False
    if any(snipshot.cellsByRow(row)[i].getVal() == num for i in range(9)):
        return False
    if any(snipshot.cellsByBlock(row, col)[i].getVal() == num for i in range(9)):
        return False
    return True
def isComplete(snapshot):
    """Return True when the puzzle has no unsolved cells left."""
    return not snapshot.unsolvedCells()
0f9b044e30bde9609fe7009231ae22952067b23f | 440 | py | Python | cooar/utilities/dl_utils.py | chrisWhyTea/cooar-cli | 32c28acf94150c2788ec928150e7967d7d190553 | [
"BSD-3-Clause"
] | null | null | null | cooar/utilities/dl_utils.py | chrisWhyTea/cooar-cli | 32c28acf94150c2788ec928150e7967d7d190553 | [
"BSD-3-Clause"
] | 23 | 2020-02-02T21:36:12.000Z | 2020-06-17T09:16:41.000Z | cooar/utilities/dl_utils.py | chrisWhyTea/cooar | 32c28acf94150c2788ec928150e7967d7d190553 | [
"BSD-3-Clause"
] | null | null | null | from cooar.file import File
def download(session, file: File, **kwargs):
    """Stream ``file`` from its URL onto disk, then move it into place.

    The payload is written to ``file.absolute_file_download_path`` first
    and only renamed to ``file.absolute_file_path`` once the transfer is
    done, so a half-finished download never sits at the final path.
    """
    with session.get(url=file.url, stream=True) as response:
        # Fail on HTTP errors before anything touches the filesystem.
        response.raise_for_status()
        with open(file.absolute_file_download_path, "wb") as target:
            for block in response.iter_content(chunk_size=8192):
                # Keep-alive chunks arrive as empty bytes; skip them.
                if block:
                    target.write(block)
                    target.flush()
    file.absolute_file_download_path.rename(file.absolute_file_path)
| 33.846154 | 68 | 0.620455 |
f2a501578e32609f0fd9d62c52e71a5567c5511f | 8,633 | py | Python | docs/source/conf.py | cropsinsilico/yggdrasil | 466a4f77605a6f461d57ef7b165a6db7eec4d1fd | [
"BSD-3-Clause"
] | 22 | 2019-02-05T15:20:07.000Z | 2022-02-25T09:00:40.000Z | docs/source/conf.py | cropsinsilico/yggdrasil | 466a4f77605a6f461d57ef7b165a6db7eec4d1fd | [
"BSD-3-Clause"
] | 48 | 2019-02-15T20:41:24.000Z | 2022-03-16T20:52:02.000Z | docs/source/conf.py | cropsinsilico/yggdrasil | 466a4f77605a6f461d57ef7b165a6db7eec4d1fd | [
"BSD-3-Clause"
] | 16 | 2019-04-27T03:36:40.000Z | 2021-12-02T09:47:06.000Z | # -*- coding: utf-8 -*-
#
# yggdrasil documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 6 12:03:29 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import glob
import sys
import sphinx_rtd_theme
import yggdrasil
from yggdrasil import tools, languages, components
# sys.path.insert(0, os.path.abspath('.'))
doxydir = os.path.join(os.path.abspath('../'), "doxy", "xml")
rootdir = os.path.abspath('../../')
srcdir = os.path.join(rootdir, "yggdrasil")
sys.path.append(doxydir)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinxarg.ext',
'breathe',
]
# Collect the non-Python model-language source files (paths relative to the
# yggdrasil package root) so breathe/doxygen can document them.
relative_files = []
for lang in tools.get_supported_lang():
    if lang == 'python':
        # Python sources are documented via autodoc, not doxygen.
        continue
    try:
        lang_dir = languages.get_language_dir(lang)
        lang_ext = components.import_component('model', lang).get_language_ext()
    except ValueError:
        # Languages without a registered directory/extension are skipped.
        continue
    for iext in lang_ext:
        # Match sources one and two directory levels deep.
        relative_files += [os.path.relpath(f, start=srcdir) for f in
                           glob.glob(os.path.join(lang_dir, '*' + iext))]
        relative_files += [os.path.relpath(f, start=srcdir) for f in
                           glob.glob(os.path.join(lang_dir, '*', '*' + iext))]
# Breathe (Sphinx <-> doxygen bridge) configuration.
breathe_projects = {"yggdrasil": doxydir}
breathe_default_project = "yggdrasil"
breathe_projects_source = {"yggdrasil": (srcdir, relative_files)}
breathe_implementation_filename_extensions = ['.c', '.cc', '.cpp', '.m', '.f90']
filter_method = os.path.abspath(os.path.join(doxydir, '..', 'filter.py'))
filter_source_method = os.path.abspath(os.path.join(doxydir, '..',
'filter_source.py'))
breathe_doxygen_config_options = {
'EXTENSION_MAPPING': 'm=C++ f90=C++',
# 'EXTENSION_MAPPING': 'm=C++ f90=FortranFree',
'FILTER_PATTERNS': '*.m=%s *.f90=%s' % (filter_method, filter_method),
# 'FILTER_PATTERNS': '*.m=%s' % (filter_method),
'FILTER_SOURCE_FILES': 'YES',
'FILTER_SOURCE_PATTERNS': '*.m=%s *.f90=%s' % (filter_source_method,
filter_source_method),
# 'FILTER_SOURCE_PATTERNS': '*.m=%s' % (filter_source_method),
'SOURCE_BROWSER': 'YES',
'ENABLE_PREPROCESSING': 'YES',
'PREDEFINED': 'DOXYGEN_SHOULD_SKIP_THIS'}
# 'EXCLUDE': ('../../yggdrasil/rapidjson '
# '../../yggdrasil/languages/C/serialize/base64.h '
# '../../yggdrasil/examples')
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'yggdrasil'
copyright = u'2017, Meagan Lang, David Raila'
author = u'Meagan Lang, David Raila'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = yggdrasil.__version__
# The short X.Y version.
version = release.split('+')[0]
# Substitutions
# .. _Docs: http://yggdrasil.readthedocs.io/en/latest/
rst_epilog = """
.. _Docs: https://cropsinsilico.github.io/yggdrasil/
.. _Meagan Lang: langmm.astro@gmail.com
.. |yggdrasil| replace:: yggdrasil
"""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_context = {
# 'css_files': [
# '_static/theme_overrides.css', # override wide tables in RTD theme
# ],
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'yggdrasildoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'yggdrasil.tex', u'ygg\\_interface Documentation',
u'Meagan Lang, David Raila', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'yggdrasil', u'yggdrasil Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'yggdrasil', u'yggdrasil Documentation',
author, 'yggdrasil', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 32.454887 | 80 | 0.675431 |
4518151d70491d1bbce48444de8831ad575686cc | 21,751 | py | Python | world/fashion/models.py | nirvana-game/arxcode | 4d22c1be79727df9fde3ea5b73e9a91083dea66f | [
"MIT"
] | null | null | null | world/fashion/models.py | nirvana-game/arxcode | 4d22c1be79727df9fde3ea5b73e9a91083dea66f | [
"MIT"
] | null | null | null | world/fashion/models.py | nirvana-game/arxcode | 4d22c1be79727df9fde3ea5b73e9a91083dea66f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The Fashion app is for letting players have a mechanical benefit for fashion. Without
a strong mechanical benefit for fashion, players who don't care about it will tend
to protest spending money on it. Fashion is the primary mechanic for organizations
gaining prestige, which influences their economic power.
"""
from __future__ import unicode_literals
from django.db import models
from evennia.utils.idmapper.models import SharedMemoryModel
from world.fashion.exceptions import FashionError
from typeclasses.exceptions import EquipError
from typeclasses.scripts.combat.combat_settings import CombatError
class FashionCommonMixins(SharedMemoryModel):
    """Abstract parent with common fashion methods.

    Supplies the fame-to-"buzz level" mapping plus the message builders
    shared by modeled items and outfits.
    """
    class Meta:
        # Mixin only: no database table of its own.
        abstract = True
    # Adjectives for buzz levels 0-6, ordered least to most impressive.
    BUZZ_TYPES = (
        "little",
        "modest",
        "decent",
        "exceptional",
        "momentous",
        "legendary",
        "world-shaking",
    )
    # Color markup paired index-for-index with BUZZ_TYPES; "{n" is the
    # reset code (presumably Evennia-style color tags -- TODO confirm).
    COLOR_TYPES = ("{n", "{355", "{453", "{542", "{530", "{520", "{510")
    # Emits use this %s order: fashion model, item/outfit, org, buzz_type
    EMIT_TYPES = (
        "Despite efforts made by %s, modeling %s on behalf of %s attracts %s notice.",
        "When %s models %s on behalf of %s, it gains %s attention from admiring onlookers.",
        "%s models %s on behalf of %s, gaining a %s number of admirers and significant compliments.",
        "With talented modeling, %s displays %s around Arx, garnering flattering conversation and "
        + "murmurs throughout the city about the fine choices made by %s for sponsoring someone with such %s taste.",
        "As %s models %s around Arx, word spreads like wildfire over the city about their incredible fashion "
        + "choices, attracting attention even beyond the city and gaining %s %s acclaim as well.",
        "It's more than just fashion when %s shows off %s around Arx. Resonating with the people of Arvum, it "
        + "becomes a statement on contemporary culture and the profound effect that %s has upon it - a %s event "
        + "they'll be discussing for years to come.",
        "Across Arvum and beyond, all the world hears about %s modeling %s. History will remember the abstruse "
        + "impact that %s had upon fashion itself, on this %s occasion.",
    )
    # One (adjective, color, emit template) triple per buzz level 0-6.
    BUZZIES = list(zip(BUZZ_TYPES, COLOR_TYPES, EMIT_TYPES))
    @staticmethod
    def granulate_fame(fame):
        """Map a raw fame amount onto a buzz level from 0 to 6.

        Buckets are powers of ten: <=100 is level 0 and every additional
        order of magnitude adds one level, capping at 6 above ten million.
        A falsy ``fame`` (None/0) is treated as zero fame.
        """
        fame = fame or 0
        buzz_level = 0
        if fame <= 100:
            pass
        elif fame <= 1000:
            buzz_level = 1
        elif fame <= 10000:
            buzz_level = 2
        elif fame <= 100000:
            buzz_level = 3
        elif fame <= 1000000:
            buzz_level = 4
        elif fame <= 10000000:
            buzz_level = 5
        else:
            buzz_level = 6
        return buzz_level
    def get_buzz_word(self, fame):
        """Returns a colorized buzz term based on the amount of fame."""
        buzzy = self.BUZZIES[self.granulate_fame(fame)]
        # Adjective wrapped in its level color, then reset with "{n".
        return buzzy[1] + buzzy[0] + "{n"
    def get_model_msg(self, fashion_model, org, date, fame):
        """
        Returns a string summary about the modeling of an outfit or item,
        how much buzz it garnered, and the date it was modeled.
        Args:
            fashion_model: PlayerOrNpc object
            org: organization
            date: datetime
            fame: integer
        """
        # Level 3 and above is exciting enough for an exclamation point.
        punctuation = "." if self.granulate_fame(fame) < 3 else "!"
        msg = "Modeled by {315%s{n for {125%s{n, " % (fashion_model, org)
        msg += "generating %s buzz " % self.get_buzz_word(fame)
        msg += "on %s%s" % (date.strftime("%Y/%m/%d"), punctuation)
        return msg
    @classmethod
    def get_emit_msg(cls, fashion_model, thing, org, fame):
        """
        Returns the string a room sees when the fashionista models their item
        or outfit. Higher impacts notify staff as well.
        Args:
            fashion_model: player/account object
            thing: an item or an outfit
            org: organization
            fame: integer
        String interpolation is specific order, eg: "Despite efforts made by
        <name>, modeling <item> on behalf of <org> attracts <adjective> notice."
        Order: fashion model, item/outfit, org, buzz_type (based on fame)
        """
        buzz_level = cls.granulate_fame(fame)
        buzzy = cls.BUZZIES[buzz_level]
        color = buzzy[1]
        diva = str(fashion_model)
        # Quote the item name and restore the buzz color after the quote.
        thing = "'%s%s'" % (str(thing), color)
        msg = color + "[Fashion] "
        msg += buzzy[2] % (diva, thing, org, buzzy[0])
        if buzz_level > 4:
            # Legendary/world-shaking buzz also notifies staff.
            from server.utils.arx_utils import inform_staff
            inform_staff(msg)
        return msg
class FashionOutfit(FashionCommonMixins):
    """
    A collection of wearable and wieldable items that all fit on a character
    at the same time.

    Fame for an outfit is the sum of its snapshots' fame, capped at FAME_CAP.
    Several expensive lookups (fame, display strings, item querysets) are
    cached on the instance and cleared via invalidate_outfit_caches().
    """

    FAME_CAP = 5000000
    name = models.CharField(max_length=80, db_index=True)
    owner = models.ForeignKey(
        "dominion.PlayerOrNpc", related_name="fashion_outfits", on_delete=models.CASCADE
    )
    fashion_items = models.ManyToManyField(
        "objects.ObjectDB", through="ModusOrnamenta", blank=True
    )
    db_date_created = models.DateTimeField(auto_now_add=True)
    archived = models.BooleanField(default=False)

    # TODO: foreignkey to @cal events!

    def __str__(self):
        return str(self.name)

    def invalidate_outfit_caches(self):
        """Clears every cached property so it is recomputed on next access."""
        del self.fame
        del self.model_info
        del self.list_display
        del self.modeled
        del self.weapons
        del self.apparel

    def check_existence(self):
        """Deletes this outfit if none of its items exist."""
        if not self.fashion_items.exists():
            self.owner_character.msg(
                "Nothing remains of the outfit formerly known as '%s'." % self
            )
            self.delete()

    def delete(self, *args, **kwargs):
        """Invalidates snapshot caches on surviving items before deleting."""
        for item in self.fashion_items.all():
            if item.pk:
                item.invalidate_snapshots_cache()
        super(FashionOutfit, self).delete(*args, **kwargs)

    def add_fashion_item(self, item, slot=None):
        """Creates the through-model for what we assume is a valid item."""
        slot = slot if slot else item.slot
        ModusOrnamenta.objects.create(fashion_outfit=self, fashion_item=item, slot=slot)

    def wear(self):
        """Tries to wear our apparel and wield our weapons. Raises EquipErrors."""
        try:
            self.owner_character.undress()
        except CombatError as err:
            raise EquipError(str(err) + "\nUndress failed. " + self.equipped_msg)
        except EquipError:
            # Nothing was equipped to remove; carry on and equip the outfit.
            pass
        wield_err = wear_err = ""
        try:
            # "primary" slots are actively wielded weapons.
            to_wield = list(
                self.weapons.filter(
                    modusornamenta__slot__istartswith="primary"
                ).distinct()
            )
            if to_wield:
                self.owner_character.equip_or_remove("wield", to_wield)
        except EquipError as err:
            wield_err = str(err)
        try:
            # Non-primary weapons are worn sheathed along with the apparel.
            to_wear = list(self.apparel)
            sheathed = list(
                self.weapons.exclude(
                    modusornamenta__slot__istartswith="primary"
                ).distinct()
            )
            to_wear.extend(sheathed)
            if to_wear:
                self.owner_character.equip_or_remove("wear", to_wear)
        except EquipError as err:
            wear_err = str(err)
        if wield_err or wear_err:
            # Combine whichever stage(s) failed with the final equip status.
            msg = "\n".join(
                [ob for ob in (wield_err, wear_err, self.equipped_msg) if ob]
            )
            raise EquipError(msg)
        else:
            self.owner_character.msg(self.equipped_msg)

    def remove(self):
        """Tries to remove all our fashion_items. Raises EquipErrors."""
        try:
            self.owner_character.equip_or_remove(
                "remove", list(self.fashion_items.all())
            )
        except (CombatError, EquipError) as err:
            raise EquipError(err)

    def check_outfit_fashion_ready(self):
        """
        Checks each item for model-readiness. If any are not, an exception is
        raised showing reasons for each. User may repeat the command to model
        the remaining items, if any exist. Returns a set of valid items.
        """
        valid_items = set(self.fashion_items.all())
        skipped_items = set()
        skipped_msg = "|wPieces of this outfit cannot be modeled:|n"
        for item in valid_items:
            try:
                item.check_fashion_ready()
            except FashionError as err:
                skipped_msg += "\n- " + str(err)
                skipped_items.add(item)
        valid_items = valid_items.difference(skipped_items)
        # Only prompt once per outfit: repeating the command is confirmation.
        if skipped_items and self.owner.player.ndb.outfit_model_prompt != str(self):
            skipped_msg += "\n|y"
            if valid_items:
                self.owner.player.ndb.outfit_model_prompt = str(self)
                skipped_msg += "Repeat command to model the %d remaining item(s)" % len(
                    valid_items
                )
            else:
                skipped_msg += "No valid items remain! Try modeling a different outfit"
            raise FashionError(skipped_msg + ".|n")
        self.owner.player.ndb.outfit_model_prompt = None
        return valid_items

    def model_outfit_for_fashion(self, org):
        """
        Modeling Spine. If there are items in this outfit that can be modeled &
        action points are paid, then snapshots are created for each and a sum of
        all their fame is returned.
        """
        from world.fashion.mixins import FashionableMixins

        if self.modeled:
            raise FashionError("%s has already been modeled." % self)
        if not self.is_carried or not self.is_equipped:
            raise FashionError("Outfit must be equipped before trying to model it.")
        valid_items = self.check_outfit_fashion_ready()
        ap_cost = len(valid_items) * FashionableMixins.fashion_ap_cost
        if not self.owner.player.pay_action_points(ap_cost):
            raise FashionError(
                "It costs %d AP to model %s; you do not have enough energy."
                % (ap_cost, self)
            )
        outfit_fame = 0
        for item in valid_items:
            # Each item records its own snapshot and reports the fame earned.
            outfit_fame += item.model_for_fashion(self.owner.player, org, outfit=self)
        return min(outfit_fame, self.FAME_CAP)

    @property
    def table_display(self):
        """A non-cached table of outfit items/locations, then model-info string."""
        from server.utils.prettytable import PrettyTable

        table = PrettyTable((str(self), "Slot", "Location"))
        for mo in self.modusornamenta_set.all():
            table.add_row(
                (str(mo.fashion_item), mo.slot or "", str(mo.fashion_item.location))
            )
        msg = str(table)
        if self.modeled:
            msg += "\n" + self.model_info
        # TODO: Include existing event info :)
        # TODO: Include existing fashion judge votes & comments!
        return msg

    @property
    def list_display(self):
        """A cached string simply listing outfit components & model info."""
        if not hasattr(self, "_cached_outfit_display"):
            from server.utils.arx_utils import list_to_string

            msg = "|w[|n" + str(self) + "|w]|n"
            weapons = list(self.weapons)
            apparel = list(self.apparel)
            if weapons:
                msg += " weapons: " + list_to_string(weapons)
            if apparel:
                msg += "\nattire: " + list_to_string(apparel)
            if self.modeled:
                msg += "\n" + self.model_info
            # TODO: Include existing event info :)
            # TODO: Include existing fashion judge votes & comments!
            self._cached_outfit_display = msg
        return self._cached_outfit_display

    @list_display.deleter
    def list_display(self):
        if hasattr(self, "_cached_outfit_display"):
            del self._cached_outfit_display

    @property
    def model_info(self):
        """Cached display of our first snapshot, or None if never modeled."""
        if self.modeled:
            if not hasattr(self, "_cached_model_info"):
                self._cached_model_info = self.fashion_snapshots.first().display
            return self._cached_model_info

    @model_info.deleter
    def model_info(self):
        if hasattr(self, "_cached_model_info"):
            del self._cached_model_info

    @property
    def modeled(self):
        """Cached bool: whether any snapshot exists for this outfit."""
        if not hasattr(self, "_cached_model_bool"):
            self._cached_model_bool = bool(self.fashion_snapshots.exists())
        return self._cached_model_bool

    @modeled.deleter
    def modeled(self):
        if hasattr(self, "_cached_model_bool"):
            del self._cached_model_bool

    @property
    def fame(self):
        """Cached sum of our snapshots' fame, or None if never modeled."""
        if self.modeled:
            if not hasattr(self, "_cached_fame"):
                self._cached_fame = sum(
                    [ob.fame for ob in self.fashion_snapshots.all()]
                )
            return self._cached_fame

    @fame.deleter
    def fame(self):
        if hasattr(self, "_cached_fame"):
            del self._cached_fame

    @property
    def appraisal_or_buzz(self):
        """Buzz word once modeled, otherwise the appraised worth."""
        if self.modeled:
            return self.buzz
        else:
            return self.appraisal

    @property
    def appraisal(self):
        """Returns string sum worth of outfit's unmodeled items."""
        worth = 0
        for item in self.fashion_items.all():
            if not item.modeled_by:
                worth += item.item_worth
        # BUGFIX: the original returned str("{:,}".format(worth) or
        # "cannot model"), but "{:,}".format(0) is the truthy string "0",
        # so the "cannot model" fallback was unreachable. Test worth itself.
        return "{:,}".format(worth) if worth else "cannot model"

    @property
    def buzz(self):
        """Returns colorized string: the term for outfit's fame impact."""
        buzz = ""
        if self.modeled:
            buzz = self.get_buzz_word(self.fame)
        return buzz

    @property
    def weapons(self):
        """
        Cached queryset of this outfit's wielded/sheathed weapons, but not
        decorative weapons.
        """
        if not hasattr(self, "_cached_weapons"):
            self._cached_weapons = self.fashion_items.filter(
                modusornamenta__slot__iendswith="weapon"
            ).distinct()
        return self._cached_weapons

    @weapons.deleter
    def weapons(self):
        if hasattr(self, "_cached_weapons"):
            del self._cached_weapons

    @property
    def apparel(self):
        """Cached queryset of this outfit's worn items. Not sheathed weapons."""
        if not hasattr(self, "_cached_apparel"):
            self._cached_apparel = self.fashion_items.exclude(
                modusornamenta__slot__iendswith="weapon"
            ).distinct()
        return self._cached_apparel

    @apparel.deleter
    def apparel(self):
        if hasattr(self, "_cached_apparel"):
            del self._cached_apparel

    @property
    def is_carried(self):
        """Truthy if all outfit items are located on a character."""
        return not self.fashion_items.exclude(db_location=self.owner_character).exists()

    @property
    def is_equipped(self):
        """Truthy if all outfit items are currently equipped by owner character."""
        for item in self.fashion_items.all():
            loc = item.location if item.is_equipped else None
            if loc != self.owner_character:
                return False
        return True

    @property
    def equipped_msg(self):
        """Returns a string saying whether or not outfit is equipped."""
        indicator = "successfully" if self.is_equipped else "not"
        return "Your outfit '%s' is %s equipped." % (self, indicator)

    @property
    def owner_character(self):
        # Convenience shortcut to the owning player's character object.
        return self.owner.player.char_ob
class ModusOrnamenta(SharedMemoryModel):
    """
    The method of wearing an item in an outfit.
    """

    # Through-model for FashionOutfit.fashion_items: links one outfit to one
    # item and records which slot the item occupies within that outfit.
    fashion_outfit = models.ForeignKey("FashionOutfit", on_delete=models.CASCADE)
    fashion_item = models.ForeignKey("objects.ObjectDB", on_delete=models.CASCADE)
    slot = models.CharField(max_length=80, blank=True, null=True)
class FashionSnapshot(FashionCommonMixins):
    """
    The recorded moment when a piece of gear becomes a weapon
    of the fashionpocalypse.

    A snapshot links the modeled item, the fashion model, the sponsoring
    org, and the item's designer, and records the fame generated. All of
    those foreign keys are SET_NULL, so any of them may be None.
    """

    FAME_CAP = 1500000
    ORG_FAME_DIVISOR = 2
    DESIGNER_FAME_DIVISOR = 4
    db_date_created = models.DateTimeField(auto_now_add=True)
    fashion_item = models.ForeignKey(
        "objects.ObjectDB",
        related_name="fashion_snapshots",
        on_delete=models.SET_NULL,
        null=True,
    )
    fashion_model = models.ForeignKey(
        "dominion.PlayerOrNpc",
        related_name="fashion_snapshots",
        on_delete=models.SET_NULL,
        null=True,
    )
    org = models.ForeignKey(
        "dominion.Organization",
        related_name="fashion_snapshots",
        on_delete=models.SET_NULL,
        null=True,
    )
    designer = models.ForeignKey(
        "dominion.PlayerOrNpc",
        related_name="designer_snapshots",
        on_delete=models.SET_NULL,
        null=True,
    )
    fame = models.IntegerField(default=0, blank=True)
    outfit = models.ForeignKey(
        "FashionOutfit",
        related_name="fashion_snapshots",
        on_delete=models.SET_NULL,
        null=True,
    )

    def __str__(self):
        return (
            str(self.fashion_item) if self.fashion_item else "[Snapshot #%d]" % self.id
        )

    @property
    def display(self):
        """The modeled info and 'buzz message' that appears on items."""
        # An outfit snapshot shows the outfit's combined fame instead.
        displayed_fame = self.fame if not self.outfit else self.outfit.fame
        return self.get_model_msg(
            self.fashion_model, self.org, self.db_date_created, displayed_fame
        )

    def save(self, *args, **kwargs):
        """Invalidates cache on save"""
        super(FashionSnapshot, self).save(*args, **kwargs)
        self.invalidate_fashion_caches()

    def delete(self, *args, **kwargs):
        """Invalidates cache before delete"""
        self.invalidate_fashion_caches()
        super(FashionSnapshot, self).delete(*args, **kwargs)

    def invalidate_fashion_caches(self):
        """Clears cached fame/display data on our linked outfit and item."""
        if self.outfit:
            self.outfit.invalidate_outfit_caches()
        # BUGFIX: fashion_item is a SET_NULL FK and can be None once the
        # item is gone; the original dereferenced it unconditionally, which
        # would raise AttributeError on save/delete of an orphaned snapshot.
        # Guard it exactly like the outfit check above.
        if self.fashion_item:
            self.fashion_item.invalidate_snapshots_cache()

    def roll_for_fame(self):
        """
        Rolls for amount of fame the item generates, with a floor of 4 fame.
        The fashion model's social clout and skill check of composure +
        performance is made exponential to be an enormous swing in the efficacy
        of fame generated: Someone whose roll+social_clout is 50 will be
        hundreds of times as effective as someone who flubs the roll.
        """
        from world.stats_and_skills import do_dice_check

        char = self.fashion_model.player.char_ob
        roll = do_dice_check(
            caller=char, stat="composure", skill="performance", difficulty=30
        )
        # Exponentiate the (clout-boosted) roll so good rolls dominate.
        roll = pow(max((roll + char.social_clout * 5), 1), 1.5)
        percentage = max(roll / 100.0, 0.01)
        level_mod = self.fashion_item.item_data.recipe.level / 6.0
        percentage *= max(level_mod, 0.01)
        percentage *= max((self.fashion_item.item_data.quality_level / 40.0), 0.01)
        percentage = max(percentage, 0.2)
        # they get either their percentage of the item's worth, their modified
        # roll, or 4, whichever is highest -- capped at FAME_CAP
        self.fame = min(
            max(int(self.fashion_item.item_worth * percentage), max(int(roll), 4)),
            self.FAME_CAP,
        )
        self.save()

    def apply_fame(self, reverse=False):
        """
        Awards full amount of fame to fashion model and a portion to the
        sponsoring Organization & the item's Designer. With reverse=True,
        the same amounts are deducted instead.
        """
        from world.dominion.models import PrestigeCategory

        mult = -1 if reverse else 1
        model_fame = self.fame * mult
        org_fame = self.org_fame * mult
        designer_fame = self.designer_fame * mult
        self.fashion_model.assets.adjust_prestige(model_fame, PrestigeCategory.FASHION)
        self.org.assets.adjust_prestige(org_fame)
        self.designer.assets.adjust_prestige(designer_fame, PrestigeCategory.DESIGN)

    def inform_fashion_clients(self):
        """
        Informs clients when fame is earned, by using their AssetOwner method.
        """
        category = "fashion"
        msg = "fame awarded from %s modeling %s." % (
            self.fashion_model,
            self.fashion_item,
        )
        if self.org_fame > 0:
            org_msg = "{{315{:,}{{n {}".format(self.org_fame, msg)
            self.org.assets.inform_owner(org_msg, category=category, append=True)
        if self.designer_fame > 0:
            designer_msg = "{{315{:,}{{n {}".format(self.designer_fame, msg)
            self.designer.assets.inform_owner(
                designer_msg, category=category, append=True
            )

    def reverse_snapshot(self):
        """Reverses the fame / action point effects of this snapshot"""
        from world.fashion.mixins import FashionableMixins

        self.apply_fame(reverse=True)
        # Refund the AP the model spent when this snapshot was created.
        self.fashion_model.player.pay_action_points(-FashionableMixins.fashion_ap_cost)

    @property
    def org_fame(self):
        """The portion of fame awarded to sponsoring org"""
        return int(self.fame / self.ORG_FAME_DIVISOR)

    @property
    def designer_fame(self):
        """The portion of fame awarded to item designer."""
        return int(self.fame / self.DESIGNER_FAME_DIVISOR)
| 36.991497 | 117 | 0.612432 |
1152fce01096ebeb99bf4b104b381123bbbc7a06 | 305 | py | Python | server.py | huangsongyan/pythondemo | 6279a2a39a34c4d708140c2e7653a127c444ec1b | [
"Apache-2.0"
] | null | null | null | server.py | huangsongyan/pythondemo | 6279a2a39a34c4d708140c2e7653a127c444ec1b | [
"Apache-2.0"
] | null | null | null | server.py | huangsongyan/pythondemo | 6279a2a39a34c4d708140c2e7653a127c444ec1b | [
"Apache-2.0"
] | null | null | null | #encoding:utf-8
# server.py
# Import make_server from the wsgiref module:
from wsgiref.simple_server import make_server
# Import the application function we wrote ourselves:
from hello import application
# Create a server: empty host means all interfaces; application handles requests.
# BUGFIX: the original banner claimed port 8000 while binding 8001 -- derive
# the message from a single port variable so the two can never disagree.
port = 8001
httpd = make_server('', port, application)
print("Serving HTTP on port %d..." % port)
# Start listening for HTTP requests:
httpd.serve_forever()
ea894194e784c445c6225f23840cff6297a3f18d | 70 | py | Python | python/pybool/examples/__init__.py | JohnReid/pybool | 7ee0ec1b669ec0259405d3c120ec3fc2827ba397 | [
"MIT"
] | 5 | 2016-01-17T15:50:08.000Z | 2021-05-13T09:10:41.000Z | python/pybool/examples/__init__.py | JohnReid/pybool | 7ee0ec1b669ec0259405d3c120ec3fc2827ba397 | [
"MIT"
] | 1 | 2016-12-16T03:37:01.000Z | 2016-12-16T08:02:52.000Z | python/pybool/examples/__init__.py | JohnReid/pybool | 7ee0ec1b669ec0259405d3c120ec3fc2827ba397 | [
"MIT"
] | 2 | 2016-05-30T17:55:41.000Z | 2017-12-17T21:02:59.000Z | #
# Copyright John Reid 2010
#
"""
Examples for pybool package.
"""
| 7.777778 | 28 | 0.642857 |
b44b9ad53e3712b2d639408cf5807bfb5afeec4c | 6,695 | py | Python | UMLRT2Kiltera_MM/Properties/Multiplicity/Himesis/HListen1orMoreListenBranchPart1_CompleteLHS.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | UMLRT2Kiltera_MM/Properties/Multiplicity/Himesis/HListen1orMoreListenBranchPart1_CompleteLHS.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | UMLRT2Kiltera_MM/Properties/Multiplicity/Himesis/HListen1orMoreListenBranchPart1_CompleteLHS.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import cPickle as pickle
from uuid import UUID
class HListen1orMoreListenBranchPart1_CompleteLHS(HimesisPreConditionPatternLHS):
    # NOTE: auto-generated AToM3/Himesis pattern code. The pickled payloads
    # and embedded constraint-source strings below must stay verbatim; they
    # are deserialized/executed by the transformation engine.

    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HListen1orMoreListenBranchPart1_CompleteLHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HListen1orMoreListenBranchPart1_CompleteLHS, self).__init__(name='HListen1orMoreListenBranchPart1_CompleteLHS', num_nodes=1, edges=[])
        # Add the edges
        self.add_edges([])
        # Set the graph attributes
        self["mm__"] = pickle.loads("""(lp1
S'MT_pre__UMLRT2Kiltera_MM'
p2
aS'MoTifRule'
p3
a.""")
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
        self["name"] = """"""
        self["GUID__"] = UUID('2687e375-3ee4-4925-adb1-646cc6e538b9')
        # Set the node attributes
        self.vs[0]["MT_pivotOut__"] = """element1"""
        self.vs[0]["MT_subtypeMatching__"] = False
        self.vs[0]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
        self.vs[0]["MT_dirty__"] = False
        self.vs[0]["mm__"] = """MT_pre__Listen"""
        self.vs[0]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[0]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[0]["GUID__"] = UUID('5d607181-a25c-41ad-a2f1-9f1c87865bdb')

    def eval_classtype1(self, attr_value, this):
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_cardinality1(self, attr_value, this):
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_name1(self, attr_value, this):
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        # returning True enables the rule to be applied,
        # returning False forbids the rule from being applied.
        #===============================================================================
        return True
| 46.172414 | 148 | 0.534279 |
818153c3d76d0f3efebe621388056edb728f4d78 | 5,275 | py | Python | test.py | motion-workshop/shadow-fileio-python | 48edb8764a109a425281b62c49385321de06bc32 | [
"BSD-2-Clause"
] | null | null | null | test.py | motion-workshop/shadow-fileio-python | 48edb8764a109a425281b62c49385321de06bc32 | [
"BSD-2-Clause"
] | null | null | null | test.py | motion-workshop/shadow-fileio-python | 48edb8764a109a425281b62c49385321de06bc32 | [
"BSD-2-Clause"
] | null | null | null | #
# Copyright (c) 2021, Motion Workshop
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import array
import io
import os
import unittest
import shadow.fileio
class TestTakeIO(unittest.TestCase):
    """Round-trip tests for shadow.fileio against the newest recorded take.

    NOTE(review): this test reads real take files from disk, so it assumes at
    least one recorded take exists on the machine -- confirm before running in CI.
    """

    def test_read(self):
        # Locate the most recent take folder on this machine.
        prefix = shadow.fileio.find_newest_take()
        self.assertIsInstance(prefix, str)
        with open('{}/data.mStream'.format(prefix), 'rb') as f:
            info, node_list, data = shadow.fileio.read_stream(f)
        self.assertIsInstance(info, dict)
        self.assertIsInstance(node_list, tuple)
        self.assertIsInstance(data, array.array)
        # Header fields parsed out of the stream and their expected types.
        self.assertIsInstance(info.get('version'), int)
        self.assertIsInstance(info.get('uuid'), str)
        self.assertIsInstance(info.get('num_node'), int)
        self.assertIsInstance(info.get('frame_stride'), int)
        self.assertIsInstance(info.get('num_frame'), int)
        self.assertIsInstance(info.get('channel_mask'), int)
        self.assertIsInstance(info.get('h'), float)
        self.assertIsInstance(info.get('location'), tuple)
        self.assertIsInstance(info.get('geomagnetic'), tuple)
        self.assertIsInstance(info.get('timestamp'), str)
        # node_list appears to carry two entries per node -- TODO confirm format.
        self.assertEqual(info.get('num_node', 0) * 2, len(node_list))
        # h is the sample interval; 0.01 s corresponds to 100 Hz capture.
        self.assertEqual(info.get('h'), 0.01)
        self.assertEqual(len(info.get('location')), 3)
        self.assertEqual(len(info.get('geomagnetic')), 3)
        # num_frame * frame_stride / sizeof(float) == len(data)
        self.assertEqual(
            int(info.get('num_frame', 0) * info.get('frame_stride', 0) / 4),
            len(data))
        with open('{}/take.mTake'.format(prefix)) as f:
            node_map = shadow.fileio.make_node_map(f, node_list)
        self.assertIsInstance(node_map, dict)
        # node_map: node id -> {channel id -> (start, end) index pair}.
        for node_id in node_map:
            self.assertIsInstance(node_id, str)
            node = node_map[node_id]
            self.assertIsInstance(node, dict)
            for channel_id in node:
                self.assertIsInstance(channel_id, str)
                channel = node[channel_id]
                self.assertIsInstance(channel, tuple)
                self.assertEqual(len(channel), 2)
                self.assertIsInstance(channel[0], int)
                self.assertIsInstance(channel[1], int)
                self.assertLess(channel[0], channel[1])
        # Trim off the YYYY-MM-DD/NNNN portion of the take path prefix. Use it
        # to test the other variant of find_newest_take that part of the path.
        a, number = os.path.split(prefix)
        b, date = os.path.split(a)
        prefix_name = shadow.fileio.find_newest_take(
            os.path.join(date, number))
        self.assertIsInstance(prefix_name, str)
        self.assertEqual(prefix, prefix_name)
        # Read the stream into memory so we can mess it up for testing.
        with open('{}/data.mStream'.format(prefix), 'rb') as f:
            buf = f.read()
        # Read one frame at a time.
        with io.BytesIO(buf) as f:
            # Just read the header portion of the stream.
            info, node_list = shadow.fileio.read_header(f)
            self.assertIsInstance(info, dict)
            self.assertIsInstance(node_list, tuple)
            # Read one frame at a time.
            for i in range(info.get('num_frame', 0)):
                frame = shadow.fileio.read_frame(f, info)
                self.assertEqual(
                    int(info.get('frame_stride', 0) / 4),
                    len(frame))
        # Incorrect format: corrupt the leading magic/format field.
        bad_buf = int(1).to_bytes(4, byteorder='little') + buf[4:]
        with io.BytesIO(bad_buf) as f:
            with self.assertRaises(ValueError):
                shadow.fileio.read_stream(f)
        # Incorrect version header: corrupt the version field at offset 8.
        bad_buf = buf[0:8] + int(1).to_bytes(4, byteorder='little') + buf[12:]
        with io.BytesIO(bad_buf) as f:
            with self.assertRaises(ValueError):
                shadow.fileio.read_stream(f)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| 37.678571 | 78 | 0.649479 |
5352b02e408fadab99c81c5baff25958cc0319ac | 3,319 | py | Python | utils/print_graph.py | mjasperse/symbiflow-arch-defs | ff3aedec45c0f886260b34ff5288482a89411d13 | [
"ISC"
] | null | null | null | utils/print_graph.py | mjasperse/symbiflow-arch-defs | ff3aedec45c0f886260b34ff5288482a89411d13 | [
"ISC"
] | null | null | null | utils/print_graph.py | mjasperse/symbiflow-arch-defs | ff3aedec45c0f886260b34ff5288482a89411d13 | [
"ISC"
] | null | null | null | #!/usr/bin/env python3
import lib.rr_graph.graph as graph
def print_block_types(g):
    """Sequentially list block types.

    Prints one line per block type: its id (4 wide), its short description
    padded to 40 columns, and its extended description.
    """
    block_types = g.block_grid.block_types
    for type_id, block_type in block_types._ids.items():
        short_desc = "{:40s}".format(block_type.to_string())
        long_desc = block_type.to_string(extra=True)
        print("{:4} ".format(type_id), short_desc, long_desc)
def print_grid(g):
    '''ASCII diagram displaying XY layout'''
    bg = g.block_grid
    grid = bg.size
    # print('Grid %dw x %dh' % (grid.width, grid.height))
    # Each column is made as wide as the longest block-type name in it.
    col_widths = []
    for x in range(0, grid.width):
        col_widths.append(
            max(len(bt.name) for bt in bg.block_types_for(col=x))
        )
    # Header row: centered x coordinates over each column.
    print(" ", end=" ")
    for x in range(0, grid.width):
        print("{: ^{width}d}".format(x, width=col_widths[x]), end=" ")
    print()
    # Horizontal rule separating the header from the grid body.
    print(" /", end="-")
    for x in range(0, grid.width):
        print("-" * col_widths[x], end="-+-")
    print()
    # Body rows, highest y first so the origin ends up at the bottom-left.
    for y in reversed(range(0, grid.height)):
        # NOTE(review): the width= kwarg is unused by this format string
        # (extra kwargs are silently ignored by str.format).
        print("{: 3d} |".format(y, width=col_widths[0]), end=" ")
        for x, bt in enumerate(bg.block_types_for(row=y)):
            assert x < len(col_widths), (x, bt)
            print(
                "{: ^{width}}".format(bt.name, width=col_widths[x]), end=" | "
            )
        print()
def print_nodes(g, lim=None):
    '''Display source/sink edges on all XML nodes'''

    # Helpers to render a node/edge via the graph's pretty-printer.
    def node_name(node):
        return graph.RoutingGraphPrinter.node(node, g.block_grid)

    def edge_name(node, flip=False):
        return graph.RoutingGraphPrinter.edge(
            g.routing, node, block_grid=g.block_grid, flip=flip
        )

    routing = g.routing
    print(
        'Nodes: {}, edges {}'.format(
            len(routing._ids_map(graph.RoutingNode)),
            len(routing._ids_map(graph.RoutingEdge))
        )
    )
    nodemap = routing._ids_map(graph.RoutingNode)
    edgemap = routing._ids_map(graph.RoutingEdge)
    node2edges = routing.edges_for_allnodes()
    # Walk nodes in id order; stop after `lim` nodes when a limit is given.
    for i, node_id in enumerate(sorted(node2edges.keys())):
        node = nodemap[node_id]
        print()
        if lim and i >= lim:
            print('...')
            break
        print('{} - {} ({})'.format(i, node_name(node), node_id))
        # Partition this node's edges into those it sources vs. sinks.
        srcs = []
        snks = []
        for e in node2edges[node_id]:
            edge = edgemap[e]
            src, snk = routing.nodes_for_edge(edge)
            if src == node:
                srcs.append(edge)
            elif snk == node:
                snks.append(edge)
            else:
                # Edge listed for this node but touching neither end: flag it.
                print("!?@", edge_name(edge))
        print(" Sources:")
        for e in srcs:
            print(" ", edge_name(e))
        if not srcs:
            print(" ", None)
        print(" Sink:")
        for e in snks:
            print(" ", edge_name(e, flip=True))
        if not snks:
            print(" ", None)
def print_graph(g, lim=0):
    """Dump block types, the XY grid, and routing nodes of graph *g*.

    ``lim`` bounds how many nodes print_nodes will show (0/None = all).
    """
    sections = (
        lambda: print_block_types(g),
        lambda: print_grid(g),
        lambda: print_nodes(g, lim=lim),
    )
    for emit_section in sections:
        print()
        emit_section()
    print()
def main():
    """Command-line entry point: parse arguments and print the graph."""
    import argparse

    parser = argparse.ArgumentParser("Print rr_graph.xml file")
    parser.add_argument("--lim", type=int, default=0)
    parser.add_argument("rr_graph")
    args = parser.parse_args()

    print_graph(graph.Graph(args.rr_graph), lim=args.lim)


if __name__ == "__main__":
    main()
| 26.133858 | 78 | 0.542332 |
66b0b419f54a60d8d8e69ac453f5783c62f7238e | 16,871 | py | Python | ryu/controller/network.py | samrussell/ryu | a62aa7ecc6c0372cc2b633140005dc5e85ea6129 | [
"Apache-2.0"
] | 2 | 2015-12-07T06:55:31.000Z | 2017-05-08T06:18:56.000Z | ryu/controller/network.py | samrussell/ryu | a62aa7ecc6c0372cc2b633140005dc5e85ea6129 | [
"Apache-2.0"
] | null | null | null | ryu/controller/network.py | samrussell/ryu | a62aa7ecc6c0372cc2b633140005dc5e85ea6129 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from ryu.base import app_manager
import ryu.exception as ryu_exc
from ryu.app.rest_nw_id import NW_ID_UNKNOWN
from ryu.controller import event
from ryu.exception import NetworkNotFound, NetworkAlreadyExist
from ryu.exception import PortAlreadyExist, PortNotFound, PortUnknown
class MacAddressAlreadyExist(ryu_exc.RyuException):
    """Raised when a (dpid, port) pair already has a MAC address assigned."""
    # Template is interpolated by the RyuException base with keyword args:
    # dpid, port, mac_address.
    message = 'port (%(dpid)s, %(port)s) has already mac %(mac_address)s'
class EventNetworkDel(event.EventBase):
    """Event broadcast to observers when a network is removed."""
    def __init__(self, network_id):
        super(EventNetworkDel, self).__init__()
        # Id of the network that was deleted.
        self.network_id = network_id
class EventNetworkPort(event.EventBase):
    """Event broadcast when a port joins (add_del=True) or leaves
    (add_del=False) a network."""

    def __init__(self, network_id, dpid, port_no, add_del):
        super(EventNetworkPort, self).__init__()
        self.network_id, self.dpid = network_id, dpid
        self.port_no, self.add_del = port_no, add_del
class EventMacAddress(event.EventBase):
    """Event broadcast when a MAC address is associated with
    (add_del=True) or dissociated from (add_del=False) a port."""

    def __init__(self, dpid, port_no, network_id, mac_address, add_del):
        super(EventMacAddress, self).__init__()
        # This event is only meaningful with a concrete network and MAC.
        assert network_id is not None
        assert mac_address is not None
        self.dpid, self.port_no = dpid, port_no
        self.network_id, self.mac_address = network_id, mac_address
        self.add_del = add_del
class Networks(dict):
    """Registry of networks: network_id -> set of (dpid, port_no).

    The callable passed at construction (``f``) is used to notify
    observers of port additions/removals and network deletions.
    """
    def __init__(self, f):
        super(Networks, self).__init__()
        # f: callable taking an event instance
        # (e.g. RyuApp.send_event_to_observers).
        self.send_event = f
    def list_networks(self):
        # Known network ids.
        return self.keys()
    def has_network(self, network_id):
        return network_id in self
    def update_network(self, network_id):
        # Idempotent create: registers the network if not yet known.
        self.setdefault(network_id, set())
    def create_network(self, network_id):
        # Strict create: fails if the network already exists.
        if network_id in self:
            raise NetworkAlreadyExist(network_id=network_id)
        self[network_id] = set()
    def remove_network(self, network_id):
        # Emits one EventNetworkPort(del) per member port, then
        # EventNetworkDel, before forgetting the network.
        try:
            network = self[network_id]
        except KeyError:
            raise NetworkNotFound(network_id=network_id)
        for (dpid, port_no) in network:
            self.send_event(EventNetworkPort(network_id, dpid, port_no, False))
        self.send_event(EventNetworkDel(network_id))
        del self[network_id]
    def list_ports(self, network_id):
        try:
            # use list() to keep compatibility for output
            # set() isn't json serializable
            return list(self[network_id])
        except KeyError:
            raise NetworkNotFound(network_id=network_id)
    def add_raw(self, network_id, dpid, port_no):
        # Mutation only; callers pair this with add_event when needed.
        self[network_id].add((dpid, port_no))
    def add_event(self, network_id, dpid, port_no):
        self.send_event(
            EventNetworkPort(network_id, dpid, port_no, True))
    # def add(self, network_id, dpid, port_no):
    #     self.add_raw(network_id, dpid, port_no)
    #     self.add_event(network_id, dpid, port_no)
    def remove_raw(self, network_id, dpid, port_no):
        # Silent no-op when the port is not a member of the network.
        if (dpid, port_no) in self[network_id]:
            self.send_event(EventNetworkPort(network_id, dpid, port_no, False))
            self[network_id].remove((dpid, port_no))
    def remove(self, network_id, dpid, port_no):
        try:
            self.remove_raw(network_id, dpid, port_no)
        except KeyError:
            raise NetworkNotFound(network_id=network_id)
        except ValueError:
            # NOTE(review): remove_raw guards membership and set.remove
            # raises KeyError anyway, so this branch looks unreachable.
            raise PortNotFound(network_id=network_id, dpid=dpid, port=port_no)
    def has_port(self, network_id, dpid, port):
        return (dpid, port) in self[network_id]
    def get_dpids(self, network_id):
        # Datapath ids with at least one port in the network; empty set
        # for unknown networks.
        try:
            ports = self[network_id]
        except KeyError:
            return set()
        # python 2.6 doesn't support set comprehension
        # port = (dpid, port_no)
        return set([port[0] for port in ports])
class Port(object):
    """Per-port record: port number, owning network id, optional MAC."""

    def __init__(self, port_no, network_id, mac_address=None):
        super(Port, self).__init__()
        self.port_no, self.network_id = port_no, network_id
        self.mac_address = mac_address
class DPIDs(dict):
    """Per-switch port table: dpid -> port_no -> Port(port_no, network_id, mac_address).

    ``send_event`` notifies observers (EventMacAddress) when a MAC binding
    appears or disappears; ``nw_id_unknown`` is the sentinel network id for
    ports whose network is not yet known.
    """
    def __init__(self, f, nw_id_unknown):
        super(DPIDs, self).__init__()
        # f: callable taking an event instance.
        self.send_event = f
        self.nw_id_unknown = nw_id_unknown
    def setdefault_dpid(self, dpid):
        # Ensure a (possibly empty) port table exists for this switch.
        return self.setdefault(dpid, {})
    def _setdefault_network(self, dpid, port_no, default_network_id):
        # Return the Port record, creating it with default_network_id
        # when the port was not seen before.
        dp = self.setdefault_dpid(dpid)
        return dp.setdefault(port_no, Port(port_no=port_no,
                                           network_id=default_network_id))
    def setdefault_network(self, dpid, port_no):
        self._setdefault_network(dpid, port_no, self.nw_id_unknown)
    def update_port(self, dpid, port_no, network_id):
        # Create-or-update; always overwrites the network id.
        port = self._setdefault_network(dpid, port_no, network_id)
        port.network_id = network_id
    def remove_port(self, dpid, port_no):
        # Emits EventMacAddress(del) when the port had a MAC binding.
        try:
            # self.dpids[dpid][port_no] can be already deleted by
            # port_deleted()
            port = self[dpid].get(port_no)
            if port and port.network_id and port.mac_address:
                self.send_event(EventMacAddress(dpid, port_no,
                                                port.network_id,
                                                port.mac_address,
                                                False))
            self[dpid].pop(port_no, None)
        except KeyError:
            raise PortNotFound(dpid=dpid, port=port_no, network_id=None)
    def get_ports(self, dpid, network_id=None, mac_address=None):
        # Optional filters narrow by network, then by MAC.
        if network_id is None:
            return self.get(dpid, {}).values()
        if mac_address is None:
            return [p for p in self.get(dpid, {}).values()
                    if p.network_id == network_id]
        # live-migration: There can be two ports that have same mac address.
        return [p for p in self.get(dpid, {}).values()
                if p.network_id == network_id and p.mac_address == mac_address]
    def get_port(self, dpid, port_no):
        try:
            return self[dpid][port_no]
        except KeyError:
            raise PortNotFound(dpid=dpid, port=port_no, network_id=None)
    def get_network(self, dpid, port_no):
        try:
            return self[dpid][port_no].network_id
        except KeyError:
            raise PortUnknown(dpid=dpid, port=port_no)
    def get_networks(self, dpid):
        # NOTE(review): despite the name, this returns the set of Port
        # records for the switch, not network ids.
        return set(self[dpid].values())
    def get_network_safe(self, dpid, port_no):
        # Like get_network but never raises; unknown ports map to the
        # nw_id_unknown sentinel.
        port = self.get(dpid, {}).get(port_no)
        if port is None:
            return self.nw_id_unknown
        return port.network_id
    def get_mac(self, dpid, port_no):
        port = self.get_port(dpid, port_no)
        return port.mac_address
    def _set_mac(self, network_id, dpid, port_no, port, mac_address):
        # Only allowed when the port is unassigned, already in this
        # network, or still "unknown".
        if not (port.network_id is None or
                port.network_id == network_id or
                port.network_id == self.nw_id_unknown):
            raise PortNotFound(network_id=network_id, dpid=dpid, port=port_no)
        port.network_id = network_id
        port.mac_address = mac_address
        if port.network_id and port.mac_address:
            self.send_event(EventMacAddress(
                dpid, port_no, port.network_id, port.mac_address,
                True))
    def set_mac(self, network_id, dpid, port_no, mac_address):
        # Strict: fails if the port already has any MAC.
        port = self.get_port(dpid, port_no)
        if port.mac_address is not None:
            raise MacAddressAlreadyExist(dpid=dpid, port=port_no,
                                         mac_address=mac_address)
        self._set_mac(network_id, dpid, port_no, port, mac_address)
    def update_mac(self, network_id, dpid, port_no, mac_address):
        # Sets the MAC when absent; re-setting the same MAC is a no-op.
        port = self.get_port(dpid, port_no)
        if port.mac_address is None:
            self._set_mac(network_id, dpid, port_no, port, mac_address)
            return
        # For now, we don't allow changing mac address.
        if port.mac_address != mac_address:
            raise MacAddressAlreadyExist(dpid=dpid, port=port_no,
                                         mac_address=port.mac_address)
MacPort = collections.namedtuple('MacPort', ('dpid', 'port_no'))


class MacToPort(collections.defaultdict):
    """Reverse MAC table: mac_address -> set of MacPort(dpid, port_no)."""

    def __init__(self):
        super(MacToPort, self).__init__(set)

    def add_port(self, dpid, port_no, mac_address):
        """Record (dpid, port_no) as a location of mac_address."""
        self[mac_address].add(MacPort(dpid, port_no))

    def remove_port(self, dpid, port_no, mac_address):
        """Forget (dpid, port_no) for mac_address; drop empty entries."""
        entry = self[mac_address]
        entry.discard(MacPort(dpid, port_no))
        if not entry:
            del self[mac_address]

    def get_ports(self, mac_address):
        """Return the set of MacPort tuples recorded for mac_address."""
        return self[mac_address]
class MacAddresses(dict):
    """Per-network MAC tables: network_id -> MacToPort."""

    def add_port(self, network_id, dpid, port_no, mac_address):
        """Record (dpid, port_no) as a location of mac_address in network_id."""
        table = self.setdefault(network_id, MacToPort())
        table.add_port(dpid, port_no, mac_address)

    def remove_port(self, network_id, dpid, port_no, mac_address):
        """Forget the binding; prune the network entry once empty.

        Unknown networks are silently ignored.
        """
        table = self.get(network_id)
        if table is None:
            return
        table.remove_port(dpid, port_no, mac_address)
        if not table:
            del self[network_id]

    def get_ports(self, network_id, mac_address):
        """Return the set of (dpid, port_no) for mac_address in network_id."""
        table = self.get(network_id)
        if not table:
            return set()
        return table.get_ports(mac_address)
class Network(app_manager.RyuApp):
    """Ryu application that tracks which network each switch port belongs to.

    Keeps three synchronized views:
      * ``networks``:      network_id -> set of (dpid, port_no)
      * ``dpids``:         dpid -> port_no -> Port
      * ``mac_addresses``: network_id -> mac -> set of (dpid, port_no)

    Registered observers receive EventNetworkPort / EventNetworkDel /
    EventMacAddress notifications as the views change.
    """
    def __init__(self, nw_id_unknown=NW_ID_UNKNOWN):
        super(Network, self).__init__()
        self.name = 'network'
        self.nw_id_unknown = nw_id_unknown
        self.networks = Networks(self.send_event_to_observers)
        self.dpids = DPIDs(self.send_event_to_observers, nw_id_unknown)
        self.mac_addresses = MacAddresses()
    def _check_nw_id_unknown(self, network_id):
        # The reserved "unknown" id may never be used as a real network id.
        if network_id == self.nw_id_unknown:
            raise NetworkAlreadyExist(network_id=network_id)
    def list_networks(self):
        return self.networks.list_networks()
    def update_network(self, network_id):
        self._check_nw_id_unknown(network_id)
        self.networks.update_network(network_id)
    def create_network(self, network_id):
        self._check_nw_id_unknown(network_id)
        self.networks.create_network(network_id)
    def remove_network(self, network_id):
        self.networks.remove_network(network_id)
    def list_ports(self, network_id):
        return self.networks.list_ports(network_id)
    def _update_port(self, network_id, dpid, port, port_may_exist):
        """Associate (dpid, port) with network_id, moving it out of any
        previously known network; the add event is emitted only when the
        membership actually changes."""
        def _known_nw_id(nw_id):
            return nw_id is not None and nw_id != self.nw_id_unknown
        queue_add_event = False
        self._check_nw_id_unknown(network_id)
        try:
            old_network_id = self.dpids.get_network_safe(dpid, port)
            if (self.networks.has_port(network_id, dpid, port) or
                    _known_nw_id(old_network_id)):
                if not port_may_exist:
                    raise PortAlreadyExist(network_id=network_id,
                                           dpid=dpid, port=port)
            if old_network_id != network_id:
                queue_add_event = True
                self.networks.add_raw(network_id, dpid, port)
                if _known_nw_id(old_network_id):
                    self.networks.remove_raw(old_network_id, dpid, port)
        except KeyError:
            raise NetworkNotFound(network_id=network_id)
        self.dpids.update_port(dpid, port, network_id)
        if queue_add_event:
            self.networks.add_event(network_id, dpid, port)
    def create_port(self, network_id, dpid, port):
        """Strict add: fails with PortAlreadyExist on a known port."""
        self._update_port(network_id, dpid, port, False)
    def update_port(self, network_id, dpid, port):
        """Idempotent add/move of a port into network_id."""
        self._update_port(network_id, dpid, port, True)
    def _get_old_mac(self, network_id, dpid, port_no):
        """Return the port's MAC if it currently belongs to network_id,
        else None."""
        try:
            port = self.dpids.get_port(dpid, port_no)
        except PortNotFound:
            pass
        else:
            if port.network_id == network_id:
                return port.mac_address
        return None
    def remove_port(self, network_id, dpid, port_no):
        # generate event first, then do the real task
        old_mac_address = self._get_old_mac(network_id, dpid, port_no)
        self.dpids.remove_port(dpid, port_no)
        self.networks.remove(network_id, dpid, port_no)
        if old_mac_address is not None:
            self.mac_addresses.remove_port(network_id, dpid, port_no,
                                           old_mac_address)
    #
    # methods for gre tunnel
    #
    def get_dpids(self, network_id):
        return self.networks.get_dpids(network_id)
    def has_network(self, network_id):
        return self.networks.has_network(network_id)
    def get_networks(self, dpid):
        return self.dpids.get_networks(dpid)
    def create_mac(self, network_id, dpid, port_no, mac_address):
        """Strict MAC binding: fails if the port already has a MAC."""
        self.mac_addresses.add_port(network_id, dpid, port_no, mac_address)
        self.dpids.set_mac(network_id, dpid, port_no, mac_address)
    def update_mac(self, network_id, dpid, port_no, mac_address):
        """Idempotent MAC binding; changing an existing MAC is rejected
        by DPIDs.update_mac."""
        old_mac_address = self._get_old_mac(network_id, dpid, port_no)
        self.dpids.update_mac(network_id, dpid, port_no, mac_address)
        if old_mac_address is not None:
            self.mac_addresses.remove_port(network_id, dpid, port_no,
                                           old_mac_address)
        self.mac_addresses.add_port(network_id, dpid, port_no, mac_address)
    def get_mac(self, dpid, port_no):
        return self.dpids.get_mac(dpid, port_no)
    def list_mac(self, dpid, port_no):
        # Zero-or-one element list (a port has at most one MAC).
        mac_address = self.dpids.get_mac(dpid, port_no)
        if mac_address is None:
            return []
        return [mac_address]
    def get_ports(self, dpid, network_id=None, mac_address=None):
        return self.dpids.get_ports(dpid, network_id, mac_address)
    def get_port(self, dpid, port_no):
        return self.dpids.get_port(dpid, port_no)
    def get_ports_with_mac(self, network_id, mac_address):
        return self.mac_addresses.get_ports(network_id, mac_address)
    #
    # methods for simple_isolation
    #
    def same_network(self, dpid, nw_id, out_port, allow_nw_id_external=None):
        """Return True when out_port on dpid belongs to nw_id, or when
        either side is the allowed external network."""
        assert nw_id != self.nw_id_unknown
        out_nw = self.dpids.get_network_safe(dpid, out_port)
        if nw_id == out_nw:
            return True
        if (allow_nw_id_external is not None and
                (allow_nw_id_external == nw_id or
                 allow_nw_id_external == out_nw)):
            # allow external network -> known network id
            return True
        # Fixed log format: the two literals previously concatenated
        # without a separating space ("out_nw %sexternal %s").
        self.logger.debug('blocked dpid %s nw_id %s out_port %d out_nw %s '
                          'external %s',
                          dpid, nw_id, out_port, out_nw, allow_nw_id_external)
        return False
    def get_network(self, dpid, port):
        return self.dpids.get_network(dpid, port)
    def add_datapath(self, ofp_switch_features):
        """Register a switch and all ports reported in its features reply."""
        datapath = ofp_switch_features.datapath
        dpid = ofp_switch_features.datapath_id
        ports = ofp_switch_features.ports
        self.dpids.setdefault_dpid(dpid)
        for port_no in ports:
            self.port_added(datapath, port_no)
    def port_added(self, datapath, port_no):
        if port_no == 0 or port_no >= datapath.ofproto.OFPP_MAX:
            # skip fake output ports
            return
        self.dpids.setdefault_network(datapath.id, port_no)
    def port_deleted(self, dpid, port_no):
        self.dpids.remove_port(dpid, port_no)
    def filter_ports(self, dpid, in_port, nw_id, allow_nw_id_external=None):
        """Return port numbers on dpid (excluding in_port) that are in
        nw_id or in the allowed external network."""
        assert nw_id != self.nw_id_unknown
        ret = []
        for port in self.get_ports(dpid):
            nw_id_ = port.network_id
            if port.port_no == in_port:
                continue
            if nw_id_ == nw_id:
                ret.append(port.port_no)
            elif (allow_nw_id_external is not None and
                  nw_id_ == allow_nw_id_external):
                ret.append(port.port_no)
        return ret
| 35.517895 | 79 | 0.638907 |
4187a80380274dcb5f1e5109173e959868ce31f5 | 12,672 | py | Python | ampligraph/utils/model_utils.py | ojasviyadav/AmpliGraph | 07ce70ff9e30812ac8f4a34d245d1d5decec27f7 | [
"Apache-2.0"
] | null | null | null | ampligraph/utils/model_utils.py | ojasviyadav/AmpliGraph | 07ce70ff9e30812ac8f4a34d245d1d5decec27f7 | [
"Apache-2.0"
] | null | null | null | ampligraph/utils/model_utils.py | ojasviyadav/AmpliGraph | 07ce70ff9e30812ac8f4a34d245d1d5decec27f7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The AmpliGraph Authors. All Rights Reserved.
#
# This file is Licensed under the Apache License, Version 2.0.
# A copy of the Licence is available in LICENCE, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import os
import pickle
import importlib
from time import gmtime, strftime
import glob
import logging
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import numpy as np
import pandas as pd
"""This module contains utility functions for neural knowledge graph embedding models.
"""
DEFAULT_MODEL_NAMES = "{0}.model.pkl"
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def save_model(model, model_name_path=None):
    """Save a trained model to disk with pickle.

    The pickled payload is a dict holding the model class name, its
    hyperparameters, fit/calibration status, the entity and relation
    dictionaries, plus whatever trained parameters
    ``model.get_embedding_model_params`` appends.

    Parameters
    ----------
    model: EmbeddingModel
        A trained neural knowledge graph embedding model; must be an
        instance of TransE, DistMult, ComplEx, or HolE.
    model_name_path: string
        Destination file name. If not specified, a default name of the
        form '<current datetime>.model.pkl' is used, written to the
        working directory.
    """
    logger.debug('Saving model {}.'.format(model.__class__.__name__))
    obj = {
        'class_name': model.__class__.__name__,
        'hyperparams': model.all_params,
        'is_fitted': model.is_fitted,
        'ent_to_idx': model.ent_to_idx,
        'rel_to_idx': model.rel_to_idx,
        'is_calibrated': model.is_calibrated
    }
    # The model adds its trained weights/params into obj.
    model.get_embedding_model_params(obj)
    logger.debug('Saving hyperparams:{}\n\tis_fitted: \
                 {}'.format(model.all_params, model.is_fitted))
    if model_name_path is None:
        # Timestamped default name, e.g. '2019_01_01-12_00_00.model.pkl'.
        model_name_path = DEFAULT_MODEL_NAMES.format(strftime("%Y_%m_%d-%H_%M_%S", gmtime()))
    with open(model_name_path, 'wb') as fw:
        pickle.dump(obj, fw)
    # dump model tf
def restore_model(model_name_path=None):
    """Restore a saved model from disk.

    See also :meth:`save_model`.

    Parameters
    ----------
    model_name_path: string
        Path of the saved model. If not specified, the most recently
        modified '*.model.pkl' file in the working directory is used.

    Returns
    -------
    model: EmbeddingModel
        The neural knowledge graph embedding model restored from disk.

    Raises
    ------
    Exception
        If no default model can be found, or if unpickling fails.
    FileNotFoundError
        If the given path cannot be read.
    """
    if model_name_path is None:
        logger.warning("There is no model name specified. \
                        We will try to lookup \
                        the latest default saved model...")
        default_models = glob.glob("*.model.pkl")
        if not default_models:
            raise Exception("No default model found. Please specify \
                             model_name_path...")
        # Pick the newest file by modification time: glob order is
        # filesystem-dependent and is NOT "latest saved".
        model_name_path = max(default_models, key=os.path.getmtime)
        logger.info("We will load the model: {0} in your \
                    current dir...".format(model_name_path))
    model = None
    logger.info('Will load model {}.'.format(model_name_path))
    try:
        with open(model_name_path, 'rb') as fr:
            restored_obj = pickle.load(fr)
        logger.debug('Restoring model ...')
        # Resolve the concrete model class by the name stored at save time.
        module = importlib.import_module("ampligraph.latent_features")
        class_ = getattr(module, restored_obj['class_name'])
        model = class_(**restored_obj['hyperparams'])
        model.is_fitted = restored_obj['is_fitted']
        model.ent_to_idx = restored_obj['ent_to_idx']
        model.rel_to_idx = restored_obj['rel_to_idx']
        # Older model files pre-date calibration support.
        model.is_calibrated = restored_obj.get('is_calibrated', False)
        model.restore_model_params(restored_obj)
    except pickle.UnpicklingError as e:
        msg = 'Error unpickling model {} : {}.'.format(model_name_path, e)
        logger.debug(msg)
        raise Exception(msg)
    except (IOError, FileNotFoundError):
        msg = 'No model found: {}.'.format(model_name_path)
        logger.debug(msg)
        raise FileNotFoundError(msg)
    return model
def create_tensorboard_visualizations(model, loc, labels=None, write_metadata=True, export_tsv_embeddings=True):
    """Export a trained model's embeddings for TensorBoard.

    Writes into ``loc``:

    * TF checkpoint/projector files (``graph_embedding.ckpt*``,
      ``projector_config.pbtxt``, ``checkpoint``) for a local
      TensorBoard instance,
    * ``embeddings_projector.tsv`` (when ``export_tsv_embeddings``) for
      upload to the TensorBoard Embedding Projector,
    * ``metadata.tsv`` (when ``write_metadata``) holding one label per
      embedding point.

    Parameters
    ----------
    model: EmbeddingModel
        A trained neural knowledge graph embedding model; must be an
        instance of TransE, DistMult, ComplEx, or HolE.
    loc: string
        Output directory; created (including parent directories) if it
        does not exist.
    labels: pd.DataFrame
        Label(s) for each embedding point. Defaults to the embedding
        labels included in the model (``model.ent_to_idx`` keys).
    write_metadata: bool (Default: True)
        If True, write 'metadata.tsv' into ``loc``.
    export_tsv_embeddings: bool (Default: True)
        If True, write a tab-separated embeddings file into ``loc``.

    Raises
    ------
    ValueError
        If the model is not fitted, or ``labels`` does not have one row
        per embedding.
    """
    # Validate before producing any filesystem side effects.
    if not model.is_fitted:
        raise ValueError('Cannot write embeddings if model is not fitted.')

    # Create loc if it doesn't exist
    if not os.path.exists(loc):
        logger.debug('Creating Tensorboard visualization directory: %s' % loc)
        # makedirs (not mkdir) so nested output paths work.
        os.makedirs(loc)

    # If no label data supplied, use model ent_to_idx keys as labels
    if labels is None:
        logger.info('Using model entity dictionary to create Tensorboard metadata.tsv')
        labels = list(model.ent_to_idx.keys())
    else:
        if len(labels) != len(model.ent_to_idx):
            raise ValueError('Label data rows must equal number of embeddings.')

    if write_metadata:
        logger.debug('Writing metadata.tsv to: %s' % loc)
        write_metadata_tsv(loc, labels)

    if export_tsv_embeddings:
        tsv_filename = "embeddings_projector.tsv"
        logger.info('Writing embeddings tsv to: %s' % os.path.join(loc, tsv_filename))
        np.savetxt(os.path.join(loc, tsv_filename), model.trained_model_params[0], delimiter='\t')

    checkpoint_path = os.path.join(loc, 'graph_embedding.ckpt')

    # The projector checkpoint requires the embeddings in a tf.Variable.
    embedding_var = tf.Variable(model.trained_model_params[0], name='graph_embedding')

    with tf.Session() as sess:
        saver = tf.train.Saver([embedding_var])
        sess.run(embedding_var.initializer)
        saver.save(sess, checkpoint_path)

        config = projector.ProjectorConfig()

        # One can add multiple embeddings.
        embedding = config.embeddings.add()
        embedding.tensor_name = embedding_var.name

        # Link this tensor to its metadata file (e.g. labels).
        embedding.metadata_path = 'metadata.tsv'

        # Saves a config file that TensorBoard will read during startup.
        projector.visualize_embeddings(tf.summary.FileWriter(loc), config)
def write_metadata_tsv(loc, data):
    """Write a Tensorboard 'metadata.tsv' file into directory *loc*.

    Parameters
    ----------
    loc: string
        Directory where the file is written.
    data: list of strings, or pd.DataFrame
        One label per embedding point. A list is written without a
        header row; a DataFrame is written with its column headers.
    """
    metadata_path = os.path.join(loc, 'metadata.tsv')
    if isinstance(data, list):
        # Plain labels: one per line, no header.
        with open(metadata_path, 'w+', encoding='utf8') as metadata_file:
            metadata_file.writelines('%s\n' % row for row in data)
    elif isinstance(data, pd.DataFrame):
        # Multi-column labels: tab-separated with headers.
        data.to_csv(metadata_path, sep='\t', index=False)
def dataframe_to_triples(X, schema):
    """Convert a pandas DataFrame into an array of (s, p, o) triples.

    Parameters
    ----------
    X: pandas DataFrame with headers
    schema: list of (subject, relation_name, object) tuples, where
        subject and object name columns of X.

    Returns
    -------
    np.ndarray of shape (rows * len(schema), 3), one triple per
    DataFrame row and schema entry.
    """
    # Validate that every subject/object column exists in the frame
    # (drop the middle relation-name column from the schema first).
    requested = set(np.delete(np.array(schema), 1, 1).flatten())
    missing = requested.difference(set(X.columns))
    if missing:
        raise Exception("Subject/Object {} are not in data frame headers".format(missing))
    triples = []
    for subj_col, rel, obj_col in schema:
        triples.extend([[subj, rel, obj] for subj, obj in zip(X[subj_col], X[obj_col])])
    return np.array(triples)
| 36.518732 | 116 | 0.618134 |
176348a04e7d475cf52d60eeb73d5f0a6fb85890 | 165,623 | py | Python | catkin_ws/src/joystick_drivers/wiimote/src/wiimote/stats.py | RMI-NITT/PEPPER-SLAM_Navigation_Bot | 4a00786cdac1ae25328daf2a896699f303f2afef | [
"MIT"
] | 1 | 2021-11-22T21:30:45.000Z | 2021-11-22T21:30:45.000Z | catkin_ws/src/joystick_drivers/wiimote/src/wiimote/stats.py | svdeepak99/PEPPER-SLAM_Navigation_Bot | 49bfcc08566789a82168800199c8a0835eab7b71 | [
"MIT"
] | null | null | null | catkin_ws/src/joystick_drivers/wiimote/src/wiimote/stats.py | svdeepak99/PEPPER-SLAM_Navigation_Bot | 49bfcc08566789a82168800199c8a0835eab7b71 | [
"MIT"
] | 3 | 2021-04-08T17:13:22.000Z | 2021-12-12T15:55:54.000Z | # Copyright (c) 1999-2007 Gary Strangman; All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: Dec 18, 2007 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
from __future__ import print_function
## CHANGE LOG:
## ===========
## 07-11.26 ... conversion for numpy started
## 07-05-16 ... added Lin's Concordance Correlation Coefficient (alincc) and acov
## 05-08-21 ... added "Dice's coefficient"
## 04-10-26 ... added ap2t(), an ugly fcn for converting p-vals to T-vals
## 04-04-03 ... added amasslinregress() function to do regression on N-D arrays
## 03-01-03 ... CHANGED VERSION TO 0.6
## fixed atsem() to properly handle limits=None case
## improved histogram and median functions (estbinwidth) and
## fixed atvar() function (wrong answers for neg numbers?!?)
## 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
## 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
## 00-12-28 ... removed aanova() to separate module, fixed licensing to
## match Python License, fixed doc string & imports
## 00-04-13 ... pulled all "global" statements, except from aanova()
## added/fixed lots of documentation, removed io.py dependency
## changed to version 0.5
## 99-11-13 ... added asign() function
## 99-11-01 ... changed version to 0.4 ... enough incremental changes now
## 99-10-25 ... added acovariance and acorrelation functions
## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
## added aglm function (crude, but will be improved)
## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
## all handle lists of 'dimension's and keepdims
## REMOVED ar0, ar2, ar3, ar4 and replaced them with around
## reinserted fixes for abetai to avoid math overflows
## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
## handle multi-dimensional arrays (whew!)
## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
## added anormaltest per same reference
## re-wrote azprob to calc arrays of probs all at once
## 99-08-22 ... edited attest_ind printing section so arrays could be rounded
## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
## short/byte arrays (mean of #s btw 100-300 = -150??)
## 99-08-09 ... fixed asum so that the None case works for Byte arrays
## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
## 04/11/99 ... added asignaltonoise, athreshold functions, changed all
## max/min in array section to N.maximum/N.minimum,
## fixed square_of_sums to prevent integer overflow
## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
## 02/28/99 ... Fixed aobrientransform to return an array rather than a list
## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
## 01/13/99 ... CHANGED TO VERSION 0.3
## fixed bug in a/lmannwhitneyu p-value calculation
## 12/31/98 ... fixed variable-name bug in ldescribe
## 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
## 12/16/98 ... changed amedianscore to return float (not array) for 1 score
## 12/14/98 ... added atmin and atmax functions
## removed umath from import line (not needed)
## l/ageometricmean modified to reduce chance of overflows (take
## nth root first, then multiply)
## 12/07/98 ... added __version__variable (now 0.2)
## removed all 'stats.' from anova() fcn
## 12/06/98 ... changed those functions (except shellsort) that altered
## arguments in-place ... cumsum, ranksort, ...
## updated (and fixed some) doc-strings
## 12/01/98 ... added anova() function (requires NumPy)
## incorporated Dispatch class
## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
## added 'asum' function (added functionality to N.add.reduce)
## fixed both moment and amoment (two errors)
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
import pstat # required 3rd party module
import math, string, copy # required python modules
from types import *
__version__ = 0.6
############# DISPATCH CODE ##############
class Dispatch:
    """
    The Dispatch class, care of David Ascher, allows different functions to
    be called depending on the argument types.  This way, there can be one
    function name regardless of the argument type.  To access function doc
    in stats.py module, prefix the function with an 'l' or 'a' for list or
    array arguments, respectively.  That is, print stats.lmean.__doc__ or
    print stats.amean.__doc__ or whatever.
    """

    def __init__(self, *tuples):
        # Build a type -> handler map from (function, (type, ...)) pairs.
        self._dispatch = {}
        for handler, argtypes in tuples:
            for argtype in argtypes:
                if argtype in self._dispatch.keys():
                    raise ValueError("can't have two dispatches on "+str(argtype))
                self._dispatch[argtype] = handler
        self._types = self._dispatch.keys()

    def __call__(self, arg1, *args, **kw):
        # Route the call based on the concrete type of the first argument.
        if type(arg1) not in self._types:
            raise TypeError("don't know how to dispatch %s arguments" % type(arg1))
        return self._dispatch[type(arg1)](*(arg1,) + args, **kw)
##########################################################################
######################## LIST-BASED FUNCTIONS ########################
##########################################################################
### Define these regardless
####################################
####### CENTRAL TENDENCY #########
####################################
def lgeometricmean (inlist):
    """
    Calculates the geometric mean of the values in the passed list.
    That is:  n-th root of (x1 * x2 * ... * xn).  Assumes a '1D' list.

    Usage:   lgeometricmean(inlist)
    """
    # Multiply n-th roots together rather than taking the root of the full
    # product, which reduces the chance of intermediate overflow.
    root = 1.0 / len(inlist)
    product = 1.0
    for value in inlist:
        product = product * pow(value, root)
    return product
def lharmonicmean (inlist):
    """
    Calculates the harmonic mean of the values in the passed list.
    That is:  n / (1/x1 + 1/x2 + ... + 1/xn).  Assumes a '1D' list.

    Usage:   lharmonicmean(inlist)
    """
    # n divided by the sum of reciprocals.
    reciprocal_total = 0
    for value in inlist:
        reciprocal_total = reciprocal_total + 1.0 / value
    return len(inlist) / reciprocal_total
def lmean (inlist):
    """
    Returns the arithematic mean of the values in the passed list.
    Assumes a '1D' list, but will function on the 1st dim of an array(!).

    Usage:   lmean(inlist)
    """
    total = 0
    for value in inlist:
        total = total + value
    # float() keeps the division true division even for int inputs.
    return total / float(len(inlist))
def lmedian (inlist,numbins=1000):
    """
    Returns the computed median value of a list of numbers, given the
    number of bins to use for the histogram (more bins brings the computed value
    closer to the median score, default number of bins = 1000).  See G.W.
    Heiman's Basic Stats (1st Edition), or CRC Probability & Statistics.

    Usage:   lmedian (inlist, numbins=1000)
    """
    # Histogram over the full data range, then locate the first bin whose
    # cumulative count reaches the 50th percentile.
    (hist, smallest, binsize, extras) = histogram(inlist, numbins, [min(inlist), max(inlist)])
    cumhist = cumsum(hist)
    half = len(inlist) / 2.0
    for idx in range(len(cumhist)):
        if cumhist[idx] >= half:
            cfbin = idx
            break
    lrl = smallest + binsize * cfbin      # lower real limit of the median bin
    cfbelow = cumhist[cfbin - 1]          # cumulative frequency below that bin
    freq = float(hist[cfbin])             # frequency within the bin
    # Standard grouped-data median interpolation formula.
    return lrl + ((half - cfbelow) / freq) * binsize
def lmedianscore (inlist):
    """
    Returns the 'middle' score of the passed list.  If there is an even
    number of scores, the mean of the 2 middle scores is returned.

    Usage:   lmedianscore(inlist)
    """
    newlist = copy.deepcopy(inlist)
    newlist.sort()
    # BUG FIX: use floor division -- under Python 3 len(...)/2 is a float
    # and cannot be used as a list index (TypeError).
    if len(newlist) % 2 == 0:   # if even number of scores, average middle 2
        index = len(newlist) // 2
        median = float(newlist[index] + newlist[index-1]) / 2
    else:                       # odd: floor division lands on the middle score
        index = len(newlist) // 2
        median = newlist[index]
    return median
def lmode(inlist):
    """
    Returns a list of the modal (most common) score(s) in the passed
    list.  If there is more than one such score, all are returned.  The
    bin-count for the mode(s) is also returned.

    Usage:   lmode(inlist)
    Returns: bin-count for mode(s), a list of modal value(s)
    """
    scores = pstat.unique(inlist)
    scores.sort()
    counts = [inlist.count(item) for item in scores]
    best = max(counts)
    # Every score tying the maximum count is a mode (ascending order).
    mode = [scores[i] for i in range(len(scores)) if counts[i] == best]
    return best, mode
####################################
############ MOMENTS #############
####################################
def lmoment(inlist,moment=1):
    """
    Calculates the nth moment about the mean for a sample (defaults to
    the 1st moment).  Used to calculate coefficients of skewness and kurtosis.

    Usage:   lmoment(inlist,moment=1)
    Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
    """
    # The first moment about the mean is zero by definition.
    if moment == 1:
        return 0.0
    mn = mean(inlist)
    total = 0
    for score in inlist:
        total = total + (score - mn) ** moment
    return total / float(len(inlist))
def lvariation(inlist):
    """
    Returns the coefficient of variation, as defined in CRC Standard
    Probability and Statistics, p.6.

    Usage:   lvariation(inlist)
    """
    # 100 * (sample standard deviation / mean).
    spread = samplestdev(inlist)
    center = float(mean(inlist))
    return 100.0 * spread / center
def lskew(inlist):
    """
    Returns the skewness of a distribution, as defined in Numerical
    Recipies (alternate defn in CRC Standard Probability and Statistics, p.6.)

    Usage:   lskew(inlist)
    """
    # Third moment normalized by the 3/2 power of the second moment.
    num = moment(inlist, 3)
    denom = pow(moment(inlist, 2), 1.5)
    return num / denom
def lkurtosis(inlist):
    """
    Returns the kurtosis of a distribution, as defined in Numerical
    Recipies (alternate defn in CRC Standard Probability and Statistics, p.6.)

    Usage:   lkurtosis(inlist)
    """
    # Fourth moment normalized by the square of the second moment.
    num = moment(inlist, 4)
    denom = pow(moment(inlist, 2), 2.0)
    return num / denom
def ldescribe(inlist):
    """
    Returns some descriptive statistics of the passed list (assumed to be 1D).

    Usage:   ldescribe(inlist)
    Returns: n, (min,max), mean, standard deviation, skew, kurtosis
    """
    return (len(inlist),
            (min(inlist), max(inlist)),
            mean(inlist),
            stdev(inlist),
            skew(inlist),
            kurtosis(inlist))
####################################
####### FREQUENCY STATS ##########
####################################
def litemfreq(inlist):
    """
    Returns a list of pairs.  Each pair consists of one of the scores in inlist
    and it's frequency count.  Assumes a 1D list is passed.

    Usage:   litemfreq(inlist)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = pstat.unique(inlist)
    scores.sort()
    counts = [inlist.count(score) for score in scores]
    return pstat.abut(scores, counts)
def lscoreatpercentile (inlist, percent):
    """
    Returns the score at a given percentile relative to the distribution
    given by inlist.

    Usage:   lscoreatpercentile(inlist,percent)
    """
    # Accept percentages given as e.g. 95 rather than 0.95.
    if percent > 1:
        print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
        percent = percent / 100.0
    targetcf = percent*len(inlist)
    counts, lowlim, width, extras = histogram(inlist)
    cumhist = cumsum(copy.deepcopy(counts))
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    # Interpolate within the first bin whose cumulative count reaches target.
    return width * ((targetcf - cumhist[i-1]) / float(counts[i])) + (lowlim + width*i)
def lpercentileofscore (inlist, score,histbins=10,defaultlimits=None):
    """
    Returns the percentile value of a score relative to the distribution
    given by inlist.  Formula depends on the values used to histogram the data(!).

    Usage:   lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
    """
    counts, lowlim, width, extras = histogram(inlist, histbins, defaultlimits)
    cumhist = cumsum(copy.deepcopy(counts))
    binindex = int((score - lowlim) / float(width))
    # Fraction of the containing bin below the score, times that bin's count.
    within = ((score - (lowlim + width * binindex)) / float(width)) * counts[binindex]
    return (cumhist[binindex - 1] + within) / float(len(inlist)) * 100
def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
    """
    Returns (i) a list of histogram bin counts, (ii) the smallest value
    of the histogram binning, and (iii) the bin width (the last 2 are not
    necessarily integers).  Default number of bins is 10.  If no sequence object
    is given for defaultreallimits, the routine picks (usually non-pretty) bins
    spanning all the numbers in the inlist.

    Usage:   lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
    Returns: list of bin values, lowerreallimit, binsize, extrapoints
    """
    if defaultreallimits is not None:
        # BUG FIX: was "type(...) not in [ListType,TupleType]" -- those names
        # come from the Python 2 'types' module and no longer exist in
        # Python 3, so any call with limits raised NameError.
        if not isinstance(defaultreallimits, (list, tuple)) or len(defaultreallimits) == 1:
            # only one limit given, assumed to be lower one & upper is calc'd
            lowerreallimit = defaultreallimits
            upperreallimit = 1.000001 * max(inlist)
        else:  # assume both limits given
            lowerreallimit = defaultreallimits[0]
            upperreallimit = defaultreallimits[1]
        binsize = (upperreallimit - lowerreallimit) / float(numbins)
    else:  # no limits given for histogram, both must be calc'd
        estbinwidth = (max(inlist) - min(inlist)) / float(numbins) + 1e-6  # 1=>cover all
        binsize = ((max(inlist) - min(inlist) + estbinwidth)) / float(numbins)
        lowerreallimit = min(inlist) - binsize / 2  # lower real limit, 1st bin
    bins = [0] * (numbins)
    extrapoints = 0
    for num in inlist:
        try:
            if (num - lowerreallimit) < 0:
                extrapoints = extrapoints + 1
            else:
                bintoincrement = int((num - lowerreallimit) / float(binsize))
                bins[bintoincrement] = bins[bintoincrement] + 1
        except:  # values above the top bin raise IndexError and land here
            extrapoints = extrapoints + 1
    if (extrapoints > 0 and printextras == 1):
        print('\nPoints outside given histogram range =', extrapoints)
    return (bins, lowerreallimit, binsize, extrapoints)
def lcumfreq(inlist,numbins=10,defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    Usage:   lcumfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowlim, width, extra = histogram(inlist, numbins, defaultreallimits)
    return cumsum(copy.deepcopy(counts)), lowlim, width, extra
def lrelfreq(inlist,numbins=10,defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    Usage:   lrelfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of relfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowlim, width, extra = histogram(inlist, numbins, defaultreallimits)
    n = float(len(inlist))
    # Convert counts to proportions of the total N.
    rel = [c / n for c in counts]
    return rel, lowlim, width, extra
####################################
##### VARIABILITY FUNCTIONS ######
####################################
def lobrientransform(*args):
    """
    Computes a transform on input data (any number of columns).  Used to
    test for homogeneity of variance prior to running one-way stats.  From
    Maxwell and Delaney, p.112.

    Usage:   lobrientransform(*args)
    Returns: transformed data for use in an ANOVA
    Raises:  ValueError if the internal sanity check fails
    """
    TINY = 1e-10
    k = len(args)
    n = [0.0]*k
    v = [0.0]*k
    m = [0.0]*k
    nargs = []
    for i in range(k):
        nargs.append(copy.deepcopy(args[i]))
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])
        m[i] = mean(nargs[i])
    for j in range(k):
        # BUG FIX: n[j] is a float and range() requires an int under
        # Python 3; iterate over the actual column length instead.
        for i in range(len(nargs[j])):
            t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
            t2 = 0.5*v[j]*(n[j]-1.0)
            t3 = (n[j]-1.0)*(n[j]-2.0)
            nargs[j][i] = (t1-t2) / float(t3)
    # Sanity check from the source algorithm: the mean of each transformed
    # column should equal the original column variance.
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check != 1:
        raise ValueError('Problem in obrientransform.')
    else:
        return nargs
def lsamplevar (inlist):
    """
    Returns the variance of the values in the passed list using
    N for the denominator (i.e., DESCRIBES the sample variance only).

    Usage:   lsamplevar(inlist)
    """
    mn = mean(inlist)
    devs = [score - mn for score in inlist]
    # Sum of squared deviations over N.
    return ss(devs) / float(len(inlist))
def lsamplestdev (inlist):
    """
    Returns the standard deviation of the values in the passed list using
    N for the denominator (i.e., DESCRIBES the sample stdev only).

    Usage:   lsamplestdev(inlist)
    """
    variance = samplevar(inlist)
    return math.sqrt(variance)
def lcov (x,y, keepdims=0):
    """
    Returns the estimated covariance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:    lcov(x,y,keepdims=0)
    """
    xmn = mean(x)
    ymn = mean(y)
    # Sum of products of paired deviations, divided by N-1.
    # (Renamed the accumulator; the original reused the name 'ss', shadowing
    # the module-level sum-of-squares helper.)
    total = 0.0
    for i in range(len(x)):
        total = total + (x[i] - xmn) * (y[i] - ymn)
    return total / float(len(x) - 1)
def lvar (inlist):
    """
    Returns the variance of the values in the passed list using N-1
    for the denominator (i.e., for estimating population variance).

    Usage:   lvar(inlist)
    """
    mn = mean(inlist)
    devs = [score - mn for score in inlist]
    # Sum of squared deviations over N-1.
    return ss(devs) / float(len(inlist) - 1)
def lstdev (inlist):
    """
    Returns the standard deviation of the values in the passed list
    using N-1 in the denominator (i.e., to estimate population stdev).

    Usage:   lstdev(inlist)
    """
    variance = var(inlist)
    return math.sqrt(variance)
def lsterr(inlist):
    """
    Returns the standard error of the values in the passed list using N-1
    in the denominator (i.e., to estimate population standard error).

    Usage:   lsterr(inlist)
    """
    root_n = math.sqrt(len(inlist))
    return stdev(inlist) / float(root_n)
def lsem (inlist):
    """
    Returns the estimated standard error of the mean (sx-bar) of the
    values in the passed list.  sem = stdev / sqrt(n)

    Usage:   lsem(inlist)
    """
    return stdev(inlist) / math.sqrt(len(inlist))
def lz (inlist, score):
    """
    Returns the z-score for a given input score, given that score and the
    list from which that score came.  Not appropriate for population calculations.

    Usage:   lz(inlist, score)
    """
    # (score - mean) / sample standard deviation.
    return (score - mean(inlist)) / samplestdev(inlist)
def lzs (inlist):
    """
    Returns a list of z-scores, one for each score in the passed list.

    Usage:   lzs(inlist)
    """
    return [z(inlist, score) for score in inlist]
####################################
####### TRIMMING FUNCTIONS #######
####################################
def ltrimboth (l,proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
    10% of scores.  Assumes list is sorted by magnitude.  Slices off LESS if
    proportion results in a non-integer slice index (i.e., conservatively
    slices off proportiontocut).

    Usage:   ltrimboth (l,proportiontocut)
    Returns: trimmed version of list l
    """
    # int() truncation makes the trim conservative (never cuts too many).
    cut = int(proportiontocut * len(l))
    return l[cut:len(l) - cut]
def ltrim1 (l,proportiontocut,tail='right'):
    """
    Slices off the passed proportion of items from ONE end of the passed
    list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
    10% of scores).  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off proportiontocut).

    Usage:   ltrim1 (l,proportiontocut,tail='right')  or set tail='left'
    Returns: trimmed version of list l
    Raises:  ValueError if tail is neither 'right' nor 'left'
    """
    if tail == 'right':
        lowercut = 0
        uppercut = len(l) - int(proportiontocut*len(l))
    elif tail == 'left':
        lowercut = int(proportiontocut*len(l))
        uppercut = len(l)
    else:
        # BUG FIX: an unrecognized tail previously fell through to the
        # return and crashed with UnboundLocalError; fail with a clear error.
        raise ValueError("tail must be 'right' or 'left'")
    return l[lowercut:uppercut]
####################################
##### CORRELATION FUNCTIONS ######
####################################
def lpaired(x,y):
    """
    Interactively determines the type of data and then runs the
    appropriated statistic for paired group data.  Prompts on stdin for the
    sample/correlation type, prints the chosen statistic, and returns None.

    Usage:   lpaired(x,y)
    Returns: appropriate statistic name, value, and probability
    """
    samples = ''
    while samples not in ['i','r','I','R','c','C']:
        print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
        # BUG FIX: raw_input() does not exist under Python 3; input() is the
        # equivalent (Python 2's input() eval'd the text, never wanted here).
        samples = input()

    if samples in ['i','I','r','R']:
        print('\nComparing variances ...', end=' ')
        # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform(x,y)
        f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
        if p<0.05:
            vartype='unequal, p='+str(round(p,4))
        else:
            vartype='equal'
        print(vartype)
        if samples in ['i','I']:
            if vartype[0]=='e':
                t,p = ttest_ind(x,y,0)
                print('\nIndependent samples t-test: ', round(t,4),round(p,4))
            else:
                # unequal variances: fall back on non-parametric tests
                if len(x)>20 or len(y)>20:
                    z,p = ranksums(x,y)
                    print('\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4))
                else:
                    u,p = mannwhitneyu(x,y)
                    print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4))
        else:  # RELATED SAMPLES
            if vartype[0]=='e':
                t,p = ttest_rel(x,y,0)
                print('\nRelated samples t-test: ', round(t,4),round(p,4))
            else:
                t,p = ranksums(x,y)
                print('\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4))
    else:  # CORRELATION ANALYSIS
        corrtype = ''
        while corrtype not in ['c','C','r','R','d','D']:
            print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
            corrtype = input()
        if corrtype in ['c','C']:
            m,b,r,p,see = linregress(x,y)
            print('\nLinear regression for continuous variables ...')
            lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
            pstat.printcc(lol)
        elif corrtype in ['r','R']:
            r,p = spearmanr(x,y)
            print('\nCorrelation for ranked variables ...')
            print("Spearman's r: ",round(r,4),round(p,4))
        else:  # DICHOTOMOUS
            r,p = pointbiserialr(x,y)
            print('\nAssuming x contains a dichotomous variable ...')
            print('Point Biserial r: ',round(r,4),round(p,4))
    print('\n\n')
    return None
def lpearsonr(x,y):
    """
    Calculates a Pearson correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (2nd), p.195.

    Usage:   lpearsonr(x,y)      where x and y are equal-length lists
    Returns: Pearson's r value, two-tailed p-value
    Raises:  ValueError if x and y differ in length
    """
    TINY = 1.0e-30
    if len(x) != len(y):
        raise ValueError('Input values not paired in pearsonr. Aborting.')
    n = len(x)
    # BUG FIX: under Python 3, map() returns a one-shot iterator, so the
    # repeated traversals below (summult, sum, ss, ...) would see exhausted
    # data; materialize the float conversions as lists.
    # (Also dropped the unused xmean/ymean locals from the original.)
    x = list(map(float, x))
    y = list(map(float, y))
    r_num = n*(summult(x,y)) - sum(x)*sum(y)
    r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
    r = (r_num / r_den)  # denominator already a float
    df = n-2
    # TINY guards the t computation against division by zero when |r| == 1.
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = betai(0.5*df,0.5,df/float(df+t*t))
    return r, prob
def llincc(x,y):
    """
    Calculates Lin's concordance correlation coefficient.

    Usage:   alincc(x,y)    where x, y are equal-length arrays
    Returns: Lin's CC
    """
    covar = lcov(x,y)*(len(x)-1)/float(len(x))  # correct denom to n
    xvar = lvar(x)*(len(x)-1)/float(len(x))  # correct denom to n
    yvar = lvar(y)*(len(y)-1)/float(len(y))  # correct denom to n
    # BUG FIX: this list-based function previously called amean(), the
    # array (Numeric) version; use the list version lmean() to match the
    # lcov/lvar calls above.
    lincc = (2 * covar) / ((xvar+yvar) +((lmean(x)-lmean(y))**2))
    return lincc
def lspearmanr(x,y):
    """
    Calculates a Spearman rank-order correlation coefficient.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

    Usage:   lspearmanr(x,y)      where x and y are equal-length lists
    Returns: Spearman's r, two-tailed p-value
    Raises:  ValueError if x and y differ in length
    """
    TINY = 1e-30
    if len(x) != len(y):
        raise ValueError('Input values not paired in spearmanr. Aborting.')
    n = len(x)
    # Sum of squared rank differences drives the rs formula.
    dsq = sumdiffsquared(rankdata(x), rankdata(y))
    rs = 1 - 6*dsq / float(n*(n**2-1))
    t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    df = n-2
    probrs = betai(0.5*df,0.5,df/(df+t*t))  # t already a float
    # probability values for rs are from part 2 of the spearman function in
    # Numerical Recipies, p.510.  They are close to tables, but not exact. (?)
    return rs, probrs
def lpointbiserialr(x,y):
    """
    Calculates a point-biserial correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (1st), p.194.

    Usage:   lpointbiserialr(x,y)      where x,y are equal-length lists
    Returns: Point-biserial r, two-tailed p-value
    Raises:  ValueError on unpaired input or if x has != 2 categories
    """
    TINY = 1e-30
    if len(x) != len(y):
        raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.')
    data = pstat.abut(x,y)
    categories = pstat.unique(x)
    if len(categories) != 2:
        raise ValueError("Exactly 2 categories required for pointbiserialr().")
    # two categories: split the paired data by the category in column 0
    codemap = pstat.abut(categories,range(2))
    recoded = pstat.recode(data,codemap,0)
    group0 = pstat.linexand(data,0,categories[0])
    group1 = pstat.linexand(data,0,categories[1])
    mean0 = mean(pstat.colex(group0,1))
    mean1 = mean(pstat.colex(group1,1))
    n = len(data)
    adjust = math.sqrt((len(group0)/float(n))*(len(group1)/float(n)))
    rpb = (mean1 - mean0)/samplestdev(pstat.colex(data,1))*adjust
    df = n-2
    # TINY guards against division by zero when |rpb| == 1.
    t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
    prob = betai(0.5*df,0.5,df/(df+t*t))  # t already a float
    return rpb, prob
def lkendalltau(x,y):
    """
    Calculates Kendall's tau ... correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipies.  Needs good test-routine.@@@

    Usage:   lkendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0
    n2 = 0
    concordance = 0          # running concordant-minus-discordant count
    for j in range(len(x)-1):
        for k in range(j,len(y)):
            dx = x[j] - x[k]
            dy = y[j] - y[k]
            product = dx * dy
            if product:      # neither list has a tie for this pair
                n1 = n1 + 1
                n2 = n2 + 1
                concordance = concordance + (1 if product > 0 else -1)
            else:            # at least one tie: count toward one denominator
                if dx:
                    n1 = n1 + 1
                else:
                    n2 = n2 + 1
    tau = concordance / math.sqrt(n1*n2)
    # Normal approximation for the significance of tau.
    svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
    z = tau / math.sqrt(svar)
    prob = erfcc(abs(z)/1.4142136)
    return tau, prob
def llinregress(x,y):
    """
    Calculates a regression line on x,y pairs.

    Usage:   llinregress(x,y)      x,y are equal-length lists of x-y coordinates
    Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
    Raises:  ValueError if x and y differ in length
    """
    TINY = 1.0e-20
    if len(x) != len(y):
        raise ValueError('Input values not paired in linregress. Aborting.')
    n = len(x)
    # BUG FIX: under Python 3, map() returns a one-shot iterator; the data
    # are traversed repeatedly below (mean, summult, ss, ...), so the float
    # conversions must be materialized as lists.
    x = list(map(float, x))
    y = list(map(float, y))
    xmean = mean(x)
    ymean = mean(y)
    r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
    r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
    r = r_num / r_den
    z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))  # Fisher z (kept from original, unused)
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = betai(0.5*df,0.5,df/(df+t*t))
    slope = r_num / float(n*ss(x) - square_of_sums(x))
    intercept = ymean - slope*xmean
    sterrest = math.sqrt(1-r*r)*samplestdev(y)
    return slope, intercept, r, prob, sterrest
####################################
##### INFERENTIAL STATISTICS #####
####################################
def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
    """
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.  If printit=1, results are printed
    to the screen.  If printit='filename', the results are output to 'filename'
    using the given writemode (default=append).  Returns t-value, and prob.

    Usage:   lttest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
    Returns: t-value, two-tailed prob
    """
    samp_mean = mean(a)
    samp_var = var(a)
    n = len(a)
    df = n - 1
    svar = ((n - 1) * samp_var) / float(df)
    t = (samp_mean - popmean) / math.sqrt(svar * (1.0 / n))
    prob = betai(0.5 * df, 0.5, float(df) / (df + t * t))
    if printit != 0:
        statname = 'Single-sample T-test.'
        outputpairedstats(printit, writemode,
                          'Population', '--', popmean, 0, 0, 0,
                          name, n, samp_mean, samp_var, min(a), max(a),
                          statname, t, prob)
    return t, prob
def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
    """
    Calculates the t-obtained T-test on TWO INDEPENDENT samples of
    scores a, and b.  From Numerical Recipies, p.483.  If printit=1, results
    are printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Returns t-value,
    and prob.

    Usage:   lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed prob
    """
    mean1 = mean(a)
    mean2 = mean(b)
    var1 = stdev(a)**2
    var2 = stdev(b)**2
    n1 = len(a)
    n2 = len(b)
    df = n1 + n2 - 2
    # Pooled variance across the two samples.
    pooled = ((n1-1)*var1 + (n2-1)*var2) / float(df)
    t = (mean1 - mean2) / math.sqrt(pooled * (1.0/n1 + 1.0/n2))
    prob = betai(0.5*df, 0.5, df/(df+t*t))
    if printit != 0:
        statname = 'Independent samples T-test.'
        outputpairedstats(printit, writemode,
                          name1, n1, mean1, var1, min(a), max(a),
                          name2, n2, mean2, var2, min(b), max(b),
                          statname, t, prob)
    return t, prob
def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO RELATED samples of scores,
    a and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output to
    'filename' using the given writemode (default=append).  Returns t-value,
    and prob.

    Usage:   lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
    Returns: t-value, two-tailed prob
    Raises:  ValueError if a and b differ in length
    """
    if len(a)!=len(b):
        raise ValueError('Unequal length lists in ttest_rel.')
    mean1 = mean(a)
    mean2 = mean(b)
    var1 = var(a)
    var2 = var(b)
    n = len(a)
    df = n - 1
    # Covariance of the paired scores (lengths already verified equal).
    cov = 0
    for ai, bi in zip(a, b):
        cov = cov + (ai - mean1) * (bi - mean2)
    cov = cov / float(df)
    sd = math.sqrt((var1 + var2 - 2.0*cov) / float(n))
    t = (mean1 - mean2) / sd
    prob = betai(0.5*df, 0.5, df/(df+t*t))
    if printit != 0:
        statname = 'Related samples T-test.'
        outputpairedstats(printit, writemode,
                          name1, n, mean1, var1, min(a), max(a),
                          name2, n, mean2, var2, min(b), max(b),
                          statname, t, prob)
    return t, prob
def lchisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for list of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups.

    Usage:   lchisquare(f_obs, f_exp=None)   f_obs = list of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    k = len(f_obs)  # number of groups
    # Identity test against None ("== None" misbehaves on objects that
    # overload equality, e.g. numpy arrays).
    if f_exp is None:
        f_exp = [sum(f_obs)/float(k)] * len(f_obs)  # create k bins with = freq.
    chisq = 0
    for i in range(len(f_obs)):
        chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
    return chisq, chisqprob(chisq, k-1)
def lks_2samp (data1,data2):
    """
    Computes the Kolmogorov-Smirnof statistic on 2 samples.  From
    Numerical Recipies in C, page 493.

    Usage:   lks_2samp(data1,data2)   data1&2 are lists of values for 2 conditions
    Returns: KS D-value, associated p-value
    """
    j1 = 0
    j2 = 0
    fn1 = 0.0
    fn2 = 0.0
    n1 = len(data1)
    n2 = len(data2)
    en1 = n1
    en2 = n2
    d = 0.0
    # BUG FIX: sort copies rather than the caller's lists -- the original
    # sorted data1/data2 in place (cf. the 12/06/98 changelog entry about
    # removing in-place argument mutation).
    data1 = sorted(data1)
    data2 = sorted(data2)
    # Walk both sorted samples, tracking the maximum ECDF difference.
    while j1 < n1 and j2 < n2:
        d1 = data1[j1]
        d2 = data2[j2]
        if d1 <= d2:
            fn1 = (j1) / float(en1)
            j1 = j1 + 1
        if d2 <= d1:
            fn2 = (j2) / float(en2)
            j2 = j2 + 1
        dt = (fn2 - fn1)
        if math.fabs(dt) > math.fabs(d):
            d = dt
    try:
        en = math.sqrt(en1 * en2 / float(en1 + en2))
        prob = ksprob((en + 0.12 + 0.11 / en) * abs(d))
    except:  # numeric trouble in the significance calc -> no discrimination
        prob = 1.0
    return d, prob
def lmannwhitneyu(x,y):
    """
    Calculates a Mann-Whitney U statistic on the provided scores and
    returns the result.  Use only when the n in each condition is < 20 and
    you have 2 independent samples of ranks.  NOTE: Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U found in the tables.  Equivalent to Kruskal-Wallis H with
    just 2 groups.

    Usage:   lmannwhitneyu(data)
    Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
    Raises:  ValueError if all scores are identical (tie correction is 0)
    """
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(x+y)
    rankx = ranked[0:n1]      # ranks belonging to the x sample
    ranky = ranked[n1:]       # the rest are y-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx)  # U statistic for x
    u2 = n1*n2 - u1                            # remainder is U for y
    bigu = max(u1, u2)
    smallu = min(u1, u2)
    T = math.sqrt(tiecorrect(ranked))  # correction factor for tied scores
    if T == 0:
        raise ValueError('All numbers are identical in lmannwhitneyu')
    sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
    # Normal approximation for the probability calculation.
    z = abs((bigu - n1*n2/2.0) / sd)
    return smallu, 1.0 - zprob(z)
def ltiecorrect(rankvals):
    """
    Corrects for ties in Mann Whitney U and Kruskal Wallis H tests.  See
    Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
    New York: McGraw-Hill.  Code adapted from |Stat rankind.c code.

    Usage:   ltiecorrect(rankvals)
    Returns: T correction factor for U or H
    """
    # (Renamed the sorted-values local; the original shadowed builtin sorted.)
    ranks, posn = shellsort(rankvals)
    n = len(ranks)
    T = 0.0
    i = 0
    while i < n - 1:
        if ranks[i] == ranks[i + 1]:
            # Count the length of this run of tied values.
            nties = 1
            while i < n - 1 and ranks[i] == ranks[i + 1]:
                nties = nties + 1
                i = i + 1
            T = T + nties ** 3 - nties
        i = i + 1
    return 1.0 - T / float(n ** 3 - n)
def lranksums(x,y):
    """
    Calculates the rank sums statistic on the provided scores and
    returns the result.  Use only when the n in each condition is > 20 and you
    have 2 independent samples of ranks.

    Usage:   lranksums(x,y)
    Returns: a z-statistic, two-tailed p-value
    """
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(x + y)
    xranks = ranked[:n1]              # ranks of the first sample
    s = sum(xranks)
    expected = n1*(n1+n2+1) / 2.0     # expected rank sum under H0
    z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
    prob = 2*(1.0 - zprob(abs(z)))
    return z, prob
def lwilcoxont(x,y):
    """
    Calculates the Wilcoxon T-test for related samples and returns the
    result.  A non-parametric T-test.

    Usage:   lwilcoxont(x,y)
    Returns: a t-statistic, two-tail probability estimate
    Raises:  ValueError if x and y differ in length
    """
    if len(x) != len(y):
        raise ValueError('Unequal N in wilcoxont. Aborting.')
    # Keep only the non-zero paired differences.
    d = []
    for i in range(len(x)):
        diff = x[i] - y[i]
        if diff != 0:
            d.append(diff)
    count = len(d)
    # BUG FIX: map() returns a one-shot iterator under Python 3, which
    # breaks the len() and indexing uses below; build a list.
    absd = list(map(abs, d))
    absranked = rankdata(absd)
    r_plus = 0.0
    r_minus = 0.0
    for i in range(len(absd)):
        if d[i] < 0:
            r_minus = r_minus + absranked[i]
        else:
            r_plus = r_plus + absranked[i]
    wt = min(r_plus, r_minus)
    # Normal approximation for the significance of the smaller rank sum.
    mn = count * (count+1) * 0.25
    se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
    z = math.fabs(wt-mn) / se
    prob = 2*(1.0 - zprob(abs(z)))
    return wt, prob
def lkruskalwallish(*args):
    """
    The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
    groups, requiring at least 5 subjects in each group.  This function
    calculates the Kruskal-Wallis H-test for 3 or more independent samples
    and returns the result.

    Usage:   lkruskalwallish(*args)
    Returns: H-statistic (corrected for ties), associated p-value
    Raises:  ValueError if all scores are identical (tie correction is 0)
    """
    args = list(args)
    # BUG FIX: map() yields a one-shot iterator under Python 3, but n is
    # indexed repeatedly below; materialize the group sizes as a list.
    n = list(map(len, args))
    alldata = []  # renamed from 'all', which shadowed the builtin
    for i in range(len(args)):
        alldata = alldata + args[i]
    ranked = rankdata(alldata)
    T = tiecorrect(ranked)
    # Replace each group by its slice of the pooled ranks.
    for i in range(len(args)):
        args[i] = ranked[0:n[i]]
        del ranked[0:n[i]]
    rsums = []
    for i in range(len(args)):
        rsums.append(sum(args[i])**2)
        rsums[i] = rsums[i] / float(n[i])
    ssbn = sum(rsums)
    totaln = sum(n)
    h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
    df = len(args) - 1
    if T == 0:
        raise ValueError('All numbers are identical in lkruskalwallish')
    h = h / float(T)
    return h, chisqprob(h,df)
def lfriedmanchisquare(*args):
    """
    Friedman Chi-Square is a non-parametric, one-way within-subjects
    ANOVA.  This function calculates the Friedman Chi-square test for repeated
    measures and returns the result, along with the associated probability
    value.  It assumes 3 or more repeated measures.  Only 3 levels requires a
    minimum of 10 subjects in the study.  Four levels requires 5 subjects per
    level(??).

    Usage:   lfriedmanchisquare(*args)
    Returns: chi-square statistic, associated p-value
    Raises:  ValueError for fewer than 3 repeated measures
    """
    k = len(args)
    if k < 3:
        raise ValueError('Less than 3 levels.  Friedman test not appropriate.')
    n = len(args[0])
    data = pstat.abut(*tuple(args))
    for i in range(len(data)):
        data[i] = rankdata(data[i])  # rank scores within each subject (row)
    # BUG FIX: the statistic must be computed from the rank sums of each
    # condition (the columns of the ranked data), not from the raw scores in
    # args as the original code did.
    ssbn = 0
    for j in range(k):
        colsum = 0
        for i in range(len(data)):
            colsum = colsum + data[i][j]
        ssbn = ssbn + colsum**2
    chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
    return chisq, chisqprob(chisq,k-1)
####################################
#### PROBABILITY CALCULATIONS ####
####################################
def lchisqprob(chisq,df):
    """
    Returns the (1-tailed) probability value associated with the provided
    chi-square value and df.  Adapted from chisq.c in Gary Perlman's |Stat.

    Usage:   lchisqprob(chisq,df)

    Parameters: chisq -- obtained chi-square statistic
                df    -- degrees of freedom (int >= 1)
    Returns:    right-tail probability for a chi-square distribution with
                df degrees of freedom (1.0 for non-positive chisq or df < 1)
    """
    BIG = 20.0
    def ex(x):
        # Guarded exp(): returns 0.0 for very negative arguments instead of
        # underflowing inside math.exp.
        BIG = 20.0
        if x < -BIG:
            return 0.0
        else:
            return math.exp(x)
    # Degenerate input: all probability mass lies to the right.
    if chisq <=0 or df < 1:
        return 1.0
    a = 0.5 * chisq
    if df%2 == 0:
        even = 1
    else:
        even = 0
    if df > 1:
        y = ex(-a)
    # Seed the series: for even df start from exp(-chisq/2); for odd df
    # start from the normal-tail term 2*P(Z < -sqrt(chisq)).
    if even:
        s = y
    else:
        s = 2.0 * zprob(-math.sqrt(chisq))
    if (df > 2):
        chisq = 0.5 * (df - 1.0)
        if even:
            z = 1.0
        else:
            z = 0.5
        if a > BIG:
            # Large chisq/2: accumulate the series in log space to avoid
            # overflow, re-exponentiating each term via ex().
            if even:
                e = 0.0
            else:
                e = math.log(math.sqrt(math.pi))
            c = math.log(a)
            while (z <= chisq):
                e = math.log(z) + e
                s = s + ex(c*z-a-e)
                z = z + 1.0
            return s
        else:
            # Moderate chisq/2: accumulate the series terms directly.
            if even:
                e = 1.0
            else:
                e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
            c = 0.0
            while (z <= chisq):
                e = e * (a/float(z))
                c = c + e
                z = z + 1.0
            return (c*y+s)
    else:
        # df of 1 or 2: the seed term is already the answer.
        return s
def lerfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional
    error everywhere less than 1.2e-7.  Adapted from Numerical Recipies.

    Usage:   lerfcc(x)
    """
    z = abs(x)
    t = 1.0 / (1.0+0.5*z)
    # Rational approximation evaluated at |x| (coefficients from NR).
    ans = t * math.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
    # Use the reflection identity erfc(-x) = 2 - erfc(x) for negative input.
    return ans if x >= 0 else 2.0 - ans
def lzprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z<0, zprob(z) = 1-tail probability
        for z>0, 1.0-zprob(z) = 1-tail probability
        for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
    Adapted from z.c in Gary Perlman's |Stat.

    Usage:   lzprob(z)
    """
    Z_MAX = 6.0  # maximum meaningful z-value
    if z == 0.0:
        x = 0.0
    else:
        y = 0.5 * math.fabs(z)
        if y >= (Z_MAX*0.5):
            x = 1.0  # beyond ~6 sd the two-tailed area saturates
        elif (y < 1.0):
            # Polynomial approximation for |z| < 2 (coefficients from |Stat).
            w = y*y
            x = ((((((((0.000124818987 * w
                        -0.001075204047) * w +0.005198775019) * w
                      -0.019198292004) * w +0.059054035642) * w
                    -0.151968751364) * w +0.319152932694) * w
                  -0.531923007300) * w +0.797884560593) * y * 2.0
        else:
            # Polynomial approximation for the 2 <= |z| < 6 range.
            y = y - 2.0
            x = (((((((((((((-0.000045255659 * y
                             +0.000152529290) * y -0.000019538132) * y
                           -0.000676904986) * y +0.001390604284) * y
                         -0.000794620820) * y -0.002034254874) * y
                       +0.006549791214) * y -0.010557625006) * y
                     +0.011630447319) * y -0.009279453341) * y
                   +0.005353579108) * y -0.002141268741) * y
                 +0.000535310849) * y +0.999936657524
    # Convert the symmetric central-area value x into a left-tail probability.
    if z > 0.0:
        return ((x+1.0)*0.5)
    return ((1.0-x)*0.5)
def lksprob(alam):
    """
    Computes a Kolmolgorov-Smirnov t-test significance level.  Adapted from
    Numerical Recipies.  Sums the alternating-sign exponential series for
    the Kolmogorov distribution until successive terms become negligible.

    Usage:   lksprob(alam)
    """
    sign = 2.0            # alternating +/- 2 coefficient of each term
    total = 0.0
    prevterm = 0.0        # |previous term|, for the convergence test
    exponent = -2.0*alam*alam
    for j in range(1,201):
        term = sign*math.exp(exponent*j*j)
        total = total + term
        # stop once the new term is tiny relative to its predecessor or
        # to the accumulated sum
        if math.fabs(term) <= (0.001*prevterm) or math.fabs(term) < (1.0e-8*total):
            return total
        sign = -sign
        prevterm = math.fabs(term)
    return 1.0             # reached only if the series fails to converge
def lfprob (dfnum, dfden, F):
    """
    Returns the (1-tailed) significance level (p-value) of an F
    statistic given the degrees of freedom for the numerator (dfR-dfF) and
    the degrees of freedom for the denominator (dfF), via the incomplete
    beta function.

    Usage:   lfprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
    """
    ratio = dfden / float(dfden + dfnum*F)
    return betai(0.5*dfden, 0.5*dfnum, ratio)
def lbetacf(a,b,x):
    """
    This function evaluates the continued fraction form of the incomplete
    Beta function, betai, using the modified Lentz-style recurrence.
    (Adapted from: Numerical Recipies in C.)

    Usage:   lbetacf(a,b,x)
    """
    ITMAX = 200       # maximum number of continued-fraction iterations
    EPS = 3.0e-7      # relative accuracy at which iteration stops
    bm = az = am = 1.0
    qab = a+b
    qap = a+1.0
    qam = a-1.0
    bz = 1.0-qab*x/qap
    for i in range(ITMAX+1):
        em = float(i+1)
        tem = em + em
        # 'even' step of the recurrence
        d = em*(b-em)*x/((qam+tem)*(a+tem))
        ap = az + d*am
        bp = bz+d*bm
        # 'odd' step of the recurrence
        d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
        app = ap+d*az
        bpp = bp+d*bz
        aold = az
        # renormalize by bpp so the partial numerators/denominators
        # cannot over/underflow
        am = ap/bpp
        bm = bp/bpp
        az = app/bpp
        bz = 1.0
        if (abs(az-aold)<(EPS*abs(az))):
            return az     # converged
    # NOTE(review): on non-convergence this only prints a message and
    # implicitly returns None -- callers see None, not an exception
    print('a or b too big, or ITMAX too small in Betacf.')
def lgammln(xx):
    """
    Returns the natural log of the gamma function of xx, via the Lanczos
    series approximation.
    Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    (Adapted from: Numerical Recipies in C.)

    Usage:   lgammln(xx)
    """
    coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
             0.120858003e-2, -0.536382e-5]
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x+0.5)*math.log(tmp)
    ser = 1.0
    # accumulate the series, advancing x by one per coefficient
    for c in coeff:
        x = x + 1
        ser = ser + c/x
    return -tmp + math.log(2.50662827465*ser)
def lbetai(a,b,x):
    """
    Returns the incomplete beta function:

        I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The continued fraction formulation is implemented
    here, using the betacf function.  (Adapted from: Numerical Recipies in C.)

    Usage:   lbetai(a,b,x)
    """
    if x < 0.0 or x > 1.0:
        raise ValueError('Bad x in lbetai')
    # prefactor x^a * (1-x)^b / B(a,b), computed in log space
    if x == 0.0 or x == 1.0:
        bt = 0.0
    else:
        bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b*
                      math.log(1.0-x))
    # use the continued fraction directly where it converges fast,
    # otherwise use the symmetry relation
    if x < (a+1.0)/(a+b+2.0):
        return bt*betacf(a,b,x)/float(a)
    return 1.0-bt*betacf(b,a,1.0-x)/float(b)
####################################
####### ANOVA CALCULATIONS #######
####################################
def lF_oneway(*lists):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.

    Usage:   F_oneway(*lists)    where *lists is any number of lists, one per
                                  treatment group
    Returns: F value, one-tailed p-value
    """
    # NOTE: the old per-group means/vars/ns (built with lazy map() objects
    # that were never consumed under Python 3) were dead code and have been
    # removed; 'list' and 'vars' no longer shadow builtins.
    a = len(lists)                # ANOVA on 'a' groups, each in its own list
    alldata = []
    for group in lists:
        alldata = alldata + group
    alldata = N.array(alldata)
    bign = len(alldata)
    # total sum of squares about the grand mean
    sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
    # between-groups sum of squares
    ssbn = 0
    for group in lists:
        ssbn = ssbn + asquare_of_sums(N.array(group))/float(len(group))
    ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
    sswn = sstot-ssbn             # within-groups SS, by subtraction
    dfbn = a-1                    # between-groups degrees of freedom
    dfwn = bign - a               # within-groups degrees of freedom
    msb = ssbn/float(dfbn)
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = fprob(dfbn,dfwn,f)
    return f, prob
def lF_value (ER,EF,dfnum,dfden):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR-dfF = degrees of freedom of the numerator
        dfF = degrees of freedom associated with the denominator/Full model

    Usage:   lF_value(ER,EF,dfnum,dfden)
    """
    numerator = (ER - EF) / float(dfnum)
    denominator = EF / float(dfden)
    return numerator / denominator
####################################
######## SUPPORT FUNCTIONS #######
####################################
def writecc (listoflists,file,writetype='w',extra=2):
    """
    Writes a list of lists to a file in columns, customized by the max
    size of items within the columns (max size of items in col, +2 characters)
    to specified file.  File-overwrite is the default.  Rows equal to
    ['\\n']/'\\n' emit a blank line and rows equal to ['dashes']/'dashes'
    emit a separator row; neither participates in column sizing.

    Usage:   writecc (listoflists,file,writetype='w',extra=2)
    Returns: None
    """
    # promote a single (1D) row to a one-row table
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]
    outfile = open(file,writetype)
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    # find the special blank/dashes rows so they can be excluded from the
    # column-width calculation below
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()    # delete from the end so indices stay valid
    for row in rowstokill:
        del list2print[row]
    # column widths = widest stringified item per column, plus 'extra'
    maxsize = [0]*len(list2print[0])
    for col in range(len(list2print[0])):
        items = pstat.colex(list2print,col)
        items = map(pstat.makestr,items)
        maxsize[col] = max(map(len,items)) + extra
    # emit every original row, translating the special markers
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '-'*(maxsize[j]-2)
            outfile.write(pstat.lineincustcols(dashes,maxsize))
        else:
            outfile.write(pstat.lineincustcols(row,maxsize))
        outfile.write('\n')
    outfile.close()
    return None
def lincr(l,cap):    # to increment a list up to a max-list of 'cap'
    """
    Simulate a counting system from an n-dimensional list: increment the
    first position and propagate carries, e.g. [0,0,0] counts up toward
    cap=[2,4,3].

    Usage:   lincr(l,cap)   l=list to increment, cap=max values for each list pos'n
    Returns: next set of values for list l, OR -1 (if overflow)
    """
    l[0] = l[0] + 1                 # bump the least-significant position
    for pos in range(len(l)):
        if l[pos] <= cap[pos]:
            continue                # no carry needed at this position
        if pos < len(l)-1:          # carry into the next position
            l[pos] = 0
            l[pos+1] = l[pos+1] + 1
        else:                       # carried past the final position: done
            l = -1
    return l
def lsum (inlist):
    """
    Returns the sum of the items in the passed list.

    Usage:   lsum(inlist)
    """
    # accumulate manually: the builtin name `sum` is rebound to a Dispatch
    # object at module level, so it cannot be relied on here
    total = 0
    for value in inlist:
        total = total + value
    return total
def lcumsum (inlist):
    """
    Returns a list consisting of the cumulative sum of the items in the
    passed list.  The input list is not modified.

    Usage:   lcumsum(inlist)
    """
    running = copy.deepcopy(inlist)
    for idx in range(1,len(running)):
        running[idx] = running[idx] + running[idx-1]
    return running
def lss(inlist):
    """
    Squares each value in the passed list, adds up these squares and
    returns the result.

    Usage:   lss(inlist)
    """
    total = 0
    for value in inlist:
        total = total + value*value
    return total
def lsummult (list1,list2):
    """
    Multiplies elements in list1 and list2, element by element, and
    returns the sum of all resulting multiplications.  Must provide equal
    length lists.

    Usage:   lsummult(list1,list2)
    Returns: sum of list1[i]*list2[i] over all i
    Raises:  ValueError if the lists differ in length
    """
    if len(list1) != len(list2):
        raise ValueError("Lists not equal length in summult.")
    # zip() pairs the elements directly; lengths were verified above, so
    # this is equivalent to the former pstat.abut() pairing without the
    # external-module dependency
    s = 0
    for item1,item2 in zip(list1,list2):
        s = s + item1*item2
    return s
def lsumdiffsquared(x,y):
    """
    Takes pairwise differences of the values in lists x and y, squares
    these differences, and returns the sum of these squares.

    Usage:   lsumdiffsquared(x,y)
    Returns: sum[(x[i]-y[i])**2]
    """
    total = 0
    for idx in range(len(x)):
        total = total + (x[idx]-y[idx])**2
    return total
def lsquare_of_sums(inlist):
    """
    Adds the values in the passed list, squares the sum, and returns
    the result.

    Usage:   lsquare_of_sums(inlist)
    Returns: sum(inlist[i])**2
    """
    total = sum(inlist)
    return float(total)*total
def lshellsort(inlist):
    """
    Shellsort algorithm.  Sorts a 1D-list.

    Usage:   lshellsort(inlist)
    Returns: sorted-inlist, sorting-index-vector (for original list)
    """
    n = len(inlist)
    svec = copy.deepcopy(inlist)
    ivec = list(range(n))   # Py3 fix: range objects don't support item assignment
    gap = n // 2            # Py3 fix: '//' keeps the gap an integer
    while gap > 0:
        for i in range(gap,n):
            for j in range(i-gap,-1,-gap):
                while j>=0 and svec[j]>svec[j+gap]:
                    # swap the out-of-order pair, mirroring in the index vector
                    temp        = svec[j]
                    svec[j]     = svec[j+gap]
                    svec[j+gap] = temp
                    itemp       = ivec[j]
                    ivec[j]     = ivec[j+gap]
                    ivec[j+gap] = itemp
        gap = gap // 2      # Py3 fix: integer division
    # svec is now sorted inlist, and ivec has the order svec[i]=vec[ivec[i]]
    return svec, ivec
def lrankdata(inlist):
    """
    Ranks the data in inlist, dealing with ties appropritely.  Assumes
    a 1D inlist.  Adapted from Gary Perlman's |Stat ranksort.

    Usage:   lrankdata(inlist)
    Returns: a list of length equal to inlist, containing rank scores
    """
    n = len(inlist)
    svec, ivec = shellsort(inlist)
    ranks = [0]*n
    sumranks = 0
    dupcount = 0
    for i in range(n):
        sumranks = sumranks + i
        dupcount = dupcount + 1
        # at the end of a run of tied values (or of the list), assign the
        # average rank to every member of the run
        if i == n-1 or svec[i] != svec[i+1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i-dupcount+1,i+1):
                ranks[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return ranks
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
    """
    Prints or write to a file stats for two groups, using the name, n,
    mean, sterr, min and max for each group, as well as the statistic name,
    its value, and the associated p-value.  An empty/non-string fname sends
    the output to stdout instead of a file.

    Usage:   outputpairedstats(fname,writemode,
                               name1,n1,mean1,stderr1,min1,max1,
                               name2,n2,mean2,stderr2,min2,max2,
                               statname,stat,prob)
    Returns: None
    """
    suffix = ''                       # for *s after the p-value
    try:
        # unwrap a one-element array p-value into a plain scalar
        x = prob.shape
        prob = prob[0]
    except:
        pass
    # significance stars
    if prob < 0.001: suffix = ' ***'
    elif prob < 0.01: suffix = ' **'
    elif prob < 0.05: suffix = ' *'
    title = [['Name','N','Mean','SD','Min','Max']]
    lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
                  [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
    if type(fname)!=StringType or len(fname)==0:
        # no usable filename: print the table and statistic to stdout
        print()
        print(statname)
        print()
        pstat.printcc(lofl)
        print()
        try:
            # unwrap 0-d/1-element arrays so round() gets scalars
            if stat.shape == ():
                stat = stat[0]
            if prob.shape == ():
                prob = prob[0]
        except:
            pass
        print('Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix)
        print()
    else:
        # write the header, then the table, then the statistic line
        file = open(fname,writemode)
        file.write('\n'+statname+'\n\n')
        file.close()
        writecc(lofl,fname,'a')
        file = open(fname,'a')
        try:
            if stat.shape == ():
                stat = stat[0]
            if prob.shape == ():
                prob = prob[0]
        except:
            pass
        file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
        file.close()
    return None
def lfindwithin (data):
    """
    Returns an integer representing a binary vector, where 1=within-
    subject factor, 0=between.  Input equals the entire data 2D list (i.e.,
    column 0=random factor, column -1=measured values (those two are skipped).

    Note: input data is in |Stat format ... a list of lists ("2D list") with
    one row per measured value, first column=subject identifier, last column=
    score, one in-between column per factor (these columns contain level
    designations on each factor).  See also stats.anova.__doc__.

    Usage:   lfindwithin(data)     data in |Stat format
    """
    numfact = len(data[0])-1        # number of factor columns (subject col excluded)
    withinvec = 0
    for col in range(1,numfact):
        # take an arbitrary level of this factor and gather the rows at it
        examplelevel = pstat.unique(pstat.colex(data,col))[0]
        rows = pstat.linexand(data,col,examplelevel)  # get 1 level of this factor
        factsubjs = pstat.unique(pstat.colex(rows,0))
        allsubjs = pstat.unique(pstat.colex(data,0))
        # if every subject appears at this single level, the factor varies
        # within subjects; set this factor's bit in the result
        if len(factsubjs) == len(allsubjs):  # fewer Ss than scores on this factor?
            withinvec = withinvec + (1 << col)
    return withinvec
#########################################################
#########################################################
####### DISPATCH LISTS AND TUPLES TO ABOVE FCNS #########
#########################################################
#########################################################
## Dispatch wrappers: give each statistic its short public name.  Each
## Dispatch (class defined earlier in the file, not shown here) presumably
## routes a call to the l*-prefixed list implementation above when handed
## an argument of one of the listed types -- verify against the Dispatch
## class definition.
## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)), )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)), )
mean = Dispatch ( (lmean, (ListType, TupleType)), )
median = Dispatch ( (lmedian, (ListType, TupleType)), )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)), )
mode = Dispatch ( (lmode, (ListType, TupleType)), )
## MOMENTS:
moment = Dispatch ( (lmoment, (ListType, TupleType)), )
variation = Dispatch ( (lvariation, (ListType, TupleType)), )
skew = Dispatch ( (lskew, (ListType, TupleType)), )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)), )
describe = Dispatch ( (ldescribe, (ListType, TupleType)), )
## FREQUENCY STATISTICS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)), )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)), )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)), )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)), )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)), )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)), )
## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)), )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)), )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)), )
var = Dispatch ( (lvar, (ListType, TupleType)), )
stdev = Dispatch ( (lstdev, (ListType, TupleType)), )
sterr = Dispatch ( (lsterr, (ListType, TupleType)), )
sem = Dispatch ( (lsem, (ListType, TupleType)), )
z = Dispatch ( (lz, (ListType, TupleType)), )
zs = Dispatch ( (lzs, (ListType, TupleType)), )
## TRIMMING FCNS:
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)), )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)), )
## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)), )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)), )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)), )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)), )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)), )
linregress = Dispatch ( (llinregress, (ListType, TupleType)), )
## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)), )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)), )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)), )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)), )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)), )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)), )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)), )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)), )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)), )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)), )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)), )
## PROBABILITY CALCS:
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)), )
zprob = Dispatch ( (lzprob, (IntType, FloatType)), )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)), )
fprob = Dispatch ( (lfprob, (IntType, FloatType)), )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)), )
betai = Dispatch ( (lbetai, (IntType, FloatType)), )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)), )
gammln = Dispatch ( (lgammln, (IntType, FloatType)), )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)), )
F_value = Dispatch ( (lF_value, (ListType, TupleType)), )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType)), )
## NOTE: this rebinds the builtin names `sum`, `ss`, `z`, `var` etc. at
## module level -- code in this module must not rely on the builtins.
sum = Dispatch ( (lsum, (ListType, TupleType)), )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)), )
ss = Dispatch ( (lss, (ListType, TupleType)), )
summult = Dispatch ( (lsummult, (ListType, TupleType)), )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)), )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)), )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)), )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)), )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)), )
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
import numpy as N
import numpy.linalg as LA
#####################################
######## ACENTRAL TENDENCY ########
#####################################
def ageometricmean (inarray,dimension=None,keepdims=0):
"""
Calculates the geometric mean of the values in the passed array.
That is: n-th root of (x1 * x2 * ... * xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: ageometricmean(inarray,dimension=None,keepdims=0)
Returns: geometric mean computed over dim(s) listed in dimension
"""
inarray = N.array(inarray,N.float_)
if dimension == None:
inarray = N.ravel(inarray)
size = len(inarray)
mult = N.power(inarray,1.0/size)
mult = N.multiply.reduce(mult)
elif type(dimension) in [IntType,FloatType]:
size = inarray.shape[dimension]
mult = N.power(inarray,1.0/size)
mult = N.multiply.reduce(mult,dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
sum = N.reshape(sum,shp)
else: # must be a SEQUENCE of dims to average over
dims = list(dimension)
dims.sort()
dims.reverse()
size = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
mult = N.power(inarray,1.0/size)
for dim in dims:
mult = N.multiply.reduce(mult,dim)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
mult = N.reshape(mult,shp)
return mult
    def aharmonicmean (inarray,dimension=None,keepdims=0):
        """
        Calculates the harmonic mean of the values in the passed array.
        That is:  n / (1/x1 + 1/x2 + ... + 1/xn).  Defaults to ALL values in
        the passed array.  Use dimension=None to flatten array first.  REMEMBER: if
        dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
        if dimension is a sequence, it collapses over all specified dimensions.  If
        keepdims is set to 1, the resulting array will have as many dimensions as
        inarray, with only 1 'level' per dim that was collapsed over.

        Usage:   aharmonicmean(inarray,dimension=None,keepdims=0)
        Returns: harmonic mean computed over dim(s) in dimension
        """
        # NOTE(review): N.float_ was removed in NumPy 2.0 -- this code
        # presumes NumPy 1.x; confirm the installed version
        inarray = inarray.astype(N.float_)
        if dimension == None:
            inarray = N.ravel(inarray)
            size = len(inarray)
            s = N.add.reduce(1.0 / inarray)
        elif type(dimension) in [IntType,FloatType]:
            size = float(inarray.shape[dimension])
            s = N.add.reduce(1.0/inarray, dimension)
            if keepdims == 1:
                shp = list(inarray.shape)
                shp[dimension] = 1
                s = N.reshape(s,shp)
        else:           # must be a SEQUENCE of dims to average over
            dims = list(dimension)
            dims.sort()
            # nondims = the dimensions to KEEP
            nondims = []
            for i in range(len(inarray.shape)):
                if i not in dims:
                    nondims.append(i)
            tinarray = N.transpose(inarray,nondims+dims) # put keep-dims first
            idx = [0] *len(nondims)
            if idx == []:
                # collapsing over ALL dimensions: result is a single scalar
                size = len(N.ravel(inarray))
                s = asum(1.0 / inarray)
                if keepdims == 1:
                    s = N.reshape([s],N.ones(len(inarray.shape)))
            else:
                # walk every combination of the kept dimensions (via incr),
                # summing reciprocals over the trailing collapsed dimensions
                idx[0] = -1
                loopcap = N.array(tinarray.shape[0:len(nondims)]) -1
                s = N.zeros(loopcap+1,N.float_)
                while incr(idx,loopcap) != -1:
                    s[idx] = asum(1.0/tinarray[idx])
                size = N.multiply.reduce(N.take(inarray.shape,dims))
                if keepdims == 1:
                    shp = list(inarray.shape)
                    for dim in dims:
                        shp[dim] = 1
                    s = N.reshape(s,shp)
        return size / s
def amean (inarray,dimension=None,keepdims=0):
"""
Calculates the arithmatic mean of the values in the passed array.
That is: 1/n * (x1 + x2 + ... + xn). Defaults to ALL values in the
passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: amean(inarray,dimension=None,keepdims=0)
Returns: arithematic mean calculated over dim(s) in dimension
"""
if inarray.dtype in [N.int_, N.short,N.ubyte]:
inarray = inarray.astype(N.float_)
if dimension == None:
inarray = N.ravel(inarray)
sum = N.add.reduce(inarray)
denom = float(len(inarray))
elif type(dimension) in [IntType,FloatType]:
sum = asum(inarray,dimension)
denom = float(inarray.shape[dimension])
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
sum = N.reshape(sum,shp)
else: # must be a TUPLE of dims to average over
dims = list(dimension)
dims.sort()
dims.reverse()
sum = inarray *1.0
for dim in dims:
sum = N.add.reduce(sum,dim)
denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
sum = N.reshape(sum,shp)
return sum/denom
def amedian (inarray,numbins=1000):
"""
Calculates the COMPUTED median value of an array of numbers, given the
number of bins to use for the histogram (more bins approaches finding the
precise median value of the array; default number of bins = 1000). From
G.W. Heiman's Basic Stats, or CRC Probability & Statistics.
NOTE: THIS ROUTINE ALWAYS uses the entire passed array (flattens it first).
Usage: amedian(inarray,numbins=1000)
Returns: median calculated over ALL values in inarray
"""
inarray = N.ravel(inarray)
(hist, smallest, binsize, extras) = ahistogram(inarray,numbins,[min(inarray),max(inarray)])
cumhist = N.cumsum(hist) # make cumulative histogram
otherbins = N.greater_equal(cumhist,len(inarray)/2.0)
otherbins = list(otherbins) # list of 0/1s, 1s start at median bin
cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score
LRL = smallest + binsize*cfbin # get lower read limit of that bin
cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin
freq = hist[cfbin] # frequency IN the 50%ile bin
median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
return median
def amedianscore (inarray,dimension=None):
"""
Returns the 'middle' score of the passed array. If there is an even
number of scores, the mean of the 2 middle scores is returned. Can function
with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can
be None, to pre-flatten the array, or else dimension must equal 0).
Usage: amedianscore(inarray,dimension=None)
Returns: 'middle' score of the array, or the mean of the 2 middle scores
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
inarray = N.sort(inarray,dimension)
if inarray.shape[dimension] % 2 == 0: # if even number of elements
indx = inarray.shape[dimension]/2 # integer division correct
median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
else:
indx = inarray.shape[dimension] / 2 # integer division correct
median = N.take(inarray,[indx],dimension)
if median.shape == (1,):
median = median[0]
return median
    def amode(a, dimension=None):
        """
        Returns an array of the modal (most common) score in the passed array.
        If there is more than one such score, ONLY THE FIRST is returned.
        The bin-count for the modal values is also returned.  Operates on whole
        array (dimension=None), or on a given dimension.

        Usage:   amode(a, dimension=None)
        Returns: array of bin-counts for mode(s), array of corresponding modal values
        """
        if dimension == None:
            a = N.ravel(a)
            dimension = 0
        scores = pstat.aunique(N.ravel(a))       # get ALL unique values
        testshape = list(a.shape)
        testshape[dimension] = 1
        # running best-so-far modal values and their counts, one slot per
        # position along the non-collapsed dimensions
        oldmostfreq = N.zeros(testshape)
        oldcounts = N.zeros(testshape)
        for score in scores:
            template = N.equal(a,score)
            # count occurrences of this score along 'dimension'; the third
            # argument presumably keeps the collapsed dim (see asum) so the
            # shapes broadcast below -- verify against asum's signature
            counts = asum(template,dimension,1)
            # keep this score wherever it beats the current best count
            mostfrequent = N.where(counts>oldcounts,score,oldmostfreq)
            oldcounts = N.where(counts>oldcounts,counts,oldcounts)
            oldmostfreq = mostfrequent
        return oldcounts, mostfrequent
def atmean(a,limits=None,inclusive=(1,1)):
"""
Returns the arithmetic mean of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atmean(a,limits=None,inclusive=(1,1))
"""
if a.dtype in [N.int_, N.short,N.ubyte]:
a = a.astype(N.float_)
if limits == None:
return mean(a)
assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atmean"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atmean).")
elif limits[0]==None and limits[1]!=None:
mask = upperfcn(a,limits[1])
elif limits[0]!=None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]!=None and limits[1]!=None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
s = float(N.add.reduce(N.ravel(a*mask)))
n = float(N.add.reduce(N.ravel(mask)))
return s/n
def atvar(a,limits=None,inclusive=(1,1)):
"""
Returns the sample variance of values in an array, (i.e., using N-1),
ignoring values strictly outside the sequence passed to 'limits'.
Note: either limit in the sequence, or the value of limits itself,
can be set to None. The inclusive list/tuple determines whether the lower
and upper limiting bounds (respectively) are open/exclusive (0) or
closed/inclusive (1). ASSUMES A FLAT ARRAY (OR ELSE PREFLATTENS).
Usage: atvar(a,limits=None,inclusive=(1,1))
"""
a = a.astype(N.float_)
if limits == None or limits == [None,None]:
return avar(a)
assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atvar"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atvar).")
elif limits[0]==None and limits[1]!=None:
mask = upperfcn(a,limits[1])
elif limits[0]!=None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]!=None and limits[1]!=None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
a = N.compress(mask,a) # squish out excluded values
return avar(a)
def atmin(a,lowerlimit=None,dimension=None,inclusive=1):
"""
Returns the minimum value of a, along dimension, including only values less
than (or equal to, if inclusive=1) lowerlimit. If the limit is set to None,
all values in the array are used.
Usage: atmin(a,lowerlimit=None,dimension=None,inclusive=1)
"""
if inclusive: lowerfcn = N.greater
else: lowerfcn = N.greater_equal
if dimension == None:
a = N.ravel(a)
dimension = 0
if lowerlimit == None:
lowerlimit = N.minimum.reduce(N.ravel(a))-11
biggest = N.maximum.reduce(N.ravel(a))
ta = N.where(lowerfcn(a,lowerlimit),a,biggest)
return N.minimum.reduce(ta,dimension)
def atmax(a,upperlimit,dimension=None,inclusive=1):
"""
Returns the maximum value of a, along dimension, including only values greater
than (or equal to, if inclusive=1) upperlimit. If the limit is set to None,
a limit larger than the max value in the array is used.
Usage: atmax(a,upperlimit,dimension=None,inclusive=1)
"""
if inclusive: upperfcn = N.less
else: upperfcn = N.less_equal
if dimension == None:
a = N.ravel(a)
dimension = 0
if upperlimit == None:
upperlimit = N.maximum.reduce(N.ravel(a))+1
smallest = N.minimum.reduce(N.ravel(a))
ta = N.where(upperfcn(a,upperlimit),a,smallest)
return N.maximum.reduce(ta,dimension)
def atstdev(a,limits=None,inclusive=(1,1)):
"""
Returns the standard deviation of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atstdev(a,limits=None,inclusive=(1,1))
"""
return N.sqrt(tvar(a,limits,inclusive))
def atsem(a,limits=None,inclusive=(1,1)):
"""
Returns the standard error of the mean for the values in an array,
(i.e., using N for the denominator), ignoring values strictly outside
the sequence passed to 'limits'. Note: either limit in the sequence,
or the value of limits itself, can be set to None. The inclusive list/tuple
determines whether the lower and upper limiting bounds (respectively) are
open/exclusive (0) or closed/inclusive (1).
Usage: atsem(a,limits=None,inclusive=(1,1))
"""
sd = tstdev(a,limits,inclusive)
if limits == None or limits == [None,None]:
n = float(len(N.ravel(a)))
limits = [min(a)-1, max(a)+1]
assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atsem"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atsem).")
elif limits[0]==None and limits[1]!=None:
mask = upperfcn(a,limits[1])
elif limits[0]!=None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]!=None and limits[1]!=None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
term1 = N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask)))
return sd/math.sqrt(n)
#####################################
############ AMOMENTS #############
#####################################
def amoment(a,moment=1,dimension=None):
"""
Calculates the nth moment about the mean for a sample (defaults to the
1st moment). Generally used to calculate coefficients of skewness and
kurtosis. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions).
Usage: amoment(a,moment=1,dimension=None)
Returns: appropriate moment along given dimension
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
if moment == 1:
return 0.0
else:
mn = amean(a,dimension,1) # 1=keepdims
s = N.power((a-mn),moment)
return amean(s,dimension)
def avariation(a,dimension=None):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: avariation(a,dimension=None)
"""
return 100.0*asamplestdev(a,dimension)/amean(a,dimension)
def askew(a,dimension=None):
"""
Returns the skewness of a distribution (normal ==> 0.0; >0 means extra
weight in left tail). Use askewtest() to see if it's close enough.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions).
Usage: askew(a, dimension=None)
Returns: skew of vals in a along dimension, returning ZERO where all vals equal
"""
denom = N.power(amoment(a,2,dimension),1.5)
zero = N.equal(denom,0)
if type(denom) == N.ndarray and asum(zero) != 0:
print("Number of zeros in askew: ",asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a,3,dimension)/denom)
def akurtosis(a,dimension=None):
    """
    Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
    heavier in the tails, and usually more peaked).  Use akurtosistest()
    to see if it's close enough.  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   akurtosis(a,dimension=None)
    Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
    """
    # kurtosis = m4 / m2**2 (fourth central moment over squared variance)
    denom = N.power(amoment(a,2,dimension),2)
    zero = N.equal(denom,0)   # marks positions where all values were equal
    if type(denom) == N.ndarray and asum(zero) != 0:
        print("Number of zeros in akurtosis: ",asum(zero))
    denom = denom + zero      # prevent divide-by-zero
    # report 0 kurtosis wherever the variance was zero
    return N.where(zero,0,amoment(a,4,dimension)/denom)
def adescribe(inarray, dimension=None):
    """
    Returns several descriptive statistics of the passed array.  Dimension
    can equal None (ravel array first), an integer (the dimension over
    which to operate), or a sequence (operate over multiple dimensions).

    Usage:   adescribe(inarray,dimension=None)
    Returns: n, (min,max), mean, standard deviation, skew, kurtosis
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    count = inarray.shape[dimension]
    extremes = (N.minimum.reduce(inarray), N.maximum.reduce(inarray))
    return (count,
            extremes,
            amean(inarray, dimension),
            astdev(inarray, dimension),
            askew(inarray, dimension),
            akurtosis(inarray, dimension))
#####################################
######## NORMALITY TESTS ##########
#####################################
def askewtest(a,dimension=None):
    """
    Tests whether the skew is significantly different from a normal
    distribution.  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).

    Usage:   askewtest(a,dimension=None)
    Returns: z-score and 2-tail z-probability
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    b2 = askew(a,dimension)             # sample skewness
    n = float(a.shape[dimension])
    y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )
    # constants below transform the skew statistic to an approximate
    # standard normal Z (presumably D'Agostino's method -- confirm)
    beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
    W2 = -1 + N.sqrt(2*(beta2-1))
    delta = 1/N.sqrt(N.log(N.sqrt(W2)))
    alpha = N.sqrt(2/(W2-1))
    y = N.where(y==0,1,y)               # avoid log(0) in the Z computation
    Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
    return Z, (1.0-zprob(Z))*2          # two-tailed probability
def akurtosistest(a,dimension=None):
    """
    Tests whether a dataset has normal kurtosis (i.e.,
    kurtosis=3(n-1)/(n+1)) Valid only for n>20.  Dimension can equal None
    (ravel array first), an integer (the dimension over which to operate),
    or a sequence (operate over multiple dimensions).

    Usage:   akurtosistest(a,dimension=None)
    Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    n = float(a.shape[dimension])
    if n<20:
        print("akurtosistest only valid for n>=20 ... continuing anyway, n=",n)
    b2 = akurtosis(a,dimension)
    E = 3.0*(n-1) /(n+1)                       # expected kurtosis under normality
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
    x = (b2-E)/N.sqrt(varb2)                   # standardized kurtosis
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))/
                                                       (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 -2/(9.0*A)
    denom = 1 +x*N.sqrt(2/(A-4.0))
    denom = N.where(N.less(denom,0), 99, denom)   # 99 flags bad positions
    term2 = N.where(N.equal(denom,0), term1, N.power((1-2.0/A)/denom,1/3.0))
    Z = ( term1 - term2 ) / N.sqrt(2/(9.0*A))
    Z = N.where(N.equal(denom,99), 0, Z)          # zero out the flagged positions
    return Z, (1.0-zprob(Z))*2                    # two-tailed probability
def anormaltest(a, dimension=None):
    """
    Tests whether skew and/OR kurtosis of dataset differs from normal
    curve.  Can operate over multiple dimensions.  Dimension can equal
    None (ravel array first), an integer (the dimension over which to
    operate), or a sequence (operate over multiple dimensions).

    Usage:   anormaltest(a,dimension=None)
    Returns: z-score and 2-tail probability
    """
    if dimension is None:
        a = N.ravel(a)
        dimension = 0
    skew_z, _ = askewtest(a, dimension)
    kurt_z, _ = akurtosistest(a, dimension)
    # combine the two Z statistics into a chi-square with 2 df
    k2 = N.power(skew_z, 2) + N.power(kurt_z, 2)
    return k2, achisqprob(k2, 2)
#####################################
###### AFREQUENCY FUNCTIONS #######
#####################################
def aitemfreq(a):
    """
    Returns a 2D array of item frequencies.  Column 1 contains item values,
    column 2 contains their respective counts.  Assumes a 1D array is passed.
    @@@sorting OK?

    Usage:   aitemfreq(a)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = N.sort(pstat.aunique(a))
    freq = N.zeros(len(scores))
    # count occurrences of each distinct score
    for idx, score in enumerate(scores):
        freq[idx] = N.add.reduce(N.equal(a, score))
    return N.array(pstat.aabut(scores, freq))
def ascoreatpercentile (inarray, percent):
    """
    Usage:   ascoreatpercentile(inarray,percent)   0<percent<100
    Returns: score at given percentile, relative to inarray distribution
    """
    percent = percent / 100.0
    targetcf = percent*len(inarray)          # target cumulative frequency
    h, lrl, binsize, extras = histogram(inarray)
    cumhist = cumsum(h*1)
    # locate the first bin whose cumulative count reaches the target
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    # linearly interpolate within bin i to estimate the score
    score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
    return score
def apercentileofscore (inarray,score,histbins=10,defaultlimits=None):
    """
    Note: result of this function depends on the values used to histogram
    the data(!).

    Usage:   apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
    Returns: percentile-position of score (0-100) relative to inarray
    """
    h, lrl, binsize, extras = histogram(inarray,histbins,defaultlimits)
    cumhist = cumsum(h*1)
    i = int((score - lrl)/float(binsize))    # bin index containing the score
    # cumulative count below bin i, plus the interpolated fraction of bin i
    pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100
    return pct
def ahistogram(inarray, numbins=10, defaultlimits=None, printextras=1):
    """
    Returns (i) an array of histogram bin counts, (ii) the smallest value
    of the histogram binning, and (iii) the bin width (the last 2 are not
    necessarily integers).  Default number of bins is 10.  Defaultlimits
    can be None (the routine picks bins spanning all the numbers in the
    inarray) or a 2-sequence (lowerlimit, upperlimit).  Returns all of the
    following: array of bin values, lowerreallimit, binsize, extrapoints.

    Usage:   ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
    Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range)
    """
    inarray = N.ravel(inarray)               # flatten any >1D arrays
    # FIX: identity test against None instead of '!=' comparison
    if defaultlimits is not None:
        lowerreallimit = defaultlimits[0]
        upperreallimit = defaultlimits[1]
        binsize = (upperreallimit - lowerreallimit) / float(numbins)
    else:
        Min = N.minimum.reduce(inarray)
        Max = N.maximum.reduce(inarray)
        # widen the range slightly so the max value falls inside the top bin
        estbinwidth = float(Max - Min) / float(numbins) + 1e-6
        binsize = (Max - Min + estbinwidth) / float(numbins)
        lowerreallimit = Min - binsize / 2.0  # lower real limit, 1st bin
    bins = N.zeros(numbins)
    extrapoints = 0
    for num in inarray:
        try:
            if (num - lowerreallimit) < 0:
                extrapoints = extrapoints + 1
            else:
                bintoincrement = int((num - lowerreallimit) / float(binsize))
                bins[bintoincrement] = bins[bintoincrement] + 1
        # FIX: was a bare 'except:'; Exception still catches the expected
        # IndexError for points above the top bin without swallowing
        # KeyboardInterrupt/SystemExit
        except Exception:                     # point outside lower/upper limits
            extrapoints = extrapoints + 1
    if extrapoints > 0 and printextras == 1:
        print('\nPoints outside given histogram range =', extrapoints)
    return (bins, lowerreallimit, binsize, extrapoints)
def acumfreq(a, numbins=10, defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.
    Defaultreallimits can be None (use all data), or a 2-sequence containing
    lower and upper limits on values to include.

    Usage:   acumfreq(a,numbins=10,defaultreallimits=None)
    Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowlim, width, extras = histogram(a, numbins, defaultreallimits)
    # running total of the per-bin counts
    return cumsum(counts * 1), lowlim, width, extras
def arelfreq(a, numbins=10, defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.
    Defaultreallimits can be None (use all data), or a 2-sequence containing
    lower and upper limits on values to include.

    Usage:   arelfreq(a,numbins=10,defaultreallimits=None)
    Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowlim, width, extras = histogram(a, numbins, defaultreallimits)
    # normalize counts by the total number of observations
    rel = N.array(counts / float(a.shape[0]))
    return rel, lowlim, width, extras
#####################################
###### AVARIABILITY FUNCTIONS #####
#####################################
def aobrientransform(*args):
    """
    Computes a transform on input data (any number of columns).  Used to
    test for homogeneity of variance prior to running one-way stats.  Each
    array in *args is one level of a factor.  If an F_oneway() run on the
    transformed data and found significant, variances are unequal.  From
    Maxwell and Delaney, p.112.

    Usage:   aobrientransform(*args)    *args = 1D arrays, one per level of factor
    Returns: transformed data for use in an ANOVA
    """
    TINY = 1e-10
    k = len(args)
    n = N.zeros(k, N.float_)    # per-level sample sizes
    v = N.zeros(k, N.float_)    # per-level variances
    m = N.zeros(k, N.float_)    # per-level means
    nargs = []
    for i in range(k):
        nargs.append(args[i].astype(N.float_))
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])
        m[i] = mean(nargs[i])
    for j in range(k):
        # BUG FIX: n[j] is a numpy float and range() requires an int in
        # Python 3; the original 'range(n[j])' raised TypeError
        for i in range(int(n[j])):
            t1 = (n[j] - 1.5) * n[j] * (nargs[j][i] - m[j]) ** 2
            t2 = 0.5 * v[j] * (n[j] - 1.0)
            t3 = (n[j] - 1.0) * (n[j] - 2.0)
            nargs[j][i] = (t1 - t2) / float(t3)
    # sanity check: transformed means should equal the original variances
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check != 1:
        raise ValueError('Lack of convergence in obrientransform.')
    else:
        return N.array(nargs)
def asamplevar(inarray, dimension=None, keepdims=0):
    """
    Returns the sample variance of the values in the passed array (i.e.,
    using N in the denominator).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a sequence
    (operate over multiple dimensions).  Set keepdims=1 to return an array
    with the same number of dimensions as inarray.
    (Docstring fixed: it previously said "standard deviation"; this
    function returns the variance -- see asamplestdev for the sqrt.)

    Usage:   asamplevar(inarray,dimension=None,keepdims=0)
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    if dimension == 1:
        # BUG FIX: numpy spells this N.newaxis; N.NewAxis was the old
        # Numeric name and raises AttributeError under numpy
        mn = amean(inarray, dimension)[:, N.newaxis]
    else:
        mn = amean(inarray, dimension, keepdims=1)
    deviations = inarray - mn
    # total element count along the requested dimension(s)
    if type(dimension) == ListType:
        n = 1
        for d in dimension:
            n = n * inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    svar = ass(deviations, dimension, keepdims) / float(n)
    return svar
def asamplestdev(inarray, dimension=None, keepdims=0):
    """
    Returns the sample standard deviation of the values in the passed
    array (i.e., using N).  Dimension can equal None (ravel array first),
    an integer (the dimension over which to operate), or a sequence
    (operate over multiple dimensions).  Set keepdims=1 to return an array
    with the same number of dimensions as inarray.

    Usage:   asamplestdev(inarray,dimension=None,keepdims=0)
    """
    variance = asamplevar(inarray, dimension, keepdims)
    return N.sqrt(variance)
def asignaltonoise(instack, dimension=0):
    """
    Calculates signal-to-noise.  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   asignaltonoise(instack,dimension=0):
    Returns: array containing the value of (mean/stdev) along dimension,
             or 0 when stdev=0
    """
    mu = mean(instack, dimension)
    sigma = stdev(instack, dimension)
    # report 0 wherever the standard deviation is zero (avoid divide-by-zero)
    return N.where(sigma == 0, 0, mu / sigma)
def acov (x,y, dimension=None,keepdims=0):
    """
    Returns the estimated covariance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   acov(x,y,dimension=None,keepdims=0)
    """
    if dimension == None:
        x = N.ravel(x)
        y = N.ravel(y)
        dimension = 0
    xmn = amean(x,dimension,1)  # keepdims
    xdeviations = x - xmn
    ymn = amean(y,dimension,1)  # keepdims
    ydeviations = y - ymn
    # total element count along the requested dimension(s)
    if type(dimension) == ListType:
        n = 1
        for d in dimension:
            n = n*x.shape[d]
    else:
        n = x.shape[dimension]
    # unbiased estimator: sum of cross-deviations over (n-1)
    covar = N.sum(xdeviations*ydeviations)/float(n-1)
    return covar
def avar (inarray, dimension=None,keepdims=0):
    """
    Returns the estimated population variance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   avar(inarray,dimension=None,keepdims=0)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    mn = amean(inarray,dimension,1)   # keepdims so subtraction broadcasts
    deviations = inarray - mn
    # total element count along the requested dimension(s)
    if type(dimension) == ListType:
        n = 1
        for d in dimension:
            n = n*inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    # unbiased estimator: sum of squared deviations over (n-1)
    var = ass(deviations,dimension,keepdims)/float(n-1)
    return var
def astdev(inarray, dimension=None, keepdims=0):
    """
    Returns the estimated population standard deviation of the values in
    the passed array (i.e., N-1).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).  Set keepdims=1 to return
    an array with the same number of dimensions as inarray.

    Usage:   astdev(inarray,dimension=None,keepdims=0)
    """
    variance = avar(inarray, dimension, keepdims)
    return N.sqrt(variance)
def asterr(inarray, dimension=None, keepdims=0):
    """
    Returns the estimated population standard error of the values in the
    passed array (i.e., N-1).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).  Set keepdims=1 to return
    an array with the same number of dimensions as inarray.

    Usage:   asterr(inarray,dimension=None,keepdims=0)
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    sd = astdev(inarray, dimension, keepdims)
    # standard error = stdev / sqrt(count along the chosen dimension)
    return sd / float(N.sqrt(inarray.shape[dimension]))
def asem(inarray, dimension=None, keepdims=0):
    """
    Returns the standard error of the mean (i.e., using N) of the values
    in the passed array.  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   asem(inarray,dimension=None, keepdims=0)
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    # total element count along the requested dimension(s)
    if type(dimension) == ListType:
        n = 1
        for d in dimension:
            n *= inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    return asamplestdev(inarray, dimension, keepdims) / N.sqrt(n - 1)
def az(a, score):
    """
    Returns the z-score of a given input score, given thearray from which
    that score came.  Not appropriate for population calculations, nor for
    arrays > 1D.

    Usage:   az(a, score)
    """
    deviation = score - amean(a)
    return deviation / asamplestdev(a)
def azs (a):
    """
    Returns a 1D array of z-scores, one for each score in the passed array,
    computed relative to the passed array.

    Usage:   azs(a)
    """
    zscores = []
    for item in a:
        # NOTE(review): this calls z() -- presumably the list-based version
        # defined elsewhere in this module -- rather than az(); confirm
        # that is intentional.
        zscores.append(z(a,item))
    return N.array(zscores)
def azmap (scores, compare, dimension=0):
    """
    Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to
    array passed to compare (e.g., [time,x,y]).  Assumes collapsing over dim 0
    of the compare array.

    Usage:   azmap(scores, compare, dimension=0)
    """
    mns = amean(compare,dimension)
    # NOTE: the stdev is always taken over dim 0, regardless of 'dimension'
    sstd = asamplestdev(compare,0)
    return (scores - mns) / sstd
#####################################
####### ATRIMMING FUNCTIONS #######
#####################################
## deleted around() as it's in numpy now
def athreshold(a, threshmin=None, threshmax=None, newval=0):
    """
    Like Numeric.clip() except that values <threshmid or >threshmax are replaced
    by newval instead of by threshmin/threshmax (respectively).

    Usage:   athreshold(a,threshmin=None,threshmax=None,newval=0)
    Returns: a, with values <threshmin or >threshmax replaced with newval
    """
    # accumulate a 0/1 mask of the out-of-range positions
    out_of_range = N.zeros(a.shape)
    if threshmin is not None:
        out_of_range = out_of_range + N.where(a < threshmin, 1, 0)
    if threshmax is not None:
        out_of_range = out_of_range + N.where(a > threshmax, 1, 0)
    out_of_range = N.clip(out_of_range, 0, 1)
    return N.where(out_of_range, newval, a)
def atrimboth(a, proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
    'rightmost' 10% of scores.  You must pre-sort the array if you want
    "proper" trimming.  Slices off LESS if proportion results in a
    non-integer slice index (i.e., conservatively slices off
    proportiontocut).

    Usage:   atrimboth (a,proportiontocut)
    Returns: trimmed version of array a
    """
    # int() truncates, so a fractional cut removes conservatively (less)
    cut = int(proportiontocut * len(a))
    return a[cut:len(a) - cut]
def atrim1(a, proportiontocut, tail='right'):
    """
    Slices off the passed proportion of items from ONE end of the passed
    array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
    10% of scores).  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off proportiontocut).

    Usage:   atrim1(a,proportiontocut,tail='right')  or set tail='left'
    Returns: trimmed version of array a
    """
    # BUG FIX: string.lower(tail) relied on a function removed in Python 3;
    # use the str method instead
    side = tail.lower()
    if side == 'right':
        lowercut = 0
        uppercut = len(a) - int(proportiontocut * len(a))
    elif side == 'left':
        lowercut = int(proportiontocut * len(a))
        uppercut = len(a)
    else:
        # previously an invalid tail fell through to UnboundLocalError
        raise ValueError("tail must be 'right' or 'left'")
    return a[lowercut:uppercut]
#####################################
##### ACORRELATION FUNCTIONS ######
#####################################
def acovariance(X):
    """
    Computes the covariance matrix of a matrix X.  Requires a 2D matrix input.

    Usage:   acovariance(X)
    Returns: covariance matrix of X
    """
    if len(X.shape) != 2:
        raise TypeError("acovariance requires 2D matrices")
    n = X.shape[0]
    colmeans = amean(X, 0)
    # E[X'X]/n minus the outer product of the column means
    return N.dot(N.transpose(X), X) / float(n) - N.multiply.outer(colmeans, colmeans)
def acorrelation(X):
    """
    Computes the correlation matrix of a matrix X.  Requires a 2D matrix input.

    Usage:   acorrelation(X)
    Returns: correlation matrix of X
    """
    C = acovariance(X)
    variances = N.diagonal(C)
    # normalize each covariance by the product of the two standard deviations
    return C / N.sqrt(N.multiply.outer(variances, variances))
def apaired(x, y):
    """
    Interactively determines the type of data in x and y, and then runs the
    appropriated statistic for paired group data.

    Usage:   apaired(x,y)     x,y = the two arrays of values to be compared
    Returns: appropriate statistic name, value, and probability
    """
    samples = ''
    while samples not in ['i', 'r', 'I', 'R', 'c', 'C']:
        print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
        # BUG FIX: raw_input() was removed in Python 3; input() is its
        # replacement
        samples = input()

    if samples in ['i', 'I', 'r', 'R']:
        print('\nComparing variances ...', end=' ')
        # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform(x, y)
        f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1))
        if p < 0.05:
            vartype = 'unequal, p=' + str(round(p, 4))
        else:
            vartype = 'equal'
        print(vartype)
        if samples in ['i', 'I']:
            if vartype[0] == 'e':      # equal variances: parametric t-test
                t, p = ttest_ind(x, y, None, 0)
                print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
            else:                      # unequal variances: nonparametric tests
                if len(x) > 20 or len(y) > 20:
                    z, p = ranksums(x, y)
                    print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
                else:
                    u, p = mannwhitneyu(x, y)
                    print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
        else:  # RELATED SAMPLES
            if vartype[0] == 'e':
                t, p = ttest_rel(x, y, 0)
                print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
            else:
                t, p = ranksums(x, y)
                print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
    else:  # CORRELATION ANALYSIS
        corrtype = ''
        while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']:
            print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
            # BUG FIX: raw_input() -> input() (Python 3)
            corrtype = input()
        if corrtype in ['c', 'C']:
            m, b, r, p, see = linregress(x, y)
            print('\nLinear regression for continuous variables ...')
            lol = [['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'],
                   [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]]
            pstat.printcc(lol)
        elif corrtype in ['r', 'R']:
            r, p = spearmanr(x, y)
            print('\nCorrelation for ranked variables ...')
            print("Spearman's r: ", round(r, 4), round(p, 4))
        else:  # DICHOTOMOUS
            r, p = pointbiserialr(x, y)
            print('\nAssuming x contains a dichotomous variable ...')
            print('Point Biserial r: ', round(r, 4), round(p, 4))
    print('\n\n')
    return None
def dices(x, y):
    """
    Calculates Dice's coefficient ... (2*number of common terms)/(number of
    terms in x + number of terms in y). Returns a value between 0 (orthogonal)
    and 1.

    Usage:  dices(x,y)
    """
    # BUG FIX: the 'sets' module was removed in Python 3; the builtin set
    # type is a drop-in replacement for sets.Set
    x = set(x)
    y = set(y)
    common = len(x.intersection(y))
    total = float(len(x) + len(y))
    return 2 * common / total
def icc(x, y=None, verbose=0):
    """
    Calculates intraclass correlation coefficients using simple, Type I sums
    of squares.  If only one variable is passed, assumed it's an Nx2 matrix

    Usage:   icc(x,y=None,verbose=0)
    Returns: icc rho, prob  ####PROB IS A GUESS BASED ON PEARSON
    """
    TINY = 1.0e-20
    # BUG FIX: 'if y:' raises "truth value of an array is ambiguous" when y
    # is an ndarray; test identity against None instead
    if y is not None:
        all = N.concatenate([x, y], 0)
    else:
        all = x + 0
        x = all[:, 0]
        y = all[:, 1]
    totalss = ass(all - mean(all))
    pairmeans = (x + y) / 2.
    withinss = ass(x - pairmeans) + ass(y - pairmeans)
    withindf = float(len(x))
    betwdf = float(len(x) - 1)
    withinms = withinss / withindf
    betweenms = (totalss - withinss) / betwdf
    rho = (betweenms - withinms) / (withinms + betweenms)
    # significance via a t-approximation (TINY guards rho = +/-1)
    t = rho * math.sqrt(betwdf / ((1.0 - rho + TINY) * (1.0 + rho + TINY)))
    prob = abetai(0.5 * betwdf, 0.5, betwdf / (betwdf + t * t), verbose)
    return rho, prob
def alincc(x, y):
    """
    Calculates Lin's concordance correlation coefficient.

    Usage:   alincc(x,y)    where x, y are equal-length arrays
    Returns: Lin's CC
    """
    x = N.ravel(x)
    y = N.ravel(y)
    # rescale the (n-1)-denominator estimates to n-denominator estimates
    covar = acov(x, y) * (len(x) - 1) / float(len(x))
    xvar = avar(x) * (len(x) - 1) / float(len(x))
    yvar = avar(y) * (len(y) - 1) / float(len(y))
    meandiff_sq = (amean(x) - amean(y)) ** 2
    return (2 * covar) / ((xvar + yvar) + meandiff_sq)
def apearsonr(x,y,verbose=1):
    """
    Calculates a Pearson correlation coefficient and returns p.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.

    Usage:   apearsonr(x,y,verbose=1)   where x,y are equal length arrays
    Returns: Pearson's r, two-tailed p-value
    """
    TINY = 1.0e-20
    n = len(x)
    xmean = amean(x)
    ymean = amean(y)
    # computational formula for r: n*Sxy - Sx*Sy over sqrt of the products
    r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
    r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
    r = (r_num / r_den)
    df = n-2
    # significance via t distribution (TINY guards r = +/-1)
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t),verbose)
    return r,prob
def aspearmanr(x,y):
    """
    Calculates a Spearman rank-order correlation coefficient.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

    Usage:   aspearmanr(x,y)   where x,y are equal-length arrays
    Returns: Spearman's r, two-tailed p-value
    """
    TINY = 1e-30
    n = len(x)
    rankx = rankdata(x)
    ranky = rankdata(y)
    # sum of squared rank differences
    dsq = N.add.reduce((rankx-ranky)**2)
    rs = 1 - 6*dsq / float(n*(n**2-1))
    t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    df = n-2
    probrs = abetai(0.5*df,0.5,df/(df+t*t))
    # probability values for rs are from part 2 of the spearman function in
    # Numerical Recipies, p.510.  They close to tables, but not exact.(?)
    return rs, probrs
def apointbiserialr(x,y):
    """
    Calculates a point-biserial correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (1st), p.194.

    Usage:   apointbiserialr(x,y)   where x,y are equal length arrays
    Returns: Point-biserial r, two-tailed p-value
    """
    TINY = 1e-30
    categories = pstat.aunique(x)
    data = pstat.aabut(x,y)
    if len(categories) != 2:
        raise ValueError("Exactly 2 categories required (in x) for pointbiserialr().")
    else:   # there are 2 categories, continue
        # recode the two category labels as 0/1, then split rows by category
        codemap = pstat.aabut(categories,N.arange(2))
        recoded = pstat.arecode(data,codemap,0)
        x = pstat.alinexand(data,0,categories[0])
        y = pstat.alinexand(data,0,categories[1])
        xmean = amean(pstat.acolex(x,1))
        ymean = amean(pstat.acolex(y,1))
        n = len(data)
        # sqrt(p*q) weighting for the unequal group sizes
        adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
        rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data,1))*adjust
        df = n-2
        # significance via t distribution (TINY guards rpb = +/-1)
        t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
        prob = abetai(0.5*df,0.5,df/(df+t*t))
        return rpb, prob
def akendalltau(x,y):
    """
    Calculates Kendall's tau ... correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipies.  Needs good test-cases.@@@

    Usage:   akendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0     # pairs untied in x
    n2 = 0     # pairs untied in y
    iss = 0    # concordant minus discordant pairs
    # NOTE(review): the inner loop starts at k=j (comparing a pair with
    # itself) rather than k=j+1 -- confirm against the kendl1 reference.
    for j in range(len(x)-1):
        for k in range(j,len(y)):
            a1 = x[j] - x[k]
            a2 = y[j] - y[k]
            aa = a1 * a2
            if (aa):             # neither array has a tie
                n1 = n1 + 1
                n2 = n2 + 1
                if aa > 0:
                    iss = iss + 1
                else:
                    iss = iss -1
            else:
                if (a1):
                    n1 = n1 + 1
                else:
                    n2 = n2 + 1
    tau = iss / math.sqrt(n1*n2)
    # normal approximation for the significance of tau
    svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
    z = tau / math.sqrt(svar)
    prob = erfcc(abs(z)/1.4142136)
    return tau, prob
def alinregress(*args):
    """
    Calculates a regression line on two arrays, x and y, corresponding to x,y
    pairs.  If a single 2D array is passed, alinregress finds dim with 2 levels
    and splits data into x,y pairs along that dim.

    Usage:   alinregress(*args)    args=2 equal-length arrays, or one 2D array
    Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
    """
    TINY = 1.0e-20
    # unpack the x,y data from either calling convention
    if len(args) == 1:  # more than 1D array?
        args = args[0]
        if len(args) == 2:
            x = args[0]
            y = args[1]
        else:
            x = args[:,0]
            y = args[:,1]
    else:
        x = args[0]
        y = args[1]
    n = len(x)
    xmean = amean(x)
    ymean = amean(y)
    # computational formula for Pearson's r
    r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
    r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
    r = r_num / r_den
    z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))   # Fisher z (computed, unused)
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t))
    slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
    intercept = ymean - slope*xmean
    # standard error of the estimate
    sterrest = math.sqrt(1-r*r)*asamplestdev(y)
    return slope, intercept, r, prob, sterrest, n
def amasslinregress(*args):
    """
    Calculates a regression line on one 1D array (x) and one N-D array (y).

    Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
    """
    TINY = 1.0e-20
    # unpack the x,y data from either calling convention
    if len(args) == 1:  # more than 1D array?
        args = args[0]
        if len(args) == 2:
            x = N.ravel(args[0])
            y = args[1]
        else:
            x = N.ravel(args[:, 0])
            y = args[:, 1]
    else:
        x = args[0]
        y = args[1]
    x = x.astype(N.float_)
    y = y.astype(N.float_)
    n = len(x)
    xmean = amean(x)
    ymean = amean(y, 0)
    # reshape x to (n, 1, 1, ...) so it broadcasts against y
    # BUG FIX: shape values must be ints -- N.ones() defaults to float and
    # numpy rejects a float shape assignment; also removed a leftover
    # debug print of the shapes
    shp = N.ones(len(y.shape), dtype=int)
    shp[0] = len(x)
    x.shape = shp
    r_num = n * (N.add.reduce(x * y, 0)) - N.add.reduce(x) * N.add.reduce(y, 0)
    r_den = N.sqrt((n * ass(x) - asquare_of_sums(x)) * (n * ass(y, 0) - asquare_of_sums(y, 0)))
    zerodivproblem = N.equal(r_den, 0)
    r_den = N.where(zerodivproblem, 1, r_den)  # avoid zero-division in 1st place
    r = r_num / r_den                          # need to do this nicely for matrix division
    r = N.where(zerodivproblem, 0.0, r)
    z = 0.5 * N.log((1.0 + r + TINY) / (1.0 - r + TINY))   # Fisher z (unused)
    df = n - 2
    t = r * N.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
    prob = abetai(0.5 * df, 0.5, df / (df + t * t))
    ss = float(n) * ass(x) - asquare_of_sums(x)
    s_den = N.where(ss == 0, 1, ss)            # avoid zero-division in 1st place
    slope = r_num / s_den
    intercept = ymean - slope * xmean
    sterrest = N.sqrt(1 - r * r) * asamplestdev(y, 0)
    return slope, intercept, r, prob, sterrest, n
#####################################
##### AINFERENTIAL STATISTICS #####
#####################################
def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
    """
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.  If printit=1, results are printed
    to the screen.  If printit='filename', the results are output to 'filename'
    using the given writemode (default=append).  Returns t-value, and prob.

    Usage:   attest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
    Returns: t-value, two-tailed prob
    """
    if type(a) != N.ndarray:
        a = N.array(a)
    x = amean(a)
    v = avar(a)
    n = len(a)
    df = n-1
    # with df = n-1 this equals v; kept in the Numerical-Recipes form
    svar = ((n-1)*v) / float(df)
    t = (x-popmean)/math.sqrt(svar*(1.0/n))
    prob = abetai(0.5*df,0.5,df/(df+t*t))

    if printit != 0:
        statname = 'Single-sample T-test.'
        outputpairedstats(printit,writemode,
                          'Population','--',popmean,0,0,0,
                          name,n,x,v,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          statname,t,prob)
    return t,prob
def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
    a, and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Dimension
    can equal None (ravel array first), or an integer (the dimension over
    which to operate on a and b).

    Usage:   attest_ind (a,b,dimension=None,printit=0,
                         Name1='Samp1',Name2='Samp2',writemode='a')
    Returns: t-value, two-tailed p-value
    """
    if dimension == None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    x1 = amean(a,dimension)
    x2 = amean(b,dimension)
    v1 = avar(a,dimension)
    v2 = avar(b,dimension)
    n1 = a.shape[dimension]
    n2 = b.shape[dimension]
    df = n1+n2-2
    svar = ((n1-1)*v1+(n2-1)*v2) / float(df)   # pooled variance
    zerodivproblem = N.equal(svar,0)
    svar = N.where(zerodivproblem,1,svar)  # avoid zero-division in 1st place
    t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2))  # N-D COMPUTATION HERE!!!!!!
    t = N.where(zerodivproblem,1.0,t)     # replace NaN/wrong t-values with 1.0
    probs = abetai(0.5*df,0.5,float(df)/(df+t*t))

    if type(t) == N.ndarray:
        probs = N.reshape(probs,t.shape)
    if probs.shape == (1,):
        probs = probs[0]

    if printit != 0:
        if type(t) == N.ndarray:
            t = t[0]
        if type(probs) == N.ndarray:
            probs = probs[0]
        statname = 'Independent samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),
                          N.maximum.reduce(N.ravel(b)),
                          statname,t,probs)
        # NOTE(review): in print mode the function returns None here (bare
        # return) rather than (t, probs) -- confirm this is intentional
        return
    return t, probs
def ap2t(pval, df):
    """
    Tries to compute a t-value from a p-value (or pval array) and associated df.
    SLOW for large numbers of elements(!) as it re-computes p-values 20 times
    (smaller step-sizes) at which point it decides it's done. Keeps the signs
    of the input array.  Returns 1000 (or -1000) if t>100.

    Usage:  ap2t(pval,df)
    Returns: an array of t-values with the shape of pval
    """
    pval = N.array(pval)
    signs = N.sign(pval)   # remember input signs; restored at the end
    pval = abs(pval)
    # binary search on t: start at 50 with step 25, halving each iteration
    t = N.ones(pval.shape, N.float_) * 50
    step = N.ones(pval.shape, N.float_) * 25
    print("Initial ap2t() prob calc")
    prob = abetai(0.5 * df, 0.5, float(df) / (df + t * t))
    print('ap2t() iter: ', end=' ')
    for i in range(10):
        print(i, ' ', end=' ')
        t = N.where(pval < prob, t + step, t - step)
        prob = abetai(0.5 * df, 0.5, float(df) / (df + t * t))
        step = step / 2
    print()
    # since this is an ugly hack, we get ugly boundaries
    t = N.where(t > 99.9, 1000, t)  # hit upper-boundary
    # BUG FIX: restore the sign by MULTIPLYING; the original 't = t+signs'
    # shifted t by +/-1 instead of signing it, contradicting the docstring's
    # promise of -1000 for negative inputs past the boundary
    t = t * signs
    return t  # , prob, pval
def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO RELATED samples of scores, a
    and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Dimension
    can equal None (ravel array first), or an integer (the dimension over
    which to operate on a and b).

    Usage:   attest_rel(a,b,dimension=None,printit=0,
                        name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed p-value
    """
    if dimension == None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    if len(a)!=len(b):
        raise ValueError('Unequal length arrays.')
    x1 = amean(a,dimension)
    x2 = amean(b,dimension)
    v1 = avar(a,dimension)
    v2 = avar(b,dimension)
    n = a.shape[dimension]
    df = float(n-1)
    d = (a-b).astype('d')      # paired differences, as doubles

    denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)
    zerodivproblem = N.equal(denom,0)
    denom = N.where(zerodivproblem,1,denom)  # avoid zero-division in 1st place
    t = N.add.reduce(d,dimension) / denom    # N-D COMPUTATION HERE!!!!!!
    t = N.where(zerodivproblem,1.0,t)        # replace NaN/wrong t-values with 1.0
    probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
    if type(t) == N.ndarray:
        probs = N.reshape(probs,t.shape)
    if probs.shape == (1,):
        probs = probs[0]

    if printit != 0:
        statname = 'Related samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),
                          N.maximum.reduce(N.ravel(b)),
                          statname,t,probs)
        # NOTE(review): in print mode the function returns None here (bare
        # return) rather than (t, probs) -- confirm this is intentional
        return
    return t, probs
def achisquare(f_obs, f_exp=None):
    """
    Calculates a one-way chi square for array of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups.
    @@@NOT RIGHT??

    Usage:   achisquare(f_obs, f_exp=None)   f_obs = array of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    k = len(f_obs)
    # BUG FIX: 'f_exp == None' is an elementwise comparison once f_exp is
    # an array (ambiguous truth value); identity test is the correct check
    if f_exp is None:
        # assume the total is spread evenly over the k cells
        f_exp = N.array([sum(f_obs) / float(k)] * len(f_obs), N.float_)
    f_exp = f_exp.astype(N.float_)
    chisq = N.add.reduce((f_obs - f_exp) ** 2 / f_exp)
    return chisq, achisqprob(chisq, k - 1)
def aks_2samp (data1,data2):
"""
Computes the Kolmogorov-Smirnof statistic on 2 samples. Modified from
Numerical Recipies in C, page 493. Returns KS D-value, prob. Not ufunc-
like.
Usage: aks_2samp(data1,data2) where data1 and data2 are 1D arrays
Returns: KS D-value, p-value
"""
j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
j2 = 0 # N.zeros(data2.shape[1:])
fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)
fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)
n1 = data1.shape[0]
n2 = data2.shape[0]
en1 = n1*1
en2 = n2*1
d = N.zeros(data1.shape[1:],N.float_)
data1 = N.sort(data1,0)
data2 = N.sort(data2,0)
while j1 < n1 and j2 < n2:
d1=data1[j1]
d2=data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if abs(dt) > abs(d):
d = dt
# try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = aksprob((en+0.12+0.11/en)*N.fabs(d))
# except:
# prob = 1.0
return d, prob
def amannwhitneyu(x,y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. REMEMBER: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
Usage: amannwhitneyu(x,y) where x,y are arrays of values for 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(N.concatenate((x,y)))
rankx = ranked[0:n1] # get the x-ranks
ranky = ranked[n1:] # the rest are y-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1,u2)
smallu = min(u1,u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError('All numbers are identical in amannwhitneyu')
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - azprob(z)
def atiecorrect(rankvals):
"""
Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill. Code adapted from |Stat rankind.c
code.
Usage: atiecorrect(rankvals)
Returns: T correction factor for U or H
"""
sorted,posn = ashellsort(N.array(rankvals))
n = len(sorted)
T = 0.0
i = 0
while (i<n-1):
if sorted[i] == sorted[i+1]:
nties = 1
while (i<n-1) and (sorted[i] == sorted[i+1]):
nties = nties +1
i = i +1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T
def aranksums(x,y):
"""
Calculates the rank sums statistic on the provided scores and returns
the result.
Usage: aranksums(x,y) where x,y are arrays of values for 2 conditions
Returns: z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = N.concatenate((x,y))
ranked = arankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 - azprob(abs(z)))
return z, prob
def awilcoxont(x,y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: awilcoxont(x,y) where x,y are equal-length arrays for 2 conditions
Returns: t-statistic, two-tailed p-value
"""
if len(x) != len(y):
raise ValueError('Unequal N in awilcoxont. Aborting.')
d = x-y
d = N.compress(N.not_equal(d,0),d) # Keep all non-zero differences
count = len(d)
absd = abs(d)
absranked = arankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
z = math.fabs(wt-mn) / se
prob = 2*(1.0 -zprob(abs(z)))
return wt, prob
def akruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H and associated p-value for 3 or more
independent samples.
Usage: akruskalwallish(*args) args are separate arrays for 3+ conditions
Returns: H-statistic (corrected for ties), associated p-value
"""
assert len(args) == 3, "Need at least 3 groups in stats.akruskalwallish()"
args = list(args)
n = [0]*len(args)
n = map(len,args)
all = []
for i in range(len(args)):
all = all + args[i].tolist()
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in akruskalwallish')
h = h / float(T)
return h, chisqprob(h,df)
def afriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for
repeated measures and returns the result, along with the associated
probability value. It assumes 3 or more repeated measures. Only 3
levels requires a minimum of 10 subjects in the study. Four levels
requires 5 subjects per level(??).
Usage: afriedmanchisquare(*args) args are separate arrays for 2+ conditions
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
data = pstat.aabut(*args)
data = data.astype(N.float_)
for i in range(len(data)):
data[i] = arankdata(data[i])
ssbn = asum(asum(args,1)**2)
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, achisqprob(chisq,k-1)
#####################################
#### APROBABILITY CALCULATIONS ####
#####################################
def achisqprob(chisq,df):
"""
Returns the (1-tail) probability value associated with the provided chi-square
value and df. Heavily modified from chisq.c in Gary Perlman's |Stat. Can
handle multiple dimensions.
Usage: achisqprob(chisq,df) chisq=chisquare stat., df=degrees of freedom
"""
BIG = 200.0
def ex(x):
BIG = 200.0
exponents = N.where(N.less(x,-BIG),-BIG,x)
return N.exp(exponents)
if type(chisq) == N.ndarray:
arrayflag = 1
else:
arrayflag = 0
chisq = N.array([chisq])
if df < 1:
return N.ones(chisq.shape,N.float)
probs = N.zeros(chisq.shape,N.float_)
probs = N.where(N.less_equal(chisq,0),1.0,probs) # set prob=1 for chisq<0
a = 0.5 * chisq
if df > 1:
y = ex(-a)
if df%2 == 0:
even = 1
s = y*1
s2 = s*1
else:
even = 0
s = 2.0 * azprob(-N.sqrt(chisq))
s2 = s*1
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = N.ones(probs.shape,N.float_)
else:
z = 0.5 *N.ones(probs.shape,N.float_)
if even:
e = N.zeros(probs.shape,N.float_)
else:
e = N.log(N.sqrt(N.pi)) *N.ones(probs.shape,N.float_)
c = N.log(a)
mask = N.zeros(probs.shape)
a_big = N.greater(a,BIG)
a_big_frozen = -1 *N.ones(probs.shape,N.float_)
totalelements = N.multiply.reduce(N.array(probs.shape))
while asum(mask)!=totalelements:
e = N.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
# print z, e, s
newmask = N.greater(z,chisq)
a_big_frozen = N.where(newmask*N.equal(mask,0)*a_big, s, a_big_frozen)
mask = N.clip(newmask+mask,0,1)
if even:
z = N.ones(probs.shape,N.float_)
e = N.ones(probs.shape,N.float_)
else:
z = 0.5 *N.ones(probs.shape,N.float_)
e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape,N.float_)
c = 0.0
mask = N.zeros(probs.shape)
a_notbig_frozen = -1 *N.ones(probs.shape,N.float_)
while asum(mask)!=totalelements:
e = e * (a/z.astype(N.float_))
c = c + e
z = z + 1.0
# print '#2', z, e, c, s, c*y+s2
newmask = N.greater(z,chisq)
a_notbig_frozen = N.where(newmask*N.equal(mask,0)*(1-a_big),
c*y+s2, a_notbig_frozen)
mask = N.clip(newmask+mask,0,1)
probs = N.where(N.equal(probs,1),1,
N.where(N.greater(a,BIG),a_big_frozen,a_notbig_frozen))
return probs
else:
return s
def aerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional error
everywhere less than 1.2e-7. Adapted from Numerical Recipies. Can
handle multiple dimensions.
Usage: aerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * N.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
return N.where(N.greater_equal(x,0), ans, 2.0-ans)
def azprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat. Can handle multiple dimensions.
Usage: azprob(z) where z is a z-value
"""
def yfunc(y):
x = (((((((((((((-0.000045255659 * y
+0.000152529290) * y -0.000019538132) * y
-0.000676904986) * y +0.001390604284) * y
-0.000794620820) * y -0.002034254874) * y
+0.006549791214) * y -0.010557625006) * y
+0.011630447319) * y -0.009279453341) * y
+0.005353579108) * y -0.002141268741) * y
+0.000535310849) * y +0.999936657524
return x
def wfunc(w):
x = ((((((((0.000124818987 * w
-0.001075204047) * w +0.005198775019) * w
-0.019198292004) * w +0.059054035642) * w
-0.151968751364) * w +0.319152932694) * w
-0.531923007300) * w +0.797884560593) * N.sqrt(w) * 2.0
return x
Z_MAX = 6.0 # maximum meaningful z-value
x = N.zeros(z.shape,N.float_) # initialize
y = 0.5 * N.fabs(z)
x = N.where(N.less(y,1.0),wfunc(y*y),yfunc(y-2.0)) # get x's
x = N.where(N.greater(y,Z_MAX*0.5),1.0,x) # kill those with big Z
prob = N.where(N.greater(z,0),(x+1)*0.5,(1-x)*0.5)
return prob
def aksprob(alam):
"""
Returns the probability value for a K-S statistic computed via ks_2samp.
Adapted from Numerical Recipies. Can handle multiple dimensions.
Usage: aksprob(alam)
"""
if type(alam) == N.ndarray:
frozen = -1 *N.ones(alam.shape,N.float64)
alam = alam.astype(N.float64)
arrayflag = 1
else:
frozen = N.array(-1.)
alam = N.array(alam,N.float64)
arrayflag = 1
mask = N.zeros(alam.shape)
fac = 2.0 *N.ones(alam.shape,N.float_)
sum = N.zeros(alam.shape,N.float_)
termbf = N.zeros(alam.shape,N.float_)
a2 = N.array(-2.0*alam*alam,N.float64)
totalelements = N.multiply.reduce(N.array(mask.shape))
for j in range(1,201):
if asum(mask) == totalelements:
break
exponents = (a2*j*j)
overflowmask = N.less(exponents,-746)
frozen = N.where(overflowmask,0,frozen)
mask = mask+overflowmask
term = fac*N.exp(exponents)
sum = sum + term
newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +
N.less(abs(term),1.0e-8*sum), 1, 0)
frozen = N.where(newmask*N.equal(mask,0), sum, frozen)
mask = N.clip(mask+newmask,0,1)
fac = -fac
termbf = abs(term)
if arrayflag:
return N.where(N.equal(frozen,-1), 1.0, frozen) # 1.0 if doesn't converge
else:
return N.where(N.equal(frozen,-1), 1.0, frozen)[0] # 1.0 if doesn't converge
def afprob (dfnum, dfden, F):
"""
Returns the 1-tailed significance level (p-value) of an F statistic
given the degrees of freedom for the numerator (dfR-dfF) and the degrees
of freedom for the denominator (dfF). Can handle multiple dims for F.
Usage: afprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
if type(F) == N.ndarray:
return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F))
else:
return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
def abetacf(a,b,x,verbose=1):
"""
Evaluates the continued fraction form of the incomplete Beta function,
betai. (Adapted from: Numerical Recipies in C.) Can handle multiple
dimensions for x.
Usage: abetacf(a,b,x,verbose=1)
"""
ITMAX = 200
EPS = 3.0e-7
arrayflag = 1
if type(x) == N.ndarray:
frozen = N.ones(x.shape,N.float_) *-1 #start out w/ -1s, should replace all
else:
arrayflag = 0
frozen = N.array([-1])
x = N.array([x])
mask = N.zeros(x.shape)
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
if N.sum(N.ravel(N.equal(frozen,-1)))==0:
break
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az*1
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
newmask = N.less(abs(az-aold),EPS*abs(az))
frozen = N.where(newmask*N.equal(mask,0), az, frozen)
mask = N.clip(mask+newmask,0,1)
noconverge = asum(N.equal(frozen,-1))
if noconverge != 0 and verbose:
print('a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements')
if arrayflag:
return frozen
else:
return frozen[0]
def agammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
Adapted from: Numerical Recipies in C. Can handle multiple dims ... but
probably doesn't normally have to.
Usage: agammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*N.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + N.log(2.50662827465*ser)
def abetai(a,b,x,verbose=1):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented
here, using the betacf function. (Adapted from: Numerical Recipies in
C.) Can handle multiple dimensions.
Usage: abetai(a,b,x,verbose=1)
"""
TINY = 1e-15
if type(a) == N.ndarray:
if asum(N.less(x,0)+N.greater(x,1)) != 0:
raise ValueError('Bad x in abetai')
x = N.where(N.equal(x,0),TINY,x)
x = N.where(N.equal(x,1.0),1-TINY,x)
bt = N.where(N.equal(x,0)+N.equal(x,1), 0, -1)
exponents = ( gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b*
N.log(1.0-x) )
# 746 (below) is the MAX POSSIBLE BEFORE OVERFLOW
exponents = N.where(N.less(exponents,-740),-740,exponents)
bt = N.exp(exponents)
if type(x) == N.ndarray:
ans = N.where(N.less(x,(a+1)/(a+b+2.0)),
bt*abetacf(a,b,x,verbose)/float(a),
1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b))
else:
if x<(a+1)/(a+b+2.0):
ans = bt*abetacf(a,b,x,verbose)/float(a)
else:
ans = 1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b)
return ans
#####################################
####### AANOVA CALCULATIONS #######
#####################################
import LinearAlgebra, operator
LA = LinearAlgebra
def aglm(data,para):
"""
Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc. Taken
from:
Peterson et al. Statistical limitations in functional neuroimaging
I. Non-inferential methods and statistical models. Phil Trans Royal Soc
Lond B 354: 1239-1260.
Usage: aglm(data,para)
Returns: statistic, p-value ???
"""
if len(para) != len(data):
print("data and para must be same length in aglm")
return
n = len(para)
p = pstat.aunique(para)
x = N.zeros((n,len(p))) # design matrix
for l in range(len(p)):
x[:,l] = N.equal(para,p[l])
b = N.dot(N.dot(LA.inv(N.dot(N.transpose(x),x)), # i.e., b=inv(X'X)X'Y
N.transpose(x)),
data)
diffs = (data - N.dot(x,b))
s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)
if len(p) == 2: # ttest_ind
c = N.array([1,-1])
df = n-2
fact = asum(1.0/asum(x,0)) # i.e., 1/n1 + 1/n2 + 1/n3 ...
t = N.dot(c,b) / N.sqrt(s_sq*fact)
probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
return t, probs
def aF_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: aF_oneway (*args) where *args is 2 or more arrays, one per
treatment group
Returns: f-value, probability
"""
na = len(args) # ANOVA on 'na' groups, each in it's own array
means = [0]*na
vars = [0]*na
ns = [0]*na
alldata = []
tmp = map(N.array,args)
means = map(amean,tmp)
vars = map(avar,tmp)
ns = map(len,args)
alldata = N.concatenate(args)
bign = len(alldata)
sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
ssbn = 0
for a in args:
ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = na-1
dfwn = bign - na
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn,dfwn,f)
return f, prob
def aF_value (ER,EF,dfR,dfF):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR = degrees of freedom the Restricted model
dfF = degrees of freedom associated with the Restricted model
"""
return ((ER-EF)/float(dfR-dfF) / (EF/float(dfF)))
def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
Enum = round(Enum,3)
Eden = round(Eden,3)
dfnum = round(Enum,3)
dfden = round(dfden,3)
f = round(f,3)
prob = round(prob,3)
suffix = '' # for *s after the p-value
if prob < 0.001: suffix = ' ***'
elif prob < 0.01: suffix = ' **'
elif prob < 0.05: suffix = ' *'
title = [['EF/ER','DF','Mean Square','F-value','prob','']]
lofl = title+[[Enum, dfnum, round(Enum/float(dfnum),3), f, prob, suffix],
[Eden, dfden, round(Eden/float(dfden),3),'','','']]
pstat.printcc(lofl)
return
def F_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR = degrees of freedom the Restricted model
dfF = degrees of freedom associated with the Restricted model
where ER and EF are matrices from a multivariate F calculation.
"""
if type(ER) in [IntType, FloatType]:
ER = N.array([[ER]])
if type(EF) in [IntType, FloatType]:
EF = N.array([[EF]])
n_um = (LA.det(ER) - LA.det(EF)) / float(dfnum)
d_en = LA.det(EF) / float(dfden)
return n_um / d_en
#####################################
####### ASUPPORT FUNCTIONS ########
#####################################
def asign(a):
"""
Usage: asign(a)
Returns: array shape of a, with -1 where a<0 and +1 where a>=0
"""
a = N.asarray(a)
if ((type(a) == type(1.4)) or (type(a) == type(1))):
return a-a-N.less(a,0)+N.greater(a,0)
else:
return N.zeros(N.shape(a))-N.less(a,0)+N.greater(a,0)
def asum (a, dimension=None,keepdims=0):
"""
An alternative to the Numeric.add.reduce function, which allows one to
(1) collapse over multiple dimensions at once, and/or (2) to retain
all dimensions in the original array (squashing one down to size.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the resulting array will have as many
dimensions as the input array.
Usage: asum(a, dimension=None, keepdims=0)
Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
"""
if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:
a = a.astype(N.float_)
if dimension == None:
s = N.sum(N.ravel(a))
elif type(dimension) in [IntType,FloatType]:
s = N.add.reduce(a, dimension)
if keepdims == 1:
shp = list(a.shape)
shp[dimension] = 1
s = N.reshape(s,shp)
else: # must be a SEQUENCE of dims to sum over
dims = list(dimension)
dims.sort()
dims.reverse()
s = a *1.0
for dim in dims:
s = N.add.reduce(s,dim)
if keepdims == 1:
shp = list(a.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s,shp)
return s
def acumsum (a,dimension=None):
"""
Returns an array consisting of the cumulative sum of the items in the
passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions, but this last one just barely makes sense).
Usage: acumsum(a,dimension=None)
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
if type(dimension) in [ListType, TupleType, N.ndarray]:
dimension = list(dimension)
dimension.sort()
dimension.reverse()
for d in dimension:
a = N.add.accumulate(a,d)
return a
else:
return N.add.accumulate(a,dimension)
def ass(inarray, dimension=None, keepdims=0):
"""
Squares each value in the passed array, adds these squares & returns
the result. Unfortunate function name. :-) Defaults to ALL values in
the array. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions). Set keepdims=1 to maintain the original number
of dimensions.
Usage: ass(inarray, dimension=None, keepdims=0)
Returns: sum-along-'dimension' for (inarray*inarray)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
return asum(inarray*inarray,dimension,keepdims)
def asummult (array1,array2,dimension=None,keepdims=0):
"""
Multiplies elements in array1 and array2, element by element, and
returns the sum (along 'dimension') of all resulting multiplications.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). A trivial function, but included for completeness.
Usage: asummult(array1,array2,dimension=None,keepdims=0)
"""
if dimension == None:
array1 = N.ravel(array1)
array2 = N.ravel(array2)
dimension = 0
return asum(array1*array2,dimension,keepdims)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
"""
Adds the values in the passed array, squares that sum, and returns the
result. Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the returned array will have the same
NUMBER of dimensions as the original.
Usage: asquare_of_sums(inarray, dimension=None, keepdims=0)
Returns: the square of the sum over dim(s) in dimension
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
s = asum(inarray,dimension,keepdims)
if type(s) == N.ndarray:
return s.astype(N.float_)*s
else:
return float(s)*s
def asumdiffsquared(a,b, dimension=None, keepdims=0):
"""
Takes pairwise differences of the values in arrays a and b, squares
these differences, and returns the sum of these squares. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
keepdims=1 means the return shape = len(a.shape) = len(b.shape)
Usage: asumdiffsquared(a,b)
Returns: sum[ravel(a-b)**2]
"""
if dimension == None:
inarray = N.ravel(a)
dimension = 0
return asum((a-b)**2,dimension,keepdims)
def ashellsort(inarray):
"""
Shellsort algorithm. Sorts a 1D-array.
Usage: ashellsort(inarray)
Returns: sorted-inarray, sorting-index-vector (for original array)
"""
n = len(inarray)
svec = inarray *1.0
ivec = range(n)
gap = n/2 # integer division needed
while gap >0:
for i in range(gap,n):
for j in range(i-gap,-1,-gap):
while j>=0 and svec[j]>svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
gap = gap / 2 # integer division needed
# svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
def arankdata(inarray):
"""
Ranks the data in inarray, dealing with ties appropritely. Assumes
a 1D inarray. Adapted from Gary Perlman's |Stat ranksort.
Usage: arankdata(inarray)
Returns: array of length equal to inarray, containing rank scores
"""
n = len(inarray)
svec, ivec = ashellsort(inarray)
sumranks = 0
dupcount = 0
newarray = N.zeros(n,N.float_)
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i==n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
def afindwithin(data):
"""
Returns a binary vector, 1=within-subject factor, 0=between. Input
equals the entire data array (i.e., column 1=random factor, last
column = measured values.
Usage: afindwithin(data) data in |Stat format
"""
numfact = len(data[0])-2
withinvec = [0]*numfact
for col in range(1,numfact+1):
rows = pstat.linexand(data,col,pstat.unique(pstat.colex(data,1))[0]) # get 1 level of this factor
if len(pstat.unique(pstat.colex(rows,0))) < len(rows): # if fewer subjects than scores on this factor
withinvec[col-1] = 1
return withinvec
#########################################################
#########################################################
###### RE-DEFINE DISPATCHES TO INCLUDE ARRAYS #########
#########################################################
#########################################################
## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)),
(ageometricmean, (N.ndarray,)) )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)),
(aharmonicmean, (N.ndarray,)) )
mean = Dispatch ( (lmean, (ListType, TupleType)),
(amean, (N.ndarray,)) )
median = Dispatch ( (lmedian, (ListType, TupleType)),
(amedian, (N.ndarray,)) )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)),
(amedianscore, (N.ndarray,)) )
mode = Dispatch ( (lmode, (ListType, TupleType)),
(amode, (N.ndarray,)) )
tmean = Dispatch ( (atmean, (N.ndarray,)) )
tvar = Dispatch ( (atvar, (N.ndarray,)) )
tstdev = Dispatch ( (atstdev, (N.ndarray,)) )
tsem = Dispatch ( (atsem, (N.ndarray,)) )
## VARIATION:
moment = Dispatch ( (lmoment, (ListType, TupleType)),
(amoment, (N.ndarray,)) )
variation = Dispatch ( (lvariation, (ListType, TupleType)),
(avariation, (N.ndarray,)) )
skew = Dispatch ( (lskew, (ListType, TupleType)),
(askew, (N.ndarray,)) )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)),
(akurtosis, (N.ndarray,)) )
describe = Dispatch ( (ldescribe, (ListType, TupleType)),
(adescribe, (N.ndarray,)) )
## DISTRIBUTION TESTS
skewtest = Dispatch ( (askewtest, (ListType, TupleType)),
(askewtest, (N.ndarray,)) )
kurtosistest = Dispatch ( (akurtosistest, (ListType, TupleType)),
(akurtosistest, (N.ndarray,)) )
normaltest = Dispatch ( (anormaltest, (ListType, TupleType)),
(anormaltest, (N.ndarray,)) )
## FREQUENCY STATS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)),
(aitemfreq, (N.ndarray,)) )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)),
(ascoreatpercentile, (N.ndarray,)) )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)),
(apercentileofscore, (N.ndarray,)) )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)),
(ahistogram, (N.ndarray,)) )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)),
(acumfreq, (N.ndarray,)) )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)),
(arelfreq, (N.ndarray,)) )
## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)),
(aobrientransform, (N.ndarray,)) )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)),
(asamplevar, (N.ndarray,)) )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)),
(asamplestdev, (N.ndarray,)) )
signaltonoise = Dispatch( (asignaltonoise, (N.ndarray,)),)
var = Dispatch ( (lvar, (ListType, TupleType)),
(avar, (N.ndarray,)) )
stdev = Dispatch ( (lstdev, (ListType, TupleType)),
(astdev, (N.ndarray,)) )
sterr = Dispatch ( (lsterr, (ListType, TupleType)),
(asterr, (N.ndarray,)) )
sem = Dispatch ( (lsem, (ListType, TupleType)),
(asem, (N.ndarray,)) )
z = Dispatch ( (lz, (ListType, TupleType)),
(az, (N.ndarray,)) )
zs = Dispatch ( (lzs, (ListType, TupleType)),
(azs, (N.ndarray,)) )
## TRIMMING FCNS:
threshold = Dispatch( (athreshold, (N.ndarray,)),)
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)),
(atrimboth, (N.ndarray,)) )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)),
(atrim1, (N.ndarray,)) )
## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)),
(apaired, (N.ndarray,)) )
lincc = Dispatch ( (llincc, (ListType, TupleType)),
(alincc, (N.ndarray,)) )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)),
(apearsonr, (N.ndarray,)) )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)),
(aspearmanr, (N.ndarray,)) )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)),
(apointbiserialr, (N.ndarray,)) )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)),
(akendalltau, (N.ndarray,)) )
linregress = Dispatch ( (llinregress, (ListType, TupleType)),
(alinregress, (N.ndarray,)) )
## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)),
(attest_1samp, (N.ndarray,)) )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)),
(attest_ind, (N.ndarray,)) )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)),
(attest_rel, (N.ndarray,)) )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)),
(achisquare, (N.ndarray,)) )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)),
(aks_2samp, (N.ndarray,)) )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)),
(amannwhitneyu, (N.ndarray,)) )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)),
(atiecorrect, (N.ndarray,)) )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)),
(aranksums, (N.ndarray,)) )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)),
(awilcoxont, (N.ndarray,)) )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)),
(akruskalwallish, (N.ndarray,)) )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)),
(afriedmanchisquare, (N.ndarray,)) )
## PROBABILITY CALCS:
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)),
(achisqprob, (N.ndarray,)) )
zprob = Dispatch ( (lzprob, (IntType, FloatType)),
(azprob, (N.ndarray,)) )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)),
(aksprob, (N.ndarray,)) )
fprob = Dispatch ( (lfprob, (IntType, FloatType)),
(afprob, (N.ndarray,)) )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)),
(abetacf, (N.ndarray,)) )
betai = Dispatch ( (lbetai, (IntType, FloatType)),
(abetai, (N.ndarray,)) )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)),
(aerfcc, (N.ndarray,)) )
gammln = Dispatch ( (lgammln, (IntType, FloatType)),
(agammln, (N.ndarray,)) )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)),
(aF_oneway, (N.ndarray,)) )
F_value = Dispatch ( (lF_value, (ListType, TupleType)),
(aF_value, (N.ndarray,)) )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType, N.ndarray)), )
sum = Dispatch ( (lsum, (ListType, TupleType)),
(asum, (N.ndarray,)) )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)),
(acumsum, (N.ndarray,)) )
ss = Dispatch ( (lss, (ListType, TupleType)),
(ass, (N.ndarray,)) )
summult = Dispatch ( (lsummult, (ListType, TupleType)),
(asummult, (N.ndarray,)) )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)),
(asquare_of_sums, (N.ndarray,)) )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)),
(asumdiffsquared, (N.ndarray,)) )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)),
(ashellsort, (N.ndarray,)) )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)),
(arankdata, (N.ndarray,)) )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)),
(afindwithin, (N.ndarray,)) )
###################### END OF NUMERIC FUNCTION BLOCK #####################
###################### END OF STATISTICAL FUNCTIONS ######################
except ImportError:
pass
| 36.609859 | 178 | 0.568128 |
3238feed775dcc6acf445b5be51e111c56d291ed | 20,104 | py | Python | model.py | xiaoleiy/otaku-yy | 0660ab003f6ea86944d22be2a2ce73b146cd7dfa | [
"MIT"
] | 1 | 2019-01-09T19:19:17.000Z | 2019-01-09T19:19:17.000Z | model.py | xiaoleiy/otaku-yy | 0660ab003f6ea86944d22be2a2ce73b146cd7dfa | [
"MIT"
] | null | null | null | model.py | xiaoleiy/otaku-yy | 0660ab003f6ea86944d22be2a2ce73b146cd7dfa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os,logging
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext.db import Model as DBModel
from google.appengine.api import memcache
from google.appengine.api import mail
from google.appengine.api import urlfetch
from datetime import datetime
import urllib, hashlib,urlparse
logging.info('module base reloaded')
rootpath=os.path.dirname(__file__)
def vcache(key="",time=3600):
def _decorate(method):
def _wrapper(*args, **kwargs):
if not g_blog.enable_memcache:
return method(*args, **kwargs)
result=method(*args, **kwargs)
memcache.set(key,result,time)
return result
return _wrapper
return _decorate
class Theme:
    """Resolves template names to file-system paths for a blog theme.

    Unknown attribute lookups are treated as template names and resolved
    (with per-instance caching) to ``themes/<name>/templates/<attr>.html``,
    falling back to the ``default`` theme when the themed file is absent.
    """
    def __init__(self, name='default'):
        self.name = name
        self.mapping_cache = {}
        self.dir = '/themes/%s' % name
        self.viewdir = os.path.join(rootpath, 'view')
        self.server_dir = os.path.join(rootpath, 'themes', self.name)
        logging.debug('server_dir:%s' % self.server_dir)
    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails, i.e. for
        # template names.  Cached resolutions are returned immediately.
        try:
            return self.mapping_cache[name]
        except KeyError:
            pass
        path = os.path.join(self.server_dir, 'templates', name + '.html')
        logging.debug('path:%s' % path)
        if not os.path.exists(path):
            # fall back to the default theme's copy of the template
            path = os.path.join(rootpath, 'themes', 'default', 'templates', name + '.html')
        # NOTE: the path is cached and returned even when the fallback
        # file does not exist either (matches the original behaviour).
        self.mapping_cache[name] = path
        return path
class ThemeIterator:
    """Iterates over the directory names under ``theme_path``.

    NOTE(review): Python 2 style iterator (``next`` rather than
    ``__next__``); the directory is re-listed each time a fresh
    iteration pass starts.
    """
    def __init__(self, theme_path='themes'):
        self.iterating = False  # True while a listing pass is in progress
        self.theme_path = theme_path
        self.list = []
    def __iter__(self):
        return self
    def next(self):
        # lazily (re)read the directory on the first call of a pass
        if not self.iterating:
            self.iterating = True
            self.list = os.listdir(self.theme_path)
            self.cursor = 0
        if self.cursor >= len(self.list):
            # pass exhausted; reset so the next call starts a new listing
            self.iterating = False
            raise StopIteration
        else:
            value = self.list[self.cursor]
            self.cursor += 1
            return value
        #return (str(value), unicode(value))
class BaseModel(db.Model):
    """db.Model that tracks dirtiness and calls per-property
    ``<name>_onchange(old, new)`` hooks when a (non-underscore)
    attribute's value changes (see Entry.slug_onchange).
    """
    def __init__(self, parent=None, key_name=None, _app=None, **kwds):
        self.__isdirty = False
        # BUG FIX: the original forwarded literal ``None``s, silently
        # discarding any parent/key_name/_app the caller passed in.
        DBModel.__init__(self, parent=parent, key_name=key_name, _app=_app, **kwds)
    def __setattr__(self,attrname,value):
        """
        DataStore api stores all prop values say "email" is stored in "_email" so
        we intercept the set attribute, see if it has changed, then check for an
        onchanged method for that property to call
        """
        if (attrname.find('_') != 0):
            if hasattr(self,'_' + attrname):
                curval = getattr(self,'_' + attrname)
                if curval != value:
                    self.__isdirty = True
                    # fire e.g. slug_onchange(old, new) if defined
                    if hasattr(self,attrname + '_onchange'):
                        getattr(self,attrname + '_onchange')(curval,value)
        DBModel.__setattr__(self,attrname,value)
class Cache(db.Model):
    """Datastore-backed cache entry: a key mapped to rendered content."""
    cachekey = db.StringProperty(multiline=False)
    content = db.TextProperty()
class Blog(db.Model):
    """Singleton blog configuration/state entity (key_name='default').

    Holds site-wide settings (titles, URLs, theme, comment and sitemap
    options) plus the published-entry counter maintained by
    Entry.publish().
    """
    owner = db.UserProperty()
    author=db.StringProperty(default='admin')
    rpcuser=db.StringProperty(default='admin')
    rpcpassword=db.StringProperty(default='')
    description = db.TextProperty()
    baseurl = db.StringProperty(multiline=False,default=None)
    urlpath = db.StringProperty(multiline=False)
    title = db.StringProperty(multiline=False,default='Micolog')
    subtitle = db.StringProperty(multiline=False,default='This is a micro blog.')
    entrycount = db.IntegerProperty(default=0)
    posts_per_page= db.IntegerProperty(default=10)
    feedurl = db.StringProperty(multiline=False,default='/feed')
    blogversion = db.StringProperty(multiline=False,default='0.30')
    theme_name = db.StringProperty(multiline=False,default='default')
    enable_memcache = db.BooleanProperty(default = False)
    link_format=db.StringProperty(multiline=False,default='%(year)s/%(month)s/%(day)s/%(postname)s.html')
    comment_notify_mail=db.BooleanProperty(default=True)
    # comment display order (nonzero = newest first; see Entry.comments)
    comments_order=db.IntegerProperty(default=0)
    # comments per page
    comments_per_page=db.IntegerProperty(default=20)
    blognotice=db.StringProperty(default='',multiline=True)
    domain=db.StringProperty()
    show_excerpt=db.BooleanProperty(default=True)
    version=0.32
    timedelta=db.FloatProperty(default=8.0)# hours
    language=db.StringProperty(default="en-us")
    sitemap_entries=db.IntegerProperty(default=30)
    sitemap_include_category=db.BooleanProperty(default=False)
    sitemap_include_tag=db.BooleanProperty(default=False)
    sitemap_ping=db.BooleanProperty(default=False)
    default_link_format=db.StringProperty(multiline=False,default='?p=%(post_id)s')
    theme=None
    #postcount=db.IntegerProperty(default=0)
    #pagecount=db.IntegerProperty(default=0)
    def save(self):
        self.put()
    def initialsetup(self):
        self.title = 'Your Blog Title'
        self.subtitle = 'Your Blog Subtitle'
    def get_theme(self):
        # (re)load the Theme object for the configured theme name
        self.theme= Theme(self.theme_name);
        return self.theme
    # the three queries below are memcached for an hour via @vcache
    @vcache("blog.hotposts")
    def hotposts(self):
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-readtimes').fetch(8)
    @vcache("blog.recentposts")
    def recentposts(self):
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').fetch(8)
    @vcache("blog.postscount")
    def postscount(self):
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').count()
class Category(db.Model):
    """A post category; entries reference it via ``categorie_keys``."""
    name=db.StringProperty(multiline=False)
    slug=db.StringProperty(multiline=False)
    @property
    def posts(self):
        """Query of published posts filed under this category."""
        return Entry.all().filter('entrytype =','post').filter("published =", True).filter('categorie_keys =',self)
    @property
    def count(self):
        """Number of published posts in this category (runs a query)."""
        return self.posts.count()
class Archive(db.Model):
    """Per-month archive bucket with a published-post count
    (maintained by Entry.update_archive)."""
    monthyear = db.StringProperty(multiline=False)  # e.g. 'September 2008'
    year = db.StringProperty(multiline=False)
    month = db.StringProperty(multiline=False)
    entrycount = db.IntegerProperty(default=0)
    date = db.DateTimeProperty(auto_now_add=True)
class Tag(db.Model):
    """A tag with a reference count; the key_name is the tag string."""
    tag = db.StringProperty(multiline=False)
    tagcount = db.IntegerProperty(default=0)
    @property
    def posts(self):
        """Query of published posts carrying this tag."""
        # BUG FIX: db.Query.all() takes no filter arguments (compare
        # Category.posts), and Entry.tags stores strings (see
        # Entry.settags), so filter on self.tag, not the Tag entity.
        return Entry.all().filter('entrytype =','post').filter("published =", True).filter('tags =',self.tag)
    @classmethod
    def add(cls,value):
        """Increment (creating if needed) the tag named `value`."""
        if value:
            tag= Tag.get_by_key_name(value)
            if not tag:
                tag=Tag(key_name=value)
                tag.tag=value
            tag.tagcount+=1
            tag.put()
            return tag
        else:
            return None
    @classmethod
    def remove(cls,value):
        """Decrement the tag named `value`, deleting it at zero."""
        if value:
            tag= Tag.get_by_key_name(value)
            if tag:
                if tag.tagcount>1:
                    tag.tagcount-=1
                    # BUG FIX: the original never persisted the
                    # decrement, so tagcount only ever grew.
                    tag.put()
                else:
                    tag.delete()
class Link(db.Model):
    """A blogroll link shown in the sidebar."""
    href = db.StringProperty(multiline=False,default='')
    linktype = db.StringProperty(multiline=False,default='blogroll')
    linktext = db.StringProperty(multiline=False,default='')
    linkcomment = db.StringProperty(multiline=False,default='')
    createdate=db.DateTimeProperty(auto_now=True)
    @property
    def get_icon_url(self):
        "Return the favicon URL at the root of the linked site."
        ico_path = '/favicon.ico'
        # first '/' after the scheme marks the end of the host part;
        # if none is found the whole href is treated as the site root
        ix = self.href.find('/',len('http://') )
        return (ix>0 and self.href[:ix] or self.href ) + ico_path
class Entry(BaseModel):
    """A blog post or static page.

    Maintains its own permalink (``link``), archive bucket, tag
    reference counts and the blog-wide published-entry counter.
    """
    author = db.UserProperty()
    published = db.BooleanProperty(default=False)
    content = db.TextProperty(default='')
    readtimes = db.IntegerProperty(default=0)
    title = db.StringProperty(multiline=False,default='')
    date = db.DateTimeProperty(auto_now_add=True)
    tags = db.StringListProperty()#old version used
    categorie_keys=db.ListProperty(db.Key)
    slug = db.StringProperty(multiline=False,default='')
    link= db.StringProperty(multiline=False,default='')
    monthyear = db.StringProperty(multiline=False)
    entrytype = db.StringProperty(multiline=False,default='post',choices=[
        'post','page'])
    entry_parent=db.IntegerProperty(default=0)#When level=0 show on main menu.
    menu_order=db.IntegerProperty(default=0)
    commentcount = db.IntegerProperty(default=0)
    allow_comment = db.BooleanProperty(default=True) #allow comment
    #compatible with wordpress
    is_wp=db.BooleanProperty(default=False)
    post_id= db.IntegerProperty()
    excerpt=db.StringProperty(multiline=True)
    # class-level defaults (not datastore properties)
    postname=''
    _relatepost=None
    @property
    def content_excerpt(self):
        """Excerpt with a localized '..more' link."""
        return self.get_content_excerpt(_('..more').decode('utf8'))
    def get_content_excerpt(self,more='..more'):
        """Return the excerpt (or content up to <!--more-->) plus a
        'read more' link; full content when excerpts are disabled."""
        if g_blog.show_excerpt:
            if self.excerpt:
                return self.excerpt+' <a href="/%s">%s</a>'%(self.link,more)
            else:
                sc=self.content.split('<!--more-->')
                if len(sc)>1:
                    return sc[0]+u' <a href="/%s">%s</a>'%(self.link,more)
                else:
                    return sc[0]
        else:
            return self.content
    def slug_onchange(self,curval,newval):
        # hook fired by BaseModel.__setattr__ when `slug` changes
        if not (curval==newval):
            self.setpostname(newval)
    def setpostname(self,newval):
        """Set ``postname``, suffixing a counter to avoid duplicate slugs."""
        #check and fix double slug
        if newval:
            slugcount=Entry.all()\
            .filter('entrytype',self.entrytype)\
            .filter('date <',self.date)\
            .filter('slug =',newval)\
            .filter('published',True)\
            .count()
            if slugcount>0:
                self.postname=newval+str(slugcount)
            else:
                self.postname=newval
        else:
            self.postname=""
    def fullurl(self):
        """Absolute URL of this entry."""
        return g_blog.baseurl+'/'+self.link
    @property
    def categories(self):
        """Resolve categorie_keys to Category entities (best effort)."""
        try:
            return db.get(self.categorie_keys)
        except Exception:
            # best effort: a dangling key shouldn't break page rendering
            return []
    def settags(self,values):
        """Set the tag list from a list or comma-separated string,
        keeping the global Tag reference counts in sync."""
        if not values:return
        if type(values)==type([]):
            tags=values
        else:
            tags=values.split(',')
        logging.info('tags: ok')
        if not self.tags:
            removelist=[]
            addlist=tags
        else:
            #search different tags
            removelist=[n for n in self.tags if n not in tags]
            addlist=[n for n in tags if n not in self.tags]
        for v in removelist:
            Tag.remove(v)
        for v in addlist:
            Tag.add(v)
        self.tags=tags
    def get_comments_by_page(self,index,psize):
        """Return page `index` (1-based) of this entry's comments."""
        return self.comments().fetch(psize,offset = (index-1) * psize)
    @property
    def strtags(self):
        """Tags as a comma-separated string."""
        return ','.join(self.tags)
    @property
    def edit_url(self):
        return '/admin/%s?key=%s&action=edit'%(self.entrytype,self.key())
    def comments(self):
        """Comment query ordered per the blog-wide comments_order setting."""
        if g_blog.comments_order:
            return Comment.all().filter('entry =',self).order('-date')
        else:
            return Comment.all().filter('entry =',self).order('date')
    def delete_comments(self):
        """Delete every comment on this entry and reset the counter."""
        cmts = Comment.all().filter('entry =',self)
        for comment in cmts:
            comment.delete()
        self.commentcount = 0
    def update_archive(self):
        """Checks to see if there is a month-year entry for the
        month of current blog, if not creates it and increments count"""
        my = self.date.strftime('%B %Y') # September-2008
        sy = self.date.strftime('%Y') #2008
        sm = self.date.strftime('%m') #09
        archive = Archive.all().filter('monthyear',my).fetch(10)
        if self.entrytype == 'post':
            if archive == []:
                archive = Archive(monthyear=my,year=sy,month=sm,entrycount=1)
                self.monthyear = my
                archive.put()
            else:
                # ratchet up the count
                archive[0].entrycount += 1
                archive[0].put()
    def save(self):
        """
        Use this instead of self.put(), as we do some other work here
        """
        my = self.date.strftime('%B %Y') # September 2008
        self.monthyear = my
        return self.put()
    def publish(self,newval=True):
        """Publish (newval=True) or unpublish this entry, maintaining
        g_blog.entrycount, the permalink and caches."""
        if newval:
            if not self.is_saved():
                self.save()
            if not self.is_wp:
                self.post_id=self.key().id()
            #fix for old version
            if not self.postname:
                self.setpostname(self.slug)
            vals={'year':self.date.year,'month':str(self.date.month).zfill(2),'day':self.date.day,
                'postname':self.postname,'post_id':self.post_id}
            if self.entrytype=='page':
                if self.slug:
                    self.link=self.postname
                else:
                    self.link=g_blog.default_link_format%vals
            else:
                if g_blog.link_format and self.postname:
                    self.link=g_blog.link_format.strip()%vals
                else:
                    self.link=g_blog.default_link_format%vals
            if not self.published:
                g_blog.entrycount+=1
                self.published=True
                g_blog.save()
            self.save()
        else:
            # BUG FIX: the original assigned the undefined name `false`
            # (a NameError) and tested the flag only *after* clearing it,
            # so the blog entry count was never decremented.
            if self.published:
                g_blog.entrycount-=1
                g_blog.save()
            self.published=False
            self.save()
        self.removecache()
        if g_blog.sitemap_ping:
            Sitemap_NotifySearch()
    def removecache(self):
        """Invalidate the cached pages affected by this entry."""
        memcache.delete('/')
        memcache.delete('/'+self.link)
        memcache.delete('/sitemap')
        memcache.delete('blog.postcount')
    @property
    def next(self):
        """Next published post by post_id (list of 0 or 1 entries)."""
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('post_id').filter('post_id >',self.post_id).fetch(1)
    @property
    def prev(self):
        """Previous published post by post_id (list of 0 or 1 entries)."""
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-post_id').filter('post_id <',self.post_id).fetch(1)
    @property
    def relateposts(self):
        """Up to 5 posts sharing a tag with this one (cached per instance)."""
        if self._relatepost:
            return self._relatepost
        else:
            if self.tags:
                self._relatepost= Entry.gql("WHERE tags IN :1 and post_id!=:2 order by post_id desc ",self.tags,self.post_id).fetch(5)
            else:
                self._relatepost= []
            return self._relatepost
class User(db.Model):
    """A blog author/admin account."""
    user = db.UserProperty(required = False)
    dispname = db.StringProperty()
    email=db.StringProperty()
    website = db.LinkProperty()
    isadmin=db.BooleanProperty(default=False)
    isAuthor=db.BooleanProperty(default=True)
    rpcpwd=db.StringProperty()
    def __unicode__(self):
        # display name only; the nickname fallback was disabled
        #if self.dispname:
        return self.dispname
        #else:
        #    return self.user.nickname()
    def __str__(self):
        return self.__unicode__().encode('utf-8')
class Comment(db.Model):
    """A reader comment attached to an Entry."""
    entry = db.ReferenceProperty(Entry)
    date = db.DateTimeProperty(auto_now_add=True)
    content = db.TextProperty(required=True)
    author=db.StringProperty()
    email=db.EmailProperty()
    weburl=db.URLProperty()
    status=db.IntegerProperty(default=0)
    @property
    def shortcontent(self):
        """First 20 characters of the comment, HTML-escaped."""
        # BUG FIX: the original's replace calls were no-ops (replacing
        # '<' with '<'); escape to HTML entities as clearly intended.
        # The bogus `len=20` parameter (unusable on a property getter,
        # and shadowing the builtin) is gone.
        scontent=self.content[:20].replace('<','&lt;').replace('>','&gt;')
        return scontent
    def gravatar_url(self):
        """Gravatar image URL for the commenter, or a local default."""
        # Set your variables here
        default = g_blog.baseurl+'/static/images/homsar.jpeg'
        if not self.email:
            return default
        size = 50
        try:
            # construct the url
            imgurl = "http://www.gravatar.com/avatar/"
            imgurl +=hashlib.md5(self.email).hexdigest()+"?"+ urllib.urlencode({
                'd':default, 's':str(size),'r':'G'})
            return imgurl
        except Exception:
            # best effort: fall back to the default image on any failure
            return default
    def save(self):
        """Persist the comment, bump the entry's counter, flush the
        cached entry page and (optionally) mail the blog owner."""
        self.put()
        self.entry.commentcount+=1
        self.entry.put()
        memcache.delete("/"+self.entry.link)
        sbody=_('''New comment on your post "%s"
Author : %s
E-mail : %s
URL : %s
Comment:
%s
You can see all comments on this post here:
%s
''')
        sbody=sbody.decode('utf-8')
        logging.info(type( sbody))
        logging.info(sbody)
        if g_blog.comment_notify_mail and g_blog.owner and not users.is_current_user_admin() :
            sbody=sbody%(self.entry.title,self.author,self.email,self.weburl,self.content,
                g_blog.baseurl+"/"+self.entry.link+"#comment-"+str(self.key().id()))
            mail.send_mail_to_admins(g_blog.owner.email(),'Comments:'+self.entry.title, sbody,reply_to=self.email)
            logging.info('send %s . entry: %s'%(g_blog.owner.email(),self.entry.title))
    def delit(self):
        """Delete the comment and decrement the entry's counter."""
        self.entry.commentcount-=1
        self.entry.put()
        self.delete()
class Media(db.Model):
    """An uploaded file (image/attachment) stored as a datastore blob."""
    name =db.StringProperty()
    mtype=db.StringProperty()
    bits=db.BlobProperty()
    date=db.DateTimeProperty(auto_now_add=True)
# Search-engine sitemap ping endpoints, as
# (scheme, host, path, query_map, fragment, query_attr) tuples;
# query_attr names the query parameter that carries the sitemap URL.
NOTIFICATION_SITES = [
    ('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap')
]
def Sitemap_NotifySearch():
    """ Send notification of the new Sitemap(s) to the search engines. """
    url=g_blog.baseurl+"/sitemap"
    # Cycle through notifications
    # Each entry is (scheme, host, path, query_map, fragment, query_attr);
    # see NOTIFICATION_SITES above.
    for ping in NOTIFICATION_SITES:
        query_map = ping[3]
        query_attr = ping[5]
        query_map[query_attr] = url
        query = urllib.urlencode(query_map)
        notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
        # Send the notification
        logging.info('Notifying search engines. %s'%ping[1])
        logging.info('url: %s'%notify)
        try:
            urlfetch.fetch(notify)
        except Exception:
            # BUG FIX (narrowed): the bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt.
            logging.error('Cannot contact: %s' % ping[1])
g_blog=None  # module-level Blog singleton, set by gblog_init()
def InitBlogData():
    """Create and persist the default Blog plus a first 'Hello world!'
    post.  Called once when no Blog entity exists yet; also sets the
    module global ``g_blog``."""
    import settings
    global g_blog
    g_blog = Blog(key_name = 'default')
    g_blog.domain=os.environ['HTTP_HOST']
    g_blog.baseurl="http://"+g_blog.domain
    g_blog.feedurl=g_blog.baseurl+"/feed"
    g_blog.language=settings.LANGUAGE_CODE
    g_blog.save()
    entry=Entry(title=_("Hello world!").decode('utf8'))
    entry.content=_('<p>Welcome to micolog. This is your first post. Edit or delete it, then start blogging!</p>').decode('utf8')
    entry.publish()
    entry.update_archive()
    return g_blog
def gblog_init():
    """Load (or create) the Blog singleton into the module global
    ``g_blog`` and prepare its theme and root directory."""
    logging.info('module setting reloaded')
    global g_blog
    g_blog = Blog.get_by_key_name('default')
    if not g_blog:
        g_blog=InitBlogData()
    g_blog.get_theme()
    g_blog.rootdir=os.path.dirname(__file__)
    logging.info(g_blog.rootdir)
# initialize the blog singleton at import time
gblog_init()
if __name__=='__main__':
    # quick manual smoke test (Python 2 print statement)
    lk = Link()
    lk.href = 'http://www.kgblog.net'
    print lk.get_icon_url()
| 31.56044 | 144 | 0.593514 |
7bd8f7e6d4605c484a52fc49f4618e8a3cf5a9d7 | 7,692 | py | Python | use_python_connect_mySql_Query.py | escha2019/SQL_Related | b1feea99681fe176695e244c68b889fc0bf1dece | [
"BSD-3-Clause"
] | null | null | null | use_python_connect_mySql_Query.py | escha2019/SQL_Related | b1feea99681fe176695e244c68b889fc0bf1dece | [
"BSD-3-Clause"
] | null | null | null | use_python_connect_mySql_Query.py | escha2019/SQL_Related | b1feea99681fe176695e244c68b889fc0bf1dece | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import mysql.connector
import pandas as pd
# create connection
cnx = mysql.connector.connect(user='user_name', password='password_here',
host='ip_here',
database='db_name')
conn = cnx.cursor()
# In[2]:
# See all 13 tables
info = """SELECT table_name FROM information_schema.tables;
"""
conn.execute(info, conn)
conn.fetchall()[-13:]
# In[4]:
# 1. when was Grace Mayo born?
one = """SELECT birth_date
FROM person
WHERE LOWER(first_name) = '{0}'
AND LOWER(last_name) = '{1}';
""".format('grace', 'mayo')
conn.execute(one, conn)
conn.fetchall()[0][0] # January 1, 1978
# In[5]:
# 2. What was Grace mayo position_id in June 2018
two = """SELECT position_id.position_id
FROM person INNER JOIN position_person ON person.person_id=position_person.person_id
INNER JOIN position_id ON position_id.position_id_pk=position_person.position_id_pk
WHERE LOWER(person.first_name) = '{0}'
AND LOWER(person.last_name) = '{1}'
# keep position_ids that started atleast in june 2018
AND position_id.begin_date < '{2}'
# remove position_ids that ended b4 june 2018,
# excluding end_dates that are null or current position_ids
AND (position_id.end_date >= '{3}' OR position_id.end_date IS NULL);
""".format('grace', 'mayo', '2018-07-01', '2018-06-01')
conn.execute(two, conn)
columns = conn.description
result = [{columns[index][0]:column for index, column in enumerate(value)} for value in conn.fetchall()]
pd.DataFrame(result) # output as a dataframe
# In[6]:
# 3. What's the title of Grace mayo job's title
thr = """SELECT CONCAT(job.description, ' (', job.title , ')')
FROM person INNER JOIN position_person ON person.person_id=position_person.person_id
INNER JOIN position ON position.position_id_pk=position_person.position_id_pk
INNER JOIN job ON job.job_id=position.job_id
WHERE LOWER(person.first_name) = '{0}'
AND LOWER(person.last_name) = '{1}';
""".format('grace', 'mayo')
conn.execute(thr, conn)
conn.fetchall()[0][0]
# In[7]:
# 4. In what county does Grace Mayo work?
fou = """SELECT county.county
FROM person INNER JOIN position_person ON person.person_id=position_person.person_id
INNER JOIN position_community ON position_community.position_id_pk=position_person.position_id_pk
INNER JOIN community ON community.community_id=position_community.community_id
INNER JOIN district ON district.district_id=community.district_id
INNER JOIN health_district ON health_district.health_district_id=district.health_district_id
INNER JOIN county ON county.county_id = health_district.county_id
WHERE LOWER(person.first_name) = '{0}'
AND LOWER(person.last_name) = '{1}';
""".format('grace', 'mayo')
conn.execute(fou, conn)
columns = conn.description
result = [{columns[index][0]:column for index, column in enumerate(value)} for value in conn.fetchall()]
pd.DataFrame(result) # output as a dataframe
# In[30]:
# 5. Which community does Grace Mayo serve in?
fiv = """SELECT community.community
FROM person INNER JOIN position_person ON person.person_id=position_person.person_id
INNER JOIN position_community ON position_community.position_id_pk=position_person.position_id_pk
INNER JOIN community ON community.community_id=position_community.community_id
WHERE LOWER(person.first_name) = '{0}'
AND LOWER(person.last_name) = '{1}';
""".format('grace', 'mayo')
conn.execute(fiv, conn)
columns = conn.description
result = [{columns[index][0]:column for index, column in enumerate(value)} for value in conn.fetchall()]
pd.DataFrame(result) # output as a dataframe
# In[8]:
# 6. Who was Grace Mayo Supervisor in june 2018 and what was her supervisor's position_id?
sql = """SELECT per.first_name,
per.last_name,
p.position_id
FROM (SELECT position_supervisor.position_supervisor_id_pk
FROM person INNER JOIN position_person ON person.person_id=position_person.person_id
INNER JOIN position_id ON position_id.position_id_pk=position_person.position_id_pk
INNER JOIN position_supervisor ON position_supervisor.position_id_pk=position_person.position_id_pk
WHERE LOWER(person.first_name) = '{0}'
AND LOWER(person.last_name) = '{1}'
AND position_id.position_id = '{2}' # taken from Q2
# keep position_ids that started atleast in june 2018
AND position_supervisor.begin_date < '{3}'
# remove position_ids that ended b4 june 2018,
# excluding end_dates that are null or current position_ids
AND (position_supervisor.end_date >= '{4}' OR position_supervisor.end_date IS NULL)
) AS sup_position_id INNER JOIN position_id AS p ON p.position_id_pk=sup_position_id.position_supervisor_id_pk
INNER JOIN position_person AS pp ON p.position_id_pk=pp.position_id_pk
INNER JOIN person AS per ON per.person_id=pp.person_id
""".format('grace', 'mayo', 'DR27-06', '2018-07-01', '2018-06-01' )
conn.execute(sql)
columns = conn.description
result = [{columns[index][0]:column for index, column in enumerate(value)} for value in conn.fetchall()]
pd.DataFrame(result) # output as a dataframe
# In[9]:
# 7. How many positions did Grace Mayo's supervisor supervised in June 2018?
sql = """SELECT COUNT(DISTINCT p.position_id) AS count_of_supervised
FROM position_supervisor AS supervisee INNER JOIN position_id AS p ON \
p.position_id_pk=supervisee.position_id_pk
WHERE supervisee.position_supervisor_id_pk =
(SELECT position_supervisor.position_supervisor_id_pk
FROM person INNER JOIN position_person ON person.person_id=position_person.person_id
INNER JOIN position_id ON position_id.position_id_pk=position_person.position_id_pk
INNER JOIN position_supervisor ON position_supervisor.position_id_pk=position_person.position_id_pk
WHERE person.person_id = '{0}' # Grace Mayo id
AND position_id.position_id = '{1}' # Grace Mayo postion_id in June, from Q2
# keep position_ids that started atleast in july 2018
AND position_supervisor.begin_date < '{2}'
# remove position_ids that ended b4 june 2018,
# excluding end_dates that are null or current position_ids
AND (position_supervisor.end_date >= '{3}' OR position_supervisor.end_date IS NULL)
)
# keep position_ids that started atleast in june 2018
AND p.begin_date < '{4}'
# remove position_ids that ended b4 june 2018,
# excluding end_dates that are null or current position_ids
AND (p.end_date >= '{5}' OR p.end_date IS NULL)
""".format('2320', 'DR27-06', '2018-07-01', '2018-06-01', '2018-07-01', '2018-06-01')
conn.execute(sql)
columns = conn.description
result = [{columns[index][0]:column for index, column in enumerate(value)} for value in conn.fetchall()]
pd.DataFrame(result) # output as a dataframe
# In[33]:
# close connection to db
conn.close()
| 39.649485 | 125 | 0.658216 |
cb1cbf4f74007fb6e2ece5c2a27227271d099b28 | 2,636 | py | Python | sdk/python/pulumi_azure_native/network/v20191201/get_active_sessions.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20191201/get_active_sessions.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20191201/get_active_sessions.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetActiveSessionsResult',
'AwaitableGetActiveSessionsResult',
'get_active_sessions',
]
# NOTE: this class is emitted by the Pulumi SDK generator (see the file
# header) -- do not hand-edit its structure.
@pulumi.output_type
class GetActiveSessionsResult:
    """
    Response for GetActiveSessions.
    """
    def __init__(__self__, next_link=None, value=None):
        # pulumi.set stores the values on the output-type wrapper
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> Optional[str]:
        """
        The URL to get the next set of results.
        """
        return pulumi.get(self, "next_link")
    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.BastionActiveSessionResponseResult']]:
        """
        List of active sessions on the bastion.
        """
        return pulumi.get(self, "value")
class AwaitableGetActiveSessionsResult(GetActiveSessionsResult):
    """Awaitable wrapper so the result can be used with ``await``;
    ``__await__`` yields nothing and returns a plain copy (standard
    Pulumi-generated pattern)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetActiveSessionsResult(
            next_link=self.next_link,
            value=self.value)
def get_active_sessions(bastion_host_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActiveSessionsResult:
    """
    Response for GetActiveSessions.


    :param str bastion_host_name: The name of the Bastion Host.
    :param str resource_group_name: The name of the resource group.
    """
    # build the provider-invoke argument dict (generated pattern)
    __args__ = dict()
    __args__['bastionHostName'] = bastion_host_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20191201:getActiveSessions', __args__, opts=opts, typ=GetActiveSessionsResult).value
    return AwaitableGetActiveSessionsResult(
        next_link=__ret__.next_link,
        value=__ret__.value)
| 32.95 | 143 | 0.675645 |
fcc3d48f12a0b3b4d1abc1abfae55dd4233e5a73 | 20,960 | py | Python | relativity/relativity.py | thorwhalen/relativity | 28e80d6d76f767a01699f2711f9677b960e2b86c | [
"MIT"
] | 1 | 2022-01-19T13:14:39.000Z | 2022-01-19T13:14:39.000Z | relativity/relativity.py | thorwhalen/relativity | 28e80d6d76f767a01699f2711f9677b960e2b86c | [
"MIT"
] | null | null | null | relativity/relativity.py | thorwhalen/relativity | 28e80d6d76f767a01699f2711f9677b960e2b86c | [
"MIT"
] | null | null | null | class _Tmp(object):
__slots__ = ('inv', 'data', 'listeners')
# just a little trick to avoid __init__
# TODO: fill out the rest of dict API and inherit from dict
class M2M(object):
"""
a dict-like entity that represents a many-to-many relationship
between two groups of objects
behaves like a dict-of-tuples; also has .inv which is kept
up to date which is a dict-of-tuples in the other direction
also, can be used as a directed graph among hashable python objects
"""
__slots__ = ('inv', 'data', 'listeners')
def __init__(self, items=None):
self.listeners = []
self.inv = _Tmp()
self.inv.listeners = []
self.inv.inv = self
self.inv.__class__ = self.__class__
if items.__class__ is self.__class__:
self.data = dict(
[(k, set(v)) for k, v in items.data.items()])
self.inv.data = dict(
[(k, set(v)) for k, v in items.inv.data.items()])
return
# tolerate a little weirdness here to make M2M(other_m2m)
# pythonic copying idiom as fast as possible
self.data = {}
self.inv.data = {}
if items:
self.update(items)
def _notify_add(self, key, val):
for listener in self.listeners:
listener.notify_add(key, val)
for listener in self.inv.listeners:
listener.notify_add(val, key)
def _notify_remove(self, key, val):
for listener in self.listeners:
listener.notify_remove(key, val)
for listener in self.inv.listeners:
listener.notify_remove(val, key)
def get(self, key, default=frozenset()):
try:
return self[key]
except KeyError:
return default
def getall(self, keys):
"""
since an M2M maps a key to a set of results
rather than a single result, unlike a normal dict
an M2M can combine the results of many keys together
without changing the return type
"""
empty, sofar = set(), set()
for key in keys:
sofar |= self.data.get(key, empty)
return frozenset(sofar)
def pop(self, key):
val = frozenset(self.data[key])
del self[key]
return val
def __getitem__(self, key):
return frozenset(self.data[key])
def __setitem__(self, key, vals):
vals = set(vals)
if key in self:
to_remove = self.data[key] - vals
vals -= self.data[key]
for val in to_remove:
self.remove(key, val)
for val in vals:
self.add(key, val)
def __delitem__(self, key):
for val in self.data.pop(key):
if self.listeners:
self._notify_remove(key, val)
self.inv.data[val].remove(key)
if not self.inv.data[val]:
del self.inv.data[val]
def update(self, iterable):
"""given an iterable of (key, val), add them all"""
if type(iterable) is type(self):
other = iterable
for k in other.data:
if k not in self.data:
self.data[k] = other.data[k]
if self.listeners:
for v in other.data[k]:
self._notify_add(k, v)
else:
self.data[k].update(other.data[k])
if self.listeners:
for v in other.data[k]:
if v not in self.data[k]:
self._notify_add(k, v)
for k in other.inv.data:
if k not in self.inv.data:
self.inv.data[k] = other.inv.data[k]
else:
self.inv.data[k].update(other.inv.data[k])
elif callable(getattr(iterable, 'keys', None)):
for k in iterable.keys():
self.add(k, iterable[k])
else:
for key, val in iterable:
self.add(key, val)
def only(self, keys):
"""
return a new M2M with only the data associated
with the corresponding keys
"""
return M2M([
item for item in self.iteritems() if item[0] in keys])
def add(self, key, val):
if key not in self.data:
self.data[key] = set()
self.data[key].add(val)
if val not in self.inv.data:
self.inv.data[val] = set()
self.inv.data[val].add(key)
self._notify_add(key, val)
def remove(self, key, val):
self.data[key].remove(val)
if not self.data[key]:
del self.data[key]
self.inv.data[val].remove(key)
if not self.inv.data[val]:
del self.inv.data[val]
self._notify_remove(key, val)
def discard(self, key, val):
if key not in self.data or val not in self.inv.data:
return
self.remove(key, val)
def replace(self, key, newkey):
"""
replace instances of key by newkey
"""
if key not in self.data:
return
self.data[newkey] = fwdset = self.data.pop(key)
if self.listeners:
for val in fwdset:
self._notify_remove(key, val)
self._notify_add(newkey, val)
for val in fwdset:
revset = self.inv.data[val]
revset.remove(key)
revset.add(newkey)
def iteritems(self):
for key in self.data:
for val in self.data[key]:
yield key, val
def keys(self):
return self.data.keys()
def values(self):
return self.inv.data.keys()
def copy(self):
"""
a full copy can be done a lot faster since items don't
need to be added one-by-one to sets
"""
return self.__class__(self)
__copy__ = copy
# NOTE: __copy__ by default will be pretty useless so
# it is overridden here; __deepcopy__ is correct by default
# because it copies self.data, and all of the sets in self.data
# as well as self.inv -- so we don't bother to override the behavior
def __contains__(self, key):
return key in self.data
def __iter__(self):
return self.data.__iter__()
def __len__(self):
return self.data.__len__()
def __eq__(self, other):
return type(self) == type(other) and self.data == other.data
def __repr__(self):
cn = self.__class__.__name__
return '%s(%r)' % (cn, list(self.iteritems()))
import itertools
def chain(*rels):
    """
    Chain M2Ms or M2MChains together into a single M2MChain.

    Raises TypeError for any argument that is neither.
    """
    m2ms = []
    for obj in rels:
        if type(obj) is M2M:
            m2ms.append(obj)
        elif type(obj) is M2MChain:
            m2ms.extend(obj.m2ms)
        else:
            # BUG FIX: unsupported arguments were silently dropped,
            # which is inconsistent with M2MChain.__init__'s TypeError.
            raise TypeError(
                'can only chain M2Ms and M2MChains, not {}'.format(type(obj)))
    return M2MChain(m2ms, copy=False)
class M2MChain(object):
    """
    Represents a sequence of ManyToMany relationships

    Basically, an M2MChain is a compressed representation of a
    sequence of values across all of it's columns.

    The Chain can be iterated over, yielding all of the unique
    combinations of values that span each of its relationships.

    The relationships in the Chain can be updated, and the Chain
    will immediately reflect those updates.

    Chains may share their internal state with M2MGraph, M2Ms, and
    other M2MChains; in this case updates to objects sharing the
    same underlying data will immediately be reflected in each
    other.
    """
    def __init__(self, m2ms, copy=True):
        if m2ms.__class__ is self.__class__:
            m2ms = m2ms.m2ms
        for m2m in m2ms:
            if type(m2m) is not M2M:
                raise TypeError('can only chain M2Ms, not {}'.format(type(m2m)))
        if copy:
            self.m2ms = [M2M(d) for d in m2ms]
        else:
            self.m2ms = m2ms
    # TODO: take multiple keysets (one per column)
    def only(self, keyset):
        """
        returns a chain that is filtered so that only keys in keyset are kept
        """
        m2ms = [self.m2ms[0].only(keyset)]
        for m2m in self.m2ms[1:]:
            m2ms.append(m2m.only(m2ms[-1].values()))
        return M2MChain(m2ms, copy=False)
    def _roll_lhs(self, key):
        # fold up keys left-to-right; slice(None) (i.e. ``:``) acts as
        # a wildcard matching every key in that column.
        # NOTE(review): when a concrete rkey is *not* among the reached
        # values, the candidate set is left unchanged rather than
        # emptied -- confirm this fallback is intentional.
        if key[0] == slice(None, None, None):
            lhs = self.m2ms[0]
        else:
            lhs = [key[0]]
        lhs = set(lhs)
        rkey_data = zip(key[1:], self.m2ms)
        for rkey, m2m in rkey_data:
            new_lhs = set()
            for lkey in lhs:
                new_lhs |= m2m[lkey]
            if rkey != slice(None, None, None):
                if rkey in new_lhs:
                    new_lhs = set([rkey])
            lhs = new_lhs
        return lhs
    def __getitem__(self, key):
        """slice -> sub-chain of columns; tuple -> filtered chain, or the
        final value-set when the tuple spans every column"""
        if type(key) is slice:
            data = self.m2ms[key]
            return M2MChain(data, copy=False)
        if type(key) is not tuple:
            raise TypeError("expected tuple, not {!r}".format(type(key)))
        # BUG FIX: a chain of N M2Ms spans N + 1 columns, and the
        # full-length-key branch below expects len(key) == N + 1, but
        # the original assert capped len(key) at N, making that branch
        # unreachable (and full keys an AssertionError).
        assert len(key) <= len(self.m2ms) + 1
        lhs = self._roll_lhs(key)
        if len(key) == len(self.m2ms) + 1:
            return lhs
        # build a chain of the remaining columns
        m2ms = []
        for cur in self.m2ms[len(key) - 1:]:
            new = M2M()
            for val in lhs:
                if val in cur:
                    new[val] = cur[val]
            m2ms.append(new)
            lhs = new.inv
        return M2MChain(m2ms, copy=False)
    def __contains__(self, vals):
        if type(vals) is not tuple:
            raise TypeError("expected tuple, not {!r}".format(type(vals)))
        return bool(self._roll_lhs(vals))
    def add(self, *vals):
        """add one full row of values (one per column)"""
        assert len(self.m2ms) + 1 == len(vals)
        val_pairs = zip(vals[:-1], vals[1:])
        for m2m, val_pair in zip(self.m2ms, val_pairs):
            m2m.add(*val_pair)
    def update(self, vals_seq):
        """add many rows (or merge an M2M into a 1-link chain)"""
        if len(self.m2ms) == 1 and type(vals_seq) is M2M:
            self.m2ms[0].update(vals_seq)
        else:
            for vals in vals_seq:
                self.add(*vals)
    def pairs(self, start=0, end=None):
        """
        return pairs between the given indices of data
        """
        pairing = M2MChain(self.m2ms[start:end], copy=False)
        return M2M([(row[0], row[-1]) for row in pairing])
    def copy(self):
        return M2MChain(self)
    __copy__ = copy
    def __eq__(self, other):
        return type(self) is type(other) and self.m2ms == other.m2ms
    def __repr__(self):
        return "M2MChain({})".format(self.m2ms)
    def __nonzero__(self):
        try:
            next(iter(self))
            return True
        except StopIteration:
            return False
    # BUG FIX: ``__nonzero__`` is Python 2 only; alias it so truthiness
    # also works on Python 3.
    __bool__ = __nonzero__
    def __iter__(self):
        """
        iterate over all of the possible paths through the
        chain of many to many dicts

        these are sequences of values, such that a value
        from M2M N is the key in M2M N+1 across the whole
        set of M2Ms
        """
        m2ms = self.m2ms
        rows = itertools.chain.from_iterable(
            [_join_all(key, m2ms[0], m2ms[1:]) for key in m2ms[0]])
        for row in rows:
            yield tuple(row)
def _join_all(key, nxt, rest, sofar=()):
    """Recursively enumerate every row reachable from *key*.

    *sofar* is the already-consumed prefix kept as a reversed cons-list of
    nested ``(head, tail)`` tuples so the recursion shares structure.
    """
    successors = nxt.get(key)
    if rest:
        # descend one column, pushing *key* onto the accumulator
        branches = [_join_all(nxt_key, rest[0], rest[1:], (key, sofar))
                    for nxt_key in successors]
        return itertools.chain.from_iterable(branches)
    # base case: unwind the cons-list into an in-order prefix
    prefix = []
    node = sofar
    while node:
        prefix.append(node[0])
        node = node[1]
    prefix.reverse()
    return [prefix + [key, final] for final in successors]
class M2MGraph(object):
    """
    represents a graph, where each node is a set of keys,
    and each edge is a M2M dict connecting two sets
    of keys

    this is good at representing a web of relationships
    from which various sub relationships can be extracted
    for inspection / modification via [] operator

    the node set is specified as a M2M dict:
    {a: b, b: c, b: d} specifies a graph with nodes
    a, b, c, d; and edges (a-b, b-c, b-d)
    """
    def __init__(self, relationships, data=None):
        relationships = M2M(relationships)
        m2ms = {}       # (lhs_col, rhs_col): M2M -- both directions stored
        cols = M2M()    # col label: edge keys touching that column
        rels = []       # canonical (lhs, rhs) orientation per relationship
        for lhs, rhs in relationships.iteritems():
            # check that only one direction is present
            assert lhs not in relationships.get(rhs)
            if data:
                if (lhs, rhs) in data:
                    m2ms[lhs, rhs] = data[lhs, rhs]
                    m2ms[rhs, lhs] = data[lhs, rhs].inv
                    rels.append((lhs, rhs))
                elif (rhs, lhs) in data:
                    # BUG FIX: the reverse direction was read from
                    # data[lhs, rhs], which is not a key in this branch
                    m2ms[lhs, rhs] = data[rhs, lhs].inv
                    m2ms[rhs, lhs] = data[rhs, lhs]
                    rels.append((rhs, lhs))
            else:
                rels.append((lhs, rhs))
                m2ms[lhs, rhs] = M2M()
                m2ms[rhs, lhs] = m2ms[lhs, rhs].inv
            cols.add(lhs, (lhs, rhs))
            cols.add(rhs, (rhs, lhs))
        self.m2ms = m2ms
        self.cols = cols
        self.rels = rels

    @classmethod
    def from_rel_data_map(cls, rel_data_map):
        """
        convert a map of column label relationships to M2Ms
        into a M2MGraph

        rel_data_map -- { (lhs_col, rhs_col): {lhs_val: rhs_val} }
        """
        # TODO: better checking
        # BUG FIX: the constructed graph was previously discarded (no return)
        return cls(rel_data_map.keys(), rel_data_map)

    def __getitem__(self, key):
        """
        return a M2M, M2MChain, or M2MGraph
        over the same underlying data structure for easy
        mutation
        """
        if type(key) is dict or type(key) is M2M:
            return M2MGraph(
                key,
                dict([((lhs, rhs), self.m2ms[lhs, rhs]) for lhs, rhs in key.items()]))
        if key in self.cols:
            return self._all_col(key)
        if type(key) is tuple:
            return self.chain(*key)
        raise KeyError(key)

    def chain(self, *cols):
        """
        return an M2MChain along the given columns

        an Ellipsis ("...") between two column labels means "any path",
        resolved via pairs()
        """
        # list() so the pairs can be indexed (zip is lazy on Python 3)
        col_pairs = list(zip(cols[:-1], cols[1:]))
        m2ms = []
        assert col_pairs[0][0] is not Ellipsis  # ... at the beginning is invalid
        for lhs_col_pair, rhs_col_pair in zip(col_pairs[:-1], col_pairs[1:]):
            if lhs_col_pair[0] is Ellipsis:
                continue  # skip, was handled by lhs
            if lhs_col_pair[1] is Ellipsis:
                assert rhs_col_pair[0] is Ellipsis
                # join ... in the middle via pairs
                lhslhs, rhsrhs = lhs_col_pair[0], rhs_col_pair[1]
                m2ms.append(self.pairs(lhslhs, rhsrhs))
                continue
            m2ms.append(self.m2ms[lhs_col_pair])
        assert col_pairs[-1][1] is not Ellipsis  # ... on the end is invalid
        if col_pairs[-1][0] is not Ellipsis:
            m2ms.append(self.m2ms[col_pairs[-1]])
        return M2MChain(m2ms, False)

    def __setitem__(self, key, val):
        if type(key) is not tuple:
            raise TypeError("expected tuple, not {!r}".format(type(key)))
        if type(val) is M2M:
            data = [val]
        elif type(val) is M2MChain:
            data = val.m2ms
        else:
            raise TypeError("expected M2MChain or M2M for val, not {!r}".format(type(val)))
        if len(data) != len(key) - 1:
            raise ValueError("value wrong width ({}) for key {}".format(len(data), key))
        for colpair, m2m in zip(zip(key[:-1], key[1:]), data):
            lhs, rhs = colpair
            self.cols.add(lhs, (lhs, rhs))
            self.cols.add(rhs, (rhs, lhs))
            self.m2ms[lhs, rhs] = m2m
            self.m2ms[rhs, lhs] = m2m.inv

    def _all_col(self, col):
        """get all the values for a given column"""
        sofar = set()
        for edge in self.cols[col]:
            sofar.update(self.m2ms[edge].keys())
        return frozenset(sofar)

    def pairs(self, lhs, rhs, paths=None, ignore=None):
        """
        get all the unique pairs of values from lhs col and rhs col

        ignore is a set of column names to exclude from building paths

        paths is a list-of-lists of column names; if specified, will
        follow exactly the chain of relationships specified by
        each list of column names in paths rather than searching
        for paths between lhs and rhs columns
        """
        missing = lhs if lhs not in self.cols else rhs if rhs not in self.cols else None
        if missing:
            raise KeyError('no col named {}; valid cols are {}'.format(missing, ", ".join(self.cols)))
        if ignore is None:
            ignore = set()
        if paths is None:
            paths = self._all_paths(lhs, rhs, ignore)
        if not paths:
            raise ValueError('no paths between col {} and {}'.format(lhs, rhs))
        pairs = M2M()
        for path in paths:
            m2ms = self[path]
            if type(m2ms) is M2M:
                pairs.update(m2ms.iteritems())
            else:
                for row in m2ms:
                    pairs.add(row[0], row[-1])
        return pairs

    def _all_paths(self, lhs, rhs, already_visited):
        """
        lhs - start col
        rhs - end col
        already_visited - cols that are already on the current
        path to avoid loops
        returns [[str]]
        """
        return [tuple(path) for path in self._all_paths2(lhs, rhs, already_visited)]

    def _all_paths2(self, lhs, rhs, already_visited):
        # depth-first search over columns, never revisiting a column
        if lhs == rhs:
            return [[lhs]]
        paths = []
        for col_pair in self.cols[lhs]:
            assert lhs in col_pair
            nxt = col_pair[1] if lhs == col_pair[0] else col_pair[0]
            if nxt in already_visited:
                continue
            paths.extend(
                [[lhs] + sub_path for sub_path in self._all_paths2(
                    nxt, rhs, set([lhs]) | already_visited)])
        return paths

    def add(self, row):
        """
        given a row-dict that specifies a bunch of values,
        add these values to all of the direct relationships
        among the columns specified by the row-dict keys
        """
        assert set(row) <= set(self.cols)
        to_add = []
        for lhs in row:
            exists = False
            for rhs in row:
                for key in (lhs, rhs), (rhs, lhs):
                    if key in self.m2ms:
                        to_add.append((key, row[key[0]], row[key[1]]))
                        exists = True
            if not exists:
                raise ValueError('could not find any relationships for col {}'.format(lhs))
        for key, lval, rval in to_add:
            self[key].add(lval, rval)

    def remove(self, col, val):
        """
        given a column label and value, remove that value from
        all relationships involving that column label
        """
        for key in self.cols[col]:
            del self.m2ms[key][val]

    def attach(self, other):
        """
        attach all of the relationships from other into the
        columns and relationships of the current graph

        the underlying data structures remain connected -- modifications
        of self or other will be reflected
        (this is a relatively cheap operation since only meta-data
        is modified)

        the set of relationships in self and other must be disjoint,
        and there must be at least one link between the columns
        of self and other
        """
        if type(other) is dict:
            relationships = other.keys()
            for r in relationships:
                assert type(r) is tuple and len(r) == 2
            data = {k: M2M(v) for k, v in other.items()}
            other = M2MGraph(relationships, data)
        assert type(other) is type(self)
        # TODO: allow attaching of sequences?
        # check that relationships do not overlap
        overlaps = set([frozenset(e) for e in self.m2ms]) & (
            set([frozenset(e) for e in other.m2ms]))
        if overlaps:
            # BUG FIX: str.join needs strings, not tuples
            raise ValueError('relationships are specified by both graphs: {}'.format(
                ", ".join([str(tuple(e)) for e in overlaps])))
        self.m2ms.update(other.m2ms)
        self.cols.update(other.cols)

    def replace_col(self, col, valmap):
        """
        replace every value in col by the value in valmap

        raises KeyError if there is a value not in valmap
        """
        for key in self.cols[col]:
            if col == key[0]:
                m2m = self.m2ms[key]
            else:
                m2m = self.m2ms[key].inv
            for oldval, newval in valmap.items():
                m2m.replace(oldval, newval)

    def __eq__(self, other):
        return type(self) is type(other) and self.m2ms == other.m2ms

    def __contains__(self, rel):
        return rel in self.m2ms

    def __repr__(self):
        return "M2MGraph({}, ...)".format(self.rels)
| 33.536 | 102 | 0.548044 |
2cb46a481a02f8bc826cfff26118c7611345dd83 | 3,755 | py | Python | gui_client/socket_client.py | zypangpang/mmdict_client | aac7fcba21cfbf40a949ad5c121a9b5bd43b0095 | [
"Apache-2.0"
] | 2 | 2020-08-30T07:12:38.000Z | 2020-09-08T02:01:31.000Z | gui_client/socket_client.py | zypangpang/mmdict_client | aac7fcba21cfbf40a949ad5c121a9b5bd43b0095 | [
"Apache-2.0"
] | 4 | 2020-09-13T15:00:16.000Z | 2020-09-17T13:07:18.000Z | gui_client/socket_client.py | zypangpang/mmdict_client | aac7fcba21cfbf40a949ad5c121a9b5bd43b0095 | [
"Apache-2.0"
] | 2 | 2020-08-30T07:40:43.000Z | 2020-09-12T18:22:47.000Z | import socket,json,constants
from bs4 import BeautifulSoup
from constants import FRONT_END
from constants import configs
class SocketClient():
    """Client for the mmdict dictionary server.

    Speaks a small JSON-over-socket protocol: each request is a JSON
    document {"command": ..., "kwargs": {...}} sent on a fresh
    connection (unix-domain or TCP, per configs.get_dict_server());
    the reply is read until the server closes the socket. Lookup
    results are post-processed for the configured front end.
    """
    # Active front end; selects how lookup() rewrites the returned HTML.
    front_end = FRONT_END.QTWEBENGINE

    @classmethod
    def __request(cls, data):
        """Send *data* to the configured server and return the reply text."""
        host, _ = configs.get_dict_server()
        if host == 'unix':
            return cls.__request_unix(data)
        return cls.__request_inet(data)

    @classmethod
    def __exchange(cls, sock, data):
        """Write the request and read the full reply on an open socket.

        The server signals end-of-reply by closing the connection, so we
        read until recv() returns b''. (Factored out of the previously
        duplicated unix/inet request bodies.)
        """
        sock.sendall(data.encode("utf-8"))
        chunks = []
        while True:
            chunk = sock.recv(8192)
            if not chunk:
                break
            chunks.append(chunk)
        return b"".join(chunks).decode("utf-8")

    @classmethod
    def __request_unix(cls, data):
        """Round-trip *data* over the unix-domain socket."""
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            sock.connect(constants.SOCKET_LOCATION)
            return cls.__exchange(sock, data)

    @classmethod
    def __request_inet(cls, data):
        """Round-trip *data* over TCP."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.connect(configs.get_dict_server())
            return cls.__exchange(sock, data)

    @classmethod
    def __assemble_cmd(cls, command, **kwargs):
        """Serialize a protocol request as JSON."""
        return json.dumps({
            "command": command,
            "kwargs": kwargs
        })

    @classmethod
    def __general_tweak(cls, result_obj):
        """Resolve MDX-style "@@@LINK=word" redirect entries into anchors.

        NOTE(review): currently unused -- the call site in lookup() is
        disabled; kept for future use.
        """
        for key, val in result_obj.items():
            if val.startswith("@@@LINK="):
                word = val[8:].strip()
                entry = f'<A href="entry://{word}">{word}</A>'
                result_obj[key] = f"SEE: {entry}"

    @classmethod
    def __tweak_for_qt_webengine(cls, result_obj):
        """Rewrite "scheme://" to "scheme:" in hrefs that contain spaces,
        which QtWebEngine otherwise mishandles. Mutates and returns
        *result_obj*."""
        for key, val in result_obj.items():
            soup = BeautifulSoup(val, "lxml")
            for anchor in soup.find_all("a"):
                if " " in anchor.get('href', ""):
                    anchor['href'] = anchor['href'].replace("://", ":")
            result_obj[key] = str(soup)
        return result_obj

    @classmethod
    def __tweak_for_console(cls, result_obj):
        """Strip all HTML markup, keeping plain text for console output."""
        for key, val in result_obj.items():
            soup = BeautifulSoup(val, "lxml")
            result_obj[key] = soup.text
        return result_obj

    @classmethod
    def lookup(cls, word, dicts=None, raw=False):
        """Look up *word*, optionally restricted to the given *dicts*.

        Returns the decoded server response; unless *raw* is true (or the
        server reports status_code == 1), the 'results' HTML is tweaked
        for the active front end.
        """
        if dicts:
            data = cls.__assemble_cmd("Lookup", word=word, dicts=dicts)
        else:
            data = cls.__assemble_cmd("Lookup", word=word)
        return_obj = json.loads(cls.__request(data))
        if return_obj['status_code'] == 1:
            # server-side error: return as-is
            return return_obj
        if raw:
            return return_obj
        results = return_obj['results']
        if cls.front_end == FRONT_END.QTWEBENGINE:
            return_obj['results'] = cls.__tweak_for_qt_webengine(results)
        elif cls.front_end == FRONT_END.CONSOLE:
            return_obj['results'] = cls.__tweak_for_console(results)
        return return_obj

    @classmethod
    def list_dicts(cls, enabled=True):
        """Return the server's dictionary list (enabled-only by default)."""
        data = cls.__assemble_cmd("ListDicts", enabled=enabled)
        return json.loads(cls.__request(data))

    @classmethod
    def search_word_index(cls, dict_name, word):
        """Search one dictionary's index for *word*."""
        data = cls.__assemble_cmd("ListWord", dict_name=dict_name.strip(), word=word.strip())
        return json.loads(cls.__request(data))

    @classmethod
    def test(cls):
        """Raw connectivity check against the server."""
        return cls.__request("Test")
if __name__ == '__main__':
pass
| 30.778689 | 116 | 0.587483 |
b1e58c77f1a86693aedd2c17e24060bc1409b7e2 | 7,281 | py | Python | graphs.py | andrewlavaia/Traffic-Simulator | 39c21e94ff3026954f1577a8f9e70c6d605cb286 | [
"MIT"
] | null | null | null | graphs.py | andrewlavaia/Traffic-Simulator | 39c21e94ff3026954f1577a8f9e70c6d605cb286 | [
"MIT"
] | null | null | null | graphs.py | andrewlavaia/Traffic-Simulator | 39c21e94ff3026954f1577a8f9e70c6d605cb286 | [
"MIT"
] | null | null | null | import math
import heapq
import random
import copy
import overpy
import file_utils
import math_utils
class Graph:
    """Directed weighted graph of road intersections (vertices) and road
    segments (edges), loadable from a YAML map file or OpenStreetMap data."""

    def __init__(self):
        self.vertices = {}  # vertex_id: Vertex
        self.vertex_cnt = 0
        self.edge_cnt = 0

    def __repr__(self):
        ret = "Number of vertices: " + str(self.vertex_cnt) + "\n"
        ret += "Number of edges: " + str(self.edge_cnt) + "\n"
        for vertex_id, vertex in self.vertices.items():
            # BUG FIX: was vertex.getEdges(), which does not exist on Vertex
            ret += str(vertex_id) + ": " + str(vertex.get_edges()) + "\n"
        return ret

    def load_map(self, filename):
        """Populate the graph from a YAML file with 'intersections'
        ((id, x, y) triples) and 'connections' (vertex-id pairs)."""
        data = file_utils.load_yaml(filename)
        for vertex_data in data["intersections"]:
            intersection_id, x, y = vertex_data
            # BUG FIX: pass an empty tag dict -- Vertex/Edge call tags.get(),
            # which crashed on the previous tags=None
            self.vertices[intersection_id] = Vertex(intersection_id, x, y, {})
            self.vertex_cnt += 1
        for connection in data["connections"]:
            edge = Edge(connection[0], connection[1], 1, {})
            self.add_edge(edge)

    def load_open_street_map_data(self, filename, lat_lon_converter):
        """Build the graph from a cached Overpass JSON result, converting
        lat/lon into local coordinates and collapsing near-collinear
        intermediate way nodes."""
        api = overpy.Overpass()
        result = api.parse_json(file_utils.load_bytes(filename))
        for node in result.nodes:
            if self.vertices.get(node.id) is None:
                global_x, global_y = lat_lon_converter.lat_lon_to_global_xy(node.lat, node.lon)
                local_x, local_y = lat_lon_converter.global_xy_to_local_xy(global_x, global_y)
                self.vertices[node.id] = Vertex(node.id, local_x, local_y, node.tags)
        for way in result.ways:
            # walk each way with a 3-node window, dropping the middle node
            # whenever it can be reduced (short or near-collinear segment)
            prev_node = None
            mid_node = None
            for node in way.nodes:
                if prev_node is None:
                    prev_node = node
                    continue
                if mid_node is None:
                    mid_node = node
                    continue
                if self.can_vertices_be_reduced(prev_node.id, mid_node.id, node.id):
                    mid_node = node
                    continue
                self.add_edges(prev_node.id, mid_node.id, way.tags)
                prev_node = mid_node
                mid_node = node
            self.add_edges(prev_node.id, mid_node.id, way.tags)
        self.remove_unused_vertices()

    def add_edge(self, edge):
        """Attach *edge* to its source vertex and bump the edge count."""
        self.vertices[edge.source].add_edge(edge)
        self.edge_cnt += 1

    def add_edges(self, v1, v2, tags):
        """Add a v1->v2 edge (and v2->v1 unless the way is one-way),
        weighted by euclidean distance."""
        if v1 and v2 and v1 != v2:
            distance = self.distance_between_vertices(v1, v2)
            edge = Edge(v1, v2, distance, tags)
            self.add_edge(edge)
            if tags.get('oneway') != 'yes':
                other_edge = Edge(v2, v1, distance, tags)
                self.add_edge(other_edge)

    def adj_edges(self, v):
        """Return the outgoing edges of vertex *v*."""
        return self.vertices[v.id].get_edges()

    def distance_between_vertices(self, vertex_id_1, vertex_id_2):
        """Euclidean distance between two vertices' coordinates."""
        v1 = self.vertices[vertex_id_1]
        v2 = self.vertices[vertex_id_2]
        dx = v2.x - v1.x
        dy = v2.y - v1.y
        dist = math_utils.pythag(dx, dy)
        return dist

    def can_vertices_be_reduced(self, vertex_id_1, vertex_id_2, vertex_id_3):
        """Check whether 3 vertices can be reduced to 2 vertices"""
        v2 = self.vertices[vertex_id_2]
        if v2.is_intersection:
            # never drop a traffic-signal node
            return False
        v1 = self.vertices[vertex_id_1]
        dx1 = v2.x - v1.x
        dy1 = v2.y - v1.y
        dist = math_utils.pythag(dx1, dy1)
        if dist < 10.0:
            # segment too short to matter
            return True
        v3 = self.vertices[vertex_id_3]
        dx2 = v3.x - v2.x
        dy2 = v3.y - v2.y
        angle1 = math_utils.angle(dx1, dy1)
        angle2 = math_utils.angle(dx2, dy2)
        if abs(angle2 - angle1) < 0.10:  # ~5.7 degrees
            return True
        return False

    def remove_unused_vertices(self):
        """Remove all vertices that aren't a source or dest for an edge"""
        vertices_with_all_edges = copy.deepcopy(self.vertices)
        for vertex_id, vertex in self.vertices.items():
            for edge in vertex.edges:
                vertices_with_all_edges[edge.dest].edges.append(edge.id)
        vertices_to_remove = set()
        for vertex_id, vertex in vertices_with_all_edges.items():
            if not vertex.edges:
                vertices_to_remove.add(vertex_id)
        for vertex_id in vertices_to_remove:
            self.vertices.pop(vertex_id, None)
class Edge:
    """Directed, weighted road segment between two vertex ids.

    tags -- optional dict of OSM-style way tags (name, maxspeed, lanes,
    oneway); may be None, which is treated as "no tags".
    """
    def __init__(self, vertex_id_1, vertex_id_2, weight, tags):
        if tags is None:
            # BUG FIX: Graph.load_map passes tags=None; tags.get() crashed
            tags = {}
        self.id = id(self)
        self.source = vertex_id_1
        self.dest = vertex_id_2
        self.weight = weight
        self.name = tags.get("name", "")
        # keep only the digits of maxspeed (e.g. "25 mph" -> "25"); stored as str
        self.speed_limit = "".join([c for c in str(tags.get("maxspeed", 25)) if c.isdigit()])
        self.lanes = int(tags.get("lanes", 1))
        self.is_one_way = True if tags.get("oneway") == "yes" else False

    def __repr__(self):
        return str(self.source) + "->" + str(self.dest) + " (" + str(self.weight) + ")"

    def __eq__(self, other):
        # edges are compared by identity token, not by contents
        return self.id == other.id

    def __hash__(self):
        return hash((self.id, self.source, self.dest, self.weight, self.name))
class Vertex:
    """Graph vertex at local coordinates (x, y) holding its outgoing edges.

    tags -- optional dict of OSM node tags; may be None ("no tags").
    """
    def __init__(self, vertex_id, x, y, tags):
        if tags is None:
            # BUG FIX: Graph.load_map passes tags=None; tags.get() crashed
            tags = {}
        self.id = vertex_id
        self.edges = []  # list of connected Edges
        self.x = x
        self.y = y
        self.is_intersection = bool(tags.get("highway") == "traffic_signals")

    def __eq__(self, that):
        return self.id == that.id

    def __hash__(self):
        # defining __eq__ removed the default __hash__ (unhashable in py3);
        # restore hashing consistent with id-based equality
        return hash(self.id)

    def __repr__(self):
        return str(self.id) + ":" + str(self.edges)

    def add_edge(self, edge):
        self.edges.append(edge)

    def get_edges(self):
        return self.edges
class ShortestPaths:
    """Uses Dijkstra's algorithm to build a shortest path tree to each vertex"""

    def __init__(self, graph, source_vertex):
        self.path_of_edges = {}  # dest vertex id: connected edge along shortest path
        self.path_lengths = {}   # dest vertex id: sum of all edge weights along path
        self.pq = []             # min-heap of (distance, vertex_id) tuples
        for vid in graph.vertices:
            self.path_lengths[vid] = math.inf
        self.path_lengths[source_vertex.id] = 0
        heapq.heappush(self.pq, (0.0, source_vertex.id))
        # repeatedly settle the closest unfinished vertex and relax its edges
        while self.pq:
            _, vid = heapq.heappop(self.pq)
            self.relax_edges(graph, graph.vertices[vid])

    def relax_edges(self, graph, source_vertex):
        """Relax every outgoing edge of *source_vertex*, updating stale
        heap entries in place (decrease-key via index + re-heapify)."""
        for edge in graph.adj_edges(source_vertex):
            dest_vertex = graph.vertices[edge.dest]
            current = self.path_lengths[dest_vertex.id]
            candidate = self.path_lengths[source_vertex.id] + edge.weight
            if current <= candidate:
                continue
            self.path_lengths[dest_vertex.id] = candidate
            self.path_of_edges[dest_vertex.id] = edge
            stale_entry = (current, dest_vertex.id)
            if stale_entry in self.pq:
                self.pq[self.pq.index(stale_entry)] = (candidate, dest_vertex.id)
                heapq.heapify(self.pq)
            else:
                heapq.heappush(self.pq, (candidate, dest_vertex.id))
| 35.004808 | 96 | 0.591402 |
6e632829e815f5f7c4f661d6e66118ae74802571 | 8,547 | py | Python | authors/apps/articles/tests/test_share_articles.py | andela/ah-alpha | dbc038c9a11362a314c258a80c4a133d11ff1012 | [
"BSD-3-Clause"
] | 1 | 2019-03-18T08:24:37.000Z | 2019-03-18T08:24:37.000Z | authors/apps/articles/tests/test_share_articles.py | andela/ah-alpha | dbc038c9a11362a314c258a80c4a133d11ff1012 | [
"BSD-3-Clause"
] | 39 | 2019-01-08T12:12:57.000Z | 2022-03-11T23:39:18.000Z | authors/apps/articles/tests/test_share_articles.py | jamesbeamie/bolt-J | 1824afd73bfba708f0e56fbd7cbb8d7521f06a1a | [
"BSD-3-Clause"
] | 2 | 2019-02-04T08:36:44.000Z | 2019-03-05T19:59:44.000Z | from django.urls import reverse
from rest_framework import status
from django.test import TestCase, Client
import json
from rest_framework.authtoken.models import Token
from ..messages import error_msgs, success_msg
from authors.apps.articles.models import Article
from authors.apps.authentication.models import User
from authors.apps.authentication.backends import JWTokens
class TestArticleRating(TestCase):
    """Integration tests for the article share endpoints
    (email / facebook / twitter).

    NOTE(review): despite the class name, these tests exercise article
    *sharing*, not rating.
    """
    def setUp(self):
        # Test client plus share URLs for an article created below
        # ("this-is-a-blog") and for a slug that does not exist.
        self.client = Client()
        self.email_url = reverse("articles:email_share",
                                 kwargs={"slug": "this-is-a-blog"})
        self.facebook_url = reverse("articles:facebook_share",
                                    kwargs={"slug": "this-is-a-blog"})
        self.twitter_url = reverse("articles:twitter_share",
                                   kwargs={"slug": "this-is-a-blog"})
        self.wrong_email_url = reverse("articles:email_share",
                                       kwargs={"slug": "this-is-a"})
        self.wrong_facebook_url = reverse("articles:facebook_share",
                                          kwargs={"slug": "this-is-a"})
        self.wrong_twitter_url = reverse("articles:twitter_share",
                                         kwargs={"slug": "this-is-a"})
        # NOTE(review): redundant double assignment; local alias `url` is unused
        self.wrong_url = url = reverse("rating:rate",
                                       kwargs={"slug": "this-is-a"})
        # request bodies: valid, empty, and malformed receiver emails
        self.email_body = {
            'email': "ewachira254@gmail.com"
        }
        self.no_email_body = {
            'email': ""
        }
        self.invalid_email_body = {
            'email': "ewachira254gmail.com"
        }
        # user 1 authors the article; user 2 is a non-author sharer
        self.create_user_1 = User.objects.create_user(
            username="wachira",
            email="ewachira254@gmail.com",
            password="@Wachira254"
        )
        self.token_user_1 = JWTokens.create_token(
            self, user=self.create_user_1)
        self.create_user_2 = User.objects.create_user(
            username="bolton",
            email="bolton@gmail.com",
            password="@bolton254"
        )
        self.token_user_2 = JWTokens.create_token(
            self, user=self.create_user_2)
        self.correct_article = Article.objects.create(
            author_id=self.create_user_1.id,
            image_path="...",
            title="This is a blog",
            body="This is a body",
            slug="this-is-a-blog"
        )

    # --- happy-path sharing by the article's author ---
    def test_share_via_email(self):
        response = self.client.post(
            self.email_url,
            self.email_body,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_1,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         success_msg['share_success'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("shared_link", response.data)

    def test_share_via_facebook(self):
        response = self.client.post(
            self.facebook_url,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_1,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         success_msg['share_success'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("shared_link", response.data)

    def test_share_via_twitter(self):
        response = self.client.post(
            self.twitter_url,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_1,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         success_msg['share_success'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("shared_link", response.data)

    # --- requests without an Authorization header are rejected ---
    def test_unauthorized_share_via_email(self):
        response = self.client.post(
            self.email_url,
            self.email_body,
            content_type='application/json'
        )
        self.assertEqual(response.data['detail'],
                         'Authentication credentials were not provided.')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_unauthorized_share_via_facebook(self):
        response = self.client.post(
            self.facebook_url,
            content_type='application/json'
        )
        self.assertEqual(response.data['detail'],
                         'Authentication credentials were not provided.')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_unauthorized_share_via_twitter(self):
        response = self.client.post(
            self.twitter_url,
            content_type='application/json'
        )
        self.assertEqual(response.data['detail'],
                         'Authentication credentials were not provided.')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    # --- receiver-email validation for the email channel ---
    def test_share_via_email_without_receivers_email(self):
        response = self.client.post(
            self.email_url,
            self.no_email_body,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_1,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         error_msgs['no_email'])
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_share_via_email_with_invalid_receivers_email(self):
        response = self.client.post(
            self.email_url,
            self.invalid_email_body,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_1,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         error_msgs['email_format'])
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    # --- sharing a non-existent article returns 404 on every channel ---
    def test_share_via_email_nonexisting_article(self):
        response = self.client.post(
            self.wrong_email_url,
            self.email_body,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_1,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         error_msgs['not_found'])
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_share_via_facebook_nonexisting_article(self):
        response = self.client.post(
            self.wrong_facebook_url,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_1,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         error_msgs['not_found'])
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_share_via_twitter_nonexisting_article(self):
        response = self.client.post(
            self.wrong_twitter_url,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_1,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         error_msgs['not_found'])
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    # --- non-author users can share someone else's article too ---
    def test_share_via_email_by_others(self):
        response = self.client.post(
            self.email_url,
            self.email_body,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_2,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         success_msg['share_success'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("shared_link", response.data)

    def test_share_via_facebook_by_others(self):
        response = self.client.post(
            self.facebook_url,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_2,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         success_msg['share_success'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("shared_link", response.data)

    def test_share_via_twitter_by_others(self):
        response = self.client.post(
            self.twitter_url,
            HTTP_AUTHORIZATION='Bearer ' + self.token_user_2,
            content_type='application/json'
        )
        self.assertEqual(response.data['message'],
                         success_msg['share_success'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("shared_link", response.data)
| 37.486842 | 76 | 0.608401 |
2ef66b3cd65ae485e57a76a66d608a53a26fcc81 | 10,808 | py | Python | fastfold/model/fastnn/triangle.py | hpcaitech/FastFold | a65d5009279ef84c1518081344db5c02213c387a | [
"Apache-2.0"
] | 303 | 2022-03-03T01:59:47.000Z | 2022-03-31T07:46:42.000Z | fastfold/model/fastnn/triangle.py | hpcaitech/FastFold | a65d5009279ef84c1518081344db5c02213c387a | [
"Apache-2.0"
] | 6 | 2022-03-03T22:17:03.000Z | 2022-03-17T06:09:11.000Z | fastfold/model/fastnn/triangle.py | hpcaitech/FastFold | a65d5009279ef84c1518081344db5c02213c387a | [
"Apache-2.0"
] | 35 | 2022-03-03T01:58:56.000Z | 2022-03-29T21:21:06.000Z | from fastfold.distributed.comm_async import gather_async
import torch
import torch.nn as nn
from fastfold.model.fastnn.kernel import LayerNorm
from fastfold.distributed.comm import col_to_row, row_to_col, scatter
from fastfold.model.fastnn.kernel import bias_dropout_add, bias_ele_dropout_residual
from fastfold.model.fastnn.ops import Linear, SelfAttention, Transition
from fastfold.distributed.comm_async import gather_async_opp, gather_async
def permute_final_dims(tensor, inds):
    """Permute only the last ``len(inds)`` dimensions of *tensor*.

    Leading dimensions are kept in order; the trailing dimensions are
    rearranged according to *inds* (indices relative to the trailing
    block, e.g. inds=(1, 0) swaps the last two dims).
    """
    num_trailing = len(inds)
    leading = list(range(tensor.dim() - num_trailing))
    trailing = [tensor.dim() - num_trailing + i for i in inds]
    return tensor.permute(leading + trailing)
class TriangleMultiplicationOutgoing(nn.Module):
    """Triangular multiplicative update using "outgoing" edges, in a
    tensor-parallel layout.

    NOTE(review): the pair activation Z appears to be sharded along dim 1
    across ranks (Z_mask_row + gather along dim 1) -- confirm against the
    distributed setup. The right projection is gathered asynchronously so
    communication overlaps with the gate computation.

    NOTE(review): layernorm2 normalizes over c and output_projection maps
    d_pair -> d_pair, so this module appears to require c == d_pair (as
    PairStack passes) -- confirm.
    """
    def __init__(self, d_pair, p_drop, c=128):
        super(TriangleMultiplicationOutgoing, self).__init__()
        self.d_pair = d_pair
        self.c = c  # hidden channel width of the left/right projections

        self.layernorm1 = LayerNorm(d_pair)
        # left and right projections fused into one linear (chunked in forward)
        self.left_right_projection = Linear(d_pair, 2 * c)
        self.left_right_gate = Linear(d_pair, 2 * c, initializer='zeros', bias_init=1.)
        self.output_gate = Linear(d_pair, d_pair, initializer='zeros', bias_init=1.)
        self.layernorm2 = LayerNorm(c)
        # bias is kept separate so it can be fused into the residual kernel
        self.output_projection = Linear(d_pair, d_pair, initializer='zeros', use_bias=False)
        self.output_bias = nn.parameter.Parameter(data=torch.zeros((d_pair,)), requires_grad=True)

        self.p_drop = p_drop

    def forward(self, Z_raw, Z_mask_row):
        """Z_raw: pair activations (residual input); Z_mask_row: row mask
        matching the local shard. Returns Z_raw + gated/dropped update."""
        Z = self.layernorm1(Z_raw)
        left_right_proj_act = self.left_right_projection(Z)

        # mask padded positions before gating
        left_right_proj_act = Z_mask_row.unsqueeze(-1) * left_right_proj_act

        left_right_proj_act *= torch.sigmoid(self.left_right_gate(Z))

        left_proj_act, right_proj_act = left_right_proj_act.chunk(2, dim=-1)
        # right_proj_act = gather(right_proj_act.contiguous(), dim=1)
        # start the all-gather now; overlap it with the gate computation below
        right_proj_act, work = gather_async(right_proj_act.contiguous(), dim=1)

        g = torch.sigmoid(self.output_gate(Z))

        left_proj_act = permute_final_dims(left_proj_act, (2, 0, 1))
        # wait for the gather to finish before the contraction
        right_proj_act = gather_async_opp(right_proj_act, work, dim=1)
        p = torch.matmul(
            left_proj_act,
            permute_final_dims(right_proj_act, (2, 1, 0)),
        )
        ab = permute_final_dims(p, (1, 2, 0))

        # ab = torch.einsum('bikd,bjkd->bijd', left_proj_act, right_proj_act)
        ab = self.output_projection(self.layernorm2(ab))
        dropout_mask = torch.ones_like(Z[:, 0:1, :, :], device=Z.device, dtype=Z.dtype)
        # fused: residual add + elementwise gate + (shared-row) dropout + bias
        return bias_ele_dropout_residual(ab,
                                         self.output_bias,
                                         g,
                                         dropout_mask,
                                         Z_raw,
                                         prob=self.p_drop,
                                         training=self.training)
class TriangleMultiplicationIncoming(nn.Module):
    """Triangular multiplicative update using "incoming" edges, mirroring
    TriangleMultiplicationOutgoing but contracting over the first pair
    dimension (here the *left* projection is gathered, along dim 2).

    NOTE(review): as with the outgoing variant, this appears to require
    c == d_pair and a column-sharded Z (Z_mask_col) -- confirm.
    """
    def __init__(self, d_pair, p_drop, c=128):
        super(TriangleMultiplicationIncoming, self).__init__()
        self.d_pair = d_pair
        self.c = c  # hidden channel width of the left/right projections

        self.layernorm1 = LayerNorm(d_pair)
        # left and right projections fused into one linear (chunked in forward)
        self.left_right_projection = Linear(d_pair, 2 * c)
        self.left_right_gate = Linear(d_pair, 2 * c, initializer='zeros', bias_init=1.)
        self.output_gate = Linear(d_pair, d_pair, initializer='zeros', bias_init=1.)
        self.layernorm2 = LayerNorm(c)
        # bias is kept separate so it can be fused into the residual kernel
        self.output_projection = Linear(d_pair, d_pair, initializer='zeros', use_bias=False)
        self.output_bias = nn.parameter.Parameter(data=torch.zeros((d_pair,)), requires_grad=True)

        self.p_drop = p_drop

    def forward(self, Z_raw, Z_mask_col):
        """Z_raw: pair activations (residual input); Z_mask_col: column mask
        matching the local shard. Returns Z_raw + gated/dropped update."""
        Z = self.layernorm1(Z_raw)
        left_right_proj_act = self.left_right_projection(Z)

        # mask padded positions before gating
        left_right_proj_act = Z_mask_col.unsqueeze(-1) * left_right_proj_act

        left_right_proj_act *= torch.sigmoid(self.left_right_gate(Z))

        left_proj_act, right_proj_act = left_right_proj_act.chunk(2, dim=-1)
        # start the all-gather of the left projection; overlap with the gate
        left_proj_act, work = gather_async(left_proj_act.contiguous(), dim=2)

        g = torch.sigmoid(self.output_gate(Z))

        right_proj_act = permute_final_dims(right_proj_act, (2, 0, 1))
        # wait for the gather to finish before the contraction
        left_proj_act = gather_async_opp(left_proj_act, work, dim=2)
        p = torch.matmul(permute_final_dims(left_proj_act, (2, 1, 0)), right_proj_act)
        ab = permute_final_dims(p, (1, 2, 0))

        # ab = torch.einsum('bkid,bkjd->bijd', left_proj_act, right_proj_act)
        ab = self.output_projection(self.layernorm2(ab))
        dropout_mask = torch.ones_like(Z[:, 0:1, :, :], device=Z.device, dtype=Z.dtype)
        # fused: residual add + elementwise gate + (shared-row) dropout + bias
        return bias_ele_dropout_residual(ab,
                                         self.output_bias,
                                         g,
                                         dropout_mask,
                                         Z_raw,
                                         prob=self.p_drop,
                                         training=self.training)
class TriangleAttentionStartingNode(nn.Module):
    """Row-wise self-attention over the pair representation with a
    triangle bias (a per-head bias projected from Z and gathered across
    ranks along dim 1 so every row attends over full columns).

    NOTE(review): the (b, work) tuple hands the in-flight all-gather to
    SelfAttention so communication overlaps with the QKV projections --
    confirm against SelfAttention's handling.
    """
    def __init__(self, d_pair, p_drop, c=32, n_head=4):
        super(TriangleAttentionStartingNode, self).__init__()
        self.d_pair = d_pair
        self.c = c            # per-head channel width
        self.n_head = n_head
        self.p_drop = p_drop

        self.layernorm1 = LayerNorm(d_pair)
        # _init_weights = torch.nn.init.normal_(torch.zeros([d_pair, n_head]),
        #                                       std=1.0 / math.sqrt(d_pair))
        # self.linear_b_weights = nn.parameter.Parameter(data=_init_weights)
        self.linear_b = Linear(d_pair, n_head, initializer='linear', use_bias=False)
        # last_bias_fuse=True: the output bias is applied in the fused
        # bias_dropout_add below, not inside SelfAttention
        self.attention = SelfAttention(qkv_dim=d_pair,
                                       c=c,
                                       n_head=n_head,
                                       out_dim=d_pair,
                                       gating=True,
                                       last_bias_fuse=True)

        self.out_bias = nn.parameter.Parameter(data=torch.zeros((d_pair,)), requires_grad=True)

    def forward(self, Z_raw, Z_mask):
        """Z_raw: pair activations (residual input); Z_mask: pair mask.
        Returns Z_raw + attention update."""
        Z = self.layernorm1(Z_raw)
        b = self.linear_b(Z)
        # b = torch.einsum('bqkc,ch->bhqk', Z, self.linear_b_weights)

        # start gathering the triangle bias; SelfAttention waits on `work`
        b, work = gather_async(b, dim=1)
        # b = rearrange(b, 'b q k h -> b h q k')

        # padding_bias = (1e9 * (Z_mask - 1.))[:, :, None, None, :]
        Z = self.attention(Z, Z_mask, (b, work))

        dropout_mask = torch.ones_like(Z[:, 0:1, :, :], device=Z.device, dtype=Z.dtype)
        # fused: residual add + (shared-row) dropout + output bias
        return bias_dropout_add(Z,
                                self.out_bias,
                                dropout_mask,
                                Z_raw,
                                prob=self.p_drop,
                                training=self.training)
class TriangleAttentionEndingNode(nn.Module):
    """Column-wise self-attention over the pair representation: identical
    to TriangleAttentionStartingNode but applied to the transposed pair
    tensor (rows <-> columns), then transposed back.

    NOTE(review): the dropout mask here is built over dim 2 of the
    *transposed* tensor (Z[:, :, 0:1, :] after transposing back) so the
    shared-dropout axis matches the attended axis -- confirm.
    """
    def __init__(self, d_pair, p_drop, c=32, n_head=4):
        super(TriangleAttentionEndingNode, self).__init__()
        self.d_pair = d_pair
        self.c = c            # per-head channel width
        self.n_head = n_head
        self.p_drop = p_drop

        self.layernorm1 = LayerNorm(d_pair)
        # _init_weights = torch.nn.init.normal_(torch.zeros([d_pair, n_head]),
        #                                       std=1.0 / math.sqrt(d_pair))
        # self.linear_b_weights = nn.parameter.Parameter(data=_init_weights)
        self.linear_b = Linear(d_pair, n_head, initializer='linear', use_bias=False)
        # last_bias_fuse=True: the output bias is applied in the fused
        # bias_dropout_add below, not inside SelfAttention
        self.attention = SelfAttention(qkv_dim=d_pair,
                                       c=c,
                                       n_head=n_head,
                                       out_dim=d_pair,
                                       gating=True,
                                       last_bias_fuse=True)

        self.out_bias = nn.parameter.Parameter(data=torch.zeros((d_pair,)), requires_grad=True)

    def forward(self, Z_raw, Z_mask):
        """Z_raw: pair activations (residual input); Z_mask: pair mask.
        Returns Z_raw + attention update along the other pair axis."""
        # attend along the other axis by transposing rows and columns
        Z = Z_raw.transpose(-2, -3)
        Z_mask = Z_mask.transpose(-1, -2)

        Z = self.layernorm1(Z)
        b = self.linear_b(Z)
        # b = torch.einsum('bqkc,ch->bhqk', Z, self.linear_b_weights)

        # start gathering the triangle bias; SelfAttention waits on `work`
        b, work = gather_async(b, dim=1)
        # b = rearrange(b, 'b q k h -> b h q k')

        # padding_bias = (1e9 * (Z_mask - 1.))[:, :, None, None, :]
        Z = self.attention(Z, Z_mask, (b, work))

        # restore the original (row, column) layout
        Z = Z.transpose(-2, -3)
        dropout_mask = torch.ones_like(Z[:, :, 0:1, :], device=Z.device, dtype=Z.dtype)
        # fused: residual add + (shared) dropout + output bias
        return bias_dropout_add(Z,
                                self.out_bias,
                                dropout_mask,
                                Z_raw,
                                prob=self.p_drop,
                                training=self.training)
class PairStack(nn.Module):
    """One Evoformer-style pair update block: triangle multiplication
    (outgoing then incoming), triangle attention (starting then ending node),
    and a transition MLP, each applied as a residual update by the submodule.

    The pair tensor is converted between row- and column-partitioned layouts
    (``row_to_col`` / ``col_to_row``) between submodules — presumably so each
    submodule operates on the shard layout it expects; confirm against the
    scatter/gather helpers.
    """

    def __init__(self, d_pair, p_drop=0.25):
        # d_pair: channel width of the pair representation
        # p_drop: dropout probability forwarded to each residual submodule
        super(PairStack, self).__init__()
        self.d_pair = d_pair
        self.n_head = 4
        # Per-head width for the attention blocks (channels split across heads).
        self.hidden_c = int(d_pair / self.n_head)
        self.TriangleMultiplicationOutgoing = TriangleMultiplicationOutgoing(d_pair,
                                                                             p_drop=p_drop,
                                                                             c=d_pair)
        self.TriangleMultiplicationIncoming = TriangleMultiplicationIncoming(d_pair,
                                                                             p_drop=p_drop,
                                                                             c=d_pair)
        self.TriangleAttentionStartingNode = TriangleAttentionStartingNode(d_pair,
                                                                           p_drop=p_drop,
                                                                           c=self.hidden_c,
                                                                           n_head=self.n_head)
        self.TriangleAttentionEndingNode = TriangleAttentionEndingNode(d_pair,
                                                                       p_drop=p_drop,
                                                                       c=self.hidden_c,
                                                                       n_head=self.n_head)
        self.PairTransition = Transition(d=d_pair)

    def forward(self, pair, pair_mask):
        # Masks are scattered once per axis and reused by the submodules.
        pair_mask_row = scatter(pair_mask, dim=1)
        pair_mask_col = scatter(pair_mask, dim=2)
        pair = self.TriangleMultiplicationOutgoing(pair, pair_mask_row)
        pair = row_to_col(pair)
        pair = self.TriangleMultiplicationIncoming(pair, pair_mask_col)
        pair = col_to_row(pair)
        pair = self.TriangleAttentionStartingNode(pair, pair_mask_row)
        pair = row_to_col(pair)
        pair = self.TriangleAttentionEndingNode(pair, pair_mask_col)
        pair = self.PairTransition(pair)
        pair = col_to_row(pair)
        return pair
| 43.232 | 98 | 0.551443 |
c376b81ff4922b244b15b1ab5686c5c8a9cef01b | 15,221 | py | Python | isi_sdk_8_2_2/isi_sdk_8_2_2/models/result_directories_dir_usage_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/result_directories_dir_usage_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/result_directories_dir_usage_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# Field order matches the constructor signature; kept as an explicit tuple so
# the mapping is deterministic on every Python version.
_RESULT_DIR_USAGE_ATTRS = (
    'ads_cnt', 'dir_cnt', 'file_cnt', 'lin', 'log_size_sum',
    'log_size_sum_overflow', 'name', 'other_cnt', 'parent', 'phys_size_sum')

# 64-bit signed bounds used by the generated range checks.
_INT64_MAX = 9223372036854775807
_INT64_MIN = -9223372036854775808


def _checked(attr, value, minimum, maximum):
    """Validate *value* against [minimum, maximum]; ``None`` always passes.

    Raises :class:`ValueError` with the exact messages the swagger generator
    emitted, so callers relying on the message text are unaffected.
    """
    if value is not None and value > maximum:
        raise ValueError(
            "Invalid value for `%s`, must be a value less than or equal to `%s`"
            % (attr, maximum))  # noqa: E501
    if value is not None and value < minimum:
        raise ValueError(
            "Invalid value for `%s`, must be a value greater than or equal to `%s`"
            % (attr, minimum))  # noqa: E501
    return value


def _int_property(attr, doc, minimum=_INT64_MIN, maximum=_INT64_MAX):
    """Build a range-validated int property backed by the ``_<attr>`` slot."""
    storage = '_' + attr

    def _get(self):
        return getattr(self, storage)

    def _set(self, value):
        setattr(self, storage, _checked(attr, value, minimum, maximum))

    return property(_get, _set, doc=doc)


def _name_property():
    """Build the ``name`` property with its 4096-character length cap."""

    def _get(self):
        return self._name

    def _set(self, name):
        if name is not None and len(name) > 4096:
            raise ValueError(
                "Invalid value for `name`, length must be less than or equal to `4096`")  # noqa: E501
        self._name = name

    return property(_get, _set, doc="Name of directory.")


class ResultDirectoriesDirUsageExtended(object):
    """Swagger model for extended per-directory usage statistics.

    Hand-tidied version of the swagger-codegen output: the ten numeric
    range-validated properties are produced by :func:`_int_property` instead
    of being spelled out, and ``six`` is no longer needed (``dict.items``
    behaves the same here on Python 2 and 3).  The public interface —
    constructor signature, attribute names, validation bounds and error
    messages, ``swagger_types``/``attribute_map`` and the
    dict/str/repr/eq helpers — is unchanged.
    """

    swagger_types = {
        'ads_cnt': 'int',
        'dir_cnt': 'int',
        'file_cnt': 'int',
        'lin': 'int',
        'log_size_sum': 'int',
        'log_size_sum_overflow': 'int',
        'name': 'str',
        'other_cnt': 'int',
        'parent': 'int',
        'phys_size_sum': 'int'
    }

    # Every JSON key matches the attribute name one-to-one.
    attribute_map = {attr: attr for attr in _RESULT_DIR_USAGE_ATTRS}

    ads_cnt = _int_property('ads_cnt', "Number of alternate data streams.")
    dir_cnt = _int_property('dir_cnt', "Number of directories.")
    file_cnt = _int_property('file_cnt', "Number of files.")
    # lin and parent are inode numbers: non-negative per the generated checks.
    lin = _int_property('lin', "Logical inode number.", minimum=0)
    log_size_sum = _int_property('log_size_sum', "Logical size directory in bytes.")
    log_size_sum_overflow = _int_property('log_size_sum_overflow',
                                          "Logical size sum of overflow in bytes.")
    name = _name_property()
    other_cnt = _int_property('other_cnt', "Other count.")
    parent = _int_property('parent', "Parent directory inode.", minimum=0)
    phys_size_sum = _int_property('phys_size_sum', "Physical size directory in bytes.")

    def __init__(self, ads_cnt=None, dir_cnt=None, file_cnt=None, lin=None, log_size_sum=None, log_size_sum_overflow=None, name=None, other_cnt=None, parent=None, phys_size_sum=None):  # noqa: E501
        """ResultDirectoriesDirUsageExtended - a model defined in Swagger."""
        values = (ads_cnt, dir_cnt, file_cnt, lin, log_size_sum,
                  log_size_sum_overflow, name, other_cnt, parent, phys_size_sum)
        for attr, value in zip(_RESULT_DIR_USAGE_ATTRS, values):
            # Initialise the backing slot, then route non-None values through
            # the property setter so range validation applies.
            setattr(self, '_' + attr, None)
            if value is not None:
                setattr(self, attr, value)
        self.discriminator = None

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, ResultDirectoriesDirUsageExtended):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 37.398034 | 197 | 0.635241 |
ed5de0a265b65fb7a49046cb33e8a7920012f2b8 | 10,224 | py | Python | deep_lynx/models/create_manual_import.py | idaholab/Deep-Lynx-Python-Package | 99927cc877eba8e2ee396feec807da1c48c64893 | [
"MIT"
] | 3 | 2021-06-16T20:34:41.000Z | 2021-06-16T23:54:36.000Z | deep_lynx/models/create_manual_import.py | idaholab/Deep-Lynx-Python-Package | 99927cc877eba8e2ee396feec807da1c48c64893 | [
"MIT"
] | null | null | null | deep_lynx/models/create_manual_import.py | idaholab/Deep-Lynx-Python-Package | 99927cc877eba8e2ee396feec807da1c48c64893 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Deep Lynx
The construction of megaprojects has consistently demonstrated challenges for project managers in regard to meeting cost, schedule, and performance requirements. Megaproject construction challenges are common place within megaprojects with many active projects in the United States failing to meet cost and schedule efforts by significant margins. Currently, engineering teams operate in siloed tools and disparate teams where connections across design, procurement, and construction systems are translated manually or over brittle point-to-point integrations. The manual nature of data exchange increases the risk of silent errors in the reactor design, with each silent error cascading across the design. These cascading errors lead to uncontrollable risk during construction, resulting in significant delays and cost overruns. Deep Lynx allows for an integrated platform during design and operations of mega projects. The Deep Lynx Core API delivers a few main features. 1. Provides a set of methods and endpoints for manipulating data in an object oriented database. This allows us to store complex datatypes as records and then to compile them into actual, modifiable objects at run-time. Users can store taxonomies or ontologies in a readable format. 2. Provides methods for storing and retrieving data in a graph database. This data is structured and validated against the aformentioned object oriented database before storage. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# Field order matches the constructor signature; kept as an explicit tuple so
# the mapping is deterministic on every Python version.
_CREATE_MANUAL_IMPORT_ATTRS = (
    'status', 'id', 'data_source_id', 'created_at', 'modified_at',
    'created_by', 'modified_by', 'reference', 'status_message')


def _plain_property(attr, doc):
    """Build a pass-through property backed by the ``_<attr>`` slot."""
    storage = '_' + attr

    def _get(self):
        return getattr(self, storage)

    def _set(self, value):
        setattr(self, storage, value)

    return property(_get, _set, doc=doc)


class CreateManualImport(object):
    """Swagger model for the manual-import creation payload/response.

    Hand-tidied version of the swagger-codegen output: the nine identical
    unvalidated string properties are generated by :func:`_plain_property`
    instead of being spelled out, ``six`` is no longer needed
    (``dict.items`` behaves the same here on Python 2 and 3), and the dead
    ``issubclass(CreateManualImport, dict)`` branch in ``to_dict`` (always
    False — this class does not subclass ``dict``) is removed.  The public
    interface — constructor signature, attribute names,
    ``swagger_types``/``attribute_map`` and the dict/str/repr/eq helpers —
    is unchanged.
    """

    # Every field is declared as a plain string in the API schema.
    swagger_types = {attr: 'str' for attr in _CREATE_MANUAL_IMPORT_ATTRS}

    # Every JSON key matches the attribute name one-to-one.
    attribute_map = {attr: attr for attr in _CREATE_MANUAL_IMPORT_ATTRS}

    status = _plain_property('status', "Value of the `status` field.")
    id = _plain_property('id', "Value of the `id` field.")
    data_source_id = _plain_property('data_source_id', "Value of the `data_source_id` field.")
    created_at = _plain_property('created_at', "Value of the `created_at` field.")
    modified_at = _plain_property('modified_at', "Value of the `modified_at` field.")
    created_by = _plain_property('created_by', "Value of the `created_by` field.")
    modified_by = _plain_property('modified_by', "Value of the `modified_by` field.")
    reference = _plain_property('reference', "Value of the `reference` field.")
    status_message = _plain_property('status_message', "Value of the `status_message` field.")

    def __init__(self, status=None, id=None, data_source_id=None, created_at=None, modified_at=None, created_by=None, modified_by=None, reference=None, status_message=None):  # noqa: E501
        """CreateManualImport - a model defined in Swagger."""
        values = (status, id, data_source_id, created_at, modified_at,
                  created_by, modified_by, reference, status_message)
        for attr, value in zip(_CREATE_MANUAL_IMPORT_ATTRS, values):
            # Initialise the backing slot, then route non-None values through
            # the property setter, mirroring the generated code.
            setattr(self, '_' + attr, None)
            if value is not None:
                setattr(self, attr, value)
        self.discriminator = None

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, CreateManualImport):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 32.050157 | 1,455 | 0.625685 |
3f6f5b4658b1c7f19a97e0795985c119e7517da7 | 19,092 | py | Python | codes/datasets/MVM3D_loader.py | ZichengDuan/MVM3D | 5242fa05afb6bff097908c88a8ef0fd9bc4a1fc5 | [
"MIT"
] | 21 | 2021-09-14T19:11:29.000Z | 2022-02-05T05:58:32.000Z | codes/datasets/MVM3D_loader.py | Robert-Mar/MVM3D | b62c96de5894ae5fef615e2ee54fe975248a3df7 | [
"MIT"
] | 1 | 2021-11-25T08:56:32.000Z | 2021-12-04T07:40:23.000Z | codes/datasets/MVM3D_loader.py | Robert-Mar/MVM3D | b62c96de5894ae5fef615e2ee54fe975248a3df7 | [
"MIT"
] | 2 | 2021-09-13T04:07:10.000Z | 2021-09-14T09:15:52.000Z | import json
from PIL import Image
import torch
from torchvision.transforms import ToTensor
from codes.datasets.MVM3D import *
import warnings
from codes.EX_CONST import Const
warnings.filterwarnings("ignore")
class MVM3D_loader(VisionDataset):
    def __init__(self, base, train=True, transform=ToTensor(), target_transform=ToTensor(),
                 reID=False, grid_reduce=Const.reduce, img_reduce=Const.reduce):
        """Multi-view 3D detection dataset wrapper around a MVM3D `base`.

        :param base: base dataset object providing paths, camera counts,
            image/world-grid shapes and calibration matrices.
        :param train: split selector — despite the boolean default, the
            branches below treat it as an int id (1 train, 2/3 val-test
            variants, 4 a fixed prefix split).
        :param transform / target_transform: torchvision transforms.
        :param reID: keep re-identification labels (stored, not used here).
        :param grid_reduce / img_reduce: downsample factors for the BEV grid
            and the input images.
        """
        super().__init__(base.root, transform=transform, target_transform=target_transform)
        self.reID, self.grid_reduce, self.img_reduce = reID, grid_reduce, img_reduce
        self.base = base
        self.train = train
        self.root, self.num_cam = base.root, base.num_cam
        self.img_shape, self.worldgrid_shape = base.img_shape, base.worldgrid_shape # H,W; N_row,N_col
        self.reducedgrid_shape = list(map(lambda x: int(x / self.grid_reduce), self.worldgrid_shape))
        self.extrinsic_matrix = base.extrinsic_matrices
        self.intrinsic_matrix = base.intrinsic_matrices
        # Split the dataset into frame-index ranges according to the split id
        # in `train` (see docstring).  NOTE(review): any other value leaves
        # frame_range undefined and raises NameError below — confirm callers
        # only pass 1..4.
        if train == 1:
            frame_range = list(range(0, 1800)) + list(range(2100, 3500)) + list(range(3600, 4330))
        elif train == 3:
            frame_range = list (range(2000, 2100)) + list(range(3500, 3600))
        elif train == 2:
            frame_range = list(range(1800, 2100)) + list(range(3500, 3600))
        elif train == 4:
            frame_range = list(range(0, 1625))
        self.upsample_shape = list(map(lambda x: int(x / self.img_reduce), self.img_shape))
        img_reduce_local = np.array(self.img_shape) / np.array(self.upsample_shape)
        # Homographies mapping (reduced) image coords to the (reduced) BEV
        # grid, one per camera; zoom matrices account for both reductions.
        imgcoord2worldgrid_matrices = get_imgcoord2worldgrid_matrices(base.intrinsic_matrices,
                                                                      base.extrinsic_matrices,
                                                                      base.worldgrid2worldcoord_mat)
        img_zoom_mat = np.diag(np.append(img_reduce_local, [1]))
        map_zoom_mat = np.diag(np.append(np.ones([2]) / self.grid_reduce, [1]))
        self.proj_mats = [torch.from_numpy(map_zoom_mat @ imgcoord2worldgrid_matrices[cam] @ img_zoom_mat)
                          for cam in range(2)]
        # create angle bins (MultiBin-style orientation discretisation)
        bins = Const.bins
        overlap = 0.1
        self.bins = bins
        self.angle_bins = np.zeros(bins)
        self.interval = 2 * np.pi / bins
        for i in range(1, bins):
            self.angle_bins[i] = i * self.interval
        self.angle_bins += self.interval / 2 # center of the bin
        self.overlap = overlap
        # ranges for confidence: each bin's (start, end) in [0, 2*pi),
        # widened by `overlap` on both sides so neighbouring bins overlap.
        self.bin_ranges = []
        for i in range(0, bins):
            self.bin_ranges.append(((i * self.interval - overlap) % (2 * np.pi), \
                                    (i * self.interval + self.interval + overlap) % (2 * np.pi)))
        # Per-frame annotation caches, filled by the prepare_* methods below.
        self.bev_bboxes = {}
        self.left_bboxes = {}
        self.right_bboxes = {}
        self.left_dir = {}
        self.right_dir = {}
        self.left_angle = {}
        self.right_angle = {}
        self.left_orientation = {}
        self.left_conf = {}
        self.right_orientation = {}
        self.right_conf = {}
        self.world_xy = {}
        self.bev_angle = {}
        self.mark = {}
        self.img_fpaths = self.base.get_image_fpaths(frame_range)
        # NOTE(review): `train` is an int split id, so every non-zero split
        # (including the test splits 2/3) takes the truthy branch and writes
        # train_gt.txt — confirm this is intended.
        if train:
            self.gt_fpath = os.path.join(self.root, 'res/train_gt.txt')
        else:
            self.gt_fpath = os.path.join(self.root, 'res/test_gt.txt')
        self.prepare_gt(frame_range)
        self.prepare_bbox(frame_range)
        self.prepare_dir(frame_range)
        self.prepare_bins(frame_range)
def get_bin(self, angle):
bin_idxs = []
def is_between(min, max, angle):
max = (max - min) if (max - min) > 0 else (max - min) + 2*np.pi
angle = (angle - min) if (angle - min) > 0 else (angle - min) + 2*np.pi
return angle < max
for bin_idx, bin_range in enumerate(self.bin_ranges):
if is_between(bin_range[0], bin_range[1], angle):
bin_idxs.append(bin_idx)
return bin_idxs
    def prepare_bins(self, frame_range):
        """Precompute MultiBin orientation targets for every annotated frame.

        For each car the BEV yaw is converted into a per-view target angle
        (relative to the ray from the respective camera corner), which is then
        encoded as per-bin (cos, sin) residuals plus a bin-confidence one-hot
        into ``self.left_orientation`` / ``self.left_conf`` and the right-view
        counterparts, keyed by frame index.

        NOTE(review): ``frame_left_dir``/``frame_right_ang``/``frame_wxy``/
        ``mk`` are computed but never stored — dead code kept as-is.
        Frames outside *frame_range* still get (empty) cache entries because
        the final assignments sit outside the ``if``.
        """
        for fname in sorted(os.listdir(os.path.join(self.root, 'annotations'))):
            frame_left_dir = []
            frame_right_dir = []
            frame_left_ang = []
            frame_right_ang = []
            frame_wxy = []
            frame_bev_angle = []
            frame_left_orientation = []
            frame_left_conf = []
            frame_right_orientation = []
            frame_right_conf = []
            frame = int(fname.split('.')[0])
            if frame in frame_range:
                with open(os.path.join(self.root, 'annotations', fname)) as json_file:
                    cars = [json.load(json_file)][0]
                    for i, car in enumerate(cars):
                        # World position in grid cells (annotations are in mm-scale units / 10).
                        wx = int(car["wx"]) // 10
                        wy = int(car["wy"]) // 10
                        mk = int(car["mark"])
                        # left_dir = int(car["direc_left"])
                        # right_dir = int(car["direc_right"])
                        left_dir = 0
                        right_dir = 0
                        bev_angle = float(car["angle"])
                        frame_wxy.append([wx, wy])
                        if Const.roi_classes != 1:
                            frame_left_dir.append(left_dir)
                            frame_right_dir.append(right_dir)
                        else:
                            frame_left_dir.append(0)
                            frame_right_dir.append(0)
                        # normalise the yaw into [0, 2*pi)
                        if bev_angle < 0:
                            bev_angle += 2 * np.pi
                        # left-view angle label: subtract the viewing-ray angle alpha
                        alpha = np.arctan((Const.grid_height - wy) / wx)
                        left_target = bev_angle - alpha if bev_angle - alpha > 0 else 2 * np.pi + (bev_angle - alpha)
                        # if frame in range(500, 600) and i == 2:
                        #     print(wx, wy)
                        #     print(np.rad2deg(bev_angle))
                        #     print(np.rad2deg(alpha))
                        #     print(np.rad2deg(left_target))
                        #     print(np.arctan(np.sin(left_target) / np.cos(left_target)))
                        # frame_left_ang.append([np.sin(left_target), np.cos(left_target)]) # option 1: regress sin/cos directly
                        # MultiBin encoding: (cos, sin) of the residual to each
                        # covering bin centre, plus a per-bin confidence flag.
                        left_orientation = np.zeros((self.bins, 2))
                        left_confidence = np.zeros(self.bins)
                        left_bin_idxs = self.get_bin(left_target)
                        for bin_idx in left_bin_idxs:
                            angle_diff = left_target - self.angle_bins[bin_idx]
                            left_orientation[bin_idx, :] = np.array([np.cos(angle_diff), np.sin(angle_diff)])
                            left_confidence[bin_idx] = 1
                        # print("left conf", left_confidence)
                        frame_left_orientation.append(left_orientation)
                        frame_left_conf.append(left_confidence)
                        # right-view angle label: flip the forward direction by pi first
                        bev_angle -= np.pi
                        if bev_angle < 0:
                            bev_angle += 2 * np.pi
                        frame_bev_angle.append(bev_angle)
                        alpha = np.arctan(wy / (Const.grid_width - wx))
                        right_target = bev_angle - alpha if bev_angle - alpha > 0 else 2 * np.pi + (bev_angle - alpha)
                        # frame_right_ang.append([np.sin(right_target), np.cos(right_target)]) # option 1: regress sin/cos directly
                        right_orientation = np.zeros((self.bins, 2))
                        right_confidence = np.zeros(self.bins)
                        right_bin_idxs = self.get_bin(right_target)
                        for bin_idx in right_bin_idxs:
                            angle_diff = right_target - self.angle_bins[bin_idx]
                            right_orientation[bin_idx, :] = np.array([np.cos(angle_diff), np.sin(angle_diff)])
                            right_confidence[bin_idx] = 1
                        # print("right conf", right_confidence)
                        frame_right_orientation.append(right_orientation)
                        frame_right_conf.append(right_confidence)
            # print(frame_left_orientation)
            self.left_orientation[frame] = frame_left_orientation
            self.left_conf[frame] = frame_left_conf
            self.right_orientation[frame] = frame_right_orientation
            self.right_conf[frame] = frame_right_conf
    def prepare_gt(self,frame_range):
        """Export per-frame ground-truth grid positions to ``self.gt_fpath``.

        Reads every annotation JSON under ``<root>/annotations`` whose frame id
        is in ``frame_range``, keeps targets visible in at least one camera,
        and writes one ``frame grid_x grid_y`` row per target with
        ``np.savetxt``.
        """
        og_gt = []
        for fname in sorted(os.listdir(os.path.join(self.root, 'annotations'))):
            # Annotation files are named "<frame>.json".
            frame = int(fname.split('.')[0])
            if frame in frame_range:
                with open(os.path.join(self.root, 'annotations', fname)) as json_file:
                    all_pedestrians = [json.load(json_file)][0]
                for single_pedestrian in all_pedestrians:
                    def is_in_cam(cam):
                        # A target absent from a view is annotated with all
                        # box coordinates set to -1.
                        return not (single_pedestrian['views'][cam]['xmin'] == -1 and
                                    single_pedestrian['views'][cam]['xmax'] == -1 and
                                    single_pedestrian['views'][cam]['ymin'] == -1 and
                                    single_pedestrian['views'][cam]['ymax'] == -1)
                    in_cam_range = sum(is_in_cam(cam) for cam in range(self.num_cam))
                    if not in_cam_range:
                        # Skip targets visible in no camera at all.
                        continue
                    wx = single_pedestrian['wx']
                    wy = single_pedestrian['wy']
                    # Clamp world coordinates into the grid; world units appear
                    # to be 10x finer than grid cells — TODO confirm the scale.
                    if wx > Const.grid_width * 10:
                        wx = Const.grid_width * 10 - 1
                    if wy > Const.grid_height * 10:
                        wy = Const.grid_height * 10 - 1
                    grid_x, grid_y= [wx //10, wy//10]
                    og_gt.append(np.array([frame, grid_x, grid_y]))
        og_gt = np.stack(og_gt, axis=0)
        os.makedirs(os.path.dirname(self.gt_fpath), exist_ok=True)
        print(self.gt_fpath)
        np.savetxt(self.gt_fpath, og_gt, '%d')
    def prepare_bbox(self, frame_range):
        """Load per-frame 2-D bounding boxes from the annotation JSONs.

        Fills ``self.bev_bboxes`` (bird's-eye-view boxes, from the ``*_od``
        fields) and the per-camera ``self.left_bboxes`` / ``self.right_bboxes``
        for every frame in ``frame_range``. Boxes are stored as
        ``[ymin, xmin, ymax, xmax]``.
        """
        for fname in sorted(os.listdir(os.path.join(self.root, 'annotations'))):
            frame_bev_box = []
            frame_left_box = []
            frame_right_box = []
            frame = int(fname.split('.')[0])
            if frame in frame_range:
                with open(os.path.join(self.root, 'annotations', fname)) as json_file:
                    cars = [json.load(json_file)][0]
                    for i, car in enumerate(cars):
                        # "_od" fields are the overhead (bird's-eye) box.
                        ymin_od = int(car["ymin_od"])
                        xmin_od = int(car["xmin_od"])
                        ymax_od = int(car["ymax_od"])
                        xmax_od = int(car["xmax_od"])
                        frame_bev_box.append([ymin_od, xmin_od, ymax_od, xmax_od])
                        for j in range(self.num_cam):
                            ymin = car["views"][j]["ymin"]
                            xmin = car["views"][j]["xmin"]
                            ymax = car["views"][j]["ymax"]
                            xmax = car["views"][j]["xmax"]
                            # Camera 0 is the "left" view; every other camera
                            # goes to "right" (assumes num_cam == 2 — TODO
                            # confirm for multi-camera setups).
                            if j == 0:
                                frame_left_box.append([ymin, xmin, ymax, xmax])
                            else:
                                frame_right_box.append([ymin, xmin, ymax, xmax])
                self.bev_bboxes[frame] = frame_bev_box
                self.left_bboxes[frame] = frame_left_box
                self.right_bboxes[frame] = frame_right_box
    def prepare_dir(self, frame_range):
        """Load per-frame world positions and orientation regression targets.

        For every frame in ``frame_range`` this fills ``self.world_xy``,
        ``self.left_dir`` / ``self.right_dir`` (direction class ids, currently
        hard-coded to 0), ``self.bev_angle`` and the per-view angle targets
        ``self.left_angle`` / ``self.right_angle`` encoded as [sin, cos]
        pairs relative to each camera's viewing ray.
        """
        for fname in sorted(os.listdir(os.path.join(self.root, 'annotations'))):
            frame_left_dir = []
            frame_right_dir = []
            frame_left_ang = []
            frame_right_ang = []
            frame_wxy = []
            frame_bev_angle = []
            frame = int(fname.split('.')[0])
            if frame in frame_range:
                with open(os.path.join(self.root, 'annotations', fname)) as json_file:
                    cars = [json.load(json_file)][0]
                    for i, car in enumerate(cars):
                        # World coordinates are stored 10x finer than grid
                        # cells — TODO confirm the scale.
                        wx = int(car["wx"]) // 10
                        wy = int(car["wy"]) // 10
                        mk = int(car["mark"])
                        # left_dir = int(car["direc_left"])
                        # right_dir = int(car["direc_right"])
                        left_dir = 0
                        right_dir = 0
                        bev_angle = float(car["angle"])
                        frame_wxy.append([wx, wy])
                        if Const.roi_classes != 1:
                            frame_left_dir.append(left_dir)
                            frame_right_dir.append(right_dir)
                        else:
                            frame_left_dir.append(0)
                            frame_right_dir.append(0)
                        # Normalize bev_angle into [0, 2*pi).
                        if bev_angle < 0:
                            bev_angle += 2 * np.pi
                        # Left-view angle label: orientation relative to the
                        # ray from the left camera to the target.
                        alpha = np.arctan((Const.grid_height - wy) / wx)
                        left_target = bev_angle - alpha if bev_angle - alpha > 0 else 2 * np.pi + (bev_angle - alpha)
                        # if frame in range(500, 600) and i == 2:
                        #     print(wx, wy)
                        #     print(np.rad2deg(bev_angle))
                        #     print(np.rad2deg(alpha))
                        #     print(np.rad2deg(left_target))
                        #     print(np.arctan(np.sin(left_target) / np.cos(left_target)))
                        frame_left_ang.append([np.sin(left_target), np.cos(left_target)]) # scheme 1: regress sin/cos
                        # Right-view angle label: flip the positive direction
                        # (right camera faces the opposite way).
                        bev_angle -= np.pi
                        if bev_angle < 0:
                            bev_angle += 2 * np.pi
                        frame_bev_angle.append(bev_angle)
                        alpha = np.arctan(wy / (Const.grid_width - wx))
                        right_target = bev_angle - alpha if bev_angle - alpha > 0 else 2 * np.pi + (bev_angle - alpha)
                        frame_right_ang.append([np.sin(right_target), np.cos(right_target)]) # scheme 1: regress sin/cos
                self.world_xy[frame] = frame_wxy
                self.left_dir[frame] = frame_left_dir
                self.right_dir[frame] = frame_right_dir
                self.bev_angle[frame] = frame_bev_angle
                self.left_angle[frame] = frame_left_ang
                self.right_angle[frame] = frame_right_ang
                # NOTE(review): ``mk`` holds only the LAST car's mark for the
                # frame (and is undefined if a frame has no cars) — looks like
                # a per-frame scalar was intended; confirm against consumers.
                self.mark[frame] = mk
    def __getitem__(self, index):
        """Return the ``index``-th annotated frame as an 18-element tuple.

        Loads all camera images for the frame (applying ``self.transform``
        if set) and tensorizes the pre-computed per-frame annotations.
        The tuple order is relied upon by downstream collate/training code —
        do not reorder.
        """
        # Frames are keyed by id; index into the insertion order of the dict.
        frame = list(self.bev_bboxes.keys())[index]
        imgs = []
        for cam in range(self.num_cam):
            fpath = self.img_fpaths[cam][frame]
            img = Image.open(fpath).convert('RGB')
            if self.transform is not None:
                img = self.transform(img)
            imgs.append(img)
        imgs = torch.stack(imgs)
        bev_bboxes = torch.tensor(self.bev_bboxes[frame])
        left_bboxes = torch.tensor(self.left_bboxes[frame])
        right_bboxes = torch.tensor(self.right_bboxes[frame])
        left_dirs = torch.tensor(self.left_dir[frame])
        right_dirs = torch.tensor(self.right_dir[frame])
        left_angles = torch.tensor(self.left_angle[frame])
        right_angles = torch.tensor(self.right_angle[frame])
        bev_xy =torch.tensor(self.world_xy[frame])
        bev_angle = torch.tensor(self.bev_angle[frame])
        mark = self.mark[frame]
        left_orientation = torch.tensor(self.left_orientation[frame])
        left_conf = torch.tensor(self.left_conf[frame])
        right_orientation = torch.tensor(self.right_orientation[frame])
        right_conf = torch.tensor(self.right_conf[frame])
        return imgs, bev_xy, bev_angle, bev_bboxes, \
               left_bboxes, right_bboxes,\
               left_dirs, right_dirs, \
               left_angles, right_angles, \
               left_orientation, right_orientation, \
               left_conf, right_conf, \
               frame, \
               self.extrinsic_matrix, self.intrinsic_matrix, \
               mark
def __len__(self):
return len(self.bev_bboxes.keys())
def get_imgcoord2worldgrid_matrices(intrinsic_matrices, extrinsic_matrices,
                                    worldgrid2worldcoord_mat, num_cam=2):
    """Build, per camera, the 3x3 homography mapping image coordinates back
    onto the world grid.

    Args:
        intrinsic_matrices: per-camera 3x3 intrinsics, indexable by camera id.
        extrinsic_matrices: per-camera 3x4 [R|t] extrinsics, indexable by
            camera id.
        worldgrid2worldcoord_mat: 3x3 matrix from world-grid indices to world
            coordinates.
        num_cam (int): number of cameras. Defaults to 2, matching the value
            that was previously hard-coded.

    Returns:
        dict: camera id -> 3x3 image-coordinate-to-world-grid homography.
    """
    projection_matrices = {}
    for cam in range(num_cam):
        # Drop the z column of the extrinsics: points are assumed to lie on
        # the ground plane (z = 0), reducing the projection to a homography.
        worldcoord2imgcoord_mat = intrinsic_matrices[cam] @ np.delete(extrinsic_matrices[cam], 2, 1)
        worldgrid2imgcoord_mat = worldcoord2imgcoord_mat @ worldgrid2worldcoord_mat
        # Invert to map image coordinates back to the world grid. (The
        # original also multiplied by an identity "permutation" matrix,
        # which was an exact no-op and has been removed.)
        projection_matrices[cam] = np.linalg.inv(worldgrid2imgcoord_mat)
    return projection_matrices
if __name__ == "__main__":
    # Quick sanity-check script: iterate the dataset and histogram the
    # left/right view angle labels into 10-degree bins.
    data_path = os.path.expanduser('/home/dzc/Data/4carreal_0318blend')
    world_shape = Const.grid_size
    base = Robomaster_1_dataset(data_path, None, worldgrid_shape = world_shape)
    dataset = oftFrameDataset(base)
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
                                              num_workers=8, pin_memory=True, drop_last=True)
    # 36 bins of 10 degrees each.
    left_result = np.zeros((36,))
    right_result = np.zeros((36,))
    for batch_idx, data in enumerate(data_loader):
        # print(batch_idx)
        # NOTE(review): this unpacks 13 values, but __getitem__ returns an
        # 18-element tuple (orientation/conf/mark added later) — this loop
        # looks stale and would raise at runtime; confirm before relying on it.
        imgs, bev_xy, bev_angle, gt_bbox, gt_left_bbox, gt_right_bbox, left_dirs, right_dirs, left_sincos, right_sincos, frame, extrin, intrin = data
        for i in range(4):
            # Recover the angle from its (sin, cos) encoding; arctan alone is
            # only defined on (-pi/2, pi/2), so fix up the quadrant manually.
            sin = left_sincos.squeeze()[i, 0]
            cos = left_sincos.squeeze()[i, 1]
            angle = np.arctan(sin / cos)
            if (sin > 0 and cos < 0) or (sin < 0 and cos < 0):
                angle += np.pi
            if sin < 0 and cos > 0:
                angle += np.pi * 2
            angle = np.rad2deg(angle)
            left_result[int(angle.item() // 10)] += 1
            if frame in range(600, 700) and i == 0:
                print("------------------")
                print(frame)
                print(angle.item())
            # Same quadrant fix-up for the right view.
            sin = right_sincos.squeeze()[i, 0]
            cos = right_sincos.squeeze()[i, 1]
            angle = np.arctan(sin / cos)
            if (sin > 0 and cos < 0) or (sin < 0 and cos < 0):
                angle += np.pi
            if sin < 0 and cos > 0:
                angle += np.pi * 2
            angle = np.rad2deg(angle)
            right_result[int(angle.item() // 10)] += 1
    import matplotlib.mlab as mlab
    import matplotlib.pyplot as plt
    # Plot the left-view angle histogram.
    X = np.arange(0, 36)
    Y = left_result
    fig = plt.figure()
    plt.bar(X, Y, 0.4, color="green")
    plt.xlabel("X-axis")
    plt.ylabel("Y-axis")
    plt.title("left")
    # plt.show()
    # plt.savefig("/home/dzc/Desktop/CASIA/proj/mvRPN-det/images/left_result.jpg")
    # Plot the right-view angle histogram.
    X = np.arange(0, 36)
    Y = right_result
    fig = plt.figure()
    plt.bar(X, Y, 0.4, color="green")
    plt.xlabel("X-axis")
    plt.ylabel("Y-axis")
    plt.title("right")
    # plt.show()
    # plt.savefig("/home/dzc/Desktop/CASIA/proj/mvRPN-det/images/right_result.jpg")
| 41.235421 | 149 | 0.532317 |
841be38c5433212520c8969e101fb4e3711b76fa | 8,331 | py | Python | examples/CASAS_Single_Test/b1_lstm_raw.py | TinghuiWang/pyActLearn | d858136e86324fac51b0943765ef60bd405e31d1 | [
"BSD-3-Clause"
] | 3 | 2017-03-15T03:42:57.000Z | 2020-01-19T15:47:12.000Z | examples/CASAS_Single_Test/b1_lstm_raw.py | TinghuiWang/pyActLearn | d858136e86324fac51b0943765ef60bd405e31d1 | [
"BSD-3-Clause"
] | 2 | 2019-02-04T15:31:49.000Z | 2020-01-26T17:49:22.000Z | examples/CASAS_Single_Test/b1_lstm_raw.py | TinghuiWang/pyActLearn | d858136e86324fac51b0943765ef60bd405e31d1 | [
"BSD-3-Clause"
] | 3 | 2019-02-02T19:36:17.000Z | 2021-01-02T15:42:43.000Z | import os
import pickle
import logging
import argparse
import numpy as np
import tensorflow as tf
from datetime import datetime
from pyActLearn.CASAS.data import CASASData
from pyActLearn.CASAS.fuel import CASASFuel
from pyActLearn.learning.nn.lstm import LSTM
from pyActLearn.performance.record import LearningResult
from pyActLearn.performance import get_confusion_matrix
logger = logging.getLogger(__file__)
def training_and_test(token, train_data, test_data, num_classes, result, model, log_dir):
    """Train ``model`` on ``train_data``, evaluate on ``test_data`` and record
    the outcome in ``result``.

    Args:
        token (:obj:`str`): token representing this run; used as the record
            key and checkpoint file name.
        train_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of training feature and label
        test_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of testing feature and label
        num_classes (:obj:`int`): Number of classes
        result (:obj:`pyActLearn.performance.record.LearningResult`): LearningResult object to hold learning result
        model: LSTM model exposing ``fit``/``predict``/``predict_proba``.
        log_dir (:obj:`str`): directory for summaries and the checkpoint.

    Returns:
        tuple: (predicted labels, predicted class probabilities) on the
        test split.
    """
    # Vectorized one-hot encoding. The original looped in Python and indexed
    # ``labels.flatten()[i]``, re-flattening the whole label array on every
    # iteration (quadratic in the number of samples).
    train_labels = train_data[1].flatten()
    test_labels = test_data[1].flatten()
    train_y = np.zeros((train_labels.shape[0], num_classes))
    test_y = np.zeros((test_labels.shape[0], num_classes))
    train_y[np.arange(train_labels.shape[0]), train_labels] = 1
    test_y[np.arange(test_labels.shape[0]), test_labels] = 1
    model.fit(train_data[0], train_y, iter_num=8000, batch_size=100, criterion='monitor_based',
              summaries_dir=log_dir, test_x=test_data[0], test_y=test_y,
              summary_interval=100)
    # Test
    predicted_y = model.predict(test_data[0])
    predicted_proba = model.predict_proba(test_data[0])
    # Evaluate the test split and store the result. The first
    # ``model.num_steps`` labels are dropped to line up with the LSTM
    # predictions, which start after a full input window.
    confusion_matrix = get_confusion_matrix(num_classes=num_classes,
                                            label=test_data[1][model.num_steps:].flatten(),
                                            predicted=predicted_y)
    variable_file = os.path.join(log_dir, token + '_save.ckpt')
    # NOTE(review): ``saver`` is a module-level global created in __main__;
    # this function cannot be used without it.
    saver.save(model.sess, variable_file)
    result.add_record(variable_file, key=token, confusion_matrix=confusion_matrix)
    return predicted_y, predicted_proba
def load_and_test(token, test_data, num_classes, result, model):
    """Restore a previously trained model recorded under ``token`` and run
    prediction on ``test_data``.

    Args:
        token (:obj:`str`): token representing this run; key of the stored record.
        test_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of testing feature and label
        num_classes (:obj:`int`): Number of classes (unused here; kept for
            signature symmetry with ``training_and_test``).
        result (:obj:`pyActLearn.performance.record.LearningResult`): holds
            the checkpoint path under ``token``.
        model: LSTM model exposing ``predict``/``predict_proba``.

    Returns:
        tuple: (predicted labels, predicted class probabilities).
    """
    checkpoint_path = result.get_record_by_key(token)['model']
    saver.restore(model.sess, checkpoint_path)
    features = test_data[0]
    return model.predict(features), model.predict_proba(features)
if __name__ == '__main__':
    # CLI entry point: prepare the CASAS HDF5 dataset if needed, train (or
    # reload) an LSTM on fixed train/test week splits, back-annotate the
    # predictions, and persist the LearningResult.
    args_ok = False
    parser = argparse.ArgumentParser(description='Run LSTM on single resident CASAS datasets.')
    parser.add_argument('-d', '--dataset', help='Directory to original datasets')
    parser.add_argument('-o', '--output', help='Output folder')
    parser.add_argument('--week', type=int, metavar='N', help='Train on week N-1 and run on week N')
    parser.add_argument('--h5py', help='HDF5 dataset folder')
    args = parser.parse_args()
    # Default parameters
    log_filename = os.path.basename(__file__).split('.')[0] + \
                   '-%s.log' % datetime.now().strftime('%y%m%d_%H:%M:%S')
    # Setup output directory
    output_dir = args.output
    if output_dir is not None:
        output_dir = os.path.abspath(os.path.expanduser(output_dir))
        if os.path.exists(output_dir):
            # Found output_dir, check if it is a directory
            if not os.path.isdir(output_dir):
                exit('Output directory %s is found, but not a directory. Abort.' % output_dir)
        else:
            # Create directory
            os.makedirs(output_dir)
    else:
        output_dir = '.'
    log_filename = os.path.join(output_dir, log_filename)
    # Setup Logging as early as possible
    logging.basicConfig(level=logging.DEBUG,
                        format='[%(asctime)s] %(name)s:%(levelname)s:%(message)s',
                        handlers=[logging.FileHandler(log_filename),
                                  logging.StreamHandler()])
    # If dataset is specified, update h5py
    casas_data_dir = args.dataset
    if casas_data_dir is not None:
        casas_data_dir = os.path.abspath(os.path.expanduser(casas_data_dir))
        if not os.path.isdir(casas_data_dir):
            exit('CASAS dataset at %s does not exist. Abort.' % casas_data_dir)
    # Find h5py dataset first
    h5py_dir = args.h5py
    if h5py_dir is not None:
        h5py_dir = os.path.abspath(os.path.expanduser(h5py_dir))
    else:
        # Default location
        h5py_dir = os.path.join(output_dir, 'h5py')
    if os.path.exists(h5py_dir):
        if not os.path.isdir(h5py_dir):
            exit('h5py dataset location %s is not a directory. Abort.' % h5py_dir)
    if not CASASFuel.files_exist(h5py_dir):
        # Finish check and creating all directory needed - now load datasets
        if casas_data_dir is not None:
            casas_data = CASASData(path=casas_data_dir)
            casas_data.summary()
            # SVM needs to use statistical feature with per-sensor and normalization
            casas_data.populate_feature(method='raw', normalized=True, per_sensor=True)
            casas_data.export_hdf5(h5py_dir)
    casas_fuel = CASASFuel(dir_name=h5py_dir)
    # Prepare learning result; reuse an existing pickle only if it refers to
    # the same dataset.
    result_pkl_file = os.path.join(output_dir, 'result.pkl')
    result = None
    if os.path.isfile(result_pkl_file):
        f = open(result_pkl_file, 'rb')
        result = pickle.load(f)
        f.close()
        if result.data != h5py_dir:
            logger.error('Result pickle file found for different dataset %s' % result.data)
            exit('Cannot save learning result at %s' % result_pkl_file)
    else:
        result = LearningResult(name='LSTM', data=h5py_dir, mode='by_week')
    num_classes = casas_fuel.get_output_dims()
    # Open Fuel and get all splits
    split_list = casas_fuel.get_set_list()
    # If week is specified
    if args.week is not None:
        if 0 < args.week < len(split_list):
            split_list = [split_list[args.week - 1], split_list[args.week]]
    # Start training. NOTE(review): the week splits below are hard-coded and
    # ignore the --week selection made above — confirm if intentional.
    train_names = ('week 24', 'week 23', 'week 22', 'week 21')
    test_names = ('week 25', 'week 26', 'week 27', 'week 28')
    test_name = 'single_test'
    train_set = casas_fuel.get_dataset(train_names, load_in_memory=True)
    (train_set_data) = train_set.data_sources
    test_set = casas_fuel.get_dataset(test_names, load_in_memory=True)
    (test_set_data) = test_set.data_sources
    # Prepare Back Annotation
    fp_back_annotated = open(os.path.join(output_dir, 'back_annotated.txt'), 'w')
    fp_back_probability = open(os.path.join(output_dir, 'back_annotated_proba.txt'), 'w')
    output_log_dir = os.path.join(output_dir, 'log')
    if not os.path.isdir(output_log_dir):
        os.makedirs(output_log_dir)
    model = LSTM(casas_fuel.get_input_dims(), casas_fuel.get_output_dims(), num_units=200, num_steps=100)
    # ``saver`` and ``model.sess`` are module-level globals consumed by
    # training_and_test / load_and_test.
    saver = tf.train.Saver(max_to_keep=len(split_list))
    session = tf.Session()
    model.sess = session
    log_dir = output_log_dir
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    # run svm
    logger.info('Training on %s, Testing on %s' % (str(train_names), str(test_names)))
    # Train only if no record exists for this token; otherwise restore.
    if result.get_record_by_key(test_name) is None:
        prediction, prediction_proba = training_and_test(test_name, train_set_data, test_set_data, num_classes,
                                                         result, model=model, log_dir=log_dir)
    else:
        prediction, prediction_proba = load_and_test(test_name, test_set_data, num_classes, result, model=model)
    casas_fuel.back_annotate(fp_back_annotated, prediction=prediction, split_name=test_names)
    casas_fuel.back_annotate_with_proba(fp_back_probability, prediction_proba, split_name=test_names)
    train_name = test_name
    train_set_data = test_set_data
    fp_back_annotated.close()
    fp_back_probability.close()
    # Persist the learning result and export a spreadsheet summary.
    f = open(result_pkl_file, 'wb')
    pickle.dump(obj=result, file=f, protocol=pickle.HIGHEST_PROTOCOL)
    f.close()
    result.export_to_xlsx(os.path.join(output_dir, 'result.xlsx'))
| 47.067797 | 115 | 0.676269 |
014d547a123abf9b6488a703e8ea8c6a4ea903a1 | 8,995 | py | Python | pythonlib/amlrealtimeai/external/tensorflow_serving/apis/classification_pb2.py | mialiu149/aml-real-time-ai | 52bded7018df732932d0b9f20f0bf96470166d54 | [
"MIT"
] | 148 | 2018-05-07T16:14:37.000Z | 2022-02-19T17:13:06.000Z | pythonlib/amlrealtimeai/external/tensorflow_serving/apis/classification_pb2.py | rawnc/aml-real-time-ai | 52bded7018df732932d0b9f20f0bf96470166d54 | [
"MIT"
] | 6 | 2018-06-24T23:14:27.000Z | 2019-05-29T12:09:09.000Z | pythonlib/amlrealtimeai/external/tensorflow_serving/apis/classification_pb2.py | rawnc/aml-real-time-ai | 52bded7018df732932d0b9f20f0bf96470166d54 | [
"MIT"
] | 44 | 2018-05-08T23:32:22.000Z | 2022-03-29T21:30:50.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/classification.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ..apis import input_pb2 as tensorflow__serving_dot_apis_dot_input__pb2
from ..apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow_serving/apis/classification.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_pb=_b('\n,tensorflow_serving/apis/classification.proto\x12\x12tensorflow.serving\x1a#tensorflow_serving/apis/input.proto\x1a#tensorflow_serving/apis/model.proto\"%\n\x05\x43lass\x12\r\n\x05label\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\"=\n\x0f\x43lassifications\x12*\n\x07\x63lasses\x18\x01 \x03(\x0b\x32\x19.tensorflow.serving.Class\"T\n\x14\x43lassificationResult\x12<\n\x0f\x63lassifications\x18\x01 \x03(\x0b\x32#.tensorflow.serving.Classifications\"t\n\x15\x43lassificationRequest\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12(\n\x05input\x18\x02 \x01(\x0b\x32\x19.tensorflow.serving.Input\"R\n\x16\x43lassificationResponse\x12\x38\n\x06result\x18\x01 \x01(\x0b\x32(.tensorflow.serving.ClassificationResultB\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow__serving_dot_apis_dot_input__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CLASS = _descriptor.Descriptor(
name='Class',
full_name='tensorflow.serving.Class',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label', full_name='tensorflow.serving.Class.label', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='tensorflow.serving.Class.score', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=142,
serialized_end=179,
)
_CLASSIFICATIONS = _descriptor.Descriptor(
name='Classifications',
full_name='tensorflow.serving.Classifications',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='classes', full_name='tensorflow.serving.Classifications.classes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=181,
serialized_end=242,
)
_CLASSIFICATIONRESULT = _descriptor.Descriptor(
name='ClassificationResult',
full_name='tensorflow.serving.ClassificationResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='classifications', full_name='tensorflow.serving.ClassificationResult.classifications', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=244,
serialized_end=328,
)
_CLASSIFICATIONREQUEST = _descriptor.Descriptor(
name='ClassificationRequest',
full_name='tensorflow.serving.ClassificationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model_spec', full_name='tensorflow.serving.ClassificationRequest.model_spec', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input', full_name='tensorflow.serving.ClassificationRequest.input', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=330,
serialized_end=446,
)
_CLASSIFICATIONRESPONSE = _descriptor.Descriptor(
name='ClassificationResponse',
full_name='tensorflow.serving.ClassificationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='tensorflow.serving.ClassificationResponse.result', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=448,
serialized_end=530,
)
_CLASSIFICATIONS.fields_by_name['classes'].message_type = _CLASS
_CLASSIFICATIONRESULT.fields_by_name['classifications'].message_type = _CLASSIFICATIONS
_CLASSIFICATIONREQUEST.fields_by_name['model_spec'].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC
_CLASSIFICATIONREQUEST.fields_by_name['input'].message_type = tensorflow__serving_dot_apis_dot_input__pb2._INPUT
_CLASSIFICATIONRESPONSE.fields_by_name['result'].message_type = _CLASSIFICATIONRESULT
DESCRIPTOR.message_types_by_name['Class'] = _CLASS
DESCRIPTOR.message_types_by_name['Classifications'] = _CLASSIFICATIONS
DESCRIPTOR.message_types_by_name['ClassificationResult'] = _CLASSIFICATIONRESULT
DESCRIPTOR.message_types_by_name['ClassificationRequest'] = _CLASSIFICATIONREQUEST
DESCRIPTOR.message_types_by_name['ClassificationResponse'] = _CLASSIFICATIONRESPONSE
Class = _reflection.GeneratedProtocolMessageType('Class', (_message.Message,), dict(
DESCRIPTOR = _CLASS,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Class)
))
_sym_db.RegisterMessage(Class)
Classifications = _reflection.GeneratedProtocolMessageType('Classifications', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONS,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Classifications)
))
_sym_db.RegisterMessage(Classifications)
ClassificationResult = _reflection.GeneratedProtocolMessageType('ClassificationResult', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONRESULT,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationResult)
))
_sym_db.RegisterMessage(ClassificationResult)
ClassificationRequest = _reflection.GeneratedProtocolMessageType('ClassificationRequest', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONREQUEST,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationRequest)
))
_sym_db.RegisterMessage(ClassificationRequest)
ClassificationResponse = _reflection.GeneratedProtocolMessageType('ClassificationResponse', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONRESPONSE,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationResponse)
))
_sym_db.RegisterMessage(ClassificationResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
# @@protoc_insertion_point(module_scope)
| 35.98 | 803 | 0.773874 |
8387c8977ad60002f7e8a1736055cd3bfa576bd0 | 15,063 | py | Python | examples/seq2seq/seq2seq_chainerio.py | msakai/pfio | 29e5c6d6b15d33c7e7e468b42c58d24dae3d8cad | [
"MIT"
] | null | null | null | examples/seq2seq/seq2seq_chainerio.py | msakai/pfio | 29e5c6d6b15d33c7e7e468b42c58d24dae3d8cad | [
"MIT"
] | null | null | null | examples/seq2seq/seq2seq_chainerio.py | msakai/pfio | 29e5c6d6b15d33c7e7e468b42c58d24dae3d8cad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import datetime
from nltk.translate import bleu_score
import numpy
import progressbar
import six
import chainer
from chainer.backends import cuda
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
# PFIO import
import pfio
# pfio.set_root("hdfs")
# PFIO import end
UNK = 0
EOS = 1
def sequence_embed(embed, xs):
    """Embed a batch of variable-length sequences with one embed call.

    All sequences are concatenated, embedded in a single call (one matrix
    lookup instead of one per sequence), then split back into per-sequence
    chunks at the original boundaries.
    """
    lengths = [len(seq) for seq in xs]
    boundaries = numpy.cumsum(lengths[:-1])
    embedded = embed(F.concat(xs, axis=0))
    return F.split_axis(embedded, boundaries, 0)
class Seq2seq(chainer.Chain):
    """Encoder-decoder translation model built from stacked NStepLSTMs.

    Source and target tokens are embedded into ``n_units``-dimensional
    vectors; the encoder's final state initializes the decoder, and a
    linear layer maps decoder outputs to target-vocabulary logits.
    """

    def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):
        super(Seq2seq, self).__init__()
        with self.init_scope():
            self.embed_x = L.EmbedID(n_source_vocab, n_units)
            self.embed_y = L.EmbedID(n_target_vocab, n_units)
            # Dropout ratio 0.1 on both LSTM stacks.
            self.encoder = L.NStepLSTM(n_layers, n_units, n_units, 0.1)
            self.decoder = L.NStepLSTM(n_layers, n_units, n_units, 0.1)
            self.W = L.Linear(n_units, n_target_vocab)

        self.n_layers = n_layers
        self.n_units = n_units

    def forward(self, xs, ys):
        """Compute the softmax cross-entropy loss for a minibatch.

        Args:
            xs: list of source id arrays (one per sentence).
            ys: list of target id arrays (one per sentence).

        Returns:
            Scalar loss variable; also reports 'loss' and 'perp'.
        """
        # Reverse each source sequence (standard seq2seq trick).
        xs = [x[::-1] for x in xs]

        eos = self.xp.array([EOS], numpy.int32)
        # Decoder input starts with EOS; decoder target ends with EOS.
        ys_in = [F.concat([eos, y], axis=0) for y in ys]
        ys_out = [F.concat([y, eos], axis=0) for y in ys]

        # Both xs and ys_in are lists of arrays.
        exs = sequence_embed(self.embed_x, xs)
        eys = sequence_embed(self.embed_y, ys_in)

        batch = len(xs)
        # None represents a zero vector in an encoder.
        hx, cx, _ = self.encoder(None, None, exs)
        _, _, os = self.decoder(hx, cx, eys)

        # It is faster to concatenate data before calculating loss
        # because only one matrix multiplication is called.
        concat_os = F.concat(os, axis=0)
        concat_ys_out = F.concat(ys_out, axis=0)
        loss = F.sum(F.softmax_cross_entropy(
            self.W(concat_os), concat_ys_out, reduce='no')) / batch

        chainer.report({'loss': loss.data}, self)
        n_words = concat_ys_out.shape[0]
        perp = self.xp.exp(loss.data * batch / n_words)
        chainer.report({'perp': perp}, self)
        return loss

    def translate(self, xs, max_length=100):
        """Greedy-decode translations for the source sequences ``xs``.

        Args:
            xs: list of source id arrays.
            max_length (int): maximum number of tokens to emit per sentence.

        Returns:
            list of numpy arrays with predicted target ids (EOS stripped).
        """
        batch = len(xs)
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            xs = [x[::-1] for x in xs]
            exs = sequence_embed(self.embed_x, xs)
            h, c, _ = self.encoder(None, None, exs)
            ys = self.xp.full(batch, EOS, numpy.int32)
            result = []
            for i in range(max_length):
                eys = self.embed_y(ys)
                eys = F.split_axis(eys, batch, 0)
                h, c, ys = self.decoder(h, c, eys)
                cys = F.concat(ys, axis=0)
                wy = self.W(cys)
                # Greedy: pick the arg-max token at every step.
                ys = self.xp.argmax(wy.data, axis=1).astype(numpy.int32)
                result.append(ys)

            # Using `xp.concatenate(...)` instead of `xp.stack(result)` here to
            # support NumPy 1.9.
            result = cuda.to_cpu(
                self.xp.concatenate([self.xp.expand_dims(x, 0) for x in result]).T)

        # Remove everything from the first EOS tag onwards.
        outs = []
        for y in result:
            inds = numpy.argwhere(y == EOS)
            if len(inds) > 0:
                y = y[:inds[0, 0]]
            outs.append(y)
        return outs
def convert(batch, device):
    """Convert a minibatch of (source, target) pairs into model inputs.

    Returns a keyword dict matching ``Seq2seq.forward``. On GPU the arrays
    are concatenated, transferred in a single copy, and split back on the
    device — faster than one host-to-device transfer per sequence.
    """
    def transfer(arrays):
        if device is None:
            return arrays
        if device < 0:
            # CPU target: move arrays one by one.
            return [chainer.dataset.to_device(device, a) for a in arrays]
        # GPU target: one bulk transfer, then split at sequence boundaries.
        xp = cuda.cupy.get_array_module(*arrays)
        flat = xp.concatenate(arrays, axis=0)
        sections = numpy.cumsum([len(a) for a in arrays[:-1]], dtype=numpy.int32)
        flat_dev = chainer.dataset.to_device(device, flat)
        return cuda.cupy.split(flat_dev, sections)

    return {'xs': transfer([s for s, _ in batch]),
            'ys': transfer([t for _, t in batch])}
class CalculateBleu(chainer.training.Extension):
    """Trainer extension that reports corpus-level BLEU on a test set.

    Translates ``test_data`` in minibatches with ``model.translate`` and
    reports the smoothed corpus BLEU under ``key``.
    """

    trigger = 1, 'epoch'
    priority = chainer.training.PRIORITY_WRITER

    def __init__(
            self, model, test_data, key, batch=100, device=-1, max_length=100):
        self.model = model
        self.test_data = test_data
        self.key = key
        self.batch = batch
        self.device = device
        self.max_length = max_length

    def __call__(self, trainer):
        # Bug fix: the Trainer invokes extensions via __call__, and the base
        # Extension.__call__ raises NotImplementedError. The original class
        # only defined forward(), so the extension never actually ran.
        # forward() is kept (and delegated to) for backward compatibility.
        self.forward(trainer)

    def forward(self, trainer):
        """Translate the whole test set and report its BLEU score."""
        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            for i in range(0, len(self.test_data), self.batch):
                sources, targets = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in targets])

                sources = [
                    chainer.dataset.to_device(self.device, x) for x in sources]
                ys = [y.tolist()
                      for y in self.model.translate(sources, self.max_length)]
                hypotheses.extend(ys)

            bleu = bleu_score.corpus_bleu(
                references, hypotheses,
                smoothing_function=bleu_score.SmoothingFunction().method1)
        chainer.report({self.key: bleu})
def count_lines(path):
    """Return the number of lines in the file at ``path`` (opened via pfio)."""
    with pfio.open(path, mode='r') as f:
        return sum(1 for _ in f)
def load_vocabulary(path):
    """Load a vocabulary file (one word per line) as a word -> id mapping.

    Ids 0 and 1 are reserved for the special tokens ``<UNK>`` and ``<EOS>``;
    regular words therefore start at id 2.
    """
    word_ids = {}
    with pfio.open(path, mode='r') as f:
        for index, line in enumerate(f):
            word_ids[line.strip()] = index + 2
    # Special tokens override any identically-spelled vocabulary entries.
    word_ids['<UNK>'] = 0
    word_ids['<EOS>'] = 1
    return word_ids
def load_data(vocabulary, path):
    """Load a tokenized corpus file as a list of word-id arrays.

    Args:
        vocabulary (dict): word -> id mapping; out-of-vocabulary words map
            to the module-level ``UNK`` id.
        path (str): corpus file path, opened through pfio.

    Returns:
        list of numpy.ndarray: one int32 array of word ids per line.
    """
    n_lines = count_lines(path)
    bar = progressbar.ProgressBar()
    data = []
    print('loading...: %s' % path)
    # PFIO add
    with pfio.open(path, mode='r') as f:
    # PFIO add end
        for line in bar(f, max_value=n_lines):
            words = line.strip().split()
            array = numpy.array([vocabulary.get(w, UNK)
                                 for w in words], numpy.int32)
            data.append(array)
    return data
def load_data_using_dataset_api(
        src_vocab, src_path, target_vocab, target_path, filter_func):
    """Build a lazily-loaded parallel corpus via chainer's TextDataset API.

    Unlike ``load_data``, this does not read the whole corpus into memory;
    lines are read and converted to id arrays on access.

    Args:
        src_vocab (dict): source word -> id mapping.
        src_path (str): source corpus file path.
        target_vocab (dict): target word -> id mapping.
        target_path (str): target corpus file path.
        filter_func: callable(source_line, target_line) -> bool used to
            drop sentence pairs (e.g. by length).

    Returns:
        chainer.datasets.TransformDataset yielding pairs of numpy.int32
        id arrays (source_ids, target_ids).
    """

    def _transform_line(vocabulary, line):
        # Map each whitespace-separated token to its id, falling back to UNK.
        words = line.strip().split()
        return numpy.array(
            [vocabulary.get(w, UNK) for w in words], numpy.int32)

    def _transform(example):
        source, target = example
        return (
            _transform_line(src_vocab, source),
            _transform_line(target_vocab, target)
        )

    return chainer.datasets.TransformDataset(
        chainer.datasets.TextDataset(
            [src_path, target_path],
            encoding='utf-8',
            filter_func=filter_func
        ), _transform)
def calculate_unknown_ratio(data, unk=0):
    """Return the fraction of tokens in ``data`` equal to the unknown id.

    Args:
        data (list of numpy.ndarray): id arrays, one per sentence.
        unk (int): id treated as the unknown token. Defaults to 0, the value
            of the module-level ``UNK`` constant (kept as a literal so the
            function is self-contained).

    Returns:
        float: unknown-token ratio in [0, 1]. Returns 0.0 for empty input
        (the original raised ZeroDivisionError on an empty dataset).
    """
    unknown = sum((s == unk).sum() for s in data)
    total = sum(s.size for s in data)
    return unknown / total if total else 0.0
def main():
parser = argparse.ArgumentParser(description='Chainer example: seq2seq')
parser.add_argument('SOURCE', help='source sentence list')
parser.add_argument('TARGET', help='target sentence list')
parser.add_argument('SOURCE_VOCAB', help='source vocabulary file')
parser.add_argument('TARGET_VOCAB', help='target vocabulary file')
parser.add_argument('--validation-source',
help='source sentence list for validation')
parser.add_argument('--validation-target',
help='target sentence list for validation')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='number of sentence pairs in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--resume', '-r', default='',
help='resume the training from snapshot')
parser.add_argument('--save', '-s', default='',
help='save a snapshot of the training')
parser.add_argument('--unit', '-u', type=int, default=1024,
help='number of units')
parser.add_argument('--layer', '-l', type=int, default=3,
help='number of layers')
parser.add_argument('--use-dataset-api', default=False,
action='store_true',
help='use TextDataset API to reduce CPU memory usage')
parser.add_argument('--min-source-sentence', type=int, default=1,
help='minimium length of source sentence')
parser.add_argument('--max-source-sentence', type=int, default=50,
help='maximum length of source sentence')
parser.add_argument('--min-target-sentence', type=int, default=1,
help='minimium length of target sentence')
parser.add_argument('--max-target-sentence', type=int, default=50,
help='maximum length of target sentence')
parser.add_argument('--log-interval', type=int, default=200,
help='number of iteration to show log')
parser.add_argument('--validation-interval', type=int, default=4000,
help='number of iteration to evlauate the model '
'with validation dataset')
parser.add_argument('--out', '-o', default='result',
help='directory to output the result')
args = parser.parse_args()
# Load pre-processed dataset
print('[{}] Loading dataset... (this may take several minutes)'.format(
datetime.datetime.now()))
source_ids = load_vocabulary(args.SOURCE_VOCAB)
target_ids = load_vocabulary(args.TARGET_VOCAB)
if args.use_dataset_api:
# By using TextDataset, you can avoid loading whole dataset on memory.
# This significantly reduces the host memory usage.
def _filter_func(s, t):
sl = len(s.strip().split()) # number of words in source line
tl = len(t.strip().split()) # number of words in target line
return (
args.min_source_sentence <= sl <= args.max_source_sentence and
args.min_target_sentence <= tl <= args.max_target_sentence)
train_data = load_data_using_dataset_api(
source_ids, args.SOURCE,
target_ids, args.TARGET,
_filter_func,
)
else:
# Load all records on memory.
train_source = load_data(source_ids, args.SOURCE)
train_target = load_data(target_ids, args.TARGET)
assert len(train_source) == len(train_target)
train_data = [
(s, t)
for s, t in six.moves.zip(train_source, train_target)
if (args.min_source_sentence <= len(s) <= args.max_source_sentence
and
args.min_target_sentence <= len(t) <= args.max_target_sentence)
]
print('[{}] Dataset loaded.'.format(datetime.datetime.now()))
if not args.use_dataset_api:
# Skip printing statistics when using TextDataset API, as it is slow.
train_source_unknown = calculate_unknown_ratio(
[s for s, _ in train_data])
train_target_unknown = calculate_unknown_ratio(
[t for _, t in train_data])
print('Source vocabulary size: %d' % len(source_ids))
print('Target vocabulary size: %d' % len(target_ids))
print('Train data size: %d' % len(train_data))
print('Train source unknown ratio: %.2f%%' % (
train_source_unknown * 100))
print('Train target unknown ratio: %.2f%%' % (
train_target_unknown * 100))
target_words = {i: w for w, i in target_ids.items()}
source_words = {i: w for w, i in source_ids.items()}
# Setup model
model = Seq2seq(args.layer, len(source_ids), len(target_ids), args.unit)
if args.gpu >= 0:
chainer.backends.cuda.get_device(args.gpu).use()
model.to_gpu(args.gpu)
# Setup optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Setup iterator
train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
# Setup updater and trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=convert, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.LogReport(
trigger=(args.log_interval, 'iteration')))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/perp', 'validation/main/perp', 'validation/main/bleu',
'elapsed_time']),
trigger=(args.log_interval, 'iteration'))
if args.validation_source and args.validation_target:
test_source = load_data(source_ids, args.validation_source)
test_target = load_data(target_ids, args.validation_target)
assert len(test_source) == len(test_target)
test_data = list(six.moves.zip(test_source, test_target))
test_data = [(s, t) for s, t in test_data if 0 < len(s) and 0 < len(t)]
test_source_unknown = calculate_unknown_ratio(
[s for s, _ in test_data])
test_target_unknown = calculate_unknown_ratio(
[t for _, t in test_data])
print('Validation data: %d' % len(test_data))
print('Validation source unknown ratio: %.2f%%' %
(test_source_unknown * 100))
print('Validation target unknown ratio: %.2f%%' %
(test_target_unknown * 100))
@chainer.training.make_extension()
def translate(trainer):
source, target = test_data[numpy.random.choice(len(test_data))]
result = model.translate([model.xp.array(source)])[0]
source_sentence = ' '.join([source_words[x] for x in source])
target_sentence = ' '.join([target_words[y] for y in target])
result_sentence = ' '.join([target_words[y] for y in result])
print('# source : ' + source_sentence)
print('# result : ' + result_sentence)
print('# expect : ' + target_sentence)
trainer.extend(
translate, trigger=(args.validation_interval, 'iteration'))
trainer.extend(
CalculateBleu(
model, test_data, 'validation/main/bleu', device=args.gpu),
trigger=(args.validation_interval, 'iteration'))
print('start training')
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if args.save:
# Save a snapshot
chainer.serializers.save_npz(args.save, trainer)
# Entry point: run the training pipeline only when executed as a script.
if __name__ == '__main__':
    main()
| 37.563591 | 79 | 0.599681 |
feadf7b861a22f26502d6baa25411397d1963c2b | 7,580 | py | Python | ProjectCode/Classifier.py | MOWaqar/MSc-Project | 14464da68cc112038714eb5c71a29e2a386bce22 | [
"MIT"
] | null | null | null | ProjectCode/Classifier.py | MOWaqar/MSc-Project | 14464da68cc112038714eb5c71a29e2a386bce22 | [
"MIT"
] | null | null | null | ProjectCode/Classifier.py | MOWaqar/MSc-Project | 14464da68cc112038714eb5c71a29e2a386bce22 | [
"MIT"
] | null | null | null | """
AUTHOR: M O WAQAR 20/08/2018
This script runs three classifiers
1. Logistic Regression
2. SGD(LinearSVC)
3. LightGBM
They are run on the 17 datasets prepared with the DataPreperation.py script.
****NOTE: If you have not executed 'DataPreperation.py' run that first and wait for it to finish before running this script.
********OUTPUT of this script is in the console in following format:
LR Results:
Classifier could not run on the data set. Put X instead of score.
Classifier could not run on the data set. Put X instead of score.
Classifier could not run on the data set. Put X instead of score.
File: DataSetVersion2_a.csv, AUC score : 0.607
File: DataSetVersion2_b.csv, AUC score : 0.599
.
.
.
Logistic Regression - done in 499s
LinearSVC Results:
Classifier could not run on the data set. Put X instead of score.
Classifier could not run on the data set. Put X instead of score.
Classifier could not run on the data set. Put X instead of score.
File: DataSetVersion2_a.csv, AUC score : 0.545
File: DataSetVersion2_b.csv, AUC score : 0.534
.
.
.
Linear SVC - done in 12653s
LightGBM Results:
File: DataSetVersion1_a.csv, AUC score : 0.750
File: DataSetVersion1_b.csv, AUC score : 0.757
File: DataSetVersion1_c.csv, AUC score : 0.758
.
.
.
"""
#Import Packages
import time
from contextlib import contextmanager
import gc
import UtilityFunctions as utils
import os
# numpy and pandas for data manipulation
import numpy as np
import pandas as pd
# Suppress warnings
import warnings
warnings.filterwarnings('ignore')
#Logistic regression
from sklearn.linear_model import LogisticRegression as LR
#SGD Classifier
from sklearn.linear_model import SGDClassifier
#LGBM Classifier
from lightgbm import LGBMClassifier
#ROC AUC performance metric
from sklearn.metrics import roc_auc_score
#Cross-Valdiation with stratified sampling
from sklearn.model_selection import StratifiedKFold
# Print time logs in console
@contextmanager
def timer(title):
    """Context manager that reports the wall-clock time spent in its block.

    On exit it prints "<title> - done in <seconds>s" to stdout.
    """
    start = time.time()
    yield
    elapsed = time.time() - start
    print("{} - done in {:.0f}s".format(title, elapsed))
#Read file from output data folder path.
##Inputs: file name and folder path to location of the file
##Returns: data frame with features and labels column
def setup_input(filename, dataFolder):
    """Load one prepared dataset CSV and split off its label column.

    Args:
        filename: Name of the CSV file to read.
        dataFolder: Directory that contains the file.

    Returns:
        (df, labels): feature DataFrame with bookkeeping columns removed,
        and the 'TARGET' label Series popped from it.

    Raises:
        NotADirectoryError: if the file does not exist (kept as-is for
        backward compatibility with existing callers).
    """
    # Build the path portably instead of concatenating with os.sep.
    path = os.path.join(dataFolder, filename)
    if not os.path.exists(path):
        print("Input file not found. Make sure files and their path exist. Try running DataPreperation Script first.")
        raise NotADirectoryError
    df = pd.read_csv(path)
    labels = df.pop('TARGET')
    # Drop index/ID columns that must not be used as features; handle both
    # in one call instead of two near-identical if-blocks.
    drop_cols = [c for c in ('Unnamed: 0', 'SK_ID_CURR') if c in df.columns]
    if drop_cols:
        df = df.drop(columns=drop_cols)
    return df, labels
#Logistic Regression function with 10 fold stratified CV
#Inputs: Data frame of features and labels column
#Outputs: Mean ROC-AUC score
def logistic_regression(df, labels):
    """10-fold stratified CV with L2 logistic regression (C=0.001).

    Returns:
        Mean ROC-AUC over the ten validation folds.
    """
    splitter = StratifiedKFold(n_splits=10)
    fold_scores = []
    for train_idx, test_idx in splitter.split(df, labels):
        model = LR(C=0.001).fit(df.iloc[train_idx], labels.iloc[train_idx])
        proba = model.predict_proba(df.iloc[test_idx])[:, 1]
        fold_scores.append(roc_auc_score(labels[test_idx], proba))
    return np.mean(fold_scores)
#Linear SVC classifier with SGD with 10 fold stratified CV
#Inputs: Data frame of features and labels column
#Outputs:ROC-AUC score
def svclassifier(df, labels):
    """10-fold stratified CV with a hinge-loss SGD classifier (linear SVC).

    Out-of-fold decision-function values are pooled over all folds and
    scored once, so a single ROC-AUC over the full dataset is returned.
    """
    splitter = StratifiedKFold(n_splits=10)
    oof_scores = np.zeros(df.shape[0])
    for train_idx, test_idx in splitter.split(df, labels):
        model = SGDClassifier(alpha=5.5, class_weight='balanced',
                              loss='hinge', max_iter=1000, n_jobs=-1)
        model.fit(df.iloc[train_idx], labels.iloc[train_idx])
        oof_scores[test_idx] = np.array(model.decision_function(df.iloc[test_idx]))
    return roc_auc_score(labels, oof_scores)
#Linear LGBM classifier with 10 fold stratified CV
#Inputs: Data frame of features and labels column
#Outputs:ROC-AUC score
def LGBM_Classifier(df, labels):
    """10-fold stratified CV with LightGBM and AUC early stopping.

    Out-of-fold probabilities (at each fold's best iteration) are pooled
    and scored once; the single ROC-AUC over the full dataset is returned.
    """
    oof_proba = np.zeros(df.shape[0])
    splitter = StratifiedKFold(n_splits=10)
    for train_idx, test_idx in splitter.split(df, labels):
        booster = LGBMClassifier(n_jobs=-1, silent=True, )
        booster.fit(
            df.iloc[train_idx], labels.iloc[train_idx],
            eval_set=[(df.iloc[train_idx], labels.iloc[train_idx]),
                      (df.iloc[test_idx], labels.iloc[test_idx])],
            eval_metric='auc', verbose=False, early_stopping_rounds=200)
        oof_proba[test_idx] = booster.predict_proba(
            df.iloc[test_idx], num_iteration=booster.best_iteration_)[:, 1]
    return roc_auc_score(labels, oof_proba)
##Main function executed when the script is called.
def main():
    """Run LR, LinearSVC and LightGBM over the 17 prepared datasets.

    For every dataset the CV ROC-AUC is printed; datasets a classifier
    cannot handle are reported with a placeholder message instead.
    """
    # Initialize input and output folder paths.
    input_folder, output_folder = utils.Initialize_Folder_Paths()
    # List of input files produced by the DataPreperation script.
    file_names = ['DataSetVersion1_a.csv', 'DataSetVersion1_b.csv', 'DataSetVersion1_c.csv', 'DataSetVersion2_a.csv',
                  'DataSetVersion2_b.csv', 'DataSetVersion2_c.csv', 'DataSetVersion3_a.csv', 'DataSetVersion3_b.csv',
                  'DataSetVersion3_c.csv', 'DataSetVersion4_a.csv', 'DataSetVersion4_b.csv', 'DataSetVersion4_c.csv',
                  'DataSetVersion5_a.csv', 'DataSetVersion5_b.csv', 'DataSetVersion5_c.csv', 'DataSetVersion6_a.csv',
                  'DataSetVersion6_b.csv']
    # The three runs differed only in the classifier function and the labels
    # used for the timer/banner, so they share one loop implementation.
    with timer("Logistic Regression"):
        _run_classifier_on_all("LR", logistic_regression, file_names, output_folder)
    with timer("Linear SVC"):
        _run_classifier_on_all("LinearSVC", svclassifier, file_names, output_folder)
    with timer("LightGBM"):
        _run_classifier_on_all("LightGBM", LGBM_Classifier, file_names, output_folder)


def _run_classifier_on_all(header, classify, file_names, data_folder):
    """Evaluate one classifier on every dataset and print its AUC score.

    Args:
        header: Label used in the "<header> Results:" banner.
        classify: Callable (df, labels) -> ROC-AUC score.
        file_names: CSV file names to evaluate.
        data_folder: Folder containing the CSV files.
    """
    print('\n{} Results:\n'.format(header))
    for filename in file_names:
        input_df, labels = setup_input(filename, data_folder)
        try:
            print('File: {}, AUC score : {:0.3f}'.format(filename, classify(input_df, labels)))
        # Was a bare `except:`; narrowed so KeyboardInterrupt is not swallowed.
        except Exception:
            print('Classifier could not run on the data set. Put X instead of score.')
            continue
        finally:
            # Release the (large) dataset before loading the next one.
            del input_df, labels
            gc.collect()
if __name__ == "__main__":
with timer("Calssification code"):
main() | 35.754717 | 124 | 0.675989 |
f881d098afd10256c3bd2b4021eb375ca8bd1415 | 97 | py | Python | week3/week3 2.py | Kevinskwk/ILP | 7a3925a22232d486a5a8f5df8255f9297fd73fec | [
"MIT"
] | 1 | 2020-07-09T23:10:56.000Z | 2020-07-09T23:10:56.000Z | week3/week3 2.py | Kevinskwk/ILP | 7a3925a22232d486a5a8f5df8255f9297fd73fec | [
"MIT"
] | null | null | null | week3/week3 2.py | Kevinskwk/ILP | 7a3925a22232d486a5a8f5df8255f9297fd73fec | [
"MIT"
] | null | null | null | listone = input(":")
# Build `listtwo` as the reverse of the sequence read into `listone` above,
# by prepending each element to the accumulator.
# NOTE(review): Python 2 syntax (bare `print` statement); will not run on 3.x.
listtwo = []
for i in listone:
    listtwo=[i]+listtwo
print listtwo
| 13.857143 | 24 | 0.608247 |
ed6e1480b9283037806756275a6782837e5ed3bb | 4,430 | py | Python | rl/utils.py | asokraju/kristools | d46aa8705b16bcabbbcfc85cc05e2e34f45823d2 | [
"MIT"
] | 3 | 2020-08-10T03:58:11.000Z | 2020-11-06T04:47:21.000Z | rl/utils.py | asokraju/kristools | d46aa8705b16bcabbbcfc85cc05e2e34f45823d2 | [
"MIT"
] | 2 | 2020-08-20T05:46:39.000Z | 2021-01-16T17:54:26.000Z | rl/utils.py | asokraju/kristools | d46aa8705b16bcabbbcfc85cc05e2e34f45823d2 | [
"MIT"
] | 1 | 2021-01-15T20:31:26.000Z | 2021-01-15T20:31:26.000Z | import numpy as np
from collections import deque
#import yaml
import pickle
import os
from datetime import datetime
from shutil import copyfile
import random
import numbers
import functools
import operator
#OU Noise
# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is
# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckActionNoise:
    """Temporally correlated exploration noise (Ornstein-Uhlenbeck process).

    Each call advances one Euler-Maruyama step of
    dx = theta * (mu - x) * dt + sigma * sqrt(dt) * dW.
    """

    def __init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):
        self.theta = theta
        self.mu = mu
        self.sigma = sigma
        self.dt = dt
        self.x0 = x0
        self.reset()

    def __call__(self):
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        self.x_prev = self.x_prev + drift + diffusion
        return self.x_prev

    def reset(self):
        """Restart the process at x0 (or at zeros shaped like mu)."""
        if self.x0 is None:
            self.x_prev = np.zeros_like(self.mu)
        else:
            self.x_prev = self.x0

    def __repr__(self):
        return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
# Replay buffer
# Taken from https://github.com/pemami4911/deep-rl/blob/master/ddpg/replay_buffer.py
from collections import deque
import random
import numpy as np
class ReplayBuffer(object):
    """FIFO experience replay buffer with uniform random sampling.

    The right side of the deque holds the most recent experiences; once
    `buffer_size` entries are stored, the oldest entry is evicted.
    """

    def __init__(self, buffer_size, random_seed=123):
        self.buffer_size = buffer_size
        self.count = 0
        self.buffer = deque()
        # Seeds the module-level RNG used by sample_batch().
        random.seed(random_seed)

    def add(self, s, a, r, t, s2):
        """Store one (state, action, reward, terminal, next_state) tuple."""
        experience = (s, a, r, t, s2)
        if self.count >= self.buffer_size:
            self.buffer.popleft()
        else:
            self.count += 1
        self.buffer.append(experience)

    def size(self):
        return self.count

    def sample_batch(self, batch_size):
        """Sample up to `batch_size` experiences; stack each field into arrays.

        Returns:
            (s, a, r, t, s2) arrays, each of length min(size, batch_size).
        """
        k = min(self.count, batch_size)
        batch = random.sample(self.buffer, k)
        columns = [np.array([exp[i] for exp in batch]) for i in range(5)]
        return tuple(columns)

    def clear(self):
        self.buffer.clear()
        self.count = 0
class Scaler(object):
    """Running mean/stddev tracker used to normalise observations.

    get() returns (scale, offset) where offset is the running mean and
    scale = 1 / (stddev + 0.1) / 3, i.e. 3x stddev maps to roughly +/- 1.
    """

    def __init__(self, obs_dim):
        """
        Args:
            obs_dim: dimension of axis=1 of the update batches.
        """
        self.vars = np.zeros(obs_dim)
        self.means = np.zeros(obs_dim)
        self.m = 0
        self.n = 0
        self.first_pass = True

    def update(self, x):
        """Fold a batch x of shape (N, obs_dim) into the running statistics.

        Uses the exact pooled mean/variance formulas, so the result equals
        computing mean/var over all data seen so far.
        """
        if self.first_pass:
            self.means = np.mean(x, axis=0)
            self.vars = np.var(x, axis=0)
            self.m = x.shape[0]
            self.first_pass = False
            return
        batch_n = x.shape[0]
        batch_mean = np.mean(x, axis=0)
        batch_var = np.var(x, axis=0)
        total = self.m + batch_n
        pooled_mean = (self.means * self.m + batch_mean * batch_n) / total
        pooled_sq = (self.m * (self.vars + np.square(self.means))
                     + batch_n * (batch_var + np.square(batch_mean))) / total
        # Clip occasional tiny negatives from floating-point cancellation.
        self.vars = np.maximum(0.0, pooled_sq - np.square(pooled_mean))
        self.means = pooled_mean
        self.m = total

    def get(self):
        """ returns 2-tuple: (scale, offset) """
        return 1 / (np.sqrt(self.vars) + 0.1) / 3, self.means
def pprint_test(x):
print(x) | 30.979021 | 101 | 0.566366 |
dabdef00e6f04522990c3b9c2dc708934f720d04 | 778 | py | Python | tests/test_blob.py | kwohlfahrt/blob | d837e988bec76a58e0752fb721cdc00aac9af49e | [
"MIT"
] | 9 | 2018-02-22T09:05:03.000Z | 2020-06-25T04:21:52.000Z | tests/test_blob.py | kwohlfahrt/blob | d837e988bec76a58e0752fb721cdc00aac9af49e | [
"MIT"
] | 3 | 2017-10-01T16:07:32.000Z | 2018-10-22T10:50:26.000Z | tests/test_blob.py | kwohlfahrt/blob | d837e988bec76a58e0752fb721cdc00aac9af49e | [
"MIT"
] | 7 | 2016-08-05T12:46:29.000Z | 2021-03-20T12:02:29.000Z | from unittest import TestCase, skip
import numpy as np
import blob
from itertools import repeat
class TestLocalMinima(TestCase):
    """Tests for blob.localMinima (local-minimum detection in a 2D array)."""

    def test_minima(self):
        """Only the single strict minimum inside a shallow basin is reported."""
        # 7x7 plateau of 2s, a 3x3 basin of 1s, and one 0 at (3, 4).
        data = np.ones((7, 7)) * 2
        data[2:5, 3:6] = 1
        data[3, 4] = 0
        # Diagonal values considered peaks
        expected = [[3, 4]]
        np.testing.assert_equal(blob.localMinima(data, 2), expected)
class TestDetection(TestCase):
    """Tests for blob.findBlobs (multi-scale blob detection)."""

    def test_blobs(self):
        """Two bright spots are detected at their expected coordinates."""
        image = np.zeros((128, 128), dtype='float')
        image[20, 50] = 1
        image[70:72, 10:12] = 1
        peaks = [[20, 50], [71, 11]]
        # The first result column (presumably the scale — confirm against
        # blob.findBlobs) is dropped; only spatial coordinates are compared.
        blobs = blob.findBlobs(image, scales=(1, 3), threshold=0.1)[:, 1:]
        np.testing.assert_equal(blobs, peaks)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    from unittest import main
    main()
| 27.785714 | 74 | 0.602828 |
e14a09338591877cb7e8cee2723008e0d7933db1 | 360 | py | Python | quiz/forms.py | white-panda-afk/Quizzy-official | 1be4a1a50c49efaae437f089d0d5c7ef9bb3ddff | [
"MIT"
] | 8 | 2020-11-02T10:50:02.000Z | 2021-11-03T16:56:19.000Z | quiz/forms.py | white-panda-afk/Quizzy-official | 1be4a1a50c49efaae437f089d0d5c7ef9bb3ddff | [
"MIT"
] | 11 | 2020-12-09T17:31:31.000Z | 2021-02-17T06:03:20.000Z | quiz/forms.py | white-panda-afk/Quizzy-official | 1be4a1a50c49efaae437f089d0d5c7ef9bb3ddff | [
"MIT"
] | 7 | 2021-01-16T19:39:41.000Z | 2021-01-29T10:17:06.000Z | from django import forms
from django.contrib.auth import login,authenticate
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
#form for registration
class RegisterForm(UserCreationForm):
email=forms.EmailField()
class Meta:
model=User
fields=["username","email","password1","password2"] | 32.727273 | 59 | 0.769444 |
c405776225680a07e73f13be49cfa288c73ebba9 | 5,722 | py | Python | src/spring/azext_spring/vendored_sdks/appplatform/v2022_05_01_preview/operations/_operations.py | Caoxuyang/azure-cli-extensions | d2011261f29033cb31a1064256727d87049ab423 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | src/spring/azext_spring/vendored_sdks/appplatform/v2022_05_01_preview/operations/_operations.py | Caoxuyang/azure-cli-extensions | d2011261f29033cb31a1064256727d87049ab423 | [
"MIT"
] | 9 | 2022-03-25T19:35:49.000Z | 2022-03-31T06:09:47.000Z | src/spring/azext_spring/vendored_sdks/appplatform/v2022_05_01_preview/operations/_operations.py | Caoxuyang/azure-cli-extensions | d2011261f29033cb31a1064256727d87049ab423 | [
"MIT"
] | 1 | 2022-02-14T21:43:29.000Z | 2022-02-14T21:43:29.000Z | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request used to list Microsoft.AppPlatform REST operations.

    Auto-generated helper: assembles the operations URL, the api-version
    query parameter and the JSON Accept header into an azure.core HttpRequest.
    Remaining kwargs are forwarded to the HttpRequest constructor.
    """
    api_version = kwargs.pop('api_version', "2022-05-01-preview") # type: str
    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.AppPlatform/operations")
    # Construct parameters
    _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    _query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        **kwargs
    )
class Operations(object):
    """Operations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.appplatform.v2022_05_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> Iterable["_models.AvailableOperations"]:
        """Lists all of the available REST API operations of the Microsoft.AppPlatform provider.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailableOperations or the result of cls(response)
        :rtype:
        ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_05_01_preview.models.AvailableOperations]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-05-01-preview") # type: str
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableOperations"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build the HTTP request for the first page (no next_link) or for a
        # continuation page (next_link is the follow-up URL from the service).
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, iterator over its elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("AvailableOperations", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Execute one page request, mapping non-200 responses to ARM errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/providers/Microsoft.AppPlatform/operations"} # type: ignore
| 39.462069 | 133 | 0.653443 |
914a7b203bd63db8b6f0d65e8e1811b3d943a15e | 5,513 | py | Python | src/utoolbox/io/encdec/amira/base.py | thhsieh00/utoolbox-core | e46704348d60985c205a16f41788d2c185e11fb6 | [
"Apache-2.0"
] | 3 | 2020-08-21T02:34:32.000Z | 2021-04-06T06:56:46.000Z | src/utoolbox/io/encdec/amira/base.py | liuyenting/utoolbox-core | d1430967458204b99780c547eaca60d066490946 | [
"Apache-2.0"
] | null | null | null | src/utoolbox/io/encdec/amira/base.py | liuyenting/utoolbox-core | d1430967458204b99780c547eaca60d066490946 | [
"Apache-2.0"
] | null | null | null | import logging
import numpy as np
from pyparsing import (
CaselessKeyword,
Combine,
Dict,
Group,
Literal,
MatchFirst,
OneOrMore,
Optional,
QuotedString,
Word,
alphanums,
nestedExpr,
restOfLine,
)
from pyparsing import pyparsing_common as pc
__all__ = ["Amira"]
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# pyparsing grammar for the ASCII header of an AmiraMesh file. The header is
# a sequence of "#" comments (one of which carries the file format/version),
# "define <object_type> <size>" declarations, a Parameters { ... } block and
# data prototypes of the form  <object_type> { <type>[n] <name> } @<section>.
# ---------------------------------------------------------------------------
# comment
# .. file format
file_format_options = CaselessKeyword("BINARY-LITTLE-ENDIAN") | CaselessKeyword(
    "3D ASCII"
).setResultsName("format")
file_format_version = pc.real.setResultsName("version")
file_format = Group(
    CaselessKeyword("AmiraMesh").suppress() + file_format_options + file_format_version
).setResultsName("file_format")
# .. comment
comment_string = restOfLine
comment = Group(Word("#", min=1).suppress() + MatchFirst([file_format, comment_string]))
# declaration
object_type = (
    CaselessKeyword("lattice")
    | CaselessKeyword("vertex")
    | CaselessKeyword("edge")
    | CaselessKeyword("point")
    | CaselessKeyword("points")
).setResultsName("object_type")
object_size = pc.integer.setResultsName("size")
declaration = Group(CaselessKeyword("define").suppress() + object_type + object_size)
# parameter
# .. known keys (ContentType/MinMax) are kept via Dict; any other key is
#    still parsed so the header matches, but its result is suppressed.
key = CaselessKeyword("ContentType") | CaselessKeyword("MinMax")
value = (
    QuotedString('"', '"')
    | Group(OneOrMore(pc.integer | pc.real))
    | nestedExpr("{", "}")
)
parameter = Dict(
    Group(key + value + Optional(",").suppress())
    | Group(Word("_" + alphanums) + value + Optional(",")).suppress()
)
parameters = CaselessKeyword("Parameters").suppress() + nestedExpr(
    "{", "}", parameter
).setResultsName("parameters")
# prototype
element_type = (CaselessKeyword("float") | CaselessKeyword("int")).setResultsName(
    "type"
)
# .. array
element_counts = pc.integer.setResultsName("counts")
# .. structure
element_name = Word(alphanums).setResultsName("name")
element = (
    element_type
    + Optional(Word("[").suppress() + element_counts + Word("]").suppress())
    + element_name
)
section_id = Combine(Literal("@") + pc.integer).setResultsName("section_id")
# .. prototype
prototype = Group(
    object_type + nestedExpr("{", "}", element).setResultsName("data_type") + section_id
)
# A header is any number of the above constructs, in any order.
grammar = OneOrMore(
    comment.setResultsName("comments", listAllMatches=True)
    | declaration.setResultsName("declarations", listAllMatches=True)
    | parameters
    | prototype.setResultsName("prototypes", listAllMatches=True)
)
class Amira(object):
    """Reader for the header/metadata of an Amira(-Mesh) file.

    The ASCII header is parsed with the module-level `grammar`; the data
    blocks themselves are described (section id, shape, dtype) but their
    contents are NOT loaded.
    """

    def __init__(self, path):
        self._path = path
        self._metadata = self._parse_metadata()
        self._validate_file()
        self._data = self._parse_data_prototype()
    ##
    @property
    def data(self):
        # Mapping of data-block name -> (section_id, shape, dtype).
        return self._data
    @property
    def metadata(self):
        # Parsed header metadata as a plain dict.
        return self._metadata
    @property
    def path(self):
        return self._path
    ##
    def _parse_metadata(self):
        """Read the ASCII header (up to the data section) and parse it."""
        lines = []
        with open(self.path, "r", errors="ignore") as fd:
            for line in fd:
                if line.startswith("# Data section follows"):
                    break
                lines.append(line)
        lines = "".join(lines)
        metadata = grammar.parseString(lines).asDict()
        # simplify blocks nested 1 level deeper in a list
        # TODO how to fix this in grammar?
        def extract(d, key):
            d[key] = d[key][0]
        extract(metadata, "parameters")
        for prototype in metadata["prototypes"]:
            extract(prototype, "data_type")
        return metadata
    def _validate_file(self):
        """Raise RuntimeError unless a header comment declared the file format."""
        for comment in self.metadata["comments"]:
            if "file_format" in comment:
                break
        else:
            raise RuntimeError("not an Amira-generated file")
    def _parse_data_prototype(self):
        """Parse data block info, but NOT loaded yet."""
        types = self._parse_object_types()
        data = dict()
        for source in self.metadata["prototypes"]:
            name = source["data_type"]["name"]
            if name in data:
                raise RuntimeError(f'data block "{name}" already exists')
            # Element count comes from the matching "define" declaration.
            size = types[source["object_type"]]
            sid = source["section_id"]
            shape, dtype = self._interpret_data_layout(size, source["data_type"])
            data[name] = (sid, shape, dtype)
        return data
    def _parse_object_types(self):
        """Map each declared object type (e.g. 'lattice') to its element count."""
        types = dict()
        for source in self.metadata["declarations"]:
            name, size = source["object_type"], source["size"]
            if name in types:
                raise RuntimeError(f'malformed declaration, "{name}" already exists')
            types[name] = size
        return types
    @classmethod
    def _interpret_data_layout(cls, size, element):
        """Translate a parsed element spec into a numpy (shape, dtype) pair."""
        dtype = {"float": np.float32, "int": np.int32}[element["type"]]
        # element
        shape = (element.get("counts", 1),)
        # overall
        shape = (size,) + shape
        return shape, dtype
# Manual smoke test: parse a few local sample files and dump their metadata.
if __name__ == "__main__":
    from pprint import pprint
    import logging
    logging.basicConfig(level=logging.DEBUG)
    files = ["pureGreen.col", "c6_rawpoints_0042.am", "c6_spatialgraph_0042.am"]
    for path in files:
        print(path)
        am = Amira(path)
        pprint(am.metadata)
        pprint(am._data)
        print()
| 27.984772 | 89 | 0.606566 |
87c1fbe27446cb4924151e07442d806eb831eef9 | 1,873 | py | Python | stacking/2.regression/stacking_submit.py | huseinzol05/Machine-Learning-Data-Science-Reuse | 83f967a94254eb71ae021dccbc13f861d50c64f0 | [
"MIT"
] | 26 | 2018-02-09T04:57:19.000Z | 2021-10-03T13:06:18.000Z | stacking/2.regression/stacking_submit.py | huseinzol05/Machine-Learning-Data-Science-Reuse | 83f967a94254eb71ae021dccbc13f861d50c64f0 | [
"MIT"
] | null | null | null | stacking/2.regression/stacking_submit.py | huseinzol05/Machine-Learning-Data-Science-Reuse | 83f967a94254eb71ae021dccbc13f861d50c64f0 | [
"MIT"
] | 26 | 2018-03-04T08:10:03.000Z | 2021-08-04T08:05:49.000Z | import pandas as pd
import numpy as np
import lightgbm as lgb
import xgboost as xgb
from sklearn.metrics import r2_score
# Disable pandas' SettingWithCopy warning for the column assignments below.
pd.options.mode.chained_assignment = None
# read datasets
train = pd.read_csv('../../input/train.csv')
test = pd.read_csv('../../input/test.csv')
test_id = test.ID
y_train = train.y
#----------------read the base model prediction files-------------------
# Level-1 features: out-of-fold predictions produced by the base models.
x_train = pd.read_csv("../stack_data/....") # define yourself
x_test = pd.read_csv("../stack_data/....")
# Give both frames identical generic column names (col0, col1, ...).
name_col = ["col" + str(i) for i in range(x_train.shape[1])]
x_test.columns = name_col
x_train.columns = name_col
dtrain = xgb.DMatrix(data = x_train, label = y_train)
# xgboost parameters for the level-2 (stacking) regressor.
params = {
    'objective': 'reg:linear',
    'metric': 'rmse',
    'max_depth' : 4,
    'subsample': 0.8,
    'colsample_bytree': 0.8,
    'learning_rate': 0.001,
    'seed': 0,
    'nthread': -1,
    'silent':True,
    'verbose':0
}
def xgb_r2_score(preds, dtrain):
    """Custom xgboost eval metric: R^2 between `preds` and the DMatrix labels."""
    return 'r2', r2_score(dtrain.get_label(), preds)
# Cross-validate up to 10000 rounds, maximising the custom R^2 metric with
# early stopping; `clf` is the per-round CV history.
clf = xgb.cv(params, dtrain, 10000, early_stopping_rounds = 50, feval= xgb_r2_score, maximize=True, verbose_eval=100)
best_rounds = np.argmax(clf['test-r2-mean'])
print("----------------------------------------------------")
print(clf.iloc[best_rounds])
# Best CV score; reused below as part of the submission file name.
files_name = clf.iloc[best_rounds]["test-r2-mean"]
print("------train-----------------------------------------")
# Retrain on the full training data for the best number of rounds.
bst = xgb.train(params, dtrain, best_rounds)
print("------predict---------------------------------------")
dtest = xgb.DMatrix(data = x_test)
preds = bst.predict(dtest)
output = pd.DataFrame({'id': test_id.astype(np.int32), 'y': preds})
print("------file generate---------------------------------")
output.to_csv('../upload/cv_' + str(best_rounds) + '_' + str(files_name) + '_' + 'my_preds.csv', index=None) | 34.054545 | 118 | 0.568073 |
6b2ea8c4fab25c0629cfd3171b64b4bb4e0fed77 | 15,174 | py | Python | iPERCore/models/networks/generators/lwb_avg_resunet.py | ak9250/iPERCore | c7bac39d65964350074a647e89975655cd443790 | [
"Apache-2.0"
] | 1 | 2021-03-24T08:44:45.000Z | 2021-03-24T08:44:45.000Z | iPERCore/models/networks/generators/lwb_avg_resunet.py | ak9250/iPERCore | c7bac39d65964350074a647e89975655cd443790 | [
"Apache-2.0"
] | null | null | null | iPERCore/models/networks/generators/lwb_avg_resunet.py | ak9250/iPERCore | c7bac39d65964350074a647e89975655cd443790 | [
"Apache-2.0"
] | 1 | 2022-01-05T20:20:28.000Z | 2022-01-05T20:20:28.000Z | # Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from .bg_inpaintor import ResNetInpaintor
class ResidualBlock(nn.Module):
    """Residual Block: conv3x3 -> ReLU -> conv3x3 plus an identity skip.

    The skip addition requires the conv output to match x's channel count,
    so in practice in_channel == out_channel.
    """

    def __init__(self, in_channel, out_channel):
        super(ResidualBlock, self).__init__()
        conv_layers = [
            nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1),
        ]
        self.main = nn.Sequential(*conv_layers)

    def forward(self, x):
        residual = self.main(x)
        return x + residual
class LWB(nn.Module):
    """Warping block: samples feature maps ``X`` with flow grids ``T`` via
    ``F.grid_sample``, resizing the grid to the feature resolution if needed.

    Stateless (no learnable parameters).
    """
    def __init__(self):
        super(LWB, self).__init__()
    def forward(self, X, T):
        """
        Args:
            X (torch.tensor): (N, C, H, W) or (N, nt, C, H, W) or (N, ns, C, H, W)
            T (torch.tensor): (N, h, w, 2) or (N, nt, h, w, 2) or (N, nt, ns, h, w, 2)
        Returns:
            x_warp (torch.tensor): (N, C, H ,W)
        """
        x_shape = X.shape
        T_shape = T.shape
        x_n_dim = len(x_shape)
        T_n_dim = len(T_shape)
        assert x_n_dim >= 4 and T_n_dim >= 4
        if x_n_dim == 4 and T_n_dim == 4:
            warp = self.transform(X, T)
        elif x_n_dim == 5 and T_n_dim == 5:
            # Fold the extra (nt or ns) dimension into the batch dimension so a
            # single grid_sample call handles all of them at once.
            bs, nt, C, H, W = x_shape
            h, w = T_shape[2:4]
            warp = self.transform(X.view(bs * nt, C, H, W), T.view(bs * nt, h, w, 2))
        else:
            raise ValueError("#dim of X must >= 4 and #dim of T must >= 4")
        return warp
    def resize_trans(self, x, T):
        # Resize the sampling grid to the spatial resolution of `x`.
        _, _, h, w = x.shape
        T_scale = T.permute(0, 3, 1, 2)  # (bs, 2, h, w)
        T_scale = F.interpolate(T_scale, size=(h, w), mode='bilinear', align_corners=True)
        T_scale = T_scale.permute(0, 2, 3, 1)  # (bs, h, w, 2)
        return T_scale
    def transform(self, x, T):
        bs, c, h_x, w_x = x.shape
        bs, h_t, w_t, _ = T.shape
        if h_t != h_x or w_t != w_x:
            T = self.resize_trans(x, T)
        # NOTE(review): grid_sample is called without align_corners, so PyTorch's
        # default (False since 1.3) applies while the resize above uses
        # align_corners=True — confirm this mismatch is intentional.
        x_trans = F.grid_sample(x, T)
        return x_trans
class AddLWB(nn.Module):
    """Fuse a target-stream feature map with warped source features by averaging."""

    def __init__(self):
        super().__init__()
        self.lwb = LWB()

    def forward(self, tsf_x, src_x, Tst):
        """
        Args:
            tsf_x (torch.Tensor): (bs, c, h, w) target-stream features.
            src_x (torch.Tensor): (bs * ns, c, h, w) source features.
            Tst (torch.Tensor): (bs * ns, h, w, 2) sampling grids.

        Returns:
            torch.Tensor: fused features of shape (bs, c, h, w).
        """
        num_grids = Tst.shape[0]
        bs, _, h, w = tsf_x.shape
        ns = num_grids // bs
        # Warp every source feature map, then regroup per batch element.
        warped = self.lwb(src_x, Tst).view(bs, ns, -1, h, w)
        stacked = torch.cat([tsf_x.unsqueeze(dim=1), warped], dim=1)
        # Average over the (1 + ns) candidate feature maps.
        return torch.mean(stacked, dim=1)
class Encoder(nn.Module):
    """Stack of stride-2 Conv+ReLU stages; each stage halves the resolution."""

    def __init__(self, in_channel, num_filters, use_bias=True):
        super().__init__()
        self.n_down = len(num_filters)
        # Input width of each stage is the previous stage's output width.
        stage_inputs = [in_channel] + list(num_filters[:-1])
        blocks = [
            nn.Sequential(
                nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1, bias=use_bias),
                nn.ReLU(inplace=True),
            )
            for c_in, c_out in zip(stage_inputs, num_filters)
        ]
        self.layers = nn.Sequential(*blocks)

    def forward(self, x, get_details=True):
        """Encode `x`; with ``get_details`` return the output of every stage."""
        if not get_details:
            return self.layers(x)
        stage_outputs = []
        for stage in self.layers:
            x = stage(x)
            stage_outputs.append(x)
        return stage_outputs
class Decoder(nn.Module):
    """Stack of stride-2 transposed-conv + ReLU stages; each doubles resolution."""

    def __init__(self, in_channel, num_filters):
        super().__init__()
        self.n_down = len(num_filters)
        stage_inputs = [in_channel] + list(num_filters[:-1])
        blocks = [
            nn.Sequential(
                nn.ConvTranspose2d(c_in, c_out, kernel_size=4, stride=2, padding=1, bias=True),
                nn.ReLU(inplace=True),
            )
            for c_in, c_out in zip(stage_inputs, num_filters)
        ]
        self.layers = nn.Sequential(*blocks)

    def forward(self, x):
        return self.layers(x)
class SkipDecoder(nn.Module):
    """U-Net-style decoder: each up-conv output (except the last) is concatenated
    with the mirrored encoder output and fused by a 3x3 "skipper" conv.
    """
    def __init__(self, in_channel, enc_num_filters, dec_num_filters):
        super().__init__()
        upconvs = list()
        skippers = list()
        self.n_down = len(dec_num_filters)
        for i in range(0, self.n_down):
            # First stage consumes `in_channel`; later stages consume the
            # previous decoder stage's output width.
            if i == 0:
                d_in = in_channel
            else:
                d_in = dec_num_filters[i - 1]
            upconvs.append(nn.Sequential(
                nn.ConvTranspose2d(d_in, dec_num_filters[i], kernel_size=4, stride=2, padding=1),
                nn.ReLU(inplace=True)
            ))
            if i != self.n_down - 1:
                # Skip fusion input: mirrored encoder stage output
                # (enc index n_down-2-i pairs stage i with its same-resolution
                # encoder output) concatenated with the up-conv output.
                s_in = enc_num_filters[self.n_down - 2 - i] + dec_num_filters[i]
                skippers.append(nn.Sequential(
                    nn.Conv2d(s_in, dec_num_filters[i], kernel_size=3, stride=1, padding=1),
                    nn.ReLU(inplace=True)
                ))
        self.skippers = nn.Sequential(*skippers)
        self.upconvs = nn.Sequential(*upconvs)
    def forward(self, x, enc_outs):
        """Decode `x`, fusing the encoder outputs `enc_outs` at matching scales."""
        d_out = x
        for i in range(self.n_down):
            d_out = self.upconvs[i](d_out)
            if i != self.n_down - 1:
                # Channel-wise concat of the mirrored encoder output, then fuse.
                skip = torch.cat([enc_outs[self.n_down - 2 - i], d_out], dim=1)
                d_out = self.skippers[i](skip)
        return d_out
class ResAutoEncoder(nn.Module):
    """Encoder -> residual blocks -> decoder, with two output heads:
    a 3-channel Tanh image head and a 1-channel Sigmoid attention/mask head.
    """
    def __init__(self, in_channel=6, num_filters=(64, 128, 128, 128), n_res_block=4):
        super(ResAutoEncoder, self).__init__()
        self._name = 'ResAutoEncoder'
        # build encoders
        self.encoders = Encoder(in_channel=in_channel, num_filters=num_filters)
        res_blocks = []
        for i in range(n_res_block):
            res_blocks.append(ResidualBlock(num_filters[-1], num_filters[-1]))
        self.res_blocks = nn.Sequential(*res_blocks)
        # Decoder mirrors the encoder's filter progression.
        self.decoders = Decoder(in_channel=num_filters[-1], num_filters=list(reversed(num_filters)))
        # Image head: 3 channels in [-1, 1].
        self.img_reg = nn.Sequential(
            nn.Conv2d(num_filters[0], 3, kernel_size=5, stride=1, padding=2, bias=False),
            nn.Tanh()
        )
        # Attention/mask head: 1 channel in [0, 1].
        self.att_reg = nn.Sequential(
            nn.Conv2d(num_filters[0], 1, kernel_size=5, stride=1, padding=2, bias=False),
            nn.Sigmoid()
        )
    def forward(self, x):
        """Full pass: encode, refine with residual blocks, decode, regress heads."""
        enc_x = self.encoders(x, get_details=False)
        res_x = self.res_blocks(enc_x)
        dec_x = self.decoders(res_x)
        return self.regress(dec_x)
    def decode(self, x):
        return self.decoders(x)
    def regress(self, x):
        # Returns (image, attention) tuple from the two heads.
        return self.img_reg(x), self.att_reg(x)
    def encode(self, x):
        # Returns the list of per-stage encoder outputs (get_details defaults True).
        return self.encoders(x)
    def res_out(self, x):
        """Run the residual blocks, collecting each block's output."""
        res_outs = []
        for i in range(len(self.res_blocks)):
            x = self.res_blocks[i](x)
            res_outs.append(x)
        return res_outs
class AddLWBGenerator(nn.Module):
    """Three-stream generator: a background inpainting net (``bg_net``), a source
    auto-encoder (``src_net``), and a transfer stream (``tsf_net_*``) whose
    features are fused with warped source features via :class:`AddLWB` at every
    encoder stage and residual block.
    """
    def __init__(
        self, bg_dim=4, src_dim=6, tsf_dim=3,
        num_filters=(64, 128, 256), n_res_block=6,
        temporal=True
    ):
        super(AddLWBGenerator, self).__init__()
        self.bg_net = ResNetInpaintor(c_dim=bg_dim, num_filters=(64, 128, 128, 256), n_res_block=n_res_block)
        # build src_net
        self.src_net = ResAutoEncoder(in_channel=src_dim, num_filters=num_filters, n_res_block=n_res_block)
        # build tsf_net
        self.temporal = temporal
        self.tsf_net_enc = Encoder(in_channel=tsf_dim, num_filters=num_filters, use_bias=False)
        self.tsf_net_dec = SkipDecoder(num_filters[-1], num_filters, list(reversed(num_filters)))
        self.add_lwb = AddLWB()
        res_blocks = []
        for i in range(n_res_block):
            res_blocks.append(ResidualBlock(num_filters[-1], num_filters[-1]))
        self.res_blocks = nn.Sequential(*res_blocks)
        # Transfer-stream heads: 3-channel Tanh image and 1-channel Sigmoid mask.
        self.tsf_img_reg = nn.Sequential(
            nn.Conv2d(num_filters[0], 3, kernel_size=5, stride=1, padding=2, bias=False),
            nn.Tanh()
        )
        self.tsf_att_reg = nn.Sequential(
            nn.Conv2d(num_filters[0], 1, kernel_size=5, stride=1, padding=2, bias=False),
            nn.Sigmoid()
        )
    def forward_bg(self, bg_inputs):
        """
        Args:
            bg_inputs (torch.tensor): (bs, ns, 4, h, w)
        Returns:
            bg_img (torch.tensor): the `viewed` bg_img from (bs * ns, 3, h, w) to (bs, ns, 3, h, w)
        """
        bs, ns, _, h, w = bg_inputs.shape
        # Fold ns into the batch dim so the inpaintor runs once over all sources.
        bg_img = self.bg_net(bg_inputs.view(bs * ns, -1, h, w))
        bg_img = bg_img.view(bs, ns, 3, h, w)
        return bg_img
    def forward_src(self, src_inputs, only_enc=True):
        """
        Args:
            src_inputs (torch.tensor): (bs, ns, 6, h, w)
            only_enc (bool): the flag to control only encode or return the all outputs, including,
                encoder outputs, predicted img and mask map.
        Returns:
            enc_outs (list of torch.tensor): [torch.tensor(bs*ns, c1, h1, w1), tensor.tensor(bs*ns, c2, h2, w2), ... ]
            img (torch.tensor): if `only_enc == True`, return the predicted image map (bs, ns, 3, h, w).
            mask (torch.tensor): if `only_enc == True`, return the predicted mask map (bs, ns, 3, h, w)
        """
        bs, ns, _, h, w = src_inputs.shape
        src_enc_outs = self.src_net.encode(src_inputs.view(bs * ns, -1, h, w))
        src_res_outs = self.src_net.res_out(src_enc_outs[-1])
        if only_enc:
            return src_enc_outs, src_res_outs
        else:
            # Also reconstruct the source image/mask through the decoder heads.
            img, mask = self.src_net.regress(self.src_net.decode(src_res_outs[-1]))
            img = img.view(bs, ns, 3, h, w)
            mask = mask.view(bs, ns, 1, h, w)
            return src_enc_outs, src_res_outs, img, mask
    def forward_tsf(self, tsf_inputs, src_enc_outs, src_res_outs, Tst,
                    temp_enc_outs=None, temp_res_outs=None, Ttt=None):
        """
        Processing one time step of tsf stream.
        Args:
            tsf_inputs (torch.tensor): (bs, 6, h, w)
            src_enc_outs (list of torch.tensor): [(bs*ns, c1, h1, w1), (bs*ns, c2, h2, w2),..]
            src_res_outs (list of torch.tensor): [(bs*ns, c1, h1, w1), (bs*ns, c2, h2, w2),..]
            Tst (torch.tensor): (bs, ns, h, w, 2), flow transformation from source images/features
            temp_enc_outs (list of torch.tensor): [(bs*nt, c1, h1, w1), (bs*nt, c2, h2, w2),..]
            Ttt (torch.tensor): (bs, nt, h, w, 2), flow transformation from previous images/features (temporal smooth)
        Returns:
            tsf_enc_outs (list of torch.tensor):
            tsf_img (torch.tensor): (bs, 3, h, w)
            tsf_mask (torch.tensor): (bs, 1, h, w)
        """
        # NOTE(review): temp_enc_outs/temp_res_outs/Ttt are accepted but unused
        # in this body — confirm whether temporal fusion was meant to happen here.
        bs, ns, h, w, _ = Tst.shape
        n_down = self.tsf_net_enc.n_down
        # 1. encoders
        tsf_enc_outs = []
        tsf_x = tsf_inputs
        Tst = Tst.view((bs * ns, h, w, 2))
        for i in range(n_down):
            # Encode one stage, then fuse with the warped source stage features.
            tsf_x = self.tsf_net_enc.layers[i](tsf_x)
            src_x = src_enc_outs[i]
            tsf_x = self.add_lwb(tsf_x, src_x, Tst)
            tsf_enc_outs.append(tsf_x)
        # 2. res-blocks
        for i in range(len(self.res_blocks)):
            tsf_x = self.res_blocks[i](tsf_x)
            src_x = src_res_outs[i]
            tsf_x = self.add_lwb(tsf_x, src_x, Tst)
        # 3. decoders
        tsf_x = self.tsf_net_dec(tsf_x, tsf_enc_outs)
        tsf_img, tsf_mask = self.tsf_img_reg(tsf_x), self.tsf_att_reg(tsf_x)
        return tsf_img, tsf_mask
    def forward(self, bg_inputs, src_inputs, tsf_inputs, Tst, Ttt=None, only_tsf=True):
        """
        Args:
            bg_inputs (torch.tensor): (bs, ns, 4, H, W)
            src_inputs (torch.tensor): (bs, ns, 6, H, W)
            tsf_inputs (torch.tensor): (bs, nt, 3 or 6, H, W)
            Tst (torch.tensor): (bs, nt, ns, H, W, 2)
            Ttt (torch.tensor or None): (bs, nt - 1, H, H, 2)
            only_tsf (bool):
        Returns:
            bg_img (torch.tensor): the inpainted bg images, (bs, ns or 1, 3, h, w)
        """
        bs, nt, ns, h, w, _ = Tst.shape
        # 1. inpaint background
        bg_img = self.forward_bg(bg_inputs)    # (N, ns or 1, 3, h, w)
        # 2. process source inputs
        # src_enc_outs: [torch.tensor(bs*ns, c1, h1, w1), tensor.tensor(bs*ns, c2, h2, w2), ... ]
        # src_img: the predicted image map (bs, ns, 3, h, w)
        # src_mask: the predicted mask map (bs, ns, 3, h, w)
        if only_tsf:
            src_enc_outs, src_res_outs = self.forward_src(src_inputs, only_enc=True)
            src_imgs, src_masks = None, None
        else:
            src_enc_outs, src_res_outs, src_imgs, src_masks = self.forward_src(src_inputs, only_enc=False)
        # 3. process transform inputs
        tsf_imgs, tsf_masks = [], []
        for t in range(nt):
            t_tsf_inputs = tsf_inputs[:, t]
            if t != 0 and self.temporal:
                # Temporal smoothing: re-encode the previous step's masked
                # prediction so it can (in principle) condition this step.
                _tsf_cond = tsf_inputs[:, t - 1, 0:3]
                _tsf_img = tsf_imgs[-1] * (1 - tsf_masks[-1])
                _tsf_inputs = torch.cat([_tsf_img, _tsf_cond], dim=1).unsqueeze_(dim=1)
                _temp_enc_outs, _temp_res_outs = self.forward_src(_tsf_inputs, only_enc=True)
                _Ttt = Ttt[:, t-1:t]
            else:
                _Ttt = None
                _temp_enc_outs, _temp_res_outs = None, None
            tsf_img, tsf_mask = self.forward_tsf(t_tsf_inputs, src_enc_outs, src_res_outs,
                                                 Tst[:, t].contiguous(), _temp_enc_outs, _temp_res_outs, _Ttt)
            tsf_imgs.append(tsf_img)
            tsf_masks.append(tsf_mask)
        # Stack per-step outputs back into a time dimension.
        tsf_imgs = torch.stack(tsf_imgs, dim=1)
        tsf_masks = torch.stack(tsf_masks, dim=1)
        if only_tsf:
            return bg_img, tsf_imgs, tsf_masks
        else:
            return bg_img, src_imgs, src_masks, tsf_imgs, tsf_masks
if __name__ == '__main__':
    # Smoke test: push random 512x512 inputs (bs=2, ns=5 sources, nt=2 steps)
    # through the generator and print the output shapes.
    alwb_gen = AddLWBGenerator(temporal=True, num_filters=[64, 128, 256])
    bg_inputs = torch.rand(2, 5, 4, 512, 512)
    src_inputs = torch.rand(2, 5, 6, 512, 512)
    tsf_inputs = torch.rand(2, 2, 3, 512, 512)
    Tst = torch.rand(2, 2, 5, 512, 512, 2)
    Ttt = torch.rand(2, 1, 512, 512, 2)
    bg_img, src_img, src_mask, tsf_img, tsf_mask = alwb_gen(bg_inputs, src_inputs, tsf_inputs, Tst, Ttt, only_tsf=False)
    print(bg_img.shape, src_img.shape, src_mask.shape, tsf_img.shape, tsf_mask.shape)
| 32.844156 | 120 | 0.554238 |
949620dd7548061eb94904c159b794186e693070 | 548 | py | Python | week3/exercises/globalvars.py | Nburkhal/mit-cs250 | a3d32a217deb2cfa1b94d8188bef73c0742b1245 | [
"MIT"
] | null | null | null | week3/exercises/globalvars.py | Nburkhal/mit-cs250 | a3d32a217deb2cfa1b94d8188bef73c0742b1245 | [
"MIT"
] | null | null | null | week3/exercises/globalvars.py | Nburkhal/mit-cs250 | a3d32a217deb2cfa1b94d8188bef73c0742b1245 | [
"MIT"
] | null | null | null | def fib(n):
    # Naive recursive Fibonacci with a shifted convention: fib(1)=1, fib(2)=2.
    # Increments the module-level `numFibCalls` counter to demonstrate the
    # exponential number of calls without memoization.
    global numFibCalls
    numFibCalls += 1
    if n == 1:
        return 1
    elif n == 2:
        return 2
    else:
        return fib(n-1) + fib(n-2)
def fibef(n, d):
    """Memoized Fibonacci: `d` caches computed values (seeded with {1: 1, 2: 2}).

    Also bumps the module-level `numFibCalls` counter so the call count can be
    compared against the naive recursive version.
    """
    global numFibCalls
    numFibCalls += 1
    # Cache hit: return the stored value directly.
    if n in d:
        return d[n]
    # Cache miss: recurse, then remember the result before returning it.
    result = fibef(n - 1, d) + fibef(n - 2, d)
    d[n] = result
    return result
# Compare the number of function calls made by the naive recursive Fibonacci
# (fib) against the memoized version (fibef) for the same argument.
numFibCalls = 0
# Fibonacci index to compute. The original line was an unfilled placeholder
# (`fibArg = #number here`), which made the whole file a SyntaxError; any
# positive integer works here.
fibArg = 20
print(fib(fibArg))
print('function calls', numFibCalls)

# Reset the counter and repeat with memoization; the dict seeds the base cases.
numFibCalls = 0
d = {1: 1, 2: 2}
print(fibef(fibArg, d))
print('function calls', numFibCalls)
86b53e6956f3702da6fa581427a086d743b2589f | 14,401 | py | Python | packit_service/worker/jobs.py | thrix/packit-service | 235fa464e3082ff798ced1c2f71b7661de19fab3 | [
"MIT"
] | null | null | null | packit_service/worker/jobs.py | thrix/packit-service | 235fa464e3082ff798ced1c2f71b7661de19fab3 | [
"MIT"
] | null | null | null | packit_service/worker/jobs.py | thrix/packit-service | 235fa464e3082ff798ced1c2f71b7661de19fab3 | [
"MIT"
] | null | null | null | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
"""
We love you, Steve Jobs.
"""
import logging
from typing import Any
from typing import List, Set, Type, Union
from celery import group
from ogr.abstract import CommitStatus
from packit.config import JobConfig, PackageConfig
from packit_service.config import ServiceConfig
from packit_service.constants import TASK_ACCEPTED
from packit_service.log_versions import log_job_versions
from packit_service.service.events import (
Event,
EventData,
PullRequestCommentPagureEvent,
IssueCommentGitlabEvent,
MergeRequestCommentGitlabEvent,
MergeRequestGitlabEvent,
InstallationEvent,
IssueCommentEvent,
PullRequestCommentGithubEvent,
)
from packit_service.worker.allowlist import Allowlist
from packit_service.worker.build import CoprBuildJobHelper, KojiBuildJobHelper
from packit_service.worker.handlers import (
BugzillaHandler,
CoprBuildHandler,
CoprBuildEndHandler,
CoprBuildStartHandler,
GithubAppInstallationHandler,
KojiBuildHandler,
TestingFarmResultsHandler,
)
from packit_service.worker.handlers.abstract import (
JobHandler,
MAP_COMMENT_TO_HANDLER,
MAP_JOB_TYPE_TO_HANDLER,
MAP_REQUIRED_JOB_TYPE_TO_HANDLER,
SUPPORTED_EVENTS_FOR_HANDLER,
)
from packit_service.worker.parser import CentosEventParser, Parser
from packit_service.worker.result import TaskResults
REQUESTED_PULL_REQUEST_COMMENT = "/packit"
logger = logging.getLogger(__name__)
def get_handlers_for_event(
    event: Event, package_config: PackageConfig
) -> Set[Type[JobHandler]]:
    """
    Get all handlers that we need to run for the given event.
    We need to return all handler classes that:
    - can react to the given event AND
    - are configured in the package_config (either directly or as a required job)
    Examples of the matching can be found in the tests:
    ./tests/unit/test_jobs.py:test_get_handlers_for_event
    :param event: event which we are reacting to
    :param package_config: for checking configured jobs
    :return: set of handler instances that we need to run for given event and user configuration
    """
    # De-duplicated list of configured jobs whose trigger matches the event.
    jobs_matching_trigger = []
    for job in package_config.jobs:
        if (
            job.trigger == event.db_trigger.job_config_trigger_type
            and job not in jobs_matching_trigger
        ):
            jobs_matching_trigger.append(job)
    # Comment events additionally filter handlers by the packit command in the
    # comment; for all other events no comment-based filtering applies (None).
    if isinstance(
        event,
        (
            PullRequestCommentGithubEvent,
            PullRequestCommentPagureEvent,
            IssueCommentEvent,
            MergeRequestCommentGitlabEvent,
            IssueCommentGitlabEvent,
        ),
    ):
        handlers_triggered_by_comment = get_handlers_for_comment(event.comment)
    else:
        handlers_triggered_by_comment = None
    matching_handlers: Set[Type["JobHandler"]] = set()
    for job in jobs_matching_trigger:
        # Consider handlers that run the job directly and handlers the job
        # requires (e.g. tests require a build).
        for handler in (
            MAP_JOB_TYPE_TO_HANDLER[job.type]
            | MAP_REQUIRED_JOB_TYPE_TO_HANDLER[job.type]
        ):
            if isinstance(event, tuple(SUPPORTED_EVENTS_FOR_HANDLER[handler])) and (
                handlers_triggered_by_comment is None
                or handler in handlers_triggered_by_comment
            ):
                matching_handlers.add(handler)
    if not matching_handlers:
        logger.debug(
            f"We did not find any handler for a following event:\n{event.__class__}"
        )
    return matching_handlers
def get_packit_commands_from_comment(comment: str) -> List[str]:
    """Extract the packit command and its arguments from a comment.

    Scans the comment line by line and returns the tokens following the first
    ``/packit`` marker that carries at least one token after it (at most 3
    tokens, due to ``maxsplit``). Returns an empty list when no command is
    present.
    """
    stripped = comment.strip()
    if not stripped:
        logger.debug("Empty comment, nothing to do.")
        return []

    for raw_line in stripped.split("\n"):
        line = raw_line.strip()
        if not line:
            continue
        marker, *command = line.split(maxsplit=3)
        # Only react to lines starting with the "/packit" marker that actually
        # carry a command after it; otherwise keep scanning subsequent lines.
        if marker == REQUESTED_PULL_REQUEST_COMMENT and command:
            return command
    return []
def get_handlers_for_comment(comment: str) -> Set[Type[JobHandler]]:
    """Map the packit command found in `comment` to the handlers serving it."""
    commands = get_packit_commands_from_comment(comment)
    if not commands:
        return set()

    matched_handlers = MAP_COMMENT_TO_HANDLER[commands[0]]
    if not matched_handlers:
        logger.debug(f"Command {commands[0]} not supported by packit.")
    return matched_handlers
def get_config_for_handler_kls(
    handler_kls: Type[JobHandler], event: Event, package_config: PackageConfig
) -> List[JobConfig]:
    """
    Get a list of JobConfigs relevant to event and the handler class.

    We need to find all job configurations that:
    - can be run by the given handler class AND
    - match the trigger of the event.
    If there is no matching job-config found, we pick the ones that are
    required (e.g. for a build handler, the test config, since tests require
    the build).

    Examples of the matching can be found in the tests:
    ./tests/unit/test_jobs.py:test_get_config_for_handler_kls

    :param handler_kls: class that will use the JobConfig
    :param event: which we are reacting to
    :param package_config: we pick the JobConfig(s) from this package_config instance
    :return: list of JobConfigs relevant to the given handler and event,
        preserving the order in the config
    """
    triggered_jobs = [
        job
        for job in package_config.jobs
        if job.trigger == event.db_trigger.job_config_trigger_type
    ]

    # Direct matches: jobs this handler can run.
    matching_jobs = [
        job for job in triggered_jobs if handler_kls in MAP_JOB_TYPE_TO_HANDLER[job.type]
    ]

    if not matching_jobs:
        # Fall back to jobs that *require* this handler.
        logger.debug("No config found, let's see the jobs that requires this handler.")
        matching_jobs = [
            job
            for job in triggered_jobs
            if handler_kls in MAP_REQUIRED_JOB_TYPE_TO_HANDLER[job.type]
        ]

    if not matching_jobs:
        logger.warning(
            f"We did not find any config for {handler_kls} and a following event:\n"
            f"{event.__class__}"
        )

    return matching_jobs
class SteveJobs:
    """
    Steve makes sure all the jobs are done with precision.

    Orchestrates event parsing, allowlist checking and dispatching Celery
    tasks for all handlers configured for an incoming event.
    """
    def __init__(self):
        # Service config is loaded lazily via the `service_config` property.
        self._service_config = None
        log_job_versions()
    @property
    def service_config(self) -> ServiceConfig:
        if self._service_config is None:
            self._service_config = ServiceConfig.get_service_config()
        return self._service_config
    def process_jobs(self, event: Event) -> List[TaskResults]:
        """
        Create a Celery task for a job handler (if trigger matches) for every job defined in config.
        """
        if not event.package_config:
            # this happens when service receives events for repos which don't have packit config
            # success=True - it's not an error that people don't have packit.yaml in their repo
            return [
                TaskResults.create_from(
                    success=True,
                    msg="No packit config found in the repository.",
                    job_config=None,
                    event=event,
                )
            ]
        handler_classes = get_handlers_for_event(event, event.package_config)
        if not handler_classes:
            logger.debug(
                f"There is no handler for {event} event suitable for the configuration."
            )
            return []
        allowlist = Allowlist()
        processing_results: List[TaskResults] = []
        for handler_kls in handler_classes:
            # TODO: merge to to get_handlers_for_event so
            # so we don't need to go through the similar process twice.
            job_configs = get_config_for_handler_kls(
                handler_kls=handler_kls,
                event=event,
                package_config=event.package_config,
            )
            # check allowlist approval for every job to be able to track down which jobs
            # failed because of missing allowlist approval
            if not allowlist.check_and_report(
                event,
                event.project,
                service_config=self.service_config,
                job_configs=job_configs,
            ):
                # Not allowlisted: report a failure per job config and stop.
                processing_results = []
                for job_config in job_configs:
                    processing_results.append(
                        TaskResults.create_from(
                            success=False,
                            msg="Account is not allowlisted!",
                            job_config=job_config,
                            event=event,
                        )
                    )
                return processing_results
            signatures = []
            # we want to run handlers for all possible jobs, not just the first one
            for job_config in job_configs:
                handler = handler_kls(
                    package_config=event.package_config,
                    job_config=job_config,
                    event=event.get_dict(),
                )
                if not handler.pre_check():
                    continue
                if isinstance(handler, (CoprBuildHandler, KojiBuildHandler)):
                    # Build handlers report a pending "task accepted" commit
                    # status immediately, before the Celery task runs.
                    helper = (
                        CoprBuildJobHelper
                        if isinstance(handler, CoprBuildHandler)
                        else KojiBuildJobHelper
                    )
                    job_helper = helper(
                        service_config=self.service_config,
                        package_config=event.package_config,
                        project=event.project,
                        metadata=EventData.from_event_dict(event.get_dict()),
                        db_trigger=event.db_trigger,
                        job_config=job_config,
                    )
                    job_helper.report_status_to_all(
                        description=TASK_ACCEPTED,
                        state=CommitStatus.pending,
                        url="",
                    )
                signatures.append(
                    handler_kls.get_signature(event=event, job=job_config)
                )
                processing_results.append(
                    TaskResults.create_from(
                        success=True,
                        msg="Job created.",
                        job_config=job_config,
                        event=event,
                    )
                )
            # https://docs.celeryproject.org/en/stable/userguide/canvas.html#groups
            group(signatures).apply_async()
        return processing_results
    def process_message(
        self, event: dict, topic: str = None, source: str = None
    ) -> List[TaskResults]:
        """
        Entrypoint for message processing.
        :param event: dict with webhook/fed-mes payload
        :param topic: meant to be a topic provided by messaging subsystem (fedmsg, mqqt)
        :param source: source of message
        """
        if topic:
            # let's pre-filter messages: we don't need to get debug logs from processing
            # messages when we know beforehand that we are not interested in messages for such topic
            topics = [
                getattr(handler, "topic", None)
                for handler in JobHandler.get_all_subclasses()
            ]
            if topic not in topics:
                logger.debug(f"{topic} not in {topics}")
                return []
        event_object: Any
        if source == "centosmsg":
            event_object = CentosEventParser().parse_event(event)
        else:
            event_object = Parser.parse_event(event)
        if not (event_object and event_object.pre_check()):
            return []
        # CoprBuildEvent.get_project returns None when the build id is not known
        if not event_object.project:
            logger.warning(
                "Cannot obtain project from this event! "
                "Skipping private repository check!"
            )
        elif event_object.project.is_private():
            # Private repos are ignored unless their namespace is explicitly
            # enabled in the service configuration.
            service_with_namespace = (
                f"{event_object.project.service.hostname}/"
                f"{event_object.project.namespace}"
            )
            if (
                service_with_namespace
                not in self.service_config.enabled_private_namespaces
            ):
                logger.info(
                    f"We do not interact with private repositories by default. "
                    f"Add `{service_with_namespace}` to the `enabled_private_namespaces` "
                    f"in the service configuration."
                )
                return []
            logger.debug(
                f"Working in `{service_with_namespace}` namespace "
                f"which is private but enabled via configuration."
            )
        handler: Union[
            GithubAppInstallationHandler,
            TestingFarmResultsHandler,
            CoprBuildStartHandler,
            CoprBuildEndHandler,
        ]
        processing_results = None
        # Bugzilla handler is run even the job is not configured in a package.
        # This's not in the condition below because we want to run process_jobs() as well.
        if isinstance(event_object, MergeRequestGitlabEvent):
            BugzillaHandler.get_signature(
                event=event_object,
                job=None,
            ).apply_async()
        # installation is handled differently b/c app is installed to GitHub account
        # not repository, so package config with jobs is missing
        if isinstance(event_object, InstallationEvent):
            GithubAppInstallationHandler.get_signature(
                event=event_object, job=None
            ).apply_async()
        else:
            # Processing the jobs from the config.
            processing_results = self.process_jobs(event_object)
        if processing_results is None:
            # Installation path above produces no per-job results; report one
            # generic success instead.
            processing_results = [
                TaskResults.create_from(
                    success=True,
                    msg="Job created.",
                    job_config=None,
                    event=event_object,
                )
            ]
        return processing_results
| 35.296569 | 100 | 0.610791 |
55dd1bef87480b57557bd6589a5aa93ed214135a | 1,913 | py | Python | envs/ImageObsVecEnvWrapper.py | harry-uglow/Curriculum-Reinforcement-Learning | cb050556e1fdc7b7de8d63ad932fc712a35ac144 | [
"MIT"
] | 15 | 2020-02-02T22:22:41.000Z | 2022-03-03T07:50:45.000Z | envs/ImageObsVecEnvWrapper.py | harry-uglow/Deep-RL-Sim2Real | cb050556e1fdc7b7de8d63ad932fc712a35ac144 | [
"MIT"
] | 8 | 2020-01-28T20:45:54.000Z | 2022-03-14T07:58:27.000Z | envs/ImageObsVecEnvWrapper.py | harry-uglow/Curriculum-Reinforcement-Learning | cb050556e1fdc7b7de8d63ad932fc712a35ac144 | [
"MIT"
] | 5 | 2020-03-26T15:46:51.000Z | 2022-01-17T09:48:02.000Z | import numpy as np
from baselines.common.vec_env import VecEnvWrapper
from gym import spaces
import cv2
# Swap out state observation for image
class ImageObsVecEnvWrapper(VecEnvWrapper):
    """Base vec-env wrapper that swaps the state observation for an image.

    The wrapped env's state observation is stashed in ``curr_state_obs``;
    subclasses override ``reset``/``step_wait`` to return the actual image.
    """
    def __init__(self, venv, res):
        # Image observation space: 3 channels at resolution `res`, 0-255 values.
        observation_space = spaces.Box(0, 255, [3, *res], dtype=venv.observation_space.dtype)
        super().__init__(venv, observation_space)
        self.curr_state_obs = None
    def reset(self):
        # NOTE(review): intentionally returns None here — subclasses call this
        # via super().reset() and return the image themselves.
        self.curr_state_obs = self.venv.reset()
    def step_wait(self):
        self.curr_state_obs, rew, done, info = self.venv.step_wait()
        return rew, done, info
class SimImageObsVecEnvWrapper(ImageObsVecEnvWrapper):
    """Image-observation wrapper for simulated envs: renders the vec-env's
    frames and returns them channels-first as the observation."""

    def __init__(self, venv):
        # The sim env reports its render resolution via this 'activate' probe.
        res = venv.get_images(mode='activate')[0]
        super().__init__(venv, res)

    def reset(self):
        super(SimImageObsVecEnvWrapper, self).reset()
        # (N, H, W, C) -> (N, C, H, W) for conv-net consumption.
        image_obs = np.transpose(self.venv.get_images(), (0, 3, 1, 2))
        # Fix: removed leftover debug dump `cv2.imwrite('im.png', ...)` that
        # wrote an image file to the working directory on every reset.
        return image_obs

    def step_wait(self):
        rew, done, info = super(SimImageObsVecEnvWrapper, self).step_wait()
        image_obs = np.transpose(self.venv.get_images(), (0, 3, 1, 2))
        return image_obs, rew, done, info
class RealImageObsVecEnvWrapper(ImageObsVecEnvWrapper):
    """Image-observation wrapper for real hardware: observations come from a
    physical camera instead of the simulator's renderer."""
    def __init__(self, venv, res, camera):
        super().__init__(venv, res)
        # Camera object providing get_image(); exact type depends on the caller.
        self.cam = camera
    def reset(self):
        super(RealImageObsVecEnvWrapper, self).reset()
        image_obs = self.cam.get_image()
        return image_obs
    def step_wait(self):
        rew, done, info = super(RealImageObsVecEnvWrapper, self).step_wait()
        image_obs = self.cam.get_image()
        return image_obs, rew, done, info
def get_image_obs_wrapper(venv):
    """Walk the wrapper chain and return the first ImageObsVecEnvWrapper, or None."""
    current = venv
    while current is not None:
        if isinstance(current, ImageObsVecEnvWrapper):
            return current
        # Descend into the next wrapped env, if any.
        current = getattr(current, 'venv', None)
    return None
| 31.360656 | 93 | 0.672243 |
2db7aa4470d863d330e0d53b97e8da446d16c7d0 | 1,221 | py | Python | python_arrays.py | galgodon/astr-119-session-3 | 50f444fe6de564c8c3a4b937917929f267f108c3 | [
"MIT"
] | null | null | null | python_arrays.py | galgodon/astr-119-session-3 | 50f444fe6de564c8c3a4b937917929f267f108c3 | [
"MIT"
] | 1 | 2018-10-04T20:10:52.000Z | 2018-10-18T01:38:43.000Z | python_arrays.py | galgodon/astr-119-session-3 | 50f444fe6de564c8c3a4b937917929f267f108c3 | [
"MIT"
] | 1 | 2018-10-18T01:36:30.000Z | 2018-10-18T01:36:30.000Z | #!/usr/bin/env python3
x = [0.0, 3.0, 5.0, 2.5, 3.7] # create an array called x. (an array is a list where every element is the same type)
print(type(x)) # print data type of x (list)
x.pop(2) # delete the 3rd element of x (position 2 since we start at position 0)
print(x) # print the new x
x.remove(2.5) # remove any element of x that is 2.5
print(x) # print new x
x.append(1.2) # add 1.2 to the end of x
print(x) # print new x
y = x.copy() # define y as a copy of x
print(y) # print y, it will be identical to x
print(y.count(0.0)) # count how many elements are 0.0
print(y.index(3.7)) # outputs the index of the first element that is 3.7
y.sort() # sort the elements of y in numerical order
print(y) # print new y
y.reverse() # reverse the order of y
print(y) # print new y
y.clear() # clear all elements of y
print(y) # print new y | 42.103448 | 118 | 0.46683 |
962cf04b5951e8242d273a0681e355a8cc06d6e5 | 8,887 | py | Python | pcdswidgets/icons/valves.py | klauer/pcdswidgets | a6f50fdb41b4d7a991c86fec9bce06a4f09a80af | [
"BSD-3-Clause-LBNL"
] | null | null | null | pcdswidgets/icons/valves.py | klauer/pcdswidgets | a6f50fdb41b4d7a991c86fec9bce06a4f09a80af | [
"BSD-3-Clause-LBNL"
] | null | null | null | pcdswidgets/icons/valves.py | klauer/pcdswidgets | a6f50fdb41b4d7a991c86fec9bce06a4f09a80af | [
"BSD-3-Clause-LBNL"
] | null | null | null | import math
from qtpy.QtCore import (QPointF, QRectF, Qt, Property, QLineF)
from qtpy.QtGui import (QPainterPath, QBrush, QColor, QPolygonF, QTransform)
from .base import BaseSymbolIcon
class PneumaticValveSymbolIcon(BaseSymbolIcon):
    """
    A widget with a pneumatic valve symbol drawn in it.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the icon
    """
    def __init__(self, parent=None, **kwargs):
        super(PneumaticValveSymbolIcon, self).__init__(parent, **kwargs)
        # Brush filling the interlock box; solid green by default.
        self._interlock_brush = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
    @Property(QBrush)
    def interlockBrush(self):
        """QBrush used to paint the interlock indicator box."""
        return self._interlock_brush
    @interlockBrush.setter
    def interlockBrush(self, new_brush):
        # Repaint only when the brush actually changes.
        if new_brush != self._interlock_brush:
            self._interlock_brush = new_brush
            self.update()
    def draw_icon(self, painter):
        """Draw the valve symbol in a normalized 0..1 coordinate system."""
        # Bow-tie valve body: two triangles meeting in the middle.
        path = QPainterPath(QPointF(0, 0.3))
        path.lineTo(0, 0.9)
        path.lineTo(1, 0.3)
        path.lineTo(1, 0.9)
        path.closeSubpath()
        painter.drawPath(path)
        # Stem from the body center up to the interlock box.
        painter.drawLine(QPointF(0.5, 0.6), QPointF(0.5, 0.3))
        # Interlock indicator box, filled with the interlock brush.
        painter.setBrush(self._interlock_brush)
        painter.drawRect(QRectF(0.2, 0, 0.6, 0.3))
class FastShutterSymbolIcon(BaseSymbolIcon):
    """
    A widget with a fast shutter symbol drawn in it.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the icon
    """
    def draw_icon(self, painter):
        """Draw the shutter symbol in a normalized 0..1 coordinate system."""
        # Bow-tie body: two triangles meeting in the middle.
        path = QPainterPath(QPointF(0, 0.3))
        path.lineTo(0, 0.9)
        path.lineTo(1, 0.3)
        path.lineTo(1, 0.9)
        path.closeSubpath()
        painter.drawPath(path)
        # Zig-zag marks at the top plus a stem down to the body center.
        painter.drawLine(QPointF(0.4, 0), QPointF(0.5, 0.15))
        painter.drawLine(QPointF(0.4, 0.10), QPointF(0.5, 0.25))
        painter.drawLine(QPointF(0.5, 0.15), QPointF(0.6, 0))
        painter.drawLine(QPointF(0.5, 0.25), QPointF(0.6, 0.10))
        painter.drawLine(QPointF(0.5, 0.6), QPointF(0.5, 0.0))
class RightAngleManualValveSymbolIcon(BaseSymbolIcon):
    """
    A widget with a right angle manual valve symbol drawn in it.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the icon
    """
    def draw_icon(self, painter):
        """Draw the valve symbol in a normalized 0..1 coordinate system."""
        # Two triangles meeting at the center point (0.5, 0.5), arranged at a
        # right angle.
        path = QPainterPath(QPointF(0, 0))
        path.lineTo(1, 1)
        path.lineTo(0.005, 1)
        path.lineTo(0.5, 0.5)
        path.lineTo(0, 0.9)
        path.closeSubpath()
        painter.drawPath(path)
        # Small circle marking the valve seat at the center.
        painter.drawEllipse(QPointF(0.5, 0.5), 0.05, 0.05)
class ApertureValveSymbolIcon(BaseSymbolIcon):
    """
    A widget with an aperture valve symbol drawn in it.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the icon
    """
    def __init__(self, parent=None, **kwargs):
        super(ApertureValveSymbolIcon, self).__init__(parent, **kwargs)
        # Brush filling the interlock box; solid green by default.
        self._interlock_brush = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
    @Property(QBrush)
    def interlockBrush(self):
        """QBrush used to paint the interlock indicator box."""
        return self._interlock_brush
    @interlockBrush.setter
    def interlockBrush(self, new_brush):
        # Repaint only when the brush actually changes.
        if new_brush != self._interlock_brush:
            self._interlock_brush = new_brush
            self.update()
    def draw_icon(self, painter):
        """Draw the valve symbol in a normalized 0..1 coordinate system."""
        # Bow-tie body: two triangles meeting in the middle.
        path = QPainterPath(QPointF(0, 0.3))
        path.lineTo(0, 0.9)
        path.lineTo(1, 0.3)
        path.lineTo(1, 0.9)
        path.closeSubpath()
        painter.drawPath(path)
        # Aperture circle at the body center, with a stem to the interlock box.
        painter.drawEllipse(QPointF(0.5, 0.6), 0.1, 0.1)
        painter.drawLine(QPointF(0.5, 0.5), QPointF(0.5, 0.3))
        painter.setBrush(self._interlock_brush)
        painter.drawRect(QRectF(0.2, 0, 0.6, 0.3))
class NeedleValveSymbolIcon(BaseSymbolIcon):
    """
    A widget with a needle valve symbol drawn in it.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the icon
    """
    def __init__(self, parent=None, **kwargs):
        super(NeedleValveSymbolIcon, self).__init__(parent, **kwargs)
        # Brush filling the interlock box; solid green by default.
        self._interlock_brush = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
    @Property(QBrush)
    def interlockBrush(self):
        """QBrush used to paint the interlock indicator box."""
        return self._interlock_brush
    @interlockBrush.setter
    def interlockBrush(self, new_brush):
        # Repaint only when the brush actually changes.
        if new_brush != self._interlock_brush:
            self._interlock_brush = new_brush
            self.update()
    def draw_icon(self, painter):
        """Draw the valve symbol in a normalized 0..1 coordinate system."""
        # Bow-tie body: two triangles meeting in the middle.
        path = QPainterPath(QPointF(0, 0.3))
        path.lineTo(0, 0.9)
        path.lineTo(1, 0.3)
        path.lineTo(1, 0.9)
        path.closeSubpath()
        painter.drawPath(path)
        # Stem from the body center up towards the interlock box.
        painter.drawLine(QPointF(0.5, 0.6), QPointF(0.5, 0.15))
        # Draw the arrow end-caps
        painter.setBrush(QBrush(QColor(0, 0, 0)))
        top_arrow_point = QPointF(0.65, 0.36)
        # Arrow polygon defined around the origin, then rotated and translated
        # into place.
        arrow = QPolygonF(
            [QPointF(-0.09, 0.0),
             QPointF(-0.005, 0.0),
             QPointF(-0.005, 0.8),
             QPointF(0.005, 0.8),
             QPointF(0.005, 0.0),
             QPointF(0.09, 0.0),
             QPointF(0.00, -0.25)]
        )
        t = QTransform()
        t.rotate(35)
        top_arrow_r = t.map(arrow)
        arrow_l = top_arrow_r.translated(top_arrow_point)
        painter.drawPolygon(arrow_l)
        # Interlock indicator box at the top.
        painter.setBrush(self._interlock_brush)
        painter.drawRect(QRectF(0.3, 0, 0.4, 0.15))
class ProportionalValveSymbolIcon(BaseSymbolIcon):
    """
    A widget with a proportional valve symbol drawn in it.

    Parameters
    ----------
    parent : QWidget
        The parent widget for the icon
    """

    def __init__(self, parent=None, **kwargs):
        super(ProportionalValveSymbolIcon, self).__init__(parent, **kwargs)
        # Interlock indicator defaults to solid green (0, 255, 0).
        self._interlock_brush = QBrush(QColor(0, 255, 0), Qt.SolidPattern)

    @Property(QBrush)
    def interlockBrush(self):
        """Qt property: brush used to fill the interlock indicator box."""
        return self._interlock_brush

    @interlockBrush.setter
    def interlockBrush(self, new_brush):
        # Only repaint when the brush actually changes.
        if new_brush != self._interlock_brush:
            self._interlock_brush = new_brush
            self.update()

    def draw_icon(self, painter):
        """Paint the proportional valve symbol in normalized coordinates."""
        # Bow-tie valve body.
        path = QPainterPath(QPointF(0, 0.3))
        path.lineTo(0, 0.9)
        path.lineTo(1, 0.3)
        path.lineTo(1, 0.9)
        path.closeSubpath()
        painter.drawPath(path)
        # Stem from the body up to the interlock box.
        painter.drawLine(QPointF(0.5, 0.6), QPointF(0.5, 0.15))
        # Interlock indicator box, filled with interlockBrush.
        painter.setBrush(self._interlock_brush)
        painter.drawRect(QRectF(0.35, 0, 0.3, 0.3))
        # Draw the arrow end-caps
        painter.setBrush(QBrush(QColor(0, 0, 0)))
        top_arrow_point = QPointF(0.65, 0.42)
        # Thin arrow polygon rotated 40 degrees, then translated into place.
        arrow = QPolygonF(
            [QPointF(-0.07, 0.0),
             QPointF(-0.005, 0.0),
             QPointF(-0.005, 0.8),
             QPointF(0.005, 0.8),
             QPointF(0.005, 0.0),
             QPointF(0.07, 0.0),
             QPointF(0.00, -0.25)]
        )
        t = QTransform()
        t.rotate(40)
        top_arrow_r = t.map(arrow)
        arrow_l = top_arrow_r.translated(top_arrow_point)
        painter.drawPolygon(arrow_l)
        # Zig-zag marking inside the interlock box, offset by (t_x, t_y).
        t_x = 0.4
        t_y = 0.05
        painter.drawLines([QLineF(0.0+t_x, 0.0+t_y, 0.0+t_x, 0.2+t_y),
                           QLineF(0.0+t_x, 0.0+t_y, 0.1+t_x, 0.2+t_y),
                           QLineF(0.1+t_x, 0.2+t_y, 0.2+t_x, 0.0+t_y),
                           QLineF(0.2+t_x, 0.0+t_y, 0.2+t_x, 0.2+t_y)])
class ControlValveSymbolIcon(PneumaticValveSymbolIcon):
    """Icon for a Control Valve with readback"""

    def draw_icon(self, painter):
        """Paint a circled-X valve with a stem up to the interlock box.

        NOTE(review): relies on ``self._interlock_brush`` being set by the
        PneumaticValveSymbolIcon base class — confirm.
        """
        # Thicker, flat-capped pen for the symbol outline.
        pen = painter.pen()
        pen.setWidthF(pen.width()*2)
        pen.setCapStyle(Qt.FlatCap)
        painter.setPen(pen)
        # Circle parameters
        radius = 0.3
        center = (0.5, 1 - radius)
        # Draw circle
        painter.drawEllipse(QPointF(*center),
                            radius, radius)
        # X pattern: two diagonals at 45 degrees inscribed in the circle.
        quad = math.cos(math.radians(45)) * radius
        painter.drawLine(QLineF(center[0] + quad,
                                center[1] + quad,
                                center[0] - quad,
                                center[1] - quad))
        painter.drawLine(QLineF(center[0] + quad,
                                center[1] - quad,
                                center[0] - quad,
                                center[1] + quad))
        # Interlock Icon: stem from circle top to the indicator rectangle.
        square_dims = (0.4, 0.2)
        painter.drawLine(QPointF(center[0], center[1] - radius),
                         QPointF(center[0], square_dims[1]))
        painter.setBrush(self._interlock_brush)
        painter.drawRect(QRectF((1 - square_dims[0])/2., 0, *square_dims))
class ControlOnlyValveSymbolIcon(BaseSymbolIcon):
    """Icon for a Control Valve with no readback"""

    def draw_icon(self, painter):
        """Paint the bow-tie shaped valve body outline."""
        outline = QPainterPath(QPointF(0, 0.3))
        for corner_x, corner_y in ((0, 0.9), (1, 0.3), (1, 0.9)):
            outline.lineTo(corner_x, corner_y)
        outline.closeSubpath()
        painter.drawPath(outline)
| 31.182456 | 76 | 0.579498 |
6ad80e10bcb7f1a7fd73675294223d070e484686 | 13,903 | py | Python | extract_features.py | trangvu/bert | 001fce0550cae29e43c175d146d1a33980870ac8 | [
"Apache-2.0"
] | null | null | null | extract_features.py | trangvu/bert | 001fce0550cae29e43c175d146d1a33980870ac8 | [
"Apache-2.0"
] | null | null | null | extract_features.py | trangvu/bert | 001fce0550cae29e43c175d146d1a33980870ac8 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract pre-computed feature vectors from BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import collections
import json
import re
from model import modeling, tokenization
import tensorflow as tf
# Command-line configuration for the feature-extraction script.
# (Flags whose help string is empty upstream are kept empty here.)
flags = tf.flags

FLAGS = flags.FLAGS

# I/O: input text file (one example per line), output JSONL file, and which
# encoder layers to export (comma-separated, negative = from the top).
flags.DEFINE_string("input_file", None, "")

flags.DEFINE_string("output_file", None, "")

flags.DEFINE_string("layers", "-1,-2,-3,-4", "")

flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.")

# TPU-related options; ignored when running on CPU/GPU.
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

flags.DEFINE_string("master", None,
                    "If using a TPU, the address of the master.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")

flags.DEFINE_bool(
    "use_one_hot_embeddings", False,
    "If True, tf.one_hot will be used for embedding lookups, otherwise "
    "tf.nn.embedding_lookup will be used. On TPUs, this should be True "
    "since it is much faster.")
class InputExample(object):
  """A single raw input example: an id plus one or two text segments."""

  def __init__(self, unique_id, text_a, text_b):
    # text_b is None for single-sentence input.
    self.unique_id, self.text_a, self.text_b = unique_id, text_a, text_b
class InputFeatures(object):
  """A single set of features of data."""

  def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
    # Already-computed model inputs for one example; stored verbatim.
    (self.unique_id, self.tokens, self.input_ids, self.input_mask,
     self.input_type_ids) = (unique_id, tokens, input_ids, input_mask,
                             input_type_ids)
def input_fn_builder(features, seq_length):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  all_unique_ids = [feature.unique_id for feature in features]
  all_input_ids = [feature.input_ids for feature in features]
  all_input_mask = [feature.input_mask for feature in features]
  all_input_type_ids = [feature.input_type_ids for feature in features]

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    num_examples = len(features)

    # Demo-only: everything is materialized in memory. Dataset.from_generator
    # is avoided because it relies on tf.py_func, which is not TPU
    # compatible; TFRecordReader is the scalable alternative.
    dataset = tf.data.Dataset.from_tensor_slices({
        "unique_ids":
            tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_type_ids":
            tf.constant(
                all_input_type_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
    })

    return dataset.batch(batch_size=batch_size, drop_remainder=False)

  return input_fn
def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator.

  Args:
    bert_config: BertConfig describing the encoder architecture.
    init_checkpoint: checkpoint path to restore pre-trained weights from.
    layer_indexes: encoder layer indices (e.g. [-1, -2]) to expose as
      `layer_output_<i>` predictions.
    use_tpu: if True, checkpoint restore happens inside a TPU Scaffold.
    use_one_hot_embeddings: embedding lookup strategy passed to BertModel.
  """

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    unique_ids = features["unique_ids"]
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    input_type_ids = features["input_type_ids"]

    # Inference-only encoder (is_training=False disables dropout).
    model = modeling.BertModel(
        config=bert_config,
        is_training=False,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=input_type_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)

    if mode != tf.estimator.ModeKeys.PREDICT:
      raise ValueError("Only PREDICT modes are supported: %s" % (mode))

    tvars = tf.trainable_variables()
    scaffold_fn = None
    (assignment_map,
     initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
         tvars, init_checkpoint)
    if use_tpu:
      # On TPU the restore must be deferred into the Scaffold function.
      def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

      scaffold_fn = tpu_scaffold
    else:
      tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    # Log which variables were restored from the checkpoint.
    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    all_layers = model.get_all_encoder_layers()

    predictions = {
        "unique_id": unique_ids,
    }

    # One prediction tensor per requested encoder layer.
    for (i, layer_index) in enumerate(layer_indexes):
      predictions["layer_output_%d" % i] = all_layers[layer_index]

    output_spec = tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
def convert_examples_to_features(examples, seq_length, tokenizer):
  """Loads a data file into a list of `InputBatch`s.

  Tokenizes each `InputExample`, truncates to fit `seq_length`, and builds
  the standard BERT input layout ([CLS] A [SEP] (B [SEP])) with matching
  segment ids and attention mask, zero-padded to `seq_length`.
  """

  features = []
  for (ex_index, example) in enumerate(examples):
    tokens_a = tokenizer.tokenize(example.text_a)

    tokens_b = None
    if example.text_b:
      tokens_b = tokenizer.tokenize(example.text_b)

    if tokens_b:
      # Modifies `tokens_a` and `tokens_b` in place so that the total
      # length is less than the specified length.
      # Account for [CLS], [SEP], [SEP] with "- 3"
      _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
    else:
      # Account for [CLS] and [SEP] with "- 2"
      if len(tokens_a) > seq_length - 2:
        tokens_a = tokens_a[0:(seq_length - 2)]

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0     0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    input_type_ids = []
    tokens.append("[CLS]")
    input_type_ids.append(0)
    for token in tokens_a:
      tokens.append(token)
      input_type_ids.append(0)
    tokens.append("[SEP]")
    input_type_ids.append(0)

    if tokens_b:
      for token in tokens_b:
        tokens.append(token)
        input_type_ids.append(1)
      tokens.append("[SEP]")
      input_type_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < seq_length:
      input_ids.append(0)
      input_mask.append(0)
      input_type_ids.append(0)

    assert len(input_ids) == seq_length
    assert len(input_mask) == seq_length
    assert len(input_type_ids) == seq_length

    # Log the first few examples to help debug tokenization issues.
    if ex_index < 5:
      tf.logging.info("*** Example ***")
      tf.logging.info("unique_id: %s" % (example.unique_id))
      tf.logging.info("tokens: %s" % " ".join(
          [tokenization.printable_text(x) for x in tokens]))
      tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
      tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
      tf.logging.info(
          "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))

    features.append(
        InputFeatures(
            unique_id=example.unique_id,
            tokens=tokens,
            input_ids=input_ids,
            input_mask=input_mask,
            input_type_ids=input_type_ids))
  return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def read_examples(input_file):
  """Read a list of `InputExample`s from an input file.

  Each line is either one text segment, or two segments separated by
  " ||| " (sentence-pair input). Sequential line numbers become the
  examples' unique ids.
  """
  examples = []
  unique_id = 0
  with tf.gfile.GFile(input_file, "r") as reader:
    while True:
      line = tokenization.convert_to_unicode(reader.readline())
      if not line:
        # readline() returns "" only at EOF; blank lines are "\n".
        break
      line = line.strip()
      text_a = None
      text_b = None
      # Greedy match: with multiple " ||| " separators the last one splits.
      m = re.match(r"^(.*) \|\|\| (.*)$", line)
      if m is None:
        text_a = line
      else:
        text_a = m.group(1)
        text_b = m.group(2)
      examples.append(
          InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
      unique_id += 1
  return examples
def main(_):
  """Run BERT over the input file and write per-token layer activations.

  Output is one JSON object per input line, each holding, for every token,
  the requested encoder layers' values rounded to 6 decimals.
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  layer_indexes = [int(x) for x in FLAGS.layers.split(",")]

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      master=FLAGS.master,
      tpu_config=tf.contrib.tpu.TPUConfig(
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  examples = read_examples(FLAGS.input_file)

  features = convert_examples_to_features(
      examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)

  # Map unique ids back to features so prediction results can be joined
  # with their tokens.
  unique_id_to_feature = {}
  for feature in features:
    unique_id_to_feature[feature.unique_id] = feature

  model_fn = model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      layer_indexes=layer_indexes,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      predict_batch_size=FLAGS.batch_size)

  input_fn = input_fn_builder(
      features=features, seq_length=FLAGS.max_seq_length)

  with codecs.getwriter("utf-8")(tf.gfile.Open(FLAGS.output_file,
                                               "w")) as writer:
    for result in estimator.predict(input_fn, yield_single_examples=True):
      unique_id = int(result["unique_id"])
      feature = unique_id_to_feature[unique_id]
      output_json = collections.OrderedDict()
      output_json["linex_index"] = unique_id
      all_features = []
      for (i, token) in enumerate(feature.tokens):
        all_layers = []
        for (j, layer_index) in enumerate(layer_indexes):
          layer_output = result["layer_output_%d" % j]
          layers = collections.OrderedDict()
          layers["index"] = layer_index
          layers["values"] = [
              round(float(x), 6) for x in layer_output[i:(i + 1)].flat
          ]
          all_layers.append(layers)
        # NOTE(review): this rebinds the outer `features` list; harmless here
        # because `features` is not used again after this loop, but fragile.
        features = collections.OrderedDict()
        features["token"] = token
        features["layers"] = all_layers
        all_features.append(features)
      output_json["features"] = all_features
      writer.write(json.dumps(output_json) + "\n")
if __name__ == "__main__":
  # These flags have no usable defaults; fail fast if any is missing.
  flags.mark_flag_as_required("input_file")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("init_checkpoint")
  flags.mark_flag_as_required("output_file")
  tf.app.run()
| 33.181384 | 82 | 0.674387 |
871ca1ef887cc3d80176cbe01cdace4c79918524 | 7,286 | py | Python | lapps-grid/tools/data_source/microbial_import_code.py | CDCgov/DCPC | c3fadef1bd6345e01a58afef051491d8ef6a7f93 | [
"Apache-2.0"
] | 6 | 2018-11-03T22:43:35.000Z | 2022-02-15T17:51:33.000Z | lapps-grid/tools/data_source/microbial_import_code.py | CDCgov/DCPC | c3fadef1bd6345e01a58afef051491d8ef6a7f93 | [
"Apache-2.0"
] | 3 | 2015-06-06T22:16:03.000Z | 2015-11-12T00:22:45.000Z | lapps-grid/tools/data_source/microbial_import_code.py | CDCgov/DCPC | c3fadef1bd6345e01a58afef051491d8ef6a7f93 | [
"Apache-2.0"
] | 10 | 2017-04-10T21:40:22.000Z | 2022-02-21T16:50:10.000Z | from __future__ import print_function
from shutil import copyfile
from galaxy import tools
def load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' ):
    """Parse ``microbial_data.loc`` under *GALAXY_DATA_INDEX_DIR*.

    The .loc file holds three record types, one per line, fields separated
    by *sep*:

    * ``ORG``  - organism: number, name, kingdom, group, chromosome
      accessions, info URL, link site.
    * ``CHR``  - chromosome of an organism: accession, name, length, GI,
      GenBank id, info URL.
    * ``DATA`` - data file for a chromosome: uid, feature, file type, path.

    Malformed or unknown lines are silently skipped (historical behaviour).

    Returns:
        dict: ``{kingdom: {org_num: org_info}}`` where ``org_info`` carries
        the ORG fields plus a ``'chrs'`` mapping of accession to chromosome
        info, each chromosome optionally holding a ``'data'`` dict.
    """
    # FIXME: this function is duplicated in the DynamicOptions class. It is used here only to
    # set data.name in exec_after_process().
    microbe_info = {}
    orgs = {}
    filename = "%s/microbial_data.loc" % GALAXY_DATA_INDEX_DIR
    # 'with' guarantees the handle is closed; the previous version iterated
    # over a bare open() and leaked the file object.
    with open( filename ) as loc_file:
        for line in loc_file:
            line = line.rstrip( '\r\n' )
            if not line or line.startswith( '#' ):
                continue
            fields = line.split( sep )
            # Short lines raise IndexError from pop(); DATA lines naming an
            # unknown chromosome raise KeyError.  Both are skipped, as the
            # old bare 'except' did, but other errors now propagate.
            try:
                info_type = fields.pop(0).upper()
                if info_type == "ORG":
                    # ORG <num> <name> <kingdom> <group> <chromosomes> <info_url> <link_site>
                    org_num = fields.pop(0)
                    name = fields.pop(0)
                    kingdom = fields.pop(0)
                    group = fields.pop(0)
                    chromosomes = fields.pop(0)
                    info_url = fields.pop(0)
                    link_site = fields.pop(0)
                    # Keep any chromosomes registered before this ORG line.
                    org = orgs.setdefault( org_num, { 'chrs': {} } )
                    org['name'] = name
                    org['kingdom'] = kingdom
                    org['group'] = group
                    org['chromosomes'] = chromosomes
                    org['info_url'] = info_url
                    org['link_site'] = link_site
                elif info_type == "CHR":
                    # CHR <org_num> <accession> <name> <length> <gi> <gb> <info_url>
                    org_num = fields.pop(0)
                    chr_acc = fields.pop(0)
                    chrom = {
                        'name': fields.pop(0),
                        'length': fields.pop(0),
                        'gi': fields.pop(0),
                        'gb': fields.pop(0),
                        'info_url': fields.pop(0),
                    }
                    orgs.setdefault( org_num, { 'chrs': {} } )['chrs'][ chr_acc ] = chrom
                elif info_type == "DATA":
                    # DATA <uid> <org_num> <accession> <feature> <filetype> <path>
                    uid = fields.pop(0)
                    org_num = fields.pop(0)
                    chr_acc = fields.pop(0)
                    data = {
                        'feature': fields.pop(0),
                        'filetype': fields.pop(0),
                        'path': fields.pop(0),
                    }
                    # KeyError (skipped) when the chromosome was never declared.
                    chrom = orgs.setdefault( org_num, { 'chrs': {} } )['chrs'][ chr_acc ]
                    chrom.setdefault( 'data', {} )[ uid ] = data
                else:
                    continue
            except ( IndexError, KeyError ):
                continue
    # Group organisms by kingdom; first entry for an org_num wins.
    for org_num, org in orgs.items():
        if 'kingdom' not in org:
            # CHR/DATA lines without a matching ORG line: previously this
            # raised KeyError out of the loader; skip such stubs instead.
            continue
        kingdom_info = microbe_info.setdefault( org['kingdom'], {} )
        kingdom_info.setdefault( org_num, org )
    return microbe_info
# post processing, set build for data and add additional data to history
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
    """Galaxy post-job hook: rename the primary output dataset and attach
    any extra files the tool reported on stdout.

    The tool emits tab-separated directives on stdout:
    ``#File1 <desc> <chr> <dbkey> <filetype>`` updates the primary output;
    ``#NewFile <desc> <chr> <dbkey> <filepath> <filetype>`` copies an extra
    file into the history as a new dataset.
    """
    base_dataset = list(out_data.items())[0][1]
    history = base_dataset.history
    if history is None:
        print("unknown history!")
        return
    kingdom = param_dict.get( 'kingdom', None )
    org = param_dict.get( 'org', None )
    # if not (kingdom or group or org):
    if not (kingdom or org):
        # NOTE(review): no 'return' here — execution falls through with
        # kingdom/org possibly None, which would KeyError below; confirm
        # whether a return was intended.
        print("Parameters are not available.")
    # workflow passes galaxy.tools.parameters.basic.UnvalidatedValue instead of values
    if isinstance( kingdom, tools.parameters.basic.UnvalidatedValue ):
        kingdom = kingdom.value
    if isinstance( org, tools.parameters.basic.UnvalidatedValue ):
        org = org.value
    GALAXY_DATA_INDEX_DIR = app.config.tool_data_path
    microbe_info = load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' )
    split_stdout = stdout.split("\n")
    basic_name = ""
    for line in split_stdout:
        fields = line.split("\t")
        if fields[0] == "#File1":
            description = fields[1]
            # NOTE(review): 'chr' shadows the builtin of the same name
            # within this loop body.
            chr = fields[2]
            dbkey = fields[3]
            file_type = fields[4]
            name, data = list(out_data.items())[0]
            data.set_size()
            basic_name = data.name
            data.name = data.name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] + " for " + microbe_info[kingdom][org]['name'] + ":" + chr + ")"
            data.dbkey = dbkey
            data.info = data.name
            data = app.datatypes_registry.change_datatype( data, file_type )
            data.init_meta()
            data.set_peek()
            app.model.context.add( data )
            app.model.context.flush()
        elif fields[0] == "#NewFile":
            description = fields[1]
            chr = fields[2]
            dbkey = fields[3]
            filepath = fields[4]
            file_type = fields[5]
            newdata = app.model.HistoryDatasetAssociation( create_dataset=True, sa_session=app.model.context )  # This import should become a library
            newdata.set_size()
            newdata.extension = file_type
            newdata.name = basic_name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] + " for " + microbe_info[kingdom][org]['name'] + ":" + chr + ")"
            app.model.context.add( newdata )
            app.model.context.flush()
            app.security_agent.copy_dataset_permissions( base_dataset.dataset, newdata.dataset )
            history.add_dataset( newdata )
            app.model.context.add( history )
            app.model.context.flush()
            try:
                copyfile(filepath, newdata.file_name)
                newdata.info = newdata.name
                newdata.state = newdata.states.OK
            # NOTE(review): bare except — any copy failure (permissions,
            # disk full, bad path) is reported as a missing file.
            except:
                newdata.info = "The requested file is missing from the system."
                newdata.state = newdata.states.ERROR
            newdata.dbkey = dbkey
            newdata.init_meta()
            newdata.set_peek()
            app.model.context.flush()
f254352957878f54717e5bb9075bed0a2806100b | 660 | py | Python | Aulas/app001/ex09-texvariable_stringvar.py | JonasJF360/Curso_Tkinter | 7a72187a03ef9f7f9d7f760d2cd1434e0287a266 | [
"MIT"
] | null | null | null | Aulas/app001/ex09-texvariable_stringvar.py | JonasJF360/Curso_Tkinter | 7a72187a03ef9f7f9d7f760d2cd1434e0287a266 | [
"MIT"
] | null | null | null | Aulas/app001/ex09-texvariable_stringvar.py | JonasJF360/Curso_Tkinter | 7a72187a03ef9f7f9d7f760d2cd1434e0287a266 | [
"MIT"
] | null | null | null | from tkinter import *
# Demo: the difference between Label's 'text' and 'textvariable' options
# when used with a StringVar.
# NOTE(review): every labelN below is None, because .pack() (which returns
# None) is chained onto the Label constructor.
janela = Tk()
janela.title('Textvariable e Stringvar')
janela.geometry('500x500')

# Shared observable string value.
texto = StringVar()
texto.set('Olá, mundo!')

label1 = Label(
    janela,
    text=texto,  # problem: 'text' expects a plain string, so the StringVar's
                 # repr is displayed and later texto.set() calls are ignored
    font='Arial 20',
    bg='red',
    fg='white'
).pack()

label2 = Label(
    janela,
    textvariable=texto,  # tracks the StringVar: updates whenever it changes
    font='Arial 20',
    bg='blue',
    fg='white'
).pack()

label3 = Label(
    janela,
    textvariable=texto,
    font='Arial 20',
    bg='blue',
    fg='white'
).pack()

label4 = Label(
    janela,
    textvariable=texto,
    font='Arial 20',
    bg='blue',
    fg='white'
).pack()

# The three textvariable labels now show 'Novo texto'; label1 does not change.
texto.set('Novo texto')
janela.mainloop()
53d91c5680fceeec4492e81988c68a0bbe2fe74f | 111 | py | Python | problem/01000~09999/02605/2605.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/02605/2605.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/02605/2605.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | arr=[];input()
for n,i in enumerate(map(int,input().split())):arr.insert(n-i,n+1)
print(' '.join(map(str,arr))) | 37 | 66 | 0.63964 |
85fdb3cd01376fe4f677a8865e8b037b416b3276 | 9,376 | py | Python | swig/python/osgeo/gnm.py | FeU-aKlos/gdal | bba6781133815248c9329842d365f8812b74c33f | [
"Apache-2.0"
] | 3,100 | 2015-01-02T10:33:40.000Z | 2022-03-31T02:06:51.000Z | swig/python/osgeo/gnm.py | FeU-aKlos/gdal | bba6781133815248c9329842d365f8812b74c33f | [
"Apache-2.0"
] | 3,496 | 2015-01-06T16:53:30.000Z | 2022-03-31T20:18:51.000Z | swig/python/osgeo/gnm.py | FeU-aKlos/gdal | bba6781133815248c9329842d365f8812b74c33f | [
"Apache-2.0"
] | 2,036 | 2015-01-08T20:22:12.000Z | 2022-03-31T10:24:08.000Z | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _gnm
else:
import _gnm
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
    # SWIG-generated: show the wrapped C++ pointer ("this") when present,
    # falling back to an empty marker if the proxy is uninitialized.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    # SWIG-generated: wrap a low-level setter so instances only accept
    # writes to the special SWIG slots ('this'/'thisown') or to existing
    # property attributes — new instance attributes are rejected.
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    # SWIG-generated: class-level analogue of the instance wrapper above —
    # only existing, non-property class attributes may be reassigned.
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    def wrapper(cls):
        # Rebuild the class with the requested metaclass, preserving name,
        # bases and the original namespace.
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Route class attribute assignment through the SWIG guard above.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
# Query the current "use exceptions" flag from the C extension layer.
def GetUseExceptions(*args):
    r"""GetUseExceptions() -> int"""
    return _gnm.GetUseExceptions(*args)
# Make GDAL/GNM errors raise Python exceptions instead of returning codes.
def UseExceptions(*args):
    r"""UseExceptions()"""
    return _gnm.UseExceptions(*args)
# Revert to the non-exception error-reporting behaviour.
def DontUseExceptions(*args):
    r"""DontUseExceptions()"""
    return _gnm.DontUseExceptions(*args)
from . import ogr
from . import osr
# Graph algorithm identifiers re-exported from the C extension
# (GNMGraphAlgorithmType).
GATDijkstraShortestPath = _gnm.GATDijkstraShortestPath
GATKShortestPath = _gnm.GATKShortestPath
GATConnectedComponents = _gnm.GATConnectedComponents

# Edge direction constants re-exported from the C extension (GNMDirection).
GNM_EDGE_DIR_BOTH = _gnm.GNM_EDGE_DIR_BOTH
GNM_EDGE_DIR_SRCTOTGT = _gnm.GNM_EDGE_DIR_SRCTOTGT
GNM_EDGE_DIR_TGTTOSRC = _gnm.GNM_EDGE_DIR_TGTTOSRC
# Downcast a generic MajorObject handle to a Network proxy (or None on
# failure, per usual SWIG cast semantics — confirm in the C wrapper).
def CastToNetwork(*args):
    r"""CastToNetwork(MajorObject base) -> Network"""
    return _gnm.CastToNetwork(*args)
# Downcast a generic MajorObject handle to a GenericNetwork proxy.
def CastToGenericNetwork(*args):
    r"""CastToGenericNetwork(MajorObject base) -> GenericNetwork"""
    return _gnm.CastToGenericNetwork(*args)
# NOTE: SWIG-generated proxy class — regenerate via SWIG rather than
# editing by hand.  Every method forwards to the _gnm C extension.
class Network(ogr.MajorObject):
    r"""Proxy of C++ GNMNetworkShadow class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # Abstract on the Python side: instances come from the C layer.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _gnm.delete_Network

    def ReleaseResultSet(self, *args):
        r"""ReleaseResultSet(Network self, Layer layer)"""
        return _gnm.Network_ReleaseResultSet(self, *args)

    def GetVersion(self, *args):
        r"""GetVersion(Network self) -> int"""
        return _gnm.Network_GetVersion(self, *args)

    def GetName(self, *args):
        r"""GetName(Network self) -> char const *"""
        return _gnm.Network_GetName(self, *args)

    def GetFeatureByGlobalFID(self, *args):
        r"""GetFeatureByGlobalFID(Network self, GIntBig GFID) -> Feature"""
        return _gnm.Network_GetFeatureByGlobalFID(self, *args)

    def GetPath(self, *args, **kwargs):
        r"""GetPath(Network self, GIntBig nStartFID, GIntBig nEndFID, GNMGraphAlgorithmType eAlgorithm, char ** options=None) -> Layer"""
        return _gnm.Network_GetPath(self, *args, **kwargs)

    def DisconnectAll(self, *args):
        r"""DisconnectAll(Network self) -> CPLErr"""
        return _gnm.Network_DisconnectAll(self, *args)

    def GetProjection(self, *args):
        r"""GetProjection(Network self) -> char const *"""
        return _gnm.Network_GetProjection(self, *args)

    def GetProjectionRef(self, *args):
        r"""GetProjectionRef(Network self) -> char const *"""
        return _gnm.Network_GetProjectionRef(self, *args)

    def GetFileList(self, *args):
        r"""GetFileList(Network self) -> char **"""
        return _gnm.Network_GetFileList(self, *args)

    def CreateLayer(self, *args, **kwargs):
        r"""CreateLayer(Network self, char const * name, SpatialReference srs=None, OGRwkbGeometryType geom_type=wkbUnknown, char ** options=None) -> Layer"""
        return _gnm.Network_CreateLayer(self, *args, **kwargs)

    def CopyLayer(self, *args, **kwargs):
        r"""CopyLayer(Network self, Layer src_layer, char const * new_name, char ** options=None) -> Layer"""
        return _gnm.Network_CopyLayer(self, *args, **kwargs)

    def DeleteLayer(self, *args):
        r"""DeleteLayer(Network self, int index) -> OGRErr"""
        return _gnm.Network_DeleteLayer(self, *args)

    def GetLayerCount(self, *args):
        r"""GetLayerCount(Network self) -> int"""
        return _gnm.Network_GetLayerCount(self, *args)

    def GetLayerByIndex(self, *args):
        r"""GetLayerByIndex(Network self, int index=0) -> Layer"""
        return _gnm.Network_GetLayerByIndex(self, *args)

    def GetLayerByName(self, *args):
        r"""GetLayerByName(Network self, char const * layer_name) -> Layer"""
        return _gnm.Network_GetLayerByName(self, *args)

    def TestCapability(self, *args):
        r"""TestCapability(Network self, char const * cap) -> bool"""
        return _gnm.Network_TestCapability(self, *args)

    def StartTransaction(self, *args, **kwargs):
        r"""StartTransaction(Network self, int force=FALSE) -> OGRErr"""
        return _gnm.Network_StartTransaction(self, *args, **kwargs)

    def CommitTransaction(self, *args):
        r"""CommitTransaction(Network self) -> OGRErr"""
        return _gnm.Network_CommitTransaction(self, *args)

    def RollbackTransaction(self, *args):
        r"""RollbackTransaction(Network self) -> OGRErr"""
        return _gnm.Network_RollbackTransaction(self, *args)

# Register Network in _gnm:
_gnm.Network_swigregister(Network)
class GenericNetwork(Network):
r"""Proxy of C++ GNMGenericNetworkShadow class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _gnm.delete_GenericNetwork
def ConnectFeatures(self, *args):
r"""ConnectFeatures(GenericNetwork self, GIntBig nSrcFID, GIntBig nTgtFID, GIntBig nConFID, double dfCost, double dfInvCost, GNMDirection eDir) -> CPLErr"""
return _gnm.GenericNetwork_ConnectFeatures(self, *args)
def DisconnectFeatures(self, *args):
r"""DisconnectFeatures(GenericNetwork self, GIntBig nSrcFID, GIntBig nTgtFID, GIntBig nConFID) -> CPLErr"""
return _gnm.GenericNetwork_DisconnectFeatures(self, *args)
def DisconnectFeaturesWithId(self, *args):
r"""DisconnectFeaturesWithId(GenericNetwork self, GIntBig nFID) -> CPLErr"""
return _gnm.GenericNetwork_DisconnectFeaturesWithId(self, *args)
def ReconnectFeatures(self, *args):
r"""ReconnectFeatures(GenericNetwork self, GIntBig nSrcFID, GIntBig nTgtFID, GIntBig nConFID, double dfCost, double dfInvCost, GNMDirection eDir) -> CPLErr"""
return _gnm.GenericNetwork_ReconnectFeatures(self, *args)
def CreateRule(self, *args):
r"""CreateRule(GenericNetwork self, char const * pszRuleStr) -> CPLErr"""
return _gnm.GenericNetwork_CreateRule(self, *args)
def DeleteAllRules(self, *args):
r"""DeleteAllRules(GenericNetwork self) -> CPLErr"""
return _gnm.GenericNetwork_DeleteAllRules(self, *args)
def DeleteRule(self, *args):
r"""DeleteRule(GenericNetwork self, char const * pszRuleStr) -> CPLErr"""
return _gnm.GenericNetwork_DeleteRule(self, *args)
def GetRules(self, *args):
r"""GetRules(GenericNetwork self) -> char **"""
return _gnm.GenericNetwork_GetRules(self, *args)
def ConnectPointsByLines(self, *args, **kwargs):
r"""ConnectPointsByLines(GenericNetwork self, char ** papszLayerList, double dfTolerance, double dfCost, double dfInvCost, GNMDirection eDir) -> CPLErr"""
return _gnm.GenericNetwork_ConnectPointsByLines(self, *args, **kwargs)
def ChangeBlockState(self, *args):
r"""ChangeBlockState(GenericNetwork self, GIntBig nFID, bool bIsBlock) -> CPLErr"""
return _gnm.GenericNetwork_ChangeBlockState(self, *args)
    def ChangeAllBlockState(self, *args):
        r"""ChangeAllBlockState(GenericNetwork self, bool bIsBlock=False) -> CPLErr"""
        # Auto-generated SWIG wrapper; delegates to the native _gnm extension.
        return _gnm.GenericNetwork_ChangeAllBlockState(self, *args)
# Register GenericNetwork in _gnm so the native module returns this proxy class:
_gnm.GenericNetwork_swigregister(GenericNetwork)
| 38.113821 | 166 | 0.696246 |
3e8606d5b7fc3c75f054f7e4500fb438d3d013ca | 5,363 | py | Python | covid-19-tests-owid.py | flexiodata/functions-covid-19-feed | 1922ac245016488e89614019be9e490281628ec8 | [
"MIT"
] | 1 | 2021-06-18T17:55:47.000Z | 2021-06-18T17:55:47.000Z | covid-19-tests-owid.py | flexiodata/functions-covid-19-feed | 1922ac245016488e89614019be9e490281628ec8 | [
"MIT"
] | null | null | null | covid-19-tests-owid.py | flexiodata/functions-covid-19-feed | 1922ac245016488e89614019be9e490281628ec8 | [
"MIT"
] | 1 | 2021-06-18T17:56:26.000Z | 2021-06-18T17:56:26.000Z |
# ---
# name: covid-19-tests-owid
# deployed: true
# config: index
# title: Covid-19 Tests (Our World In Data)
# description: Returns data about Covid-19 tests from the Our World In Data Covid-19 GitHub Repository
# params:
# - name: properties
# type: array
# description: The properties to return, given as a string or array; defaults to all properties; see "Returns" for available properties
# required: false
# - name: filter
# type: array
# description: Search query to determine the rows to return, given as a string or array
# required: false
# returns:
# - name: entity
# type: string
# description: The country/entity performing the tests
# - name: date
# type: string
# description: The date of the tests
# - name: total_cumulative
# type: string
# description: The cumulative total number of tests performed
# - name: total_cumulative_daily_change
# type: integer
# description: The daily change in cumulative total number of tests performed
# - name: total_cumulative_per_thousand
# type: number
# description: Cumulative total per thousand
# - name: total_cumulative_per_thousand_daily_change
# type: number
# description: The daily change in cumulative total per thousand
# - name: three_day_rolling_mean_daily_change
# type: number
# description: The three-day rolling mean daily change
# - name: three_day_rolling_mean_daily_change_per_thousand
# type: number
# description: The three-day rolling mean daily change per thousand
# - name: source_url
# type: string
# description: The source url for the information
# - name: source_label
# type: string
# description: The source label for the information
# - name: notes
# type: string
# description: Notes for the information
# examples:
# - '"entity,date,total_cumulative"'
# - '"", "+CDC +\"United States\""'
# notes: |-
# Data from Our World In Data, based on data collected by the Our World in Data team from official reports
# Additional Resources:
# * Our World In Data Covid-19 GitHub Repo Source Data: \
# https://github.com/owid/covid-19-data/tree/master/public/data
# * Our World In Data Covid-19 Tracking Page: \
# https://ourworldindata.org/coronavirus
# * Our World In Data Covid-19 Testing Sources: \
# https://ourworldindata.org/covid-testing#source-information-country-by-country
# ---
import csv
import json
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from contextlib import closing
from collections import OrderedDict
from time import sleep
def flex_handler(flex):
    """Flex.io entry point: stream the OWID testing dataset as ndjson.

    Each CSV row becomes one JSON object per line so the output can be
    bulk-loaded into an index.
    """
    # configuration
    url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-all-observations.csv'

    # newline-delimited JSON is what the index loader expects
    flex.output.content_type = 'application/x-ndjson'

    for record in get_data(url):
        flex.output.write(json.dumps(record) + "\n")
def get_data(url):
    """Stream the CSV at *url* and yield one normalized dict per data row."""

    request_headers = {
        'User-Agent': 'Flex.io Covid-19 Integration'
    }
    response = requests_retry_session().get(url, stream=True, headers=request_headers)

    # `closing` guarantees the streamed connection is released when done.
    with closing(response) as r:
        decoded_lines = (raw_line.decode('utf-8') for raw_line in r.iter_lines())
        for csv_row in csv.DictReader(decoded_lines, delimiter=',', quotechar='"'):
            yield get_item(csv_row)
def get_item(row):
    """Normalize one raw CSV row into the ordered output record.

    Keys are lower-cased, numeric columns are coerced with to_number()
    (non-numeric cells pass through unchanged), and the output key order
    matches the documented schema in the file header.
    """
    # convert keys to lowercase so lookups are case-insensitive
    row = {k.lower(): v for k, v in row.items()}

    item = OrderedDict()
    item['entity'] = row.get('entity', '')
    item['date'] = row.get('date', '')

    # Map output field -> source CSV column for every numeric measure.
    # (Replaces six hand-written, inconsistently formatted assignments.)
    numeric_fields = (
        ('total_cumulative', 'cumulative total'),
        ('total_cumulative_daily_change', 'daily change in cumulative total'),
        ('total_cumulative_per_thousand', 'cumulative total per thousand'),
        ('total_cumulative_per_thousand_daily_change', 'daily change in cumulative total per thousand'),
        ('three_day_rolling_mean_daily_change', '3-day rolling mean daily change'),
        ('three_day_rolling_mean_daily_change_per_thousand', '3-day rolling mean daily change per thousand'),
    )
    for field, column in numeric_fields:
        item[field] = to_number(row.get(column, ''))

    item['source_url'] = row.get('source url', '')
    item['source_label'] = row.get('source label', '')
    item['notes'] = row.get('notes', '')
    return item
def requests_retry_session(
    retries=3,
    backoff_factor=0.3,
    status_forcelist=(429, 500, 502, 503, 504),
    session=None,
):
    """Return *session* (or a fresh one) configured with urllib3 retries.

    Retries connect/read failures and the given HTTP status codes with
    exponential backoff, on both http:// and https:// mounts.
    """
    session = session or requests.Session()
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    retry_adapter = HTTPAdapter(max_retries=retry_policy)
    for scheme in ('http://', 'https://'):
        session.mount(scheme, retry_adapter)
    return session
def to_number(value):
    """Best-effort conversion of a CSV cell to float.

    Returns float(value) when the cell parses as a number; otherwise
    returns the original value unchanged (e.g. '' or free text).
    """
    try:
        return float(value)
    except (TypeError, ValueError):
        # TypeError guards non-string inputs such as None, which the
        # original `except ValueError` would have let escape.
        return value
| 36.732877 | 139 | 0.688421 |
b86eb5cd26e2228a20ba422fba27298974bc3f24 | 5,642 | py | Python | mode1.py | Charlie0921/CS131_FinalProject_Library | 9000ab14c17e59aecdc98c6acb6d4e70a421f5eb | [
"MIT"
] | null | null | null | mode1.py | Charlie0921/CS131_FinalProject_Library | 9000ab14c17e59aecdc98c6acb6d4e70a421f5eb | [
"MIT"
] | null | null | null | mode1.py | Charlie0921/CS131_FinalProject_Library | 9000ab14c17e59aecdc98c6acb6d4e70a421f5eb | [
"MIT"
] | null | null | null | import init as init
import mode5 as m5
# Retrieve Book dataframe from 'booklist.txt' (see init.read for the format).
Book = init.read()
# Update Book dataframe & Create Student dataframe through 'librarylog.txt'.
# NOTE: runs at import time, so importing this module performs file I/O.
Student = []
init.update(Book, Student)
#######################################check fine#########################################################
def checkFine(Student, name, day):
    """Return True when *name* has a nonzero pending fine as of *day*.

    Only the FIRST entry matching the student's name is consulted,
    preserving the original first-match semantics.
    """
    finelist = m5.getPendingFines(day, Student)
    # Each fine entry is assumed to be [name, amount] — matches how the
    # original indexed fine[0]/fine[1].
    entry = next((fine for fine in finelist if fine[0] == name), None)
    return entry is not None and entry[1] != 0
######################################################borrow########################################
# calculate number of books after borrowing
def borrow(check, borrowTime, z):
    """Apply the z-th borrow interval to the per-day availability table.

    check: list of [day, copies] pairs; mutated in place.
    borrowTime: list of [first_day, last_day] inclusive intervals.
    z: index of the interval in borrowTime to apply.
    Returns the same (mutated) check list, as callers reassign it.
    """
    # range membership for ints is O(1); interval is inclusive on both ends
    window = range(borrowTime[z][0], borrowTime[z][1] + 1)
    for entry in check:
        # A day already at 0 copies stays at 0 (never goes negative).
        if entry[1] != 0 and entry[0] in window:
            entry[1] -= 1
    return check
######################################check number books#####################################################
def findBookRow(Book, name):
    """Return the first row of *Book* whose title (column 0) equals *name*.

    Returns None when no row matches — the same as the original's implicit
    fall-through, but now explicit.
    """
    return next((line for line in Book if line[0] == name), None)
def checkNumberBooks(Bookdata):
    """Build a per-day [day, available_copies] table for one book row.

    Bookdata is a row from the Book table: column 1 holds [day, copies]
    change events and column 3 holds [first_day, last_day] borrow intervals
    — TODO confirm against init.read()'s format.
    """
    borrowTime = Bookdata[3]
    check = []
    # Days run from 1 through the last day recorded in the library log.
    largest = init.getLogDate()
    numberRange = list(range(1, largest+1))
    # [days,number of books]
    for number in numberRange:
        # NOTE(review): numberRange has unique values, so count(number) is
        # always 1 — every day starts with a placeholder count of 1.
        count = numberRange.count(number)
        check.append([number, count])
    changeDate = []
    for item in Bookdata[1]:
        changeDate.append(item)
    # change copies of books in 2d list: a recorded change on a day sets
    # that day's count; otherwise the previous day's count carries forward.
    j = 0
    while j < len(check):
        i = 0
        while i < len(changeDate):
            if check[j][0] == changeDate[i][0]:
                check[j][1] = changeDate[i][1]
                break
            else:
                # NOTE(review): for j == 0 this reads check[-1] (the last
                # row) due to Python's negative indexing — verify intended.
                check[j][1] = check[j-1][1]
            i += 1
        j += 1
    # calculate number of books after borrowing: subtract one copy per
    # borrow interval via borrow().
    for z in range(0, len(borrowTime)):
        check = borrow(check, borrowTime, z)
    return check
#####################################Check available using dates#######################################
# check if the person can borrow base on the number of book(if 0 books can't borrow)
def checkAvailableDates(check, start_date, num_of_days):
    """Return True when every tracked day in the requested window has copies left.

    check: list of [day, copies] pairs from checkNumberBooks().
    The window is inclusive on both ends ([start, start + num_of_days]),
    matching the original range(start_date, end_date + 1).
    Days absent from *check* do not block availability, as before.
    """
    end_date = int(start_date + num_of_days)
    # The original's single-element day/numberBook lists and break-only-inner
    # loop reduce to exactly this predicate.
    return all(
        entry[1] != 0
        for entry in check
        if start_date <= entry[0] <= end_date
    )
###################################Check student books################
# Module-level accumulator shared by singleStudent/dayList/canBorrow.
# NOTE(review): never reset between checks, so repeated calls accumulate
# stale records — confirm the script is only run once per request.
singleStudentLst = []
def singleStudent(Student, student_name, singleStudentLst):
    """Append every record belonging to *student_name* onto *singleStudentLst*.

    The target list is mutated in place (records keep their original order)
    and also returned for convenience.
    """
    matches = (record for record in Student if record[0] == student_name)
    singleStudentLst.extend(matches)
    return singleStudentLst
# Latest day recorded in the library log (evaluated once at import time).
largestBorrowDay = init.getLogDate()
# Per-day [day, books_held] accumulator filled by dayList()/canBorrow().
# NOTE(review): module-level mutable state, never reset between checks.
borrowDayList = []
def dayList(singleStudentLst):
    """Seed the module-level borrowDayList with [day, 0] rows.

    Covers every day from the student's earliest borrow date (column 2 of
    each record) through the global largestBorrowDay, inclusive.
    Raises IndexError if the student has no records (borrow[0] on empty).
    """
    # NOTE(review): local name `borrow` shadows the module-level borrow()
    # function inside this body — harmless here, but confusing.
    borrow = []
    for i in range(0, len(singleStudentLst)):
        whenBorrow = singleStudentLst[i][2]
        borrow.append(whenBorrow)
    borrow.sort()
    smallestBorrowDay = borrow[0]
    # Appends to the shared module-level list rather than returning a value.
    for i in range(smallestBorrowDay, largestBorrowDay + 1):
        borrowDayList.append([i, 0])
# change the numbers of the list
def canBorrow(borrowDayList):
    """Accumulate, per tracked day, how many of the student's loans span it.

    Reads the module-level singleStudentLst; each record's loan covers the
    inclusive interval [record[2], record[4]] (borrow day .. return day).
    Mutates *borrowDayList* entries ([day, count]) in place.

    NOTE(review): the original reused loop variable `i` for both the record
    loop and the day loop; renamed here for clarity (Python's range iterator
    made the original work, but only by accident).
    """
    for record in singleStudentLst:
        when_borrow = record[2]
        when_return = record[4]
        for entry in borrowDayList:
            # Interval test replaces the original list(range(...)) membership.
            if when_borrow <= entry[0] <= when_return:
                entry[1] += 1
def checkIfBorrow(borrowDayList):
    """Return True when the student holds fewer than 3 books on the final day.

    borrowDayList: list of [day, books_held] pairs; only the last entry
    (today's count) decides eligibility. Raises IndexError on an empty
    list, as the original did.
    """
    return borrowDayList[-1][1] < 3
# main
def checkStudentBooks(Student, student_name):
    """Return True when the student currently holds fewer than 3 books.

    Pipelines the module-level helpers: collect the student's records,
    seed the per-day table, tally overlapping loans, then test the last day.
    NOTE(review): relies on the shared singleStudentLst/borrowDayList
    globals, which are never cleared — a second call in the same process
    would double-count. Confirm single-shot usage.
    """
    singleStudent(Student, student_name, singleStudentLst)
    dayList(singleStudentLst)
    canBorrow(borrowDayList)
    return checkIfBorrow(borrowDayList)
#########################################Main######################################
def checkAvailable(student_name, start_date, num_of_days, book_name, Book, Student):
    """Print whether *student_name* may borrow *book_name* for the window.

    All three conditions must hold: no pending fine, copies available on
    every requested day, and fewer than 3 books currently held.
    """
    # 1. A pending fine blocks borrowing outright.
    hasFine = checkFine(Student, student_name, start_date)
    # 2. The title must have copies on every day of the requested window.
    data = findBookRow(Book, book_name)
    check = checkNumberBooks(data)
    hasBook = checkAvailableDates(check, start_date, num_of_days)
    # 3. The student must hold fewer than 3 books.
    yesBook = checkStudentBooks(Student, student_name)
    # Flattened from the original nested ifs; same truth table.
    if not hasFine and hasBook and yesBook:
        print("You can borrow")
    else:
        print("You cannot borrow")
| 28.351759 | 109 | 0.56044 |
75fb1aa1b55f0df4e03617e086ead4d3bd91c136 | 2,003 | py | Python | api/routers/bandwidth.py | ShayBox/API | d141b3d148d93809afcfbcd311607e0835fd671f | [
"MIT"
] | 2 | 2019-08-05T18:13:13.000Z | 2021-04-01T12:39:29.000Z | api/routers/bandwidth.py | ShayBox/API | d141b3d148d93809afcfbcd311607e0835fd671f | [
"MIT"
] | 1 | 2019-10-30T01:29:24.000Z | 2019-10-30T01:29:24.000Z | api/routers/bandwidth.py | ShayBox/API | d141b3d148d93809afcfbcd311607e0835fd671f | [
"MIT"
] | null | null | null | from ..configuration import settings
from datetime import datetime
from fastapi import APIRouter
from fastapi_cache.decorator import cache
from fastapi.params import Query
from requests import Session
from starlette.responses import Response
# https://stackoverflow.com/a/60451840/17289156
class NoRebuildAuthSession(Session):
    """requests.Session that keeps the Authorization header across redirects."""
    def rebuild_auth(self, prepared_request, response) -> None:
        """
        No code here means requests will always preserve the Authorization
        header when redirected.
        Be careful not to leak your credentials to untrusted hosts!
        """
# Module-level session: reuses connections and keeps auth across redirects.
session = NoRebuildAuthSession()
# All endpoints below are mounted under /bandwidth.
router = APIRouter(
    prefix="/bandwidth",
    tags=["bandwidth"],
)
def get_data(endpoint: str) -> dict:
    """Fetch *endpoint* from the Buckeye usage API and return the parsed JSON."""
    base_url = "https://api.buckeyebroadband.com/usage/v2"
    modem_root = (
        f"{base_url}/users/{settings.buckeye_user_id}"
        f"/modems/{settings.buckeye_serial_number}/"
    )
    auth_headers = {"Authorization": settings.buckeye_api_key}
    return session.get(modem_root + endpoint, headers=auth_headers).json()
# First billing cycle on record; whole months elapsed since then bound the
# `num` query parameter below. NOTE(review): evaluated once at import, so a
# long-lived process never widens the bound — confirm acceptable.
start = datetime(2020, 2, 13)
end = datetime.now()
diff = (end.year - start.year) * 12 + (end.month - start.month)
@router.get("/categories")
@cache(expire=60 * 60 * 24)  # cached for 24h
async def get_categories(
    num: int = Query(0, ge=0, le=diff, description="Number of month to query")
):
    """Usage broken down by category for the cycle *num* months back."""
    # presumably the API returns cycles newest-first and indexes by offset
    # — TODO confirm the ["data"][num] indexing against a live response.
    usage = get_data(f"cycles/{num}/usage")["data"][num]
    begin, end = usage["beginCycle"], usage["endCycle"]
    return get_data(f"usageDetails/{begin}/{end}")
@router.get("/cycles")
@cache(expire=60 * 60 * 24)  # cached for 24h
async def get_cycles(
    num: int = Query(11, ge=0, le=diff, description="Number of months to query")
):
    """Raw per-cycle usage for the last *num* months (default: 11)."""
    return get_data(f"cycles/{num}/usage")
@router.get("/daily")
@cache(expire=60 * 60 * 24)  # cached for 24h
async def get_daily(
    num: int = Query(0, ge=0, le=diff, description="Number of month to query")
):
    """Day-by-day usage for the cycle *num* months back."""
    # Same cycle lookup pattern as get_categories; see the note there about
    # the ["data"][num] indexing assumption.
    usage = get_data(f"cycles/{num}/usage")["data"][num]
    begin, end = usage["beginCycle"], usage["endCycle"]
    return get_data(f"dailyUsage/{begin}/{end}")
152a1a76f4e0b182f88efa6bc150ef5e3b2de65c | 6,442 | py | Python | examples/pwr_run/checkpointing/debug/k80_only/job20.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/debug/k80_only/job20.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/debug/k80_only/job20.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.0005
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_k80_only/' + job_name + '*'
total_epochs = 44
starting_epoch = 0
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: record wasted epoch time, checkpoint, and exit.

    Relies on module globals: epoch_begin_time, job_name, save_files,
    model, current_epoch. NOTE(review): the read-modify-write of the shared
    JSON files is not atomic — concurrent jobs could lose updates; confirm
    the scheduler serializes access.
    """
    # first record the wasted epoch time (time spent in the aborted epoch)
    global epoch_begin_time
    if epoch_begin_time == 0:
        # no epoch has started yet, nothing was wasted
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)

    epoch_waste_dict = {}
    with open('epoch_waste.json', 'r') as fp:
        epoch_waste_dict = json.load(fp)
    epoch_waste_dict[job_name] += epoch_waste_time
    json_file3 = json.dumps(epoch_waste_dict)
    with open('epoch_waste.json', 'w') as fp:
        fp.write(json_file3)

    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists (only the newest is kept)
    for f in glob.glob(save_files):
        os.remove(f)
    model.save('/scratch/li.baol/checkpoint_k80_only/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')

    # flag this job as having a usable checkpoint for the scheduler
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback tracking the current epoch and its start time.

    Updates module globals current_epoch and epoch_begin_time so the
    SIGTERM handler can name the checkpoint and account wasted time.
    """
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 30.67619 | 118 | 0.704284 |
e8123d1b362a6a7291591d4fb43e4ad7b7bfb635 | 607 | py | Python | django_eveonline_timerboard/bindings.py | KryptedGaming/django-eveonline-timerboard | da413aa9d84b8ab4e97021a91ce53c16b0f88d81 | [
"MIT"
] | null | null | null | django_eveonline_timerboard/bindings.py | KryptedGaming/django-eveonline-timerboard | da413aa9d84b8ab4e97021a91ce53c16b0f88d81 | [
"MIT"
] | 2 | 2020-10-14T15:49:35.000Z | 2020-10-14T15:51:36.000Z | django_eveonline_timerboard/bindings.py | KryptedGaming/django-eveonline-timerboard | da413aa9d84b8ab4e97021a91ce53c16b0f88d81 | [
"MIT"
] | null | null | null | from django.apps import apps
from django.urls import reverse
from django.conf import settings
from packagebinder.bind import PackageBinding, SettingsBinding, TaskBinding, SidebarBinding
import logging
logger = logging.getLogger(__name__)
app_config = apps.get_app_config('django_eveonline_timerboard')
def create_bindings():
sidebar_bindings = apps.get_app_config('packagebinder').sidebar_bindings
sidebar_bindings['django_eveonline_connector'].add_child_menu_item({
"fa_icon": "fa-clock",
"name": "Timerboard",
"url": reverse("django-eveonline-timerboard-view"),
})
| 35.705882 | 91 | 0.769357 |
ac51fab03883cd85083cb14cd0fc671ea12d5c2c | 1,109 | py | Python | l5kit/l5kit/evaluation/error_functions.py | cdicle-motional/l5kit | 4dc4ee5391479bb71f0b373f39c316f9eef5a961 | [
"Apache-2.0"
] | null | null | null | l5kit/l5kit/evaluation/error_functions.py | cdicle-motional/l5kit | 4dc4ee5391479bb71f0b373f39c316f9eef5a961 | [
"Apache-2.0"
] | null | null | null | l5kit/l5kit/evaluation/error_functions.py | cdicle-motional/l5kit | 4dc4ee5391479bb71f0b373f39c316f9eef5a961 | [
"Apache-2.0"
] | 1 | 2021-11-19T08:13:46.000Z | 2021-11-19T08:13:46.000Z | import math
from typing import Callable
import torch
# Error function type
ErrorFunction = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
def l2_error(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
"""A function that takes pred, gt tensor and computes their L2 distance.
:param pred: predicted tensor, size: [batch_size, num_dims]
:param gt: gt tensor, size: [batch_size, num_dims]
:return: l2 distance between the predicted and gt tensor, size: [batch_size,]
"""
return torch.norm(pred - gt, p=2, dim=-1)
def closest_angle_error(angle_a: torch.Tensor, angle_b: torch.Tensor) -> torch.Tensor:
""" Finds the closest angle between angle_b - angle_a in radians.
:param angle_a: a Tensor of angles in radians
:param angle_b: a Tensor of angles in radians
:return: The relative angle error between A and B between [0, pi]
"""
assert angle_a.shape == angle_b.shape
two_pi = 2.0 * math.pi
wrapped = torch.fmod(angle_b - angle_a, two_pi)
closest_angle = torch.fmod(2.0 * wrapped, two_pi) - wrapped
return torch.abs(closest_angle)
| 33.606061 | 86 | 0.706041 |
29daaeb787d652d68e370e4225ca681fd7af6826 | 6,431 | py | Python | webpersonal/portafolio/views.py | RoniHernandez99/miWebPersonal | b6207c3c3e716bf6cc48dcc1198a1a0427961ed9 | [
"Apache-2.0"
] | null | null | null | webpersonal/portafolio/views.py | RoniHernandez99/miWebPersonal | b6207c3c3e716bf6cc48dcc1198a1a0427961ed9 | [
"Apache-2.0"
] | null | null | null | webpersonal/portafolio/views.py | RoniHernandez99/miWebPersonal | b6207c3c3e716bf6cc48dcc1198a1a0427961ed9 | [
"Apache-2.0"
] | null | null | null | from ast import keyword
from django.shortcuts import render
from django.views.generic import TemplateView,ListView,DetailView
from .models import Apartado_Portafolio,Habilidad,Especializacion,Proyecto,CursoImpartido,CursoTomado,ExperienciaLaboral
class ProyectoDetailView(DetailView):
    """Detail page for one project; adds the portfolio cover image."""
    model = Proyecto
    template_name="portafolio/detalle_proyecto.html"
    def get_context_data(self,**kwargs):
        context=super().get_context_data(**kwargs)
        # presumably returns the single configured portfolio record,
        # falsy when none exists — TODO confirm get_apartado()'s contract
        datos=Apartado_Portafolio.objects.get_apartado()
        if datos:
            context['imagenPortada']=datos.imagenPortada
        return context
class EspecializacionDetailView(DetailView):
    """Detail page for one specialization; adds the portfolio cover image."""
    model = Especializacion
    template_name="portafolio/detalle_especializacion.html"
    def get_context_data(self,**kwargs):
        context=super().get_context_data(**kwargs)
        datos=Apartado_Portafolio.objects.get_apartado()
        if datos:
            context['imagenPortada']=datos.imagenPortada
        return context
class CursoImpartidoDetailView(DetailView):
    """Detail page for one course taught; adds the portfolio cover image."""
    model = CursoImpartido
    template_name="portafolio/detalle_cursoImpartido.html"
    def get_context_data(self,**kwargs):
        context=super().get_context_data(**kwargs)
        datos=Apartado_Portafolio.objects.get_apartado()
        if datos:
            context['imagenPortada']=datos.imagenPortada
        return context
class CursoTomadoDetailView(DetailView):
    """Detail page for one course taken; adds the portfolio cover image."""
    model = CursoTomado
    template_name="portafolio/detalle_cursoTomado.html"
    def get_context_data(self,**kwargs):
        context=super().get_context_data(**kwargs)
        datos=Apartado_Portafolio.objects.get_apartado()
        if datos:
            context['imagenPortada']=datos.imagenPortada
        return context
class ProyectoListView(ListView):
    """Paginated list (6 per page) of all projects."""
    model = Proyecto
    template_name = "portafolio/lista_proyectos.html"
    paginate_by=6
    def get_context_data(self,**kwargs):
        context=super().get_context_data(**kwargs)
        datos=Apartado_Portafolio.objects.get_apartado()
        if datos:
            context['imagenPortada']=datos.imagenPortada
        # section heading shown in the template
        context['apartadoPortafolio']="Mis proyectos"
        return context
class EspecializacionListView(ListView):
    """List of all specializations (no pagination)."""
    model =Especializacion
    template_name = "portafolio/lista_especializaciones.html"
    def get_context_data(self,**kwargs):
        context=super().get_context_data(**kwargs)
        datos=Apartado_Portafolio.objects.get_apartado()
        if datos:
            context['imagenPortada']=datos.imagenPortada
        # section heading shown in the template
        context['apartadoPortafolio']="Mis especializaciones"
        return context
class CursoImpartidoListView(ListView):
    """List of all courses taught (no pagination)."""
    model = CursoImpartido
    template_name = "portafolio/lista_cursosImpartidos.html"
    def get_context_data(self,**kwargs):
        context=super().get_context_data(**kwargs)
        datos=Apartado_Portafolio.objects.get_apartado()
        if datos:
            context['imagenPortada']=datos.imagenPortada
        # section heading shown in the template
        context['apartadoPortafolio']="Cursos impartidos"
        return context
class CursoTomadoListView(ListView):
    """Paginated list (9 per page) of courses taken, filterable by keyword or skill."""
    model = CursoTomado
    template_name = "portafolio/lista_cursosTomados.html"
    paginate_by=9

    def get_queryset(self):
        """Filter by keyword ('palabraClave') or skill id ('habilidad'), else return all."""
        # One search form submits only 'palabraClave' (keyword)...
        palabraClave=self.request.GET.get("palabraClave",None)
        # ...the other submits only 'habilidad' (skill id).
        id_habilidad=self.request.GET.get("habilidad",None)
        if palabraClave:
            # case-insensitive substring match on the course name
            return CursoTomado.objects.filter(nombre__icontains=palabraClave)
        elif id_habilidad:
            try:
                habilidad=Habilidad.objects.get(id=int(id_habilidad))
            except (ValueError, Habilidad.DoesNotExist):
                # Non-numeric or unknown skill id: show an empty result set
                # instead of a 500. (Narrowed from the original bare except,
                # which silently swallowed every error.)
                return CursoTomado.objects.none()
            # `get_cursos` is the related_name from CursoTomado to Habilidad.
            return habilidad.get_cursos.all()
        else:
            return CursoTomado.objects.all()

    def get_context_data(self,**kwargs):
        """Expose the cover image, the active filter value, and the skill list."""
        context=super().get_context_data(**kwargs)
        datos=Apartado_Portafolio.objects.get_apartado()
        palabraClave=self.request.GET.get("palabraClave",None)
        id_habilidad=self.request.GET.get("habilidad",None)
        # echo the active filter back to the template for the search form
        if palabraClave:
            context['palabraClave_DIO_USER']=palabraClave
        elif id_habilidad:
            context['id_habilidad_DIO_USER']=id_habilidad
        if datos:
            context['imagenPortada']=datos.imagenPortada
        context['apartadoPortafolio']="Cursos tomados"
        # skill filter options for the sidebar
        context['habilidadesCursos']=Habilidad.objects.all()
        return context
class Portafolio(TemplateView):
    """Portfolio landing page: featured items from every section."""
    template_name="portafolio/portafolio.html"
    def get_context_data(self,**kwargs):
        context=super().get_context_data(**kwargs)
        datos=Apartado_Portafolio.objects.get_apartado()
        if datos:
            context['imagenPortada']=datos.imagenPortada
            # only the first 3 featured projects
            context['proyectos']=datos.proyectosDestacados.all()[:3]
            # only the first 3 featured specializations
            context['especializaciones']=datos.especializacionesDestacados.all()[:3]
            # only the first 3 featured courses taught
            context['cursosImpartidos']=datos.cursosImpartidosDestacados.all()[:3]
            # only the first 6 featured courses taken
            context['cursosTomados']=datos.cursosTomadosDestacados.all()[:6]
            # all work-experience entries
            context['experienciaLaboral']=ExperienciaLaboral.objects.all()
        return context
9a0c7330f8fc745975bc7fb86c3620b7a00ef62a | 21,608 | py | Python | align/detect_face.py | cooolallen/CVFinal | fed590a44703d5a9950a144f7a57b5c209687be6 | [
"MIT"
] | 6 | 2017-09-09T05:34:01.000Z | 2021-07-17T15:10:43.000Z | align/detect_face.py | cooolallen/Face-recognition-with-User-interface | fed590a44703d5a9950a144f7a57b5c209687be6 | [
"MIT"
] | 5 | 2017-09-09T05:34:38.000Z | 2019-08-16T02:30:21.000Z | align/detect_face.py | cooolallen/Face-recognition-with-User-interface | fed590a44703d5a9950a144f7a57b5c209687be6 | [
"MIT"
] | 3 | 2020-04-13T08:02:49.000Z | 2022-03-02T12:21:10.000Z | """ Tensorflow implementation of the face detection / alignment algorithm found at
https://github.com/kpzhang93/MTCNN_face_detection_alignment
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
#from math import floor
import cv2
import os
def layer(op):
    '''Decorator for composable network layers.

    Wraps *op* so that it consumes the network's current terminal nodes,
    registers its output under a (possibly auto-generated) name, feeds the
    output forward, and returns the network object for call chaining.
    '''

    def layer_decorated(self, *args, **kwargs):
        # Auto-name the layer from the op name unless the caller provided one;
        # setdefault also forwards the final name to op via kwargs.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        if not self.terminals:
            raise RuntimeError('No input variables found for layer %s.' % name)
        # A single terminal is passed bare; multiple terminals as a list.
        layer_input = self.terminals[0] if len(self.terminals) == 1 else list(self.terminals)
        layer_output = op(self, layer_input, *args, **kwargs)
        # Register the output and make it the input of the next layer.
        self.layers[name] = layer_output
        self.feed(layer_output)
        # Returning self allows chained .conv(...).pool(...) style calls.
        return self

    return layer_decorated
class Network(object):
    def __init__(self, inputs, trainable=True):
        """Store the graph inputs and immediately build the network.

        inputs: list of (name, tensor) pairs seeding the layer table.
        trainable: whether variables created by make_var are trainable.
        """
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Build the graph now; setup() is implemented by each subclass.
        self.setup()
    def setup(self):
        '''Construct the network. '''
        # Abstract hook: subclasses chain feed()/layer calls here.
        raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
try:
data_dict = np.load(data_path).item() #pylint: disable=no-member
except:
data_dict = np.load(data_path,encoding='bytes').item() #pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].items():
try:
var = tf.get_variable(param_name.decode('utf-8'))
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
'''Returns the current network output.'''
return self.terminals[-1]
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
'''Verifies that the padding is one of the supported ones.'''
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = inp.get_shape()[-1]
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = inp.get_shape().as_list()
alpha = self.make_var('alpha', shape=(i[-1]))
output = tf.nn.relu(inp) + tf.mul(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keep_dims=True)
target_exp = tf.exp(target-max_axis)
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
    """MTCNN stage 1 ("proposal") network.

    Fully convolutional: produces a face-probability map ('prob1') and
    per-cell bounding-box regression offsets ('conv4-2').
    """
    def setup(self):
        '''Construct the P-Net graph.'''
        (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
             .conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
             .prelu(name='PReLU1')
             .max_pool(2, 2, 2, 2, name='pool1')
             .conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
             .prelu(name='PReLU2')
             .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
             .prelu(name='PReLU3')
             .conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
             .softmax(3,name='prob1'))

        # Box-regression head branches off the shared PReLU3 features.
        (self.feed('PReLU3') #pylint: disable=no-value-for-parameter
             .conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
    """MTCNN stage 2 ("refine") network for 24x24 crops.

    Produces a face-probability ('prob1') and box-regression offsets
    ('conv5-2').
    """
    def setup(self):
        '''Construct the R-Net graph.'''
        (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
             .conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
             .prelu(name='prelu1')
             .max_pool(3, 3, 2, 2, name='pool1')
             .conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
             .prelu(name='prelu2')
             .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
             .conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
             .prelu(name='prelu3')
             .fc(128, relu=False, name='conv4')
             .prelu(name='prelu4')
             .fc(2, relu=False, name='conv5-1')
             .softmax(1,name='prob1'))

        # Box-regression head branches off the shared prelu4 features.
        (self.feed('prelu4') #pylint: disable=no-value-for-parameter
             .fc(4, relu=False, name='conv5-2'))
class ONet(Network):
    """MTCNN stage 3 ("output") network for 48x48 crops.

    Produces a face-probability ('prob1'), box-regression offsets
    ('conv6-2') and 5-point facial landmarks ('conv6-3').
    """
    def setup(self):
        '''Construct the O-Net graph.'''
        (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
             .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
             .prelu(name='prelu1')
             .max_pool(3, 3, 2, 2, name='pool1')
             .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
             .prelu(name='prelu2')
             .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
             .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
             .prelu(name='prelu3')
             .max_pool(2, 2, 2, 2, name='pool3')
             .conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
             .prelu(name='prelu4')
             .fc(256, relu=False, name='conv5')
             .prelu(name='prelu5')
             .fc(2, relu=False, name='conv6-1')
             .softmax(1, name='prob1'))

        # Box-regression head.
        (self.feed('prelu5') #pylint: disable=no-value-for-parameter
             .fc(4, relu=False, name='conv6-2'))

        # Landmark head: 5 (x, y) facial keypoints.
        (self.feed('prelu5') #pylint: disable=no-value-for-parameter
             .fc(10, relu=False, name='conv6-3'))
def create_mtcnn(sess, model_path):
    """Build the three MTCNN stage networks and load their weights.

    Parameters
    ----------
    sess : tf.Session or None
        Session in which to build/load the networks. If None, a new
        session on the default graph is created.
    model_path : str
        Directory containing det1.npy, det2.npy and det3.npy.

    Returns
    -------
    (pnet_fun, rnet_fun, onet_fun) : callables mapping an image batch to
    the corresponding network outputs.
    """
    # BUGFIX: the caller's session used to be unconditionally discarded and
    # replaced by a brand new one; only create a session when none was given.
    if sess is None:
        sess = tf.Session(graph=tf.get_default_graph())
    with tf.variable_scope('pnet'):
        data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
        pnet = PNet({'data':data})
        pnet.load(os.path.join(model_path, 'det1.npy'), sess)
    with tf.variable_scope('rnet'):
        data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
        rnet = RNet({'data':data})
        rnet.load(os.path.join(model_path, 'det2.npy'), sess)
    with tf.variable_scope('onet'):
        data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
        onet = ONet({'data':data})
        onet.load(os.path.join(model_path, 'det3.npy'), sess)

    # Run each stage by tensor name so callers need no graph handles.
    pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
    rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
    onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
    return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
    """Run the three-stage MTCNN cascade on a single image.

    Parameters
    ----------
    img : ndarray
        Input image (H x W x 3).
    minsize : int
        Minimum face size to detect, in pixels.
    pnet, rnet, onet : callable
        Stage networks as returned by `create_mtcnn`.
    threshold : sequence of three floats
        Score thresholds [th1, th2, th3], one per stage.
    factor : float
        Scale-pyramid decay factor between consecutive scales.

    Returns
    -------
    (total_boxes, points)
        `total_boxes` is an (n, 5) array of (x1, y1, x2, y2, score);
        `points` holds the 10 landmark coordinates per detection
        (5 x-values followed by 5 y-values).
    """
    factor_count=0
    total_boxes=np.empty((0,9))
    points=[]
    h=img.shape[0]
    w=img.shape[1]
    minl=np.amin([h, w])
    m=12.0/minsize
    minl=minl*m
    # create scale pyramid
    scales=[]
    while minl>=12:
        scales += [m*np.power(factor, factor_count)]
        minl = minl*factor
        factor_count += 1

    # first stage: run P-Net on every pyramid scale
    for j in range(len(scales)):
        scale=scales[j]
        hs=int(np.ceil(h*scale))
        ws=int(np.ceil(w*scale))
        im_data = imresample(img, (hs, ws))
        im_data = (im_data-127.5)*0.0078125
        img_x = np.expand_dims(im_data, 0)
        img_y = np.transpose(img_x, (0,2,1,3))
        out = pnet(img_y)
        out0 = np.transpose(out[0], (0,2,1,3))
        out1 = np.transpose(out[1], (0,2,1,3))

        boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0])

        # inter-scale nms
        pick = nms(boxes.copy(), 0.5, 'Union')
        if boxes.size>0 and pick.size>0:
            boxes = boxes[pick,:]
            total_boxes = np.append(total_boxes, boxes, axis=0)

    numbox = total_boxes.shape[0]
    if numbox>0:
        pick = nms(total_boxes.copy(), 0.7, 'Union')
        total_boxes = total_boxes[pick,:]
        # Refine the P-Net proposals with their regression offsets.
        regw = total_boxes[:,2]-total_boxes[:,0]
        regh = total_boxes[:,3]-total_boxes[:,1]
        qq1 = total_boxes[:,0]+total_boxes[:,5]*regw
        qq2 = total_boxes[:,1]+total_boxes[:,6]*regh
        qq3 = total_boxes[:,2]+total_boxes[:,7]*regw
        qq4 = total_boxes[:,3]+total_boxes[:,8]*regh
        total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]]))
        total_boxes = rerec(total_boxes.copy())
        total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)

    numbox = total_boxes.shape[0]
    if numbox>0:
        # second stage: rescore and refine 24x24 crops with R-Net
        tempimg = np.zeros((24,24,3,numbox))
        for k in range(0,numbox):
            tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
            tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
            if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
                tempimg[:,:,:,k] = imresample(tmp, (24, 24))
            else:
                # BUGFIX: `np.empty()` without a shape raises TypeError, and a
                # single return value breaks the caller's 2-tuple unpacking.
                return np.empty((0, 9)), np.empty(0)
        tempimg = (tempimg-127.5)*0.0078125
        tempimg1 = np.transpose(tempimg, (3,1,0,2))
        out = rnet(tempimg1)
        out0 = np.transpose(out[0])
        out1 = np.transpose(out[1])
        score = out1[1,:]
        ipass = np.where(score>threshold[1])
        total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
        mv = out0[:,ipass[0]]
        if total_boxes.shape[0]>0:
            pick = nms(total_boxes, 0.7, 'Union')
            total_boxes = total_boxes[pick,:]
            total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick]))
            total_boxes = rerec(total_boxes.copy())

    numbox = total_boxes.shape[0]
    if numbox>0:
        # third stage: final scoring, box regression and landmarks with O-Net
        total_boxes = np.fix(total_boxes).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
        tempimg = np.zeros((48,48,3,numbox))
        for k in range(0,numbox):
            tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
            tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
            if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
                tempimg[:,:,:,k] = imresample(tmp, (48, 48))
            else:
                # BUGFIX: see the second-stage note above.
                return np.empty((0, 9)), np.empty(0)
        tempimg = (tempimg-127.5)*0.0078125
        tempimg1 = np.transpose(tempimg, (3,1,0,2))
        out = onet(tempimg1)
        out0 = np.transpose(out[0])
        out1 = np.transpose(out[1])
        out2 = np.transpose(out[2])
        score = out2[1,:]
        points = out1
        ipass = np.where(score>threshold[2])
        points = points[:,ipass[0]]
        total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
        mv = out0[:,ipass[0]]

        w = total_boxes[:,2]-total_boxes[:,0]+1
        h = total_boxes[:,3]-total_boxes[:,1]+1
        # Map landmark coordinates from crop space back to image space.
        points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
        points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
        if total_boxes.shape[0]>0:
            total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
            pick = nms(total_boxes.copy(), 0.7, 'Min')
            total_boxes = total_boxes[pick,:]
            points = points[:,pick]

    return total_boxes, points
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox,reg):
    """Calibrate bounding boxes with the network's regression offsets.

    Columns 0-3 of ``boundingbox`` (x1, y1, x2, y2) are shifted by the
    per-box offsets in ``reg``, each scaled by the box width/height.
    """
    if reg.shape[1]==1:
        # Collapse the singleton network-output layout down to (n, 4).
        reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))

    w = boundingbox[:,2]-boundingbox[:,0]+1
    h = boundingbox[:,3]-boundingbox[:,1]+1
    # Offsets are fractions of width/height: (dx1*w, dy1*h, dx2*w, dy2*h).
    scales = np.transpose(np.vstack([w, h, w, h]))
    boundingbox[:,0:4] = boundingbox[:,0:4] + reg * scales
    return boundingbox
def generateBoundingBox(imap, reg, scale, t):
    """Convert P-Net's score heatmap into candidate bounding boxes.

    Heatmap cells scoring at least `t` are mapped back to input-image
    coordinates using the network's stride (2) and receptive field (12),
    divided by the pyramid `scale`. Returns the (n, 9) box array
    (q1, q2, score, reg) and the per-box regression offsets.
    """
    stride = 2
    cellsize = 12

    imap = np.transpose(imap)
    # Per-coordinate regression maps, transposed to match `imap`.
    dx1, dy1, dx2, dy2 = (np.transpose(reg[:, :, c]) for c in range(4))
    y, x = np.where(imap >= t)
    if y.shape[0] == 1:
        # Single-detection edge case: mirror the maps vertically.
        dx1, dy1, dx2, dy2 = (np.flipud(m) for m in (dx1, dy1, dx2, dy2))
    score = imap[(y, x)]
    reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
    if reg.size == 0:
        reg = np.empty((0, 3))
    bb = np.transpose(np.vstack([y, x]))
    # Map heatmap cells back to input-image coordinates.
    q1 = np.fix((stride * bb + 1) / scale)
    q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
    boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
    return boundingbox, reg
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
    """Greedy non-maximum suppression.

    Parameters
    ----------
    boxes : ndarray of shape (n, 5)
        Rows are (x1, y1, x2, y2, score).
    threshold : float
        Overlap above which the lower-scored box is suppressed.
    method : str
        'Min' divides the intersection by the smaller box area; anything
        else uses the standard IoU ('Union') criterion.

    Returns
    -------
    int16 ndarray of kept indices, ordered by descending score.
    """
    if boxes.size==0:
        return np.empty((0,3))
    x1 = boxes[:,0]
    y1 = boxes[:,1]
    x2 = boxes[:,2]
    y2 = boxes[:,3]
    s = boxes[:,4]
    area = (x2-x1+1) * (y2-y1+1)
    I = np.argsort(s)
    pick = np.zeros_like(s, dtype=np.int16)
    counter = 0
    while I.size>0:
        i = I[-1]  # highest-scored remaining box
        pick[counter] = i
        counter += 1
        idx = I[0:-1]
        xx1 = np.maximum(x1[i], x1[idx])
        yy1 = np.maximum(y1[i], y1[idx])
        xx2 = np.minimum(x2[i], x2[idx])
        yy2 = np.minimum(y2[i], y2[idx])
        w = np.maximum(0.0, xx2-xx1+1)
        h = np.maximum(0.0, yy2-yy1+1)
        inter = w * h
        # BUGFIX: compare strings with '==', not identity ('is'), which only
        # worked by accident through CPython literal interning.
        if method == 'Min':
            o = inter / np.minimum(area[i], area[idx])
        else:
            o = inter / (area[i] + area[idx] - inter)
        I = I[np.where(o<=threshold)]
    pick = pick[0:counter]
    return pick
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
    """Compute padded crop coordinates that clip boxes to the image.

    For each box, returns source-image coordinates (x, ex, y, ey) clipped
    to the (w, h) image, destination coordinates (dx, edx, dy, edy) inside
    a zero-padded crop buffer, and the full box sizes (tmpw, tmph).
    Coordinates are 1-based (MATLAB convention kept from the original
    implementation).
    """
    tmpw = (total_boxes[:,2]-total_boxes[:,0]+1).astype(np.int32)
    tmph = (total_boxes[:,3]-total_boxes[:,1]+1).astype(np.int32)
    numbox = total_boxes.shape[0]

    dx = np.ones((numbox), dtype=np.int32)
    dy = np.ones((numbox), dtype=np.int32)
    edx = tmpw.copy().astype(np.int32)
    edy = tmph.copy().astype(np.int32)

    x = total_boxes[:,0].copy().astype(np.int32)
    y = total_boxes[:,1].copy().astype(np.int32)
    ex = total_boxes[:,2].copy().astype(np.int32)
    ey = total_boxes[:,3].copy().astype(np.int32)

    # BUGFIX: the right-hand sides used to be wrapped in
    # np.expand_dims(..., 1), which raises a broadcast error whenever more
    # than one box needs clipping ((k, 1) cannot be assigned to a (k,)
    # fancy-indexed slice). Plain 1-D assignment is the correct form.
    tmp = np.where(ex>w)
    edx[tmp] = -ex[tmp]+w+tmpw[tmp]
    ex[tmp] = w

    tmp = np.where(ey>h)
    edy[tmp] = -ey[tmp]+h+tmph[tmp]
    ey[tmp] = h

    tmp = np.where(x<1)
    dx[tmp] = 2-x[tmp]
    x[tmp] = 1

    tmp = np.where(y<1)
    dy[tmp] = 2-y[tmp]
    y[tmp] = 1

    return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
    """Expand each box to a square centred on the original box.

    The square side is max(width, height); columns 0-3 of `bboxA` are
    rewritten in place and the array is returned.
    """
    h = bboxA[:,3]-bboxA[:,1]
    w = bboxA[:,2]-bboxA[:,0]
    side = np.maximum(w, h)
    # Re-centre the top-left corner, then place the bottom-right corner
    # exactly one side-length away.
    bboxA[:,0] = bboxA[:,0] + w*0.5 - side*0.5
    bboxA[:,1] = bboxA[:,1] + h*0.5 - side*0.5
    bboxA[:,2:4] = bboxA[:,0:2] + side[:, np.newaxis]
    return bboxA
def imresample(img, sz):
    """Resize `img` to ``sz = (height, width)`` with OpenCV area interpolation.

    Note cv2.resize takes (width, height), hence the swapped indices.
    """
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #pylint: disable=no-member
    return im_data
# This method is kept for debugging purpose
# h=img.shape[0]
# w=img.shape[1]
# hs, ws = sz
# dx = float(w) / ws
# dy = float(h) / hs
# im_data = np.zeros((hs,ws,3))
# for a1 in range(0,hs):
# for a2 in range(0,ws):
# for a3 in range(0,3):
# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
# return im_data
| 38.933333 | 138 | 0.571316 |
bff25e8e55d16bd91cda0d4d959ef4a5e5d4e030 | 2,530 | py | Python | openerp/addons/hr_timesheet/__openerp__.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | openerp/addons/hr_timesheet/__openerp__.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | openerp/addons/hr_timesheet/__openerp__.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Timesheets',
'version': '1.0',
'category': 'Human Resources',
'sequence': 23,
'description': """
This module implements a timesheet system.
==========================================
Each employee can encode and track their time spent on the different projects.
A project is an analytic account and the time spent on a project generates costs on
the analytic account.
Lots of reporting on time and employee tracking are provided.
It is completely integrated with the cost accounting module. It allows you to set
up a management by affair.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/hr_timesheet_lines.jpeg'],
'depends': ['account', 'hr', 'base', 'hr_attendance', 'process'],
'data': [
'security/ir.model.access.csv',
'security/hr_timesheet_security.xml',
'hr_timesheet_view.xml',
'hr_timesheet_report.xml',
'hr_timesheet_wizard.xml',
'process/hr_timesheet_process.xml',
'wizard/hr_timesheet_print_employee_view.xml',
'wizard/hr_timesheet_print_users_view.xml',
'wizard/hr_timesheet_sign_in_out_view.xml',
'hr_timesheet_installer.xml',
'hr_timesheet_data.xml'
],
'demo': ['hr_timesheet_demo.xml'],
'test': [
'test/test_hr_timesheet.yml',
'test/hr_timesheet_report.yml',
'test/hr_timesheet_demo.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 37.205882 | 83 | 0.63004 |
09e0f3d5a6fa089ebaa00c4d7ae1f6ac4eaaa202 | 9,667 | py | Python | aeppl_hmm/dists.py | AmpersandTV/aeppl-hmm | 3d8807cddf7930468f60e8b8ef9da08618138e7b | [
"MIT"
] | 4 | 2021-12-08T20:39:59.000Z | 2022-02-10T19:45:11.000Z | aeppl_hmm/dists.py | brandonwillard/aeppl-hmm | 3d8807cddf7930468f60e8b8ef9da08618138e7b | [
"MIT"
] | 2 | 2021-12-08T21:45:13.000Z | 2022-02-10T19:48:46.000Z | aeppl_hmm/dists.py | brandonwillard/aeppl-hmm | 3d8807cddf7930468f60e8b8ef9da08618138e7b | [
"MIT"
] | 1 | 2021-12-08T20:39:04.000Z | 2021-12-08T20:39:04.000Z | from copy import copy
from typing import Sequence
import aesara
import aesara.tensor as at
import numpy as np
from aeppl.abstract import MeasurableVariable
from aeppl.dists import dirac_delta
from aeppl.logprob import _logprob
from aesara.compile.builders import OpFromGraph
from aesara.graph.basic import Constant
from aesara.tensor.basic import make_vector
from aesara.tensor.random.basic import categorical
from aesara.tensor.random.utils import broadcast_params, normalize_size_param
from aesara.tensor.var import TensorVariable
def non_constant(x):
    """Return `x` as a tensor variable, replacing `Constant`s with fresh
    variables of the same type (preserving the name and, when test values
    are enabled, the constant's data as the test value)."""
    x = at.as_tensor_variable(x)
    if not isinstance(x, Constant):
        return x
    # XXX: This isn't good for `size` parameters, because it could result
    # in `at.get_vector_length` exceptions.
    res = x.type()
    res.tag = copy(res.tag)
    if aesara.config.compute_test_value != "off":
        res.tag.test_value = x.data
    res.name = x.name
    return res
def switching_process(
    comp_rvs: Sequence[TensorVariable],
    states: TensorVariable,
):
    """Construct a switching process over arbitrary univariate mixtures and a state sequence.

    This builds a graph equivalent to

        at.stack(comp_rvs)[states, *grids]

    where the extra index grids make `states` select a mixture component
    element-wise along every remaining axis.

    Parameters
    ----------
    comp_rvs
        A list containing `RandomVariable` objects for each mixture component.
    states
        The hidden state sequence. It should have a number of states
        equal to the size of `comp_rvs`.
    """  # noqa: E501
    states = at.as_tensor(states, dtype=np.int64)
    stacked = at.stack(at.broadcast_arrays(*[at.as_tensor(rv) for rv in comp_rvs]))
    grid = tuple(at.arange(dim) for dim in tuple(stacked.shape)[1:])
    return stacked[(states,) + grid]
def poisson_zero_process(mu=None, states=None, srng=None, **kwargs):
    """A Poisson-Dirac-delta (at zero) mixture process.

    Component 0 is the Dirac-delta at zero; component 1 is a Poisson
    random variable with rate `mu`.

    Parameters
    ----------
    mu: tensor
        The Poisson rate(s)
    states: tensor
        A vector of integer 0-1 states that indicate which component of
        the mixture is active at each point/time.
    """
    mu = at.as_tensor_variable(mu)
    states = at.as_tensor_variable(states)
    zero_component = dirac_delta(at.as_tensor(0, dtype=np.int64))
    # NOTE: This creates distributions that are *not* part of a `Model`
    return switching_process([zero_component, srng.poisson(mu)], states, **kwargs)
class DiscreteMarkovChainFactory(OpFromGraph):
    # Add `RandomVariable`-like "metadata"
    # Support dimension of one draw: a state sequence is a vector.
    ndim_supp = 1
    # Expected parameter ndims for (Gammas, gamma_0).
    ndims_params = (3, 1)

# Register so AePPL treats this Op's outputs as measurable variables
# (i.e. eligible for log-probability derivation).
MeasurableVariable.register(DiscreteMarkovChainFactory)
def create_discrete_mc_op(rng, size, Gammas, gamma_0):
    """Construct a `DiscreteMarkovChainFactory` `Op`.
    This returns a `Scan` that performs the following:
        states[0] = categorical(gamma_0)
        for t in range(1, N):
            states[t] = categorical(Gammas[t, state[t-1]])
    The Aesara graph representing the above is wrapped in an `OpFromGraph` so
    that we can easily assign it a specific log-probability.
    TODO: Eventually, AePPL should be capable of parsing more sophisticated
    `Scan`s and producing nearly the same log-likelihoods, and the use of
    `OpFromGraph` will no longer be necessary.
    """
    # Again, we need to preserve the length of this symbolic vector, so we do
    # this.
    size_param = make_vector(
        *[non_constant(size[i]) for i in range(at.get_vector_length(size))]
    )
    size_param.name = "size"
    # We make shallow copies so that unwanted ancestors don't appear in the
    # graph.
    Gammas_param = non_constant(Gammas).type()
    Gammas_param.name = "Gammas_param"
    gamma_0_param = non_constant(gamma_0).type()
    gamma_0_param.name = "gamma_0_param"
    bcast_Gammas_param, bcast_gamma_0_param = broadcast_params(
        (Gammas_param, gamma_0_param), (3, 1)
    )
    # Sample state 0 in each state sequence
    state_0 = categorical(
        bcast_gamma_0_param,
        size=tuple(size_param) + tuple(bcast_gamma_0_param.shape[:-1]),
        # size=at.join(0, size_param, bcast_gamma_0_param.shape[:-1]),
        rng=rng,
    )
    # N is the sequence length; broadcast the transition matrices so there
    # is one N x M x M stack per sampled sequence.
    N = bcast_Gammas_param.shape[-3]
    states_shape = tuple(state_0.shape) + (N,)
    bcast_Gammas_param = at.broadcast_to(
        bcast_Gammas_param, states_shape + tuple(bcast_Gammas_param.shape[-2:])
    )
    def loop_fn(n, state_nm1, Gammas_inner, rng):
        # Step-n transition matrix, then pick the row belonging to each
        # sequence's previous state before drawing the next state.
        gamma_t = Gammas_inner[..., n, :, :]
        idx = tuple(at.ogrid[[slice(None, d) for d in tuple(state_0.shape)]]) + (
            state_nm1.T,
        )
        gamma_t = gamma_t[idx]
        state_n = categorical(gamma_t, rng=rng)
        return state_n.T
    res, _ = aesara.scan(
        loop_fn,
        outputs_info=[{"initial": state_0.T, "taps": [-1]}],
        sequences=[at.arange(N)],
        non_sequences=[bcast_Gammas_param, rng],
        # strict=True,
    )
    return DiscreteMarkovChainFactory(
        [size_param, Gammas_param, gamma_0_param],
        [res.T],
        inline=True,
        on_unused_input="ignore",
    )
def discrete_markov_chain(
    Gammas: TensorVariable, gamma_0: TensorVariable, size=None, rng=None, **kwargs
):
    """Construct a first-order discrete Markov chain distribution.

    This characterizes vector random variables consisting of state indicator
    values (i.e. ``0`` to ``M - 1``) that are driven by a discrete Markov chain.

    Parameters
    ----------
    Gammas
        An array of transition probability matrices. `Gammas` takes the
        shape ``... x N x M x M`` for a state sequence of length ``N`` having
        ``M``-many distinct states. Each row, ``r``, in a transition probability
        matrix gives the probability of transitioning from state ``r`` to each
        other state.
    gamma_0
        The initial state probabilities. The last dimension should be length ``M``,
        i.e. the number of distinct states.

    Returns
    -------
    A tensor variable representing the sampled state sequence(s).
    """
    gamma_0 = at.as_tensor_variable(gamma_0)

    # BUGFIX: convert to a tensor variable *before* checking `ndim`, so that
    # plain (nested) lists are accepted and not rejected with an
    # AttributeError instead of the intended assertion.
    Gammas = at.as_tensor_variable(Gammas)
    assert Gammas.ndim >= 3

    size = normalize_size_param(size)

    if rng is None:
        rng = aesara.shared(np.random.RandomState(), borrow=True)

    DiscreteMarkovChainOp = create_discrete_mc_op(rng, size, Gammas, gamma_0)
    rv_var = DiscreteMarkovChainOp(size, Gammas, gamma_0)

    testval = kwargs.pop("testval", None)

    if testval is not None:
        rv_var.tag.test_value = testval

    return rv_var
@_logprob.register(DiscreteMarkovChainFactory)
def discrete_mc_logp(op, states, *dist_params, **kwargs):
    r"""Create a Aesara graph that computes the log-likelihood for a discrete Markov chain.
    This is the log-likelihood for the joint distribution of states, :math:`S_t`, conditional
    on state samples, :math:`s_t`, given by the following:
    .. math::
        \int_{S_0} P(S_1 = s_1 \mid S_0) dP(S_0) \prod^{T}_{t=2} P(S_t = s_t \mid S_{t-1} = s_{t-1})
    The first term (i.e. the integral) simply computes the marginal :math:`P(S_1 = s_1)`, so
    another way to express this result is as follows:
    .. math::
        P(S_1 = s_1) \prod^{T}_{t=2} P(S_t = s_t \mid S_{t-1} = s_{t-1})
    """ # noqa: E501
    (states,) = states
    # Drop the `OpFromGraph`'s trailing shared inputs (e.g. the RNG) and
    # recover the distribution parameters; the first entry is `size`.
    _, Gammas, gamma_0 = dist_params[: len(dist_params) - len(op.shared_inputs)]
    Gammas = at.shape_padleft(Gammas, states.ndim - (Gammas.ndim - 2))
    # Multiply the initial state probabilities by the first transition
    # matrix to get the marginal probability for state `S_1`.
    # The integral that produces the marginal is essentially
    # `gamma_0.dot(Gammas[0])`
    Gamma_1 = Gammas[..., 0:1, :, :]
    gamma_0 = at.expand_dims(gamma_0, (-3, -1))
    P_S_1 = at.sum(gamma_0 * Gamma_1, axis=-2)
    # The `at.switch`s allow us to broadcast the indexing operation when
    # the replication dimensions of `states` and `Gammas` don't match
    # (e.g. `states.shape[0] > Gammas.shape[0]`)
    S_1_slices = tuple(
        slice(
            at.switch(at.eq(P_S_1.shape[i], 1), 0, 0),
            at.switch(at.eq(P_S_1.shape[i], 1), 1, d),
        )
        for i, d in enumerate(states.shape)
    )
    S_1_slices = (tuple(at.ogrid[S_1_slices]) if S_1_slices else tuple()) + (
        states[..., 0:1],
    )
    logp_S_1 = at.log(P_S_1[S_1_slices]).sum(axis=-1)
    # These are slices for the extra dimensions--including the state
    # sequence dimension (e.g. "time")--along which we need to index
    # the transition matrix rows using the "observed" `states`.
    trans_slices = tuple(
        slice(
            at.switch(at.eq(Gammas.shape[i], 1), 0, 1 if i == states.ndim - 1 else 0),
            at.switch(at.eq(Gammas.shape[i], 1), 1, d),
        )
        for i, d in enumerate(states.shape)
    )
    trans_slices = (tuple(at.ogrid[trans_slices]) if trans_slices else tuple()) + (
        states[..., :-1],
    )
    # Select the transition matrix row of each observed state; this yields
    # `P(S_t | S_{t-1} = s_{t-1})`
    P_S_2T = Gammas[trans_slices]
    obs_slices = tuple(slice(None, d) for d in P_S_2T.shape[:-1])
    obs_slices = (tuple(at.ogrid[obs_slices]) if obs_slices else tuple()) + (
        states[..., 1:],
    )
    logp_S_1T = at.log(P_S_2T[obs_slices])
    # Joint log-likelihood: initial-state term plus all transition terms.
    res = logp_S_1 + at.sum(logp_S_1T, axis=-1)
    res.name = "DiscreteMarkovChain_logp"
    if kwargs.get("sum", False):
        res = res.sum()
    return res
f114756b3b937963aa5f1a3c22d664a0a44dc3c4 | 54 | py | Python | build/lib/ciberatac/__init__.py | goodarzilab/ciberatac | 58c150813cfdf1cea160b9b2c464c382cb0f7395 | [
"BSD-3-Clause"
] | 3 | 2022-02-25T19:24:52.000Z | 2022-03-22T16:48:07.000Z | src/ciberatac/__init__.py | goodarzilab/ciberatac | 58c150813cfdf1cea160b9b2c464c382cb0f7395 | [
"BSD-3-Clause"
] | null | null | null | src/ciberatac/__init__.py | goodarzilab/ciberatac | 58c150813cfdf1cea160b9b2c464c382cb0f7395 | [
"BSD-3-Clause"
] | null | null | null | import model
import predict
import train
import utils
| 10.8 | 14 | 0.851852 |
66a58750a2687602ec8a6cbe7682ab85be92e2a9 | 2,852 | py | Python | mio/cli/completion.py | Datum-Technology-Corporation/mio_platform_client_cli | 74e09ae279ae44b3afd67493d90aec5d45ee7909 | [
"Apache-2.0"
] | null | null | null | mio/cli/completion.py | Datum-Technology-Corporation/mio_platform_client_cli | 74e09ae279ae44b3afd67493d90aec5d45ee7909 | [
"Apache-2.0"
] | null | null | null | mio/cli/completion.py | Datum-Technology-Corporation/mio_platform_client_cli | 74e09ae279ae44b3afd67493d90aec5d45ee7909 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Datum Technology Corporation
# SPDX-License-Identifier: Apache-2.0 WITH SHL-2.1
########################################################################################################################
# Licensed under the Solderpad Hardware License v 2.1 (the "License"); you may not use this file except in compliance
# with the License, or, at your option, the Apache License version 2.0. You may obtain a copy of the License at
# https://solderpad.org/licenses/SHL-2.1/
# Unless required by applicable law or agreed to in writing, any work distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
########################################################################################################################
"""Moore.io Completion Command
Produces outputs for shell/editor tab completion of mio commands and hdl
symbols from populated IPs.
Usage:
mio completion [--mio-commands] [--shell=<variant>]
mio completion --ctags [--variant=<type> ]
Options:
-m, --mio-commands
Outputs Moore.io commands completion text
-s <variant>, --shell=<variant>
Specify shell variant for completion: bash, csh, zsh [default: bash]
-c, --ctags
Outputs Ctags of IP HDL symbols
-v <type>, --variant=<type>
Specifies which CTags variant to use: ctags, etags [default: ctags]
Examples:
source <(mio completion --shell=csh)
mio completion --mio-commands >> /usr/local/etc/bash_completion.d/mio
mio completion --mio-commands --shell=bash >> ~/.bashrc
mio completion --ctags >> ~/tags/my_project.tags
"""
########################################################################################################################
# IMPORTS
########################################################################################################################
from docopt import docopt
import logging
########################################################################################################################
########################################################################################################################
# ENTRY POINT
########################################################################################################################
def main(upper_args):
    """Parse the `completion` sub-command arguments with docopt.

    `upper_args` is the argument list handed down by the top-level CLI
    dispatcher; the module docstring above provides the usage grammar.
    """
    logging.debug("completion - upper_args: " + str(upper_args))
    parsed_args = docopt(__doc__, argv=upper_args, options_first=False)
    logging.debug("completion - args: " + str(parsed_args))
########################################################################################################################
| 49.172414 | 120 | 0.45582 |
5030875ec3b463c9cfa8f94efff349e505b885bb | 31,078 | py | Python | RegRCNN/datasets/toy/data_loader.py | HannahElisa/RegRCNN | 1aa69d00c61bd36685213248bb30d4ba30ac5a06 | [
"Apache-2.0"
] | null | null | null | RegRCNN/datasets/toy/data_loader.py | HannahElisa/RegRCNN | 1aa69d00c61bd36685213248bb30d4ba30ac5a06 | [
"Apache-2.0"
] | null | null | null | RegRCNN/datasets/toy/data_loader.py | HannahElisa/RegRCNN | 1aa69d00c61bd36685213248bb30d4ba30ac5a06 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
sys.path.append('../') # works on cluster indep from where sbatch job is started
import RegRCNN.plotting as plg
import numpy as np
import os
from multiprocessing import Lock
from collections import OrderedDict
import pandas as pd
import pickle
import time
# batch generator tools from https://github.com/MIC-DKFZ/batchgenerators
from batchgenerators.transforms.spatial_transforms import MirrorTransform as Mirror
from batchgenerators.transforms.abstract_transforms import Compose
from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from batchgenerators.transforms.spatial_transforms import SpatialTransform
from batchgenerators.transforms.crop_and_pad_transforms import CenterCropTransform
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import utils.dataloader_utils as dutils
from utils.dataloader_utils import ConvertSegToBoundingBoxCoordinates
def load_obj(file_path):
    """Deserialize and return the pickled object stored at *file_path*.

    NOTE(review): pickle is unsafe on untrusted input; only use on
    project-generated files.
    """
    with open(file_path, 'rb') as fh:
        return pickle.load(fh)
class Dataset(dutils.Dataset):
    r""" Load a dict holding memmapped arrays and clinical parameters for each patient,
    evtly subset of those.
    If server_env: copy and evtly unpack (npz->npy) data in cf.data_rootdir to
    cf.data_dir.
    :param cf: config file
    :param folds: number of folds out of @params n_cv folds to include
    :param n_cv: number of total folds
    :return: dict with imgs, segs, pids, class_labels, observables
    """
    def __init__(self, cf, logger, subset_ids=None, data_sourcedir=None, mode='train'):
        """Build the per-patient data dict from the pickled info dataframe.

        :param cf: config object. NOTE: mutated here (cf.roi_items is set).
        :param logger: logger used for info output.
        :param subset_ids: optional list of patient ids to restrict the set to.
        :param data_sourcedir: optional source dir overriding cf's default.
        :param mode: with cf.test_against_exact_gt, mode 'test' (or
            cf.val_mode == 'val_patient') additionally loads exact (noise-free)
            ground truths next to the regular ones.
        """
        super(Dataset,self).__init__(cf, data_sourcedir=data_sourcedir)
        # exact GTs are only needed for evaluation-style modes
        load_exact_gts = (mode=='test' or cf.val_mode=="val_patient") and self.cf.test_against_exact_gt
        p_df = pd.read_pickle(os.path.join(self.data_dir, cf.info_df_name))
        if subset_ids is not None:
            p_df = p_df[p_df.pid.isin(subset_ids)]
            logger.info('subset: selected {} instances from df'.format(len(p_df)))
        pids = p_df.pid.tolist()
        #evtly copy data from data_sourcedir to data_dest
        if cf.server_env and not hasattr(cf, "data_dir"):
            file_subset = [os.path.join(self.data_dir, '{}.*'.format(pid)) for pid in pids]
            file_subset += [os.path.join(self.data_dir, '{}_seg.*'.format(pid)) for pid in pids]
            file_subset += [cf.info_df_name]
            if load_exact_gts:
                file_subset += [os.path.join(self.data_dir, '{}_exact_seg.*'.format(pid)) for pid in pids]
            self.copy_data(cf, file_subset=file_subset)
        img_paths = [os.path.join(self.data_dir, '{}.npy'.format(pid)) for pid in pids]
        seg_paths = [os.path.join(self.data_dir, '{}_seg.npy'.format(pid)) for pid in pids]
        if load_exact_gts:
            exact_seg_paths = [os.path.join(self.data_dir, '{}_exact_seg.npy'.format(pid)) for pid in pids]
        class_targets = p_df['class_ids'].tolist()
        rg_targets = p_df['regression_vectors'].tolist()
        if load_exact_gts:
            exact_rg_targets = p_df['undistorted_rg_vectors'].tolist()
        fg_slices = p_df['fg_slices'].tolist()
        self.data = OrderedDict()
        for ix, pid in enumerate(pids):
            self.data[pid] = {'data': img_paths[ix], 'seg': seg_paths[ix], 'pid': pid,
                              'fg_slices': np.array(fg_slices[ix])}
            if load_exact_gts:
                self.data[pid]['exact_seg'] = exact_seg_paths[ix]
            if 'class' in self.cf.prediction_tasks:
                self.data[pid]['class_targets'] = np.array(class_targets[ix], dtype='uint8')
            else:
                # no classification task: collapse all rois to a single fg class
                self.data[pid]['class_targets'] = np.ones_like(np.array(class_targets[ix]), dtype='uint8')
            if load_exact_gts:
                self.data[pid]['exact_class_targets'] = self.data[pid]['class_targets']
            if any(['regression' in task for task in self.cf.prediction_tasks]):
                self.data[pid]['regression_targets'] = np.array(rg_targets[ix], dtype='float16')
                self.data[pid]["rg_bin_targets"] = np.array([cf.rg_val_to_bin_id(v) for v in rg_targets[ix]], dtype='uint8')
                if load_exact_gts:
                    self.data[pid]['exact_regression_targets'] = np.array(exact_rg_targets[ix], dtype='float16')
                    self.data[pid]["exact_rg_bin_targets"] = np.array([cf.rg_val_to_bin_id(v) for v in exact_rg_targets[ix]],
                                                                      dtype='uint8')
        # side effect: record on cf which roi-wise items exist, for downstream converters
        cf.roi_items = cf.observables_rois[:]
        cf.roi_items += ['class_targets']
        if any(['regression' in task for task in self.cf.prediction_tasks]):
            cf.roi_items += ['regression_targets']
            cf.roi_items += ['rg_bin_targets']
        self.set_ids = np.array(list(self.data.keys()))
        self.df = None
class BatchGenerator(dutils.BatchGenerator):
    """
    creates the training/validation batch generator. Samples n_batch_size patients (draws a slice from each patient if 2D)
    from the data set while maintaining foreground-class balance. Returned patches are cropped/padded to pre_crop_size.
    Actual patch_size is obtained after data augmentation.
    :param data: data dictionary as provided by 'load_dataset'.
    :param batch_size: number of patients to sample for the batch
    :return dictionary containing the batch data (b, c, x, y, (z)) / seg (b, 1, x, y, (z)) / pids / class_target
    """
    def __init__(self, cf, data, sample_pids_w_replace=True, max_batches=None, raise_stop_iteration=False, seed=0):
        super(BatchGenerator, self).__init__(cf, data, sample_pids_w_replace=sample_pids_w_replace,
                                             max_batches=max_batches, raise_stop_iteration=raise_stop_iteration,
                                             seed=seed)
        self.chans = cf.channels if cf.channels is not None else np.index_exp[:]
        assert hasattr(self.chans, "__iter__"), "self.chans has to be list-like to maintain dims when slicing"
        # NOTE(review): generate_train_batch reads self.cf.crop_margin, not this
        # attribute — confirm cf itself defines crop_margin, else this is dead.
        self.crop_margin = np.array(self.cf.patch_size) / 8. # min distance of ROI center to edge of cropped_patch.
        self.p_fg = 0.5  # probability of sampling a foreground (roi-containing) crop/slice
        self.empty_samples_max_ratio = 0.6  # force fg sampling once this ratio of empty samples is reached
        self.balance_target_distribution(plot=sample_pids_w_replace)
    def generate_train_batch(self):
        # everything done in here is per batch
        # print statements in here get confusing due to multithreading
        batch_pids = self.get_batch_pids()
        batch_data, batch_segs, batch_patient_targets = [], [], []
        batch_roi_items = {name: [] for name in self.cf.roi_items}
        # record roi count and empty count of classes in batch
        # empty count for no presence of resp. class in whole sample (empty slices in 2D/patients in 3D)
        batch_roi_counts = np.zeros((len(self.unique_ts),), dtype='uint32')
        batch_empty_counts = np.zeros((len(self.unique_ts),), dtype='uint32')
        for b in range(len(batch_pids)):
            patient = self._data[batch_pids[b]]
            data = np.load(patient['data'], mmap_mode='r').astype('float16')[np.newaxis]
            seg = np.load(patient['seg'], mmap_mode='r').astype('uint8')
            (c, y, x, z) = data.shape
            if self.cf.dim == 2:
                # 2D: draw a single slice along z, preferring slices containing
                # the currently under-represented balance target.
                elig_slices, choose_fg = [], False
                if len(patient['fg_slices']) > 0:
                    if np.all(batch_empty_counts / self.batch_size >= self.empty_samples_max_ratio) or np.random.rand(
                            1) <= self.p_fg:
                        # fg is to be picked
                        for tix in np.argsort(batch_roi_counts):
                            # pick slices of patient that have roi of sought-for target
                            # np.unique(seg[...,sl_ix][seg[...,sl_ix]>0]) gives roi_ids (numbering) of rois in slice sl_ix
                            elig_slices = [sl_ix for sl_ix in np.arange(z) if np.count_nonzero(
                                patient[self.balance_target][np.unique(seg[..., sl_ix][seg[..., sl_ix] > 0]) - 1] ==
                                self.unique_ts[tix]) > 0]
                            if len(elig_slices) > 0:
                                choose_fg = True
                                break
                    else:
                        # pick bg
                        elig_slices = np.setdiff1d(np.arange(z), patient['fg_slices'])
                if len(elig_slices) > 0:
                    sl_pick_ix = np.random.choice(elig_slices, size=None)
                else:
                    sl_pick_ix = np.random.choice(z, size=None)
                data = data[..., sl_pick_ix]
                seg = seg[..., sl_pick_ix]
            spatial_shp = data[0].shape
            assert spatial_shp == seg.shape, "spatial shape incongruence betw. data and seg"
            # pad up to pre_crop_size where the image is too small
            if np.any([spatial_shp[ix] < self.cf.pre_crop_size[ix] for ix in range(len(spatial_shp))]):
                new_shape = [np.max([spatial_shp[ix], self.cf.pre_crop_size[ix]]) for ix in range(len(spatial_shp))]
                data = dutils.pad_nd_image(data, (len(data), *new_shape))
                seg = dutils.pad_nd_image(seg, new_shape)
            # eventual cropping to pre_crop_size: sample pixel from random ROI and shift center,
            # if possible, to that pixel, so that img still contains ROI after pre-cropping
            dim_cropflags = [spatial_shp[i] > self.cf.pre_crop_size[i] for i in range(len(spatial_shp))]
            if np.any(dim_cropflags):
                # sample pixel from random ROI and shift center, if possible, to that pixel
                if self.cf.dim==3:
                    choose_fg = np.any(batch_empty_counts/self.batch_size>=self.empty_samples_max_ratio) or \
                                np.random.rand(1) <= self.p_fg
                if choose_fg and np.any(seg):
                    available_roi_ids = np.unique(seg)[1:]
                    for tix in np.argsort(batch_roi_counts):
                        elig_roi_ids = available_roi_ids[patient[self.balance_target][available_roi_ids-1] == self.unique_ts[tix]]
                        if len(elig_roi_ids)>0:
                            seg_ics = np.argwhere(seg == np.random.choice(elig_roi_ids, size=None))
                            break
                    roi_anchor_pixel = seg_ics[np.random.choice(seg_ics.shape[0], size=None)]
                    assert seg[tuple(roi_anchor_pixel)] > 0
                    # sample the patch center coords. constrained by edges of image - pre_crop_size /2 and
                    # distance to the selected ROI < patch_size /2
                    def get_cropped_centercoords(dim):
                        low = np.max((self.cf.pre_crop_size[dim] // 2,
                                      roi_anchor_pixel[dim] - (
                                                  self.cf.patch_size[dim] // 2 - self.cf.crop_margin[dim])))
                        high = np.min((spatial_shp[dim] - self.cf.pre_crop_size[dim] // 2,
                                       roi_anchor_pixel[dim] + (
                                                   self.cf.patch_size[dim] // 2 - self.cf.crop_margin[dim])))
                        if low >= high: # happens if lesion on the edge of the image.
                            low = self.cf.pre_crop_size[dim] // 2
                            high = spatial_shp[dim] - self.cf.pre_crop_size[dim] // 2
                        assert low < high, 'low greater equal high, data dimension {} too small, shp {}, patient {}, low {}, high {}'.format(
                            dim,
                            spatial_shp, patient['pid'], low, high)
                        return np.random.randint(low=low, high=high)
                else:
                    # sample crop center regardless of ROIs, not guaranteed to be empty
                    def get_cropped_centercoords(dim):
                        return np.random.randint(low=self.cf.pre_crop_size[dim] // 2,
                                                 high=spatial_shp[dim] - self.cf.pre_crop_size[dim] // 2)
                sample_seg_center = {}
                for dim in np.where(dim_cropflags)[0]:
                    sample_seg_center[dim] = get_cropped_centercoords(dim)
                    min_ = int(sample_seg_center[dim] - self.cf.pre_crop_size[dim] // 2)
                    max_ = int(sample_seg_center[dim] + self.cf.pre_crop_size[dim] // 2)
                    data = np.take(data, indices=range(min_, max_), axis=dim + 1)  # +1 for channeldim
                    seg = np.take(seg, indices=range(min_, max_), axis=dim)
            batch_data.append(data)
            batch_segs.append(seg[np.newaxis])
            for o in batch_roi_items: #after loop, holds every entry of every batchpatient per observable
                batch_roi_items[o].append(patient[o])
            # update per-class roi/empty bookkeeping used by the balancing logic above
            if self.cf.dim == 3:
                for tix in range(len(self.unique_ts)):
                    non_zero = np.count_nonzero(patient[self.balance_target] == self.unique_ts[tix])
                    batch_roi_counts[tix] += non_zero
                    batch_empty_counts[tix] += int(non_zero==0)
                    # todo remove assert when checked
                    if not np.any(seg):
                        assert non_zero==0
            elif self.cf.dim == 2:
                for tix in range(len(self.unique_ts)):
                    non_zero = np.count_nonzero(patient[self.balance_target][np.unique(seg[seg>0]) - 1] == self.unique_ts[tix])
                    batch_roi_counts[tix] += non_zero
                    batch_empty_counts[tix] += int(non_zero == 0)
                    # todo remove assert when checked
                    if not np.any(seg):
                        assert non_zero==0
        batch = {'data': np.array(batch_data), 'seg': np.array(batch_segs).astype('uint8'),
                 'pid': batch_pids,
                 'roi_counts': batch_roi_counts, 'empty_counts': batch_empty_counts}
        for key,val in batch_roi_items.items(): #extend batch dic by entries of observables dic
            batch[key] = np.array(val)
        return batch
class PatientBatchIterator(dutils.PatientBatchIterator):
    """
    creates a test generator that iterates over entire given dataset returning 1 patient per batch.
    Can be used for monitoring if cf.val_mode = 'patient_val' for a monitoring closer to actually evaluation (done in 3D),
    if willing to accept speed-loss during training.
    Specific properties of toy data set: toy data may be created with added ground-truth noise. thus, there are
    exact ground truths (GTs) and noisy ground truths available. the normal or noisy GTs are used in training by
    the BatchGenerator. The PatientIterator, however, may use the exact GTs if set in configs.
    :return: out_batch: dictionary containing one patient with batch_size = n_3D_patches in 3D or
    batch_size = n_2D_patches in 2D .
    """
    def __init__(self, cf, data, mode='test'):
        super(PatientBatchIterator, self).__init__(cf, data)
        self.patch_size = cf.patch_size_2D + [1] if cf.dim == 2 else cf.patch_size_3D
        self.chans = cf.channels if cf.channels is not None else np.index_exp[:]
        assert hasattr(self.chans, "__iter__"), "self.chans has to be list-like to maintain dims when slicing"
        # decide whether to serve exact (noise-free) GTs; keys are then prefixed 'exact_'
        if (mode=="validation" and hasattr(self.cf, 'val_against_exact_gt') and self.cf.val_against_exact_gt) or \
                (mode == 'test' and self.cf.test_against_exact_gt):
            self.gt_prefix = 'exact_'
            print("PatientIterator: Loading exact Ground Truths.")
        else:
            self.gt_prefix = ''
        self.patient_ix = 0  # running index over all patients in set
    def generate_train_batch(self, pid=None):
        """Assemble one whole-patient batch, optionally tiled into patches.

        :param pid: patient id; if None, advances through dataset_pids in order
            (wrapping around at the end).
        :return: batch dict with data, seg, roi items, and patient-level entries.
        """
        if pid is None:
            pid = self.dataset_pids[self.patient_ix]
        patient = self._data[pid]
        # already swapped dimensions in pp from (c,)z,y,x to c,y,x,z or h,w,d to ease 2D/3D-case handling
        data = np.load(patient['data'], mmap_mode='r').astype('float16')[np.newaxis]
        seg = np.load(patient[self.gt_prefix+'seg']).astype('uint8')[np.newaxis]
        data_shp_raw = data.shape
        plot_bg = data[self.cf.plot_bg_chan] if self.cf.plot_bg_chan not in self.chans else None
        data = data[self.chans]
        discarded_chans = len(
            [c for c in np.setdiff1d(np.arange(data_shp_raw[0]), self.chans) if c < self.cf.plot_bg_chan])
        spatial_shp = data[0].shape  # spatial dims need to be in order x,y,z
        assert spatial_shp == seg[0].shape, "spatial shape incongruence betw. data and seg"
        if np.any([spatial_shp[i] < ps for i, ps in enumerate(self.patch_size)]):
            new_shape = [np.max([spatial_shp[i], self.patch_size[i]]) for i in range(len(self.patch_size))]
            data = dutils.pad_nd_image(data, new_shape)  # use 'return_slicer' to crop image back to original shape.
            seg = dutils.pad_nd_image(seg, new_shape)
            if plot_bg is not None:
                plot_bg = dutils.pad_nd_image(plot_bg, new_shape)
        if self.cf.dim == 3 or self.cf.merge_2D_to_3D_preds:
            # adds the batch dim here bc won't go through MTaugmenter
            out_data = data[np.newaxis]
            out_seg = seg[np.newaxis]
            if plot_bg is not None:
                out_plot_bg = plot_bg[np.newaxis]
            # data and seg shape: (1,c,x,y,z), where c=1 for seg
            batch_3D = {'data': out_data, 'seg': out_seg}
            for o in self.cf.roi_items:
                batch_3D[o] = np.array([patient[self.gt_prefix+o]])
            converter = ConvertSegToBoundingBoxCoordinates(3, self.cf.roi_items, False, self.cf.class_specific_seg)
            batch_3D = converter(**batch_3D)
            batch_3D.update({'patient_bb_target': batch_3D['bb_target'], 'original_img_shape': out_data.shape})
            for o in self.cf.roi_items:
                batch_3D["patient_" + o] = batch_3D[o]
        if self.cf.dim == 2:
            out_data = np.transpose(data, axes=(3, 0, 1, 2)).astype('float32')  # (c,y,x,z) to (b=z,c,x,y), use z=b as batchdim
            out_seg = np.transpose(seg, axes=(3, 0, 1, 2)).astype('uint8')  # (c,y,x,z) to (b=z,c,x,y)
            batch_2D = {'data': out_data, 'seg': out_seg}
            for o in self.cf.roi_items:
                batch_2D[o] = np.repeat(np.array([patient[self.gt_prefix+o]]), len(out_data), axis=0)
            converter = ConvertSegToBoundingBoxCoordinates(2, self.cf.roi_items, False, self.cf.class_specific_seg)
            batch_2D = converter(**batch_2D)
            if plot_bg is not None:
                out_plot_bg = np.transpose(plot_bg, axes=(2, 0, 1)).astype('float32')
            if self.cf.merge_2D_to_3D_preds:
                # patient-level targets come from the 3D batch built above
                batch_2D.update({'patient_bb_target': batch_3D['patient_bb_target'],
                                 'original_img_shape': out_data.shape})
                for o in self.cf.roi_items:
                    batch_2D["patient_" + o] = batch_3D[o]
            else:
                batch_2D.update({'patient_bb_target': batch_2D['bb_target'],
                                 'original_img_shape': out_data.shape})
                for o in self.cf.roi_items:
                    batch_2D["patient_" + o] = batch_2D[o]
        out_batch = batch_3D if self.cf.dim == 3 else batch_2D
        out_batch.update({'pid': np.array([patient['pid']] * len(out_data))})
        if self.cf.plot_bg_chan in self.chans and discarded_chans > 0:  # len(self.chans[:self.cf.plot_bg_chan])<data_shp_raw[0]:
            assert plot_bg is None
            # plot_bg becomes a channel index (int) into the kept channels here
            plot_bg = int(self.cf.plot_bg_chan - discarded_chans)
            out_plot_bg = plot_bg
        if plot_bg is not None:
            out_batch['plot_bg'] = out_plot_bg
        # eventual tiling into patches
        spatial_shp = out_batch["data"].shape[2:]
        if np.any([spatial_shp[ix] > self.patch_size[ix] for ix in range(len(spatial_shp))]):
            patient_batch = out_batch
            print("patientiterator produced patched batch!")
            patch_crop_coords_list = dutils.get_patch_crop_coords(data[0], self.patch_size)
            new_img_batch, new_seg_batch = [], []
            for c in patch_crop_coords_list:
                new_img_batch.append(data[:, c[0]:c[1], c[2]:c[3], c[4]:c[5]])
                seg_patch = seg[:, c[0]:c[1], c[2]: c[3], c[4]:c[5]]
                new_seg_batch.append(seg_patch)
            shps = []
            for arr in new_img_batch:
                shps.append(arr.shape)
            data = np.array(new_img_batch)  # (patches, c, x, y, z)
            seg = np.array(new_seg_batch)
            if self.cf.dim == 2:
                # all patches have z dimension 1 (slices). discard dimension
                data = data[..., 0]
                seg = seg[..., 0]
            patch_batch = {'data': data.astype('float32'), 'seg': seg.astype('uint8'),
                           'pid': np.array([patient['pid']] * data.shape[0])}
            for o in self.cf.roi_items:
                patch_batch[o] = np.repeat(np.array([patient[self.gt_prefix+o]]), len(patch_crop_coords_list), axis=0)
            #patient-wise (orig) batch info for putting the patches back together after prediction
            for o in self.cf.roi_items:
                patch_batch["patient_"+o] = patient_batch["patient_"+o]
                if self.cf.dim == 2:
                    # this could also be named "unpatched_2d_roi_items"
                    patch_batch["patient_" + o + "_2d"] = patient_batch[o]
            patch_batch['patch_crop_coords'] = np.array(patch_crop_coords_list)
            patch_batch['patient_bb_target'] = patient_batch['patient_bb_target']
            if self.cf.dim == 2:
                patch_batch['patient_bb_target_2d'] = patient_batch['bb_target']
            patch_batch['patient_data'] = patient_batch['data']
            patch_batch['patient_seg'] = patient_batch['seg']
            patch_batch['original_img_shape'] = patient_batch['original_img_shape']
            if plot_bg is not None:
                patch_batch['patient_plot_bg'] = patient_batch['plot_bg']
            converter = ConvertSegToBoundingBoxCoordinates(self.cf.dim, self.cf.roi_items, get_rois_from_seg=False,
                                                           class_specific_seg=self.cf.class_specific_seg)
            patch_batch = converter(**patch_batch)
            out_batch = patch_batch
        # advance (and wrap) the running patient index
        self.patient_ix += 1
        if self.patient_ix == len(self.dataset_pids):
            self.patient_ix = 0
        return out_batch
def create_data_gen_pipeline(cf, patient_data, do_aug=True, **kwargs):
    """
    create mutli-threaded train/val/test batch generation and augmentation pipeline.
    :param cf: config object providing patch sizes and augmentation kwargs (cf.da_kwargs).
    :param patient_data: dictionary containing one dictionary per patient in the train/test subset.
    :param do_aug: whether to perform data augmentation (training) or not (validation/testing)
    :param kwargs: forwarded to the BatchGenerator constructor.
    :return: multithreaded_generator
    """
    # create instance of batch generator as first element in pipeline.
    data_gen = BatchGenerator(cf, patient_data, **kwargs)
    my_transforms = []
    if do_aug:
        if cf.da_kwargs["mirror"]:
            mirror_transform = Mirror(axes=cf.da_kwargs['mirror_axes'])
            my_transforms.append(mirror_transform)
        spatial_transform = SpatialTransform(patch_size=cf.patch_size[:cf.dim],
                                             patch_center_dist_from_border=cf.da_kwargs['rand_crop_dist'],
                                             do_elastic_deform=cf.da_kwargs['do_elastic_deform'],
                                             alpha=cf.da_kwargs['alpha'], sigma=cf.da_kwargs['sigma'],
                                             do_rotation=cf.da_kwargs['do_rotation'], angle_x=cf.da_kwargs['angle_x'],
                                             angle_y=cf.da_kwargs['angle_y'], angle_z=cf.da_kwargs['angle_z'],
                                             do_scale=cf.da_kwargs['do_scale'], scale=cf.da_kwargs['scale'],
                                             random_crop=cf.da_kwargs['random_crop'])
        my_transforms.append(spatial_transform)
    else:
        # no augmentation: deterministically crop pre_crop_size batches down to patch_size
        my_transforms.append(CenterCropTransform(crop_size=cf.patch_size[:cf.dim]))
    my_transforms.append(ConvertSegToBoundingBoxCoordinates(cf.dim, cf.roi_items, False, cf.class_specific_seg))
    all_transforms = Compose(my_transforms)
    # multithreaded_generator = SingleThreadedAugmenter(data_gen, all_transforms)
    multithreaded_generator = MultiThreadedAugmenter(data_gen, all_transforms, num_processes=data_gen.n_filled_threads,
                                                     seeds=range(data_gen.n_filled_threads))
    return multithreaded_generator
def get_train_generators(cf, logger, data_statistics=False):
    """
    wrapper function for creating the training batch generator pipeline. returns the train/val generators.
    selects patients according to cv folds (generated by first run/fold of experiment):
    splits the data into n-folds, where 1 split is used for val, 1 split for testing and the rest for training. (inner loop test set)
    If cf.hold_out_test_set is True, adds the test split to the training data.
    """
    dataset = Dataset(cf, logger)
    dataset.init_FoldGenerator(cf.seed, cf.n_cv_splits)
    dataset.generate_splits(check_file=os.path.join(cf.exp_dir, 'fold_ids.pickle'))
    set_splits = dataset.fg.splits
    # fold cf.fold becomes the test split, the preceding fold becomes validation
    test_ids, val_ids = set_splits.pop(cf.fold), set_splits.pop(cf.fold - 1)
    train_ids = np.concatenate(set_splits, axis=0)
    if cf.hold_out_test_set:
        # with a held-out test set, the inner-loop test split is folded into training
        train_ids = np.concatenate((train_ids, test_ids), axis=0)
        test_ids = []
    train_data = {k: v for (k, v) in dataset.data.items() if str(k) in train_ids}
    val_data = {k: v for (k, v) in dataset.data.items() if str(k) in val_ids}
    logger.info("data set loaded with: {} train / {} val / {} test patients".format(len(train_ids), len(val_ids),
                                                                                   len(test_ids)))
    if data_statistics:
        dataset.calc_statistics(subsets={"train": train_ids, "val": val_ids, "test": test_ids}, plot_dir=
        os.path.join(cf.plot_dir,"dataset"))
    batch_gen = {}
    batch_gen['train'] = create_data_gen_pipeline(cf, train_data, do_aug=cf.do_aug, sample_pids_w_replace=True)
    if cf.val_mode == 'val_patient':
        batch_gen['val_patient'] = PatientBatchIterator(cf, val_data, mode='validation')
        batch_gen['n_val'] = len(val_ids) if cf.max_val_patients=="all" else min(len(val_ids), cf.max_val_patients)
    elif cf.val_mode == 'val_sampling':
        batch_gen['n_val'] = int(np.ceil(len(val_data)/cf.batch_size)) if cf.num_val_batches == "all" else cf.num_val_batches
        # in current setup, val loader is used like generator. with max_batches being applied in train routine.
        batch_gen['val_sampling'] = create_data_gen_pipeline(cf, val_data, do_aug=False, sample_pids_w_replace=False,
                                                             max_batches=None, raise_stop_iteration=False)
    return batch_gen
def get_test_generator(cf, logger):
    """
    Build the test-set patient iterator.
    if get_test_generators is possibly called multiple times in server env, every time of
    Dataset initiation rsync will check for copying the data; this should be okay
    since rsync will not copy if files already exist in destination.
    :return: dict with 'test' (PatientBatchIterator) and 'n_test' (patient count, capped
        by cf.max_test_patients unless set to "all").
    """
    if cf.hold_out_test_set:
        # held-out set lives in its own source dir and uses all of its patients
        sourcedir = cf.test_data_sourcedir
        test_ids = None
    else:
        # inner-loop test set: read the split for this fold from the experiment dir
        sourcedir = None
        with open(os.path.join(cf.exp_dir, 'fold_ids.pickle'), 'rb') as handle:
            set_splits = pickle.load(handle)
        test_ids = set_splits[cf.fold]
    test_set = Dataset(cf, logger, subset_ids=test_ids, data_sourcedir=sourcedir, mode='test')
    logger.info("data set loaded with: {} test patients".format(len(test_set.set_ids)))
    batch_gen = {}
    batch_gen['test'] = PatientBatchIterator(cf, test_set.data)
    batch_gen['n_test'] = len(test_set.set_ids) if cf.max_test_patients=="all" else \
        min(cf.max_test_patients, len(test_set.set_ids))
    return batch_gen
# Smoke test: build train/val/test generators, draw example batches, plot them,
# and report per-step timings.
if __name__=="__main__":
    import utils.exp_utils as utils
    from datasets.toy.configs import Configs
    cf = Configs()
    total_stime = time.time()
    times = {}
    # cf.server_env = True
    # cf.data_dir = "experiments/dev_data"
    cf.exp_dir = "experiments/dev/"
    cf.plot_dir = cf.exp_dir + "plots"
    os.makedirs(cf.exp_dir, exist_ok=True)
    cf.fold = 0
    logger = utils.get_logger(cf.exp_dir)
    gens = get_train_generators(cf, logger)
    train_loader = gens['train']
    # range(0): training-batch loop is currently disabled; bump to enable
    for i in range(0):
        stime = time.time()
        print("producing training batch nr ", i)
        ex_batch = next(train_loader)
        times["train_batch"] = time.time() - stime
        #experiments/dev/dev_exbatch_{}.png".format(i)
        plg.view_batch(cf, ex_batch, out_file="experiments/dev/dev_exbatch_{}.png".format(i), show_gt_labels=True, vmin=0, show_info=False)
    val_loader = gens['val_sampling']
    stime = time.time()
    for i in range(1):
        ex_batch = next(val_loader)
    times["val_batch"] = time.time() - stime
    stime = time.time()
    #"experiments/dev/dev_exvalbatch_{}.png"
    plg.view_batch(cf, ex_batch, out_file="experiments/dev/dev_exvalbatch_{}.png".format(i), show_gt_labels=True, vmin=0, show_info=True)
    times["val_plot"] = time.time() - stime
    #
    test_loader = get_test_generator(cf, logger)["test"]
    stime = time.time()
    ex_batch = test_loader.generate_train_batch(pid=None)
    times["test_batch"] = time.time() - stime
    stime = time.time()
    plg.view_batch(cf, ex_batch, show_gt_labels=True, out_file="experiments/dev/dev_expatchbatch.png", vmin=0)
    times["test_patchbatch_plot"] = time.time() - stime
    print("Times recorded throughout:")
    for (k, v) in times.items():
        print(k, "{:.2f}".format(v))
    mins, secs = divmod((time.time() - total_stime), 60)
    h, mins = divmod(mins, 60)
    t = "{:d}h:{:02d}m:{:02d}s".format(int(h), int(mins), int(secs))
    print("{} total runtime: {}".format(os.path.split(__file__)[1], t))
d8dafadd7494f9c07163ed0511b5ae677d9b2014 | 1,568 | py | Python | Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 12/The Number Guessing Game.py | edaaydinea/365-days-of-coding-challenge | baf06a9bef75ff45194e57357e20085b9cde2498 | [
"MIT"
] | 4 | 2022-01-05T12:14:13.000Z | 2022-01-08T16:03:32.000Z | Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 12/The Number Guessing Game.py | edaaydinea/365-days-of-code | baf06a9bef75ff45194e57357e20085b9cde2498 | [
"MIT"
] | null | null | null | Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 12/The Number Guessing Game.py | edaaydinea/365-days-of-code | baf06a9bef75ff45194e57357e20085b9cde2498 | [
"MIT"
] | null | null | null | from random import randint
from art import logo
EASY_LEVEL_TURNS = 10
HARD_LEVEL_TURNS = 5
# Function to check user's guess against actual answer.
def check_answer(guess, answer, turns):
    """Check *guess* against *answer* and return the number of turns remaining.

    Prints feedback for the player. A wrong guess costs one turn; a correct
    guess leaves *turns* unchanged. (Fix: the correct-guess branch previously
    fell through and implicitly returned None, contradicting this contract —
    the caller assigns the return value back to its turn counter.)
    """
    if guess > answer:
        print("Too high.")
        return turns - 1
    elif guess < answer:
        print("Too low.")
        return turns - 1
    else:
        print(f"You got it! The answer was {answer}.")
        return turns
# Make function to set difficulty.
def set_difficulty():
    """Ask the player for a difficulty and return the matching turn budget.

    Any answer other than exactly 'easy' selects hard mode.
    """
    choice = input("Choose a difficulty. Type 'easy' or 'hard': ")
    return EASY_LEVEL_TURNS if choice == "easy" else HARD_LEVEL_TURNS
def game():
    """Run one interactive round of the number guessing game.

    Picks a random target in [1, 100], asks for a difficulty (turn budget),
    then loops reading guesses until the player wins or runs out of turns.
    All interaction happens via stdin/stdout.
    """
    print(logo)
    # Choosing a random number between 1 and 100.
    print("Welcome to the Number Guessing Game!")
    print("I'm thinking of a number between 1 and 100.")
    answer = randint(1, 100)
    print(f"Pssst, the correct answer is {answer}")  # debug print — reveals the answer
    turns = set_difficulty()
    # Repeat the guessing functionality if they get it wrong.
    guess = 0
    while guess != answer:
        print(f"You have {turns} attempts remaining to guess the number.")
        # Let the user guess a number.
        guess = int(input("Make a guess: "))
        # Track the number of turns and reduce by 1 if they get it wrong.
        turns = check_answer(guess, answer, turns)
        if turns == 0:
            print("You've run out of guesses, you lose.")
            return
        elif guess != answer:
            print("Guess again.")
# Start a round immediately when the script runs.
game()
| 27.034483 | 77 | 0.626276 |
ed78ceae5fb029bcc137171602565b0fc4932199 | 12,348 | py | Python | microsoftgraph/client.py | Disruption/microsoftgraph-python | 7877933e492859a6daaf32196cfcbe82e0e4b714 | [
"MIT"
] | null | null | null | microsoftgraph/client.py | Disruption/microsoftgraph-python | 7877933e492859a6daaf32196cfcbe82e0e4b714 | [
"MIT"
] | null | null | null | microsoftgraph/client.py | Disruption/microsoftgraph-python | 7877933e492859a6daaf32196cfcbe82e0e4b714 | [
"MIT"
] | null | null | null | from urllib.parse import urlencode
import requests
from microsoftgraph import exceptions
from microsoftgraph.calendar import Calendar
from microsoftgraph.contacts import Contacts
from microsoftgraph.files import Files
from microsoftgraph.mail import Mail
from microsoftgraph.notes import Notes
from microsoftgraph.response import Response
from microsoftgraph.users import Users
from microsoftgraph.webhooks import Webhooks
from microsoftgraph.workbooks import Workbooks
class Client(object):
AUTHORITY_URL = "https://login.microsoftonline.com/"
AUTH_ENDPOINT = "/oauth2/v2.0/authorize?"
TOKEN_ENDPOINT = "/oauth2/v2.0/token"
RESOURCE = "https://graph.microsoft.com/"
def __init__(
self,
client_id: str,
client_secret: str,
api_version: str = "v1.0",
account_type: str = "common",
requests_hooks: dict = None,
paginate: bool = True,
) -> None:
"""Instantiates library.
Args:
client_id (str): Application client id.
client_secret (str): Application client secret.
api_version (str, optional): v1.0 or beta. Defaults to "v1.0".
account_type (str, optional): common, organizations or consumers. Defaults to "common".
requests_hooks (dict, optional): Requests library event hooks. Defaults to None.
Raises:
Exception: requests_hooks is not a dict.
"""
self.client_id = client_id
self.client_secret = client_secret
self.api_version = api_version
self.account_type = account_type
self.base_url = self.RESOURCE + self.api_version + "/"
self.token = None
self.workbook_session_id = None
self.paginate = paginate
self.calendar = Calendar(self)
self.contacts = Contacts(self)
self.files = Files(self)
self.mail = Mail(self)
self.notes = Notes(self)
self.users = Users(self)
self.webhooks = Webhooks(self)
self.workbooks = Workbooks(self)
if requests_hooks and not isinstance(requests_hooks, dict):
raise Exception(
'requests_hooks must be a dict. e.g. {"response": func}. http://docs.python-requests.org/en/master/user/advanced/#event-hooks'
)
self.requests_hooks = requests_hooks
def authorization_url(self, redirect_uri: str, scope: list, state: str = None) -> str:
"""Generates an Authorization URL.
The first step to getting an access token for many OpenID Connect (OIDC) and OAuth 2.0 flows is to redirect the
user to the Microsoft identity platform /authorize endpoint. Azure AD will sign the user in and ensure their
consent for the permissions your app requests. In the authorization code grant flow, after consent is obtained,
Azure AD will return an authorization_code to your app that it can redeem at the Microsoft identity platform
/token endpoint for an access token.
https://docs.microsoft.com/en-us/graph/auth-v2-user#2-get-authorization
Args:
redirect_uri (str): The redirect_uri of your app, where authentication responses can be sent and received by
your app. It must exactly match one of the redirect_uris you registered in the app registration portal.
scope (list): A list of the Microsoft Graph permissions that you want the user to consent to. This may also
include OpenID scopes.
state (str, optional): A value included in the request that will also be returned in the token response.
It can be a string of any content that you wish. A randomly generated unique value is typically
used for preventing cross-site request forgery attacks. The state is also used to encode information
about the user's state in the app before the authentication request occurred, such as the page or view
they were on. Defaults to None.
Returns:
str: Url for OAuth 2.0.
"""
params = {
"client_id": self.client_id,
"redirect_uri": redirect_uri,
"scope": " ".join(scope),
"response_type": "code",
"response_mode": "query",
}
if state:
params["state"] = state
response = self.AUTHORITY_URL + self.account_type + self.AUTH_ENDPOINT + urlencode(params)
return response
def exchange_code(self, redirect_uri: str, code: str) -> Response:
"""Exchanges an oauth code for an user token.
Your app uses the authorization code received in the previous step to request an access token by sending a POST
request to the /token endpoint.
https://docs.microsoft.com/en-us/graph/auth-v2-user#3-get-a-token
Args:
redirect_uri (str): The redirect_uri of your app, where authentication responses can be sent and received by
your app. It must exactly match one of the redirect_uris you registered in the app registration portal.
code (str): The authorization_code that you acquired in the first leg of the flow.
Returns:
Response: Microsoft Graph Response.
"""
data = {
"client_id": self.client_id,
"redirect_uri": redirect_uri,
"client_secret": self.client_secret,
"code": code,
"grant_type": "authorization_code",
}
response = requests.post(self.AUTHORITY_URL + self.account_type + self.TOKEN_ENDPOINT, data=data)
return self._parse(response)
def refresh_token(self, redirect_uri: str, refresh_token: str) -> Response:
"""Exchanges a refresh token for an user token.
Access tokens are short lived, and you must refresh them after they expire to continue accessing resources.
You can do so by submitting another POST request to the /token endpoint, this time providing the refresh_token
instead of the code.
https://docs.microsoft.com/en-us/graph/auth-v2-user#5-use-the-refresh-token-to-get-a-new-access-token
Args:
redirect_uri (str): The redirect_uri of your app, where authentication responses can be sent and received by
your app. It must exactly match one of the redirect_uris you registered in the app registration portal.
refresh_token (str): An OAuth 2.0 refresh token. Your app can use this token acquire additional access tokens
after the current access token expires. Refresh tokens are long-lived, and can be used to retain access
to resources for extended periods of time.
Returns:
Response: Microsoft Graph Response.
"""
data = {
"client_id": self.client_id,
"redirect_uri": redirect_uri,
"client_secret": self.client_secret,
"refresh_token": refresh_token,
"grant_type": "refresh_token",
}
response = requests.post(self.AUTHORITY_URL + self.account_type + self.TOKEN_ENDPOINT, data=data)
return self._parse(response)
    def set_token(self, token: dict) -> None:
        """Store the user token used to authorize subsequent requests.

        Args:
            token (dict): Token payload as returned by ``exchange_code`` /
                ``refresh_token`` (must contain ``access_token``).
        """
        self.token = token
    def set_workbook_session_id(self, workbook_session_id: dict) -> None:
        """Store the Excel workbook session id used by workbook requests.

        Args:
            workbook_session_id (dict): Workbook session ID.
        """
        self.workbook_session_id = workbook_session_id
def _paginate_response(self, response: Response) -> Response:
"""Some queries against Microsoft Graph return multiple pages of data either due to server-side paging or due to
the use of the $top query parameter to specifically limit the page size in a request. When a result set spans
multiple pages, Microsoft Graph returns an @odata.nextLink property in the response that contains a URL to the
next page of results.
https://docs.microsoft.com/en-us/graph/paging?context=graph%2Fapi%2F1.0&view=graph-rest-1.0
Args:
response (Response): Graph API Response.
Returns:
Response: Graph API Response.
"""
if not isinstance(response.data, dict):
return response
# Copy data to avoid side effects
data = list(response.data["value"])
while "@odata.nextLink" in response.data:
response = self._do_get(response.data["@odata.nextLink"])
data.extend(response.data["value"])
response.data["value"] = data
return response
    def _get(self, url, **kwargs) -> Response:
        """GET ``url``; transparently merge OData pages when ``self.paginate`` is set."""
        response = self._do_get(url, **kwargs)
        if self.paginate:
            return self._paginate_response(response)
        return response
    def _do_get(self, url, **kwargs) -> Response:
        """Issue a single GET request with no pagination handling."""
        return self._request("GET", url, **kwargs)
    def _post(self, url, **kwargs):
        """Issue an authenticated POST request."""
        return self._request("POST", url, **kwargs)
    def _put(self, url, **kwargs):
        """Issue an authenticated PUT request."""
        return self._request("PUT", url, **kwargs)
    def _patch(self, url, **kwargs):
        """Issue an authenticated PATCH request."""
        return self._request("PATCH", url, **kwargs)
    def _delete(self, url, **kwargs):
        """Issue an authenticated DELETE request."""
        return self._request("DELETE", url, **kwargs)
def _request(self, method, url, headers=None, **kwargs) -> Response:
_headers = {
"Accept": "application/json",
}
_headers["Authorization"] = "Bearer " + self.token["access_token"]
if headers:
_headers.update(headers)
if self.requests_hooks:
kwargs.update({"hooks": self.requests_hooks})
if "Content-Type" not in _headers:
_headers["Content-Type"] = "application/json"
return self._parse(requests.request(method, url, headers=_headers, **kwargs))
def _parse(self, response) -> Response:
status_code = response.status_code
r = Response(original=response)
if status_code in (200, 201, 202, 204, 206):
return r
elif status_code == 400:
raise exceptions.BadRequest(r.data)
elif status_code == 401:
raise exceptions.Unauthorized(r.data)
elif status_code == 403:
raise exceptions.Forbidden(r.data)
elif status_code == 404:
raise exceptions.NotFound(r.data)
elif status_code == 405:
raise exceptions.MethodNotAllowed(r.data)
elif status_code == 406:
raise exceptions.NotAcceptable(r.data)
elif status_code == 409:
raise exceptions.Conflict(r.data)
elif status_code == 410:
raise exceptions.Gone(r.data)
elif status_code == 411:
raise exceptions.LengthRequired(r.data)
elif status_code == 412:
raise exceptions.PreconditionFailed(r.data)
elif status_code == 413:
raise exceptions.RequestEntityTooLarge(r.data)
elif status_code == 415:
raise exceptions.UnsupportedMediaType(r.data)
elif status_code == 416:
raise exceptions.RequestedRangeNotSatisfiable(r.data)
elif status_code == 422:
raise exceptions.UnprocessableEntity(r.data)
elif status_code == 429:
raise exceptions.TooManyRequests(r.data)
elif status_code == 500:
raise exceptions.InternalServerError(r.data)
elif status_code == 501:
raise exceptions.NotImplemented(r.data)
elif status_code == 503:
raise exceptions.ServiceUnavailable(r.data)
elif status_code == 504:
raise exceptions.GatewayTimeout(r.data)
elif status_code == 507:
raise exceptions.InsufficientStorage(r.data)
elif status_code == 509:
raise exceptions.BandwidthLimitExceeded(r.data)
else:
if r["error"]["innerError"]["code"] == "lockMismatch":
# File is currently locked due to being open in the web browser
# while attempting to reupload a new version to the drive.
# Thus temporarily unavailable.
raise exceptions.ServiceUnavailable(r.data)
raise exceptions.UnknownError(r.data)
| 42 | 142 | 0.641561 |
0b00a143e366e40fc1e8e55364c5c3c8e10c93d4 | 1,206 | py | Python | python/cuml/__init__.py | kkraus14/cuml | 3ac4c678f5a2738ce54ce5b8575820d556595393 | [
"Apache-2.0"
] | null | null | null | python/cuml/__init__.py | kkraus14/cuml | 3ac4c678f5a2738ce54ce5b8575820d556595393 | [
"Apache-2.0"
] | null | null | null | python/cuml/__init__.py | kkraus14/cuml | 3ac4c678f5a2738ce54ce5b8575820d556595393 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.cluster.dbscan import DBSCAN
from cuml.cluster.kmeans import KMeans
from cuml.decomposition.pca import PCA
from cuml.decomposition.tsvd import TruncatedSVD
from cuml.filter.kalman_filter import KalmanFilter
from cuml.linear_model.linear_regression import LinearRegression
from cuml.linear_model.ridge import Ridge
from cuml.neighbors.nearest_neighbors import NearestNeighbors
from cuml.utils.pointer_utils import device_of_gpu_matrix
from cuml.solvers.sgd import SGD
from cuml.manifold.umap import UMAP
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 30.923077 | 74 | 0.803483 |
61152afccd3c3b9e60b73e584051d4234813edf1 | 819 | py | Python | invenio_oarepo_oai_pmh_harvester/rules/uk/degree_grantor.py | Semtexcz/invenio-oarepo-oai-pmh-harvester | 2866c7d7355f6885b4f443ee1e82baa24502b36e | [
"MIT"
] | null | null | null | invenio_oarepo_oai_pmh_harvester/rules/uk/degree_grantor.py | Semtexcz/invenio-oarepo-oai-pmh-harvester | 2866c7d7355f6885b4f443ee1e82baa24502b36e | [
"MIT"
] | null | null | null | invenio_oarepo_oai_pmh_harvester/rules/uk/degree_grantor.py | Semtexcz/invenio-oarepo-oai-pmh-harvester | 2866c7d7355f6885b4f443ee1e82baa24502b36e | [
"MIT"
] | null | null | null | from invenio_initial_theses_conversion.rules.marc21.bd7102 import get_degree_grantor
from invenio_oarepo_oai_pmh_harvester.register import Decorators
from invenio_oarepo_oai_pmh_harvester.transformer import OAITransformer
@Decorators.rule('xoai')
@Decorators.pre_rule("/uk/grantor")
def transform_uk_grantor(paths, el, results, phase, **kwargs):
    """Map the harvested ``/uk/grantor`` field onto ``degreeGrantor``.

    Expects exactly one Czech (cs_CZ) value of the form
    "university, faculty, department[, rest]".
    """
    values = el["cs_CZ"][0]["value"]
    assert len(values) == 1
    # Split into at most 4 pieces; only the first three are used.
    parts = [part.strip() for part in values[0].split(",", 3)]
    results[-1]["degreeGrantor"] = get_degree_grantor(
        parts[0], faculty_name=parts[1], department_name=parts[2]
    )
    return OAITransformer.PROCESSED
790dc82fad44913c8a30acf36c53c51c6aad0661 | 7,486 | py | Python | mayan/apps/web_links/views.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/web_links/views.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/web_links/views.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 257 | 2019-05-14T10:26:37.000Z | 2022-03-30T03:37:36.000Z | import logging
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import RedirectView
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.models import Document, DocumentType
from mayan.apps.documents.permissions import permission_document_type_edit
from mayan.apps.views.generics import (
AddRemoveView, SingleObjectCreateView, SingleObjectDeleteView,
SingleObjectEditView, SingleObjectListView
)
from mayan.apps.views.mixins import ExternalObjectViewMixin
from .events import event_web_link_edited
from .forms import WebLinkForm
from .icons import icon_web_link_setup
from .links import link_web_link_create
from .models import ResolvedWebLink, WebLink
from .permissions import (
permission_web_link_create, permission_web_link_delete,
permission_web_link_edit, permission_web_link_instance_view,
permission_web_link_view
)
# Module-level logger for the web links app.
logger = logging.getLogger(name=__name__)
class DocumentTypeWebLinksView(AddRemoveView):
    """Enable or disable web links for a single document type."""
    main_object_permission = permission_document_type_edit
    main_object_model = DocumentType
    main_object_pk_url_kwarg = 'document_type_id'
    secondary_object_model = WebLink
    secondary_object_permission = permission_web_link_edit
    list_available_title = _('Available web links')
    list_added_title = _('Web links enabled')
    related_field = 'web_links'
    def action_add(self, queryset, _event_actor):
        """Attach each selected web link and commit an edited event per link."""
        for obj in queryset:
            self.main_object.web_links.add(obj)
            event_web_link_edited.commit(
                actor=_event_actor, action_object=self.main_object, target=obj
            )
    def action_remove(self, queryset, _event_actor):
        """Detach each selected web link and commit an edited event per link."""
        for obj in queryset:
            self.main_object.web_links.remove(obj)
            event_web_link_edited.commit(
                actor=_event_actor, action_object=self.main_object, target=obj
            )
    def get_actions_extra_kwargs(self):
        # Pass the requesting user through to the event commits above.
        return {'_event_actor': self.request.user}
    def get_extra_context(self):
        return {
            'object': self.main_object,
            'title': _(
                'Web links to enable for document type: %s'
            ) % self.main_object,
        }
class ResolvedWebLinkView(ExternalObjectViewMixin, RedirectView):
    """Redirect to the URL produced by resolving a web link for a document."""
    external_object_pk_url_kwarg = 'document_id'
    external_object_permission = permission_web_link_instance_view
    external_object_queryset = Document.valid.all()
    def get_redirect_url(self, *args, **kwargs):
        """Resolve the web link against the document and return the final URL."""
        return self.get_web_link().get_redirect(
            document=self.external_object, user=self.request.user
        ).url
    def get_web_link(self):
        """Return the requested web link or raise 404 if not accessible."""
        return get_object_or_404(
            klass=self.get_web_link_queryset(), pk=self.kwargs['web_link_id']
        )
    def get_web_link_queryset(self):
        # Only web links resolved for this document, filtered by ACLs.
        queryset = ResolvedWebLink.objects.get_for(
            document=self.external_object, user=self.request.user
        )
        return AccessControlList.objects.restrict_queryset(
            permission=permission_web_link_instance_view, queryset=queryset,
            user=self.request.user
        )
class WebLinkCreateView(SingleObjectCreateView):
    """Create a new web link."""
    extra_context = {'title': _('Create new web link')}
    form_class = WebLinkForm
    post_action_redirect = reverse_lazy(
        viewname='web_links:web_link_list'
    )
    view_permission = permission_web_link_create
    def get_instance_extra_data(self):
        # Attribute the creation event to the requesting user.
        return {'_event_actor': self.request.user}
class WebLinkDeleteView(SingleObjectDeleteView):
    """Confirm and delete a web link."""
    model = WebLink
    object_permission = permission_web_link_delete
    pk_url_kwarg = 'web_link_id'
    post_action_redirect = reverse_lazy(
        viewname='web_links:web_link_list'
    )
    def get_extra_context(self):
        return {
            'object': self.object,
            'title': _('Delete web link: %s') % self.object
        }
class WebLinkDocumentTypesView(AddRemoveView):
    """Select which document types a single web link is enabled for.

    Inverse of DocumentTypeWebLinksView; add/remove is delegated to the
    main object's methods named below.
    """
    main_object_method_add_name = 'document_types_add'
    main_object_method_remove_name = 'document_types_remove'
    main_object_permission = permission_web_link_edit
    main_object_model = WebLink
    main_object_pk_url_kwarg = 'web_link_id'
    secondary_object_model = DocumentType
    secondary_object_permission = permission_document_type_edit
    list_available_title = _('Available document types')
    list_added_title = _('Document types enabled')
    related_field = 'document_types'
    def get_actions_extra_kwargs(self):
        # Attribute add/remove events to the requesting user.
        return {'_event_actor': self.request.user}
    def get_extra_context(self):
        return {
            'object': self.main_object,
            'title': _(
                'Document type for which to enable web link: %s'
            ) % self.main_object,
        }
class WebLinkEditView(SingleObjectEditView):
    """Edit an existing web link."""
    form_class = WebLinkForm
    model = WebLink
    object_permission = permission_web_link_edit
    pk_url_kwarg = 'web_link_id'
    post_action_redirect = reverse_lazy(
        viewname='web_links:web_link_list'
    )
    def get_extra_context(self):
        return {
            'object': self.object,
            'title': _('Edit web link: %s') % self.object
        }
    def get_instance_extra_data(self):
        # Attribute the edit event to the requesting user.
        return {'_event_actor': self.request.user}
class WebLinkListView(SingleObjectListView):
    """List all web links; shows setup guidance when none exist."""
    object_permission = permission_web_link_view
    def get_extra_context(self):
        return {
            'hide_link': True,
            'hide_object': True,
            'no_results_icon': icon_web_link_setup,
            'no_results_main_link': link_web_link_create.resolve(
                context=RequestContext(request=self.request)
            ),
            'no_results_text': _(
                'Web links allow generating HTTP links from documents to '
                'external resources. The link URL\'s can contain document '
                'properties values.'
            ),
            'no_results_title': _(
                'There are no web links'
            ),
            'title': _('Web links'),
        }
    def get_source_queryset(self):
        # Indirection so subclasses can narrow the listed web links.
        return self.get_web_link_queryset()
    def get_web_link_queryset(self):
        return WebLink.objects.all()
class DocumentWebLinkListView(ExternalObjectViewMixin, WebLinkListView):
    """List the web links resolved for one specific document."""
    external_object_permission = permission_web_link_instance_view
    external_object_pk_url_kwarg = 'document_id'
    external_object_queryset = Document.valid.all()
    object_permission = permission_web_link_instance_view
    def get_extra_context(self):
        return {
            'document': self.external_object,
            'hide_link': True,
            'hide_object': True,
            'no_results_icon': icon_web_link_setup,
            'no_results_text': _(
                'Web links allow generating HTTP links from documents to '
                'external resources. The link URL\'s can contain document '
                'properties values.'
            ),
            'no_results_title': _(
                'There are no web links for this document'
            ),
            'object': self.external_object,
            'title': _('Web links for document: %s') % self.external_object,
        }
    def get_web_link_queryset(self):
        # Only links resolved for this document and visible to this user.
        return ResolvedWebLink.objects.get_for(
            document=self.external_object, user=self.request.user
        )
| 34.027273 | 78 | 0.687016 |
d5dfa73f7ad884f865ce08b97be7e3fcfdd368e2 | 6,037 | py | Python | FBSSNN/pkl_spk_tms/gn_icub_stimulus_rtnaW.py | mahmoud-a-ali/FBSSNN | e89659bcaca7be04937efceb3a0831b78ad40bfc | [
"Apache-2.0"
] | null | null | null | FBSSNN/pkl_spk_tms/gn_icub_stimulus_rtnaW.py | mahmoud-a-ali/FBSSNN | e89659bcaca7be04937efceb3a0831b78ad40bfc | [
"Apache-2.0"
] | null | null | null | FBSSNN/pkl_spk_tms/gn_icub_stimulus_rtnaW.py | mahmoud-a-ali/FBSSNN | e89659bcaca7be04937efceb3a0831b78ad40bfc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 18 12:40:30 2018
@author: mali
to generate icub in such away that wach letter with different disp
"""
import comn_conversion as cnvrt
import numpy as np
import random
# check desired requirements ==================================================
# Retina (event camera) dimensions in pixels.
rtna_w = 32
rtna_h = 32
#line_len = 25
# Temporal parameters: shift between stimulus repetitions and event-time
# resolution, both in milliseconds.
t_shft = .5 #msec
t_res = 0.01 #msec
# Horizontal disparity (left/right pixel offset) applied per letter i, c, u, b.
i_dsp= -4
c_dsp= -2
u_dsp= 0
b_dsp= 4
# Number of shifted repetitions of the stimulus.
n_shfts=3
# Column indices of an event record: (channel, time, polarity, x, y).
ch_idx =0
t_idx = 1
pol_idx = 2
x_idx = 3
y_idx = 4
print '\n####### generateicub stimulus .... '
#-----------------------------------------------------------------------------
def Hline(xi, xf, y):
    """Return ([x...], [y...]) pixel coordinates of a horizontal line.

    Spans columns xi..xf inclusive on row y; empty lists when xi > xf.
    """
    xs = list(range(xi, xf + 1))
    ys = [y] * len(xs)
    return (xs, ys)
#-----------------------------------------------------------------------------
def Vline(yi, yf, x):
    """Return ([x...], [y...]) pixel coordinates of a vertical line.

    Spans rows yi..yf inclusive on column x; empty lists when yi > yf.
    """
    ys = list(range(yi, yf + 1))
    xs = [x] * len(ys)
    return (xs, ys)
#-----------------------------------------------------------------------------
# Build the pixel strokes of the letters "i", "c", "u", "b" for the chosen
# retina size. Each letter is a set of horizontal/vertical line segments.
if rtna_w ==64:
    Xi1 ,Yi1 =Hline(10, 12, 22)
    Xi2 ,Yi2 =Hline(10, 12, 38)
    Xi3 ,Yi3 =Vline(22, 38, 11)
    Xi= Xi1+ Xi2+ Xi3
    Yi= Yi1+ Yi2+ Yi3
    Xc1 ,Yc1 =Hline(19, 25, 27)
    Xc2 ,Yc2 =Hline(19, 25, 38)
    Xc3 ,Yc3 =Vline(27, 38, 19)
    Xc= Xc1+ Xc2+ Xc3
    Yc= Yc1+ Yc2+ Yc3
    Xu1 ,Yu1 =Hline(32, 38, 38)
    Xu2 ,Yu2 =Vline(27, 38, 32)
    Xu3 ,Yu3 =Vline(27, 38, 38)
    Xu= Xu1+ Xu2+ Xu3
    Yu= Yu1+ Yu2+ Yu3
    Xb1 ,Yb1 =Hline(43, 49, 29)
    Xb2 ,Yb2 =Hline(43, 49, 38)
    Xb3 ,Yb3 =Vline(22, 38, 43)
    Xb4 ,Yb4 =Vline(29, 38, 49)
    Xb= Xb1+ Xb2+ Xb3 + Xb4
    Yb= Yb1+ Yb2+ Yb3 + Yb4
if rtna_w ==32:
    Xi1 ,Yi1 =Hline(5, 7, 11)
    Xi2 ,Yi2 =Hline(5, 7, 20)
    Xi3 ,Yi3 =Vline(11, 20, 6)
    Xi= Xi1+ Xi2+ Xi3
    Yi= Yi1+ Yi2+ Yi3
    Xc1 ,Yc1 =Hline(10, 14, 20)
    Xc2 ,Yc2 =Hline(10, 14, 14)
    Xc3 ,Yc3 =Vline(14, 20, 10)
    Xc= Xc1+ Xc2+ Xc3
    Yc= Yc1+ Yc2+ Yc3
    Xu1 ,Yu1 =Hline(17, 21, 20)
    Xu2 ,Yu2 =Vline(14, 20, 17)
    Xu3 ,Yu3 =Vline(14, 20, 21)
    Xu= Xu1+ Xu2+ Xu3
    Yu= Yu1+ Yu2+ Yu3
    Xb1 ,Yb1 =Hline(24, 29, 20)
    Xb2 ,Yb2 =Hline(24, 29, 14)
    Xb3 ,Yb3 =Vline(11, 20, 24)
    Xb4 ,Yb4 =Vline(14, 20, 29)
    Xb= Xb1+ Xb2+ Xb3 + Xb4
    Yb= Yb1+ Yb2+ Yb3 + Yb4
# Concatenate all letters: left-channel x coordinates and shared y coordinates.
X_lft = Xi+ Xc+ Xu +Xb
Y = Yi+ Yc+ Yu +Yb
n_evts = len( Y )
# Assemble event records (channel, time, polarity, x, y) for the left channel.
T=[0] *n_evts
T_msec = [float(t) for t in T]
POL = [0] * n_evts
CH = [0] * n_evts
Evts = np.transpose([CH, T_msec, POL, X_lft, Y])
Evts_arr = np.array(Evts)
#print '## L:: Evts_arr :\n{}'.format(Evts_arr)
# Right channel: same letters shifted horizontally by each letter's disparity.
Xir= [x+i_dsp for x in Xi]
Xcr= [x+c_dsp for x in Xc]
Xur= [x+u_dsp for x in Xu]
Xbr= [x+b_dsp for x in Xb]
X_rght = Xir + Xcr + Xur +Xbr
CH = [1] * n_evts
R_Evts = np.transpose([CH, T_msec, POL, X_rght, Y])
R_Evts_arr = np.array(R_Evts)
#print '## R:: R_Evts_arr :\n{}'.format(R_Evts_arr)
#print '## Xi:{}'.format(Xi)
#print '## Xir:{}'.format(Xir)
#print 'n_evts : {}'.format(n_evts)
Levts= np.copy(Evts_arr)
Revts= np.copy(R_Evts_arr)
L= np.copy(Evts_arr)
R= np.copy(R_Evts_arr)
# Stagger the letters in time: i, c, u, b fire at 1..4 time-resolution steps.
for evt in range ( len(Yi) ):
    L[evt][t_idx] = L[evt][t_idx] + t_res
for evt in range ( len(Yi), len(Yi)+len(Yc) ):
    L[evt][t_idx] = L[evt][t_idx] + 2*t_res
for evt in range ( len(Yi)+len(Yc), len(Yi)+len(Yc)+len(Yu) ):
    L[evt][t_idx] = L[evt][t_idx] + 3*t_res
for evt in range ( len(Yi)+len(Yc)+len(Yu), len(Yi)+len(Yc)+len(Yu)+len(Yb) ):
    L[evt][t_idx] = L[evt][t_idx] + 4*t_res
for evt in range ( len(Yi) ):
    R[evt][t_idx] = R[evt][t_idx] + t_res
for evt in range ( len(Yi), len(Yi)+len(Yc) ):
    R[evt][t_idx] = R[evt][t_idx] + 2*t_res
for evt in range ( len(Yi)+len(Yc), len(Yi)+len(Yc)+len(Yu) ):
    R[evt][t_idx] = R[evt][t_idx] + 3*t_res
for evt in range ( len(Yi)+len(Yc)+len(Yu), len(Yi)+len(Yc)+len(Yu)+len(Yb) ):
    R[evt][t_idx] = R[evt][t_idx] + 4*t_res
# Repeat the stimulus n_shfts times, each repeat shifted by t_shft in time
# and one pixel to the right.
Lcpy= np.copy(L)
Rcpy= np.copy(R)
#print '## Levts :\n{}'.format(L[:,t_idx])
for shft in range(1, n_shfts):
    Lcpy[:,t_idx]= Lcpy[:,t_idx] + t_shft
    Rcpy[:,t_idx]= Rcpy[:,t_idx] + t_shft
    Lcpy[:,x_idx]= Lcpy[:,x_idx] + 1
    Rcpy[:,x_idx]= Rcpy[:,x_idx] + 1
    L= np.concatenate([L, Lcpy])
    R= np.concatenate([R, Rcpy])
#print '## Levts :\n{}'.format(L[:,t_idx])
print '## Levts :\n{}'.format(L)
print '## Revts :\n{}'.format(R)
print 'n_evts : {}'.format( len(L) )
##add noise beside the line
##for y in Y:
##    if y%5==0:
##        X[y]=random.randint(0,127)
#for i in range(1, len(X), 5 ):
#    X[i] = random.randint(0,127)
#    Y[i]=random.randint(0,127)
# Save both channels as text files and reload them as a sanity check.
import os
scrpt_name_py = os.path.basename(__file__) # or import sys then sys.argv[0]
scrpt_name = scrpt_name_py.split('.')[0]
fldr_name = 'txt_evts/icub_{}/'.format( rtna_w)
file_name = 'icub{}x{}_{}{}{}{}_lft.txt'.format( rtna_w, rtna_h, i_dsp, c_dsp, u_dsp, b_dsp)
file_path = cnvrt.write_flenfldr_ncrntpth(fldr_name, file_name)
np.savetxt(file_path , L)
R_file_name ='icub{}x{}_{}{}{}{}_rght.txt'.format( rtna_w, rtna_h, i_dsp, c_dsp, u_dsp, b_dsp)
R_file_path = cnvrt.write_flenfldr_ncrntpth(fldr_name, R_file_name)
np.savetxt(R_file_path , R)
print '\n####### data_set stored in: {} '.format(fldr_name )
print '{} \n{}'.format(file_name, R_file_name)
print '## done !'
print '\n####### load {} by np.loadtxt(filename to check) '.format(file_name)
evts = np.loadtxt(file_path)
print '## n_evts : {}'.format(len(evts))
print '## start_time : {}'.format(evts[0][t_idx])
print '## first_evt : {}'.format(evts[0])
print '## last_evt : {}'.format(evts[len(evts)-1])
print '\n####### load {} by np.loadtxt(filename to check) '.format(R_file_name)
evts = np.loadtxt(R_file_path)
print '## n_evts : {}'.format(len(evts))
print '## start_time : {}'.format(evts[0][t_idx])
print '## first_evt : {}'.format(evts[0])
print '## last_evt : {}'.format(evts[len(evts)-1])
| 25.154167 | 94 | 0.5405 |
68ab41cb28215c75c491ee16a09dd3112edcaae4 | 681 | py | Python | vivisect/tests/testsrec.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/tests/testsrec.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/tests/testsrec.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import logging
import unittest
import Elf
import envi
import vivisect.cli as viv_cli
import vivisect.tests.helpers as helpers
logger = logging.getLogger(__name__)
path = ('raw', 'msp430', 'blink.srec')
class IHexTests(unittest.TestCase):
def test_ihex(self):
fn = helpers.getTestPath(*path)
vw = viv_cli.VivCli()
vw.config.viv.parsers.srec.arch = 'msp430'
vw.loadFromFile(fn)
vw.makeFunction(0x4000)
self.assertEqual(vw.getFunction(0x4000), 0x4000)
self.assertEqual(vw.getFunction(0x4050), 0x4000)
self.assertEqual(vw.getFunction(0x4060), 0x405e)
self.assertEqual(vw.getFunction(0x4068), 0x405e)
| 23.482759 | 56 | 0.693098 |
1b1e215bdf0098ab1db96f784344c6fb0034dc58 | 805 | py | Python | api/utils.py | jdeepe/Dynamodb-ORM-Demo | a4fb2bc782cc3e9583d4a269ecec9d86cb27dd6b | [
"Apache-2.0"
] | null | null | null | api/utils.py | jdeepe/Dynamodb-ORM-Demo | a4fb2bc782cc3e9583d4a269ecec9d86cb27dd6b | [
"Apache-2.0"
] | null | null | null | api/utils.py | jdeepe/Dynamodb-ORM-Demo | a4fb2bc782cc3e9583d4a269ecec9d86cb27dd6b | [
"Apache-2.0"
] | null | null | null | from pynamodb.attributes import ListAttribute, MapAttribute, NumberAttribute
class ModelIterator:
def __iter__(self):
for name, attr in self.get_attributes().items():
if isinstance(attr, MapAttribute):
yield name, getattr(self, name).as_dict()
if isinstance(attr, ListAttribute):
results = []
for el in getattr(self, name):
if isinstance(el, MapAttribute):
results.append((el.as_dict()))
else:
results.append(el)
yield name, results
elif isinstance(attr, NumberAttribute):
yield name, getattr(self, name)
else:
yield name, attr.serialize(getattr(self, name))
| 38.333333 | 76 | 0.540373 |
813dfffcb86722511ff02176007fd492bf4ab168 | 3,682 | py | Python | low_spatial_res/smagorinski.py | pdnooteboom/PO_res_error | 2c0e1f12203585e2ca3f9a5e686b4e8004052884 | [
"MIT"
] | 1 | 2021-04-12T16:07:42.000Z | 2021-04-12T16:07:42.000Z | low_spatial_res/smagorinski.py | pdnooteboom/PO_res_error | 2c0e1f12203585e2ca3f9a5e686b4e8004052884 | [
"MIT"
] | null | null | null | low_spatial_res/smagorinski.py | pdnooteboom/PO_res_error | 2c0e1f12203585e2ca3f9a5e686b4e8004052884 | [
"MIT"
] | 1 | 2021-04-12T16:07:45.000Z | 2021-04-12T16:07:45.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 15:38:49 2019
@author: nooteboom
"""
from parcels import Field
import math
from random import random
def prepare(fieldset, Cs=0.1):
    """
    Add the constants and fields required by the Smagorinsky parametrization.

    Registers the Smagorinsky coefficient, the grid resolution (degrees),
    the per-cell areas, and the per-cell edge sizes on the fieldset.

    :param fieldset: mod:`parcels.fieldset.FieldSet` object to add necessary fields to
    :param Cs: Smagorinsky coefficient (dimensionless), default 0.1
    """
    fieldset.add_constant('Cs', Cs)
    # Grid spacing in degrees, assumed uniform (taken from the first interval).
    fieldset.add_constant('resolutionx',fieldset.U.grid.lon[1]-fieldset.U.grid.lon[0])
    fieldset.add_constant('resolutiony',fieldset.U.grid.lat[1]-fieldset.U.grid.lat[0])
    x = fieldset.U.grid.lon
    y = fieldset.U.grid.lat
    # Cell areas [m^2] on the U grid, exposed as a samplable field.
    cell_areas = Field(name='cell_areas', data=fieldset.U.cell_areas(), lon=x, lat=y)
    fieldset.add_field(cell_areas)
    fieldset.U.calc_cell_edge_sizes()
    # Cell edge lengths [m] used to convert meter displacements to degrees.
    cell_edge_sizes_x = Field(name='cell_edge_sizes_x', data=fieldset.U.cell_edge_sizes['x'], lon=x, lat=y)
    cell_edge_sizes_y = Field(name='cell_edge_sizes_y', data=fieldset.U.cell_edge_sizes['y'], lon=x, lat=y)
    fieldset.add_field(cell_edge_sizes_x)
    fieldset.add_field(cell_edge_sizes_y)
class kernels:
    """Container for Smagorinsky diffusion kernels used with parcels."""
    def __init__(self):
        # Names of the kernel variants this class is meant to expose.
        self.members = ['uniform', 'spatially_varying']
    def default(particle, fieldset, time):
        """Smagorinski parametrization kernel.

        NOTE(review): this is a parcels kernel (no ``self``); parcels
        translates the body, so ``random``/``math`` here are resolved by the
        kernel runtime — ``random.normalvariate`` presumably maps to the
        parcels RNG, not this module's ``from random import random``.
        """
        # Finite-difference step [degrees] for the velocity gradients.
        dx = 0.01;
        dudx = (fieldset.U[time, particle.depth, particle.lat, particle.lon+dx]-fieldset.U[time, particle.depth, particle.lat, particle.lon-dx]) / (2*dx)
        dudy = (fieldset.U[time, particle.depth, particle.lat+dx, particle.lon]-fieldset.U[time, particle.depth, particle.lat-dx, particle.lon]) / (2*dx)
        dvdx = (fieldset.V[time, particle.depth, particle.lat, particle.lon+dx]-fieldset.V[time, particle.depth, particle.lat, particle.lon-dx]) / (2*dx)
        dvdy = (fieldset.V[time, particle.depth, particle.lat+dx, particle.lon]-fieldset.V[time, particle.depth, particle.lat-dx, particle.lon]) / (2*dx)
        A = fieldset.cell_areas[time, 0, particle.lat, particle.lon]
        # Horizontal eddy viscosity from the Smagorinsky closure.
        Vh = fieldset.Cs * A * math.sqrt(dudx**2 + 0.5*(dudy + dvdx)**2 + dvdy**2)
        xres = fieldset.resolutionx # [degrees]
        yres = fieldset.resolutiony
        dx_cell = fieldset.cell_edge_sizes_x[0, 0, particle.lat, particle.lon] # [meters]
        dy_cell = fieldset.cell_edge_sizes_y[0, 0, particle.lat, particle.lon]
        hitBoundary = True
        tries = 0
        # Sample a random-walk displacement; retry (up to 15 times) when the
        # target point has zero velocity in either component, which is
        # treated here as land/boundary.
        while hitBoundary and tries <15:
            dlat = yres * random.normalvariate(0,1) * math.sqrt(2*math.fabs(particle.dt)* Vh) / dy_cell
            dlon = xres * random.normalvariate(0,1) * math.sqrt(2*math.fabs(particle.dt)* Vh) / dx_cell
            if(fieldset.U[time, particle.depth, particle.lat+dlat, particle.lon+dlon] > 0):
                if(fieldset.V[time, particle.depth, particle.lat+dlat, particle.lon+dlon] > 0):
                    hitBoundary = False
                elif(fieldset.V[time, particle.depth, particle.lat+dlat, particle.lon+dlon] < 0):
                    hitBoundary = False
            elif(fieldset.U[time, particle.depth, particle.lat+dlat, particle.lon+dlon] < 0):
                if(fieldset.V[time, particle.depth, particle.lat+dlat, particle.lon+dlon] > 0):
                    hitBoundary = False
                elif(fieldset.V[time, particle.depth, particle.lat+dlat, particle.lon+dlon] < 0):
                    hitBoundary = False
            tries += 1
        # Give up after 15 failed samples: leave the particle in place.
        if tries == 15:
            dlat = 0
            dlon = 0
        particle.lat += dlat
        particle.lon += dlon
6a4a1aa0fa513d35332ca4a645b5bda917decd18 | 1,110 | bzl | Python | tools/test/runtime_resources.bzl | pswaminathan/grab-bazel-common | 5f441bccbeca5d3e8530647035e5db2b1708897c | [
"Apache-2.0"
] | 15 | 2021-06-25T11:03:20.000Z | 2022-03-09T07:20:04.000Z | tools/test/runtime_resources.bzl | pswaminathan/grab-bazel-common | 5f441bccbeca5d3e8530647035e5db2b1708897c | [
"Apache-2.0"
] | 12 | 2021-07-28T08:27:01.000Z | 2022-03-15T08:35:43.000Z | tools/test/runtime_resources.bzl | pswaminathan/grab-bazel-common | 5f441bccbeca5d3e8530647035e5db2b1708897c | [
"Apache-2.0"
] | 5 | 2021-07-28T12:43:33.000Z | 2021-12-14T17:37:39.000Z | """
A rule to collect all dependencies' Android resource jars that is made available
only on compile time and get them loaded during runtime
It works by iterating through the transitive compile time jars of all given
target and retrieving jar files that ends with `_resources.jar` into a JavaInfo
which can then be loaded during runtime.
"""
def _runtime_resources_impl(ctx):
deps = ctx.attr.deps
resources_java_infos = {}
for target in deps:
if (JavaInfo in target):
for jar in target[JavaInfo].transitive_compile_time_jars.to_list():
if (jar.basename.endswith("_resources.jar")):
resources_java_infos[jar.path] = JavaInfo(
output_jar = jar,
compile_jar = jar,
)
resources_java_infos = list(resources_java_infos.values())
merged_java_infos = java_common.merge(resources_java_infos)
return [
merged_java_infos,
]
runtime_resources = rule(
implementation = _runtime_resources_impl,
attrs = {
"deps": attr.label_list(),
},
)
| 30.833333 | 80 | 0.658559 |
da40bc27b56414e79b1c208f3d26bc7da28cd3f2 | 1,596 | py | Python | util/preprocessing/aspectawarepreprocessor.py | baishalidutta/Super_Resolution | d8fd94d1fcb6d6d2e2410dc93d26a9e46869bc7a | [
"Apache-2.0"
] | null | null | null | util/preprocessing/aspectawarepreprocessor.py | baishalidutta/Super_Resolution | d8fd94d1fcb6d6d2e2410dc93d26a9e46869bc7a | [
"Apache-2.0"
] | null | null | null | util/preprocessing/aspectawarepreprocessor.py | baishalidutta/Super_Resolution | d8fd94d1fcb6d6d2e2410dc93d26a9e46869bc7a | [
"Apache-2.0"
] | null | null | null | __author__ = "Baishali Dutta"
__copyright__ = "Copyright (C) 2021 Baishali Dutta"
__license__ = "Apache License 2.0"
__version__ = "0.1"
# import the necessary packages
import imutils
import cv2
class AspectAwarePreprocessor:
    """Resize images to a fixed size without distorting their aspect ratio.

    The image is resized along its shorter dimension, center-cropped along
    the longer one, and finally forced to exactly (width, height).
    """

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # Target output size and the OpenCV interpolation method to use.
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        """Return ``image`` resized to (self.width, self.height)."""
        (h, w) = image.shape[:2]
        crop_w = 0
        crop_h = 0
        # Resize along the smaller dimension, then work out how much must be
        # cropped from the larger dimension to match the target aspect ratio.
        if w < h:
            image = imutils.resize(image, width=self.width,
                                   inter=self.inter)
            crop_h = int((image.shape[0] - self.height) / 2.0)
        else:
            image = imutils.resize(image, height=self.height,
                                   inter=self.inter)
            crop_w = int((image.shape[1] - self.width) / 2.0)
        # Center crop, then force the exact output size since integer
        # rounding in the crop can leave the image a pixel off.
        (h, w) = image.shape[:2]
        image = image[crop_h:h - crop_h, crop_w:w - crop_w]
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)
04370674f289796b1d53e7ac6855f32fc09b6b63 | 814 | py | Python | example/plugins/documents.py | ekonda/kutana | 902f9d521c10c6c7ccabb1387ee3d87db5e2eba6 | [
"MIT"
] | 69 | 2018-10-05T21:42:51.000Z | 2022-03-16T17:22:21.000Z | example/plugins/documents.py | ekonda/kutana | 902f9d521c10c6c7ccabb1387ee3d87db5e2eba6 | [
"MIT"
] | 41 | 2018-10-20T09:18:43.000Z | 2021-11-22T12:19:44.000Z | example/plugins/documents.py | ekonda/kutana | 902f9d521c10c6c7ccabb1387ee3d87db5e2eba6 | [
"MIT"
] | 26 | 2018-10-20T09:13:42.000Z | 2021-12-24T17:01:02.000Z | from kutana import Plugin, Attachment, get_path, t
plugin = Plugin(name=t("Attachments"), description=t("Sends some attachments (.attachments)"))
@plugin.on_commands(["attachments"])
async def __(msg, ctx):
    """Reply with an image, a document, and a voice message in sequence."""
    def _read_asset(relative_path):
        # Load a bundled asset file as raw bytes.
        with open(get_path(__file__, relative_path), "rb") as asset:
            return asset.read()

    # Image
    image = Attachment.new(_read_asset("assets/pizza.png"), "pizza.png")
    await ctx.reply(t("Image"), attachments=image)

    # Document
    document = Attachment.new(_read_asset("assets/pizza.png"), "pizza.png")
    await ctx.reply(t("Document"), attachments=document)

    # Audio message
    voice = Attachment.new(_read_asset("assets/audio.ogg"), "audio.ogg", "voice")
    await ctx.reply(t("Audio message"), attachments=voice)
75326f157cfb75842c04f445e8941085934c9fee | 442 | py | Python | zbior/65/reader.py | bartekpacia/informatyka-frycz | 6fdbbdea0c6b6a710378f22e90d467c9f91e64aa | [
"MIT"
] | 2 | 2021-03-06T22:09:44.000Z | 2021-03-14T14:41:03.000Z | zbior/65/reader.py | bartekpacia/informatyka-frycz | 6fdbbdea0c6b6a710378f22e90d467c9f91e64aa | [
"MIT"
] | 1 | 2020-03-25T15:42:47.000Z | 2020-10-06T21:41:14.000Z | zbior/65/reader.py | bartekpacia/informatyka-frycz | 6fdbbdea0c6b6a710378f22e90d467c9f91e64aa | [
"MIT"
] | null | null | null | from typing import List, Tuple
def read_data() -> List[Tuple[str, str, float]]:
    """Read fractions from ``dane_ulamki.txt``.

    Each data line holds ``"<numerator> <denominator>"``.  Returns a list of
    ``(numerator, denominator, value)`` tuples, keeping the operands as the
    original strings and the quotient as a float.

    Blank or malformed lines (e.g. a trailing newline at end of file) are
    skipped instead of raising a ``ValueError`` on tuple unpacking.
    """
    ulamki: List[Tuple[str, str, float]] = []
    with open("dane_ulamki.txt") as f:
        for line in f:
            parts = line.split()
            if len(parts) != 2:
                # Skip empty / malformed lines rather than crash.
                continue
            licznik, mianownik = parts
            ulamek_wartosc = int(licznik) / int(mianownik)
            ulamki.append((licznik, mianownik, ulamek_wartosc))
    return ulamki
| 26 | 58 | 0.588235 |
5da9cf9a2d3eedce68adcbaead80db1959df699a | 4,306 | py | Python | archive/scripts/Human_experiment_lvl_sim/nbconverted/4_sim_experiment_corrected.py | ajlee21/Batch_effects_simulation | d707321346de48de5e63cf251280bdf9372be59c | [
"BSD-3-Clause"
] | 6 | 2020-05-04T15:16:32.000Z | 2021-02-28T04:49:21.000Z | archive/scripts/Human_experiment_lvl_sim/nbconverted/4_sim_experiment_corrected.py | ajlee21/Batch_effects_simulation | d707321346de48de5e63cf251280bdf9372be59c | [
"BSD-3-Clause"
] | 12 | 2020-02-27T20:12:36.000Z | 2021-04-07T20:28:35.000Z | archive/scripts/Human_experiment_lvl_sim/nbconverted/4_sim_experiment_corrected.py | ajlee21/Batch_effects_simulation | d707321346de48de5e63cf251280bdf9372be59c | [
"BSD-3-Clause"
] | 2 | 2019-06-02T18:29:17.000Z | 2020-02-13T09:33:37.000Z |
# coding: utf-8
# # Simulation experiment using noise-corrected data
#
# Run entire simulation experiment multiple times to generate confidence interval. The simulation experiment can be found in ```functions/pipeline.py```
# In[1]:
# NOTE(review): IPython magics — this nbconvert export only runs under
# IPython/Jupyter, not plain ``python``.
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from joblib import Parallel, delayed
import multiprocessing
import sys
import os
import pandas as pd
import warnings
warnings.filterwarnings(action='ignore')
# Make the repository-level ``functions`` package importable.
sys.path.append("../../")
from functions import pipelines, utils
# Seed global NumPy RNG for reproducibility of the simulations.
from numpy.random import seed
randomState = 123
seed(randomState)
# In[ ]:
# Read in config variables
config_file = os.path.abspath(os.path.join(os.getcwd(),"../../configs", "config_Human_experiment.tsv"))
params = utils.read_config(config_file)
# In[ ]:
# Load parameters
dataset_name = params["dataset_name"]
analysis_name = params["analysis_name"]
NN_architecture = params["NN_architecture"]
num_simulated_samples = params["num_simulated_samples"]
lst_num_experiments = params["lst_num_experiments"]
# NOTE(review): ``num_simulated_samples`` and ``lst_num_experiments`` are
# loaded here but never used below — the simulation cell references
# ``num_simulated_experiments`` and ``lst_num_partitions`` instead; confirm
# the intended config keys (see NameError note at the Parallel call).
use_pca = params["use_pca"]
num_PCs = params["num_PCs"]
local_dir = params["local_dir"]
correction_method = params["correction_method"]
iterations = params["iterations"]
num_cores = params["num_cores"]
# In[ ]:
# Additional parameters
file_prefix = "Partition_corrected"
corrected = True
# In[3]:
# Input files
# Repository root, resolved relative to the notebook's working directory.
base_dir = os.path.abspath(
    os.path.join(
        os.getcwd(), "../..")) # base dir on repo
# Normalized recount2 expression matrix (xz-compressed TSV).
normalized_data_file = os.path.join(
    base_dir,
    dataset_name,
    "data",
    "input",
    "recount2_gene_normalized_data.tsv.xz")
# One recount2 experiment id per line.
experiment_ids_file = os.path.join(
    base_dir,
    dataset_name,
    "data",
    "metadata",
    "recount2_experiment_ids.txt")
# In[4]:
# Output files
# Pickled mean SVCCA similarity scores per number of partitions.
similarity_corrected_file = os.path.join(
    base_dir,
    "results",
    "saved_variables",
    dataset_name +"_experiment_lvl_sim_similarity_corrected_"+correction_method+".pickle")
# Pickled 95% confidence-interval bounds for the same scores.
ci_corrected_file = os.path.join(
    base_dir,
    "results",
    "saved_variables",
    dataset_name +"_experiment_lvl_sim_ci_corrected_"+correction_method+".pickle")
# In[5]:
# Run multiple simulations - corrected
# NOTE(review): ``num_simulated_experiments`` and ``lst_num_partitions`` are
# never defined in this script (the config cell loads
# ``num_simulated_samples`` and ``lst_num_experiments``), so this cell raises
# NameError as written — confirm the intended config keys / pipeline
# signature before running.
results = Parallel(n_jobs=num_cores, verbose=100)(
    delayed(
        pipelines.experiment_level_simulation_corrected)(i,
                                                         NN_architecture,
                                                         dataset_name,
                                                         analysis_name,
                                                         num_simulated_experiments,
                                                         lst_num_partitions,
                                                         corrected,
                                                         correction_method,
                                                         use_pca,
                                                         num_PCs,
                                                         file_prefix,
                                                         normalized_data_file,
                                                         experiment_ids_file,
                                                         local_dir) for i in iterations)
# In[6]:
# Concatenate output dataframes
# NOTE(review): ``results[i][1]`` indexing assumes ``iterations`` is
# ``range(n)`` (0-based, contiguous) — confirm against the config value.
all_svcca_scores = pd.DataFrame()
for i in iterations:
    all_svcca_scores = pd.concat([all_svcca_scores, results[i][1]], axis=1)
all_svcca_scores
# In[7]:
# Get median for each row (number of experiments)
# NOTE(review): comment says "median" but the code computes the mean.
mean_scores = all_svcca_scores.mean(axis=1).to_frame()
mean_scores.columns = ['score']
mean_scores
# In[8]:
# Get standard dev for each row (number of experiments)
import math
# NOTE(review): hard-coded sqrt(10) presumably assumes 10 iterations —
# verify it matches ``len(iterations)``.
std_scores = (all_svcca_scores.std(axis=1)/math.sqrt(10)).to_frame()
std_scores.columns = ['score']
std_scores
# In[9]:
# Get confidence interval for each row (number of experiments)
# 1.96 = z-score for a two-sided 95% confidence interval.
err = std_scores*1.96
# In[10]:
# Get boundaries of confidence interval
ymax = mean_scores + err
ymin = mean_scores - err
ci = pd.concat([ymin, ymax], axis=1)
ci.columns = ['ymin', 'ymax']
ci
# In[11]:
mean_scores
# In[12]:
# Pickle dataframe of mean scores scores for first run, interval
mean_scores.to_pickle(similarity_corrected_file)
ci.to_pickle(ci_corrected_file)
| 22.663158 | 153 | 0.610543 |
5ee7c6ec5fb133ec91b73fd6644ab131bc85d353 | 3,495 | py | Python | src/number_detection.py | SmBe19/Nonograms | aa4d707f7f5197220552d8b2952fa15e3e3834cd | [
"MIT"
] | null | null | null | src/number_detection.py | SmBe19/Nonograms | aa4d707f7f5197220552d8b2952fa15e3e3834cd | [
"MIT"
] | null | null | null | src/number_detection.py | SmBe19/Nonograms | aa4d707f7f5197220552d8b2952fa15e3e3834cd | [
"MIT"
] | null | null | null | import os
import time
import pytesseract
from PIL import Image
ROOT_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OCR_PATH = os.path.join(ROOT_PATH, 'ocr')
DATA_PATH = os.path.join(ROOT_PATH, 'data')
WHITE = (255, 255, 255)
EMPTY_CUTOFF = 0.99


def white_ratio(region: Image) -> float:
    """Return the fraction of pixels in *region* that are pure white."""
    width, height = region.size
    white_pixels = sum(
        1
        for row in range(height)
        for col in range(width)
        if region.getpixel((col, row)) == WHITE
    )
    return white_pixels / (width * height)
def is_empty(region: Image) -> bool:
    """Tell whether *region* is (almost) entirely white."""
    ratio = white_ratio(region)
    return ratio >= EMPTY_CUTOFF
def cut_off_border(region: Image) -> Image:
    """Strip the non-white frame surrounding *region*.

    Walks inwards from each edge along the centre column (for top/bottom)
    and centre row (for left/right) until a pure-white pixel is found.
    Returns the cropped image, or ``None`` when the centre column or row
    contains no white pixel at all.
    """
    top = 0
    bottom = region.size[1]
    left = 0
    right = region.size[0]
    mid_x = right // 2
    mid_y = bottom // 2
    while top < bottom and region.getpixel((mid_x, top)) != WHITE:
        top += 1
    if top == bottom:
        # No white pixel anywhere in the centre column: nothing to crop to.
        return None
    # No bounds check needed: the scan above guarantees a white pixel at
    # (mid_x, top), which terminates this loop at the latest.
    while region.getpixel((mid_x, bottom - 1)) != WHITE:
        bottom -= 1
    while left < right and region.getpixel((left, mid_y)) != WHITE:
        left += 1
    if left == right:
        return None
    while region.getpixel((right - 1, mid_y)) != WHITE:
        right -= 1
    return region.crop((left, top, right, bottom))
def crop_content(region: Image) -> Image:
    """Crop *region* to its non-white content plus a 2-pixel margin.

    NOTE(review): assumes at least one non-white pixel exists — on an
    all-white image the edge scans below never terminate.  Callers appear
    to guard this via ``is_empty``; confirm.
    """
    top = 0
    bottom = region.size[1]
    left = 0
    right = region.size[0]

    def white_line(x: int, y: int, dx: int, dy: int):
        # True when every pixel on the ray from (x, y) with step (dx, dy)
        # inside the image is white.
        while x < region.size[0] and y < region.size[1]:
            if region.getpixel((x, y)) != WHITE:
                return False
            x += dx
            y += dy
        return True
    while white_line(0, top, 1, 0):
        top += 1
    while white_line(0, bottom - 1, 1, 0):
        bottom -= 1
    while white_line(left, 0, 0, 1):
        left += 1
    while white_line(right - 1, 0, 0, 1):
        right -= 1
    # Keep a 2-pixel margin around the detected content box.
    return region.crop((left - 2, top - 2, right + 2, bottom + 2))
def init_tesseract() -> None:
    """Write the user-word list and config file that tesseract is run with."""
    user_words_path = os.path.join(OCR_PATH, 'eng.user-words')
    configs = [
        'load_system_dawg F',
        'load_freq_dawg F',
        'load_unambig_dawg F',
        'load_punc_dawg F',
        'load_bigram_dawg F',
        'load_number_dawg T',
        'user_words_file {}'.format(user_words_path),
    ]
    # Restrict the recognisable vocabulary to the numbers 1..99.
    with open(user_words_path, 'w') as words_file:
        words_file.write('\n'.join(str(number) for number in range(1, 100)) + '\n')
    with open(os.path.join(OCR_PATH, 'config'), 'w') as config_file:
        config_file.write('\n'.join(configs) + '\n')
# Manual corrections for strings tesseract repeatedly misreads, collected
# interactively at runtime (text -> intended number).
tesseract_fixes = {}
def find_number_with_tesseract(region: Image) -> int:
    """OCR a single number from *region* (psm 8 = treat as one word).

    Falls back to asking the operator on stdin when the OCR output is not
    a digit string; the answer is remembered in ``tesseract_fixes`` and the
    failing region is saved for later inspection.  Returns 0 when no usable
    number can be determined.
    """
    text = pytesseract.image_to_string(region, config='--psm 8 "{}"'.format(os.path.join(OCR_PATH, 'config'))).strip()
    if text.isdigit():
        return int(text)
    if text in tesseract_fixes:
        return tesseract_fixes[text]
    # TODO teach Tesseract that it should only detect numbers
    print('Could not detect number, got:', text)
    save_region(region, 'not detect')
    region.show()
    value = input()
    if value.isdigit():
        tesseract_fixes[text] = int(value)
        return int(value)
    return 0
def detect_number(region: Image) -> int:
    """Return the number shown in *region*, or 0 for an empty cell."""
    region = cut_off_border(region)
    if region is None or is_empty(region):
        # No content inside the border: treat as an empty cell.
        return 0
    region = crop_content(region)
    number = find_number_with_tesseract(region)
    return number
def save_region(region: Image, message: str) -> None:
    """Persist *region* under the data directory for later inspection."""
    file_name = 'region_{}_{}.png'.format(time.time(), message)
    region.save(os.path.join(DATA_PATH, file_name))
| 28.414634 | 118 | 0.601144 |
02a2c7d1bd26384c403a353518bfef0ae15d24ec | 3,497 | py | Python | bayesian/__init__.py | Preeticp/fabric8-analytics-server | aa03f3f0032bd741821d3a789e95a15aea834a48 | [
"Apache-2.0"
] | null | null | null | bayesian/__init__.py | Preeticp/fabric8-analytics-server | aa03f3f0032bd741821d3a789e95a15aea834a48 | [
"Apache-2.0"
] | null | null | null | bayesian/__init__.py | Preeticp/fabric8-analytics-server | aa03f3f0032bd741821d3a789e95a15aea834a48 | [
"Apache-2.0"
] | null | null | null | """Module with the declaration of web application and its basic endpoints."""
import logging
import os
from flask import Flask
from flask import Response
from flask import g
from flask import redirect
from flask import request
from flask import url_for
from flask_appconfig import AppConfig
from flask_security import SQLAlchemyUserDatastore, Security
from flask_sqlalchemy import SQLAlchemy
from flask_cache import Cache
from f8a_worker.setup_celery import init_selinon
def setup_logging(app):
    """Attach a stream handler to the app logger when not in debug mode.

    The handler's level is read from the ``FLASK_LOGGING_LEVEL`` environment
    variable and defaults to WARNING.
    """
    if app.debug:
        return
    handler = logging.StreamHandler()
    level_name = os.environ.get('FLASK_LOGGING_LEVEL',
                                logging.getLevelName(logging.WARNING))
    handler.setLevel(level_name)
    app.logger.addHandler(handler)
# we must initialize DB here to not create import loop with .auth...
# flask really sucks at this
rdb = SQLAlchemy()  # shared SQLAlchemy handle; bound to the app in create_app()
cache = Cache(config={'CACHE_TYPE': 'simple'})  # in-process cache, bound in create_app()
def create_app(configfile=None):
    """Create the web application and define basic endpoints."""
    # do the imports here to not shadow e.g. "import bayesian.frontend.api_v1"
    # by Blueprint imported here
    from .api_v1 import api_v1
    from .exceptions import HTTPError
    from .utils import JSONEncoderWithExtraTypes
    app = Flask(__name__)
    AppConfig(app, configfile)
    cache.init_app(app)
    # actually init the DB with config values now
    rdb.init_app(app)
    app.rdb = rdb
    # We need JSON encoder that can serialize datetime.datetime
    app.json_encoder = JSONEncoderWithExtraTypes
    app.register_blueprint(api_v1)
    # Redirect to latest API version if /api is accessed
    app.route('/api')(lambda: redirect(url_for('api_v1.apiendpoints__slashless')))
    # Likewise for base URL, and make that accessible by name
    @app.route('/')
    def base_url():
        return redirect(url_for('api_v1.apiendpoints__slashless'))
    @app.errorhandler(HTTPError)
    def handleerrors(e):
        # Dispatch the error to the owning blueprint's custom handler when
        # it defines one; otherwise fall back to a plain-text response.
        bp = app.blueprints.get(request.blueprint)
        # if there's an error pre-request (e.g. during authentication) in non-GET requests,
        # request.blueprint is not set yet
        if not bp:
            # sort by the length of url_prefix, filter out blueprints without prefix
            bps = reversed(sorted(
                [(name, b) for name, b in app.blueprints.items() if b.url_prefix is not None],
                key=lambda tpl: len(tpl[1].url_prefix)))
            for bp_name, b in bps:
                if request.environ['PATH_INFO'].startswith(b.url_prefix):
                    bp = b
                    break
        if bp:
            handler = getattr(bp, 'coreapi_http_error_handler', None)
            if handler:
                return handler(e)
        return Response(e.error, status=e.status_code)
    setup_logging(app)
    @app.before_request
    def set_current_user():
        g.current_user = None
    @app.after_request
    def access_control_allow_origin(response):
        # Permissive CORS headers applied to every response.
        response.headers["Access-Control-Allow-Origin"] = "*"
        response.headers["Access-Control-Allow-Headers"] = "authorization, content-type"
        # NOTE(review): the implicit string concatenation below yields
        # "OPTIONS,PATCH" with no space after the comma — confirm intended.
        response.headers["Access-Control-Allow-Methods"] = "DELETE, GET, HEAD, OPTIONS,"\
                                                           "PATCH, POST, PUT"
        response.headers["Allow"] = "GET, HEAD, OPTIONS, PATCH, POST, PUT"
        return response
    return app
# Initialize the Selinon task framework, then build the singleton app
# instance at import time (used by the WSGI entry point).
init_selinon()
app = create_app()
app.logger.info('App initialized, ready to roll...')
| 32.990566 | 96 | 0.682585 |
660033613450eebb6375547d3a0ef4e2a0603420 | 17,000 | py | Python | nilearn/regions/tests/test_region_extractor.py | Qin-Ming/nilearn | 82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f | [
"BSD-2-Clause"
] | 827 | 2015-01-30T23:11:42.000Z | 2022-03-29T21:21:05.000Z | nilearn/regions/tests/test_region_extractor.py | Qin-Ming/nilearn | 82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f | [
"BSD-2-Clause"
] | 2,845 | 2015-01-04T22:14:41.000Z | 2022-03-31T20:28:09.000Z | nilearn/regions/tests/test_region_extractor.py | Qin-Ming/nilearn | 82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f | [
"BSD-2-Clause"
] | 484 | 2015-02-03T10:58:19.000Z | 2022-03-29T21:57:16.000Z | """ Test Region Extractor and its functions """
import numpy as np
import nibabel
import pytest
from scipy import ndimage
from nilearn.regions import (connected_regions, RegionExtractor,
connected_label_regions)
from nilearn.regions.region_extractor import (_threshold_maps_ratio,
_remove_small_regions)
from nilearn._utils.data_gen import generate_maps, generate_labeled_regions
from nilearn._utils.exceptions import DimensionError
from nilearn.image import get_data
def _make_random_data(shape):
    """Return a seeded Gaussian-noise Nifti1Image of *shape* and its data."""
    rng = np.random.RandomState(42)
    noise = rng.normal(size=shape)
    img = nibabel.Nifti1Image(noise, np.eye(4))
    return img, get_data(img)
def test_invalid_thresholds_in_threshold_maps_ratio():
    """_threshold_maps_ratio must reject non-numeric or negative thresholds."""
    maps, _ = generate_maps((10, 11, 12), n_regions=2)

    for invalid_threshold in ['80%', 'auto', -1.0]:
        with pytest.raises(
                ValueError,
                match="threshold given as ratio to the number of voxels must "
                      "be Real number and should be positive "
                      "and between 0 and total number of maps "
                      "i.e. n_maps={0}. "
                      "You provided {1}".format(maps.shape[-1],
                                                invalid_threshold)):
            _threshold_maps_ratio(maps, threshold=invalid_threshold)
def test_nans_threshold_maps_ratio():
    """Smoke test: _threshold_maps_ratio must tolerate NaNs in the maps."""
    maps, _ = generate_maps((10, 10, 10), n_regions=2)
    data = get_data(maps)
    data[:, :, 0] = np.nan

    maps_img = nibabel.Nifti1Image(data, np.eye(4))
    # Result unused on purpose — the test only checks that no error is raised.
    thr_maps = _threshold_maps_ratio(maps_img, threshold=0.8)
def test_threshold_maps_ratio():
    """Thresholding must not mutate its input and must keep map dimensions."""
    # smoke test for function _threshold_maps_ratio with randomly
    # generated maps
    rng = np.random.RandomState(42)
    maps, _ = generate_maps((6, 8, 10), n_regions=3)
    # test that there is no side effect
    get_data(maps)[:3] = 100
    maps_data = get_data(maps).copy()
    thr_maps = _threshold_maps_ratio(maps, threshold=1.0)
    np.testing.assert_array_equal(get_data(maps), maps_data)
    # make sure that n_regions (4th dimension) are kept same even
    # in thresholded image
    assert thr_maps.shape[-1] == maps.shape[-1]
    # check that the size should be same for 3D image
    # before and after thresholding
    img = np.zeros((30, 30, 30)) + 0.1 * rng.standard_normal(size=(30, 30, 30))
    img = nibabel.Nifti1Image(img, affine=np.eye(4))
    thr_maps_3d = _threshold_maps_ratio(img, threshold=0.5)
    assert img.shape == thr_maps_3d.shape
def test_invalids_extract_types_in_connected_regions():
    """connected_regions must reject unknown extract_type values."""
    maps, _ = generate_maps((10, 11, 12), n_regions=2)
    valid_names = ['connected_components', 'local_regions']

    # test whether same error raises as expected when invalid inputs
    # are given to extract_type in connected_regions function
    # NOTE(review): ``match`` is a regex — the formatted list's brackets are
    # interpreted as a character class, so matching is looser than intended.
    message = ("'extract_type' should be {0}")
    for invalid_extract_type in ['connect_region', 'local_regios']:
        with pytest.raises(ValueError, match=message.format(valid_names)):
            connected_regions(maps, extract_type=invalid_extract_type)
def test_connected_regions():
    """Smoke tests for connected_regions on 3D/4D maps and mask handling."""
    rng = np.random.RandomState(42)
    # 4D maps
    n_regions = 4
    maps, mask_img = generate_maps((30, 30, 30), n_regions=n_regions)
    # 3D maps
    map_img = np.zeros((30, 30, 30)) + 0.1 * rng.standard_normal(
        size=(30, 30, 30)
    )
    map_img = nibabel.Nifti1Image(map_img, affine=np.eye(4))
    # smoke test for function connected_regions and also to check
    # if the regions extracted should be equal or more than already present.
    # 4D image case
    for extract_type in ['connected_components', 'local_regions']:
        connected_extraction_img, index = connected_regions(maps, min_region_size=10,
                                                            extract_type=extract_type)
        assert connected_extraction_img.shape[-1] >= n_regions
        # NOTE(review): the second operand is the assert *message*, not an
        # isinstance check — presumably meant isinstance(index, np.ndarray).
        assert index, np.ndarray
        # For 3D images regions extracted should be more than equal to one
        connected_extraction_3d_img, _ = connected_regions(map_img, min_region_size=10,
                                                           extract_type=extract_type)
        assert connected_extraction_3d_img.shape[-1] >= 1
    # Test input mask_img
    mask = get_data(mask_img)
    mask[1, 1, 1] = 0
    extraction_with_mask_img, index = connected_regions(maps,
                                                        mask_img=mask_img)
    assert extraction_with_mask_img.shape[-1] >= 1
    extraction_without_mask_img, index = connected_regions(maps)
    assert np.all(get_data(extraction_with_mask_img)[mask == 0] == 0.)
    assert not np.all(get_data(extraction_without_mask_img)[mask == 0] == 0.)
    # mask_img with different shape
    # ``int`` replaces the deprecated ``np.int`` alias (removed in NumPy 1.24);
    # semantics are identical.
    mask = np.zeros(shape=(10, 11, 12), dtype=int)
    mask[1:-1, 1:-1, 1:-1] = 1
    affine = np.array([[2., 0., 0., 0.],
                       [0., 2., 0., 0.],
                       [0., 0., 2., 0.],
                       [0., 0., 0., 2.]])
    mask_img = nibabel.Nifti1Image(mask, affine=affine)
    extraction_not_same_fov_mask, _ = connected_regions(maps,
                                                        mask_img=mask_img)
    assert maps.shape[:3] == extraction_not_same_fov_mask.shape[:3]
    assert mask_img.shape != extraction_not_same_fov_mask.shape[:3]
    extraction_not_same_fov, _ = connected_regions(maps)
    assert (np.sum(get_data(extraction_not_same_fov) == 0) >
            np.sum(get_data(extraction_not_same_fov_mask) == 0))
def test_invalid_threshold_strategies():
    """RegionExtractor.fit must reject an unknown thresholding_strategy."""
    maps, _ = generate_maps((6, 8, 10), n_regions=1)
    extract_strategy_check = RegionExtractor(maps, thresholding_strategy='n_')

    # Match only the stable prefix of the error message.  The previous
    # ``"...".format(valid_strategies)`` call was a no-op: the literal
    # contains no replacement field, so the variable was dead code.
    with pytest.raises(ValueError,
                       match="'thresholding_strategy' should be either of "
                             "these"):
        extract_strategy_check.fit()
def test_threshold_as_none_and_string_cases():
    """RegionExtractor.fit must reject threshold=None and string thresholds."""
    maps, _ = generate_maps((6, 8, 10), n_regions=1)

    extract_thr_none_check = RegionExtractor(maps, threshold=None)
    with pytest.raises(ValueError,
                       match="The given input to threshold is not valid."):
        extract_thr_none_check.fit()
    extract_thr_string_check = RegionExtractor(maps, threshold='30%')
    with pytest.raises(ValueError,
                       match="The given input to threshold is not valid."):
        extract_thr_string_check.fit()
def test_region_extractor_fit_and_transform():
    """End-to-end smoke tests for RegionExtractor fit() and transform()."""
    n_regions = 9
    n_subjects = 5
    maps, mask_img = generate_maps((40, 40, 40), n_regions=n_regions)

    # Test maps are zero in the mask
    mask_data = get_data(mask_img)
    mask_data[1, 1, 1] = 0
    extractor_without_mask = RegionExtractor(maps)
    extractor_without_mask.fit()
    extractor_with_mask = RegionExtractor(maps, mask_img=mask_img)
    extractor_with_mask.fit()
    assert not np.all(
        get_data(extractor_without_mask.regions_img_)[mask_data == 0] == 0.)
    assert np.all(
        get_data(extractor_with_mask.regions_img_)[mask_data == 0] == 0.)
    # smoke test to RegionExtractor with thresholding_strategy='ratio_n_voxels'
    extract_ratio = RegionExtractor(maps, threshold=0.2,
                                    thresholding_strategy='ratio_n_voxels')
    extract_ratio.fit()
    assert extract_ratio.regions_img_ != ''
    assert extract_ratio.regions_img_.shape[-1] >= 9
    # smoke test with threshold=string and strategy=percentile
    extractor = RegionExtractor(maps, threshold=30,
                                thresholding_strategy='percentile',
                                mask_img=mask_img)
    extractor.fit()
    # NOTE(review): the second operand below is the assert *message*, not an
    # isinstance check — presumably meant isinstance(extractor.index_, np.ndarray).
    assert extractor.index_, np.ndarray
    assert extractor.regions_img_ != ''
    assert extractor.regions_img_.shape[-1] >= 9
    n_regions_extracted = extractor.regions_img_.shape[-1]
    shape = (91, 109, 91, 7)
    expected_signal_shape = (7, n_regions_extracted)
    for id_ in range(n_subjects):
        img, data = _make_random_data(shape)
        # smoke test NiftiMapsMasker transform inherited in Region Extractor
        signal = extractor.transform(img)
        assert expected_signal_shape == signal.shape
    # smoke test with high resolution image
    maps, mask_img = generate_maps((20, 20, 20), n_regions=n_regions,
                                   affine=.2 * np.eye(4))
    extract_ratio = RegionExtractor(maps,
                                    thresholding_strategy='ratio_n_voxels',
                                    smoothing_fwhm=.6,
                                    min_region_size=.4)
    extract_ratio.fit()
    assert extract_ratio.regions_img_ != ''
    assert extract_ratio.regions_img_.shape[-1] >= 9
    # smoke test with zeros on the diagonal of the affine
    affine = np.eye(4)
    affine[[0, 1]] = affine[[1, 0]]  # permutes first and second lines
    maps, mask_img = generate_maps((40, 40, 40), n_regions=n_regions,
                                   affine=affine)
    extract_ratio = RegionExtractor(maps, threshold=0.2,
                                    thresholding_strategy='ratio_n_voxels')
    extract_ratio.fit()
    assert extract_ratio.regions_img_ != ''
    assert extract_ratio.regions_img_.shape[-1] >= 9
def test_error_messages_connected_label_regions():
    """connected_label_regions must reject invalid min_size / connect_diag."""
    shape = (13, 11, 12)
    affine = np.eye(4)
    n_regions = 2
    labels_img = generate_labeled_regions(shape, affine=affine,
                                          n_regions=n_regions)
    with pytest.raises(
            ValueError,
            match="Expected 'min_size' to be specified as integer."):
        connected_label_regions(labels_img=labels_img, min_size='a')
    with pytest.raises(
            ValueError,
            match="'connect_diag' must be specified as True or False."):
        connected_label_regions(labels_img=labels_img, connect_diag=None)
def test_remove_small_regions():
    """_remove_small_regions must shrink a labelled map when regions are small."""
    data = np.array([[[0., 1., 0.],
                      [0., 1., 1.],
                      [0., 0., 0.]],
                     [[0., 0., 0.],
                      [1., 0., 0.],
                      [0., 1., 0.]],
                     [[0., 0., 1.],
                      [1., 0., 0.],
                      [0., 1., 1.]]])
    # To remove small regions, data should be labelled
    label_map, n_labels = ndimage.label(data)
    sum_label_data = np.sum(label_map)

    affine = np.eye(4)
    min_size = 10
    # data can be act as mask_data to identify regions in label_map because
    # features in label_map are built upon non-zeros in data
    index = np.arange(n_labels + 1)
    removed_data = _remove_small_regions(label_map, index, affine, min_size)
    sum_removed_data = np.sum(removed_data)
    assert sum_removed_data < sum_label_data
def test_connected_label_regions():
    """Broad behavioural tests for connected_label_regions.

    Covers: extraction with/without min_size, connect_diag=False, oversized
    min_size, user-supplied label names (list / array / string / mixed),
    and error paths for too-few labels, invalid label values and 4D input.
    """
    shape = (13, 11, 12)
    affine = np.eye(4)
    n_regions = 9
    labels_img = generate_labeled_regions(shape, affine=affine,
                                          n_regions=n_regions)
    labels_data = get_data(labels_img)
    n_labels_wo_reg_ext = len(np.unique(labels_data))
    # region extraction without specifying min_size
    extracted_regions_on_labels_img = connected_label_regions(labels_img)
    extracted_regions_labels_data = get_data(extracted_regions_on_labels_img)
    n_labels_wo_min = len(np.unique(extracted_regions_labels_data))
    assert n_labels_wo_reg_ext < n_labels_wo_min
    # with specifying min_size
    extracted_regions_with_min = connected_label_regions(labels_img,
                                                         min_size=100)
    extracted_regions_with_min_data = get_data(extracted_regions_with_min)
    n_labels_with_min = len(np.unique(extracted_regions_with_min_data))
    assert n_labels_wo_min > n_labels_with_min
    # Test connect_diag=False
    ext_reg_without_connect_diag = connected_label_regions(labels_img,
                                                           connect_diag=False)
    data_wo_connect_diag = get_data(ext_reg_without_connect_diag)
    n_labels_wo_connect_diag = len(np.unique(data_wo_connect_diag))
    assert n_labels_wo_connect_diag > n_labels_wo_reg_ext
    # If min_size is large and if all the regions are removed then empty image
    # will be returned
    extract_reg_min_size_large = connected_label_regions(labels_img,
                                                         min_size=500)
    assert np.unique(get_data(extract_reg_min_size_large)) == 0
    # Test the names of the brain regions given in labels.
    # Test labels for 9 regions in n_regions
    labels = ['region_a', 'region_b', 'region_c', 'region_d', 'region_e',
              'region_f', 'region_g', 'region_h', 'region_i']
    # If labels are provided, first return will contain extracted labels image
    # and second return will contain list of new names generated based on same
    # name with assigned on both hemispheres for example.
    extracted_reg, new_labels = connected_label_regions(labels_img,
                                                        min_size=100,
                                                        labels=labels)
    # The length of new_labels returned can differ depending upon min_size. If
    # min_size given is more small regions can be removed therefore newly
    # generated labels can be less than original size of labels. Or if min_size
    # is less then newly generated labels can be more.
    # We test here whether labels returned are empty or not.
    assert new_labels != ''
    assert len(new_labels) <= len(labels)
    # labels given in numpy array
    labels = np.asarray(labels)
    extracted_reg2, new_labels2 = connected_label_regions(labels_img,
                                                          labels=labels)
    assert new_labels != ''
    # By default min_size is less, so newly generated labels can be more.
    assert len(new_labels2) >= len(labels)
    # If number of labels provided are wrong (which means less than number of
    # unique labels in labels_img), then we raise an error
    # Test whether error raises
    unique_labels = set(np.unique(np.asarray(get_data(labels_img))))
    unique_labels.remove(0)
    # labels given are less than n_regions=9
    provided_labels = ['region_a', 'region_c', 'region_f',
                       'region_g', 'region_h', 'region_i']
    assert len(provided_labels) < len(unique_labels)
    with pytest.raises(ValueError):
        connected_label_regions(labels_img, labels=provided_labels)
    # Test if unknown/negative integers are provided as labels in labels_img,
    # we raise an error and test the same whether error is raised.
    # Introduce data type of float, see issue: https://github.com/nilearn/nilearn/issues/2580
    labels_data = np.zeros(shape, dtype=np.float32)
    h0 = shape[0] // 2
    h1 = shape[1] // 2
    h2 = shape[2] // 2
    labels_data[:h0, :h1, :h2] = 1
    labels_data[:h0, :h1, h2:] = 2
    labels_data[:h0, h1:, :h2] = 3
    labels_data[:h0, h1:, h2:] = -4
    labels_data[h0:, :h1, :h2] = 5
    labels_data[h0:, :h1, h2:] = 6
    labels_data[h0:, h1:, :h2] = np.nan
    labels_data[h0:, h1:, h2:] = np.inf
    neg_labels_img = nibabel.Nifti1Image(labels_data, affine)
    with pytest.raises(ValueError):
        connected_label_regions(labels_img=neg_labels_img)
    # If labels_img provided is 4D Nifti image, then test whether error is
    # raised or not. Since this function accepts only 3D image.
    labels_4d_data = np.zeros((shape) + (2, ))
    labels_data[h0:, h1:, :h2] = 0
    labels_data[h0:, h1:, h2:] = 0
    labels_4d_data[..., 0] = labels_data
    labels_4d_data[..., 1] = labels_data
    labels_img_4d = nibabel.Nifti1Image(labels_4d_data, np.eye(4))
    with pytest.raises(DimensionError):
        connected_label_regions(labels_img=labels_img_4d)
    # Test if labels (or names to regions) given is a string without a list.
    # Then, we expect it to be split to regions extracted and returned as list.
    labels_in_str = 'region_a'
    labels_img_in_str = generate_labeled_regions(shape, affine=affine,
                                                 n_regions=1)
    extract_regions, new_labels = connected_label_regions(labels_img_in_str,
                                                          labels=labels_in_str)
    assert isinstance(new_labels, list)
    # If user has provided combination of labels, then function passes without
    # breaking and new labels are returned based upon given labels and should
    # be equal or more based on regions extracted
    combined_labels = ['region_a', '1', 'region_b', '2', 'region_c', '3',
                       'region_d', '4', 'region_e']
    ext_reg, new_labels = connected_label_regions(labels_img,
                                                  labels=combined_labels)
    assert len(new_labels) >= len(combined_labels)
a9f9d7bb9865d8ca8d30f93c83f323850e73c770 | 2,863 | py | Python | main.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | 1 | 2021-08-10T19:50:57.000Z | 2021-08-10T19:50:57.000Z | main.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | null | null | null | main.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | null | null | null | import asyncio
import gc
import os
import psutil
import sys
from core_modules.keyboard_handler import KeyboardHandler
from core_modules.macro_handler import MacroHandler
from core_modules.tray import Tray
from global_modules import logs
from global_modules.get_config import get_config
from global_modules.macro_manager import __clear_registered, load_all
from global_modules.temp_manager import purge_temp
# ================================= Set the program priority below normal ==============================================
# Lower the process scheduling priority so the macro runner stays in the
# background; failures are logged but never fatal.
if sys.platform == "win32":
    try:
        psutil.Process().nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
    except Exception as err:
        logs.error("core", f"Cannot set process priority below normal: {type(err)}: {err}")
elif sys.platform == "linux" or sys.platform == "linux2":
    try:
        psutil.Process().nice(10)
    except Exception as err:
        logs.error("core", f"Cannot set nice to 10: {type(err)}: {err}")
# ======================================================================================================================
loop_ = asyncio.get_event_loop()
# Module-level singletons, populated lazily: the tray runs in an executor
# thread (create_tray) and the handlers are built in main() once it exists.
tray = None
macro_handler = None
keyboard_handler = None
async def main():
    """Wait for the tray to come up, then build the macro/keyboard handlers."""
    global tray
    global macro_handler
    global keyboard_handler
    purge_temp_loop()
    # Busy-wait (0.1 s poll) until create_tray(), running in an executor
    # thread, has assigned the Tray instance.
    while not isinstance(tray, Tray):
        await asyncio.sleep(0.1)
    # NOTE(review): always true after the loop above — redundant guard.
    if isinstance(tray, Tray):
        macro_handler = MacroHandler(tray)
        keyboard_handler = KeyboardHandler(macro_handler, tray, asyncio.get_event_loop())
        update_handlers_loop()
    gc.collect()
def create_tray(loop: asyncio.AbstractEventLoop):
    """Create the system tray and run its blocking UI loop (executor thread)."""
    global tray
    logs.info("main", "Launching tray")
    tray = Tray(loop)
    tray.run_tray()
def purge_temp_loop():
    """Purge temp files now and reschedule itself to run every 60 seconds."""
    purge_temp()
    loop = asyncio.get_running_loop()
    loop.call_later(60, purge_temp_loop)
def update_handlers_loop():
    """Tick the macro/keyboard handlers and reschedule the next tick.

    The tick interval comes from config: the 'enabled' timeout while the
    tray reports macros enabled (or before the tray exists), the
    'disabled' timeout otherwise.
    """
    global macro_handler
    global keyboard_handler
    global tray
    if isinstance(tray, Tray):
        if tray.enabled:
            to = get_config("global.timeout_update_handler.enabled")
        else:
            to = get_config("global.timeout_update_handler.disabled")
    else:
        to = get_config("global.timeout_update_handler.enabled")
    # Handlers may not exist yet on the first ticks; guard with isinstance.
    if isinstance(macro_handler, MacroHandler):
        macro_handler.update()
    if isinstance(keyboard_handler, KeyboardHandler):
        keyboard_handler.update()
    loop = asyncio.get_running_loop()
    loop.call_later(to, update_handlers_loop)
if __name__ == "__main__":
    # macOS is explicitly unsupported.
    if sys.platform == "darwin":
        logs.error("core", "MacOS not supported")
        exit(1)
    # Ensure the user macro directory exists, reset temp/log state, then
    # (re)load every registered macro before starting the event loop.
    if not os.path.exists("macros"):
        os.mkdir("macros")
    purge_temp(True)
    logs.clear_logs()
    __clear_registered()
    load_all()
    # Tray UI blocks, so it runs in an executor thread beside the loop.
    loop_.run_in_executor(None, create_tray, loop_)
    loop_.create_task(main())
    loop_.run_forever()
| 25.792793 | 120 | 0.657352 |
35bb5ac1200646e41f3275a250552b234b800421 | 2,653 | py | Python | sunpos.py | marcocipriani01/AllSky-new | 8e3951bccdb4709ef3abfeb7c5bf8961e9c8d3f1 | [
"MIT"
] | null | null | null | sunpos.py | marcocipriani01/AllSky-new | 8e3951bccdb4709ef3abfeb7c5bf8961e9c8d3f1 | [
"MIT"
] | null | null | null | sunpos.py | marcocipriani01/AllSky-new | 8e3951bccdb4709ef3abfeb7c5bf8961e9c8d3f1 | [
"MIT"
] | null | null | null | # Credits:
# https://levelup.gitconnected.com/python-sun-position-for-solar-energy-and-research-7a4ead801777
import datetime
from dateutil.tz import tzutc
from math import sin, cos, tan, asin, atan2, radians as rad, degrees as deg
def sun_position(location, utc=None, refraction=True):
if utc is None:
utc = datetime.datetime.now(tzutc())
latitude, longitude = location
# Convert latitude and longitude to radians
rlat = rad(latitude)
rlon = rad(longitude)
# Decimal hour of the day at Greenwich
greenwichtime = utc.hour + utc.minute / 60 + utc.second / 3600
# Days from J2000, accurate from 1901 to 2099
daynum = (
367 * utc.year
- 7 * (utc.year + (utc.month + 9) // 12) // 4
+ 275 * utc.month // 9
+ utc.day
- 730531.5
+ greenwichtime / 24
)
# Mean longitude of the sun
mean_long = daynum * 0.01720279239 + 4.894967873
# Mean anomaly of the Sun
mean_anom = daynum * 0.01720197034 + 6.240040768
# Ecliptic longitude of the sun
eclip_long = (
mean_long
+ 0.03342305518 * sin(mean_anom)
+ 0.0003490658504 * sin(2 * mean_anom)
)
# Obliquity of the ecliptic
obliquity = 0.4090877234 - 0.000000006981317008 * daynum
# Right ascension of the sun
rasc = atan2(cos(obliquity) * sin(eclip_long), cos(eclip_long))
# Declination of the sun
decl = asin(sin(obliquity) * sin(eclip_long))
# Local sidereal time
sidereal = 4.894961213 + 6.300388099 * daynum + rlon
# Hour angle of the sun
hour_ang = sidereal - rasc
# Local elevation of the sun
elevation = asin(sin(decl) * sin(rlat) + cos(decl) * cos(rlat) * cos(hour_ang))
# Local azimuth of the sun
azimuth = atan2(
-cos(decl) * cos(rlat) * sin(hour_ang),
sin(decl) - sin(rlat) * sin(elevation),
)
# Convert azimuth and elevation to degrees
azimuth = into_range(deg(azimuth), 0, 360)
elevation = into_range(deg(elevation), -180, 180)
# Refraction correction (optional)
if refraction:
targ = rad((elevation + (10.3 / (elevation + 5.11))))
elevation += (1.02 / tan(targ)) / 60
# Return azimuth and elevation in degrees
return (round(azimuth, 2), round(elevation, 2))
def into_range(x, range_min, range_max):
    """Wrap *x* into the half-open interval [range_min, range_max)."""
    span = range_max - range_min
    offset = x - range_min
    # Double modulo keeps the result non-negative for any sign of the operands.
    wrapped = ((offset % span) + span) % span
    return range_min + wrapped
if __name__ == "__main__":
    # Demo: print the current sun position for a fixed location.
    location = (41.902782, 12.496366)  # Rome, Italy
    azimuth, elevation = sun_position(location)
    print("Azimuth: ", azimuth)
    print("Elevation: ", elevation)
| 34.907895 | 97 | 0.637392 |
eb9c6f42fc5818ffc473f6d5a02e2c54848f3107 | 397 | py | Python | QuickTutor/asgi.py | caleb-bodishbaugh/cs3240-s20-QuickTutor | 9d575fd85496a7c2dab215610de2ba9ef58559a0 | [
"Unlicense",
"MIT"
] | 1 | 2021-08-20T15:23:03.000Z | 2021-08-20T15:23:03.000Z | QuickTutor/asgi.py | caleb-bodishbaugh/cs3240-s20-QuickTutor | 9d575fd85496a7c2dab215610de2ba9ef58559a0 | [
"Unlicense",
"MIT"
] | 6 | 2021-03-19T04:35:36.000Z | 2022-01-13T02:48:36.000Z | QuickTutor/asgi.py | caleb-bodishbaugh/cs3240-s20-QuickTutor | 9d575fd85496a7c2dab215610de2ba9ef58559a0 | [
"Unlicense",
"MIT"
] | 1 | 2021-08-20T15:22:48.000Z | 2021-08-20T15:22:48.000Z | """
ASGI config for QuickTutor project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'QuickTutor.settings')
application = get_asgi_application()
| 23.352941 | 78 | 0.788413 |
79a0c5045ab3bfbd869207b3158d41a654033599 | 8,628 | py | Python | trinity/components/builtin/network_db/component.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | null | null | null | trinity/components/builtin/network_db/component.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | null | null | null | trinity/components/builtin/network_db/component.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | null | null | null | import asyncio
from argparse import (
Namespace,
ArgumentParser,
_SubParsersAction,
)
from typing import Iterable
from lahja import EndpointAPI
from sqlalchemy.orm import Session
from eth_utils import to_tuple
from p2p.service import BaseService
from p2p.tracking.connection import (
BaseConnectionTracker,
NoopConnectionTracker,
)
from trinity._utils.shutdown import (
exit_with_services,
)
from trinity.config import (
TrinityConfig,
)
from trinity.db.orm import get_tracking_database
from trinity.events import ShutdownRequest
from trinity.extensibility import (
AsyncioIsolatedComponent,
)
from trinity.db.network import (
get_networkdb_path,
)
from trinity.exceptions import BadDatabaseError
from .connection.server import ConnectionTrackerServer
from .connection.tracker import (
SQLiteConnectionTracker,
MemoryConnectionTracker,
)
from .cli import (
TrackingBackend,
NormalizeTrackingBackend,
)
from .eth1_peer_db.server import PeerDBServer
from .eth1_peer_db.tracker import (
BaseEth1PeerTracker,
NoopEth1PeerTracker,
SQLiteEth1PeerTracker,
MemoryEth1PeerTracker,
)
class NetworkDBComponent(AsyncioIsolatedComponent):
    """Isolated component that owns the on-disk network database and runs
    the blacklist and ETH1 peer-tracking servers over the event bus."""

    @property
    def name(self) -> str:
        return "Network Database"

    @property
    def normalized_name(self) -> str:
        return "network-db"

    @classmethod
    def configure_parser(cls,
                         arg_parser: ArgumentParser,
                         subparser: _SubParsersAction) -> None:
        """Register the CLI flags and the `remove-network-db` subcommand."""
        tracking_parser = arg_parser.add_argument_group('network db')
        tracking_parser.add_argument(
            '--network-tracking-backend',
            help=(
                "Configure whether nodes are tracked and how. (sqlite3: persistent "
                "tracking across runs from an on-disk sqlite3 database, memory: tracking "
                "only in memory, do-not-track: no tracking)"
            ),
            action=NormalizeTrackingBackend,
            choices=('sqlite3', 'memory', 'do-not-track'),
            default=TrackingBackend.SQLITE3,
            type=str,
        )
        tracking_parser.add_argument(
            '--disable-networkdb-component',
            help=(
                "Disables the builtin 'Network Database' component. "
                "**WARNING**: disabling this API without a proper replacement "
                "will cause your trinity node to crash."
            ),
            action='store_true',
        )
        tracking_parser.add_argument(
            '--disable-blacklistdb',
            help=(
                "Disables the blacklist database server component of the Network Database "
                "component. **WARNING**: disabling this API without a proper replacement "
                "will cause your trinity node to crash."
            ),
            action='store_true',
        )
        tracking_parser.add_argument(
            '--disable-eth1-peer-db',
            help=(
                "Disables the ETH1.0 peer database server component of the Network Database "
                "component. **WARNING**: disabling this API without a proper replacement "
                "will cause your trinity node to crash."
            ),
            action='store_true',
        )
        tracking_parser.add_argument(
            '--enable-experimental-eth1-peer-tracking',
            help=(
                "Enables the experimental tracking of metadata about successful "
                "connections to Eth1 peers."
            ),
            action='store_true',
        )
        # Command to wipe the on-disk database
        remove_db_parser = subparser.add_parser(
            'remove-network-db',
            help='Remove the on-disk sqlite database that tracks data about the p2p network',
        )
        remove_db_parser.set_defaults(func=cls.clear_node_db)

    def on_ready(self, manager_eventbus: EndpointAPI) -> None:
        """Start the component once the event bus is ready, unless disabled.

        A corrupt/unreadable database triggers a shutdown request instead of
        a crash, pointing the user at the `remove-network-db` command.
        """
        if self.boot_info.args.disable_networkdb_component:
            self.logger.warning("Network Database disabled via CLI flag")
            # Allow this component to be disabled for extreme cases such as the
            # user swapping in an equivalent experimental version.
            return
        else:
            try:
                get_tracking_database(get_networkdb_path(self.boot_info.trinity_config))
            except BadDatabaseError as err:
                manager_eventbus.broadcast_nowait(ShutdownRequest(
                    "Error loading network database. Trying removing database "
                    f"with `remove-network-db` command:\n{err}"
                ))
            else:
                self.start()

    @classmethod
    def clear_node_db(cls, args: Namespace, trinity_config: TrinityConfig) -> None:
        """Handler for `remove-network-db`: delete the sqlite file if present."""
        logger = cls.get_logger()
        db_path = get_networkdb_path(trinity_config)
        if db_path.exists():
            logger.info("Removing network database at: %s", db_path.resolve())
            db_path.unlink()
        else:
            logger.info("No network database found at: %s", db_path.resolve())

    # Lazily-created, memoized SQLAlchemy session shared by both trackers.
    _session: Session = None

    def _get_database_session(self) -> Session:
        if self._session is None:
            self._session = get_tracking_database(get_networkdb_path(self.boot_info.trinity_config))
        return self._session

    #
    # Blacklist Server
    #
    def _get_blacklist_tracker(self) -> BaseConnectionTracker:
        """Pick the connection tracker implementation for the chosen backend."""
        backend = self.boot_info.args.network_tracking_backend
        if backend is TrackingBackend.SQLITE3:
            session = self._get_database_session()
            return SQLiteConnectionTracker(session)
        elif backend is TrackingBackend.MEMORY:
            return MemoryConnectionTracker()
        elif backend is TrackingBackend.DO_NOT_TRACK:
            return NoopConnectionTracker()
        else:
            raise Exception(f"INVARIANT: {backend}")

    def _get_blacklist_service(self) -> ConnectionTrackerServer:
        tracker = self._get_blacklist_tracker()
        return ConnectionTrackerServer(
            event_bus=self.event_bus,
            tracker=tracker,
        )

    #
    # Eth1 Peer Server
    #
    def _get_eth1_tracker(self) -> BaseEth1PeerTracker:
        """Pick the ETH1 peer tracker; a no-op unless experimental tracking is on."""
        if not self.boot_info.args.enable_experimental_eth1_peer_tracking:
            return NoopEth1PeerTracker()
        backend = self.boot_info.args.network_tracking_backend
        if backend is TrackingBackend.SQLITE3:
            session = self._get_database_session()
            # TODO: correctly determine protocols and versions
            protocols = ('eth',)
            protocol_versions = (63,)
            # TODO: get genesis_hash
            return SQLiteEth1PeerTracker(
                session,
                network_id=self.boot_info.trinity_config.network_id,
                protocols=protocols,
                protocol_versions=protocol_versions,
            )
        elif backend is TrackingBackend.MEMORY:
            return MemoryEth1PeerTracker()
        elif backend is TrackingBackend.DO_NOT_TRACK:
            return NoopEth1PeerTracker()
        else:
            raise Exception(f"INVARIANT: {backend}")

    def _get_eth1_peer_server(self) -> PeerDBServer:
        tracker = self._get_eth1_tracker()
        return PeerDBServer(
            event_bus=self.event_bus,
            tracker=tracker,
        )

    @to_tuple
    def _get_services(self) -> Iterable[BaseService]:
        """Yield the enabled sub-services (collected into a tuple by @to_tuple)."""
        if self.boot_info.args.disable_blacklistdb:
            # Allow this component to be disabled for extreme cases such as the
            # user swapping in an equivalent experimental version.
            self.logger.warning("Blacklist Database disabled via CLI flag")
            return
        else:
            yield self._get_blacklist_service()
        if self.boot_info.args.disable_eth1_peer_db:
            # Allow this component to be disabled for extreme cases such as the
            # user swapping in an equivalent experimental version.
            self.logger.warning("ETH1 Peer Database disabled via CLI flag")
        else:
            yield self._get_eth1_peer_server()

    def do_start(self) -> None:
        """Schedule the tracker services on the event loop, tied to the bus lifetime."""
        try:
            tracker_services = self._get_services()
        except BadDatabaseError as err:
            self.logger.exception(f"Unrecoverable error in Network Component: {err}")
        else:
            asyncio.ensure_future(exit_with_services(
                self._event_bus_service,
                *tracker_services,
            ))
            for service in tracker_services:
                asyncio.ensure_future(service.run())
| 34.650602 | 100 | 0.631548 |
1291759f4b69437dcb17dc586cd551e0a3d69dd9 | 2,786 | py | Python | examples/pipeline/hetero_feature_selection/pipeline-hetero-feature-selection-fast-sbt.py | qixiuai/FATE | 6d50af65b96b5b226afda30dfa8e4a1e5746952d | [
"Apache-2.0"
] | null | null | null | examples/pipeline/hetero_feature_selection/pipeline-hetero-feature-selection-fast-sbt.py | qixiuai/FATE | 6d50af65b96b5b226afda30dfa8e4a1e5746952d | [
"Apache-2.0"
] | null | null | null | examples/pipeline/hetero_feature_selection/pipeline-hetero-feature-selection-fast-sbt.py | qixiuai/FATE | 6d50af65b96b5b226afda30dfa8e4a1e5746952d | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
# Walk four directories up from this file to reach the FATE repository root
# and put it on sys.path so the `examples` and `pipeline` packages import.
cur_path = os.path.realpath(__file__)
for i in range(4):
    cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)
from examples.pipeline.hetero_feature_selection import common_tools
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Run the hetero feature-selection demo driven by a fast SecureBoost filter.

    config: path to a job config YAML (loaded here) or an already-loaded
        config object.
    namespace: suffix used by common_tools when naming data tables.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    backend = config.backend
    work_mode = config.work_mode
    # Parameters for the fast SecureBoost component that produces the
    # feature importances consumed by the selection filter below.
    fast_sbt_param = {
        "name": "fast_secureboost_0",
        "task_type": "classification",
        "learning_rate": 0.1,
        "num_trees": 4,
        "subsample_feature_rate": 1,
        "n_iter_no_change": False,
        "work_mode": "layered",
        "guest_depth": 2,
        "host_depth": 3,
        "tol": 0.0001,
        "bin_num": 50,
        "metrics": ["Recall", "ks", "auc", "roc"],
        "objective_param": {
            "objective": "cross_entropy"
        },
        "encrypt_param": {
            "method": "iterativeAffine"
        },
        "predict_param": {
            "threshold": 0.5
        },
        "validation_freqs": 1
    }
    # Keep features whose importance exceeds the threshold.
    selection_param = {
        "name": "hetero_feature_selection_0",
        "select_col_indexes": -1,
        "select_names": [],
        "filter_methods": [
            "hetero_fast_sbt_filter"
        ],
        "sbt_param": {
            "metrics": "feature_importance",
            "filter_type": "threshold",
            "take_high": True,
            "threshold": 0.03
        }}
    pipeline = common_tools.make_normal_dsl(config, namespace, selection_param,
                                            fast_sbt_param=fast_sbt_param)
    pipeline.fit(backend=backend, work_mode=work_mode)
    # Print the selection component's summary for inspection.
    common_tools.prettify(pipeline.get_component("hetero_feature_selection_0").get_summary())
if __name__ == "__main__":
    # Parse the optional -config argument and launch the pipeline demo.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed = arg_parser.parse_args()
    if parsed.config is None:
        main()
    else:
        main(parsed.config)
| 30.615385 | 93 | 0.622757 |
a316bc543164db3ea1f947a610e805ad4ea09cd6 | 3,402 | py | Python | dataset/cifar10.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
] | null | null | null | dataset/cifar10.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
] | null | null | null | dataset/cifar10.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
] | 1 | 2021-08-07T14:56:44.000Z | 2021-08-07T14:56:44.000Z | # coding: utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.vision.transforms as transforms
from paddle.io import Dataset
class MyDataset(Dataset):
    """Wrap an indexable (img, label) dataset, lazily applying a transform."""

    def __init__(self, datasets, data_transforms=None):
        self.datasets = datasets
        self.data_transforms = data_transforms

    def __getitem__(self, idx):
        img, label = self.datasets[idx]
        transform = self.data_transforms
        # Only the image is transformed; the label passes through untouched.
        return (img if transform is None else transform(img)), label

    def __len__(self):
        return len(self.datasets)
# has_val_dataset controls whether the paper's train/val split is used (default: yes).
def load_data(root, train_batch_size, test_batch_size, train_size=45000, val_size=5000, has_val_dataset=True):
    """Build CIFAR-10 train/val/test DataLoaders.

    root: path to the CIFAR-10 data file (downloaded if missing).
    has_val_dataset: True -> paper split (random 45000/5000 train/val);
        False -> classic 50000 train, test set reused as val.
    Returns (train_loader, val_loader, test_loader).
    """
    print('Loading data...')
    # Train-time augmentation: random crop with padding + horizontal flip.
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    test_set = paddle.vision.datasets.Cifar10(data_file=root, mode='test', download=True, transform=test_transform, backend='cv2')
    if has_val_dataset:
        # Paper's split: 45000 train / 5000 val, chosen at random.
        train_set = paddle.vision.datasets.Cifar10(data_file=root, mode='train', download=True, transform=None, backend='pil')
        train_set, val_set = paddle.io.random_split(train_set, [train_size, val_size])
        train_set = MyDataset(train_set, data_transforms=train_transform)
        val_set = MyDataset(val_set, data_transforms=test_transform)
    else:
        # Not the paper's scheme: classic 50000 train; use the 10000 test images as val.
        train_set = paddle.vision.datasets.Cifar10(data_file=root, mode='train', download=True, transform=train_transform, backend='pil')
        val_set = test_set
    # Without places=paddle.CPUPlace() an unexplained segmentation fault occurs.
    # num_workers>0 eventually exhausts shared memory, so keep 0 (slightly slower).
    train_loader = paddle.io.DataLoader(train_set, batch_size=train_batch_size,
                                        shuffle=True, num_workers=0, places=paddle.CPUPlace())
    val_loader = paddle.io.DataLoader(val_set, batch_size=test_batch_size,
                                      shuffle=False, num_workers=0, places=paddle.CPUPlace())
    test_loader = paddle.io.DataLoader(test_set, batch_size=test_batch_size,
                                       shuffle=False, num_workers=0, places=paddle.CPUPlace())
    print('Finish loading! tran data length:{}, val data length:{}, test data length:{}'.format(len(train_set), len(val_set), len(test_set)))
    return train_loader, val_loader, test_loader
| 45.36 | 142 | 0.6796 |
926c0aba71f08b30bf3021e277e2afaa526b64c6 | 1,753 | py | Python | thermal-story.py | papayapeter/thermal-story | 9980a4efa250ed79a1dcb50c490eecc596b50041 | [
"MIT"
] | null | null | null | thermal-story.py | papayapeter/thermal-story | 9980a4efa250ed79a1dcb50c490eecc596b50041 | [
"MIT"
] | null | null | null | thermal-story.py | papayapeter/thermal-story | 9980a4efa250ed79a1dcb50c490eecc596b50041 | [
"MIT"
] | null | null | null | # zeno gries
# thermal story
# 2019
# using forked adafruit library version (with german and french letter support)
import os
import serial
import adafruit_thermal_printer
import readline  # imported for its side effect: input() line editing/history
# establish objects: serial link to the thermal printer on the Pi's UART
uart = serial.Serial('/dev/serial0', baudrate=9600, timeout=3000)
ThermalPrinter = adafruit_thermal_printer.get_printer_class(2.64)
printer = ThermalPrinter(uart)
# open file once (to make sure it is there)
log = open('log.txt', 'a+')
log.close()
# clear screen
os.system('clear')
# print instruction (German: "Continue the story.")
print('Schreib die Geschichte weiter.\n\n')
# print story so far line by line
for line in open('log.txt'):
    print(line)
# main loop: read a line, handle --commands, otherwise log + print it
while (True):
    # check for paper
    if printer.has_paper():
        # wait for input
        text = input("> ")
        # sort out special commands
        if text == '--test': # print test page
            printer.test_page()
            printer.feed(2)
        elif text == '--quit': # quit program
            break
        elif text == '--feed': # feed two lines of paper
            printer.feed(2)
        elif text == '--clear': # clear all logs
            os.remove('log.txt')
            log = open('log.txt', 'w+')
            log.close()
            # clear screen
            os.system('clear')
            # print instruction
            print('Schreib die Geschichte weiter.\n\n')
        else: # print
            # write to log and print
            log = open('log.txt', 'a+')
            log.write(text + '\n')
            log.close()
            printer.print(text)
    else: # if there is no paper then print that or quit
        # Prompt doubles as the out-of-paper notice ("Paper must be refilled...").
        text = input('Papier muss nachgelegt werden. Frag an der Bar nach.')
        if text == '--quit':
            break
9e3a5fcced1827da22b849360de052d12a235ccc | 162 | py | Python | CalibTracker/SiStripCommon/python/shallowTree_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CalibTracker/SiStripCommon/python/shallowTree_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CalibTracker/SiStripCommon/python/shallowTree_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
shallowTree = cms.EDAnalyzer(
"ShallowTree",
outputCommands = cms.untracked.vstring(
'drop *',
)
)
| 18 | 42 | 0.660494 |
079d8f7806bfa7bbf91184736e437762027c0e1d | 12,369 | py | Python | site.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | 1 | 2020-03-12T16:44:10.000Z | 2020-03-12T16:44:10.000Z | site.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | site.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: site
import sys, os, __builtin__, traceback
# Prefixes searched for site-packages directories.
PREFIXES = [
 sys.prefix, sys.exec_prefix]
# Tri-state configuration resolved in main(): None means "not decided yet".
ENABLE_USER_SITE = None
USER_SITE = None
USER_BASE = None
def makepath(*paths):
    """Join *paths*, absolutize when possible, and return (path, normcase(path))."""
    joined = os.path.join(*paths)
    try:
        joined = os.path.abspath(joined)
    except OSError:
        pass  # keep the un-absolutized form if abspath fails
    return joined, os.path.normcase(joined)
def abs__file__():
    """Make every loaded module's __file__ absolute (skips modules with a loader)."""
    for m in sys.modules.values():
        if hasattr(m, '__loader__'):
            continue
        try:
            m.__file__ = os.path.abspath(m.__file__)
        except (AttributeError, OSError):
            pass  # builtins without __file__, or paths that cannot resolve
def removeduppaths():
    """De-duplicate sys.path in place (case-normalized) and return the
    set of known normalized directories."""
    L = []
    known_paths = set()
    for dir in sys.path:
        dir, dircase = makepath(dir)
        if dircase not in known_paths:
            L.append(dir)
            known_paths.add(dircase)
    sys.path[:] = L  # slice-assign so existing references to sys.path see the change
    return known_paths
def _init_pathinfo():
    """Return the set of case-normalized directories currently on sys.path."""
    d = set()
    for dir in sys.path:
        try:
            if os.path.isdir(dir):
                dir, dircase = makepath(dir)
                d.add(dircase)
        except TypeError:
            continue  # non-string sys.path entries
    return d
def addpackage(sitedir, name, known_paths):
    """Process one .pth file: append listed dirs to sys.path, exec import lines.

    Python 2 (decompiled) code. NOTE(review): when known_paths is None the
    result of _init_pathinfo() is discarded -- likely a decompiler artifact,
    CPython's original assigns it to known_paths; verify against upstream.
    """
    if known_paths is None:
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, 'rU')
    except IOError:
        return
    with f:
        for n, line in enumerate(f):
            if line.startswith('#'):
                continue  # comment line in the .pth file
            try:
                # Lines starting with "import" are executed, not treated as paths.
                if line.startswith(('import ', 'import\t')):
                    exec line
                    continue
                line = line.rstrip()
                dir, dircase = makepath(sitedir, line)
                if dircase not in known_paths and os.path.exists(dir):
                    sys.path.append(dir)
                    known_paths.add(dircase)
            except Exception as err:
                # A bad line aborts processing of the rest of this .pth file.
                print >> sys.stderr, ('Error processing line {:d} of {}:\n').format(n + 1, fullname)
                for record in traceback.format_exception(*sys.exc_info()):
                    for line in record.splitlines():
                        print >> sys.stderr, ' ' + line
                print >> sys.stderr, '\nRemainder of file ignored'
                break
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add *sitedir* to sys.path and process each of its .pth files (sorted)."""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if sitedircase not in known_paths:
        sys.path.append(sitedir)
    try:
        names = os.listdir(sitedir)
    except os.error:
        return  # directory unreadable/nonexistent
    dotpth = os.extsep + 'pth'
    names = [ name for name in names if name.endswith(dotpth) ]
    for name in sorted(names):
        addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def check_enableusersite():
    """Decide whether user site-packages should be enabled.

    Returns False when disabled via sys.flags.no_user_site, None when the
    process runs with mismatched real/effective uid or gid (setuid/setgid,
    disabled for security), True otherwise.
    """
    if sys.flags.no_user_site:
        return False
    if hasattr(os, 'getuid') and hasattr(os, 'geteuid') and os.geteuid() != os.getuid():
        return None
    if hasattr(os, 'getgid') and hasattr(os, 'getegid') and os.getegid() != os.getgid():
        return None
    return True
def getuserbase():
    """Return (and memoize in the USER_BASE global) sysconfig's 'userbase'."""
    global USER_BASE
    if USER_BASE is not None:
        return USER_BASE
    from sysconfig import get_config_var
    USER_BASE = get_config_var('userbase')
    return USER_BASE
def getusersitepackages():
    """Return (and memoize in USER_SITE) the per-user site-packages path."""
    global USER_SITE
    user_base = getuserbase()  # also ensures USER_BASE is populated
    if USER_SITE is not None:
        return USER_SITE
    from sysconfig import get_path
    import os
    if sys.platform == 'darwin':
        # Framework builds on macOS use a dedicated install scheme.
        from sysconfig import get_config_var
        if get_config_var('PYTHONFRAMEWORK'):
            USER_SITE = get_path('purelib', 'osx_framework_user')
            return USER_SITE
    USER_SITE = get_path('purelib', '%s_user' % os.name)
    return USER_SITE
def addusersitepackages(known_paths):
    """Add the user's site-packages dir to sys.path when enabled and present."""
    global ENABLE_USER_SITE
    user_site = getusersitepackages()
    if ENABLE_USER_SITE and os.path.isdir(user_site):
        addsitedir(user_site, known_paths)
    return known_paths
def getsitepackages():
    """Return the list of global site-packages directories for PREFIXES."""
    sitepackages = []
    seen = set()
    for prefix in PREFIXES:
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)
        if sys.platform in ('os2emx', 'riscos'):
            sitepackages.append(os.path.join(prefix, 'Lib', 'site-packages'))
        else:
            # '/' separator implies a POSIX layout, otherwise Windows-style.
            if os.sep == '/':
                sitepackages.append(os.path.join(prefix, 'lib', 'python' + sys.version[:3], 'site-packages'))
                sitepackages.append(os.path.join(prefix, 'lib', 'site-python'))
            else:
                sitepackages.append(prefix)
                sitepackages.append(os.path.join(prefix, 'lib', 'site-packages'))
        if sys.platform == 'darwin':
            # Framework builds also search /Library/<framework>/<ver>/site-packages.
            from sysconfig import get_config_var
            framework = get_config_var('PYTHONFRAMEWORK')
            if framework:
                sitepackages.append(os.path.join('/Library', framework, sys.version[:3], 'site-packages'))
    return sitepackages
def addsitepackages(known_paths):
    """Add every existing global site-packages directory to sys.path."""
    for sitedir in getsitepackages():
        if os.path.isdir(sitedir):
            addsitedir(sitedir, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """OS/2 (EMX) only: append Lib/lib-dynload to the BEGINLIBPATH env var."""
    dllpath = os.path.join(sys.prefix, 'Lib', 'lib-dynload')
    libpath = os.environ['BEGINLIBPATH'].split(';')
    if libpath[-1]:
        libpath.append(dllpath)
    else:
        # Trailing ';' left an empty final element -- overwrite it.
        libpath[-1] = dllpath
    os.environ['BEGINLIBPATH'] = (';').join(libpath)
def setquit():
    """Install the interactive quit()/exit() builtins with a platform EOF hint."""
    if os.sep == ':':
        eof = 'Cmd-Q'
    else:
        if os.sep == '\\':
            eof = 'Ctrl-Z plus Return'
        else:
            eof = 'Ctrl-D (i.e. EOF)'

    class Quitter(object):
        # repr() shows usage; calling it closes stdin and raises SystemExit.

        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)

        def __call__(self, code=None):
            try:
                # Closing stdin makes a second EOF unnecessary in some shells.
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    __builtin__.quit = Quitter('quit')
    __builtin__.exit = Quitter('exit')
class _Printer(object):
    """Lazy pager used for the copyright/credits/license builtins (Python 2).

    repr() shows short texts inline; calling the object pages long texts
    MAXLINES at a time.
    """
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        self.__name = name
        self.__data = data      # fallback text if no file is found
        self.__files = files    # candidate filenames to load the text from
        self.__dirs = dirs      # directories searched for those files
        self.__lines = None     # populated lazily by __setup()
        return

    def __setup(self):
        # Load the text once: first matching file wins, else use __data.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = file(filename, 'rU')
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)
        return

    def __repr__(self):
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return ('\n').join(self.__lines)
        return 'Type %s() to see the full %s text' % ((self.__name,) * 2)

    def __call__(self):
        # Page through the text, prompting between screens; 'q' quits.
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print self.__lines[i]
            except IndexError:
                break  # ran past the end of the text
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = raw_input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
        return
def setcopyright():
    """Install the copyright/credits/license builtins as lazy _Printer pagers."""
    __builtin__.copyright = _Printer('copyright', sys.copyright)
    if sys.platform[:4] == 'java':
        __builtin__.credits = _Printer('credits', 'Jython is maintained by the Jython developers (www.jython.org).')
    else:
        __builtin__.credits = _Printer('credits', ' Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands\n for supporting Python development. See www.python.org for more information.')
    here = os.path.dirname(os.__file__)
    # The license text is loaded from LICENSE files next to the stdlib if present.
    __builtin__.license = _Printer('license', 'See http://www.python.org/%.3s/license.html' % sys.version, [
     'LICENSE.txt', 'LICENSE'], [
     os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
def __repr__(self):
return 'Type help() for interactive help, or help(object) for help about object.'
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    """Install the interactive help() builtin."""
    __builtin__.help = _Helper()
def aliasmbcs():
    """On Windows, alias the default 'cpXXX' locale codec to 'mbcs' when
    the codec itself is not available."""
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        if enc.startswith('cp'):
            try:
                codecs.lookup(enc)
            except LookupError:
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the default string encoding (effectively a no-op here)."""
    encoding = 'ascii'
    # NOTE(review): decompiled/constant-folded -- this condition is always
    # False, so sys.setdefaultencoding is never actually called.
    if encoding != 'ascii':
        sys.setdefaultencoding(encoding)
def execsitecustomize():
    """Import sitecustomize, if present; errors are reported but non-fatal."""
    try:
        import sitecustomize
    except ImportError:
        pass  # no sitecustomize module -- perfectly normal
    except Exception:
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            print >> sys.stderr, "'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
    """Import usercustomize, if present; errors are reported but non-fatal."""
    try:
        import usercustomize
    except ImportError:
        pass  # no usercustomize module -- perfectly normal
    except Exception:
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            print >> sys.stderr, "'import usercustomize' failed; use -v for traceback"
def main():
    """Run the standard site-initialization sequence (invoked at import, below)."""
    global ENABLE_USER_SITE
    abs__file__()
    known_paths = removeduppaths()
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    known_paths = addusersitepackages(known_paths)
    known_paths = addsitepackages(known_paths)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove setdefaultencoding so user code cannot change the encoding later.
    if hasattr(sys, 'setdefaultencoding'):
        del sys.setdefaultencoding
    return


# Site initialization runs as a side effect of importing this module.
main()
help = " %s [--user-base] [--user-site]\n\n Without arguments print some useful information\n With arguments print the value of USER_BASE and/or USER_SITE separated\n by '%s'.\n\n Exit codes with --user-base or --user-site:\n 0 - user site directory is enabled\n 1 - user site directory is disabled by user\n 2 - uses site directory is disabled by super user\n or for security reasons\n >2 - unknown error\n "
args = sys.argv[1:]
if not args:
print 'sys.path = ['
for dir in sys.path:
print ' %r,' % (dir,)
print ']'
print 'USER_BASE: %r (%s)' % (USER_BASE,
'exists' if os.path.isdir(USER_BASE) else "doesn't exist")
print 'USER_SITE: %r (%s)' % (USER_SITE,
'exists' if os.path.isdir(USER_SITE) else "doesn't exist")
print 'ENABLE_USER_SITE: %r' % ENABLE_USER_SITE
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print os.pathsep.join(buffer)
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print textwrap.dedent(help % (sys.argv[0], os.pathsep))
sys.exit(10)
return
if __name__ == '__main__':
    # Run the site-configuration report when executed as a script.
    _script()
07dcc830cd8a4c2e6ca7219061211006d558120b | 24,420 | py | Python | gtag/gtag.py | manatlan/gtag | 1e31cf9483a121cc769831ff6b94e7663a1eb0bf | [
"Apache-2.0"
] | 10 | 2020-05-19T07:17:37.000Z | 2021-11-15T09:40:30.000Z | gtag/gtag.py | manatlan/gtag | 1e31cf9483a121cc769831ff6b94e7663a1eb0bf | [
"Apache-2.0"
] | 1 | 2020-06-19T11:48:22.000Z | 2020-06-19T15:13:11.000Z | gtag/gtag.py | manatlan/gtag | 1e31cf9483a121cc769831ff6b94e7663a1eb0bf | [
"Apache-2.0"
] | 1 | 2020-10-02T00:52:58.000Z | 2020-10-02T00:52:58.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# #############################################################################
# Apache2 2020 - manatlan manatlan[at]gmail(dot)com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# more: https://github.com/manatlan/guy
# #############################################################################
import guy,sys,asyncio,hashlib,html,inspect,types,json
import typing as T
# Detect async generators by their type's repr string (fragile heuristic).
isAsyncGenerator=lambda x: "async_generator" in str(type(x)) #TODO: howto better ?
# Unwrap a ReactiveProp to its underlying value; pass anything else through.
value=lambda x: x.getValue() if isinstance(x,ReactiveProp) else x
def jjs(obj): #TODO: not optimal ... do better than str's pattern subst ;-)
    """ json dumps (js is b'' (bytes)) """
    # bytes values are wrapped in <:<: ... :>:> sentinels, then the sentinels
    # (with their surrounding quotes) are stripped from the JSON string so the
    # bytes content ends up unquoted in the output (raw JS, not a JSON string).
    def my(obj):
        if isinstance(obj,bytes):
            return "<:<:%s:>:>" % obj.decode()
        else:
            return guy.serialize(obj)
    return guy.json.dumps(obj, default=my).replace('"<:<:',"").replace(':>:>"',"")
def log(*a):
    """Debug logging hook; currently a disabled no-op."""
    return None
class MyMetaclass(type):
    """Metaclass letting any attribute access on Tag (e.g. Tag.span) act as
    a factory that builds a Tag with that attribute name as its tag name."""
    def __getattr__(self,name:str):
        def _(*a,**k) -> Tag:
            t=Tag(*a,**k)
            t.tag=name
            return t
        return _
class Tag(metaclass=MyMetaclass):
    """ This is a helper to produce a "HTML TAG" """
    tag="div" # default one
    klass=None  # rendered as the HTML "class" attribute

    def __init__(self,*contents,**attrs):
        #~ assert "id" not in attrs.keys()
        self.id=None
        self.__contents=list(contents)
        # keyword args become instance attributes, rendered as HTML attributes
        self.__dict__.update(attrs)

    def add(self,*elt):
        # append more children to the tag's content
        self.__contents.extend(elt)

    def __str__(self):
        attrs={k:v for k,v in self.__dict__.items() if not k.startswith("_")} # clone the dict (important)!
        # instance "klass" overrides the class-level default
        klass= attrs.get("klass") or self.klass
        if "klass" in attrs: del attrs["klass"]
        if klass: attrs["class"]=klass
        if self.id: attrs["id"]=self.id
        rattrs=[]
        for k,v in attrs.items():
            if v is not None and k not in ["tag"]:
                if isinstance(v,bool):
                    # boolean True renders as a bare attribute; False is dropped
                    if v: rattrs.append(k)
                else:
                    # "_" -> "-" in attribute names; values are HTML-escaped
                    rattrs.append( '%s="%s"'%(k.replace("_","-") if k!="klass" else "class",html.escape( str(v) )) )
        # children that unwrap to None or a bool are skipped in the output
        return """<%(tag)s%(attrs)s>%(content)s</%(tag)s>""" % dict(
            tag=self.tag.replace("_","-"),
            attrs=" ".join([""]+rattrs) if rattrs else "",
            content=" ".join([str(i) for i in self.__contents if value(i) is not None and not isinstance(i,bool)]),
        )

    def __repr__(self):
        return "<%s>" % self.__class__.__name__
class NONE: pass
class ReactiveProp:
    """Reactive proxy for one entry of a dict.

    Binds ``dico[attribut]`` and forwards (almost) every operator/protocol to
    the current value, so the proxy can be used transparently in place of the
    value itself while every read/write goes through the shared dict.
    NOTE(review): ``value()`` used below is a module-level helper defined
    elsewhere in this file — presumably unwrapping ReactiveProps to their plain
    value; confirm against its definition.
    """
    def __init__(self,dico:dict,attribut:str,value=NONE):
        # ReactiveProps must never be nested in one another
        assert not isinstance(dico,ReactiveProp)
        assert not isinstance(attribut,ReactiveProp)
        assert not isinstance(value,ReactiveProp)
        self._instance=dico
        self._attribut=attribut
        if value!=NONE:  # NONE sentinel (not None!): None is a valid initial value
            self.setValue(value)
    def setValue(self,v):
        """Write *v* into the bound dict entry."""
        assert not isinstance(v,ReactiveProp)
        self._instance[self._attribut]=v
    def getValue(self):
        """Read the current value of the bound dict entry."""
        return self._instance[self._attribut]
    # ---- rich comparisons: delegated to the current value ----
    def __eq__(self, v):
        return self.getValue() == value(v)
    def __ne__(self, v):
        return self.getValue() != value(v)
    def __lt__(self, v):
        return self.getValue() < value(v)
    def __le__(self, v):
        return self.getValue() <= value(v)
    def __ge__(self, v):
        return self.getValue() >= value(v)
    def __gt__(self, v):
        return self.getValue() > value(v)
    # ---- arithmetic: delegated to the current value ----
    def __add__(self,v):
        return self.getValue() + value(v)
    def __sub__(self,v):
        return self.getValue() - value(v)
    def __mul__(self,v):
        return self.getValue() * value(v)
    def __truediv__(self,v):
        return self.getValue() / value(v)
    def __floordiv__(self,v):
        return self.getValue() // value(v)
    def __mod__(self,v):
        return self.getValue() % value(v)
    # ---- in-place operators return a PLAIN value (not the proxy): after
    # ``x += 1`` the name is rebound to a raw value; GTag.__setattr__ re-wraps
    # raw values into ReactiveProps, which looks like the intended flow ----
    def __iadd__(self,v):
        return self.getValue() + value(v)
    def __isub__(self,v):
        return self.getValue() - value(v)
    def __imul__(self,v):
        return self.getValue() * value(v)
    def __itruediv__(self,v):
        return self.getValue() / value(v)
    def __ifloordiv__(self,v):
        return self.getValue() // value(v)
    # ---- reflected arithmetic ----
    def __radd__(self, v):
        return value(v) + self.getValue()
    def __rsub__(self, v):
        return value(v) - self.getValue()
    def __rmul__(self, v):
        return value(v) * self.getValue()
    def __rtruediv__(self, v):
        return value(v) / self.getValue()
    def __rfloordiv__(self, v):
        return value(v) // self.getValue()
    def __rmod__(self,v):
        return value(v) % self.getValue()
    # ---- bitwise, direct and reflected ----
    def __and__(self,v):
        return self.getValue() & value(v)
    def __or__(self,v):
        return self.getValue() | value(v)
    def __xor__(self,v):
        return self.getValue() ^ value(v)
    def __rand__(self,v):
        return value(v) & self.getValue()
    def __ror__(self,v):
        return value(v) | self.getValue()
    def __rxor__(self,v):
        return value(v) ^ self.getValue()
    # ---- container protocol ----
    def __getitem__(self,k):
        return self.getValue()[k]
    def __setitem__(self,k,v):
        self.getValue()[k]=value(v)
    def __delitem__(self,k):
        del self.getValue()[ value(k) ]
    # ---- conversions ----
    def __float__(self):
        return float(self.getValue())
    def __int__(self):
        return int(self.getValue())
    def __bool__(self):
        return bool(self.getValue())
    def __str__(self):
        return str(self.getValue())
    @property
    def __class__(self):
        # masquerade: type()/isinstance() on the proxy report the proxied value's type
        return type(self.getValue())
    def __hash__(self):
        return hash(self.getValue())
    # ---- iteration ----
    def __iter__(self):
        return iter(self.getValue())
    def __next__(self):
        return next(self.getValue())
    def __contains__(self,x):
        return value(x) in self.getValue()
    # ---- attribute access: "_" names belong to the proxy itself,
    # everything else is forwarded to the proxied value ----
    def __getattr__(self,k):
        # only called for names NOT found on the proxy -> forward to the value
        return getattr(self.getValue(),k)
    def __setattr__(self,k,v):
        if k.startswith("_"):
            super().__setattr__(k, v)
        else:
            setattr(self.getValue(),k,value(v))
    def __len__(self):
        return len(self.getValue())
    def __call__(self,*a,**k):
        return self.getValue()(*a,**k)
    #TODO: add a lot of __slot__ ;-)
    def __repr__(self):
        # self.__class__ is the masquerading property above, so the proxied
        # value's type name is shown, not "ReactiveProp"
        iid=self._instance.id if hasattr(self._instance,"id") else str(self._instance)
        return "<ReactiveProp:%s attr=%s of instance=%s>" % (self.__class__.__name__,self._attribut,iid)
class render:
    """Namespace of decorators for gtag event methods: each one declares WHICH
    gtag gets re-rendered after the event runs (consumed by GTagApp.bindUpdate
    through Capacity.has)."""
    # POST build
    @staticmethod
    def local( method ): # gtag.event decorator
        """ Make the method renders only this component (and its childs)"""
        # frame trick: stores this decorator's own name ("local") as the capacity
        Capacity(method).set(inspect.getouterframes(inspect.currentframe())[0].function)
        return method
    @staticmethod
    def parent( method ): # gtag.event decorator
        """ Make the method renders only its parent (and its childs) """
        # same frame trick: stores "parent"
        Capacity(method).set(inspect.getouterframes(inspect.currentframe())[0].function)
        return method
    @staticmethod
    def none( method ): # gtag.event decorator
        """ Make the method renders nothing """
        # same frame trick: stores "none"
        Capacity(method).set(inspect.getouterframes(inspect.currentframe())[0].function)
        return method
class Capacity:
    """Annotation helper: records named "capacities" on a callable (kept in a
    ``capacities`` list stored on the function object itself) and queries them
    later, e.g. ``Capacity(m).has(render.local)``."""

    def __init__(self, method: callable):
        self.__method = method

    def has(self, f: callable):
        """Return True/False when capacities were declared on the callable,
        or None when none were ever set."""
        try:
            declared = self.__method.capacities
        except AttributeError:
            return None
        return f.__name__ in declared

    def set(self, capacity):
        """Record *capacity* on the wrapped callable, creating the list on first use."""
        declared = getattr(self.__method, "capacities", None)
        if declared is None:
            self.__method.capacities = declared = []
        declared.append(capacity)
class Binder:
    """``gtag.bind.<method>`` helper: resolves a python method of a GTag and
    returns a callable producing the js-side ``callEvent(...)`` string that
    triggers it from the browser."""
    def __init__(self,instance):
        self.__instance=instance
    def __getattr__(self,name:str):
        m=hasattr(self.__instance,name) and getattr(self.__instance,name)
        if m and callable( m ): # bind a self.method -> return a js/string for a guy's call in js side
            #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
            # resolve all declaredJsVars from all childs
            pool={}
            for id,o in self.__instance._getChilds().items():
                pool.update( o._declaredJsInputs )
            #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
            def _(*args,**kargs):
                if args or kargs:
                    # unwrap ReactiveProps to plain values before serializing to js
                    args=[value(i) for i in args]
                    kargs={k:value(v) for k,v in kargs.items()}
                # jjs() is the module-level json-for-js serializer defined elsewhere in this file
                return "callEvent('%s','%s',%s,%s,%s)" % (self.__instance.id,name,jjs(args),jjs(kargs),jjs(pool))
            return _
        else:
            raise Exception("Unknown method '%s' in '%s'"%(name,self.__instance.__class__.__name__))
class GTag:
    """
    The magic thing ;-) — base class for reusable gui components.

    A GTag owns reactive data (``_data`` wrapped in ReactiveProp), builds its
    view as a Tag tree in ``build()``, and is rendered/updated through a
    GTagApp (guy) instance. Subclasses override ``init()`` (constructor role)
    and ``build()`` (view).
    """
    size=None       # size of the windowed runned gtag (tuple (width,height) or guy.FULLSCREEN or None)
    _call=None      # first event to call at start !
    _parent=None    # parent GTag instance (None for the main/top one)

    # implicit parent version (don't need to pass self(=parent) when creating a gtag)
    def __init__(self,*a,**k):
        self._data={}
        self._id="%s_%s" % (self.__class__.__name__,hex(id(self))[2:])  # unique id: ClassName_hexaddr
        self._tag=NONE          # built Tag; NONE sentinel = "not built yet"
        self._scripts=[]
        if "parent" in k: # clonage (only main tags, so parent is None)
            #(but could be used to reparent a gtag manually)
            parent=k["parent"]
            del k["parent"]
        else:
            # guess parent: walk up the call stack until a frame whose first
            # argument is a GTag instance (the creator), or a 0-arg frame (-> no parent)
            lvl=0
            while True:
                lvl+=1
                frame = sys._getframe(lvl)
                arguments = frame.f_code.co_argcount
                if arguments == 0:
                    parent=None
                    break
                else:
                    caller_calls_self = frame.f_code.co_varnames[0]
                    parent=frame.f_locals[caller_calls_self]
                    if isinstance(parent,GTag):
                        break
        self._childs=[] #<- this is cleared at each rendering
        self._args=a
        self._kargs=k
        self._parent=parent
        log("INIT",repr(self))
        # js-declared inputs: init() parameters whose default is a *bytes*
        # literal become shared reactive props living in main._localInputs
        if self.parent is None:
            self.main._localInputs={}
        signature = inspect.signature( self.init )
        self._declaredJsInputs={k: v.default for k, v in signature.parameters.items() if type(v.default)==bytes}
        for k,v in self._declaredJsInputs.items():
            if k not in self.main._localInputs:
                self.main._localInputs[k]=None # init js props at null
            rp=ReactiveProp( self.main._localInputs,k)
            # bypass our own __setattr__ so the attribute IS the ReactiveProp
            super().__setattr__(k,rp)
        self.init(*self._args,**self._kargs)
        self._childs=[] #<- clear innerchilds (created during init phase), to avoid appearing twice
        self._scriptsInInit=self._scripts[:]
        # store the instance in the parent._childs
        if self._parent:
            self._parent._childs.append(self)

    def _tree(self):
        """Return a printable ascii tree of this gtag and its descendants (debug)."""
        def _gc(g,lvl=0) -> list:
            ll=["+" + (" "*lvl) + repr(g)]
            for obj in g._ichilds:
                inners=_gc(obj,lvl+1)
                ll.extend( [i+' (INNER)' for i in inners] )
            for obj in g._childs:
                ll.extend( _gc(obj,lvl+1) )
            return ll
        return "\n".join(_gc(self,0))

    @property
    def _ichilds(self):
        """Inner children: GTag instances stored as reactive data attributes."""
        ll= [v for k,v in self._data.items() if isinstance(v,GTag)]
        return ll

    def _getChilds(self) -> dict:
        """Return {id: gtag} for self and every (inner + built) descendant."""
        def _gc(g) -> dict:
            d={g.id:g}
            for obj in g._ichilds:
                d.update( _gc(obj) )
            for obj in g._childs:
                d.update( _gc(obj) )
            return d
        return _gc(self)

    def _getRef(self,id): # -> GTag
        """Find a descendant (or self) by id; raise when unknown."""
        childs=self._getChilds()
        try:
            return childs[id]
        except KeyError:
            raise Exception("ERROR: Unknown child '%s' in '%s'"%(id,self.id))

    @property
    def id(self):
        """Unique id of this gtag (also the DOM id of its rendered element)."""
        return self._id

    @property
    def parent(self): # -> T.Union[GTag,None]:
        """ return the parent instance (None if gtag is the main) """
        if self._parent is None:
            return None
        else:
            return self._parent

    @property
    def main(self): # -> GTag:
        """ return the main instance """
        x=self
        while x._parent is not None:
            x=x._parent
        return x

    @property
    def bind(self):
        """ to bind method ! and return its js repr"""
        return Binder(self)

    def _clone(self): #TODO: not clear here ... need redone (the rebuild() needed ?! why ?! (because storage in "_tag")
        """Clone the main gtag (one clone per browser session in serve mode)."""
        assert self._parent is None,"Can't clone a gtag which is not the main one"
        props={k:v for k,v in self.__dict__.items() if k[0]!="_" or k=="_call" or k=="_data"}
        gtag = self.__class__(*self._args,**self._kargs,parent=None) # parent=None, will avoid guess parent ! (it makes sense, because you can clone only mains)
        gtag.__dict__.update(props)
        gtag._scripts=[]
        gtag._localInputs = self._localInputs
        gtag.init(*self._args,**self._kargs)
        gtag._rebuild()
        log("^^^ CLONED ^^^",repr(self),"-->",repr(gtag))
        return gtag

    def _guessHeaders(self):
        """Collect the html header elements declared by every GTag subclass
        (their ``headers`` attribute), de-duplicated by md5 of their string form."""
        assert self._parent is None,"You are not on the main instance, you can't get a child"
        mklist=lambda x: x if isinstance(x,list) else [x]
        md5= lambda x: hashlib.md5(x.encode('utf-8')).hexdigest()
        lmd5=[]
        ll=[]
        for g in GTag.__subclasses__():
            if hasattr(g,"headers"):
                for i in mklist(getattr(g,"headers")):
                    m5=md5(str(i))
                    if m5 not in lmd5:
                        lmd5.append(m5)
                        ll.append( i )
        return ll

    def init(self,*a,**k):
        """ Override to make inits (replace the __init__(), but same role)"""
        pass

    def build(self) -> T.Union[Tag,None]:
        """ Override for static build
            SHOULD RETURN a "Tag" (not a GTag)
        """
        raise Exception("Should be implemented")

    async def _start(self):
        """Async generator driving the optional start event (set via run/runCef/serve ``start=``)."""
        #=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- call statement (old autostart system)
        if self._call: # there is an event to call at start !
            if asyncio.iscoroutine(self._call):
                rep=self._call
                if isinstance(rep, types.GeneratorType):
                    for _ in rep:
                        assert _ is None, "wtf (event returns something)?"
                        yield
                else:
                    await rep
            elif isAsyncGenerator(self._call):
                async for _ in self._call:
                    assert _ is None, "wtf (event returns something)?"
                    yield
            else:
                raise Exception("Not implemented (calling a (sync)start function)")
        #=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    def exit(self,v=None): pass # overriden by run/runcef/serve

    def _clearScripts(self):
        # reset pending js to the scripts that were registered during init()
        self._scripts=self._scriptsInInit[:]

    def _rebuild(self,clearScripts=True):
        """Rebuild the Tag tree (and forget previously-built children)."""
        if clearScripts: self._clearScripts()
        self._childs=[]
        self._tag=self.build( )

    def __str__(self):
        """Render to html: builds lazily, rebuilds inner children, and wraps
        non-Tag results in an auto div/span carrying this gtag's id."""
        log("___rendering",repr(self))
        if self._tag == NONE:
            self._tag=self.build()
        o= self._tag
        if o is None:
            return ""
        else:
            for i in self._ichilds:
                i._rebuild()
            if isinstance(o,Tag):
                o.id=self.id
            elif isinstance(o,GTag):
                o=Tag.div(o) # auto div'ify
                o.id=self.id
            else:
                o=Tag.span(o) # auto span'ify
                o.id=self.id
            return str(o)

    def __repr__(self):
        return "<GTag: %s [parent:%s] (innerchilds=%s)>" % (
            self._id,
            self._parent.id if self._parent else "None",
            [i._id for i in self._ichilds]
        )

    def __setattr__(self,k,v):
        """Public attributes are stored reactively: raw values get wrapped in a
        ReactiveProp bound to self._data; "_"-prefixed names are set directly."""
        if k.startswith("_"):
            super().__setattr__(k,v)
        else:
            o=self._data.get(k)
            if isinstance(o,ReactiveProp):
                if isinstance(v,ReactiveProp):
                    # v is RP: replace the binding itself
                    self._data[k]=v
                    super().__setattr__(k,v)
                else:
                    # v is real: write through the existing binding
                    o.setValue( v )
            else:
                if isinstance(v,ReactiveProp):
                    # v is RP (put RP in RP !!)
                    self._data[k]=v
                    super().__setattr__(k,v)
                else:
                    # v is real: create the binding
                    self._data[k]=v
                    super().__setattr__(k,ReactiveProp(self._data,k,v))

    @property
    def scripts(self):
        """Pending js statements, joined."""
        return ";".join(self._scripts)

    def __call__(self,js):
        """Queue a js statement to run after the next render (``self("...")``)."""
        self._scripts.append(js)

    def _getScripts(self) -> str:
        """Collect pending js of self + descendants, each scoped to its DOM element as ``tag``."""
        ll=[]
        for g in self._getChilds().values():
            js=g.scripts
            if js:
                ll.append( "(function(tag){%s})(document.getElementById('%s'))" % (str(js),g.id) )
        return ";".join(ll)

    def _render(self) -> dict:
        """Produce the update payload sent to the js side (id/content/scripts/exchange)."""
        # resolve all declaredJsVars from all childs
        pool={}
        for id,o in self._getChilds().items():
            pool.update( o._declaredJsInputs )
        h=str(self)
        d=dict(
            id = self.id,
            content = h,
            scripts = self._getScripts(),
            exchange = jjs(pool),
        )
        log(">>>UPDATE:",repr(self),json.dumps(d,indent=True))
        #~ log(self._tree())
        return d

    def run(self,*a,start=None,**k) -> any:
        """ Run as Guy App (using Chrome) """
        self._call=start
        g=GTagApp(self,False)
        g._name=self.__class__.__name__
        return g.run(*a,**k)

    def runCef(self,*a,start=None,**k) -> any:
        """ Run as Guy App (using Cef) """
        self._call=start
        g=GTagApp(self,False)
        g._name=self.__class__.__name__
        # FIX: was ``GTagApp.runCef(*a,**k)`` — called on the class (no self,
        # ``g`` ignored); now consistent with run()/serve()
        return g.runCef(*a,**k)

    def serve(self,*a,start=None,**k) -> any:
        """ Run as Guy Server App """
        self._call=start
        g=GTagApp(self,True)
        g._name=self.__class__.__name__
        return g.serve(*a,**k)
class GTagApp(guy.Guy):
    """ The main guy instance app, which can run a gtag inside """
    def __init__(self,gtag,isMultipleSessionPossible=False):
        # isMultipleSessionPossible=True -> server mode: one cloned gtag per browser session
        assert isinstance(gtag,GTag)
        self._originalGTag=gtag
        self.size=gtag.size
        if isMultipleSessionPossible:
            self._ses={}        # web mode: {session-id: cloned GTag}
        else:
            self._ses=None      # app mode: a single shared gtag
        super().__init__()
    def render(self,path=None):
        """Return the full html page embedding the main gtag; headers are
        guessed from the declarations of every GTag subclass."""
        hh=self._originalGTag._guessHeaders()
        return """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<script>
if(!sessionStorage["gtag"]) sessionStorage["gtag"]=Math.random().toString(36).substring(2);
var GID=sessionStorage["gtag"];
async function getSessionId() {return GID}
async function execute(id,content,scripts,exchange) {
    document.querySelector("#"+id).outerHTML=content;
    eval(scripts)
    return eval("x="+exchange);
}
async function callEvent(id,event,a,k,e) {
    let r=await self.bindUpdate(id,GID,event,a,k,e);
    await execute( r.id, r.content, r.scripts, r.exchange );
}
</script>
%s
</head>
<body>
<div id="gtag">
<script src="guy.js"></script>
</div>
</body>
</html>""" % "\n".join([str(h) for h in hh])
    async def init(self):
        # first guy callback: pick (or clone) the gtag for this session, render
        # it into the #gtag placeholder, and sync back the js-side inputs
        if self._ses is not None: # web mode
            gid=await self.js.getSessionId()
            log("CREATE SESSION:",gid)
            gtag = self._ses.get(gid)
            if gtag is None:
                gtag = self._originalGTag._clone()
                self._ses[gid] = gtag
        else: # app mode
            gtag = self._originalGTag
        gtag.exit = self.exit #plug the exit()
        log(">>>SERVE",repr(gtag))
        log(gtag._tree())
        r=gtag._render()
        scripts=r["scripts"]+";"+gtag.bind._start()
        #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
        gtag.main._localInputs.update( await self.js.execute( "gtag", r["content"], scripts, r["exchange"] ) )
        #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
    async def bindUpdate(self,id:str,gid:str,method:str,args,kargs,jsArgs={}):
        """ inner (js exposed) guy method, called by gtag.bind.<method>(*args) """
        # NOTE(review): mutable default jsArgs={} — only read here (never mutated),
        # so harmless in the visible code, but fragile; confirm before relying on it
        async def asyncRender(g):
            # push an intermediate update to the browser (used by generator events)
            g._rebuild(clearScripts=False)
            log(">>>Force UPDATE:",repr(g))
            r=g._render()
            #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
            gtag.main._localInputs.update(await self.js.execute( r["id"], r["content"], r["scripts"], r["exchange"] ))
            #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
            g._clearScripts()
        if self._ses is None:
            gtag=self._originalGTag
        else:
            gtag=self._ses[gid]
        #////////////////////////////////////////////////////////////////// THE MAGIC TODO: move to gtag
        #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
        # dispatch jsArgs in gtag childs
        #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
        gtag.main._localInputs.update(jsArgs)
        #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
        obj=gtag._getRef(id)
        log("BINDUPDATE on",repr(gtag),"----->",repr(obj),"%s(%s %s)"% (method,args,kargs))
        proc=getattr(obj,method)
        # render scope is declared by the @render.* decorator on the event method
        if Capacity(proc).has(render.local):
            toRender=obj
        elif Capacity(proc).has(render.parent):
            toRender=obj._parent if obj._parent else obj
        elif Capacity(proc).has(render.none):
            toRender=None
        else:
            toRender=gtag
        if toRender: toRender._clearScripts()
        if asyncio.iscoroutinefunction( proc ):
            rep=await proc(*args,**kargs)
        else:
            rep=proc(*args,**kargs)
        if rep:
            # generator events yield (None) to trigger intermediate renders
            if isAsyncGenerator(rep):
                async for _ in rep: # could use yielded thing to update all or local ?!
                    assert _ is None, "wtf (event returns something)?"
                    if toRender:
                        await asyncRender(toRender)
            elif isinstance(rep, types.GeneratorType):
                for _ in rep:
                    assert _ is None, "wtf (event returns something)?"
                    if toRender:
                        await asyncRender(toRender)
            else:
                raise Exception("wtf (event returns something)?")
        # final update payload returned to js callEvent (None when @render.none)
        if toRender:
            toRender._rebuild(clearScripts=False)
            return toRender._render()
    #////////////////////////////////////////////////////////////////// THE MAGIC
| 32.301587 | 160 | 0.522768 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.