text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'admin'
class Contact:
    """Plain data holder for one address-book contact record.

    Every constructor keyword is stored on the instance under the same name;
    all fields default to None.
    """

    def __init__(self,
                 firstname=None, middlename=None, lastname=None, nickname=None,
                 title=None, company=None, address=None,
                 phone_home=None, phone_mobile=None, phone_work=None, fax=None,
                 email_first=None, email_second=None, email_third=None,
                 homepage=None,
                 birth_day_list_item=None, birth_month_list_item=None,
                 birth_year=None,
                 anniversary_day_list_item=None, anniversary_month_list_item=None,
                 anniversary_year=None,
                 second_address=None, second_phone=None, notes=None):
        # Mirror every keyword argument onto the instance verbatim.
        values = dict(locals())
        del values['self']
        for attribute, value in values.items():
            setattr(self, attribute, value)
| {
"repo_name": "dimchenkoAlexey/python_training",
"path": "model/contact.py",
"copies": "1",
"size": "1888",
"license": "apache-2.0",
"hash": -62076236253906664,
"line_mean": 33.3272727273,
"line_max": 70,
"alpha_frac": 0.5296610169,
"autogenerated": false,
"ratio": 4.1677704194260485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 55
} |
__author__ = 'admin'
class GroupHelper:
    """Page-object helper that drives the groups page of the application."""

    def __init__(self, app):
        self.app = app

    def open_groups_page(self):
        """Navigate to the groups page unless it is already open."""
        wd = self.app.wd
        already_open = wd.current_url.endswith("groups.php") and len(wd.find_elements_by_name("new")) > 0
        if not already_open:
            wd.find_element_by_link_text("groups").click()

    def create(self, group):
        """Create a new group from the given model and submit the form."""
        wd = self.app.wd
        self.open_groups_page()
        wd.find_element_by_name("new").click()
        self.fill_attributes(group)
        wd.find_element_by_name("submit").click()

    def modify(self, group):
        """Open the edit form for a group, overwrite its fields and save."""
        wd = self.app.wd
        self.open_groups_page()
        self.click_edit_button()
        self.fill_attributes(group)
        wd.find_element_by_name("update").click()

    def set_field_value(self, field_name, value):
        """Type `value` into the named input; do nothing when value is None."""
        if value is None:
            return
        wd = self.app.wd
        wd.find_element_by_name(field_name).click()
        wd.find_element_by_name(field_name).clear()
        wd.find_element_by_name(field_name).send_keys(value)

    def fill_attributes(self, group):
        """Populate the name/header/footer inputs from the group model."""
        for field_name, value in (("group_name", group.name),
                                  ("group_header", group.header),
                                  ("group_footer", group.footer)):
            self.set_field_value(field_name, value)

    def delete_first_group(self):
        """Select the first group's checkbox, delete it, and reload the page."""
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()
        wd.find_element_by_name("delete").click()
        self.open_groups_page()

    def click_edit_button(self):
        """Press the Edit button (located by position in the groups form)."""
        wd = self.app.wd
        wd.find_element_by_xpath("//div[@id='content']/form/input[6]").click()

    def delete_all_groups(self):
        """Tick every group checkbox, delete them all, and reload the page."""
        wd = self.app.wd
        for checkbox in wd.find_elements_by_name("selected[]"):
            if not checkbox.is_selected():
                checkbox.click()
        wd.find_element_by_name("delete").click()
        self.open_groups_page()

    def count(self):
        """Return the number of groups currently listed on the groups page."""
        wd = self.app.wd
        self.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))
| {
"repo_name": "dimchenkoAlexey/python_training",
"path": "fixture/group.py",
"copies": "1",
"size": "2151",
"license": "apache-2.0",
"hash": 4080216671805314000,
"line_mean": 31.1044776119,
"line_max": 101,
"alpha_frac": 0.5788005579,
"autogenerated": false,
"ratio": 3.5262295081967214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46050300660967214,
"avg_score": null,
"num_lines": null
} |
__author__ = 'admin'
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from ResumeViewer.models import Job
from tastypie.authentication import SessionAuthentication
from tastypie.authorization import Authorization
from django.contrib.auth.models import User
from tastypie import fields
from ResumeViewer.api_authorization import AuthorizationByResume
class UserResource(ModelResource):
    """Tastypie resource exposing Django auth users.

    Sensitive account fields are excluded from the serialized payload.
    """
    class Meta:
        queryset = User.objects.all()
        resource_name = 'user'
        # Never expose credentials or permission flags through the API.
        excludes = ['email', 'password', 'is_active', 'is_staff', 'is_superuser']
class JobResource(ModelResource):
    """Tastypie resource for Job objects, tied to the authenticated user."""
    user = fields.ForeignKey(UserResource, 'user')

    def obj_create(self, bundle, **kwargs):
        """
        Create the object, forcing ownership to the requesting user
        (any client-supplied user value is overridden).
        """
        return super(JobResource, self).obj_create(bundle, user=bundle.request.user)

    class Meta:
        authentication = SessionAuthentication()
        # Resume-scoped authorization; the generic Authorization() line below
        # is the previous, more permissive setting kept for reference.
        authorization = AuthorizationByResume()
        # authorization = Authorization()
        queryset = Job.objects.all()
        resource_name = 'job'
        allowed_methods = ['post', 'get', 'put']
        excludes = ['meta']
        always_return_data = True
| {
"repo_name": "fawazn/Resume-Viewer",
"path": "ResumeViewer/api.py",
"copies": "1",
"size": "1230",
"license": "mit",
"hash": 4851212037286071000,
"line_mean": 35.2727272727,
"line_max": 84,
"alpha_frac": 0.6723577236,
"autogenerated": false,
"ratio": 4.315789473684211,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.548814719728421,
"avg_score": null,
"num_lines": null
} |
__author__ = 'admiral0'
import os.path as path
from os import walk
from .Common import read_json, mod_file_name
import re
from .Exceptions import JsonNotValid, RepositoryDirectoryDoesNotExist, RepositoryDoesNotHaveMetaJson, ModDoesNotExistInRepo
from .Mod import Mod
class ModRepository:
    """A directory of mods described by a meta.json manifest."""

    _metaname = 'meta.json'

    def __init__(self, repopath):
        """Load and validate the repository rooted at `repopath`.

        :raises RepositoryDirectoryDoesNotExist: when the directory is missing
        :raises RepositoryDoesNotHaveMetaJson: when meta.json is missing
        :raises JsonNotValid: when meta.json fails validation
        """
        if not path.isdir(repopath):
            raise RepositoryDirectoryDoesNotExist(repopath)
        meta_path = path.join(repopath, self._metaname)
        if not path.isfile(meta_path):
            raise RepositoryDoesNotHaveMetaJson(meta_path)
        self.repo = repopath
        self.metadata = read_json(meta_path)
        self.validate()

    def validate(self):
        """Check meta.json structure; raise JsonNotValid listing every problem."""
        errors = []
        if 'authors' not in self.metadata:
            errors.append('Does not contain authors property')
        elif type(self.metadata['authors']) is not list:
            errors.append('authors is not a list')
        else:
            for author in self.metadata['authors']:
                if type(author) is not str:
                    errors.append(str(author) + ' is not a string')
                elif not re.match(r'^.+?@.+?\..+?$', author):
                    errors.append(author + ' must be an email')
        if errors:
            raise JsonNotValid(path.join(self.repo, self._metaname), errors)

    def list_mods(self):
        """Map mod directory basename -> full path for every dir holding mod.json."""
        found = {}
        for mod_path, _dirs, files in walk(self.repo):
            if mod_file_name in files:
                found[path.basename(mod_path)] = mod_path
        return found
class ModManager(ModRepository):
    """ModRepository that lazily instantiates Mod objects on demand."""

    def __init__(self, repopath):
        super().__init__(repopath)
        # Bug fix: `mods` used to be a class attribute, so every ModManager
        # instance shared (and polluted) one cache dict. Make it per-instance.
        self.mods = {}
        # Names discovered on disk; Mod objects are built only when requested.
        self.mods_lazy = self.list_mods()

    def get_mod(self, name):
        """Return (and cache) the Mod for `name`.

        :raises ModDoesNotExistInRepo: when `name` is not present on disk
        """
        if name not in self.mods:
            if name not in self.mods_lazy:
                raise ModDoesNotExistInRepo(name)
            self.mods[name] = Mod(self.mods_lazy[name])
        return self.mods[name]
| {
"repo_name": "admiral0/AntaniRepos",
"path": "antanirepos/ModRepository.py",
"copies": "1",
"size": "2121",
"license": "bsd-2-clause",
"hash": -7735896267788268000,
"line_mean": 34.35,
"line_max": 123,
"alpha_frac": 0.5799151344,
"autogenerated": false,
"ratio": 4.00945179584121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.508936693024121,
"avg_score": null,
"num_lines": null
} |
__author__ = 'admiral0'
import os.path as path
import re
from .Exceptions import JsonNotValid, ModDoesNotExist, ModJsonDoesNotExist, ModVersionDoesNotExistInRepo
from .Common import *
def validate_version(ver):
    """Return a list of error messages for `ver`; empty when it is a valid id."""
    assert type(ver) is str
    if re.match(r'^[a-zA-Z_0-9\.\-()]+$', ver):
        return []
    return ['Version ' + ver + ' must match ^[a-zA-Z_0-9\.\-()]+$']
def validate_minecraft(mver, vv):
    """Validate the minecraft-version list for mod version `vv`; return error list."""
    if type(mver) is not list:
        return ['Minecraft version must be an array for version ' + vv]
    for candidate in mver:
        if not re.match(minecraft_version_regex, candidate):
            return ['Minecraft version ' + candidate + ' does not match ^\d+\.\d+(\.\d+)?$ pattern in version ' + vv]
    return []
def validate_versions(d, m):
    """Validate the 'versions' mapping of mod `m`; return accumulated errors.

    Each entry must have a 'file' present in the mod folder, a valid
    'minecraft' version list, and (optionally) a known 'type'.
    """
    if type(d) is not dict:
        return ['Versions is not a dict!']
    error = []
    for ver, info in d.items():
        error.extend(validate_version(ver))
        try:
            if not path.isfile(path.join(m.mod_dir, info['file'])):
                error.append('File ' + info['file'] + ' has not been found in mod folder:' + m.mod_dir)
        except KeyError:
            error.append('Key \'file\' is missing in version ' + ver)
        try:
            error.extend(validate_minecraft(info['minecraft'], ver))
        except KeyError:
            error.append('Key \'minecraft\' is missing in version ' + ver)
        if 'type' in info:
            if info['type'] not in ['universal', 'client', 'server']:
                error.append('Type for ver ' + ver + 'must be one of universal, client or server')
    return error
class Mod:
    """A single mod directory described by a mod.json manifest."""

    # Declarative schema consumed by Common.validate(): each entry specifies
    # the expected type, whether the key is mandatory, and an extra validator
    # callable (value, mod) -> list of error strings.
    _elements = {
        'author': {
            'type': str,
            'required': True,
            'validate': lambda val, m: [] if 0 < len(val) < 255 else ['Length of author must be between 0 and 255']
        },
        'description': {
            'type': str,
            'required': False,
            'validate': lambda val, m: [],
        },
        'name': {
            'type': str,
            'required': True,
            'validate': lambda val, m: [] if 0 < len(val) < 255 else ['Length of the name must be between 0 and 255']
        },
        'url': {
            'type': str,
            'required': True,
            'validate': lambda val, m: [] if re.match(url_regex, val) else ['Must be a link']
        },
        'versions': {
            'type': dict,
            'required': True,
            'validate': lambda val, m: validate_versions(val, m)
        }
    }

    def __init__(self, mod_path):
        """Load and validate the mod rooted at `mod_path`.

        :raises ModDoesNotExist: when the directory is missing
        :raises ModJsonDoesNotExist: when mod.json is missing
        :raises JsonNotValid: when mod.json fails schema validation
        """
        if not path.isdir(mod_path):
            raise ModDoesNotExist(mod_path)
        self.mod_dir = mod_path
        self.json_path = path.join(mod_path, mod_file_name)
        if not path.isfile(self.json_path):
            raise ModJsonDoesNotExist(self.json_path)
        self.data = read_json(self.json_path)
        self.validate()
        # The mod's identifier is simply its directory name.
        self.slug = path.basename(mod_path)

    def validate(self):
        """Check self.data against _elements; raise JsonNotValid with all errors."""
        errors = validate(self._elements, self.data, self)
        if len(errors) > 0:
            raise JsonNotValid(self.json_path, errors)

    def get_version(self, version):
        """Return the version dict for `version`.

        :raises ModVersionDoesNotExistInRepo: when the version is not declared
        """
        if version not in self.data['versions'].keys():
            raise ModVersionDoesNotExistInRepo(self.slug, version)
        return self.data['versions'][version]
| {
"repo_name": "admiral0/AntaniRepos",
"path": "antanirepos/Mod.py",
"copies": "1",
"size": "3351",
"license": "bsd-2-clause",
"hash": -4919725087683980000,
"line_mean": 32.8484848485,
"line_max": 117,
"alpha_frac": 0.5455088033,
"autogenerated": false,
"ratio": 3.6864686468646863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9673068260503208,
"avg_score": 0.011781837932295753,
"num_lines": 99
} |
__author__ = 'admiral0'
from .Exceptions import JsonNotValid as JsonError
import json
mod_file_name = 'mod.json'
minecraft_version_regex = r'^\d+\.\d+(\.\d+)?$'
url_regex = r'^https?:.*$'
def validate(entities, json, obj):
    """Validate dict `json` against the schema `entities`; return error strings.

    Each schema entry maps a key to {'type', 'required', 'validate'} where
    'validate' is a callable (value, obj) -> list of error strings.
    """
    assert type(entities) is dict
    assert type(json) is dict
    errors = []
    for key, spec in entities.items():
        if key not in json.keys():
            if spec['required']:
                errors.append('Key ' + key + ' is required.')
            continue
        value = json[key]
        if type(value) is spec['type']:
            for err in spec['validate'](value, obj):
                assert type(err) is str
                errors.append(err)
        else:
            errors.append('Key ' + key + ' should be of type ' + str(spec['type']) + ' instead of '
                          + str(type(value)))
    return errors
def read_json(file):
    """Parse `file` as JSON and return the loaded object.

    :raises JsonError: when the file content is not valid JSON
    """
    try:
        with open(file) as handle:
            return json.load(handle)
    except ValueError:
        raise JsonError(file, ['Cannot parse. Is it a valid JSON file?'])
"repo_name": "admiral0/AntaniRepos",
"path": "antanirepos/Common.py",
"copies": "1",
"size": "1115",
"license": "bsd-2-clause",
"hash": 8982802856973529000,
"line_mean": 28.3684210526,
"line_max": 101,
"alpha_frac": 0.5264573991,
"autogenerated": false,
"ratio": 3.8054607508532423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4831918149953242,
"avg_score": null,
"num_lines": null
} |
__author__ = 'admiral0'
from . import *
from .Exceptions import JsonNotValid
import argparse
import os.path as path
def is_mod_repo(x):
    """argparse type-checker: return `x` when it is a directory, else raise."""
    if not path.isdir(x):
        raise argparse.ArgumentTypeError(x + ' is not a Directory')
    return x
def validate(args):
    """CLI action: validate the mod repo at args.mod_repo, printing every
    invalid JSON found (repository meta and each mod's manifest)."""
    try:
        repo = ModRepository(args.mod_repo)
        for mod_path in repo.list_mods().values():
            try:
                Mod(mod_path)
            except JsonNotValid as err:
                print(str(err))
    except JsonNotValid as err:
        print(str(err))
def list_mods(args):
    """CLI action: print each cached mod slug followed by its version list."""
    manager = ModManager(args.mod_repo)
    for mod in manager.mods.values():
        print(mod.slug + ' ')
        print(','.join(mod.data['versions'].keys()))
def pack(args):
    """CLI action placeholder: currently only validates the repository JSON."""
    try:
        ModRepository(args.mod_repo)
    except JsonNotValid as err:
        print(str(err))
# Dispatch table mapping CLI action names to their handler functions.
actions = {
    'mods': validate,
    'ls-mods': list_mods,
    'pack': pack,
    'list-pack': pack
}
# Command-line interface: `action` selects a handler from `actions`,
# `mod_repo` is the repository path (checked to be a directory).
parser = argparse.ArgumentParser(description='TechnicAntani ModRepo Tools')
parser.add_argument('action', metavar='action', type=str, default='mods', choices=actions.keys(),
                    help='Action to perform. One of ' + ','.join(actions.keys()))
parser.add_argument('mod_repo', metavar='path', type=is_mod_repo, help='The path to be inspected',
                    default='.')  # , nargs='+')
parser.add_argument('-m', dest='ModPath', type=is_mod_repo, help='Mod Repo parameter. Used in pack actions')
def init():
    """CLI entry point: parse arguments and dispatch to the selected action."""
    parsed = parser.parse_args()
    handler = actions[parsed.action]
    handler(parsed)
"repo_name": "admiral0/AntaniRepos",
"path": "antanirepos/Util.py",
"copies": "1",
"size": "1509",
"license": "bsd-2-clause",
"hash": -669733687798787700,
"line_mean": 24.593220339,
"line_max": 108,
"alpha_frac": 0.6037110669,
"autogenerated": false,
"ratio": 3.5011600928074245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46048711597074243,
"avg_score": null,
"num_lines": null
} |
__author__ = 'admiral0'
from os import path
import re
from .Exceptions import RepositoryDirectoryDoesNotExist
from .Exceptions import JsonNotValid
from .Exceptions import RepositoryDoesNotHaveMetaJson
from .Exceptions import ModDoesNotExistInRepo, ModVersionDoesNotExistInRepo
from .Exceptions import BranchDoesNotExist, TagDoesNotExist
from .ModRepository import read_json
from .ModRepository import ModManager
from .Common import minecraft_version_regex, url_regex, validate
import pygit2
def validate_mods(x, m):
    """Validate the 'mods' mapping: every key and value must be a string.

    Returns the list of error messages (empty when valid). Bug fix: the
    original built the list but never returned it, so the schema validator
    that iterates over this function's result received None.
    """
    errors = []
    for key in x:
        if type(key) is not str:
            errors.append('Key ' + str(key) + ' must be a string in "mods"')
        if type(x[key]) is not str:
            errors.append('Value' + str(x[key]) + ' must be a string in "mods"')
    return errors
class PackRepository:
    """A modpack repository described by a modpack.json manifest.

    When a ModManager is supplied, declared mods and versions are also
    cross-checked against that mod repository.
    """

    _modpack_file = 'modpack.json'

    # Declarative schema for modpack.json, consumed by Common.validate().
    _entities = {
        'description': {
            'required': True,
            'type': str,
            'validate': lambda x, m: []
        },
        'forgever': {
            'required': True,
            'type': str,
            'validate':
            lambda x, m: [] if re.match(r'^\d+\.\d+\.\d+\.\d+$', x) else [x + ' is not a valid forge version']
        },
        'mcversion': {
            'required': True,
            'type': str,
            'validate':
            lambda x, m: [] if re.match(minecraft_version_regex, x) else [x + ' is not a valid minecraft version']
        },
        'url': {
            'required': True,
            'type': str,
            'validate':
            lambda x, m: [] if re.match(url_regex, x) else [x + ' is not a valid URL']
        },
        'version': {
            'required': True,
            'type': str,
            'validate':
            lambda x, m: [] if re.match(r'^[0-9\.\-_a-z]+$', x) else [x + ' is not a valid version']
        },
        'mods': {
            'required': True,
            'type': dict,
            'validate': validate_mods
        }
    }

    def __init__(self, repopath, modrepo=None):
        """Load modpack.json from `repopath`; optionally cross-check `modrepo`.

        :raises RepositoryDirectoryDoesNotExist: when `repopath` is missing
        :raises RepositoryDoesNotHaveMetaJson: when modpack.json is missing
        :raises JsonNotValid: on schema or cross-reference errors
        """
        if not path.isdir(repopath):
            raise RepositoryDirectoryDoesNotExist(repopath)
        self.json = path.join(repopath, self._modpack_file)
        if not path.isfile(self.json):
            raise RepositoryDoesNotHaveMetaJson(self.json)
        # TODO Configless and default assets
        self.repo_dir = repopath
        self.data = read_json(self.json)
        self.validate()
        self.modrepo = modrepo
        if modrepo is not None:
            self.validate_semantic()

    def validate(self):
        """Schema-validate self.data; raise JsonNotValid listing all errors."""
        assert type(self.data) is dict
        err = validate(self._entities, self.data, self)
        if len(err) > 0:
            raise JsonNotValid(self.json, err)

    def validate_semantic(self):
        """Check every declared mod/version exists in the attached ModManager."""
        assert type(self.modrepo) is ModManager
        errors = []
        for name, version in self.data['mods'].items():
            try:
                m = self.modrepo.get_mod(name)
                m.get_version(version)
            except ModDoesNotExistInRepo:
                errors.append('Mod ' + name + ' does not exist in repo')
            except ModVersionDoesNotExistInRepo:
                errors.append('Version ' + version + ' of mod ' + name + ' is not in the repo.')
        if len(errors) > 0:
            raise JsonNotValid(self.json, errors)
| {
"repo_name": "admiral0/AntaniRepos",
"path": "antanirepos/PackRepository.py",
"copies": "1",
"size": "3328",
"license": "bsd-2-clause",
"hash": -3360804042245640700,
"line_mean": 33.6666666667,
"line_max": 118,
"alpha_frac": 0.5522836538,
"autogenerated": false,
"ratio": 3.9199057714958774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49721894252958776,
"avg_score": null,
"num_lines": null
} |
__author__ = 'admiral0'
class RepositoryDirectoryDoesNotExist(Exception):
    """Raised when the configured mod repository directory is absent."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return 'The mod repository does not exist. Missing directory:{}'.format(self.path)
class RepositoryDoesNotHaveMetaJson(Exception):
    """Raised when a repository directory lacks its meta.json manifest."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return "{} doesn't exist.".format(self.path)
class JsonNotValid(Exception):
    """Raised when a JSON file parses but fails validation; carries all reasons."""

    def __init__(self, file, errors):
        assert type(errors) is list
        self.errors = errors
        self.file = file

    def __str__(self):
        lines = ['Cannot parse ' + self.file + ". Reasons:\n"]
        for reason in self.errors:
            lines.append("\t" + reason + "\n")
        return ''.join(lines)
class ModDoesNotExist(Exception):
    """Raised when a mod's directory is absent on disk."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return "Mod directory doesn't exist:{}".format(self.path)
class ModJsonDoesNotExist(Exception):
    """Raised when a mod directory lacks its mod.json manifest."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return "Mod.json doesn't exist:{}".format(self.path)
class ModDoesNotExistInRepo(Exception):
    """Raised when a requested mod name is unknown to the repository."""

    def __init__(self, mod):
        self.name = mod

    def __str__(self):
        return 'Mod {} has not been found in mod repository.'.format(self.name)
class ModVersionDoesNotExistInRepo(Exception):
    """Raised when a mod exists but the requested version is not declared."""

    def __init__(self, mod, ver):
        self.name = mod
        self.ver = ver

    def __str__(self):
        return 'Version {} of mod {} has not been found in mod repository.'.format(self.ver, self.name)
class BranchDoesNotExist(Exception):
    """Raised when a named branch is missing from the pack repository."""

    def __init__(self, br):
        self.name = br

    def __str__(self):
        return 'Branch {} has not been found in pack repository.'.format(self.name)
class TagDoesNotExist(Exception):
    """Raised when a named tag is missing from the pack repository."""

    def __init__(self, br):
        self.name = br

    def __str__(self):
        # Bug fix: the message was copy-pasted from BranchDoesNotExist and
        # wrongly said "Branch"; this exception is about tags.
        return 'Tag ' + self.name + ' has not been found in pack repository.'
class RefHasErrors(Exception):
    """Raised when a ref aggregates one or more underlying exceptions."""

    def __init__(self, ref, exceptions):
        self.name = ref
        self.exc = exceptions

    def __str__(self):
        # Bug fix: the original did `str_exc += str_exc(e)`, calling the
        # accumulator *string* as a function, which raised TypeError the
        # moment the message was rendered. Append str(e) instead.
        parts = [self.name + ' has exceptions:']
        for e in self.exc:
            parts.append(str(e))
        return ''.join(parts)
"repo_name": "admiral0/AntaniRepos",
"path": "antanirepos/Exceptions.py",
"copies": "1",
"size": "2222",
"license": "bsd-2-clause",
"hash": 168683478481213950,
"line_mean": 23.4285714286,
"line_max": 104,
"alpha_frac": 0.5814581458,
"autogenerated": false,
"ratio": 3.8442906574394464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49257488032394464,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adnan Siddiqi<kadnanATgmail.com>'
import os
import json
def get_json(jsondata):
    """Parse a JSON string; return the resulting object, or None when invalid.

    Bug fix: the Python-2-only `except ValueError, e` syntax is a SyntaxError
    on Python 3; the bound exception was unused anyway, so it is dropped,
    keeping the function valid on both Python 2 and 3.
    """
    try:
        return json.loads(jsondata)
    except ValueError:
        return None
def generate_manifest_text(json_dict):
    """Render a Chrome extension manifest.json (manifest v2) from app.json data.

    `json_dict` must contain 'name', 'description' and 'contentscript_matches';
    'contentscript_file' and 'backgroundscript_file' are optional and add the
    corresponding manifest sections when present.
    """
    parts = [
        '{\n',
        '"manifest_version": 2,\n',
        '"name" : "' + json_dict['name'] + '",\n',
        '"description" : "' + json_dict['description'] + '",\n',
        '"version" : "1.0",\n',
        '"content_scripts" : [{\n',
        '\t"matches" : ["' + json_dict['contentscript_matches'] + '"],\n',
    ]
    if 'contentscript_file' in json_dict:
        parts.append('\t"js" : ["' + json_dict['contentscript_file'] + '"],\n')
    parts.append('\t"run_at": "document_end"\n')
    parts.append('}],\n')
    if 'backgroundscript_file' in json_dict:
        parts.append('"background":{\n')
        parts.append('\t"scripts": ["' + json_dict['backgroundscript_file'] + '"]\n')
        parts.append('},\n')
    parts.append('"browser_action": {\n')
    parts.append('\t"default_title": "' + json_dict['name'] + '"\n')
    # The default_icon entry was intentionally disabled in the original:
    # content += '\t"default_icon": "icon16.png"\n'
    parts.append('\t}\n')
    parts.append('}')
    return ''.join(parts)
def read_app_json():
    """Read app.json from the current directory and return its content,
    flattened to a single line (newlines removed).

    Returns '' when the file cannot be read (the error is printed).
    Bug fixes: the Python-2-only `except Exception, ex` syntax, and the file
    handle that was never closed — now managed by a `with` block.
    """
    jsoncontent = ''
    try:
        with open('app.json', 'r') as handle:
            for line in handle:
                jsoncontent += line.replace('\n', '')
    except Exception as ex:
        print(str(ex))
    return jsoncontent
#check if app.json exist
def process():
    """Generate a Chrome extension skeleton from ./app.json.

    Reads app.json, validates required keys ('name', 'description'), then
    creates a directory named after the extension containing manifest.json
    plus empty content/background script files. Prints progress or error
    messages; returns nothing.

    Bug fixes versus the original:
    - the background script file was only created when a content script was
      ALSO configured (the second guard re-tested `contentscript_file`), and
      `open('')` crashed when only a content script existed;
    - Python-2-only `except Exception, e` syntax replaced with `as e`;
    - manifest.json is now written via `with`, guaranteeing the handle closes.
    """
    jsontext = read_app_json()
    if jsontext == '':
        return
    jsondict = get_json(jsontext)
    if jsondict is None:
        return
    if 'name' not in jsondict:
        print('missing "name" key in app.json')
        return
    if 'description' not in jsondict:
        print('missing "description" key in app.json')
        return
    name = jsondict['name'].replace(' ', '_')
    contentscript_file = jsondict.get('contentscript_file', '')
    backgroundscript_file = jsondict.get('backgroundscript_file', '')
    manifest = generate_manifest_text(jsondict)
    if manifest == '':
        return
    try:
        if os.path.exists(name):
            print('Directory already exist')
            return
        os.makedirs(name)
        # NOTE: leaves the process cwd inside the new directory, as before.
        os.chdir(name)
        with open('manifest.json', 'w') as manifest_file:
            manifest_file.write(manifest)
        if contentscript_file != '':
            open(contentscript_file, 'w').close()
        if backgroundscript_file != '':
            open(backgroundscript_file, 'w').close()
        print('Process finished successfully. You can now visit directory "'+name+'" and check required extension files.')
    except Exception as e:
        print(str(e))
# Script entry point: build the extension skeleton described by ./app.json.
if __name__ == "__main__":
    process()
| {
"repo_name": "kadnan/extGen",
"path": "extgen.py",
"copies": "1",
"size": "3399",
"license": "mit",
"hash": 8086151418734336000,
"line_mean": 32.6534653465,
"line_max": 142,
"alpha_frac": 0.496616652,
"autogenerated": false,
"ratio": 4.175675675675675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5172292327675675,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ad'
from abc import ABCMeta
from collections import OrderedDict
import importhelpers
class BaseField(object):
    """Abstract base for all field descriptors used by the parser models."""
    __metaclass__ = ABCMeta

    def __init__(self, required=False, field_name=None):
        super(BaseField, self).__init__()
        self.required = required      # reject None values when True
        self.field_name = field_name  # optional explicit JSON key name

    def validate(self, value):
        """
        Validate `value`

        :raise ValueError: in case of validation errors
        """
        if self.required and value is None:
            raise ValueError("missed value for required field")

    def from_python(self, value):
        """Serialize `value`; the base implementation is the identity."""
        return value

    def to_python(self, value):
        """Deserialize JSON `value` into its python representation, validating it."""
        self.validate(value)
        return value
class String(BaseField):
"""
Class represent JSON string type
>>> some_field = String(max_len=1000)
>>> some_field.to_python("Some thing") == "Some thing"
"""
def __init__(self, max_len=None, **kwargs):
"""
Constructor
:param max_len: Restrict maximum length of the field
:type max_len: int
:param force_conversion: Set to true in order to ignore real type of a value and convert it into unicode.
We have to have this parameter b/c YAML format doesn't not have schema and string '17.30' is always
translated to float 17.30
:type force_conversion: bool
"""
super(String, self).__init__(**kwargs)
self._max_len = max_len
def validate(self, value):
super(String, self).validate(value)
if value is None:
return
if not isinstance(value, basestring):
raise ValueError("{!r} expected to be string but got {}".format(value, type(value).__name__))
if self._max_len is not None:
value_len = len(value)
if value_len > self._max_len:
raise ValueError("length of field is exceeds maximum allowed: {} but expect no more then {}".format(
value_len, self._max_len))
def to_python(self, value):
"""
Convert value to python representation
:param value: a string to process
:type value: basestring
:return: string
:rtype: basestring
"""
self.validate(value)
return value
class Bool(BaseField):
"""
Class represent JSON bool type
>>> some_field = Bool()
>>> some_field.to_python(True) == True
"""
def __init__(self, **kwargs):
super(Bool, self).__init__(**kwargs)
def validate(self, value):
"""
Validate value to match rules
:param value: value to validate
:type value: bool
:return: None
"""
super(Bool, self).validate(value)
if value is None:
return
if not isinstance(value, bool):
raise ValueError("{!r} expected to be bool".format(value))
def to_python(self, value):
"""
Convert value to python representation
:param value: a string to process
:type value: bool
:return: None
"""
self.validate(value)
return value
class Int(BaseField):
"""
Class represent JSON integer type
>>> some_field = Int()
>>> some_field.to_python(1) == 1
"""
def __init__(self, **kwargs):
super(Int, self).__init__(**kwargs)
def validate(self, value):
"""
Validate value to match rules
:param value: value to validate
:type value: int or long
:return: None
"""
super(Int, self).validate(value)
if value is None:
return
if not isinstance(value, (int, long)):
raise ValueError("{!r} expected to be integer".format(value))
def to_python(self, value):
"""
Convert value to python representation
:param value: a string to process
:type value: int or long
:return: int or long
:rtype: int or long
"""
self.validate(value)
return value
class Float(BaseField):
"""
Class represent JSON integer type
>>> some_field = Float()
>>> some_field.to_python(1.0) == 1.0
"""
def __init__(self, **kwargs):
super(Float, self).__init__(**kwargs)
def validate(self, value):
"""
Validate value to match rules
:param value: value to validate
:type value: float
:return: None
"""
super(Float, self).validate(value)
if value is None:
return
if not isinstance(value, float):
raise ValueError("{!r} expected to be integer but got {}".format(value, type(value).__name__))
def to_python(self, value):
"""
Convert value to python representation
:param value: a string to process
:type value: float
:return: float
:rtype: float
"""
self.validate(value)
return value
class List(BaseField):
    """
    Class represent JSON list type

    >>> some_field = List(String(max_len=100))
    >>> some_field.to_python(["Some string"]) == ["Some string"]
    """

    def __init__(self, element_type, min_len=None, max_len=None, **kwargs):
        """
        Constructor for List field type

        :param element_type: field describing the list elements
        :type element_type: instance of BaseField
        :param min_len: minimum allowed number of elements
        :param max_len: maximum allowed number of elements
        """
        super(List, self).__init__(**kwargs)
        if not isinstance(element_type, BaseField):
            raise ValueError(
                "Invalid type of 'element_type': expected to be instance of subclass of BaseField but it is {!r}".format(
                    element_type))
        self._min_len = min_len
        self._max_len = max_len
        self._element_type = element_type

    def validate(self, value):
        """Raise ValueError unless `value` is None or a list within the length bounds."""
        super(List, self).validate(value)
        if value is None:
            return
        if not isinstance(value, list):
            raise ValueError("{!r} expected to be list".format(value))
        count = len(value)
        if self._max_len is not None and count > self._max_len:
            raise ValueError("length of field is exceeds maximum allowed: {} but expect no more then {}".format(
                count, self._max_len))
        if self._min_len is not None and count < self._min_len:
            # NOTE(review): the original interpolates _max_len into this
            # message — presumably a copy-paste slip; preserved verbatim so
            # the rendered text stays identical.
            raise ValueError("length of field is less then minimum allowed: {} but expect no less then {}".format(
                count, self._max_len))

    def to_python(self, value):
        """Convert each element via the element field, then validate the list."""
        if value is not None:
            value = [self._element_type.to_python(item) for item in value]
        self.validate(value)
        return value
class Map(BaseField):
    """
    Class represent JSON object type

    >>> some_field = Map(String, List(String))
    >>> some_field.to_python({"f1": ["val"]}) == {"f1": ["val"]}
    """

    def __init__(self, key_type, value_type, **kwargs):
        """
        Constructor for Map field type

        :param key_type: field describing the mapping keys
        :type key_type: instance of BaseField
        :param value_type: field describing the mapping values
        :type value_type: instance of BaseField
        """
        super(Map, self).__init__(**kwargs)
        if not isinstance(key_type, BaseField):
            raise ValueError(
                "Invalid type of 'key_type': expected to be instance of subclass of BaseField but it is {!r}".format(
                    key_type))
        if not isinstance(value_type, BaseField):
            raise ValueError(
                "Invalid type of 'value_type': expected to be instance of subclass of BaseField but it is {!r}".format(
                    value_type))
        self._value_type = value_type
        self._key_type = key_type

    def validate(self, value):
        """Raise ValueError unless `value` is None or a dict whose keys and
        values each pass their respective field validators."""
        super(Map, self).validate(value)
        if value is None:
            return
        if not isinstance(value, dict):
            raise ValueError("{!r} expected to be dict".format(value))
        for key, val in value.iteritems():
            self._key_type.validate(key)
            self._value_type.validate(val)

    def to_python(self, value):
        """Convert `value` (a dict, or a list of dicts merged in order) into
        an OrderedDict of converted keys/values, then validate the result."""
        if value is not None:
            # At this point we could get list of dict or dict
            if isinstance(value, list):
                # Merge each dict in the list into one OrderedDict, later
                # entries overwriting earlier ones for duplicate keys.
                _value = OrderedDict()
                for item in value:
                    if not isinstance(item, (dict, OrderedDict)):
                        raise ValueError("{!r} expected to be dict or list of dict".format(value))
                    _value.update(
                        OrderedDict([
                            (self._key_type.to_python(key), self._value_type.to_python(val))
                            for key, val in item.items()])
                    )
                value = _value
            else:
                _value = OrderedDict()
                for key, val in value.iteritems():
                    _value[self._key_type.to_python(key)] = self._value_type.to_python(val)
                value = _value
        self.validate(value)
        return value
class Reference(BaseField):
    """
    Field holding a reference to another model class.

    >>> from model import Model
    >>> class RamlDocumentation(Model):
    >>>    content = String()
    >>>    title = String()
    >>> some_field = List(Reference(RamlDocumentation))
    >>> doc = RamlDocumentation(content="Test content", title="Title")
    >>> some_field.to_python([doc]) == [doc]
    """

    def __init__(self, ref_class, **kwargs):
        """
        Constructor for Reference

        :param ref_class: model class (or dotted import string resolving to one)
        :type ref_class: class of pyraml.model.Model or str
        :param kwargs: additional attributes for BaseField constructor
        :type kwargs: dict
        """
        super(Reference, self).__init__(**kwargs)
        self.ref_class = ref_class

    def _lazy_import(self):
        """
        If self.ref_class is a dotted string like "pyraml.entities.RamlTrait",
        import the class and replace the string with the class object.

        :return: None
        """
        if isinstance(self.ref_class, basestring):
            self.ref_class = importhelpers.dotted(self.ref_class)

    def validate(self, value):
        """Raise ValueError unless `value` is None or an instance of ref_class."""
        self._lazy_import()
        super(Reference, self).validate(value)
        if value is None:
            return
        if not isinstance(value, self.ref_class):
            raise ValueError("{!r} expected to be {}".format(value, self.ref_class))

    def to_python(self, value):
        """Convert `value` into a ref_class instance.

        Accepts an existing instance (returned as-is), a dict (deserialized
        via ref_class.from_json) or None (instantiates an empty ref_class);
        anything else raises ValueError.
        """
        self._lazy_import()
        if isinstance(value, self.ref_class):
            # Value is already instance of ref_class, don't need to convert it
            pass
        elif isinstance(value, dict):
            # Value is JSON object, convert it to `ref_class`
            value = self.ref_class.from_json(value)
        elif value is None:
            # Value empty, just instantiate empty `ref_class`
            value = self.ref_class()
        # elif isinstance(value, list):
        #     # Value maybe is list of `ref_class`
        #     value = []
        else:
            raise ValueError("{!r} expected to be dict".format(value))
        self.validate(value)
        return value
class Or(BaseField):
    """
    Field accepting a value that matches any one of several variant fields.

    >>> some_field = Or(String(),Float())
    >>> some_field.to_python("1") == "1"
    >>> some_field.to_python(2.1) == 2.1
    """

    def __init__(self, *args, **kwargs):
        """
        Constructor for Or

        :param args: list of variant fields (at least two)
        :type args: list of BaseField
        :param kwargs: additional attributes for BaseField constructor
        :type kwargs: dict
        """
        super(Or, self).__init__(**kwargs)
        self.variants = []
        for candidate in args:
            if not isinstance(candidate, BaseField):
                raise ValueError("Invalid argument supplied {!r}: expected list of BaseField instances".format(candidate))
            self.variants.append(candidate)
        if len(self.variants) < 2:
            raise ValueError("Required at least 2 variants but got only {}".format(len(self.variants)))

    def validate(self, value):
        """
        Raise ValueError unless `value` is None or accepted by at least one variant.
        """
        super(Or, self).validate(value)
        if value is None:
            return
        accepted = False
        for variant in self.variants:
            try:
                variant.validate(value)
                accepted = True
                break
            except ValueError:
                continue
        if not accepted:
            raise ValueError("{!r} expected to be one of: {}".format(
                value,
                ",".join([type(v).__name__ for v in self.variants])))

    def to_python(self, value):
        """
        Convert `value` using the first variant that accepts it and return
        the converted result.
        """
        for variant in self.variants:
            try:
                converted = variant.to_python(value)
                variant.validate(converted)
                return converted
            except ValueError:
                continue
        # No variant accepted `value`: delegate to validate(), which raises
        # ValueError (or passes silently for an optional None, returning None).
        self.validate(value)
"repo_name": "mpetyx/pyapi",
"path": "pyapi/libraries/pyraml_parser_master/pyraml/fields.py",
"copies": "1",
"size": "15212",
"license": "mit",
"hash": 4171094303208123400,
"line_mean": 26.3615107914,
"line_max": 121,
"alpha_frac": 0.5474625296,
"autogenerated": false,
"ratio": 4.482027106658809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00039912345089200416,
"num_lines": 556
} |
__author__ = 'ad'
from collections import OrderedDict
from pyapi.libraries.pyraml_parser_master.pyraml.model import Model
from pyapi.libraries.pyraml_parser_master.pyraml.fields import List, String, Reference, Map, Or, Float
def test_model_structure_inheritance():
    """A subclass model inherits the parent's ``_structure`` fields."""
    class Thing(Model):
        inner = List(String())
    class SubThing(Thing):
        external = List(String())
    thing = SubThing()
    # NOTE(review): comparing keys() against a fixed-order list relies on
    # dict ordering, which is not guaranteed on Python 2 -- confirm.
    assert thing._structure.keys() == ['external', 'inner'], thing._structure.keys()
    assert all(isinstance(a, List) for a in thing._structure.values()), thing._structure.values()
def test_model_standard_constructor_without_values():
    """A model built with no arguments leaves every field as None."""
    class Thing(Model):
        inner = String()
    instance = Thing()
    assert instance.inner is None, instance.inner
def test_model_constructor_with_keyword_arguments():
    """Keyword arguments passed to the constructor populate the fields."""
    class Thing(Model):
        inner = String()
    instance = Thing(inner="some string")
    assert instance.inner == "some string", instance.inner
def test_model_with_reference():
    """A Reference field stores the referenced model instance as-is."""
    class Thing(Model):
        title = String()
    class TopThing(Model):
        things = List(Reference(Thing))
    thing = Thing(title="some string")
    top_thing = TopThing(things=[thing])
    assert len(top_thing.things) == 1, top_thing.things
    # Identity check: the very same instance must be kept, not a copy
    assert top_thing.things[0] is thing, top_thing.things
def test_model_with_map():
    """A Map field converts a plain dict into an OrderedDict of values."""
    class Thing(Model):
        title = String()
    class MapThing(Model):
        map = Map(String(), Reference(Thing))
    thing = Thing(title="some string")
    map_thing = MapThing(map={"t1": thing})
    assert isinstance(map_thing.map, OrderedDict), map_thing.map
    assert len(map_thing.map) == 1, map_thing.map
    # Identity check: the referenced instance must be kept, not copied
    assert map_thing.map["t1"] is thing, map_thing.map
def test_model_with_reference_and_aliased_field():
    """``field_name`` aliases (e.g. ``id``) are honored when decoding JSON."""
    class Thing(Model):
        id_ = String(field_name='id')
    class RefThing(Model):
        ref = Reference(Thing)
    res = RefThing.ref.to_python({"id": "some field"})
    assert isinstance(res, Thing), res
    assert res.id_ == "some field", res
def test_model_with_or_successfully():
    """An Or field accepts values matching any of its variant fields."""
    class Thing(Model):
        a = Or(String(), Float())
    res = Thing.a.to_python("a")
    assert res == "a", res
    res = Thing.a.to_python(1.1)
    # BUG FIX: extraction junk (`| {`) was fused onto this final line;
    # removed so the module is valid Python again.
    assert res == 1.1, res
"repo_name": "mpetyx/pyapi",
"path": "tests/unit/raml/tests/test_model.py",
"copies": "1",
"size": "2243",
"license": "mit",
"hash": -1278097456890495000,
"line_mean": 25.4,
"line_max": 102,
"alpha_frac": 0.6495764601,
"autogenerated": false,
"ratio": 3.5101721439749607,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9655622776790218,
"avg_score": 0.0008251654569484956,
"num_lines": 85
} |
__author__ = 'ad'
from fields import BaseField, String, List
class ValidationError(StandardError):
    """
    Raised when one or more model fields fail validation.

    ``errors`` maps field name -> unicode description of the failure.
    """
    def __init__(self, validation_errors):
        # BUG FIX: the base exception __init__ was never called, so
        # `.args` stayed empty and str(exc) produced an empty string.
        super(ValidationError, self).__init__(validation_errors)
        self.errors = validation_errors
    def __repr__(self):
        return u"ValidationError: " + repr(self.errors)
class BaseModel(object):
    """Marker base class used by the Schema metaclass to detect models."""
    pass
class Schema(type):
    """
    Metaclass collecting all BaseField class attributes of a model (and of
    its BaseModel ancestors) into a ``_structure`` dict on the new class.
    """
    def __new__(mcs, name, bases, attrs):
        # Initialize special `_structure` class attribute which contains info about all model fields'
        _structure = {_name: _type for _name, _type in attrs.items() if isinstance(_type, BaseField)}
        # Merge structures of parent classes into the structure of new model class
        for base in bases:
            # NOTE(review): base.__mro__[0] is simply `base` itself -- the
            # "Multi inheritance is evil" shortcut; confirm intended.
            parent = base.__mro__[0] # Multi inheritance is evil )
            if issubclass(parent, BaseModel) and not parent is BaseModel:
                for field_name, field_type in parent._structure.items():
                    # Child fields win over inherited ones with the same name
                    if field_name not in _structure:
                        _structure[field_name] = field_type
        # Propagate field name from structure to the field, so we can access RAML field name
        for field_name, field_obj in _structure.iteritems():
            if field_obj.field_name is None:
                field_obj.field_name = field_name
        attrs['_structure'] = _structure
        return super(Schema, mcs).__new__(mcs, name, bases, attrs)
class Model(BaseModel):
    """
    Base class for models
    >>> class Thing(Model):
    ...     field1 = String(max_len=100)
    ...     field2 = List(String(max_len=200), required=False)
    >>> t = Thing(field2=[u"field2 value"])
    >>> t.validate()
    Traceback (most recent call last):
    ...
    ValidationError: { 'field1': 'missed value for required field' }
    >>> t.field1 = u"field1 value"
    >>> t.validate()
    >>> t
    { 'field1': 'field1 value', 'field2': [ 'field2 value' ] }
    """
    __metaclass__ = Schema
    def __init__(self, **kwargs):
        # NOTE(review): super(BaseModel, ...) skips BaseModel itself in the
        # MRO; usually this would be super(Model, self) -- confirm intended.
        super(BaseModel, self).__init__()
        # Propagate an object attributes from field names
        for field_name, field_type in self.__class__._structure.items():
            if field_name in kwargs:
                setattr(self, field_name, field_type.to_python(kwargs[field_name]))
            else:
                # Missing fields default to None; validate() reports them
                setattr(self, field_name, None)
    def __repr__(self):
        # Render the model as a dict of its field values
        rv = {}
        for field_name in self.__class__._structure.keys():
            rv[field_name] = getattr(self, field_name, None)
        return rv.__repr__()
    def validate(self):
        """Validate every field; raise ValidationError listing all failures."""
        errors = {}
        for field_name, field_type in self.__class__._structure.items():
            # Validate and process a field of JSON object
            try:
                field_type.validate(getattr(self, field_name))
            except ValueError as e:
                errors[field_name] = unicode(e)
        if errors:
            raise ValidationError(errors)
    @classmethod
    def from_json(cls, json_object):
        """
        Initialize a model from JSON object
        :param json_object: JSON object to initialize a model
        :type json_object: dict
        :return: instance of BaseModel
        :rtype: instance of BaseModel
        :raise ValidationError: if any field fails conversion
        """
        rv = cls()
        errors = {}
        for model_field_name, field_type in cls._structure.items():
            # Validate and process a field of JSON object
            try:
                value = field_type.to_python(json_object.get(model_field_name, None))
                setattr(rv, model_field_name, value)
            except ValueError as e:
                errors[model_field_name] = unicode(e)
        # Look for aliased attributes (fields whose RAML name differs from
        # the Python attribute name, e.g. `id` -> `id_`)
        for field_name, field_value in json_object.items():
            if not field_name in cls._structure:
                for model_field_name, field_type in cls._structure.items():
                    if field_type.field_name == field_name:
                        try:
                            value = field_type.to_python(field_value)
                            setattr(rv, model_field_name, value)
                        except ValueError as e:
                            errors[model_field_name] = unicode(e)
        if errors:
            raise ValidationError(errors)
        return rv
| {
"repo_name": "mpetyx/pyapi",
"path": "pyapi/libraries/pyraml_parser_master/pyraml/model.py",
"copies": "1",
"size": "4247",
"license": "mit",
"hash": -5772385761777201000,
"line_mean": 32.7063492063,
"line_max": 101,
"alpha_frac": 0.5669884624,
"autogenerated": false,
"ratio": 4.255511022044089,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5322499484444089,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ad'
from model import Model
from fields import String, Reference, Map, List, Bool, Int, Float, Or
class RamlDocumentation(Model):
    """One entry of the root-level RAML ``documentation`` list."""
    content = String()
    title = String()
class RamlSchema(Model):
    """A named schema declaration with its raw schema text and example."""
    name = String()
    type = String()
    schema = String()
    example = String()
class RamlQueryParameter(Model):
    """A query/form parameter declaration (type, constraints, defaults)."""
    name = String()
    description = String()
    example = Or(String(), Int(), Float())
    displayName = String()
    type = String()
    enum = List(Or(String(), Float(), Int()))
    pattern = String()
    minLength = Int()
    maxLength = Int()
    repeat = Bool()
    required = Bool()
    default = Or(String(), Int(), Float())
    minimum = Or(Int(), Float())
    maximum = Or(Int(), Float())
class RamlHeader(Model):
    """An HTTP header declaration."""
    type = String()
    required = Bool()
class RamlBody(Model):
    """A request/response body: schema, example, parameters and headers."""
    schema = String()
    example = String()
    notNull = Bool()
    formParameters = Map(String(), Or(Reference(RamlQueryParameter), List(Reference(RamlQueryParameter))))
    headers = Map(String(), Reference(RamlHeader))
    # Self-reference by dotted path: resolved lazily by the Reference field
    body = Map(String(), Reference("pyraml.entities.RamlBody"))
    is_ = List(String(), field_name="is")
class RamlResponse(Model):
    """A response declaration attached to an HTTP status code."""
    schema = String()
    example = String()
    notNull = Bool()
    description = String()
    headers = Map(String(), Reference(RamlHeader))
    body = Reference("pyraml.entities.RamlBody")
class RamlTrait(Model):
    """
    A reusable trait applied to methods, e.g.:

    traits:
      - secured:
          usage: Apply this to any method that needs to be secured
          description: Some requests require authentication.
          queryParameters:
            access_token:
              description: Access Token
              type: string
              example: ACCESS_TOKEN
              required: true
    """
    name = String()
    usage = String()
    description = String()
    displayName = String()
    responses = Map(Int(), Reference(RamlResponse))
    method = String()
    queryParameters = Map(String(), Reference(RamlQueryParameter))
    body = Reference(RamlBody)
    # Reference to another RamlTrait
    is_ = List(String(), field_name="is")
class RamlResourceType(Model):
    """A reusable resource type: per-HTTP-method traits plus metadata."""
    methods = Map(String(), Reference(RamlTrait))
    type = String()
    is_ = List(String(), field_name="is")
class RamlMethod(Model):
    """
    A single HTTP method declaration. Example:

    get:
         description: ...
         headers:
            ....
         queryParameters:
           ...
         body:
            text/xml: !!null
            application/json:
              schema: |
                {
                  ....
                }
         responses:
           200:
            ....
           <<:
           404:
              description: not found
    """
    # notNull marks a method that appeared in the document as !!null
    notNull = Bool()
    description = String()
    body = Map(String(), Reference(RamlBody))
    responses = Map(Int(), Reference(RamlBody))
    queryParameters = Map(String(), Reference(RamlQueryParameter))
class RamlResource(Model):
    """A resource node: its URI, methods and nested sub-resources."""
    displayName = String()
    description = String()
    uri = String()
    is_ = Reference(RamlTrait, field_name="is")
    type = Reference(RamlResourceType)
    # Self-references by dotted path: resolved lazily by the Reference field
    parentResource = Reference("pyraml.entities.RamlResource")
    methods = Map(String(), Reference(RamlBody))
    resources = Map(String(), Reference("pyraml.entities.RamlResource"))
class RamlRoot(Model):
    """Top-level model for a parsed RAML document."""
    raml_version = String(required=True)
    title = String()
    version = String()
    baseUri = String()
    protocols = List(String())
    mediaType = String()
    documentation = List(Reference(RamlDocumentation))
    traits = Map(String(), Reference(RamlTrait))
    resources = Map(String(), Reference(RamlResource))
    # BUG FIX: extraction junk (`| {`) was fused onto this final line;
    # removed so the module is valid Python again.
    resourceTypes = Map(String(), Reference(RamlResourceType))
"repo_name": "mpetyx/pyapi",
"path": "pyapi/libraries/pyraml_parser_master/pyraml/entities.py",
"copies": "1",
"size": "3805",
"license": "mit",
"hash": -8072963671755983000,
"line_mean": 25.4305555556,
"line_max": 106,
"alpha_frac": 0.5802890933,
"autogenerated": false,
"ratio": 4.246651785714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00017690851840686028,
"num_lines": 144
} |
__author__ = 'ad'
import contextlib
import urllib2
import mimetypes
import os.path
import urlparse
from collections import OrderedDict
import yaml
from raml_elements import ParserRamlInclude
from fields import String, Reference
from entities import RamlRoot, RamlResource, RamlMethod, RamlBody, RamlResourceType, RamlTrait
from constants import RAML_SUPPORTED_FORMAT_VERSION
# Public API of this module
__all__ = ["RamlException", "RamlNotFoundException", "RamlParseException",
           "ParseContext", "load", "parse"]
# HTTP verbs recognized when parsing resource definitions
HTTP_METHODS = ("get", "post", "put", "delete", "head")
class RamlException(StandardError):
    """Base class for all RAML parser errors."""
    pass
class RamlNotFoundException(RamlException):
    """Raised when a RAML resource or include file cannot be located."""
    pass
class RamlParseException(RamlException):
    """Raised when RAML content is syntactically or semantically invalid."""
    pass
class ParseContext(object):
    """
    Wraps a parsed YAML mapping together with the path it was loaded from,
    so that !include references can be resolved relative to it.
    """
    def __init__(self, data, relative_path):
        # data: parsed YAML mapping (may be None for !!null sections)
        self.data = data
        # relative_path: directory or URL base used to resolve includes
        self.relative_path = relative_path
    def get(self, property_name):
        """
        Extract property with name `property_name` from context
        :param property_name: property name to extract
        :type property_name: str
        :return: object
        :rtype: object or None or dict
        """
        # Handle special case with null object
        if self.data is None:
            return None
        property_value = self.data.get(property_name)
        if isinstance(property_value, ParserRamlInclude):
            _property_value, file_type = self._load_include(property_value.file_name)
            if _is_mime_type_raml(file_type):
                relative_path = _calculate_new_relative_path(self.relative_path, property_value.file_name)
                # NOTE(review): yaml.load without an explicit Loader is
                # unsafe on untrusted input -- consider yaml.safe_load.
                property_value = ParseContext(yaml.load(_property_value), relative_path)
            else:
                # Non-RAML include: return the raw file content
                property_value = _property_value
        return property_value
    def __iter__(self):
        return iter(self.data)
    def get_string_property(self, property_name, required=False):
        """Fetch a property and coerce/validate it through a String field."""
        property_value = self.get_property_with_schema(property_name, String(required=required))
        return property_value
    def get_property_with_schema(self, property_name, property_schema):
        """Fetch a property and convert it through the given field schema."""
        property_value = self.get(property_name)
        return property_schema.to_python(property_value)
    def _load_include(self, file_name):
        """
        Load RAML include from file_name.
        :param file_name: name of file to include
        :type file_name: str
        :return: 2 elements tuple: file content and file type
        :rtype: str,str
        """
        if not _is_network_resource(self.relative_path):
            full_path = os.path.join(self.relative_path, file_name)
            return _load_local_file(full_path)
        else:
            url = urlparse.urljoin(self.relative_path, file_name)
            return _load_network_resource(url)
def load(uri):
    """
    Load a RAML document from `uri` and parse it.

    :param uri: URL pointing to a RAML resource, or a path on the local
        file system
    :type uri: str
    :return: parsed document
    :rtype: pyraml.entities.RamlRoot
    """
    if _is_network_resource(uri):
        content, _mime = _load_network_resource(uri)
        base_path = _build_network_relative_path(uri)
    else:
        content, _mime = _load_local_file(uri)
        base_path = os.path.dirname(uri)
    return parse(content, base_path)
def parse(c, relative_path):
    """
    Parse RAML content.

    :param c: RAML file content, including the "#%RAML x.y" header line
    :type c: str
    :param relative_path: base path/URL used to resolve !include references
    :type relative_path: str
    :return: parsed document
    :rtype: pyraml.entities.RamlRoot
    """
    # Read RAML header
    first_line, c = c.split('\n', 1)
    raml_version = _validate_raml_header(first_line)
    context = ParseContext(yaml.load(c), relative_path)
    root = RamlRoot(raml_version=raml_version)
    root.title = context.get_string_property('title', True)
    root.baseUri = context.get_string_property('baseUri')
    root.version = context.get('version')
    root.mediaType = context.get_string_property('mediaType')
    root.documentation = context.get_property_with_schema('documentation', RamlRoot.documentation)
    root.traits = parse_traits(context, RamlRoot.traits.field_name, root.mediaType)
    root.resourceTypes = parse_resource_type(context)
    resources = OrderedDict()
    for property_name in context:
        # Resource names always start with "/"
        if property_name.startswith("/"):
            resources[property_name] = parse_resource(context, property_name, root, root.mediaType)
    # BUG FIX: was `if resources > 0` -- a dict-to-int comparison that does
    # not test emptiness (and raises TypeError on Python 3); attach
    # resources only when at least one was parsed.
    if resources:
        root.resources = resources
    return root
def parse_resource(c, property_name, parent_object, global_media_type):
    """
    Parse and extract the resource named `property_name`.

    :param c: parsing context
    :type c: ParseContext
    :param property_name: resource name to extract (starts with "/")
    :type property_name: str
    :param parent_object: RamlRoot object or RamlResource object
    :type parent_object: RamlRoot or RamlResource
    :param global_media_type: default media type for inline bodies
    :return: RamlResource or None
    :rtype: RamlResource
    """
    property_value = c.get(property_name)
    if not property_value:
        return None
    resource = RamlResource(uri=property_name)
    new_context = ParseContext(property_value, c.relative_path)
    resource.description = new_context.get_string_property("description")
    resource.displayName = new_context.get_string_property("displayName")
    if isinstance(parent_object, RamlResource):
        resource.parentResource = parent_object
    # Parse methods
    methods = OrderedDict()
    for _http_method in HTTP_METHODS:
        _method = new_context.get(_http_method)
        if _method:
            methods[_http_method] = parse_method(ParseContext(_method, c.relative_path), global_media_type)
        elif _http_method in new_context.data:
            # workaround: if _http_method is already in new_context.data than
            # it's marked as !!null
            methods[_http_method] = RamlMethod(notNull=True)
    if len(methods):
        resource.methods = methods
    # Parse nested sub-resources (keys starting with "/")
    resources = OrderedDict()
    for child_name in new_context:
        if child_name.startswith("/"):
            resources[child_name] = parse_resource(new_context, child_name, resource, global_media_type)
    # BUG FIX: was `if resources > 0` -- a dict-to-int comparison that does
    # not test emptiness (and raises TypeError on Python 3); attach
    # sub-resources only when at least one was parsed.
    if resources:
        resource.resources = resources
    return resource
def parse_resource_type(c):
    """
    Parse and extract the root-level `resourceTypes` declarations.
    :param c: ParseContext object
    :type c: ParseContext
    :return: dict of (str, RamlResourceType) or None
    :rtype: dict of (str, RamlResourceType)
    """
    json_resource_types = c.get('resourceTypes')
    if not json_resource_types:
        return None
    # We got list of dict from c.get('resourceTypes') so we need to convert it to dict
    resource_types_context = ParseContext(json_resource_types[0], c.relative_path)
    resource_types = {}
    for rtype_name in resource_types_context:
        new_c = ParseContext(resource_types_context.get(rtype_name), resource_types_context.relative_path)
        rtype_obj = RamlResourceType()
        rtype_obj.type = new_c.get_string_property("type")
        rtype_obj.is_ = new_c.get_property_with_schema("is", RamlResourceType.is_)
        # Parse methods
        methods = OrderedDict()
        for _http_method in HTTP_METHODS:
            _method = new_c.get(_http_method)
            if _method:
                # NOTE(review): each method body is read via its 'traits'
                # key as a RamlTrait reference -- confirm this matches spec.
                _method = ParseContext(_method, new_c.relative_path).get_property_with_schema('traits',
                                                                                             Reference(RamlTrait))
                methods[_http_method] = _method
            elif _http_method in new_c.data:
                # workaround: if _http_method is already in new_context.data than
                # it's marked as !!null
                _method = RamlMethod(notNull=True)
                methods[_http_method] = _method
        if len(methods):
            rtype_obj.methods = methods
        resource_types[rtype_name] = rtype_obj
    return resource_types
def parse_method(c, global_media_type):
    """
    Parse RAML method
    :param c: ParseContext object which contains RamlMethod
    :type c: ParseContext
    :param global_media_type: default media type for inline bodies
    :return: RamlMethod or None
    :rtype: RamlMethod
    """
    method = RamlMethod()
    method.description = c.get_string_property("description")
    method.body = parse_inline_body(c.get("body"), c.relative_path, global_media_type)
    parsed_responses = parse_inline_body(c.get("responses"), c.relative_path, global_media_type)
    if parsed_responses:
        new_parsed_responses = OrderedDict()
        for resp_code, parsed_data in parsed_responses.iteritems():
            if resp_code == "<<":
                # Check for default code (equivalent of wildcard "*")
                # NOTE(review): setdefault(parsed_data) inserts
                # `parsed_data` as a KEY with value None -- this looks
                # wrong; confirm the intended default-response handling.
                new_parsed_responses.setdefault(parsed_data)
            else:
                # Otherwise response code should be numeric HTTP response code
                try:
                    resp_code = int(resp_code)
                except ValueError:
                    raise RamlParseException(
                        "Expected numeric HTTP response code in responses but got {!r}".format(resp_code))
                new_parsed_responses[resp_code] = parsed_data
        method.responses = new_parsed_responses
    method.queryParameters = c.get_property_with_schema("queryParameters", RamlMethod.queryParameters)
    return method
def parse_traits(c, property_name, global_media_type):
    """
    Parse and extract RAML trait from context field with name `property_name`
    :param c: parsing context
    :type c: ParseContext
    :param property_name: resource name to extract from context
    :type property_name: str
    :param global_media_type: default media type for inline bodies
    :return: dict of (str,RamlTrait) or None
    :rtype: dict of (str,RamlTrait)
    """
    property_value = c.get(property_name)
    if not property_value:
        return None
    traits = {}
    # We got list of dict from c.get(property_name) so we need to iterate over it
    for trait_raw_value in property_value:
        traits_context = ParseContext(trait_raw_value, c.relative_path)
        for trait_name in traits_context:
            new_context = ParseContext(traits_context.get(trait_name), traits_context.relative_path)
            trait = RamlTrait()
            for field_name, field_class in RamlTrait._structure.iteritems():
                # parse string fields
                if isinstance(field_class, String):
                    setattr(trait, field_name, new_context.get_string_property(field_class.field_name))
            # NOTE(review): queryParameters and responses are read from the
            # OUTER context `c`, not from `new_context` like the other
            # fields -- confirm this is intended and not a bug.
            trait.queryParameters = c.get_property_with_schema(RamlTrait.queryParameters.field_name,
                                                               RamlTrait.queryParameters)
            trait.body = parse_body(ParseContext(new_context.get("body"), new_context.relative_path), global_media_type)
            trait.is_ = new_context.get_property_with_schema(RamlTrait.is_.field_name, RamlTrait.is_)
            trait.responses = c.get_property_with_schema(RamlTrait.responses.field_name, RamlTrait.responses)
            traits[trait_name] = trait
    return traits
def parse_map_of_entities(parser, context, relative_path, parent_resource):
    """
    Apply `parser` to every non-empty value of a mapping.

    :param parser: function which accepts 3 arguments: data, relative_path
        and parent_resource, where data is the raw entity content
    :type parser: callable
    :param context: mapping of name -> raw entity data (may be None/empty)
    :type context: dict
    :param relative_path: base path forwarded to `parser`
    :param parent_resource: parent object forwarded to `parser`
    :return: mapping of name -> parsed entity; empty values become
        RamlMethod(notNull=True) placeholders (they were marked !!null)
    :rtype: OrderedDict
    """
    parsed = OrderedDict()
    if not context:
        return parsed
    for name, raw_value in context.items():
        if raw_value:
            parsed[name] = parser(raw_value, relative_path, parent_resource)
        else:
            # workaround: entity present but marked as !!null in the document
            parsed[name] = RamlMethod(notNull=True)
    return parsed
def parse_body(c, global_media_type):
    """
    Parse a body/trait section into a RamlBody.

    :param c: ParseContext object which contains RamlBody
    :type c: ParseContext
    :param global_media_type: default media type for nested inline bodies
    :return: RamlBody or None
    :rtype: RamlBody
    """
    if c.data is None:
        return None
    body = RamlBody()
    # BUG FIX: `example` was assigned twice with the identical expression;
    # the redundant duplicate assignment was removed.
    body.example = c.get_string_property("example")
    body.body = parse_inline_body(c.get("body"), c.relative_path, global_media_type)
    body.schema = c.get_string_property("schema")
    body.formParameters = c.get_property_with_schema("formParameters", RamlBody.formParameters)
    body.headers = c.get_property_with_schema("headers", RamlBody.headers)
    return body
def parse_inline_body(data, relative_path, global_media_type):
    """
    Parse not null `body` inline property
    :param data: value of `body` property
    :type data: dict
    :param relative_path: relative path on filesystem to a RAML resource for handling `include` tags
    :type relative_path: str
    :param global_media_type: media type used as the key when `data` is a
        direct RamlBody value rather than a mime-type map
    :return: OrderedDict of RamlBody or None
    :rtype: OrderedDict of RamlBody
    """
    if data is None:
        return None
    res = OrderedDict()
    # Data could be map of mime_type => body, http_code => body but also it could be direct
    # value of RamlBody with global mediaType (grrr... so consistent)
    for field_name in RamlBody._structure:
        if field_name in data:
            # This is direct value of RamlBody
            parsed_data = parse_body(ParseContext(data, relative_path), global_media_type)
            res[global_media_type] = parsed_data
            return res
    for key, body_data in data.iteritems():
        if body_data:
            res[key] = parse_body(ParseContext(body_data, relative_path), global_media_type)
        else:
            # body marked as !!null
            res[key] = RamlBody(notNull=True)
    return res
def _validate_raml_header(line):
    """
    Parse the header of a RAML file and ensure that we can work with it.

    :param line: RAML header (e.g. "#%RAML 0.8")
    :type line: str
    :return: RAML format version string
    :rtype: str
    :raise RamlParseException: in case of parsing errors
    """
    # Line should look like "#%RAML 0.8". Split it by whitespace and validate
    header_tuple = line.split()
    if len(header_tuple) != 2:
        raise RamlParseException("Invalid format of RAML header")
    if header_tuple[0] != "#%RAML":
        # BUG FIX: error message read "Unable to found RAML header"
        raise RamlParseException("Unable to find RAML header")
    try:
        # Extract first 2 numbers from format version, e.g. "0.8.2" -> "0.8"
        major_format_version = ".".join(header_tuple[1].split(".")[:2])
        if float(major_format_version) > RAML_SUPPORTED_FORMAT_VERSION:
            raise RamlParseException("Unsupported format of RAML file", header_tuple[1])
        return header_tuple[1]
    except ValueError:
        # float() failed: the version is not numeric
        raise RamlParseException("Invalid RAML format version", header_tuple[1])
def _is_mime_type_raml(mime_type):
return mime_type.lower() in ["text/yaml", "application/raml+yaml",
"text/x-yaml", "application/yaml", "application/x-yaml"]
def _is_mime_type_json(mime_type):
return mime_type.lower() == "application/json"
def _is_mime_type_xml(mime_type):
return mime_type.lower() == "application/xml"
def _is_network_resource(uri):
    """Return the URI scheme: truthy for network URLs, '' for local paths."""
    return urlparse.urlparse(uri).scheme
def _build_network_relative_path(url):
    """Return `url` with its last path segment stripped; scheme/host kept."""
    p = urlparse.urlparse(url)
    return urlparse.urlunparse(urlparse.ParseResult(p.scheme, p.netloc, os.path.dirname(p.path), '', '', ''))
def _calculate_new_relative_path(base, uri):
    """
    Compute the directory (or URL base) that `uri` resolves to, relative
    to `base`, which may be a network URL or a local filesystem path.
    """
    if _is_network_resource(base):
        return _build_network_relative_path(urlparse.urljoin(base, uri))
    return os.path.dirname(os.path.join(base, uri))
def _load_local_file(full_path):
    """
    Read a local file and guess its mime type.

    :param full_path: path of the file to load
    :return: 2-tuple of (file content, mime type)
    :raise RamlNotFoundException: when the file does not exist
    """
    # include locates at local file system
    if not os.path.exists(full_path):
        raise RamlNotFoundException("No such file {} found".format(full_path))
    # detect file type... we should able to parse raml, yaml, json, xml and read all other content types as plain
    # files
    mime_type = mimetypes.guess_type(full_path)[0]
    if mime_type is None:
        mime_type = "text/plain"
    # NOTE(review): the 'U' (universal newlines) open mode is deprecated
    # and removed in modern Python 3 -- revisit when porting.
    with contextlib.closing(open(full_path, 'rU')) as f:
        return f.read(), mime_type
def _load_network_resource(url):
    """
    Fetch a remote resource over HTTP(S).

    :param url: URL to fetch
    :return: 2-tuple of (response body, mime type reported by the server)
    """
    with contextlib.closing(urllib2.urlopen(url, timeout=60.0)) as f:
        # We fully rely of mime type to remote server b/c according
        # of specs it MUST support RAML mime
        mime_type = f.headers.gettype()
        return f.read(), mime_type
def _parse_raml_version(content):
    """
    Get optional property `version` and make sure that it is a string.

    If the property does not exist the function returns None.

    :param content: mapping parsed from the RAML document
    :return: property value as a string, or None when absent
    :rtype: basestring or None
    :raise RamlParseException: when `version` is not a string or a number
    """
    property_value = content.get('version')
    if not property_value:
        return None
    # version should be string but if version specified as "0.1" the yaml
    # package recognizes it as float (and "1" as int), so numbers are
    # accepted and converted below
    if not isinstance(property_value, (basestring, int, float)):
        raise RamlParseException("Property `version` must be string")
    if isinstance(property_value, (int, float)):
        # BUG FIX: the stringified value was assigned to an unused local
        # and the raw number returned; return the string form as documented.
        return str(property_value)
    # BUG FIX: a string value previously fell through and returned None.
    return property_value
| {
"repo_name": "mpetyx/pyapi",
"path": "pyapi/libraries/pyraml_parser_master/pyraml/parser.py",
"copies": "1",
"size": "17263",
"license": "mit",
"hash": 3258387050526762000,
"line_mean": 31.5103578154,
"line_max": 120,
"alpha_frac": 0.6459479812,
"autogenerated": false,
"ratio": 3.9030070088175446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008608151332455064,
"num_lines": 531
} |
__author__ = 'ad'
import os.path
from collections import OrderedDict
from pyapi.libraries.pyraml_parser_master import pyraml
from pyapi.libraries.pyraml_parser_master.pyraml import parser
from pyapi.libraries.pyraml_parser_master.pyraml.entities import RamlResource, RamlMethod, RamlQueryParameter
fixtures_dir = os.path.join(os.path.dirname(__file__), '../', 'samples')
def test_resource_nested():
    """Nested resources are parsed with correct methods and parent links."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/resource-nested.yaml'))
    assert isinstance(p.resources, OrderedDict), p.resources
    assert len(p.resources) == 1, p.resources
    # Validate root resource
    assert "/media" in p.resources, p.resources
    root_resource = p.resources["/media"]
    assert isinstance(root_resource, RamlResource), p.resources
    assert root_resource.parentResource is None, p.resources
    assert root_resource.methods is not None, p.resources
    assert root_resource.description == "Media Description", root_resource
    assert "get" in root_resource.methods, p.resources
    assert isinstance(root_resource.methods["get"], RamlMethod), p.resources
    assert root_resource.methods["get"].notNull, p.resources
    # validate sub-resources
    assert root_resource.resources is not None, root_resource
    # NOTE(review): `"/search" in x is not None` is a chained comparison
    # ((in) and (is not None)) -- works, but confirm that was intended.
    assert "/search" in root_resource.resources is not None, root_resource
    assert root_resource.resources["/search"].displayName == "Media Search", root_resource
    assert root_resource.resources["/search"].description == "Media Search Description", root_resource
    assert "get" in root_resource.resources["/search"].methods, root_resource
    assert root_resource.resources["/search"].methods["get"].notNull, root_resource
    assert "/tags" in root_resource.resources is not None, root_resource
    assert root_resource.resources["/tags"].displayName == "Tags", root_resource
    assert root_resource.resources["/tags"].description == "Tags Description", root_resource
    assert "get" in root_resource.resources["/tags"].methods, root_resource
    assert root_resource.resources["/tags"].methods["get"].notNull, root_resource
    # /media/tags has their own resource /search
    tag_resource = root_resource.resources["/tags"]
    assert tag_resource.resources is not None, tag_resource
    assert "/search" in tag_resource.resources, tag_resource
    assert tag_resource.resources["/search"].displayName == "Tag Search", tag_resource
    assert tag_resource.resources["/search"].description == "Tag Search Description", tag_resource
    assert tag_resource.resources["/search"].methods["get"].notNull, root_resource
    # Ensure than every sub-resource have correct parentResource
    assert root_resource.resources["/search"].parentResource is root_resource
    assert root_resource.resources["/tags"].parentResource is root_resource
    assert tag_resource.resources["/search"].parentResource is tag_resource
def test_resource_with_responses():
    """Response bodies keyed by status code and mime type are parsed."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/null-elements.yaml'))
    assert isinstance(p.resources, OrderedDict), p.resources
    assert "/leagues" in p.resources, p
    leagues_resource = p.resources["/leagues"]
    assert leagues_resource.displayName == "Leagues", leagues_resource
    assert leagues_resource.description is None, leagues_resource
    assert leagues_resource.methods, leagues_resource
    assert leagues_resource.methods["get"], leagues_resource
    leagues_resource_get = leagues_resource.methods["get"]
    assert leagues_resource_get.responses, leagues_resource_get
    # HTTP 200 response must carry both declared body media types
    assert leagues_resource_get.responses[200], leagues_resource_get
    assert leagues_resource_get.responses[200].body, leagues_resource_get
    assert "application/json" in leagues_resource_get.responses[200].body, leagues_resource_get
    assert "text/xml" in leagues_resource_get.responses[200].body, leagues_resource_get
def test_resource_with_params():
    """Query parameter declarations (type, enum, bounds) are fully parsed."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/params', 'param-types.yaml'))
    assert isinstance(p.resources, OrderedDict), p.resources
    assert "/simple" in p.resources, p
    simple_res = p.resources["/simple"]
    assert "get" in simple_res.methods, simple_res
    queryParameters = simple_res.methods["get"].queryParameters
    assert "name" in queryParameters, queryParameters
    assert "age" in queryParameters, queryParameters
    assert "price" in queryParameters, queryParameters
    assert "time" in queryParameters, queryParameters
    assert "alive" in queryParameters, queryParameters
    assert "default-enum" in queryParameters, queryParameters
    # Spot-check every declared attribute of the "name" parameter
    queryParam1 = queryParameters["name"]
    assert isinstance(queryParam1, RamlQueryParameter), queryParam1
    assert queryParam1.example == "two", queryParam1
    assert queryParam1.enum == ["one", "two", "three"], queryParam1
    assert queryParam1.displayName == "name name", queryParam1
    assert queryParam1.description == "name description"
    assert queryParam1.default == "three", queryParam1
    assert queryParam1.minLength == 3, queryParam1
    assert queryParam1.type == "string", queryParam1
    assert queryParam1.maxLength == 5, queryParam1
    assert queryParam1.pattern == '[a-z]{3,5}', queryParam1
    assert queryParam1.required == False, queryParam1
    assert queryParam1.repeat == False, queryParam1
| {
"repo_name": "mpetyx/pyapi",
"path": "tests/unit/raml/tests/test_resources.py",
"copies": "1",
"size": "5323",
"license": "mit",
"hash": 5145549093047658000,
"line_mean": 46.954954955,
"line_max": 109,
"alpha_frac": 0.7422506106,
"autogenerated": false,
"ratio": 3.8544532947139754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5096703905313975,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ad'
import os.path
from pyapi.libraries.pyraml_parser_master import pyraml
from pyapi.libraries.pyraml_parser_master.pyraml import parser
from pyapi.libraries.pyraml_parser_master.pyraml.entities import RamlRoot, RamlDocumentation
fixtures_dir = os.path.join(os.path.dirname(__file__), '../', 'samples')
def test_include_raml():
    """Root-level elements pulled in via !include resolve to expected values."""
    p = parser.load(os.path.join(fixtures_dir, '../samples/root-elements-includes.yaml'))
    assert isinstance(p, RamlRoot), RamlRoot
    assert p.raml_version == "0.8", p.raml_version
    assert p.title == "included title", p.title
    assert p.version == "v1", p.version
    assert p.baseUri == "https://sample.com/api", p.baseUri
    assert len(p.documentation) == 2, p.documentation
    assert isinstance(p.documentation[0], RamlDocumentation), p.documentation
    assert isinstance(p.documentation[1], RamlDocumentation), p.documentation
    assert p.documentation[0].title == "Home", p.documentation[0].title
    assert p.documentation[0].content == \
        """Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna...
""", p.documentation[0].content
    # BUG FIX: the failure message previously printed documentation[0].title
    # (copy-paste) although documentation[1].title is what is being checked.
    assert p.documentation[1].title == "Section", p.documentation[1].title
    assert p.documentation[1].content == "section content", p.documentation[1].content
def test_numeric_version():
    """A bare numeric `version` field stays a number, not a string."""
    root = pyraml.parser.load(
        os.path.join(fixtures_dir, '../samples/numeric-api-version.yaml'))
    assert isinstance(root, RamlRoot), RamlRoot
    assert root.version == 1, root.version
| {
"repo_name": "mpetyx/pyapi",
"path": "tests/unit/raml/tests/test_documentation.py",
"copies": "1",
"size": "1551",
"license": "mit",
"hash": 5138264116498412000,
"line_mean": 39.8157894737,
"line_max": 93,
"alpha_frac": 0.7137330754,
"autogenerated": false,
"ratio": 3.3426724137931036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45564054891931033,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ad'
import os.path
import pyraml.parser
from pyraml.entities import RamlRoot, RamlDocumentation
fixtures_dir = os.path.join(os.path.dirname(__file__), '..', 'samples')
def test_include_raml():
    """Root-level elements pulled in via !include resolve to expected values."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, 'root-elements-includes.yaml'))
    assert isinstance(p, RamlRoot), RamlRoot
    assert p.raml_version == "0.8", p.raml_version
    assert p.title == "included title", p.title
    assert p.version == "v1", p.version
    assert p.baseUri == "https://sample.com/api", p.baseUri
    assert len(p.documentation) == 2, p.documentation
    assert isinstance(p.documentation[0], RamlDocumentation), p.documentation
    assert isinstance(p.documentation[1], RamlDocumentation), p.documentation
    assert p.documentation[0].title == "Home", p.documentation[0].title
    assert p.documentation[0].content == \
        """Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna...
""", p.documentation[0].content
    # BUG FIX: the failure message previously printed documentation[0].title
    # (copy-paste) although documentation[1].title is what is being checked.
    assert p.documentation[1].title == "Section", p.documentation[1].title
    assert p.documentation[1].content == "section content", p.documentation[1].content
def test_numeric_version():
    """A bare numeric `version` field stays a number, not a string."""
    root = pyraml.parser.load(
        os.path.join(fixtures_dir, 'numeric-api-version.yaml'))
    assert isinstance(root, RamlRoot), RamlRoot
    assert root.version == 1, root.version
| {
"repo_name": "mpetyx/pyapi",
"path": "pyapi/libraries/pyraml_parser_master/tests/test_documentation.py",
"copies": "1",
"size": "1400",
"license": "mit",
"hash": 3066141277163579000,
"line_mean": 36.8378378378,
"line_max": 86,
"alpha_frac": 0.7021428571,
"autogenerated": false,
"ratio": 3.341288782816229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4543431639916229,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ad'
import os.path
import pyraml.parser
from pyraml.entities import RamlRoot, RamlTrait, RamlBody, RamlResourceType
fixtures_dir = os.path.join(os.path.dirname(__file__), '..', 'samples')
def test_parse_traits_with_schema():
    """A trait carrying an inline body schema parses into RamlTrait/RamlBody."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, 'media-type.yaml'))
    assert isinstance(p, RamlRoot), RamlRoot
    assert p.traits, "Property `traits` should be set"
    assert len(p.traits) == 1, p.traits
    assert isinstance(p.traits["traitOne"], RamlTrait), p.traits
    assert isinstance(p.traits["traitOne"].body, RamlBody), p.traits["traitOne"]
    # The JSON schema text must round-trip verbatim, newlines included.
    assert p.traits["traitOne"].body.schema == """{ "$schema": "http://json-schema.org/draft-03/schema",
"type": "object",
"description": "A product presentation",
"properties": {
"id": { "type": "string" },
"title": { "type": "string" }
}
}
""", p.traits["traitOne"].body.schema
def test_parse_raml_with_many_traits():
    """full-config.yaml declares two traits; both parse as RamlTrait."""
    root = pyraml.parser.load(os.path.join(fixtures_dir, 'full-config.yaml'))
    assert isinstance(root, RamlRoot), RamlRoot
    traits = root.traits
    assert traits, "Property `traits` should be set"
    assert len(traits) == 2, traits
    for trait_name in ("simple", "knotty"):
        assert isinstance(traits[trait_name], RamlTrait), traits
    assert traits["simple"].displayName == "simple trait"
    assert traits["knotty"].displayName == "<<value>> trait"
def test_parse_resource_type_with_references_to_traits():
    """Resource types referencing traits parse into RamlResourceType."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, 'media-type.yaml'))
    assert isinstance(p, RamlRoot), RamlRoot
    # BUG FIX: the message said "traits" (copy-paste) although resourceTypes
    # is the property being checked here.
    assert p.resourceTypes, "Property `resourceTypes` should be set"
    assert len(p.resourceTypes), p.resourceTypes
    assert 'typeParent' in p.resourceTypes, p.resourceTypes
    assert isinstance(p.resourceTypes['typeParent'], RamlResourceType), p.resourceTypes
    parent_resource_type = p.resourceTypes['typeParent']
    assert parent_resource_type.methods, p.resourceTypes['typeParent']
    assert 'get' in parent_resource_type.methods
    assert 'typeChild' in p.resourceTypes, p.resourceTypes
    assert isinstance(p.resourceTypes['typeChild'], RamlResourceType), p.resourceTypes
"repo_name": "mpetyx/pyapi",
"path": "pyapi/libraries/pyraml_parser_master/tests/test_traits.py",
"copies": "1",
"size": "2161",
"license": "mit",
"hash": -8278884886732771000,
"line_mean": 39.037037037,
"line_max": 105,
"alpha_frac": 0.6968995835,
"autogenerated": false,
"ratio": 3.3093415007656968,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9501018946155176,
"avg_score": 0.0010444276221040993,
"num_lines": 54
} |
import os.path as op
import numpy as np
import pytest
from numpy.testing import assert_allclose
from mne.chpi import read_head_pos
from mne.datasets import testing
from mne.io import read_raw_fif
from mne.preprocessing import (annotate_movement, compute_average_dev_head_t,
annotate_muscle_zscore, annotate_break)
from mne import Annotations, events_from_annotations
# Paths into the MNE testing dataset. Not downloaded here (download=False);
# tests needing these files are gated with @testing.requires_testing_data.
data_path = testing.data_path(download=False)
sss_path = op.join(data_path, 'SSS')
pos_fname = op.join(sss_path, 'test_move_anon_raw.pos')
raw_fname = op.join(sss_path, 'test_move_anon_raw.fif')
@testing.requires_testing_data
def test_movement_annotation_head_correction():
    """Test correct detection movement artifact and dev_head_t."""
    raw = read_raw_fif(raw_fname, allow_maxshield='yes').load_data()
    pos = read_head_pos(pos_fname)
    # Check 5 rotation segments are detected
    # (unpacking the second return value into `[]` also asserts it is empty)
    annot_rot, [] = annotate_movement(raw, pos, rotation_velocity_limit=5)
    assert(annot_rot.duration.size == 5)
    # Check 2 translation vel. segments are detected
    annot_tra, [] = annotate_movement(raw, pos, translation_velocity_limit=.05)
    assert(annot_tra.duration.size == 2)
    # Check 1 movement distance segment is detected
    annot_dis, disp = annotate_movement(raw, pos, mean_distance_limit=.02)
    assert(annot_dis.duration.size == 1)
    # Check correct trans mat: average the device->head transform over the
    # un-annotated (good) spans only.
    raw.set_annotations(annot_rot + annot_tra + annot_dis)
    dev_head_t = compute_average_dev_head_t(raw, pos)

    # Reference 4x4 transform (regression values).
    dev_head_t_ori = np.array([
        [0.9957292, -0.08688804, 0.03120615, 0.00698271],
        [0.09020767, 0.9875856, -0.12859731, -0.0159098],
        [-0.01964518, 0.1308631, 0.99120578, 0.07258289],
        [0., 0., 0., 1.]])

    assert_allclose(dev_head_t_ori, dev_head_t['trans'], rtol=1e-5, atol=0)

    # Smoke test skipping time due to previous annotations.
    raw.set_annotations(Annotations([raw.times[0]], 0.1, 'bad'))
    annot_dis, disp = annotate_movement(raw, pos, mean_distance_limit=.02)
    assert(annot_dis.duration.size == 1)
@testing.requires_testing_data
def test_muscle_annotation():
    """Test correct detection muscle artifacts."""
    raw = read_raw_fif(raw_fname, allow_maxshield='yes').load_data()
    # Notch-filter 50/110/150 Hz so those peaks don't dominate the z-scores.
    raw.notch_filter([50, 110, 150])
    # Check 2 muscle segments are detected
    annot_muscle, scores = annotate_muscle_zscore(raw, ch_type='mag',
                                                  threshold=10)
    # Convert annotation onsets (seconds) into sample indices into `scores`.
    onset = annot_muscle.onset * raw.info['sfreq']
    onset = onset.astype(int)
    np.testing.assert_array_equal(scores[onset].astype(int), np.array([23,
                                                                       10]))
    assert(annot_muscle.duration.size == 2)
@testing.requires_testing_data
def test_muscle_annotation_without_meeg_data():
    """Call annotate_muscle_zscore with data without meg or eeg."""
    raw = read_raw_fif(raw_fname, allow_maxshield='yes')
    # Keep a tiny crop and only stim channels, so no M/EEG data remains.
    raw.crop(0, .1).load_data()
    raw.pick_types(meg=False, stim=True)
    with pytest.raises(ValueError, match="No M/EEG channel types found"):
        annot_muscle, scores = annotate_muscle_zscore(raw, threshold=10)
@testing.requires_testing_data
def test_annotate_breaks():
    """Test annotate_breaks."""
    raw = read_raw_fif(raw_fname, allow_maxshield='yes')
    # Five 'test' annotations; [15,16] and [16,17] touch and [20,22] covers
    # [21,21.5], so the busy spans are [12,13], [15,17] and [20,22].
    annots = Annotations(onset=[12, 15, 16, 20, 21],
                         duration=[1, 1, 1, 2, 0.5],
                         description=['test'],
                         orig_time=raw.info['meas_date'])
    raw.set_annotations(annots)

    min_break_duration = 0.5
    t_start_after_previous = 0.1
    t_stop_before_next = 0.1

    # Breaks begin at recording start and t_start_after_previous after each
    # busy span ...
    expected_onsets = np.array(
        [
            raw.first_time,
            13 + t_start_after_previous,
            17 + t_start_after_previous,
            22 + t_start_after_previous
        ]
    )
    # ... and stop t_stop_before_next before the next busy span (or run to
    # the end of the recording).
    expected_durations = np.array(
        [
            12 - raw.first_time - t_stop_before_next,
            15 - 13 - t_start_after_previous - t_stop_before_next,
            20 - 17 - t_start_after_previous - t_stop_before_next,
            raw._last_time - 22 - t_start_after_previous
        ]
    )

    break_annots = annotate_break(
        raw=raw,
        min_break_duration=min_break_duration,
        t_start_after_previous=t_start_after_previous,
        t_stop_before_next=t_stop_before_next
    )

    assert_allclose(break_annots.onset, expected_onsets)
    assert_allclose(break_annots.duration, expected_durations)
    assert all(description == 'BAD_break'
               for description in break_annots.description)

    # `ignore` parameter should be respected: relabeling the first annotation
    # 'BAD_' makes annotate_break skip it, fusing the first two break periods
    # into one ending at 15 (see expected values below).
    raw.annotations.description[0] = 'BAD_'
    break_annots = annotate_break(
        raw=raw,
        min_break_duration=min_break_duration,
        t_start_after_previous=t_start_after_previous,
        t_stop_before_next=t_stop_before_next
    )

    assert_allclose(break_annots.onset,
                    expected_onsets[[True, False, True, True]])
    assert_allclose(
        break_annots.duration,
        [15 - raw.first_time - t_stop_before_next] +
        list(expected_durations[2:])
    )

    # Restore annotation description
    raw.annotations.description[0] = 'test'

    # Test with events: events are instants (no duration), so every gap
    # between consecutive event onsets yields its own break period.
    events, _ = events_from_annotations(raw=raw)
    raw.set_annotations(None)
    expected_onsets = np.array(
        [
            raw.first_time,
            12 + t_start_after_previous,
            15 + t_start_after_previous,
            16 + t_start_after_previous,
            20 + t_start_after_previous,
            21 + t_start_after_previous
        ]
    )
    expected_durations = np.array(
        [
            12 - raw.first_time - t_stop_before_next,
            15 - 12 - t_start_after_previous - t_stop_before_next,
            16 - 15 - t_start_after_previous - t_stop_before_next,
            20 - 16 - t_start_after_previous - t_stop_before_next,
            21 - 20 - t_start_after_previous - t_stop_before_next,
            raw._last_time - 21 - t_start_after_previous
        ]
    )

    break_annots = annotate_break(
        raw=raw,
        events=events,
        min_break_duration=min_break_duration,
        t_start_after_previous=t_start_after_previous,
        t_stop_before_next=t_stop_before_next
    )

    assert_allclose(break_annots.onset, expected_onsets)
    assert_allclose(break_annots.duration, expected_durations)

    # Not finding any break periods
    break_annots = annotate_break(
        raw=raw,
        events=events,
        min_break_duration=1000,
    )
    assert len(break_annots) == 0

    # Implausible parameters (would produce break annot of duration < 0)
    with pytest.raises(ValueError, match='must be greater than 0'):
        annotate_break(
            raw=raw,
            min_break_duration=5,
            t_start_after_previous=5,
            t_stop_before_next=5
        )

    # Empty events array
    with pytest.raises(ValueError, match='events array must not be empty'):
        annotate_break(raw=raw, events=np.array([]))

    # Invalid `ignore` value
    with pytest.raises(TypeError, match='must be an instance of str'):
        annotate_break(raw=raw, ignore=('foo', 1))

    # No annotations to work with
    raw.set_annotations(None)
    with pytest.raises(ValueError, match='Could not find.*annotations'):
        annotate_break(raw=raw)
| {
"repo_name": "bloyl/mne-python",
"path": "mne/preprocessing/tests/test_artifact_detection.py",
"copies": "3",
"size": "7531",
"license": "bsd-3-clause",
"hash": 1854888127119699500,
"line_mean": 34.0279069767,
"line_max": 79,
"alpha_frac": 0.6154561147,
"autogenerated": false,
"ratio": 3.4403837368661487,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 215
} |
__author__ = "Adrian 'LucidCharts' Campos, Johnson Nguyen, Josh Hicken"
from collections import Counter, OrderedDict
from string import ascii_lowercase
from string import punctuation as punc_chars  # string of punctuation chars from the stdlib
# noinspection SpellCheckingInspection
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
def get_frequency_dist_characters(input_string):
    """
    Calculate frequency of each character's occurrence
    :param input_string: String containing characters to be counted
    :return: A dictionary mapping each lowercase ascii letter that occurs to
             its count; letters that never occur are omitted
    """
    # Lowercase once and count in a single pass. The previous version
    # re-lowered the whole string and rescanned it with str.count for each of
    # the 26 letters (26 full passes plus 26 temporary lowered copies).
    counts = Counter(input_string.lower())
    return {letter: counts[letter] for letter in ascii_lowercase if counts[letter]}
def get_frequency_dist_words_top10(input_string):
    """
    Calculate frequency of top ten words occurring in a phrase
    :param input_string: String containing words to be ranked and limited to 10
    :return: A dictionary of words with their values (at most 10 entries)
    """
    # BUG FIX: '\r' and '\n' used to be deleted outright, which glued the last
    # word of one line to the first word of the next; treat them (and
    # punctuation) as separators instead. str.translate does the whole
    # substitution in one C-level pass.
    separators = punc_chars + '\r\n'
    table = str.maketrans(separators, ' ' * len(separators))
    tokens = input_string.translate(table).lower().split()
    # Counter counts every token in O(n); the previous version called
    # list.count once per unique token, which is quadratic. most_common(10)
    # also replaces the sort-then-trim through an OrderedDict.
    return dict(Counter(tokens).most_common(10))
def get_frequency_dist_first_letters(input_string):
    """
    Calculate frequency of first letter of each word's occurrence
    :param input_string: String containing words whose first letters are to be counted
    :return: A dictionary of (uppercased) first characters with their counts
    """
    # BUG FIX: the old index-based scan read text[i + 1] for every space and
    # raised IndexError when the input ended with a space; it also counted a
    # bogus ' ' key for runs of consecutive spaces. Splitting on single
    # spaces keeps the original word boundaries while avoiding both issues.
    frequency = {}
    for word in input_string.upper().split(' '):
        # Skip empty tokens (leading/trailing/double spaces) and words that
        # start with punctuation — the original deliberately ignored those.
        if not word or word[0] in punc_chars:
            continue
        frequency[word[0]] = frequency.get(word[0], 0) + 1
    return frequency
| {
"repo_name": "adriancampos/LetsPlotMoby",
"path": "dist_calculators.py",
"copies": "1",
"size": "3086",
"license": "mit",
"hash": 2544198012438133000,
"line_mean": 40.1466666667,
"line_max": 112,
"alpha_frac": 0.6824368114,
"autogenerated": false,
"ratio": 4.136729222520107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015046903324566806,
"num_lines": 75
} |
__author__ = 'adrianmo'
import string
import re
import codecs,sys, unicodedata
import pprint
import MySQLdb
import os
# MySQL connection settings used by MMICProgram.getField.
# NOTE(review): hard-coded root credentials checked into source — move these
# to an environment variable or config file and rotate the password.
xuser = "root"
xpasswd = "cRe33Eth"
xhost = '127.0.0.1'
xport = 3334
class MMICProgram():
def openFile(self, fileName):
with codecs.open (fileName, "r", "utf-8") as line:
f = line.read()
self.code = f.splitlines()
print self.code
def getField(self, table, field, syid=1):
mega = MySQLdb.connect(host=xhost,port=xport,user=xuser, passwd=xpasswd,db='charls')
mega.autocommit(False)
c = mega.cursor()
c.execute("SET CHARACTER SET utf8")
c.execute("SET collation_connection = 'utf8_general_ci'")
c.execute("""SELECT %s FROM %s WHERE syid=%s order by reid asc """ % (field, table, syid))
mega.close()
rst = c.fetchall()
stringify = ""
for i in range(0,len(rst)):
stringify += rst[i][0] + "\n\r"
return stringify
def getCode(self):
file = ""
for i in self.code:
if isinstance(i, unicode):
file += "\n" + i
return file
| {
"repo_name": "maplechori/pyblasv3",
"path": "MMICProgram.py",
"copies": "1",
"size": "1173",
"license": "mit",
"hash": 5552391818719018000,
"line_mean": 16.5074626866,
"line_max": 99,
"alpha_frac": 0.5549872123,
"autogenerated": false,
"ratio": 3.3901734104046244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4445160622704624,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adrian'
from PyQt4 import QtGui
from parking_app.UI.PlatformUI import PlatformUI
import parking_app.Common as Common
class CylinderUI(QtGui.QWidget):
    """Grid of PlatformUI widgets, one per (level, column) slot of a cylinder."""

    def __init__(self, cylinder):
        super(CylinderUI, self).__init__()
        self.cylinder = cylinder
        self.init_ui()

    def init_ui(self):
        """Create one PlatformUI per platform and lay them out in a grid."""
        grid = QtGui.QGridLayout()
        self.setLayout(grid)
        levels = self.cylinder.qtty_levels()
        columns = self.cylinder.qtty_columns()
        self.platformsUI = [[PlatformUI() for _ in range(columns)]
                            for _ in range(levels)]
        # Plain loops here: addWidget is called purely for its side effect,
        # so the old nested list comprehension built a throwaway list.
        for lvl in range(levels):
            for col in range(columns):
                grid.addWidget(self.platformsUI[lvl][col], lvl, col)

    def updatePlatform(self, level, column, vehicle_patent, vehicle_weight, alarm):
        """Forward the new platform state to the matching PlatformUI cell."""
        self.platformsUI[level][column].updateUI(vehicle_patent, vehicle_weight, alarm)
"repo_name": "Nebla/cylindricalParkingPrototype",
"path": "parking_app/UI/CylinderUI.py",
"copies": "1",
"size": "1177",
"license": "mit",
"hash": 4083419530887316500,
"line_mean": 30,
"line_max": 87,
"alpha_frac": 0.5972812234,
"autogenerated": false,
"ratio": 3.7129337539432177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4810214977343218,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adrian'
from PyQt4 import QtGui
from PyQt4 import QtCore
import parking_app.Common as Common
import random
class WithdrawFormUI(QtGui.QWidget):
# level, column, vehicle id, vehicle weight
update = QtCore.pyqtSignal(int, str, int)
def __init__(self, parking_slot, parent=None):
super(WithdrawFormUI, self).__init__(parent)
self.__parking_slot = parking_slot
self.initUI()
def initUI(self):
self.setWindowTitle('Withdraw Car')
self.setWindowIcon(QtGui.QIcon('Logo.png'))
# Patente
patenteLayout = QtGui.QHBoxLayout()
lbl1 = QtGui.QLabel("Patente",self)
self.patente = QtGui.QLineEdit(self)
self.patente.setInputMask(">AAA-999")
patenteLayout.addWidget(lbl1)
patenteLayout.addWidget(self.patente)
# Boton de aceptar y cancelar
buttonLayout = QtGui.QHBoxLayout()
aceptButton = QtGui.QPushButton('Aceptar')
aceptButton.clicked.connect(self.acept)
cancelButton = QtGui.QPushButton('Cancelar')
cancelButton.clicked.connect(self.cancel)
buttonLayout.addWidget(aceptButton)
buttonLayout.addWidget(cancelButton)
layout = QtGui.QVBoxLayout()
layout.addLayout(patenteLayout)
layout.addLayout(buttonLayout)
self.setLayout(layout)
def acept(self):
print('Aceptar')
parking_slots = self.__parking_slot.data
lvl = parking_slots.get_car(self.patente.text())
self.__parking_slot.data = parking_slots
if lvl >= 0:
self.update.emit(lvl, "", Common.Weights.empty.value)
self.close()
def cancel(self):
print('Cancelar')
self.close()
| {
"repo_name": "Nebla/cylindricalParkingPrototype",
"path": "parking_app/UI/WithdrawFormUI.py",
"copies": "1",
"size": "1733",
"license": "mit",
"hash": -7218549468163006000,
"line_mean": 27.4098360656,
"line_max": 65,
"alpha_frac": 0.64050779,
"autogenerated": false,
"ratio": 3.617954070981211,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47584618609812107,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adrian'
from PyQt4 import QtGui
from PyQt4 import QtCore
from parking_app.UI.WarningConfirmationUI import WarningConfirmationUI
import parking_app.Common as Common
import random
class PlatformUI(QtGui.QWidget):
def __init__(self):
super(PlatformUI, self).__init__()
self.initUI()
def initUI(self):
vertical = QtGui.QVBoxLayout()
horizontal = QtGui.QHBoxLayout()
# Vehicle ID
self.lbl_patente = QtGui.QLabel('AAAA',self)
horizontal.addWidget(self.lbl_patente)
# Alarm
self.warningButton = QtGui.QPushButton()
self.warningButton.setIcon(QtGui.QIcon('Warning.png'))
self.warningButton.setIconSize(QtCore.QSize(15,15))
self.warningButton.clicked.connect(self.showWarningOffConfirmation)
self.warningButton.setVisible(False)
horizontal.addWidget(self.warningButton)
vertical.addLayout(horizontal)
self.lbl_vehicle = QtGui.QLabel(self)
vertical.addWidget(self.lbl_vehicle)
self.setLayout(vertical)
color = QtGui.QColor(150, 150, 150)
self.setBackgroundColor(color)
self.lbl_vehicle.setHidden(True)
self.lbl_patente.setHidden(True)
def showWarningOffConfirmation(self):
self.confirmationMessage = WarningConfirmationUI()
self.confirmationMessage.resize(400, 200)
self.confirmationMessage.move(50,50)
QtCore.QObject.connect(self.confirmationMessage, QtCore.SIGNAL('stopWarning()'), self.turnOffWarning)
self.confirmationMessage.show()
print("Mostrar mensaje de confirmacion")
def turnOffWarning(self):
print("Apagando warning")
self.warningButton.setHidden(True)
def setBackgroundColor(self, color):
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), color)
self.setPalette(p)
""""
def update(self):
color = QtGui.QColor(150, 150, 150)
self.lbl_vehicle.setHidden(True)
self.lbl_patente.setHidden(True)
if not self.platform.is_empty():
color = QtGui.QColor(100, 100, 255)
# Vehicle ID
self.lbl_patente.text(self.platform.vehicle.patent)
#vertical.addWidget(lbl1)
# Current vehicle
vehicleName = ''
if self.platform.get_weight() == Common.Weights.veryLight:
vehicleName = 'MotoSide.png'
elif self.platform.get_weight() == Common.Weights.light:
vehicleName = 'CarSide.png'
elif self.platform.get_weight() == Common.Weights.heavy:
vehicleName = '.png'
elif self.platform.get_weight() == Common.Weights.veryHeavy:
vehicleName = 'TrukSide.png'
pixmap2 = QtGui.QPixmap(vehicleName)
pixmap2 = pixmap2.scaled(40, 40, QtCore.Qt.KeepAspectRatio)
self.lbl_vehicle.setPixmap(pixmap2)
self.lbl_vehicle.setHidden(False)
self.lbl_patente.setHidden(False)
self.setBackgroundColor(color)
"""
def updateUI(self, vehicle_patent, vehicle_weight, alarm):
color = QtGui.QColor(150, 150, 150)
self.lbl_vehicle.setHidden(True)
self.lbl_patente.setHidden(True)
if vehicle_weight != Common.Weights.empty.value:
if alarm == Common.Alarm.stay.value:
color = QtGui.QColor(100, 100, 255)
elif alarm == Common.Alarm.oneLevelDown.value:
color = QtGui.QColor(150, 100, 255)
elif alarm == Common.Alarm.twoLevelDown.value:
color = QtGui.QColor(200, 100, 255)
elif alarm == Common.Alarm.lessThanMarginTime.value:
color = QtGui.QColor(255, 100, 255)
elif alarm == Common.Alarm.deliver.value:
color = QtGui.QColor(255, 100, 100)
# Vehicle ID
self.lbl_patente.setText(vehicle_patent)
# Current vehicle
vehicleName = ''
if vehicle_weight == Common.Weights.veryLight.value:
vehicleName = 'MotoSide.png'
elif vehicle_weight == Common.Weights.light.value:
vehicleName = 'CarSide.png'
elif vehicle_weight == Common.Weights.heavy.value:
vehicleName = 'AutoTruckSide.png'
elif vehicle_weight == Common.Weights.veryHeavy.value:
vehicleName = 'TrukSide.png'
pixmap2 = QtGui.QPixmap(vehicleName)
pixmap2 = pixmap2.scaled(40, 40, QtCore.Qt.KeepAspectRatio)
self.lbl_vehicle.setPixmap(pixmap2)
self.lbl_vehicle.setHidden(False)
self.lbl_patente.setHidden(False)
self.setBackgroundColor(color) | {
"repo_name": "Nebla/cylindricalParkingPrototype",
"path": "parking_app/UI/PlatformUI.py",
"copies": "1",
"size": "4814",
"license": "mit",
"hash": 31891601509830390,
"line_mean": 31.7551020408,
"line_max": 109,
"alpha_frac": 0.6186123806,
"autogenerated": false,
"ratio": 3.910641754670999,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5029254135271,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adrian'
from PyQt4 import QtGui
from PyQt4 import QtCore
import random
class WarningConfirmationUI(QtGui.QWidget):
stopAlarm = QtCore.pyqtSignal()
def __init__(self,parent=None):
super(WarningConfirmationUI, self).__init__(parent)
self.initUI()
def initUI(self):
self.setWindowTitle('Alarm')
self.setWindowIcon(QtGui.QIcon('Logo.png'))
verticalLayout = QtGui.QVBoxLayout()
# Titulo
titleLabel = QtGui.QLabel("Alarma",self)
titleFont = QtGui.QFont()
titleFont.setBold(True)
titleFont.setPixelSize(20)
titleFont.setItalic(True)
titleLabel.setFont(titleFont)
# Mensaje
messageLabel = QtGui.QLabel("Esta seguro que desea apagar esta alarma?",self)
# Boton de aceptar y cancelar
buttonLayout = QtGui.QHBoxLayout()
aceptButton = QtGui.QPushButton('Aceptar')
aceptButton.clicked.connect(self.acept)
cancelButton = QtGui.QPushButton('Cancelar')
cancelButton.clicked.connect(self.cancel)
buttonLayout.addWidget(aceptButton)
buttonLayout.addWidget(cancelButton)
verticalLayout.addWidget(titleLabel)
verticalLayout.addWidget(messageLabel)
verticalLayout.addLayout(buttonLayout)
self.setLayout(verticalLayout)
def acept(self):
print('Aceptar')
self.stopAlarm.emit()
self.close()
def cancel(self):
print('Cancelar')
self.close()
| {
"repo_name": "Nebla/cylindricalParkingPrototype",
"path": "parking_app/UI/WarningConfirmationUI.py",
"copies": "1",
"size": "1515",
"license": "mit",
"hash": 763752386227031800,
"line_mean": 25.1206896552,
"line_max": 85,
"alpha_frac": 0.6501650165,
"autogenerated": false,
"ratio": 3.9763779527559056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002444621988670562,
"num_lines": 58
} |
__author__ = 'adrian'
from PyQt4 import QtGui
import parking_app.Common as Common
from multiprocessing import Queue
class CarFormUI(QtGui.QWidget):
def __init__(self, input_queue):
super(CarFormUI, self).__init__()
self.__input_queue = input_queue
self.initUI()
def initUI(self):
self.setWindowTitle('New Car')
self.setWindowIcon(QtGui.QIcon('Logo.png'))
# Patente
patenteLayout = QtGui.QHBoxLayout()
lbl1 = QtGui.QLabel("Patente",self)
self.patente = QtGui.QLineEdit(self)
#self.patente.setValidator(PatentValidator())
self.patente.setInputMask(">AAA-999")
#self.patente.setLineWrapMode(QtGui.QTextEdit.NoWrap)
patenteLayout.addWidget(lbl1)
patenteLayout.addWidget(self.patente)
# Horas
hoursLayout = QtGui.QHBoxLayout()
lbl2 = QtGui.QLabel("Cantidad de horas",self)
optionsLayout = QtGui.QVBoxLayout()
self.estadia = QtGui.QRadioButton("Estadia")
self.estadia.toggled.connect(self.estadiaSelected)
self.mediaEstadia = QtGui.QRadioButton("Media Estadia")
self.mediaEstadia.toggled.connect(self.estadiaSelected)
otherOptionLayout = QtGui.QHBoxLayout()
self.otro = QtGui.QRadioButton("Otro")
self.otro.setChecked(True)
self.otro.toggled.connect(self.otroSelected)
self.otroSpinBox = QtGui.QSpinBox()
self.otroSpinBox.setMinimum(1)
self.otroSpinBox.setMaximum(1440)
otherOptionLayout.addWidget(self.otro)
otherOptionLayout.addWidget(self.otroSpinBox)
optionsLayout.addWidget(self.estadia)
optionsLayout.addWidget(self.mediaEstadia)
optionsLayout.addLayout(otherOptionLayout)
hoursLayout.addWidget(lbl2)
hoursLayout.addLayout(optionsLayout)
#Tipo de vehiculo
self.__vehicle = QtGui.QComboBox()
self.__vehicle.addItem('Moto')
self.__vehicle.addItem('Auto')
self.__vehicle.addItem('Camioneta')
self.__vehicle.addItem('Utilitario')
# Boton de aceptar y cancelar
buttonLayout = QtGui.QHBoxLayout()
aceptButton = QtGui.QPushButton('Aceptar')
aceptButton.clicked.connect(self.acept)
cancelButton = QtGui.QPushButton('Cancelar')
cancelButton.clicked.connect(self.cancel)
buttonLayout.addWidget(aceptButton)
buttonLayout.addWidget(cancelButton)
layout = QtGui.QVBoxLayout()
layout.addLayout(patenteLayout)
layout.addLayout(hoursLayout)
layout.addWidget(self.__vehicle)
layout.addLayout(buttonLayout)
self.setLayout(layout)
def otroSelected(self):
self.otroSpinBox.setEnabled(True)
def estadiaSelected(self):
self.otroSpinBox.setEnabled(False)
def getWeight(self):
if self.__vehicle.currentIndex() == 0:
return Common.Weights.veryLight
elif self.__vehicle.currentIndex() == 1:
return Common.Weights.light
elif self.__vehicle.currentIndex() == 2:
return Common.Weights.heavy
elif self.__vehicle.currentIndex() == 3:
return Common.Weights.veryHeavy
def acept(self):
print('Car Form UI - Aceptar - Enviar los datos al estacionamiento')
# Enviar los datos al estacionamiento
hours = int(self.otroSpinBox.text())
if self.estadia.isChecked():
hours = 720
elif self.mediaEstadia.isChecked():
hours = 360
vehicle = Common.Vehicle(self.patente.text(), self.getWeight())
self.__input_queue.put([vehicle, hours/60])
print('Car Form UI - Aceptar - Enviados los datos al estacionamiento')
self.close()
def cancel(self):
print('Cancelar')
self.close()
| {
"repo_name": "Nebla/cylindricalParkingPrototype",
"path": "parking_app/UI/CarFormUI.py",
"copies": "1",
"size": "3836",
"license": "mit",
"hash": -8870239060691166000,
"line_mean": 31.7863247863,
"line_max": 78,
"alpha_frac": 0.6470281543,
"autogenerated": false,
"ratio": 3.7644749754661433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4911503129766143,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adrian'
import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
import time
from parking_app.UI.CylinderUI import CylinderUI
from parking_app.UI.CarFormUI import CarFormUI
from parking_app.UI.ParkingSlotsUI import ParkingSlotsUI
from parking_app.UI.WithdrawFormUI import WithdrawFormUI
import parking_app.Common as Common
import parking_app.Platform_Controller as Platform_Controller
import parking_app.Robotic_Dispatcher as Robotic_Dispatcher
import parking_app.Robotic_Hand as Robotic_Hand
import parking_app.Robotic_Deliverer as Robotic_Deliverer
import parking_app.concurrent.SharedHandler as ShHan
import parking_app.concurrent.SharedAlarms as SharedAlarms
from multiprocessing.managers import BaseManager
from multiprocessing import Process, Lock, Queue, Array, Manager, SimpleQueue, Pipe
import queue
import copy
class ParkingUI(QtGui.QMainWindow):
    """Main window of the cylindrical-parking simulator.

    Creates the shared (multiprocessing) cylinder/slot state, starts the
    controller processes (platform controller, dispatcher, deliverer and one
    robotic hand per cylinder) and builds the Qt widgets that visualise them.
    Worker processes report back through old-style PyQt4 'update' signals.
    """
    def __init__(self, qtty_cylinders, levels, columns, qtty_slots):
        """Create shared state, launch the worker processes and build the UI.

        qtty_cylinders -- number of parking cylinders
        levels, columns -- geometry (platforms) of each cylinder
        qtty_slots -- number of delivery/exit parking slots
        """
        super(ParkingUI, self).__init__()
        self.cylindersUI = []
        # One manager process per cylinder; each one serves a Cylinder proxy.
        cylinders = []
        for i in range(qtty_cylinders):
            cylinder_manager = CylinderManager()
            cylinder_manager.start()
            cylinders.append(cylinder_manager.Cylinder(i, levels, columns))
        parking_manager = ParkingSlotManager()
        parking_manager.start()
        parking_slot = parking_manager.ParkingSlots(qtty_slots)
        # Queues shared with the worker processes: cars entering / cars to deliver.
        self.__input_queue = Queue()
        deliver_queue = Queue()
        # One lock per cylinder for each shared structure, plus one for the slots.
        mutex_cylinders = [Lock() for _ in range(qtty_cylinders)]
        mutex_alarms = [Lock() for _ in range(qtty_cylinders)]
        mutex_buffers = [Lock() for _ in range(qtty_cylinders)]
        mutex_parking_slot = Lock()
        # Per cylinder, a levels x columns alarm grid; deep-copied so the
        # cylinders do not share the same inner lists.
        alarms = [[None for _ in range(columns)] for _ in range(levels)]
        alarms = [copy.deepcopy(alarms) for _ in range(qtty_cylinders)]
        car_and_hours = [None, None]
        sh_alarms = [Manager().list(alarms[i]) for i in range(qtty_cylinders)]
        sh_buffers = [Manager().list(car_and_hours) for _ in range(qtty_cylinders)]
        # Platform controller: its 'update' signal repaints one platform cell.
        platform_controller = Platform_Controller.PlatformController(qtty_cylinders)
        platform_controller.initialize(cylinders, mutex_cylinders, sh_alarms,
                                       mutex_alarms)
        QtCore.QObject.connect(platform_controller, QtCore.SIGNAL('update(int, int, int, QString, int, int)'), self.updateUI)
        platform_controller.start()
        # Dispatcher: takes vehicles from the input queue into cylinder buffers.
        dispatcher_controller = Robotic_Dispatcher.RoboticDispatcher(qtty_cylinders)
        dispatcher_controller.initialize(cylinders, mutex_cylinders,
                                         self.__input_queue, sh_buffers, mutex_buffers)
        dispatcher_controller.start()
        # Deliverer: moves cars from the deliver queue to the exit slots panel.
        robotic_deliverer_controller = Robotic_Deliverer.RoboticDeliverer()
        robotic_deliverer_controller.initialize(deliver_queue, parking_slot,
                                                mutex_parking_slot)
        self.__parking_slot = ShHan.SharedHandler(parking_slot, mutex_parking_slot)
        self.__parking_slot_UI = ParkingSlotsUI(self.__parking_slot)
        QtCore.QObject.connect(robotic_deliverer_controller, QtCore.SIGNAL('update(int, QString, int)'),
                               self.__parking_slot_UI.updateSlot)
        robotic_deliverer_controller.start()
        # One robotic hand per cylinder, started with a 1 s stagger.
        for i in range(qtty_cylinders):
            hand_controller = Robotic_Hand.RoboticHand(i, levels, columns)
            hand_controller.initialize(cylinders[i], mutex_cylinders[i], sh_buffers[i],
                                       mutex_buffers[i], sh_alarms[i], mutex_alarms[i], deliver_queue)
            QtCore.QObject.connect(hand_controller, QtCore.SIGNAL('update(int, int, int, QString, int, int)'), self.updateUI)
            hand_controller.start()
            time.sleep(1)
        # NOTE (translated from Spanish): the connect above may be fragile since
        # updateSlot lives inside the parking-slot UI class; if it fails, the
        # method should return col and lvl for the visualisation.
        self.__cylinders = [ShHan.SharedHandler(cylinders[i], mutex_cylinders[i]) for i in range(len(cylinders))]
        self.init_ui()
    def init_ui(self):
        """Lay out the window: one widget per cylinder plus the slot panel."""
        self.resize(600, 600)
        self.center()
        self.setWindowTitle('Parking')
        self.setWindowIcon(QtGui.QIcon('Logo.png'))
        self.create_menu()
        self.create_toolbar()
        # main layout
        main_layout = QtGui.QHBoxLayout()
        # cylinder_layout = QtGui.QHBoxLayout()
        # 'Error' is shown here if any cylinder has a problem (translated).
        self.statusBar().showMessage('Normal')
        for i in self.__cylinders:
            cylinder = i.data
            cylinderUI = CylinderUI(cylinder)
            main_layout.addWidget(cylinderUI)
            self.cylindersUI.append(cylinderUI)
            i.data = cylinder
        #main_layout.addLayout(cylinder_layout)
        #main_layout.addWidget(ParkingSlotsUI(self.__parking_slot))
        self.__parking_slot_UI.setMaximumWidth(100)
        main_layout.addWidget(self.__parking_slot_UI)
        # central widget
        central_widget = QtGui.QWidget()
        central_widget.setLayout(main_layout)
        self.setCentralWidget(central_widget)
        self.show()
    def create_menu(self):
        """Build the File and Simulate menus with their actions."""
        file_menu = self.menuBar().addMenu('&File')
        exit_action = QtGui.QAction(QtGui.QIcon('Exit.png'), '&Exit', self)
        exit_action.setShortcut('Ctrl+Q')
        exit_action.setStatusTip('Exit application')
        exit_action.triggered.connect(QtGui.qApp.quit)
        file_menu.addAction(exit_action)
        simulate_menu = self.menuBar().addMenu('&Simulate')
        alarm_action = QtGui.QAction(QtGui.QIcon('Warning.png'), 'Alarma Aleatoria', self)
        alarm_action.triggered.connect(self.createCustomAlarm)
        new_car_action = QtGui.QAction(QtGui.QIcon('Logo.png'), 'Estacionar Vehiculo', self)
        new_car_action.triggered.connect(self.addNewCar)
        withdraw_car_action = QtGui.QAction(QtGui.QIcon('Car.png'), 'Retirar Vehiculo', self)
        withdraw_car_action.triggered.connect(self.withdrawCar)
        simulate_menu.addAction(alarm_action)
        simulate_menu.addAction(new_car_action)
        simulate_menu.addAction(withdraw_car_action)
    def create_toolbar(self):
        """Build the Exit and Simulate toolbars (same actions as the menu)."""
        exit_action = QtGui.QAction(QtGui.QIcon('Exit.png'), 'Salir', self)
        exit_action.setShortcut('Ctrl+Q')
        exit_action.triggered.connect(QtGui.qApp.quit)
        alarm_action = QtGui.QAction(QtGui.QIcon('Warning.png'), 'Alarma Aleatoria', self)
        alarm_action.triggered.connect(self.createCustomAlarm)
        new_car_action = QtGui.QAction(QtGui.QIcon('Logo.png'), 'Estacionar Vehiculo', self)
        new_car_action.triggered.connect(self.addNewCar)
        withdraw_car_action= QtGui.QAction(QtGui.QIcon('Car.png'), 'Retirar Vehiculo', self)
        withdraw_car_action.triggered.connect(self.withdrawCar)
        exit_toolbar = self.addToolBar('Exit')
        exit_toolbar.addAction(exit_action)
        simulate_toolbar = self.addToolBar('Simulate')
        simulate_toolbar.addAction(alarm_action)
        simulate_toolbar.addAction(new_car_action)
        simulate_toolbar.addAction(withdraw_car_action)
    def center(self):
        """Centre the window on the available desktop geometry."""
        qr = self.frameGeometry()
        cp = QtGui.QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def createCustomAlarm(self):
        """Menu/toolbar slot: simulate a random alarm (currently only logs)."""
        # Show that the slot has been called.
        print("Creando un error aleatorio")
    def addNewCar(self):
        """Open the car form dialog that feeds the parking input queue."""
        print("Muestra pop up para agregar un nuevo auto")
        self.car_form = CarFormUI(self.__input_queue)
        self.car_form.resize(400, 200)
        self.car_form.move(150,150)
        self.car_form.show()
    def withdrawCar(self):
        """Open the withdraw dialog; its 'update' signal repaints the slot panel."""
        print("Muestra pop up para retirar un auto")
        self.withdraw_car_form = WithdrawFormUI(self.__parking_slot)
        self.withdraw_car_form.resize(400, 200)
        self.withdraw_car_form.move(150,150)
        QtCore.QObject.connect(self.withdraw_car_form, QtCore.SIGNAL('update(int, QString, int)'),
                               self.__parking_slot_UI.updateSlot)
        self.withdraw_car_form.show()
    def updateUI(self, cylinder, level, column, vehicle_patent, vehicle_weight, alarm):
        """Slot for the workers' 'update' signal: repaint one platform cell."""
        #print("Should update ui - cylinder %d level %d column %d patent %s"%(cylinder, level, column, vehicle_patent))
        self.cylindersUI[cylinder].updatePlatform(level, column, vehicle_patent, vehicle_weight, alarm)
class CylinderManager(BaseManager):
    """Multiprocessing manager that serves Common.Cylinder proxies."""
    pass
# Expose Common.Cylinder through the manager under the name "Cylinder".
CylinderManager.register("Cylinder", Common.Cylinder)
class ParkingSlotManager(BaseManager):
    """Multiprocessing manager that serves Common.ParkingSlots proxies."""
    pass
# Expose Common.ParkingSlots through the manager under the name "ParkingSlots".
ParkingSlotManager.register("ParkingSlots", Common.ParkingSlots)
def main():
    """Entry point: create the Qt application and show the parking window."""
    application = QtGui.QApplication(sys.argv)
    # Fixed demo configuration: 3 cylinders of 6 levels x 3 columns, 10 exit slots.
    num_levels = 6
    num_columns = 3
    num_cylinders = 3
    num_slots = 10
    # Keep a reference so the window object is not garbage-collected.
    window = ParkingUI(num_cylinders, num_levels, num_columns, num_slots)
    sys.exit(application.exec_())
if __name__ == '__main__':
main() | {
"repo_name": "Nebla/cylindricalParkingPrototype",
"path": "parking_app/application.py",
"copies": "1",
"size": "9061",
"license": "mit",
"hash": 5700877106391525000,
"line_mean": 36.601659751,
"line_max": 125,
"alpha_frac": 0.6618474782,
"autogenerated": false,
"ratio": 3.514740108611327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46765875868113266,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adria'
#!/usr/bin/python
from dataBase import *
import sys
sys.path.insert(0, '../model') #sino no deixa importar...
from owner import *
class UserLogin:
    def __init__(self, owner):
        """Bind the candidate owner and open the flat-file database."""
        self.owner = owner
        self.db = DataBase()
        self.registered = False  # whether the user has logged in yet (translated)
    def enterLogin(self):
        """Like the 'login' method, but asks for the user data to be written in the terminal"""
        self.askUserData()
        while True:
            result = self.login()
            if result == 1:
                # Wrong name/surname for an existing DNI: re-ask and retry.
                self.askUserData()
            elif result == 2:
                # Unknown DNI: offer to create the user (Enter defaults to yes).
                create = input("Would you like to create it?(Y/N): ")
                if create.lower() == "y" or create.lower() == "":
                    self.db.afegeixUsuari(self.owner.dni, self.owner.nombre, self.owner.apellidos)
                break
            else:
                # result == 0: logged in successfully.
                break
def askUserData(self):
"""Sets the self.owner information with the parameters the user writes on the terminal"""
while True:
print("Insert your personal information to log in:")
name = input("Name: ")
surname = input("Surname: ")
dni = input("DNI: ")
if name and surname and dni:
self.owner = Owner(dni, surname, name)
break
else:
print("Error, one or more of the fields is empty, write it again:\n")
def login(self, owner=None):
"""Checks if the user is on the database and logs in"""
result = 0
if owner is not None:
self.owner = owner
if self.userExists():
if self.checkUser():
self.registered = True
print("You have succesfully logged in\n")
else:
print("Error! name or surname incorrect\n")
result = 1
else:
print("Error, user with DNI "+self.owner.dni+" doesn't exist\n")
result = 2
return result
def llistaDNI(self):
"""Lists all DNI's"""
llista = []
llistacompleta = self.db.llistaUsers()
for user in llistacompleta:
llista.append(user[0])
return llista
def userExists(self, dni = None):
"""Checks if a user exists by searching the DNI in the database"""
if dni is None:
dni = self.owner.dni
exists = False
for dniactual in self.llistaDNI():
if dniactual == dni:
exists = True
return exists
def checkUser(self):
"""Checks if self.owner data is correct"""
result = False
for user in self.db.llistaUsers():
dni = user[0]
name = user[1]
surname = user[2]
if dni == self.owner.dni:
if name == self.owner.nombre and surname == self.owner.apellidos:
result = True
break
return result
    def isLogged(self):
        """Return True once login() has succeeded for this session."""
        return self.registered
def guardaUsuari(self, owner=None):
"""Saves owner to the database if it doesn't exist"""
if owner is None:
owner = self.owner
if self.userExists(owner.dni):
print("User with DNI '"+owner.dni+"' already exists!")
else:
result = self.db.afegeixUsuari(owner.dni, owner.nombre, owner.apellidos)
if result:
print("User "+owner.nombre+" added!")
else:
print("User could not be added")
def getIbanList(self):
"""Returns a list of the IBAN codes of the owners' accounts"""
llista = self.db.llistaComptes()
ibanList = []
for account in llista:
for user in account[3:]:
if user == self.owner.dni:
ibanList.append(account[0])
break
return ibanList
def getOwner(self):
return self.owner | {
"repo_name": "aramusss/contableplus",
"path": "controller/userLogin.py",
"copies": "1",
"size": "4041",
"license": "apache-2.0",
"hash": 7223837244720342000,
"line_mean": 32.6833333333,
"line_max": 98,
"alpha_frac": 0.5288294976,
"autogenerated": false,
"ratio": 4.024900398406374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5053729896006374,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adria'
#!/usr/bin/python
import os.path, random
class DataBase:
    """Flat-file persistence layer for users and bank accounts.

    Users are stored one per line as ``dni,nombre,apellidos``; accounts as
    ``iban,balance,currency,owner_dni[,owner_dni...]``.

    Fixes over the original: ``modificaCompta`` previously hard-coded
    ``'../database/comptes.txt'`` instead of honouring ``self.rutaComptes``;
    file handles are now closed via ``with``; builtins ``list``/``file`` are
    no longer shadowed; ``addOwnerAccount`` had a copy-pasted docstring.
    """

    def __init__(self, rutaUsers="../database/usuaris.txt", rutaComptes="../database/comptes.txt"):
        # Paths to the two backing text files.
        self.rutaUsers = rutaUsers
        self.rutaComptes = rutaComptes

    # --- User management --------------------------------------------------

    def creaUsers(self):
        """Create the users file if it does not exist yet."""
        if self.comprovaUsers():
            print("Users' file already exists!")
        else:
            open(self.rutaUsers, 'w').close()

    def comprovaUsers(self):
        """Return True if the users file exists."""
        return os.path.isfile(self.rutaUsers)

    def llegeixUsers(self):
        """Read the whole users file and return it as a string ('' if missing)."""
        resultat = ""
        if self.comprovaUsers():
            with open(self.rutaUsers, 'r') as f:
                resultat = f.read()
        else:
            print("Error! no s'ha trobat el fitxer d'usuaris")
        return resultat

    def llistaUsers(self):
        """Return every user as a [dni, nombre, apellidos] list."""
        llista = [linia.split(",") for linia in self.llegeixUsers().split("\n")]
        # Drop the empty entry produced by the trailing newline (or empty file).
        llista.pop()
        return llista

    def afegeixUsuari(self, dni, nombre, apellidos):
        """Append a user record; return True on success, False otherwise."""
        added = False
        if self.comprovaUsers():
            if dni and nombre and apellidos:
                with open(self.rutaUsers, 'a') as f:
                    f.write(dni+","+nombre+","+apellidos+"\n")
                added = True
            else:
                print("Error, a user can't have empty fields")
        else:
            print("Error! couldn't find users file")
        return added

    def esborraUsuari(self, dni):
        """Remove the user with the given DNI (prints an error if absent)."""
        if self.comprovaUsers():
            with open(self.rutaUsers, 'r') as f:
                llista = f.readlines()
            trobat = False
            # Rewrite the file keeping every line whose DNI does not match.
            with open(self.rutaUsers, 'w') as f:
                for linia in llista:
                    if linia.split(",")[0] != dni:
                        f.write(linia)
                    else:
                        trobat = True
            if not trobat:
                print("No s'ha trobat l'usuari!")
        else:
            print("Error! no s'ha trobat el fitxer")

    # --- Account management -----------------------------------------------

    def creaComptes(self):
        """Create the accounts file if it does not exist yet."""
        if self.comprovaComptes():
            print("Accounts file already exists!")
        else:
            open(self.rutaComptes, 'w').close()

    def comprovaComptes(self):
        """Return True if the accounts file (self.rutaComptes) exists."""
        return os.path.isfile(self.rutaComptes)

    def llegeixComptes(self):
        """Read the whole accounts file and return it as a string ('' if missing)."""
        resultat = ""
        if self.comprovaComptes():
            with open(self.rutaComptes, 'r') as f:
                resultat = f.read()
        else:
            print("Error! no s'ha trobat el fitxer de comptes")
        return resultat

    def llistaComptes(self):
        """Return every account as a [iban, balance, currency, owners...] list."""
        accounts = [linia.split(",") for linia in self.llegeixComptes().split("\n")]
        if len(accounts) > 0:
            # Drop the empty entry produced by the trailing newline.
            accounts.pop()
        return accounts

    def llistaIBAN(self):
        """Return the IBAN codes of all stored bank accounts."""
        return [account[0] for account in self.llistaComptes()]

    def afegeixCompta(self, balance, currency, *owners):
        """Add a new account with a freshly generated IBAN.

        All fields are mandatory and at least one owner DNI is required.
        Returns True on success.
        """
        iban = self.getRandomIban()  # creates a random, unused IBAN code
        added = False
        if self.comprovaComptes():
            if iban in self.llistaIBAN():
                # Defensive: getRandomIban already guarantees uniqueness.
                print("Error: bank account could not be added: IBAN code already exists")
            elif iban and balance and currency and (len(owners) > 0) and ("" not in owners) and (None not in owners):
                ownerslist = ",".join(owners)
                with open(self.rutaComptes, 'a') as f:
                    f.write(iban+","+balance+","+currency+","+ownerslist+"\n")
                added = True
            else:
                print("Error, a bank account can't have any empty fields")
        else:
            print("Error: could not find bank accounts' file")
        return added

    def modificaCompta(self, ibanInput, newAmount):
        """Add *newAmount* to the balance of the account with IBAN *ibanInput*.

        BUGFIX: the original hard-coded '../database/comptes.txt' here instead
        of using self.rutaComptes, breaking any DataBase built with custom paths.
        """
        accountList = []
        with open(self.rutaComptes, 'r') as accounts:
            for line in accounts:
                cuenta = line.split(",")
                if cuenta[0] == ibanInput:
                    cuenta[1] = str(float(cuenta[1]) + newAmount)
                    accountList.append(','.join(cuenta))
                else:
                    accountList.append(line)
        with open(self.rutaComptes, 'w') as accounts:
            for item in accountList:
                if item != '':
                    accounts.write(item)

    def esborraCompta(self, iban):
        """Remove the account with the selected IBAN code."""
        if self.comprovaComptes():
            with open(self.rutaComptes, 'r') as f:
                llista = f.readlines()
            trobat = False
            with open(self.rutaComptes, 'w') as f:
                for linia in llista:
                    if linia.split(",")[0] != iban:
                        f.write(linia)
                    else:
                        trobat = True
            if not trobat:
                print("Couldn't find account "+iban)
        else:
            print("Error! Accounts file doesn't exist")

    def getRandomIban(self):
        """Return a random 'ES' + 22-digit IBAN not already in the database."""
        iban = "ES" + "".join(str(random.randrange(10)) for _ in range(22))
        if iban in self.llistaIBAN():
            # Collision: try again recursively (astronomically unlikely).
            iban = self.getRandomIban()
        return iban

    def getAccount(self, iban):
        """Return the full account row for *iban*, or [] if it is unknown."""
        for account in self.llistaComptes():
            if account[0] == iban:
                return account
        return []

    def addOwnerAccount(self, iban, owner):
        """Append an owner DNI to the account with the given IBAN."""
        if self.comprovaComptes():
            with open(self.rutaComptes, 'r') as f:
                llista = f.readlines()
            trobat = False
            with open(self.rutaComptes, 'w') as f:
                for linia in llista:
                    if linia.split(",")[0] != iban:
                        f.write(linia)
                    else:
                        # Strip the trailing newline, append the owner, restore it.
                        f.write(linia[:len(linia)-1]+","+owner+"\n")
                        trobat = True
                        print("Owner added!")
            if not trobat:
                print("Couldn't find the account!")
        else:
            print("Error! File not found")
| {
"repo_name": "aramusss/contableplus",
"path": "controller/dataBase.py",
"copies": "1",
"size": "8241",
"license": "apache-2.0",
"hash": 4140672061878445600,
"line_mean": 34.9781659389,
"line_max": 119,
"alpha_frac": 0.5162034227,
"autogenerated": false,
"ratio": 3.9084440227703983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9910398924086428,
"avg_score": 0.0028497042767940345,
"num_lines": 229
} |
# import the necessary packages
import numpy as np
class Searcher:
    def __init__(self, index):
        """Store the search index: a dict mapping image ID -> feature vector."""
        # store our index of images
        self.index = index
def search(self, queryFeatures):
# initialize our dictionary of results
results = {}
# loop over the index
for (k, features) in self.index.items():
# compute the chi-squared distance between the features
# in our index and our query features -- using the
# chi-squared distance which is normally used in the
# computer vision field to compare histograms
d = self.chi2_distance(features, queryFeatures)
# now that we have the distance between the two feature
# vectors, we can udpate the results dictionary -- the
# key is the current image ID in the index and the
# value is the distance we just computed, representing
# how 'similar' the image in the index is to our query
results[k] = d
# sort our results, so that the smaller distances (i.e. the
# more relevant images are at the front of the list)
results = sorted([(v, k) for (k, v) in results.items()])
# return our results
return results
def chi2_distance(self, histA, histB, eps = 1e-10):
# compute the chi-squared distance
d = 0.5 * np.sum([((a - b) ** 2) / (a + b + eps)
for (a, b) in zip(histA, histB)])
# return the chi-squared distance
return d | {
"repo_name": "fffy2366/image-processing",
"path": "bin/python/pyimagesearch/searcher.py",
"copies": "3",
"size": "1417",
"license": "mit",
"hash": 7234219419886892000,
"line_mean": 30.5111111111,
"line_max": 61,
"alpha_frac": 0.691601976,
"autogenerated": false,
"ratio": 3.430992736077482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.025317295028322032,
"num_lines": 45
} |
# USAGE
# python index.py --dataset images --index index.cpickle
# import the necessary packages
from pyimagesearch.rgbhistogram import RGBHistogram
import argparse
import cPickle
import glob
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required = True,
	help = "Path to the directory that contains the images to be indexed")
ap.add_argument("-i", "--index", required = True,
	help = "Path to where the computed index will be stored")
args = vars(ap.parse_args())
# initialize the index dictionary to store our quantified
# images, with the 'key' of the dictionary being the image
# filename and the 'value' our computed features
index = {}
# initialize our image descriptor -- a 3D RGB histogram with
# 8 bins per channel
desc = RGBHistogram([8, 8, 8])
# use glob to grab the image paths and loop over them
for imagePath in glob.glob(args["dataset"] + "/*.jpg"):
	# extract our unique image ID (i.e. the filename)
	k = imagePath[imagePath.rfind("/") + 1:]
	# load the image, describe it using our RGB histogram
	# descriptor, and update the index
	image = cv2.imread(imagePath)
	features = desc.describe(image)
	index[k] = features
# we are now done indexing our image -- now we can write our
# index to disk
# NOTE(review): pickle data is binary; opening with "w" (text mode) is fragile
# on non-POSIX platforms -- consider "wb". (Python 2 script.)
f = open(args["index"], "w")
f.write(cPickle.dumps(index))
f.close()
# show how many images we indexed
print "done...indexed %d images" % (len(index)) | {
"repo_name": "fffy2366/image-processing",
"path": "bin/python/search_index.py",
"copies": "1",
"size": "1551",
"license": "mit",
"hash": -222828673452366000,
"line_mean": 30.04,
"line_max": 71,
"alpha_frac": 0.7272727273,
"autogenerated": false,
"ratio": 3.525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9710007986772415,
"avg_score": 0.008452948105516958,
"num_lines": 50
} |
# USAGE
# python search_external.py --dataset images --index index.cpickle --query queries/rivendell-query.png
# import the necessary packages
from pyimagesearch.rgbhistogram import RGBHistogram
from pyimagesearch.searcher import Searcher
import numpy as np
import argparse
import cPickle
import cv2
import time
from pyimagesearch import logger
# Build the shared logger used to record results below.
conf = logger.Logger()
# conf.debug('debug')
# conf.warn('tr-warn')
# conf.info('ds-info')
# conf.error('ss-error')
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="Path to the directory that contains the images we just indexed")
ap.add_argument("-i", "--index", required=True,
                help="Path to where we stored our index")
ap.add_argument("-q", "--query", required=True,
                help="Path to query image")
args = vars(ap.parse_args())
print 'start waiting:', time.strftime('%H:%M:%S')
# load the query image and show it
queryImage = cv2.imread(args["query"])
# cv2.imshow("Query", queryImage)
print "query: %s" % (args["query"])
# describe the query in the same way that we did in
# index.py -- a 3D RGB histogram with 8 bins per
# channel
desc = RGBHistogram([8, 8, 8])
queryFeatures = desc.describe(queryImage)
# load the index perform the search
index = cPickle.loads(open(args["index"]).read())
searcher = Searcher(index)
results = searcher.search(queryFeatures)
# initialize the two montages to display our results --
# we have a total of 25 images in the index, but let's only
# display the top 10 results; 5 images per montage, with
# images that are 400x166 pixels
w = 1024
h = 768
montageA = np.zeros((h * 5, w, 3), dtype="uint8")
montageB = np.zeros((h * 5, w, 3), dtype="uint8")
# loop over the results -- NOTE(review): the original comment said "top ten"
# but the loop actually scans up to 50 results, stopping early once the
# chi-squared distance exceeds 0.01 (i.e. only near-duplicates are kept).
for j in xrange(0, 50):
	# grab the result (we are using row-major order) and
	# load the result image
	(score, imageName) = results[j]
	if(score>0.01):
		break
	if(imageName==args["query"]):
		continue
	path = args["dataset"] + "/%s" % (imageName)
	result = cv2.imread(path)
	# NOTE(review): this rebinds h and w, clobbering the montage dimensions
	# chosen above (harmless only while the montage code stays commented out).
	(h, w) = result.shape[:2]
	print "\t%d. %s : %.3f" % (j + 1, imageName, score) # check to see if the first montage should be used
	# cv2.imshow("img", result)
	# cv2.waitKey(0)
	# if j < 5:
	# 	montageA[j * h:(j + 1) * h, :w] = result
	#
	# # otherwise, the second montage should be used
	# else:
	# 	montageB[(j - 5) * h:((j - 5) + 1) * h, :] = result
print 'stop waiting', time.strftime('%H:%M:%S')
# cv2.imshow("Results 6-10", result)
# cv2.waitKey(0) # show the results
# cv2.imshow("Results 1-5", montageA)
# # cv2.imshow("Results 6-10", montageB)
# cv2.waitKey(0)
| {
"repo_name": "fffy2366/image-processing",
"path": "bin/python/search_external.py",
"copies": "1",
"size": "2821",
"license": "mit",
"hash": -7159670178026951000,
"line_mean": 30.3444444444,
"line_max": 107,
"alpha_frac": 0.6621765331,
"autogenerated": false,
"ratio": 3.1484375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43106140331,
"avg_score": null,
"num_lines": null
} |
# USAGE
# python search_index_one.py --dataset ../../public/uploads/similar --index ../../public/uploads/similar.cpickle --file 1464318452058AFC4E73.jpg
# import the necessary packages
from pyimagesearch.rgbhistogram import RGBHistogram
import argparse
import cPickle
import glob
import cv2
import os
import sys
import datetime
from models.similar_images import SimilarImages
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--file", required=True,
	help="The file to be indexed")
ap.add_argument("-d", "--dataset", required=True,
	help="Path to the directory that contains the images to be indexed")
ap.add_argument("-i", "--index", required=True,
	help="Path to where the computed index will be stored")
args = vars(ap.parse_args())
# initialize the index dictionary to store our our quantifed
# images, with the 'key' of the dictionary being the image
# filename and the 'value' our computed features
# Legacy duplicate check against the pickle file, replaced by the DB lookup below:
# if(os.path.isfile(args["index"])):
#
# 	index = cPickle.loads(open(args["index"]).read())
# 	if(index.has_key(args["file"])):
# 		print "has exist"
# 		sys.exit(0)
# Skip files that are already present in the similar-images database.
# NOTE(review): prefer 'is not None' over '!=None'. (Python 2 script.)
s = SimilarImages()
i = s.findByName(args["file"])
if(i!=None):
	print "has exist"
	sys.exit(0)
index = {}
# initialize our image descriptor -- a 3D RGB histogram with
# 8 bins per channel
desc = RGBHistogram([8, 8, 8])
# load the image, describe it using our RGB histogram
# descriptor, and update the index
image = cv2.imread(args["dataset"] +"/"+ args["file"])
features = desc.describe(image)
index[args["file"]] = features
# we are now done indexing our image -- now we can write our
# index to disk
s.insert({'name':args["file"],'features':cPickle.dumps(features),'created_at':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
# f = open(args["index"], "a")
# f.write(cPickle.dumps(index))
# f.close()
# show how many images we indexed
#index = cPickle.loads(open(args["index"]).read())
print "done...add indexed %d images" % (len(index))
| {
"repo_name": "fffy2366/image-processing",
"path": "bin/python/search_index_one.py",
"copies": "1",
"size": "2159",
"license": "mit",
"hash": -2060498738231572700,
"line_mean": 28.1756756757,
"line_max": 144,
"alpha_frac": 0.6873552571,
"autogenerated": false,
"ratio": 3.4324324324324325,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.46197876895324325,
"avg_score": null,
"num_lines": null
} |
# USAGE
# python search.py --dataset images --index index.cpickle
# import the necessary packages
from pyimagesearch.searcher import Searcher
import numpy as np
import argparse
import cPickle
import cv2
import time
from pyimagesearch import logger
# Shared logger used to record each query's matches below.
conf = logger.Logger()
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="Path to the directory that contains the images we just indexed")
ap.add_argument("-i", "--index", required=True,
                help="Path to where we stored our index")
args = vars(ap.parse_args())
print 'start waiting:', time.strftime('%H:%M:%S')
# load the index and initialize our searcher
index = cPickle.loads(open(args["index"]).read())
searcher = Searcher(index)
# loop over images in the index -- we will use each one as
# a query image
for (query, queryFeatures) in index.items():
    # perform the search using the current query
    results = searcher.search(queryFeatures)
    # load the query image and display it
    path = args["dataset"] + "/%s" % (query)
    queryImage = cv2.imread(path)
    # cv2.imshow("Query", queryImage)
    print "query: %s" % (query)
    # # initialize the two montages to display our results --
    # # we have a total of 25 images in the index, but let's only
    # # display the top 10 results; 5 images per montage, with
    # # images that are 400x166 pixels
    # montageA = np.zeros((166 * 5, 400, 3), dtype = "uint8")
    # montageB = np.zeros((166 * 5, 400, 3), dtype = "uint8")
    # loop over the top ten results, stopping early once the chi-squared
    # distance exceeds 0.01 (only near-duplicates), and skipping self-matches
    for j in xrange(0, 10):
        # grab the result (we are using row-major order) and
        # load the result image
        (score, imageName) = results[j]
        if (score > 0.01):
            break
        if(imageName==query):
            continue
        path = args["dataset"] + "/%s" % (imageName)
        result = cv2.imread(path)
        # NOTE(review): 'str' shadows the built-in str for the rest of the loop.
        str = "\t%d. %s : %.3f" % (j + 1, imageName, score)
        print str
        conf.info(query)
        conf.info(str)
        # # check to see if the first montage should be used
        # if j < 5:
        #     montageA[j * 166:(j + 1) * 166, :] = result
        #
        # # otherwise, the second montage should be used
        # else:
        #     montageB[(j - 5) * 166:((j - 5) + 1) * 166, :] = result
    # show the results
    # cv2.imshow("Results 1-5", montageA)
    # cv2.imshow("Results 6-10", montageB)
    # cv2.waitKey(0)
    print 'stop waiting query', time.strftime('%H:%M:%S')
print 'stop waiting', time.strftime('%H:%M:%S')
| {
"repo_name": "fffy2366/image-processing",
"path": "bin/python/search.py",
"copies": "1",
"size": "2710",
"license": "mit",
"hash": 2786086098621288000,
"line_mean": 31.2619047619,
"line_max": 86,
"alpha_frac": 0.6180811808,
"autogenerated": false,
"ratio": 3.465473145780051,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45835543265800505,
"avg_score": null,
"num_lines": null
} |
# import the necessary packages
from __future__ import print_function
import imutils
import cv2
# print the current OpenCV version on your system
print("Your OpenCV version: {}".format(cv2.__version__))
# check to see if you are using OpenCV 2.X
print("Are you using OpenCV 2.X? {}".format(imutils.is_cv2()))
# check to see if you are using OpenCV 3.X
print("Are you using OpenCV 3.X? {}".format(imutils.is_cv3(or_better=False)))
# check to see if you are using OpenCV 4.X
print("Are you using OpenCV 4.X? {}".format(imutils.is_cv4(or_better=False)))
# check to see if you are using *at least* OpenCV 2.X
print("Are you using at least OpenCV 2.X? {}".format(imutils.is_cv2(or_better=True)))
# check to see if you are using *at least* OpenCV 3.X
print("Are you using at least OpenCV 3.X? {}".format(imutils.is_cv3(or_better=True)))
# check to see if you are using *at least* OpenCV 4.X
# NOTE(review): the message says "at least OpenCV 4.X" but or_better=False is
# passed, so this actually tests for exactly 4.X -- probably meant or_better=True.
print("Are you using at least OpenCV 4.X? {}".format(imutils.is_cv4(or_better=False)))
# should throw a deprecation warning
print("Checking for OpenCV 3: {}".format(imutils.check_opencv_version("3"))) | {
"repo_name": "jrosebr1/imutils",
"path": "demos/opencv_versions.py",
"copies": "1",
"size": "1166",
"license": "mit",
"hash": 8573881915434662000,
"line_mean": 36.6451612903,
"line_max": 86,
"alpha_frac": 0.7152658662,
"autogenerated": false,
"ratio": 2.9974293059125965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42126951721125966,
"avg_score": null,
"num_lines": null
} |
# import the necessary packages
from scipy.spatial import distance as dist
import numpy as np
import cv2
def order_points(pts):
    """Order four 2-D points as [top-left, top-right, bottom-right, bottom-left].

    The two smallest x-coordinates form the left pair and the two largest the
    right pair. Within the left pair the smaller y is the top-left; the right
    point farthest (Euclidean) from the top-left is the bottom-right. This is
    robust where a naive sum/diff heuristic can fail on rotated rectangles.
    """
    # Split into the two left-most and two right-most points by x-coordinate.
    by_x = pts[np.argsort(pts[:, 0]), :]
    left_pair = by_x[:2, :]
    right_pair = by_x[2:, :]
    # Within the left pair, the smaller y-coordinate is the top-left corner.
    left_pair = left_pair[np.argsort(left_pair[:, 1]), :]
    (tl, bl) = left_pair
    # The right-most point farthest from the top-left anchor (Pythagorean
    # theorem) must be the bottom-right; the other one is the top-right.
    distances = dist.cdist(tl[np.newaxis], right_pair, "euclidean")[0]
    (br, tr) = right_pair[np.argsort(distances)[::-1], :]
    return np.array([tl, tr, br, bl], dtype="float32")
def four_point_transform(image, pts):
    """Warp the quadrilateral ``pts`` in ``image`` to a flat, top-down
    ("birds eye") rectangular view.

    The output size is taken from the longer edge of each opposing pair
    of sides of the quadrilateral.
    """
    # canonicalize the corner ordering: tl, tr, br, bl
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # width of the output: the longer of the bottom and top edges
    w_bottom = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    w_top = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(w_bottom), int(w_top))
    # height of the output: the longer of the right and left edges
    h_right = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    h_left = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(h_right), int(h_left))
    # destination corners, in the same tl, tr, br, bl ordering
    dst = np.array(
        [[0, 0],
         [maxWidth - 1, 0],
         [maxWidth - 1, maxHeight - 1],
         [0, maxHeight - 1]], dtype="float32")
    # solve for the perspective matrix and resample the image through it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    # the caller receives the flattened, top-down view
return warped | {
"repo_name": "jrosebr1/imutils",
"path": "imutils/perspective.py",
"copies": "2",
"size": "2785",
"license": "mit",
"hash": -8267604286048441000,
"line_mean": 37.6944444444,
"line_max": 70,
"alpha_frac": 0.6427289048,
"autogenerated": false,
"ratio": 3.355421686746988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49981505915469876,
"avg_score": null,
"num_lines": null
} |
# import the necessary packages
import cv2
def sort_contours(cnts, method="left-to-right"):
    """Sort contours by bounding-box position.

    ``method`` is one of "left-to-right", "right-to-left",
    "top-to-bottom" or "bottom-to-top".  Returns a tuple of the sorted
    contours and their matching bounding boxes.
    """
    # the two "reverse" orderings sort descending
    descending = method == "right-to-left" or method == "bottom-to-top"
    # vertical orderings compare on y (box index 1), others on x (index 0)
    axis = 1 if method == "top-to-bottom" or method == "bottom-to-top" else 0

    # pair each contour with its bounding box, then sort the pairs on
    # the chosen bounding-box coordinate
    boxes = [cv2.boundingRect(c) for c in cnts]
    ordered = sorted(zip(cnts, boxes), key=lambda pair: pair[1][axis],
                     reverse=descending)
    (cnts, boundingBoxes) = zip(*ordered)

    return cnts, boundingBoxes
def label_contour(image, c, i, color=(0, 255, 0), thickness=2):
    """Draw contour ``c`` on ``image`` and stamp its 1-based index near
    the contour centroid.  Returns the annotated image."""
    # centroid from the spatial moments: (m10/m00, m01/m00)
    moments = cv2.moments(c)
    center_x = int(moments["m10"] / moments["m00"])
    center_y = int(moments["m01"] / moments["m00"])
    # outline the contour, then write the label just left of the centroid
    cv2.drawContours(image, [c], -1, color, thickness)
    cv2.putText(image, "#{}".format(i + 1), (center_x - 20, center_y),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
    return image
| {
"repo_name": "zhanggyb/imutils",
"path": "imutils/contours.py",
"copies": "3",
"size": "1505",
"license": "mit",
"hash": -444586085024175300,
"line_mean": 32.4444444444,
"line_max": 84,
"alpha_frac": 0.6279069767,
"autogenerated": false,
"ratio": 3.4049773755656108,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5532884352265611,
"avg_score": null,
"num_lines": null
} |
# import the necessary packages
import numpy as np
import cv2
import sys
# import any special Python 2.7 packages
if sys.version_info.major == 2:
from urllib import urlopen
# import any special Python 3 packages
elif sys.version_info.major == 3:
from urllib.request import urlopen
def translate(image, x, y):
    """Shift ``image`` by ``x`` pixels horizontally and ``y`` pixels
    vertically (positive values move right/down)."""
    # 2x3 affine matrix encoding a pure translation
    matrix = np.float32([[1, 0, x], [0, 1, y]])
    # warp with the output sized to the input so content simply shifts
    return cv2.warpAffine(image, matrix, (image.shape[1], image.shape[0]))
def rotate(image, angle, center=None, scale=1.0):
    """Rotate ``image`` by ``angle`` degrees about ``center`` (the image
    center when omitted), optionally scaling by ``scale``."""
    (h, w) = image.shape[:2]
    # fall back to the geometric center when no pivot is supplied
    if center is None:
        center = (w / 2, h / 2)
    # build the 2x3 rotation matrix and warp, keeping the original size
    matrix = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(image, matrix, (w, h))
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize ``image`` to the requested ``width`` or ``height`` while
    preserving the aspect ratio.

    If neither dimension is given the image is returned untouched; if
    both are given, ``width`` takes precedence.
    """
    (h, w) = image.shape[:2]

    # nothing requested -> nothing to do
    if width is None and height is None:
        return image

    if width is None:
        # scale the width to match the requested height
        ratio = height / float(h)
        dim = (int(w * ratio), height)
    else:
        # scale the height to match the requested width
        ratio = width / float(w)
        dim = (width, int(h * ratio))

    return cv2.resize(image, dim, interpolation=inter)
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    """Skeletonize ``image`` by iterative morphological erosion.

    ``size`` is the structuring-element size tuple (e.g. ``(3, 3)``) and
    ``structuring`` the element shape.  Returns the skeleton as a uint8
    image of the same shape.
    """
    # output accumulator and the morphological structuring element
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)

    # keep looping until the erosions remove all pixels from the image
    while True:
        # erode, then dilate the eroded image (a morphological opening)
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)
        # the pixels the opening removed belong to the skeleton
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()

        # stop once no 'white' pixels remain.  (The previous test,
        # `area == area - countNonZero(image)`, was exactly this
        # condition stated indirectly; the precomputed total pixel
        # count it used is not needed.)
        if cv2.countNonZero(image) == 0:
            break

    return skeleton
def opencv2matplotlib(image):
    """Convert ``image`` from OpenCV's BGR channel order to the RGB
    order that Matplotlib expects."""
    # a single color-space conversion is all that is needed
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return rgb
def url_to_image(url, readFlag=cv2.IMREAD_COLOR):
    """Download the image at ``url`` and decode it into OpenCV format
    using ``readFlag``."""
    # fetch the raw bytes, view them as a uint8 buffer, then decode
    response = urlopen(url)
    raw = np.asarray(bytearray(response.read()), dtype="uint8")
    decoded = cv2.imdecode(raw, readFlag)
    return decoded
def auto_canny(image, sigma=0.33):
    """Apply Canny edge detection with automatically derived thresholds.

    The lower and upper thresholds are placed ``sigma`` percent below
    and above the image's median intensity, clamped to [0, 255].
    """
    med = np.median(image)
    # derive a threshold band around the median, clamped to valid range
    low_thresh = int(max(0, (1.0 - sigma) * med))
    high_thresh = int(min(255, (1.0 + sigma) * med))
    return cv2.Canny(image, low_thresh, high_thresh)
def is_cv2():
    """Return True when the installed OpenCV is a 2.x release."""
    # cv2.__version__ starts with '2.' on OpenCV 2
    major_prefix = "2."
    return check_opencv_version(major_prefix)
def is_cv3():
    """Return True when the installed OpenCV is a 3.x release."""
    # cv2.__version__ starts with '3.' on OpenCV 3
    major_prefix = "3."
    return check_opencv_version(major_prefix)
def check_opencv_version(major, lib=None):
    """Return True when ``lib.__version__`` starts with ``major``.

    ``lib`` defaults to the installed ``cv2`` module; pass any
    module-like object carrying ``__version__`` to test it instead.
    """
    # resolve the default lazily so cv2 is only imported when needed
    if lib is None:
        import cv2 as lib
    return lib.__version__.startswith(major)
"repo_name": "xuanhan863/imutils",
"path": "imutils/convenience.py",
"copies": "1",
"size": "4565",
"license": "mit",
"hash": -279873061204399400,
"line_mean": 30.0612244898,
"line_max": 72,
"alpha_frac": 0.6556407448,
"autogenerated": false,
"ratio": 3.637450199203187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4793090944003187,
"avg_score": null,
"num_lines": null
} |
# import the necessary packages
import numpy as np
import cv2
def order_points(pts):
    """Order four (x, y) points as [top-left, top-right, bottom-right,
    bottom-left].

    Uses the classic sum/difference trick: the coordinate sum is
    smallest at the top-left and largest at the bottom-right, while the
    difference (y - x) is smallest at the top-right and largest at the
    bottom-left.
    """
    ordered = np.zeros((4, 2), dtype="float32")

    # corner sums pick out the top-left (min) and bottom-right (max)
    sums = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(sums)]
    ordered[2] = pts[np.argmax(sums)]

    # coordinate differences pick out top-right (min) and bottom-left (max)
    diffs = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(diffs)]
    ordered[3] = pts[np.argmax(diffs)]

    return ordered
def four_point_transform(image, pts):
    """Perspective-warp the quadrilateral ``pts`` in ``image`` to a
    top-down rectangle sized by its longest opposing edges."""
    # canonicalize corner order: tl, tr, br, bl
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # edge lengths determine the output size; take the larger of each
    # opposing pair so no content is squeezed
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    out_w = max(int(widthA), int(widthB))
    out_h = max(int(heightA), int(heightB))

    # target corners of the flattened view, same tl/tr/br/bl ordering
    dst = np.array(
        [[0, 0],
         [out_w - 1, 0],
         [out_w - 1, out_h - 1],
         [0, out_h - 1]], dtype="float32")

    # compute the homography and resample the image through it
    transform = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, transform, (out_w, out_h))
| {
"repo_name": "zhanggyb/imutils",
"path": "imutils/perspective.py",
"copies": "3",
"size": "2554",
"license": "mit",
"hash": -99463562443379180,
"line_mean": 37.1194029851,
"line_max": 70,
"alpha_frac": 0.6405638215,
"autogenerated": false,
"ratio": 3.338562091503268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5479125913003268,
"avg_score": null,
"num_lines": null
} |
# import the necessary packages
import numpy as np
import urllib
import cv2
def translate(image, x, y):
    """Translate ``image`` by (x, y) pixels."""
    (rows, cols) = image.shape[:2]
    # affine matrix for a pure shift: identity rotation plus offsets
    shift = np.float32([[1, 0, x], [0, 1, y]])
    shifted = cv2.warpAffine(image, shift, (cols, rows))
    return shifted
def rotate(image, angle, center=None, scale=1.0):
    """Rotate ``image`` by ``angle`` degrees around ``center`` (the
    image center by default), scaling by ``scale``."""
    (h, w) = image.shape[:2]
    # default pivot: the middle of the image
    pivot = (w / 2, h / 2) if center is None else center
    # build the rotation matrix and warp, keeping the original size
    rotated = cv2.warpAffine(
        image, cv2.getRotationMatrix2D(pivot, angle, scale), (w, h))
    return rotated
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Aspect-preserving resize to the requested ``width`` or ``height``.

    Returns the original image untouched when neither target is given;
    when both are given, ``width`` takes precedence.
    """
    (h, w) = image.shape[:2]
    if width is None and height is None:
        # no target dimension requested
        return image
    if width is not None:
        # fit to the requested width, scaling the height to match
        dim = (width, int(h * (width / float(w))))
    else:
        # fit to the requested height, scaling the width to match
        dim = (int(w * (height / float(h))), height)
    return cv2.resize(image, dim, interpolation=inter)
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    """Iteratively erode a binary image down to its morphological
    skeleton, returned as a uint8 image of the same shape."""
    # total pixel count, output accumulator, and structuring element
    area = image.shape[0] * image.shape[1]
    skeleton = np.zeros(image.shape, dtype="uint8")
    kernel = cv2.getStructuringElement(structuring, size)

    # keep eroding until no foreground pixels survive
    while True:
        eroded = cv2.erode(image, kernel)
        reopened = cv2.dilate(eroded, kernel)
        # the pixels lost by this opening pass belong to the skeleton
        skeleton = cv2.bitwise_or(skeleton, cv2.subtract(image, reopened))
        image = eroded.copy()
        # area == area - countNonZero holds exactly when the count is zero
        if area == area - cv2.countNonZero(image):
            break

    return skeleton
def opencv2matplotlib(image):
    """Return a copy of ``image`` with channels reordered from BGR
    (OpenCV's convention) to RGB (Matplotlib's convention)."""
    # cv2 stores channels as BGR while plt.imshow assumes RGB
    converted = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return converted
def url_to_image(url, readFlag=cv2.IMREAD_COLOR):
    """Download ``url`` and decode the response body as an OpenCV image.

    Works on both Python 2 and Python 3: ``urllib.urlopen`` was removed
    in Python 3 (it moved to ``urllib.request.urlopen``), so the opener
    is resolved locally instead of relying on the Python-2-only module
    attribute -- mirroring the version-aware import used by the other
    copy of this module.
    """
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib import urlopen  # Python 2
    # fetch the raw bytes and wrap them in a NumPy uint8 buffer
    resp = urlopen(url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    # decode the buffer into an image with the requested flag
    image = cv2.imdecode(image, readFlag)
    return image
def auto_canny(image, sigma=0.33):
    """Canny edge detection with data-driven thresholds.

    The lower/upper thresholds are set ``sigma`` percent below/above the
    median pixel intensity, clamped to [0, 255].
    """
    med = np.median(image)
    lower, upper = int(max(0, (1.0 - sigma) * med)), int(min(255, (1.0 + sigma) * med))
    # run Canny with the derived band
    edged = cv2.Canny(image, lower, upper)
    return edged
def is_cv2():
    """Report whether the installed OpenCV is version 2.x (i.e. its
    ``__version__`` string begins with '2.')."""
    version_prefix = "2."
    return check_opencv_version(version_prefix)
def is_cv3():
    """Report whether the installed OpenCV is version 3.x (i.e. its
    ``__version__`` string begins with '3.')."""
    version_prefix = "3."
    return check_opencv_version(version_prefix)
def check_opencv_version(major, lib=None):
    """Check whether the OpenCV version string begins with ``major``.

    Pass ``lib`` to test an arbitrary module-like object carrying a
    ``__version__`` attribute; by default the installed ``cv2`` module
    is imported and inspected.
    """
    target = lib
    if target is None:
        # import the default lazily so the cost is paid only when used
        import cv2 as target
    return target.__version__.startswith(major)
"repo_name": "PanTomaszRoszczynialski/imutils",
"path": "imutils/convenience.py",
"copies": "2",
"size": "4357",
"license": "mit",
"hash": 7126762098372383000,
"line_mean": 30.3525179856,
"line_max": 72,
"alpha_frac": 0.65182465,
"autogenerated": false,
"ratio": 3.6278101582014988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5279634808201499,
"avg_score": null,
"num_lines": null
} |
# USAGE
# BE SURE TO INSTALL 'imutils' PRIOR TO EXECUTING THIS COMMAND
# python fps_demo.py
# python fps_demo.py --display 1
# Benchmarks webcam throughput twice: first with the blocking
# cv2.VideoCapture read loop, then with imutils' threaded VideoStream,
# so the two FPS figures can be compared directly.
# import the necessary packages
from __future__ import print_function
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=100,
	help="# of frames to loop over for FPS test")
ap.add_argument("-d", "--display", type=int, default=-1,
	help="Whether or not frames should be displayed")
args = vars(ap.parse_args())
# grab a pointer to the video stream and initialize the FPS counter
print("[INFO] sampling frames from webcam...")
stream = cv2.VideoCapture(0)
fps = FPS().start()
# loop over some frames
# NOTE(review): this reads the private FPS._numFrames counter -- confirm
# imutils exposes no public accessor for it
while fps._numFrames < args["num_frames"]:
	# grab the frame from the stream and resize it to have a maximum
	# width of 400 pixels
	(grabbed, frame) = stream.read()
	frame = imutils.resize(frame, width=400)
	# check to see if the frame should be displayed to our screen
	if args["display"] > 0:
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF
	# update the FPS counter
	fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
stream.release()
cv2.destroyAllWindows()
# created a *threaded *video stream, allow the camera sensor to warmup,
# and start the FPS counter
print("[INFO] sampling THREADED frames from webcam...")
vs = VideoStream(src=0).start()
fps = FPS().start()
# loop over some frames...this time using the threaded stream
while fps._numFrames < args["num_frames"]:
	# grab the frame from the threaded video stream and resize it
	# to have a maximum width of 400 pixels
	frame = vs.read()
	frame = imutils.resize(frame, width=400)
	# check to see if the frame should be displayed to our screen
	if args["display"] > 0:
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF
	# update the FPS counter
	fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop() | {
"repo_name": "jrosebr1/imutils",
"path": "demos/fps_demo.py",
"copies": "1",
"size": "2426",
"license": "mit",
"hash": -2168471019115952600,
"line_mean": 28.5975609756,
"line_max": 70,
"alpha_frac": 0.7135201979,
"autogenerated": false,
"ratio": 3.2005277044854883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44140479023854884,
"avg_score": null,
"num_lines": null
} |
# USAGE
# BE SURE TO INSTALL 'imutils' PRIOR TO EXECUTING THIS COMMAND
# python image_basics.py
# Walkthrough of the core imutils helpers (translate, rotate, resize,
# skeletonize, opencv2matplotlib, url_to_image, auto_canny) applied to
# the bundled demo images; press any key to advance between sections.
# import the necessary packages
import matplotlib.pyplot as plt
import imutils
import cv2
# load the example images
bridge = cv2.imread("../demo_images/bridge.jpg")
cactus = cv2.imread("../demo_images/cactus.jpg")
logo = cv2.imread("../demo_images/pyimagesearch_logo.jpg")
workspace = cv2.imread("../demo_images/workspace.jpg")
# 1. TRANSLATION
# show the original image
cv2.imshow("Original", workspace)
# translate the image x-50 pixels to the left and y=100 pixels down
translated = imutils.translate(workspace, -50, 100)
cv2.imshow("Translated", translated)
cv2.waitKey(0)
# translate the image x=25 pixels to the right and y=75 pixels up
translated = imutils.translate(workspace, 25, -75)
cv2.imshow("Translated", translated)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 2. ROTATION
# loop over the angles to rotate the image
for angle in range(0, 360, 90):
	# rotate the image and display it
	rotated = imutils.rotate(bridge, angle=angle)
	cv2.imshow("Angle=%d" % (angle), rotated)
# wait for a keypress, then close all the windows
cv2.waitKey(0)
cv2.destroyAllWindows()
# 3. RESIZING
# loop over varying widths to resize the image to
for width in (400, 300, 200, 100):
	# resize the image and display it
	resized = imutils.resize(workspace, width=width)
	cv2.imshow("Width=%dpx" % (width), resized)
# wait for a keypress, then close all the windows
cv2.waitKey(0)
cv2.destroyAllWindows()
# 4. SKELETONIZATION
# skeletonize the image using a 3x3 kernel
cv2.imshow("Original", logo)
gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
skeleton = imutils.skeletonize(gray, size=(3, 3))
cv2.imshow("Skeleton", skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 5. MATPLOTLIB
# INCORRECT: show the image without converting color spaces
plt.figure("Incorrect")
plt.imshow(cactus)
# CORRECT: convert color spaces before using plt.imshow
plt.figure("Correct")
plt.imshow(imutils.opencv2matplotlib(cactus))
plt.show()
# 6. URL TO IMAGE
# load an image from a URL, convert it to OpenCV, format, and
# display it (requires network access)
url = "http://pyimagesearch.com/static/pyimagesearch_logo_github.png"
logo = imutils.url_to_image(url)
cv2.imshow("URL to Image", logo)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 7. AUTO CANNY
# convert the logo to grayscale and automatically detect edges
# NOTE(review): 'logo' was rebound to the downloaded image in section 6,
# so this section runs on that image rather than the local logo file --
# confirm that is intended
gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
edgeMap = imutils.auto_canny(gray)
cv2.imshow("Original", logo)
cv2.imshow("Automatic Edge Map", edgeMap)
cv2.waitKey(0)
| {
"repo_name": "jrosebr1/imutils",
"path": "demos/image_basics.py",
"copies": "2",
"size": "2621",
"license": "mit",
"hash": 3774586243874028000,
"line_mean": 28.1222222222,
"line_max": 69,
"alpha_frac": 0.7428462419,
"autogenerated": false,
"ratio": 2.9549041713641486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9697750413264149,
"avg_score": 0,
"num_lines": 90
} |
# USAGE
# BE SURE TO INSTALL 'imutils' PRIOR TO EXECUTING THIS COMMAND
# python picamera_fps_demo.py
# python picamera_fps_demo.py --display 1
# Benchmarks Raspberry Pi camera throughput twice: first with the
# blocking picamera capture_continuous API, then with imutils' threaded
# VideoStream, so the two FPS figures can be compared directly.
# import the necessary packages
from __future__ import print_function
from imutils.video import VideoStream
from imutils.video import FPS
from picamera.array import PiRGBArray
from picamera import PiCamera
import argparse
import imutils
import time
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=100,
	help="# of frames to loop over for FPS test")
ap.add_argument("-d", "--display", type=int, default=-1,
	help="Whether or not frames should be displayed")
args = vars(ap.parse_args())
# initialize the camera and stream
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(320, 240))
stream = camera.capture_continuous(rawCapture, format="bgr",
	use_video_port=True)
# allow the camera to warmup and start the FPS counter
print("[INFO] sampling frames from `picamera` module...")
time.sleep(2.0)
fps = FPS().start()
# loop over some frames
for (i, f) in enumerate(stream):
	# grab the frame from the stream and resize it to have a maximum
	# width of 400 pixels
	frame = f.array
	frame = imutils.resize(frame, width=400)
	# check to see if the frame should be displayed to our screen
	if args["display"] > 0:
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF
	# clear the stream in preparation for the next frame and update
	# the FPS counter
	rawCapture.truncate(0)
	fps.update()
	# check to see if the desired number of frames have been reached
	if i == args["num_frames"]:
		break
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
stream.close()
rawCapture.close()
camera.close()
# created a *threaded *video stream, allow the camera sensor to warmup,
# and start the FPS counter
print("[INFO] sampling THREADED frames from `picamera` module...")
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
fps = FPS().start()
# loop over some frames...this time using the threaded stream
# NOTE(review): reads the private FPS._numFrames counter -- confirm no
# public accessor exists
while fps._numFrames < args["num_frames"]:
	# grab the frame from the threaded video stream and resize it
	# to have a maximum width of 400 pixels
	frame = vs.read()
	frame = imutils.resize(frame, width=400)
	# check to see if the frame should be displayed to our screen
	if args["display"] > 0:
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF
	# update the FPS counter
	fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop() | {
"repo_name": "jrosebr1/imutils",
"path": "demos/picamera_fps_demo.py",
"copies": "1",
"size": "2980",
"license": "mit",
"hash": -895183849479864700,
"line_mean": 28.2254901961,
"line_max": 71,
"alpha_frac": 0.7231543624,
"autogenerated": false,
"ratio": 3.225108225108225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9386754239307127,
"avg_score": 0.012301669640219646,
"num_lines": 102
} |
# USAGE
# BE SURE TO INSTALL 'imutils' PRIOR TO EXECUTING THIS COMMAND
# python sorting_contours.py
# Demonstrates imutils.contours.sort_contours by labeling the shapes in
# the demo image under each of the four supported sort orders.
# import the necessary packages
from imutils import contours
import imutils
import cv2
# load the shapes image clone it, convert it to grayscale, and
# detect edges in the image
image = cv2.imread("../demo_images/shapes.png")
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edged = imutils.auto_canny(gray)
# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
	cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the (unsorted) contours and label them
for (i, c) in enumerate(cnts):
	orig = contours.label_contour(orig, c, i, color=(240, 0, 159))
# show the original image
cv2.imshow("Original", orig)
# loop over the sorting methods
for method in ("left-to-right", "right-to-left", "top-to-bottom", "bottom-to-top"):
	# sort the contours (note: rebinds cnts to the sorted tuple, which
	# is then re-sorted on the next iteration)
	(cnts, boundingBoxes) = contours.sort_contours(cnts, method=method)
	clone = image.copy()
	# loop over the sorted contours and label them
	for (i, c) in enumerate(cnts):
		sortedImage = contours.label_contour(clone, c, i, color=(240, 0, 159))
	# show the sorted contour image
	cv2.imshow(method, sortedImage)
	# wait for a keypress
	cv2.waitKey(0)
| {
"repo_name": "jrosebr1/imutils",
"path": "demos/sorting_contours.py",
"copies": "1",
"size": "1343",
"license": "mit",
"hash": -7837198840968445000,
"line_mean": 28.1956521739,
"line_max": 83,
"alpha_frac": 0.7297096054,
"autogenerated": false,
"ratio": 2.8333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40630429387333333,
"avg_score": null,
"num_lines": null
} |
__author__ = "Adrian Soghoian & Omar Ahmad"
import subprocess
import reference
import models
"""
This subsystem contains functionality to scan the local network for connected
devices, their OS fingerprints, and any open ports that they may have.
"""
def scan_network(range, gateway="Unknown"):
    """
    Scan the given IP range with an nmap ARP ping (-PR) and return a
    list of device models (built by scan_device) for every host that
    reported as up.
    """
    # NOTE(review): Popen.communicate() returns bytes on Python 3; the
    # string comparisons below assume Python 2 str output -- confirm.
    devices = []
    raw = subprocess.Popen(
        ["nmap", "-PR", str(range)],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
    tokens = raw.split()
    # each live host appears as "... scan report for <ip>" with an "up" line
    if tokens.count("up") > 0:
        ip_addresses = [tokens[i + 2] for i, tok in enumerate(tokens) if tok == "report"]
        for ip in ip_addresses:
            devices.append(scan_device(ip, gateway))
    return devices
def extract_manufacturer(scanlist):
    """
    Return the manufacturer name nmap printed in parentheses after the
    MAC address, or "Unknown" when no MAC line is present in the output.
    """
    if "Address:" not in "".join(scanlist):
        return "Unknown"
    # the vendor follows the MAC value token, wrapped in parentheses
    mac_index = scanlist.index("Address:") + 1
    window = " ".join(scanlist[mac_index + 1:mac_index + 5])
    vendor = window[window.find("(") + 1:window.find(")")]
    return vendor.strip()
def extract_mac_address(scanlist):
    """
    Return the MAC address token that follows "Address:" in the nmap
    output, or "Unknown" when the scan produced no MAC line.
    """
    if "Address:" not in "".join(scanlist):
        return "Unknown"
    # the MAC value is the token immediately after "Address:"
    return scanlist[scanlist.index("Address:") + 1].strip()
def extract_ports(scan):
    """
    Parse the open-port table out of raw nmap output and return a list
    of models.Port objects (empty when every port is closed).

    Table rows look like "22/tcp open ssh" -- three tokens per port --
    located between the "SERVICE" header and either the "MAC" line or
    the trailing "Nmap" summary line.
    """
    if " are closed" in scan:
        return []
    ports = []
    scanlist = scan.split()
    index_start = scanlist.index("SERVICE") + 1
    try:
        index_end = scanlist.index("MAC")
    except ValueError:
        # No MAC line present: fall back to the "Nmap done" summary.
        # BUGFIX: search from index_start so the result is an absolute
        # index; the old slice-based lookup returned a slice-relative
        # offset, silently truncating the port table.
        index_end = scanlist.index("Nmap", index_start)
    # consume the table three tokens at a time: "port/proto state service"
    i = index_start
    while i < index_end:
        ports.append(models.Port(scanlist[i].rpartition("/")[0],
                                 scanlist[i + 1], scanlist[i + 2]))
        i += 3
    return ports
def extract_ip(scan):
    """
    Return the IP address from an nmap "scan report for <ip>" line.
    """
    tokens = scan.split()
    # the address is the second token after "report" ("report for <ip>")
    return tokens[tokens.index("report") + 2]
def scan_device(ip, gateway):
    """
    Generates an OS fingerprint for a given host.

    Runs an ``nmap -sS`` (TCP SYN) scan against ``ip`` and parses the
    output into a populated models.Host -- or a models.Router when
    ``ip`` equals ``gateway``.  Hosts that do not answer come back as
    ``models.Host(is_down=True)``.
    """
    # NOTE(review): requires the nmap binary on PATH; SYN scans usually
    # need elevated privileges -- confirm the deployment environment.
    scan = subprocess.Popen(["nmap", "-sS", str(ip)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ).communicate()[0]
    scanlist = scan.split()
    if "Host is up" not in scan:
        return models.Host(is_down=True)
    # pull the individual fields out of the raw scan text
    mac_address = extract_mac_address(scanlist)
    manufacturer = extract_manufacturer(scanlist)
    ports = extract_ports(scan)
    ip = extract_ip(scan)
    # OS guess: prefer nmap's "Running:" field, fall back to "Unknown" ...
    try:
        os_index = scanlist.index("Running:") + 1
        os_type = scanlist[os_index]
    except ValueError:
        os_type = "Unknown"
    # ... then let any match from the curated reference table override it
    for each in reference.OS_TYPES:
        if each in scan:
            os_type = reference.OS_TYPES[each]
    # the gateway host gets the specialized Router model
    if ip == gateway:
        return models.Router(os=os_type, ip=ip, manufacturer=manufacturer, mac_address=mac_address, open_ports=ports)
    return models.Host(os=os_type, ip=ip, manufacturer=manufacturer, mac_address=mac_address, open_ports=ports)
| {
"repo_name": "adriansoghoian/security-at-home",
"path": "scanner.py",
"copies": "1",
"size": "3395",
"license": "mit",
"hash": -7437007821162580000,
"line_mean": 29.0442477876,
"line_max": 120,
"alpha_frac": 0.6159057437,
"autogenerated": false,
"ratio": 3.7144420131291027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48303477568291026,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adrian Strilchuk'
from datetime import datetime, date
import json
def jsonify(obj):
    """Serialize ``obj`` to a compact (no-whitespace) JSON string,
    leaving non-ASCII characters unescaped."""
    compact = (u",", u":")
    return json.dumps(obj, ensure_ascii=False, separators=compact)
# http://stackoverflow.com/questions/14163399/convert-list-of-datestrings-to-datetime-very-slow-with-python-strptime
def parse_datetime(dt_str):
    """Parse an ISO-8601-style timestamp ("YYYY-MM-DDTHH:MM:SS") by
    fixed slicing -- much faster than strptime for bulk parsing."""
    fields = (dt_str[0:4], dt_str[5:7], dt_str[8:10],
              dt_str[11:13], dt_str[14:16], dt_str[17:19])
    return datetime(*[int(f) for f in fields])
def parse_date(d_str):
    """Parse a "YYYY-MM-DD" string into a ``date``."""
    parts = [int(p) for p in d_str.split(u"-")]
    return date(*parts)
def unique(iterable, key_func=None):
    """Yield items of ``iterable`` whose key (``key_func(item)``, or the
    item itself when no key function is given) has not been seen
    before, preserving the original order."""
    if key_func is None:
        key_func = lambda x: x
    emitted = set()
    for element in iterable:
        marker = key_func(element)
        if marker in emitted:
            continue
        emitted.add(marker)
        yield element
def batchx(iterable, batch_size):
    """Yield consecutive slices of ``iterable`` of at most ``batch_size``
    items; requires ``iterable`` to support len() and slicing.

    Uses ``range`` instead of the Python-2-only ``xrange`` so the helper
    also works on Python 3 (``range`` is lazy there, and acceptable on
    Python 2 for these step counts).
    """
    for index in range(0, len(iterable), batch_size):
        yield iterable[index:index + batch_size]
def batched(iterable, batch_size):
    """Group items from any iterable into lists of ``batch_size``; the
    final batch may be shorter.  Unlike batchx, works on arbitrary
    iterators (no len()/slicing needed)."""
    pending = []
    for item in iterable:
        pending.append(item)
        if len(pending) == batch_size:
            yield pending
            pending = []
    # flush whatever is left over
    if pending:
        yield pending
def result_iterator(cursor, fetch_size=250):
    """Lazily yield every row from a DB-API cursor, pulling
    ``fetch_size`` rows per fetchmany() round trip."""
    while True:
        rows = cursor.fetchmany(fetch_size)
        if not rows:
            # an empty batch signals the result set is exhausted
            break
        for row in rows:
            yield row
| {
"repo_name": "astrilchuk/sd2xmltv",
"path": "libschedulesdirect/__init__.py",
"copies": "1",
"size": "1338",
"license": "mit",
"hash": -5098791886143433000,
"line_mean": 24.7307692308,
"line_max": 117,
"alpha_frac": 0.620328849,
"autogenerated": false,
"ratio": 3.3118811881188117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4432210037118811,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adrie_000'
# -*- coding: utf8 -*-
import numpy as np
class StrategicMind():
    """Chooses which objective to pursue, based on the positions and
    objective list held by the shared data_center."""

    def __init__(self, data_center):
        # shared state holder: current position and the objective list
        self.data_center = data_center

    def set_objective(self):
        """Return the objective with the highest score (see
        compute_score); the first objective is kept unconditionally."""
        best = None
        best_value = 0
        for candidate in self.data_center.objectives:
            value = self.compute_score(candidate)
            if best is None or value > best_value:
                best = candidate
                best_value = value
        return best

    def compute_score(self, objective):
        """Score an objective: its maximum score minus the straight-line
        distance from the current position to it."""
        gap = np.linalg.norm(
            np.array(objective.position) - np.array(self.data_center.position))
        return objective.max_score - gap

    def update_objective(self):
        """Return the first uncompleted objective that is startable from
        the current location, or None when none qualifies."""
        for objective in self.data_center.objectives:
            if not objective.completed and objective.is_startable_from_here():
                return objective
        return None
| {
"repo_name": "adrien-bellaiche/ia-cdf-rob-2015",
"path": "Strategy.py",
"copies": "1",
"size": "1044",
"license": "apache-2.0",
"hash": 9211860640672674000,
"line_mean": 29.7058823529,
"line_max": 101,
"alpha_frac": 0.5632183908,
"autogenerated": false,
"ratio": 4.261224489795918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002883506343713956,
"num_lines": 34
} |
__author__ = 'adrie_000'
import numpy as np
class Pathfinder():
    """Potential-field path planner: steers toward the active objective
    while being repelled by known obstacles."""

    def __init__(self, data_center):
        # shared state holder: position, orientation, radius, obstacles
        self.data_center = data_center

    def get_orders(self, objective):
        # Returns the orders as [direction, speed, rotation_speed]
        objective_location = objective.position
        v = np.array([0, 0])
        # NOTE(review): v has integer dtype, but the in-place float
        # additions below require casting -- recent NumPy versions may
        # reject this; consider np.array([0.0, 0.0]).  TODO confirm.
        if objective_location is not None:
            obstacles = self.data_center.obstacles
            # aim at the approach point: objective position offset along
            # its approach angle (objective.side, in degrees) by radius
            targ = np.array(objective_location) + np.array(
                [np.cos(objective.side * np.pi / 180), np.sin(objective.side * np.pi / 180)]) * self.data_center.radius
            loc = np.array(self.data_center.position)
            v = np.array([0, 0])
            # unit-length attraction toward the target...
            v += (targ - loc) / np.linalg.norm(targ - loc)
            for obstacle in obstacles:
                # ...plus inverse-square repulsion from each obstacle
                v2obstacle = np.array(obstacle.center) - loc
                d2obstacle = np.linalg.norm(v2obstacle)
                v2obstacle /= np.linalg.norm(v2obstacle)
                v -= v2obstacle / (d2obstacle * d2obstacle)
        va = 0
        if objective.atelier is not None:
            # desired heading for the selected work station (atelier)
            target_ori = objective.atelier * 2 * np.pi / 3 - objective.side
            va = 3 * divmod(self.data_center.orientation - target_ori, 2 * np.pi)
            # NOTE(review): divmod returns a (quotient, remainder)
            # tuple, so 3 * divmod(...) repeats the tuple instead of
            # scaling a number -- probably meant divmod(...)[1].
            # TODO confirm intended behavior.
        return [v[0], v[1], va] | {
"repo_name": "adrien-bellaiche/ia-cdf-rob-2015",
"path": "Pathfinding.py",
"copies": "1",
"size": "1326",
"license": "apache-2.0",
"hash": 8245232157281335000,
"line_mean": 41.8064516129,
"line_max": 119,
"alpha_frac": 0.5739064857,
"autogenerated": false,
"ratio": 3.3400503778337534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44139568635337534,
"avg_score": null,
"num_lines": null
} |
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
import sys
import pickle
# Third-party
from astropy import log as logger
import astropy.coordinates as coord
import astropy.units as u
import emcee
import matplotlib.pyplot as pl
import numpy as np
import scipy.optimize as so
import h5py
import gary.dynamics as gd
from gary.util import get_pool
from gary.units import galactic
from gary.dynamics.orbitfit import rotate_sph_coordinate
# Project
from streambfe import potentials, FRAME
from streambfe.data import observe_data
from streambfe.plot import plot_data, plot_orbit
from streambfe import orbitfit
def main(true_potential_name, fit_potential_name, index, pool,
         frac_distance_err=1, n_stars=32,
         n_walkers=None, n_burn=0, n_iterations=1024,
         overwrite=False, dont_optimize=False, name=None):
    """
    Fit an orbit plus potential parameters to one simulated stream.

    Loads mock stream ``index`` simulated in ``true_potential_name``,
    "observes" it with the requested uncertainties, builds the orbit-fit
    model for ``fit_potential_name``, optionally optimizes the posterior,
    then runs emcee and writes the chain to an HDF5 file under
    output/orbitfit/. Calls ``sys.exit(0)`` when finished.
    """
    true_potential = potentials[true_potential_name]
    # All outputs go under <repo>/output/orbitfit/<true>/<fit>/d_<err>percent/
    _path,_ = os.path.split(os.path.abspath(__file__))
    top_path = os.path.abspath(os.path.join(_path, ".."))
    simulation_path = os.path.join(top_path, "output", "simulations", true_potential_name)
    output_path = os.path.join(top_path, "output", "orbitfit",
                               true_potential_name, fit_potential_name,
                               "d_{:.1f}percent".format(frac_distance_err))
    plot_path = os.path.join(output_path, "plots")
    sampler_file = os.path.join(output_path, "{}-emcee-{}.h5".format(name, index))
    model_file = os.path.join(output_path, "{}-model-{}.pickle".format(name, index))
    # Don't redo finished runs unless overwrite was requested.
    if os.path.exists(sampler_file) and not overwrite:
        logger.info("Orbit index {} already complete.".format(index))
        return
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    if not os.path.exists(plot_path):
        os.mkdir(plot_path)
    logger.info("Potential: {}".format(fit_potential_name))
    # Per-potential setup: model class, frozen parameters, initial guess,
    # per-parameter MCMC ball widths, and the "truth" values for comparison.
    if fit_potential_name == 'plummer':
        Model = orbitfit.PlummerOrbitfitModel
        kw = dict()
        freeze = dict()
        freeze['potential_b'] = 10.
        potential_guess = [6E11]
        mcmc_potential_std = [1E8]
        potential_truth = [true_potential.parameters['m'].value]
    elif fit_potential_name == 'scf':
        Model = orbitfit.SCFOrbitfitModel
        kw = dict(nmax=8)
        freeze = dict()
        freeze['potential_r_s'] = 10.
        potential_guess = [6E11] + [1.3,0,0,0,0,0,0,0,0] # HACK: try this first
        mcmc_potential_std = [1E8] + [1E-3]*9
        potential_truth = [true_potential.parameters['m'].value] + true_potential.parameters['Snlm'].ravel().tolist()
    elif fit_potential_name == 'triaxialnfw':
        Model = orbitfit.TriaxialNFWOrbitfitModel
        kw = dict()
        freeze = dict()
        freeze['potential_r_s'] = 20.
        freeze['potential_a'] = 1.
        potential_guess = [(200*u.km/u.s).decompose(galactic).value, 0.8, 0.6]
        mcmc_potential_std = [1E-5, 1E-3, 1E-3]
        potential_truth = [true_potential.parameters['v_c'].value,
                           true_potential.parameters['b'].value,
                           true_potential.parameters['c'].value]
    else:
        raise ValueError("Invalid potential name '{}'".format(fit_potential_name))
    # Load the simulated stream phase-space coordinates and rotation matrix.
    with h5py.File(os.path.join(simulation_path, "mock_stream_data.h5"), "r") as f:
        g = f[str(index)]
        pos = g['pos'][:] * u.Unit(f[str(index)]['pos'].attrs['unit'])
        vel = g['vel'][:] * u.Unit(f[str(index)]['vel'].attrs['unit'])
        R = g['R'][:]
        dt = g.attrs['dt']
        n_steps = g.attrs['n_steps']
    stream = gd.CartesianPhaseSpacePosition(pos=pos, vel=vel)
    # Subsample n_stars particles, always keeping index 0.
    idx = np.concatenate(([0], np.random.permutation(pos.shape[1])[:n_stars-1]))
    true_stream_c,true_stream_v = stream[idx].to_frame(coord.Galactic, **FRAME)
    true_stream_rot = rotate_sph_coordinate(true_stream_c, R)
    # intrinsic widths - all are smaller than errors except sky pos, distance
    rtide = 0.5*u.kpc
    phi2_sigma = (rtide / true_stream_rot.distance.mean()).decompose().value
    d_sigma = rtide.to(u.kpc).value
    # Scatter latitude and distance by the intrinsic widths; longitude kept exact.
    stream_rot = coord.SphericalRepresentation(lon=true_stream_rot.lon,
                                               lat=np.random.normal(true_stream_rot.lat.radian,
                                                                    phi2_sigma)*u.radian,
                                               distance=np.random.normal(true_stream_rot.distance.value, d_sigma)*u.kpc)
    # set all proper motions to zero because they shouldn't matter
    stream_v = [true_stream_v[0]*0., true_stream_v[1]*0., true_stream_v[2]]
    # Add observational uncertainties on top of the intrinsic scatter.
    data,err = observe_data(stream_rot, stream_v,
                            frac_distance_err=frac_distance_err,
                            vr_err=10*u.km/u.s)
    # freeze all intrinsic widths
    freeze['phi2_sigma'] = phi2_sigma
    freeze['d_sigma'] = d_sigma
    freeze['vr_sigma'] = 5E-4
    freeze['mu_sigma'] = 1000.
    model = Model(data=data, err=err, R=R, dt=dt, n_steps=int(1.5*n_steps),
                  freeze=freeze, **kw)
    # save the truth
    model.true_p = ([stream_rot[0].lat.radian, stream_rot[0].distance.value] +
                    [v[0].decompose(galactic).value for v in stream_v] +
                    potential_truth)
    # pickle the model
    with open(model_file, 'wb') as f:
        pickle.dump(model, f)
    # starting position for optimization
    p_guess = ([true_stream_rot[0].lat.radian, true_stream_rot[0].distance.value] +
               [v[0].decompose(galactic).value for v in true_stream_v] +
               potential_guess)
    logger.debug("ln_posterior at initialization: {}".format(model(p_guess)))
    if n_walkers is None:
        n_walkers = 8*len(p_guess)
    if not dont_optimize:
        logger.debug("optimizing ln_posterior first...")
        res = so.minimize(lambda p: -model(p), x0=p_guess, method='powell')
        p_best = res.x
        logger.debug("...done. optimization returned: {}".format(p_best))
        if not res.success:
            pool.close()
            raise ValueError("Failed to optimize!")
        logger.debug("ln_posterior at optimized p: {}".format(model(p_best)))
        # plot the orbit of the optimized parameters
        # NOTE(review): this orbit is integrated in the TRUE potential, not
        # the fit potential being sampled -- confirm this is intentional.
        orbit = true_potential.integrate_orbit(model._mcmc_sample_to_w0(res.x),
                                               dt=dt, nsteps=n_steps)
        fig,_ = plot_data(data, err, R, gal=False)
        fig,_ = plot_orbit(orbit, fig=fig, R=R, gal=False)
        fig.savefig(os.path.join(plot_path, "{}-optimized-{}.png".format(name, index)))
        mcmc_p0 = emcee.utils.sample_ball(res.x, 1E-3*np.array(p_best), size=n_walkers)
    else:
        # mcmc_std = ([freeze['phi2_sigma'], freeze['d_sigma'], freeze['mu_sigma']] +
        #             [freeze['mu_sigma'], freeze['vr_sigma']] + mcmc_potential_std)
        # HACK:
        mcmc_std = ([freeze['phi2_sigma'], freeze['d_sigma'], 1E-4] +
                    [1E-4, freeze['vr_sigma']] + mcmc_potential_std)
        mcmc_p0 = emcee.utils.sample_ball(p_guess, mcmc_std, size=n_walkers)
    # now, create initial conditions for MCMC walkers in a small ball around the
    # optimized parameter vector
    sampler = emcee.EnsembleSampler(nwalkers=n_walkers, dim=len(p_guess),
                                    lnpostfn=model, pool=pool)
    if n_burn > 0:
        logger.info("burning in sampler for {} steps".format(n_burn))
        pos,_,_ = sampler.run_mcmc(mcmc_p0, N=n_burn)
        logger.debug("finished burn-in")
        sampler.reset()
    else:
        pos = mcmc_p0
    logger.info("running mcmc sampler with {} walkers for {} steps".format(n_walkers, n_iterations))
    # restart_p = np.median(sampler.chain[:,-1], axis=0)
    # mcmc_p0 = emcee.utils.sample_ball(restart_p, 1E-3*restart_p, size=n_walkers)
    # sampler.reset()
    _ = sampler.run_mcmc(pos, N=n_iterations)
    logger.info("finished sampling")
    pool.close()
    logger.debug("saving sampler data")
    with h5py.File(sampler_file, 'w') as g:
        g['chain'] = sampler.chain
        g['acceptance_fraction'] = sampler.acceptance_fraction
        g['lnprobability'] = sampler.lnprobability
        g.attrs['n_stars'] = n_stars
        g.attrs['frac_distance_err'] = frac_distance_err
    # Overplot a thinned subsample of late-chain orbits on the data
    # (last 256 steps, every 4th link, at most 33 orbits).
    if n_iterations > 256:
        logger.debug("plotting...")
        flatchain = np.vstack(sampler.chain[:,-256::4])
        fig,_ = plot_data(data, err, R, gal=False)
        for i,link in enumerate(flatchain):
            orbit = true_potential.integrate_orbit(model._mcmc_sample_to_w0(link),
                                                   dt=dt, nsteps=n_steps)
            fig,_ = plot_orbit(orbit, fig=fig, R=R, gal=False, alpha=0.25)
            if i == 32: break
        fig.savefig(os.path.join(plot_path, "{}-mcmc-{}.png".format(name, index)))
    # NOTE(review): hard process exit -- presumably to shut down MPI worker
    # pools; verify before calling main() from other code.
    sys.exit(0)
def continue_sampling(true_potential_name, fit_potential_name, index, pool, n_iterations,
                      frac_distance_err=1, name=None):
    """
    Resume a previous emcee run for stream ``index``.

    Loads the pickled model and the existing chain from the files written
    by ``main()``, runs ``n_iterations`` more steps starting from the last
    walker positions, and appends the new samples to the HDF5 sampler file.
    """
    true_potential = potentials[true_potential_name]
    # Reconstruct the same output paths used by main().
    _path,_ = os.path.split(os.path.abspath(__file__))
    top_path = os.path.abspath(os.path.join(_path, ".."))
    output_path = os.path.join(top_path, "output", "orbitfit",
                               true_potential_name, fit_potential_name,
                               "d_{:.1f}percent".format(frac_distance_err))
    plot_path = os.path.join(output_path, "plots")
    sampler_file = os.path.join(output_path, "{}-emcee-{}.h5".format(name, index))
    model_file = os.path.join(output_path, "{}-model-{}.pickle".format(name, index))
    # Load the pickled model; fall back to latin1 decoding for pickles
    # that were written under Python 2.
    try:
        with open(model_file, 'rb') as f:
            model = pickle.load(f)
    except UnicodeDecodeError:
        with open(model_file, 'rb') as f:
            model = pickle.load(f, encoding='latin1')
    # Restart from the final walker positions of the saved chain.
    with h5py.File(sampler_file, 'r') as g:
        n_walkers,n_prev_steps,n_dim = g['chain'][:].shape
        mcmc_pos = g['chain'][:,-1,:][:]
    # now, create initial conditions for MCMC walkers in a small ball around the
    # optimized parameter vector
    sampler = emcee.EnsembleSampler(nwalkers=n_walkers, dim=n_dim,
                                    lnpostfn=model, pool=pool)
    logger.info("continuing mcmc sampler with {} walkers for {} steps".format(n_walkers, n_iterations))
    _ = sampler.run_mcmc(mcmc_pos, N=n_iterations)
    logger.info("finished sampling")
    pool.close()
    logger.debug("saving sampler data")
    # Replace the saved datasets with versions extended by the new samples
    # (delete + recreate, then hstack old and new along the step axis).
    with h5py.File(sampler_file, 'r+') as g:
        prev_chain = g['chain'][:]
        prev_lnprobability = g['lnprobability'][:]
        del g['chain']
        del g['acceptance_fraction']
        del g['lnprobability']
        g['chain'] = np.hstack((prev_chain, sampler.chain))
        g['acceptance_fraction'] = sampler.acceptance_fraction
        g['lnprobability'] = np.hstack((prev_lnprobability, sampler.lnprobability))
    # Overplot a thinned subsample of late-chain orbits on the data.
    if n_iterations > 256:
        logger.debug("plotting...")
        flatchain = np.vstack(sampler.chain[:,-256::4])
        fig,_ = plot_data(model.data, model.err, model.R, gal=False)
        for i,link in enumerate(flatchain):
            # NOTE(review): orbits are integrated in the TRUE potential even
            # though the fit potential was sampled -- confirm intentional.
            orbit = true_potential.integrate_orbit(model._mcmc_sample_to_w0(link),
                                                   dt=model.dt, nsteps=model.n_steps)
            fig,_ = plot_orbit(orbit, fig=fig, R=model.R, gal=False, alpha=0.25)
            if i == 32: break
        fig.savefig(os.path.join(plot_path, "{}-mcmc-{}.png".format(name, index)))
if __name__ == "__main__":
    # Script entry point: parse CLI options, configure logging, seed the
    # RNG, build the (optionally MPI) pool and dispatch to main() or
    # continue_sampling().
    from argparse import ArgumentParser
    import logging
    # Define parser object
    parser = ArgumentParser(description="")
    parser.add_argument("-o", "--overwrite", action="store_true", dest="overwrite",
                        default=False, help="DESTROY. FILES.")
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
                        default=False, help="Be chatty! (default = False)")
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        default=False, help="Be quiet! (default = False)")
    parser.add_argument("--seed", dest="seed", default=42,
                        type=int, help="Random number seed")
    parser.add_argument("-i", "--index", dest="index", required=True,
                        type=int, help="Index of stream to fit.")
    parser.add_argument("-fp", "--fit-potential", dest="fit_potential_name", required=True,
                        type=str, help="Name of the fitting potential can be: "
                                       "plummer, scf, triaxialnfw")
    parser.add_argument("-tp", "--true-potential", dest="true_potential_name", required=True,
                        type=str, help="Name of the true potential can be: "
                                       "plummer, scf, triaxialnfw")
    parser.add_argument("--name", dest="name", required=True,
                        type=str, help="Name of the run.")
    parser.add_argument("--frac-d-err", dest="frac_distance_err", default=1,
                        type=float, help="Fractional distance errors.")
    parser.add_argument("-n", "--n-stars", dest="n_stars", default=32,
                        type=int, help="Number of 'stars'.")
    parser.add_argument("--dont-optimize", action="store_true", dest="dont_optimize",
                        default=False, help="Don't optimize, just sample from prior.")
    # emcee
    parser.add_argument("--mpi", dest="mpi", default=False, action="store_true",
                        help="Run with MPI.")
    parser.add_argument("--mcmc-walkers", dest="mcmc_walkers", type=int, default=None,
                        help="Number of walkers.")
    parser.add_argument("--mcmc-steps", dest="mcmc_steps", type=int,
                        help="Number of steps to take MCMC.")
    parser.add_argument("--mcmc-burn", dest="mcmc_burn", type=int,
                        help="Number of burn-in steps to take MCMC.")
    parser.add_argument("--continue", action="store_true", dest="_continue",
                        default=False, help="Continue the mcmc")
    args = parser.parse_args()
    # Set logger level based on verbose flags
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    elif args.quiet:
        logger.setLevel(logging.ERROR)
    else:
        logger.setLevel(logging.INFO)
    # Seed NumPy before any random draws so runs are reproducible.
    np.random.seed(args.seed)
    pool = get_pool(mpi=args.mpi)
    # --continue appends more emcee steps to an existing run, then exits.
    if args._continue:
        continue_sampling(true_potential_name=args.true_potential_name,
                          fit_potential_name=args.fit_potential_name, index=args.index,
                          pool=pool, n_iterations=args.mcmc_steps,
                          frac_distance_err=args.frac_distance_err, name=args.name)
        sys.exit(0)
    main(true_potential_name=args.true_potential_name, fit_potential_name=args.fit_potential_name,
         n_stars=args.n_stars, n_burn=args.mcmc_burn, pool=pool, n_walkers=args.mcmc_walkers,
         n_iterations=args.mcmc_steps, overwrite=args.overwrite, index=args.index,
         dont_optimize=args.dont_optimize, frac_distance_err=args.frac_distance_err,
         name=args.name)
| {
"repo_name": "adrn/StreamBFE",
"path": "scripts/fit-streams.py",
"copies": "1",
"size": "15207",
"license": "mit",
"hash": -7465350424517582000,
"line_mean": 41.0082872928,
"line_max": 117,
"alpha_frac": 0.5885447491,
"autogenerated": false,
"ratio": 3.442055228610231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4530599977710231,
"avg_score": null,
"num_lines": null
} |
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
from astropy.constants import G
from astropy import log as logger
from astropy.coordinates.angles import rotation_matrix
import astropy.coordinates as coord
import astropy.units as u
import matplotlib.pyplot as pl
import numpy as np
import scipy.optimize as so
import h5py
import gary.coordinates as gc
import gary.dynamics as gd
from gary.units import galactic
from gary.dynamics.orbitfit import rotate_sph_coordinate
# Project
from streambfe import potentials, FRAME
from streambfe.coordinates import compute_stream_rotation_matrix
from streambfe.plot import plot_orbit, plot_data, plot_stream_obs
# this sets the number of simulations to run
# per_apo = [(15.,25)]*8 + [(25.,60)]*8 + [(85.,125)]*8
per_apo = [(15.,25), (25,80.), (80, 150.), (100., 250.)]
def peri_apo_to_random_w0(pericenter, apocenter, potential, frac_r_start=None):
    """
    Build randomly-oriented initial conditions for an orbit with the
    requested pericenter and apocenter in the given potential.

    Parameters
    ----------
    pericenter, apocenter : float
        Desired turning-point radii (code units, kpc here).
    potential : gary potential instance
        Potential in which the orbit lives.
    frac_r_start : float, optional
        Fraction (0-1) between pericenter and apocenter at which the orbit
        starts; drawn uniformly at random when not given.

    Returns
    -------
    w0 : gd.CartesianPhaseSpacePosition
        Randomly rotated initial conditions.
    T : float
        Rough orbital period estimate at the starting radius.
    """
    def _func(E, L, r):
        # Radial term 2(E - Phi(r)) - L^2/r^2; it is set to zero at the
        # desired turning points by the root solve below.
        return 2*(E - potential.value([r,0,0.]).value) - L**2/r**2
    def f(p):
        # Two-equation system: make both apocenter and pericenter turning points.
        E,L = p
        return np.array([_func(E,L,apocenter), _func(E,L,pericenter)])
    if frac_r_start is None:
        frac_r_start = np.random.uniform()
    r_start = frac_r_start * (apocenter - pericenter) + pericenter
    # Initial guesses for energy and angular momentum (0.2 kpc/Myr trial speed).
    E0 = 0.5*0.2**2 + potential.value([(apocenter+pericenter)/2.,0,0]).value[0]
    L0 = 0.2 * r_start
    E,L = so.broyden1(f, [E0, L0])
    # Radial velocity at the starting radius consistent with (E, L).
    _rdot = np.sqrt(2*(E-potential.value([r_start,0,0.]).value[0]) - L**2/r_start**2)
    w0 = gd.CartesianPhaseSpacePosition(pos=[r_start,0.,0]*u.kpc,
                                        vel=[_rdot, L/r_start, 0.]*u.kpc/u.Myr)
    # Circular-orbit style period estimate at r_start from the tangential speed.
    T = 2*np.pi*r_start / (L/r_start)
    logger.debug("Period: {}".format(T))
    # sample a random rotation matrix
    q = gc.Quaternion.random()
    random_R = q.rotation_matrix
    # now rotate by random rotation matrix
    new_pos = random_R.dot(w0.pos)
    new_vel = random_R.dot(w0.vel)
    w0 = gd.CartesianPhaseSpacePosition(pos=new_pos, vel=new_vel)
    # Integrate once as a sanity check and log achieved vs. requested peri/apo.
    orbit = potential.integrate_orbit(w0, dt=1., nsteps=10000)
    logger.debug("Desired (peri,apo): ({:.1f},{:.1f}), estimated (peri,apo): ({:.1f},{:.1f})"
                 .format(pericenter, apocenter, orbit.pericenter(), orbit.apocenter()))
    return w0,T
def main(progenitor_mass, n_stars, seed=42):
    """
    Generate mock stream data for each (pericenter, apocenter) pair in
    ``per_apo``, in every potential in ``potentials``.

    For each orbit, writes positions, velocities and the stream rotation
    matrix to output/simulations/<potential>/mock_stream_data.h5 and saves
    diagnostic plots.

    NOTE(review): ``n_stars`` is currently unused -- it is referenced only
    inside the commented-out "Option 2" block below.
    """
    np.random.seed(seed)
    _path,_ = os.path.split(os.path.abspath(__file__))
    top_path = os.path.abspath(os.path.join(_path, ".."))
    output_path = os.path.join(top_path, "output", "simulations")
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    for potential_name,potential in potentials.items():
        logger.info("Potential: {}".format(potential_name))
        this_output_path = os.path.join(output_path, potential_name)
        this_plot_path = os.path.join(this_output_path, 'plots')
        if not os.path.exists(this_output_path):
            os.mkdir(this_output_path)
        if not os.path.exists(this_plot_path):
            os.mkdir(this_plot_path)
        with h5py.File(os.path.join(this_output_path, "mock_stream_data.h5"), "w") as f:
            for i,(per,apo) in enumerate(per_apo):
                g = f.create_group(str(i))
                g.attrs['apocenter'] = apo
                g.attrs['pericenter'] = per
                # get random initial conditions for given pericenter, apocenter
                w0,T = peri_apo_to_random_w0(per, apo, potential, frac_r_start=0.1)
                # integration time: a bit under half the estimated period
                t1 = T/2*0.9
                n_steps = 10000
                dt = t1/n_steps
                g.attrs['n_steps'] = n_steps
                g.attrs['dt'] = dt
                logger.debug("dt: {:.2f}, N steps: {}".format(dt, n_steps))
                # integrate orbit
                prog_orbit = potential.integrate_orbit(w0, dt=dt, nsteps=n_steps)
                sph,_ = prog_orbit.represent_as(coord.SphericalRepresentation)
                logger.debug("Data distance min,max = {}, {}".format(sph.distance.min(),
                                                                    sph.distance.max()))
                # Tidal radius and internal velocity dispersion of the progenitor.
                m = progenitor_mass*u.Msun
                rtide = (m/potential.mass_enclosed(w0.pos))**(1/3.) * np.sqrt(np.sum(w0.pos**2))
                vdisp = np.sqrt(G*m/(2*rtide)).to(u.km/u.s)
                logger.debug("rtide, vdisp: {}, {}".format(rtide, vdisp))
                # Option 1: generate mock stream
                # stream = mockstream.fardal_stream(potential, prog_orbit=prog_orbit,
                #                                   prog_mass=m, release_every=1,
                #                                   Integrator=gi.DOPRI853Integrator)
                # Option 2: integrate a ball of test particle orbits
                # std = gd.CartesianPhaseSpacePosition(pos=[rtide.value]*3*u.kpc,
                #                                      vel=[vdisp.value]*3*u.km/u.s)
                # ball_w = w0.w(galactic)[:,0]
                # ball_std = std.w(galactic)[:,0]
                # ball_w0 = np.random.normal(ball_w, ball_std, size=(n_stars,6))
                # ball_w0 = gd.CartesianPhaseSpacePosition.from_w(ball_w0.T, units=galactic)
                # stream_orbits = potential.integrate_orbit(ball_w0, dt=1., nsteps=n_steps)
                # stream = stream_orbits[-1]
                # Option 3: just take single orbit, convolve with uncertainties
                # (every 2nd point of the final n_steps of the orbit)
                prog_orbit = prog_orbit[-n_steps::2]
                stream = gd.CartesianPhaseSpacePosition(pos=prog_orbit.pos, vel=prog_orbit.vel)
                # save simulated stream data
                g.attrs['mass'] = progenitor_mass
                g.create_dataset('pos', shape=stream.pos.shape, dtype=np.float64,
                                 data=stream.pos.decompose(galactic).value)
                g['pos'].attrs['unit'] = 'kpc'
                g.create_dataset('vel', shape=stream.vel.shape, dtype=np.float64,
                                 data=stream.vel.decompose(galactic).value)
                g['vel'].attrs['unit'] = 'kpc/Myr'
                # plot the orbit in cartesian coords
                fig = prog_orbit.plot(color='lightblue', alpha=0.5)
                fig = stream.plot(axes=fig.axes, marker='.', alpha=0.5)
                for ax in fig.axes:
                    ax.set_xlim(-apo-10, apo+10)
                    ax.set_ylim(-apo-10, apo+10)
                fig.savefig(os.path.join(this_plot_path, "orbit-{}.png".format(i)))
                # convert to sky coordinates and compute the stream rotation matrix
                stream_c,stream_v = stream.to_frame(coord.Galactic, **FRAME)
                R = compute_stream_rotation_matrix(stream_c, zero_pt=stream_c[0])
                stream_rot = rotate_sph_coordinate(stream_c, R)
                # Enforce a consistent orientation: flip about x if the
                # rotated stream ends at negative longitude.
                if stream_rot.lon.wrap_at(180*u.degree).degree[-1] < 0:
                    logger.debug("flipping stream...")
                    flip = rotation_matrix(180*u.degree, 'x')
                    stream_rot = rotate_sph_coordinate(stream_rot, flip)
                    R = flip*R
                g['R'] = R
                # plot the orbit on the sky in galactic and in stream coordinates
                fig_gal,_ = plot_stream_obs(stream_c, stream_v)
                fig_rot,_ = plot_stream_obs(stream_rot, stream_v)
                fig_gal.savefig(os.path.join(this_plot_path, "stream-{}-gal.png".format(i)))
                fig_rot.savefig(os.path.join(this_plot_path, "stream-{}-rot.png".format(i)))
                pl.close('all')
                # if i == 7: return
if __name__ == "__main__":
    # Script entry point: parse CLI options, configure logging and run main().
    from argparse import ArgumentParser
    import logging
    # Define parser object
    parser = ArgumentParser(description="")
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
                        default=False, help="Be chatty! (default = False)")
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        default=False, help="Be quiet! (default = False)")
    parser.add_argument("--seed", dest="seed", default=42,
                        type=int, help="Random number seed")
    parser.add_argument("--prog-mass", dest="prog_mass", default=1E4,
                        type=float, help="Progenitor mass")
    parser.add_argument("--nstars", dest="n_stars", default=128,
                        type=int, help="Number of stars")
    args = parser.parse_args()
    # Set logger level based on verbose flags
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    elif args.quiet:
        logger.setLevel(logging.ERROR)
    else:
        logger.setLevel(logging.INFO)
    # main() seeds NumPy itself with args.seed.
    main(n_stars=args.n_stars, progenitor_mass=args.prog_mass, seed=args.seed)
| {
"repo_name": "adrn/StreamBFE",
"path": "scripts/make-streams.py",
"copies": "1",
"size": "8752",
"license": "mit",
"hash": 4981783500701987000,
"line_mean": 41.6926829268,
"line_max": 96,
"alpha_frac": 0.5631855576,
"autogenerated": false,
"ratio": 3.3700423565652677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9412621393346321,
"avg_score": 0.004121304163789226,
"num_lines": 205
} |
__author__ = "aemerick <emerick@astro.columbia.edu>"
class constants:
    """
    Helpful constants, in cgs (or cgs conversion factors), except the
    ionization energies which are in eV.
    """
    def __init__(self):
        # unit conversions
        self.eV_erg = 6.24150934326E11       # eV per erg
        self.k_boltz = 1.380658E-16          # Boltzmann constant [erg/K]
        self.c = 2.99792458E10               # speed of light [cm/s]
        self.h = 6.6260755E-27               # Planck constant [erg s]
        self.G = 6.6743E-8                   # gravitational constant [cgs]
        self.Msun = 1.998E33                 # solar mass in cgs
        self.Rsun = 69.63E9                  # solar radius in cgs
        self.Lsun = 3.9E33                   # solar luminosity in cgs
        self.Tsun = 5777.0                   # solar temperature in cgs
        self.tau_sun = 10.0E9 * 3.1536E7     # solar lifetime in cgs
        # ionization energies (eV)
        self.E_HI = 13.6
        self.E_HeI = 24.587
        # solar metal fractions adopted by different stellar-evolution codes
        self.Zsolar_ostar = 0.01700    # Grevesse & Sauval 1998 - used in OSTAR2002
        self.Zsolar_parsec = 0.01524   # Caffau et. al. 2009 / 2011 - used in PARSEC SE code
        self.Zsolar_s99 = 0.02         # starburst 99
        # black-body correction factor pairs -- NOTE(review): the meaning of
        # each pair is not documented here; confirm against their usage.
        self.black_body_q0 = [2.89, 0.1]
        self.black_body_q1 = [5.20, 0.01]
        self.black_body_fuv = [1.0E-4, 1.0/4.35E4]
        # time / distance conversions
        self.yr_to_s = 3.16224E7       # seconds per year
        self.km = 1.0E5                # cm per km
        self.pc = 3.0865677E18         # cm per parsec
        self.kpc = 1.0E3 * self.pc
        self.Mpc = 1.0E3 * self.kpc

    @property
    def hubble_time(self):
        """
        Hubble time in seconds for H0 = 70 km/s/Mpc.

        Fix: the original declared ``H_o`` and ``z`` parameters on this
        property, but Python properties cannot receive arguments, so they
        were dead (and ``z`` was never used); ``H_o`` is now a local
        constant with the same default value, so behavior is unchanged.
        """
        H_o = 70.0                     # km / s / Mpc
        tau = 1.0 / H_o                # (km/s/Mpc)^-1 = Mpc s / km
        return tau * self.Mpc / self.km

CONST = constants()
| {
"repo_name": "aemerick/onezone",
"path": "constants.py",
"copies": "1",
"size": "1446",
"license": "mit",
"hash": 3753205545507099000,
"line_mean": 28.5102040816,
"line_max": 94,
"alpha_frac": 0.5380359613,
"autogenerated": false,
"ratio": 2.6386861313868613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.858270708366782,
"avg_score": 0.01880300180380834,
"num_lines": 49
} |
__author__ = "aemerick <emerick@astro.columbia.edu>"
# --- external ---
from collections import OrderedDict
# --- internal ---
from constants import CONST as const
import imf as imf
#
# --------- Superclass for all parameters -------
#
class _parameters(object):
    """
    Base class shared by all configuration-parameter containers.

    Provides ``help`` (print the subclass docstring) and
    ``reset_parameters_to_default`` (re-run ``__init__``, where each
    subclass defines its defaults).
    """
    def __init__(self):
        pass

    def help(self):
        # Fix: "print x" is Python-2-only syntax; with a single argument,
        # "print(x)" behaves identically under both Python 2 and 3.
        print(self.__doc__)

    def reset_parameters_to_default(self):
        # Defaults live entirely in each subclass' __init__, so re-running
        # it restores them.
        self.__init__()
class _globals(_parameters):
    """
    Global simulation state shared across the code
    (currently just the running simulation time).
    """
    def __init__(self):
        # current simulation time, in code time units (starts at zero)
        self.time = 0.0
# module-level singleton holding the global state
global_values = _globals()
#
# ----------- Units ----------------------
#
class _units(_parameters):
    """
    Units:

        Conversions between code units and 'base' quantities. The base
        unit of time is the second and the base unit of mass is the solar
        mass. To use Myr for time (the default) and solar masses for mass
        (the default):

        >>> units.time = 3.1536E13
        >>> units.mass = 1.0

        Output masses are always in solar masses, while everything else
        (e.g. luminosity) is in cgs regardless of the code units chosen.
    """
    def __init__(self):
        # defaults: time in Myr (expressed in seconds), mass in Msun
        self.time = const.yr_to_s * 1.0E6
        self.mass = 1.0
        # cosmology (WMAP 5 year values)
        self.omega_matter = 0.258
        self.omega_lambda = 0.742
        self.H_o = 69.32   # km / s / Mpc

    @property
    def hubble_time(self):
        # Hubble time 1/H0, converted from (km/s/Mpc)^-1 to code time units.
        H0_per_second = self.H_o * const.km / const.Mpc
        return 1.0 / H0_per_second / self.time

units = _units()
#
# -------- Global Zone Parameters -------
#
class _zone_parameters(_parameters):
    """
    Zone Parameters:

        The below is a list of all parameters that are set
        by the zone, including default values. Parameters
        are listed in order of : 1) required, 2) not required but
        highly suggested, or 3) optional

        Required Parameters:

        initial_gas_mass (float) : initial gas mass in solar masses
        initial_dark_matter_mass (float) : DM mass of halo in solar
        initial_metallicity (float) : initial gas metal fraction
        dt (float) : constant timestep size in code time
        t_final (float) : time to end simulation

        Suggested Parameters:

        imf (function) : IMF function used to generate stars. Currently
            function must be one of the imf functional forms defined
            in the ``imf'' module, but user can supply their own.
            Default imf.salpeter()

        star_formation_method (int) : switch between star formation
            schemes:
              1) constant, uniform SFR throughout evolution
              2) SFR computed based on gas mass and input SFR rate
                 efficiency (cosmological)
              3) SFH table provided using SFH_filename parameter where
                 either two columns are provided, time and SFR, or
                 time and stellar mass. Column headers must be named
                 appropriately as ("time" or "SFR" or "mass").

        use_SF_mass_reservoir (bool, optional) : One of two ways to deal with low
            SFR's to ensure accurate sampling of the IMF (see the second
            below). Make sure you understand these parameters and their
            implications before setting -- know also that they may need to
            be set in certain situations. This parameter turns on the
            reservoir method whereby M_sf = dt * SFR is added to a
            reservoir each timestep. If the reservoir exceeds the
            mass threshold ``SF_mass_reservoir_size'', SF occurs in that timestep
            using up all of the reservoir. This may lead to bursty, intermittent SF
            depending on the size of the reservoir. Default = False

        use_stochastic_mass_sampling (bool, optional) : Second and preferred method to deal
            with low SFR's. Prevent normal SF if M_sf = dt * SFR is below some threshold
            value, ``stochastic_sample_mass''. Instead of shutting off SF that
            timestep completely, however, and instead of accumulating mass in a
            reservoir, compute the probability that a chunk of gas of mass
            ``stochastic_sample_mass'' is made into stars that timestep as
            P = M_sf / stochastic_sample_mass . That chunk is then formed
            completely into stars using a random number draw. Default is True

        SF_mass_reservoir_size (float, optional) : Size of accumulation reservoir used with
            ``use_SF_mass_reservoir''. Default is 1000.0

        stochastic_sample_mass (float, optional) : Size of mass chunk allowed to form
            stochastically when SFR is low. Be careful setting this to too large
            a value. Not recommended to set below ~200.0 Msun depending
            on one's choice of maximum star particle mass. Default is 200.0.

        inflow_factor (float, optional) : Sets the mass inflow rate as a function of
            the star formation rate. Default 0.05

        mass_loading_factor (float, optional) : Sets the mass outflow rate as a function of
            the star formation rate. Default 0.1

        SFR_efficiency (float, optional) : For cosmologically derived SFR's, sets the
            star formation rate efficiency of converting gas to stars in a free fall time
            Default is 0.01

        Optional:

        t_o (float, optional) : initial time. Default is 0.0
        t_final (float, optional) : simulation end time. Default is 10 Gyr
    """
    def __init__(self):
        # initial conditions
        self.initial_gas_mass = 0.0
        self.initial_dark_matter_mass = 0.0
        self.initial_metallicity = 0.0
        self.initial_stellar_mass = 0.0
        # chemical species tracking
        self.species_to_track = OrderedDict()
        self.initial_abundances = None
        # IMF: the M_min/M_max/alpha properties below keep the zone-level
        # values and the attached IMF object in sync
        self.imf = imf.salpeter()
        self.M_min = self.imf.M_min
        self.M_max = self.imf.M_max
        self.alpha = self.imf.alpha
        self.star_formation_method = 1 # 0, 1, 2, 3
        self.SFH_filename = None
        self.constant_SFR = 10.0 # code mass / code time
        self.cosmological_evolution = False # on or off
        self.initial_redshift = 10000 #
        self.final_redshift = 0.0
        # low-SFR IMF-sampling strategies (see class docstring)
        self.use_SF_mass_reservoir = False
        self.SF_mass_reservoir_size = 1000.0
        self.use_stochastic_mass_sampling = True
        self.stochastic_sample_mass = 200.0
        # - inflow, outflow, and efficiency parameters
        self.inflow_factor = 1.03 # ratio of inflow to outflow
        self.mass_loading_factor = 15.7 # constant if non-cosmological
                                        # value at z = 0 if cosmological
        self.mass_loading_index = 3.32 # index to redshift power law
        self.SFR_efficiency = 6.10E-4 # 1 / Myr
        self.SFR_dyn_efficiency = 0.0950 # unitless (sf / f_dyn)
        # timestepping
        self.t_o = 0.0 # Myr
        self.t_final = 1.0E4 # Myr
        self.dt = 1.0 # Myr
        self.adaptive_timestep = True
        self.timestep_safety_factor = 4
        self._maximum_stars = None
        self.optimize = True
        # assert time units here

    @property
    def maximum_stars(self):
        # None means "no cap"; any other value is coerced to int.
        if self._maximum_stars is None:   # fix: identity check (was "== None")
            return self._maximum_stars
        else:
            return int(self._maximum_stars)

    @maximum_stars.setter
    def maximum_stars(self, val):
        self._maximum_stars = val
        return

    @property
    def M_min(self):
        return self.__M_min

    @property
    def M_max(self):
        return self.__M_max

    @property
    def alpha(self):
        return self.__alpha

    @M_min.setter
    def M_min(self, value):
        # keep the attached IMF in sync with the zone-level value
        self.__M_min = value
        self.imf.M_min = self.__M_min
        return

    @M_max.setter
    def M_max(self, value):
        self.__M_max = value
        self.imf.M_max = self.__M_max
        return

    @alpha.setter
    def alpha(self, value):
        self.__alpha = value
        self.imf.alpha = self.__alpha
        return

zone = _zone_parameters()
#
# ----------------- Stars and Stellar Evolution ------------------
#
class _star_particle_parameters(_parameters):
    """
    Star and Stellar Physics Parameters:

        The below is a list of all parameters that are set to be
        used in evolving stars and controlling the underlying stellar
        physics properties.

        SNII_mass_threshold (float) : Lower mass limit for stars to
            explode as a Type II supernovae at the end of their life.
            Default is 8.0

        SNIa_candidate_mass_bounds (list or array of floats) : Size
            two (lower and upper bound) boundaries for mass range where
            stars turn into WD's that are candidates for exploding as
            Type 1a. Default [3.0, 8.0]

        DTD_slope (float) : Slope of the delay time distribution (DTD)
            model used to compute probability of SNIa candidates
            exploding as SNIa in a given timestep. Slope is beta,
            where probability goes as t^(-beta). Default 1.0

        NSNIa (float) : Fraction of SNIa candidates that will explode
            as Type Ia supernovae in a hubble time. Default 0.043
    """
    def __init__(self):
        self.SNII_mass_threshold = 8.0
        self.SNIa_candidate_mass_bounds = [3.0, 8.0]
        self.DTD_slope = 1.0
        self.NSNIa = 0.043
        # toggles for the individual enrichment / feedback channels
        self.use_snII = True
        self.use_snIa = True
        self.use_stellar_winds = True
        self.use_AGB_wind_phase = True
        # mass threshold (Msun) for the AGB wind phase -- presumably stars
        # below this get an AGB phase; confirm where it is used.
        self.AGB_wind_phase_mass_threshold = 8.0
        # black-body flux corrections; factor pairs come from the constants module
        self.normalize_black_body_to_OSTAR = True
        self.black_body_correction_mass = 20.0
        self.black_body_q0_factors = const.black_body_q0
        self.black_body_q1_factors = const.black_body_q1
        self.black_body_FUV_factors = const.black_body_fuv
# module-level singleton holding the stellar-physics parameters
stars = _star_particle_parameters()
#
# ----------------- Input and Output --------------
#
class _io_parameters(_parameters):
    """
    Input/output parameters: file names and output cadences (in time and
    in cycles) for the full dump files and the summary file.
    """
    def __init__(self):
        # default (name, value) pairs, applied in order
        defaults = (
            ('dump_output_basename', 'dump'),
            ('dt_dump', 0.0),
            ('cycle_dump', 0),
            ('summary_output_filename', 'summary_output.txt'),
            ('dt_summary', 0.0),
            ('cycle_summary', 0),
        )
        for attribute, value in defaults:
            setattr(self, attribute, value)

io = _io_parameters()
#
# ------------------ Data Table ----------------------
#
class _data_table(_parameters):
    """
    Data-table parameters (e.g. the stellar mass range over which the
    yields table is defined).
    """
    def __init__(self):
        # [lower, upper] mass limits of the yields table, in solar masses
        self.yields_mass_limits = [1.0, 25.0]
# module-level singleton holding the data-table parameters
data = _data_table()
#
# ------------- Helper Functions -------------
#
def information():
    """
    Welcome to the configuration parameters for the onezone
    chemical enrichment model. Parameters are classified by
    whether or not they belong to the more global onezone gas
    reservoir, the stars (and stellar physics) itself, or
    input/output. The parameters can be accessed and modified
    as attributes of the following objects:

        Zone           :   zone
        Stars          :   stars
        Input / Output :   io

    More information about these parameters can be found by
    calling the help method on a given object, e.g.:

        zone.help()

    which will print an explanation about each parameter, whether or
    not it requires user to set (i.e. will fail if defaults are kept),
    and its default value.

    Call the 'help' function in this module to see a description of
    all parameters at once.
    """
    # print() with a single argument is valid under both Python 2
    # (parenthesized print statement) and Python 3 (function call),
    # unlike the former bare "print x" statement form.
    print(information.__doc__)
def help():
    """
    Print the docstrings for all config parameter types.
    """
    for parameter_obj in (zone, stars, io):
        parameter_obj.help()
    return
def reset_all_parameters():
    """
    Reset all parameter types (zone, stars, io) to their default values.
    """
    for obj in [zone, stars, io]:
        obj.reset_parameters_to_default()
    # print() with a single argument is valid under both Python 2 and
    # Python 3; the bare print statement form is Python 2 only.
    print("ALL parameters reset to default")
    return
| {
"repo_name": "aemerick/onezone",
"path": "config.py",
"copies": "1",
"size": "12565",
"license": "mit",
"hash": 3984535296525362700,
"line_mean": 32.0657894737,
"line_max": 93,
"alpha_frac": 0.5671309192,
"autogenerated": false,
"ratio": 3.9901556049539537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010199992347106026,
"num_lines": 380
} |
__author__ = "aemerick <emerick@astro.columbia.edu>"
# --- external ---
import numpy as np
# --- internal ---
from constants import CONST as const
# helper functions for computing physics models
def s99_wind_velocity(L, M, T, Z):
    """
    Starburst99 stellar wind velocity model: a four-parameter fit in
    luminosity, mass, temperature, and metallicity. The wind velocity
    is returned in cm/s.
    """
    # log10 of the wind velocity in km/s
    log_v = (1.23
             - 0.30 * np.log10(L / const.Lsun)
             + 0.55 * np.log10(M)
             + 0.64 * np.log10(T)
             + 0.13 * np.log10(Z / const.Zsolar_s99))

    return (10.0 ** log_v) * 1.0E5   # km/s -> cm/s
def s99_wind_mdot(L, M, T, Z):
    """
    STARBURST99 stellar wind mass loss rate model: a four-parameter fit
    in luminosity, mass, temperature, and metallicity. The ejecta rate
    Mdot is returned in Msun / s.
    """
    # log10 of the mass loss rate in Msun / yr
    log_mdot = (-24.06
                + 2.45 * np.log10(L / const.Lsun)
                - 1.10 * np.log10(M)
                + 1.31 * np.log10(T)
                + 0.80 * np.log10(Z / const.Zsolar_s99))

    return (10.0 ** log_mdot) / const.yr_to_s
def SNIa_yields( elements , return_dict = False):
    """
    Wrapper around dictionary of SNIa yields from
    Thielemann et. al. 1986 (Table 5). All isotopes for each element
    are summed together. If return_dict is True, returns the yield
    dictionary instead, and 'elements' is ignored. 'elements' can be
    a single string or list of strings, where strings are atomic symbols.
    Returns a float for a single symbol, or a numpy array for a list.
    """

    # dict of SNIa values (isotopes pre-summed per element, in Msun)
    yields_dict ={'m_tot'   : 1.2447714757,
                  'm_metal' : 1.2447714757,
                  'C'  : 5.0E-2 + 4.5E-13,
                  'N'  : 2.7E-9 + 4.4E-9,
                  'O'  : 1.3E-1 + 1.1E-10 + 1.7E-12,
                  'F'  : 2.5E-13,
                  'Ne' : 1.8E-3 + 1.1E-8 + 2.5E-3,
                  'Na' : 1.8E-6,
                  'Mg' : 1.6E-6 + 5.8E-6 + 4.0E-6,
                  'Al' : 4.4E-4,
                  'Si' : 1.5E-1 + 3.0E-4 + 3.4E-3,
                  'P'  : 1.4E-4,
                  'S'  : 8.2E-2 + 7.2E-4 + 1.5E-3 + 2.5E-8,
                  'Cl' : 1.2E-4 + 2.8E-5,
                  'Ar' : 1.7E-2 + 1.2E-3,
                  'K'  : 9.9E-5 + 6.6E-6,
                  'Ca' : 1.5E-2 + 3.6E-5 + 4.2E-8 + 1.8E-5 + 1.3E-9 + 5.7E-12,
                  'Sc' : 1.6E-7,
                  'Ti' : 1.9E-5 + 3.1E-7 + 2.0E-4 + 9.3E-6 + 1.6E-6,
                  'V'  : 5.0E-9 + 2.8E-5,
                  'Cr' : 2.3E-4 + 5.2E-3 + 6.6E-4 + 3.8E-5,
                  'Mn' : 6.7E-3,
                  'Fe' : 9.0E-2 + 6.3E-1 + 2.2E-2 + 2.5E-4,
                  'Co' : 7.3E-4,
                  'Ni' : 1.3E-2 + 1.4E-2 + 2.4E-4 + 5.1E-3 + 2.6E-7,
                  'Cu' : 2.0E-6 + 8.5E-6,
                  'Zn' : 1.3E-5 + 1.9E-5 + 8.2E-8 + 3.5E-7 + 1.0E-9,
                  'Ga' : 1.0E-7 + 6.1E-9,
                  'Ge' : 8.4E-7 + 5.7E-8 + 8.1E-11 + 1.8E-8}

    # elements with no SNIa yield in the table are explicitly zero
    zero_elements = ['H','He','Li','Be','B','As','Se','Br','Kr','Rb','Sr','Y','Zr',
                     'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb','Te','I',
                     'Xe','Cs','Ba','La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy',
                     'Ho','Er','Tm','Yb','Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au',
                     'Hg','Tl','Pb','Bi']
    for e in zero_elements:
        yields_dict[e] = 0.0

    if return_dict:
        return yields_dict

    # COMPAT FIX: 'basestring' exists only on Python 2; fall back to
    # 'str' on Python 3 so single symbols are not iterated per-character.
    try:
        string_types = basestring
    except NameError:
        string_types = str

    if isinstance(elements, string_types):
        return yields_dict[elements]
    else:
        return np.asarray([ yields_dict[x] for x in elements ])
def SNIa_probability(t, t_form, lifetime, DTD_slope = 1.0, NSNIa = 0.043):
    """
    Delay time distribution (DTD) model giving dP/dt for a given white
    dwarf to explode as a Type Ia supernova, as a function of the time
    since formation of its main sequence progenitor.

    Parameters
    ----------
    t : float
        Current time
    t_form : float
        Formation time of the main sequence progenitor
    lifetime : float
        Main sequence lifetime of the progenitor (DTD turn-on time)
    DTD_slope : float, optional
        Slope beta of the DTD, probability ~ t^(-beta). Default 1.0
    NSNIa : float, optional
        Fraction of WD candidates that explode as a Type Ia supernova
        within a Hubble time. This number is observationally informed,
        but depends on one's choice of IMF and the mass range of MS
        stars that can form Type Ia WD progenitors. Default 0.043
    """
    # normalization such that the DTD integrates to NSNIa over a Hubble
    # time; beta == 1 requires the logarithmic form of the integral
    dPdt = NSNIa
    if (DTD_slope == 1.0):
        dPdt /= np.log( (const.hubble_time + lifetime) / lifetime )
    else:
        dPdt *= (- DTD_slope + 1.0)
        # BUG FIX: previously referenced the bare name 'hubble_time',
        # which raised NameError whenever DTD_slope != 1.0; the constant
        # lives on const (as used in the branch above).
        dPdt /= ( (const.hubble_time + lifetime)**(-DTD_slope + 1.0) - (lifetime)**(-DTD_slope+1.0))

    dPdt *= (t - t_form)**(-DTD_slope)
    return dPdt
def white_dwarf_mass(M):
    """
    Initial-to-final mass function (IFMF): returns the white dwarf mass
    produced by a main sequence progenitor of mass M.
    IFMF taken from Salaris et. al. 2009 (piecewise linear, break at 4 Msun).
    """
    if M < 4.0:
        return 0.134 * M + 0.331
    return 0.047 * M + 0.679
| {
"repo_name": "aemerick/onezone",
"path": "physics.py",
"copies": "1",
"size": "4845",
"license": "mit",
"hash": 9168840922944632000,
"line_mean": 35.7045454545,
"line_max": 94,
"alpha_frac": 0.4811145511,
"autogenerated": false,
"ratio": 2.7311161217587374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37122306728587373,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aerospike'
import copy
import ntpath
from lib import logutil
import os
from lib.logsnapshot import LogSnapshot
from lib.serverlog import ServerLog
from lib.logreader import LogReader, SHOW_RESULT_KEY, COUNT_RESULT_KEY, END_ROW_KEY, TOTAL_ROW_HEADER
from lib import terminal
import re
# strftime formats used when emitting merged/sliced log timestamps
DT_FMT = "%b %d %Y %H:%M:%S"          # full date-time
DT_TO_MINUTE_FMT = "%b %d %Y %H:%M"   # date-time truncated to the minute
DT_TIME_FMT = "%H:%M:%S"              # time-of-day only
# Indices/separators for splitting a "<date> <time>" timestamp string:
# segment index 0 is the date part, 1 is the time part.
DATE_SEG = 0
DATE_SEPARATOR = "-"
YEAR = 0    # positions within the date segment
MONTH = 1
DATE = 2
TIME_SEG = 1
TIME_SEPARATOR = ":"
HH = 0      # positions within the time segment
MM = 1
SS = 2
class Logger(object):
    """Registry and analysis front-end for Aerospike log files.

    Tracks two kinds of inputs:
      * cluster "collectinfo" snapshot files, keyed by timestamp
        (LogSnapshot objects), and
      * individual server log files, keyed by display name / node id
        (ServerLog objects),
    each with an "all" dict and a "selected" working subset. Provides
    grep / count / diff / latency generators that merge results from
    several server logs in timestamp order.

    NOTE(review): the five dicts below are class attributes, shared by
    every Logger instance -- presumably a single instance is intended;
    confirm before constructing more than one.
    """
    logInfo = {}  # NOTE(review): not referenced anywhere in this class
    all_cluster_files = {}
    selected_cluster_files = {}
    all_server_files = {}
    selected_server_files = {}

    def __init__(self, log_path):
        """Index cluster snapshots found under log_path and build the
        fg/bg color tables from terminal's fg_*/bg_* helpers."""
        self.log_path = log_path
        self.log_reader = LogReader()
        self.add_cluster_snapshots(path=log_path)
        # Collect (color_name, color_function) pairs for every
        # terminal.fg_* / terminal.bg_* attribute, skipping "clear" ones.
        fg_color_re = re.compile("^(fg_(.*))$")
        self.fg_colors = map(
            lambda v: (
                fg_color_re.match(v).groups()[1], getattr(
                    terminal, fg_color_re.match(v).group(1))), filter(
                lambda x: fg_color_re.search(x) and "clear" not in x, dir(terminal)))
        bg_color_re = re.compile("^(bg_(.*))$")
        self.bg_colors = map(
            lambda v: (
                bg_color_re.match(v).groups()[1], getattr(
                    terminal, bg_color_re.match(v).group(1))), filter(
                lambda x: bg_color_re.search(x) and "clear" not in x, dir(terminal)))

    def __str__(self):
        """Numbered human-readable listing of all cluster snapshot files
        and the nodes found in each (files with no nodes are skipped)."""
        files = self.get_list(cluster_snapshot=True, all_list=True)
        retval = ""
        i = 1
        for timestamp in sorted(files.keys()):
            nodes = self.log_reader.get_nodes(files[timestamp])
            if len(nodes) == 0:
                continue
            retval += "\n " + str(i) + ": "
            retval += ntpath.basename(files[timestamp])
            retval += " ("
            retval += str(timestamp)
            retval += ")"
            retval += "\n\tFound %s nodes" % (len(nodes))
            retval += "\n\tOnline: %s" % (", ".join(nodes))
            retval += "\n"
            i = i + 1
        return retval

    def create_log_snapshot(self, timestamp="", file=""):
        """Build a LogSnapshot for a collectinfo file; derives the
        timestamp from the file when not given. Returns None on failure."""
        if not file:
            return None
        if not timestamp:
            timestamp = self.log_reader.get_timestamp(file)
        if not timestamp:
            return None
        return LogSnapshot(timestamp=timestamp, cluster_file=file, log_reader=self.log_reader)

    def create_server_log(self, display_name="", file=""):
        """Build a ServerLog for a server log file; derives the display
        name (node id) from the file when not given. None on failure."""
        if not file:
            return None
        if not display_name:
            display_name = self.log_reader.get_server_node_id(file)
        if not display_name:
            return None
        return ServerLog(display_name=display_name, server_file=file, log_reader=self.log_reader)

    def get_log_snapshot(self, timestamp=""):
        """Return the registered LogSnapshot for timestamp, or None."""
        if not timestamp or timestamp not in self.all_cluster_files:
            return None
        return self.all_cluster_files[timestamp]

    def get_server_log(self, display_name=""):
        """Return the registered ServerLog for display_name, or None."""
        if not display_name or display_name not in self.all_server_files:
            return None
        return self.all_server_files[display_name]

    def get_node(self, path):
        """Reverse-lookup: return the node name whose selected server
        entry equals path, else path itself.
        NOTE(review): selected_server_files holds ServerLog objects
        elsewhere in this class, not raw paths -- this equality may
        never match; confirm intended behavior."""
        for node, fpath in self.selected_server_files.iteritems():
            if path == fpath:
                return node
        return path

    def get_files_by_index(self, clusterMode, indices=[]):
        """Resolve 1-based indices (into the sorted "all" listing) to
        files; with no indices, use the current selection.
        Cluster mode returns {timestamp: [snapshot]}, server mode
        returns {"cluster": [server_log, ...]}.
        NOTE(review): mutable default 'indices=[]' is never mutated
        here, so it is harmless as written."""
        if clusterMode:
            files = {}
            if indices:
                timestamps = sorted(self.all_cluster_files.keys())
                for index in indices:
                    try:
                        files[timestamps[index -1]] = [self.all_cluster_files[timestamps[index-1]]]
                    except Exception:
                        continue
            else:
                for timestamp in self.selected_cluster_files:
                    try:
                        files[timestamp] = [self.selected_cluster_files[timestamp]]
                    except Exception:
                        continue
            return files
        else:
            files = []
            if indices:
                nodes = sorted(self.all_server_files.keys())
                for index in indices:
                    try:
                        files.append(self.all_server_files[nodes[index - 1]])
                    except Exception:
                        continue
            else:
                for node in sorted(self.selected_server_files.keys()):
                    try:
                        files.append(self.selected_server_files[node])
                    except Exception:
                        continue
            return {"cluster": files}

    def get_files(self, clusterMode, dir_path=""):
        """List files under dir_path (defaults to self.log_path) that the
        log_reader recognizes as cluster (collectinfo) or server logs."""
        try:
            if not dir_path:
                dir_path = self.log_path
            files = logutil.get_all_files(dir_path)
            if clusterMode:
                cluster_files = []
                for file in files:
                    try:
                        if self.log_reader.is_cluster_log_file(file):
                            cluster_files.append(file)
                    except Exception:
                        pass
                return cluster_files
            else:
                server_files = []
                for file in files:
                    try:
                        if self.log_reader.is_server_log_file(file):
                            server_files.append(file)
                    except Exception:
                        pass
                return server_files
        except Exception:
            return []

    def add_cluster_snapshots(self, path=""):
        """Register every collectinfo file found at path (file or dir) in
        both the "all" and "selected" dicts, keyed by timestamp.
        Returns (number_added, error_message_string)."""
        snapshots_added = 0
        if not path:
            return snapshots_added, ">>> Wrong path <<<"
        error = ""
        if os.path.isdir(path):
            for file in self.get_files(True, path):
                timestamp = self.log_reader.get_timestamp(file)
                if timestamp:
                    log_snapshot = self.create_log_snapshot(timestamp, file)
                    self.selected_cluster_files[timestamp] = log_snapshot
                    self.all_cluster_files[timestamp] = log_snapshot
                    snapshots_added += 1
                else:
                    error += ">>> Cannot add collectinfo file from asmonitor or any other log file other than collectinfo. Use the one generated by asadm (>=0.0.13). Ignoring " + file + " <<<\n"
            if snapshots_added==0:
                error += ">>> No aerospike collectinfo file available in " + path + ". <<<\n"
        elif os.path.isfile(path) and self.log_reader.is_cluster_log_file(path):
            timestamp = self.log_reader.get_timestamp(path)
            if timestamp:
                log_snapshot = self.create_log_snapshot(timestamp, path)
                self.selected_cluster_files[timestamp] = log_snapshot
                self.all_cluster_files[timestamp] = log_snapshot
                snapshots_added += 1
            else:
                error += ">>> Missing Timestamp in file. Use the collectinfo generated by asadm (>=0.0.13). <<<\n"
        else:
            error += ">>> " + path + " is incorrect path or not an aerospike collectinfo file <<<\n"
        return snapshots_added, error

    def add_server_logs(self, prefix="", path=""):
        """Register every server log found at path (file or dir), keyed
        by node id from the file, or by prefix(+counter) when the file
        has no node id. Returns (number_added, error_message_string)."""
        server_logs_added = 0
        if not path:
            return server_logs_added, ">>> Wrong path <<<"
        error = ""
        if os.path.isdir(path):
            count = 0
            for file in self.get_files(False, path):
                file_key = self.log_reader.get_server_node_id(file)
                if not file_key:
                    if not prefix:
                        error += ">>> " + file + " is not new aerospike server log file with node id. Please provide prefix to set name for it. <<<\n"
                        continue
                    file_key = prefix + str(count)
                    count += 1
                server_log = self.create_server_log(display_name=file_key, file=file)
                self.all_server_files[file_key] = server_log
                self.selected_server_files[file_key] = server_log
                server_logs_added += 1
            if server_logs_added==0:
                error += ">>> No aerospike server log file available in " + path + ". <<<\n"
        elif os.path.isfile(path) and self.log_reader.is_server_log_file(path):
            file_key = self.log_reader.get_server_node_id(path)
            if file_key or prefix:
                if not file_key:
                    file_key = prefix
                server_log = self.create_server_log(display_name=file_key, file=path)
                self.all_server_files[file_key] = server_log
                self.selected_server_files[file_key] = server_log
                server_logs_added += 1
            else:
                error += ">>> " + path + " is not new aerospike server log file with node id. Please provide prefix to set name for it. <<<\n"
        else:
            error += ">>> " + path + " is incorrect path or not an aerospike server log file. <<<\n"
        return server_logs_added, error

    def get_name_by_index(self, indices, cluster_snapshot=True, from_all_list=True):
        """Map 0-based indices (or 'all') into the sorted key listing of
        the chosen dict and return the matching names."""
        selected_names = []
        if not indices:
            return selected_names
        if cluster_snapshot:
            if from_all_list:
                log_names = sorted(self.all_cluster_files.keys())
            else:
                log_names = sorted(self.selected_cluster_files.keys())
        else:
            if from_all_list:
                log_names = sorted(self.all_server_files.keys())
            else:
                log_names = sorted(self.selected_server_files.keys())
        if isinstance(indices, int):
            indices = [indices]
        if indices=='all' or 'all' in indices:
            indices = range(len(log_names))
        if isinstance(indices, list):
            for index in indices:
                try:
                    selected_names.append(log_names[index])
                except Exception:
                    continue
        return selected_names

    def remove_logs(self, logs, cluster_snapshot=True, from_all_list=True):
        """Drop the named logs; removing from the "all" list destroys the
        underlying object and also removes it from the selection."""
        if not logs:
            return
        for log in logs:
            try:
                if cluster_snapshot:
                    if from_all_list:
                        if log in self.all_cluster_files:
                            self.all_cluster_files[log].destroy()
                            del self.all_cluster_files[log]
                    if log in self.selected_cluster_files:
                        del self.selected_cluster_files[log]
                else:
                    if from_all_list:
                        if log in self.all_server_files:
                            self.all_server_files[log].destroy()
                            del self.all_server_files[log]
                    if log in self.selected_server_files:
                        del self.selected_server_files[log]
            except Exception:
                continue

    def get_list(self, cluster_snapshot=True, all_list=True):
        """Return {key: file_path} for the chosen dict (cluster snapshots
        keyed by timestamp, server logs keyed by display name)."""
        log_entries = {}
        if cluster_snapshot:
            if all_list:
                snapshot_list = self.all_cluster_files
            else:
                snapshot_list = self.selected_cluster_files
            for snapshot in snapshot_list:
                log_entries[snapshot] = snapshot_list[snapshot].cluster_file
        else:
            if all_list:
                server_list = self.all_server_files
            else:
                server_list = self.selected_server_files
            for server in server_list:
                log_entries[server] = server_list[server].server_file
        return log_entries

    def select_cluster_snapshots(self, year="", month="", date="", hr="", minutes="", sec=""):
        """Rebuild the cluster selection from all snapshots whose
        timestamp matches every provided date/time component."""
        snapshots = self.all_cluster_files.keys()
        if year:
            snapshots = filter(lambda timestamp: logutil.check_time(year,self.log_reader.get_time(timestamp),DATE_SEG,YEAR),snapshots)
        if month:
            snapshots = filter(lambda timestamp: logutil.check_time(month,self.log_reader.get_time(timestamp),DATE_SEG,MONTH),snapshots)
        if date:
            snapshots = filter(lambda timestamp: logutil.check_time(date,self.log_reader.get_time(timestamp),DATE_SEG,DATE),snapshots)
        if hr:
            snapshots = filter(lambda timestamp: logutil.check_time(hr,self.log_reader.get_time(timestamp),TIME_SEG,HH),snapshots)
        if minutes:
            snapshots = filter(lambda timestamp: logutil.check_time(minutes,self.log_reader.get_time(timestamp),TIME_SEG,MM),snapshots)
        if sec:
            snapshots = filter(lambda timestamp: logutil.check_time(sec,self.log_reader.get_time(timestamp),TIME_SEG,SS),snapshots)
        self.selected_cluster_files.clear()
        for snapshot in snapshots:
            self.selected_cluster_files[snapshot] = self.all_cluster_files[snapshot]

    def select_logs(self, indices="all", cluster_snapshot=True):
        """Add logs (by 1-based index into the sorted "all" listing, or
        'all') to the current selection; existing selections are kept."""
        if not indices or not isinstance(indices,list):
            return
        if cluster_snapshot:
            all_list = self.all_cluster_files
            selected_list = self.selected_cluster_files
        else:
            all_list = self.all_server_files
            selected_list = self.selected_server_files
        all_log_keys = sorted(all_list.keys())
        if indices=='all' or 'all' in indices:
            indices = range(len(all_log_keys))
        #selected_list.clear()
        for index in indices:
            try:
                selected_list[all_log_keys[int(index) - 1]] = all_list[all_log_keys[int(index) - 1]]
            except Exception:
                continue

    def get_data(self, type="", stanza=""):
        """Collect {timestamp: data} from every selected cluster snapshot
        for the given data type/stanza; snapshots that fail are skipped."""
        res_dic = {}
        if not stanza or not type:
            return res_dic
        for timestamp in sorted(self.selected_cluster_files.keys()):
            try:
                res_dic[timestamp] = self.selected_cluster_files[timestamp].get_data(type=type, stanza=stanza)
            except Exception:
                continue
        return res_dic

    def infoGetConfig(self, stanza=""):
        """Configuration data for stanza across selected snapshots."""
        return self.get_data(type="config", stanza=stanza)

    def infoStatistics(self, stanza=""):
        """Statistics data for stanza across selected snapshots."""
        return self.get_data(type="statistics", stanza=stanza)

    def infoGetHistogram(self, stanza=""):
        """Distribution (histogram) data for stanza across selected snapshots."""
        return self.get_data(type="distribution", stanza=stanza)

    def infoSummary(self, stanza=""):
        """Summary data for stanza across selected snapshots."""
        return self.get_data(type="summary", stanza=stanza)

    def get_diff_fg_bg_color(self, old_fg_index, old_bg_index):
        """Advance to the next fg/bg color index pair, wrapping around
        and skipping pairs where fg and bg would be the same color."""
        new_fg_index = old_fg_index + 1
        new_bg_index = old_bg_index
        if new_fg_index >= len(self.fg_colors):
            new_fg_index = 0
            new_bg_index = (new_bg_index + 1) % len(self.bg_colors)
        while(self.bg_colors[new_bg_index][0] == self.fg_colors[new_fg_index][0]):
            new_fg_index += 1
            if new_fg_index >= len(self.fg_colors):
                new_fg_index = 0
                new_bg_index = (new_bg_index + 1) % len(self.bg_colors)
        return new_fg_index, new_bg_index

    def get_fg_bg_color_index_list(self, list_size):
        """Return list_size distinct (fg_index, bg_index) pairs, starting
        from a fixed seed so colors are stable across calls."""
        fg_color = 2
        bg_color = 6
        colors = []
        for i in range(list_size):
            fg_color, bg_color = self.get_diff_fg_bg_color(fg_color, bg_color)
            colors.append((fg_color, bg_color))
        return colors

    def grep(
        self,
        file_handlers, search_strs, ignore_strs=[], is_and=False, is_casesensitive=True, start_tm_arg="head", duration_arg="",
        uniq=False, grep_cluster_logs=True, output_page_size = 10, system_grep=False
    ):
        """Generator yielding grep results. For cluster logs, yields one
        show-result per handler; for server logs, merges the handlers'
        show iterators in timestamp order via server_log_merger."""
        if file_handlers and search_strs:
            if grep_cluster_logs:
                for file_handler in file_handlers:
                    file_handler.set_input(search_strs=search_strs, ignore_strs=ignore_strs, is_and=is_and, is_casesensitive=is_casesensitive)
                    show_it = file_handler.show_iterator()
                    show_result = {}
                    show_result[SHOW_RESULT_KEY] = show_it.next()
                    yield show_result
                    show_it.close()
            else:
                show_its = {}
                # align all handlers to the earliest requested start time
                min_start_tm = min(s.get_start_tm(start_tm=start_tm_arg) for s in file_handlers)
                for file_handler in file_handlers:
                    file_handler.set_input(search_strs=search_strs, ignore_strs=ignore_strs, is_and=is_and, is_casesensitive=is_casesensitive,
                                           start_tm=min_start_tm, duration=duration_arg, system_grep=system_grep, uniq=uniq)
                    show_its[file_handler.display_name] = file_handler.show_iterator()
                merger = self.server_log_merger(show_its, return_strings=True, output_page_size=output_page_size)
                for val in merger:
                    yield val
                for it in show_its:
                    show_its[it].close()
                merger.close()

    def grepCount(self,
        file_handlers, search_strs, ignore_strs=[], is_and=False, is_casesensitive=True, start_tm_arg="head", duration_arg="",
        uniq=False, slice_duration="600", grep_cluster_logs=True, output_page_size=10, system_grep=False
    ):
        """Generator yielding match counts: per-file totals for cluster
        logs, or per-time-slice counts merged across server logs."""
        try:
            if file_handlers and search_strs:
                try:
                    if grep_cluster_logs:
                        for file_handler in file_handlers:
                            file_handler.set_input(search_strs=search_strs, ignore_strs=ignore_strs, is_and=is_and, is_casesensitive=is_casesensitive)
                            count_it = file_handler.count_iterator()
                            count_result = {}
                            count_result[file_handler.timestamp] = {}
                            count_result[file_handler.timestamp][COUNT_RESULT_KEY] = {}
                            count_result[file_handler.timestamp][COUNT_RESULT_KEY][TOTAL_ROW_HEADER] = count_it.next()
                            yield count_result
                            count_it.close()
                    else:
                        count_its = {}
                        min_start_tm = min(s.get_start_tm(start_tm=start_tm_arg) for s in file_handlers)
                        for file_handler in file_handlers:
                            file_handler.set_input(search_strs=search_strs, ignore_strs=ignore_strs, is_and=is_and, is_casesensitive=is_casesensitive,
                                                   start_tm=min_start_tm, duration=duration_arg, slice_duration=slice_duration, uniq=uniq, system_grep=system_grep)
                            count_its[file_handler.display_name] = file_handler.count_iterator()
                        merger = self.server_log_merger(count_its, output_page_size=output_page_size, default_value=0)
                        for val in merger:
                            yield val
                        for it in count_its:
                            count_its[it].close()
                        merger.close()
                except Exception:
                    pass
        except Exception:
            pass

    def grepDiff(self,
        file_handlers, search_strs, is_casesensitive=True, start_tm_arg="head", duration_arg="",
        slice_duration="600", every_nth_slice=1, upper_limit_check="", output_page_size=10
    ):
        """Generator yielding per-slice value differences for a grepped
        metric, merged across server logs in timestamp order."""
        try:
            if file_handlers and search_strs:
                diff_its = {}
                min_start_tm = min(s.get_start_tm(start_tm=start_tm_arg) for s in file_handlers)
                for file_handler in file_handlers:
                    file_handler.set_input(search_strs=search_strs, is_casesensitive=is_casesensitive, is_and=True,
                                           start_tm=min_start_tm, duration=duration_arg, slice_duration=slice_duration, upper_limit_check=upper_limit_check,
                                           every_nth_slice=every_nth_slice)
                    diff_its[file_handler.display_name] = file_handler.diff_iterator()
                merger = self.server_log_merger(diff_its, output_page_size=output_page_size)
                for val in merger:
                    yield val
                for it in diff_its:
                    diff_its[it].close()
                merger.close()
        except Exception:
            pass

    def loglatency(self,
        file_handlers, hist, start_tm_arg="head", duration_arg="", slice_duration="10",
        bucket_count=3, every_nth_bucket=1, rounding_time=True, output_page_size=10, ns=None
    ):
        """Generator yielding latency-histogram analysis for histogram
        'hist', merged across server logs in timestamp order."""
        try:
            if file_handlers and hist:
                latency_its = {}
                min_start_tm = min(s.get_start_tm(start_tm=start_tm_arg) for s in file_handlers)
                for file_handler in file_handlers:
                    file_handler.set_input(search_strs=hist, start_tm=min_start_tm, duration=duration_arg, slice_duration=slice_duration,
                                           bucket_count=bucket_count, every_nth_bucket=every_nth_bucket,
                                           read_all_lines=True, rounding_time=rounding_time, ns=ns)
                    latency_its[file_handler.display_name] = file_handler.latency_iterator()
                merger = self.server_log_merger(latency_its, output_page_size=output_page_size)
                for val in merger:
                    yield val
                for it in latency_its:
                    latency_its[it].close()
                merger.close()
        except Exception:
            pass

    def server_log_merger(self, file_streams, output_page_size=3, return_strings=False, end_key=END_ROW_KEY, default_value=[]):
        """K-way merge of per-server (timestamp, result) iterators.

        Repeatedly advances the stream(s) with the minimum timestamp,
        accumulating either colored display strings (return_strings=True)
        or per-server nested dicts; streams without data at the current
        timestamp are filled with default_value. Yields a page every
        output_page_size merged rows; an end_key row from a stream is
        held back and merged in at the end (used by latency analysis).
        NOTE(review): mutable default 'default_value=[]' is shared across
        calls -- it is only ever assigned into results, not mutated, so
        it appears safe as written.
        """
        latency_end={}
        result = {}
        merge_result = {}
        tm_keys = {}
        need_to_process = False
        keys_in_input = []
        result_count = 0
        # prime each stream with its first (timestamp, result) pair
        for key in file_streams.keys():
            if not return_strings:
                merge_result[key] = {}
            try:
                tm, res = file_streams[key].next()
                if not tm:
                    continue
                if tm == end_key:
                    latency_end[key] = res
                    continue
            except Exception:
                continue
            need_to_process = True
            result[key] = {}
            tm_keys[key] = {}
            if not return_strings:
                if not keys_in_input:
                    keys_in_input = res.keys()
            tm_keys[key] = tm
            result[key] = res
        if return_strings:
            colors = self.get_fg_bg_color_index_list(len(file_streams))
        while need_to_process:
            need_to_process = False
            try:
                # streams currently holding the minimum timestamp
                min_keys = [k for k, x in tm_keys.items() if not any(y < x for y in tm_keys.values())]
            except Exception:
                break
            if not min_keys:
                break
            current_tm = tm_keys[min_keys[0]]
            for file_key in sorted(file_streams.keys()):
                if file_key in min_keys:
                    if return_strings:
                        try:
                            merge_result[SHOW_RESULT_KEY] += "%s %s%s::" % (self.bg_colors[colors[(file_streams.keys().index(file_key))][0]][1](), terminal.reset(), file_key)
                        except Exception:
                            merge_result[SHOW_RESULT_KEY] = "%s %s%s::" % (self.bg_colors[colors[(file_streams.keys().index(file_key))][0]][1](), terminal.reset(), file_key)
                        merge_result[SHOW_RESULT_KEY] += result[file_key]
                    else:
                        if merge_result[file_key]:
                            for k in keys_in_input:
                                merge_result[file_key][k].update(result[file_key][k])
                        else:
                            merge_result[file_key].update(result[file_key])
                    del result[file_key]
                    del tm_keys[file_key]
                    # advance the consumed stream
                    try:
                        tm, res = file_streams[file_key].next()
                        if not tm:
                            continue
                        if tm == end_key:
                            latency_end[file_key] = res
                            continue
                    except Exception:
                        continue
                    need_to_process = True
                    tm_keys[file_key] = tm
                    result[file_key] = res
                else:
                    if file_key in tm_keys and tm_keys[file_key]:
                        need_to_process = True
                    if return_strings:
                        continue
                    # stream has no row at current_tm: pad with default
                    for k in keys_in_input:
                        if k not in merge_result[file_key]:
                            merge_result[file_key][k] = {}
                        merge_result[file_key][k][current_tm.strftime(DT_FMT)] = default_value
            result_count += 1
            if result_count == output_page_size:
                yield merge_result
                result_count = 0
                merge_result = {}
                if return_strings:
                    continue
                for key in file_streams.keys():
                    merge_result[key] = {}
        if not latency_end:
            yield merge_result
        else:
            # merge held-back end_key rows (e.g. latency totals) last
            self.balance_dict(latency_end, file_streams.keys(), default_value)
            for file_key in latency_end:
                if file_key not in merge_result or not merge_result[file_key]:
                    merge_result[file_key] = latency_end[file_key]
                else:
                    for sub_key in latency_end[file_key]:
                        if sub_key not in merge_result[file_key] or not merge_result[file_key][sub_key]:
                            merge_result[file_key][sub_key] = latency_end[file_key][sub_key]
                        else:
                            merge_result[file_key][sub_key].update(latency_end[file_key][sub_key])
            yield merge_result

    def balance_dict(self, d, keys, default_value):
        """Ensure every key in keys has an entry in d, filling missing or
        empty entries with the structure of the first entry (values
        replaced by default_value). Mutates d in place."""
        if not d or not isinstance(d, dict):
            return d
        structure = self.get_dict_structure(d[d.keys()[0]], default_value)
        for key in keys:
            if not key in d.keys() or not d[key]:
                d[key] = structure

    def get_dict_structure(self, d, val=[]):
        """Return a copy of nested-dict d's key structure with every leaf
        value replaced by val."""
        if not isinstance(d, dict):
            return val
        structure = {}
        for key in d:
            if not isinstance(d[key], dict):
                structure[key] = val
            else:
                structure[key] = self.get_dict_structure(d[key], val)
        return structure
| {
"repo_name": "tejassp/asadmn-web",
"path": "webapp/lib/logger.py",
"copies": "1",
"size": "27000",
"license": "unlicense",
"hash": 4269503029939395600,
"line_mean": 42.2692307692,
"line_max": 194,
"alpha_frac": 0.516,
"autogenerated": false,
"ratio": 4.184100418410042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5200100418410042,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Afief'
from datetime import datetime
from peewee import CharField, TextField, BooleanField, ForeignKeyField, \
DateField
from apps.models import db
from apps.models.auth import User
class Phile(db.Model):
    """An uploaded file: original name, type, and storage path."""
    filename = CharField(max_length=100)  # original file name
    filetype = CharField(max_length=100)  # file type (exact semantics not shown here)
    filepath = TextField()                # storage location on disk
class Post(db.Model):
    """A post written by a user; listed newest-first."""
    judul = CharField(max_length=100)    # title
    konten = TextField()                 # body content
    # NOTE(review): default is datetime.now (a datetime) on a DateField;
    # peewee presumably coerces it, but date.today may be the clearer
    # intent -- confirm.
    date_created = DateField(default=datetime.now)
    publik = BooleanField(default=True)  # publicly visible by default
    author = ForeignKeyField(User)
    class Meta:
        # newest posts first
        order_by = ('-date_created',)
class MataKuliah(db.Model):
    """A course (mata kuliah) with a short code and its lecturer."""
    kode = CharField(max_length=5)     # course code
    judul = CharField(max_length=100)  # course title
    dosen = ForeignKeyField(User)      # lecturer (dosen)
    class Meta:
        # ordered by course code
        order_by = ('kode',)
class Tugas(db.Model):
    """An assignment (tugas) belonging to a course."""
    judul = CharField(max_length=100)   # assignment title
    keterangan = TextField(null=True)   # optional description
    mata_kuliah = ForeignKeyField(MataKuliah)
    tanggal_dibuat = DateField(default=datetime.now)  # creation date
    tanggal_terakhir = DateField()      # due date; required (no default)
    class Meta:
        # newest assignments first
        order_by = ('-id',)
class TugasFile(db.Model):
    """Join table linking an assignment to one of its attached files."""
    tugas = ForeignKeyField(Tugas)
    phile = ForeignKeyField(Phile)
class KumpulTugas(db.Model):
    """A student's file submission for an assignment; listed newest-first."""
    tugas = ForeignKeyField(Tugas)
    mahasiswa = ForeignKeyField(User)  # submitting student
    tanggal_mengumpulkan = DateField(default=datetime.now)  # submission date
    phile = ForeignKeyField(Phile)     # the submitted file
    class Meta:
        # most recent submissions first
        order_by = ('-tanggal_mengumpulkan',)
| {
"repo_name": "ap13p/elearn",
"path": "apps/models/others.py",
"copies": "1",
"size": "1428",
"license": "bsd-3-clause",
"hash": 4063173094192638500,
"line_mean": 22.4098360656,
"line_max": 73,
"alpha_frac": 0.6862745098,
"autogenerated": false,
"ratio": 3.230769230769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9417043740569231,
"avg_score": 0,
"num_lines": 61
} |
__author__ = 'aftab'
import atom
import basisset
import molecule
import pertabdict
import shell
#Basis set parser for standard basis set files in Quantum Chemistry
#Tested as working on 2/3/2014 by Aftab Patel
#TODO: Add some safety
#utility function to count no of lines in a file
def file_len(file_reference):
    """Return the number of lines from the current position to EOF,
    restoring the file position before returning.

    BUG FIX: the previous enumerate-based version left its counter
    unbound for an empty file, raising UnboundLocalError; an empty
    file now correctly yields 0.
    """
    position = file_reference.tell()
    count = sum(1 for _ in file_reference)
    # restore the caller's read position
    file_reference.seek(position)
    return count
class Parser:
    """Parse a standard quantum-chemistry basis set file into an Atom."""

    def count_primitives(self, file_reference):
        """Count primitive (data) lines from the current position up to
        EOF or the next element-header line (a line whose first token is
        an element symbol). The file position is restored on return."""
        no_primitives = 0
        condition = True
        position = file_reference.tell()
        while condition:
            line = file_reference.readline()
            contents = line.split()
            if line == '':
                # empty string from readline() means EOF
                condition = False
                continue
            elif contents[0] in pertabdict.periodic_table:
                condition = False
            else:
                no_primitives = no_primitives + 1
       # print 'primitives counted', no_primitives
        file_reference.seek(position)
        return no_primitives

    #Main parser, fully initializes an Atom object
    def gen_atom(self, center, filename):
        """Read the basis set file at filename and return an Atom placed
        at center, with its shells populated from each element/shell
        header block (exponents in column 0, one coefficient column per
        shell)."""
        file_reference = open(filename, 'r')
        atom_gen = atom.Atom()
        atom_gen.center = center
        no_lines = file_len(file_reference)
        print no_lines
        for lc in range(no_lines):
            #print 'master line no', lc
            line = file_reference.readline()
            contents = line.split()
            if line == '':
                continue
            elif contents[0] in pertabdict.periodic_table:
                # header line: "<element> <shell-type>"
                atom_gen.atomic_number = pertabdict.periodic_table[contents[0]]
                ang_mom = pertabdict.shell_types[contents[1]]
                position = file_reference.tell()
                # peek at the first data line to count coefficient columns
                probe_line = file_reference.readline()
                no_shells_added = len(probe_line.split()) - 1
                #print 'shells added', no_shells_added
                shells = [shell.Shell() for i in range(no_shells_added)]
                #initialize shells
                file_reference.seek(position)
                no_primitives = self.count_primitives(file_reference)
                for i in range(no_primitives):
                    data_line = file_reference.readline()
                    data_content = data_line.split()
                    for i in range(no_shells_added):
                        shells[i].angular_momentum = ang_mom
                        shells[i].exponents.append(data_content[0])
                        shells[i].coefficients.append(data_content[i + 1])
                atom_gen.shells.extend(shells)
                atom_gen.no_shells = atom_gen.no_shells + no_shells_added
                # NOTE(review): seeking back to just after the header makes
                # the outer loop re-read the data lines (they fall through
                # to the 'else: continue' branch); this relies on no_lines
                # covering the re-reads -- confirm intended.
                file_reference.seek(position)
            else:
                continue
        file_reference.close()
        return atom_gen
if __name__ == '__main__':
    # Smoke test: parse a carbon cc-pVDZ basis file and report shell count.
    #basis_file = open('./cc_pvdz_c.basis','r')
    par = Parser()
    C_atom = par.gen_atom([0.0, 0.0, 0.0],'./cc_pvdz_c.basis')
    print C_atom.no_shells
| {
"repo_name": "stringtheorist/chem_parser",
"path": "parser.py",
"copies": "1",
"size": "3090",
"license": "mit",
"hash": -8491390544762819000,
"line_mean": 29.2941176471,
"line_max": 79,
"alpha_frac": 0.5598705502,
"autogenerated": false,
"ratio": 4.0025906735751295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9995212862479745,
"avg_score": 0.013449672259076605,
"num_lines": 102
} |
__author__ = 'agopalak'
import forecastio
import datetime
import pytz
import json
import os
from geopy import geocoders
import logging
# Setting up logging
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s: %(name)s: %(message)s', level=logging.INFO)

# Forecast.io API key (required; raises KeyError if the env var is unset)
forecastIO_api_key = os.environ['FORECASTIO_API_KEY']

# GoogleV3 API Key (required; raises KeyError if the env var is unset)
googlev3_api_key = os.environ['GOOGLEV3_API_KEY']

# Setting up access to Google V3 geocoding / timezone services
googv3 = geocoders.GoogleV3(api_key=googlev3_api_key)
# Location Format: <City>, <State Abbreviation>
# Date Format: MM/DD/YYYY
# Time Format: HH:MM:SS AM|PM
# Limitations: No format checking done
def fetch_weather(city, gdate, gtime):
    """
    Return the forecast.io 'currently' weather data dict for a city at a
    given local date/time.

    city  : "<City>, <State Abbreviation>", e.g. "Tampa, FL"
    gdate : "MM/DD/YYYY"
    gtime : "HH:MM AM|PM" or "HH:MM:SS AM|PM"

    Limitations: no format validation beyond basic parsing.
    BUG FIXES: "12:xx AM" is now mapped to hour 0 (it previously stayed
    12, i.e. noon), and an optional seconds field no longer breaks the
    hour/minute unpacking.
    """
    # Preparing Date, Time data for API calls
    (month, day, year) = map(int, gdate.split('/'))
    # take only the first two clock fields so "HH:MM:SS" is tolerated
    clock_fields = (gtime.split())[0].split(':')
    hour = int(clock_fields[0])
    minute = int(clock_fields[1])

    # Convert 12-hour clock to 24-hour: 12 AM -> 0, 12 PM stays 12
    meridiem = (gtime.split())[1].upper()
    if meridiem == 'PM':
        if hour != 12:
            hour += 12
    elif hour == 12:
        hour = 0

    # Geo Location of a given city
    geoloc = googv3.geocode(city)

    # Time Zone for a given Latitude & Longitude
    tz = googv3.timezone((geoloc.latitude, geoloc.longitude))
    logger.debug('City: %s, Time Zone: %s', city, tz)

    # Date in UTC Format
    date = datetime.datetime(year, month, day, hour, minute)
    logger.debug('Date: %s', date)

    # Get Weather Information for given location & time
    forecast = forecastio.load_forecast(forecastIO_api_key, geoloc.latitude,\
                                        geoloc.longitude, time=date)
    forecast_data = forecast.currently()
    return forecast_data.d
if __name__ == '__main__':
    # Manual smoke test (requires network access and valid API keys).
    fetch_weather('Tampa, FL', '12/19/2010', '1:00 PM')
| {
"repo_name": "agopalak/football_pred",
"path": "pre_proc/get_weather.py",
"copies": "1",
"size": "1742",
"license": "mit",
"hash": 642985710849296900,
"line_mean": 27.0967741935,
"line_max": 86,
"alpha_frac": 0.6549942595,
"autogenerated": false,
"ratio": 3.1730418943533696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43280361538533696,
"avg_score": null,
"num_lines": null
} |
__author__ = 'agopalak'
import nflgame
import csv
import get_weather
import stadium_info
import os.path
import json
import logging
# Setting up logging
# module-level logger used by get_nfldata below
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s: %(name)s: %(message)s', level=logging.INFO)
# Get NFL data from NFLgame package
def get_nfldata():
    """Dump per-player, per-game NFL stat lines (2010-2014) to nflData.csv.

    Joins nflgame stats with stadium info and cached per-game weather from
    game_weather.json; re-saves the weather cache if new games were seen.
    """
    # File Names
    weather_json = 'game_weather.json'
    # Years to process
    years = [2010, 2011, 2012, 2013, 2014]
    # Initializing Game Weather Dictionary (cache keyed by game eid)
    game_weather = {}
    if os.path.exists(weather_json):
        with open(weather_json) as ifile:
            game_weather = json.load(ifile)
    weather_lookedup = False
    # For header initialization
    started_proc = 0
    # NOTE(review): f is not closed if an exception escapes the loop below;
    # consider a with-block around the whole body.
    f = open('nflData.csv', 'w+')
    # Load data by year
    for year in years:
        logger.info('Processing Year: %d', year)
        games = nflgame.games(year)
        # Create a dictionary of positions: each player is assigned the
        # position whose stat category has his highest count.
        players = nflgame.combine_game_stats(games)
        position = {}
        for p in players:
            stats = [
                (p.passing_att, 'QB'),
                (p.rushing_att, 'RB'),
                (p.receiving_rec, 'WR'),
                (p.defense_tkl, 'DEF'),
                (p.defense_ast, 'DEF'),
                (p.kicking_fga, 'K'),
                (p.punting_yds, 'P'),
            ]
            position[p.playerid] = sorted(stats, reverse=True)[0][1]
        # Load data for each game
        for game in games:
            # Game Related Information
            gdate = str(game.schedule['month']) + '/' + str(game.schedule['day']) + '/' + str(game.schedule['year'])
            # All games are in the afternoon
            # OPEN: Handle exceptions with International Games
            gtime = game.schedule['time'] + ' PM'
            # Prepping Game Data
            gline = {
                'game_eid': game.eid,
                'game_week': game.schedule['week'],
                'game_wday': game.schedule['wday'],
                'game_date': gdate,
                'game_time': gtime,
                'home_team': game.home,
                'away_team': game.away,
                'score_home': game.score_home,
                'score_away': game.score_away,
                'home_stadium': stadium_info.teamStadium[game.home],
                'home_field': stadium_info.fieldType[game.home]
            }
            logger.info('Loading Year %d, Week %d: %s v %s on %s at %s', year, game.schedule['week'], game.away, game.home, gdate, gtime)
            # Extract Weather Information
            logger.debug('Location: %s, Date: %s, Time: %s', stadium_info.teamLocation[game.home], gdate, gtime)
            if game.eid not in game_weather.keys():
                weather_lookedup = True
                # Calling weather subroutine
                # game_weather[game.eid] = get_weather.fetch_weather(stadium_info.teamLocation[game.home], gdate, gtime)
                # NOTE(review): with the fetch call above commented out, a game
                # id absent from game_weather.json raises KeyError just below —
                # confirm the cache is complete before running.
            # Initializing Data if Absent
            if 'temperature' not in game_weather[game.eid].keys():
                game_weather[game.eid]['temperature'] = 0
            if 'windSpeed' not in game_weather[game.eid].keys():
                game_weather[game.eid]['windSpeed'] = 0
            if 'humidity' not in game_weather[game.eid].keys():
                game_weather[game.eid]['humidity'] = 0
            if 'apparentTemperature' not in game_weather[game.eid].keys():
                game_weather[game.eid]['apparentTemperature'] = game_weather[game.eid]['temperature']
            if 'summary' not in game_weather[game.eid].keys():
                game_weather[game.eid]['summary'] = 'None'
            logger.debug('Weather Info: %s', game_weather[game.eid])
            # Prepping Weather Data
            gline['game_temp'] = game_weather[game.eid]['temperature']
            gline['game_ftemp'] = game_weather[game.eid]['apparentTemperature']
            gline['game_weather'] = game_weather[game.eid]['summary']
            gline['game_wind'] = game_weather[game.eid]['windSpeed']
            gline['game_humid'] = game_weather[game.eid]['humidity']
            # Query player data per game
            plines = game.players.cummstats()
            # Header information (first entry of cummstats is the header row)
            hline = plines.pop(0)
            if started_proc == 0:
                # Dictionary Header Information — written once, on first game
                fields = ['year', 'game_eid', 'game_week', 'game_wday', 'game_date', \
                          'game_time', 'home_team', 'away_team', 'score_home', 'score_away', \
                          'game_temp', 'game_ftemp', 'game_weather', 'game_wind', 'game_humid', \
                          'home_stadium', 'home_field'] \
                          + [key for key in hline]
                writer = csv.DictWriter(f, fields)
                writer.writeheader()
                started_proc = 1
            # Populating statline information
            # Includes Year, Game Information & Player Information
            for pline in plines:
                pline['pos'] = position[pline['id']]
                sline = {'year': year}
                sline.update(gline)
                sline.update(pline)
                writer.writerow(sline)
    # Save JSON for weather information if new data accessed
    # Avoid unnecessary pings forecastIO server
    if weather_lookedup:
        with open(weather_json, 'w') as ofile:
            json.dump(game_weather, ofile)
    f.close()
if __name__ == '__main__':
get_nfldata() | {
"repo_name": "agopalak/football_pred",
"path": "pre_proc/get_nfldata.py",
"copies": "1",
"size": "5551",
"license": "mit",
"hash": 260221441578181600,
"line_mean": 36.2617449664,
"line_max": 137,
"alpha_frac": 0.5379210953,
"autogenerated": false,
"ratio": 3.865598885793872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4903519981093872,
"avg_score": null,
"num_lines": null
} |
__author__ = 'agostino'
from pycomm.ab_comm.slc import Driver as SlcDriver
import logging
# Python 2 hardware exerciser: reads and writes every supported SLC-500
# data-file type (S, N, F, B, T, C, I/O) through pycomm's SlcDriver in an
# endless loop, logging protocol traffic to SlcDriver.log.
if __name__ == '__main__':
    logging.basicConfig(
        filename="SlcDriver.log",
        format="%(levelname)-10s %(asctime)s %(message)s",
        level=logging.DEBUG
    )
    c = SlcDriver()
    # Connect to the PLC at a fixed address; loops forever on success.
    if c.open('192.168.1.15'):
        while 1:
            try:
                # Status file bits and words
                print c.read_tag('S:1/5')
                print c.read_tag('S:60', 2)
                # Integer file: multi-element write, single write, block read
                print c.write_tag('N7:0', [-30, 32767, -32767])
                print c.write_tag('N7:0', 21)
                print c.read_tag('N7:0', 10)
                # Float file
                print c.write_tag('F8:0', [3.1, 4.95, -32.89])
                print c.write_tag('F8:0', 21)
                print c.read_tag('F8:0', 3)
                # Bit file
                print c.write_tag('B3:100', [23, -1, 4, 9])
                print c.write_tag('B3:100', 21)
                print c.read_tag('B3:100', 4)
                # Timer / counter preset and accumulator members
                print c.write_tag('T4:3.PRE', 431)
                print c.read_tag('T4:3.PRE')
                print c.write_tag('C5:0.PRE', 501)
                print c.read_tag('C5:0.PRE')
                print c.write_tag('T4:3.ACC', 432)
                print c.read_tag('T4:3.ACC')
                print c.write_tag('C5:0.ACC', 502)
                print c.read_tag('C5:0.ACC')
                # Timer status bits
                c.write_tag('T4:2.EN', 0)
                c.write_tag('T4:2.TT', 0)
                c.write_tag('T4:2.DN', 0)
                print c.read_tag('T4:2.EN', 1)
                print c.read_tag('T4:2.TT', 1)
                print c.read_tag('T4:2.DN',)
                # Counter status bits
                c.write_tag('C5:0.CU', 1)
                c.write_tag('C5:0.CD', 0)
                c.write_tag('C5:0.DN', 1)
                c.write_tag('C5:0.OV', 0)
                c.write_tag('C5:0.UN', 1)
                c.write_tag('C5:0.UA', 0)
                print c.read_tag('C5:0.CU')
                print c.read_tag('C5:0.CD')
                print c.read_tag('C5:0.DN')
                print c.read_tag('C5:0.OV')
                print c.read_tag('C5:0.UN')
                print c.read_tag('C5:0.UA')
                # Word-level and bit-level addressing of the same file
                c.write_tag('B3:100', 1)
                print c.read_tag('B3:100')
                c.write_tag('B3/3955', 1)
                print c.read_tag('B3/3955')
                c.write_tag('N7:0/2', 1)
                print c.read_tag('N7:0/2')
                # Physical output bit
                print c.write_tag('O:0.0/4', 1)
                print c.read_tag('O:0.0/4')
            except Exception as e:
                # Best-effort loop: print the error and keep polling.
                print e
                pass
    # NOTE(review): the while-loop above never exits, so this close() is
    # only reached when open() fails.
    c.close()
| {
"repo_name": "bpaterni/pycomm",
"path": "examples/test_slc_only.py",
"copies": "3",
"size": "2520",
"license": "mit",
"hash": -7070770079832689000,
"line_mean": 32.6,
"line_max": 63,
"alpha_frac": 0.4222222222,
"autogenerated": false,
"ratio": 2.9612220916568743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48834443138568745,
"avg_score": null,
"num_lines": null
} |
__author__ = 'agross'
import pandas as pd
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from Figures.Pandas import series_scatter
from Figures.FigureHelpers import init_ax, prettify_ax
from Helpers.Pandas import match_series
def linear_regression(a, b):
    """Least-squares fit of b against a (after index alignment).

    Returns a Series with keys 'slope', 'intercept', 'r-value',
    'p-value' and 'stderr'.
    """
    a, b = match_series(a, b)
    slope, intercept, r_value, p_value, stderr = sp.stats.linregress(a, b)
    return pd.Series({'slope': slope, 'intercept': intercept,
                      'r-value': r_value, 'p-value': p_value,
                      'stderr': stderr})
def remove_leading_zero(f, places=2):
    """Format *f* rounded to *places* decimals, dropping the leading zero.

    Values within 0.01 of 1 render as '1.0'; values within 0.01 of 0
    render as ''; otherwise '0.53' -> '.53' and '-0.53' -> '-.53', while
    magnitudes of 1 or more keep their full str() form.
    """
    f = round(f, places)
    if abs(f - 1) < .01:
        return '1.0'
    elif abs(f) < .01:
        return ''
    # Was `abs(f) > 1`: that excluded exactly -1, which then fell into the
    # sign-stripping branch below and produced '-' / '-.0'.
    elif abs(f) >= 1:
        f = str(f)
    elif f > 0:
        f = str(f)[1:]          # '0.53' -> '.53'
    else:
        f = '-' + str(f)[2:]    # '-0.53' -> '-.53'
    return f
def regression_string(reg):
    """Two-line annotation "r=<r>\\ny=<slope>x <intercept>" for a fit result.

    *reg* is a mapping with 'r-value', 'slope' and 'intercept' entries
    (e.g. the Series produced by linear_regression).
    """
    r_label = 'r={}'.format(str(round(reg['r-value'], 2)))
    slope_txt = str(np.round(reg['slope'], 2))
    # Intercepts near zero are omitted; positive ones get an explicit '+'.
    if np.abs(reg['intercept']) < .01:
        intercept_txt = ''
    else:
        intercept_txt = str(np.round(reg['intercept'], 2))
        if intercept_txt[0] != '-':
            intercept_txt = '+' + intercept_txt
    fit_line = 'y={}x {}'.format(slope_txt, intercept_txt)
    return '\n'.join([r_label, fit_line])
def line_me(slope, intercept, start=0, end=100, ax=None,
            **plot_args):
    """Draw the line y = slope*x + intercept over [start, end] on *ax*.

    Falls back to the current pyplot axes when *ax* is None; extra
    keyword arguments are forwarded to ax.plot.
    """
    if ax is None:
        ax = plt.gca()
    y_start = (slope * start) + intercept
    y_end = (slope * end) + intercept
    ax.plot([start, end], [y_start, y_end],
            **plot_args)
def process_line_args(line_args):
    """Normalize *line_args* into a pair (l1, l2) of plot-kwarg dicts.

    None -> two fresh empty dicts; a two-element list -> its elements;
    a single dict -> the same dict for both lines.

    Raises TypeError for any other input (previously this fell through
    and surfaced as an UnboundLocalError).
    """
    if line_args is None:
        return {}, {}
    if isinstance(line_args, list):
        return line_args[0], line_args[1]
    if isinstance(line_args, dict):
        return line_args, line_args
    raise TypeError('line_args must be None, a list of two dicts, or a '
                    'dict, got {!r}'.format(type(line_args)))
def plot_regression_plain(x, y, ax=None, line_args=None,
                          **plt_args):
    """Scatter y against x with the fitted regression line, the identity
    line, and an "r=... / y=...x ..." annotation.

    *line_args* is forwarded to process_line_args: the first style dict
    dresses the fit line, the second the identity line.
    """
    x, y = match_series(x, y)
    fig, ax = init_ax(ax, figsize=(5, 5))
    series_scatter(x, y, ax=ax, ann=None, **plt_args)
    fit = linear_regression(x, y)
    ax.annotate(regression_string(fit), (.5, .05),
                xycoords='axes fraction', size=14)
    fit_style, identity_style = process_line_args(line_args)
    line_me(fit['slope'], fit['intercept'], start=x.min(), end=x.max(),
            ax=ax, **fit_style)
    line_me(1, 0, start=x.min(), end=x.max(), ax=ax, **identity_style)
    # Square-ish bounds padded by 3 around the combined value range.
    combined = x.append(y)
    ax.set_xbound(combined.min() - 3, combined.max() + 3)
    ax.set_ybound(combined.min() - 3, combined.max() + 3)
    prettify_ax(ax)
def check_set(key, value, d):
    """Insert *value* under *key* only if the key is absent; return *d*."""
    d.setdefault(key, value)
    return d
def plot_regression_density(x, y, rad=3, ax=None, line_args=None,
                            **plt_args):
    """
    Scatter y against x colored by local point density, with regression
    and identity lines.

    Color density modified from Gordon Bean's Matlab code.
    https://github.com/brazilbean/bean-matlab-toolkit/blob/master/denscat.m

    NOTE(review): uses Series.order() and .ix, both removed in modern
    pandas (use sort_values()/loc); also assumes scipy.spatial has been
    imported somewhere (`import scipy as sp` alone does not load it) —
    TODO confirm against the runtime environment.
    """
    fig, ax = init_ax(ax, figsize=(5, 5))
    x, y = match_series(x, y)
    pts = pd.concat([x, y], axis=1)
    # Pairwise distances between all points; density = neighbors within
    # radius `rad`, normalized by the disc area.
    d = sp.spatial.distance_matrix(pts, pts)
    d = pd.DataFrame(d, pts.index, pts.index)
    area = np.pi * rad ** 2
    dens = 1. * (d < rad).sum() / area
    # Plot low-density points first so dense regions stay visible on top.
    idx = dens.order().index
    series_scatter(x.ix[idx], y.ix[idx], c=list(dens.ix[idx]), lw=0,
                   ann=None, ax=ax, cmap=cm.jet, **plt_args)
    reg = linear_regression(x, y)
    ax.annotate(regression_string(reg), (.7,.05),
                xycoords='axes fraction', size=18)
    l1, l2 = process_line_args(line_args)
    # Default line styling, applied only where the caller did not override.
    default = [('lw', 4), ('ls', '--'), ('color', 'grey'),
               ('dash_capstyle', 'round'), ('alpha', .75)]
    for (k, v) in default:
        l1 = check_set(k, v, l1)
        l2 = check_set(k, v, l2)
    line_me(1, 0, start=x.min(), end=x.max(), ax=ax, **l1)
    line_me(reg['slope'], reg['intercept'], start=x.min(),
            end=x.max(), ax=ax, **l2)
    xy = x.append(y)
    ax.set_xbound(xy.min() - rad, xy.max() + rad)
    ax.set_ybound(xy.min() - rad, xy.max() + rad)
    prettify_ax(ax)
def plot_regression(x, y, density=False, rad=3, ax=None, line_args=None,
**plt_args):
if density is True:
plot_regression_density(x, y, rad, ax, line_args, **plt_args)
else:
plot_regression_plain(x, y, ax, line_args, **plt_args) | {
"repo_name": "theandygross/Figures",
"path": "src/Figures/Regression.py",
"copies": "1",
"size": "4411",
"license": "mit",
"hash": 8430698963059050000,
"line_mean": 29.0136054422,
"line_max": 78,
"alpha_frac": 0.5477216051,
"autogenerated": false,
"ratio": 2.888670595939751,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8929304256666422,
"avg_score": 0.001417588874666002,
"num_lines": 147
} |
__author__ = 'agross'
import re
import itertools as itertools
import urllib
import pandas as pd
from matplotlib.colors import rgb2hex
from matplotlib.cm import RdBu
KEGG_PATH = 'http://www.kegg.jp/kegg-bin/'
from Figures.KEGG import *
def pull_pathway_info_from_kegg(kegg_id):
    """Fetch a KEGG flat-file record and extract its GENE section.

    Returns a DataFrame with one row per (EC, KO) pair per gene, with
    columns 'id', 'gene', 'ec', 'ko'.

    NOTE(review): urllib.urlopen is Python-2-only (Python 3 uses
    urllib.request.urlopen); Series.order() was removed in modern pandas.
    """
    o = urllib.urlopen('http://rest.kegg.jp/get/' + kegg_id).read()
    o = o.splitlines()
    '''need to parse out when new sections start'''
    # Section headers start in column 0; remember the first line of each.
    sections = {}
    for i, n in enumerate(o):
        s = n.split()[0]
        if n[0] != ' ' and s not in sections:
            sections[s] = i
    sections = pd.Series(sections).order()
    o = [l[12:] for l in o] # get rid of fixed-width section headings
    '''Pull out gene information, ec = enzyme, ko = complex'''
    # GENE section runs from its header to the start of the next section.
    start = sections['GENE']
    stop = [sections.iloc[i+1] for i, s in enumerate(sections.index)
            if s == 'GENE'][0]
    gene = o[start:stop]
    #return gene
    mapping = []
    for g in gene:
        try:
            # Each line looks like "<id> <name>; <desc> [KO:...] [EC:...]".
            g = g.split(';')
            gene_id = g[0].split()[0]
            gene_name = g[0].split()[1]
            desc = re.findall('\[(.*?)\]', g[1]) # stuff in [brackets]
            ko = [e for e in desc if 'KO:' in e]
            if len(ko) > 0:
                ko = ko[0][3:].split()
            else:
                ko = ['None']
            ec = [e for e in desc if 'EC:' in e]
            if len(ec) > 0:
                ec = ec[0][3:].split()
            else:
                ec = ['None']
            # One row per (EC, KO) combination for this gene.
            for i, j in itertools.product(ec, ko):
                mapping.append(pd.Series({'id': gene_id, 'gene': gene_name,
                                          'ec': i, 'ko': j}))
        except:
            # NOTE(review): bare except silently skips malformed lines;
            # consider narrowing to IndexError and logging instead.
            print(g)
    mapping = pd.DataFrame(mapping)
    return mapping
def plot_data_on_pathway(kegg_id, mapping, dist):
mapping = mapping[mapping.gene.isin(dist.index)]
order = mapping.gene.map(mapping.groupby('gene').size()).order()
mapping = mapping.ix[order.index]
symbol_to_kegg = mapping.set_index('gene').id
symbol_to_kegg = symbol_to_kegg.groupby(level=0).first()
dist = pd.Series(dist, name='dist')
ec = mapping.set_index('gene').join(dist).groupby('ko').median()
ec = ec.dist.dropna().order()
gm = pd.concat([mapping.groupby('ko').first().gene, ec], 1)
gm = gm.set_index('gene').dist.groupby(level=0).first()
cmap = gm.map(lambda v: rgb2hex(RdBu(1-v)).upper()[1:])
s = '%0D%'.join(['hsa:{}+%23{}'.format(symbol_to_kegg.ix[i], cmap.ix[i])
for i in gm.index if i in symbol_to_kegg])
s = '{}show_pathway?map={}&multi_query={}'.format(KEGG_PATH, kegg_id, s)
print(s)
def parse_entry(e):
    """Convert a KGML <entry> XML element into a pandas Series.

    The Series carries every attribute of the element plus a
    'components' list holding the ids of its <component> children.
    """
    # dict(e.attrib.items()) replaces the Python-2-only
    # e.attrib.iteritems(); items() works on both Python 2 and 3.
    d = dict(e.attrib.items())
    components = [c.attrib['id'] for c in e.findall('component')]
    d['components'] = components
    d = pd.Series(d)
    return d
| {
"repo_name": "theandygross/Figures",
"path": "src/Figures/KEGG.py",
"copies": "1",
"size": "2832",
"license": "mit",
"hash": -7338090559315704000,
"line_mean": 32.3176470588,
"line_max": 76,
"alpha_frac": 0.5434322034,
"autogenerated": false,
"ratio": 3.2108843537414966,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9241138182136888,
"avg_score": 0.002635675000921845,
"num_lines": 85
} |
__author__ = 'agross'
"""
Code taken from MinRK's Gist.
http://nbviewer.ipython.org/gist/minrk/6011986
"""
import io, os, sys, types
#from IPython import nbformat
import nbformat as nbformat
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import display_html
def find_notebook(fullname, path=None):
    """Locate the .ipynb file for a dotted module name.

    Only the last component of *fullname* is used; each directory in
    *path* (default: current directory) is checked for "<name>.ipynb".
    Returns the path of the first match, or None.
    """
    stem = fullname.rsplit('.', 1)[-1]
    for directory in (path or ['']):
        candidate = os.path.join(directory, stem + ".ipynb")
        if os.path.isfile(candidate):
            return candidate
class NotebookLoader(object):
    """Module Loader for IPython Notebooks.

    Executes a notebook's Python code cells inside a fresh module whose
    namespace temporarily doubles as the IPython user namespace, so that
    magics behave as they would interactively.
    """
    def __init__(self, path=None):
        # Shared interactive shell, used for input transformation (magics).
        self.shell = InteractiveShell.instance()
        self.path = path
    def load_module(self, fullname):
        """import a notebook as a module"""
        path = find_notebook(fullname, self.path)
        # Show a clickable link to the notebook being imported.
        disp = "importing IPython notebook from "
        disp += "<a href='./{}' target='_blank'>{}</a>".format(path, path[:-6])
        display_html(disp, raw=True)
        #print disp
        # load the notebook object (nbformat version 3 documents)
        with io.open(path, 'r', encoding='utf-8') as f:
            nb = nbformat.read(f, 3)
        # create the module and add it to sys.modules
        # if name in sys.modules:
        #    return sys.modules[name]
        mod = types.ModuleType(fullname)
        mod.__file__ = path
        mod.__loader__ = self
        sys.modules[fullname] = mod
        # extra work to ensure that magics that would affect the user_ns
        # actually affect the notebook module's ns
        save_user_ns = self.shell.user_ns
        self.shell.user_ns = mod.__dict__
        try:
            for cell in nb.worksheets[0].cells:
                if cell.cell_type == 'code' and cell.language == 'python':
                    # Cells starting with "#Do not import" are skipped.
                    if not cell.input.startswith('#Do not import'):
                        # transform the input to executable Python
                        code = self.shell.input_transformer_manager.transform_cell(cell.input)
                        # run the code in themodule
                        exec(code, mod.__dict__)
        finally:
            # Always restore the shell's real user namespace.
            self.shell.user_ns = save_user_ns
        return mod
class NotebookFinder(object):
    """sys.meta_path hook that resolves imports to IPython notebooks."""
    def __init__(self):
        # One NotebookLoader per search path, reused across imports.
        self.loaders = {}
    def find_module(self, fullname, path=None):
        if not find_notebook(fullname, path):
            return
        # Lists are unhashable, so the cache is keyed on the joined path.
        key = os.path.sep.join(path) if path else path
        if key not in self.loaders:
            self.loaders[key] = NotebookLoader(path)
        return self.loaders[key]
sys.meta_path.append(NotebookFinder())
| {
"repo_name": "theandygross/NotebookImport",
"path": "NotebookImport.py",
"copies": "1",
"size": "2806",
"license": "apache-2.0",
"hash": 6419217960560200000,
"line_mean": 29.5,
"line_max": 90,
"alpha_frac": 0.5894511761,
"autogenerated": false,
"ratio": 3.886426592797784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49758777688977834,
"avg_score": null,
"num_lines": null
} |
__author__ = 'agross'
"""
Code taken from MinRK's Gist.
http://nbviewer.ipython.org/gist/minrk/6011986
"""
import io, os, sys, types
from IPython.nbformat import current
from IPython.core.interactiveshell import InteractiveShell
def find_notebook(fullname, path=None):
    """Return the path of the .ipynb matching a dotted module name, or None.

    Checks "<last name component>.ipynb" in each directory of *path*
    (default: the current directory).
    """
    notebook_name = fullname.rsplit('.', 1)[-1] + ".ipynb"
    search_dirs = path if path else ['']
    for directory in search_dirs:
        nb_path = os.path.join(directory, notebook_name)
        if os.path.isfile(nb_path):
            return nb_path
class NotebookLoader(object):
    """Module Loader for IPython Notebooks.

    NOTE(review): Python-2-only — uses the removed IPython.nbformat.current
    API and the `exec code in ns` statement form (SyntaxError on Python 3).
    """
    def __init__(self, path=None):
        # Shared interactive shell, used for input transformation (magics).
        self.shell = InteractiveShell.instance()
        self.path = path
    def load_module(self, fullname):
        """import a notebook as a module"""
        path = find_notebook(fullname, self.path)
        print ("importing IPython notebook from %s" % path)
        # load the notebook object
        with io.open(path, 'r', encoding='utf-8') as f:
            nb = current.read(f, 'json')
        # create the module and add it to sys.modules
        # if name in sys.modules:
        #    return sys.modules[name]
        mod = types.ModuleType(fullname)
        mod.__file__ = path
        mod.__loader__ = self
        sys.modules[fullname] = mod
        # extra work to ensure that magics that would affect the user_ns
        # actually affect the notebook module's ns
        save_user_ns = self.shell.user_ns
        self.shell.user_ns = mod.__dict__
        try:
            for cell in nb.worksheets[0].cells:
                if cell.cell_type == 'code' and cell.language == 'python':
                    # transform the input to executable Python
                    code = self.shell.input_transformer_manager.transform_cell(cell.input)
                    # run the code in themodule
                    exec code in mod.__dict__
        finally:
            # Always restore the shell's real user namespace.
            self.shell.user_ns = save_user_ns
        return mod
class NotebookFinder(object):
    """sys.meta_path hook that resolves imports to IPython notebooks."""
    def __init__(self):
        # Cache of NotebookLoader instances, one per search path.
        self.loaders = {}
    def find_module(self, fullname, path=None):
        nb_path = find_notebook(fullname, path)
        if not nb_path:
            return
        # Lists aren't hashable — key the cache on the joined path string.
        key = os.path.sep.join(path) if path else path
        if key not in self.loaders:
            self.loaders[key] = NotebookLoader(path)
        return self.loaders[key]
sys.meta_path.append(NotebookFinder())
| {
"repo_name": "PeterUlz/TCGA_analysis",
"path": "NotebookImport.py",
"copies": "1",
"size": "2547",
"license": "mit",
"hash": -2941774650132067000,
"line_mean": 28.275862069,
"line_max": 86,
"alpha_frac": 0.5928543384,
"autogenerated": false,
"ratio": 3.8826219512195124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9969333754737038,
"avg_score": 0.0012285069764950514,
"num_lines": 87
} |
__author__ = "aguha@colgate.edu"
from numpy import *
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def deriv(vector, t, beta_I, beta_H, beta_F, alpha, gamma_H, gamma_I, gamma_D, gamma_DH, gamma_F, gamma_IH, delta1, delta2, delta3, iota):
    """Right-hand side of the SEIHFMR Ebola compartment model for odeint.

    vector holds [S, E, I, H, F, M, R]; the remaining arguments are
    transmission, progression and fatality-rate parameters.  The total
    population N is conserved (the derivatives sum to zero).
    """
    S, E, I, H, F, M, R = vector
    N = S + E + I + H + F + M + R
    # Force of infection: contacts with infectious (I), hospitalized (H)
    # and unburied dead (F), each with its own transmission rate.
    infection = (beta_I * S * I + beta_H * S * H + beta_F * S * F) / N
    dS = -infection
    dE = infection - alpha * E
    # Infectious leave to hospital, recovery, or death (outside hospital).
    dI = alpha * E - (gamma_H * iota
                      + gamma_I * (1 - iota) * (1 - delta1)
                      + gamma_D * (1 - iota) * delta1) * I
    # Hospitalized either die or are discharged.
    dH = gamma_H * iota * I - (gamma_DH * (delta2 + delta3)
                               + gamma_IH * (1 - (delta2 + delta3))) * H
    # Funerals accumulate deaths and resolve at rate gamma_F.
    dF = gamma_D * (1 - iota) * delta1 * I + gamma_DH * (delta2 + delta3) * H - gamma_F * F
    dM = gamma_IH * (1 - (delta2 + delta3)) * H
    dR = gamma_I * (1 - iota) * (1 - delta1) * I + gamma_F * F
    return [dS, dE, dI, dH, dF, dM, dR]
def run_odeint(beta_I=0.16, beta_H=0.062, beta_F=0.489, alpha=(1/12.0), gamma_H=(1/3.24), gamma_I=(1/15.0), gamma_D=(1/13.31), gamma_DH=(1/10.07), gamma_F=(1/2.01), gamma_IH=(1/15.88), delta1=0.5, delta2=0.5, delta3=0, iota=0.197):
    """Integrate the SEIHFMR model over 300 days (400 samples).

    Defaults are the baseline scenario; callers override individual
    rates to model interventions.  Returns the odeint solution array
    with one column per compartment [S, E, I, H, F, M, R].
    """
    # Initial populations for [S, E, I, H, F, M, R].
    initial_state = [1667843, 20, 131, 162, 0, 142, 2000]
    time_grid = linspace(0, 300, 400)
    return odeint(deriv, initial_state, time_grid,
                  args=(beta_I, beta_H, beta_F, alpha, gamma_H, gamma_I,
                        gamma_D, gamma_DH, gamma_F, gamma_IH,
                        delta1, delta2, delta3, iota))
def draw_figure(y, figno, figname):
    """Plot all seven compartments of solution array *y* over time and
    save the figure to *figname*.
    """
    times = linspace(0, 300, 400)
    plt.figure(figno)
    # (format/color, legend label) for each compartment column of y.
    series = [('-g', 'S'), ('-y', 'E'), ('-b', 'I'), ('-m', 'H'),
              ('-c', 'F'), ('#2E0854', 'M'), ('-r', 'R')]
    for column, (fmt, label) in enumerate(series):
        plt.plot(times, y[:, column], fmt, label=label)
    plt.legend()
    plt.xlabel('Time (days)')
    plt.ylabel('Population')
    plt.savefig(figname)
if __name__ == '__main__':
    # Scenario sweep: baseline, then progressively stronger vaccination,
    # case isolation, and closure interventions — one PNG per scenario.
    draw_figure(run_odeint(), 1, 'no_intervention.png')
    draw_figure(run_odeint(delta2=0.2, delta3=0.25), 2, 'vaccination_early_stage.png')
    draw_figure(run_odeint(delta2=0.1, delta3=0.25), 3, 'potent_vaccination_early_stage.png')
    draw_figure(run_odeint(delta2=0.05, delta3=0.25), 4, 'more_potent_vaccination_early_stage.png')
    draw_figure(run_odeint(delta2=0, delta3=0.25), 5, 'fully_potent_vaccination_early_stage.png')
    draw_figure(run_odeint(delta2=0.1, delta3=0.1), 6, 'fully_potent_vaccination_all_stages.png')
    draw_figure(run_odeint(beta_H=0.0375, delta2=0.0, delta3=0.25, iota=0.24625), 7, 'identify_and_isolate_es_1.png')
    draw_figure(run_odeint(beta_H=0.025, delta2=0.0, delta3=0.25, iota=0.2955), 8, 'identify_and_isolate2_es_2.png')
    draw_figure(run_odeint(beta_H=0.00125, delta2=0.0, delta3=0.25, iota=0.34475), 9, 'identify_and_isolate3_es_3.png')
    draw_figure(run_odeint(beta_I=0.12, beta_H=0.0375, delta2=0, delta3=0.25, iota=0.24625, gamma_F=(1/1.51)), 10, 'close_down1_es.png')
    draw_figure(run_odeint(beta_I=0.11, beta_H=0.025, delta2=0, delta3=0.25, iota=0.2955, gamma_F=(1/1.21)), 11, 'close_down2_es.png')
| {
"repo_name": "anindyabd/ebola_eradication",
"path": "diff_eq.py",
"copies": "1",
"size": "3447",
"license": "mit",
"hash": -8056049991920374000,
"line_mean": 45.5810810811,
"line_max": 232,
"alpha_frac": 0.5955903684,
"autogenerated": false,
"ratio": 2.4104895104895103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8336703915472762,
"avg_score": 0.03387519268334967,
"num_lines": 74
} |
__author__ = 'aguzun'
from flask import json
import requests
from uwsgi_tasks import task, TaskExecutor
from core import app
# Flask config key whose value is the Slack incoming-webhook URL.
SLACK_NOTIFY_HOOK_CONFIG = "SLACK_NOTIFY_HOOK_CONFIG"
@task(executor=TaskExecutor.AUTO)
def notify_camera_state_changed(camera):
    """Post a Slack message when a camera is (de)activated.

    Runs as a uwsgi background task; *camera* is a mapping with at
    least 'name' and 'active' entries.  Python-2-only syntax
    (`except Exception, e`).
    """
    # some long running task here
    if SLACK_NOTIFY_HOOK_CONFIG in app.config:
        try:
            if camera.get('active'):
                state_text = "Activated"
            else:
                state_text = "Deactivated"
            payload = {
                'text': "Camera <" + camera.get('name') + "> was " + state_text
            }
            requests.post(app.config.get(SLACK_NOTIFY_HOOK_CONFIG), data=json.dumps(payload))
        except Exception, e:
            # NOTE(review): logger.error("msg", e) treats e as a %-format
            # argument with no placeholder, so the detail is lost; prefer
            # app.logger.exception("Could not send slack notification").
            app.logger.error("Could not send slack notification", e)
    else:
        app.logger.info("Slack hook not configured. No notification is going to be sent")
@task(executor=TaskExecutor.AUTO)
def notify_new_image(camera, image_url):
    """Post a Slack message linking a newly captured image.

    Runs as a uwsgi background task.  Python-2-only syntax
    (`except Exception, e`).
    """
    # some long running task here
    if SLACK_NOTIFY_HOOK_CONFIG in app.config:
        try:
            payload = {
                'text': camera.get('name') + " captured new <" + image_url + "|image>"
            }
            requests.post(app.config.get(SLACK_NOTIFY_HOOK_CONFIG), data=json.dumps(payload))
        except Exception, e:
            # NOTE(review): the exception detail is dropped here too — see
            # notify_camera_state_changed; prefer app.logger.exception(...).
            app.logger.error("Could not send slack notification", e)
    else:
        app.logger.info("Slack hook not configured. No notification is going to be sent")
| {
"repo_name": "SillentTroll/rascam_server",
"path": "wsgi/notifier.py",
"copies": "1",
"size": "1493",
"license": "apache-2.0",
"hash": -8414024506077243000,
"line_mean": 32.1777777778,
"line_max": 93,
"alpha_frac": 0.6128600134,
"autogenerated": false,
"ratio": 3.779746835443038,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4892606848843038,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aguzun'
from urlparse import urljoin
import requests
class ControlOption(object):
    """Base wrapper for motion's HTTP control interface (Python 2).

    Subclasses bind a specific option group (e.g. "detection", "action")
    and issue commands as GET requests of the form
    <control_url>/<thread_nr>/<option>/<command>.
    """
    def __init__(self, option_name):
        self.option_name = option_name
        self.control_url = "http://localhost:8080" # change the port in motion.config
        self.thread_nr = "0" # multiple cameras can be connected. For now only one is supported.
    def execute(self, option_name, command):
        """Issue one control command; return the response text, or None
        on any non-200 status."""
        params = [self.thread_nr, option_name, command]
        url = urljoin(self.control_url, "/".join(params))
        print url
        response = requests.get(url)
        if response.status_code == requests.codes.ok:
            return str(response.text)
        else:
            return None
class Detection(ControlOption):
    """Wrapper for motion's "detection" control endpoints (Python 2)."""
    def __init__(self):
        ControlOption.__init__(self, "detection")
    def get_status(self):
        """Return True if detection is ACTIVE, False if PAUSE, else None.

        NOTE(review): execute() returns None on HTTP errors, and
        `"PAUSE" in None` raises TypeError — confirm the error path.
        """
        response_text = self.execute(self.option_name, "status")
        if "PAUSE" in response_text:
            return False
        elif "ACTIVE" in response_text:
            return True
        else:
            print "Got an invalid response %s" % response_text
            return None
    def start(self):
        # Resume motion detection.
        return self.execute(self.option_name, "start")
    def pause(self):
        # Suspend motion detection.
        return self.execute(self.option_name, "pause")
class Action(ControlOption):
    """Wrapper for motion's "action" control endpoints."""
    def __init__(self):
        # Bind this control to the "action" option group.
        super(Action, self).__init__("action")
    def take_snapshot(self):
        """Ask motion to capture one snapshot; returns the response text
        or None."""
        return self.execute(self.option_name, "snapshot")
detection = Detection()
| {
"repo_name": "SillentTroll/rascam_client",
"path": "motion/motion_control.py",
"copies": "1",
"size": "1503",
"license": "apache-2.0",
"hash": -3247739610879324000,
"line_mean": 26.8333333333,
"line_max": 97,
"alpha_frac": 0.6141051231,
"autogenerated": false,
"ratio": 3.9448818897637796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5058987012863779,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aharvey'
import serial
import string
import ystockquote
import time
# LED-sign serial protocol byte sequences: INIT preamble (0xAA x5, 0xBB,
# 0x92) and CLEAR command (0x8C twice) — presumably per the sign's
# vendor protocol; TODO confirm against its documentation.
INIT = chr(170) + chr(170)+ chr(170)+chr(170)+chr(170)+chr(187)+chr(146)
CLEAR = chr(140) + chr(140)
def cvtStr(msg):
    """Translate a message into the LED sign's control-byte protocol.

    Spaces and colons are swapped (via a "%20" placeholder so the two
    substitutions don't collide), and <TAG> markers are replaced with
    the sign's single control bytes (chr(129)..chr(143)).
    """
    # str.replace replaces the Python-2-only string.replace() module
    # function (removed in Python 3); behavior is identical.
    msg = msg.replace(" ", "%20")
    msg = msg.replace(":", " ")
    msg = msg.replace("%20", ":")
    # Effect tags -> control bytes.  The literals are disjoint (no tag is
    # a substring of another), so replacement order does not matter.
    tags = {
        "<LEFT>": 129, "<RIGHT>": 130, "<UP>": 131, "<DOWN>": 132,
        "<JUMP>": 133, "<OPEN>": 134, "<CLOSE>": 135, "<FLASH>": 136,
        "<FLASHGO>": 137, "<SPELL>": 138, "<FAT>": 139, "<CLEAR>": 140,
        "<SPEED>": 141, "<RANDOM>": 142, "<PAUSE>": 143,
    }
    for tag, code in tags.items():
        msg = msg.replace(tag, chr(code))
    return msg
# Open the sign's serial port and initialize it, then poll the FTNT stock
# quote once a minute and push it to the display (Python 2 script body).
ser = serial.Serial("/dev/ttyUSB0", 2400, timeout=1)
ser.write(INIT)
while True:
    FTNT = ystockquote.get_all('FTNT')
    ser.write(CLEAR)
    print "FTNT %s %s" % (FTNT['change'], FTNT['price'])
    ser.write(cvtStr("FTNT %s %s" % (FTNT['change'], FTNT['price'])))
    # chr(128) appears to terminate a message frame — TODO confirm against
    # the sign's protocol documentation.
    ser.write(chr(128))
    time.sleep(60)
# NOTE(review): unreachable — the loop above never exits, so the port is
# only released on process termination.
ser.close()
| {
"repo_name": "infamy/ledsignstockticker",
"path": "ticker.py",
"copies": "1",
"size": "1471",
"license": "mit",
"hash": 1075729989474509400,
"line_mean": 31.4318181818,
"line_max": 72,
"alpha_frac": 0.578518015,
"autogenerated": false,
"ratio": 2.9186507936507935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39971688086507934,
"avg_score": null,
"num_lines": null
} |
__author__ = "Ahmad Al-Sajid"
__email__ = "ahmadalsajid@gmail.com"
# Heuristic values: straight-line distance from each city to the goal 'b'
# (the values match the AIMA Romania road-map example, with goal Bucharest).
distance = {
    'a': 366,
    'b': 0,
    'c': 160,
    'd': 242,
    'e': 161,
    'f': 176,
    'g': 77,
    'h': 151,
    'i': 226,
    'l': 244,
    'm': 241,
    'n': 234,
    'o': 380,
    'p': 10,
    'r': 193,
    's': 253,
    't': 329,
    'u': 80,
    'v': 199,
    'z': 374
}
# Undirected adjacency list of the road map.
adj = {
    'a': ['s', 't', 'z'],
    'b': ['u', 'g', 'p', 'f'],
    'c': ['d', 'r', 'p'],
    'd': ['c', 'm'],
    'e': ['h'],
    'f': ['b', 's'],
    'g': ['b'],
    'h': ['e', 'u'],
    'i': ['n', 'v'],
    'l': ['m', 't'],
    'm': ['d', 'l'],
    'n': ['i'],
    'o': ['s', 'z'],
    'p': ['b', 'c', 'r'],
    'r': ['c', 'p', 's'],  # fixed: was 'p,' — a stray comma inside the
                           # string produced a key absent from `distance`
    's': ['a', 'f', 'o', 'r'],
    't': ['a', 'l'],
    'u': ['b', 'h', 'v'],
    'v': ['i', 'u'],
    'z': ['a', 'o']
}
# Accumulated heuristic cost and visited route, filled in by the search.
source_to_destination_distance = 0
path = list()
def greedy_best_fast_search(source_city, destination_city):
    """One greedy best-first step, repeated recursively.

    From source_city, moves to the neighbour with the smallest heuristic
    distance, appends it to the module-level `path`, adds its heuristic
    value to `source_to_destination_distance`, and recurses until
    destination_city is reached.
    """
    global source_to_destination_distance
    # Destination reached — nothing further to expand.
    if source_city == destination_city:
        return
    # Rank neighbours by their heuristic (straight-line) distance.
    neighbour_scores = {city: distance[city] for city in adj[source_city]}
    best = min(neighbour_scores, key=neighbour_scores.get)
    source_to_destination_distance = source_to_destination_distance + distance[best]
    path.append(best)
    greedy_best_fast_search(best, destination_city)
def main():
    """Run the greedy search from 'a' to 'b' and print the route."""
    global path
    source = 'a'
    destination = 'b'
    path.append(source)
    greedy_best_fast_search(source, destination)
    # Render the route as "a->s->...->b" on a single line.
    print('->'.join(path))
    # print(source_to_destination_distance)
if __name__ == '__main__':
main()
| {
"repo_name": "ahmadalsajid/PythonNotes",
"path": "GreedyBestFastSearch.py",
"copies": "1",
"size": "2090",
"license": "mit",
"hash": -8747873405934222000,
"line_mean": 21.2340425532,
"line_max": 95,
"alpha_frac": 0.4933014354,
"autogenerated": false,
"ratio": 2.7718832891246685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37651847245246683,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmad Syarif'
import pika
import json
class CommandHandler(object):
    """Publishes avatar commands for the NAO robot over RabbitMQ.

    Connects to a local broker with fixed credentials and sends JSON
    command payloads to the 'amq.topic' exchange under `avatarKey`.
    """
    avatarKey = 'avatar.NAO.command'
    def __init__(self):
        # NOTE(review): the connection is never closed; consider exposing
        # a close() method or context-manager support.
        credential = pika.PlainCredentials('lumen', 'lumen')
        connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, '/', credential))
        self.channel = connection.channel()
    def sendCommand(self, command):
        """Publish one raw JSON command string to the avatar topic."""
        self.channel.basic_publish(exchange='amq.topic', routing_key=CommandHandler.avatarKey, body=command)
    def LS_say(self, toSay):
        """Send a text-to-speech 'say' command for *toSay*.

        Fixed: a duplicate json.dumps({'text': toSay}) result was built
        into an unused local and discarded.
        """
        com = json.dumps({'type': 'texttospeech', 'method': 'say', 'parameter': {'text': toSay}})
        self.sendCommand(command=com)
    def LS_goToPosture(self, posture, speed):
        """Send a 'goToPosture' command with the given posture name and speed."""
        com = json.dumps({'type': 'posture', 'method': 'goToPosture', 'parameter': {'postureName': posture, 'speed': speed}})
        self.sendCommand(command=com)
    def LS_wakeUp(self):
        """Send the motion 'wakeUp' command."""
        com = json.dumps({'type': 'motion', 'method': 'wakeUp'})
        self.sendCommand(command=com)
    def LS_rest(self):
        """Send the motion 'rest' command."""
        com = json.dumps({'type': 'motion', 'method': 'rest'})
        self.sendCommand(command=com)
| {
"repo_name": "ahmadsyarif/Python-Agent",
"path": "Command.py",
"copies": "2",
"size": "1230",
"license": "apache-2.0",
"hash": 1528711197354987000,
"line_mean": 38.6774193548,
"line_max": 117,
"alpha_frac": 0.6317073171,
"autogenerated": false,
"ratio": 3.649851632047478,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021537536570925515,
"num_lines": 31
} |
__author__ = 'Ahmad Syarif'
import pika
import json
from pydispatch import dispatcher
# Topic-name constants for the perception/command message channels.
# NOTE(review): VISUAL_FACE_DETECTION is assigned twice with the same
# value — the second assignment is redundant and can be removed.
VISUAL_FACE_DETECTION = 'VISUAL_FACE_DETECTION'
VISUAL_FACE_DETECTION = 'VISUAL_FACE_DETECTION'
VISUAL_FACE_RECOGNITION ='VISUAL_FACE_RECOGNITION'
VISUAL_FACE_TRACKING = 'VISUAL_FACE_TRACKING'
VISUAL_HUMAN_TRACKING = 'VISUAL_HUMAN_TRACKING'
AUDIO_SPEECH_RECOGNITION = 'AUDIO_SPEECH_RECOGNITION'
AUDIO_TEXT_TO_SPEECH = 'AUDIO_TEXT_TO_SPEECH'
AUDIO_GENDER_RECOGNITION = 'AUDIO_GENDER_RECOGNITION'
AVATAR_DATA_TACTILE = 'AVATAR_DATA_TACTILE'
class DataHandler(object):
    """Owns the RabbitMQ (pika) connection and fans incoming queue messages
    out as pydispatch signals, one signal name per data source.

    The flow is a chain of pika callbacks: connect -> open numbered
    channels -> declare a queue per channel -> bind it to 'amq.topic'
    -> start consuming with a per-source callback.
    """
    # Broker credentials. NOTE(review): hard-coded user/password.
    credential = pika.PlainCredentials('lumen', 'lumen')
    # Class-level flag; stays None until a connection attempt is made.
    isConnected = None
    def __init__(self):
        """Open an asynchronous (select-based) connection to the local broker.

        on_connected fires once the AMQP connection is open; the I/O loop
        is not started here -- call start() for that.
        """
        try:
            self.connection = pika.SelectConnection(parameters=pika.ConnectionParameters('localhost', 5672, '/', DataHandler.credential),on_open_callback=self.on_connected)
            # NOTE(review): set True as soon as the object is constructed,
            # before the connection is actually established.
            DataHandler.isConnected = True
        except RuntimeError as e:
            # NOTE(review): only RuntimeError is caught; pika's own
            # connection errors would propagate -- confirm intent.
            print 'unable to connect', e
        pass
    def start(self):
        """Run pika's I/O loop; blocks until the loop is stopped."""
        self.connection.ioloop.start()
        pass
    def on_connected(self,connection):
        """Connection-open callback: open one numbered channel per source.

        Channels 3-7 are currently disabled (commented out); only face
        detection (1), face recognition (2) and tactile data (8) are opened.
        """
        connection.channel(self.on_channel_open,channel_number=1)
        connection.channel(self.on_channel_open,channel_number=2)
        #connection.channel(self.on_channel_open,channel_number=3)
        #connection.channel(self.on_channel_open,channel_number=4)
        #connection.channel(self.on_channel_open,channel_number=5)
        #connection.channel(self.on_channel_open,channel_number=6)
        #connection.channel(self.on_channel_open,channel_number=7)
        connection.channel(self.on_channel_open,channel_number=8)
        pass
    def on_channel_open(self,channel):
        """Channel-open callback: store the channel and declare its queue.

        The channel number encodes which data source the channel serves;
        declared queues are durable, non-exclusive and auto-deleting.
        """
        if channel.channel_number ==1:
            self.channelVisualFaceDetection = channel
            self.channelVisualFaceDetection.queue_declare(self.on_queue_declareOk,queue='lumen.visual.face.detection',durable=True,exclusive=False,auto_delete=True)
        elif channel.channel_number==2:
            self.channelVisualFaceRecognition = channel
            self.channelVisualFaceRecognition.queue_declare(self.on_queue_declareOk,queue='lumen.visual.face.recognition',durable=True,exclusive=False,auto_delete=True)
        elif channel.channel_number==3:
            self.channelVisualFaceTracking = channel
            self.channelVisualFaceTracking.queue_declare(self.on_queue_declareOk,queue='lumen.visual.face.tracking',durable=True,exclusive=False,auto_delete=True)
        elif channel.channel_number==4:
            self.channelVisualHumanDetection = channel
            self.channelVisualHumanDetection.queue_declare(self.on_queue_declareOk,queue='lumen.visual.human.detection',durable=True,exclusive=False,auto_delete=True)
        elif channel.channel_number==5:
            self.channelAudioSpeechRecognition = channel
            self.channelAudioSpeechRecognition.queue_declare(self.on_queue_declareOk,queue='lumen.audio.speech.recognition',durable=True,exclusive=False,auto_delete=True)
        elif channel.channel_number==6:
            self.channelAudioTextToSpeech = channel
            self.channelAudioTextToSpeech.queue_declare(self.on_queue_declareOk,queue='lumen.audio.text.to.speech',durable=True,exclusive=False,auto_delete=True)
        elif channel.channel_number==7:
            self.channelAudioGenderRecognition = channel
            self.channelAudioGenderRecognition.queue_declare(self.on_queue_declareOk,queue='lumen.audio.gender.recognition',durable=True,exclusive=False,auto_delete=True)
        elif channel.channel_number==8:
            self.channelAvatarDataTactile = channel
            self.channelAvatarDataTactile.queue_declare(self.on_queue_declareOk,queue='avatar.NAO.data.tactile',durable=True,exclusive=False,auto_delete=True)
        else:
            print 'print do nothing'
            pass
        pass
    def on_queue_declareOk(self,workQueue):
        """Queue-declared callback: bind the queue to the 'amq.topic'
        exchange, using the queue name itself as the routing key."""
        if workQueue.channel_number == 1:
            self.channelVisualFaceDetection.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
        elif workQueue.channel_number == 2:
            self.channelVisualFaceRecognition.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
        elif workQueue.channel_number == 3:
            self.channelVisualFaceTracking.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
        elif workQueue.channel_number == 4:
            self.channelVisualHumanDetection.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
        elif workQueue.channel_number == 5:
            self.channelAudioSpeechRecognition.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
        elif workQueue.channel_number == 6:
            self.channelAudioTextToSpeech.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
        elif workQueue.channel_number == 7:
            self.channelAudioGenderRecognition.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
        elif workQueue.channel_number == 8:
            self.channelAvatarDataTactile.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
        else:
            pass
        pass
    def on_bindOK(self,frame):
        """Bind-OK callback: start consuming from the queue with the
        per-source message handler (no_ack=True: no acknowledgements)."""
        if frame.channel_number == 1:
            self.channelVisualFaceDetection.basic_consume(self.faceDetectionCallback,queue='lumen.visual.face.detection',no_ack=True)
        elif frame.channel_number==2:
            self.channelVisualFaceRecognition.basic_consume(self.faceRecognitionCallback,queue='lumen.visual.face.recognition',no_ack=True)
        elif frame.channel_number==3:
            self.channelVisualFaceTracking.basic_consume(self.faceTrackingCallback,queue='lumen.visual.face.tracking',no_ack=True)
        elif frame.channel_number==4:
            self.channelVisualHumanDetection.basic_consume(self.humanDetectionCallback,queue='lumen.visual.human.detection',no_ack=True)
        elif frame.channel_number==5:
            self.channelAudioSpeechRecognition.basic_consume(self.speechRecognitionCallback,queue='lumen.audio.speech.recognition',no_ack=True)
        elif frame.channel_number==6:
            self.channelAudioTextToSpeech.basic_consume(self.textToSpeechCallback,queue='lumen.audio.text.to.speech',no_ack=True)
        elif frame.channel_number==7:
            self.channelAudioGenderRecognition.basic_consume(self.genderRecognitionCallback,queue='lumen.audio.gender.recognition',no_ack=True)
        elif frame.channel_number==8:
            self.channelAvatarDataTactile.basic_consume(self.tactileDataCallback,queue='avatar.NAO.data.tactile',no_ack=True)
        else:
            pass
        pass
    # Per-queue message handlers: each parses the JSON body and re-emits
    # the payload as a pydispatch signal.
    def faceDetectionCallback(self,ch, method, property, body):
        """Dispatch face location [x, y] from a JSON body with 'x'/'y' keys."""
        result = json.loads(body)
        faceLocation = [result['x'],result['y']]
        dispatcher.send(signal=VISUAL_FACE_DETECTION,sender=self,result=faceLocation)
        pass
    def faceRecognitionCallback(self,ch, method, property, body):
        """Dispatch the recognised face's 'name' field."""
        result = json.loads(body)
        faceName = result['name']
        dispatcher.send(signal=VISUAL_FACE_RECOGNITION,sender=self,result = faceName)
        pass
    def faceTrackingCallback(self,ch, method, property, body):
        """Dispatch the raw message body (no JSON parsing here)."""
        dispatcher.send(signal=VISUAL_FACE_TRACKING,sender=self,result = body)
        pass
    def humanDetectionCallback(self,ch, method, property, body):
        """Dispatch human location [x, y] from a JSON body with 'x'/'y' keys."""
        result = json.loads(body)
        humanLocation = [result['x'],result['y']]
        dispatcher.send(signal=VISUAL_HUMAN_TRACKING,sender=self,result = humanLocation)
        pass
    def speechRecognitionCallback(self,ch, method, property, body):
        """Dispatch the recognised text from the JSON 'result' field."""
        result = json.loads(body)
        recognizedWord = result['result']
        dispatcher.send(signal=AUDIO_SPEECH_RECOGNITION,sender=self,result = recognizedWord)
        pass
    def textToSpeechCallback(self,ch, method, property, body):
        """Dispatch the JSON 'sound' field."""
        result = json.loads(body)
        sound = result['sound']
        dispatcher.send(signal=AUDIO_TEXT_TO_SPEECH,sender=self,result = sound)
        pass
    def genderRecognitionCallback(self,ch, method, property, body):
        """Dispatch the JSON 'gender' field."""
        result = json.loads(body)
        gender = result['gender']
        dispatcher.send(signal=AUDIO_GENDER_RECOGNITION,sender=self,result = gender)
        pass
    def tactileDataCallback(self,ch, method, property, body):
        """Dispatch the JSON 'value' field from a tactile-sensor message."""
        result = json.loads(body)
        value = result['value']
        dispatcher.send(signal=AVATAR_DATA_TACTILE,sender=self,result = value)
        pass
    pass
| {
"repo_name": "ahmadsyarif/Python-AgentIntelligent",
"path": "Data.py",
"copies": "2",
"size": "8882",
"license": "apache-2.0",
"hash": -1902323057293045200,
"line_mean": 57.4342105263,
"line_max": 172,
"alpha_frac": 0.713578023,
"autogenerated": false,
"ratio": 3.5886868686868687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5302264891686869,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed G. Ali'
import dbms
def retrieve_connection(db):
    """
    Retrieves Database connection object for a given connection parameters.
    :param db: Json object containing connection parameters
        (username, password, name, host, port, optional is_service flag)
    :type db: dict
    :return: oracle.dbms.Connection object
    """
    return dbms.connect.oracle(
        user=db['username'],
        password=db['password'],
        database=db['name'],
        host=db['host'],
        port=db['port'],
        is_service=db.get('is_service', False),
        encoding="UTF-8",
        nencoding="UTF-8",
    )
def execute_select(sql_stmt, db, keep_connection=False):
    """Executes select statement and returning list of results.

    :param sql_stmt: SQL statement to be executed.
    :type sql_stmt: str
    :param db: Json object containing connection string parameters
    :type db: dict
    :param keep_connection: Keep the connection opened so that it can be used
        to retrieve other nested objects, e.g. the content of an xml object.
        The caller is then responsible for closing it.
    :return: list of result rows, or a (rows, connection) tuple when
        keep_connection is True.
    """
    con = retrieve_connection(db)
    try:
        cur = con.cursor()
        cur.execute(sql_stmt)
        res = cur.fetchall()
    except Exception:
        # Close and re-raise. The original re-raised too, but a `return`
        # inside its `finally` block silently swallowed the exception
        # whenever keep_connection was True (and its `print e` after the
        # bare `raise` was unreachable dead code).
        con.close()
        raise
    if keep_connection:
        return res, con
    con.close()
    return res
def execute_insert(sql_stmt, db):
    """
    Executes insert/update statement and commits it.
    :param sql_stmt: SQL statement to be executed.
    :type sql_stmt: str
    :param db: Json object containing connection string parameters
    :type db: dict
    :raises Exception: re-raised with the failing statement appended,
        as in the original.
    """
    con = None
    try:
        con = retrieve_connection(db)
        cur = con.cursor()
        cur.execute(sql_stmt)
        con.commit()
    except Exception as e:
        # The two print statements that followed this raise in the original
        # were unreachable dead code and have been removed.
        raise Exception(str(e) + '\n' + sql_stmt)
    finally:
        if con:
            con.close()
| {
"repo_name": "arrayexpress/ae_auto",
"path": "dal/oracle/common.py",
"copies": "1",
"size": "2045",
"license": "apache-2.0",
"hash": -5602979348798659000,
"line_mean": 27.8028169014,
"line_max": 123,
"alpha_frac": 0.5828850856,
"autogenerated": false,
"ratio": 4.09,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51728850856,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed G. Ali'
import MySQLdb as mdb
def retrieve_connection(db):
    """
    Retrieves Database connection object for a given connection parameters.
    :param db: Json object containing connection parameters
        (host, username, password, port, name)
    :type db: dict
    :return: MySQLDB.Connection object
    """
    return mdb.connect(
        host=db['host'],
        user=db['username'],
        passwd=db['password'],
        port=db['port'],
        db=db['name'],
    )
def execute_select(sql_stmt, db):
    """
    Executes select statement and returning list of results.
    :param sql_stmt: SQL statement to be executed.
    :type sql_stmt: str
    :param db: Json object containing connection string parameters
    :type db: dict
    :return: list of results from DB; an empty list if the query failed
        (errors are printed and swallowed -- best-effort by design).
    """
    res = []
    con = None
    try:
        con = retrieve_connection(db)
        # DictCursor: rows come back as dicts keyed by column name.
        cur = con.cursor(mdb.cursors.DictCursor)
        cur.execute(sql_stmt)
        res = cur.fetchall()
    except mdb.Error, e:
        # Python 2 except syntax; the error is reported but not re-raised.
        print "Error %d: %s" % (e.args[0], e.args[1])
    finally:
        if con:
            con.close()
    return res
def execute_insert(sql_stmt, db):
    """
    Executes insert/update statement and commits it.
    :param sql_stmt: SQL statement to be executed.
    :type sql_stmt: str
    :param db: Json object containing connection string parameters
    :type db: dict

    Errors are printed and swallowed (best-effort, mirroring
    execute_select); the connection is always closed.
    """
    con = None
    try:
        con = retrieve_connection(db)
        cur = con.cursor(mdb.cursors.DictCursor)
        cur.execute(sql_stmt)
        con.commit()
    except mdb.Error, e:
        # Python 2 except syntax; the error is reported but not re-raised,
        # so a failed statement is silently left uncommitted.
        print "Error %d: %s" % (e.args[0], e.args[1])
    finally:
        if con:
            con.close()
| {
"repo_name": "arrayexpress/ae_auto",
"path": "dal/mysql/common.py",
"copies": "1",
"size": "1612",
"license": "apache-2.0",
"hash": -9083614565205179000,
"line_mean": 24.1875,
"line_max": 114,
"alpha_frac": 0.6073200993,
"autogenerated": false,
"ratio": 3.688787185354691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4796107284654691,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed G. Ali'
# One-off script: reads an SDRF (tab-separated) file, pulls seven known
# key/value pairs out of the free-text third column of every data row,
# and writes a copy of the file with those values appended as new,
# alphabetically-sorted columns.
# NOTE(review): input/output paths are hard-coded to a single dataset.
f = open('/home/gemmy/E-GEOD-16256_NIH_epigenome_cells_RNA-seq.sdrf.txt', 'r')
lines = f.readlines()
f.close()
# Keys to extract from the free-text column. NOTE(review): the parsing loop
# below appears to assume they occur in the text in exactly this order.
extra_header = ['sample_term_id', 'assay_term_id', 'nucleic_acid_term_id', 'Design_description', 'Library_name',
                'EDACC_Genboree_Experiment_Page', 'EDACC_Genboree_Sample_Page']
# New header row: original columns plus the extracted keys, sorted.
write_lines = ['\t'.join(lines[0].strip().split('\t') + sorted(extra_header))]
for line in lines[1:]:
    line = line.strip().split('\t')
    # Free-text column; multi-word key names are normalised to their
    # underscore form so each key becomes a single token.
    txt = line[2]
    txt = txt.replace('Design description', 'Design_description').replace('Library name', 'Library_name').replace(
        'EDACC Genboree Experiment Page', 'EDACC_Genboree_Experiment_Page').replace('EDACC Genboree Sample Page',
                                                                                    'EDACC_Genboree_Sample_Page')
    words = txt.split(' ')
    extra_index = 0  # index of the next expected key in extra_header
    d = {}  # extracted key -> value text
    for i in range(len(words)):
        # Token matches the next expected key (trailing ':' stripped)?
        if words[i].strip().replace(':', '') == extra_header[extra_index]:
            extra_index += 1
            if extra_index == len(extra_header):
                # Last key: its value is assumed to be a single token.
                d[extra_header[extra_index - 1]] = words[i + 1]
                break
            else:
                # Value runs from the token after the key up to (but not
                # including) the next key's 'name:' token.
                next_index = words.index(extra_header[extra_index] + ':')
                d[extra_header[extra_index - 1]] = ' '.join(words[i + 1:next_index])
    # Append values in sorted-key order to match the header built above.
    for k in sorted(d.keys()):
        line.append(d[k])
    write_lines.append('\t'.join(line))
f = open('/home/gemmy/E-GEOD-16256_NIH_epigenome_cells_RNA-seq.sdrf.txt_modified', 'w')
f.write('\n'.join(write_lines))
f.close()
| {
"repo_name": "arrayexpress/ae_auto",
"path": "misc/extract_combined_columns.py",
"copies": "1",
"size": "1572",
"license": "apache-2.0",
"hash": 945249577302342500,
"line_mean": 43.9142857143,
"line_max": 114,
"alpha_frac": 0.5655216285,
"autogenerated": false,
"ratio": 3.2081632653061223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4273684893806122,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed G. Ali'
def geo_email_parse(email_body):
    """Extract GEO accession ids from an email body and map each to an
    ArrayExpress id.

    :param email_body: raw text of a GEO notification email.
    :type email_body: str
    :return: dict mapping GEO id (GSExxx or GDSxxx) to 'E-GEOD-xxx'.
    """
    ids = {}
    # split() with no argument splits on ANY whitespace; the original
    # split(" ") left newline-separated tokens fused ("GSE123\nfoo"),
    # producing corrupted ids.
    for word in email_body.split():
        word = word.replace(',', '')
        if word.startswith('GSE'):
            ids[word] = 'E-GEOD-%s' % word.replace('GSE', '')
        elif word.startswith('GDS'):
            # NOTE(review): GDS ids are mapped to E-GEOD-<number> exactly
            # like GSE ids here -- confirm this is the intended mapping.
            ids[word] = 'E-GEOD-%s' % word.replace('GDS', '')
    return ids
if __name__ == '__main__':
email = u"""
[ Microarray OTRS ] Ahmed Ali (ahmed@ebi.ac.uk) 2016-07-04 12:50:34
Logout
Logout
QueueView
QueueView
StatusView
StatusView
Phone-Ticket
Phone-Ticket
Email-Ticket
Email-Ticket
Search
Search
Customer
Customer
Bulk-Action
Bulk-Action
-
Calendar
Calendar
Preferences
Preferences
Responsible (165)
Responsible (165)
Watched Tickets (0)
Watched Tickets (0)
New message (1)
New message (1)
Locked Tickets (1)
Locked Tickets (1)
Info : You have 1 new message(s)!
[ Queue: developers::ahmed ]
Tickets shown: 1-50 - Page: 1 2 - Tickets available: 72 - All tickets: 73
Queues: My Queues (199) - annotare (42) - arrayexpress (19) - biosamples (15) - biostudies (1) - developers (186) - HTS new (19) - junk (5222) - miamexpress (1953) - raw (36866) - twitter (2)
ahmed (72) - atlas (77)
[ Ticket#: 1605090205 ] CRAM Spot Length Help [ Age: 55 days 20 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-05-10 12:02:02
From:
Vadim Zalunin <vadim@ebi.ac.uk>
To:
datasubs@ebi.ac.uk, arrayexpress@ebi.ac.uk
Cc:
dsmirnov@ebi.ac.uk
Subject:
Re: CRAM Spot Length Help [Ticket#1605090205] (SUB#916874)
Hello,
For Illumina paired reads this is a valid approach unless the reads have
been trimmed or hard clipped.
Vadim
On 10/05/2016 11:40, datasubs@ebi.ac.uk wrote:
> Hi Ahmed,
>
> Dmitriy and/or Vadim are best suited to advise because the processing pipeline
> is already calculating these things from the submitted cram files. For Illumina
> output where forward and reverse reads are the same length you may be able to do
> something like this for spot length:
>
>> cramtools fastq -I LCK_1_10.cram | head -n2 | tail -n1 | wc -c
> 126
>
[...]
State:
open
Priority:
5 very high
Queue:
developers::ahmed
CustomerID:
Owner:
ahmed
Trac:
Curation_Status:
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 2015120910000291 ] Changing Release Date [ Age: 207 days 21 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-01-08 10:02:03
From:
datasubs@ebi.ac.uk
To:
arrayexpress@ebi.ac.uk
Subject:
Re: [Ticket#2015120910000291] Changing Release Date (neilg) (SUB#907788)
Notes:
Ahmed: I've tested it using the same submission xml on the Webin test server and it is now working.
This should go to Amy to open the ticket for the Dev team to implement in AE.
Amy: Pivotal ticket logged ( https://www.pivotaltracker.com/story/show/86417364 ). Hope it'll be delivered soon! (25 Jan 2016)
Hi Ahmed,
The ability to update the hold date for a study has been added using the
HoldUntilDate attribute in the HOLD element in the submission XML.
Regards
Neil
Neil Goodgame
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
State:
open
Priority:
4 high
Queue:
developers::ahmed
CustomerID:
Owner:
ahmed
Trac:
Curation_Status:
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1607040041 ] ENA (Webin-24): file processing errors [ Age: 6 hours 48 minutes ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-07-04 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1607030294 ] ENA (Webin-24): file processing errors [ Age: 1 day 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-07-03 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1607020563 ] [geo] GEO->AE unpublish notification: GSE67754 [NCBI trackin[..] [ Age: 1 day 20 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-07-02 16:16:07
From:
"GEO - Emily Clough" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE67754 [NCBI tracking system #17956664[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE67754 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:229:4334271
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1607020545 ] ENA (Webin-24): file processing errors [ Age: 2 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-07-02 06:03:17
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606280061 ] ENA (Webin-24): file processing errors [ Age: 6 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-28 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606270036 ] ENA (Webin-24): file processing errors [ Age: 7 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-27 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606260065 ] ENA (Webin-24): file processing errors [ Age: 8 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-26 06:02:01
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606220125 ] ENA (Webin-24): file processing errors [ Age: 12 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-22 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606210403 ] [geo] GEO->AE unpublish notification: GSE65169 [NCBI trackin[..] [ Age: 12 days 22 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-21 14:14:11
From:
"GEO - Hyeseung Lee" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE65169 [NCBI tracking system #17943901[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE65169 was returned to private status.
Regards,
The GEO Team
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:227:1360630
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
encrypted messages or attachments unless agreed. We always sign emails by
name.
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606210109 ] ENA (Webin-24): file processing errors [ Age: 13 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-21 06:02:04
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606210065 ] [geo] GEO->AE unpublish notification: GSE71536 [NCBI trackin[..] [ Age: 13 days 9 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-21 03:20:05
From:
"GEO - Patti Sherman" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE71536 [NCBI tracking system #17943092[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE71536 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:222:4015098
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606200334 ] [geo] GEO->AE unpublish notification: GSE74365 [NCBI trackin[..] [ Age: 13 days 19 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-20 17:14:03
From:
"GEO - Patti Sherman" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE74365 [NCBI tracking system #17939672[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE74365 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:225:1000312
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606200129 ] [geo] GEO->AE unpublish notification: GSE83501 [NCBI trackin[..] [ Age: 14 days 1 hour ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-20 11:28:03
From:
"GEO - Irene Kim" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE83501 [NCBI tracking system #17939075[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE83501 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:217:2782673
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
encrypted messages or attachments unless agreed. We always sign emails by
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606200111 ] [geo] GEO->AE unpublish notification: GSE83467 [NCBI trackin[..] [ Age: 14 days 1 hour ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-20 11:28:03
From:
"GEO - Irene Kim" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE83467 [NCBI tracking system #17939074[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE83467 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:228:4025240
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
encrypted messages or attachments unless agreed. We always sign emails by
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606200076 ] ENA (Webin-24): file processing errors [ Age: 14 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-20 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606190113 ] ENA (Webin-24): file processing errors [ Age: 15 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-19 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606180081 ] ENA (Webin-24): file processing errors [ Age: 16 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-18 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606170091 ] ENA (Webin-24): file processing errors [ Age: 17 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-17 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606160799 ] [geo] GEO->AE unpublish notification: GSE69697 [NCBI trackin[..] [ Age: 17 days 23 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-16 13:48:03
From:
"GEO - Emily Clough" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE69697 [NCBI tracking system #17936769[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE69697 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:237:2924732
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606160673 ] ENA (Webin-24): file processing errors [ Age: 18 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-16 06:02:04
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606150531 ] [geo] GEO->AE unpublish notification: GSE54076 [NCBI trackin[..] [ Age: 18 days 18 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-15 18:28:03
From:
"GEO - Patti Sherman" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE54076 [NCBI tracking system #17936045[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE54076 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:222:122640
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606150513 ] [geo] GEO->AE unpublish notification: GSE83333 [NCBI trackin[..] [ Age: 18 days 20 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-15 15:56:03
From:
"GEO - Patti Sherman" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE83333 [NCBI tracking system #17935492[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE83333 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:220:426540
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606150184 ] ENA (Webin-24): file processing errors [ Age: 19 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-15 06:06:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606140981 ] [geo] GEO->AE unpublish notification: GSE69770 [NCBI trackin[..] [ Age: 19 days 18 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-14 18:40:10
From:
"GEO - Kimberly Marshall" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE69770 [NCBI tracking system #17934521[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE69770 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:229:2192785
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
encrypted messages or attachments unless agreed. We always sign emails by
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606140122 ] ENA (Webin-24): file processing errors [ Age: 20 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-14 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606130357 ] [geo] GEO->AE unpublish notification: GSE75956 [NCBI trackin[..] [ Age: 20 days 19 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-13 16:54:04
From:
"GEO - Patti Sherman" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE75956 [NCBI tracking system #17931818[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE75956 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:232:84982
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606130339 ] [geo] GEO->AE unpublish notification: GSE51636 [NCBI trackin[..] [ Age: 20 days 20 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-13 16:18:03
From:
"GEO - Patti Sherman" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE51636 [NCBI tracking system #17931736[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE51636 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:221:5205224
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606130062 ] ENA (Webin-24): file processing errors [ Age: 21 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-13 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606120064 ] ENA (Webin-24): file processing errors [ Age: 22 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-12 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606110084 ] ENA (Webin-24): file processing errors [ Age: 23 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-11 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606100273 ] [geo] GEO->AE unpublish notification: GSE75285, GSE75271, [..] [ Age: 23 days 15 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-10 21:28:03
From:
"GEO - Irene Kim" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE75285, GSE75271, GSE75283, GSE75284 [[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE75285, GSE75271, GSE75283, GSE75284 were returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:24:61:20:1:3097888
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
encrypted messages or attachments unless agreed. We always sign emails by
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606100086 ] ENA (Webin-24): file processing errors [ Age: 24 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-10 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606090311 ] [geo] GEO->AE unpublish notification: GSE78722 [NCBI trackin[..] [ Age: 24 days 19 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-09 17:32:03
From:
"GEO - Carlos Evangelista" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE78722 [NCBI tracking system #17929395[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE78722 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:226:5937297
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
encrypted messages or attachments unless agreed. We always sign emails by
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606090258 ] Data withdrawal [ Age: 24 days 22 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-09 14:16:04
From:
Quan Lin <ql3@sanger.ac.uk>
To:
arrayexpress <arrayexpress@ebi.ac.uk>
Cc:
Data submission service <datahose@sanger.ac.uk>
Subject:
Data withdrawal
Hello,
Could you please remove E-ERAD-465 from ArrayExpress as the linked data
in EGA has been withdrawn?
Thanks,
Quan
--
The Wellcome Trust Sanger Institute is operated by Genome Research
Limited, a charity registered in England with number 1021457 and a
company registered in England with number 2742969, whose registered
office is 215 Euston Road, London, NW1 2BE.
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: ql3@sanger.ac.uk
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606090098 ] ENA (Webin-24): file processing errors [ Age: 25 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-09 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606080367 ] [geo] GEO->AE unpublish notification: GSE66041 [NCBI trackin[..] [ Age: 25 days 20 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-08 15:52:07
From:
"GEO - Patti Sherman" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE66041 [NCBI tracking system #17927140[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE66041 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:217:2784839
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606080198 ] Data for ERA037246 is marked as 'paired-end' but loaded as [..] [ Age: 26 days 2 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-08 10:08:03
From:
datasubs@ebi.ac.uk
To:
annotare@ebi.ac.uk
Subject:
Data for ERA037246 is marked as 'paired-end' but loaded as single-end (SUB#9189[..]
Dear Array Express colleagues,
Can you investigate the following?
E-MTAB-721/ERP000760/PRJEB2597
Thank you,
Marc
>Date: Tue, 7 Jun 2016 15:51:20 +0000
>From: jonathan.trow@nih.gov
>Reply-to:
>To: "datasubs@ebi.ac.uk" <datasubs@ebi.ac.uk>
>Subject: Data for ERA037246 is marked as 'paired-end' but loaded as single-end
> Dear Colleague,
> We received an inquiry about ERA037246 where some of the data (examples: ER=
> X014989, ERX014986) are indicated to be paired-end data, but dump as single=
> -end. It appears that matching forward and reverse reads were loaded to sep=
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606080171 ] ENA (Webin-24): file processing errors [ Age: 26 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-08 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606080152 ] ENA (Webin-24): your data will become public in the next 14 [..] [ Age: 26 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-08 06:02:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): your data will become public in the next 14 days
Dear Colleague,
We would like to inform you of your studies that will become public in the next 14 days
with all associated data.The list of studies is shown at the bottom of this email.If you
wish to extend the release date, please find the instructions in the following link:
http://www.ebi.ac.uk/ena/about/data-release-mechanism
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of studies nearing their publication date:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606070538 ] [geo] GEO->AE unpublish notification: GSE75443 [NCBI trackin[..] [ Age: 26 days 19 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-07 16:52:06
From:
"GEO - Steve Wilhite" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE75443 [NCBI tracking system #17925213[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE75443 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:223:6236982
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606070341 ] [geo] GEO->AE unpublish notification: GSE58175 [NCBI trackin[..] [ Age: 26 days 21 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-07 15:00:04
From:
"GEO - Steve Wilhite" <geo@ncbi.nlm.nih.gov>
To:
miamexpress@ebi.ac.uk
Subject:
[geo] GEO->AE unpublish notification: GSE58175 [NCBI tracking system #17925047[..]
------ MESSAGE BODY. YOU MAY CHANGE IT OR ADD COMMENTS ABOVE ------
Dear ArrayExpress Team,
The Series GSE58175 was returned to private status.
Regards,
The GEO Team
*************
---- END OF MESSAGE BODY. PLEASE DO NOT CHANGE THE DATA BELOW ----
SK#:15:60:5:226:988891
Please leave the subject line unchanged, and do not change the message
at end from the line with "END OF MESSAGE BODY" to the end.
NOTE: the geo email is often abused (spoofed) by spammers. We never send
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: geo@ncbi.nlm.nih.g[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606070065 ] ENA (Webin-24): file processing errors [ Age: 27 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-07 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606060085 ] Data Files [ Age: 28 days 0 hour ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-10 13:04:02
From:
datasubs@ebi.ac.uk
To:
arrayexpress@ebi.ac.uk
Subject:
Re: Data Files(rasko) (SUB#918814) [Ticket#1606060085]
Hi Ahmed,
> That would be great, thank you very much :)
>
> It really makes sense that cancelled data are no longer needed, but we have
> this case - and others - where the submitter decided to modify and re-arrange
> one or more experiments. In this specific case, he wants to split his study
> into 2 studies with different release dates! That's why we cancelled the
> project, and will submit two different submissions. But due to some migrations
> in the submission tool, many data files were deleted assuming that they were
> already brokered to ENA.
>
> Thanks for your help Rasko, I agree with your proposed solutions. Kindly make
> sure I have read access to these directories.
Done. There are ~250k new files which will gradually start showing up (will take
a bit of time for the changes to propagate to FIRE storage). Checked that one of
the two submitted fastqs is already available:
[...]
State:
open
Priority:
3 normal
Queue:
developers::ahmed
CustomerID:
Owner:
ahmed
Trac:
Curation_Status:
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606060031 ] ENA (Webin-24): file processing errors [ Age: 28 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-06 06:04:02
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606050041 ] ENA (Webin-24): file processing errors [ Age: 29 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-05 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606030054 ] ENA (Webin-24): file processing errors [ Age: 31 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-03 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1606010129 ] ENA (Webin-24): file processing errors [ Age: 33 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-06-01 06:02:05
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
[ Ticket#: 1605310164 ] ENA (Webin-24): file processing errors [ Age: 34 days 6 hours ]
Lock - Zoom - History - Priority - Note - Close
Created: 2016-05-31 06:02:03
From:
"ENA" <datasubs@ebi.ac.uk>
To:
ArrayExpress submissions<annotare@ebi.ac.uk>
Subject:
ENA (Webin-24): file processing errors
Dear Colleague,
During processing of your submitted files, we have found problems with one or more of your
submitted files. Please review the error report provided at the end of this email and
re-upload corrected files to your upload area. We will automatically scan for these files
on a daily basis and attempt to re-process them.
This is an automatically generated email. If you wish to enquire about the contents of
this email, please reply to this email without any changes to the subject line.
Kind regards,
European Nucleotide Archive
European Molecular Biology Laboratory
European Bioinformatics Institute (EMBL-EBI),
Wellcome Trust Genome Campus, Hinxton, Cambridge CB10 1SD, U.K.
Tel: +44 1223 494444.
List of file processing errors:
[...]
State:
new
Priority:
3 normal
Queue:
developers::ahmed
CustomerID: datasubs@ebi.ac.uk[..]
Owner:
root@localhost
Compose Answer (email):
Empty answer
Two-colour Annotare problem
Contact customer (phone):
Phone call
Change queue:
Move
Top of Page
Powered by OTRS 2.2.7
"""
print ' '.join(geo_email_parse(email).keys())
| {
"repo_name": "arrayexpress/ae_auto",
"path": "utils/email/parser.py",
"copies": "1",
"size": "62039",
"license": "apache-2.0",
"hash": 4712202046893401000,
"line_mean": 25.2210481826,
"line_max": 191,
"alpha_frac": 0.7554763939,
"autogenerated": false,
"ratio": 3.084369096151934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43398454900519345,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed G. Ali'
if __name__ == '__main__':
    import argparse

    # Command-line entry point: collects the submission directory, accession
    # number and optional path overrides for the ENA/ArrayExpress sequencing
    # loader. The code that consumes parsed args is outside this excerpt.
    parser = argparse.ArgumentParser(description='submits and loads sequencing experiment to ENA and ArrayExpress')
    # Positional: directory holding the submission metadata files.
    parser.add_argument('dir_name', metavar='MAGE-TAB_xxxx', type=str,
                        help='''The directory name where the submission meta-date files exists.
                        If used without the base_dir argument then the default base directory is:
                        /ebi/microarray/ma-exp/AutoSubmissions/annotare/''')
    # Positional: ArrayExpress experiment accession.
    parser.add_argument('accession', metavar='E-MTAB-xxxx', type=str,
                        help='''The accession number for the experiment''')
    # Optional override of the experiment data base directory.
    parser.add_argument('-bd', '--base_dir', metavar='path/to/experiment/directory/__without__/MAGE-TAB_xxx', type=str,
                        help="""The base directory for the experiment's data.
                        If not given the default value is /ebi/microarray/ma-exp/AutoSubmissions/annotare/""")
    # Optional override of the fastq location on the ENA side.
    parser.add_argument('-ed', '--ena_dir', metavar='path/to/fastq/files/directory/', type=str,
                        help="""The location of fastq files on ENA machine.
                        If not given the default value is /fire/staging/aexpress/""")
    # Flag: IDF and SDRF delivered as one combined MAGE-TAB file.
    parser.add_argument('-ic', '--is_combined', action='store_true',
                        help='A flag indicating that the IDF and SDRF are in the same file.')
"repo_name": "arrayexpress/ae_auto",
"path": "automation/ena/replace_runs.py",
"copies": "1",
"size": "1426",
"license": "apache-2.0",
"hash": 6264442848415832000,
"line_mean": 66.9523809524,
"line_max": 119,
"alpha_frac": 0.6051893408,
"autogenerated": false,
"ratio": 4.194117647058824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004819534137146773,
"num_lines": 21
} |
__author__ = 'Ahmed Hani Ibrahim'
from LearningAlgorithm import *
class Backpropagation(LearningAlgorithm):
    def learn(self, learningRate, input, output, network):
        """
        Run one Backpropagation step over the whole network.

        :param learningRate: double -- step size for the weight updates
        :param input: list -- feature vector fed to the first trainable layer
        :param output: list -- target values for the output layer
        :param network: [[Neuron]] -- trainable layers (input layer excluded,
            as built by FeedforwardNeuralNetwork.setNetwork)
        :return: [[Neuron]] -- the network with updated weights and biases

        The algorithm:
        1- Calculate the error signal for each neuron on each layer
        2- Update the weights of each neuron according to its update formula
        3- Return the new weights of the whole network
        """
        # Backward pass: propagate the error signal from the output layer
        # down to the first trainable layer.
        # BUG FIX: the loop previously stopped at layer 1 (range(..., 0, -1)),
        # so network[0] never had SignalError computed even though the update
        # loop below reads it; for a single-layer network nothing was computed
        # at all. The stop value -1 includes layer 0.
        for i in range(len(network) - 1, -1, -1):
            for j in range(0, len(network[i])):
                currentNeuron = network[i][j]
                if i == len(network) - 1:
                    # Output layer: error = (target - actual) scaled by the
                    # activation derivative at the neuron's net input.
                    currentNeuron.SignalError = (output[j] - currentNeuron.Output) * \
                                                currentNeuron.ActivationFunction.derivative(currentNeuron.Net)
                else:
                    # Hidden layer: accumulate the weighted error signals of
                    # the neurons in the next layer.
                    summation = 0.0
                    for k in range(0, len(network[i + 1])):
                        nextNeuron = network[i + 1][k]
                        summation += (nextNeuron.Weights[j] * nextNeuron.SignalError)
                    currentNeuron.SignalError = summation * currentNeuron.ActivationFunction.derivative(
                        currentNeuron.Net)
                network[i][j] = currentNeuron
        # Delta-rule update of every neuron's weights and bias.
        # (Two dead `x = len(network[i])` assignments were removed.)
        for i in range(0, len(network)):
            for j in range(0, len(network[i])):
                currentWeights = network[i][j].Weights
                currentBias = network[i][j].Bias
                for k in range(0, len(currentWeights)):
                    if i == 0:
                        # The first trainable layer is fed by the raw input.
                        currentWeights[k] += learningRate * network[i][j].SignalError * input[k]
                    else:
                        # Deeper layers are fed by the previous layer's outputs.
                        currentWeights[k] += learningRate * network[i][j].SignalError * network[i - 1][k].Output
                currentBias += learningRate * network[i][j].SignalError
                network[i][j].update(currentWeights, currentBias)
        return network
"repo_name": "AhmedHani/Python-Neural-Networks-API",
"path": "OptimizationAlgorithms/Backpropagation.py",
"copies": "1",
"size": "2236",
"license": "mit",
"hash": -8875118512977930000,
"line_mean": 39.6727272727,
"line_max": 112,
"alpha_frac": 0.5348837209,
"autogenerated": false,
"ratio": 4.4541832669322705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5489066987832271,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed Hani Ibrahim'
from NeuralNetwork.Neuron import Neuron
from ActivationFunctions.Sigmoid import *
import numpy as np
class FeedforwardNeuralNetwork(object):
    """A fully-connected feedforward network built from Neuron objects.

    The input layer is implicit: __network stores only the hidden and output
    layers, each as a list of Neuron instances.
    """
    # Class-level placeholders; __init__/setNetwork create the real
    # instance attributes.
    __numberOfLayers = 0
    __numberOfInput = 0
    __network = [[Neuron]]
    __numberOfNeuronsPerLayer = 0

    def __init__(self, numberOfLayers):
        """
        :param numberOfLayers: int -- total layer count, including the input layer
        :raises Exception: when fewer than 2 layers are requested
        """
        if numberOfLayers < 2:
            raise Exception("Can't Initiate Network with lower than 2 layers")
        self.__numberOfLayers = numberOfLayers

    def setNetwork(self, numberOfNeuronsPerLayer):
        """
        :param numberOfNeuronsPerLayer: list -- neuron count per layer,
            index 0 being the input layer size
        Construct the network given the number of neurons per layer.
        """
        if self.__numberOfLayers != len(numberOfNeuronsPerLayer):
            raise Exception("Wrong List size for numOfNeuronsPerLayer")
        self.__numberOfNeuronsPerLayer = numberOfNeuronsPerLayer
        self.__numberOfInput = numberOfNeuronsPerLayer[0]
        # Each neuron in layer i carries one weight per neuron of layer i-1;
        # sigmoid is the default activation.
        self.__network = [[Neuron(Sigmoid(), numberOfNeuronsPerLayer[i - 1])
                           for j in range(0, numberOfNeuronsPerLayer[i])] for i in range(1, self.__numberOfLayers)]

    def setLayer(self, layerIndex, activationFunction):
        """
        :param layerIndex: int -- 1-based trainable layer index (0 is the input layer)
        :param activationFunction: ActivationFunction
        Create neurons with the given activation and add them to that layer.
        NOTE(review): this appends on top of neurons created by setNetwork
        rather than replacing them -- confirm intended usage.
        """
        if layerIndex == 0:
            raise Exception("Can't set Input Layer")
        for i in range(0, self.__numberOfNeuronsPerLayer[layerIndex]):
            neuron = Neuron(activationFunction, self.__numberOfNeuronsPerLayer[layerIndex - 1])
            self.__network[layerIndex - 1].append(neuron)

    def setNeuron(self, layerIndex, neuronIndex, weights, bias):
        """
        :param layerIndex: int
        :param neuronIndex: int
        :param weights: list -- must match the previous layer's size
        :param bias: double
        Overwrite one neuron's weights and bias.
        """
        if len(weights) != self.__numberOfNeuronsPerLayer[layerIndex - 1]:
            raise Exception("Invalid Weights Size!")
        self.__network[layerIndex - 1][neuronIndex].update(weights, bias)

    def computOutput(self, input):
        """
        :param input: list -- one sample, sized like the input layer
        :return: list -- output-layer activations
        Run a forward pass through every trainable layer.
        """
        if len(input) != self.__numberOfInput:
            raise Exception("Invalid Input Size!")
        currentInput = input
        nextInput = []
        for i in range(1, self.__numberOfLayers):
            for j in range(0, self.__numberOfNeuronsPerLayer[i]):
                res = self.__network[i - 1][j].feedforward(currentInput)
                nextInput.append(res)
                # BUG FIX: removed a leftover debug print(res) that spammed
                # stdout with every neuron's activation on every pass.
            currentInput = nextInput
            nextInput = []
        output = currentInput
        return output

    def train(self, trainingSamples, trainingLabels, learningRate, learningAlgorithm):
        """
        :param trainingSamples: [[double]] -- feature vectors
        :param trainingLabels: [double] -- targets, parallel to the samples
        :param learningRate: double
        :param learningAlgorithm: LearningAlgorithm -- e.g. Backpropagation
        :return: [[Neuron]] -- the trained network
        One pass over the samples: forward pass, then delegate the weight
        update to the chosen learning algorithm.
        """
        for i in range(0, len(trainingSamples)):
            output = self.computOutput(trainingSamples[i])
            self.__network = learningAlgorithm.learn(learningRate, trainingSamples[i], trainingLabels[i], self.__network)
        return self.__network
| {
"repo_name": "AhmedHani/Python-Neural-Networks-API",
"path": "NeuralNetwork/FeedforwardNeuralNetwork.py",
"copies": "1",
"size": "3553",
"license": "mit",
"hash": -7092574169344296000,
"line_mean": 31.8981481481,
"line_max": 121,
"alpha_frac": 0.6296087813,
"autogenerated": false,
"ratio": 4.543478260869565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5673087042169566,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed Hani Ibrahim'
from State import State
from Transition import Transition
class QLearning(object):
    """Tabular Q-learning over an explicit State/Transition graph.

    States expose GetTransitions()/GetName(); transitions expose the taken
    Action, a reinforcement cost and a mutable Q-value.
    """

    def train(self, initState, actions):
        """Replay *actions* from *initState*, updating each taken transition.

        For every action, the matching outgoing transition's Q-value is set to
        cost + learningRate * max Q over the destination's transitions.
        """
        currentState = initState
        foundState = False
        for action in actions:
            # BUG FIX: GetTransitions was referenced without calling it
            # (currentState.GetTransitions), inconsistent with the called
            # form used in __maxValue and agentPlay.
            for transition in currentState.GetTransitions():
                if foundState:
                    break
                if transition.GetAction().name == action.GetActionName():
                    transition.SetQValue(transition.GetReinforcementCost() + self.__learningRate *
                                         self.__maxValue(transition.GetDestinationState()))
                    currentState = transition.GetDestinationState()
                    foundState = True
            foundState = False

    def agentPlay(self, initState):
        """Greedily follow the best transition from each state, printing the path.

        NOTE(review): there is no terminal-state check -- this loop never returns.
        """
        currentState = initState
        # BUG FIX: GetName was referenced without calling it, which would
        # concatenate a bound method into the string.
        print(currentState.GetName() + " -> ")
        while True:
            best = self.__bestMove(currentState.GetTransitions())
            print(best.GetDestinationState().GetName() + " -> ")
            currentState = best.GetDestinationState()

    def __maxValue(self, state):
        # Highest Q-value among the state's outgoing transitions (0.0 if none).
        maxVal = 0.0
        for transition in state.GetTransitions():
            if transition.GetQValue() > maxVal:
                maxVal = transition.GetQValue()
        return maxVal

    def addState(self, state):
        self.__states.append(state)

    def __bestMove(self, transitions):
        # BUG FIX: lists have no .get(); index the first candidate instead.
        bestTransition = transitions[0]
        for transition in transitions:
            if transition.GetQValue() > bestTransition.GetQValue():
                bestTransition = transition
        return bestTransition

    def logs(self):
        for state in self.__states:
            for transition in state.GetTransitions():
                # BUG FIX: GetName was not called and the float Q-value was
                # concatenated without str(), which raises TypeError.
                # NOTE(review): train compares Action.name while this calls
                # GetName() -- confirm the Action API exposes both.
                print(state.GetName() + " " + transition.GetAction().GetName() + " " + str(transition.GetQValue()))

    def __init__(self, learningRate):
        # BUG FIX: was [State], which seeded the list with the State class
        # object itself instead of starting empty.
        self.__states = []
        self.__learningRate = learningRate
| {
"repo_name": "AhmedHani/Deep-Q-Learning",
"path": "DeepQLearning/QLearning.py",
"copies": "1",
"size": "2097",
"license": "mit",
"hash": -5623856706218656000,
"line_mean": 28.5352112676,
"line_max": 108,
"alpha_frac": 0.582260372,
"autogenerated": false,
"ratio": 4.733634311512415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01529397804823343,
"num_lines": 71
} |
__author__ = 'Ahmed Hani Ibrahim'
from Structures.Cell import Cell
from Structures.Point import Point
from Utilities.Utilities import *
class Astar(object):
    """A* path finding over a 2D cost grid with 8-way movement.

    NOTE(review): several expressions below look broken (tuple-indexing of
    nested Python lists such as closeList[x, y], `Cell.Cell` attribute
    access, shared cell objects, and `list.reverse()`'s None return being
    assigned). They are documented as-is rather than rewritten.
    """
    # Class-level placeholders, shadowed by the instance attributes below.
    __directions = []
    __path = [[]]
    __source = Cell
    __destination = Cell
    __map = [[]]

    def __init__(self, map):
        """
        :param map: 2D grid of per-cell movement costs (shadows the builtin `map`)
        """
        self.__map = map
        # Back-pointer table: __path[x][y] holds the predecessor of (x, y).
        self.__path = [[0 for j in range(0, len(self.__map[0]))] for i in range(0, len(self.__map))]

    def __initDirections(self):
        # The eight neighbour offsets: 4 orthogonal and 4 diagonal moves.
        self.__directions = [
            Point(1, 0), Point(0, 1), Point(1, 1),
            Point(0, -1), Point(-1, 0), Point(-1, 1),
            Point(1, -1), Point(-1, -1)
        ]

    def aStarPathFinder(self, source, destination):
        """Search a path from source to destination with A*.

        :param source: start cell
        :param destination: goal cell
        :return: list of predecessor points, or None when the open list
            empties without reaching the goal
        """
        self.__initDirections()
        self.__source = source
        self.__destination = destination
        # Open list used as a priority queue ordered on f = g + h.
        # NOTE(review): pre-filled with width*height float zeros which have
        # no `.f` attribute, so the sort below would fail on them -- confirm.
        PQ = [0.0 for i in range(0, len(self.__map) * len(self.__map[0]))]
        closeList = [[False for j in range(0, len(self.__map[0]))] for i in range(0, len(self.__map))]
        visited = [[False for j in range(0, len(self.__map[0]))] for i in range(0, len(self.__map))]
        # Cache of the last Cell record (f, g, h) computed per coordinate.
        fgh = [[0 for j in range(0, len(self.__map[0]))] for i in range(0, len(self.__map))]
        # NOTE(review): Cell.Cell reads an attribute named Cell on the Cell
        # class; currentCell and nextCell end up sharing one object -- confirm.
        currentCell = Cell.Cell
        currentCell.position = source
        currentCell.g = 0.0
        # Heuristic h: straight-line distance to the goal.
        currentCell.h = Utilities.getEuclideanDistance(source.position, destination.position)
        currentCell.f = currentCell.g + currentCell.h
        PQ.append(currentCell)
        nextCell = Cell.Cell
        while PQ.__len__() != 0:
            # NOTE(review): reverse=True combined with pop(0) extracts the
            # LARGEST f, whereas A* must expand the smallest -- confirm.
            PQ.sort(key=lambda it: it.f, reverse=True)
            currentCell = PQ.pop(0)
            if currentCell.position == destination:
                return self.__getPath(source, destination)
            # NOTE(review): tuple-indexing a nested list raises TypeError.
            closeList[currentCell.position.x, currentCell.position.y] = True
            for i in range(0, 8):
                # Probe the neighbour in direction i.
                nextCell.position.x = currentCell.position.x + self.__directions[i].x
                nextCell.position.y = currentCell.position.y + self.__directions[i].y
                if visited[nextCell.position.x][nextCell.position.y]:
                    # Reuse the cached record for an already-seen coordinate.
                    nextCell = fgh[nextCell.position.x][nextCell.position.y]
                if nextCell.position.x >= 0 and nextCell.position.x < len(self.__map) \
                        and nextCell.position.y >= 0 and nextCell.position.y < len(self.__map[0]):
                    # Wall check. NOTE(review): tuple-indexing again; walls
                    # "to be handled" per the original comment.
                    if self.__map[nextCell.position.x, nextCell.position.y] != None: #Wall to be handled
                        continue
                    # Tentative g: cost so far plus the neighbour's cell cost.
                    currentG = currentCell.g + self.__map[nextCell.position.x][nextCell.position.y]
                    if self.__isDiagonal(currentCell.position, nextCell.position):
                        currentG += 5 #Heu -- extra penalty for diagonal moves
                    if closeList[nextCell.position.x][nextCell.position.y] == False:
                        # First time this neighbour is reached: record it.
                        closeList[nextCell.position.x][nextCell.position.y] = True
                        visited[nextCell.position.x][nextCell.position.y] = True
                        self.__path[nextCell.position.x][nextCell.position.y] = currentCell.position
                        nextCell.h = Utilities.getEuclideanDistance(currentCell.position, destination) * 10.0
                        nextCell.g = currentG
                        nextCell.f = nextCell.g + nextCell.h
                        fgh[nextCell.position.x][nextCell.position.y] = nextCell
                        PQ.append(nextCell)
                    elif currentG < nextCell.g:
                        # Better path found: re-queue with the improved g.
                        closeList[nextCell.position.x][nextCell.position.y] = True
                        visited[nextCell.position.x][nextCell.position.y] = True
                        PQ.remove(nextCell)
                        nextCell.h = Utilities.getEuclideanDistance(currentCell.position, destination) * 10.0
                        nextCell.g = currentG
                        nextCell.f = nextCell.g + nextCell.h
                        fgh[nextCell.position.x][nextCell.position.y] = nextCell
                        PQ.append(nextCell)
                        self.__path[nextCell.position.x][nextCell.position.y] = currentCell.position
        # Open list exhausted without reaching the destination.
        return None

    def __getPath(self, source, destination):
        # Walk the back-pointers from the goal towards the source.
        # NOTE(review): currentPoint starts as the Point class and is never
        # initialised to `destination`; also list.reverse() returns None,
        # so the final assignment yields None -- confirm.
        resultedPath = []
        currentPoint = Point
        while currentPoint != source:
            resultedPath.append(currentPoint)
            currentPoint = self.__path[currentPoint.x, currentPoint.y]
        resultedPath = resultedPath.reverse()
        return resultedPath

    def __isDiagonal(self, nextPoint, currentPoint):
        # True when the step changes both coordinates (a diagonal move).
        # NOTE(review): mixed-case attribute access (.x vs .X) looks
        # inconsistent -- confirm the Point attribute names.
        return True if ((nextPoint.y < currentPoint.y and nextPoint.x > currentPoint.x)
                        or (nextPoint.y < currentPoint.y and nextPoint.X < currentPoint.X)
                        or (nextPoint.y > currentPoint.y and nextPoint.X > currentPoint.X)
                        or (nextPoint.y > currentPoint.y and nextPoint.X < currentPoint.X)) else False
| {
"repo_name": "AhmedHani/Frontier-based-Multi-Agent-Map-Exploration",
"path": "Frontier-based Map Exploration/PathFinder/Astar.py",
"copies": "1",
"size": "4872",
"license": "apache-2.0",
"hash": 8406026599532931000,
"line_mean": 43.6972477064,
"line_max": 109,
"alpha_frac": 0.5632183908,
"autogenerated": false,
"ratio": 3.8728139904610495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9918084144897852,
"avg_score": 0.0035896472726392975,
"num_lines": 109
} |
__author__ = 'Ahmed Hani Ibrahim'
from Structures.MultipleArmedBandit import MultipleArmedBandit
import numpy as np
class Player(object):
    """Epsilon-greedy player for a MultipleArmedBandit game.

    Keeps a per-arm action-value estimate Q and, after each play, the running
    average reward obtained so far.
    """
    # Class-level placeholders; __init__ re-creates them per instance.
    # NOTE(review): __saveActionValue is declared here but never used -- the
    # instance attribute created below is named __saveActionAverageValue.
    __Q = dict()
    __game = 0
    __epsilon = 0.0
    __numberOfBandits = 0
    __numberOfGames = dict()
    __rewardValue = 0.0
    __saveAction = []
    __saveActionValue = []

    @property
    def AvgRewardValue(self):
        pass

    @AvgRewardValue.getter
    def AvgRewardValue(self):
        # Running average reward per game, one entry appended per play.
        return self.__saveActionAverageValue

    def __init__(self, numberOfBandits, epsilon):
        """
        :param numberOfBandits: number of arms in the bandit game
        :param epsilon: exploration probability in [0, 1]
        """
        self.__numberOfBandits = numberOfBandits
        self.__epsilon = epsilon
        self.__game = MultipleArmedBandit(self.__numberOfBandits)
        self.__Q = dict()
        self.__numberOfGames = dict()
        self.__rewardValue = 0.0
        self.__saveAction = []
        self.__saveActionAverageValue = []
        # Optimistic initialisation: a large initial Q forces every arm to be
        # tried early on.
        for i in range(0, self.__numberOfBandits):
            self.__Q[i] = 100000
            self.__numberOfGames[i] = 0

    def runGame(self, games=1, epsilonDecayFactor=None, epsilonDecayStep=None):
        """
        Play `games` rounds with epsilon-greedy arm selection.

        :param games: number of rounds to play
        :param epsilonDecayFactor: multiplier applied to epsilon every
            `epsilonDecayStep` games (no decay when None)
        :param epsilonDecayStep: decay interval in games
            NOTE(review): must not be None when epsilonDecayFactor is given,
            and the decay also fires on game 0 -- confirm callers.
        """
        for game in range(0, games):
            if epsilonDecayFactor != None:
                if game % epsilonDecayStep == 0:
                    self.__epsilon *= epsilonDecayFactor
            bandit = None
            randomValue = np.random.random()
            if randomValue >= self.__epsilon:
                # Exploit: pick the arm with the highest current estimate.
                bandit = max(self.__Q, key=self.__Q.get)
            else:
                # Explore: pick a uniformly random arm.
                bandit = np.random.randint(0, self.__numberOfBandits)
            currentReward = self.__game.drawBanditData(bandit)
            # Incremental sample-average update of the arm's value estimate.
            self.__Q[bandit] += (1.0 / (1.0 + self.__numberOfGames[bandit])) * \
                                (currentReward - self.__Q[bandit])
            self.__numberOfGames[bandit] += 1
            self.__rewardValue += currentReward
            self.__saveAction.append(bandit)
            self.__saveActionAverageValue.append(float(self.__rewardValue / float(game + 1)))
| {
"repo_name": "AhmedHani/Banditology",
"path": "Banditology/Player.py",
"copies": "1",
"size": "1956",
"license": "mit",
"hash": -6590180677935649000,
"line_mean": 30.5483870968,
"line_max": 93,
"alpha_frac": 0.5715746421,
"autogenerated": false,
"ratio": 3.873267326732673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49448419688326734,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed Hani Ibrahim'
import pandas as pnd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
def get_train_data():
    """Read the training set from ./train.csv, parsing 'Dates' as datetimes."""
    return pnd.read_csv("./train.csv", header=0, parse_dates=['Dates'])
def get_test_data():
    """Read the test set from ./test.csv, parsing 'Dates' as datetimes."""
    return pnd.read_csv("./test.csv", header=0, parse_dates=['Dates'])
def vectorize_training_data(training_data):
    """Encode the raw training frame into numeric features, in place.

    Adds Year/Week/Hour columns derived from 'Dates', replaces each
    categorical column with integer codes (sorted unique values -> 0..n-1),
    rounds the X/Y coordinates to two decimals, and returns the frame.
    """
    stamps = training_data['Dates']
    training_data['Year'] = stamps.map(lambda d: d.year)
    training_data['Week'] = stamps.map(lambda d: d.week)
    training_data['Hour'] = stamps.map(lambda d: d.hour)

    # Integer-encode each categorical column with a stable, sorted code table.
    for column in ('Category', 'Descript', 'DayOfWeek', 'PdDistrict', 'Resolution'):
        codes = {value: code for code, value in enumerate(sorted(np.unique(training_data[column])))}
        training_data[column] = training_data[column].map(lambda value: codes[value]).astype(int)

    # Coarsen the coordinates to two decimal places.
    for axis in ('X', 'Y'):
        training_data[axis] = training_data[axis].map(lambda c: "%.2f" % round(c, 2)).astype(float)

    return training_data
def vectorize_testing_data(testing_data):
    """Encode the raw test frame into numeric features, in place.

    Mirrors vectorize_training_data for the columns present in the test set:
    Year/Week/Hour from 'Dates', integer codes for DayOfWeek/PdDistrict,
    and X/Y rounded to two decimals. Returns the frame.
    """
    stamps = testing_data['Dates']
    testing_data['Year'] = stamps.map(lambda d: d.year)
    testing_data['Week'] = stamps.map(lambda d: d.week)
    testing_data['Hour'] = stamps.map(lambda d: d.hour)

    # Integer-encode the categorical columns (sorted unique values -> 0..n-1).
    for column in ('DayOfWeek', 'PdDistrict'):
        codes = {value: code for code, value in enumerate(sorted(np.unique(testing_data[column])))}
        testing_data[column] = testing_data[column].map(lambda value: codes[value]).astype(int)

    # Coarsen the coordinates to two decimal places.
    for axis in ('X', 'Y'):
        testing_data[axis] = testing_data[axis].map(lambda c: "%.2f" % round(c, 2)).astype(float)

    return testing_data
"repo_name": "AhmedHani/Kaggle-Machine-Learning-Competitions",
"path": "Easy/SanFranciscoCrimeClassification/get_data.py",
"copies": "1",
"size": "3368",
"license": "mit",
"hash": -6884163420428945000,
"line_mean": 49.2835820896,
"line_max": 111,
"alpha_frac": 0.6802256532,
"autogenerated": false,
"ratio": 3.089908256880734,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9244220362294822,
"avg_score": 0.005182709557182498,
"num_lines": 67
} |
__author__ = 'Ahmed Hani Ibrahim'
from read_data import *
import numpy as np
import pickle
from draw_data import *
from get_image import *
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn import svm
# Load the Kaggle digit-recognizer training set: labels plus 784-pixel rows,
# reshaped into 28x28 uint8 images.
labels, train_features = read_train_data(
    "G:\\Github Repositories\\KaggleMachineLearningCompetitions\\Easy\\DigitRecognizer\\train.csv")
train_features = np.array(train_features).reshape((len(train_features), 28, 28)).astype(np.uint8)
print(len(train_features))
sample_image = get_image(train_features, 2)
# Pre-computed coordinates of the non-zero ("black") pixels of every image,
# cached with pickle. NOTE(review): absolute Windows paths tie this script to
# one machine, and the single-backslash variants rely on those escapes being
# passed through literally -- confirm before reuse.
image_black_pixels_x = pickle.load(open('G:\Github Repositories\KaggleMachineLearningCompetitions\Easy\DigitRecognizer\image_black_pixels_x', "rb"))
image_black_pixels_y = pickle.load(open('G:\Github Repositories\KaggleMachineLearningCompetitions\Easy\DigitRecognizer\image_black_pixels_y', "rb"))
# The pickle caches above were originally produced by this (now disabled) scan:
'''index = 0
for t in range(len(train_features)):
    for i in range(len(train_features[t])):
        image_black_pixels_x.append([])
        image_black_pixels_y.append([])
        for j in range(len(train_features[i])):
            if train_features[t][i][j] > 0:
                image_black_pixels_x[t].append(i)
                image_black_pixels_y[t].append(j)'''
#print(len(image_black_pixels_x))
# Pad every image's coordinate lists with zeros up to the longest list, so all
# feature vectors end up with one shared length.
max_length = 0
for i in range(len(train_features)):
    if len(image_black_pixels_x[i]) > max_length:
        max_length = len(image_black_pixels_x[i])
for t in range(len(train_features)):
    if len(image_black_pixels_x[t]) < max_length:
        for i in range(max_length - len(image_black_pixels_x[t])):
            image_black_pixels_x[t].append(0)
            image_black_pixels_y[t].append(0)
# Pair up the coordinates: one [x, y] pair per black pixel, per image.
# NOTE(review): this yields a 3-D nested structure, while sklearn estimators
# expect a 2-D feature matrix -- confirm fit() accepts it.
features_2_data = []
for t in range(len(train_features)):
    features_2_data.append([])
    for i in range(len(image_black_pixels_x[t])):
        bx = image_black_pixels_x[t][i]
        by = image_black_pixels_y[t][i]
        features_2_data[t].append([bx, by])
# for i in range(len(sample_image)):
#     idx = 0
#     for j in range(len(sample_image[i])):
#         if sample_image[i][j] > 0:
#             v = sample_image[i][j]
#             image_black_pixels_x.append(i)
#             image_black_pixels_y.append(j)
            #idx += 1
#show_image(sample_image)
#print(image_black_pixels_x)
#print(image_black_pixels_y)
print(len(features_2_data[2][1]))
'''svm_classifier =
svm_classifier.fit(features_2_data, labels)'''
# Fit a logistic-regression classifier on the padded coordinate features.
lr = LogisticRegression()
lr.fit(features_2_data, labels)
| {
"repo_name": "AhmedHani/Kaggle-Machine-Learning-Competitions",
"path": "Easy/DigitRecognizer/main.py",
"copies": "1",
"size": "2414",
"license": "mit",
"hash": -5345464781353137000,
"line_mean": 31.6216216216,
"line_max": 148,
"alpha_frac": 0.6694283347,
"autogenerated": false,
"ratio": 2.9511002444987775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9030151564685729,
"avg_score": 0.01807540290260988,
"num_lines": 74
} |
__author__ = 'Ahmed Hani Ibrahim'
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn import svm
from get_data import *
from get_data_2 import *
from sklearn.feature_extraction.text import CountVectorizer
import scipy.sparse
import csv
# Train a linear SVC on the What's Cooking ingredient matrix, report 5-fold
# cross-validated accuracy, then predict the test-set cuisines and write a
# Kaggle submission CSV.
classifier = svm.LinearSVC()
labels, training_data_matrix, unique_ingredients = get_training_data_matrix(get_train_data())
classifier = classifier.fit(training_data_matrix, labels)
print("Training Done")
print(cross_val_score(classifier, training_data_matrix, labels, cv=5).mean())
print("CV done")
test_data, ids = get_test_data()
test_data_matrix = get_test_data_matrix(test_data, unique_ingredients)
predictions = classifier.predict(test_data_matrix)
print("Predicting Done")
# Map each test id to its predicted cuisine and dump the pairs to CSV.
submission = dict(zip(ids, predictions))
writer = csv.writer(open('Linear_SVC_Result.csv', 'wb'))
writer.writerow(['id', 'cuisine'])
for row_id, cuisine in submission.items():
    writer.writerow([row_id, cuisine])
print("done")
| {
"repo_name": "AhmedHani/Kaggle-Machine-Learning-Competitions",
"path": "Easy/What's Cooking/linear_svc.py",
"copies": "1",
"size": "1063",
"license": "mit",
"hash": -3008878360568274000,
"line_mean": 26.9736842105,
"line_max": 93,
"alpha_frac": 0.7591721543,
"autogenerated": false,
"ratio": 3.231003039513678,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4490175193813678,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed Hani Ibrahim'
import random


class GeneralizedHebbian(object):
    """Generalized Hebbian learning for linear dimensionality reduction.

    Projects input vectors onto ``numberOfFeatures`` learned components; the
    weight matrix is trained with a Hebbian outer-product update rule.
    """

    # Class-level defaults kept for backwards compatibility; the real state
    # lives on the instance (assigned in __init__).
    __input = []
    __numberOfFeatures = 0
    __output = []
    __weights = [[]]
    __learningRate = 0.0

    @property
    def Weights(self):
        """Weight matrix, shape numberOfFeatures x len(input)."""
        return self.__weights

    def __init__(self, input, numberOfFeatures, learningRate):
        """
        :param input: a sample input vector; its length fixes the weight width --[double]
        :param numberOfFeatures: The number of resultant features after reduction --int
        :param learningRate: --double
        """
        self.__input = input
        self.__numberOfFeatures = numberOfFeatures
        self.__learningRate = learningRate
        self.__output = []
        self.__weights = [[random.random() for j in range(0, len(input))] for i in range(0, self.__numberOfFeatures)]

    def train(self, epochs, trainingSamples):
        """
        :param epochs: Number of iterations --int
        :param trainingSamples: The training data --[[double]]
        """
        for iteration in range(0, epochs):
            for sample in trainingSamples:
                self.__output = self.featuresReduction(sample)
                self.__update(sample)

    def featuresReduction(self, features):
        """
        :param features: The features that to be reduced --[double]
        :return: The new features after reduction [double]
        """
        output = [0.0 for i in range(0, self.__numberOfFeatures)]
        for i in range(0, self.__numberOfFeatures):
            for j in range(0, len(features)):
                output[i] += features[j] * self.__weights[i][j]
        return output

    def __update(self, features):
        # BUG FIX: this was a @classmethod reading cls.__weights etc., so it
        # updated the empty class-level defaults instead of the trained
        # instance weights set in __init__.  It is now an instance method.
        for i in range(0, len(self.__weights)):
            for j in range(0, len(self.__weights[i])):
                self.__weights[i][j] += self.__learningRate * self.__output[i] * (features[j] - self.__computeOutput(i, j))

    def __computeOutput(self, outputIndex, inputIndex):
        # BUG FIX: the original did `sum += [list comprehension]`, which adds
        # a list to a float and raises TypeError.  Sum the terms properly.
        return sum(self.__output[i] * self.__weights[i][inputIndex] for i in range(0, outputIndex))
| {
"repo_name": "AhmedHani/Python-Neural-Networks-API",
"path": "DimensionalityReduction/GeneralizedHebbian.py",
"copies": "1",
"size": "2167",
"license": "mit",
"hash": -7934239786521102000,
"line_mean": 30.4057971014,
"line_max": 119,
"alpha_frac": 0.5823719428,
"autogenerated": false,
"ratio": 4.088679245283019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171051188083019,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ahmedlawi92@gmail.com'
import json
import requests
import url_constants
class NBAStatsScraper:
    """Thin scraper around the stats.nba.com JSON endpoints.

    Endpoint URLs (with canned default query strings) come from
    ``url_constants``; each getter overrides those defaults with the caller's
    keyword arguments and returns the first result set as a list of dicts.
    """

    # Shared cache mapping lower-cased "Last, First" names to person ids.
    player_ids = {}

    def __init__(self):
        self.populate_players_dict()

    def get_player_tracking_stats(self, **kwargs):
        """Return player tracking stats rows."""
        return self.__fetch(url_constants.player_tracking_url, kwargs)

    def get_player_shot_tracking_stats(self, **kwargs):
        """Return per-player shot tracking stats rows."""
        return self.__fetch(url_constants.player_shot_tracking, kwargs)

    def get_team_shot_tracking_stats(self, **kwargs):
        """Return per-team shot tracking stats rows."""
        return self.__fetch(url_constants.team_shot_tracking, kwargs)

    def get_lineup_stats(self, **kwargs):
        """Return lineup stats rows."""
        return self.__fetch(url_constants.team_lineups, kwargs)

    def get_player_shot_chart_data(self, player, **kwargs):
        """Return shot-chart rows for *player* (case-insensitive "Last, First" key)."""
        kwargs['PlayerID'] = self.player_ids[player.lower()]
        return self.__fetch(url_constants.shot_chart_url, kwargs)

    def get_player_list(self):
        """Return the raw player-directory rows."""
        base, args = self.build_url(url_constants.player_list_url)
        response = requests.get(base, args)
        return self.extract_data(response)

    def __fetch(self, url, overrides):
        # Shared request path: split the template URL, apply caller overrides
        # to the default query params, issue the GET, and unpack the payload.
        base, args = self.build_url(url)
        self.update_options(args, overrides)
        response = requests.get(base, args)
        return self.extract_data(response)

    def extract_data(self, response):
        """Unpack the first result set of a stats.nba.com payload into row dicts."""
        payload = json.loads(response.content)
        result_set = payload['resultSets'][0]
        columns = result_set['headers']
        return [dict(zip(columns, values)) for values in result_set['rowSet']]

    def build_url(self, url):
        """Split a templated URL into its base and a dict of default params."""
        pieces = url.split('?', 2)
        params = {}
        # '+' encodes a space inside the canned query strings.
        for pair in pieces[1].split("&"):
            tokens = pair.split('=')
            params[tokens[0]] = tokens[1].replace('+', ' ')
        return pieces[0], params

    def update_options(self, args, options):
        """Overwrite defaults in *args* with *options*; unknown keys are ignored."""
        for key, value in options.items():
            if key in args:
                args[key] = value

    def populate_players_dict(self):
        # Prime the name -> id lookup used by get_player_shot_chart_data.
        for player in self.get_player_list():
            self.player_ids[player['DISPLAY_LAST_COMMA_FIRST'].lower()] = player['PERSON_ID']
| {
"repo_name": "ahmedlawi92/basketball-stats",
"path": "bballstats/statsnba/stats_nba_scraper.py",
"copies": "1",
"size": "2460",
"license": "apache-2.0",
"hash": 777384893442659700,
"line_mean": 33.6478873239,
"line_max": 93,
"alpha_frac": 0.6235772358,
"autogenerated": false,
"ratio": 3.591240875912409,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9637828654152991,
"avg_score": 0.01539789151188351,
"num_lines": 71
} |
__author__ = 'ahmedlawi92@gmail.com'
import json
import string
from enum import Enum
from bs4 import BeautifulSoup
import requests
class BBRefScraper:
    """Scrape per-player stat tables from basketball-reference.com.

    The player -> profile-URL mapping is loaded from a JSON file produced by
    create_players_info_json().
    """

    __base_url = 'http://www.basketball-reference.com{s}'
    __url_key = 'info_page'

    def __init__(self, json_file):
        # BUG FIX: `json.load(file(json_file))` used the Python-2-only `file`
        # builtin and never closed the handle; `with open(...)` does both.
        with open(json_file) as handle:
            self.players = json.load(handle)

    def get_table(self, player, table_type):
        """Fetch *player*'s profile page and scrape one stats table.

        :param player: key into the players mapping loaded in __init__
        :param table_type: a TableTypes member naming the HTML table id
        :return: list of row dicts keyed by the table's column headers
        """
        player_url = self.players[player][self.__url_key]
        page = requests.get(self.__base_url.format(s=player_url))
        soup = BeautifulSoup(page.content, 'html.parser')
        return self.__scrape_table(soup.find('table', id=table_type.value))

    def __scrape_table(self, table):
        # Header cells give the column names; each body row becomes one dict.
        columns = [col.string for col in table.find_all('th')]
        stats = [{columns[i]: self.__format_line(cell) for i, cell in enumerate(row.find_all("td"))} for row in table.tbody.find_all('tr')]
        return stats

    def __format_line(self, v):
        # Prefer the anchor text when the cell wraps its value in a link.
        t = v.a.string if v.a is not None else v.string
        if t is None:
            return t
        # Numeric-looking cells come back as floats; everything else as-is.
        try:
            t = float(t)
            return t
        except ValueError:
            return t
class TableTypes(Enum):
    """HTML ``id`` values of the stat tables on a basketball-reference
    player page; the value is passed to BeautifulSoup's find(..., id=...)."""
    TOTALS = 'totals'
    ADVANCED = 'advanced'
    SHOOTING = 'shooting'
    POSSESSION = 'per_poss'
    PER_GAME = 'per_game'
    PER_36 = 'per_minute'
def create_players_info_json():
    """Crawl the basketball-reference player index (a-z) and write players_info.json.

    For every listed player the row's cells are stored keyed by the table
    headers, plus an "info_page" entry holding the player's profile URL path.
    """
    base_url = "http://www.basketball-reference.com/players/{s}"
    players = {}
    letters = string.ascii_lowercase
    for letter in letters:
        page = requests.get(base_url.format(s=letter))
        soup = BeautifulSoup(page.content, 'html.parser')
        player_table = soup.find(id="players")
        # Some letters (e.g. 'x') have no player index page.
        if player_table is None:
            continue
        columns = [col.string for col in player_table.find_all('th')]
        for player_data in player_table.tbody.find_all('tr'):
            name = player_data.td.a.string
            players[name] = {columns[i]: cell.string for i, cell in enumerate(player_data.find_all("td"))}
            players[name]["info_page"] = player_data.td.a['href']
    # BUG FIX: the original called `f.close` without parentheses, so the
    # handle was never closed; a context manager closes and flushes it.
    with open('players_info.json', 'w') as out:
        out.write(json.dumps(players, sort_keys=True, indent=4))
| {
"repo_name": "ahmedlawi92/basketball-stats",
"path": "bballstats/bballreference/bbref_scraper.py",
"copies": "1",
"size": "2169",
"license": "apache-2.0",
"hash": -3160957800398579700,
"line_mean": 31.3731343284,
"line_max": 139,
"alpha_frac": 0.6090364223,
"autogenerated": false,
"ratio": 3.3627906976744186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44718271199744186,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ahmed'
from pymongo import MongoClient
import json
import re
from os import listdir
from os.path import isfile, join
# Load every hotel JSON dump from ./json, sanitise the review text, and
# insert one document per hotel into the ``hotelinfo.hotels`` collection.
client = MongoClient()
db = client.hotelinfo
inserted_count = 0
for filename in [f for f in listdir('json') if isfile(join('json', f))]:
    if filename.find(".json") == -1:
        continue
    print(filename)
    hotel = {}
    try:
        # Context manager closes the handle; skip files with broken JSON.
        with open('json/' + filename) as handle:
            hotelinfo = json.load(handle)
    except Exception:
        continue
    hotel["HotelInfo"] = hotelinfo["HotelInfo"]
    reviews = hotelinfo["Reviews"]
    finalReviewList = []
    for i in range(len(reviews)):
        review = {}
        # Strip everything except letters, digits, periods and whitespace.
        text = re.sub('[^A-Za-z0-9\.\s]+', '', reviews[i]["Content"])
        text = '"' + text + '"'
        try:
            review["Content"] = text
            review["ReviewID"] = reviews[i]["ReviewID"]
            # BUG FIX: these fields used to be read from ``review[i]`` (the
            # dict being built, indexed by an int) instead of ``reviews[i]``;
            # the resulting KeyError sent every review into the except branch
            # and silently dropped it.
            review["Ratings"] = reviews[i]["Ratings"]
            review["Author"] = reviews[i]["Author"]
            review["AuthorLocation"] = reviews[i]["AuthorLocation"]
            review["Title"] = reviews[i]["Title"]
            review["Date"] = reviews[i]["Date"]
            finalReviewList.append(review)
        except Exception:
            # Skip reviews missing any of the expected fields.
            continue
    hotel["Reviews"] = finalReviewList
    db.hotels.insert(hotel)
    print(inserted_count)
    inserted_count += 1
| {
"repo_name": "ahmedshabib/evergreen-gainsight-hack",
"path": "mongodumper.py",
"copies": "1",
"size": "1275",
"license": "mit",
"hash": 8569189593942164000,
"line_mean": 26.7173913043,
"line_max": 71,
"alpha_frac": 0.5262745098,
"autogenerated": false,
"ratio": 3.581460674157303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9329970913019019,
"avg_score": 0.055552854187656694,
"num_lines": 46
} |
__author__ = 'Ahmed'
import time
import calendar
from flask import Flask, request, session, g, redirect, url_for, render_template, flash
from pymongo import MongoClient
import json
import uuid
from werkzeug.utils import secure_filename
from werkzeug.security import check_password_hash, generate_password_hash
import random
from flask_oauth import OAuth
import math
from StringIO import StringIO
# Flask application setup; extra configuration can be supplied via the
# MYDIARY_SETTINGS environment variable (silently ignored when unset).
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('MYDIARY_SETTINGS', silent=True)
# Mongo connection shared by all request handlers.
client = MongoClient()
db = client.hotelinfo
@app.route('/hotelinfo/')
def get_hotel_insight():
    """Render the insight page for one hotel: its reviews plus an aggregate
    rating and a positive-sentiment percentage derived from the ``insight``
    collection.  ?hotelid=<id> selects the hotel.
    """
    hotelid = request.args.get("hotelid")
    reviews = db.insight.find({"HotelID":hotelid}, {"_id": 0})
    hotelinfo = db.hotels.find_one({"HotelInfo.HotelID":hotelid}, {"_id": 0})
    review_arr = [a for a in reviews]
    print review_arr
    print hotelinfo
    overallrating = 0
    sentiment =0
    # Fallback values shown when the hotel has no insight rows at all.
    hotelinfo["HotelInfo"]["Rating"] = 3
    hotelinfo["HotelInfo"]["Sentiment"] = 50
    total_reviews = len(review_arr)
    for i in range(total_reviews):
        # Accumulate the numeric rating and the '0'/'1' sentiment flag ...
        overallrating += float(review_arr[i].get("Overall"))
        sentiment += int(review_arr[i]["Sentiment"])
        print sentiment
        # ... then replace the flag with a boolean for the template.
        if review_arr[i]["Sentiment"] == '1' :
            review_arr[i]["Sentiment"] = True
        else:
            review_arr[i]["Sentiment"] = False
    if(len(review_arr) > 0):
        print str(total_reviews) + "I am"
        # Truncated average rating and positive share in whole percent.
        hotelinfo["HotelInfo"]["Rating"] = int(overallrating/total_reviews)
        hotelinfo["HotelInfo"]["Sentiment"] = math.ceil(sentiment*100.0/total_reviews)
    print hotelinfo["HotelInfo"]["Rating"]
    print hotelinfo["HotelInfo"]["Sentiment"]
    return render_template("hotelinfo.html",reviews=review_arr,hotelinfo=hotelinfo)
@app.route('/gethotelinfo/')
def get_hotel_info():
    """Return the stored document(s) for one hotel (?hotelid=<id>) as JSON."""
    hotel_id = str(request.args.get("hotelid"))
    print(hotel_id)
    matches = db.hotels.find({"HotelInfo.HotelID": hotel_id}, {"_id": 0})
    return json.dumps(list(matches), indent=4)
@app.route('/search/')
def get_hotel_search():
    """Full-text search (?keyword=...) over hotels; returns up to 20 matching
    HotelInfo blocks as JSON."""
    keyword = str(request.args.get("keyword"))
    print(keyword)
    hits = db.command('text', 'hotels', search=keyword, limit=20)['results']
    matched_docs = [hit['obj'] for hit in hits]
    return json.dumps([doc["HotelInfo"] for doc in matched_docs], indent=4)
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template("index.html",error=None)
if __name__ == '__main__':
    # Listen on all interfaces; threaded so concurrent requests don't block.
    app.run(host='0.0.0.0',threaded=True)
| {
"repo_name": "ahmedshabib/evergreen-gainsight-hack",
"path": "webapi.py",
"copies": "1",
"size": "2483",
"license": "mit",
"hash": -5947337194787184000,
"line_mean": 30.4303797468,
"line_max": 87,
"alpha_frac": 0.6653242046,
"autogenerated": false,
"ratio": 3.069221260815822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4234545465415822,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ahmed'
import boto3, argparse, yaml
from time import sleep
import os.path
def tag_instances(awsTags):
    """Copy the configured tags from each instance's CloudFormation stack onto
    the instance itself.  Instances not created by CloudFormation are skipped,
    as are tags the instance already carries.

    NOTE(review): relies on Python 2 ``filter()`` returning a list — under
    Python 3 the filter object is always truthy and the skip checks break.
    """
    reservations = ec2Client.describe_instances()
    instances = [ i['Instances'] for i in reservations['Reservations']]
    # Iterate EC2 instances ...
    # if instance is part of Cloudformation, inherit tags from stack
    for i in instances:
        # Only i[0] (the first instance of each reservation) is examined.
        stackId = filter(lambda a:a["Key"] == 'aws:cloudformation:stack-id', i[0]['Tags'])
        if stackId:
            try:
                stack = cfClient.describe_stacks(StackName=stackId[0]['Value'])
            except:
                # Stack may have been deleted since the instance was created.
                continue
            for t in awsTags["mandatory"] + awsTags["optional"]:
                if filter(lambda a:a["Key"] == t, i[0]['Tags']):
                    if verbose:
                        print('%s already exists on %s, skipping' % (t, i[0]['InstanceId']))
                else:
                    for tag in stack['Stacks'][0]['Tags']:
                        if t in tag['Key']:
                            if verbose:
                                print('Tagging instance %s with tag %s and value %s' % (i[0]['InstanceId'], tag['Key'], tag['Value']))
                            ec2Client.create_tags(
                                Resources=[i[0]['InstanceId']],
                                Tags=[tag]
                            )
                            # Throttle to stay under the EC2 API rate limit.
                            sleep(sleep_time)
def tag_volumes(awsTags):
    """Copy the configured tags from each attached EBS volume's EC2 instance
    onto the volume.  Detached volumes and already-present tags are skipped.
    """
    # Iterate EBS volumes ...
    volumes = ec2Client.describe_volumes()
    for v in volumes['Volumes']:
        # Volumes with no tags at all have no 'Tags' key.
        try:
            volumeTags = v['Tags']
        except:
            volumeTags= []
        # Inherit Tags for attached volumes from EC2 instance
        if v['State'] == 'in-use':
            reservations = ec2Client.describe_instances(InstanceIds=[v['Attachments'][0]['InstanceId']])
            instances = [ i['Instances'] for i in reservations['Reservations']]
            for t in awsTags["mandatory"] + awsTags["optional"]:
                # NOTE(review): Python 2 filter() semantics (returns a list).
                if filter(lambda a:a["Key"] == t, volumeTags):
                    if verbose:
                        print('%s already exists on %s, skipping' % (t, v['VolumeId']))
                else:
                    for tag in instances[0][0]['Tags']:
                        if t in tag['Key']:
                            if verbose:
                                print('Tagging volume %s with tag %s and value %s' % (v['VolumeId'], tag['Key'], tag['Value']))
                            ec2Client.create_tags(
                                Resources=[v['VolumeId']],
                                Tags=[tag]
                            )
                            # Throttle to stay under the EC2 API rate limit.
                            sleep(sleep_time)
def tag_snapshots(awsTags):
    """Copy the configured tags from each snapshot's source volume onto the
    snapshot.  Snapshots whose source volume was deleted are skipped.

    NOTE(review): if describe_snapshots fails, ``snapshots`` becomes a plain
    list and the subsequent ``snapshots['Snapshots']`` would raise TypeError.
    """
    # Iterate EBS snapshots ...
    try:
        snapshots = ec2Client.describe_snapshots(OwnerIds=['self'])
    except:
        snapshots = []
    for s in snapshots['Snapshots']:
        try:
            snapshotTags = s['Tags']
        except KeyError:
            snapshotTags = []
        # Inherit myTags for snapshots from volumes
        try:
            volumes = ec2Client.describe_volumes(VolumeIds=[s['VolumeId']])
        except:
            # If snapshot has it's original volume delete
            continue
        for t in awsTags["mandatory"] + awsTags["optional"]:
            # NOTE(review): Python 2 filter() semantics (returns a list).
            if filter(lambda a:a["Key"] == t, snapshotTags):
                if verbose:
                    print('%s already exists on %s, skipping' % (t, s['SnapshotId']))
            else:
                # Source volume with no tags at all -> nothing to inherit.
                try:
                    if not volumes['Volumes'][0]['Tags']:
                        continue
                except KeyError:
                    print('%s has no tags, skipping' % volumes['Volumes'][0]['VolumeId'])
                    break
                for tag in volumes['Volumes'][0]['Tags']:
                    if t in tag['Key']:
                        if verbose:
                            print('Tagging snapshot %s with tag %s and value %s' % (s['SnapshotId'], tag['Key'], tag['Value']))
                        ec2Client.create_tags(
                            Resources=[s['SnapshotId']],
                            Tags=[tag]
                        )
                        # Throttle to stay under the EC2 API rate limit.
                        sleep(sleep_time)
def tag_elbs(awsTags):
    """Copy the configured tags onto every ELB: from its CloudFormation stack
    when it has one, otherwise from the VPC it lives in.
    """
    # Iterate ELB resources ...
    # if ELB is part of Cloudformation inherit tags from stack otherwise inherit from VPC
    elbs = elbClient.describe_load_balancers()
    for i in elbs['LoadBalancerDescriptions']:
        elbTags = elbClient.describe_tags(LoadBalancerNames=[i['LoadBalancerName']])['TagDescriptions'][0]['Tags']
        # NOTE(review): Python 2 filter() semantics (returns a list).
        stackId = filter(lambda a:a["Key"] == "aws:cloudformation:stack-id", elbTags)
        for t in awsTags["mandatory"] + awsTags["optional"]:
            if filter(lambda a:a["Key"] == t, elbTags):
                if verbose:
                    print('%s already exists on %s, skipping' % (t, i['LoadBalancerName']))
            else:
                if stackId:
                    try:
                        stack = cfClient.describe_stacks(StackName=stackId[0]['Value'])
                    except:
                        # Stack lookup failed; give up on this ELB entirely.
                        print('fail')
                        break
                    for tag in stack['Stacks'][0]['Tags']:
                        if t in tag['Key']:
                            if verbose:
                                print('Tagging ELB %s with tag %s and value %s' % (i['LoadBalancerName'], tag['Key'], tag['Value']))
                            elbClient.add_tags(
                                LoadBalancerNames=[i['LoadBalancerName']],
                                Tags=[tag]
                            )
                            # Throttle to stay under the ELB API rate limit.
                            sleep(sleep_time)
                else:
                    # No stack: fall back to inheriting from the VPC's tags.
                    vpc = ec2Client.describe_vpcs(VpcIds=[i['VPCId']])
                    for tag in vpc['Vpcs'][0]['Tags']:
                        if filter(lambda a:a["Key"] == t, elbTags):
                            if verbose:
                                print('%s already exists on %s, skipping' % (t, i['LoadBalancerName']))
                        elif t in tag['Key']:
                            if verbose:
                                print('Tagging ELB %s with tag %s and value %s' % (i['LoadBalancerName'], tag['Key'], tag['Value']))
                            elbClient.add_tags(
                                LoadBalancerNames=[i['LoadBalancerName']],
                                Tags=[tag]
                            )
                            sleep(sleep_time)
def tag_rds(awsTags, region):
    """Copy the configured tags onto every RDS instance: from its
    CloudFormation stack when it has one, otherwise from its VPC.

    The account id (needed to build each instance's ARN) is taken from the
    first IAM user, falling back to the first IAM role.
    """
    # Iterate RDS resources ...
    # if RDS instance is part of Cloudformation inherit tags from stack otherwise inherit from VPC
    instances = rdsClient.describe_db_instances()
    # Construct the ARN to be used for looking up RDS instances
    user = iamClient.list_users()
    if user['Users']:
        accountId = user['Users'][0]['Arn'].split(':')[4]
        arnbase = 'arn:aws:rds:%s:%s:db:' % (region, accountId)
    else:
        role = iamClient.list_roles()
        accountId = role['Roles'][0]['Arn'].split(':')[4]
        arnbase = 'arn:aws:rds:%s:%s:db:' % (region, accountId)
    for i in instances['DBInstances']:
        dbarn = arnbase + i['DBInstanceIdentifier']
        rdsTags = rdsClient.list_tags_for_resource(ResourceName=dbarn)['TagList']
        # NOTE(review): Python 2 filter() semantics (returns a list).
        stackId = filter(lambda a:a["Key"] == "aws:cloudformation:stack-id", rdsTags)
        for t in awsTags["mandatory"] + awsTags["optional"]:
            if filter(lambda a:a["Key"] == t, rdsTags):
                if verbose:
                    print('%s already exists on %s, skipping' % (t, dbarn))
            else:
                if stackId:
                    try:
                        stack = cfClient.describe_stacks(StackName=stackId[0]['Value'])
                    except:
                        # Stack lookup failed; give up on this instance.
                        print('fail')
                        break
                    for tag in stack['Stacks'][0]['Tags']:
                        if t in tag['Key']:
                            if verbose:
                                print('Tagging RDS instance %s with tag %s and value %s' % (dbarn, tag['Key'], tag['Value']))
                            rdsClient.add_tags_to_resource(
                                ResourceName=dbarn,
                                Tags=[tag]
                            )
                            # Throttle to stay under the RDS API rate limit.
                            sleep(sleep_time)
                else:
                    # No stack: fall back to inheriting from the VPC's tags.
                    vpc = ec2Client.describe_vpcs(VpcIds=[i['DBSubnetGroup']['VpcId']])
                    for tag in vpc['Vpcs'][0]['Tags']:
                        if filter(lambda a:a["Key"] == t, rdsTags):
                            if verbose:
                                print('%s already exists on %s, skipping' % (t, dbarn))
                        elif t in tag['Key']:
                            if verbose:
                                print('Tagging RDS instance %s with tag %s and value %s' % (dbarn, tag['Key'], tag['Value']))
                            rdsClient.add_tags_to_resource(
                                ResourceName=dbarn,
                                Tags=[tag]
                            )
                            sleep(sleep_time)
def main():
    """Parse CLI args, load and validate the tag config, then propagate tags
    across EC2 instances, EBS volumes/snapshots, ELBs and RDS instances."""
    global ec2Client, cfClient, elbClient, rdsClient, iamClient, sleep_time, verbose
    argparser = argparse.ArgumentParser(description='Enforces tagging of AWS Resources')
    argparser.add_argument('--profile', help='AWS Account profile to authenticate with', default=None, required=False)
    argparser.add_argument('--tags', help='Yaml file containing mandatory and optional tags to copy', required=True)
    argparser.add_argument("--verbose", help="increase output verbosity", action="store_true")
    argparser.add_argument('--region', help='AWS Region to work within, defaults to eu-central-1', default='eu-central-1', required=False)
    args = argparser.parse_args()
    profile = args.profile
    tagFile = args.tags
    verbose = args.verbose
    region = args.region
    # Run some tests to make sure we're working with a valid yaml file
    if os.path.isfile(tagFile):
        # safe_load refuses arbitrary Python object construction (the tag
        # file only ever holds plain lists/strings), and the context manager
        # closes the handle the original version leaked.
        with open(tagFile, 'r') as f:
            awsTags = yaml.safe_load(f)
    else:
        print('Supplied file does not exist: %s' % tagFile)
        raise Exception('Supplied file does not exist: %s' % tagFile)
    if 'mandatory' not in awsTags:
        print('This yaml file is not valid!\n It does not contain the mandatory tags!')
        raise Exception('This yaml file is not valid!\n It does not contain the mandatory tags!')
    elif 'optional' not in awsTags:
        print('This yaml file is not valid!\n It does not contain the optional tags!')
        raise Exception('This yaml file is not valid!\n It does not contain the optional tags!')
    # Delay (seconds) between tagging calls, to stay under AWS rate limits.
    sleep_time = 1
    if profile != None: boto3.setup_default_session(profile_name=profile)
    ec2Client = boto3.client('ec2', region_name=region)
    cfClient = boto3.client('cloudformation', region_name=region)
    elbClient = boto3.client('elb', region_name=region)
    rdsClient = boto3.client('rds', region_name=region)
    iamClient = boto3.client('iam', region_name=region)
    print('Tagging instances\n')
    tag_instances(awsTags)
    print('Tagging volumes\n')
    tag_volumes(awsTags)
    print('Tagging snapshots\n')
    tag_snapshots(awsTags)
    print('Tagging ELBs\n')
    tag_elbs(awsTags)
    print('Tagging RDS instances\n')
    tag_rds(awsTags, region)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "borkit/scriptdump",
"path": "AWS/tag_aw_resources.py",
"copies": "1",
"size": "11248",
"license": "mit",
"hash": 2657007533907976700,
"line_mean": 42.2834645669,
"line_max": 138,
"alpha_frac": 0.5090682788,
"autogenerated": false,
"ratio": 4.366459627329193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009882271445433589,
"num_lines": 254
} |
import json, time, logging
from os import path, getcwd, system, chdir
from sys import stdout
from shutil import copyfile
from subprocess import check_call, STDOUT, DEVNULL
from update_values_helpers import *
# Log to stdout so build progress interleaves with console output.
logging.basicConfig(stream=stdout, level=logging.INFO)
logger = logging.getLogger("build_resume")
# set absolute paths for 'build/' and 'data/' directories
src_dir = path.abspath(path.dirname(__file__))
build_dir = path.abspath(path.join(src_dir, "../build"))
data_dir = path.abspath(path.join(src_dir, "../data"))
def get_json_from_data_file(filename):
    """Load *filename* from the data directory and return its parsed JSON.

    Returns {} when the file is missing (the error is logged); callers must
    tolerate the fallback value.  NOTE(review): the `return` in `finally`
    also swallows parse errors, preserved from the original behaviour.
    """
    json_to_return = {}
    try:
        data_file = path.join(data_dir, filename)
        # Context manager closes the handle; the original left it open.
        with open(data_file) as handle:
            json_to_return = json.load(handle)
    except FileNotFoundError:
        logger.error("Error loading file: {}".format(filename), exc_info=True)
    finally:
        return json_to_return
def sanitize_latex_syntax(line):
    """Escape characters LaTeX treats specially (currently only '#')."""
    return "\#".join(line.split("#"))
def update_shared_values(dict_values):
    """Fill placeholders shared by every document: the 'about me' header
    fields and today's date."""
    logger.debug("adding header, date data to 'dict_values'")
    # about me
    generate_about(dict_values, get_json_from_data_file('about.json'))
    # date created
    dict_values["DATE~CREATED"] = time.strftime("%Y-%m-%d")
def update_resume_values(dict_values):
    """Populate resume-specific placeholders: education, up to three jobs and
    projects, and the language proficiency lines."""
    logger.debug("adding resume values data to 'dict_values'")
    # education: only the first (most recent) entry is used
    generate_school_info(dict_values, get_json_from_data_file('education.json')[0])
    # work experience -> W1~..W3~ placeholders
    for slot, job in enumerate(get_json_from_data_file('experience.json')[:3], start=1):
        generate_work_experience(dict_values, job, slot)
    # projects -> P1~..P3~ placeholders
    for slot, proj in enumerate(get_json_from_data_file('projects.json')[:3], start=1):
        generate_project(dict_values, proj, slot)
    # languages, bucketed by proficiency
    generate_languages(dict_values, get_json_from_data_file('additional.json')['languages'])
def update_references_values(dict_values):
    """Populate the three reference placeholders (R1~..R3~)."""
    logger.debug("adding references data to 'dict_values'")
    for slot, ref in enumerate(get_json_from_data_file('references.json')[:3], start=1):
        generate_reference(dict_values, ref, slot)
def generate_new_tex_file_with_values(values, input_template, output_filename):
    """Instantiate *input_template* into *output_filename*, replacing every
    placeholder key found in *values* and escaping LaTeX specials.

    :param values: mapping of placeholder -> replacement text
    :param input_template: path of the .tex template to read
    :param output_filename: path of the .tex file to write
    """
    logger.debug("generating new tex file '{}' using input template '{}'".format(output_filename, input_template))
    # copy .tex template into a new 'output_filename.tex' (keeps the
    # template's permissions; contents are overwritten just below)
    copyfile(input_template, output_filename)
    # use `values` to replace placeholders in the template with real values;
    # context managers close both handles even if a replacement fails
    # (the original closed them manually and would leak on error)
    with open(input_template, 'r') as resume_template, open(output_filename, 'w') as output_tex:
        for line in resume_template:
            for key in values:
                line = line.replace(key, values[key])
            output_tex.write(sanitize_latex_syntax(line))
def generate_pdf_from_tex_template(output_tex_filename):
    """Run pdflatex on the generated .tex file, producing a PDF in build_dir.

    Note: chdir's into build_dir so pdflatex drops its outputs there; the
    working directory is not restored afterwards.
    """
    logger.debug("generating pdf from tex file '{}'".format(output_tex_filename))
    chdir(build_dir)
    # export filename.tex into a pdf; nonstopmode keeps pdflatex from
    # stopping at prompts, and its console output is discarded
    check_call(['pdflatex', '-interaction=nonstopmode', output_tex_filename], stdout=DEVNULL, stderr=STDOUT)
    logger.info("pdf created at {}".format(output_tex_filename.replace('.tex','.pdf')))
def build_resume():
    """Build Resume_<LastName>.pdf from the resume template and JSON data."""
    logger.info("\n\nbuilding resume...")
    # collect placeholder values from the json files
    values = {}
    update_shared_values(values)
    update_resume_values(values)
    # derive the output filename from the last word of the full name
    template_path = path.join(build_dir, "resume.tex")
    surname = values['FULL~NAME'].split()[-1]
    output_path = path.join(build_dir, "Resume{}".format("_" + surname if surname else "") + ".tex")
    # render the template and compile it into a pdf
    generate_new_tex_file_with_values(values, template_path, output_path)
    generate_pdf_from_tex_template(output_path)
def build_references():
    """Build References_<LastName>.pdf from the references template and JSON data."""
    logger.info("\n\nbuilding references...")
    # collect placeholder values from the json files
    values = {}
    update_shared_values(values)
    update_references_values(values)
    # derive the output filename from the last word of the full name
    template_path = path.join(build_dir, "references.tex")
    surname = values['FULL~NAME'].split()[-1]
    output_path = path.join(build_dir, "References{}".format("_" + surname if surname else "") + ".tex")
    # render the template and compile it into a pdf
    generate_new_tex_file_with_values(values, template_path, output_path)
    generate_pdf_from_tex_template(output_path)
def build_cover_letter():
    """Build CoverLetter_<LastName>.pdf from the letter template, the shared
    header values, and the body text in data/coverletter.txt."""
    logger.info("\n\nbuilding cover letter...")
    values = {}
    update_shared_values(values)
    # read the letter body, normalising curly apostrophes to ASCII quotes
    with open(path.join(data_dir, "coverletter.txt"), 'r') as letter_file:
        body = "".join(line.replace("’", "'").replace("‘", "'") for line in letter_file)
    values["CL~TEXT"] = body
    values["FORMAL~DATE"] = humanize_date(values["DATE~CREATED"], formalize=True)
    # derive the output filename from the last word of the full name
    template_path = path.join(build_dir, "coverletter.tex")
    surname = values['FULL~NAME'].split()[-1]
    output_path = path.join(build_dir, "CoverLetter{}".format("_" + surname if surname else "") + ".tex")
    # render the template and compile it into a pdf
    generate_new_tex_file_with_values(values, template_path, output_path)
    generate_pdf_from_tex_template(output_path)
def clean_up():
    """Delete pdflatex auxiliary files from the current directory.

    Runs after the builds, which have already chdir'd into build_dir.
    """
    # The original ran "rm *.aux" twice; once per pattern is enough.
    for pattern in ("*.aux", "*.fls", "*.gz", "*latexmk"):
        system("rm " + pattern)
if __name__ == "__main__":
    # Build all three documents, then remove LaTeX auxiliary files.
    build_resume()
    build_references()
    build_cover_letter()
    clean_up()
| {
"repo_name": "atla5/resume",
"path": "src/build_resume.py",
"copies": "1",
"size": "6189",
"license": "mit",
"hash": -1120989275830357100,
"line_mean": 32.6141304348,
"line_max": 114,
"alpha_frac": 0.6740501213,
"autogenerated": false,
"ratio": 3.5586881472957423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4732738268595742,
"avg_score": null,
"num_lines": null
} |
import logging

logger = logging.getLogger(__name__)

# Month names used by humanize_date, indexed by month number - 1:
# abbreviated and formal spellings.
months = ["Jan", "Feb", "March", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"]
months_full = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]


def humanize_date(yyyy_mm, formalize=False):
    """Turn a "YYYY-MM" string into e.g. "May 2020".

    :param yyyy_mm: date string; anything without a '-' is returned unchanged
    :param formalize: use the full month name ("January") instead of "Jan"
    :return: the humanized date, or the input unchanged when it can't be parsed
    """
    output = yyyy_mm
    try:
        if '-' in yyyy_mm:
            tokens = yyyy_mm.split('-')
            year = tokens[0]
            month = int(tokens[1])
            if 0 < month <= 12:
                names = months_full if formalize else months
                output = "{} {}".format(names[month - 1], year)
            else:
                logger.warning("Invalid month: {}\n".format(yyyy_mm))
    except (IndexError, ValueError, TypeError):
        # The original swallowed these via a `return` inside `finally`;
        # handle them explicitly (same result, now logged).
        logger.warning("Improperly formatted date: {}\n".format(yyyy_mm))
    return output
def humanize_list(ls):
    """Render an iterable as a comma-separated string, e.g. [1, 'a'] -> "1, a"."""
    return ", ".join(map(str, ls))
def generate_about(dict_values, about):
    """Fill the header placeholders (name, objective, contact, accounts, and
    two highlight lines) from the 'about' mapping."""
    contact = about['contact']
    accounts = about['accounts']
    highlights = about['overview']
    dict_values["FULL~NAME"] = about['name']
    dict_values["OBJECTIVE"] = about['objective']
    # blank out missing/falsy contact channels
    dict_values["EMAIL"] = contact['email'] if contact['email'] else ""
    dict_values["PHONE"] = contact['phone'] if contact['phone'] else ""
    dict_values["GITHUB"] = "{} - {}".format(accounts['github'], accounts['github-org'])
    # drop the scheme for display purposes
    dict_values["WEBSITE"] = about['url'].replace('http://', '')
    dict_values["HIGHLIGHT~1"] = highlights[0]
    dict_values["HIGHLIGHT~2"] = highlights[1]
def generate_school_info(dict_values, school, id=None):
    """Fill the SCHOOL~* (or SCHOOL~<id>*) placeholders from one education
    entry; falls back to the minor when no notes are present."""
    logging.debug("updating school values...")
    prefix = "SCHOOL~" + (str(id) if id else "")
    school_notes = school['notes']
    dict_values[prefix + "NAME"] = school['school_name']
    dict_values[prefix + "DEGREE"] = "{} in {}".format(school['degree'], school['major'])
    dict_values[prefix + "TIME~START"] = humanize_date(school['time_start'])
    dict_values[prefix + "TIME~END"] = humanize_date(school['time_end'])
    # first note, or the minor when there are no notes at all
    dict_values[prefix + "NOTE~1"] = school_notes[0] if school_notes else "Minor in {}".format(school['minor'])
    dict_values[prefix + "NOTE~2"] = school_notes[1] if len(school_notes) >= 2 else ""
def generate_work_experience(dict_values, work, id=1):
    """Fill the W<id>~* placeholders from one work-experience entry.

    Up to three responsibility lines are used; when none are listed, the
    short summary stands in for the first line.  A missing 'time_end' means
    the job is current ("Present").
    """
    logging.debug("updating work experience values for work '{}'".format(id))
    prefix = "W{}~".format(id)
    responsibilities = work['responsibilities']
    num_responsibilities = len(responsibilities)
    # BUG FIX: .get avoids a KeyError when responsibilities is empty and
    # 'summary_short' is also absent (the old fallback indexed it directly).
    summary = work.get('summary_short', "")
    dict_values.update({
        prefix + "NAME": work['company_name'],
        prefix + "POSITION": work['position'],
        prefix + "TIME~START": humanize_date(work['time_start']),
        prefix + "TIME~END": humanize_date(work['time_end']) if 'time_end' in work else "Present",
        prefix + "ADVISOR~NAME": work['advisor_name'],
        prefix + "ADVISOR~POSITION": work['advisor_position'],
        prefix + "ADVISOR~CONTACT": work['advisor_contact'],
        prefix + "RESPONSIBILITY~1": responsibilities[0] if num_responsibilities >= 1 else summary,
        prefix + "RESPONSIBILITY~2": responsibilities[1] if num_responsibilities >= 2 else "",
        prefix + "RESPONSIBILITY~3": responsibilities[2] if num_responsibilities >= 3 else "",
        prefix + "SUMMARY": summary
    })
def generate_reference(dict_values, reference, id=1):
    """Fill the R<id>~* placeholders from one reference entry; the contact
    line combines email and, when present, phone."""
    logging.debug("updating reference '{}'".format(id))
    prefix = "R{}~".format(id)
    contact = reference['email']
    if 'phone' in reference and reference['phone']:
        contact = "{} - {}".format(contact, reference['phone'])
    dict_values[prefix + "NAME"] = reference['name']
    dict_values[prefix + "CONTACT"] = contact
    dict_values[prefix + "POSITION"] = reference['position']
    dict_values[prefix + "DATE~START"] = humanize_date(reference['date_start'])
    # missing end date means the relationship is ongoing
    dict_values[prefix + "DATE~END"] = humanize_date(reference['date_end']) if 'date_end' in reference else "Present"
    dict_values[prefix + "RELATIONSHIP"] = reference['relationship']
    dict_values[prefix + "IMPORTANCE"] = reference['importance']
def generate_project(dict_values, project, id=1):
    """Fill the P<id>~* placeholders (name + short description) for one project."""
    logging.debug("updating project info for project '{}'".format(id))
    key_prefix = "P{}~".format(id)
    dict_values[key_prefix + "NAME"] = project['name']
    dict_values[key_prefix + "DESCRIPTION"] = project['description_short']
def generate_language_entry(dict_values, level, languages, id=1):
    """Fill LEVEL~<id>/LANGUAGES~<id>; both come out empty when *languages* is empty."""
    logging.debug("updating language entry for level '{}'".format(level))
    suffix = "~{}".format(id)
    dict_values["LEVEL" + suffix] = level if languages else ""
    dict_values["LANGUAGES" + suffix] = humanize_list([lang['name'] for lang in languages] if languages else "")
def generate_languages(dict_values, languages):
    """Bucket languages by proficiency and emit one placeholder entry per level."""
    # proficiency tiers, strongest first; anything unrecognised falls into
    # the last ("Novice") bucket
    tiers = ("Intermediate", "Functional", "Novice")
    buckets = {tier: [] for tier in tiers}
    for language in languages:
        if language['proficiency'] == tiers[0]:
            buckets[tiers[0]].append(language)
        elif language['proficiency'] == tiers[1]:
            buckets[tiers[1]].append(language)
        else:
            buckets[tiers[2]].append(language)
    # update dict_values with each grouping of languages
    for slot, tier in enumerate(tiers, start=1):
        generate_language_entry(dict_values, tier, buckets[tier], slot)
| {
"repo_name": "atla5/resume",
"path": "src/update_values_helpers.py",
"copies": "1",
"size": "5687",
"license": "mit",
"hash": -5475702164675141000,
"line_mean": 36.9133333333,
"line_max": 136,
"alpha_frac": 0.6135044839,
"autogenerated": false,
"ratio": 3.6199872692552515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9729830819681051,
"avg_score": 0.0007321866948398729,
"num_lines": 150
} |
__author__ = "Aishwarya Sharma"
# This class represents the "posts" table in the blog database.
class Post:
    """Represents one row of the "posts" table in the blog database."""

    def __init__(self, post_id=None, title=None, content=None, create_date=None, edit_date=None, summary=None):
        self.post_id = post_id
        self.title = title
        self.summary = summary
        self.content = content
        self.create_date = create_date
        self.edit_date = edit_date

    def __str__(self):
        """Return a human-readable, multi-line dump of every field."""
        result = ("Post ID: " + str(self.post_id) +
                  "\nTitle: " + str(self.title) +
                  "\nSummary: " + str(self.summary) +
                  "\nContent:\n" + str(self.content) +
                  "\nCreated On: " + str(self.create_date) +
                  "\nEdited On: " + str(self.edit_date)
                  )
        return result

    def dictionary_mapper(self, dictionary):
        """Populate this instance from a dict keyed by column name.

        :param dictionary: mapping with keys post_id, title, summary,
            content, create_date, edit_date
        :return: self, to allow chaining
        """
        self.post_id = dictionary["post_id"]
        self.title = dictionary["title"]
        self.summary = dictionary["summary"]
        # Bug fix: content was previously copied from the "title" key.
        self.content = dictionary["content"]
        self.create_date = dictionary["create_date"]
        self.edit_date = dictionary["edit_date"]
        return self

    def to_dict(self):
        """Return a serializable dict of all fields; dates are stringified."""
        result = {
            "post_id": self.post_id,
            "title": self.title,
            "summary": self.summary,
            "content": self.content,
            "create_date": str(self.create_date),
            # Bug fix: this key was misspelled "edit_data".
            "edit_date": str(self.edit_date)
        }
        return result
| {
"repo_name": "aishsharma/Weirdo_Blog",
"path": "src/database/tables.py",
"copies": "1",
"size": "1518",
"license": "mit",
"hash": 4089009249247639000,
"line_mean": 34.1428571429,
"line_max": 111,
"alpha_frac": 0.5177865613,
"autogenerated": false,
"ratio": 3.9224806201550386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9939213662702404,
"avg_score": 0.00021070375052675939,
"num_lines": 42
} |
__author__ = 'Ajay'

from django.conf.urls import url, patterns, include
from . import views
from blog.views import Index, PeopleList
from django.contrib import admin

admin.autodiscover()

# URL routes for the blog app.  Resolution order matters, so the original
# ordering is preserved exactly.
urlpatterns = [
    #url(r'^$', views.post_list, name='post_list'),
    url(r'^hello$', views.hello, name='post_list'),
    url(r'^$', Index.as_view()),
    url(r'^people$', PeopleList.as_view()),
    url(r'^people_list$', views.people_list),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^person/new/$', views.person_new, name='person_new'),
    url(r'^person/edit/(?P<pk>[0-9]+)/$', views.person_edit, name='person_edit'),
    url(r'^person/(?P<pk>[0-9]+)/$', views.person_detail, name='person_detail'),
    url(r'^person/(?P<pk>[0-9]+)/spouse/', views.spouse_view, name='spouse_view'),
]
#To Do from here
"""
url (r'^person/parents/add/(?P<pk>[0-9]+)/$', views.person_add_parents, name='person_add_parents'),
url (r'^person/parents/edit/(?P<pk>[0-9]+)/$', views.person_edit_parents, name='person_edit_parents'),
url (r'^person/parents/remove/(?P<pk>[0-9]+)/$', views.person_remove_parents, name='person_remove_parents'),
url (r'^person/parents/view/(?P<pk>[0-9]+)/$', views.person_view_parents, name='person_view_parents'),
url (r'^person/(?P<pk>[0-9]+)/children/', views.person_view_children, name='person_view_children'),
""" | {
"repo_name": "ajaycode/django1",
"path": "blog/urls.py",
"copies": "1",
"size": "1338",
"license": "apache-2.0",
"hash": 53830016138077830,
"line_mean": 45.1724137931,
"line_max": 108,
"alpha_frac": 0.6434977578,
"autogenerated": false,
"ratio": 2.966740576496674,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4110238334296674,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ajay'
import re, collections
def words(text):
    """Lower-case *text* and return the list of alphabetic runs it contains."""
    lowered = text.lower()
    return re.findall('[a-z]+', lowered)
def train(features):
    """Build a smoothed frequency table from an iterable of words.

    Every word starts with an implicit count of 1 (add-one smoothing), so
    unseen words still get a non-zero probability weight.
    """
    counts = collections.defaultdict(lambda: 1)
    for token in features:
        counts[token] = counts[token] + 1
    return counts
# Train the language model once at import time (Norvig's classic spelling
# corrector setup); 'big.txt' must exist in the working directory or this
# raises at import.
NWORDS = train(words(open('big.txt').read()))
# candidate letters used by edits1 for replacements and insertions
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
    """Return the set of all strings exactly one edit away from *word*.

    The four classic edit operations are applied at every position:
    deletion, transposition of adjacent characters, replacement, insertion.
    """
    pieces = [(word[:cut], word[cut:]) for cut in range(len(word) + 1)]
    deletions = [head + tail[1:] for head, tail in pieces if tail]
    swaps = [head + tail[1] + tail[0] + tail[2:] for head, tail in pieces if len(tail) > 1]
    substitutions = [head + letter + tail[1:] for head, tail in pieces for letter in alphabet if tail]
    insertions = [head + letter + tail for head, tail in pieces for letter in alphabet]
    return set(deletions + swaps + substitutions + insertions)
def known_edits2(word):
    """Return every dictionary word exactly two edits away from *word*."""
    results = set()
    for first_edit in edits1(word):
        for second_edit in edits1(first_edit):
            if second_edit in NWORDS:
                results.add(second_edit)
    return results
def known(words):
    """Filter *words* down to the ones present in the trained model."""
    return {candidate for candidate in words if candidate in NWORDS}
def correct(word):
    """Return the most probable spelling correction for *word*.

    Candidate sets are tried lazily in order of increasing edit distance;
    the first non-empty one wins, falling back to the word itself.
    """
    candidate_makers = (lambda: known([word]),
                        lambda: known(edits1(word)),
                        lambda: known_edits2(word),
                        lambda: [word])
    for make in candidate_makers:
        candidates = make()
        if candidates:
            return max(candidates, key=NWORDS.get)
if __name__ == '__main__':
    # Smoke test when run as a script: correct a garbled word.  The result
    # is computed but not printed.
    correct ("helelowlong")
"repo_name": "ajaycode/django1",
"path": "blog/spell_check.py",
"copies": "1",
"size": "1108",
"license": "apache-2.0",
"hash": -9200422109785398000,
"line_mean": 29.8055555556,
"line_max": 85,
"alpha_frac": 0.6263537906,
"autogenerated": false,
"ratio": 2.9546666666666668,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9002636587895576,
"avg_score": 0.015676773874217904,
"num_lines": 36
} |
#********************************************List of Dependencies*******************************************************
#The following code has been tested with the indicated versions on 64bit Linux and PYTHON 2.7.3
#os: Use standard library with comes with python.
#pint: 0.5.1
#***********************************************************************************************************************
from ..wrapper import *
import os
from pint import UnitRegistry
ureg = UnitRegistry()
def getMorphMeasures(swcfName):
    """
    :param swcfName: relative/absolute path of the swc file.
    :return: a dictionary of scalar measurements as key value pairs. The name of the key is the name of the measurement. The values are pint quantities.
    """
    swcfName = os.path.abspath(swcfName)
    measureNames = ['Width', 'Height', 'Depth', 'Length', 'Volume', 'Surface', 'N_bifs']
    LMOutputSimple = getMeasure(measureNames, [swcfName])
    # each measure's scalar value lives at ['WholeCellMeasures'][0][0]
    raw = [LMOutputSimple[i]['WholeCellMeasures'][0][0] for i in range(7)]
    width, height, depth, length, volume, surface, nbifs = raw
    # attach units: linear measures in um, surface in um^2, volume in um^3,
    # and bifurcation count as a dimensionless quantity
    scalarDict = {
        'Width': width * ureg.um,
        'Height': height * ureg.um,
        'Depth': depth * ureg.um,
        'Length': length * ureg.um,
        'Volume': volume * (ureg.um) ** 3,
        'Surface': surface * (ureg.um) ** 2,
        'NumberofBifurcations': ureg.Quantity(nbifs, None),
    }
    return {'scalarMeasurements': scalarDict}
#******************************************************************************************************************* | {
"repo_name": "DaisukeMiyamoto/python-Lmeasure",
"path": "LMIO/util/morphometricMeasurements.py",
"copies": "1",
"size": "2053",
"license": "apache-2.0",
"hash": 8812055204942862000,
"line_mean": 37.037037037,
"line_max": 152,
"alpha_frac": 0.5353141744,
"autogenerated": false,
"ratio": 3.753199268738574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4788513443138574,
"avg_score": null,
"num_lines": null
} |
#********************************************List of Dependencies*******************************************************
#The following code has been tested with the indicated versions on 64bit Linux and PYTHON 2.7.3
#blender: 2.6.9
#***********************************************************************************************************************
import bpy
from mathutils import Vector, Matrix
from math import pi as PI
from math import cos, sin, acos
import os
import numpy as np
class BlenderSWCImporter:
    """Imports a neuron morphology stored in SWC format into Blender as a mesh.

    Each SWC section (a point plus its parent) becomes a frustum whose end
    circles are approximated with ``nCirclePoints`` vertices.  SSWC files
    (SWC with one extra per-point column) are colored by linearly mapping the
    extra column onto the supplied list of materials.
    """
    #*******************************************************************************************************************

    def parseSWCData(self, swcData, restrictRadiusTo=0.5):
        """
        Clamps all radii to at least restrictRadiusTo, duplicates root points so the soma gets its own geometry, and splits the rows into per-point dictionaries.
        :param swcData: list of SWC rows ([id, type, x, y, z, radius, parent, (extra)])
        :param restrictRadiusTo: minimum radius in SWC units
        :return: (swcPointData, extraCol). swcPointData maps point id -> [x, y, z, radius, parent]; extraCol maps point id -> optional eighth column (empty dict for plain SWC files).
        """
        nInitPts = int(max([x[0] for x in swcData]))
        nExtra = 0
        for ind in range(len(swcData)):
            pt = swcData[ind]
            pt[5] = max(pt[5], restrictRadiusTo)
            halfR = pt[5] * 0.5
            if pt[6] < 0:
                # Root point: append two helper points offset along Y by half
                # the radius so the soma is drawn as a short section of its own.
                for ind1 in range(1, 3):
                    toApp = [nInitPts + nExtra + ind1, pt[1], pt[2], pt[3], pt[4] + halfR, pt[5], pt[0]]
                    if len(pt) > 7:
                        toApp.append(pt[7])
                    swcData.append(toApp)
                nExtra += 2
            swcData[ind][5] = max(restrictRadiusTo, swcData[ind][5])
        swcPointData = {}
        extraCol = {}
        for entries in swcData:
            # store [x, y, z, radius, parent] keyed by the SWC point index
            swcPointData[int(entries[0])] = [float(x) for x in entries[2:7]]
            if len(entries) > 7:
                extraCol[int(entries[0])] = float(entries[7])
        return swcPointData, extraCol

    #*******************************************************************************************************************

    def __init__(self, swcFName, add=False, matchRootOrigin=True, swcData=None,
                 sswcMaterials=None, scaleDownBy=100, restrictRadiusTo=0.5):
        """
        :param swcFName: path of the SWC/SSWC file to import
        :param add: when False, the current Blender scene is cleared first
        :param matchRootOrigin: when True, geometry is translated so the root point sits at the origin
        :param swcData: optional pre-loaded SWC rows (list or numpy array); loaded from swcFName when None
        :param sswcMaterials: list of Blender materials used to color SSWC files
        :param scaleDownBy: divisor applied to all coordinates and radii
        :param restrictRadiusTo: minimum radius in SWC units
        """
        if not add == True:
            #Remove the default objects in the blender scene.
            bpy.ops.object.select_all(action='TOGGLE')
            bpy.ops.object.select_all(action='TOGGLE')
            bpy.ops.object.delete()
        self.swcFName = swcFName
        # Bug fix: str.rstrip('.swc') strips any run of the characters
        # '.', 's', 'w', 'c' from the end (e.g. "cells.swc" -> "cell");
        # os.path.splitext removes exactly one extension instead.
        self.swcName = os.path.splitext(os.path.split(swcFName)[1])[0]
        self.sswcMaterials = sswcMaterials
        self.isSSWC = False
        if swcData is None:
            swcData = np.loadtxt(swcFName).tolist()
        elif type(swcData) == np.ndarray:
            swcData = swcData.tolist()
        self.swcPointData, self.extraCol = self.parseSWCData(swcData, restrictRadiusTo)
        if self.extraCol:
            # SSWC file: linearly map the extra column onto material indices
            # [0, len(sswcMaterials) - 1].
            self.isSSWC = True
            extraCols = np.array(list(self.extraCol.values()))
            maxEC = extraCols.max()
            minEC = extraCols.min()
            nMaterials = len(self.sswcMaterials)
            for x, y in self.extraCol.items():
                self.extraCol[x] = int((nMaterials - 1) * (y - minEC) / (maxEC - minEC))
        self.nCirclePoints = 8
        assert self.nCirclePoints % 2 == 0, 'No of points on the circle circumference has to be even'
        if matchRootOrigin:
            self.originPoint = Vector(self.swcPointData[1][:3])
        else:
            self.originPoint = Vector([0, 0, 0])
        self.scaleDownBy = scaleDownBy
        ks = self.swcPointData.keys()
        self.swcPointDone = dict(zip(ks, [False for x in ks]))
        self.blenderCircleIndsPerSWCPoint = dict(zip(ks, [[] for x in ks]))
        #For these, each entry corresponds to one set of circle added to blender.
        self.vertIndexStartsPerPoint = []
        self.normals = []
        self.refVecsInPlane = []
        self.verts = []
        self.faces = []
        self.faceColInds = []
        self.nBlenderCircles = 0

    #*******************************************************************************************************************

    def getFaceIndices(self, vertsOfVec1Start, vertsOfVec2Start):
        """
        It is assumed that the set of vertex indices belonging to a circle are contiguous. Given the starting indices of the two sets of vertices belonging to two circles, returns a list of quadruplets, each representing a quadrilateral face of the surface joining the two circles.
        :param vertsOfVec1Start: integer, start index of the first circle's vertices.
        :param vertsOfVec2Start: integer, start index of the second circle's vertices.
        :return: list of quadruplets of vertex indices.
        """
        return [(vertsOfVec1Start + x,
                 vertsOfVec1Start + ((x + 1) % self.nCirclePoints),
                 vertsOfVec2Start + ((x + 1) % self.nCirclePoints),
                 vertsOfVec2Start + x) for x in range(self.nCirclePoints)]

    #*******************************************************************************************************************

    def getNormDiffVector(self, vector1, vector2):
        """
        Returns the normalized difference of vector1 and vector2
        :param vector1: mathutils.Vector object of Blender
        :param vector2: mathutils.Vector object of Blender
        :return: normalized difference of vector1 and vector2
        """
        diffVector = vector1 - vector2
        diffVector.normalize()
        return diffVector

    #*******************************************************************************************************************

    def getCircleVerts(self, pointVec, pointNormal, pointDiam, refVec=None):
        """
        Given a point, a normal and a radius, generates self.nCirclePoints points on the circle so defined. If 'refVec' is given, the first returned point is the projection of the tip of refVec (moved to pointVec) onto the plane perpendicular to pointNormal.
        :param pointVec: mathutils.Vector object of Blender
        :param pointNormal: mathutils.Vector object of Blender
        :param pointDiam: circle radius (named "Diam" throughout this class; it is used directly as the cylindrical r below)
        :param refVec: mathutils.Vector object of Blender
        :return: (circlePoints, refVecInPlane); circlePoints is a list of self.nCirclePoints Vectors, refVecInPlane is circlePoints[0] - pointVec
        """
        #rotation matrix for rotating b to a: Matrix.Rotation(b.angle(a), dimension, b.cross(a))
        #here pointNormal is 'a', (0,0,1) is 'b'
        rotationMatrix = Matrix.Rotation(acos(pointNormal.z), 3, Vector([-pointNormal.y, pointNormal.x, 0]))
        # the points of the circle are first constructed assuming the pointNormal is (0,0,1). Let this frame of
        # reference be call Ref1
        thetaAdditional = 0
        if refVec is not None:
            #shift and rotate refVec to bring it to Ref1
            refVec.normalize()
            refVecShifted = refVec
            newRefVec = rotationMatrix.inverted() * refVecShifted
            #nearest point on the r=1 circle is the projection of newRefVec on XY plane(just set z=0)
            #thetaAdditional is just angle between newRefVec with (1,0,0)
            newRefVec.z = 0
            newRefVec.normalize()
            thetaAdditional = acos(newRefVec.x)
        #Using cylindrical coordinates r, theta and z
        r = pointDiam
        thetaInterval = 2 * PI / self.nCirclePoints
        thetas = [x * thetaInterval for x in range(self.nCirclePoints)]
        circlePointsRotated = [Vector([r * cos(theta + thetaAdditional), r * sin(theta + thetaAdditional), 0])
                               for theta in thetas]
        circlePoints = [pointVec + rotationMatrix * x for x in circlePointsRotated]
        return circlePoints, circlePoints[0] - pointVec

    #*******************************************************************************************************************

    def addVertices(self, verts, normal, refVecInPlane, swcPointInd):
        """
        Adds all the inputs to the corresponding collections defined in __init__. Increments self.nBlenderCircles and marks the point done in self.swcPointDone.
        :param verts: list of self.nCirclePoints mathutils.Vector objects
        :param normal: mathutils.Vector object of Blender
        :param refVecInPlane: mathutils.Vector object of Blender
        :param swcPointInd: integer, index of the point in the swc file (col 1)
        :return:
        """
        self.swcPointDone[swcPointInd] = True
        self.vertIndexStartsPerPoint.append(self.nBlenderCircles * self.nCirclePoints)
        self.blenderCircleIndsPerSWCPoint[swcPointInd].append(self.nBlenderCircles)
        self.verts.extend(verts)
        self.normals.append(normal)
        self.refVecsInPlane.append(refVecInPlane)
        self.nBlenderCircles += 1

    #*******************************************************************************************************************

    def correctNumbering(self, verts):
        """
        Returns verts[0,-1, -2,.....,2,1]
        :param verts: list
        :return:
        """
        allButFirst = verts[1:]
        allButFirst.reverse()
        correctedVerts = [verts[0]]
        correctedVerts.extend(allButFirst)
        return correctedVerts

    #*******************************************************************************************************************

    def addNewSection(self, pointVec, pointDiam, rootVec, rootDiam, pointInd, rootInd):
        """
        Adds two fresh circles (around rootVec and pointVec) and the quadrilateral faces joining them.
        :param pointVec: mathutils.Vector object of Blender
        :param pointDiam: circle radius at the point
        :param rootVec: mathutils.Vector object of Blender
        :param rootDiam: circle radius at the root
        :param pointInd: integer, index of the point in the swc file (col 1)
        :param rootInd: integer, index of the root of the point in the swc file (col 7)
        :return:
        """
        rootNormal = self.getNormDiffVector(pointVec, rootVec)
        vertsOfRoot, refVecIPRoot = self.getCircleVerts(rootVec, rootNormal, rootDiam)
        self.addVertices(vertsOfRoot, rootNormal, refVecIPRoot, rootInd)
        pointNormal = self.getNormDiffVector(rootVec, pointVec)
        # NOTE(review): the point's circle is generated around rootNormal
        # (presumably to keep the two end circles parallel), while the
        # antiparallel pointNormal is what gets stored for later angle tests.
        vertsOfPoint, refVecIPPoint = self.getCircleVerts(pointVec, rootNormal, pointDiam,
                                                          self.refVecsInPlane[-1])
        #because the normals are antiparallel, the direction of numbering the points would be opposite. Correcting.
        self.addVertices(vertsOfPoint, pointNormal, refVecIPPoint, pointInd)
        secFaces = self.getFaceIndices(self.vertIndexStartsPerPoint[-2], self.vertIndexStartsPerPoint[-1])
        self.faces.extend(secFaces)
        if self.isSSWC:
            self.faceColInds.extend([self.extraCol[int(pointInd)]] * len(secFaces))

    #*******************************************************************************************************************

    def addPointsAndFaces(self, pointVec, pointDiam, pointNormal, indexOfRootPoint, refVecIP, pointInd):
        """
        Adds one new circle around pointVec and the faces joining it to an already-existing circle (the one with blender-circle index indexOfRootPoint). See getCircleVerts for the role of refVecIP.
        :param pointVec: mathutils.Vector object of Blender
        :param pointDiam: circle radius at the point
        :param pointNormal: mathutils.Vector object of Blender
        :param indexOfRootPoint: integer, blender-circle index of the root circle
        :param refVecIP: mathutils.Vector object of Blender
        :param pointInd: integer
        :return:
        """
        vertsOfPoint, refVecIP = self.getCircleVerts(pointVec, pointNormal,
                                                     pointDiam, refVecIP)
        self.addVertices(vertsOfPoint, pointNormal, refVecIP, pointInd)
        secFaces = self.getFaceIndices(self.vertIndexStartsPerPoint[indexOfRootPoint], self.vertIndexStartsPerPoint[-1])
        self.faces.extend(secFaces)
        if self.isSSWC:
            self.faceColInds.extend([self.extraCol[int(pointInd)]] * len(secFaces))

    #*******************************************************************************************************************

    def addSection(self, pointInd):
        """
        Adds to self.verts equally spaced points along the circumferences of the two circles, one around the point with index pointInd in the swc file (col 1) and one around its root, reusing an existing root circle when one with a close-enough normal exists. Adds the joining faces to self.faces.
        :param pointInd: integer
        :return:
        """
        # smallest angle between two vectors, ignoring orientation
        angleBetween = lambda vec1, vec2: min(vec1.angle(vec2), vec1.angle(-vec2))
        pointData = self.swcPointData[pointInd]
        pointVec = (Vector(pointData[:3]) - self.originPoint) / self.scaleDownBy
        # pointData[3] holds the SWC radius column; the code names it "Diam"
        pointDiam = pointData[3] / self.scaleDownBy
        rootInd = int(pointData[4])
        rootData = self.swcPointData[rootInd]
        rootVec = (Vector(rootData[:3]) - self.originPoint) / self.scaleDownBy
        rootDiam = rootData[3] / self.scaleDownBy
        if pointVec == rootVec:
            # Bug fix: the message previously ran numbers and words together
            # ("...line 5and line 7have the same...").
            print('Warning: Points at line ' + str(pointInd) + ' and line ' + str(rootInd) +
                  ' have the same XYZ Coordinates in file ' + self.swcName)
        else:
            #if both the point and root have not been added
            if not self.swcPointDone[rootInd]:
                self.addNewSection(pointVec, pointDiam, rootVec, rootDiam, pointInd, rootInd)
            #if the root point has already been added
            else:
                rootPointIndices = self.blenderCircleIndsPerSWCPoint[rootInd]
                pointNormal = self.getNormDiffVector(rootVec, pointVec)
                # (the helper lambda was previously also named "minAngle" and
                # shadowed by the variable below; renamed for clarity)
                anglesWithRootNormals = [angleBetween(pointNormal, self.normals[x]) for x in rootPointIndices]
                minAngle = min(anglesWithRootNormals)
                if minAngle > (PI / 4.0):
                    self.addNewSection(pointVec, pointDiam, rootVec, rootDiam, pointInd, rootInd)
                else:
                    indexOfRootPointToUse = rootPointIndices[anglesWithRootNormals.index(minAngle)]
                    #if the closest vector(by angle) was antiparallel to the actual normal vector, invert the stored refVecInPlane
                    if pointNormal.angle(self.normals[indexOfRootPointToUse]) < (PI / 4.0):
                        refVecIP = self.refVecsInPlane[indexOfRootPointToUse]
                    else:
                        refVecIP = -self.refVecsInPlane[indexOfRootPointToUse]
                    self.addPointsAndFaces(pointVec, pointDiam, pointNormal, indexOfRootPointToUse, refVecIP, pointInd)

    #*******************************************************************************************************************

    def definePoints(self, col):
        """
        For each point of the swc file which is not the root, adds the circles and faces that define the 3D frustum representing the section.
        :param col: RGB triplet; currently unused here (kept for interface compatibility)
        :return:
        """
        for pointInd in self.swcPointData.keys():
            pointInd = int(pointInd)
            # stored element 4 is the parent index; roots (parent < 0) get no section
            if self.swcPointData[pointInd][4] > 0:
                self.addSection(pointInd)

    #*******************************************************************************************************************

    def drawWholeInBlender(self, col):
        """
        Uses the defined points in self.verts and faces in self.faces to construct a 3D object in Blender
        :param col: RGB triplet used as diffuse color for plain SWC files
        :return:
        """
        mesh = bpy.data.meshes.new(self.swcName)
        nrn = bpy.data.objects.new(self.swcName, mesh)
        bpy.context.scene.objects.link(nrn)
        if not self.isSSWC:
            mat = bpy.data.materials.new(self.swcName)
            mat.diffuse_color = col
            nrn.active_material = mat
        else:
            for mat in self.sswcMaterials:
                nrn.data.materials.append(mat)
        mesh.from_pydata(self.verts, [], self.faces)
        mesh.update(calc_edges=True)
        if self.isSSWC:
            # color every face with the material index derived from the extra column
            nrnObj = bpy.context.scene.objects[self.swcName]
            for polygon, facColInd in zip(nrnObj.data.polygons, self.faceColInds):
                polygon.material_index = facColInd
            nrn.show_transparent = True

    #*******************************************************************************************************************

    def export2Obj(self, fileName, col=[1, 0, 0]):
        """
        This function generates an OBJ file taking the swcfile path.
        :param fileName: path of the OBJ file to write.
        :param col: RGB triplet defining the color of the neuron
        :return:
        """
        self.definePoints(col)
        self.drawWholeInBlender(col)
        bpy.ops.export_scene.obj(filepath=fileName)

    #*******************************************************************************************************************

    def importWholeSWC(self, col=[1, 0, 0]):
        """
        This function imports the neuron of the swc file passed to __init__ into the current scene.
        :param col: RGB triplet defining the color of the neuron
        :return:
        """
        self.definePoints(col)
        self.drawWholeInBlender(col)

    #*******************************************************************************************************************

    def addSphere(self, radius=1, position=[0, 0, 0], col=[1, 0, 0]):
        """Add a UV sphere (scaled like the rest of the geometry) with a fresh colored material."""
        bpy.ops.mesh.primitive_uv_sphere_add(size=radius / self.scaleDownBy, location=np.array(position) / self.scaleDownBy)
        sph = bpy.context.active_object
        mat = bpy.data.materials.new("sphereMat")
        mat.diffuse_color = col
        mat.diffuse_intensity = 1.0
        sph.active_material = mat
#***********************************************************************************************************************
#To add color
#
#mat = bpy.data.materials.new("dorsalBranch")
#mat.diffuse_color = (r,g,b)
#object.active_material = mat
def addVoxelized(fle, voxelSize, add=False, col=[1, 0, 0]):
    """Load a voxel listing and add one translucent cube per voxel.

    The file is read with numpy.loadtxt; columns 2-4 of each row are taken as
    XYZ and snapped to the voxel grid.

    :param fle: path of the text file to load (also used as the material name)
    :param voxelSize: edge length of the voxel grid, in original units
    :param add: when False, the current Blender scene is cleared first
    :param col: RGB triplet for the cubes' diffuse color (default is never mutated)
    """
    scaleDownBy = float(100)
    if not add:
        #Remove the default objects in the blender scene.
        bpy.ops.object.select_all(action='TOGGLE')
        bpy.ops.object.select_all(action='TOGGLE')
        bpy.ops.object.delete()
    vData = np.loadtxt(fle)
    mat = bpy.data.materials.new(fle)
    mat.diffuse_color = col
    mat.translucency = 0.75
    for vPt in vData:
        pt = vPt[2:5]
        # snap the point to the voxel grid before placing the cube
        ptCentered = voxelSize * np.round(pt / voxelSize)
        bpy.ops.mesh.primitive_cube_add(location=ptCentered / scaleDownBy, radius=voxelSize / scaleDownBy)
        # Bug fix: the original looked each cube up by a guessed name
        # ('Cube', 'Cube.001', ...), which broke when its counter started at
        # zero (it built 'Cube.-01') or when the scene already held cubes.
        # The freshly added primitive is always the active object (same
        # pattern as BlenderSWCImporter.addSphere).
        bpy.context.active_object.active_material = mat
"repo_name": "dEvasEnApati/BlenderSWCVizualizer",
"path": "blenderHelper.py",
"copies": "2",
"size": "24404",
"license": "apache-2.0",
"hash": 7811701832075533000,
"line_mean": 41.0051635112,
"line_max": 475,
"alpha_frac": 0.5535977709,
"autogenerated": false,
"ratio": 4.022416350749959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5576014121649958,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ajdanelz'
# Python 2 script: summarizes, for each git tag of the last three months, how
# many files and lines changed since the previous tag, and writes the result
# to GitVersionChanges.csv.
import subprocess
from datetime import *
from dateutil.relativedelta import relativedelta
# Shell pipeline: list every tag, look up the date of the commit each tag
# points at, sort chronologically, keep only the date and tag name columns.
pipeline = []
pipeline.append("git tag")
pipeline.append("xargs -I@ git log --format=format:'%ai @%n' -1 @")
pipeline.append("sort")
pipeline.append("awk '{print $1,$4}'")
command = "|".join(pipeline)
out, err = subprocess.Popen(command, stdout=subprocess.PIPE,
                            shell=True).communicate()
tags = []
dates = []
# once a tag younger than three months is seen, keep collecting from there on
collect = False
#print out
for row in out.split('\n'):
    if row != '':
        # each row looks like "YYYY-MM-DD tagname"
        arDate = row.split(' ')[0].split('-')
        tagDate = date(int(arDate[0]), int(arDate[1]), int(arDate[2]))
        tag = row.split(' ')[1]
        startDate = date.today() - relativedelta(months=3)
        if tagDate > startDate:
            collect = True
        if collect:
            if tag != '':
                dates.append(tagDate)
                tags.append(tag)
# For each consecutive pair of tags, diff them and count touched files and
# total changed lines (additions + deletions from --numstat).
output = 'date,version,files_changed,lines_changed\n'
while len(tags) > 1:
    begin = "refs/tags/" + tags[0]
    end = "refs/tags/" + tags[1]
    output += dates[1].strftime("%m/%d/%y") + ","
    output += tags[1] + ","
    p = subprocess.Popen(["git", "diff", "--numstat", begin, end], stdout=subprocess.PIPE)
    out, err = p.communicate()
    files = 0
    lines = 0
    for file in out.split('\n'):
        if file != '':
            files += 1
            # --numstat rows are "added<TAB>removed<TAB>path"; binary files
            # report "-" and are skipped by the isdigit() guards below
            # (the Py2 filter() calls are redundant after that check).
            add = file.split("\t")[0]
            rem = file.split("\t")[1]
            if add.isdigit():
                lines += int(filter(str.isdigit, add))
            if rem.isdigit():
                lines += int(filter(str.isdigit, rem))
    output += str(files) + ","
    output += str(lines) + "\n"
    # advance the sliding pair of (tag, date)
    dates.remove(dates[0])
    tags.remove(tags[0])
with open('GitVersionChanges.csv', 'w+') as F:
    F.write(output)
print output
| {
"repo_name": "scoobah36/GitVersionParsing",
"path": "VersionsByDate.py",
"copies": "1",
"size": "1790",
"license": "mit",
"hash": -2286020013892744200,
"line_mean": 26.5384615385,
"line_max": 90,
"alpha_frac": 0.5530726257,
"autogenerated": false,
"ratio": 3.4225621414913956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44756347671913954,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aje'
#
# Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
import re
import datetime
import time
from bson.objectid import ObjectId
import pymongo
# The Puts Data Access Object handles interactions with the puts collection
class PutsDAO:
    """Data Access Object for the "puts" MongoDB collection (betting offers)."""
    def __init__(self, database):
        self.db = database
        self.puts = database.puts
    def insert_entry(self, userid, treatment, valToBetOn, price, shares, period, opend):
        """Insert a new put document; returns the new _id as a string.

        Re-raises on failure after logging the error to stdout.
        """
        put = { "userid": userid,
                "treatment": treatment,
                "valToBetOn": valToBetOn,
                "price": int(price),
                "shares":int(shares),
                "date": datetime.datetime.utcnow(),
                "period": period,
                "open": opend}
        try:
            print "Inserting the put", put
            newId = self.puts.insert(put)
            return str(newId)
        except:
            print "Error inserting post"
            print "Unexpected error:", sys.exc_info()[0]
            raise
    # Returns the list of open puts for the given treatment/period, sorted by
    # ascending price, with all displayed fields stringified.
    def get_puts(self, treatment, period, demo_mode):
        l = []
        # NOTE(review): demo mode matches 'open': 1 while normal mode matches
        # 'open': True -- confirm both representations are actually stored
        # (insert_entry writes whatever `opend` value the caller passes).
        if demo_mode:
            cursor = self.puts.find({'period': period, 'open': 1}).sort('price', pymongo.ASCENDING)
        else:
            cursor = self.puts.find({'treatment': treatment, 'period':period, 'open': True}).sort('price', pymongo.ASCENDING)
        for put in cursor:
            put['date'] = str(time.time()) # fix up date
            put['id'] = str(put['_id']);
            put['price'] = str(put['price'])
            put['shares'] = str(put['shares'])
            put["_id"] = str(put['_id']);
            if 'period' not in put:
                put['period'] = 0
            else:
                put['period'] = str(put['period']);
            l.append(put)
        return l
    def get_put_by_id(self, item_id):
        """Return the raw put document for the given id string, or None."""
        #put = None
        #Work here to retrieve the specified post
        put=self.puts.find_one({'_id':ObjectId(item_id)})
        #if put is not None:
        # fix up date
        #put['date'] = put['date'].strftime("%A, %B %d %Y at %I:%M%p")
        #new= put.next()
        return put
    def accept_put(self, id, accepter_id):
        """Close the put and record the accepting user's id."""
        put = self.get_put_by_id(id)
        putId = put['_id']
        putMods = {}
        putMods["open"] = False
        putMods['accepted'] = accepter_id
        self.puts.update({'_id': putId}, {"$set": putMods}, upsert=False)
    def computer_accept(self, id):
        """Close the put, marking it as accepted by the computer (sentinel -1)."""
        put = self.get_put_by_id(id)
        putId = put['_id']
        putMods = {}
        putMods["open"] = False
        putMods['accepted'] = -1
        self.puts.update({'_id': putId}, {"$set": putMods}, upsert=False)
    # Append a comment dict to the put's 'comments' array; returns 0 on any
    # failure (e.g. missing put or missing 'comments' field).
    def add_comment(self, id, name, email, body, commentId):
        comment = {'author': name, 'body': body, 'id':commentId}
        if (email != ""):
            comment['email'] = email
        try:
            # NOTE(review): homework scaffolding ("HW 3.3") -- last_error is a
            # placeholder, so this always returns -1 rather than the real
            # number of updated documents.
            last_error = {'n':-1} # this is here so the code runs before you fix the next line
            # XXX HW 3.3 Work here to add the comment to the designated post
            put=self.puts.find_one({'_id':ObjectId(id)})
            put['comments'].append(comment);
            self.puts.save(put)
            return last_error['n'] # return the number of documents updated
        except:
            # NOTE(review): bare except silently converts every error to 0
            return 0
| {
"repo_name": "jac2130/BettingIsBelieving",
"path": "Betting/putsDAO.py",
"copies": "1",
"size": "3977",
"license": "mit",
"hash": -542300794793250700,
"line_mean": 33.2844827586,
"line_max": 125,
"alpha_frac": 0.561981393,
"autogenerated": false,
"ratio": 3.702979515828678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4764960908828678,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aje'
#
# Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
import random
import string
# #Import Python Libraries
import json
import pymongo
# #Connect to the Mongo Database
# NOTE(review): connecting at import time is a module-level side effect and
# assumes a mongod on localhost:27017 -- confirm this is intended.
client = pymongo.MongoClient("localhost", 27017)
# The session Data Access Object handles interactions with the sessions collection
class SessionDAO:
    """Data Access Object for the ``sessions`` collection (login sessions)."""

    def __init__(self, database):
        # NOTE(review): self.db is pinned to the module-level client's
        # 'students' database, ignoring the injected one; the sessions
        # collection still comes from ``database``.  Confirm this split is
        # intentional before relying on self.db.
        self.db = client.students
        self.sessions = database.sessions

    # Starts a new session by inserting a document into the sessions
    # collection; returns the session id string, or None on failure.
    def start_session(self, username):
        session_id = self.get_random_str(32)
        session = {'username': username, '_id': session_id}
        try:
            self.sessions.insert_one(session)
        except Exception:
            # Narrowed from a bare ``except:``; an _id collision raises
            # DuplicateKeyError, anything else is unexpected.
            print("Unexpected error on start_session: %s" % (sys.exc_info()[0],))
            return None
        return str(session['_id'])

    # Ends a user session by deleting it from the sessions collection.
    def end_session(self, session_id):
        if session_id is None:
            return
        self.sessions.delete_one({'_id': session_id})
        return

    # If there is a valid session, its document is returned; otherwise None.
    def get_session(self, session_id):
        if session_id is None:
            return None
        return self.sessions.find_one({'_id': session_id})

    # Get the username of the current session, or None if it is not valid.
    def get_username(self, session_id):
        session = self.get_session(session_id)
        if session is None:
            return None
        return session['username']

    def get_random_str(self, num_chars):
        """Return ``num_chars`` random ASCII letters for use as a session id.

        Uses SystemRandom (OS CSPRNG) instead of the default Mersenne
        Twister, whose output is predictable and unsafe for session tokens.
        """
        rng = random.SystemRandom()
        return "".join(rng.choice(string.ascii_letters) for _ in range(num_chars))
| {
"repo_name": "KartikKannapur/MongoDB_M101P",
"path": "Week_2/homework/homework_2_3/login_logout_signup/sessionDAO.py",
"copies": "1",
"size": "2481",
"license": "mit",
"hash": 7494334837058727000,
"line_mean": 26.8764044944,
"line_max": 85,
"alpha_frac": 0.6565900846,
"autogenerated": false,
"ratio": 4.128119800332779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5284709884932779,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.