blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6bc8c735def6cada2c4cabcea9501368bd6d6430 | 0c997ec448aeb9252b4b165832733de4698fae92 | /flocx_ui/api/flocx_market.py | dc81f9c54ed8c55b6e6ed600b64f6dd6ce7e777f | [
"Apache-2.0"
] | permissive | CCI-MOC/flocx-ui | 29946f17c209018526f4659948510ad02987db66 | 29001f90d5e023c3402cdbb90ac8ca5fbe6733f8 | refs/heads/master | 2021-06-12T07:05:33.141311 | 2019-08-12T23:30:42 | 2019-08-12T23:30:42 | 194,907,802 | 1 | 5 | Apache-2.0 | 2021-06-10T21:39:52 | 2019-07-02T17:32:48 | JavaScript | UTF-8 | Python | false | false | 2,162 | py | from flocx_ui.api import schema
from flocx_ui.api.utils import generic_market_request as generic_request
from flocx_ui.api.utils import validate_data_with
def get(path, **kwargs):
    """Issue a GET request against the market service.

    :param path: A url path
    :param **kwargs: Keyword arguments forwarded to the request function
    :return: The response for the given path
    """
    method = 'GET'
    return generic_request(method, path, **kwargs)
def post(path, **kwargs):
    """Issue a POST request against the market service.

    :param path: A url path
    :param **kwargs: Keyword arguments forwarded to the request function
    :return: The response for the given path
    """
    method = 'POST'
    return generic_request(method, path, **kwargs)
def offer_list(request):
    """Fetch every offer from the market service.

    :param request: HTTP request (supplies the auth token)
    :return: A list of offers parsed from the JSON response
    """
    token = request.user.token.id
    return get('/offer', token=token).json()
@validate_data_with(None, schema.validate_uuid)
def offer_get(request, offer_id):
    """Look up a single offer by its id.

    :param request: HTTP request (supplies the auth token)
    :param offer_id: uuid of the offer to fetch (validated by the decorator)
    :return: The offer parsed from the JSON response
    """
    token = request.user.token.id
    return get('/offer/{}'.format(offer_id), token=token).json()
def bid_list(request):
    """Fetch every bid from the market service.

    :param request: HTTP request (supplies the auth token)
    :return: A list of bids parsed from the JSON response
    """
    token = request.user.token.id
    return get('/bid', token=token).json()
@validate_data_with(None, schema.validate_bid)
def bid_create(request, bid):
    """Submit a new bid to the market service.

    :param request: HTTP Request (supplies the auth token)
    :param bid: The bid payload (validated by the decorator)
    :return: The created bid parsed from the JSON response
    """
    token = request.user.token.id
    return post('/bid', json=bid, token=token).json()
def contract_list(request):
    """Fetch every contract from the market service.

    :param request: HTTP request (supplies the auth token)
    :return: A list of contracts parsed from the JSON response
    """
    token = request.user.token.id
    return get('/contract', token=token).json()
| [
"jdtzmn@gmail.com"
] | jdtzmn@gmail.com |
1f0ba2eb90839c85462d5f63334dbc88a90db375 | 1d672c52ada009c6aeeafec6caeae0adf064060d | /docs/source/conf.py | a97a5551e3ae8bef36af79a0972f0eb8404b6190 | [
"BSD-3-Clause"
] | permissive | sakshiseth/fury | 9927487aaf5dd1b2dc0db5cd31facdb4743f86dd | 5799e445a5a306852a674396803bbefa922f0ae6 | refs/heads/master | 2021-01-13T20:18:49.848717 | 2020-02-22T20:54:59 | 2020-02-22T20:54:59 | 242,483,253 | 0 | 1 | NOASSERTION | 2020-02-23T08:39:05 | 2020-02-23T08:39:04 | null | UTF-8 | Python | false | false | 7,705 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# FURY documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 28 12:35:56 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import sys
from datetime import datetime
# Add current path
sys.path.insert(0, os.path.abspath('.'))
# Add doc in path for finding tutorial and examples
sys.path.insert(0, os.path.abspath('../..'))
# Add custom extensions
sys.path.insert(0, os.path.abspath('./ext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '2.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'sphinx_copybutton',
'sphinx_gallery.gen_gallery',
'ext.build_modref_templates',
'ext.github',
'ext.github_tools',
'ext.rstjinja'
]
# Configuration options for plot_directive. See:
# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
plot_html_show_source_link = False
plot_html_show_formats = False
# Generate the API documentation when building
autosummary_generate = []
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'FURY'
copyright = '2010-{0}, FURY'.format(datetime.now().year)
author = 'FURY'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import fury
# The short X.Y version.
version = fury.__version__
# The full version, including alpha/beta/rc tags.
release = fury.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html', # needs 'show_related': True theme option to display
        'searchbox.html',
        'versions.html',
    ]
}
# ghissue config
github_project_url = "https://github.com/fury-gl/fury"
import github_tools as ght
all_versions = ght.get_all_versions(ignore='micro')
# Extra variables exposed to the HTML templates: the version switcher plus
# repository statistics (presumably fetched from GitHub at build time —
# NOTE(review): confirm network access is available on the docs builder).
html_context = {'all_versions': all_versions,
                'versions_list': ['dev', 'latest'] + all_versions,
                'basic_stats': ght.fetch_basic_stats(),
                'contributors': ght.fetch_contributor_stats(),
                }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'fury'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fury.tex', 'FURY Documentation',
'Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fury', 'FURY Documentation',
[author], 1)
]
# -- Options for sphinx gallery -------------------------------------------
from scrap import ImageFileScraper
sc = ImageFileScraper()
sphinx_gallery_conf = {
    'doc_module': ('fury',),
    # path to your examples scripts
    'examples_dirs': ['../examples', '../tutorials'],
    # path where to save gallery generated examples
    'gallery_dirs': ['auto_examples', 'auto_tutorials'],
    # NOTE(review): (sc) is not a tuple — a 1-tuple would be (sc,); this
    # passes the single scraper object directly. Verify sphinx-gallery
    # accepts a bare scraper here.
    'image_scrapers': (sc),
    'backreferences_dir': 'api',
    'reference_url': {'fury': None, },
    # run every example file (pattern matches any path separator)
    'filename_pattern': re.escape(os.sep)
}
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fury', 'FURY Documentation',
author, 'fury', 'Free Unified Rendering in Python',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
    'matplotlib': ('https://matplotlib.org', None),
    # NOTE(review): the inventory path '1.0.0./objects.inv/' looks malformed
    # (stray dot and trailing slash) — verify the dipy objects.inv URL.
    'dipy': ('https://dipy.org/documentation/latest',
             'https://dipy.org/documentation/1.0.0./objects.inv/'),
}
| [
"skab12@gmail.com"
] | skab12@gmail.com |
ed637d9a5c33ed53f381056726ee3d448dd46715 | ac0e7afa7105b0d20baa164cb23e1b563baa837f | /practica08/application/controllers/productos/index.py | bd1d50635a32f69ab44484397682abfbb3a83daf | [] | no_license | IsaiasManuelAranda/Webpy_-_MVC | 54ebd5b8fc6b466869d634ebdc78dde25f46b8ca | d31c238c4b2c0a71d494d2b7cf76b639976f5312 | refs/heads/master | 2020-04-20T10:36:15.200006 | 2019-02-02T04:27:09 | 2019-02-02T04:27:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | import web
import config as config
class Index_produ:
    """web.py controller that renders the product index page."""

    def __init__(self):
        pass

    def GET(self):
        # Select every product record, materialize it as a list, and hand
        # the rows to the index template for rendering.
        product_rows = config.model_productos.select_productos().list()
        return config.render.index_p(product_rows)
| [
"noreply@github.com"
] | IsaiasManuelAranda.noreply@github.com |
b57c993b80ea8744fcbdb134420326c2234f45de | 9d72da433f5e0ef9bea202d8773f77ca60470ed3 | /cyanochew/provescyanobyte.py | 32921dff69165baf62e761aff162cb70a9bd7113 | [] | no_license | polfeliu/Cyanochew | 50fbd28f7b1a073b72c2b63625d092e3f480c7f9 | 6526b17aabac7c9fab2f9ad3a9446e9a2a6887d8 | refs/heads/master | 2023-05-11T04:12:22.973183 | 2021-06-01T19:40:27 | 2021-06-01T19:40:27 | 343,541,935 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | from cyanobyte.codegen import gen
from cyanobyte.validator import click_valdiate
from click.testing import CliRunner
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    runner = CliRunner()
    # Validate the example peripheral spec with cyanobyte's click command.
    # NOTE(review): "click_valdiate" (typo and all) is the name exported by
    # the cyanobyte.validator module — do not "fix" it here.
    result = runner.invoke(click_valdiate, [
        "test/peripherals/example.yaml"
    ])
    # Build step kept for reference (currently disabled):
    '''
    result = runner.invoke(gen, [
        "-t",
        "generic.c",
        "-o",
        ".build",
        "example.yaml"
    ])'''
| [
"feliupol@gmail.com"
] | feliupol@gmail.com |
48ecd5395f82be6e21ab9af2756dce7583fb69e7 | f21ec5d99b50bea470adbffc2422f3112117e322 | /projects/2020/x/commerce/auctions/models.py | 5d1379685184489178ce2b6bdbc94387c413f323 | [] | no_license | noahjeffers/web50 | 86694e5809d445f262fc0e645c1985b9ec786a83 | eaf18b51a69019b6d0c35759c71706b620ae7e64 | refs/heads/master | 2022-12-03T21:57:59.881152 | 2020-08-27T20:27:38 | 2020-08-27T20:27:38 | 285,107,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    # Custom user model; no extra fields yet, but declaring it up front keeps
    # later changes cheap (swapping AUTH_USER_MODEL mid-project is painful).
    pass
class Listing(models.Model):
    """An auction listing created by a user."""
    title = models.CharField(max_length=64)
    description = models.TextField()
    # Optional URL of a product image; null/blank when the seller gave none.
    imageLink = models.CharField(max_length=200, blank=True, default=None, null=True)
    uploadDate = models.DateTimeField()
    listedBy = models.ForeignKey(User, on_delete=models.CASCADE, related_name="listedBy")
    # Users watching this listing (watchlist feature).
    watched = models.ManyToManyField(User, blank=True, related_name="watching")
    # Current leading bid; null until the first bid is placed.
    currentbid = models.ForeignKey('Bids',on_delete=models.DO_NOTHING, null=True, blank=True, related_name="currentbid")
    category = models.ForeignKey('Category', on_delete=models.SET_NULL, related_name="categorize", blank=True, default=None, null=True)
    # False once the auction has been closed.
    active = models.BooleanField(default=True)
class Bids(models.Model):
    """A single bid placed by a user on a listing."""
    listingID = models.ForeignKey(Listing, on_delete=models.CASCADE, related_name="bided")
    userID = models.ForeignKey(User, on_delete=models.CASCADE, related_name="bidder")
    # Monetary amount: up to 10 digits total with 2 decimal places.
    amount = models.DecimalField(max_digits=10,decimal_places=2)
class Comments(models.Model):
    """A user comment attached to a listing."""
    listingID = models.ForeignKey(Listing, on_delete=models.CASCADE, related_name="commented")
    userID = models.ForeignKey(User, on_delete=models.CASCADE, related_name="commenter")
    content = models.TextField()
class Category(models.Model):
    """Lookup table of listing categories."""
    title = models.CharField(max_length=64)
"59740042+noahjeffers@users.noreply.github.com"
] | 59740042+noahjeffers@users.noreply.github.com |
4513165496d6f2e83579ac9cf0684f88a705068e | d020606f5e9174aa669e4b6b316bdb0fcb05ce02 | /run_test.py | dc79693a586c6b2f47af9c3cd513684781ca785c | [] | no_license | Hanlen520/AutomationProject | 4c1270fba570b256493cd6681d715e0b5136a4f5 | 95a7cb61d8b339a6409483d738de5a0d9d85b321 | refs/heads/master | 2023-04-02T20:23:07.696753 | 2021-04-07T07:57:04 | 2021-04-07T07:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,342 | py | # coding = utf8
import logging
import multiprocessing
import subprocess
import pytest
from airtest.core.api import *
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
from config import install_app_necessary, SERIAL_NUMBER
from page.fota.fota_page import Fota_Page
from page.main_page import Main_Page
from page.system.system import System
from toolsbar.common import test_device
from toolsbar.permissionGrant import grant_permission
os.path.abspath(".")
# 过滤airtest log只打印ERROR的Log
logger_airtest = logging.getLogger("airtest")
logger_airtest.setLevel(logging.ERROR)
cur_time = time.strftime("%Y%m%d_%H%M%S")
"""
@File:run_test.py
@Author:Bruce
@Date:2020/12/15
@Description:项目运行函数,存放测试和调试函数
"""
"""
单个设备poco、device不需要初始化
多个设备poco、device都需要创建新对象poco_item
后续将poco_item传入使用即可,airtest相关api,使用对应device_item进行调用
case不需要重复写
UI 进程和底部进程不要在同一个进程中容易出问题
"""
# 多机测试进程池:兼容单机和多机运行
"""
@description:多进程创建进行多台设备测试
@tip:
Pycharm调用adb缺陷,需要使用terminal输入charm来启动pycharm,以获得dash权限
执行case前,手动将pocoservice.apk的contniue安装好并将授权界面点掉,防止后续错误发生
"""
def start_test():
    """Install prerequisites and grant permissions on every connected device,
    then run fota_test_area() in a multiprocessing pool (one worker per
    serial number in SERIAL_NUMBER).
    """
    print("当前设备数量:" + str(len(SERIAL_NUMBER)))
    if len(SERIAL_NUMBER) > 1:
        for i in test_device:
            install_app_necessary(i)
            grant_permission(i)
    else:
        # Single-device run: test_device is passed whole — presumably it is a
        # single device object in this case; confirm against toolsbar.common.
        install_app_necessary(test_device)
        grant_permission(test_device)
    test_pool = multiprocessing.Pool(len(SERIAL_NUMBER))
    for device_ in SERIAL_NUMBER:
        test_pool.apply_async(func=fota_test_area, args=(device_,))
        sleep(10)  # 10 s between submissions (why: not stated; likely staggers device setup)
    test_pool.close()
    test_pool.join()
"""
@description:Fota checklist测试函数执行区域
@param:
device_:设备序列号
"""
def fota_test_area(device_):
    """Run the pre-Fota pytest suite for one device, build its Allure report,
    then trigger the software update via updatesw().

    :param device_: device serial number (also embedded in report paths)
    """
    pytest.main(["-v", "-s", "--cmdopt={}".format(device_), "{}".format("./test_case/test_before_fota.py"),
                 "--reruns={}".format(1),
                 "--alluredir={}".format("./temp/need_data[{}_{}]/".format(cur_time, device_))])
    # Differentiate report output per run/device via timestamped directories.
    subprocess.Popen(
        args=["allure", "generate", "./temp/need_data[{}_{}]/".format(cur_time, device_), "-o",
              "./report/test_report[{}_{}]/".format(cur_time, device_),
              "--clean"],
        shell=False).communicate()[0]
    updatesw(device_)
    # subprocess.Popen(
    #     "allure generate ./temp/need_data[{}_{}] -o ./report/test_report[{}_{}]/ --clean".format(cur_time, device_,
    #                                                                                             cur_time, device_),
    #     shell=True).communicate()[0]
"""
@description:Fota checklist测试软件升级函数执行区域
@param:
device_:设备序列号
"""
def updatesw(device_):
    """Drive the Fota software-update flow on one device and print the result.

    :param device_: device serial number
    """
    print("开始新版本升级")
    try:
        device_c = connect_device("Android:///{}".format(device_))
        poco = AndroidUiautomationPoco(device=device_c, use_airtest_input=False,
                                       screenshot_each_action=False)
        main_page = Main_Page(device_c, poco)
        system = System(main_page)
        system.unlock_screen()
        fota_page = Fota_Page(main_page)
        fota_page.start_fota_page()
        fota_page.skip_guide()
        fota_page.updatesw()
        print("升级结果:" + str(fota_page.check_update_result(device_)))
        print("Fota升级测试结束")
    except Exception as ex:
        # Broad catch so one device's failure does not abort the whole run.
        # NOTE(review): only the message is printed — the traceback is lost.
        print(str(ex))
"""
@description:Fota checklist测试函数区域
"""
def fota_checklist_test_module():
    """Run one full pass of the Fota checklist suite (delegates to start_test)."""
    start_test()
"""
@description:main函数,主要运行函数
"""
if __name__ == '__main__':
    # Run the full Fota checklist five times back to back, logging each pass.
    print("脚本开始测试,Fota checklist模块测试正在运行中……")
    for i in range(5):
        print("这是第{}次测试该脚本".format(i))
        fota_checklist_test_module()
        print("This is {} times running and time is {}".format(str(i), time.strftime("%Y%m%d_%H%M%S")))
    print("脚本测试结束,请检查测试结果")
| [
"39899119+792607724@users.noreply.github.com"
] | 39899119+792607724@users.noreply.github.com |
1d7bdba4d6116b2ddb547eb124da03e5267a6f0b | 6a2190c6d48db0c0708b267aeef9f0237be6441d | /forestrysafe/manage.py | 98dc7d65acf82e6ef94bbb2584728221402fdcc2 | [] | no_license | ValeriaSerdiuk/forestrysafe1 | 60e5dc47c7b78e388510415ceab92822fda808a6 | fc15dab19f78385d1ed349769e1922688ce94297 | refs/heads/master | 2023-05-21T05:36:50.386781 | 2021-06-09T16:45:09 | 2021-06-09T16:45:09 | 375,371,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the settings module and dispatch the CLI command."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'forestrysafe.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"valeriia.serdiuk@nure.ua"
] | valeriia.serdiuk@nure.ua |
04ccf5f203c5f325f1ea501592b7041c757b936e | db75555962fec912e0906ceebe1ffd9a7d3ebe3d | /test.py | 493dde5cd20dc454d0ddf5426029ef2e17f42a97 | [] | no_license | mikan3rd/youtube-rank-api | 620ed91a92c114e9369ad37528f8f8008fe7ed03 | 5a11fa1e12c98663ece9ae857a51304c7bad4f21 | refs/heads/master | 2021-05-09T18:54:13.933699 | 2019-07-14T13:16:47 | 2019-07-14T13:16:47 | 119,176,819 | 2 | 0 | null | 2020-05-20T16:21:44 | 2018-01-27T15:36:59 | Python | UTF-8 | Python | false | false | 308 | py | from app.tasks import tweet_crawl, twitter, twitter_tool
# One-off manual check: search recent 'splatoon' tweets and retweet them.
twitter.search_and_retweet('splatoon')
# exit()
# Alternative flow kept for reference: drive twitter_tool directly with the
# credentials held by the configured API object.
# api = twitter.get_twitter_api('splatoon')
# twitter_tool.search_and_retweet(
#     username=api.username,
#     password=api.password,
#     status='人気ツイート',
#     query=api.query,
# )
| [
"mikan.no.kikoushi@gmail.com"
] | mikan.no.kikoushi@gmail.com |
eb525e2ac4b98dac4261e2d6857bca7619fda42c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_disgraced.py | e19865eec3d6c3da4393171f3fa501464fae3db9 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.verbs._disgrace import _DISGRACE
# class header
class _DISGRACED(_DISGRACE, ):
    """Generated lexicon entry: past-tense form of the verb "disgrace"."""

    def __init__(self,):
        # Take all behaviour from the base verb, overriding identity fields.
        _DISGRACE.__init__(self)
        self.name = "DISGRACED"
        self.specie = 'verbs'
        self.basic = "disgrace"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4c80939b4dbe72cfb19b4f2f92fe4e430cc84247 | 1a36525eb6219b4d4f4628ebd3dbf72c3df4dcb7 | /taller6/ED2.py | 9d7777a06042d5c094475cea8bf386fb5d967738 | [] | no_license | lichobaron/TalleresAA | 06484987b8b6f09416277608190e91f5696d2c37 | 09f326e1e58d43c437eeb7a0a58bd6d4f8eacb02 | refs/heads/master | 2020-04-19T14:06:13.277073 | 2019-01-29T23:16:30 | 2019-01-29T23:16:30 | 168,233,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | def ED(X, Y):
return ED_Aux(X, Y, len(X)-1, len(Y)-1)
def ED_Aux(X, Y, i, j):
    """Edit (Levenshtein) distance between the prefixes X[:i+1] and Y[:j+1].

    Plain exponential-time recursion (no memoization). i == -1 / j == -1
    denote empty prefixes. Deletion, insertion and substitution each cost 1.
    """
    if i == -1:
        # X prefix empty: insert the remaining j + 1 characters of Y.
        # (Was `return j`, an off-by-one.)
        return j + 1
    if j == -1:
        # Y prefix empty: delete the remaining i + 1 characters of X.
        return i + 1
    det = ED_Aux(X, Y, i - 1, j) + 1                   # delete X[i]
    ins = ED_Aux(X, Y, i, j - 1) + 1                   # insert Y[j]
    chg = ED_Aux(X, Y, i - 1, j - 1) + d(X, Y, i, j)   # keep / substitute
    # min() also handles ties correctly; the previous strict-comparison chain
    # fell through to `chg` whenever det == ins, even when chg was larger.
    return min(det, ins, chg)


def d(X, Y, i, j):
    """Substitution cost: 0 if X[i] already equals Y[j], else 1.

    (Was comparing X[i] with Y[i] — the same index in both strings — which
    is the wrong character pair.)
    """
    return 0 if X[i] == Y[j] else 1
def Table(X,Y):
    """Build a len(X)-by-len(Y) matrix of zeros (one row per character of X).

    Each row is an independent list, so mutating one cell never affects
    another row.
    """
    return [[0] * len(Y) for _ in X]
# Demo: print the edit distance between two sample words.
X = "tigre"
Y = "trigo"
print(ED(X, Y))
"noreply@github.com"
] | lichobaron.noreply@github.com |
c191062443c1d272c2416080c10147314a90ee02 | c795a3b9121536935dea57490ef5e70e05494a41 | /python/166/D/00.py | c416afba2b23f03e0cd8665cc4ec1a508f9dd871 | [] | no_license | Koya-Katayama/abc_submits | 8d209252ad2b0f2c34b24f4845dcc98a8945c533 | ae45a4f1dbd07c523d9790e0399a5b39062f201a | refs/heads/master | 2022-05-29T15:46:37.690029 | 2020-05-03T14:04:50 | 2020-05-03T14:04:50 | 257,000,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | x = int(input())
# Search [-500, 500] x [-500, 500] for the first pair (a, b) with
# a**5 - b**5 == x, scanning in the same order as the flag-based loop,
# and print it. The for/else idiom replaces the explicit break flag.
candidates = range(-500, 501)
for a in candidates:
    for b in candidates:
        if a ** 5 - b ** 5 == x:
            print(a, b)
            break
    else:
        continue  # inner loop exhausted without a match: keep searching
    break  # a pair was printed: stop the outer loop too
| [
"noreply@github.com"
] | Koya-Katayama.noreply@github.com |
deb2816391776a3b2e5000af63e756eca309611d | 5a2c6dcb1f14eb56bef782ffde6b305e9250bda5 | /chap14/moneys/expression.py | c558e033327dc59f5c0427b660b053c54b036f09 | [] | no_license | RY908/tdd_python_implementation | 8da910bd555e5901b159bd166d6a705608ed7a80 | 8e0cafd6327c481baa17f700233978dc1812dd6c | refs/heads/master | 2023-07-10T10:05:48.738231 | 2021-08-24T06:38:16 | 2021-08-24T06:38:16 | 396,357,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .money import Money
from .bank import Bank
class Expression(metaclass=ABCMeta):
    """Abstract arithmetic expression over Money values."""

    @abstractmethod
    def reduce(self, bank: Bank, to: str) -> Money:
        """Collapse this expression into a single Money in currency *to*."""
| [
"ryuuryuu908@gmail.com"
] | ryuuryuu908@gmail.com |
03cf906edb96cb427cd37f0de2a53228c70ea321 | 2bcf18252fa9144ece3e824834ac0e117ad0bdf3 | /zpt/trunk/site-packages/zpt/_pytz/zoneinfo/Africa/Asmera.py | 9ccd9c3141892ef8d1d76acc48c773f5c5c4c4cf | [
"MIT",
"ZPL-2.1"
] | permissive | chadwhitacre/public | 32f65ba8e35d38c69ed4d0edd333283a239c5e1d | 0c67fd7ec8bce1d8c56c7ff3506f31a99362b502 | refs/heads/master | 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | '''tzinfo timezone information for Africa/Asmera.'''
from zpt._pytz.tzinfo import DstTzInfo
from zpt._pytz.tzinfo import memorized_datetime as d
from zpt._pytz.tzinfo import memorized_ttinfo as i
class Asmera(DstTzInfo):
    '''Africa/Asmera timezone definition. See datetime.tzinfo for details'''
    zone = 'Africa/Asmera'
    # Generated data — UTC instants at which the zone's rules change.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1936,5,4,21,24,40),
        ]
    # (utcoffset seconds, dst seconds, tzname) matching the transitions above:
    # ADMT (9300 s = +2:35) until 1936, EAT (10800 s = +3:00) afterwards.
    _transition_info = [
i(9300,0,'ADMT'),
i(10800,0,'EAT'),
        ]
# Module-level singleton shadows the class name (pytz generation style).
Asmera = Asmera()
| [
"chad@zetaweb.com"
] | chad@zetaweb.com |
45e1ebd5a074a09a1d65ca03853cf503707a1b2c | 7c4ef470f7822810760f397c4b4a398476a65986 | /pytorch_lightning/plugins/training_type/sharded_spawn.py | e73fcd43cf33c184fa60d2b8344ab88544f807c6 | [
"Apache-2.0"
] | permissive | bamblebam/pytorch-lightning | 559dffd9ecffe05a642dacb38813c832618ae611 | c784092013d388e45ae83a043675c627e7ca527f | refs/heads/master | 2023-08-09T21:04:47.327114 | 2021-09-14T10:27:56 | 2021-09-14T10:27:56 | 374,527,452 | 0 | 0 | Apache-2.0 | 2021-06-07T03:51:56 | 2021-06-07T03:51:55 | null | UTF-8 | Python | false | false | 4,266 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import torch
import pytorch_lightning as pl
from pytorch_lightning.plugins.precision.sharded_native_amp import ShardedNativeMixedPrecisionPlugin
from pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import _FAIRSCALE_AVAILABLE, rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
if _FAIRSCALE_AVAILABLE:
from fairscale.nn.data_parallel.sharded_ddp import ShardedDataParallel
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
from pytorch_lightning.overrides.fairscale import LightningShardedDataParallel, unwrap_lightning_module_sharded
class DDPSpawnShardedPlugin(DDPSpawnPlugin):
    """Optimizer sharded training provided by FairScale."""
    def configure_ddp(self) -> None:
        # Wrap the LightningModule in fairscale's ShardedDataParallel,
        # sharing the (OSS-wrapped) trainer optimizers with the wrapper.
        self._wrap_optimizers()
        self._model = ShardedDataParallel(
            LightningShardedDataParallel(self.model),
            sharded_optimizer=self.lightning_module.trainer.optimizers,
            **self._ddp_kwargs
        )
        # Start with backward grad sync disabled on the wrapper.
        setattr(self._model, "require_backward_grad_sync", False)
    def _reinit_optimizers_with_oss(self):
        # Replace each plain optimizer on the trainer with a fairscale OSS
        # (zero-redundancy) wrapper, preserving param groups and defaults.
        optimizers = self.lightning_module.trainer.optimizers
        for x, optimizer in enumerate(optimizers):
            if not isinstance(optimizer, OSS):
                optim_class = type(optimizer)
                zero_optimizer = OSS(params=optimizer.param_groups, optim=optim_class, **optimizer.defaults)
                optimizers[x] = zero_optimizer
                del optimizer
        trainer = self.lightning_module.trainer
        trainer.optimizers = optimizers
    def _wrap_optimizers(self):
        # Optimizers only need sharding while fitting; skip for validate/test/predict.
        if self.model.trainer.state.fn != TrainerFn.FITTING:
            return
        self._reinit_optimizers_with_oss()
    def optimizer_state(self, optimizer: "OSS") -> Optional[dict]:
        # Gather the sharded optimizer state before snapshotting it
        # (the snapshot itself is restricted to rank 0, see below).
        if isinstance(optimizer, OSS):
            optimizer.consolidate_state_dict()
        return self._optim_state_dict(optimizer)
    @rank_zero_only
    def _optim_state_dict(self, optimizer):
        """
        Retrieves state dict only on rank 0, which contains the entire optimizer state after calling
        :meth:`consolidate_state_dict`.
        """
        return optimizer.state_dict()
    @property
    def lightning_module(self) -> "pl.LightningModule":
        # Unwrapping requires fairscale's wrapper classes to be importable.
        if not _FAIRSCALE_AVAILABLE: # pragma: no cover
            raise MisconfigurationException(
                "`DDPSpawnShardedPlugin` requires `fairscale` to be installed."
                " Install it by running `pip install fairscale`."
            )
        return unwrap_lightning_module_sharded(self._model)
    def pre_backward(self, closure_loss: torch.Tensor) -> None:
        # Intentionally a no-op: overrides the parent plugin's hook.
        pass
    def post_training_step(self):
        # Intentionally a no-op: overrides the parent plugin's hook.
        pass
    def new_process(self, process_idx, trainer, mp_queue):
        # Ensure that the scaler points to the correct process group
        # which is re-initialized in a new process
        precision_plugin = trainer.accelerator.precision_plugin
        if isinstance(precision_plugin, ShardedNativeMixedPrecisionPlugin):
            precision_plugin.scaler = ShardedGradScaler()
        super().new_process(process_idx, trainer, mp_queue)
    @classmethod
    def register_plugins(cls, plugin_registry: Dict) -> None:
        # Register this plugin under a descriptive key with
        # find_unused_parameters disabled.
        plugin_registry.register(
            "ddp_sharded_spawn_find_unused_parameters_false",
            cls,
            description="DDP Spawn Sharded Plugin with `find_unused_parameters` as False",
            find_unused_parameters=False,
        )
| [
"noreply@github.com"
] | bamblebam.noreply@github.com |
8cacf66cf3e3f8a6bb6617d430bb54bd6e717fb0 | 399a500184e7e3111896e4584d35c300f49328b7 | /store/migrations/0008_alter_reviewrating_user.py | 47e88af6337592c71ee2cfbdd7aa6b15630c0418 | [] | no_license | Fakhrillo/Fun-shop-v2 | f0f51530256fbee38884bd1b6f84c23de21cbca6 | 00f08f44c8b317763985b4f24d5ddd3fba3c54e1 | refs/heads/main | 2023-08-06T17:03:59.278832 | 2021-10-09T07:43:22 | 2021-10-09T07:43:22 | 411,153,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # Generated by Django 3.2.7 on 2021-09-27 21:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: repoint ReviewRating.user at accounts.Account (CASCADE delete)."""
    dependencies = [
        ('accounts', '0003_alter_userprofile_user'),
        ('store', '0007_alter_productgallery_options'),
    ]
    operations = [
        migrations.AlterField(
            model_name='reviewrating',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.account'),
        ),
    ]
| [
"dombit1999@gmail.com"
] | dombit1999@gmail.com |
f112a407a6740348d20de06c4c9e012ea20b1565 | 3b46f7b096efb9d814c3c343ef7c6cd4cd6b6a6e | /JOSAApaper/slopes_vs_whole.py | 83e1b728503786a824a0f15a8b29640ccce1005e | [] | no_license | ComputationalAstrologer/Optics | d265774ab4af009f482e1a4eb4af4e1027b1965f | fc7c277f45866ee0a746b517a184680232eae0f3 | refs/heads/master | 2023-06-23T23:30:47.889328 | 2023-06-12T21:42:58 | 2023-06-12T21:42:58 | 132,665,393 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,536 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 26 16:22:41 2017
"""
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from os import path
import time
import Pyr as pyr
#%%
# Baseline run parameters for the pyramid WFS simulation (Pyr module).
strehl = 0.4
pcount = 1.e7  # photon count — not used in the visible cells
reg = 1.e-7  # regularization parameter — not used in the visible cells
ntrials = 1
sigma = pyr.StrehlToSigma(strehl)  # phase RMS corresponding to this Strehl ratio
print("initialzing...")
t0 = time.time()
p = pyr.Pyr()  # timed below because construction is slow (minutes)
t0 = (time.time() - t0)/60.  # elapsed minutes (t0 reused as the duration)
print("done initializing in ", str(t0), "minutes.")
c0 = np.zeros(p.nc)  # zero-phase (flat wavefront) coefficient vector
#%% see how much of the light falls in the pupil images
ntrials = 30
strehl = np.linspace(.08, .999, 16)
sigma = pyr.StrehlToSigma(strehl)
frac = np.zeros((len(strehl), ntrials))  # fraction of light inside the pupil images
for k in range(len(strehl)):
    for tr in range(ntrials):
        # Random phase screen with RMS matching this Strehl ratio.
        ph = sigma[k]*np.random.randn(p.nc)
        ph -= np.mean(ph)  # zero-mean the phase (removes the piston term)
        If, dIf = p.Intensity(ph)
        Ip, dIp = p.PupilImageIntensity(ph, g=None, slopes=False, normalized=True)
        frac[k, tr] = np.sum(Ip)/np.sum(If)
mean_frac = np.mean(frac, axis=1)  # Monte Carlo mean over the trials
std_frac = np.std(frac, axis=1)
#%%
# Plot the mean fraction of light lost outside the pupil images vs Strehl,
# with error bars from the per-trial scatter, and optionally save as EPS.
plt.figure(41)
plt.plot(strehl,1 - mean_frac, 'k-', lw=1)
plt.errorbar(strehl,1 - mean_frac, yerr=std_frac, fmt="none", elinewidth=3)
plt.xlabel('Strehl ratio', FontSize='large')
plt.ylabel('fraction lost',FontSize='large')
plt.title('Light Outside of Pupil Images', FontSize='large')
plt.xticks(.1*np.arange(10) + .1)
if True:  # flip to False to skip saving the figure
    dir = '/Users/rfrazin/docs/Papers/JOSAA/pyramid/figs/'  # NOTE: machine-specific path
    fn = dir + 'light_lost.eps'
    plt.savefig(fn)
#%% eigen analysis stuff
# Intensities and Jacobians (dI*) at the flat-wavefront point c0 for the
# four measurement models:
#   f = all detector pixels, p = four pupil images (normalized),
#   s = normalized slopes,  r = unnormalized slopes.
If, dIf = p.Intensity(c0)
Ip, dIp = p.PupilImageIntensity(c0, g=None, slopes=False, normalized=True)
Is, dIs, Istot = p.PupilImageIntensity(c0, g=None, slopes=True, normalized=True)
Ir, dIr, Irtot = p.PupilImageIntensity(c0, g=None, slopes=True, normalized=False)
# Economy-size SVD of each Jacobian (the bare positional arguments below
# are full_matrices=False, compute_uv=True, as spelled out in the first call).
ur, sr, vr = np.linalg.svd(dIr, full_matrices=False, compute_uv=True)
uf, sf, vf = np.linalg.svd(dIf, False, True)
up, sp, vp = np.linalg.svd(dIp, False, True)
us, ss, vs = np.linalg.svd(dIs, False, True)
# Normalize each singular spectrum by its largest value so the four
# models can be compared on one plot (figure 10 below).
sr /= np.max(sr)
sp /= np.max(sp)
ss /= np.max(ss)
sf /= np.max(sf)
# Regularized pseudo-inverses: singular values below 1e-6 of the largest
# are truncated via rcond.
pIr = np.linalg.pinv(dIr, rcond=1.e-6)
pIf = np.linalg.pinv(dIf, rcond=1.e-6)
pIs = np.linalg.pinv(dIs, rcond=1.e-6)
pIp = np.linalg.pinv(dIp, rcond=1.e-6)
#%%
# Monte-Carlo comparison of linear (pseudo-inverse) wavefront reconstruction
# for the four measurement models, cached in a pickle so the expensive scan
# only runs once.
picklename = 'slopes_vs_whole.p'
if not path.isfile(picklename):
    # print as a function call works identically on Python 2 and 3
    # (the original py2 print statement does not parse under py3).
    print('file: ' + picklename + ' not found. doing calculations.')
    ntrials = 60
    # Denser Strehl sampling at the high end, where the estimators differ most.
    strehl = np.hstack((np.linspace(.1,.7,7),np.linspace(.8, .999, 10)))
    sigma = pyr.StrehlToSigma(strehl)
    # Per-(Strehl, trial) RMS residual for each estimator:
    #   s = normalized slopes, f = all pixels,
    #   r = unnormalized slopes, p = four pupil images.
    # (Redundant zero-initializations of mean_*/std_* were removed; they
    # were unconditionally overwritten below before any use.)
    score_s = np.zeros((len(strehl), ntrials))
    score_f = np.zeros((len(strehl), ntrials))
    score_r = np.zeros((len(strehl), ntrials))
    score_p = np.zeros((len(strehl), ntrials))
    for k in range(len(strehl)):
        for tr in range(ntrials):
            # Zero-mean random phase screen at the requested RMS level.
            ph = sigma[k]*np.random.randn(p.nc)
            ph -= np.mean(ph)
            ys, _1, _2 = p.PupilImageIntensity(ph, g=None, slopes=True, normalized=True)
            yr, _1, _2 = p.PupilImageIntensity(ph, g=None, slopes=True, normalized=False)
            yp, _1 = p.PupilImageIntensity(ph, g=None, slopes=False, normalized=True)
            yf, _1 = p.Intensity(ph)
            # Linear phase estimates about the flat-wavefront operating point.
            xs = np.dot(pIs, ys - Is)
            xp = np.dot(pIp, yp - Ip)
            xr = np.dot(pIr, yr - Ir)
            xf = np.dot(pIf, yf - If)
            # Score = RMS of the residual phase error.
            score_s[k, tr] = np.std(ph - xs)
            score_r[k, tr] = np.std(ph - xr)
            score_f[k, tr] = np.std(ph - xf)
            score_p[k, tr] = np.std(ph - xp)
    # Across-trial statistics, converted from radians to degrees.
    std_r = np.std(score_r, axis=1)*180/np.pi
    std_f = np.std(score_f, axis=1)*180/np.pi
    std_p = np.std(score_p, axis=1)*180/np.pi
    std_s = np.std(score_s, axis=1)*180/np.pi
    mean_r = np.mean(score_r, axis=1)*180/np.pi
    mean_f = np.mean(score_f, axis=1)*180/np.pi
    mean_p = np.mean(score_p, axis=1)*180/np.pi
    mean_s = np.mean(score_s, axis=1)*180/np.pi
    # Cache everything the plotting sections below need.
    stuff = {}
    stuff['ntrials'] = ntrials
    stuff['strehl'] = strehl
    stuff['sigma'] = sigma
    stuff['mean_r'] = mean_r
    stuff['mean_s'] = mean_s
    stuff['mean_f'] = mean_f
    stuff['mean_p'] = mean_p
    stuff['std_r'] = std_r
    stuff['std_s'] = std_s
    stuff['std_f'] = std_f
    stuff['std_p'] = std_p
    # Pickle data is binary: 'wb' (not 'w') is required on Windows and is
    # equivalent on Unix; 'with' guarantees the file handle is closed.
    with open(picklename, 'wb') as fp:
        pickle.dump(stuff, fp)
else:
    with open(picklename, 'rb') as fp:
        stuff = pickle.load(fp)
    ntrials = stuff['ntrials']
    strehl = stuff['strehl']
    sigma = stuff['sigma']
    mean_r = stuff['mean_r']
    mean_s = stuff['mean_s']
    mean_f = stuff['mean_f']
    mean_p = stuff['mean_p']
    std_r = stuff['std_r']
    std_s = stuff['std_s']
    std_f = stuff['std_f']
    std_p = stuff['std_p']
#%%
# Base-10 log versions of the error curves (in degrees).
# NOTE(review): these l* quantities are not referenced anywhere in the
# plotting code that follows — presumably left over from an earlier
# log-scale version of the figures; confirm before deleting.
lsigma = np.log10(sigma*180/np.pi)
lmean_s = np.log10(mean_s)
lmean_f = np.log10(mean_f)
lmean_r = np.log10(mean_r)
lmean_p = np.log10(mean_p)
# Asymmetric (upper) error bars in log space: log10(mean+std) - log10(mean).
lstd_s = np.log10(mean_s + std_s) - np.log10(mean_s)
lstd_f = np.log10(mean_f + std_f) - np.log10(mean_f)
lstd_r = np.log10(mean_r + std_r) - np.log10(mean_r)
lstd_p = np.log10(mean_p + std_p) - np.log10(mean_p)
# Figure 33: RMS residual wavefront error vs. input Strehl for each
# estimator, zoomed to the high-Strehl regime.
plt.figure(33)
plt.clf()
h0, = plt.plot(strehl, sigma*180/np.pi, 'k-', lw=4, label='no gain')
hs, = plt.plot(strehl, mean_s, ':', color='r', lw=3, label='NormalizedSlope')
plt.errorbar(strehl, mean_s, yerr=std_s, fmt="none", elinewidth=2, ecolor='r')
hp, = plt.plot(strehl, mean_p, 'm-', lw=2, label='FourImages')
plt.errorbar(strehl, mean_p, yerr=std_p, fmt="none", elinewidth=2, ecolor='m')
hr, = plt.plot(strehl, mean_r, 'b-.', lw=2, label='UnnormalizedSlope')
plt.errorbar(strehl, mean_r, yerr=std_r, fmt="none", elinewidth=2, ecolor='b')
hf, = plt.plot(strehl, mean_f, '--', color='orange', lw=3, label='AllPixels')
plt.errorbar(strehl, mean_f, yerr=std_f, fmt="none", elinewidth=2, ecolor='orange')
plt.legend(handles=[h0, hr, hs, hp, hf], loc=1)
# 'fontsize' (lowercase) is the matplotlib Text property; 'FontSize' is
# rejected by matplotlib >= 2.0.
plt.xlabel('input Strehl ratio', fontsize='large')
plt.ylabel('RMS error (deg)', fontsize='large')
# Fixed "Psuedo" -> "Pseudo" typo in the title.
plt.title('Pseudo-inverse solutions, no noise', fontsize='large')
plt.xlim((0.84, .99))
plt.xticks(0.85 + .02*np.arange(8))
plt.ylim((-2, 35.))
if True:  # toggle to save the figure as EPS
    fig_dir = '/Users/rfrazin/docs/Papers/JOSAA/pyramid/figs/'  # renamed: 'dir' shadows the builtin
    fn = fig_dir + 'pinv_solutions-HiStrehl.eps'
    plt.savefig(fn)
# Figure 34: same comparison as figure 33, over the full (low) Strehl range.
plt.figure(34)
plt.clf()
h0, = plt.plot(strehl, sigma*180/np.pi, 'k-', lw=4, label='no gain')
hs, = plt.plot(strehl, mean_s, ':', color='r', lw=3, label='NormalizedSlope')
plt.errorbar(strehl, mean_s, yerr=std_s, fmt="none", elinewidth=3, ecolor='r')
hp, = plt.plot(strehl, mean_p, 'm-', lw=2, label='FourImages')
plt.errorbar(strehl, mean_p, yerr=std_p, fmt="none", elinewidth=4, ecolor='m')
hr, = plt.plot(strehl, mean_r, 'b-.', lw=2, label='UnnormalizedSlope')
plt.errorbar(strehl, mean_r, yerr=std_r, fmt="none", elinewidth=2, ecolor='b')
hf, = plt.plot(strehl, mean_f, '--', color='orange', lw=3, label='AllPixels')
plt.errorbar(strehl, mean_f, yerr=std_f, fmt="none", elinewidth=2, ecolor='orange')
plt.legend(handles=[h0, hr, hs, hp, hf], loc=1)
# 'fontsize' (lowercase) is the matplotlib Text property; 'FontSize' is
# rejected by matplotlib >= 2.0.
plt.xlabel('input Strehl ratio', fontsize='large')
plt.ylabel('RMS error (deg)', fontsize='large')
# Fixed "Psuedo" -> "Pseudo" typo in the title.
plt.title('Pseudo-inverse solutions, no noise', fontsize='large')
plt.xlim((.09, .851))
plt.ylim((0, 140.))
if False:  # saving deliberately disabled for this figure
    fig_dir = '/Users/rfrazin/docs/Papers/JOSAA/pyramid/figs/'  # renamed: 'dir' shadows the builtin
    fn = fig_dir + 'pinv_solutions-LoStrehl.eps'
    plt.savefig(fn)
#%%
# Figure 10: normalized singular spectra of the four intensity Jacobians,
# with the final singular value dropped from each curve.
plt.figure(10)
a = np.linspace(0, p.nc-2, p.nc-1).astype('int')
hf, = plt.plot(a, sf[0:-1], '--', color='orange', lw=3, label='AllPixels')
hp, = plt.plot(a, sp[0:-1], 'm-', lw=2, label='FourImages')
# Markers at (796, 0) flag a zero singular value for three of the models.
# NOTE(review): presumably 796 == p.nc - 2 (the last plotted index) and the
# zero corresponds to an unsensed mode — confirm against the Pyr model.
plt.plot([796], [0], 'ko', markeredgecolor='k', markerfacecolor='w', markersize=8, markeredgewidth=2)
hs, = plt.plot(a, ss[0:-1], 'r:', lw=3, label='NormalizedSlope')
plt.plot([796], [0], 'o', markeredgecolor='r', markerfacecolor='r', markersize=5, markeredgewidth=2)
hr, = plt.plot(a, sr[0:-1], 'b-.', lw=2, label='UnnormalizedSlope')
plt.plot([796], [0], '.', markeredgecolor='b', markerfacecolor='b', markersize=2, markeredgewidth=2)
# 'fontsize' (lowercase) is the matplotlib Text property; 'FontSize' is
# rejected by matplotlib >= 2.0.
plt.xlabel('index', fontsize='large')
plt.ylabel('normalized singular value', fontsize='large')
plt.title('Intensity Jacobian Singular Values', fontsize='large')
plt.ylim((-.01, 1.))
plt.yticks(([0, .05, .1, .15, .2, .3, .4, .6, .8, 1.]))
plt.legend(handles=[hr, hs, hp, hf], loc=1)
if True:  # toggle to save the figure as EPS
    fig_dir = '/Users/rfrazin/docs/Papers/JOSAA/pyramid/figs/'  # renamed: 'dir' shadows the builtin
    fn = fig_dir + 'singular_values.eps'
    plt.savefig(fn)
| [
"polygraph911@gmail.com"
] | polygraph911@gmail.com |
2736cd03881b87e222ecd21b6b92c7e5445f98f5 | 31d5bebb9667b17a17fe98a5c3064cac5a0fd4dd | /calisanProfil/urls.py | 219a0406652a00ff45e0ff330c77ec07ab045d24 | [] | no_license | refik/audio | d6b8829fafcfa2c54e6f477ceede210a2f5d4f41 | 011b7b0f01d14529b91bf6f4d3c5919823e19e6b | refs/heads/master | 2021-01-17T17:06:06.202561 | 2015-09-17T05:38:22 | 2015-09-17T05:38:22 | 1,948,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from django.conf.urls.defaults import *
urlpatterns = patterns('audio.calisanProfil.views',
(r'^musteri-temsilcisi/', 'temsilci'),
)
| [
"refik.rfk@gmail.com"
] | refik.rfk@gmail.com |
0fe9c746bbc3125b3782ee930a2c84333c287b39 | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/python/Summer20UL17/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8_cff.py | 554733df78bf4f09d59066ea4d14db9921190388 | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 105,012 | py | import FWCore.ParameterSet.Config as cms
# Process every event in the input files (-1 means no limit).
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Primary and secondary input-file lists; readFiles is populated by the
# readFiles.extend([...]) call below with MiniAOD paths.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
# PoolSource reads the ROOT files named in the two lists above.
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/040F76C0-87F9-1E40-AF31-3839718CE4FA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/0ED5D3A8-76D6-834D-9D57-11AE93630524.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/104E86BB-B4B5-A440-9900-9CEDC0E4462C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/169B7151-D553-294C-B6FC-A61CBC65E981.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/17F1FBF2-CEB1-D046-A12F-2E656895323A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/1A6EF3DD-96B3-D74F-926F-D13178E2BAA1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/20158F70-DF2A-BA4F-9AD4-B293D000B25C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/29BCEAE2-A276-A644-A311-4C7B36915430.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/2E3EFFAB-C362-4B42-BC6D-B5B5FD3FA319.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/2F3018CB-5898-0846-9A6D-CD37A7639B4E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/33FA73F1-2420-B94F-BD40-717F23AB713E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/34A39E04-F0E4-A548-9DFF-5528F8882B96.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/37C16094-B604-F34B-BFC0-021DD99CF58A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/39314427-5F22-394C-A4BA-ABDE88A80F64.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/3C168CF8-96F0-BF49-9BF9-846DA1A717F9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/3E7712CF-6072-EB4D-87D4-E66B19A62D7B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/44F8C83B-C4A2-9841-8146-2375B18FDC64.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/46FAC75C-262E-234F-8165-F035016EEF34.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/4ADED794-FD71-E849-9B1F-E84F9EAA192E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/4CEDDD7C-0A0E-6043-BBB6-10ACF6092138.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/4D533C05-E4FA-EF44-8887-79732CA99DD8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/4E7C6A46-B7BD-AD40-835F-20A03A3CFD3E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/52663E9C-0CDC-2449-BC02-762868681005.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/543FAAE3-8C2C-CE42-BF60-DFC2B41135D7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/54AD8154-7315-3145-B13D-8A08C9DFDE01.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/5E3D50D1-3AA2-9D4F-AEE7-F296C15C78A2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/6173FC89-EF0E-424B-90CF-DD683F164A83.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/64068C1C-3644-8149-ABA5-83FE60FDAA5C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/645F42A6-BB7B-5444-8505-F3F9E55C23DD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/6663F93E-7CAF-0F4D-85A0-E7382074942B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/6B3C235F-193D-8C4B-82B9-72BEBB7C8E74.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/6E5EE289-F0F6-9B40-9197-DCFBF36E114F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/716CCBE8-72BE-DB45-80AF-251FF614A1F8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/73247DD8-2F4B-064B-92E5-F65B1B234C09.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/7D422F0D-CA1F-E24F-BD59-FABA861FB6A2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/81F70542-41B0-014D-B077-049CE4BE3804.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/943A82EC-7B16-8747-AE44-5C42207BC8D1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/9B54A8B8-C366-284A-BEE4-0B4F13DB6640.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/9E098250-B3F0-B34F-B229-59E6C02DE706.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/9F562254-F439-284C-BEE0-4AAF92F2282F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/9F73D23D-BCC9-0144-A87F-EEBD4820B8D6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/9FD2B99B-1A10-B944-9B8D-F3712E34A989.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/B0606CC2-EE18-114A-B5FA-B34D7339B7BD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/B0646D47-3D12-FE48-8160-5676096E3FEE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/B7FEA6F8-47DD-D64A-B551-9FD590B18AEE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/B8BFE5FB-1A8F-9F40-85D1-149212DC7179.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/C057013A-EE37-2C4A-B869-BE0F72BF25F2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/C38C148A-F608-4B43-BB32-1D4C99D237E3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/C44D29C2-87EB-DF4B-97CA-C0B79B10494B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/C786572C-6711-8541-8E66-FB4AE5F7D286.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/CB1C829B-D783-0442-9CED-1F6631C0FD18.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/D2616535-6FB7-544B-A3C5-9BE5BBA010B6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/D4561D48-4A88-2E48-83F9-98CAC8761578.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/D8151204-D88F-CC48-9E1B-0BC197294FA3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/D91BA312-2BCB-744D-9F39-BD59DFB1F3C4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/DDCCCC66-401B-4D4C-BF8D-538E5C416240.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/DF79181F-6FF4-554F-8BF9-0C2C8061C9F9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/E3211257-9E34-3448-A29E-DB93425435D0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/E3D8D243-29B4-104E-942F-3945370859B9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/F5F20E96-B3AB-C749-BEFB-D5165198BD38.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/F62B67C4-8AE2-1745-930D-7D7D2C10E31F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2430000/F976AE76-E95C-9840-9EA3-09EBC715FF99.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/00777B57-81C2-4D45-BBBC-DC66CB13205C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/00C74ACE-A715-2D44-B290-955D8DB034CB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/029DEC73-933B-2A4B-AA7B-4454475F6065.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/042EF133-E994-2042-9581-E8F292D4E693.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/047EE3D3-8D9C-1843-BA16-1F2848C97630.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/05196994-4E58-134B-94FD-8A59FDB6E08B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/07600D68-2C77-E246-ACFC-6F8FEF4E1427.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/0A58321F-A7FE-9940-B0F0-BFBF7B4B206F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/0CCD1841-7EBE-A442-B5CC-1170A7167B10.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/0CD86BC1-E37F-2B43-99A5-C4899386C5C9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/0CE6A61F-C334-A04A-AD05-69FB574F54D2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/0D367BF0-846B-3144-B664-22F52562E3A5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/0F0977FE-63D3-2045-9797-63255D22B534.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/0FA5F865-303E-004B-9E63-5F20EC317022.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/118B8A9F-64D5-D34B-A4B7-EEDDD299FF0F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/13694B20-2734-BD4E-A250-889BF0783246.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/138AC7AE-7FDE-2645-8B64-462D4F44C76F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/1496EB1E-5BDA-864A-8F48-52F4A83951F5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/14E9B5A4-82FF-D944-A553-91B67E4A5CBE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/150B3888-30BE-DC4E-8A7C-802DEE569E31.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/1872EF53-7DBD-C543-AD01-12005F7B2185.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/188125E4-FF59-CF40-A4CA-B8808D429D21.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/1B956203-923F-B54F-A604-DF3941A327CF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/1C384746-185C-D64D-A80F-23EFA3AEAE3C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/1C5C319F-A2CD-B941-A897-E83550ECBA8B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/1D82469C-BC77-3D4F-B878-EFFD7691D574.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/1E73EA4C-F196-DF4C-8EAB-756F94441BEB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/1F40E503-923F-9542-BBF4-5164C1D17F5F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/20377682-6C0A-A042-BE0C-868C876DDB84.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/2054F4C0-30DC-CD4B-9924-C35473D92413.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/217E51A2-4774-4A48-AB02-13FE41026084.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/21C7EF48-46FF-5F49-837A-A3D708BC17EE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/2253EEEA-98AE-7A4B-B6E3-EFC1EC009844.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/236C4A5A-32F8-1A46-BD79-727223E10DC9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/23EFCD49-94F2-7D40-90AE-FC481386888A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/25D7368A-9F2B-794B-932A-CED5EA6D783F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/26949464-3F9B-6147-865A-99202ED63217.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/27AAF8AC-0FE4-6C4F-9E87-AA943C67C33C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/28547707-F9B9-8F44-A3B9-64E56D962879.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/29B93AA2-AC7D-134E-AB56-65D31599A5AF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/2A01401E-9E21-7A4F-8D0B-2B77BBD8F623.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/2C325FB6-F0FC-814E-A3B5-9E917B22B7F9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/2C4D7DC4-9011-9545-B411-BEF6BBC7D444.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/2D779E68-18A7-3043-A4B1-7D6C2A27EA05.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/2E4D91B0-CBB5-F449-9B18-00185293EAC8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/301C771F-C079-7B41-B6BC-6CE2B72D48E9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/30350157-53A6-E542-8300-9190A499D14E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/303CB3E5-4DCA-8446-9793-1D8676709ACF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/30968F79-4C3B-AA45-8596-F144C6988052.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/32A088F9-9769-8B45-831E-27E839133333.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/347D95F0-31D1-FC4F-98BF-F6C421D8A317.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/36A6DF53-EE63-0248-AD6E-A48497621D31.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/38926556-9AEF-474C-A1AF-9A17D099D9C2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/3E41755F-03BC-1046-8569-E1C26C74EC40.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/3F2A3ECE-657F-8A4C-B3EE-08EAE92FFC9A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/3F7AD088-E8F3-8B43-889C-355038DD99E9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/4078FD40-487B-DB40-9DD2-CB5C47157624.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/409E4B19-0090-4B41-A8FC-20AA80666423.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/4100F425-1303-8344-AD7E-86C25ECE34AB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/4140939B-1857-F947-ACFB-5BF9ABF594AB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/44AD1EA1-D2AE-834F-9DEC-F6D782DD962E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/467F3890-B4D2-6F41-8285-B31F70D5E955.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/473B6D39-9340-714A-AF88-AAB81D130444.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/493A474C-2429-4146-BB04-2A12FF6AA7B4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/4D1FA4DB-6045-FC48-8645-593F7A1E31D4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/4E7C714F-B9D3-9949-BC70-9969A126A437.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/4E815F0C-EE82-E842-9992-6BA1549A204B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/4E8A1EC0-6230-C14E-9F04-EDD03BBDD669.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/4E9BC393-E042-B040-BFD7-4CD0C12E4D20.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/50BCFA54-FFF5-D640-8A78-F081624A63EE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/52D9D3D5-0E65-8B4C-BBBC-39AC8E771464.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/540AF028-C653-474D-AFAB-A00752162AF9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/5850C4E9-6A60-A641-ABE4-E0585CE77FAC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/58A8BBA9-8397-C74D-BFF0-4710743CB416.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/58B6325E-4030-9D4A-9B0D-F1756BD74B87.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/5914FC48-8226-2F42-A004-1F6870DA7084.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/59191BDD-BD28-B445-BD04-0061ED5912F7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/5A65EA71-B4BE-F446-84E4-0B777716BBF1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/5B2D2379-BC9C-3349-B51E-83C6E0BF763C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/5D476D0E-AFE0-7B43-A12F-80943C4CA1AF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/5F6BAD25-EBCF-D14F-9F8E-155F697532E6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/5FF9033B-F9B7-DF45-AD9F-96B40F4AFE24.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/603A9239-242C-A14A-822A-2BA55A1E119B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/630748BD-6677-0B4C-91AD-ACA0DE286CDF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/63175522-C9DC-384A-B28D-ABA6F6DA3FE3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/63768F97-657D-E544-9D94-F26A97C4B8C4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/656EF9F6-364C-9F4A-9AF7-DB2F7BFB58E0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/657A6F28-7626-3B4A-9729-35DC5B495113.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/660A400D-E7FF-F140-A6AC-523B44144647.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/66B72500-28E3-F146-B70D-D92AD6777C85.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/677B2B97-F067-2047-9AD1-88DAD65C8344.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6883B711-4C3A-C444-9045-9FB8D8C6C1FA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6901DF07-5E7B-644F-B569-E8EBD0D9A514.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6945D3FE-E708-5F42-A6CC-FD4106E83A4F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6C08B1DF-9F2D-7D45-8070-106A106A31C1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6CDE63A0-7240-9F49-B7EB-054451468AC8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6D525D4E-9655-0945-AD4C-B590910D8D79.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6DCA647B-49C2-7648-9B4D-DFBB94955F40.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6DF6E314-3066-B348-8617-399858EEBB55.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6E47A340-692A-0142-B016-B5A546733A31.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/6F7099A7-5839-AA42-9555-5ADB85166F53.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/70174527-7019-A04E-82C1-111A6E8376EC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/730A10E2-0FF7-BA4B-A4F3-CC03FF4D8552.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/7321B551-1AEC-BA4C-9C85-B8866151E187.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/7369D6B0-7F37-C440-AF74-7E83BE2AAC52.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/73893747-4CD5-3444-9F52-5E991645AF69.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/748828EC-C328-E345-8304-F9B07F1A30C5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/76DB2C7D-609A-DA4E-A3BC-791EC58DC2EC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/7A734FE7-992B-9A45-8270-CA5D25453153.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/7B8432C3-CFBB-084D-ABD8-B490B8BDB458.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/7C34F013-233F-B74E-BD0A-82E37290078D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/7CAD78A4-0E90-AD4C-A915-D90704C310AC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/8016FE8B-02E4-6B46-849F-52C3E3831F07.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/80A5DF73-B8D0-F942-84E3-8CA42423526E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/815E8E47-4259-1E45-861E-0129D94328B8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/8181B93D-4C5B-1149-8B56-CA475E94F8FE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/82C1426D-9FD4-F148-BD26-D211981D0321.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/83595795-6C5E-0648-9529-BA841DD40E2A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/8AE4C08C-7E77-334E-A757-78F8EC68C993.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/8BD286EE-55B7-DC40-82D9-2B9CC5746262.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/8C096C9F-EFE5-5B4F-8684-66D0AF896840.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/8E54D8A6-36A3-2940-B5B1-D2D8B11BFD44.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/913EB983-AD4D-2543-9862-559CBF6D393C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/91858A48-D4F0-224A-9B02-9F4AD031ABCB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/9269975D-E47C-E842-AE09-27F761955122.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/932005BA-22BB-F64B-BEA7-BE25E9597FBE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/9571BCA1-38E9-AA4A-ACD2-6108F90AAE9F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/97B2B0DE-D228-174D-B372-CF64FDA0D119.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/9BBBC218-2BF8-554B-B285-E117DD7A42E1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/9C4F84E3-F633-5549-AA05-211F5A147D4F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/9DB7913C-2BD5-7D46-BDB6-6A10350AF781.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/9E8E7394-6AFA-0846-AE2B-DF2B90D4C490.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/9E924120-35AC-EE44-9BF3-2B75250B863F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/A00995CB-BDDE-4543-BA66-0FF163787686.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/A054F21E-756F-B94E-B329-2D3EF5880AFB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/A1887F2D-2BA2-2643-9A46-110687978AB0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/A3293DA1-54E6-6342-A912-2817A2795D58.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/A3EBE262-065B-C449-9F50-BFCBAE5203ED.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/A422A9AF-D765-BC4B-B2B2-5F3457B77743.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/A4C74C16-BFAD-0E4E-9FBF-14A7A07C17A5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/A68D0205-9C36-FE41-8BA2-0CAE5E5E4B71.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/AA157633-EC08-604D-A9CA-60E80718CA97.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/AA6BA623-FA6B-6A4E-8887-9A35EBC45A34.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/AC224873-2AAF-2B4A-B101-638F74F13AD1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/AC5AEAA0-3BB3-A942-9C77-862E75404AD2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/B07D9AFE-26A1-EC4C-9757-542A49D28EDC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/B0AF0C65-9E94-9645-A09D-6806730E3B92.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/B0B689DB-8D3D-7740-89CC-60E6E3A1B21E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/B12D9EBF-4626-E04F-A907-DF3AF675F37F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/B36AD251-69A8-3840-9485-5DB32E878778.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/B42422B4-CAF8-5542-B975-21C342831460.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/B91D50CD-29BE-DB48-8688-2E44E4781822.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/BA915018-4BBC-6344-B6ED-500168508D09.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/BB087B68-BC41-564B-937F-56D5C31D8F06.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/BC50CD01-3D2E-CA40-B2E4-D3B2631555DA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/BC7DB403-843B-3749-8C57-5E02A6AABF1C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/BCF598D1-4A70-1643-9419-3889EEEC1D13.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/BD1A1455-3AD4-2543-B447-209C65DF60F1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/BE6B1817-42B7-C844-AFF6-A2A3BA55C9F3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/BF0AB3EE-3055-6845-8D43-7E2E9F5010F5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C255AE8D-B549-A742-B9E2-3A1B3FD774F0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C2651212-9D07-1945-B9E6-37D57B7BC8C5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C27E1A28-7E5B-B04F-9C46-7351239AE26D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C32C49A7-7157-3D46-B8B6-2D13C91E9A39.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C36E9FA8-B17B-B74A-B136-6E20AA18BF74.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C5AB0CBE-1900-DD45-A2AB-300C20E7D68C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C5C8B096-061D-9445-9B0E-EA210E1E0C0C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C66B00CC-593E-604F-875E-A5DDB0281772.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C679F22C-6826-D847-B78F-3378FFDC3F5D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/C8CFCA4C-10AF-984C-9BAB-6602DF98F4CE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/CAFA98B8-9DB6-7C49-94DF-9D4B8A9CFEF6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/CB081021-1483-1544-80AA-0ECBB0035DF6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/CBB986AC-E0A7-3247-A78B-352EE8597431.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/CC2BE306-7B87-A64D-AE1C-C00E52EBEF9F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/CC85DBB2-DD3B-7147-BE42-8AF22A8B5708.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/CFF969A0-9E4A-9B46-AE9C-2E8FC182356C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/D2B26CB8-F23C-444A-8CBA-B8CEF4F39293.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/D3B19CEA-7280-BE42-AB91-26B9F03FAA8D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/D4FD49B4-A4BE-BD44-A371-1CE0A869C34D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/D5A93A11-11E9-9B45-809A-42521078D7AC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/D8EF8F5F-3E25-4F4E-A9B5-B16E3FCB4E9F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DB38C316-013B-B142-A4AB-D23AA834581B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DB978157-77AC-B046-8508-2793B54792EE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DBD2E541-4D63-D94D-961A-6C9849CF0093.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DBECB541-787A-E54A-9A10-EFD68B00EAE4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DC71ED02-9857-3B41-B6C6-241C65AD3B5E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DCC2F861-2B3C-AB46-A907-AAEF041A0354.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DCD15F46-4252-604D-A503-90F6B8BD9B7A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DCED4A3A-24EC-5345-B18E-DF4A0EEF5AC4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DD684B78-5DB6-CF44-86BB-65C7B210A51A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DDD69870-35F1-E149-B14A-4522C9FB01A8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DE800EF6-CDEA-B44C-A3A8-2BE2415344EC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DF4FBE57-94D4-F54A-873C-80CFF8594ADC.root',
] )
readFiles.extend( [
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DF5CAC58-ACF0-E84B-8FE9-016F0F6C832A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DF6D18B3-8156-1049-B293-94D3AEB405C4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DFCCFFF1-88E9-1943-BFCC-7275E0331484.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/DFFBCEFF-D35D-D447-936D-6842EEF781FB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/E1DD9338-9B03-8240-852D-7481CEA7CFB5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/E36CF180-85D6-9D45-AB3A-5242746091B4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/E3FC0EC0-EEA9-DA48-BDE0-8BAEA5766FD4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/E43EC89A-0667-1744-901A-218E164C0BD6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/E6E1D413-D403-D443-BEA1-14A7692C1020.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/E9FE08D4-542C-824D-9A9C-5DF0C7A349FF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/ED487309-DE19-7F47-A752-4533439BD2C5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/EDE1BE0C-E3CE-7A41-97E4-7E275A50655E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/EDE5F7CB-72EC-1543-B6E8-49542DF59F93.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/EE476D5D-4894-514D-B8F7-8B861873316F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/EE4B7ECE-17A2-3E4A-97B6-739056B7ACC3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/EE6A5BCA-DE5E-AE4C-BDF5-6920A3643D8B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/F0980C96-C55A-6446-A15F-C76B41B663A2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/F0D0F5A4-B723-7F4B-86B1-30BBDE5BB961.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/F10BE888-DDC0-C345-9FE1-D8B4EC550C01.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/F24FB930-872E-AA47-B380-2F2E97E86EB8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/F2D05199-1AB3-B74B-8F8E-CB7F9F12AF76.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/F4A2597D-BA5A-0540-B8BE-F42BFACD41E7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/F7E242C9-A334-F745-A27D-83C097097377.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/F974938D-1D61-B041-BE1A-AF781C4F3988.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/FA617A96-8E3F-A44F-8B24-589D3B603486.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/FACDD593-B8E0-2D4D-A033-D20DF72E0FC4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/FBE301F7-7320-1640-B60B-B9AF11F098C6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/FC01AAF4-C620-5D42-AE67-84B6BD99EC19.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/FC7E7C44-BF7D-184A-A2CC-9938CD34A848.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/FD9BA385-76A0-4846-ADFB-4756287282FE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/FEC364E5-55EC-A446-9F70-51BCA250A75B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/2530000/FFEBEA04-69CA-004B-A6E1-A966999B7E9D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/015D2D67-DB95-DB4B-B9C7-52E124208AFE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/190B02DC-C207-4E4B-8D72-5181673C11FA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/1C494398-6963-B942-846B-E6AB14C90046.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/26658BF2-5863-3740-B26D-4C0B08684982.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/29A19B48-E90C-CC43-9441-298767DBEEA4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/37263419-6E54-6C47-9505-4CD485603CEF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/4CFC154D-A3E6-A745-93E2-AF8E6CDEEE6F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/52D50C21-0DFD-BF45-A0B0-B1107F34D541.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/546167FB-AC19-CC4B-9EC1-F7B006EB0890.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/56B0C283-3F35-E443-A07B-E262C2DE4BDD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/5D7C59F8-EADC-8446-930E-ABD916915004.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/8DDDE812-094C-0A46-AE18-FA8228CDF520.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/97A1D404-28EB-1148-94C5-7FF74B3EE847.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/98B20C9C-3EE9-4943-9686-47D85630328B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/ADA95267-DC42-0144-8E3E-E3A5A997F354.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/AE92704B-C447-4C44-B2E9-D59E16B45AD8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/BA2EC183-F9C6-BA47-998F-D8CDBCC443CF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/BCBE3B75-D824-084D-B996-557B2DB8F1B4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/D671A052-F742-E444-BF22-BF8F8103006A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/DCDAB40E-952A-4743-994C-B3B0F59CD098.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/ECCFDA7B-9649-4A49-8043-32BDD5493154.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/EE3C5F93-0552-EA4E-A694-9AAFCA6BB5A3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/F1FE5CF5-ED94-8D45-9EFB-6725C1A28126.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/25310000/F22C3E97-FFF9-4348-9AD1-25D8D5B08DEB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/00740E9A-8BA7-484C-A0D4-FF86E01EEFAB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/0161D154-B4D0-5F48-B902-0AE570CC2DAC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/0B0912CB-72EB-6E4D-A6FE-68F241CCBA71.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/0BCA6920-2CBD-0441-AF2A-69884804D521.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/0D1A9BAA-AD22-B243-970D-DE6E5BA9DC41.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/0D988CAF-C1EF-3646-8D0A-8876BFCF4FC8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/111C953D-02FB-5B48-BE40-BD604D911B04.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/11682C0D-C993-2047-B87F-63A4E2646499.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/150B096D-1E45-8C49-9E16-86E643BC9A7B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/18C5CDD7-7570-E141-9807-7BE8BA018B7B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/1CEA1288-7E80-684E-BDBC-2A3BDB69D553.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/1E2F6545-93DB-C142-8480-640AD39F09B1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/1E5B4168-69A5-C44F-98CF-F92E246AEED1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/1ECFCAE5-2C1B-144B-BB4B-7A913CCAF500.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/1FAFC742-EED5-B043-957E-69151D17068C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/201C51A3-D79D-C74E-A106-0A65469B863E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/2040FD3F-6A17-4949-ABB9-61DFDAB8737E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/23438FEE-970D-D349-9817-098CA28964F2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/23B4FA32-BF44-1044-8C6B-C7BEACE56216.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/270BAE0E-589E-514B-82DA-33D2016D38D4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/27B38C51-94F7-444B-AAE7-5505214055BA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/2B1BDEFD-03E6-8B48-AEE3-23E56644F52E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/2C09245D-E719-A844-8363-4F8A1D7516AE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/2DE808EC-BEF2-DB4F-81CB-96EBAA5B3EC6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/39317E16-5FC7-3647-ACC5-FC74B734810D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/3AC09085-3618-1C4E-8E25-FAE720990F3C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/3C72D0C2-5734-D745-9EDA-D3F165A43BAA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/3CB21D18-8689-B647-83B2-FE387CEA713A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/3FD63C13-3AEB-9742-B101-EE4E9D56AEE7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/43B2FB16-C73E-1640-8A66-85F7E4FDAA98.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/477DB1EE-77B4-0149-A974-7DC3EBA5BAE5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/4B43C2B9-3B9C-1344-B0F3-A3BBFB14488D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/4D846B54-EBF9-744B-9B72-2451F6D529AC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/51494C26-D3AC-1848-B076-298BCA6ACB8E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/51BCE606-959A-A24C-B622-AEC39CBAB5DC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/59F9BDB8-0587-864C-922C-4C332BC1F7B9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/5A12621D-31C9-E042-9039-0538D941D476.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/5A6018B8-A63F-6846-8B67-3786AC748981.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/5AA76158-A842-B346-B00B-63B841080D54.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/5ED16D3D-3FC1-864E-998C-1924C5F5E08C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/6271C35B-30F1-E644-B281-FB1876C632E7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/62E5E210-E7A1-BA4A-BB5A-853BF9F6AFE6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/645FBC5E-F83F-4C4A-AA89-AC550ACCB9C0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/66115F82-6336-204D-B4F4-28119BC6EC0B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/666805FC-2772-CD47-8AF1-27770B2F7FC1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/69187B03-3343-F846-A791-F2C00D533B6E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/6A5022EB-A38F-F94A-97F9-7491EE238E01.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/6C564911-A235-254D-9122-14A6A65758B4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/6CFD655D-0B98-4C4F-B92A-CE21534AA5E6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/6D1783E8-E980-6844-9D7C-B2E445B1A44F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/6D9C072A-5D50-004F-9BC8-B949A8EDFC6B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/6F301DAF-90B4-D149-A0E8-896E68C28AA9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/727319D4-0ABC-3E40-8F24-BCEF44BACAE5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/73A53631-14C2-D542-863B-74D9752F810C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/74C1D45C-DEB0-4A4C-B712-204E68AC0635.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/75915C8A-0B87-8745-8AB0-704691127803.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/76D44B34-98A1-B74B-B308-1F44CFF9906B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/7B758ECE-1CFD-A34D-9784-13E8FF1A0581.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/7F3CC92B-2F07-484C-894D-58DF2E0A833C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/83B30849-ECA4-BD4E-AF8E-A7564C49F886.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/863497D0-819C-7549-A4AD-32DA2DF49359.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/8B265EE9-717B-C544-980F-056D5E878827.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/8D207266-97BE-DE42-A740-51739F207CAD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/8D97B67D-2FCC-944C-8642-BDFCDC2C5AAD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/8D998915-AA99-384B-814F-53DF381B63AD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/8E31A25B-1959-A14B-A93B-2626004C48B6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/9070F146-796D-B343-AE70-666F5688F8A3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/91F37FB2-159D-6840-8A90-C486D9BEFB7C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/9454403E-3128-D54A-9799-4CA0D88CD3E5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/95FB9626-980F-144E-A9CA-68DF9140B843.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/9700E9DB-8F15-8346-AF8D-65FF3B81967E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/99120665-72E5-B243-B51C-9757E49DE3B0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/9C8B4769-6DC6-9840-BBD1-D9ADA445F200.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/9D15E50A-12E9-C540-A97E-0CF9D06DB205.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/A48CB09B-6C7F-4646-A353-249D80B106E3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/A527FC42-3039-AE45-8B75-FB3C57FA218A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/A7EB113F-98A5-694A-802E-E60D359B97D1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/A84E717D-CAEB-8847-AC4E-60574936D16B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/A87C29FF-C57F-A343-9E7E-157D07DA466D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/AAB88C15-B0C5-464A-8B6D-9D493A77AB2F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/ABE33CFC-34E2-4547-B1F3-5F76FFF265BE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B1724A97-BFFA-C348-96D9-4B7846984F0E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B26F1F1E-1C0F-B441-BE6D-2370F2A076E7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B3E5071B-EF8A-C649-8631-7499ECA2B401.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B58486B8-71A4-D840-9666-796E8F6862FE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B5D7799D-0AEE-9846-9DF7-B5F5945DA332.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B779845F-AAB2-4947-BCAD-9A05173498C6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B86D9958-F78F-7D47-A3BD-F2B5499E2C69.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B9648D77-F101-1344-96C2-174877651376.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/B9CFD511-A1F5-604F-AAD1-EC02D3DAAABF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/BB3E06DA-8135-884E-9696-69FFFA99078E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/BE3A8576-2C80-CD4B-AAA7-CA3A77652079.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/C2DA763E-98A9-6044-B832-BCC50B97D3FA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/C33DF600-0728-7F45-B017-B55727DCFF92.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/C3765AEF-05BC-BA43-A994-C0F3C90B6C41.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/C554B73E-3BC0-1F4A-9139-D9864E88F744.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/C647F26B-54CC-AE4D-9313-04837AB13A3B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/C67F7B3C-0B07-0D46-A70E-F96E3183FFBF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/C92BACB5-26A9-BF44-8FB7-0F0A2BE0D696.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/C9F26C3D-CD7E-F14A-8150-D2920AB85462.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/CA3654A7-07F3-774D-AC38-8673D2961481.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/CB4A76B1-1962-514E-91FB-5077230CBD6F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/CE714D4E-EBDD-314D-BED9-1B31B810B315.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/CFBA3BD4-42D3-804A-8D0B-D8D83B4D740F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/CFF5C01D-6448-2D45-B3ED-DAD2AA8C97F0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/D2051375-B0F7-0645-84ED-79DEF894D9D1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/D3A6847E-9372-6A43-9C39-D453102974B0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/D46E0B1F-8A95-3448-A2E3-F6408F88A6D7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/D613D5C6-1205-DF41-9E35-57B691B051CC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/D691450B-6947-CD47-9DD3-2DB402AB51FF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/D70B18A4-40F4-9648-B0B2-0C7D44027C53.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/D7ED8FF1-F45D-3B4B-9B9B-ABD1E91E8C18.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/D9564D5A-3A56-3F44-9762-F68091A92DA4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/DD5D4C75-43A5-8A48-B153-BBD06CD17AD8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/DE473EB5-9EC7-6F4D-A181-44C8EFC374B3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/DEE650CC-9296-7743-847E-9BDF78D597A2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/E1C86D1E-01D7-814B-879B-7794C7CF2EF7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/E1E06E86-E35B-9E4D-A042-C3DBB15A0B63.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/E9551DD6-0AAF-1B4F-9A62-D02F0A6083AE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/E9B09FB5-7C40-8E4B-8C33-B965D166C83E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/EBD3EEA4-308A-234E-81C3-712EB672CB76.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/EC3E4F45-57C7-644D-AF86-647F53A629BB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/ECAFE7B1-DD96-F144-A978-BB8697D44285.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/ECD42A40-6C6E-DE4D-9108-4F4001C7169C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/ED847D37-8300-FC4B-B438-505E7CCA6E53.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/EF17F264-BE9B-1F44-BF26-47C6611D0960.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/F1BAF12A-9A5A-AA4B-9C47-C02DFDE28A2F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/F270FA26-52AD-1944-878F-C129305D475F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/F5442211-FE04-2049-822C-3E6764A210BD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/F5BEA595-F4E8-5040-AA06-8362E08AD004.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/F6009DF0-02CF-9040-B0DF-C7BE58696855.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/F77C29C4-33F5-DB44-AE70-6FEF217F5098.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/FA51E14F-705E-FE41-B213-402616DAF0E9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/FA6E521E-38EC-E84D-A6A2-E9958E0624C2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/FC4BFA0C-1799-3F44-9DAD-76D15EB59912.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/40000/FC887885-0F30-534C-89FD-1657891C36D3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/002D4B3D-C7F2-2B4F-9034-FF97937139B9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/01F1F9C3-7A93-8645-BD96-EA968C632EA1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/029C34F8-3D3F-1A43-9FB2-D93D3CDB0ED8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/0387D222-84DA-374C-BA40-A059740A42D6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/0498BA3D-B69F-6344-ADF5-A3A2A4B018A6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/0782DE15-D7C7-2341-8D0D-CAFC19777513.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/0866115E-2911-204D-9727-A22AEDF7BC60.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/09AB6FB8-2460-4748-B434-4A53D642F5BB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/149600D5-0583-C248-8CB6-FC83C14291A0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/183CC50D-827F-A446-AA85-7BD5099F0AA0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/18A37D02-5305-2546-9D6A-6077FD0A61B3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/1DC8B9CA-C76B-6146-AC55-D828F9D3E649.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/1E445232-BBB3-0D48-BBAB-52A8B94E2CCA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/27BAC5BD-0B1C-174F-BCCD-11628C3E3684.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/2AA6BA7B-D8D6-9144-A725-9774C67D4F3D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/2B3B691F-5605-A042-BF3E-9EBA58FF961C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/2DB0C2CC-616D-A840-A116-DA1C3179291E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/2FBFDD3D-BC8E-1E4A-BD31-220003966D81.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/33556457-C263-CB47-98EB-63B6ED8631DE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/3414B9AB-03CC-5F40-B249-5C6D1A2BF1BB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/3BDF6440-D74E-F644-8FFB-9FB223A41E73.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/3C27867F-56D4-3740-BE71-9F5E64C1B36A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/3E32040F-7EFA-4541-A44F-93A553083BF8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/436AA578-58CB-C342-8D37-50F3EB02CB4E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/4449D2D9-412C-6A4C-8F22-94A5EE227F33.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/476A394F-1E0B-B74A-B5A3-F6F304542449.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/47E3965D-5403-264C-B08F-A457CF8B8E0F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/4864425B-A42B-694D-9965-CCFC448D548E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/48AB27E3-0456-FF45-B905-B31AF06F2755.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/4A5B93D3-8721-8043-A1D0-422E7E113D15.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/4C03BB5B-1C90-A84E-B4A7-EFC7E080B44D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/4D392416-0BA7-EC4B-853A-A553B0B94CEB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/4E06634D-E6F3-A349-A88B-5B3DD6330328.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/518BF959-1B26-4A4D-A655-C639F52588F5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/52BECA74-432F-514B-98FE-86CC9E64C41F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/58B1F9B2-8A32-B742-B9F8-055D30325BCC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/58E32EEF-D979-F74C-8881-26DC6686FE81.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/5B77D5BC-500E-684A-B372-F0C8137EC8AC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/5E3DE40E-8DF6-E440-BFF9-032ECE898990.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/6799572D-265E-D94E-A865-FB85AA45430F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/6A57A94C-0E8E-1041-8EB9-91D90E24CF4F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/6C4EA686-81AC-2546-B402-620ED5EFD878.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/727BF05F-1445-D349-A70B-2067A714B486.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/77238886-635D-FB47-A67A-868D3E09AB87.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/7A2D12B1-072C-BC42-AA7E-3440E685B938.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/81724AAC-E114-C049-BC66-958EBD3F2E93.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/818DEDDB-2F8B-3847-B665-D61CC080E26D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/8BC7F343-2A7F-E84E-B48D-9BBA6263647D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/8E445F22-DE00-154A-9F00-537D5C960137.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/929A28EA-D8A6-494F-BF55-AF2C4B7A1679.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/93002E71-837B-E243-8176-38FDEABC6B81.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/94042E3D-7253-5946-98CE-95852FC13109.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/941F8DDF-FBA7-F948-AF98-EC16FB688C2B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/97B8E8DC-CDBC-014C-877F-55532F55320B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/9A5308B5-8FA4-4347-B0DC-78144B057D74.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/9AC9838A-830E-D440-80C3-9D0B4F91C9AE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/9DC72B7C-94DE-0D4B-9078-D4AF239C1379.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/A3B615BF-A504-704B-A032-DF3CE690D525.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/AA08B693-16F9-A841-8CE1-BDF1E445C3B4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/ACC54A2F-4FCE-F541-BDE7-35C11D3647C7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/AD8BFABD-2140-D44C-99B6-45CE7408FD53.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/B01B2D68-5B68-6B4E-B0B0-A27F3E3CDB14.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/B0750808-F74E-7949-9940-DC4D42857569.root',
] )
readFiles.extend( [
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/B1CEDBDD-D5A8-4542-B097-6194E8535B04.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/B364313C-47CD-084E-9F13-9456CF1C9EA5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/B80E64D9-0647-7A40-9C54-0B39C1021084.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/B90C3C5D-665C-0348-B27F-DD55C6E72172.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/B910BE35-2775-224D-9CE9-9787481E3508.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/BC4DF0D3-E259-3749-B1E0-0B4C75D20908.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/BE5B3DF0-F2F4-8F4A-9282-DCB5399F11DA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/C03A467F-9F32-154F-99EE-D2CAEBE53DC8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/CB14F6DF-E1A7-B542-B4DE-1A8B68E6E5D8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/CBDBB752-6D3A-C945-900E-B7A65675B917.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/CBE06EA5-B9B9-7D46-A3FE-8A7C89D92CBD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/CF147427-4A05-8545-B69D-342EFD660081.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/D17C9D76-AD69-5B47-8D83-DE921D7104A0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/D47025BC-CD06-C749-88CF-8FED3212BCF5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/D5030B38-D321-484E-9B57-B33BF1FC6EDF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/D5C096A2-435D-864C-9C23-FB87B4AE1EC7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/D7D8FDD3-F69B-2545-A96C-4F93FFDD7AE7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/DAD23E9E-6ADC-2F45-8072-0DEA8ADCEBE9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/E734BC86-A13C-9D4B-A552-9ABC21C2EDA9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/E92B481B-1B83-0648-9F9C-B55511883031.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/EE036D0B-EEA4-F145-AC50-32E355FC9754.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/F1AFB333-9EEC-E249-9511-AAB0E230B06A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/F51D1953-4491-494B-81E0-6600E08B0B1D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/F735D96C-E721-6540-B179-E13A4660739F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/F86185C5-320A-1D42-AF9F-1229F83C1E53.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/FD39B205-B180-5F4A-885F-AC6F14E102BD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/FD827B02-7AC3-6C45-A408-B69C0BD256BE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/FDDA9BB9-DA38-2D46-95F7-5308BA76BED1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v2/50000/FFF16FD3-A17E-F346-B744-5B757955DACF.root',
] )
| [
"hiltbran@umn.edu"
] | hiltbran@umn.edu |
4707f0ba46408fd186e5c23b162be47f50530c0a | 5f7baa2b078e0e983f57b5e7537a1fcb3ea4e081 | /demud/dataset_des.py | 681e1ab377153fecd5dac19cb3bc56679467f1b9 | [
"BSD-2-Clause",
"Apache-2.0"
] | permissive | jakehlee/DEMUD | 82e2d542f47ed875b2aed80b5aeffe4511eb43a2 | 81ac7d1002ae95a3aaf99cf2070f1e022a2ffa81 | refs/heads/master | 2020-12-02T18:00:16.914460 | 2020-06-13T08:00:18 | 2020-06-13T08:00:18 | 96,459,900 | 2 | 0 | Apache-2.0 | 2020-06-12T19:51:42 | 2017-07-06T18:22:37 | Python | UTF-8 | Python | false | false | 16,037 | py | #!/usr/bin/env python
# File: dataset_des.py
# Author: Kiri Wagstaff, 8/28/17
#
# Class for reading in DES data in FITS format.
# DES: Dark Energy Survey
# Collaboration with Tim Eifler and Eric Huff
#
# Copyright 2017, by the California Institute of Technology. ALL
# RIGHTS RESERVED. United States Government Sponsorship
# acknowledged. Any commercial use must be negotiated with the Office
# of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws and
# regulations. By accepting this document, the user agrees to comply
# with all applicable U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign
# countries or providing access to foreign persons.
import os, sys, re
import numpy as np
import math
import matplotlib
matplotlib.use('Agg')
import pylab
from dataset import Dataset
class DESData(Dataset):
def __init__(self, desfilename=None):
"""DESData(desfilename=None)
Read in DES catalog data.
"""
Dataset.__init__(self, desfilename, "DESData", '')
self.readin()
def readin(self):
"""readin()
Read in DES catalog data from FITS or .npy files.
"""
if self.filename.endswith('.fits'):
# Assumes Science Verification data
self.read_SV_fits()
elif self.filename.endswith('.npz'):
# Assumes DES Y3 Gold data
self.read_Y3_2_2_npz()
else:
print('Unrecognized file type: ' + self.filename)
# Read (filtered) DES Y3 2.2 gold data set
def read_Y3_2_2_npz(self):
d = np.load(self.filename)
data = d['data']
# Testing
#data = data[:10,:]
feat_names = list(d['features'])
# Features to use
#self.features = ['lup_r', 'color_g_minus_r',
# 'color_i_minus_r', 'color_z_minus_r']
self.features = ['color_g_minus_r', 'lup_r',
'color_i_minus_r', 'color_z_minus_r',
'T', 'snr'] # add two features
feat_inds = [feat_names.index(f) for f in self.features]
self.data = data[:,feat_inds]
# Trrrrranspose for DEMUD (feat x items)
self.data = self.data.T
# Scale some features as needed
for f in self.features:
if f == 'lup_r':
# Subtract the mean value
mean_lup_r = np.mean(self.data[self.features.index(f),:])
self.data[self.features.index(f),:] -= mean_lup_r
print 'Subtracting mean (%.2f) from %s.' % (mean_lup_r, f)
newf = 'lup_r_minus_mean'
self.features[self.features.index(f)] = newf
f = newf
'''
if 'MAG' in f: # subtract the min
minval = np.min(self.data[self.features.index(f),:])
self.data[self.features.index(f),:] -= minval
print 'Subtracting %f from %s.' % (minval, f)
newf = f + '-sub%.2f' % minval
self.features[self.features.index(f)] = newf
f = newf
'''
print '%s range: ' % f,
print self.data[self.features.index(f),:].min(),
print self.data[self.features.index(f),:].max()
# Also store errors for reporting in explanation plots
self.expl_features = ['color_err_g_minus_r', 'lup_err_r',
'color_err_i_minus_r', 'color_err_z_minus_r',
'T_err']
expl_feat_inds = [feat_names.index(f) for f in self.expl_features]
self.expl_data = data[:,expl_feat_inds]
# Fill zeros errors for the T and snr features until we extract them too
self.expl_data = np.hstack((self.expl_data, np.zeros((len(self.expl_data), 2))))
# Labels
self.labels = ['%s_%.6f_%.6f' % (id, ra, dec) for (id, ra, dec) in
zip(data[:,feat_names.index('coadd_object_id')],
data[:,feat_names.index('ra_x')], # gold
data[:,feat_names.index('dec_x')])] # gold
#self.xvals = np.arange(self.data.shape[0]).reshape(-1,1)
self.xvals = np.arange(self.data.shape[0])
self.features = np.array(self.features)
# Read DES Y3 2.0 gold data set
def read_Y3_2_0_npy(self):
data = np.load(self.filename)
# We want R, G-R, I-R, Z-R
self.data = data[3,:]
self.features = ['MAG_R']
# G-R
self.data = np.vstack([self.data, data[2,:] - data[3,:]])
self.features += ['G-R']
# I-R
self.data = np.vstack([self.data, data[4,:] - data[3,:]])
self.features += ['I-R']
# Z-R
self.data = np.vstack([self.data, data[5,:] - data[3,:]])
self.features += ['Z-R']
# Filter out bogus MAG_R values
# TODO: remove this with new version of data file (already filtered)
keep = self.data[0,:] > -1
self.data = self.data[:,keep]
data = data[:,keep]
print('Removed MAG_R values <= -1.')
# Data is d x n
print self.data.shape
# Scale some features as needed
for f in self.features:
'''
if 'MAG' in f: # subtract the min
minval = np.min(self.data[self.features.index(f),:])
self.data[self.features.index(f),:] -= minval
print 'Subtracting %f from %s.' % (minval, f)
newf = f + '-sub%.2f' % minval
self.features[self.features.index(f)] = newf
f = newf
'''
print '%s range: ' % f,
print self.data[self.features.index(f),:].min(),
print self.data[self.features.index(f),:].max()
# TODO: add/readin id
self.labels = ['None_%.6f_%.6f' % (ra, dec) for (ra, dec) in
zip(data[0,:], data[1,:])]
self.xvals = np.arange(self.data.shape[0]).reshape(-1,1)
self.features = np.array(self.features)
# Read Science Verification data set
def read_SV_fits(self):
import fitsio
subset_photoz_bin = False
if subset_photoz_bin:
# Subset to a single photo-z bin
photoz_bin = 0
Dataset.__init__(self, desfilename, "DESData_colordiff_bin" +
str(photoz_bin), '')
data = fitsio.read(self.filename)
# Mask out the bad objects
SVA1_FLAG_mask = (data['SVA1_FLAG'] == 0)
NGMIX_FLAG_mask = (data['NGMIX_FLAG'] == 0)
PHOTOZ_FLAG_mask = (data['PHOTOZ_BIN'] > -1)
data = data[SVA1_FLAG_mask &
NGMIX_FLAG_mask &
PHOTOZ_FLAG_mask]
# Read in the desired columns.
# from SVA_GOLD
# WLINFO filtered by Umaa to omit objects with
# SVA1_FLAG != 0
# NGMIX_FLAG != 0
# PHOTOZ_BIN != -1
# We want R, G-R, R-I, I-Z
self.data = data['MAG_AUTO_R']
self.features = ['MAG_AUTO_R']
# G-R
self.data = np.vstack([self.data,
data['MAG_AUTO_G'] - data['MAG_AUTO_R']])
self.features += ['G-R']
# R-I
self.data = np.vstack([self.data,
data['MAG_AUTO_R'] - data['MAG_AUTO_I']])
self.features += ['R-I']
# I-Z
self.data = np.vstack([self.data,
data['MAG_AUTO_I'] - data['MAG_AUTO_Z']])
self.features += ['I-Z']
# MEAN_PHOTOZ
self.data = np.vstack([self.data,
data['MEAN_PHOTOZ']])
self.features += ['MEAN_PHOTOZ']
# PHOTOZ_BIN
self.data = np.vstack([self.data,
data['PHOTOZ_BIN']])
self.features += ['PHOTOZ_BIN']
# Data is d x n
print self.data.shape
# Scale some features as needed
for f in self.features:
if 'MAG_AUTO' in f: # subtract the min
minval = np.min(self.data[self.features.index(f),:])
self.data[self.features.index(f),:] -= minval
print 'Subtracting %f from %s.' % (minval, f)
newf = f + '-sub%.2f' % minval
self.features[self.features.index(f)] = newf
f = newf
print '%s range: ' % f,
print self.data[self.features.index(f),:].min(),
print self.data[self.features.index(f),:].max()
self.labels = ['%d_%.6f_%.6f' % (id,ra,dec) for (id,ra,dec) in \
zip(data['COADD_OBJECTS_ID'],
data['RA'],
data['DEC'])]
self.xvals = np.arange(self.data.shape[0]).reshape(-1,1)
self.features = np.array(self.features)
if subset_photoz_bin:
# Subset to a single photo-z bin
keep = (self.data[np.where(self.features == 'PHOTOZ_BIN')[0][0],:] == \
photoz_bin)
self.data = self.data[:,keep]
# Still annoys me that you can't index a list with a list
self.labels = [self.labels[k] for k in np.where(keep)[0]]
# Remove the MEAN_PHOTOZ and PHOTOZ_BIN features
print('Removing PHOTOZ features.')
features_keep = ((self.features != 'PHOTOZ_BIN') &
(self.features != 'MEAN_PHOTOZ'))
self.data = self.data[features_keep,:]
self.features = self.features[features_keep]
self.xvals = np.arange(self.data.shape[0]).reshape(-1,1)
def plot_item(self, m, ind, x, r, k, label, U, rerr, feature_weights):
"""plot_item(self, m, ind, x, r, k, label, U, rerr, feature_weights)
Borrowed from UCIDataset.
Plot selection m (index ind, data in x) and its reconstruction r,
with k and label to annotate of the plot.
U and rerr are here ignored. Could use them to plot a projection
into the first two PCs' space (see dataset_libs.py).
If feature_weights are specified, omit any 0-weighted features
from the plot.
"""
if x == [] or r == []:
print "Error: No data in x and/or r."
return
# Select the features to plot
if feature_weights != []:
goodfeat = [f for f in range(len(feature_weights)) \
if feature_weights[f] > 0]
else:
goodfeat = range(len(self.xvals))
fig = pylab.figure()
ax = fig.add_subplot(1, 1, 1)
xvals = self.xvals[goodfeat]
x = x[goodfeat]
r = r[goodfeat]
feat_names = self.features[goodfeat]
# Make an errorbar plot to show measurement uncertainty
# for the color/luptitude features
color_lup_feats = [f for f in feat_names if 'minus' in f]
color_lup_inds = [color_lup_feats.index(f) for f in color_lup_feats]
pylab.errorbar([xvals[f] for f in color_lup_inds],
[x[f] for f in color_lup_inds],
yerr=[self.expl_data[ind,f] for f in color_lup_inds],
color='b', marker='o', linestyle='-',
ecolor='k',
markersize=10, label='Observations')
pylab.plot([xvals[f] for f in color_lup_inds],
[r[f] for f in color_lup_inds],
'rd-', markersize=10, label='Expected')
# Add T and snr in log form, bar plots
for f_name in ['T', 'snr']:
f = np.where(feat_names == f_name)[0]
feat_names[f] = 'log(%s)' % f_name
if f != -1:
pylab.bar(xvals[f]-0.2, math.log(r[f]), width=0.4, color='red')
if f_name == 'T': # include error bar
pylab.bar(xvals[f]+0.2, math.log(x[f]),
yerr=[math.log(self.expl_data[ind,f])],
width=0.4, color='blue')
else:
pylab.bar(xvals[f]+0.2, math.log(x[f]), width=0.4, color='blue')
# dashed line to show 0
pylab.plot([0, len(goodfeat)], [0, 0], 'k--')
pylab.xlabel(self.xlabel)
pylab.ylabel(self.ylabel)
pylab.title('DEMUD selection %d (%s),\n item %d, using K=%d' % \
(m, label, ind, k))
pylab.legend(fontsize=12)
if len(self.features) == 0:
pylab.xticks(pylab.arange(len(goodfeat)), range(len(x))[goodfeat], fontsize=12)
else:
pylab.xticks(pylab.arange(len(goodfeat)), feat_names,
rotation=-30, ha='left', fontsize=12)
pylab.tight_layout()
if not os.path.exists('results'):
os.mkdir('results')
outdir = os.path.join('results', self.name)
if not os.path.exists(outdir):
os.mkdir(outdir)
figfile = os.path.join(outdir, 'sel-%d-k-%d-(%s).png' % (m, k, label))
pylab.savefig(figfile)
print 'Wrote plot to %s' % figfile
pylab.close()
# Write a list of the selections in CSV format
def write_selections_csv(self, i, k, orig_ind, label, ind, scores):
outdir = os.path.join('results', self.name)
selfile = os.path.join(outdir, 'selections-k%d.csv' % k)
(objid, RA, DEC) = label.split('_')
# If this is the first selection, open for write
# to clear out previous run.
if i == 0:
fid = open(selfile, 'w')
# Output a header. For some data sets, the label is a class;
# for others it is an object identifier. To be generic,
# here we call this 'Name'.
fid.write('# Selection, Index, Name, RA, DEC, Score\n')
# If scores is empty, the (first) selection was pre-specified,
# so there are no scores. Output 0 for this item.
if scores == []:
fid.write('%d,%d,%s,%s,%s,0.0\n' % (i, orig_ind, objid,
RA, DEC))
else:
fid.write('%d,%d,%s,%s,%s,%g\n' % (i, orig_ind, objid,
RA, DEC, scores[ind]))
else:
# Append to the CSV file
fid = open(selfile, 'a')
fid.write('%d,%d,%s,%s,%s,%g\n' % (i, orig_ind, objid,
RA, DEC, scores[ind]))
# Close the file
fid.close()
# Also, append selections to a growing .html file
self.write_selections_html(10, i, k, ind, label, scores)
# Write a list of n selections that are similar to selection i (index ind)
# using scores (with respect to selection i).
def write_selections_html(self, n, i, k, ind, label, scores):
outdir = os.path.join('results', self.name)
selfile = os.path.join(outdir, 'selections-k%d.html' % k)
(objid, RA, DEC) = label.split('_')
# If this is the first selection, open for write
# to clear out previous run.
if i == 0:
# Start up the HTML file
fid = open(selfile, 'w')
fid.write('<html><head><title>DEMUD: %s, k=%d</title></head>\n' % (self.name, k))
fid.write('<body>\n')
fid.write('<h1>DEMUD experiments on %s with k=%d</h1>\n' % (self.name, k))
fid.write('%d (%g) items analyzed.<br>\n' %
(self.data.shape[1], self.data.shape[1]))
fid.write('<ul>\n')
fid.write('<li>Selections are presented in decreasing order of novelty.</li>\n')
fid.write('<li>Cutouts (left) are RGB images generated from the DES DR1 archive.</li>\n')
fid.write('<li>The bar plot shows the <font color="blue">observed</font> values compared to the <font color="red">expected (modeled)</font> values. Discrepancies explain why the chosen object is considered novel. Click to enlarge.</li>\n')
fid.write('<li>Scores close to 0 (for items other than the first one) indicate an arbitrary choice; novelty has been exhausted.</li>\n')
fid.write('</ul>\n\n')
# If scores is empty, the (first) selection was pre-specified,
# so there are no scores. Output -1 for this item.
if scores == []:
score = 'N/A'
else:
score = '%f' % scores[ind]
else:
# Append to the HTML file
fid = open(selfile, 'a')
score = scores[ind]
fid.write('<h2>Selection %d: %s, RA %s, DEC %s, score %s</h2>\n' %
(i, objid, RA, DEC, score))
fid.write('<a href="selection-%d-cutout.png"><img title="[%d] %s" src="selection-%d-cutout.png" height=270></a>\n' %
(i, i, objid, i))
figfile = 'sel-%d-k-%d-(%s).png' % (i, k, label)
fid.write('<a href="%s"><img height=270 src="%s"></a>\n\n' %
(figfile, figfile))
# Close the file
fid.close()
if __name__ == "__main__":
# Run inline tests
import doctest
(num_failed, num_tests) = doctest.testmod()
filename = os.path.basename(__file__)
if num_failed == 0:
print "%-20s All %3d tests passed!" % (filename, num_tests)
else:
sys.exit(1)
| [
"kiri.l.wagstaff@jpl.nasa.gov"
] | kiri.l.wagstaff@jpl.nasa.gov |
44de94bd2f50751590318f5e1ab00984c2fa3868 | 714b0411a7c14dbebeceee3c16eea32525db2c24 | /src/google_calendar.py | c432d10527475e4449a79a1ec360a308c5586f77 | [
"MIT"
] | permissive | hroncok/Google-Calendar-Simple-API | 76c1ecbceff8e1430e9a222edce3075453c46624 | 0891485005d909283d862dc5a41a96ccb93209ac | refs/heads/master | 2020-04-11T02:57:11.495504 | 2018-12-11T22:39:27 | 2018-12-11T22:39:27 | 161,462,603 | 0 | 0 | MIT | 2018-12-12T09:17:10 | 2018-12-12T09:17:10 | null | UTF-8 | Python | false | false | 2,223 | py | import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
def _get_default_credentials_path():
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'calendar-python.json')
return credential_path
class GoogleCalendar:
_READ_WRITE_SCOPES = 'https://www.googleapis.com/auth/calendar'
_DEFAULT_CLIENT_SECRET_FILE = 'client_secret.json'
def __init__(self,
calendar,
credentials_path=_get_default_credentials_path(),
read_only=False,
secret_file=_DEFAULT_CLIENT_SECRET_FILE,
application_name=None):
self._credentials_path = credentials_path
self._scopes = self._READ_WRITE_SCOPES + ('.readonly' if read_only else '')
self._secret_file = secret_file
self._application_name = application_name
self.calendar = calendar
credentials = self._get_credentials()
http = credentials.authorize(httplib2.Http())
self.service = discovery.build('calendar', 'v3', http=http)
def _get_credentials(self):
store = Storage(self._credentials_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(self._secret_file, self._scopes)
flow.user_agent = self._application_name
credentials = tools.run_flow(flow, store)
return credentials
def create_event(self, event):
return self.service.events().insert(calendarId=self.calendar, body=event.get_body()).execute()
def delete_event(self, event_id):
return self.service.events().delete(calendarId=self.calendar, eventId=event_id).execute()
def list_events(self):
return self.service.events().list(calendarId=self.calendar).execute()['items']
def main():
calendar = GoogleCalendar('kuzmovich.goog@gmail.com')
print(calendar.list_events())
if __name__ == '__main__':
main()
| [
"kuzmovich.box@mail.ru"
] | kuzmovich.box@mail.ru |
a4a98a151d78e00cc97f3606cb1aed431e962869 | b672c800c634e195e006d791bc8d9ae500eff944 | /life.py | c4bf9c8b69ac175dca87c0880777981bb567f5c4 | [] | no_license | george-hm/game_of_life | 01f72c8d0543e475ca798e9f7bb3141c48323144 | eec948f269fd3c635b1913cf89d5974c52862570 | refs/heads/master | 2020-04-29T16:34:00.097947 | 2019-03-18T11:09:29 | 2019-03-18T11:09:29 | 176,265,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | import random
import time
import os
class Map(object):
on = "1"
off = " "
def __init__(self, seed=None, size=None): # generate map
if not seed or seed == "": # seeding
seed = random.random()
if not size: # sizing
size = 100
self.size = size
self.seed = seed
random.seed(seed) # set the seed
self.map_sheet = [] # the map
# generate map
for y in range(0, (int(size/2))): # y axis
self.map_sheet.append([]) # add new row
for x in range(0, size): # x axis
if random.random() < 0.25: # alive
self.map_sheet[y].append(self.on)
else: # dead
self.map_sheet[y].append(self.off)
# need to do the check before printing in case we generate an invalid life grid
print(self.printMap())
def printMap(self):
ret_ar = []
for x in range(0, len(self.map_sheet)): # go through each row
# join everything, for easy showing
ret_ar.append("".join(self.map_sheet[x]))
return "\n".join(ret_ar) # the map!
def performCheck(self): # neighbor checks, the big one
def gridCheck(row, column): # actual neighbor check
to_app = []
def checkLooped(x_type=None, y_type=None): # less repetition
# this is used to check if we looped (e.g. array[-1])
if x_type:
if row+(-x) == -1:
return True
if y_type:
if column+(-y) == -1:
return True
return False
def checkErrors(x_type=None, y_type=None): # less repetition
# check cell with + or - values
if x_type:
x_type = -x
else:
x_type = x
if y_type:
y_type = -y
else:
y_type = y
try:
if self.map_sheet[row+(x_type)][column+(y_type)] == self.on:
to_app.append(True)
else:
to_app.append(False)
except IndexError:
to_app.append(False)
for x in range(0, 2): # for loop to check surrounding cell
for y in range(0, 2): # ^ same as above, we need a nested
if x == 0 and y == 0: # this is our cell, skip
continue
if x == 1 and y == 1: # if the last step, we also need -x, y and x, -y
if checkLooped(x_type=True): # are we looped?
to_app.append(False) # we are, fail
else:
# otherwise, check as normal
checkErrors(x_type=True)
if checkLooped(y_type=True):
to_app.append(False)
else:
checkErrors(y_type=True)
checkErrors() # check with two positive values
if checkLooped(x_type=True, y_type=True): # check with double negative
to_app.append(False)
else:
checkErrors(x_type=True, y_type=True)
return to_app # return the results of surrounding cells
new_map = [] # building a new map so no changes are made until complete
for row in range(0, len(self.map_sheet)): # go through each row
new_map.append([]) # create new row in new map
# go through each column
for column in range(0, len(self.map_sheet[row])):
check = gridCheck(row, column) # get neighbor checks
# the cell itself (is it alive or dead?)
cell = self.map_sheet[row][column]
# the rules of life
if cell == self.on and check.count(True) < 2:
new_map[row].append(self.off)
elif cell == self.on and check.count(True) > 1 and check.count(True) < 4:
new_map[row].append(self.on)
elif cell == self.on and check.count(True) > 3:
new_map[row].append(self.off)
elif check.count(True) == 3:
new_map[row].append(self.on)
else:
new_map[row].append(self.off)
self.map_sheet = new_map # new map assembled
def playLife(self): # play!
    """Run the Game of Life forever: advance one generation, redraw the
    map, and print status. Never returns; interrupt with Ctrl+C.
    """
    count = 1
    while True: # infinite loop, wait, check, print, repeat
        count += 1  # NOTE(review): first generation shown is 2, not 1
        self.performCheck()
        time.sleep(0.750)
        os.system('cls')  # NOTE(review): 'cls' only clears on Windows; POSIX needs 'clear'
        print(self.printMap())
        print("Generation:", count)
        print("Seed:", self.seed)
        print(f"Size: {self.size}x{int(self.size/2)}")
# PLAY LIFE
# Script entry point: prompt for a seed (blank -> random) and a board size,
# then run the simulation until interrupted.
test = Map(input("Enter a seed, or leave blank for a random seed.\n> "), int(input("Enter a size.\n> ")))
test.playLife()
| [
"georgehm@pm.me"
] | georgehm@pm.me |
cf1372cfb3c393f87438310aae22c2f748cdc1e6 | d90455a350ae167002a29630037038caae8c5b94 | /agent.py | 24acc696b0f90670a48879d519b352f77d6b9eb0 | [] | no_license | ericchen168/smart-cab | 80e8db14c36990bfde05287a709d8269bf5056bb | 9c4d9b59c26eb51edb179aeff7357832bd5ae5db | refs/heads/master | 2021-01-01T17:28:13.733449 | 2017-01-08T00:10:28 | 2017-01-08T00:10:28 | 78,311,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,603 | py | import random
import math
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator
class LearningAgent(Agent):
    """An agent that learns to drive in the Smartcab world via tabular
    Q-learning with an epsilon-greedy policy and an exponentially
    decaying exploration rate. This is the object you will be modifying."""

    def __init__(self, env, learning=False, epsilon=1, Gamma=1, alpha=0.7, action=None):
        super(LearningAgent, self).__init__(env)     # Set the agent in the evironment
        self.planner = RoutePlanner(self.env, self)  # Create a route planner
        self.valid_actions = self.env.valid_actions  # The set of valid actions

        # Set parameters of the learning agent
        self.learning = learning   # Whether the agent is expected to learn
        self.Q = dict()            # Q-table: state tuple -> {action: value}
        self.epsilon = epsilon     # Random exploration factor
        self.alpha = alpha         # Learning factor

        # Additional bookkeeping
        self.action = action       # NOTE(review): stored but never read again
        self.Gamma = Gamma         # NOTE(review): discount factor, unused by learn()
        self.trial = 0             # Completed-trial counter (bumped in reset())

    def reset(self, destination=None, testing=False):
        """Called at the beginning of each trial; decays epsilon, or zeroes
        epsilon/alpha once 'testing' trials begin."""
        # Select the destination as the new location to route to
        self.planner.route_to(destination)

        self.trial = self.trial + 1
        if testing:
            self.epsilon = 0
            self.alpha = 0
        else:
            # Exponential decay: epsilon <- epsilon * e^(-a)
            a = 0.02
            self.epsilon = self.epsilon*math.exp(-a)
            # Alternative decay schedules kept for reference:
            # self.epsilon = self.epsilon - 0.05
            # self.epsilon = math.exp(-1 * self.alpha * self.trial)
        return None

    def build_state(self):
        """Return the agent's state tuple (waypoint, light, oncoming, left).

        NOTE(review): when self.learning is False, 'state' is never
        assigned and the return raises UnboundLocalError -- presumably
        this is only called with learning=True; confirm.
        """
        # Collect data about the environment
        waypoint = self.planner.next_waypoint()  # The next waypoint
        inputs = self.env.sense(self)            # Visual input - intersection light and traffic
        deadline = self.env.get_deadline(self)   # Remaining deadline (deliberately not in the state)

        if self.learning is True:
            state = (waypoint, inputs['light'], inputs['oncoming'], inputs['left'])
        return state

    def get_maxQ(self, state):
        """Return the maximum Q-value over all actions for 'state'."""
        # max() over the action keys, keyed by their Q-value (arg-max action)
        maxQ = max(self.Q[state], key=(lambda x: self.Q[state][x]))
        return self.Q[state][maxQ]

    def createQ(self, state):
        """When learning, lazily add 'state' to the Q-table with all four
        action-values initialised to 0."""
        if self.learning is True:
            if state not in self.Q.keys():
                self.Q[state] = {None: 0, 'left': 0, 'right': 0, 'forward': 0}
        return

    def choose_action(self, state):
        """Epsilon-greedy action selection: random when not learning or with
        probability epsilon, otherwise a uniform choice among the
        max-Q actions for 'state'."""
        # Set the agent state and default action
        self.state = state
        self.next_waypoint = self.planner.next_waypoint()
        action = None

        if self.learning != True:
            action_opt = [None, 'forward', 'left', 'right']
            action = random.choice(action_opt)
        elif random.uniform(0, 1) < self.epsilon:
            # Explore
            action_opt = [None, 'forward', 'left', 'right']
            action = random.choice(action_opt)
        else:
            # Exploit: collect every action tied for the best Q-value
            Q_max = self.get_maxQ(state)
            action_opt = [None, 'forward', 'left', 'right']
            # NOTE(review): indexing a map object only works on Python 2;
            # on Python 3 this needs list(map(...)).
            action_Q = map(lambda x: self.Q[state][x], action_opt)
            action_opt2 = []
            for i in range(4):
                if action_Q[i] == Q_max:
                    action_opt2.append(action_opt[i])
            action = random.choice(action_opt2)
        return action

    def learn(self, state, action, reward):
        """Value update without a future-reward term:
        Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * reward."""
        if self.learning is True:
            self.Q[state][action] = (1-self.alpha)*self.Q[state][action] + self.alpha*(reward)
            # One-step-lookahead (gamma-discounted) variant kept for reference:
            # next_inputs = self.env.sense(self)
            # nextwaypoint = self.planner.next_waypoint()
            # nextdeadline = self.env.get_deadline(self)
            # next_state = (nextwaypoint, next_inputs['light'], next_inputs['oncoming'],
            #               next_inputs['right'], next_inputs['left'], nextdeadline)
            # max_Q = max(self.Q[next_state], key=(lambda x: self.Q[next_state][x]))
            # self.Q[state][action] = (1-self.alpha)*self.Q[state][action] + \
            #     self.alpha*(reward + self.Gamma*self.Q[next_state][max_Q])
        return None

    def update(self):
        """One time step: build the state, ensure its Q entry exists, choose
        an action, receive the reward, and learn if enabled."""
        state = self.build_state()          # Get current state
        self.createQ(state)                 # Create 'state' in Q-table
        action = self.choose_action(state)  # Choose an action
        reward = self.env.act(self, action) # Receive a reward
        self.learn(state, action, reward)   # Q-learn
        return
def run():
    """Driving function for running the simulation.
    Press ESC to close the simulation, or [SPACE] to pause the simulation."""
    # Create the environment
    # Flags:
    #   verbose     - set to True to display additional output from the simulation
    #   num_dummies - discrete number of dummy agents in the environment, default is 100
    #   grid_size   - discrete number of intersections (columns, rows), default is (8, 6)
    env = Environment(verbose=False)

    # Create the driving agent
    # Flags:
    #   learning  - set to True to force the driving agent to use Q-learning
    #   * epsilon - continuous value for the exploration factor, default is 1
    #   * alpha   - continuous value for the learning rate, default is 0.5
    agent = env.create_agent(LearningAgent, learning=True)

    # Follow the driving agent
    # Flags:
    #   enforce_deadline - set to True to enforce a deadline metric
    env.set_primary_agent(agent, enforce_deadline=True)

    # Create the simulation
    # Flags:
    #   update_delay - continuous time (in seconds) between actions, default is 2.0 seconds
    #   display      - set to False to disable the GUI if PyGame is enabled
    #   log_metrics  - set to True to log trial and simulation results to /logs
    #   optimized    - set to True to change the default log file name
    sim = Simulator(env, display=False, log_metrics=True, update_delay=0.02, optimized=True)

    # Run the simulator
    # Flags:
    #   tolerance - epsilon tolerance before beginning testing, default is 0.05
    #   n_test    - discrete number of testing trials to perform, default is 0
    sim.run(n_test=10, tolerance=0.05)


if __name__ == '__main__':
    run()
| [
"noreply@github.com"
] | ericchen168.noreply@github.com |
4f4e897088183bbff64d0256899d01d4ccc8848a | d53887a279936b023ba2213702ec15b3f85d2fa1 | /iridiumtk/rx_stats_hist.py | bb099f43896fb40d7b0d258c0e7312b1b550fd19 | [
"BSD-2-Clause"
] | permissive | TheBiggerGuy/iridium-toolkit | 658e017e2440e813f1ea310d295e54d77a007b7c | 6dddc27a45992f049d3aab36d9a4a227ee684e2b | refs/heads/master | 2021-01-13T13:43:01.236214 | 2018-05-30T12:41:44 | 2018-05-30T12:41:44 | 76,336,269 | 2 | 0 | null | 2016-12-13T07:58:44 | 2016-12-13T07:58:44 | null | UTF-8 | Python | false | false | 5,662 | py | #!/usr/bin/env python
# vim: set ts=4 sw=4 tw=0 et pm=:
# Parses .bits files and displays the distribution
# of the "HIST_DIMENTION_KEY" of received frames
import argparse
from collections import namedtuple
from datetime import datetime
import fileinput
import logging
import re
import sys
import dateparser
try:
import matplotlib.pyplot as plt
except ImportError:
print('Failed to import matplotlib. This prevents any GUI.', file=sys.stderr)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
Dimension = namedtuple('Dimension', ['key', 'bin_size'])
HIST_DIMENSIONS = {
'frequency': Dimension('freq', 10000),
'length': Dimension('length', 1),
'time': Dimension('timestamp', 3600),
}
def extract_timestamp(filename, dt):
    """Derive a unix timestamp (seconds) for an event from its capture
    file name plus a millisecond offset *dt* into that capture.

    Recognizes three naming schemes; returns 0 when none match.
    """
    offset = float(dt) / 1000

    # Scheme 1: 'i-<epoch>-<kind>1.<aa>' where the two-letter base-26
    # suffix selects a 600-second slice of the capture.
    match = re.match(r'i-(\d+(?:\.\d+)?)-[vbsrtl]1.([a-z])([a-z])', filename)
    if match:
        hi, lo = match.group(2), match.group(3)
        slice_index = (ord(hi) - ord('a')) * 26 + (ord(lo) - ord('a'))
        return float(match.group(1)) + offset + slice_index * 600

    # Scheme 2: 'i-<epoch>-<kind>1' with an optional '-o<signed int>' tail.
    match = re.match(r'i-(\d+(?:\.\d+)?)-[vbsrtl]1(?:-o[+-]\d+)?$', filename)
    if match:
        return float(match.group(1)) + offset

    # Scheme 3: 'MM-DD-YYYYTHH-MM-SS-<s|r>1' wall-clock names.
    match = re.match(r'(\d\d)-(\d\d)-(20\d\d)T(\d\d)-(\d\d)-(\d\d)-[sr]1', filename)
    if match:
        month, day, year, hour, minute, second = (int(g) for g in match.groups())
        start = datetime(year, month, day, hour, minute, second)
        return (start - datetime(1970, 1, 1)).total_seconds() + offset

    return 0
def parse_line_to_message(line):
    """Parse one whitespace-separated log line into a message dict, or
    return None when the line is not a usable record.

    Lines whose first token is 'RX' are always accepted; anything else
    needs an 'A:OK' marker and at least 10 fields.
    """
    fields = line.split()
    if fields[0] != 'RX' and ('A:OK' not in fields or len(fields) < 10):
        return None

    name = fields[1]
    # 'X' records carry a literal epoch timestamp; otherwise the name is
    # a capture file name decoded by extract_timestamp().
    timestamp = float(fields[2]) if name == "X" else extract_timestamp(name, fields[2])
    freq = int(fields[3])
    confidence = int(fields[6][:-1])   # strip trailing '%'
    strength = float(fields[7])
    length = int(fields[8])
    if name == "X":
        error, msgtype = fields[9] == 'True', fields[10]
    else:
        error, msgtype = False, None

    return {
        'name': name,
        'timestamp': timestamp,
        'freq': freq,
        'access': True,                 # access is unconditionally True here
        'lead_out': 'L:OK' in fields,
        'confidence': confidence,
        'strength': strength,
        'length': length,
        'error': error,
        'msgtype': msgtype,
    }
def read_lines(input_files, start_time_filter, end_time_filter):
    """Yield parsed message dicts from *input_files* ('-' means stdin),
    silently skipping unparseable lines and messages outside the optional
    [start_time_filter, end_time_filter] datetime window."""
    for raw in fileinput.input(files=input_files):
        try:
            message = parse_line_to_message(raw)
        except (IndexError, ValueError):
            continue  # malformed line
        if not message:
            continue  # not a usable record
        when = datetime.utcfromtimestamp(message['timestamp'])
        if start_time_filter and when < start_time_filter:
            continue
        if end_time_filter and when > end_time_filter:
            continue
        yield message
def main():
    """CLI entry point: parse arguments, read and filter messages, then
    show a matplotlib histogram of the chosen dimension."""
    parser = argparse.ArgumentParser(description='Convert iridium-parser.py VOC output to DFS')
    parser.add_argument('--start', metavar='DATETIME', type=str, default=None, help='Filter events before this time')
    parser.add_argument('--end', metavar='DATETIME', type=str, default=None, help='Filter events after this time')
    parser.add_argument('--bin-size', metavar='INT', type=int, default=None, help='Size of bins')
    parser.add_argument('--minimum-length', metavar='INT', type=int, default=0)
    parser.add_argument('--minimum-confidence', metavar='INT', type=int, default=0)
    # NOTE(review): type=bool is an argparse trap -- any non-empty string
    # (even 'False') parses as True; action='store_true' is the usual fix.
    parser.add_argument('--lead-out-required', metavar='INT', type=bool, default=False)
    parser.add_argument('--show-errors', metavar='INT', type=bool, default=False)
    parser.add_argument('--dimension', choices=HIST_DIMENSIONS.keys(), required=True)
    parser.add_argument('input', metavar='FILE', nargs='*', help='Files to read, if empty or -, stdin is used')
    args = parser.parse_args()

    input_files = args.input if len(args.input) > 0 else ['-']
    start_time_filter = dateparser.parse(args.start) if args.start else None
    end_time_filter = dateparser.parse(args.end) if args.end else None
    dimension = HIST_DIMENSIONS[args.dimension]
    bin_size = args.bin_size if args.bin_size else dimension.bin_size
    minimum_confidence = args.minimum_confidence
    minimum_length = args.minimum_length
    lead_out_required = args.lead_out_required
    show_errors = args.show_errors

    lines = list(read_lines(input_files, start_time_filter, end_time_filter))
    number_of_lines = len(lines)
    logger.info('Read %d lines from input', number_of_lines)
    if number_of_lines == 0:
        print('No usable data found', file=sys.stderr)
        sys.exit(1)

    # Keep messages passing the length/confidence/lead-out/error filters and
    # below 1.626 GHz, extracting the histogram dimension's value.
    data = [s[dimension.key] for s in lines if s['length'] > minimum_length and s['confidence'] > minimum_confidence and (s['lead_out'] or not lead_out_required) and (s['error'] == show_errors) and s['freq'] < 1.626e9]
    bins = int((max(data) - min(data)) / bin_size)

    title = "File: %s : Distribution of message %s. Bin Size: %d, Minimum Confidence: %d" % (input_files, args.dimension, bin_size, minimum_confidence)
    if lead_out_required:
        title += ', lead out needs to be present'
    else:
        title += ', lead out does not need to be present'
    if show_errors:
        title += " and having decoding errors"

    fig = plt.figure()
    subplot = fig.add_subplot(1, 1, 1)
    subplot.hist(data, bins)
    plt.title(title)
    plt.xlabel(args.dimension)
    plt.ylabel('count')
    plt.show()


if __name__ == '__main__':
    main()
| [
"thebigguy.co.uk@gmail.com"
] | thebigguy.co.uk@gmail.com |
7489ba6b9b45df79183f421d24084deb03316467 | a9203123133390a00c8518e2c57ba73bdf9edba7 | /fit-yeah-app/app/seeds/users_followers.py | bb6ae1b5eb13612da5f2276675eef4848163727e | [] | no_license | bavithareddy6/fit-yeah | 611958d5c36de52644bfb93533c632c358afd9b2 | 1a51ea3ded16a48c94b657b36b2a28bac9cf115a | refs/heads/main | 2023-03-19T13:21:18.170895 | 2021-02-18T21:33:44 | 2021-02-18T21:33:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from werkzeug.security import generate_password_hash
from app.models import db, followers_table
print("Hello")  # NOTE(review): leftover placeholder -- executes on import of this seed module
| [
"ryoung7986@gmail.com"
] | ryoung7986@gmail.com |
0d74495bd1cc1679a451768d66fda5ef8194d179 | ce4f7f8e9336b8bbf9cbfe147d922e37034ab6c3 | /code-festival-2016-qualc/b/main.py | 987ac90bc403547651d6d2456180210a150a8701 | [] | no_license | kussy-tessy/atcoder | 5604919747242ee9740b9131bb6e168e96af0151 | ee917fa5a5218d4a9e72f710d0d844e7c203f13b | refs/heads/master | 2023-07-21T09:25:15.464881 | 2021-09-04T14:06:02 | 2021-09-04T14:06:02 | 311,221,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | #!/usr/bin/env python3
# Read K (unused below) and T, then the list of counts.
K, T = map(int, (input().split()))
As = list(map(int, (input().split())))
# Earlier single-kind special case kept for reference:
# if len(As) == 1:
#     print(As[0] - 1)
As.sort()
As_max = As[-1]          # largest count
As_other = sum(As[:-1])  # sum of all the other counts
# Answer: how far the largest count exceeds (all others + 1), floored at 0.
# NOTE(review): presumably the minimum unavoidable number of adjacent
# repeats when interleaving the kinds -- confirm against the problem text.
print(max(0, As_max - As_other - 1))
"teppei04285000@gmail.com"
] | teppei04285000@gmail.com |
7c562bd59160cfb964891453b9d4a28be9ae4772 | c6b74df572dc2dcf7034c25860d18cb3c9143d4f | /linearizer/generative.py | ec00a10615d362bf6ab18a3138b457f78f83330b | [] | no_license | kadarakos/LinearAMR | 55f2dfedb5d100988be5511004be690e2808ad17 | d8408924171960e84c92cfe46bb531b3b3ee17e0 | refs/heads/master | 2021-01-19T23:19:41.226183 | 2017-04-21T11:29:02 | 2017-04-21T11:29:02 | 88,961,862 | 0 | 0 | null | 2017-04-21T08:37:38 | 2017-04-21T08:37:38 | null | UTF-8 | Python | false | false | 3,595 | py | __author__ = 'thiagocastroferreira'
from sys import path
path.append('/home/tcastrof/amr/scp_repo')
path.append('/home/tcastrof/amr/Grammar')
path.append('../')
from compression_tree.compressor import Compressor
from ERG import AMR
import kenlm
import os
import utils
import itertools
class Generative(object):
    """Linearizes an AMR graph into a token sequence, using a KenLM
    language model to rank candidate orderings of each node's children."""

    def __init__(self, lm_path):
        self.model = kenlm.Model(lm_path)
        # NOTE(review): 'compressor' is a module-level name bound only in
        # the __main__ block below -- instantiating this class from an
        # import raises NameError. Consider passing it as a parameter.
        self.compressor = compressor

    def process(self, amr):
        """Linearize *amr* starting from its root node; returns a string."""
        self.amr = amr
        return self.linearize(self.amr.root)

    def ranking(self, base):
        """Score every permutation of the strings in *base* with the LM and
        return [(candidate, score), ...] sorted best-first.

        NOTE(review): factorial blow-up; linearize() only calls this for
        at most 9 children (9! = 362880 permutations, already slow).
        """
        candidates = []
        for candidate in itertools.permutations(base):
            # Flatten 'edge~child' joints into separate tokens for scoring.
            snt = []
            for e in candidate:
                for span in e.split():
                    snt.extend(span.split('~'))
            snt = ' '.join(snt)
            score = self.model.score(snt)
            candidates.append((' '.join(candidate), score))
        return sorted(candidates, key=lambda x: x[1], reverse=True)

    def linearize(self, root):
        """Recursively linearize the subtree under node id *root*.

        Children whose edge is marked '+' keep the edge name prefixed with
        '~'; a node marked '+' contributes its own name. With 1-9 children
        the order is chosen by LM ranking, otherwise natural order is kept.
        """
        linear = []
        for edge in self.amr.edges[root]:
            linear_child = self.linearize(edge.node_id)
            if linear_child.strip() != '':
                if edge.status == '+':
                    linear_child = edge.name + '~' + linear_child
                linear.append(linear_child)

        status = self.amr.nodes[root].status
        name = self.amr.nodes[root].name
        if 0 < len(linear) <= 9:
            if status == '+':
                linear.append(name)
            rank = self.ranking(linear)
            return rank[0][0]       # best-scoring ordering
        elif len(linear) > 9:
            # Too many children to permute: keep natural order.
            if status == '+':
                linear.insert(len(linear)-1, name)
            return ' '.join(linear)
        else:
            # Leaf: just the node name when kept, else nothing.
            if status == '+':
                return name
            else:
                return ''
if __name__ == '__main__':
    # Pre-trained compression classifier / feature paths.
    CLF_NODE_PATH = '../compression/results/clf_node.cPickle'
    CLF_EDGE_PATH = '../compression/results/clf_edge.cPickle'
    EDGE_PATH = '../compression/validation/edge_feat.cPickle'
    EDGE_PARENT_PATH = '../compression/validation/edge_parent_feat.cPickle'
    EDGE_CHILD_PATH = '../compression/validation/edge_child_feat.cPickle'
    NODE_PATH = '../compression/validation/node_feat.cPickle'
    NODE_PARENT_PATH = '../compression/validation/node_parent_feat.cPickle'
    LM_PATH = 'lm/6gram.arpa'

    # Bound at module level on purpose: Generative.__init__ reads this global.
    compressor = Compressor(clf_node_path=CLF_NODE_PATH,
                            clf_edge_path=CLF_EDGE_PATH,
                            edge_path=EDGE_PATH,
                            edge_parent_path=EDGE_PARENT_PATH,
                            edge_child_path=EDGE_CHILD_PATH,
                            node_path=NODE_PATH,
                            node_parent_path=NODE_PARENT_PATH)
    linearizer = Generative(lm_path=LM_PATH)

    # Load every AMR in the test split.
    amrs_path = '../data/LDC2016E25/data/amrs/split/test'
    amrs = []
    for fname in os.listdir(amrs_path):
        f = os.path.join(amrs_path, fname)
        amrs.extend(utils.parse_corpus(f, False))

    # Linearize each AMR; '~' joints are split back into separate tokens.
    linears = []
    for amr in amrs:
        print amr['sentence']  # NOTE(review): Python 2 print statement
        linear = linearizer.process(amr['amr'].lower())
        final = []
        for l in linear.split():
            final.extend(l.split('~'))
        linears.append(' '.join(final))

    # Write one linearization per line.
    de = open('../data/LDC2016E25/corpus/test.gen', 'w')
    # en = open('../data/LDC2016E25/corpus/dev.lex', 'w')
    for i, linear in enumerate(linears):
        de.write(linear)
        de.write('\n')
        # en.write(amrs[i]['sentence'].lower())
        # en.write('\n')
    de.close()
    # en.close()
"thiago.castro.ferreira@gmail.com"
] | thiago.castro.ferreira@gmail.com |
4a0570c65c81d3d58ef799132c1206c6d01be707 | bcf88b912b9443c3326466c226f68a7e7ad5aa9d | /bdbag/__init__.py | ab5519ea26b97ecb75b741254c95bea69f7adaf3 | [
"Apache-2.0"
] | permissive | mvdbeek/bdbag | 33bc7e0275c720104af77654b0016024cb6ab012 | fe67b5bffc68b7dac823ce03d450ede3affccbef | refs/heads/master | 2020-03-25T05:17:09.646537 | 2018-07-12T03:58:06 | 2018-07-12T03:58:06 | 143,438,809 | 0 | 0 | null | 2018-08-03T14:42:27 | 2018-08-03T14:42:27 | null | UTF-8 | Python | false | false | 6,188 | py | import os
import re
import sys
import json
import logging
import mimetypes
from requests.utils import requote_uri
from pkg_resources import get_distribution, DistributionNotFound
__version__ = "1.4.2"
if sys.version_info > (3,):
from urllib.parse import quote as urlquote, unquote as urlunquote, urlsplit, urlunsplit
from urllib.request import urlretrieve, urlopen
else:
from urllib import quote as urlquote, unquote as urlunquote, urlretrieve, urlopen
from urlparse import urlsplit, urlunsplit
try:
VERSION = get_distribution("bdbag").version
except DistributionNotFound:
VERSION = __version__ + '-dev'
PROJECT_URL = 'https://github.com/fair-research/bdbag'
try:
BAGIT_VERSION = get_distribution("bagit").version
except DistributionNotFound:
BAGIT_VERSION = 'unknown'
BAG_PROFILE_TAG = 'BagIt-Profile-Identifier'
BDBAG_PROFILE_ID = 'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-profile.json'
BDBAG_RO_PROFILE_ID = 'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-ro-profile.json'
ID_RESOLVER_TAG = 'identifier_resolvers'
DEFAULT_ID_RESOLVERS = ['n2t.net', 'identifiers.org']
DEFAULT_CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.bdbag')
DEFAULT_CONFIG_FILE = os.path.join(DEFAULT_CONFIG_PATH, 'bdbag.json')
DEFAULT_CONFIG = {
'bag_config':
{
'bag_algorithms': ['md5', 'sha256'],
'bag_processes': 1,
'bag_metadata':
{
BAG_PROFILE_TAG: BDBAG_PROFILE_ID
}
},
ID_RESOLVER_TAG: DEFAULT_ID_RESOLVERS
}
CONTENT_DISP_REGEX = re.compile(r"^filename[*]=UTF-8''(?P<name>[-_.~A-Za-z0-9%]+)$")
FILTER_REGEX = re.compile(r"(?P<column>^.*)(?P<operator>==|!=|=\*|!\*|\^\*|\$\*|>=|>|<=|<)(?P<value>.*$)")
FILTER_DOCSTRING = "\"==\" (equal), " \
"\"!=\" (not equal), " \
"\"=*\" (wildcard substring equal), " \
"\"!*\" (wildcard substring not equal), " \
"\"^*\" (wildcard starts with), " \
"\"$*\" (wildcard ends with), " \
"or \">\", \">=\", \"<\", \"<=\""
if not mimetypes.inited:
mimetypes.init()
def get_typed_exception(e):
    """Format exception *e* as '[TypeName] message'."""
    return "[{0}] {1}".format(type(e).__name__, e)
def add_mime_types(types):
    """Register extra extension -> MIME type mappings with the mimetypes
    registry.

    *types* maps a MIME type to a list of extensions, with or without a
    leading dot. A falsy argument is a no-op.
    """
    if not types:
        return
    for mime_type, extensions in types.items():
        for ext in extensions:
            dotted = ext if ext.startswith(".") else "." + ext
            mimetypes.add_type(type=mime_type, ext=dotted)
def guess_mime_type(file_path):
    """Best-effort content type for *file_path*.

    When both the media type and the encoding are known they are joined
    as 'type+encoding' (e.g. 'text/plain+gzip' for foo.txt.gz); when
    neither is known, falls back to 'application/octet-stream'.
    """
    media_type, encoding = mimetypes.guess_type(file_path)
    if media_type and encoding:
        return media_type + "+" + encoding
    if media_type:
        return media_type
    if encoding:
        return encoding
    return 'application/octet-stream'
def parse_content_disposition(value):
    """Extract the RFC 5987-style filename from a Content-Disposition
    value of the form filename*=UTF-8''<percent-encoded-name>.

    Raises ValueError when the value does not match or cannot be decoded.
    """
    match = CONTENT_DISP_REGEX.match(value)
    if not match:
        raise ValueError('Cannot parse content-disposition "%s".' % value)
    name = match.group('name')
    try:
        name = urlunquote(str(name))
    except Exception as e:
        raise ValueError('Invalid URL encoding of content-disposition filename component. %s.' % e)
    try:
        if sys.version_info < (3,):
            name = name.decode('utf8')  # Python 2: bytes -> unicode
    except Exception as e:
        raise ValueError('Invalid UTF-8 encoding of content-disposition filename component. %s.' % e)
    return name
def escape_uri(uri, illegal_only=True, safe="/"):
    """Percent-encode *uri*.

    With illegal_only=True (the default) delegate to requests'
    requote_uri, which only re-quotes characters that are illegal as-is.
    Otherwise un-quote and re-quote the path, query and fragment from
    scratch, keeping *safe* characters literal. Falsy input is returned
    unchanged.
    """
    if not uri:
        return uri
    if illegal_only:
        return requote_uri(uri)
    parts = urlsplit(uri)
    requoted = [urlquote(urlunquote(component), safe=safe)
                for component in (parts.path, parts.query, parts.fragment)]
    return urlunsplit((parts.scheme, parts.netloc) + tuple(requoted))
def filter_dict(expr, entry):
    """Test *entry* (a dict) against a filter expression of the form
    '<column><operator><value>' (see FILTER_DOCSTRING for the operators).

    Returns True when the entry matches; a falsy *expr* matches
    everything. Returns False (logging at DEBUG) otherwise. Raises
    ValueError for an unparseable expression or an unknown operator.
    Relational operators compare both sides as integers.
    """
    import operator as _op  # stdlib; replaces the previous eval() of built strings

    if not expr:
        return True

    match = FILTER_REGEX.search(expr)
    if not match:
        raise ValueError("Unable to parse expression: %s" % expr)
    expr_dict = match.groupdict()
    filter_col = expr_dict["column"].strip()
    filter_val = expr_dict["value"].strip()
    op_sym = expr_dict["operator"]

    relational_ops = {">": _op.gt, ">=": _op.ge, "<": _op.lt, "<=": _op.le}

    filter_neg = filter_substring = filter_startswith = filter_endswith = filter_relation = False
    if op_sym == "==":
        pass
    elif op_sym == "!=":
        filter_neg = True
    elif op_sym == "=*":
        filter_substring = True
    elif op_sym == "^*":
        filter_startswith = True
    elif op_sym == "$*":
        filter_endswith = True
    elif op_sym == "!*":
        filter_substring = True
        filter_neg = True
    elif op_sym in relational_ops:
        filter_relation = True
    else:
        raise ValueError("Unsupported operator type in filter expression: %s" % expr)

    result = False
    if filter_col in entry:
        value = entry[filter_col]
        if filter_neg:
            if filter_substring:
                result = filter_val not in str(value)
            else:
                result = filter_val != value
        elif filter_substring:
            result = filter_val in str(value)
        elif filter_startswith:
            result = str(value).startswith(filter_val)
        elif filter_endswith:
            result = str(value).endswith(filter_val)
        elif filter_relation:
            try:
                # Integer comparison via the operator module. (Previously
                # this built a "%d<op>%d" string and eval()'d it -- same
                # semantics, but eval is needless and fragile.)
                result = relational_ops[op_sym](int(value), int(filter_val))
            except Exception as e:
                logging.warning("Unable to evaluate filter expression [%s]: %s" %
                                (expr, get_typed_exception(e)))
        else:
            result = filter_val == value

    if not result:
        logging.debug(
            "Excluding %s because it does not match the filter expression: [%s]." %
            (json.dumps(entry), expr))
    return result
"mikedarcy@users.noreply.github.com"
] | mikedarcy@users.noreply.github.com |
daa15d98df1f67e5bdcef17cc40f0ecc55846745 | 6eed164d42a6a834441f7e895adce1edf04a8dc3 | /ynab_csv_converter/formats/saxotradergo.py | 36c7840583f3bad43ac4d6105eb250fe66561c89 | [] | no_license | gasbarroni8/ynab-csv-converter | 9477cf2f4dd12665684841620b91a48df1c2628e | 64fe94515c113ce9b1b87ac2ace2c78f0c6ebfe6 | refs/heads/master | 2020-12-22T18:41:43.059591 | 2020-01-03T08:25:41 | 2020-01-03T08:25:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | # -*- coding: utf-8 -*-
import re
from collections import namedtuple
# One parsed CSV row from a SaxoTraderGo account-statement export.
SaxoTraderGoLine = namedtuple('SaxoTraderGoLine', [
    'account_id',
    'posting_date',
    'value_date',
    'product',
    'net_change',
    'cash_balance'
])

# Dates appear either as '03-12-2018' or '03-dec-2018'.
date_pattern = r'^[0123]\d-([01]\d|[a-z]{3})-[12]\d{3}$'
# Amounts like '-1234.56' (1-2 decimals, optional sign).
amount_pattern = r'^-?\d+\.\d{1,2}$'

# Per-column validation regexes (account_id/product intentionally skipped).
# 'account_id': r'^\d{6}INET$',
column_patterns = {'posting_date': date_pattern,
                   'value_date': date_pattern,
                   # 'product': ,
                   'net_change': amount_pattern,
                   'cash_balance': amount_pattern,
                   }
# Compile once at import time.
column_patterns = {column: re.compile(regex) for column, regex in column_patterns.items()}

# Rows in the export are ordered newest-first.
txn_date_descends = True
def getlines(path):
    """Yield YnabLine tuples parsed from the SaxoTraderGo CSV export at
    *path*. On any parse error, reports the offending CSV line number on
    stderr and re-raises."""
    # Imports kept function-local so importing this module has no side effects.
    import csv
    import datetime
    import locale
    from . import validate_line
    from .ynab import YnabLine
    with open(path, 'r', encoding='utf-8') as handle:
        transactions = csv.reader(handle, delimiter=',', quotechar='"',
                                  quoting=csv.QUOTE_MINIMAL)
        # NOTE(review): mutates the process-wide locale; needed so that
        # locale.atof() and the '%d-%b-%Y' month names below parse
        # US-formatted values. Fails if en_US.UTF-8 is not installed.
        locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
        # Skip headers
        next(transactions)
        for raw_line in transactions:
            try:
                line = SaxoTraderGoLine(*raw_line)
                validate_line(line, column_patterns)
                # Numeric month first, then abbreviated month name.
                try:
                    date = datetime.datetime.strptime(line.value_date, '%d-%m-%Y')
                except ValueError:
                    date = datetime.datetime.strptime(line.value_date, '%d-%b-%Y')
                payee, memo = parse_text(line.product)
                category = u''
                amount = locale.atof(line.net_change)
                # Positive net change is an inflow, negative an outflow.
                if amount > 0:
                    outflow = 0.0
                    inflow = amount
                else:
                    outflow = -amount
                    inflow = 0.0
            except Exception:
                import sys
                msg = (u"There was a problem on line {line} in {path}\n"
                       .format(line=transactions.line_num, path=path))
                sys.stderr.write(msg)
                raise
            yield YnabLine(date, payee, category, memo, outflow, inflow)
def parse_text(text):
    """Split a product description into (payee, memo).

    When the text ends with a transaction id of 9+ digits, the payee is
    everything before the id and the memo references the id; otherwise
    the whole text is the payee and the memo is empty.
    """
    match = re.match(r'^(?P<payee>.+) (?P<txnid>\d{9,})$', text)
    if match is None:
        return text, u''
    payee = match.group('payee')
    memo = '{payee} txn #{txnid}'.format(payee=payee, txnid=match.group('txnid'))
    return payee, memo
"anders@ingemann.de"
] | anders@ingemann.de |
ca893e5aeee0c7456739c4457ae664105c5c96c6 | 46c3fd904e7b1c45541ffe0518afe50dfdafb089 | /movie/migrations/0003_movielink_link.py | 84dc7889b0f52b99ff6733291f6811344b4d8db2 | [] | no_license | Shirhussain/Movies | 6ab10b27748bc1cdd3a904861092e5246ce01190 | 4f6639491a86708a5d04a8de7f928500ecba3fdc | refs/heads/master | 2023-01-01T07:52:25.639564 | 2020-10-26T02:15:23 | 2020-10-26T02:15:23 | 306,643,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.1 on 2020-10-24 17:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required ``link`` URL field to ``MovieLink``.

    ``default=''`` with ``preserve_default=False`` only back-fills
    existing rows during the migration; new rows must supply a value.
    """

    dependencies = [
        ('movie', '0002_auto_20201024_1659'),
    ]

    operations = [
        migrations.AddField(
            model_name='movielink',
            name='link',
            field=models.URLField(default=''),
            preserve_default=False,
        ),
    ]
| [
"sh.danishyar@gmail.com"
] | sh.danishyar@gmail.com |
8eed1ed6028bc3b6519aa76a3e6c8f3a602d320a | 1641ec2be9bbfbfa3f385ba04942159f74738282 | /scripts/precip_interpolation.py | 03bbfa703dbe4b95e279a7365d04f2cb5716a4b1 | [] | no_license | moghimis/CPR | 458fcee983e9752c2c00a0d3df3e137135ce5f5d | 93724b1b3d3639b48460653570e6494602941828 | refs/heads/master | 2022-11-23T21:38:11.079115 | 2020-08-06T01:03:48 | 2020-08-06T01:03:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | import __init__
import geopandas as gpd
import pandas as pd
import numpy as np
import rasterio
import rasterio.plot
from rasterio import features
import matplotlib.pyplot as plt
import os
import sys
sys.path.append('../')
from CPR.configs import data_path
# ======================================================================================================================
# Burn vector of kriging contours into a raster
img_list = os.listdir(str(data_path / 'images'))
removed = {'4115_LC08_021033_20131227_test'}
img_list = [x for x in img_list if x not in removed]  # drop the test image
raster_path = data_path / 'precip' / 'kriged_rasters'
vector_path = data_path / 'precip' / 'kriged_vectors'
for i, img in enumerate(img_list):
    print('Image {}/{}, ({})'.format(i+1, len(img_list), img))
    contour_vector = gpd.read_file(vector_path / img / '{}'.format(img + '_krig_vector.shp'))
    contour_vector = contour_vector[~contour_vector.isna().geometry]  # Remove any empty geometry artifacts
    out_file = raster_path / '{}'.format(img + '_precip.tif')
    # NOTE(review): tif_file is assigned but never used below.
    tif_file = 'zip://' + str(data_path / 'images' / img / img) + '.zip!' + img + '.aspect.tif'
    stack_path = data_path / 'images' / img / 'stack' / 'stack.tif'
    # Use the stack raster as a NaN-filled template with matching geometry.
    with rasterio.open(str(stack_path), 'r') as src:
        in_arr = src.read(1).astype('float32')
        in_arr[:] = np.nan
        # NOTE(review): the second assignment discards the copy and
        # rebinds to src.meta itself -- presumably the .copy() was meant
        # to protect src.meta from the mutations below; confirm.
        meta = src.meta.copy()
        meta = src.meta
        meta['dtype'] = 'float32'
        meta.update(compress='lzw')
    # Rasterize each contour polygon with its Value_Max precipitation value.
    with rasterio.open(out_file, 'w+', **meta) as out:
        shapes = ((geom, value) for geom, value in zip(contour_vector.geometry, contour_vector.Value_Max))
        burned = features.rasterize(shapes=shapes, fill=np.nan, out=in_arr, transform=out.transform)
        out.write_band(1, burned)

# Examine images: display each burned raster, advancing on a button press.
for i, img in enumerate(img_list):
    print('Image {}/{}, ({})'.format(i + 1, len(img_list), img))
    out_file = raster_path / '{}'.format(img + '_precip.tif')
    with rasterio.open(out_file, 'r', crs='EPSG:4326') as ds:
        rasterio.plot.plotting_extent(ds)
        fig, ax = plt.subplots(figsize=(8, 8))
        rasterio.plot.show(ds, ax=ax, with_bounds=True)
        plt.waitforbuttonpress()
        plt.close()
| [
"yon.davies@gmail.com"
] | yon.davies@gmail.com |
e77ae6ec767e5e1c57f23fa6e1ee2bc3da3cc0e4 | 422359d9039af33693f7f2893da80e78e2465088 | /wav_read/WavInfo.py | 7453103ab77a703bb15b64a152011261bda2e412 | [] | no_license | qwe111845/EOTRTS | 5ca6ba2f17744939519f03ad7bbf1bceeac15ab9 | 85290d9ba9f4b2b2b7143937f2962e737d940847 | refs/heads/master | 2020-07-28T22:46:45.369135 | 2019-09-19T18:00:19 | 2019-09-19T18:00:19 | 209,566,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
import wave
def get_wav_time(path):
    """Return the duration of the WAV file at *path*, in seconds.

    Raises wave.Error (or EOFError) for files that are not valid WAV.
    """
    f = wave.open(path, 'rb')
    try:
        frame_rate = f.getframerate()
        number_of_frames = f.getnframes()
    finally:
        f.close()  # close even if reading the header fails
    # 1.0 * forces float division (kept for Python 2 compatibility).
    return number_of_frames / (1.0 * frame_rate)
def get_wav_frame(path):
    """Return (number_of_channels, frame_rate) for the WAV file at *path*."""
    f = wave.open(path, 'rb')
    try:
        params = f.getparams()
    finally:
        f.close()  # BUG FIX: the handle was previously never closed
    number_of_channels = params[0]
    frame_rate = params[2]
    return number_of_channels, frame_rate
def get_wav_info(path):
    """Print the WAV header parameters of *path* (debug helper).

    Returns None; output goes to stdout.
    """
    f = wave.open(path, 'rb')
    try:
        params = f.getparams()
    finally:
        f.close()  # BUG FIX: the handle was previously never closed
    number_of_channels, sampling_width, frame_rate, number_of_frames = params[:4]
    print(params)
    print(number_of_channels, sampling_width, frame_rate, number_of_frames)
| [
"qwe111845@gmail.com"
] | qwe111845@gmail.com |
47a724810b4e9c7cfd2870858a2472067fe6ff19 | 1cceef957954ec516cd8bcd9e9d51e8d9120200b | /test_retring_async.py | 1c13088f03af4ed39ea5ab6e8ea213792c02dbe7 | [
"MIT"
] | permissive | coneagoe/retrying-async | 3b8c4a51a7adcbaa2149b110199e6d0b6b5a1f7e | 54eec24e4183b4ea31c0e133ed11ec0f0535a194 | refs/heads/master | 2022-12-21T05:12:17.930689 | 2020-09-21T02:38:42 | 2020-09-21T02:38:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | # coding: utf-8
import asyncio
import requests
from retrying_async import retry
def request_api_sync():
    """Fetch http://www.baidu.com synchronously, print the response, then raise.

    The unconditional raise appears intentional: this is a manual test helper
    for exercising retry behaviour.
    """
    print('正在获取')
    response = requests.get(url="http://www.baidu.com")
    print(response.status_code, response.content)
    raise Exception("异常")
@retry(attempts=3, delay=3)  # retried 3 times with a 3s delay by retrying_async
async def request_api_async():
    """Coroutine variant of request_api_sync; always raises so @retry fires.

    NOTE(review): ``requests.get`` is a blocking call inside a coroutine, so it
    stalls the event loop while the request is in flight — acceptable for a
    retry demo, but an async HTTP client would be needed for real use.
    """
    print('正在获取')
    response = requests.get(url="http://www.baidu.com")
    print(response.status_code, response.content)
    raise Exception("异常")
raise Exception("异常")
if __name__ == '__main__':
    # Drive the async variant to completion; @retry will re-run it on failure.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(request_api_async())
| [
"lixiaolong@sensoro.com"
] | lixiaolong@sensoro.com |
d751ba839e41585536769b62bfa2c50a150fb12d | 6559d2c69ddcd73df844f9e26470c8ea06d92a6c | /xnr_0429/xnr/_facebook/feedback_comment.py | 550d853c6fbd9b7769168390aeafe3c05e801dbe | [] | no_license | yuanhuiru/xnr2 | cc4199fbb136fa5bdf18d879bb77ceb5155627f3 | b37ec9beccf7332efcda9bdff0c34fa3198b816c | refs/heads/master | 2020-03-21T12:22:17.392966 | 2020-01-14T06:40:55 | 2020-01-14T06:40:55 | 138,549,389 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | #!/usr/bin/env python
#encoding: utf-8
from launcher import Launcher
import time
from es import Es_fb
class Comment():
    """Scrapes Facebook post comments via a selenium webdriver and stores them.

    NOTE(review): ``get_comment`` reads the module-level globals ``comment_list``
    and ``driver``; ``driver`` is never defined in this file (it is presumably
    the webdriver owned by the ``Launcher`` instance — confirm against
    ``launcher.py``).  ``save`` reads the module-level ``es`` client.
    """
    def __init__(self):
        # Accumulates one dict per scraped comment.
        self.list = []
    def get_comment(self):
        """Visit every URL in the global ``comment_list`` and collect comments.

        Each comment is appended to ``self.list`` as a dict with keys
        author_name / author_id / pic_url / content / time, and the full
        accumulated list is returned.
        """
        for url in comment_list:
            driver.get(url)
            # Root post text and timestamp (fetched but currently unused below).
            root_content = driver.find_element_by_xpath('//div[@class="_58jw"]/p').text
            root_time = driver.find_element_by_xpath('//abbr[@class="_5ptz"]').get_attribute('data-utime')
            for each in driver.find_elements_by_xpath('//div[@aria-label="评论"]'):
                author_name = each.find_element_by_xpath('./div/div/div/div[2]/div/div/div/span/span[1]/a').text
                # Author id is embedded in the hovercard URL as "id=<digits>".
                author_id = ''.join(re.findall(re.compile('id=(\d+)'),each.find_element_by_xpath('./div/div/div/div[2]/div/div/div/span/span[1]/a').get_attribute('data-hovercard')))
                pic_url = each.find_element_by_xpath('./div/div/div/div[1]/a/img').get_attribute('src')
                content = each.find_element_by_xpath('./div/div/div/div[2]/div/div/div/span/span[2]/span/span/span/span').text
                time = each.find_element_by_xpath('./div/div/div/div[2]/div/div/div[2]/span[4]/a/abbr').get_attribute('data-utime')
                self.list.append({'author_name':author_name,'author_id':author_id,'pic_url':pic_url,'content':content,'time':time})
        return self.list
    def save(self,indexName,typeName,item):
        """Persist *item* to Elasticsearch under *indexName*/*typeName*."""
        es.executeES(indexName,typeName,item)
if __name__ == '__main__':
    # Log in and collect the list of comment-page URLs to scrape.
    fb = Launcher('18538728360','zyxing,0513')
    # Bug fix: the original instantiated the undefined name ``es_twitter``;
    # the Elasticsearch helper imported at the top of this file is ``Es_fb``.
    es = Es_fb()
    comment_list = fb.get_comment_list()
    comment = Comment()
    # Renamed from ``list`` to avoid shadowing the builtin.
    comments = comment.get_comment()
    # Bug fix: ``save`` takes (indexName, typeName, item); the original passed a
    # single positional argument.  TODO(review): confirm the index/type names
    # used elsewhere in this project.
    comment.save('facebook', 'comment', comments)
| [
"bingqulee@gmail.com"
] | bingqulee@gmail.com |
1aaafa9b5403e7331b1d730439c5a8e67fa3debb | d1e4f29e583ee964d63bc48554eaa73d67d58eb2 | /zerver/migrations/0264_migrate_is_announcement_only.py | 073eb22a23670741fdc4d7155701549b168dfc77 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | hygolei/zulip | 299f636f9238f50b0d2746f1c371748f182f1f4e | 39fe66ab0824bc439929debeb9883c3046c6ed70 | refs/heads/master | 2023-07-11T22:50:27.434398 | 2021-08-09T10:07:35 | 2021-08-09T10:07:35 | 375,401,165 | 1 | 1 | Apache-2.0 | 2021-08-09T10:07:36 | 2021-06-09T15:20:09 | Python | UTF-8 | Python | false | false | 972 | py | # Generated by Django 1.11.26 on 2020-01-25 23:47
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def upgrade_stream_post_policy(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Backfill the new ``stream_post_policy`` column from ``is_announcement_only``.

    Migrations must use the historical model from ``apps``, so the policy
    constants are re-declared locally rather than imported from the live model.
    """
    Stream = apps.get_model("zerver", "Stream")
    Stream.STREAM_POST_POLICY_EVERYONE = 1
    Stream.STREAM_POST_POLICY_ADMINS = 2
    # is_announcement_only=False -> everyone may post; True -> admins only.
    Stream.objects.filter(is_announcement_only=False).update(
        stream_post_policy=Stream.STREAM_POST_POLICY_EVERYONE
    )
    Stream.objects.filter(is_announcement_only=True).update(
        stream_post_policy=Stream.STREAM_POST_POLICY_ADMINS
    )
class Migration(migrations.Migration):
    """Data migration: populate stream_post_policy added in migration 0263."""
    dependencies = [
        ("zerver", "0263_stream_stream_post_policy"),
    ]
    operations = [
        # elidable=True: this backfill can be squashed away once applied.
        migrations.RunPython(
            upgrade_stream_post_policy, reverse_code=migrations.RunPython.noop, elidable=True
        ),
    ]
| [
"tabbott@zulipchat.com"
] | tabbott@zulipchat.com |
574d94704c47e4f854d58fe91f99724020f11019 | 848c1537490d53d4d7dc20607bc73a7471d25d54 | /day7/intcode_d7p2.py | ab85637a2b7a65c3cdb8631fc86698e845f81314 | [] | no_license | adam-troyer/advent2019 | 57b83ac883de71740573d0a0a750cc194630865b | 37fe6a1096819718a56ff976f82dc9ce91daf14d | refs/heads/master | 2020-11-26T12:55:53.790957 | 2020-01-10T04:34:00 | 2020-01-10T04:34:00 | 228,924,356 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,930 | py | from collections import namedtuple, deque
from itertools import permutations, cycle
# Opcode: num_args = how many parameters follow the instruction,
# func = the Intcode handler method bound to that instruction.
Opcode = namedtuple('Opcode', ['num_args', 'func'])
def writer(func):
    """Mark *func* as an opcode handler whose final parameter is a write address."""
    setattr(func, '_writer', True)
    return func
class Intcode:
    """An Advent of Code 2019 Intcode virtual machine (day 7 feature set).

    Supports opcodes 1-8 and 99, position/immediate parameter modes, queued
    inputs, and cooperative pausing: ``run()`` returns early when the input
    queue is exhausted, so several machines can be interleaved (part 2).
    """
    def __init__(self, memory, interactive=False, inputs=None, debug=False):
        """memory: program as a list of ints (mutated in place).
        interactive: prompt on stdin / print outputs instead of queueing.
        inputs: optional initial input queue (copied).
        debug: print a trace of every fetch/execute step."""
        self.mem = memory
        self.pointer = 0
        self.interactive = interactive
        if inputs is None:
            self.inputs = list()
        else:
            self.inputs = inputs.copy()
        if not interactive:
            self.output_list = []
        self.debug = debug
        # True while paused waiting for more inputs (non-interactive mode).
        self.hold_for_input = False
        self.halt = False
    def run(self):
        """Execute instructions until opcode 99 or the input queue runs dry."""
        # Run until halt or we run out of inputs
        while (not self.halt) and (not self.hold_for_input):
            if self.debug:
                print(f'Memory: {self.mem}')
                print(f'Instruction pointer: {self.pointer}')
            self._fetch_op()
        # Running out of inputs will break out of the run loop,
        # but we want to go back into it next time run() is called,
        # so reset hold_for_input
        self.hold_for_input = False
    def add_inputs(self, inputs):
        """Append *inputs* (iterable of ints) to the pending input queue."""
        self.inputs.extend(inputs)
    def _fetch_op(self):
        """Decode and execute the instruction at the current pointer."""
        # Parse out the instruction value, parameter modes, and parameter values
        if self.pointer >= len(self.mem):
            raise IndexError(f'Instruction pointer out of bounds: '
                             f'pointer={self.pointer}, mem length={len(self.mem)}')
        opcode_val = self.mem[self.pointer]
        opcode, param_modes = self._parse_opcode_val(opcode_val)
        if opcode.num_args != 0:
            # Grab the parameters from memory, then fetch their values based on position/immediate mode
            params = self.mem[self.pointer+1:self.pointer+1+opcode.num_args]
            param_vals = self._fetch_params_by_mode(params, param_modes, opcode)
        if self.debug:
            print(f'Fetching: '
                  f'Opcode value: {opcode_val}; '
                  f'Opcode: {opcode}; '
                  f'Param modes: {param_modes}; ')
            if opcode.num_args != 0:
                print(f'\tParam values (after fetch): {param_vals}')
        # Set the default next instruction pointer location to the end of the
        # current opcode. Opcodes may overwrite this (e.g. jumps)
        self.next_pointer = self.pointer + opcode.num_args + 1
        # Run the instruction
        if opcode.num_args > 0:
            opcode.func(self, param_vals)
        else:
            opcode.func(self)
        # Move the instruction pointer to the next opcode
        self.pointer = self.next_pointer
    def _parse_opcode_val(self, val):
        """Split an instruction value into (Opcode, per-parameter mode list)."""
        val_str = str(val)
        # Last two digits select the opcode; remaining digits are the modes.
        opcode = self._OPCODES[int(val_str[-2:])]
        modes_str = val_str[:-2]
        # Prepend leading 0s for parameter modes
        if len(modes_str) < opcode.num_args:
            modes_str = '0'*(opcode.num_args - len(modes_str)) + modes_str
        # Parse the parameter mode values, right to left
        # 0 = position mode, 1 = immediate mode
        param_modes = [int(v) for v in modes_str[::-1]]
        return opcode, param_modes
    def _fetch_params_by_mode(self, params, modes, opcode):
        """Resolve raw parameters to values per their position/immediate mode."""
        # Fetch all params by position/immediate mode except the final one
        vals = [self.mem[param] if mode == 0 else param for param, mode in list(zip(params, modes))[:-1]]
        # For the last parameter, check if the opcode function is a writer.
        # If so, ignore the mode and add the parameter value directly so
        # the opcode properly gets pointed to the memory address for its result
        if hasattr(opcode.func, "_writer"):
            vals.append(params[-1])
        else:
            param = params[-1]
            mode = modes[-1]
            val = self.mem[param] if mode == 0 else param
            vals.append(val)
        return vals
    @writer
    def _add2(self, param_vals):
        # Opcode 1: mem[p2] = p0 + p1
        self.mem[param_vals[2]] = param_vals[0] + param_vals[1]
        if self.debug:
            print(f'add2: {param_vals[0]}+{param_vals[1]}='
                  f'{self.mem[param_vals[2]]}->mem[{param_vals[2]}]')
    @writer
    def _mult2(self, param_vals):
        # Opcode 2: mem[p2] = p0 * p1
        self.mem[param_vals[2]] = param_vals[0] * param_vals[1]
        if self.debug:
            print(f'mult2: {param_vals[0]}*{param_vals[1]}='
                  f'{self.mem[param_vals[2]]}->mem[{param_vals[2]}]')
    @writer
    def _input(self, param_vals):
        # Opcode 3: read one input into mem[p0]; pause if the queue is empty.
        if self.interactive:
            in_val = int(input('Enter input: '))
        else:
            try:
                in_val = self.inputs.pop(0)
            except IndexError: # Raised when input list is empty
                # Keep the instruction pointer from advancing so this op
                # is run again at next run() command, and flag hold
                # to break out of run loop
                self.next_pointer = self.pointer
                self.hold_for_input = True
                return
        self.mem[param_vals[0]] = in_val
        if self.debug:
            print(f'_input: {in_val}->mem[{param_vals[0]}]')
    def _output(self, param_vals):
        # Opcode 4: emit p0 (queued in output_list unless interactive).
        if not self.interactive:
            self.output_list.append(param_vals[0])
            if self.debug:
                print(f'_output:{param_vals[0]} added to output list')
                print(f'        Output list:{self.output_list}')
        else:
            print(f'Output: {param_vals[0]}')
    def _jump_if_true(self, param_vals):
        # Opcode 5: if p0 is non-zero, jump to p1.
        if param_vals[0]:
            self.next_pointer = param_vals[1]
            if self.debug:
                print(f'_jump_if_true: {param_vals[0]} true, next_pointer set to {param_vals[1]}')
        elif self.debug:
            print(f'_jump_if_true: {param_vals[0]} not true, next_pointer unchanged')
    def _jump_if_false(self, param_vals):
        # Opcode 6: if p0 is zero, jump to p1.
        if not param_vals[0]:
            self.next_pointer = param_vals[1]
            if self.debug:
                print(f'_jump_if_false: {param_vals[0]} false, next_pointer set to {param_vals[1]}')
        elif self.debug:
            print(f'__jump_if_false: {param_vals[0]} not false, next_pointer unchanged')
    @writer
    def _less_than(self, param_vals):
        # Opcode 7: mem[p2] = 1 if p0 < p1 else 0
        result = int(param_vals[0] < param_vals[1])
        self.mem[param_vals[2]] = result
        if self.debug:
            print(f'_less_than: {param_vals[0]}<{param_vals[1]}={result} -> mem[{param_vals[2]}')
    @writer
    def _equal(self, param_vals):
        # Opcode 8: mem[p2] = 1 if p0 == p1 else 0
        result = int(param_vals[0] == param_vals[1])
        self.mem[param_vals[2]] = result
        if self.debug:
            print(f'_equal: {param_vals[0]}=={param_vals[1]}={result} -> mem[{param_vals[2]}')
    def _halt(self):
        # Opcode 99: stop execution permanently.
        self.halt = True
    # Instruction table; unbound methods are stored and called explicitly
    # with ``self`` in _fetch_op.
    _OPCODES = {1: Opcode(num_args=3, func=_add2),
                2: Opcode(num_args=3, func=_mult2),
                3: Opcode(num_args=1, func=_input),
                4: Opcode(num_args=1, func=_output),
                5: Opcode(num_args=2, func=_jump_if_true),
                6: Opcode(num_args=2, func=_jump_if_false),
                7: Opcode(num_args=3, func=_less_than),
                8: Opcode(num_args=3, func=_equal),
                99: Opcode(num_args=0, func=_halt)}
def run_amps_nofb(code_str):
    """Part 1: chain five amplifiers in series (no feedback loop).

    Tries every permutation of phases 0-4, feeding each amp its phase and the
    previous amp's output (initially 0).

    Improvement over the original: the best ``(phase_seq, signal)`` pair is
    returned as well as printed, so callers can use the result
    programmatically (previously the function returned None).
    """
    code = [int(s) for s in code_str.split(',')]
    # One (phase_seq, final_signal) entry per permutation.
    results = []
    for phase_seq in permutations(range(5)):
        signal = 0  # input signal to the first amp
        for phase in phase_seq:
            # Each amp runs a fresh copy of the program with [phase, signal].
            amp = Intcode(code.copy(), interactive=False, inputs=[phase, signal])
            amp.run()
            # This amp's output becomes the next amp's input signal.
            signal = amp.output_list[0]
        results.append((phase_seq, signal))
    best = max(results, key=lambda x: x[1])
    print(best)
    return best
def run_amps_fb(code_str):
    """Part 2: five amplifiers in a feedback loop, phases 5-9.

    Each amp pauses when it runs out of inputs; we round-robin between them,
    forwarding each amp's outputs to the next, until all have halted.

    Improvement over the original: the best ``(phase_seq, signal)`` pair is
    returned as well as printed, consistent with run_amps_nofb.
    """
    code = [int(s) for s in code_str.split(',')]
    # One (phase_seq, final_signal) entry per permutation.
    results = []
    for phase_seq in permutations(range(5, 10)):
        # Initialize all the amps, with their phase as their first input.
        amps = [Intcode(code.copy(), inputs=[phase]) for phase in phase_seq]
        # The first amp additionally receives the initial 0 signal.
        amps[0].add_inputs([0])
        # Round-robin: each amp runs until it halts or starves for input;
        # by the time it is reached again, the previous amp has fed it.
        i = 0
        amp = amps[0]
        amp.run()
        while any(not a.halt for a in amps):
            # Drain this amp's outputs (it may not have produced any yet).
            if amp.output_list:
                outputs = amp.output_list.copy()
                amp.output_list = []
            else:
                outputs = None
            # Advance to the next amp, wrapping after the last one.
            i = (i + 1) % 5
            amp = amps[i]
            if outputs:
                # Previous amp's outputs become this amp's inputs.
                amp.add_inputs(outputs)
            amp.run()
        # After all halt, the last amp's remaining output is the thrust signal.
        results.append((phase_seq, amp.output_list[0]))
    best = max(results, key=lambda x: x[1])
    print(best)
    return best
if __name__ == "__main__":
# Ex 1: 43210 at sequence 4,3,2,1,0
nofb_ex1 = "3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0"
# Ex 2: 54321 at sequence 0,1,2,3,4
nofb_ex2 = "3,23,3,24,1002,24,10,24,1002,23,-1,23,101,5,23,23,1,24," \
"23,23,4,23,99,0,0"
# Ex 3: 65210 at sequencye 1,0,4,3,2
nofb_ex3 = "3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002," \
"33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0"
# Part 1: Result is 20413 at phase seq 4,1,0,2,3
with open('day7_input.txt', 'r') as infile:
input_str = infile.read()
# Part 2 Ex 1: Result is 139629729 at seq 9 8 7 6 5
fb_ex1 = "3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28," \
"-1,28,1005,28,6,99,0,0,5"
# Part 2 Ex 2: Result is 18216 at seq 9 7 8 5 6
fb_ex2 = "3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,55,26," \
"1001,54,-5,54,1105,1,12,1,53,54,53,1008,54,0,55,1001,55,1," \
"55,2,53,55,53,4,53,1001,56,-1,56,1005,56,6,99,0,0,0,0,10"
# run_amps_nofb(nofb_ex3)
run_amps_fb(input_str)
| [
"adam.troyer@gmail.com"
] | adam.troyer@gmail.com |
9f0fe44398ecdc7bda9c8cb213e2256c43819598 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/334/90931/submittedfiles/testes.py | 12f71f0bb7b483e5f7d1e3b92c7403e72b738f64 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
def maximo(a, b):
    """Return the larger of *a* and *b* (*b* when they are equal)."""
    return a if a > b else b
# Bug fixes: the original printed ``maximo(a,b)`` with an unclosed parenthesis
# (a SyntaxError) and referenced undefined names ``a``/``b`` instead of the
# values actually read; input() also returns str, so convert to int so the
# comparison in maximo is numeric.
x = int(input())
y = int(input())
print(maximo(x, y))
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
0ab3204a2f5926f0e823fb6c752fe0816a0d23b0 | 3f63371bf7cdf4e8f875a90fdf4967674bb0766e | /django_start/dj_prj/dj_prj/asgi.py | bc83cc8c830b5249fdf5d11e674c5471f674a6d8 | [] | no_license | Wistick/homeworks_skillfactory | c94a425b765a826d845bf428a17a8ded276790a4 | 652e1553d38e53b2a86a7c000462b624112d72d9 | refs/heads/main | 2023-05-03T15:07:08.104767 | 2021-05-14T14:14:10 | 2021-05-14T14:14:10 | 315,729,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
ASGI config for dj_prj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application object.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dj_prj.settings')
# The ASGI callable that servers (uvicorn/daphne) import and serve.
application = get_asgi_application()
| [
"vadim.ska8@yandex.ru"
] | vadim.ska8@yandex.ru |
c192d8071b7f3b0bb0b5ef18f6607561d9d0b1e5 | c72f17d9658d980524d4e3a4677d2eb657f08178 | /Cab_Fare_Calculator.py | a21c80671601739e90f7b52c99efee60eba71e12 | [] | no_license | EstrellaDionis/Python-Basics | 132f5f515eb513877c0aa1c523f6f1bd3a784997 | c15c17d963c38f1355bcf462c65e6c1ef4b99561 | refs/heads/main | 2023-08-28T08:11:05.657835 | 2021-10-18T03:56:35 | 2021-10-18T03:56:35 | 387,291,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | ride_type = "Black"
credits = 4
ride_price = 0
final_price = 0
if ride_type == "Doober X":
ride_price = 20.5
elif ride_type == "Black":
ride_price = 37.9
else:
ride_price = 18.7
print("Ride Price:")
print(ride_price)
if credits > 0:
final_price = ride_price - credits
print("Final price:")
print(final_price)
| [
"EstrellaDionis@gmail.com"
] | EstrellaDionis@gmail.com |
7a2ab137cc8a28bba8ebf010e73fdd27b812387a | 270262c3bae7a1c2ec4b5f36eb79e7b7d0a9e06a | /prototype/__init__.py | 6f87fada88704cc80496f856fdc10492199f94de | [
"MIT"
] | permissive | jsakas/prototype | 6168d5866687c829e1b8dfc50f69d65aca7edb23 | 88e62f99b37bdc0401ae2bfd101653cfd910aab7 | refs/heads/master | 2020-06-14T17:05:19.874052 | 2018-08-18T06:06:33 | 2018-08-18T06:06:33 | 75,357,846 | 1 | 2 | null | 2017-04-12T16:04:31 | 2016-12-02T03:40:28 | Python | UTF-8 | Python | false | false | 916 | py | import os, sys, shutil
class InitializeProject(object):
    """Creates a new project directory from this package's bundled template.

    All work happens in ``__init__``: the template directory shipped next to
    this module is copied (files and subdirectories) into
    ``<cwd>/<project_name>``.

    NOTE(review): ``exit(...)`` inside ``__init__`` terminates the whole
    interpreter when the target directory exists, and copy errors are only
    printed (the "done" message is still emitted) — both look intentional for
    a CLI tool, but are worth confirming.
    """
    def __init__(self, project_name):
        # Template lives alongside this module: <package>/template
        package_location = os.path.dirname(os.path.realpath(__file__))
        package_template_location = os.path.join(package_location, 'template')
        # New project is created relative to the caller's working directory.
        project_location = os.path.join(os.getcwd(), project_name)
        if os.path.exists(project_location):
            exit('Cannot create project "{}" - directory already exists.'.format(project_name))
        os.makedirs(project_location)
        try:
            # Copy files with copyfile and directories with copytree.
            for f in os.listdir(package_template_location):
                if os.path.isfile(os.path.join(package_template_location, f)):
                    shutil.copyfile(os.path.join(package_template_location, f), os.path.join(project_location, f))
                else:
                    shutil.copytree(os.path.join(package_template_location, f), os.path.join(project_location, f))
        except Exception as e:
            # Best-effort copy: report the failure but still finish.
            print(e)
        print('Creating new Prototype project "{}" ... done.'.format(project_name))
        return
| [
"jon.sakas@beatport.com"
] | jon.sakas@beatport.com |
2ee054750fe8cc331ccdac81371c3d981511f136 | e42a1fd45e1d634d6392a616190414c4ab5c2bbb | /app/__init__.py | 4ff90bd1d145a13f85a8aa601655dfc3361941a9 | [] | no_license | FrancineU/Flask_Project3 | ae9a711bd2e00b238911d87e501b39e6ce3fbf63 | 004ba315f660f3be2bc790f2e6c880eb82af4ff3 | refs/heads/master | 2023-04-04T07:34:34.524377 | 2021-03-31T19:48:14 | 2021-03-31T19:48:14 | 353,472,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | from flask import Flask
from config import config_options
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
# Flask extensions are created unbound here and attached to the app inside
# create_app() (the application-factory pattern).
bootstrap = Bootstrap()
db = SQLAlchemy()
login_manager = LoginManager()
# Invalidate sessions aggressively when the client identity changes.
login_manager.session_protection = 'strong'
# Endpoint users are redirected to when login is required.
login_manager.login_view = 'auth.login'
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    config_name selects an entry from config_options (e.g. development,
    production). Blueprints are imported inside the function to avoid
    circular imports with this module.
    """
    # Initializing application
    app = Flask(__name__)
    # Creating the app configurations
    app.config.from_object(config_options[config_name])
    # Registering the blueprints
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix = '/authenticate')
    # Bind the module-level extensions to this app instance.
    bootstrap.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    return app
"uwizeyimanafrancine62@gmail.com"
] | uwizeyimanafrancine62@gmail.com |
b4a5f698bb10d79488f816794cb1ad2222a9a073 | b0586562458227bcaf1a673c92ddaf4141b661d6 | /example_etl.py | 27286d1c178c626a166e13b834a4a4ba39d72373 | [] | no_license | HyunTruth/luigi-redshift-example | d922ff8916c0f6ce42295b29431a4eb06bcb2816 | 8731ab6a1ac49e1d396047c5ce704ba56e84e490 | refs/heads/master | 2020-03-09T22:54:03.519281 | 2018-04-11T07:04:37 | 2018-04-11T07:04:37 | 129,041,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,236 | py | import logging
from datetime import datetime, date, timedelta, time
import pandas as pd
import requests
import os
# import luigi modules for redshift / s3
import luigi
from luigi.contrib import redshift, s3
# in case the intended folder such as 'tmp' does not exist, create one
def check_and_mkdir(path):
    """Create *path* (including missing parents) if it does not already exist.

    Bug fix: the original's check-then-create (``os.path.exists`` followed by
    ``os.mkdir``) could raise if another process created the directory in
    between, and could not create nested paths. ``exist_ok=True`` makes the
    call race-free and idempotent.
    """
    os.makedirs(path, exist_ok=True)
    return
# Use luigi's own logger so task output lands in the scheduler logs.
logger = logging.getLogger('luigi-interface')
# meta data
__author__ = 'Hyun Jin Lee'
# strftime patterns used to stamp output filenames at different granularities.
DATE_FORMAT = '%Y-%m-%d'
DATEHOUR_FORMAT = '%Y-%m-%dT%H'
DATEMINUTE_FORMAT = '%Y-%m-%dT%H%M'
class path(luigi.Config):
    """Filesystem/S3 locations, read from the [path] section of luigi.cfg."""
    tmp_path = luigi.Parameter()        # scratch directory for intermediates
    tos3_path = luigi.Parameter()       # local staging dir for files bound for S3
    s3_load_bucket = luigi.Parameter()  # destination S3 bucket/prefix
class redshift_auth(luigi.Config):
    """Loads sensitive infos via Luigi's config system to get config variables from the <class_name> tag from luigi.cfg."""
    host = luigi.Parameter(default='')      # Redshift cluster endpoint
    database = luigi.Parameter(default='')  # target database name
    user = luigi.Parameter(default='')
    password = luigi.Parameter(default='')
class s3_auth(luigi.Config):
    """Loads sensitive infos via Luigi's config system to get config variables from the <class_name> tag from luigi.cfg."""
    aws_access_key_id = luigi.Parameter(default='')
    aws_secret_access_key = luigi.Parameter(default='')
    region = luigi.Parameter(default='')  # AWS region / S3 host
class ExampleTask(luigi.WrapperTask):
    """Top-level wrapper task: succeeds once the Redshift load is complete."""
    start_date = luigi.DateParameter()
    end_date = luigi.DateParameter()
    def requires(self):
        """
        Anything returned or yielded by requires must have a 'true' complete() method (aka successful output) before
        this class's run method will execute.
        """
        yield ExampleToRedshift(
            start_date=self.start_date,
            end_date=self.end_date,
            table='annotation_history',
            fn='annotation_history'
        )
class ExampleToRedshift(redshift.S3CopyToTable):
    """A child of redshift.S3CopyToTable class, with overrides such as copy_options.

    Issues a Redshift COPY of the S3 file produced by ExampleToS3 into *table*.
    """
    start_date = luigi.DateParameter()
    end_date = luigi.DateParameter()
    fn = luigi.Parameter()                          # base filename of the staged CSV
    table_type = luigi.Parameter(default='temp')
    table = luigi.Parameter()
    queries = luigi.ListParameter(default=[])
    # COPY options: CSV input, blanks/empties as NULL, auto date parsing.
    copy_options = "CSV BLANKSASNULL EMPTYASNULL TIMEFORMAT 'auto' DATEFORMAT 'auto'"
    # Call all authentication infos
    host = redshift_auth().host
    database = redshift_auth().database
    user = redshift_auth().user
    password = redshift_auth().password
    aws_access_key_id = s3_auth().aws_access_key_id
    aws_secret_access_key = s3_auth().aws_secret_access_key
    def s3_load_path(self):
        # COPY source: the S3 object produced by the upstream task.
        return self.input()[0].path
    def requires(self):
        return [
            ExampleToS3(start_date=self.start_date, end_date=self.end_date, fn=self.fn)
        ]
class ExampleToS3(luigi.Task):
    """Uses luigi.s3 to send an input file to designated s3_load_bucket."""
    start_date = luigi.DateParameter()
    end_date = luigi.DateParameter()
    fn = luigi.Parameter()
    client = s3.S3Client(aws_access_key_id = s3_auth().aws_access_key_id, aws_secret_access_key = s3_auth().aws_secret_access_key, host = s3_auth().region)
    @property
    def fn_src(self):
        # Last two path components of the local file, e.g. "<fn>/<dates>.csv".
        return '/'.join(self.input()[0].path.split('/')[-2:])
    def requires(self):
        return [
            LoadingTask(start_date=self.start_date, end_date=self.end_date, fn=self.fn)
        ]
    def output(self):
        # Mirror the local relative path under the configured S3 bucket.
        return s3.S3Target("{}/{}".format(path().s3_load_bucket, self.fn_src), client=self.client)
    def run(self):
        print('sending to s3 @ {}/{}'.format(path().s3_load_bucket, self.fn_src))
        logger.info('Uploading {} to {}'.format(self.input()[0].path, self.output().path))
        self.client.put(self.input()[0].path, self.output().path)
class LoadingTask(luigi.Task):
    """The main task to be performed, without any dependency. If there are dependencies, then requires() method might be added, as well"""
    start_date = luigi.DateParameter()
    end_date = luigi.DateParameter()
    fn = luigi.Parameter()
    def fn_src(self):
        # NOTE(review): appears vestigial and broken — this task has no
        # requires(), so self.input() is empty, and joining '/' over
        # split('/')[-1] would iterate the characters of a single path
        # component. Compare with ExampleToS3.fn_src (a @property over [-2:]).
        return '/'.join(self.input()[0].path.split('/')[-1])
    def output(self):
        # Ensure the staging directory exists before declaring the target.
        check_and_mkdir("{path}/{fn}".format(
            path=path().tos3_path,
            fn=self.fn))
        return luigi.LocalTarget(
            "{path}/{fn}/{start_date}_{end_date}.csv".format(
                path=path().tos3_path,
                fn=self.fn,
                start_date=self.start_date.strftime(DATE_FORMAT),
                end_date=self.end_date.strftime(DATE_FORMAT)
            )
        )
    def run(self):
        """Build the extract as a DataFrame and serialize it to the target CSV."""
        # load the data in various ways- from sql, read csv, etc and load to pandas.
        # For time-sensitive filtering, the start_date and the end_date parameters may be called using `self.start_date` / `self.end_date`
        # For this example, I'll just create a data with two columns - 'input' and 'output'.
        example = pd.DataFrame([{'input': 'hello', 'output': 'world'}, {'input': 'sql', 'output': 'redshift'}])
        example.to_csv(self.output().path, encoding='utf-8')
        print('serialized locally @ {path}/{fn}/{start_date}_{end_date}.csv'.format(
            path=path().tos3_path,
            fn=self.fn,
            start_date=self.start_date.strftime(DATE_FORMAT),
            end_date=self.end_date.strftime(DATE_FORMAT)
        ))
        logger.info('serialized locally @ {path}/{fn}/{start_date}_{end_date}.csv'.format(
            path=path().tos3_path,
            fn=self.fn,
            start_date=self.start_date.strftime(DATE_FORMAT),
            end_date=self.end_date.strftime(DATE_FORMAT)
        ))
if __name__ == "__main__":
luigi.run() # from cli, use `python example_etl.py ExampleTask`
# if using external parameters, use the form of `python example_etl.py ExampleTask --start-date XXXX-XX-XX --end-date YYYY-YY-YY`
# For time-scheduling, use cronjobs & scripts
# add `--local-scheduler` to command if you want to run in a dev mode
# if using centralized scheduler, luigid daemon process must be running on the background
| [
"mysky901117@gmail.com"
] | mysky901117@gmail.com |
a6f0916d518548df3893eff0465b94ea40acb5bd | 39c9ff7d86abb04fbce9ecc0f32e2f93802d09e0 | /risultatoEsameStudente.py | e071280d523a95ae4571f3dabf5cd87c3dce8e02 | [] | no_license | rosariodp20/pythonAcademy | 0004e21df493799ff58d28296b525966aca70ebb | 53ce98e40c2e4e5135f7daa8271d97469aa1ddd1 | refs/heads/main | 2023-03-20T10:00:17.805790 | 2021-03-05T16:49:28 | 2021-03-05T16:49:28 | 343,469,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | votoScritto = int(input('Inserisci il voto per lo scritto: '))
while votoScritto<-8 or votoScritto>8:
votoScritto = int(input('Inserisci il voto per lo scritto corretto: '))
votoPratica = int(input('Inserisci il voto per la prarica: '))
while votoPratica<0 or votoPratica>24:
votoPratica = int(input('Inserisci il voto per la pratica corretto: '))
risFinale=votoScritto+votoPratica
if votoScritto<=0 and risFinale>18:
print('BOCCIATO')
elif votoScritto<=0 and votoPratica<18:
print('BOCCIATO')
elif votoScritto>0 and risFinale<18:
print('BOCCIATO')
elif risFinale==31 or risFinale==32:
print('CONGRATULAZIONI 30 E LODE')
else:
print('PROMOSSO CON IL SEGUENTE VOTO: ', risFinale) | [
"noreply@github.com"
] | rosariodp20.noreply@github.com |
18a0765043f45d98f842c701ce109b9d7adf6ff2 | 064dd52bc84b357c4398cb942949ecd8926260fc | /Misc/result_generator_test.py | 649e8a58659959ffb0b82be7829ab7e320f01e12 | [
"MIT"
] | permissive | nirantak/programming-exercises | 600ae21c44bfdaf5baddccd5a27fca522b42f46e | bd2ea5afe5fe1b21618266005130d1898df80c33 | refs/heads/main | 2023-06-19T08:39:13.781856 | 2021-07-20T13:12:45 | 2021-07-20T13:12:45 | 142,882,635 | 2 | 0 | MIT | 2021-04-01T14:57:03 | 2018-07-30T13:55:16 | Jupyter Notebook | UTF-8 | Python | false | false | 4,747 | py | import result_generator
class TestResultGenerator:
    """Fixture-driven tests for result_generator.main.

    Each solutions entry is (team_id, problem_id, attempt_no, verdicts), where
    verdicts use "A" (accepted) plus failure codes WA/TL/ML/RT. The expected
    output is a ranked list of (rank, team_id, score, penalty) tuples.
    """
    def test_one(self):
        """Small scoreboard: four problems, two teams."""
        problems = {
            "A": {"s": 30, "t": 4},
            "B": {"s": 30, "t": 7},
            "C": {"s": 100, "t": 6},
            "D": {"s": 200, "t": 6},
        }
        solutions = [
            ("1", "A", "1", ["WA", "A", "TL", "A"]),
            ("2", "D", "1", ["WA", "A", "TL", "A", "A", "ML"]),
            ("2", "D", "2", ["RT", "WA", "TL", "A", "A", "A"]),
            ("1", "C", "2", ["WA", "RT", "TL", "A", "ML", "RT"]),
            ("2", "C", "3", ["A", "A", "TL", "A", "A", "A"]),
            ("1", "C", "4", ["A", "A", "A", "A", "A", "A"]),
            ("2", "A", "3", ["A", "A", "TL", "WA"]),
            ("2", "D", "4", ["A", "A", "A", "A", "A", "A"]),
        ]
        assert result_generator.main(problems, solutions) == [
            (1, "2", 200, 98.33),
            (2, "1", 100, 15.0),
        ]
    def test_two(self):
        """Larger scoreboard: two problems, five teams, many attempts."""
        problems = {"A": {"s": 250, "t": 11}, "B": {"s": 200, "t": 9}}
        solutions = [
            (
                "3",
                "B",
                "1",
                ["TL", "ML", "WA", "WA", "WA", "WA", "WA", "A", "TL"],
            ),
            (
                "3",
                "B",
                "2",
                ["ML", "WA", "ML", "A", "WA", "A", "TL", "ML", "RT"],
            ),
            (
                "2",
                "A",
                "1",
                [
                    "TL",
                    "A",
                    "RT",
                    "ML",
                    "ML",
                    "A",
                    "ML",
                    "ML",
                    "RT",
                    "RT",
                    "ML",
                ],
            ),
            (
                "0",
                "A",
                "1",
                ["TL", "RT", "ML", "RT", "A", "TL", "ML", "A", "RT", "RT", "A"],
            ),
            (
                "1",
                "A",
                "1",
                ["A", "ML", "A", "A", "WA", "RT", "RT", "ML", "WA", "RT", "WA"],
            ),
            (
                "2",
                "A",
                "2",
                ["A", "ML", "TL", "RT", "A", "WA", "ML", "A", "A", "TL", "RT"],
            ),
            (
                "4",
                "B",
                "1",
                ["ML", "TL", "A", "TL", "A", "A", "WA", "RT", "ML"],
            ),
            (
                "1",
                "B",
                "2",
                ["RT", "RT", "WA", "RT", "TL", "RT", "WA", "TL", "A"],
            ),
            (
                "0",
                "B",
                "2",
                ["RT", "TL", "TL", "A", "A", "A", "RT", "TL", "ML"],
            ),
            (
                "1",
                "A",
                "3",
                [
                    "TL",
                    "WA",
                    "ML",
                    "TL",
                    "ML",
                    "ML",
                    "ML",
                    "TL",
                    "TL",
                    "TL",
                    "RT",
                ],
            ),
            (
                "2",
                "A",
                "3",
                ["A", "A", "A", "A", "A", "A", "A", "A", "A", "A", "A"],
            ),
            (
                "0",
                "B",
                "3",
                ["TL", "RT", "A", "A", "WA", "A", "ML", "WA", "ML"],
            ),
            (
                "4",
                "A",
                "2",
                [
                    "A",
                    "WA",
                    "TL",
                    "RT",
                    "WA",
                    "ML",
                    "WA",
                    "RT",
                    "A",
                    "WA",
                    "RT",
                ],
            ),
            (
                "3",
                "A",
                "3",
                ["A", "A", "A", "A", "A", "A", "A", "A", "A", "A", "A"],
            ),
            (
                "4",
                "A",
                "3",
                [
                    "TL",
                    "ML",
                    "WA",
                    "TL",
                    "TL",
                    "RT",
                    "WA",
                    "A",
                    "A",
                    "WA",
                    "WA",
                ],
            ),
        ]
        assert result_generator.main(problems, solutions) == [
            (1, "3", 250, 44.44),
            (2, "2", 250, 0.0),
            (3, "0", 0, 134.85),
            (4, "4", 0, 112.12),
            (5, "1", 0, 90.4),
        ]
"me@nirantak.com"
] | me@nirantak.com |
38ec011061f02e3d75ac4e7fc7043f094cd97e87 | 71a757f128912be582f52c9ab7fb8bf6d6ad19fb | /code.py | 2d46afb89e70dfb4dea6a223f8b7c8895ea06089 | [] | no_license | lalitharaopolavarapu/logbookfacerecognisation | 4e7e20b5a2a6d263ceb9d1d454d884b7c298048f | 5e13f0b9e3aa76e01344cf897dba8988fc31a14e | refs/heads/master | 2020-05-02T11:08:25.314768 | 2019-03-27T04:48:44 | 2019-03-27T04:48:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py |
import cv2
import numpy as np
import os
import csv
import time
# Real-time face recognition loop: load a trained LBPH model, detect faces on
# the webcam feed, label recognised people, and log the first recognised name
# to StudentDetails.csv.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath);
font = cv2.FONT_HERSHEY_SIMPLEX
# One-shot flag: the CSV row is written only for the first recognition.
t=1
#iniciate id counter
id = 0
# names related to ids: example ==> Marcelo: id=1, etc
names = ['None', 'Surya']
# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640) # set video widht
cam.set(4, 480) # set video height
# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)
while True:
    ret, img =cam.read()
    img = cv2.flip(img, 1) # Flip vertically
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor = 1.2,
        minNeighbors = 5,
        minSize = (int(minW), int(minH)),
       )
    for(x,y,w,h) in faces:
        cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
        id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
        # Check if confidence is less them 100 ==> "0" is perfect match
        if (confidence <100):
            sname= names[id]
            confidence = "  {0}%".format(round(100 - confidence))
            while(t==1):
                fr=open('StudentDetails.csv',"+a")
                writer = csv.writer(fr)
                # Bug fix: writerow expects a sequence; passing the bare string
                # wrote one character per CSV column.
                writer.writerow([sname])
                fr.close()
                t+=1
        else:
            id = "unknown"
            confidence = "  {0}%".format(round(100 - confidence))
        cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)
        cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)
    cv2.imshow('camera',img)
    k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video
    if k == 27:
        break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | lalitharaopolavarapu.noreply@github.com |
a779cd7e1846b64ba7dce70c13456b60d065cb13 | 95759eb03f2dfb872a48e85f840a9cd0f99b7f39 | /python_projects/arrays1.py | 8994229675fe2ce734c9d84fb1d609fd1c6ca3e8 | [
"MIT"
] | permissive | amogh-dongre/dotfiles | 81d395e789aeaf6f32b70835080e2be2bc8d49e2 | 973edf2f933bf7279ef78b1381d57b00f960fea6 | refs/heads/main | 2023-06-16T08:54:06.923875 | 2021-07-04T13:53:37 | 2021-07-04T13:53:37 | 325,792,123 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | #!/usr/bin/env python3
def bubblesort(arr):
n = len(arr)
for i in range(n):
for j in range(0, n - i - 1):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
arr = [5, 3, 8, 4, 9, 12, 2, 1, 98, 16]
n = len(arr)
bubblesort(arr)
print("The Sorted array is:")
for i in range(n):
print("%d" % arr[i], end=" ")
| [
"amoghdongre16@gmail.com"
] | amoghdongre16@gmail.com |
1fd6f92f79cd265af470244e0460ad9853def643 | e18a353582609732c795401f1a01bc762bd939f2 | /top/python/MuonTracking.RunII.py | 9f1a8ca4dcada005ae643bc5e39eb41edab8c6d8 | [] | no_license | s-farry/workspaces | 06741807bb464bb0712d52108c2d1b7ae62b1353 | 0dcf3868dcbe110206ea88ff5c9e04a3b44b1ca1 | refs/heads/master | 2020-04-03T00:45:39.152227 | 2017-06-15T16:33:33 | 2017-06-15T16:33:33 | 64,213,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,463 | py | from Jawa import EfficiencyClass
from ROOT import TFile, TCut, TTree, TMath
# --- Tag-and-probe selection cuts (ROOT TCut expressions) ---
# NOTE(review): the numeric thresholds (20000, 70000, ...) look like MeV
# (LHCb convention) — confirm against the ntuple definitions.
# Tag and probe must be separated in azimuth by more than 0.1 rad.
phicut = TCut("(abs(tag_PHI-probe_PHI)<TMath::Pi() ? abs(tag_PHI-probe_PHI) : 2*TMath::Pi()-abs(tag_PHI-probe_PHI))>0.1")
# Both muons above the transverse-momentum threshold.
ptcut = TCut("tag_PT > 20000 && probe_PT > 20000")
# Run-II single-muon trigger chain fired on the tag (TOS at L0/HLT1/HLT2).
triggercut = TCut("tag_Hlt2EWSingleMuonVHighPtDecision_TOS==1 && tag_Hlt1SingleMuonHighPTDecision_TOS == 1 && tag_L0MuonEWDecision_TOS ==1")
# Run-I equivalent of the trigger requirement (kept for reference; unused in selcut).
run1triggercut = TCut("tag_Hlt2SingleMuonHighPTDecision_TOS==1 && tag_Hlt1SingleMuonHighPTDecision_TOS == 1 && tag_L0MuonDecision_TOS ==1")
# Relative momentum uncertainty of the tag track below 10%.
trkqual = TCut("(sqrt(tag_PERR2)/tag_P) < 0.1")
# Both muons inside 2 < eta < 4.5.
eta = TCut("tag_ETA > 2 && tag_ETA < 4.5 && probe_ETA > 2 && probe_ETA < 4.5")
# Good-quality boson decay vertex.
vtxcut = TCut("boson_ENDVERTEX_CHI2/boson_ENDVERTEX_NDOF < 5")
# Tag isolation variable below 2000 (defined here but not part of selcut).
isocut = TCut("tag_cpt_0.50 < 2000")
# Tighter probe-pT working points (not part of selcut).
pt25 = TCut("probe_PT > 25000")
pt30 = TCut("probe_PT > 30000")
# Probe "pass" definitions: Z-muon, W-muon and standard-muon association.
passcut = TCut("probe_AssocZM == 1")
passcutW = TCut("probe_AssocWM == 1")
passcutStd = TCut("probe_AssocStdM == 1")
# Di-muon invariant-mass window around the Z.
mass = TCut("boson_M > 70000 && boson_M < 110000")
# Full tag-and-probe selection used by the efficiency measurement.
selcut = ptcut + phicut + triggercut + vtxcut + eta + mass
# Tag-and-probe ntuples (2016 and 2015, both magnet polarities) over xrootd.
f = TFile.Open('root://hepgrid11.ph.liv.ac.uk///dpm/ph.liv.ac.uk/home/lhcb/Run2Effs/MuonTracking_WLine.MD.2016.root')
g = TFile.Open('root://hepgrid11.ph.liv.ac.uk///dpm/ph.liv.ac.uk/home/lhcb/Run2Effs/MuonTracking_WLine.MU.2016.root')
h = TFile.Open('root://hepgrid11.ph.liv.ac.uk///dpm/ph.liv.ac.uk/home/lhcb/Run2Effs/MuonTracking_WLine.MD.2015.root')
i = TFile.Open('root://hepgrid11.ph.liv.ac.uk///dpm/ph.liv.ac.uk/home/lhcb/Run2Effs/MuonTracking_WLine.MU.2015.root')
# Decay trees per tag charge: t/u = 2016 MagDown, v/w = 2016 MagUp,
# tt/uu = 2015 MagDown, vv/ww = 2015 MagUp (Plus/MinusTag = tag charge).
t = f.Get("PlusTag/DecayTree")
u = f.Get("MinusTag/DecayTree")
v = g.Get("PlusTag/DecayTree")
w = g.Get("MinusTag/DecayTree")
tt = h.Get("PlusTag/DecayTree")
uu = h.Get("MinusTag/DecayTree")
vv = i.Get("PlusTag/DecayTree")
ww = i.Get("MinusTag/DecayTree")
# Magnet-polarity flags and polarity-restricted selections.
magup = TCut("Polarity == 1")
magdown = TCut("Polarity == -1")
selcutMU = selcut + magup
selcutMD = selcut + magdown
'''
etabins = [2.0 , 2.25 , 2.5 , 2.75 , 3.00 , 3.25 , 3.5 , 4.0 , 4.5]
etabins2 = [2.0 , 2.25 , 2.5 , 2.75 , 2.875, 3.00 , 3.1225, 3.25 , 3.375, 3.5 , 4.0 , 4.5]
tckbins = [3500000.0, 4600000.0, 4800000.0, 5700000.0, 5900000.0, 6000000.0, 7100000.0, 7300000.0, 7400000.0,
7500000.0, 7600000.0, 7700000.0, 7900000.0, 7929912.0, 8000000.0]
effvars = [
["ETA", "probe_ETA", 10 , 2 , 4.5 ],
["ETA5", "probe_ETA", 5 , 2 , 4.5 ],
["ETA8", "probe_ETA", etabins ],
["PT", "probe_PT", 10 , 20000 , 70000],
["PT5", "probe_PT", 5 , 20000 , 70000],
["P", "probe_P", 8 , 100000 , 500000],
["PHI", "probe_PHI", 10 , -TMath.Pi() , TMath.Pi()],
["PHI5", "probe_PHI", 5 , -TMath.Pi() , TMath.Pi()],
["VeloClusters", "nVeloClusters", 8 , 0 , 4000 , "I"],
["ITClusters", "nITClusters", 8 , 0 , 2000 , "I"],
["PVs", "nPVs", 6 , -0.5 , 5.5 , "I"],
["TCK", "OdinTCK", tckbins, "I"],
["SPDHits", "nSPDHits", 20 , 0 , 1000, "I"]
]
eff2dvars = [
["ETA_PHI", "ETA5","PHI5"],
["ETA_PT" , "ETA5","PT5"]
]
'''
from effbins_config import *
def makeMuonTrackingRunII(name, selcut, passcut):
    """Measure the Run-II muon tracking efficiency with tag-and-probe.

    Runs four base measurements (magnet polarity x muon charge) over the
    2015+2016 trees, then combines them per polarity, per charge and
    overall.  Every EfficiencyClass is persisted with SaveToFile().

    :param name: infix for the output names ("" or e.g. "W").
    :param selcut: TCut with the full tag-and-probe selection.
    :param passcut: TCut defining when the probe counts as reconstructed.
    """

    def _measure(label, trees, polarity):
        # One base measurement: fill from the given trees, restrict the
        # selection to one magnet polarity, run and persist.  Statement
        # order matches the original per-sample stanza exactly.
        eff = EfficiencyClass("Muon" + name + "Tracking" + label)
        for tree in trees:
            eff.AddTree(tree)
        eff.SetSelectionCut(selcut + polarity)
        eff.SetPassCut(passcut)
        eff.AddVars(effvars + trkeffvars)
        eff.Add2DVars(trk2dvars)
        eff.Run()
        eff.SaveToFile()
        return eff

    def _combine(label, effA, effB, print_var=None):
        # Merge two measurements into one EfficiencyClass and persist it;
        # optionally print the efficiencies in one variable first.
        eff = EfficiencyClass("Muon" + name + "Tracking" + label, effA, effB)
        eff.MakeEfficiencyGraph()
        if print_var is not None:
            eff.PrintEfficiencies(print_var)
        eff.SaveToFile()
        return eff

    # Four base measurements.  Note the crossed tree naming: the "PlusTag"
    # trees (v, vv, t, tt) probe the negative muon and vice versa, exactly
    # as in the original stanzas.
    upMuMinus = _measure("RunIIMagUpMuMinus", (v, vv), magup)
    upMuPlus = _measure("RunIIMagUpMuPlus", (w, ww), magup)
    downMuMinus = _measure("RunIIMagDownMuMinus", (t, tt), magdown)
    downMuPlus = _measure("RunIIMagDownMuPlus", (u, uu), magdown)

    # Combinations: per polarity, per charge (MuMinus also prints the
    # ETA-binned efficiencies), and the overall Run-II result.
    magDown = _combine("RunIIMagDown", downMuPlus, downMuMinus)
    magUp = _combine("RunIIMagUp", upMuPlus, upMuMinus)
    _combine("RunIIMuPlus", downMuPlus, upMuPlus)
    _combine("RunIIMuMinus", downMuMinus, upMuMinus, print_var="ETA")
    _combine("RunII", magDown, magUp)
# Run the measurement with the Z-muon association as the pass definition.
makeMuonTrackingRunII("",selcut,passcut)
#makeMuonTrackingRunII("W",selcut,passcutW)
| [
"sfarry@hep.ph.liv.ac.uk"
] | sfarry@hep.ph.liv.ac.uk |
3974cb911735a3882b5c81a63453add4a57ef547 | b003208e8383c85ce0086ee2333268f19de14943 | /src/scripts/sources/reports/ZpkTransactionsReport.py | 453ed3ea977da07aa2c7aa1069a5e8e19ae24fef | [] | no_license | lobo1111/agora-configuration | 6ff2b46df0c8466b36c8ff9c10876e2373f932b6 | a199428deb8c8fdb80b0443583f7f306895b35d8 | refs/heads/master | 2020-12-24T07:45:39.948380 | 2016-11-15T12:15:41 | 2016-11-15T12:15:41 | 73,372,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,730 | py | from reports.Report import Report
from javax.persistence import TemporalType
from structures.BookingPeriod import BookingPeriodManager
from java.text import SimpleDateFormat
from java.util import Date
from reports.ZpksStatusReport import ZpksStatusReport
class ZpkTransactionsReport(Report):
    """Report of all booked transactions touching one ZPK account.

    Lists every DocumentPosition that debits or credits the selected
    ZakladowyPlanKont (chart-of-accounts entry) within a date range,
    together with running debit/credit balances after each transaction.
    Runs under Jython: Java classes (SimpleDateFormat, JPA TemporalType,
    java.util.Date) are used directly.
    """

    def obtainData(self):
        # Resolve the report inputs from the request variables (_svars).
        self._community = self.findById("Community", self._svars.get('communityId'))
        self._from = self.getFrom()
        self._to = self.getTo()
        self._zpk = self.findById("ZakladowyPlanKont", self._svars.get('zpkId'))
        self._transactions = self.collectTransactions()

    def collectTransactions(self):
        # Build one dict per document position, chronologically ordered
        # (see getQuery), carrying running balances after each transaction.
        output = []
        currentDebit, currentCredit = self.calculateCurrentStatus()
        for transaction in self.getQuery().getResultList():
            item = dict([])
            item['type'] = self.getType(transaction)
            item['subject'] = self.getSubject(transaction)
            item['createdAt'] = SimpleDateFormat('dd-MM-yyyy').format(transaction.getCreatedAt())
            item['value'] = transaction.getValue()
            item['zpkDebit'] = transaction.getDebitZpk().getLabel()
            item['zpkCredit'] = transaction.getCreditZpk().getLabel()
            # Update the running balances before storing them on the row.
            currentDebit = self.calculateDebitStatus(currentDebit, transaction)
            currentCredit = self.calculateCreditStatus(currentCredit, transaction)
            item['zpkDebitStatus'] = currentDebit
            item['zpkCreditStatus'] = currentCredit
            output.append(item)
        return output

    def calculateCurrentStatus(self):
        # Opening (debit, credit) balances of the account as of self._from.
        return ZpksStatusReport().calculate(self._zpk, self._from)

    def getType(self, transaction):
        # Translate the document type code into a localized label.
        # NOTE(review): unknown type codes fall through and return None.
        if transaction.getDocument().getType() == "INVOICE":
            return self._label.get('document.invoice')
        elif transaction.getDocument().getType() == "BANK_NOTE":
            return self._label.get('document.bankNote')
        elif transaction.getDocument().getType() == "ACCOUNT_PROVISION":
            return self._label.get('document.accountProvision')
        elif transaction.getDocument().getType() == "POSSESSION_PAYMENT":
            return self._label.get('document.possessionPayment')
        elif transaction.getDocument().getType() == "CHARGING":
            return self._label.get('document.charging')

    def getSubject(self, transaction):
        # Subject shown on the row: possession address when present,
        # otherwise the contractor name, otherwise empty.
        if transaction.getDocument().getPossession() != None:
            return transaction.getDocument().getPossession().getFullAddress()
        elif transaction.getDocument().getContractor() != None:
            return transaction.getDocument().getContractor().getName()
        else:
            return ""

    def calculateDebitStatus(self, currentDebit, transaction):
        # Add the value to the running debit balance only when this
        # report's account is the debit side (values use Java .add()).
        if self._zpk.getId() == transaction.getDebitZpk().getId():
            return currentDebit.add(transaction.getValue())
        else:
            return currentDebit

    def calculateCreditStatus(self, currentCredit, transaction):
        # Mirror of calculateDebitStatus for the credit side.
        if self._zpk.getId() == transaction.getCreditZpk().getId():
            return currentCredit.add(transaction.getValue())
        else:
            return currentCredit

    def getQuery(self):
        # All positions of the default booking period touching the account
        # (either side) within [self._from, self._to], oldest first.
        sql = "Select dp From DocumentPosition dp Where (dp.debitZpk.id = :did or dp.creditZpk.id = :cid) and dp.bookingPeriod.defaultPeriod = 1 and dp.createdAt >= :from and dp.createdAt <= :to Order By dp.createdAt ASC"
        query = self._entityManager.createQuery(sql)
        query.setParameter("did", self._zpk.getId())
        query.setParameter("cid", self._zpk.getId())
        query.setParameter("from", SimpleDateFormat('dd-MM-yyyy').parse(self._from), TemporalType.DATE)
        query.setParameter("to", SimpleDateFormat('dd-MM-yyyy').parse(self._to), TemporalType.DATE)
        return query

    def getFrom(self):
        # Default lower bound: January 1st of the default booking period.
        if self._svars.get('from') == '':
            year = BookingPeriodManager().findDefaultBookingPeriod().getName()
            return "01-01-%s" % year
        else:
            return self._svars.get('from')

    def getTo(self):
        # Default upper bound: today, formatted dd-MM-yyyy.
        if self._svars.get('to') == '':
            return str(SimpleDateFormat('dd-MM-yyyy').format(Date()))
        else:
            return self._svars.get('to')

    def fillTemplate(self):
        # Expose the collected data and all localized labels to the
        # template rendering context.
        self._context.put("community", self._community)
        self._context.put("fromDate", self._from)
        self._context.put("toDate", self._to)
        self._context.put("zpk", self._zpk)
        self._context.put("transactions", self._transactions)
        self._context.put("labelDocumentCreationDate", self._label.get('report.documentCreationDate'))
        self._context.put("labelZpkTransactions", self._label.get('report.zpkTransactions'))
        self._context.put("labelCommunity", self._label.get('report.community'))
        self._context.put("labelAddress", self._label.get('report.address'))
        self._context.put("labelZpk", self._label.get('report.zpk'))
        self._context.put("labelFromDate", self._label.get('report.from'))
        self._context.put("labelToDate", self._label.get('report.to'))
        self._context.put("labelType", self._label.get('report.documentType'))
        self._context.put("labelSubject", self._label.get('report.subject'))
        self._context.put("labelCreatedAt", self._label.get('report.createdAt'))
        self._context.put("labelValue", self._label.get('report.value'))
        self._context.put("labelDebit", self._label.get('report.debit'))
        self._context.put("labelCredit", self._label.get('report.credit'))
        self._context.put("labelDescription", self._label.get('report.description'))

    def getTemplateName(self):
        # Template resource rendered for this report.
        return "report-zpk-transactions"
"tomasz@kopacki.eu"
] | tomasz@kopacki.eu |
17a4c3efc94fc1e6caad8a5a7ade5f392c075824 | 5c7db30d59cd28fe1923bb5fdb9280ffe2070b70 | /django-polls/polls/migrations/0001_initial.py | cca72afb3465cec2f3f673e3e259b8a64609593e | [] | no_license | golkedj/django_test | 6816b640e675aabd311de98907ff38fc8034b7d5 | d1ab4b5bf6984aee78163a94638460f187ca12a9 | refs/heads/master | 2021-01-22T16:44:30.569480 | 2017-09-06T16:56:23 | 2017-09-06T16:56:23 | 100,724,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-18 14:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the polls Choice/Question
    # tables and links Choice to Question with a cascading foreign key.
    # Do not hand-edit field definitions after this has been applied.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
        ),
    ]
| [
"="
] | = |
2986bd526cbcb9a31eb6f2de6b0f0dd89c6fa7cc | 742cde49ee24d14f939e40dc660b8d358b206066 | /src/video/client.py | 736f9667ae9295cb5df530f1246cbd988b8b3269 | [] | no_license | LdMe/flask_docker_video_stream | d397c0a4cd59326004d7b17ad1b7d5eac02591b2 | 03b1cbcc03712e5d1878bf0113a9796f637f7ef5 | refs/heads/master | 2023-06-15T08:05:06.150285 | 2021-07-09T12:02:52 | 2021-07-09T12:08:38 | 360,114,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | # import the necessary packages
from imutils.video import VideoStream
import imagezmq
import argparse
import socket
import time
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--server-ip", required=True,
    help="ip address of the server to which the client will connect")
args = vars(ap.parse_args())

# initialize the ImageSender object with the socket address of the
# server (port 5555)
sender = imagezmq.ImageSender(connect_to="tcp://{}:5555".format(
    args["server_ip"]))

# get the host name, initialize the video stream, and allow the
# camera sensor to warmup; the host name is sent with every frame so the
# server can identify this client
rpiName = socket.gethostname()
#vs = VideoStream(usePiCamera=True).start()
vs = VideoStream(src=0).start()
time.sleep(2.0)

# stream forever; the loop only ends when the process is killed
# NOTE(review): vs.stop() is never called — no camera cleanup on exit.
while True:
    # read the frame from the camera and send it to the server
    frame = vs.read()
    #frame = imutils.resize(frame, width=320)
    sender.send_image(rpiName, frame)
"dlafuente003@gmail.com"
] | dlafuente003@gmail.com |
778373ee38e2b8e500a508492b5c81d519f80a09 | f8671d120f8f32b0febe94f4dc84570603e34fac | /utils_driver.py | c9b9a0185636c8784dadc34512484fe9360420ca | [] | no_license | ahashisyuu/OpenSpider | f35772a53c4de4217df9dc1ee8f2078e1c2eb281 | 31da122dc2ab658142c34089f3cc0fe71a5016ca | refs/heads/master | 2022-03-19T01:37:58.965682 | 2019-12-10T12:40:02 | 2019-12-10T12:40:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from selenium import webdriver
import platform
#print platform.system()
def get_driver():
    """Return a Selenium WebDriver suited to the current platform.

    Linux hosts use a PhantomJS binary at a fixed install path (headless
    server setup); every other platform falls back to a local Chrome.
    """
    if platform.system() == "Linux":
        phantomjs_binary = '/home/ubuntu/phantomjs-2.1.1-linux-x86_64/bin/phantomjs'
        return webdriver.PhantomJS(executable_path=phantomjs_binary)
    return webdriver.Chrome()
#return webdriver.PhantomJS()
#driver = get_driver()
#driver.get("http://www.baidu.com")
#driver.close()
| [
"1451607278@qq.com"
] | 1451607278@qq.com |
674d83709f2b3e4f8e2c3423ab4e9aebae3431aa | a538e551561c55eed35b03ae76a95b4556652b0d | /project_functions.py | cac56ca068e87e16f138d73cde4c0ba2129af8da | [] | no_license | Killaars/CBS-themes | 52363a9ef6c7aadd553fea792017b833593799a4 | c428e7f80ba3b03d49991d82d2fe4dd5702635bf | refs/heads/master | 2020-07-01T18:47:03.657494 | 2019-08-08T13:24:52 | 2019-08-08T13:24:52 | 201,261,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | #%%
import pandas as pd
#%%
# Variables
upwindow = 7
lowwindow = 2
'''
Preprocessing fuction for both the children as the parents
'''
def preprocessing_child(children):
    """Normalise the children-articles DataFrame in place and return it.

    - lower-cases 'title' and 'content'
    - turns 'related_parents' from 'matches/<id>,...' into a list of ids
    - parses 'publish_date_date' into datetimes
    - canonicalises every reference to the CBS institute to plain 'cbs'

    BUG FIX: every ``str.replace`` now passes ``regex=False``.  Patterns
    such as 'cbs (cbs)' contain regex metacharacters and, under the old
    pandas default (regex=True), were interpreted as regular expressions
    and matched the wrong text (e.g. 'cbs(cbs)' matched literal 'cbscbs').
    All replacements here are meant to be literal.
    """
    # Children
    children.loc[:, 'title'] = children.loc[:, 'title'].str.lower()
    children.loc[:, 'content'] = children.loc[:, 'content'].str.lower()
    children['related_parents'] = children['related_parents'].str.replace('matches/', '', regex=False).str.split(',')
    children.loc[:, 'publish_date_date'] = pd.to_datetime(children.loc[:, 'publish_date_date'])
    # children.loc[:,'content'] = children.loc[:,'content'].str.replace('-',' ') Breaks check_link
    # children.loc[:,'content'] = children.loc[:,'content'].str.replace(' ',' ')
    # replace other references to cbs with cbs itself
    children.loc[:, 'content'] = children.loc[:, 'content'].str.replace('centraal bureau voor de statistiek', 'cbs', regex=False)
    children.loc[:, 'content'] = children.loc[:, 'content'].str.replace('cbs(cbs)', 'cbs', regex=False)
    children.loc[:, 'content'] = children.loc[:, 'content'].str.replace('cbs (cbs)', 'cbs', regex=False)
    children.loc[:, 'content'] = children.loc[:, 'content'].str.replace('cbs ( cbs )', 'cbs', regex=False)
    return children
def remove_stopwords_from_content(row, column='content'):
    '''
    Strip punctuation and Dutch stopwords from one text field of *row*
    and return the remaining tokens as a single space-separated string.
    Rows whose field is missing (NaN, i.e. a float) yield ''.
    '''
    import nltk
    from nltk.corpus import stopwords
    import re

    stop_words = set(stopwords.words('dutch'))
    text = row[column]
    if type(text) == float:  # some parents have no content (NaN)
        return ''
    stripped = re.sub(r'[^\w\s]', '', text)          # drop punctuation
    tokens = nltk.tokenize.word_tokenize(stripped)
    kept = [word for word in tokens if word not in stop_words]
    return ' '.join(kept)
| [
"27420806+Killaars@users.noreply.github.com"
] | 27420806+Killaars@users.noreply.github.com |
da9860f9b1ff839f2aa581ee98b6af1448a81426 | ce86b45514ce8c097dbec5468f6fda24998be8f1 | /Airport.py | 259378958636c320a35c6299eedef21abc2213d4 | [] | no_license | Emmet62/DataStructuresProject | cec7c7a91be8746666663c259536e6bfac38c54d | 683136a7a2706a9f4a856534f133eb457e411796 | refs/heads/master | 2020-03-14T12:38:52.059512 | 2018-04-30T16:06:04 | 2018-04-30T16:06:04 | 131,616,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | '''
Created on 28 Mar 2018
@author: Emmet
'''
from math import pi, sin, cos, acos
import os.path
import csv
class Airport():
    ''' Stores Airport objects '''

    def __init__(self, airportCode, airportName, country, airportLat, airportLong):
        ''' Define what attributes we want the Airport instances to have '''
        # Coordinates are kept exactly as received (CSV string fields) and
        # converted to float lazily by getLatitude()/getLongitude().
        self.airportCode = airportCode
        self.airportName = airportName
        self.country = country
        self.airportLat = airportLat
        self.airportLong = airportLong

    def getAirportCode(self):
        ''' Returns the code for the airport instance'''
        return self.airportCode

    def getAirportName(self):
        ''' Returns the name for the airport instance'''
        return self.airportName

    def getCountry(self):
        ''' Returns the country of the airport instance'''
        return self.country

    def getLatitude(self) -> float:
        ''' Returns the latitude (degrees) for the airport instance '''
        return float(self.airportLat)

    def getLongitude(self) -> float:
        ''' Returns the longitude (degrees) for the airport instance '''
        return float(self.airportLong)
class AirportAtlas():
    ''' Holds info. on all of the airports '''

    def __init__(self):
        ''' AirportAtlas invokes the loadData method when called '''
        self.loadData("../airport.csv")

    def loadData(self, csvFile):
        ''' Reads the CSV file and creates instances of the class Airport.
        Airports are keyed on the code in CSV column 4 (Key:Object pairs).
        Returns the populated dictionary. '''
        self.airports = {}
        with open(os.path.join("input", csvFile), "rt", encoding="utf8") as f:
            reader = csv.reader(f)
            for line in reader:
                # CSV columns used: 4=code, 1=name, 3=country, 6=lat, 7=long
                self.airports[line[4]] = Airport(line[4], line[1], line[3], line[6], line[7])
        return self.airports

    def getAirport(self, code):
        ''' Takes a three letter code as input and returns the Airport
        object corresponding to the code (KeyError if unknown). '''
        return self.airports[code]

    @staticmethod
    def greatCircleDistance(lat1, long1, lat2, long2):
        ''' Great-circle distance in kilometres between two points given in
        degrees, via the spherical law of cosines on colatitudes.

        BUG FIX: the cosine of the central angle is clamped to [-1, 1]
        before calling acos().  Floating-point rounding can push it just
        outside that interval (e.g. for two identical points), which used
        to raise "ValueError: math domain error". '''
        radius_earth = 6371
        theta1 = long1 * (2 * pi) / 360
        theta2 = long2 * (2 * pi) / 360
        phi1 = (90 - lat1) * (2 * pi) / 360   # colatitude
        phi2 = (90 - lat2) * (2 * pi) / 360
        cos_angle = sin(phi1) * sin(phi2) * cos(theta1 - theta2) + cos(phi1) * cos(phi2)
        cos_angle = max(-1.0, min(1.0, cos_angle))  # guard acos() domain
        return acos(cos_angle) * radius_earth

    def getDistanceBetweenAirports(self, code1, code2):
        ''' Takes in 2 airport codes and pulls out the latitude and
        longitude for each, then returns the great-circle distance (km)
        between the two airports. '''
        airport1 = self.getAirport(code1)
        airport2 = self.getAirport(code2)
        return self.greatCircleDistance(airport1.getLatitude(),
                                        airport1.getLongitude(),
                                        airport2.getLatitude(),
                                        airport2.getLongitude())
| [
"emtracey@tcd.ie"
] | emtracey@tcd.ie |
d5706657c7a3d28103d085bb0dbf7d12e11bac82 | 173b7e08d9fdbfeda8349570f7ccd93cbd6c02d4 | /example_model/model_node_label.py | 84ea201452534e2e144905c11f081a4272f8ac42 | [
"LicenseRef-scancode-other-permissive"
] | permissive | embeddedsamurai/kGCN-1 | ef647d539fb79d6b5ebe090a3b27b349933d6ca4 | 7bc4dc32afd7a76e31b3bd37e2cb71611ba1fc5f | refs/heads/master | 2020-08-04T16:51:36.430607 | 2019-10-01T05:02:31 | 2019-10-01T05:02:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,427 | py | import tensorflow as tf
import tensorflow.contrib.keras as K
import kgcn.layers
from kgcn.default_model import DefaultModel
import tensorflow.contrib.keras as K
class GCN(DefaultModel):
    """Graph convolutional network (kGCN) that predicts a 2-way label for
    every node of each input graph (TensorFlow 1.x graph-mode model)."""

    def build_placeholders(self, info, config, batch_size):
        # input data types (placeholders) of this neural network
        return self.get_placeholders(info, config, batch_size,
            ['adjs', 'nodes', 'mask', 'dropout_rate',
             'node_label', 'mask_node_label',
             'enabled_node_nums', 'is_train', 'features'])

    def build_model(self, placeholders, info, config, batch_size):
        # Unpack hyper-parameters and graph placeholders.
        adj_channel_num = info.adj_channel_num
        embedding_dim = config["embedding_dim"]
        in_adjs = placeholders["adjs"]
        features = placeholders["features"]
        in_nodes = placeholders["nodes"]
        labels = placeholders["node_label"]
        # NOTE(review): mask_labels and is_train are unpacked but unused
        # in this model.
        mask_labels = placeholders["mask_node_label"]
        mask = placeholders["mask"]
        enabled_node_nums = placeholders["enabled_node_nums"]
        is_train = placeholders["is_train"]

        # When no per-node features are supplied, learn an embedding
        # indexed by node id instead.
        layer = features
        input_dim = info.feature_dim
        if features is None:
            layer = K.layers.Embedding(info.all_node_num, embedding_dim)(in_nodes)
            input_dim = embedding_dim
        # layer: batch_size x graph_node_num x dim
        # Two GraphConv(64) -> batch-norm -> ReLU stages.
        layer = kgcn.layers.GraphConv(64, adj_channel_num)(layer, adj=in_adjs)
        layer = kgcn.layers.GraphBatchNormalization()(layer,
            max_node_num=info.graph_node_num, enabled_node_nums=enabled_node_nums)
        layer = tf.nn.relu(layer)
        layer = kgcn.layers.GraphConv(64, adj_channel_num)(layer, adj=in_adjs)
        layer = kgcn.layers.GraphBatchNormalization()(layer,
            max_node_num=info.graph_node_num, enabled_node_nums=enabled_node_nums)
        layer = tf.nn.relu(layer)
        # Per-node 2-class head: logits in `layer`, softmax in `prediction`.
        layer = kgcn.layers.GraphConv(2, adj_channel_num)(layer, adj=in_adjs)
        prediction = tf.nn.softmax(layer)

        # computing cost and metrics
        # Per-node cross-entropy, averaged over nodes, masked per graph.
        cost = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=layer)
        cost = mask * tf.reduce_mean(cost, axis=1)
        cost_opt = tf.reduce_mean(cost)
        metrics = {}
        cost_sum = tf.reduce_sum(cost)
        # Fraction of correctly classified nodes per graph, masked, summed.
        pre_count = tf.cast(tf.equal(tf.argmax(prediction, 2), tf.argmax(labels, 2)), tf.float32)
        correct_count = mask * tf.reduce_mean(pre_count, axis=1)
        metrics["correct_count"] = tf.reduce_sum(correct_count)
        return layer, prediction, cost_opt, cost_sum, metrics
| [
"kojima.ryosuke.8e@kyoto-u.ac.jp"
] | kojima.ryosuke.8e@kyoto-u.ac.jp |
725223f8d060081f839ffe104c2a1a8f0c49e687 | 920f81d8f5fbd45eb15f2970d0bd528b921a3d46 | /pyplot/plot_loss.py | 81bb50f95b08e5d8fafdc78fc8d47652605f5877 | [] | no_license | minhnd3796/RIVF2019_Minh | 740a4015b7741bea9d2503088e99bc1a97a1f18f | c2439421efcbae3bad09f459a3d582b7fcf735c4 | refs/heads/master | 2020-03-25T03:49:20.533009 | 2018-08-03T01:27:14 | 2018-08-03T01:27:14 | 143,361,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | from pandas import read_csv
import pylab
from sys import argv


def _plot_run(run_dir, train_color, val_color, skips):
    """Plot the training and validation entropy curves for one model run.

    The CSVs are TensorBoard exports with the step in column 1 and the
    loss value in column 2.  Factors out the previously duplicated
    read/extract/plot stanzas; plot order (train, then validation) and
    labels/colors are unchanged.
    """
    train = read_csv(run_dir + '/run_train-tag-entropy_1.csv')
    pylab.plot(train.iloc[:, 1].values, train.iloc[:, 2].values,
               train_color, label='Training with %s skips' % skips)
    val = read_csv(run_dir + '/run_validation-tag-entropy_1.csv')
    pylab.plot(val.iloc[:, 1].values, val.iloc[:, 2].values,
               val_color, label='Validation %s skips' % skips)


# FCN-8s (2 skip connections) vs FCN-4s (3 skip connections) on Vaihingen.
_plot_run('FCN-8s-ResNet101_Vaihingen', 'green', 'purple', '2')
_plot_run('FCN-4s-ResNet101_Vaihingen', 'r', 'b', '3')

pylab.legend(loc='upper left')
pylab.xlabel('Step')
pylab.ylabel('Loss')
pylab.show()
| [
"gordonnguyen3796@gmail.com"
] | gordonnguyen3796@gmail.com |
700b224ee02e77b913b24e1e85f043711a0b9e60 | 2c4dbf80b7493beccdd6e8e3f3dbcfe05c235ae5 | /Lecture.7.Django/airline/flights/urls.py | cfe3d666b42eaf61df2c4525a73683ce10804267 | [] | no_license | mbegumgit/cs50 | 9cbe88dbc5d9d1751cb9c020ba1a6337dcfab8b5 | d7d641ef039794c18be3228596bbfbe6e58662d8 | refs/heads/master | 2021-07-12T11:12:14.864921 | 2021-02-26T05:02:12 | 2021-02-26T05:02:12 | 237,889,463 | 0 | 0 | null | 2020-02-03T05:08:26 | 2020-02-03T05:08:25 | null | UTF-8 | Python | false | false | 215 | py | from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name="index"),
    path('<int:flight_id>', views.flight, name="flight"),
    # BUG FIX: the route was '<int:flight_id</book' — the unclosed path
    # converter made the pattern unmatchable, so the booking page could
    # never be reached.  The booking view lives under '<flight_id>/book'.
    path('<int:flight_id>/book', views.book, name="book")
]
"taqi.official@gmail.com"
] | taqi.official@gmail.com |
1ed26d619a29fe66356aaa17f6c266960328b775 | fdccff2b819c5423df77fca3ee16ca23feb8205e | /weather_report/cli.py | fa12b189acebb323e264621d6d0e52f2726a230b | [
"BSD-3-Clause"
] | permissive | austinorr/weather-report | a48f9a1be9856d55b7b0bc115cc10639aefa3eee | a0beddd62b0a459842fa43ad62edcb17a7c2f27f | refs/heads/master | 2021-03-31T01:54:07.726517 | 2018-04-09T22:28:56 | 2018-04-09T22:28:56 | 124,453,316 | 1 | 0 | BSD-3-Clause | 2018-04-09T22:09:18 | 2018-03-08T22:01:32 | Python | UTF-8 | Python | false | false | 308 | py | import sys
from .weather_report import WeatherReport
from .default_config import defaults
def main():
    """CLI entry point for weather-report.

    With ``--project-file`` anywhere in the arguments, print the default
    project configuration; otherwise treat every argument as an input
    file and run a WeatherReport for each one.
    """
    cli_args = sys.argv[1:]
    if "--project-file" in cli_args:
        print(defaults)
        return
    for input_file in cli_args:
        report = WeatherReport.from_input_file(input_file)
        report.run()
| [
"AOrr@geosyntec.com"
] | AOrr@geosyntec.com |
fb468ac210f00990d47274537361078ddce64b34 | ab1009dfd09c08d0898cb490284127552387c27e | /caller_v2/app/api/v1/process.py | 9190ef78cef59bfcbe45676cc1bbc80d97a56ddb | [
"MIT"
] | permissive | tienthegainz/pipeline_executor_docker_call | 2d6d4187b22cfe017596e59090c993c663c414cc | b2b9478056e4b818f5963b0b266375fe6d39627a | refs/heads/main | 2023-08-05T14:51:38.144250 | 2021-09-27T06:40:15 | 2021-09-27T06:40:15 | 405,798,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,969 | py | from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from app import schemas
from app.api.deps import get_db
from app.service import process_service, layer_service
import json
from copy import deepcopy
router = APIRouter()
# TODO: Add file handler
@router.get("", response_model=List[schemas.ProcessResponse])
def read_processes(db: Session = Depends(get_db), skip: int = 0, limit: int = 100) -> Any:
    """
    Return the stored processes, paginated via ``skip``/``limit``.
    """
    return process_service.get_multi(db, skip=skip, limit=limit)
@router.post("", response_model=schemas.ProcessResponse)
def create_process(*, db: Session = Depends(get_db), process_in: schemas.ProcessCreateInput) -> Any:
    """
    Create a new process together with its ordered layers.

    The process row is created first (named when a name is supplied), then
    one layer row per entry of ``process_in.layers``, and finally the
    process is pointed at its first layer.  Raises IndexError when
    ``process_in.layers`` is empty (unchanged from the original).
    # TODO: Move this to service layer
    """
    p_input = schemas.ProcessCreate()
    if process_in.name:
        p_input.name = process_in.name
    process = process_service.create(db, obj_in=p_input)

    layers = []
    for layer_in in process_in.layers:
        # next_image is optional: only pass it through when set so the
        # schema default applies otherwise (replaces the old duplicated
        # if/else LayerCreate constructors).
        layer_kwargs = {
            "process_id": process.id,
            "cur_image": layer_in.cur_image,
            "input_params": json.dumps(layer_in.input_params),
        }
        if layer_in.next_image:
            layer_kwargs["next_image"] = layer_in.next_image
        layers.append(layer_service.create(db, obj_in=schemas.LayerCreate(**layer_kwargs)))

    # Link the process to its first layer.
    process = process_service.update(db, db_obj=process, obj_in={"first_image": layers[0].id})
    # TODO: start first container here
    return process
# @router.put("", response_model=schemas.ProcessResponse)
# def update_process(*, db: Session = Depends(get_db), process_in: schemas.ProcessUpdateInput) -> Any:
# """
# Update existing processes.
# """
# process = process_service.get(db, model_id=process_in.id)
# if not process:
# raise HTTPException(
# status_code=status.HTTP_404_NOT_FOUND,
# detail="The process with this ID does not exist in the system.",
# )
# process = process_service.update(db, db_obj=process, obj_in=process_in)
# return process
# @router.delete("", response_model=schemas.ProcessResponse)
# def delete_process(*, db: Session = Depends(get_db), id: int) -> Any:
# """
# Delete existing process.
# """
# process = process_service.get(db, model_id=id)
# if not process:
# raise HTTPException(
# status_code=status.HTTP_404_NOT_FOUND,
# detail="The process with this ID does not exist in the system.",
# )
# process_service.remove(db, model_id=process.id)
# return process
| [
"viettienha98@gmail.com"
] | viettienha98@gmail.com |
61fd7a6c35836d296e1a527370004aeece802e25 | a61555adb096171a0f477dd953e3a500df29a425 | /dokdo/cli/prepare_lefse.py | 43615951208bf69ac009a9a942f37885ce2cbbd8 | [
"MIT"
] | permissive | caio-andrey/dokdo | a9cbf5595bf993ad7340ee5411c11357dcc3a523 | 334d80bdc9118daf90e448eaca7124c6ff270cf2 | refs/heads/master | 2023-06-28T09:40:38.015124 | 2021-08-04T11:36:54 | 2021-08-04T11:36:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,527 | py | import dokdo
import pandas as pd
from qiime2 import Artifact
from qiime2 import Metadata
from qiime2.plugins import feature_table
from qiime2.plugins import taxa
def prepare_lefse(table_file,
                  taxonomy_file,
                  metadata_file,
                  output_file,
                  class_col,
                  subclass_col=None,
                  subject_col=None,
                  where=None):
    """Create a TSV file which can be used as input for the LEfSe tool.

    This command
    1) collapses the input feature table at the genus level,
    2) computes relative frequency of the features,
    3) performs sample filtration if requested,
    4) changes the format of feature names,
    5) adds the relevant metadata as 'Class', 'Subclass', and 'Subject', and
    6) writes a text file which can be used as input for LEfSe.

    Parameters
    ----------
    table_file : str
        Path to the table file with the 'FeatureTable[Frequency]' type.
    taxonomy_file : str
        Path to the taxonomy file with the 'FeatureData[Taxonomy]' type.
    metadata_file : str
        Path to the metadata file.
    output_file : str
        Path to the output file.
    class_col : str
        Metadata column used as 'Class' by LEfSe.
    subclass_col : str, optional
        Metadata column used as 'Subclass' by LEfSe.
    subject_col : str, optional
        Metadata column used as 'Subject' by LEfSe.
    where : str, optional
        SQLite 'WHERE' clause specifying sample metadata criteria.
    """
    # 1) Collapse features to genus (QIIME 2 taxonomic level 6).
    _ = taxa.methods.collapse(
        table=Artifact.load(table_file),
        taxonomy=Artifact.load(taxonomy_file),
        level=6)

    # 2) Convert counts to relative frequencies.
    _ = feature_table.methods.relative_frequency(
        table=_.collapsed_table)

    # 3) Optionally keep only samples matching the SQLite WHERE clause.
    if where is None:
        df = _.relative_frequency_table.view(pd.DataFrame)
    else:
        _ = feature_table.methods.filter_samples(
            table=_.relative_frequency_table,
            metadata=Metadata.load(metadata_file),
            where=where)
        df = _.filtered_table.view(pd.DataFrame)

    # 4) Rewrite each ';'-separated taxonomy string into the '|'-separated
    # format LEfSe expects: sanitise characters LEfSe cannot handle and
    # back-fill unnamed ranks from the last named ancestor ('base') with
    # an L<level> suffix.
    def f(x):
        for c in ['-', '[', ']', '(', ')', ' ']:
            x = x.replace(c, '_')
        ranks = x.split(';')
        base = ranks[0]
        result = [base]
        for i, rank in enumerate(ranks[1:], start=2):
            if rank == '__':
                # Completely empty rank: synthesise '<base>_x__L<level>'.
                result.append(f'{base}_x__L{i}')
            elif rank.split('__')[1] == '':
                # Rank prefix present but unnamed (e.g. 'g__').
                result.append(f'{base}_{rank}L{i}')
            else:
                result.append(rank)
                base = rank  # remember the deepest named rank
        return '|'.join(result)

    df.columns = [f(x) for x in df.columns.to_list()]

    # 5) Join the (underscore-sanitised) sample metadata, then move the
    # Class / Subclass / Subject columns to the front in that order.
    mf = dokdo.get_mf(metadata_file)
    mf = mf.replace(' ', '_', regex=True)
    cols = mf.columns.to_list()
    df = pd.concat([df, mf], axis=1, join="inner")
    df.insert(0, class_col, df.pop(class_col))
    cols.remove(class_col)
    if subclass_col is None and subject_col is None:
        pass
    elif subclass_col is not None and subject_col is None:
        df.insert(1, subclass_col, df.pop(subclass_col))
        cols.remove(subclass_col)
    elif subclass_col is None and subject_col is not None:
        df.insert(1, subject_col, df.pop(subject_col))
        cols.remove(subject_col)
    else:
        df.insert(1, subclass_col, df.pop(subclass_col))
        df.insert(2, subject_col, df.pop(subject_col))
        cols.remove(subclass_col)
        cols.remove(subject_col)

    # Drop any remaining metadata columns and 6) write the transposed
    # table (features as rows) without a header, as LEfSe expects.
    df.drop(columns=cols, inplace=True)
    df.T.to_csv(output_file, header=False, sep='\t')
| [
"sbstevenlee@gmail.com"
] | sbstevenlee@gmail.com |
42bf33970ef08bc11406a2df223e00f8d93bae03 | 88e98a147d0fd2915048d307332e6269593d94f6 | /ext/Outros/EnviaPacoteTCP.py | 62c6a4d8f6eeffa5fbf50ea24ffba11b13b091fb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | brunoalmeidamartins/pox | 452c6d8a959aae9d28279128d7eb04e76b7c756e | 2c9f13ef53f7cf15c369d15ba4b1051036d74e00 | refs/heads/master | 2020-03-12T13:11:41.915516 | 2018-07-17T18:25:23 | 2018-07-17T18:25:23 | 130,635,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | from scapy.all import *
ip = IP(dst="192.168.0.1")
tcp= TCP(dport=80)
pkt = ip/udp
t = sr(pkt)
print(t)
| [
"sgtbrunoalmeida@gmail.com"
] | sgtbrunoalmeida@gmail.com |
fe40af596e008133901e8eb437974a14e6f29f8f | 82770c7bc5e2f27a48b8c370b0bab2ee41f24d86 | /microblog/flask/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_basic.py | 15826d4685a7eefdaf622f6b05ad0ecfe5c8f25c | [
"Apache-2.0"
] | permissive | johankaito/fufuka | 77ddb841f27f6ce8036d7b38cb51dc62e85b2679 | 32a96ecf98ce305c2206c38443e58fdec88c788d | refs/heads/master | 2022-07-20T00:51:55.922063 | 2015-08-21T20:56:48 | 2015-08-21T20:56:48 | 39,845,849 | 2 | 0 | Apache-2.0 | 2022-06-29T23:30:11 | 2015-07-28T16:39:54 | Python | UTF-8 | Python | false | false | 29,654 | py | #!/usr/bin/env python
# Created by Pearu Peterson, September 2002
from __future__ import division, print_function, absolute_import
__usage__ = """
Build fftpack:
python setup_fftpack.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.fftpack.test()'
Run tests if fftpack is not installed:
python tests/test_basic.py
"""
from numpy.testing import (assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_raises, run_module_suite,
assert_array_less, TestCase, dec)
from scipy.fftpack import ifft,fft,fftn,ifftn,rfft,irfft, fft2
from scipy.fftpack import _fftpack as fftpack
from scipy.fftpack.basic import _is_safe_size
from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
swapaxes, double, cdouble)
import numpy as np
import numpy.fft
# "large" composite numbers supported by FFTPACK
LARGE_COMPOSITE_SIZES = [
2**13,
2**5 * 3**5,
2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
2,
2*3*5,
2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
2011
]
SMALL_PRIME_SIZES = [
29
]
from numpy.random import rand
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = "size: %s rdt: %s" % (size, rdt)
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
    """Uniform samples in [0, 1) with shape given by the sequence *size*."""
    dims = tuple(size)
    return rand(*dims)
def get_mat(n):
    """Return the n x n integer matrix whose (i, j) entry is i + j."""
    idx = arange(n)
    return idx[:, None] + idx[None, :]
def direct_dft(x):
    """Naive O(n^2) discrete Fourier transform, used as an fft() reference."""
    x = asarray(x)
    n = len(x)
    out = zeros(n, dtype=cdouble)
    phase = -arange(n) * (2j * pi / n)
    for k in range(n):
        out[k] = dot(exp(k * phase), x)
    return out
def direct_idft(x):
    """Naive O(n^2) inverse DFT, used as an ifft() reference."""
    x = asarray(x)
    n = len(x)
    out = zeros(n, dtype=cdouble)
    phase = arange(n) * (2j * pi / n)
    for k in range(n):
        out[k] = dot(exp(k * phase), x) / n
    return out
def direct_dftn(x):
    """Reference n-D DFT: apply a 1-D FFT along every axis in turn."""
    result = asarray(x)
    for ax in range(result.ndim):
        result = fft(result, axis=ax)
    return result
def direct_idftn(x):
    """Reference n-D inverse DFT: apply a 1-D IFFT along every axis in turn."""
    result = asarray(x)
    for ax in range(result.ndim):
        result = ifft(result, axis=ax)
    return result
def direct_rdft(x):
    """Naive reference for rfft(): real FFT in FFTPACK's packed format.

    The length-n real output stores the half-spectrum as
    [Re(y[0]), Re(y[1]), Im(y[1]), Re(y[2]), Im(y[2]), ...]; for even n
    the (purely real) Nyquist coefficient occupies the last slot.
    """
    x = asarray(x)
    n = len(x)
    w = -arange(n)*(2j*pi/n)
    r = zeros(n,dtype=double)
    for i in range(n//2+1):
        y = dot(exp(i*w),x)  # i-th DFT coefficient
        if i:
            r[2*i-1] = y.real
            if 2*i < n:
                r[2*i] = y.imag
        else:
            # DC term is purely real and stored first.
            r[0] = y.real
    return r
def direct_irdft(x):
    """Naive reference for irfft(): inverse of the packed real-FFT format.

    Rebuilds the full Hermitian-symmetric complex spectrum from the packed
    real array produced by direct_rdft() and inverse-transforms it.
    """
    x = asarray(x)
    n = len(x)
    x1 = zeros(n,dtype=cdouble)
    for i in range(n//2+1):
        if i:
            if 2*i < n:
                x1[i] = x[2*i-1] + 1j*x[2*i]
                # Conjugate mirror for the upper half of the spectrum.
                x1[n-i] = x[2*i-1] - 1j*x[2*i]
            else:
                # Nyquist term (even n): purely real.
                x1[i] = x[2*i-1]
        else:
            # DC term.
            x1[0] = x[0]
    return direct_idft(x1).real
class _TestFFTBase(TestCase):
    """Shared fft() tests; subclasses provide the complex/real dtypes."""
    def setUp(self):
        # Subclasses override cdt/rdt with concrete dtypes in their setUp.
        self.cdt = None
        self.rdt = None
        np.random.seed(1234)
    def test_definition(self):
        # fft of a complex array matches the naive reference DFT.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
        y = fft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_dft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
        assert_array_almost_equal(fft(x),direct_dft(x))
    def test_n_argument_real(self):
        # A 2-D real input is transformed row by row.
        x1 = np.array([1,2,3,4], dtype=self.rdt)
        x2 = np.array([1,2,3,4], dtype=self.rdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def _test_n_argument_complex(self):
        # Disabled (leading underscore): complex-row variant of the above.
        x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
        x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def test_djbfft(self):
        # Power-of-two sizes: low-level fftpack wrappers vs numpy.fft.
        for i in range(2,14):
            n = 2**i
            x = list(range(n))
            y = fftpack.zfft(x)
            y2 = numpy.fft.fft(x)
            assert_array_almost_equal(y,y2)
            y = fftpack.zrfft(x)
            assert_array_almost_equal(y,y2)
    def test_invalid_sizes(self):
        assert_raises(ValueError, fft, [])
        assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
    def test__is_safe_size(self):
        # _is_safe_size is True only for products of the primes 2, 3 and 5.
        vals = [(0, True), (1, True), (2, True), (3, True), (4, True), (5, True), (6, True), (7, False),
                (15, True), (16, True), (17, False), (18, True), (21, False), (25, True), (50, True),
                (120, True), (210, False)]
        for n, is_safe in vals:
            assert_equal(_is_safe_size(n), is_safe)
class TestDoubleFFT(_TestFFTBase):
    """Run the shared fft() tests in double precision."""
    def setUp(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
    """Run the shared fft() tests in single precision."""
    def setUp(self):
        self.cdt = np.complex64
        self.rdt = np.float32
    @dec.knownfailureif(True, "single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved")
    def test_notice(self):
        # Known-failure marker documenting the single-precision limitation.
        pass
class _TestIFFTBase(TestCase):
    """Shared ifft() tests; subclasses provide the complex/real dtypes."""
    def setUp(self):
        np.random.seed(1234)
    def test_definition(self):
        # ifft of a complex array matches the naive reference inverse DFT.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
        y = ifft(x)
        y1 = direct_idft(x)
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_definition_real(self):
        # Real input still yields a complex result of the matching dtype.
        x = np.array([1,2,3,4,1,2,3,4], self.rdt)
        y = ifft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_idft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4,5], dtype=self.rdt)
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_djbfft(self):
        # Power-of-two sizes: low-level inverse wrappers vs numpy.fft.ifft.
        for i in range(2,14):
            n = 2**i
            x = list(range(n))
            y = fftpack.zfft(x,direction=-1)
            y2 = numpy.fft.ifft(x)
            assert_array_almost_equal(y,y2)
            y = fftpack.zrfft(x,direction=-1)
            assert_array_almost_equal(y,y2)
    def test_random_complex(self):
        # ifft(fft(x)) and fft(ifft(x)) are identities on complex data.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.cdt)
            x = random([size]).astype(self.cdt) + 1j*x
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)
    def test_random_real(self):
        # Round-trip identity also holds starting from real data.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)
    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        if self.rdt == np.float32:
            rtol = 1e-5
        elif self.rdt == np.float64:
            rtol = 1e-10
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            x = (x + 1j*np.random.rand(size)).astype(self.cdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
    def test_invalid_sizes(self):
        assert_raises(ValueError, ifft, [])
        assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
class TestDoubleIFFT(_TestIFFTBase):
    """Run the shared ifft() tests in double precision."""
    def setUp(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestSingleIFFT(_TestIFFTBase):
    """Run the shared ifft() tests in single precision."""
    def setUp(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class _TestRFFTBase(TestCase):
    """Shared rfft() tests; subclasses provide the dtypes."""
    def setUp(self):
        np.random.seed(1234)
    def test_definition(self):
        # rfft matches the naive packed-format reference (even and odd n).
        for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
            x = np.array(t, dtype=self.rdt)
            y = rfft(x)
            y1 = direct_rdft(x)
            assert_array_almost_equal(y,y1)
            assert_equal(y.dtype, self.rdt)
    def test_djbfft(self):
        # Compare the low-level drfft wrapper with numpy's complex FFT,
        # repacked into FFTPACK's real-output layout.
        from numpy.fft import fft as numpy_fft
        for i in range(2,14):
            n = 2**i
            x = list(range(n))
            y2 = numpy_fft(x)
            y1 = zeros((n,),dtype=double)
            y1[0] = y2[0].real
            y1[-1] = y2[n//2].real
            for k in range(1, n//2):
                y1[2*k-1] = y2[k].real
                y1[2*k] = y2[k].imag
            y = fftpack.drfft(x)
            assert_array_almost_equal(y,y1)
    def test_invalid_sizes(self):
        assert_raises(ValueError, rfft, [])
        assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
class TestRFFTDouble(_TestRFFTBase):
    """Run the shared rfft() tests in double precision."""
    def setUp(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
    """Run the shared rfft() tests in single precision."""
    def setUp(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class _TestIRFFTBase(TestCase):
    """Shared irfft() tests; subclasses set dtypes and decimal tolerance."""
    def setUp(self):
        np.random.seed(1234)
    def test_definition(self):
        # x*_1 are the full complex spectra equivalent to the packed x*.
        x1 = [1,2,3,4,1,2,3,4]
        x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
        x2 = [1,2,3,4,1,2,3,4,5]
        x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
        def _test(x, xr):
            y = irfft(np.array(x, dtype=self.rdt))
            y1 = direct_irdft(x)
            assert_equal(y.dtype, self.rdt)
            assert_array_almost_equal(y,y1, decimal=self.ndec)
            assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
        _test(x1, x1_1)
        _test(x2, x2_1)
    def test_djbfft(self):
        # Rebuild the Hermitian spectrum by hand and compare the low-level
        # inverse drfft wrapper with numpy's complex ifft.
        from numpy.fft import ifft as numpy_ifft
        for i in range(2,14):
            n = 2**i
            x = list(range(n))
            x1 = zeros((n,),dtype=cdouble)
            x1[0] = x[0]
            for k in range(1, n//2):
                x1[k] = x[2*k-1]+1j*x[2*k]
                x1[n-k] = x[2*k-1]-1j*x[2*k]
            x1[n//2] = x[-1]
            y1 = numpy_ifft(x1)
            y = fftpack.drfft(x,direction=-1)
            assert_array_almost_equal(y,y1)
    def test_random_real(self):
        # irfft(rfft(x)) and rfft(irfft(x)) are identities on real data.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = irfft(rfft(x))
            y2 = rfft(irfft(x))
            assert_equal(y1.dtype, self.rdt)
            assert_equal(y2.dtype, self.rdt)
            assert_array_almost_equal(y1, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
            assert_array_almost_equal(y2, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        if self.rdt == np.float32:
            rtol = 1e-5
        elif self.rdt == np.float64:
            rtol = 1e-10
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = irfft(rfft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = rfft(irfft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
    def test_invalid_sizes(self):
        assert_raises(ValueError, irfft, [])
        assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
# self.ndec is bogus; we should have a assert_array_approx_equal for number of
# significant digits
class TestIRFFTDouble(_TestIRFFTBase):
    """Run the shared irfft() tests in double precision."""
    def setUp(self):
        self.cdt = np.cdouble
        self.rdt = np.double
        self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
    """Run the shared irfft() tests in single precision."""
    def setUp(self):
        self.cdt = np.complex64
        self.rdt = np.float32
        self.ndec = 5
class Testfft2(TestCase):
    """Tests for the 2-D transform fft2()."""
    def setUp(self):
        np.random.seed(1234)
    def test_regression_244(self):
        """fft returns wrong result with axes parameter."""
        # fftn (and hence fft2) used to break when both axes and shape were
        # used
        x = numpy.ones((4,4,2))
        y = fft2(x, shape=(8,8), axes=(-3,-2))
        y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
        assert_array_almost_equal(y, y_r)
    def test_invalid_sizes(self):
        assert_raises(ValueError, fft2, [[]])
        assert_raises(ValueError, fft2, [[1,1],[2,2]], (4, -3))
class TestFftnSingle(TestCase):
    """Single-precision tests for the n-D transform fftn()."""
    def setUp(self):
        np.random.seed(1234)
    def test_definition(self):
        x = [[1,2,3],[4,5,6],[7,8,9]]
        y = fftn(np.array(x, np.float32))
        # Single-precision input must not be silently upcast to double.
        if not y.dtype == np.complex64:
            raise ValueError("double precision output with single precision")
        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)
    def test_size_accuracy(self):
        # Compare single-precision results against the double-precision
        # transform cast down, within a generous nulp budget.
        for size in SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
            y1 = fftn(x.real.astype(np.float32))
            y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
            assert_equal(y1.dtype, np.complex64)
            assert_array_almost_equal_nulp(y1, y2, 2000)
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
            y1 = fftn(x.real.astype(np.float32))
            y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
            assert_equal(y1.dtype, np.complex64)
            assert_array_almost_equal_nulp(y1, y2, 2000)
class TestFftn(TestCase):
    """Tests for the n-D transform fftn(): axes/shape argument handling."""
    def setUp(self):
        np.random.seed(1234)
    def test_definition(self):
        # fftn matches the naive axis-by-axis reference transform.
        x = [[1,2,3],[4,5,6],[7,8,9]]
        y = fftn(x)
        assert_array_almost_equal(y,direct_dftn(x))
        x = random((20,26))
        assert_array_almost_equal(fftn(x),direct_dftn(x))
        x = random((5,4,3,20))
        assert_array_almost_equal(fftn(x),direct_dftn(x))
    def test_axes_argument(self):
        # Exhaustively check every axis permutation of a 3x3x3 cube by
        # comparing against explicitly pre-transposed data.
        # plane == ji_plane, x== kji_space
        plane1 = [[1,2,3],[4,5,6],[7,8,9]]
        plane2 = [[10,11,12],[13,14,15],[16,17,18]]
        plane3 = [[19,20,21],[22,23,24],[25,26,27]]
        ki_plane1 = [[1,2,3],[10,11,12],[19,20,21]]
        ki_plane2 = [[4,5,6],[13,14,15],[22,23,24]]
        ki_plane3 = [[7,8,9],[16,17,18],[25,26,27]]
        jk_plane1 = [[1,10,19],[4,13,22],[7,16,25]]
        jk_plane2 = [[2,11,20],[5,14,23],[8,17,26]]
        jk_plane3 = [[3,12,21],[6,15,24],[9,18,27]]
        kj_plane1 = [[1,4,7],[10,13,16],[19,22,25]]
        kj_plane2 = [[2,5,8],[11,14,17],[20,23,26]]
        kj_plane3 = [[3,6,9],[12,15,18],[21,24,27]]
        ij_plane1 = [[1,4,7],[2,5,8],[3,6,9]]
        ij_plane2 = [[10,13,16],[11,14,17],[12,15,18]]
        ij_plane3 = [[19,22,25],[20,23,26],[21,24,27]]
        ik_plane1 = [[1,10,19],[2,11,20],[3,12,21]]
        ik_plane2 = [[4,13,22],[5,14,23],[6,15,24]]
        ik_plane3 = [[7,16,25],[8,17,26],[9,18,27]]
        ijk_space = [jk_plane1,jk_plane2,jk_plane3]
        ikj_space = [kj_plane1,kj_plane2,kj_plane3]
        jik_space = [ik_plane1,ik_plane2,ik_plane3]
        jki_space = [ki_plane1,ki_plane2,ki_plane3]
        kij_space = [ij_plane1,ij_plane2,ij_plane3]
        x = array([plane1,plane2,plane3])
        assert_array_almost_equal(fftn(x),fftn(x,axes=(-3,-2,-1)))  # kji_space
        assert_array_almost_equal(fftn(x),fftn(x,axes=(0,1,2)))
        y = fftn(x,axes=(2,1,0))  # ijk_space
        assert_array_almost_equal(swapaxes(y,-1,-3),fftn(ijk_space))
        y = fftn(x,axes=(2,0,1))  # ikj_space
        assert_array_almost_equal(swapaxes(swapaxes(y,-1,-3),
                                           -1,-2),
                                  fftn(ikj_space))
        y = fftn(x,axes=(1,2,0))  # jik_space
        assert_array_almost_equal(swapaxes(swapaxes(y,-1,-3),
                                           -3,-2),
                                  fftn(jik_space))
        y = fftn(x,axes=(1,0,2))  # jki_space
        assert_array_almost_equal(swapaxes(y,-2,-3),fftn(jki_space))
        y = fftn(x,axes=(0,2,1))  # kij_space
        assert_array_almost_equal(swapaxes(y,-2,-1),
                                  fftn(kij_space))
        y = fftn(x,axes=(-2,-1))  # ji_plane
        assert_array_almost_equal(fftn(plane1),y[0])
        assert_array_almost_equal(fftn(plane2),y[1])
        assert_array_almost_equal(fftn(plane3),y[2])
        y = fftn(x,axes=(1,2))  # ji_plane
        assert_array_almost_equal(fftn(plane1),y[0])
        assert_array_almost_equal(fftn(plane2),y[1])
        assert_array_almost_equal(fftn(plane3),y[2])
        y = fftn(x,axes=(-3,-2))  # kj_plane
        assert_array_almost_equal(fftn(x[:,:,0]),y[:,:,0])
        assert_array_almost_equal(fftn(x[:,:,1]),y[:,:,1])
        assert_array_almost_equal(fftn(x[:,:,2]),y[:,:,2])
        y = fftn(x,axes=(-3,-1))  # ki_plane
        assert_array_almost_equal(fftn(x[:,0,:]),y[:,0,:])
        assert_array_almost_equal(fftn(x[:,1,:]),y[:,1,:])
        assert_array_almost_equal(fftn(x[:,2,:]),y[:,2,:])
        y = fftn(x,axes=(-1,-2))  # ij_plane
        assert_array_almost_equal(fftn(ij_plane1),swapaxes(y[0],-2,-1))
        assert_array_almost_equal(fftn(ij_plane2),swapaxes(y[1],-2,-1))
        assert_array_almost_equal(fftn(ij_plane3),swapaxes(y[2],-2,-1))
        y = fftn(x,axes=(-1,-3))  # ik_plane
        assert_array_almost_equal(fftn(ik_plane1),swapaxes(y[:,0,:],-1,-2))
        assert_array_almost_equal(fftn(ik_plane2),swapaxes(y[:,1,:],-1,-2))
        assert_array_almost_equal(fftn(ik_plane3),swapaxes(y[:,2,:],-1,-2))
        y = fftn(x,axes=(-2,-3))  # jk_plane
        assert_array_almost_equal(fftn(jk_plane1),swapaxes(y[:,:,0],-1,-2))
        assert_array_almost_equal(fftn(jk_plane2),swapaxes(y[:,:,1],-1,-2))
        assert_array_almost_equal(fftn(jk_plane3),swapaxes(y[:,:,2],-1,-2))
        y = fftn(x,axes=(-1,))  # i_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i,j,:]),y[i,j,:])
        y = fftn(x,axes=(-2,))  # j_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i,:,j]),y[i,:,j])
        y = fftn(x,axes=(0,))  # k_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[:,i,j]),y[:,i,j])
        y = fftn(x,axes=())  # point
        assert_array_almost_equal(y,x)
    def test_shape_argument(self):
        # A shape larger than the input zero-pads before transforming.
        small_x = [[1,2,3],[4,5,6]]
        large_x1 = [[1,2,3,0],[4,5,6,0],[0,0,0,0],[0,0,0,0]]
        y = fftn(small_x,shape=(4,4))
        assert_array_almost_equal(y,fftn(large_x1))
        y = fftn(small_x,shape=(3,4))
        assert_array_almost_equal(y,fftn(large_x1[:-1]))
    def test_shape_axes_argument(self):
        small_x = [[1,2,3],[4,5,6],[7,8,9]]
        large_x1 = array([[1,2,3,0],
                          [4,5,6,0],
                          [7,8,9,0],
                          [0,0,0,0]])
        # Disable tests with shape and axes of different lengths
        # y = fftn(small_x,shape=(4,4),axes=(-1,))
        # for i in range(4):
        #    assert_array_almost_equal (y[i],fft(large_x1[i]))
        # y = fftn(small_x,shape=(4,4),axes=(-2,))
        # for i in range(4):
        #    assert_array_almost_equal (y[:,i],fft(large_x1[:,i]))
        y = fftn(small_x,shape=(4,4),axes=(-2,-1))
        assert_array_almost_equal(y,fftn(large_x1))
        y = fftn(small_x,shape=(4,4),axes=(-1,-2))
        assert_array_almost_equal(y,swapaxes(
            fftn(swapaxes(large_x1,-1,-2)),-1,-2))
    def test_shape_axes_argument2(self):
        # Change shape of the last axis
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-1,), shape=(8,))
        assert_array_almost_equal(y, fft(x, axis=-1, n=8))
        # Change shape of an arbitrary axis which is not the last one
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-2,), shape=(8,))
        assert_array_almost_equal(y, fft(x, axis=-2, n=8))
        # Change shape of axes: cf #244, where shape and axes were mixed up
        x = numpy.random.random((4,4,2))
        y = fftn(x, axes=(-3,-2), shape=(8,8))
        assert_array_almost_equal(y, numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
    def test_shape_argument_more(self):
        """Test that fftn raises ValueError when s.shape is longer than x.shape"""
        x = zeros((4, 4, 2))
        assert_raises(ValueError, fftn, x, shape=(8, 8, 2, 1))
    def test_invalid_sizes(self):
        assert_raises(ValueError, fftn, [[]])
        assert_raises(ValueError, fftn, [[1,1],[2,2]], (4, -3))
class _TestIfftn(TestCase):
    """Shared ifftn() tests; subclasses set dtypes and the nulp tolerance."""
    # Subclasses override these class attributes.
    dtype = None
    cdtype = None
    def setUp(self):
        np.random.seed(1234)
    def test_definition(self):
        # ifftn matches the naive axis-by-axis inverse reference.
        x = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype=self.dtype)
        y = ifftn(x)
        assert_equal(y.dtype, self.cdtype)
        assert_array_almost_equal_nulp(y,direct_idftn(x),self.maxnlp)
        x = random((20,26))
        assert_array_almost_equal_nulp(ifftn(x),direct_idftn(x),self.maxnlp)
        x = random((5,4,3,20))
        assert_array_almost_equal_nulp(ifftn(x),direct_idftn(x),self.maxnlp)
    def test_random_complex(self):
        # ifftn(fftn(x)) and fftn(ifftn(x)) are identities on complex data.
        for size in [1,2,51,32,64,92]:
            x = random([size,size]) + 1j*random([size,size])
            assert_array_almost_equal_nulp(ifftn(fftn(x)),x,self.maxnlp)
            assert_array_almost_equal_nulp(fftn(ifftn(x)),x,self.maxnlp)
    def test_invalid_sizes(self):
        assert_raises(ValueError, ifftn, [[]])
        assert_raises(ValueError, ifftn, [[1,1],[2,2]], (4, -3))
class TestIfftnDouble(_TestIfftn):
    """Run the shared ifftn() tests in double precision."""
    dtype = np.float64
    cdtype = np.complex128
    maxnlp = 2000
class TestIfftnSingle(_TestIfftn):
    """Run the shared ifftn() tests in single precision."""
    dtype = np.float32
    cdtype = np.complex64
    maxnlp = 3500
class TestLongDoubleFailure(TestCase):
    """fft/ifft must raise ValueError on unsupported long-double input."""
    def setUp(self):
        np.random.seed(1234)
    def test_complex(self):
        if np.dtype(np.longcomplex).itemsize == np.dtype(np.complex).itemsize:
            # longdouble == double; so fft is supported
            return
        x = np.random.randn(10).astype(np.longdouble) + \
            1j * np.random.randn(10).astype(np.longdouble)
        for f in [fft, ifft]:
            try:
                f(x)
                raise AssertionError("Type %r not supported but does not fail" %
                                     np.longcomplex)
            except ValueError:
                pass
    def test_real(self):
        if np.dtype(np.longdouble).itemsize == np.dtype(np.double).itemsize:
            # longdouble == double; so fft is supported
            return
        x = np.random.randn(10).astype(np.longcomplex)
        for f in [fft, ifft]:
            try:
                f(x)
                raise AssertionError("Type %r not supported but does not fail" %
                                     np.longcomplex)
            except ValueError:
                pass
class FakeArray(object):
    """Array-like that exposes only the buffer-level __array_interface__."""
    def __init__(self, data):
        self.__array_interface__ = data.__array_interface__
        self._data = data
class FakeArray2(object):
    """Array-like that yields its payload through the __array__ protocol."""
    def __init__(self, data):
        self._data = data
    def __array__(self):
        payload = self._data
        return payload
class TestOverwrite(object):
    """Check input overwrite behavior of the FFT functions """
    real_dtypes = [np.float32, np.float64]
    dtypes = real_dtypes + [np.complex64, np.complex128]
    def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
        # Run the routine on a copy (also wrapped in array-like fakes) and
        # verify the input is untouched unless overwriting is allowed.
        x2 = x.copy()
        for fake in [lambda x: x, FakeArray, FakeArray2]:
            routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
            sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
                routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
            if not should_overwrite:
                assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
    def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes):
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        for fftsize in [8, 16, 32]:
            for overwrite_x in [True, False]:
                # Overwriting is legitimate only for in-place-capable dtypes
                # on contiguous, full-size transforms of the last axis.
                should_overwrite = (overwrite_x
                                    and dtype in overwritable_dtypes
                                    and fftsize <= shape[axis]
                                    and (len(shape) == 1 or
                                         (axis % len(shape) == len(shape)-1
                                          and fftsize == shape[axis])))
                self._check(data, routine, fftsize, axis,
                            overwrite_x=overwrite_x,
                            should_overwrite=should_overwrite)
    def test_fft(self):
        overwritable = (np.complex128, np.complex64)
        for dtype in self.dtypes:
            self._check_1d(fft, dtype, (16,), -1, overwritable)
            self._check_1d(fft, dtype, (16, 2), 0, overwritable)
            self._check_1d(fft, dtype, (2, 16), 1, overwritable)
    def test_ifft(self):
        overwritable = (np.complex128, np.complex64)
        for dtype in self.dtypes:
            self._check_1d(ifft, dtype, (16,), -1, overwritable)
            self._check_1d(ifft, dtype, (16, 2), 0, overwritable)
            self._check_1d(ifft, dtype, (2, 16), 1, overwritable)
    def test_rfft(self):
        overwritable = self.real_dtypes
        for dtype in self.real_dtypes:
            self._check_1d(rfft, dtype, (16,), -1, overwritable)
            self._check_1d(rfft, dtype, (16, 2), 0, overwritable)
            self._check_1d(rfft, dtype, (2, 16), 1, overwritable)
    def test_irfft(self):
        overwritable = self.real_dtypes
        for dtype in self.real_dtypes:
            self._check_1d(irfft, dtype, (16,), -1, overwritable)
            self._check_1d(irfft, dtype, (16, 2), 0, overwritable)
            self._check_1d(irfft, dtype, (2, 16), 1, overwritable)
    def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes):
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        def fftshape_iter(shp):
            # Yield every combination of halved/same/doubled sizes per axis.
            if len(shp) <= 0:
                yield ()
            else:
                for j in (shp[0]//2, shp[0], shp[0]*2):
                    for rest in fftshape_iter(shp[1:]):
                        yield (j,) + rest
        if axes is None:
            part_shape = shape
        else:
            part_shape = tuple(np.take(shape, axes))
        for overwrite_x in [True, False]:
            for fftshape in fftshape_iter(part_shape):
                should_overwrite = (overwrite_x
                                    and data.ndim == 1
                                    and np.all([x < y for x, y in zip(fftshape, part_shape)])
                                    and dtype in overwritable_dtypes)
                self._check(data, routine, fftshape, axes,
                            overwrite_x=overwrite_x,
                            should_overwrite=should_overwrite)
                if data.ndim > 1:
                    # check fortran order: it never overwrites
                    self._check(data.T, routine, fftshape, axes,
                                overwrite_x=overwrite_x,
                                should_overwrite=False)
    def _check_nd(self, routine, dtype, overwritable):
        self._check_nd_one(routine, dtype, (16,), None, overwritable)
        self._check_nd_one(routine, dtype, (16,), (0,), overwritable)
        self._check_nd_one(routine, dtype, (16, 2), (0,), overwritable)
        self._check_nd_one(routine, dtype, (2, 16), (1,), overwritable)
        self._check_nd_one(routine, dtype, (8, 16), None, overwritable)
        self._check_nd_one(routine, dtype, (8, 16), (0, 1), overwritable)
        self._check_nd_one(routine, dtype, (8, 16, 2), (0, 1), overwritable)
        self._check_nd_one(routine, dtype, (8, 16, 2), (1, 2), overwritable)
        self._check_nd_one(routine, dtype, (8, 16, 2), (0,), overwritable)
        self._check_nd_one(routine, dtype, (8, 16, 2), (1,), overwritable)
        self._check_nd_one(routine, dtype, (8, 16, 2), (2,), overwritable)
        self._check_nd_one(routine, dtype, (8, 16, 2), None, overwritable)
        self._check_nd_one(routine, dtype, (8, 16, 2), (0,1,2), overwritable)
    def test_fftn(self):
        overwritable = (np.complex128, np.complex64)
        for dtype in self.dtypes:
            self._check_nd(fftn, dtype, overwritable)
    def test_ifftn(self):
        overwritable = (np.complex128, np.complex64)
        for dtype in self.dtypes:
            self._check_nd(ifftn, dtype, overwritable)
if __name__ == "__main__":
run_module_suite()
| [
"john.g.keto@gmail.com"
] | john.g.keto@gmail.com |
f07d0d152386b5a33ee321365ef0d942cf5da5cc | 0e856246b18da3ca8e52d6208163b876a6457040 | /app_pybot/request_tools/google_request.py | cd20efa6234f0985028b081d8b8488551ebe0fe1 | [] | no_license | Elladan81/P7_GrandPyBot | bdf4e07ef70a178c2d6d577a88e2a805f082c232 | 180f10adb8361129c3b250a58f4d8a9b568759c5 | refs/heads/master | 2022-12-10T04:55:40.000745 | 2020-02-27T16:09:39 | 2020-02-27T16:09:39 | 237,025,205 | 0 | 0 | null | 2022-12-08T03:32:50 | 2020-01-29T16:09:20 | Python | UTF-8 | Python | false | false | 1,498 | py | import requests
from app_pybot.request_tools.apikey import GOO_API_KEY
class GMapsRequest:
    """This class handles request to Google Maps API
    """
    URL_BASE = "https://maps.googleapis.com/maps/api/geocode/json?address="

    def __init__(self, user_request):
        """Takes user request to build the url to request
        """
        # Collapse whitespace runs and join words with '.' for the query.
        self.question = ".".join(user_request.split())
        self.url = GMapsRequest.URL_BASE + self.question + \
            "&key=" + GOO_API_KEY

    def get_coord(self):
        """Extracts coordinates (latitude, longitude) from
        the data returned by Google Maps API.

        Returns the {'lat': ..., 'lng': ...} dict, or "" when the
        response has no result or an unexpected shape.
        """
        api_data = self.get_data()
        try:
            return api_data['results'][0]['geometry']['location']
        except (IndexError, KeyError):
            # No result, or the payload is missing the expected keys.
            return ""

    def get_data(self):
        """Requests the Google Maps API
        and returns data as a JSON object
        """
        # Removed the leftover debug print of the whole payload.
        gmaps_data = requests.get(self.url)
        return gmaps_data.json()

    def get_address(self):
        """Extracts formatted address from the data
        returned by Google Maps API, or "" when unavailable."""
        api_data = self.get_data()
        try:
            return api_data['results'][0]['formatted_address']
        except (IndexError, KeyError):
            return ""
def main():
    """Placeholder entry point; the module is used via GMapsRequest."""
    pass
if __name__ == "__main__":
main()
| [
"heladan@hotmail.fr"
] | heladan@hotmail.fr |
1095d3c8d3ad72cc7514fa3552a8c3680b8ea0ed | a6de6c6984dd85a5951d2f755a5093ff8c7c1e26 | /Students/Turtle/0603/rgb.py | 889b9cd2ee425b7fbed01375523084adcab47d53 | [] | no_license | P79N6A/PythonExercise | ec4de63adcf1360d25c5278d7e06824611ec325f | 59c2ef7438cb5a135be05f72711cfbd39c1fb49b | refs/heads/master | 2020-04-29T04:04:00.008249 | 2019-03-15T14:17:28 | 2019-03-15T14:17:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | from PIL import ImageColor
from PIL import Image
print ImageColor.getcolor('red', 'RGBA')
catIm = Image.open('zophie.png')
# print catIm.size
# print catIm.filename
# print catIm.format
# print catIm.format_description
# catIm.save('zophie.jpg')
# catIm.rotate(90).save('rotated90.png')
#
# croppedIm = catIm.crop((335, 345, 565, 560))
# croppedIm.save('cropped.png')
catIm = Image.open('zophie.png')
catCopyIm = catIm.copy()
faceIm = catIm.crop((335, 345, 565, 560))
print faceIm.size
#
# catCopyIm.paste(faceIm, (0, 0))
# catCopyIm.paste(faceIm, (400, 500))
# catCopyIm.save('pasted.png')
# catImWidth, catImHeight = catIm.size
# faceImWidth, faceImHeight = faceIm.size
# catCopyTwo = catIm.copy()
# for left in range(0, catImWidth, faceImWidth):
# for top in range(0, catImHeight, faceImHeight):
# print(left, top)
# catCopyTwo.paste(faceIm, (left, top))
# catCopyTwo.save('tiled.png')
# catIm.rotate(6).save('rotated6.png')
# catIm.rotate(6, expand=True).save('rotated6_ _expanded.png')
#
# catIm.transpose(Image.FLIP_LEFT_RIGHT).save('horizontal_ _flip.png')
# catIm.transpose(Image.FLIP_TOP_BOTTOM).save('vertical_ _flip.png')
im = Image.new('RGBA', (100, 100))
print im.getpixel((0, 0))
for x in range(100):
for y in range(50):
im.putpixel((x, y), (210, 210, 210))
from PIL import ImageColor
for x in range(100):
for y in range(50, 100):
im.putpixel((x, y), ImageColor.getcolor('darkgray', 'RGBA'))
print im.getpixel((0, 0))
print im.getpixel((0, 50))
im.save('putPixel.png')
| [
"18201788952@163.com"
] | 18201788952@163.com |
90c8701966ec00284f17877e12b7623633ac7ca6 | 71d355d11c7150c3dcf9b19100441188c0e10db0 | /DataPersistence/MessageProducer.py | 948cda94f262a476b03f3d22cf2980a78d0ab6c1 | [] | no_license | VeeanPrasad/cu-feedback | efe0aee3e26223e14c43984b7a23d083dfd96e75 | d48c33b56a28362575bfe22b9252ad3f976a07b0 | refs/heads/master | 2023-04-25T08:04:29.408380 | 2021-05-04T21:08:29 | 2021-05-04T21:08:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | from kafka import KafkaProducer
from db import Database
class MessageProducer:
producer = KafkaProducer('database')
def send_db_update(self):
started = Database.start_db()
if started:
self.producer.send('database', 'Connection Successful')
else:
self.producer.send('database', 'Connection Failed')
| [
"james.m.luther@gmail.com"
] | james.m.luther@gmail.com |
a371f668a614759d77d103527da26e9d5bf59dfa | ea449a25bf5233e657549aca39e9453de4ecb7d7 | /news/migrations/0010_auto_20180425_1433.py | 9eec07e7fa7eab435ddac75eb3c86d2247237e77 | [] | no_license | nasa1024/personl_blog | d35dfbc532d3e06903e3c7e55508c53f505bdbb5 | 02b437a939827899810b778261a974ce0c858c09 | refs/heads/master | 2023-03-06T02:14:41.158541 | 2018-04-25T06:37:58 | 2018-04-25T06:37:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-04-25 06:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: update verbose_name options and the
    Classify.name field label for the news app."""

    dependencies = [
        ('news', '0009_auto_20171102_1143'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='article',
            options={'verbose_name': '咨询列表'},
        ),
        migrations.AlterModelOptions(
            name='classify',
            options={'verbose_name': '公司名称'},
        ),
        migrations.AlterModelOptions(
            name='tag',
            options={'verbose_name': '类别'},
        ),
        migrations.AlterField(
            model_name='classify',
            name='name',
            field=models.CharField(max_length=100, verbose_name='公司名称'),
        ),
    ]
| [
"541573560@qq.com"
] | 541573560@qq.com |
c6b54b3af377a2ae56ecaca249b4853dd344dc38 | 1e05839e7c1ffd453515c3788e88d696a224ec9e | /ws_func.py | 81bb3af645664e01fb9e204cfa3ce8db3a4fe5dd | [] | no_license | wplam107/Final_Project | df2cfe61451c44016762c5aded6d4bff3a2e7e26 | 6039c5c180c304f3bef1ab20c613af322e990f7f | refs/heads/master | 2022-07-07T01:30:36.742675 | 2020-05-13T19:56:49 | 2020-05-13T19:56:49 | 251,073,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,145 | py | import numpy as np
import pandas as pd
import re
import os
import time
import datetime
import random
import requests
import pickle
import urllib
from bs4 import BeautifulSoup
from os import system
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
# CNN FUNCTIONS
def urls_scrape_page_cnn(urls, page, driver):
    """Collect article links from one CNN search-results page.

    :param urls: list the found hrefs are appended to (mutated in place)
    :param page: URL of the CNN search-results page to load
    :param driver: an open selenium WebDriver
    :return: the same ``urls`` list, extended with up to 10 article links
    """
    driver.get(page)
    time.sleep(5)  # let the JS-rendered result list finish loading
    # Each results page shows 10 entries numbered 1..10 in the DOM.
    xpath = '/html/body/div[5]/div[2]/div/div[2]/div[2]/div/div[3]/div[{}]/div[2]/h3/a'
    counter = 0
    for i in range(1, 11):
        href = driver.find_element_by_xpath(xpath.format(i)).get_attribute('href')
        # Skip live-blog pages; they have no normal article body.
        if re.search('live-news', href):
            continue
        else:
            counter += 1
            urls.append(href)
    print(f'Added {counter} URLs')
    return urls
def urls_scrape_all_cnn(driver):
    """Walk all 16 CNN search-result pages and return the collected article URLs."""
    first_page = 'https://www.cnn.com/search?size=10&q=%22hong%20kong%20protests%22&type=article'
    template = 'https://www.cnn.com/search?size=10&q=%22hong%20kong%20protests%22&page={}&from={}0&type=article'
    # Page 1 has its own URL shape; pages 2..16 follow the template.
    result_pages = [first_page]
    for page_no in range(2, 17):
        result_pages.append(template.format(page_no, page_no - 1))
    collected = []
    for result_page in result_pages:
        collected = urls_scrape_page_cnn(collected, result_page, driver)
    return collected
# Helper function to clean body of article
def _clean_body(article):
cleaned = re.sub(r"^.*?\)|(CNN's.*)", "", article)
return cleaned
def scrape_one_cnn(url, counter, driver):
    """Scrape one CNN article page.

    :param url: article URL to load
    :param counter: running count of successfully scraped articles
    :param driver: an open selenium WebDriver
    :return: (url, date, headline, body, counter)
    """
    driver.get(url)
    body = []
    time.sleep(3)
    # Dismiss the newsletter modal if present.  A class *name* may not
    # contain spaces, so the original
    # find_element_by_class_name('bx-close bx-close-link bx-close-inside')
    # always raised and the modal was never closed; a compound CSS
    # selector is the correct form.
    try:
        modal_button = driver.find_element_by_css_selector('.bx-close.bx-close-link.bx-close-inside')
        modal_button.click()
    except:
        pass
    try:
        date = driver.find_element_by_class_name('update-time').text
    except:
        date = ''
    try:
        headline = driver.find_element_by_class_name('pg-headline').text
    except:
        headline = ''
    try:
        texts = driver.find_elements_by_class_name('zn-body__paragraph')
        for text in texts:
            # Drop contributor-credit paragraphs ("CNN's ...").
            if re.search('CNN\'s', text.text):
                continue
            else:
                body.append(text.text)
        counter += 1
    except:
        body.append('')
    body = ' '.join(body)
    return url, date, headline, body, counter
def scrape_cnn(urls, driver, ret_csv=False, csv=''):
    """Scrape each CNN article URL and collect the results into a DataFrame.

    When ``ret_csv`` is true the frame is also written to ``csv`` as a
    tab-separated file.
    """
    collected = {'url': [], 'date': [], 'headline': [], 'body': []}
    scraped = 0
    for link in urls:
        time.sleep(1)
        link, when, title, text, scraped = scrape_one_cnn(link, scraped, driver)
        collected['url'].append(link)
        collected['date'].append(when)
        collected['headline'].append(title)
        collected['body'].append(text)
        # scrape_one_cnn only advances the count on a successful body scrape.
        if scraped % 10 == 0:
            print(f'Articles scraped so far: {scraped}')
        time.sleep(2)
    frame = pd.DataFrame()
    for column, values in collected.items():
        frame[column] = values
    frame['source'] = 'CNN'
    if ret_csv:
        frame.to_csv(csv, sep='\t')
        print(f'File {csv} Created')
    return frame
# SCMP functions
def _super_scroll(scroll, driver):
    '''
    Scroll the SCMP topic page to load more results (approximately 30 new
    articles appear per scroll), then count the loaded article nodes.

    :param scroll: int, number of load-more scrolls to perform
    :param driver: an open selenium WebDriver on the SCMP topic page
    :return: int, total number of article nodes found after scrolling
    '''
    # driver = webdriver.Chrome()
    # driver.get('https://www.scmp.com/topics/hong-kong-protests')
    i = 0
    while i < scroll:
        try:
            # Double-click the load-more anchor, then SPACE scrolls the page.
            actions = ActionChains(driver)
            more_content = driver.find_element_by_class_name('topic-content__load-more-anchor')
            actions.move_to_element(more_content).double_click(more_content).pause(1).send_keys(Keys.SPACE).perform()
            i += 1
            time.sleep(random.uniform(2, 5))
            if i % 10 == 0:
                print(f'Scrolls: {i}')
        except:
            # A bottom-bar modal blocked the click: close it and retry the
            # same scroll sequence once.
            modal_button = driver.find_element_by_class_name('bottom-bar-close-button')
            modal_button.click()
            actions = ActionChains(driver)
            more_content = driver.find_element_by_class_name('topic-content__load-more-anchor')
            actions.move_to_element(more_content).double_click(more_content).pause(1).send_keys(Keys.SPACE).perform()
            i += 1
            time.sleep(random.uniform(2, 5))
            if i % 10 == 0:
                print(f'Scrolls: {i}')
    # Find total articles loaded so far (direct children of the list node).
    time.sleep(8)
    total = len(driver.find_elements_by_xpath('//*[@id="topic-detail"]/div[1]/div/div[5]/div[2]/*'))
    print(f'Total articles: {total}\n')
    time.sleep(1)
    return total
def scrape_urls_scmp(scroll):
    '''
    Scrape article URLs and publication dates from the SCMP topic page.

    Parameters:
    scroll : int, number of scrolls through SCMP web search

    Returns:
    (urls, dates) : parallel lists of article hrefs and datetime.date objects
    '''
    # Instantiate driver
    driver = webdriver.Chrome()
    driver.get('https://www.scmp.com/topics/hong-kong-protests')
    # Scroll through pages
    total = _super_scroll(scroll, driver)
    urls = []
    dates = []
    counter = 0
    # xpath -> timestamp span of the i-th entry; a_xpath -> its anchor.
    xpath = '//*[@id="topic-detail"]/div[1]/div/div[5]/div[2]/div[{}]/div[1]/div[2]/div[1]/div/span'
    a_xpath = '//*[@id="topic-detail"]/div[1]/div/div[5]/div[2]/div[{}]/div[1]/div[1]/div/div/div[2]/a'
    for i in range(total):
        try:
            ele = driver.find_element_by_xpath(xpath.format(i+1)).text
            # Timestamps look like "12 Mar 2020 - 10:30AM".
            ts = datetime.strptime(ele, '%d %b %Y - %H:%M%p').date()
            href = driver.find_element_by_xpath(a_xpath.format(i+1)).get_attribute('href')
            # Keep news articles only.
            if re.search('news', href):
                urls.append(href)
                dates.append(ts)
                counter += 1
                if counter % 50 == 0:
                    print(f'URLs scraped so far: {counter}')
        except:
            # Entry missing either timestamp or anchor: skip it.
            continue
    # driver.quit()
    print('\n')
    print(f'Number of URLs Scraped: {len(urls)}')
    print(f'Number of Dates Scraped: {len(dates)}')
    return urls, dates
def scrape_one_scmp(url, driver, count_sc, count_no):
    """Scrape one SCMP article; returns (headline, body, count_sc, count_no).

    ``count_sc`` counts pages with a non-empty body, ``count_no`` pages
    without one (videos, infographics, failed parses).
    """
    driver.get(url)
    # Non-article page types are skipped outright (counted as "no text").
    if re.search('/video/', url) or re.search('/infographics/', url) or re.search('united-states-canada', url):
        time.sleep(10)
        headline = ''
        body = ''
        count_no += 1
        return headline, body, count_sc, count_no
    try:
        head = driver.find_element_by_class_name('info__headline')
        # NOTE(review): find_element_* raises rather than returning None,
        # so this fallback branch looks unreachable — confirm.
        if head == None:
            head = driver.find_elements_by_css_selector('h1')
        headline = head.text
    except:
        # NOTE(review): if this branch runs, ``head`` is unbound and the
        # double_click(head) below raises NameError — confirm intended.
        headline = ''
    actions = ActionChains(driver)
    time.sleep(5)
    # Repeated SPACE presses scroll the article so lazy content loads.
    actions.double_click(head).send_keys(Keys.SPACE).pause(0.5).send_keys(Keys.SPACE).pause(0.5).send_keys(Keys.SPACE).pause(0.5).send_keys(Keys.SPACE).perform()
    # Click modal and full article button
    try:
        modal_button = driver.find_element_by_class_name('bottom-bar-close-button')
        modal_button.click()
    except:
        pass
    try:
        text_body = driver.find_element_by_class_name('details__body')
        texts = text_body.find_elements_by_class_name('generic-article__body')
        # NOTE(review): find_elements_* returns [] on no match, never None.
        if texts == None:
            texts = text_body.find_elements_by_css_selector('p')
    except:
        body = ''
        count_no += 1
        return headline, body, count_sc, count_no
    body = []
    for text in texts:
        # Drop captions, newsletter plugs, policy boilerplate and ads.
        cond1 = re.search('Photo:', text.text)
        cond2 = re.search('CORONAVIRUS UPDATE NEWSLETTER', text.text)
        cond3 = re.search('Privacy Policy', text.text)
        cond4 = re.search('Advertisement', text.text)
        if cond1 or cond2 or cond3 or cond4:
            continue
        else:
            body.append(text.text)
    body = ' '.join(body)
    if body == '':
        count_no += 1
    else:
        count_sc += 1
    return headline, body, count_sc, count_no
def scrape_articles_scmp(urls, headlines, bodies):
    '''
    Scrape all designated SCMP articles.

    :param urls: article URLs to visit
    :param headlines: list the scraped headlines are appended to (mutated)
    :param bodies: list the scraped bodies are appended to (mutated)
    :return: (headlines, bodies)
    '''
    # Instantiate driver
    driver = webdriver.Chrome()
    driver.maximize_window()
    count_sc = 0
    count_no = 0
    for url in urls:
        time.sleep(random.uniform(1, 3))
        headline, body, count_sc, count_no = scrape_one_scmp(url, driver, count_sc, count_no)
        bodies.append(body)
        headlines.append(headline)
        # Every 20 articles, restart the browser (presumably to dodge
        # rate limiting / memory growth — TODO confirm).
        if (count_sc + count_no) % 20 == 0 and (count_sc + count_no) != 0:
            print(f'Current articles scraped: {count_sc + count_no}')
            driver.quit()
            time.sleep(20)
            driver = webdriver.Chrome()
            driver.maximize_window()
            time.sleep(20)
        # Extra pause every 40 articles.
        if (count_sc + count_no) % 40 == 0 and (count_sc + count_no) != 0:
            time.sleep(10)
    # Quit driver
    driver.quit()
    print(f'Number of Articles Scraped: {count_sc}\n')
    print(f'Number of Articles w/o Text: {count_no}\n')
    return headlines, bodies
def scrape_scmp(urls, dates, ret_csv=False, csv=''):
    """Scrape the SCMP articles at ``urls`` and assemble them into a DataFrame.

    ``dates`` must be parallel to ``urls``.  When ``ret_csv`` is true the
    frame is also written to ``csv`` as a tab-separated file.
    """
    headlines, bodies = scrape_articles_scmp(urls, [], [])
    frame = pd.DataFrame()
    frame['url'] = urls
    frame['date'] = dates
    frame['headline'] = headlines
    frame['body'] = bodies
    frame['source'] = 'SCMP'
    # Optionally persist as a tab-delimited file.
    if ret_csv:
        frame.to_csv(csv, sep='\t')
        print(f'File {csv} Created')
    return frame
# ABC (Australia) Functions
def url_scrap_page_abc(urls, page, driver):
    """Collect article links from one ABC search-results page.

    :param urls: list the news hrefs are appended to (mutated in place)
    :param page: URL of the ABC search-results page to load
    :param driver: an open selenium WebDriver
    :return: the same ``urls`` list
    """
    a_xpath = '//*[@id="#content"]/section[2]/div/div[3]/div[2]/ul/li[{}]/div/article/div/div[1]/div/a'
    driver.get(page)
    time.sleep(random.uniform(2, 4))
    # Each results page lists 10 entries.
    for i in range(0, 10):
        url = driver.find_element_by_xpath(a_xpath.format(i+1)).get_attribute('href')
        # Keep only links under the news section.
        if re.search('news', url):
            urls.append(url)
    return urls
def url_scrape_all_abc(driver):
    """Collect article URLs from all 14 ABC News search-result pages."""
    href = 'https://search-beta.abc.net.au/#/?query=%22hong%20kong%20protests%22&page={}&configure%5BgetRankingInfo%5D=true&configure%5BclickAnalytics%5D=true&configure%5BuserToken%5D=anonymous-02f5b4b2-06b4-4402-9b15-3cc4fc5dbb64&configure%5Banalytics%5D=true&sortBy=ABC_production_all_latest&refinementList%5Bsite.title%5D%5B0%5D=ABC%20News'
    collected = []
    for page_no in range(1, 15):
        collected = url_scrap_page_abc(collected, href.format(page_no), driver)
    print(f'Total Articles: {len(collected)}')
    return collected
def bs_scrape_body_abc(url):
    """Fetch one ABC article and parse it with BeautifulSoup.

    :param url: article URL
    :return: (url, headline, date, body) — ``date`` is the raw matched
        date substring, ``body`` the joined paragraph text
    """
    print(url)
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, 'html.parser')
    # ABC uses several page templates; try them in order of likelihood.
    article = soup.find('div', class_='article section')
    if article == None:
        article = soup.find('article')
    if article == None:
        article = soup.find('div', class_='comp-rich-text article-text clearfix')
    ps = article.find_all('p')
    # Date lives either in a timestamp span or a <time datetime=...> tag.
    date = article.find('span', class_='timestamp')
    if date == None:
        date = article.find('time')['datetime']
    else:
        date = date.get_text()
    # Accept both "Month 12, 2019" and "Month 12 2019" forms.
    try:
        date = re.findall(r'([a-zA-Z]+\s\d+\,\s\d+)', date)[0]
    except:
        date = re.findall(r'([a-zA-Z]+\s\d+\s\d+)', date)[0]
    headline = article.find('h1').string
    body = []
    for p in ps:
        text = p.get_text()
        # Drop metadata lines (update/post stamps, topics, source credits).
        cond1 = re.search('Updated', text)
        cond2 = re.search('Topics:', text)
        cond3 = re.search('First posted', text)
        cond4 = re.search('Posted', text)
        cond5 = re.search('Source:', text)
        if cond1 or cond2 or cond3 or cond4 or cond5:
            continue
        else:
            body.append(text)
    body = ' '.join(body)
    return url, headline, date, body
def bs_scrape_abc(urls, ret_csv=False, csv=''):
    """Scrape every usable ABC article in ``urls`` into a DataFrame.

    Interactive/documentary/TV/science pages are skipped.  When ``ret_csv``
    is true the frame is also written to ``csv`` as a tab-separated file.
    """
    skip_patterns = ('interactive', 'documentary', 'the-world', 'newschannel', 'science')
    kept_urls = []
    headlines = []
    dates = []
    bodies = []
    scraped = 0
    for link in urls:
        # Non-article page types are skipped entirely.
        if any(re.search(pattern, link) for pattern in skip_patterns):
            continue
        time.sleep(random.uniform(1, 2))
        link, title, when, text = bs_scrape_body_abc(link)
        kept_urls.append(link)
        headlines.append(title)
        dates.append(when)
        bodies.append(text)
        scraped += 1
        if scraped % 10 == 0:
            print(f'Scraped so far: {scraped}')
    frame = pd.DataFrame()
    frame['url'] = kept_urls
    frame['date'] = dates
    frame['headline'] = headlines
    frame['body'] = bodies
    frame['source'] = 'ABC (Australia)'
    if ret_csv:
        frame.to_csv(csv, sep='\t')
        print(f'File {csv} Created')
    return frame
# CCTV Functions
def url_scrape_page_cctv(urls, page, counter):
    """Append every h1-anchor href on one CCTV search page to ``urls``.

    :return: (urls, counter) with ``counter`` advanced by the number of
        links found on this page
    """
    response = urllib.request.urlopen(page)
    parsed = BeautifulSoup(response, 'html.parser')
    for heading in parsed.find_all('h1'):
        urls.append(heading.find('a').get('href'))
        counter += 1
    return urls, counter
def url_scrape_all_cctv():
    """Walk the 31 CCTV English search pages and return all article URLs."""
    cctv_search = 'http://so.cntv.cn/language/english/index.php?qtext=hong+kong+protest&type=1&sort=SCORE&page={}&vtime=-1&datepid=5&history=yes'
    collected = []
    total = 0
    for page_no in range(1, 32):
        collected, total = url_scrape_page_cctv(collected, cctv_search.format(page_no), total)
        if total % 10 == 0:
            print(f'URLs scraped: {total}')
    print(f'Total URLs scraped: {total}')
    return collected
def scrape_body_cctv(url):
    """Scrape one CCTV article page.

    :param url: article URL
    :return: (url, date, headline, body) where ``date`` is the first 10
        characters of the second <p> following the <h3> headline
    """
    body = []
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, 'html.parser')
    page_body = soup.body
    # The original computed the headline twice (page_body.h3 and
    # page_body.find('h3') are equivalent lookups); once is enough.
    headline = page_body.find('h3').get_text()
    date = page_body.find('h3').find_next_sibling('p').find_next_sibling('p').get_text()[:10]
    text_body = page_body.find(class_='text')
    ps = text_body.find_all('p')
    for p in ps:
        # Skip photo captions.
        if re.search('Photo', p.get_text()):
            continue
        else:
            body.append(p.get_text())
    body = ' '.join(body)
    return url, date, headline, body
def scrape_cctv(urls, ret_csv=False, csv=''):
    """Scrape every CCTV article in ``urls`` and return a DataFrame of results."""
    records = []
    done = 0
    for link in urls:
        time.sleep(1)
        records.append(scrape_body_cctv(link))
        done += 1
        if done % 25 == 0:
            print(f'Articles scraped: {done}')
    frame = pd.DataFrame()
    frame['url'] = [rec[0] for rec in records]
    frame['date'] = [rec[1] for rec in records]
    frame['headline'] = [rec[2] for rec in records]
    frame['body'] = [rec[3] for rec in records]
    frame['source'] = 'CCTV'
    if ret_csv:
        frame.to_csv(csv, sep='\t')
        print(f'File {csv} Created')
    return frame
# Reuters functions
# Function to clickdown for more articles till no more
def clickdown_reuters(driver):
    """Click the Reuters 'load more' button until it fails or a cap is hit.

    :param driver: an open selenium WebDriver on a Reuters search page

    Stops after at most 80 clicks.  The original *incremented* the counter,
    so ``while counter > 0`` could never terminate via the counter and the
    loop relied solely on the click raising once the button disappeared;
    decrementing restores the intended cap.
    """
    more_button = driver.find_element_by_xpath('//*[@id="content"]/section[2]/div/div[1]/div[4]/div/div[4]/div[1]')
    remaining = 80
    while remaining > 0:
        time.sleep(0.5)
        try:
            more_button.click()
            remaining -= 1
        except:
            # Button gone or not clickable: no more results to load.
            break
def url_scrape_reuters(driver):
    """Return the href of the anchor inside every <h3> on the current page."""
    links = []
    for heading in driver.find_elements_by_css_selector('h3'):
        anchor = heading.find_element_by_css_selector('a')
        links.append(anchor.get_attribute('href'))
    return links
def scrape_one_reu(url):
    """Fetch and parse one Reuters article.

    :param url: article URL
    :return: (url, date, headline, body)
    """
    page = urllib.request.urlopen(url)
    time.sleep(1.5)
    soup = BeautifulSoup(page, 'html.parser')
    headline = soup.find('h1').get_text()
    date = soup.find('div', class_='ArticleHeader_date').get_text()
    text_body = soup.find('div', class_='StandardArticleBody_body')
    texts = text_body.find_all('p')
    body = []
    for text in texts:
        # Drop the trailing staff-credit paragraphs.
        cond1 = re.search('Writing by', text.get_text())
        cond2 = re.search('Editing by', text.get_text())
        cond3 = re.search('Reporting by', text.get_text())
        if cond1 or cond2 or cond3:
            continue
        else:
            body.append(text.get_text())
    body = ' '.join(body)
    return url, date, headline, body
def scrape_reuters(urls, ret_csv=False, csv=''):
    """Scrape every Reuters article in ``urls`` and return a DataFrame of results."""
    rows = []
    done = 0
    for link in urls:
        time.sleep(random.uniform(0.5, 1.5))
        rows.append(scrape_one_reu(link))
        done += 1
        if done % 25 == 0:
            print(f'Articles scraped: {done}')
    frame = pd.DataFrame()
    frame['url'] = [row[0] for row in rows]
    frame['date'] = [row[1] for row in rows]
    frame['headline'] = [row[2] for row in rows]
    frame['body'] = [row[3] for row in rows]
    frame['source'] = 'Reuters'
    print(f'Total Articles Scraped: {done}')
    if ret_csv:
        frame.to_csv(csv, sep='\t')
        print(f'File {csv} Created')
    return frame
"w.p.lam107@gmail.com"
] | w.p.lam107@gmail.com |
2a7e9f11b5bc62591da5f08a99fef13a5861388c | 3ce2cc14c27eb2958210e90ef2d38b4d5eedee55 | /requrl.py | 43e8c150afbf1cb779762dfb7a13b019f33b67fd | [] | no_license | shifeng-xu/monitorurl | 6e537b1a1b28b347d12ea8fbb2cd199ed1d37894 | ae3e43fdfa366dde449a487ec630f6de9820fd0c | refs/heads/master | 2021-05-19T00:02:37.910004 | 2020-03-31T03:06:31 | 2020-03-31T03:06:31 | 251,486,998 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | # coding=UTF-8
import requests
def getUrl(url):
    """Fetch ``url`` with a desktop-browser User-Agent and return the body text.

    :param url: address to request, e.g. "http://beautycareapp.com/"
    :return: the decoded response body (also printed to stdout)

    A timeout is supplied because ``requests.get`` without one blocks
    indefinitely, which would hang the URL monitor on a stalled server.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36",
    }
    response = requests.get(url, headers=headers, timeout=10).text
    print(response)
    return response
| [
"xushifeng@do-global.com"
] | xushifeng@do-global.com |
6dac45eaa99c6894f3e14022654f22cf18d35a57 | caf71dc9e3d90398e60b69190e0555239bada2a7 | /postal-tracker/grab.py | 8d1d7d20fe062100b476f2341123a33ca9e9d82f | [
"MIT"
] | permissive | denisglotov/exp | 6a17d61b576e951f44bbb4ce96526f182ddcacb6 | 78d05b5643bb050c1392c93d653d6d418b505968 | refs/heads/master | 2023-08-21T09:09:30.266468 | 2023-08-13T20:46:49 | 2023-08-13T20:46:49 | 124,665,911 | 0 | 0 | MIT | 2023-03-06T23:33:40 | 2018-03-10T14:57:31 | Python | UTF-8 | Python | false | false | 1,521 | py | import requests
import json
import sys
from bs4 import BeautifulSoup
import config
if len(sys.argv) < 2:
print("Argument required: tracking number.")
sys.exit(1)
sess = requests.Session()
for cookie in config.cookies:
cookie_entity = requests.cookies.create_cookie(domain=cookie['domain'],
name=cookie['name'],
value=cookie['value'],
path='/',
rest={'HttpOnly': None})
sess.cookies.set_cookie(cookie_entity)
params = config.params.copy()
params['id'] = sys.argv[1]
r = sess.get(config.url,
headers=config.headers,
params=params,
allow_redirects=True)
reply = r.text
# print(sess.cookies, file=sys.stderr)
# with open('test.html') as f:
# reply = f.read()
soup = BeautifulSoup(reply, features='html.parser')
delivery = soup.find(class_='b-delivery')
# print(delivery.prettify, file=sys.stderr)
res = []
if delivery:
for tag in delivery.contents:
if tag.name:
res.append({
'time': tag.find(class_='time').get_text(' ', strip=True),
'place': tag.find(class_='place').get_text(strip=True),
'status': tag.find(class_='status').get_text(strip=True),
})
print(json.dumps(res, indent=4))
# Local Variables:
# compile-command: "pipenv run python grab.py AA123456789AA"
# End:
| [
"denis@glotov.org"
] | denis@glotov.org |
c4e4ecc32e36ad0a794647c986a8c562cecfe87e | c73aa29f8a1140a3ef2e8d0cce275ff1fa173036 | /data_app.py | 51f61f47b43ff3a939aacfc2a0115e8daa7a4b02 | [
"MIT"
] | permissive | shinegz/view-python | 642f401032bb9b761b325f0b0a10ca11c9447f7a | ebbafeb2785200c808284deda8c5dbfda92591f1 | refs/heads/master | 2022-12-01T15:28:08.251202 | 2020-08-02T00:15:36 | 2020-08-02T00:15:36 | 275,271,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,531 | py | # encoding: utf-8
import sys
import serial
import serial.tools.list_ports
from PySide2.QtCore import QTimer, QTime, QRegExp
from PySide2.QtGui import QIcon, QRegExpValidator
from PySide2.QtWidgets import QWidget, QApplication, QMainWindow, QMessageBox
import pyqtgraph as pg
from data_deal import Data_Deal
from MainWindow import Ui_MainWindow
class Data_App(QWidget, Ui_MainWindow):
    def __init__(self):
        """Build the dialysis-monitor window and initialise all runtime state."""
        super().__init__()
        # Serial-port handle; configured and opened later in open_serial_port().
        self.serial = serial.Serial()
        # timer1 polls the port for frames; timer2 periodically arms CSV writes.
        self.timer1 = QTimer()
        self.timer2 = QTimer()
        self.time = QTime()
        self.now_time = ''
        # Build the main window from the generated Ui_MainWindow layout.
        self.main_window = QMainWindow()
        self.setupUi(self.main_window)
        self.retranslateUi(self.main_window)
        # Restrict the numeric line edits to 1-3 digit integers.
        bit_3_validator = QRegExpValidator()
        bit_3_validator.setRegExp(QRegExp('[0-9]{1,3}'))
        self.fresh_waste_edit.setValidator(bit_3_validator)
        self.fresh_edit.setValidator(bit_3_validator)
        self.waste_edit.setValidator(bit_3_validator)
        self.blood_edit.setValidator(bit_3_validator)
        self.ultrafiltration_edit.setValidator(bit_3_validator)
        self.debug_send_edit.setValidator(bit_3_validator)
        # Mapping of detected serial ports: {device name: description}.
        self.Com_Dict = {}
        # True until the first good frame creates a fresh CSV file.
        self.create_file_flag = True
        self.write_data_flag = False
        # Name of the CSV file currently being appended to.
        self.now_file_name = None
        # Raw bytes / decoded string most recently read from the port.
        self.received_bit_data = None
        self.received_data = None
        # Curve (plot item) handles, created by the *_plot_view_init methods.
        self.fresh_pressure_plot = None
        self.waste_pressure_plot = None
        self.fresh_flow_plot = None
        self.waste_flow_plot = None
        self.blood_flow_plot = None
        self.artery_pressure_plot = None
        self.vein_pressure_plot = None
        self.weight_1_plot = None
        self.weight_2_plot = None
        self.weight_3_plot = None
        self.tmp_plot = None
        # self.ph_plot = None
        # self.temperature_plot = None
        # Rolling history buffers backing the plots.
        self.x = 0
        self.list_fresh_pressure = [0] * 10
        self.list_waste_pressure = [0] * 10
        self.list_fresh_flow = [0]*10
        self.list_waste_flow = [0]*10
        self.list_blood_flow = [0]*10
        self.list_artery_pressure = [0]*10
        self.list_vein_pressure = [0]*10
        self.list_weight_1 = [0]*10
        self.list_weight_2 = [0]*10
        self.list_weight_3 = [0]*10
        self.list_tmp = [0]*10
        # self.list_ph = [0]*1000
        # Most recent decoded readings from the device.
        self.flag = ""
        self.fresh_pressure_data = 0
        self.waste_pressure_data = 0
        self.fresh_flow_data = 0
        self.waste_flow_data = 0
        self.blood_flow_data = 0
        self.artery_pressure_data = 0
        self.vein_pressure_data = 0
        self.weight_1_data = 0
        self.weight_2_data = 0
        self.weight_3_data = 0
        # Trans-membrane pressure (TMP).
        self.tmp_data = 0
        self.initial_temperature_data = 0
        self.process_temperature_data = 0
        # self.ph_data = 0
        self.ultra_filtration_data = 0
        # 0 until the first frame arrives (used once to flip the system state).
        self.times = 0
        # Consecutive empty-read counter (5 in a row closes the port).
        self.count_err = 0
        self.num = 0
        self.start_stop_flag = False
        # Wire signals/slots, then do an initial port scan.
        self.init()
        self.port_check()
# 按键关联
    def init(self):
        """Connect every widget signal to its slot and initialise the plots.

        NOTE(review): self.send_data, self.app_close and (later)
        self.system_status_button are referenced but defined outside this
        chunk — confirm they exist on the class.
        """
        # Serial open/close toggle.
        self.open_serial_button.clicked.connect(self.port_operation)
        # Start-receiving button.
        self.receive_button.clicked.connect(self.data_begin)
        self.start_stop_button.clicked.connect(lambda: self.send_data(self.start_stop_button))
        # Pump set-point send buttons; each lambda forwards its own button
        # so send_data() can tell which command to emit.
        self.fresh_waste_forward_button.clicked.connect(lambda: self.send_data(self.fresh_waste_forward_button))
        self.fresh_waste_reverse_button.clicked.connect(lambda: self.send_data(self.fresh_waste_reverse_button))
        self.fresh_forward_button.clicked.connect(lambda : self.send_data(self.fresh_forward_button))
        self.fresh_reverse_button.clicked.connect(lambda: self.send_data(self.fresh_reverse_button))
        self.waste_forward_button.clicked.connect(lambda: self.send_data(self.waste_forward_button))
        self.waste_reverse_button.clicked.connect(lambda: self.send_data(self.waste_reverse_button))
        self.blood_forward_button.clicked.connect(lambda: self.send_data(self.blood_forward_button))
        self.blood_reverse_button.clicked.connect(lambda: self.send_data(self.blood_reverse_button))
        self.ultrafiltration_forward_button.clicked.connect(lambda: self.send_data(self.ultrafiltration_forward_button))
        self.ultrafiltration_reverse_button.clicked.connect(lambda: self.send_data(self.ultrafiltration_reverse_button))
        self.debug_send_button.clicked.connect(lambda: self.send_data(self.debug_send_button))
        # Apply-all button.
        self.all_send_button.clicked.connect(lambda: self.send_data(self.all_send_button))
        # Pump stop buttons.
        self.fresh_waste_stop_button.clicked.connect(lambda: self.send_data(self.fresh_waste_stop_button))
        self.fresh_stop_button.clicked.connect(lambda: self.send_data(self.fresh_stop_button))
        self.waste_stop_button.clicked.connect(lambda: self.send_data(self.waste_stop_button))
        self.blood_stop_button.clicked.connect(lambda: self.send_data(self.blood_stop_button))
        self.ultrafiltration_stop_button.clicked.connect(lambda: self.send_data(self.ultrafiltration_stop_button))
        # Quit the application.
        self.quit_button.clicked.connect(self.app_close)
        # Rescan serial ports.
        self.find_port_button.clicked.connect(self.port_check)
        # Timer slots: timer1 reads frames, timer2 arms the CSV-write flag.
        self.timer1.timeout.connect(self.receive_data)
        self.timer2.timeout.connect(self.write_data)
        # Initialise the pyqtgraph PlotWidget instances.
        self.dialysis_pressure_plot_view_init()
        self.flow_plot_view_init()
        self.pulse_plot_view_init()
        self.weight_plot_view_init()
        self.tmp_plot_view_init()
        # self.ph_plot_view_init()
# 透析液压力趋势
def dialysis_pressure_plot_view_init(self):
self.dialysis_pressure_plot_view.setTitle("透析液压力(red新鲜液;blue废液)",
color='008080',
size='12pt', font='黑体')
# 设置上下左右的label
self.dialysis_pressure_plot_view.setLabel("left", "压强")
self.dialysis_pressure_plot_view.setLabel("bottom", "采样点")
# 设置自适应刻度范围
self.dialysis_pressure_plot_view.enableAutoRange()
# 显示表格线
self.dialysis_pressure_plot_view.showGrid(x=True, y=True)
# 背景色改为黑色
self.dialysis_pressure_plot_view.setBackground('000000')
# 实时显示应该获取 plotItem, 调用setData,
# 这样只重新plot该曲线,性能更高
self.fresh_pressure_plot = self.dialysis_pressure_plot_view.getPlotItem().plot(pen=pg.mkPen('r', width=2))
self.waste_pressure_plot = self.dialysis_pressure_plot_view.getPlotItem().plot(pen=pg.mkPen('b', width=2))
# 透析液流量
def flow_plot_view_init(self):
self.dialysis_flow_plot_view.setTitle("透析液流量(red新鲜;green废液;blue血液)",
color='008080',
size='12pt')
# 设置上下左右的label
self.dialysis_flow_plot_view.setLabel("left", "流量")
self.dialysis_flow_plot_view.setLabel("bottom", "采样点")
# 设置自适应刻度范围
self.dialysis_flow_plot_view.enableAutoRange()
# 显示表格线
self.dialysis_flow_plot_view.showGrid(x=True, y=True)
# 背景色改为白色
self.dialysis_flow_plot_view.setBackground('000000')
# 实时显示应该获取 plotItem, 调用setData,
# 这样只重新plot该曲线,性能更高
self.fresh_flow_plot = self.dialysis_flow_plot_view.getPlotItem().plot(pen=pg.mkPen('r', width=2))
self.waste_flow_plot = self.dialysis_flow_plot_view.getPlotItem().plot(pen=pg.mkPen('g', width=2))
self.blood_flow_plot = self.dialysis_flow_plot_view.getPlotItem().plot(pen=pg.mkPen('b', width=2))
# 动静脉压力
def pulse_plot_view_init(self):
self.pluse_plot_view.setTitle("动静脉压(red动脉;blue静脉)",
color='008080',
size='12pt')
# 设置上下左右的label
self.pluse_plot_view.setLabel("left", "压强")
self.pluse_plot_view.setLabel("bottom", "采样点")
# 设置自适应刻度范围
self.pluse_plot_view.enableAutoRange()
# 显示表格线
self.pluse_plot_view.showGrid(x=True, y=True)
# 背景色改为黑色
self.pluse_plot_view.setBackground('000000')
# 实时显示应该获取 plotItem, 调用setData,
# 这样只重新plot该曲线,性能更高
self.artery_pressure_plot = self.pluse_plot_view.getPlotItem().plot(pen=pg.mkPen('r', width=2))
self.vein_pressure_plot = self.pluse_plot_view.getPlotItem().plot(pen=pg.mkPen('b', width=2))
# 重量显示
def weight_plot_view_init(self):
self.weight_plot_view.setTitle("重量(red重量1;green重量2;blue重量3)",
color='008080',
size='12pt')
# 设置上下左右的label
self.weight_plot_view.setLabel("left", "重量")
self.weight_plot_view.setLabel("bottom", "采样点")
# 设置自适应刻度范围
self.weight_plot_view.enableAutoRange()
# 显示表格线
self.weight_plot_view.showGrid(x=True, y=True)
# 背景色改为黑色
self.weight_plot_view.setBackground('000000')
# 实时显示应该获取 plotItem, 调用setData,
# 这样只重新plot该曲线,性能更高
self.weight_1_plot = self.weight_plot_view.getPlotItem().plot(pen=pg.mkPen('r', width=2))
self.weight_2_plot = self.weight_plot_view.getPlotItem().plot(pen=pg.mkPen('g', width=2))
self.weight_3_plot = self.weight_plot_view.getPlotItem().plot(pen=pg.mkPen('b', width=2))
# TMP显示
def tmp_plot_view_init(self):
self.tmp_plot_view.setTitle("跨膜压",
color='008080',
size='12pt')
# 设置上下左右的label
self.tmp_plot_view.setLabel("left", "压强")
self.tmp_plot_view.setLabel("bottom", "采样点")
# 设置自适应刻度范围
self.tmp_plot_view.enableAutoRange()
# 显示表格线
self.tmp_plot_view.showGrid(x=True, y=True)
# 背景色改为黑色
self.tmp_plot_view.setBackground('000000')
# 实时显示应该获取 plotItem, 调用setData,
# 这样只重新plot该曲线,性能更高
self.tmp_plot = self.tmp_plot_view.getPlotItem().plot(pen=pg.mkPen('r', width=2))
# ph 值
# def ph_plot_view_init(self):
# self.ph_plot_view.setTitle("PH",
# color='008080',
# size='12pt')
# # 设置上下左右的label
# self.ph_plot_view.setLabel("left", "PH值")
# self.ph_plot_view.setLabel("bottom", "采样点")
#
# # 设置自适应刻度范围
# self.ph_plot_view.enableAutoRange()
#
# # 显示表格线
# self.ph_plot_view.showGrid(x=True, y=True)
#
# # 背景色改为白色
# self.ph_plot_view.setBackground('w')
#
# # 实时显示应该获取 plotItem, 调用setData,
# # 这样只重新plot该曲线,性能更高
# self.ph_plot = self.ph_plot_view.getPlotItem().plot( pen=pg.mkPen('r', width=2))
# 串口检测
def port_check(self):
# 检测所有存在的串口,将信息存储在字典中
port_list = list(serial.tools.list_ports.comports())
self.port_combo_box.clear()
if len(port_list) == 0:
self.port_combo_box.addItem("无串口")
QMessageBox.information(self, "信息", "未检测到串口!")
else:
self.port_combo_box.clear()
for port in port_list:
self.Com_Dict["%s" % port[0]] = "%s" % port[1]
self.port_combo_box.addItem(port[0])
# 串口开关操作
def port_operation(self):
if self.serial.is_open:
self.close_serial_port()
else:
self.open_serial_port()
# 开始接收数据
def data_begin(self):
if self.serial.is_open:
# 打开串口接收定时器,周期为100ms
if not self.timer1.isActive():
self.timer1.start(100)
self.receive_button.setText("接收中")
else:
return None
else:
QMessageBox.information(self, 'Port', '串口未打开!')
# 打开串口
def open_serial_port(self):
# 从QComboBox的当前值获取端口号
self.serial.port = self.port_combo_box.currentText()
# self.serial.port = "COM1"
if not self.serial.port:
QMessageBox.critical(self, '错误', '没有选择串口')
# 设置串口通信参数
self.serial.baudrate = 115200
self.serial.bytesize = 8
self.serial.stopbits = 1
self.serial.parity = "N"
# timeout默认为None,若不设置timeout,当使用read()时,一直会等到读取指定的字节数为止
self.serial.timeout = 2
self.serial.write_timeout = 2
# 设置软件控制流开关
self.serial.rtscts = True
self.serial.dsrdtr = True
try:
self.serial.rts = True
self.serial.dtr = True
self.serial.open()
except:
QMessageBox.critical(self, "Port Error", "此串口不能被打开!")
return None
# 判断是否有串口打开
if self.serial.is_open:
# 打开串口接收定时器,周期为100ms
self.statusbar.showMessage("打开串口成功")
self.open_serial_button.setText("关闭串口")
self.port_status_label.setStyleSheet("background-color:green")
self.port_status_label.style().polish(self.port_status_label) # 刷新样式
# 关闭串口
def close_serial_port(self):
self.timer1.stop()
self.timer2.stop()
self.serial.close()
if not self.serial.is_open:
self.open_serial_button.setText("打开串口")
self.port_status_label.setStyleSheet("background-color:gray")
self.port_status_label.style().polish(self.port_status_label) # 刷新样式
self.receive_button.setText("接收")
self.statusbar.showMessage("串口已关闭")
    def write_data(self):
        # timer2 slot: arm the flag so the next well-formed frame handled by
        # data_operation() is appended to the current CSV file.
        self.write_data_flag = True
# 接收数据
    def receive_data(self):
        """timer1 slot: read one 101-byte frame from the port and dispatch it.

        NOTE(review): decode('ascii') will raise (uncaught) on non-ASCII
        bytes, which would break this timer slot — confirm the device only
        sends ASCII frames.
        """
        try:
            # Probe the input buffer; failure here means the port died.
            num = self.serial.inWaiting()
        except:
            self.close_serial_port()
            QMessageBox.critical(self, "Read Error", "读取输入缓存区数据的字节数失败!")
            return None
        print(self.serial.rts)
        print(num)
        # Frames are fixed at 101 bytes (see data_operation's length check).
        self.received_bit_data = self.serial.read(101)
        print(self.received_bit_data)
        self.received_data = self.received_bit_data.decode('ascii')
        # First frame received means the device is running: flip the
        # system-state button once.
        if self.times == 0:
            self.system_status_button(self.start_stop_button)
            self.times += 1
        print(self.received_data)
        self.statusbar.showMessage("数据读取成功,准备处理数据")
        self.data_operation()
# 处理接收的数据
    def data_operation(self):
        """Validate, parse, persist and display the most recent frame.

        A frame is valid only when it is exactly 101 characters; 5
        consecutive empty reads close the port.
        """
        # received_data is the string the sender transmitted.
        if len(self.received_data) == 101:
            self.count_err = 0
            try:
                # Wrap the frame (plus the receive timestamp) in a parser.
                self.now_time = self.time.currentTime().toString()
                data = Data_Deal(self.received_data + self.now_time)
                # Unpack the parsed tuple for the display update below.
                self.flag, self.artery_pressure_data, self.vein_pressure_data, self.fresh_pressure_data,\
                self.waste_pressure_data, self.fresh_flow_data, self.waste_flow_data, self.blood_flow_data,\
                self.tmp_data, self.weight_1_data, self.weight_2_data, self.weight_3_data,\
                self.initial_temperature_data, self.process_temperature_data,\
                self.ultra_filtration_data = data.get_num()
            except ValueError:
                self.statusbar.showMessage('数据解析失败!准备重新接收')
                self.serial.reset_input_buffer()
                return None
            # First good frame creates a new CSV; later frames are appended
            # whenever timer2 has armed write_data_flag.
            if self.create_file_flag:
                self.create_file_flag = False
                # Arm a CSV write every 300000 ms (= 5 min; the original
                # comment said "once a minute" — mismatch, TODO confirm).
                self.timer2.start(300000)
                # File named after the receive time: HH_MM_SS.csv.
                self.now_file_name = self.now_time[0:2] + "_" + self.now_time[3:5] + "_" + self.now_time[6:8] + ".csv"
                data.create_csv(self.now_file_name)
            else:
                if self.write_data_flag and self.flag == 'tmp':
                    self.write_data_flag = False
                    data.store_to_csv(self.now_file_name)
            # 'tmp' marks a well-formed frame (per Data_Deal's protocol).
            if self.flag == 'tmp':
                print('数据格式没问题')
                self.show_update()
                self.statusbar.showMessage("数据格式正确,更新数据完毕")
                # print("更新数据完毕")
            else:
                self.statusbar.showMessage('数据格式不正确!准备重新接收')
                # QMessageBox.information(self, "信息", "数据格式不正确!准备重新接收")
                self.serial.reset_input_buffer()
                return None
        elif len(self.received_data) == 0:
            # Empty read: give up after 5 in a row.
            self.count_err += 1
            print(self.count_err)
            if self.count_err == 5:
                self.close_serial_port()
                QMessageBox.information(self, "信息", "没有读到数据!")
                return None
            return None
        else:
            # Wrong frame length: flush the buffer and wait for the next one.
            self.count_err = 0
            self.statusbar.showMessage("读取的数据的字节数不对!准备重新接收")
            # QMessageBox.information(self, "信息", "读取的数据的字节数不对!准备重新接收")
            self.serial.reset_input_buffer()
            return None
        # pass #self.textBrowser.insertPlainText("Data Receive Error: Wrong Data Length!\r\n")
        # QMessageBox.critical(self, "Data Length Error", "从输入缓存区读取数据的字节数不对!")
# 更新所有显示
def show_update(self):
# 更新文本显示区域的数据
self.fresh_pressure_value.setText(str(self.fresh_pressure_data))
self.waste_pressure_vlue.setText(str(self.waste_pressure_data))
self.fresh_flow_value.setText(str(self.fresh_flow_data))
self.waste_flow_value.setText(str(self.waste_flow_data))
self.blood_flow_value.setText(str(self.blood_flow_data))
self.artery_pressure_value.setText(str(self.artery_pressure_data))
self.vein_pressure_value.setText(str(self.vein_pressure_data))
# 电导值已经去掉,目前显示重量,未在右边显示
# self.ph_value.setText(str(self.ph_data))
self.initial_temperature_value.setText(str(self.initial_temperature_data))
self.process_temperature_value.setText(str(self.process_temperature_data))
self.ultrafiltration_show_value.setText(str(self.ultra_filtration_data))
# 更新数据
self.x += 1
# self.list_fresh_pressure[:-1] = self.list_fresh_pressure[1:]
# self.list_fresh_pressure[-1] = self.fresh_pressure_data
# self.list_fresh_pressure.insert(0, 0)
# self.list_waste_pressure[:-1] = self.list_waste_pressure[1:]
# self.list_waste_pressure[-1] = self.waste_pressure_data
# self.list_waste_pressure.insert(0, 0)
# self.list_fresh_flow[:-1] = self.list_fresh_flow[1:]
# self.list_fresh_flow[-1] = self.fresh_flow_data
# self.list_waste_flow[:-1] = self.list_waste_flow[1:]
# self.list_waste_flow[-1] = self.waste_flow_data
# self.list_blood_flow[:-1] = self.list_blood_flow[1:]
# self.list_blood_flow[-1] = self.blood_flow_data
# self.list_artery_pressure[:-1] = self.list_artery_pressure[1:]
# self.list_artery_pressure[-1] = self.artery_pressure_data
# self.list_vein_pressure[:-1] = self.list_vein_pressure[1:]
# self.list_vein_pressure[-1] = self.vein_pressure_data
# self.list_weight_1[:-1] = self.list_weight_1[1:]
# self.list_weight_1[-1] = self.weight_1_data
# self.list_weight_2[:-1] = self.list_weight_2[1:]
# self.list_weight_2[-1] = self.weight_2_data
# self.list_weight_3[:-1] = self.list_weight_3[1:]
# self.list_weight_3[-1] = self.weight_3_data
# self.list_tmp[:-1] = self.list_tmp[1:]
# self.list_tmp[-1] = self.tmp_data
# self.list_ph[:-1] = self.list_ph[1:]
# self.list_ph[-1] = self.ph_data
self.list_fresh_pressure.append(self.fresh_pressure_data)
self.list_waste_pressure.append(self.waste_pressure_data)
self.list_fresh_flow.append(self.fresh_flow_data)
self.list_waste_flow.append(self.waste_flow_data)
self.list_blood_flow.append(self.blood_flow_data)
self.list_artery_pressure.append(self.artery_pressure_data)
self.list_vein_pressure.append(self.vein_pressure_data)
self.list_weight_1.append(self.weight_1_data)
self.list_weight_2.append(self.weight_2_data)
self.list_weight_3.append(self.weight_3_data)
self.list_tmp.append(self.tmp_data)
# 更新图形
self.fresh_pressure_plot.setData(self.list_fresh_pressure)
# 给图形对象设置新坐标值,# 参数1:x 轴起点坐标 参数2:y 轴起点坐标
self.fresh_pressure_plot.setPos(self.x, 0)
self.waste_pressure_plot.setData(self.list_waste_pressure)
self.waste_pressure_plot.setPos(self.x, 0)
self.fresh_flow_plot.setData(self.list_fresh_flow)
self.fresh_flow_plot.setPos(self.x, 0)
self.waste_flow_plot.setData(self.list_waste_flow)
self.waste_flow_plot.setPos(self.x, 0)
self.blood_flow_plot.setData(self.list_blood_flow)
self.blood_flow_plot.setPos(self.x, 0)
self.artery_pressure_plot.setData(self.list_artery_pressure)
self.artery_pressure_plot.setPos(self.x, 0)
self.vein_pressure_plot.setData(self.list_vein_pressure)
self.vein_pressure_plot.setPos(self.x, 0)
self.weight_1_plot.setData(self.list_weight_1)
self.weight_1_plot.setPos(self.x, 0)
self.weight_2_plot.setData(self.list_weight_2)
self.weight_2_plot.setPos(self.x, 0)
self.weight_3_plot.setData(self.list_weight_3)
self.weight_3_plot.setPos(self.x, 0)
self.tmp_plot.setData(self.list_tmp)
self.tmp_plot.setPos(self.x, 0)
# self.ph_plot.setData(self.list_ph)
# self.ph_plot.setPos(self.x, 0)
# 发送数据
def send_data(self, btn):
if self.serial.is_open:
bytes_data = self.button_effort(btn)
if bytes_data != "" and len(bytes_data) > 5:
self.serial.write(bytes_data)
# QMessageBox.information(self, 'Send', '发送数据成功!')
else:
QMessageBox.critical(self, "Send Error", "发送数据不能为空!")
else:
QMessageBox.information(self, 'Port', '串口未打开')
return None
# 判断不同的按钮做出不同的响应
def button_effort(self, btn):
data = ""
# 发送新鲜液废旧液数据
if btn.objectName() == "fresh_waste_forward_button":
data = "ffw" + self.fresh_waste_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "fresh_waste_reverse_button":
data = "rfw" + self.fresh_waste_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "fresh_waste_stop_button":
data = "fwaStop\r\n"
return data.encode('ascii')
elif btn.objectName() == "fresh_forward_button":
data = "ffr" + self.fresh_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "fresh_reverse_button":
data = "rfr" + self.fresh_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "fresh_stop_button":
data = "freStop\r\n"
return data.encode('ascii')
elif btn.objectName() == "waste_forward_button":
data = "fwa" + self.waste_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "waste_reverse_button":
data = "rwa" + self.waste_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "waste_stop_button":
data = "wasStop\r\n"
return data.encode('ascii')
elif btn.objectName() == "blood_forward_button":
data = "fbl" + self.blood_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "blood_reverse_button":
data = "rbl" + self.blood_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "blood_stop_button":
data = "bloStop\r\n"
return data.encode('ascii')
elif btn.objectName() == "ultrafiltration_forward_button":
data = "ful" + self.ultrafiltration_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "ultrafiltration_reverse_button":
data = "rul" + self.ultrafiltration_edit.text() + "\r\n"
return data.encode('ascii')
elif btn.objectName() == "ultrafiltration_stop_button":
data = "ultStop\r\n"
return data.encode('ascii')
elif btn.objectName() == "debug_send_button":
data = "cmd" + self.debug_send_edit.text() + "\r\n"
return data.encode('ascii')
# elif btn.objectName() == "all_send_button":
# data = self.get_all_cmd()
# if data != "":
# reply = QMessageBox.question(None, "检查命令", data, QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
# if reply == QMessageBox.Yes:
# data += "\r\n"
elif btn.objectName() == "start_stop_button":
self.system_status_button(btn)
if self.start_stop_flag:
data = "systemStart\r\n"
return data.encode('ascii')
elif not self.start_stop_flag:
data = "systemStop\r\n"
return data.encode('ascii')
else:
return data
#
# if data != '':
# return data.encode('ascii')
# 发送所有命令的按钮指令(还有用吗?)
# def get_all_cmd(self):
# data = ""
# if self.fresh_waste_edit.text() != "":
# data += "fwl" + self.fresh_waste_edit.text()
# if self.ultrafiltration_edit.text() != "":
# data += "ult" + self.ultrafiltration_edit.text()
# if self.blood_pump_edit.text() != "":
# data += "bpu" + self.blood_pump_edit.text()
# if self.debug_send_edit.text() != "":
# data += self.debug_send_edit.text()
#
# if data == "":
# QMessageBox.critical(self, "警告", "未设置参数", QMessageBox.Yes)
# return data
# 更改系统开启与停止按钮的样式
def system_status_button(self, btn):
if btn.text() == "开启":
self.start_stop_flag = True
btn.setText('停止')
self.system_status_label.setStyleSheet('background-color:green')
self.system_status_label.style().polish(self.system_status_label)
elif btn.text() == "停止":
self.start_stop_flag = False
btn.setText('开启')
self.system_status_label.setStyleSheet('background-color:gray')
self.system_status_label.style().polish(self.system_status_label)
# 关闭系统
def app_close(self):
self.close_serial_port()
quit()
if __name__ == "__main__":
    # Create the Qt application, show the main window and propagate the
    # event-loop exit code to the shell.
    app = QApplication(sys.argv)
    myShow = Data_App()
    myShow.main_window.show()
    sys.exit(app.exec_())
| [
"noreply@github.com"
] | shinegz.noreply@github.com |
ecf3ce846d8243a2eaec317d1632dd1fdaa220c7 | 4979ca22bd3ecbdcfe8aaaacdc95dbd6586003eb | /flaskblog/models.py | 29a485a2b5ef62151c1f548ace3b99a5ca82754c | [] | no_license | Barnez299/flaskblogcs | 53b7ff5ab303f8116e83ba5d76b3943e2f0b28c9 | 33295e6ba4c7bd35f2de2b2b755e907bcd01064c | refs/heads/main | 2023-02-11T11:52:23.453967 | 2021-01-07T21:27:14 | 2021-01-07T21:27:14 | 326,769,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flaskblog import db, login_manager, app
from flask_login import UserMixin
# Flask-Login hook: rebuild the logged-in user from the id stored in the session.
@login_manager.user_loader
def load_user(user_id):
    """Return the User for *user_id* (Flask-Login passes it as a string)."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Application user account, including password-reset token helpers."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Avatar file name under the static profile-pics directory.
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    # Bcrypt hash (60 chars), never the plain password.
    password = db.Column(db.String(60), nullable=False)
    posts = db.relationship('Post', backref='author', lazy=True)

    def get_reset_token(self, expires_sec=1800):
        """Return a signed, time-limited password-reset token (default 30 min)."""
        s = Serializer(app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User encoded in *token*, or None if it is invalid or expired.

        Bug fix: the payload must be indexed with the string key 'user_id'.
        The previous code wrote ``s.loads(token)[user_id]`` — indexing with
        the not-yet-defined name ``user_id`` raised a NameError that the bare
        ``except:`` silently swallowed, so every token was rejected.
        """
        s = Serializer(app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except Exception:
            # Bad signature or expired token.
            return None
        return User.query.get(user_id)

    def __repr__(self):
        """Debug representation showing the identifying fields."""
        return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
    """A blog post authored by a User (linked through the user_id foreign key)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    # Naive UTC timestamp (datetime.utcnow) applied at insert time.
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        """Debug representation showing title and post date."""
        return f"Post('{self.title}', '{self.date_posted}')"
"barnez29@gmail.com"
] | barnez29@gmail.com |
25c98ab3c953b1a34a82318055d81df2e09ab0bd | 4acbe5055a488ae10077448eee206a6c8dbe509a | /examples/reinforcement_learning/atari_1step_qlearning.py | 4d9c9665cdad80795893a67fc9c6115660d78551 | [
"MIT"
] | permissive | barhomi/tflearn | 740a022fe5392be24d39cdebe72c28154222f3b6 | 5e4c706f5ddb3ac362c72a681fda4ce73d182015 | refs/heads/master | 2020-04-04T18:41:57.515280 | 2017-03-25T21:06:23 | 2017-03-25T21:06:23 | 68,142,123 | 0 | 1 | null | 2017-03-23T18:25:22 | 2016-09-13T19:52:12 | Python | UTF-8 | Python | false | false | 15,817 | py | # -*- coding: utf-8 -*-
"""
Teaching a machine to play an Atari game (Pacman by default) by implementing
a 1-step Q-learning with TFLearn, TensorFlow and OpenAI gym environment. The
algorithm is described in "Asynchronous Methods for Deep Reinforcement Learning"
paper. OpenAI's gym environment is used here for providing the Atari game
environment for handling games logic and states. This example is originally
adapted from Corey Lynch's repo (url below).
Requirements:
- gym environment (pip install gym)
- gym Atari environment (pip install gym[atari])
References:
- Asynchronous Methods for Deep Reinforcement Learning. Mnih et al, 2015.
Links:
- Paper: http://arxiv.org/pdf/1602.01783v1.pdf
- OpenAI's gym: https://gym.openai.com/
- Original Repo: https://github.com/coreylynch/async-rl
"""
from __future__ import division, print_function, absolute_import
import threading
import random
import numpy as np
import time
from skimage.transform import resize
from skimage.color import rgb2gray
from collections import deque
import gym
import tensorflow as tf
import tflearn
# Fix for TF 0.12
# Fix for TF 0.12: bind whichever summary API the installed TensorFlow provides.
try:
    # Newer (>= 0.12) tf.summary names.
    writer_summary = tf.summary.FileWriter
    merge_all_summaries = tf.summary.merge_all
    histogram_summary = tf.summary.histogram
    scalar_summary = tf.summary.scalar
except Exception:
    # Fall back to the pre-0.12 names.
    writer_summary = tf.train.SummaryWriter
    merge_all_summaries = tf.merge_all_summaries
    histogram_summary = tf.histogram_summary
    scalar_summary = tf.scalar_summary
# Change that value to test instead of train
testing = False
# Model path (to load when testing)
test_model_path = '/path/to/your/qlearning.tflearn.ckpt'
# Atari game to learn
# You can also try: 'Breakout-v0', 'Pong-v0', 'SpaceInvaders-v0', ...
game = 'MsPacman-v0'
# Number of parallel actor-learner threads
n_threads = 8
# =============================
# Training Parameters
# =============================
# Max training steps
TMAX = 80000000
# Current training step (global, incremented by every learner thread)
T = 0
# Consecutive screen frames stacked into one state
action_repeat = 4
# Async gradient update frequency of each learning thread
I_AsyncUpdate = 5
# Timestep interval at which the target network is reset to the online one
I_target = 40000
# Learning rate
learning_rate = 0.001
# Reward discount rate
gamma = 0.99
# Number of timesteps to anneal epsilon
anneal_epsilon_timesteps = 400000
# =============================
# Utils Parameters
# =============================
# Display or not gym environment screens
show_training = True
# Directory for storing tensorboard summaries
summary_dir = '/tmp/tflearn_logs/'
summary_interval = 100
checkpoint_path = 'qlearning.tflearn.ckpt'
checkpoint_interval = 2000
# Number of episodes to run gym evaluation
num_eval_episodes = 100
# =============================
# TFLearn Deep Q Network
# =============================
def build_dqn(num_actions, action_repeat):
    """Create the DQN graph: stacked-frame input -> two conv layers ->
    dense layer -> one Q-value per action.

    Returns the input placeholder and the Q-value output tensor.
    """
    state_input = tf.placeholder(tf.float32, [None, action_repeat, 84, 84])
    # The placeholder is [batch, frame, height, width]; conv_2d expects the
    # channel axis last, so move the frame axis into the channel position.
    nhwc = tf.transpose(state_input, [0, 2, 3, 1])
    conv1 = tflearn.conv_2d(nhwc, 32, 8, strides=4, activation='relu')
    conv2 = tflearn.conv_2d(conv1, 64, 4, strides=2, activation='relu')
    hidden = tflearn.fully_connected(conv2, 256, activation='relu')
    q_values = tflearn.fully_connected(hidden, num_actions)
    return state_input, q_values
# =============================
# ATARI Environment Wrapper
# =============================
class AtariEnvironment(object):
    """
    Thin wrapper around a gym Atari environment.

    Preprocesses raw screens and keeps a rolling buffer of the last
    action_repeat - 1 frames from which the agent state is assembled.
    """

    def __init__(self, gym_env, action_repeat):
        self.env = gym_env
        self.action_repeat = action_repeat
        # Indices of the actions exposed by the underlying gym env
        # (LEFT, RIGHT, NOOP, ...).
        self.gym_actions = range(gym_env.action_space.n)
        # Holds the previous action_repeat - 1 preprocessed frames so that
        # states of shape [action_repeat, 84, 84] can be assembled.
        self.state_buffer = deque()

    def get_initial_state(self):
        """Reset the game and return the initial stacked-frame state."""
        self.state_buffer = deque()
        first_frame = self.get_preprocessed_frame(self.env.reset())
        initial_state = np.stack([first_frame for _ in range(self.action_repeat)], axis=0)
        for _ in range(self.action_repeat - 1):
            self.state_buffer.append(first_frame)
        return initial_state

    def get_preprocessed_frame(self, observation):
        """
        Convert a raw 210x160 RGB Atari frame to grayscale, rescale it to
        110x84, then crop the central 84x84 region (you can crop top/bottom
        differently depending on the game).
        """
        return resize(rgb2gray(observation), (110, 84))[13:110 - 13, :]

    def step(self, action_index):
        """
        Execute one action in the gym environment and return
        (state, reward, terminal, info), where the state is the previous
        action_repeat - 1 frames concatenated with the new one.
        """
        raw_frame, reward, terminal, info = self.env.step(self.gym_actions[action_index])
        new_frame = self.get_preprocessed_frame(raw_frame)
        previous_frames = np.array(self.state_buffer)
        next_state = np.empty((self.action_repeat, 84, 84))
        next_state[:self.action_repeat - 1, :] = previous_frames
        next_state[self.action_repeat - 1] = new_frame
        # Slide the window: drop the oldest frame, keep the newest.
        self.state_buffer.popleft()
        self.state_buffer.append(new_frame)
        return next_state, reward, terminal, info
# =============================
# 1-step Q-Learning
# =============================
def sample_final_epsilon():
    """
    Sample a per-thread final epsilon value to anneal towards.

    The candidate values and their probabilities come from section 5.1 of
    http://arxiv.org/pdf/1602.01783v1.pdf.
    """
    candidates = np.array([.1, .01, .5])
    weights = np.array([0.4, 0.3, 0.3])
    return np.random.choice(candidates, 1, p=list(weights))[0]
def actor_learner_thread(thread_id, env, session, graph_ops, num_actions,
                         summary_ops, saver):
    """
    Actor-learner thread implementing asynchronous one-step Q-learning, as specified
    in algorithm 1 here: http://arxiv.org/pdf/1602.01783v1.pdf.
    """
    # T is the global step counter shared (without locking) by all threads.
    global TMAX, T
    # Unpack graph ops
    s = graph_ops["s"]
    q_values = graph_ops["q_values"]
    st = graph_ops["st"]
    target_q_values = graph_ops["target_q_values"]
    reset_target_network_params = graph_ops["reset_target_network_params"]
    a = graph_ops["a"]
    y = graph_ops["y"]
    grad_update = graph_ops["grad_update"]
    summary_placeholders, assign_ops, summary_op = summary_ops
    # Wrap env with AtariEnvironment helper class
    env = AtariEnvironment(gym_env=env,
                           action_repeat=action_repeat)
    # Accumulators for the asynchronous gradient update
    s_batch = []
    a_batch = []
    y_batch = []
    # Each thread anneals epsilon towards its own sampled final value
    final_epsilon = sample_final_epsilon()
    initial_epsilon = 1.0
    epsilon = 1.0
    print("Thread " + str(thread_id) + " - Final epsilon: " + str(final_epsilon))
    # Stagger thread start-up so the learners desynchronise
    time.sleep(3*thread_id)
    t = 0
    while T < TMAX:
        # Get initial game observation
        s_t = env.get_initial_state()
        terminal = False
        # Set up per-episode counters
        ep_reward = 0
        episode_ave_max_q = 0
        ep_t = 0
        while True:
            # Forward the deep q network, get Q(s,a) values
            readout_t = q_values.eval(session=session, feed_dict={s: [s_t]})
            # Choose next action based on e-greedy policy
            a_t = np.zeros([num_actions])
            if random.random() <= epsilon:
                action_index = random.randrange(num_actions)
            else:
                action_index = np.argmax(readout_t)
            a_t[action_index] = 1
            # Scale down epsilon linearly towards the thread's final value
            if epsilon > final_epsilon:
                epsilon -= (initial_epsilon - final_epsilon) / anneal_epsilon_timesteps
            # Gym excecutes action in game environment on behalf of actor-learner
            s_t1, r_t, terminal, info = env.step(action_index)
            # Accumulate gradients: bootstrap target from the target network
            readout_j1 = target_q_values.eval(session = session,
                        feed_dict = {st : [s_t1]})
            # Rewards are clipped to [-1, 1] as in the paper
            clipped_r_t = np.clip(r_t, -1, 1)
            if terminal:
                y_batch.append(clipped_r_t)
            else:
                y_batch.append(clipped_r_t + gamma * np.max(readout_j1))
            a_batch.append(a_t)
            s_batch.append(s_t)
            # Update the state and counters
            s_t = s_t1
            T += 1
            t += 1
            ep_t += 1
            ep_reward += r_t
            episode_ave_max_q += np.max(readout_t)
            # Optionally update target network
            if T % I_target == 0:
                session.run(reset_target_network_params)
            # Optionally update online network
            if t % I_AsyncUpdate == 0 or terminal:
                if s_batch:
                    session.run(grad_update, feed_dict={y: y_batch,
                                                        a: a_batch,
                                                        s: s_batch})
                # Clear gradients
                s_batch = []
                a_batch = []
                y_batch = []
            # Save model progress
            if t % checkpoint_interval == 0:
                saver.save(session, "qlearning.ckpt", global_step=t)
            # Print end of episode stats
            if terminal:
                stats = [ep_reward, episode_ave_max_q/float(ep_t), epsilon]
                for i in range(len(stats)):
                    session.run(assign_ops[i],
                                {summary_placeholders[i]: float(stats[i])})
                print("| Thread %.2i" % int(thread_id), "| Step", t,
                      "| Reward: %.2i" % int(ep_reward), " Qmax: %.4f" %
                      (episode_ave_max_q/float(ep_t)),
                      " Epsilon: %.5f" % epsilon, " Epsilon progress: %.6f" %
                      (t/float(anneal_epsilon_timesteps)))
                break
def build_graph(num_actions):
    """Build the online and target DQNs plus the training/update ops.

    Returns a dict of the graph handles used by the learner threads.
    """
    # Create shared deep q network
    s, q_network = build_dqn(num_actions=num_actions,
                             action_repeat=action_repeat)
    network_params = tf.trainable_variables()
    q_values = q_network
    # Create shared target network
    st, target_q_network = build_dqn(num_actions=num_actions,
                                     action_repeat=action_repeat)
    # Only the variables created after the online network belong to the target.
    target_network_params = tf.trainable_variables()[len(network_params):]
    target_q_values = target_q_network
    # Op for periodically updating target network with online network weights
    reset_target_network_params = \
        [target_network_params[i].assign(network_params[i])
         for i in range(len(target_network_params))]
    # Define cost and gradient update op
    a = tf.placeholder("float", [None, num_actions])
    y = tf.placeholder("float", [None])
    # NOTE(review): tf.mul was removed in TF >= 1.0 (renamed tf.multiply);
    # this line only runs on the old TF 0.12 API this file also supports.
    action_q_values = tf.reduce_sum(tf.mul(q_values, a), reduction_indices=1)
    cost = tflearn.mean_square(action_q_values, y)
    optimizer = tf.train.RMSPropOptimizer(learning_rate)
    grad_update = optimizer.minimize(cost, var_list=network_params)
    graph_ops = {"s": s,
                 "q_values": q_values,
                 "st": st,
                 "target_q_values": target_q_values,
                 "reset_target_network_params": reset_target_network_params,
                 "a": a,
                 "y": y,
                 "grad_update": grad_update}
    return graph_ops
# Set up some episode summary ops to visualize on tensorboard.
def build_summaries():
    """Create the tensorboard summary variables plus placeholder/assign ops.

    Learner threads must not add ops to the graph, so every summary variable
    is updated through a pre-built assign op fed via its placeholder.
    """
    reward_var = tf.Variable(0.)
    scalar_summary("Reward", reward_var)
    qmax_var = tf.Variable(0.)
    scalar_summary("Qmax Value", qmax_var)
    epsilon_var = tf.Variable(0.)
    scalar_summary("Epsilon", epsilon_var)
    summary_vars = [reward_var, qmax_var, epsilon_var]
    summary_placeholders = [tf.placeholder("float") for _ in summary_vars]
    assign_ops = [var.assign(ph)
                  for var, ph in zip(summary_vars, summary_placeholders)]
    summary_op = merge_all_summaries()
    return summary_placeholders, assign_ops, summary_op
def get_num_actions():
    """
    Return the number of possible actions for the configured Atari game.
    """
    # Ask gym for the action-space size of the configured game.
    return gym.make(game).action_space.n
def train(session, graph_ops, num_actions, saver):
    """
    Train a model.

    Spawns one gym environment and one actor-learner thread per n_threads,
    then renders the environments and writes tensorboard summaries from the
    main thread.
    """
    # Set up game environments (one per thread)
    envs = [gym.make(game) for i in range(n_threads)]
    summary_ops = build_summaries()
    summary_op = summary_ops[-1]
    # Initialize variables
    session.run(tf.initialize_all_variables())
    writer = writer_summary(summary_dir + "/qlearning", session.graph)
    # Initialize target network weights
    session.run(graph_ops["reset_target_network_params"])
    # Start n_threads actor-learner training threads
    actor_learner_threads = \
        [threading.Thread(target=actor_learner_thread,
                          args=(thread_id, envs[thread_id], session,
                                graph_ops, num_actions, summary_ops, saver))
         for thread_id in range(n_threads)]
    for t in actor_learner_threads:
        t.start()
        time.sleep(0.01)
    # Show the agents training and write summary statistics
    last_summary_time = 0
    while True:
        if show_training:
            for env in envs:
                env.render()
        now = time.time()
        if now - last_summary_time > summary_interval:
            summary_str = session.run(summary_op)
            writer.add_summary(summary_str, float(T))
            last_summary_time = now
        # NOTE(review): joining inside this endless `while True` means the loop
        # keeps running (and rendering) even after all threads finish — the
        # join was likely meant to sit outside the loop; confirm intent.
        for t in actor_learner_threads:
            t.join()
def evaluation(session, graph_ops, saver):
    """
    Evaluate a trained model: restore the checkpoint, then run
    num_eval_episodes greedy episodes while recording with the gym monitor.
    """
    saver.restore(session, test_model_path)
    print("Restored model weights from ", test_model_path)
    monitor_env = gym.make(game)
    monitor_env.monitor.start("qlearning/eval")
    # Unpack graph ops
    s = graph_ops["s"]
    q_values = graph_ops["q_values"]
    # Wrap env with AtariEnvironment helper class
    env = AtariEnvironment(gym_env=monitor_env,
                           action_repeat=action_repeat)
    # Fix: range() instead of the Python-2-only xrange(); the module already
    # targets Python 3 (see the __future__ imports at the top of the file).
    for i_episode in range(num_eval_episodes):
        s_t = env.get_initial_state()
        ep_reward = 0
        terminal = False
        while not terminal:
            monitor_env.render()
            # Greedy action selection (no exploration during evaluation).
            readout_t = q_values.eval(session=session, feed_dict={s: [s_t]})
            action_index = np.argmax(readout_t)
            s_t1, r_t, terminal, info = env.step(action_index)
            s_t = s_t1
            ep_reward += r_t
        print(ep_reward)
    monitor_env.monitor.close()
def main(_):
    """Entry point: build the graph, then either train or evaluate
    depending on the module-level `testing` flag."""
    with tf.Session() as sess:
        n_actions = get_num_actions()
        ops = build_graph(n_actions)
        # Keep only a handful of recent checkpoints around.
        saver = tf.train.Saver(max_to_keep=5)
        if testing:
            evaluation(sess, ops, saver)
        else:
            train(sess, ops, n_actions, saver)
if __name__ == "__main__":
    tf.app.run()
| [
"aymeric.damien@gmail.com"
] | aymeric.damien@gmail.com |
cc3c554f8ce1376a8f456651ba4ab924ec6f785d | 97fd2d461e6361ebb91f940ab76d64a430826e6e | /modules/udemy_class.py | c5243337a1b2c08e7b7c2a1025e55fc3cf6ac0c1 | [] | no_license | raishid/udemy_videos | 35f981a04692dcdaabf56209edbbe1acb08f297f | f1cd5664e05c80654d4c2e34d034b6885fc7e328 | refs/heads/main | 2023-04-10T23:15:10.085926 | 2021-04-30T17:19:40 | 2021-04-30T17:19:40 | 342,021,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,938 | py | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from time import sleep
import requests
from lxml import html
import json
class udemy:
    """Scrape a purchased Udemy course: resolve its id, list its curriculum
    and push each lecture's video URL into a downloader page via Selenium.

    SECURITY NOTE(review): the session cookie and the Bearer token below are
    hard-coded, account-specific credentials committed to source control —
    they should be moved to environment variables/config and rotated.
    """
    def __init__(self):
        # Request headers carrying a logged-in Udemy session (see note above).
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
            'cookie': '__cfduid=d36951e57c6a9c3c2d20de5960f121f361614185616; ud_cache_language=es; ud_cache_modern_browser=1; ud_cache_version=1; ud_cache_release=870ce6051b2a9e467f7d; ud_cache_marketplace_country=VE; ud_cache_campaign_code=KEEPGOING21; ud_cache_price_country=VE; ud_firstvisit=2021-02-24T16:53:36.254256+00:00:1lExPs:NzJxZ2trHFjuJ1MuW7JxrKgZDps; __udmy_2_v57r=68f2b4c1e2004e75aa45c91f7e47dd83; ud_cache_brand=VEes_ES; seen=1; __cfruid=28eacbef6fea32f847469d6bd0e3098a36ab5418-1614185616; __cf_bm=46b92f011329fc116841e055a8bae2f48e82a67a-1614185624-1800-AQuMB9Eut2uzcIqfCp0NNYqNWeQi2/G8NP9VI4EhD5MaShEK2rEAPMy7/aFCG/mo+eHUoIS/V7T0aB7Ixs9gXbvf7j8WrOUwaBRuGQmHgfobVokDy9A18i0s9P+WZV1CIpkwJ+6cYM3TPgW+IFK7ycVSz7FzfCQjQS7urCq5rgzS; EUCookieMessageShown=true; EUCookieMessageState=initial; __ssid=0e8afecb62febcb1ff7a50959fdca17; _gcl_au=1.1.1314726463.1614185640; blisspoint_fpc=8f73ac4b-98f0-4820-b70f-2ccc2891989c; _gid=GA1.2.1055655191.1614185641; _pxhd=9b9761c309952a3870fdb0de59302d7d8ef2ea12728a8741178a7fd1bc2de004:e789d3a1-76c0-11eb-a202-4f281c11097b; _rdt_uuid=1614185671216.61ebe44d-f3de-42e1-916b-94cf1b48eec4; IR_gbd=udemy.com; ki_r=; _pxvid=e789d3a1-76c0-11eb-a202-4f281c11097b; _fbp=fb.1.1614185674325.722870776; client_id=bd2565cb7b0c313f5e9bae44961e8db2; ki_t=1614185673187%3B1614185673187%3B1614185800296%3B1%3B4; _px2=eyJ1IjoiNDE4ZDdmNTAtNzZjMS0xMWViLTkzODUtNzFjYjA0Nzk0ODQxIiwidiI6ImU3ODlkM2ExLTc2YzAtMTFlYi1hMjAyLTRmMjgxYzExMDk3YiIsInQiOjE2MTQxODY3MDA0NjQsImgiOiJmZTU5MzExOGQzMzdkMzRmOWU4ODA0ZGQ4NTRlMTJhY2I2MTE2Y2JjNzQ1ZTlmZDg4OTBlZTI5MzRmYzdkYTA3In0=; _px3=b22660b47d5dd70a5221e880f09a53bb9d26546cd1d6b80d6d03c8159c2bc0ca:7KFjrGiT2756av0vfgX3k9+S63nsBTc8F/0s/eul9GgB2JCTR7Ce35MIv97ku6nEKETvgZC7jD1bJZHsauiZDw==:1000:LGUBTx5EmXzdJo6yguxwHApEnb/kapf3KeJsnu1QmQLBdo88qa6PQquMk7N5FQ/VdBB1O92nHW7SivNZNqkMswzkvZpUyiQ+hLuEJV6RGi6z41skmahMciXhJ6NH8UMgMT2bt0XxMaKXyfEZrB+kCy89M7TvRaOpDWQXF41V6xY=; ud_cache_user=106452026; 
ud_user_jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc19zdXBlcnVzZXIiOmZhbHNlLCJlbWFpbCI6ImZlZGVyaWNvZWxicm9kZXJAZ21haWwuY29tIiwiaWQiOjEwNjQ1MjAyNiwiZ3JvdXBfaWRzIjpbXX0.JvwD9yajVg70iFQxsh5Hmih68Y7jeQF3WTOk4NA3Xfg; ud_cache_logged_in=1; dj_session_id=ndfkfpcb6tr2vrqhe19qgduha69qsz9f; ud_credit_last_seen=None; ud_credit_unseen=0; access_token=WbUJmfwgdcxXSOEmPh1QGaw2L2p6LA1r7xcDPSlN; ud_last_auth_information="{\"suggested_user_name\": \"Federico Alexander David\"\054 \"suggested_user_email\": \"federicoelbroder@gmail.com\"\054 \"backend\": \"udemy-auth\"\054 \"suggested_user_avatar\": \"https://img-a.udemycdn.com/user/50x50/anonymous_3.png?QbHlvIKm8tii_fRvvIzLPsyKPisT0bmIbMZzsDYeM72G6jq18BmKYiWMknv558nSn4D32qfOiAgh3voZAdz8vIRUaVGr8bPvHNIxQKI056bRsdK2KVgBBps\"}:1lExdd:F6LKgZsepIMoPH79A37VEqtz7ns"; csrftoken=TtlwALATPx69nwvBsz6Avrsv4ok7RmVstrUEvK87T2C2vEKeW8AIR6lp1cliuU8x; ud_cache_device=desktop; _gat=1; eventing_session_id=0PaLyhvLQWiB2V6DsCwMdQ-1614188272376; IR_5420=1614186474944%7C0%7C1614185671373%7C%7C; IR_PI=06009827-76c1-11eb-ad2c-42010a246d2d%7C1614272874944; _ga_7YMFEFLR6Q=GS1.1.1614185640.1.1.1614186474.0; _ga=GA1.1.70325949.1614185641; stc111655=tsa:1614185670904.553375530.6155357.8550080862580169.9:20210224173755|env:1%7C20210327165430%7C20210224173755%7C7%7C1014624:20220224170755|uid:1614185670904.1671943636.798306.111655.1489363879.:20220224170755|srchist:1014624%3A1%3A20210327165430:20220224170755; evi="SlFYNkxYDm4DQRJ5TFgObkcSCXtbWkR6HVFdY1RTCGATQR54VkBPNxMFSmNUVEB6CV8JN0xYRDFMDg=="; ud_rule_vars="eJyFjssOgjAURH-FdKuYvov9labNpdxio7GxFDaEf5corl3NYnLmzEoqlBErDn5JU6q5WN1F3svAkFMq0SgAqcKVRYPSDEMnbMj5npDYhqyOPGCqvuBrxj0HqOj2whFOOWspb7lsmLZKWKEvWkhl9IlSS6kj5-aAa57DzdcCMabgpzyXgH6BkqB_HGu5jPBM4QvFVHbq8_aPkEvVCfkTbmR7A999SA8=:1lExdn:Vm0B-7TD3ELEnljZ2mTTQwPN70U"'
        }
        # Shared requests session plus a Chrome driver pointed at the
        # third-party downloader page the videos are pushed into.
        self.s = requests.session()
        self.opts = webdriver.ChromeOptions()
        self.opts.add_argument("user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36")
        driver = webdriver.Chrome('./driver/chromedriver.exe', chrome_options=self.opts)
        driver.maximize_window()
        driver.get('https://udemy.linkhide.xyz')
        self.driver = driver
    def Get_curso(self, url_curso):
        """Return the course id scraped from the course landing page."""
        # Obtain the course id from the <body data-clp-course-id=...> attribute
        self.s.headers = self.headers
        r = self.s.get(url_curso)
        soup = html.fromstring(r.text)
        id_curso = soup.xpath('//body/@data-clp-course-id')[0]
        return id_curso
    def Get_all_videos(self, id_curso):
        """Return the curriculum item list for the course via the Udemy API."""
        # Bearer token is hard-coded — see the class-level security note.
        self.s.headers['authorization'] = 'Bearer WbUJmfwgdcxXSOEmPh1QGaw2L2p6LA1r7xcDPSlN'
        r = self.s.get(f'https://www.udemy.com/api-2.0/courses/{id_curso}/subscriber-curriculum-items/?page_size=1400&fields[lecture]=title,object_index,is_published,sort_order,created,asset,supplementary_assets,is_free&fields[quiz]=title,object_index,is_published,sort_order,type&fields[practice]=title,object_index,is_published,sort_order&fields[chapter]=title,object_index,is_published,sort_order&fields[asset]=title,filename,asset_type,status,time_estimation,is_external&caching_intent=True')
        data = r.json()
        capitulos_curso = data['results']
        return capitulos_curso
    def Descargar_video(self, id_curso, capitulo):
        """For a video lecture, fetch its stream URL and submit it (with the
        numbered title) to the downloader form through Selenium."""
        if capitulo['_class'] == 'lecture' and capitulo['asset']['asset_type'] == 'Video':
            print('Obteniendo capitulo', capitulo['title'])
            id = capitulo['id']
            r = self.s.get(f'https://www.udemy.com/api-2.0/users/me/subscribed-courses/{id_curso}/lectures/{id}/?fields[lecture]=asset,description,download_url,is_free,last_watched_second&fields[asset]=asset_type,length,media_license_token,media_sources,captions,thumbnail_sprite,slides,slide_urls,download_urls&q=0.27194179700788634')
            data_cap = r.json()
            titulo = capitulo['title']
            chapter = capitulo['object_index']
            # Prefix the title with its position in the course
            titulo = str(chapter)+' - '+titulo
            url = data_cap['asset']['media_sources'][0]['src']
            # Fill the downloader form: clear fields, type values, submit
            self.driver.find_element_by_xpath('//input[@name="titulo"]').clear()
            self.driver.find_element_by_xpath('//input[@name="url_udemy"]').clear()
            sleep(1)
            self.driver.find_element_by_xpath('//input[@name="titulo"]').send_keys(titulo)
            self.driver.find_element_by_xpath('//input[@name="url_udemy"]').send_keys(url)
            self.driver.find_element_by_xpath('//input[@id="down_here"]').click()
| [
"67575679+raishid@users.noreply.github.com"
] | 67575679+raishid@users.noreply.github.com |
f1b816434823e5ff322719c6e792a034ea4f4c35 | 177bb6567b9564b1feb1d6e25ab1e0d61adf8770 | /ResidualLoss/CNN_l2_prob_far_dist.py | dc834cb205256111664a4feebdedd1accd470493 | [] | no_license | fzdy1914/NUS-FYP | 4ae9b299cf1cb72a01b371998781b9cec333d3f0 | cb7195a8b025eb8ab2becd26886551479796f930 | refs/heads/master | 2023-04-16T05:08:12.529777 | 2021-04-05T06:56:15 | 2021-04-05T06:56:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,487 | py | import random
import sys
from torch.autograd import Variable
from torch import optim
import numpy as np
from torch.backends import cudnn
import torch.nn.functional as F
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler
from ResidualLoss.dataset import cifar10_data_loader_test, cifar10_data_loader_train, cifar10_dataset_train
from ResidualLoss.model import CIFAR_17
class Logger(object):
    """Tee for stdout: every write goes to the real terminal and to
    ./log/<script-name>.txt (appended)."""

    def __init__(self):
        self.terminal = sys.stdout
        # Log file named after the running script (sys.argv[0]) without extension.
        log_loc = "./log/%s.txt" % sys.argv[0].split("/")[-1].split(".")[0]
        self.log = open(log_loc, "a")

    def write(self, message):
        """Write *message* to both sinks, flushing the file immediately."""
        self.terminal.write(message)
        self.log.write(message)
        self.log.flush()

    def flush(self):
        """Flush both sinks.

        Fix: this used to be a no-op, so explicit flush() calls (e.g. from
        print(..., flush=True) or interpreter shutdown) never reached the
        underlying streams.
        """
        self.terminal.flush()
        self.log.flush()
# Mirror everything printed from here on into the per-script log file.
sys.stdout = Logger()
def setup_seed(seed):
    """Seed every RNG in use (python, numpy, torch CPU and CUDA) and make
    cuDNN deterministic, so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.deterministic = True
# ---- module-level experiment setup (runs at import time) ----
setup_seed(1914)
# hyper-parameters
num_epochs = 200
batch_size = 100
evaluation_batch_size = 2500
learning_rate = 0.0001
# ref_model keeps the pretrained weights frozen (eval mode); model is trained.
ref_model = CIFAR_17().cuda()
model = CIFAR_17().cuda()
# pretrained checkpoint loaded from the working directory
state_dict = torch.load('./CIFAR-17-1.pt')
ref_model.eval()
model.train()
# optimizer = optim.Adam([
#     {'params': model.conv1.parameters()},
#     {'params': model.conv2.parameters()},
#     {'params': model.conv3.parameters()}
# ], lr=learning_rate, weight_decay=1e-5)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
train_dataset = cifar10_dataset_train()
train_data_length = len(train_dataset)
# weighted sampler with replacement; weights are overwritten below
sampler = WeightedRandomSampler([1] * train_data_length, num_samples=train_data_length, replacement=True)
train_data_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)
evaluation_data_loader = cifar10_data_loader_train(batch_size=evaluation_batch_size, shuffle=False)
test_data_loader = cifar10_data_loader_test(batch_size)
# zero out the sampling probability of the indices listed in the ignore file,
# so those training examples are never drawn
prob = torch.ones(len(train_dataset), dtype=torch.float64)
ignore_idx_lst = torch.load('CD/ignore_idx_lst.pt')
for idx in ignore_idx_lst:
    prob[idx] = 0
sampler.weights = prob
print(prob.sum())
def residual_train():
    """Train ``model`` for ``num_epochs`` epochs.

    Each epoch first evaluates accuracy on the (unshuffled) training set in
    eval mode, then runs one pass of SGD updates over ``train_data_loader``
    (which uses the weighted sampler, so ignored indices are never drawn).
    Prints per-epoch loss/accuracy and the averages over all epochs.
    """
    total_correct_sum = 0
    total_classification_loss = 0
    for epoch in range(num_epochs):
        # --- evaluation pass (no gradients, eval mode) ---
        total_correct = 0
        model.eval()
        with torch.no_grad():
            for data, target in evaluation_data_loader:
                data, target = data.cuda(), target.cuda()
                output = model(data)
                pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
                total_correct += pred.eq(target.view_as(pred)).sum().item()
        # --- training pass ---
        model.train()
        total_train_loss = 0
        for data, target in train_data_loader:
            data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            # model.features() returns (output, intermediate features);
            # only the output is used here
            output, features = model.features(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()
            total_train_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
        total_train_loss /= train_data_length
        total_correct_sum += total_correct
        total_classification_loss += total_train_loss
        print('epoch [{}/{}], loss:{:.4f} Accuracy: {}/{}'.format(epoch + 1, num_epochs, total_train_loss, total_correct, train_data_length))
    print("average correct:", total_correct_sum / num_epochs)
    print("average loss:", total_classification_loss / num_epochs)
def test():
    """Evaluate the current model on the test set and print loss/accuracy."""
    model.eval()
    loss_sum = 0
    n_correct = 0
    with torch.no_grad():
        for inputs, labels in test_data_loader:
            inputs, labels = inputs.cuda(), labels.cuda()
            logits = model(inputs)
            # accumulate the summed (not averaged) batch loss
            loss_sum += F.nll_loss(logits, labels, reduction='sum').item()
            predictions = logits.argmax(dim=1)
            n_correct += predictions.eq(labels).sum().item()
    n_total = len(test_data_loader.dataset)
    loss_sum /= n_total
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        loss_sum, n_correct, n_total,
        100. * n_correct / n_total))
# 1000, 500, 200, 100, 75, 50, 25, 10, 5, 1, 0.5,
if __name__ == '__main__':
    # initialize both models from the same pretrained checkpoint, train,
    # then save the fine-tuned weights
    ref_model.load_state_dict(state_dict)
    model.load_state_dict(state_dict)
    residual_train()
    loc = "./CNN-l2-far-dist/non-freeze.pt"
    torch.save(model.state_dict(), loc)
| [
"1229983126@qq.com"
] | 1229983126@qq.com |
e53ed5542ff0d625dbd54246d67d0dd7adab382d | 70d86ede49185e228c308442aa4781f1188dbc96 | /colorpicker.py | 0a814edda62082bb878e1e1b3ec52e94aab31799 | [
"MIT"
] | permissive | codacy-badger/MS-Paint-shit-edition | 3a42b29483d6233f6f857ea1250ad1cb2f5f460b | 666b0c3ac7ea98b359173e6d236c614face3981a | refs/heads/master | 2020-06-26T01:40:16.064940 | 2019-07-29T16:03:59 | 2019-07-29T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,273 | py | import pygame
import numpy as np
import json
import Slider
def loadSettings():
    """Read and return the JSON configuration from ``settings.json``."""
    with open('settings.json') as config_file:
        return json.load(config_file)
# module-level configuration, loaded once at import time
settings = loadSettings()
width = settings['WindowSize']['width']    # window width in pixels
height = settings['WindowSize']['height']  # window height in pixels; also the side of the SV square
threadsAllowed = settings['insider']['allowThreads']  # fill colorfield with worker threads?
# shared RGB pixel buffer, filled by thread1/thread2
# NOTE(review): np.int is removed in NumPy >= 1.24; may need plain int — confirm NumPy version
colorfield = np.zeros((width, height, 3), dtype=np.int)
scanAnimation = settings['insider']['scanAnimation']  # if True, don't wait for the slider thread
def rgb2hsv(r, g, b):
    """Convert RGB (each 0-255) to HSV (hue in degrees, s/v in percent)."""
    rf, gf, bf = r / 255, g / 255, b / 255
    cmax = max(rf, gf, bf)
    cmin = min(rf, gf, bf)
    delta = cmax - cmin
    # hue: undefined for greys (delta == 0), by convention 0
    if delta == 0.0:
        h = 0.0
    elif cmax == rf:
        h = 60 * ((gf - bf) / delta % 6)
    elif cmax == gf:
        h = 60 * ((bf - rf) / delta + 2)
    elif cmax == bf:
        h = 60 * ((rf - gf) / delta + 4)
    # saturation: 0 for black, otherwise chroma relative to value
    s = 0.0 if cmax == 0.0 else delta / cmax * 100
    v = cmax * 100
    return (h, s, v)
def hsv2rgb(h, s, v):
    """Convert HSV (hue in degrees, s/v in percent) to an (r, g, b) int tuple."""
    s = s / 100
    v = v / 100
    sector = h / 60
    sector_floor = int(sector)
    hi = sector_floor % 6          # which of the six hue sectors
    f = sector - sector_floor      # fractional position within the sector
    p = v * (1 - s)
    q = v * (1 - f * s)
    t = v * (1 - (1 - f) * s)
    # per-sector channel assignment, same table as the classic algorithm
    sector_table = (
        (v, t, p),
        (q, v, p),
        (p, v, t),
        (p, q, v),
        (t, p, v),
        (v, p, q),
    )
    r, g, b = sector_table[hi]
    return (int(round(r * 255)), int(round(g * 255)), int(round(b * 255)))
def noThreads():
    """Fill the colour field synchronously on the main thread."""
    global colorfield
    pygame.init()
    slider_w = width - height
    window = pygame.display.set_mode((width, height))
    cursor = np.array([0, 0], dtype=np.int)
    pressed = False
    hue = 0
    rows = np.arange(height)
    slider_cols = np.arange(slider_w)
    # paint the SV square, then the hue slider strip
    thread1(rows, hue)
    thread2(rows, slider_cols)
    colorfield.resize((width, height, 3))
#
# $$\ $$\ $$\ $$\
# $$ | $$ | $$ |\__|
# $$$$$$\ $$$$$$$\ $$$$$$\ $$$$$$\ $$$$$$\ $$$$$$$ |$$\ $$$$$$$\ $$$$$$\
# \_$$ _| $$ __$$\ $$ __$$\ $$ __$$\ \____$$\ $$ __$$ |$$ |$$ __$$\ $$ __$$\
# $$ | $$ | $$ |$$ | \__|$$$$$$$$ | $$$$$$$ |$$ / $$ |$$ |$$ | $$ |$$ / $$ |
# $$ |$$\ $$ | $$ |$$ | $$ ____|$$ __$$ |$$ | $$ |$$ |$$ | $$ |$$ | $$ |
# \$$$$ |$$ | $$ |$$ | \$$$$$$$\ \$$$$$$$ |\$$$$$$$ |$$ |$$ | $$ |\$$$$$$$ |
# \____/ \__| \__|\__| \_______| \_______| \_______|\__|\__| \__| \____$$ |
# $$\ $$ |
# \$$$$$$ |
# \______/
#
def thread1(a, h):
    """Paint the saturation/value square for hue ``h`` into ``colorfield``.

    ``a`` is the range of pixel coordinates (0..height-1) for both axes.
    """
    global colorfield
    for y in a:
        for x in a:
            # saturation grows with y, value shrinks with x
            r, g, b = hsv2rgb(h, y / height * 100, (height - x) / height * 100)
            colorfield[y, x, 0] = r
            colorfield[y, x, 1] = g
            colorfield[y, x, 2] = b
def thread2(a, s):
    """Paint the vertical hue slider strip into ``colorfield``.

    ``a`` iterates the vertical pixel range, ``s`` the strip's column offsets
    (shifted right of the SV square by ``height``).
    """
    global colorfield
    for y in a:
        # fully saturated, full-value colour for this hue row
        color = hsv2rgb(y / height * 360, 100, 100)
        for x in s + height:
            for channel in (0, 1, 2):
                colorfield[x, y, channel] = color[channel]
def Threads():
    """Fill the colour field using two worker threads (square + slider)."""
    from threading import Thread
    global colorfield
    slider_w = width - height
    hue = 0
    rows = np.arange(height)
    slider_cols = np.arange(slider_w)
    square_worker = Thread(target=thread1, args=(rows, hue))
    slider_worker = Thread(target=thread2, args=(rows, slider_cols))
    square_worker.start()
    slider_worker.start()
    # with scanAnimation the slider may keep painting while the UI starts up
    if not scanAnimation:
        slider_worker.join()
    square_worker.join()
    colorfield.resize((width, height, 3))
def main():
    """Run the colour-picker event loop.

    Builds the colour field (threaded if the settings allow it), then lets the
    user pick a hue from the slider strip and saturation/value from the
    square, printing the selected RGB triple every frame.
    """
    pygame.init()
    if threadsAllowed:
        Threads()
    else:
        noThreads()
    screen = pygame.display.set_mode((width, height))
    colorRGB = [128, 128, 128]
    colorHSV = list(rgb2hsv(colorRGB[0], colorRGB[1], colorRGB[2]))
    mousePos = np.array([0, 0], dtype=np.int)
    mouseDown = False
    while True:
        mouseClick = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return
            elif event.type == pygame.MOUSEMOTION:
                mousePos = event.pos
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # BUG FIX: pygame numbers mouse buttons from 1 (1 = left
                # button); the original compared against 0, so clicks were
                # never registered and the picker could not change colour.
                if event.button == 1:
                    mouseDown = True
                    mouseClick = True
            elif event.type == pygame.MOUSEBUTTONUP:
                if event.button == 1:
                    mouseDown = False
        if mousePos[0] >= height and mouseClick:
            # click in the slider strip: pick a new hue
            # NOTE(review): hue is derived from the x coordinate, which
            # exceeds 360 degrees inside the strip region — verify the
            # intended mapping (the strip varies along y, see thread2).
            colorHSV[0] = mousePos[0] / height * 360
        elif mousePos[0] < height and mouseClick:
            # click in the square: pick saturation (x) and value (y)
            colorHSV[1] = mousePos[0] / height * 100
            colorHSV[2] = mousePos[1] / height * 100
        colorRGB = list(hsv2rgb(colorHSV[0], colorHSV[1], colorHSV[2]))
        print(f"{colorRGB[0]},{colorRGB[1]},{colorRGB[2]}")
        # print(colorRGB)
        pygame.surfarray.blit_array(screen, colorfield)
        pygame.display.flip()


main()
# from threading import Thread
# import pygame
# import numpy as np
# import json
# import Slider
# from timeit import default_timer as timer
# def loadSettings():
# with open('settings.json') as settingsfile:
# return json.load(settingsfile)
# settings = loadSettings()
# width = settings['WindowSize']['width']
# height = settings['WindowSize']['height']
# def rgb2hsv(r, g, b):
# cmax = max(r, g, b) / 255
# cmin = min(r, g, b) / 255
# delta = cmax - cmin
# if delta == 0.0:
# h = 0.0
# elif cmax == r / 255:
# h = 60 * ((g - b) / 255 / delta % 6)
# elif cmax == g / 255:
# h = 60 * ((b - r) / 255 / delta + 2)
# elif cmax == b / 255:
# h = 60 * ((r - g) / 255 / delta + 4)
# if cmax == 0.0:
# s = 0.0
# elif cmax != 0.0:
# s = delta / cmax * 100
# v = cmax * 100
# return (h, s, v)
# def hsv2rgb(h, s, v):
# h = h / 1
# s = s / 100
# v = v / 100
# h60 = h / 60
# h60f = int(h60)
# hi = int(h60f) % 6
# f = h60 - h60f
# p = v * (1 - s)
# q = v * (1 - f * s)
# t = v * (1 - (1 - f) * s)
# if hi == 0:
# r, g, b = v, t, p
# elif hi == 1:
# r, g, b = q, v, p
# elif hi == 2:
# r, g, b = p, v, t
# elif hi == 3:
# r, g, b = p, q, v
# elif hi == 4:
# r, g, b = t, p, v
# elif hi == 5:
# r, g, b = v, p, q
# r, g, b = r * 255, g * 255, b * 255
# return (int(round(r)), int(round(g)), int(round(b)))
# colorfield = np.zeros((width, height, 3), dtype=np.int)
# def thread1(a, h, y):
# global colorfield
# for x in a:
# rgb = hsv2rgb(h, y/height*100, (height-x)/height*100)
# for i in [0, 1, 2]:
# colorfield[y, x, i] = rgb[i]
# def thread2(y, s):
# global colorfield
# rgb = hsv2rgb(y, 100, 100)
# for x in s + height:
# for i in [0, 1, 2]:
# colorfield[x, y, i] = rgb[i]
# def main():
# global colorfield
# pygame.init()
# sliderWidth = width - height
# screen = pygame.display.set_mode((width, height))
# mousePos = np.array([0, 0], dtype=np.int)
# mouseDown = False
# start = timer()
# h = 0
# a = np.arange(height)
# threadsgroup = [[]]
# for y in a:
# if y == height/10:
# threadsgroup.append([])
# if y == height/10*2:
# threadsgroup.append([])
# elif y == height/10*3:
# threadsgroup.append([])
# elif y == height/10*4:
# threadsgroup.append([])
# elif y == height/2:
# threadsgroup.append([])
# elif y == height/10*6:
# threadsgroup.append([])
# elif y == height/10*7:
# threadsgroup.append([])
# elif y == height/10*8:
# threadsgroup.append([])
# elif y == height/10*9:
# threadsgroup.append([])
# elif y == height:
# threadsgroup.append([])
# threadsgroup[len(threadsgroup) -
# 1].append(Thread(target=thread1, args=(a, h, y)))
# s = np.arange(sliderWidth)
# for y in a:
# if y == height/10:
# threadsgroup.append([])
# elif y == height/10*2:
# threadsgroup.append([])
# elif y == height/10*3:
# threadsgroup.append([])
# elif y == height/10*4:
# threadsgroup.append([])
# elif y == height/2:
# threadsgroup.append([])
# elif y == height/10*6:
# threadsgroup.append([])
# elif y == height/10*7:
# threadsgroup.append([])
# elif y == height/10*8:
# threadsgroup.append([])
# elif y == height/10*9:
# threadsgroup.append([])
# threadsgroup[len(threadsgroup) -
# 1].append(Thread(target=thread2, args=(y, s)))
# count = 0
# for threads in threadsgroup:
# for thread in threads:
# count += 1
# thread.start()
# colorfield.resize((width, height, 3))
# pygame.surfarray.blit_array(screen, colorfield)
# duration = timer() - start
# print(duration)
# for threads in threadsgroup:
# for thread in threads:
# count += 1
# thread._stop()
# s = Slider.Slider(0, 360, screen)
# while True:
# mouseClick = False
# for event in pygame.event.get():
# if event.type == pygame.QUIT:
# pygame.quit()
# return
# elif event.type == pygame.MOUSEMOTION:
# mousePos = event.pos
# elif event.type == pygame.MOUSEBUTTONDOWN:
# if event.button == 0:
# mouseDown = True
# mouseClick = True
# elif event.type == pygame.MOUSEBUTTONUP:
# if event.button == 0:
# mouseDown = False
# Slider.Slider.update(mousePos, mouseDown, mouseClick)
# Slider.Slider.showAll()
# pygame.surfarray.blit_array(screen, colorfield)
# pygame.display.flip()
# if __name__ == "__main__":
# main()
| [
"edu.silvan.kohler@gmail.com"
] | edu.silvan.kohler@gmail.com |
1d775b49368f6bf5bc1295ea51f84d6fcc27a0c9 | 7265af084e9a69b45ea8367f53fd91582d6c2d50 | /eelbrain/tests/test_data.py | d7da51188808c19dbe96345e478525a11c255c60 | [] | no_license | phoebegaston/Eelbrain | b2c6a51d4fb249b9252af3814683c659b9df2965 | b5b2ee7374a644cd9ea7b60bed86510ae3d5cc15 | refs/heads/master | 2020-12-31T05:41:30.784904 | 2015-04-26T11:38:01 | 2015-04-26T12:00:37 | 34,686,112 | 0 | 0 | null | 2015-04-27T19:17:59 | 2015-04-27T19:17:59 | null | UTF-8 | Python | false | false | 29,331 | py | # Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
from itertools import izip, product
import os
import cPickle as pickle
import shutil
from string import ascii_lowercase
import tempfile
import mne
from nose.tools import (eq_, ok_, assert_almost_equal, assert_is_instance,
assert_raises)
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal)
from eelbrain import (datasets, load, Var, Factor, NDVar, Dataset, Celltable,
align, align1, combine)
from eelbrain._data_obj import asvar, Categorial, SourceSpace, UTS
from eelbrain._stats.stats import rms
from eelbrain._utils.testing import (assert_dataobj_equal, assert_dataset_equal,
assert_source_space_equal)
def test_print():
"Run the string representation methods"
ds = datasets.get_uts()
print ds
print repr(ds)
A = ds['A']
print A
print repr(A)
Y = ds['Y']
print Y
print repr(Y)
Ynd = ds['uts']
print Ynd
print repr(Ynd)
def test_aggregate():
    "Test aggregation methods"
    ds = datasets.get_uts()
    # don't handle inconsistencies silently
    assert_raises(ValueError, ds.aggregate, 'A%B')
    dsa = ds.aggregate('A%B', drop_bad=True)
    assert_array_equal(dsa['n'], [15, 15, 15, 15])
    idx1 = ds.eval("logical_and(A=='a0', B=='b0')")
    # the aggregated cell mean equals the mean of the raw cases in that cell
    eq_(dsa['Y', 0], ds['Y', idx1].mean())
    # unequal cell counts
    ds = ds[:-3]
    dsa = ds.aggregate('A%B', drop_bad=True)
    assert_array_equal(dsa['n'], [15, 15, 15, 12])
    idx1 = ds.eval("logical_and(A=='a0', B=='b0')")
    eq_(dsa['Y', 0], ds['Y', idx1].mean())
    # equal_count=True truncates every cell to the smallest cell's n (12)
    dsa = ds.aggregate('A%B', drop_bad=True, equal_count=True)
    assert_array_equal(dsa['n'], [12, 12, 12, 12])
    idx1_12 = np.logical_and(idx1, idx1.cumsum() <= 12)
    eq_(dsa['Y', 0], ds['Y', idx1_12].mean())
def test_align():
    "Testing align() and align1() functions"
    ds = datasets.get_uv()
    ds.index()  # add an 'index' Var to track original case order
    idx4 = np.arange(0, ds.n_cases, 4)
    idx4i = idx4[::-1]
    ds2 = ds.sub(np.arange(0, ds.n_cases, 2))
    # align1: align Dataset to index
    dsa = align1(ds2, idx4)
    assert_array_equal(dsa['index'], idx4, "align1() failure")
    dsa = align1(ds2, idx4i)
    assert_array_equal(dsa['index'], idx4i, "align1() failure")
    # d_idx as Var
    dsa = align1(ds2[::2], idx4, idx4i)
    assert_array_equal(dsa['index'], idx4i, "align1() failure")
    # mismatched index pairs must raise
    assert_raises(ValueError, align1, ds2, idx4, idx4i)
    # Factor index
    assert_raises(ValueError, align1, ds, ds['rm', ::-1], 'rm')
    fds = ds[:20]
    dsa = align1(fds, fds['rm', ::-1], 'rm')
    assert_array_equal(dsa['index'], np.arange(19, -1, -1), "align1 Factor")
    # align two datasets
    dsa1, dsa2 = align(ds, ds2)
    assert_array_equal(dsa1['index'], dsa2['index'], "align() failure")
    dsa1, dsa2 = align(ds, ds2[::-1])
    assert_array_equal(dsa1['index'], dsa2['index'], "align() failure")
def test_celltable():
    "Test the Celltable class."
    ds = datasets.get_uts()
    ds['cat'] = Factor('abcd', repeat=15)
    ct = Celltable('Y', 'A', ds=ds)
    eq_(ct.n_cases, 60)
    eq_(ct.n_cells, 2)
    # with match, cases are collapsed within each level of rm
    ct = Celltable('Y', 'A', match='rm', ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.n_cells, 2)
    # cat argument
    ct = Celltable('Y', 'cat', cat=('c', 'b'), ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.X[0], 'c')
    eq_(ct.X[-1], 'b')
    # requesting a cell that does not exist must raise
    assert_raises(ValueError, Celltable, 'Y', 'cat', cat=('c', 'e'), ds=ds)
    ct = Celltable('Y', 'A', match='rm', ds=ds)
    eq_(ct.n_cases, 30)
    assert np.all(ct.groups['a0'] == ct.groups['a1'])
    ct = Celltable('Y', 'cat', match='rm', cat=('c', 'b'), ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.X[0], 'c')
    eq_(ct.X[-1], 'b')
    # catch unequal length
    assert_raises(ValueError, Celltable, ds['Y', :-1], 'cat', ds=ds)
    assert_raises(ValueError, Celltable, ds['Y', :-1], 'cat', match='rm', ds=ds)
    # coercion of numerical X
    X = ds.eval("A == 'a0'")
    ct = Celltable('Y', X, cat=(None, None), ds=ds)
    eq_(('False', 'True'), ct.cat)
    assert_array_equal(ct.data['True'], ds['Y', X])
    ct = Celltable('Y', X, cat=(True, False), ds=ds)
    eq_(('True', 'False'), ct.cat)
    assert_array_equal(ct.data['True'], ds['Y', X])
    # test coercion of Y
    ct = Celltable(ds['Y'].x, 'A', ds=ds)
    assert_is_instance(ct.Y, np.ndarray)
    ct = Celltable(ds['Y'].x, 'A', ds=ds, coercion=asvar)
    assert_is_instance(ct.Y, Var)
    # test sub
    ds_sub = ds.sub("A == 'a0'")
    ct_sub = Celltable('Y', 'B', ds=ds_sub)
    ct = Celltable('Y', 'B', sub="A == 'a0'", ds=ds)
    assert_dataobj_equal(ct_sub.Y, ct.Y)
    # test sub with rm
    ct_sub = Celltable('Y', 'B', match='rm', ds=ds_sub)
    ct = Celltable('Y', 'B', match='rm', sub="A == 'a0'", ds=ds)
    assert_dataobj_equal(ct_sub.Y, ct.Y)
    # Interaction match
    ct = Celltable('Y', 'A', match='B % rm', ds=ds)
    ok_(ct.all_within)
    assert_dataobj_equal(combine((ct.data['a0'], ct.data['a1'])), ds['Y'])
    # test rm sorting: cells must be re-ordered consistently by match
    ds = Dataset()
    ds['rm'] = Factor('abc', repeat=4)
    ds['Y'] = Var(np.arange(3.).repeat(4))
    ds['X'] = Factor('ab', repeat=2, tile=3)
    idx = np.arange(12)
    np.random.shuffle(idx)
    ds = ds[idx]
    ct = Celltable('Y', 'X', 'rm', ds=ds)
    assert_array_equal(ct.match, Factor('abc', tile=2))
    assert_array_equal(ct.Y, np.tile(np.arange(3.), 2))
    assert_array_equal(ct.X, Factor('ab', repeat=3))
def test_combine():
    "Test combine()"
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts()
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Basic combine")
    # variables missing from one Dataset are filled for the cases that have them
    del ds1['Y']
    del ds2['YCat']
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Combine with "
                       "missing Var")
    ok_(np.all(ds1['YCat'] == ds['YCat'][:ds1.n_cases]), "Combine with missing "
        "Factor")
    # mixing data-object types must raise
    assert_raises(TypeError, combine, (ds2['A'], ds2['Y']))
    # combine NDVar with unequel dimensions
    ds = datasets.get_uts(utsnd=True)
    y = ds['utsnd']
    y1 = y.sub(sensor=['0', '1', '2', '3'])
    y2 = y.sub(sensor=['1', '2', '3', '4'])
    ds1 = Dataset((y1,))
    ds2 = Dataset((y2,))
    dsc = combine((ds1, ds2))
    y = dsc['utsnd']
    # only sensors present in both inputs survive
    eq_(y.sensor.names, ['1', '2', '3'], "Sensor dimension "
        "intersection failed.")
    dims = ('case', 'sensor', 'time')
    ref = np.concatenate((y1.get_data(dims)[:, 1:], y2.get_data(dims)[:, :3]))
    assert_array_equal(y.get_data(dims), ref, "combine utsnd")
def test_dataset_combining():
    "Test Dataset combination methods"
    ds = datasets.get_uv()
    del ds['fltvar'], ds['intvar'], ds['A']
    ds2 = datasets.get_uv()
    del ds2['fltvar'], ds2['intvar']
    # update() adds variables that are missing from ds
    ds.update(ds2)
    assert_array_equal(ds['A'], ds2['A'])
    ds2 = datasets.get_uv()
    del ds2['fltvar'], ds2['intvar']
    # updating with a conflicting value for a shared variable must raise
    ds2['B'][5] = 'something_else'
    del ds['A']
    assert_raises(ValueError, ds.update, ds2)
def test_dataset_indexing():
    """Test Dataset indexing"""
    ds = datasets.get_uv()
    # indexing values: (key, case) and (case, key) orders are equivalent
    eq_(ds['A', 1], ds['A'][1])
    eq_(ds[1, 'A'], ds['A'][1])
    # indexing variables
    assert_dataobj_equal(ds[:, 'A'], ds['A'])
    assert_dataobj_equal(ds['A', :], ds['A'])
    assert_dataobj_equal(ds[:10, 'A'], ds['A'][:10])
    assert_dataobj_equal(ds['A', :10], ds['A'][:10])
    # new Dataset through indexing
    ds2 = Dataset()
    ds2['A'] = ds['A']
    assert_dataset_equal(ds[('A',)], ds2)
    ds2['B'] = ds['B']
    assert_dataset_equal(ds['A', 'B'], ds2)
    assert_dataset_equal(ds[('A', 'B'), :10], ds2[:10])
    assert_dataset_equal(ds[:10, ('A', 'B')], ds2[:10])
    # assigning value
    ds[2, 'A'] = 'hello'
    eq_(ds[2, 'A'], 'hello')
    ds['A', 2] = 'not_hello'
    eq_(ds[2, 'A'], 'not_hello')
    # assigning new factor (broadcast a single label to all cases)
    ds['C', :] = 'c'
    ok_(np.all(ds.eval("C == 'c'")))
    # assigning new Var (broadcast a scalar to all cases)
    ds['D1', :] = 5.
    ds[:, 'D2'] = 5.
    assert_array_equal(ds['D1'], 5)
    assert_array_equal(ds['D2'], 5)
    # test illegal names (must be valid identifiers)
    f = Factor('aaabbb')
    assert_raises(ValueError, ds.__setitem__, '%dsa', f)
    assert_raises(ValueError, ds.__setitem__, '432', f)
    assert_raises(ValueError, ds.__setitem__, ('%dsa', slice(None)), 'value')
    assert_raises(ValueError, ds.__setitem__, (slice(None), '%dsa'), 'value')
    assert_raises(ValueError, ds.__setitem__, ('432', slice(None)), 4.)
    assert_raises(ValueError, ds.__setitem__, (slice(None), '432'), 4.)
def test_dataset_sorting():
    "Test Dataset sorting methods"
    test_array = np.arange(10)
    ds = Dataset()
    ds['v'] = Var(test_array)
    ds['f'] = Factor(test_array)
    # shuffle the Dataset
    rand_idx = test_array.copy()
    np.random.shuffle(rand_idx)
    ds_shuffled = ds[rand_idx]
    # ascending, Var, copy (sorted() returns a new Dataset)
    dsa = ds_shuffled.sorted('v')
    assert_dataset_equal(dsa, ds, "Copy sorted by Var, ascending")
    # descending, Factor, in-place (sort() modifies the Dataset)
    ds_shuffled.sort('f', descending=True)
    assert_dataset_equal(ds_shuffled, ds[::-1], "In-place sorted by Factor, "
                         "descending")
def test_dim_categorial():
    "Test Categorial Dimension"
    values = ['a', 'b', 'c', 'abc']
    name = 'cat'
    dim = Categorial(name, values)
    # basic properties
    print dim
    eq_(len(dim), len(values))
    # persistence (round-trip through pickle must preserve equality)
    s = pickle.dumps(dim, pickle.HIGHEST_PROTOCOL)
    dim_ = pickle.loads(s)
    eq_(dim_, dim)
    # indexing: list and tuple of values yield the same index
    sub_values = values[:2]
    idx = dim.dimindex(sub_values)
    assert_array_equal(dim.dimindex(tuple(sub_values)), idx)
    eq_(dim[idx], Categorial(name, sub_values))
    eq_(dim.dimindex('a'), values.index('a'))
    eq_(dim.dimindex('abc'), values.index('abc'))
    # intersection keeps only shared values
    dim2 = Categorial(name, ['c', 'b', 'e'])
    dim_i = dim.intersect(dim2)
    eq_(dim_i, Categorial(name, ['b', 'c']))
    # unicode: unicode and bytes value arrays should still compare equal
    dimu = Categorial(name, [u'c', 'b', 'e'])
    eq_(dimu.values.dtype.kind, 'U')
    eq_(dim2.values.dtype.kind, 'S')
    eq_(dimu, dim2)
def test_dim_uts():
    "Test UTS Dimension"
    uts = UTS(-0.1, 0.005, 301)
    # make sure indexing rounds correctly for floats
    for i, s in enumerate(np.arange(0, 1.4, 0.05)):
        idx = uts.dimindex((-0.1 + s, s))
        eq_(idx.start, 10 * i)
        eq_(idx.stop, 20 + 10 * i)
    # intersection of two overlapping time axes is the shared window
    uts1 = UTS(-0.1, 0.01, 50)
    uts2 = UTS(0, 0.01, 20)
    intersection = uts1.intersect(uts2)
    eq_(intersection, uts2)
    idx = uts1.dimindex((0, 0.2))
    eq_(uts1[idx], uts2)
def test_effect():
    "Test _Effect class"
    # .enumerate_cells(): running count of prior occurrences of each cell
    f1 = Factor('aabbccaabbcc')
    f2 = Factor('abababababab')
    i = f1 % f2
    n1 = np.concatenate((np.tile([0, 1], 3), np.tile([2, 3], 3)))
    assert_array_equal(f1.enumerate_cells(), n1)
    assert_array_equal(f2.enumerate_cells(), np.arange(6).repeat(2))
    assert_array_equal(i.enumerate_cells(), np.arange(2).repeat(6))
def test_factor():
    "Test basic Factor functionality"
    # removing a cell: reassigning its cases drops it from .cells
    f = Factor('aabbcc')
    eq_(f.cells, ('a', 'b', 'c'))
    f[f == 'c'] = 'a'
    eq_(f.cells, ('a', 'b'))
    # cell order
    a = np.tile(np.arange(3), 3)
    # alphabetical (dict labels do not define an order)
    f = Factor(a, labels={0: 'c', 1: 'b', 2: 'a'})
    eq_(f.cells, ('a', 'b', 'c'))
    # ordered (sequence of pairs preserves the given order)
    f = Factor(a, labels=((0, 'c'), (1, 'b'), (2, 'a')))
    eq_(f.cells, ('c', 'b', 'a'))
    eq_(f[:2].cells, ('c', 'b'))
    f[f == 'b'] = 'c'
    eq_(f.cells, ('c', 'a'))
    # label length
    lens = [2, 5, 32, 2, 32, 524]
    f = Factor(['a' * l for l in lens])
    assert_array_equal(f.label_length(), lens)
def test_factor_relabel():
    "Test Factor.relabel() method"
    f = Factor('aaabbbccc')
    f.relabel({'a': 'd'})
    assert_array_equal(f, Factor('dddbbbccc'))
    # swapping two labels in one call must not collide
    f.relabel({'d': 'c', 'c': 'd'})
    assert_array_equal(f, Factor('cccbbbddd'))
    f.relabel({'d': 'c'})
    assert_array_equal(f, Factor('cccbbbccc'))
    # relabeling a non-existent label must raise
    assert_raises(KeyError, f.relabel, {'a':'c'})
def test_interaction():
    "Test Interaction"
    ds = datasets.get_uv()
    A = ds['A']
    B = ds['B']
    i = A % B
    # eq for sequence: comparing two interactions compares case-wise
    assert_array_equal(i == A % B, True)
    assert_array_equal(i == B % A, False)
    assert_array_equal(i == A, False)
    assert_array_equal(i == ds['fltvar'], False)
    assert_array_equal(ds.eval("A%B") == Factor(ds['A']) % B, True)
    # eq for element: a cell tuple selects the matching cases
    for a, b in product(A.cells, B.cells):
        assert_array_equal(i == (a, b), np.logical_and(A == a, B == b))
def test_isin():
    "Test .isin() methods"
    values = np.array([ 6, -6, 6, -2, -1, 0, -10, -5, -10, -6])
    v = values[0]
    v2 = values[:2]
    # map each distinct value to a letter so the same checks can run on a Factor
    labels = {i: c for i, c in enumerate(ascii_lowercase, -10)}
    vl = labels[v]
    v2l = [labels[v_] for v_ in v2]
    target = np.logical_or(values == v2[0], values == v2[1])
    inv_target = np.invert(target)
    index_target = np.flatnonzero(values == v)
    empty = np.array([])
    # Var variants
    var = Var(values)
    assert_array_equal(var.index(v), index_target)
    assert_array_equal(var.isin(v2), target)
    assert_array_equal(var.isany(*v2), target)
    assert_array_equal(var.isnot(*v2), inv_target)
    assert_array_equal(var.isnotin(v2), inv_target)
    # empty Var: all methods must return empty arrays, not raise
    var0 = Var([])
    assert_array_equal(var0.isin(v2), empty)
    assert_array_equal(var0.isany(*v2), empty)
    assert_array_equal(var0.isnot(*v2), empty)
    assert_array_equal(var0.isnotin(v2), empty)
    # Factor variants (same semantics on labels)
    f = Factor(values, labels=labels)
    assert_array_equal(f.index(vl), index_target)
    assert_array_equal(f.isin(v2l), target)
    assert_array_equal(f.isany(*v2l), target)
    assert_array_equal(f.isnot(*v2l), inv_target)
    assert_array_equal(f.isnotin(v2l), inv_target)
    # empty Factor
    f0 = Factor([])
    assert_array_equal(f0.isin(v2l), empty)
    assert_array_equal(f0.isany(*v2l), empty)
    assert_array_equal(f0.isnot(*v2l), empty)
    assert_array_equal(f0.isnotin(v2l), empty)
def test_model():
    "Test Model class"
    # model repr: A * B expands to main effects plus interaction
    a = Factor('ab', repeat=2, name='a')
    b = Factor('ab', tile=2, name='b')
    m = a * b
    eq_(repr(m), "a + b + a % b")
    # model without explicit names uses the <?> placeholder
    x1 = Factor('ab', repeat=2)
    x2 = Factor('ab', tile=2)
    m = x1 * x2
    eq_(repr(m), "<?> + <?> + <?> % <?>")
    # catch explicit intercept (a constant factor is not a valid effect)
    intercept = Factor('i', repeat=4, name='intercept')
    assert_raises(ValueError, a.__mul__, intercept)
def test_ndvar():
    "Test the NDVar class"
    ds = datasets.get_uts(utsnd=True)
    x = ds['utsnd']
    # meaningful slicing
    assert_raises(KeyError, x.sub, sensor='5')
    assert_equal(x.sub(sensor='4'), x.x[:, 4])
    assert_equal(x.sub(sensor=['4', '3', '2']), x.x[:, [4, 3, 2]])
    assert_equal(x.sub(sensor=['4']), x.x[:, [4]])
    assert_equal(x.sub(case=1, sensor='4'), x.x[1, 4])
    # setup indices: s_* slice, b_* boolean, a_* integer-array
    s_case = slice(10, 13)
    s_sensor = slice(2, 4)
    s_time = x.time._slice(0.1, 0.2)
    b_case = np.zeros(ds.n_cases, dtype=bool)
    b_case[s_case] = True
    b_sensor = np.array([False, False, True, True, False])
    # NOTE(review): b_time is an integer index array, not boolean,
    # despite the b_ prefix
    b_time = np.arange(s_time.start, s_time.stop)
    a_case = np.arange(10, 13)
    a_sensor = np.arange(2, 4)
    a_time = np.arange(x.time.dimindex(0.1), x.time.dimindex(0.2))
    # slicing with different index kinds: all combinations must select the
    # same (3, 2, 10) block
    tgt = x.x[s_case, s_sensor, s_time]
    eq_(tgt.shape, (3, 2, 10))
    # single
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=b_time), tgt)
    # bool & slice
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=b_time), tgt)
    # bool & array
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=b_time), tgt)
    # slice & array
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=s_time), tgt)
    # all three
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=a_time), tgt)
    # Var
    v_case = Var(b_case)
    assert_equal(x.sub(case=v_case, sensor=b_sensor, time=a_time), tgt)
    # baseline correction
    x_bl = x - x.summary(time=(None, 0))
    # assert that the baseline is 0
    bl = x_bl.summary('case', 'sensor', time=(None, 0))
    ok_(abs(bl) < 1e-10, "Baseline correction")
    # NDVar as index
    sens_mean = x.mean(('case', 'time'))
    idx = sens_mean > 0
    pos = sens_mean[idx]
    assert_array_equal(pos.x > 0, True)
def test_ndvar_binning():
    "Test NDVar.bin()"
    x = np.arange(10)
    time = UTS(-0.1, 0.1, 10)
    # expected: pairs of samples averaged into 0.2 s bins
    x_dst = x.reshape((5, 2)).mean(1)
    time_dst = np.arange(0., 0.9, 0.2)
    # 1-d
    ndvar = NDVar(x, (time,))
    b = ndvar.bin(0.2)
    assert_array_equal(b.x, x_dst, "Binned data")
    assert_array_equal(b.time.x, time_dst, "Bin times")
    # 2-d
    ndvar = NDVar(np.vstack((x, x, x)), ('case', time))
    b = ndvar.bin(0.2)
    assert_array_equal(b.x, np.vstack((x_dst, x_dst, x_dst)), "Binned data")
    assert_array_equal(b.time.x, time_dst, "Bin times")
    # time: float tmin that does not divide evenly into the bin length
    x = np.ones((5, 70))
    ndvar = NDVar(x, ('case', UTS(0.45000000000000007, 0.005, 70)))
    binned_ndvar = ndvar.bin(0.05)
    assert_array_equal(binned_ndvar.x, 1.)
    eq_(binned_ndvar.shape, (5, 7))
def test_ndvar_graph_dim():
    "Test NDVar dimensions with connectivity graph"
    ds = datasets.get_uts(utsnd=True)
    x = ds['utsnd']
    # non-monotonic index: connectivity edges must be remapped to the new
    # sensor order when sensors are selected in reversed order
    sub_mono = x.sub(sensor=['2', '3', '4'])
    sub_nonmono = x.sub(sensor=['4', '3', '2'])
    argsort = np.array([2,1,0])
    conn = argsort[sub_mono.sensor.connectivity().ravel()].reshape((-1, 2))
    assert_equal(sub_nonmono.sensor.connectivity(), conn)
def test_ndvar_summary_methods():
    "Test NDVar methods for summarizing data over axes"
    ds = datasets.get_uts(utsnd=True)
    x = ds['utsnd']
    # index fixtures: dimension names, boolean NDVar masks of various shapes
    dim = 'sensor'
    axis = x.get_axis(dim)
    dims = ('case', 'sensor')
    axes = tuple(x.get_axis(d) for d in dims)
    idx = x > 0
    x0 = x[0]
    idx0 = idx[0]
    xsub = x.sub(time=(0, 0.5))
    idxsub = xsub > 0
    idx1d = x.mean(('case', 'time')) > 0
    # info inheritance
    eq_(x.any(('sensor', 'time')).info, x.info)
    # numpy functions: each method must match the equivalent numpy reduction
    eq_(x.any(), x.x.any())
    assert_array_equal(x.any(dim), x.x.any(axis))
    assert_array_equal(x.any(dims), x.x.any(axes))
    assert_array_equal(x.any(idx0), [x_[idx0.x].any() for x_ in x.x])
    assert_array_equal(x.any(idx), [x_[i].any() for x_, i in izip(x.x, idx.x)])
    assert_array_equal(x0.any(idx0), x0.x[idx0.x].any())
    assert_array_equal(x.any(idxsub), xsub.any(idxsub))
    assert_array_equal(x.any(idx1d), x.x[:, idx1d.x].any(1))
    eq_(x.max(), x.x.max())
    assert_array_equal(x.max(dim), x.x.max(axis))
    assert_array_equal(x.max(dims), x.x.max(axes))
    assert_array_equal(x.max(idx0), [x_[idx0.x].max() for x_ in x.x])
    assert_array_equal(x.max(idx), [x_[i].max() for x_, i in izip(x.x, idx.x)])
    assert_array_equal(x0.max(idx0), x0.x[idx0.x].max())
    assert_array_equal(x.max(idxsub), xsub.max(idxsub))
    assert_array_equal(x.max(idx1d), x.x[:, idx1d.x].max(1))
    eq_(x.mean(), x.x.mean())
    assert_array_equal(x.mean(dim), x.x.mean(axis))
    assert_array_equal(x.mean(dims), x.x.mean(axes))
    assert_array_equal(x.mean(idx0), [x_[idx0.x].mean() for x_ in x.x])
    assert_array_equal(x.mean(idx), [x_[i].mean() for x_, i in izip(x.x, idx.x)])
    assert_array_equal(x0.mean(idx0), x0.x[idx0.x].mean())
    assert_array_equal(x.mean(idxsub), xsub.mean(idxsub))
    assert_array_equal(x.mean(idx1d), x.x[:, idx1d.x].mean(1))
    eq_(x.min(), x.x.min())
    assert_array_equal(x.min(dim), x.x.min(axis))
    assert_array_equal(x.min(dims), x.x.min(axes))
    assert_array_equal(x.min(idx0), [x_[idx0.x].min() for x_ in x.x])
    assert_array_equal(x.min(idx), [x_[i].min() for x_, i in izip(x.x, idx.x)])
    assert_array_equal(x0.min(idx0), x0.x[idx0.x].min())
    assert_array_equal(x.min(idxsub), xsub.min(idxsub))
    assert_array_equal(x.min(idx1d), x.x[:, idx1d.x].min(1))
    eq_(x.std(), x.x.std())
    assert_array_equal(x.std(dim), x.x.std(axis))
    assert_array_equal(x.std(dims), x.x.std(axes))
    assert_array_equal(x.std(idx0), [x_[idx0.x].std() for x_ in x.x])
    assert_array_equal(x.std(idx), [x_[i].std() for x_, i in izip(x.x, idx.x)])
    assert_array_equal(x0.std(idx0), x0.x[idx0.x].std())
    assert_array_equal(x.std(idxsub), xsub.std(idxsub))
    assert_array_equal(x.std(idx1d), x.x[:, idx1d.x].std(1))
    # non-numpy: rms is eelbrain's own reduction
    eq_(x.rms(), rms(x.x))
    assert_array_equal(x.rms(dim), rms(x.x, axis))
    assert_array_equal(x.rms(dims), rms(x.x, axes))
    assert_array_equal(x.rms(idx0), [rms(x_[idx0.x]) for x_ in x.x])
    assert_array_equal(x.rms(idx), [rms(x_[i]) for x_, i in izip(x.x, idx.x)])
    assert_array_equal(x0.rms(idx0), rms(x0.x[idx0.x]))
    assert_array_equal(x.rms(idxsub), xsub.rms(idxsub))
    assert_array_equal(x.rms(idx1d), rms(x.x[:, idx1d.x], 1))
def test_ols():
    "Test NDVar.ols() method against R's lm() via rpy2"
    from rpy2.robjects import r
    # simulate data
    ds = datasets.get_uts(True)
    n_times = len(ds['uts'].time)
    # inject a Hanning-window bump (samples 20-40) scaled per-case by Y,
    # so the regression of uts on Y has a known time course
    x = np.zeros(n_times)
    x[20:40] = np.hanning(20)
    utsc = ds.eval("uts.copy()")
    utsc.x += ds['Y'].x[:, None] * x[None, :]
    ds_ = Dataset()
    ds_['x'] = Var(ds['Y'].x)
    # second, noisy predictor for the two-regressor model
    ds_['x2'] = ds_['x'] + np.random.normal(0, 1, ds.n_cases)
    # ols regression
    m1 = ds_['x']
    b1 = utsc.ols(m1)
    res1 = utsc.residuals(m1)
    t1 = utsc.ols_t(m1)
    m2 = ds_.eval("x + x2")
    b2 = utsc.ols(m2)
    res2 = utsc.residuals(m2)
    t2 = utsc.ols_t(m2)
    # compare with R: refit lm() per time point and check betas/residuals/t
    for i in xrange(n_times):
        ds_['y'] = Var(utsc.x[:, i])
        ds_.to_r('ds')
        # 1 predictor
        r('lm1 <- lm(y ~ x, ds)')
        beta = r('coef(lm1)')[1]
        assert_almost_equal(b1.x[0, i], beta)
        res = r('residuals(lm1)')
        assert_array_almost_equal(res1.x[:, i], res)
        # index 5 = t-value of the x coefficient in R's coefficient matrix
        t = r('coef(summary(lm1))')[5]
        assert_almost_equal(t1.x[0, i], t)
        # 2 predictors
        r('lm2 <- lm(y ~ x + x2, ds)')
        beta = r('coef(lm2)')[1:]
        assert_array_almost_equal(b2.x[:, i], beta)
        res = r('residuals(lm2)')
        assert_array_almost_equal(res2.x[:, i], res)
        lm2_coefs = r('coef(summary(lm2))')
        # indices 7, 8 = t-values of x and x2 in the flattened coef matrix
        t = [lm2_coefs[7], lm2_coefs[8]]
        assert_array_almost_equal(t2.x[:, i], t)
    # 3d: same comparison for a (case, sensor, time) NDVar, checked at sensor 1
    utsnd = ds['utsnd']
    ds_['utsnd'] = utsnd
    b1 = ds_.eval("utsnd.ols(x)")
    res1 = ds_.eval("utsnd.residuals(x)")
    t1 = ds_.eval("utsnd.ols_t(x)")
    for i in xrange(len(b1.time)):
        ds_['y'] = Var(utsnd.x[:, 1, i])
        ds_.to_r('ds')
        # 1 predictor
        r('lm1 <- lm(y ~ x, ds)')
        beta = r('coef(lm1)')[1]
        assert_almost_equal(b1.x[0, 1, i], beta)
        res = r('residuals(lm1)')
        assert_array_almost_equal(res1.x[:, 1, i], res)
        t = r('coef(summary(lm1))')[5]
        assert_almost_equal(t1.x[0, 1, i], t)
def test_io_pickle():
    "Test io by pickling: a Dataset must survive a pickle round-trip"
    ds = datasets.get_uts()
    ds.info['info'] = "Some very useful information about the Dataset"
    tempdir = tempfile.mkdtemp()
    try:
        dest = os.path.join(tempdir, 'test.pickled')
        with open(dest, 'wb') as fid:
            pickle.dump(ds, fid, protocol=pickle.HIGHEST_PROTOCOL)
        with open(dest, 'rb') as fid:
            ds2 = pickle.load(fid)
    finally:
        # always remove the temporary directory, even if (un)pickling fails
        shutil.rmtree(tempdir)
    assert_dataset_equal(ds, ds2)
def test_io_txt():
    "Test Dataset io as text (save_txt / load.tsv round-trip)"
    ds = datasets.get_uv()
    # Var that has integer values as float
    ds['intflt'] = ds.eval('intvar * 1.')
    ds['intflt'].name = 'intflt'
    # io test
    tempdir = tempfile.mkdtemp()
    try:
        dest = os.path.join(tempdir, 'test.txt')
        ds.save_txt(dest)
        ds2 = load.tsv(dest)
    finally:
        # clean up the temporary directory regardless of outcome
        shutil.rmtree(tempdir)
    # decimal=6: text round-trip only preserves limited float precision
    assert_dataset_equal(ds, ds2, decimal=6)
def test_r():
    "Test interaction with R through rpy2"
    from rpy2.robjects import r
    # load R's built-in 'sleep' dataset and pull it into a Dataset
    r("data(sleep)")
    ds = Dataset.from_r("sleep")
    eq_(ds.name, 'sleep')
    # expected values of the 'extra' column of R's sleep data
    extra = (0.7, -1.6, -0.2, -1.2, -0.1, 3.4, 3.7, 0.8, 0.0, 2.0, 1.9, 0.8,
             1.1, 0.1, -0.1, 4.4, 5.5, 1.6, 4.6, 3.4)
    assert_array_equal(ds.eval('extra'), extra)
    assert_array_equal(ds.eval('ID'), map(str, xrange(1, 11)) * 2)
    assert_array_equal(ds.eval('group'), ['1'] * 10 + ['2'] * 10)
    # test putting: push the Dataset back into R and read it again
    ds.to_r('sleep_copy')
    ds_copy = Dataset.from_r('sleep_copy')
    assert_dataset_equal(ds_copy, ds)
def test_source_space():
    "Test SourceSpace Dimension"
    subject = 'fsaverage'
    data_path = mne.datasets.sample.data_path()
    mri_sdir = os.path.join(data_path, 'subjects')
    mri_dir = os.path.join(mri_sdir, subject)
    src_path = os.path.join(mri_dir, 'bem', subject + '-ico-5-src.fif')
    label_dir = os.path.join(mri_dir, 'label')
    label_ba1 = mne.read_label(os.path.join(label_dir, 'lh.BA1.label'))
    label_v1 = mne.read_label(os.path.join(label_dir, 'lh.V1.label'))
    label_mt = mne.read_label(os.path.join(label_dir, 'lh.MT.label'))
    label_ba1_v1 = label_ba1 + label_v1
    label_v1_mt = label_v1 + label_mt
    src = mne.read_source_spaces(src_path)
    source = SourceSpace((src[0]['vertno'], src[1]['vertno']), subject,
                         'ico-5', mri_sdir)
    # indexing (BA1+V1) by (V1+MT) should recover exactly V1
    index = source.dimindex(label_v1)
    source_v1 = source[index]
    index = source.dimindex(label_ba1_v1)
    source_ba1_v1 = source[index]
    index = source.dimindex(label_v1_mt)
    source_v1_mt = source[index]
    index = source_ba1_v1.dimindex(source_v1_mt)
    source_v1_intersection = source_ba1_v1[index]
    assert_source_space_equal(source_v1, source_v1_intersection)
    # index from label
    index = source.index_for_label(label_v1)
    assert_array_equal(index.source[index.x].vertno[0],
                       np.intersect1d(source.lh_vertno, label_v1.vertices, 1))
    # parcellation and cluster localization
    # NOTE(review): lexicographic string compare of version numbers is
    # fragile (e.g. '0.10' < '0.8' is True) — works only for 0.x single digits
    if mne.__version__ < '0.8':
        return
    parc = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir=mri_sdir)
    indexes = [source.index_for_label(label) for label in parc
               if len(label) > 10]
    x = np.vstack([index.x for index in indexes])
    ds = source._cluster_properties(x)
    for i in xrange(ds.n_cases):
        eq_(ds[i, 'location'], parc[i].name)
def test_var():
    "Test Var objects"
    base = Factor('aabbcde')
    # from_dict maps factor cells to values; unmapped cells get the default
    y = Var.from_dict(base, {'a': 5, 'e': 8}, default=0)
    assert_array_equal(y.x, [5, 5, 0, 0, 0, 0, 8])
    # basic operations: arithmetic must preserve the info dict
    info = {'a': 1}
    v = Var(np.arange(4.), info=info)
    eq_(v.info, info)
    w = v - 1
    eq_(w.info, info)
    assert_array_equal(w.x, v.x - 1)
    w = v + 1
    eq_(w.info, info)
    assert_array_equal(w.x, v.x + 1)
    w = v * 2
    eq_(w.info, info)
    assert_array_equal(w.x, v.x * 2)
    w = v / 2
    eq_(w.info, info)
    assert_array_equal(w.x, v.x / 2)
    # assignment: boolean-mask and slice assignment from arrays and Vars
    tgt1 = np.arange(10)
    tgt2 = np.tile(np.arange(5), 2)
    v = Var(np.arange(10))
    v[v > 4] = np.arange(5)
    assert_array_equal(v, tgt2)
    v[5:] = np.arange(5, 10)
    assert_array_equal(v, tgt1)
    v = Var(np.arange(10))
    v[v > 4] = Var(np.arange(5))
    assert_array_equal(v, tgt2)
    v[5:] = Var(np.arange(5, 10))
    assert_array_equal(v, tgt1)
    # .split(): splitting into i bins must yield i cells
    y = Var(np.arange(16))
    for i in xrange(1, 9):
        split = y.split(i)
        eq_(len(split.cells), i)
    # .as_factor(): label mapping, including tuple keys and 'default'
    v = Var(np.arange(4))
    assert_array_equal(v.as_factor(), Factor('0123'))
    assert_array_equal(v.as_factor({0: 'a'}), Factor('a123'))
    assert_array_equal(v.as_factor({(0, 1): 'a', (2, 3): 'b'}), Factor('aabb'))
    assert_array_equal(v.as_factor({(0, 1): 'a', 2: 'b', 'default': 'c'}),
                       Factor('aabc'))
    assert_array_equal(v.as_factor({(0, 1): 'a', (2, 'default'): 'b'}),
                       Factor('aabb'))
| [
"christianmbrodbeck@gmail.com"
] | christianmbrodbeck@gmail.com |
f6e38588fade84b4b3f75332903ccf0eb3b4a57f | 3abf9c3624af25d301e87b35013ce669356c8237 | /GetEffEvolution.py | 5b4943f30905a472509221784d4180e36229493d | [] | no_license | JeromeBlanchet/EnRoutePerformance | 99662e0e5168b8deda0daa7f8ee3234b315fe08c | ce74448d15ab965d5a0f46f61cd56356b8d5db63 | refs/heads/master | 2021-10-27T21:02:42.188774 | 2019-04-19T18:10:22 | 2019-04-19T18:10:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 2017
@ Author: Liu, Yulin
@ Institute: UC Berkeley
"""
from __future__ import division
import os
import GetCluster
import GetEDA
import numpy as np
class EffEvolution:
def __init__(self, Dep = 'LAX', Arr = 'SEA', Timeframe = [2013, 2014, 2015]):
self.Dep = Dep
self.Arr = Arr
self.Timeframe = Timeframe
def CleanData(self, InputData = False, SaveTrack = True, **kwargs):
A_cutoff = kwargs.get('Cutoff', 0.5)
T_cutoff = kwargs.get('Tcut', 300)
D_cutoff = kwargs.get('Dcut', 100)
V_cutoff = kwargs.get('Vcut', 0.27)
for year in self.Timeframe:
print('Processing flights from %s to %s in %d'%(self.Dep, self.Arr, year))
exec("self.Dep_Arr_%s = GetEDA.EDA_Data(self.Dep, self.Arr, year, A_cutoff, T_cutoff, D_cutoff, V_cutoff, InputData = InputData, db = False, Insert = False)"%str(year))
if SaveTrack:
print('Saving flights from %s to %s in %d'%(self.Dep, self.Arr, year))
exec("self.Dep_Arr_%s.SaveData()"%str(year))
return
def MergeData(self):
i = 0
for year in self.Timeframe:
if i == 0:
exec("self.All_Eff = self.Dep_Arr_%s.Efficiency.copy()"%str(year))
exec("self.All_VTrack = self.Dep_Arr_%s.VTrack.copy()"%str(year))
i += 1
else:
exec("self.All_Eff.update(self.Dep_Arr_%s.Efficiency)"%str(year))
exec("self.All_VTrack = self.All_VTrack.append(self.Dep_Arr_%s.VTrack)"%str(year))
i += 1
self.All_VTrack = self.All_VTrack.reset_index(drop = True)
print('Number of flights with the specified time frame: ', self.All_VTrack.FID.unique().shape)
return self.All_VTrack, self.All_Eff
def Pre_Clustering(self, N_Comp = 5, N_pt = 100):
self.T1 = GetCluster.Traj_Clustering(self.Dep,self.Arr, 9999, N_Comp = N_Comp, N_pt = N_pt, VTRACK = self.All_VTrack, EnEff = self.All_Eff)
def Clustering(self, dist_thres = 1, num_thres = 20, **kwargs):
SaveLabelData = kwargs.get('SaveData', False)
Median = kwargs.get('MEDIAN', True)
Plot = kwargs.get('PLOT', True)
LBdata1, _ = self.T1.DB_Clustering(dist_thres, num_thres, SAVE = SaveLabelData, MEDIAN = Median, PLOT = Plot)
stat_summary = LBdata1.groupby(['YEAR','ClustID']).agg({'FID': np.count_nonzero, 'Efficiency': np.mean}).reset_index()
stat_summary['share'] = stat_summary.groupby('YEAR').FID.transform(lambda x: x/x.sum())
stat_summary = stat_summary.merge(LBdata1.groupby('ClustID').Efficiency.mean().reset_index(), on = 'ClustID')
stat_summary.columns = ['Year', 'ClusterID', 'WithinClusterInefficiency', 'TotalTraffic', 'ShareOfTraffic','ClusterAverageIneff']
return LBdata1, stat_summary[['Year', 'ClusterID','ClusterAverageIneff', 'WithinClusterInefficiency', 'TotalTraffic', 'ShareOfTraffic']].sort_values(by = ['Year', 'ClusterID']) | [
"noreply@github.com"
] | JeromeBlanchet.noreply@github.com |
17f147002517ca6e9ce3f90605cfde55fb9f8c21 | 8f736b5cc28cc1d46506abf1b001eb41cc1f9423 | /apps/trade/migrations/0021_auto_20210322_2247.py | 9d43bf7c166f0d2b8fdeb3cf75abab92377b96c8 | [] | no_license | tang1323/MxShop | 6ac68502f59ae07b483b6145e1b557399192e3dd | 831b5bdd8abdf7d6e547b0bd3fff9341261e4afa | refs/heads/master | 2023-04-04T07:09:32.759476 | 2021-04-14T14:36:00 | 2021-04-14T14:36:00 | 357,937,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # Generated by Django 2.2 on 2021-03-22 22:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens the pay_status choices on OrderInfo
    # to the Alipay trade states (keep in sync with the payment callback).
    dependencies = [
        ('trade', '0020_auto_20210322_1137'),
    ]
    operations = [
        migrations.AlterField(
            model_name='orderinfo',
            name='pay_status',
            field=models.CharField(blank=True, choices=[('TRADE_SUCCESS', '成功'), ('paying', '待支付'), ('TRADE_FINISHED', '交易结束'), ('WAIT_BUYER_PAY', '交易创建'), ('TRADE_CLOSED', '超时关闭')], default='paying', max_length=30, null=True, verbose_name='订单状态'),
        ),
    ]
| [
"1171242903@qq.com"
] | 1171242903@qq.com |
62a3be192a9c0b150e0545ad058927746d79b05e | 6a53987c72b048ce3dada42a18d5d9be1a539608 | /Day 1/1-1.py | 70c5f6a9025d7aa3da051b7acfd8ae87405f7202 | [] | no_license | swekung/advent-of-code2020 | c78e171387849575349843120591f66d91e3107e | bf44cacd227becca0db852f99f9eaf7c8efe4bdd | refs/heads/main | 2023-02-05T23:45:03.743432 | 2020-12-22T10:34:41 | 2020-12-22T10:34:41 | 317,514,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | import numpy as np
import sys
import time
start_time = time.time()
def readFile(file):
text = open(file)
out = []
for line in text:
out.append(int(line.rstrip('\n')))
out = np.array(out)
text.close()
return out
def findPairs(arr):
    """Return the row indices of every pairwise sum equal to 2020.

    The pairwise-sum matrix is symmetric, so each matching pair (i, j)
    contributes both i and j to the result.
    """
    pair_sums = np.add.outer(arr, arr)
    rows, _cols = np.where(pair_sums == 2020)
    return rows
def __main__():
    # NOTE(review): "Day 1\input.txt" only works because "\i" is not a
    # recognised escape sequence; a raw string or os.path.join would be safer.
    file = "Day 1\input.txt"
    arr = readFile(file)
    # pair holds both indices of the matching (symmetric) pair, so
    # pair[0] and pair[1] are the two entries summing to 2020.
    pair = findPairs(arr)
    print(arr[pair[0]] * arr[pair[1]])
__main__()
print("--- %s seconds ---" % (time.time() - start_time))
"simhans@student.chalmers.se"
] | simhans@student.chalmers.se |
e0244c8666835576056b3acce849bdae4bd950dc | a5564fbf541b4fb602f5ad47aa5d06441bcf9f0a | /apps/fyle/migrations/0028_auto_20230112_1050.py | f028ce39578be10371b173b36815be40199c507e | [
"MIT"
] | permissive | fylein/fyle-qbo-api | 1fc222501276ebf0f316f43bb278a6adcc0e08ce | b4c464cc6442ead91ceb3b2840103b27af3e029c | refs/heads/master | 2023-08-31T06:26:07.296341 | 2023-08-24T15:45:55 | 2023-08-24T15:45:55 | 243,419,752 | 1 | 3 | MIT | 2023-09-07T13:31:53 | 2020-02-27T03:15:01 | Python | UTF-8 | Python | false | false | 596 | py | # Generated by Django 3.1.14 on 2023-01-12 10:50
import apps.fyle.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: expense_state default becomes a callable
    # and the help_text is updated to list the fetchable states.
    dependencies = [('fyle', '0027_expensegroupsettings_ccc_expense_state')]
    operations = [
        migrations.AlterField(
            model_name='expensegroupsettings',
            name='expense_state',
            field=models.CharField(default=apps.fyle.models.get_default_expense_state, help_text='state at which the expenses are fetched ( PAYMENT_PENDING / PAYMENT_PROCESSING, PAID)', max_length=100, null=True),
        )
    ]
| [
"noreply@github.com"
] | fylein.noreply@github.com |
88b837be937d1467ce20aca93e1260c500a8cda2 | 8988b6fb22c012f41ee687a66869f2352f1fe67b | /final/project_raspberry.py | 7e8804ef821a1431b2e24b8fa0d5112db9dc40fa | [] | no_license | AT9M/Project | f16e86aa4ed1fbde2c92f3c3312398c22a53ca75 | bd4c3b97ed040bf745bf657c7679eb986b307d5c | refs/heads/master | 2020-04-18T12:35:43.295679 | 2019-03-15T13:54:03 | 2019-03-15T13:54:03 | 167,538,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,229 | py | #!/usr/bin/python3
from pygame import mixer
import os,random
import webbrowser
import datetime
import turtle
import math
import random
from playsound import playsound
min = 1
max = 6
i=1
roll_again = "yes"
from subprocess import call
import vlc
speech="hello Wold !"
call(["espeak",speech])
start=["Hi !","Hello, How are you ?","Greetings"]
now = datetime.datetime.now()
Liam_fulla=r"hihowareyou.mp3"
Liam_hi=r"hi.mp3"
Liam_how=r"how.mp3"
Liam_are=r"are.mp3"
Liam_you=r"you.mp3"
jajoya=r"jajoy2.mp3"
johnnya=r"johndepp.mp3"
hpa=r"hp.mp3"
anakina=r"anakin.mp3"
jaune=r"jaune.mp3"
nani=r"nani.mp3"
deus=r"deus.mp3"
vult=r"vult.mp3"
slav=r"slav.mp3"
sax=r"sax.mp3"
gandalf=r"gandalf.mp3"
pirate=r"pirate.mp3"
nine=r"nine.mp3"
niness=r"90s.mp3"
cristina=r"cristina.mp3"
def music():
    """Prompt for a track number and play the matching sound clip.

    Refactor: the original defined 16 identical one-line wrapper functions
    plus a dispatch dict; they are collapsed into a single lookup table
    mapping the menu number to the mp3 path (module-level constants).
    Fix: an unknown number is now ignored — the original dispatched to the
    default string "nothing" and then called it, raising TypeError.
    """
    tracks = {
        1: Liam_fulla,
        2: jajoya,
        3: johnnya,
        4: hpa,
        5: anakina,
        6: jaune,
        7: nani,
        8: deus,
        9: vult,
        10: slav,
        11: sax,
        12: gandalf,
        13: pirate,
        14: nine,
        15: niness,
        16: cristina,
    }
    # Same prompt as before; ValueError still propagates on non-numeric input.
    okt = int(input(" enter "))
    track = tracks.get(okt)
    if track is not None:
        playsound(track)
def say():
    # Ask the user for a phrase and speak it through the espeak CLI.
    phrase= input("what did i must say ?\n")
    gg=str(phrase)
    call(["espeak",gg])
    return
def web():
    # Speak a prompt, then open http://<input>.ie in the default browser.
    g="what address do you want"
    call(["espeak",g])
    w = input("address ?\n")
    return webbrowser.open('http://'+w+'.ie')
def today():
    """Speak today's date via espeak and return the current timestamp string.

    Fix: the original returned ``str(now)`` where ``now`` is a module-level
    value captured once at import time, so the returned timestamp went stale.
    A single fresh snapshot is taken here and used for both the spoken date
    and the return value (also avoiding a midnight rollover between calls).
    """
    current = datetime.datetime.now()
    day = current.strftime('%A')
    month = current.strftime('%B')
    date = current.strftime('%d')
    # Same (unspaced) phrasing as the original speech output.
    g = "Today is" + day + date + month
    call(["espeak", g])
    return str(current)
def wiki():
    # Speak a prompt, normalise the subject to Wikipedia's URL form
    # (spaces -> underscores, lower case) and open the article page.
    g="type the subject you want"
    call(["espeak",g])
    w = input("subject ?\n")
    w=w.replace(" ", "_")
    w=w.lower()
    webbrowser.open('https://en.wikipedia.org/wiki/'+w)
    return
def background():
    # Pick one of four gif images at random and show it as the turtle
    # shape; blocks until the window is clicked.
    x=random.randint(1,4)
    image = {1:r"t.gif",2:r"o.gif",3:r"y.gif",4:r"u.gif"}
    screen = turtle.Screen()
    screen.addshape(image[x])
    turtle.shape(image[x])
    turtle.exitonclick()
    return
def game():
    """Repeatedly roll two six-sided dice until the player declines.

    Fixes two crashes in the original: (1) ``roll_again`` was assigned
    inside the function, making it local, so the first ``while`` test
    raised UnboundLocalError; it is now initialised locally. (2) the
    Python-2-only ``raw_input`` is replaced by ``input`` (the script's
    shebang is python3 and it uses ``input`` everywhere else).
    """
    roll_again = "yes"
    while roll_again == "yes" or roll_again == "y":
        print("Rolling the dices...")
        print("The values are....")
        # ``min`` and ``max`` are the module-level dice bounds (1 and 6),
        # shadowing the builtins of the same name.
        a = random.randint(min, max)
        print(a)
        b = random.randint(min, max)
        print(b)
        roll_again = input("Roll the dices again?")
    return ""
# Top-level menu: maps the number typed in the main loop to a feature.
switcher = {
        1: say,
        2: web,
        3: today,
        4: wiki,
        5: background,
        6: game,
        7:music
}
def commande(n):
    """Dispatch menu choice ``n`` to its feature function and return its result.

    Fix: the original used ``switcher.get(n, "nothing")`` and then called
    the result unconditionally, so any unknown choice raised
    ``TypeError: 'str' object is not callable``. Unknown choices now
    return the sentinel string without calling it.
    """
    func = switcher.get(n)
    if func is None:
        return "nothing"
    return func()
u=0
o=1
# Intentional infinite menu loop: u never equals o, so the prompt repeats
# until the process is killed (non-numeric input raises ValueError and exits).
while u!=o:
    okt=int(input("enter "))
    commande(okt)
| [
"noreply@github.com"
] | AT9M.noreply@github.com |
36ff60eb1bc60ceb754c9bb01bb0f5434bc87756 | 4e486f1ee1155b8d27fd56baeedde54350c01873 | /src/carrierEasypost/__init__.py | 46739a11d7b9fa49232a2cb84f159873b7fd9a1b | [
"MIT"
] | permissive | lalithkumart-corp/python-workaround | 6cc860f96fec68d4dc128019676f238946223e62 | a5457e3357b2cef05a75260f601151f74c6d072b | refs/heads/master | 2020-12-09T08:38:51.017872 | 2020-02-22T05:51:52 | 2020-02-22T05:51:52 | 233,252,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | from .easypost import Rate, Shipment | [
"lalithkmr94@gmail.com"
] | lalithkmr94@gmail.com |
16c85797693df2dde13e6b3da506299a3603a8f8 | 724c615eb6968d79706619b6d6a75b658bdb1eb5 | /geant_fullsim_ecal_SPG_batch.py | 57f146fdbbb5f80506f442a313c45229f3098924 | [] | no_license | broach1/scripts | faefae9ac8b188fa47b2a8ebad9afd469244d6e8 | c5e726d42402172ccc55bf7cf676e702e8fe5c81 | refs/heads/master | 2020-12-20T22:36:35.463982 | 2016-08-10T11:15:11 | 2016-08-10T11:15:11 | 59,031,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,245 | py | import os
import numpy as np
#loads array of random seeds from file
seed_array = np.loadtxt('seeds.txt',dtype='int',delimiter=',')
#the space below (lines 8-22) are for job options (ENE, EVTMAX, etc)
ENE=20e3
EVTMAX=1
BFIELD=0
PHIMIN=0
PHIMAX=6.28
VX=0
VY=0
VZ=0
i=4
CLUSTER=1
from Gaudi.Configuration import *
# Data service
from Configurables import FCCDataSvc
podioevent = FCCDataSvc("EventDataSvc")
# Magnetic field
from Configurables import G4ConstantMagneticFieldTool
if BFIELD==1:
field = G4ConstantMagneticFieldTool("G4ConstantMagneticFieldTool",FieldOn=True)
else:
field = G4ConstantMagneticFieldTool("G4ConstantMagneticFieldTool",FieldOn=False)
# DD4hep geometry service
# Parses the given xml file
from Configurables import GeoSvc
geoservice = GeoSvc("GeoSvc", detectors=['file:DetectorDescription/Detectors/compact/FCChh_DectMaster.xml',
'file:DetectorDescription/Detectors/compact/FCChh_ECalBarrel_Mockup.xml'
],
OutputLevel = INFO)
# Geant4 service
# Configures the Geant simulation: geometry, physics list and user actions
from Configurables import G4SimSvc, G4SingleParticleGeneratorTool
# Configures the Geant simulation: geometry, physics list and user actions
geantservice = G4SimSvc("G4SimSvc", detector='G4DD4hepDetector', physicslist="G4FtfpBert",
particleGenerator=G4SingleParticleGeneratorTool("G4SingleParticleGeneratorTool",
ParticleName="e-",eMin=ENE,eMax=ENE,etaMin=0.25,etaMax=0.25,phiMin=PHIMIN,phiMax=PHIMAX,VertexX=VX,VertexY=VY,VertexZ=VZ),
actions="G4FullSimActions")
geantservice.G4commands += ["/random/setSeeds "+str(seed_array[i-1])+" 0"]
#since the loop to generate the subjobs begins with 1, we need (i-1) to index
# Geant4 algorithm
# Translates EDM to G4Event, passes the event to G4, writes out outputs via tools
from Configurables import G4SimAlg, G4SaveCalHits
# and a tool that saves the calorimeter hits with a name "G4SaveCalHits/saveHCalHits"
#savehcaltool = G4SaveCalHits("saveHCalHits", caloType = "HCal")
#savehcaltool.DataOutputs.caloClusters.Path = "HCalClusters"
#savehcaltool.DataOutputs.caloHits.Path = "HCalHits"
saveecaltool = G4SaveCalHits("saveECalHits", caloType = "ECal")
saveecaltool.DataOutputs.caloClusters.Path = "ECalClusters"
saveecaltool.DataOutputs.caloHits.Path = "ECalHits"
# next, create the G4 algorithm, giving the list of names of tools ("XX/YY")
geantsim = G4SimAlg("G4SimAlg",
outputs= [#"G4SaveCalHits/saveHCalHits",
"G4SaveCalHits/saveECalHits"])
# PODIO algorithm
from Configurables import PodioOutput
out = PodioOutput("out",
OutputLevel=INFO)
if CLUSTER==1: #otherwise use the generic name output.root for Grid runs
out.filename = "e"+str(int(ENE/1e3))+"_part"+str(i)+".root"
out.outputCommands = ["keep *"]
# ApplicationMgr
from Configurables import ApplicationMgr
ApplicationMgr( TopAlg = [geantsim, out],
EvtSel = 'NONE',
EvtMax = EVTMAX,
# order is important, as GeoSvc is needed by G4SimSvc
ExtSvc = [podioevent, geoservice, geantservice],
OutputLevel=INFO
)
| [
"broach@cern.ch"
] | broach@cern.ch |
6927b91ad3c9f7f337fb78d4a1c5f8811b1977c6 | 3c9d84600099e4ce2fd4d209d2c2c98d7ba9f2b3 | /models/item.py | 2459267155553a5f129621a0f5ab2f1c9b8944d5 | [] | no_license | abelhOrihuela/komet-scrapper | f570f1cac10dd445d327359f0d718b7dbe2dbc0d | de413279369f9667b1cfaa19e83f6cb23e79c6b7 | refs/heads/master | 2023-08-01T04:23:46.092889 | 2021-09-14T20:13:55 | 2021-09-14T20:13:55 | 406,469,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | from db import db
class ItemModel(db.Model):
    """SQLAlchemy model for a scraped catalogue item."""

    __tablename__ = "items"

    id = db.Column(db.Integer, primary_key=True)
    # URL-friendly identifier used for lookups (no unique constraint here).
    slug = db.Column(db.String(200), nullable=False)
    description = db.Column(db.Text(500), nullable=False)
    # Raw inventory payload as text — presumably serialized data; TODO confirm format.
    inventory = db.Column(db.Text(500), nullable=False)
    created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
    updated_at = db.Column(
        db.DateTime(timezone=True),
        server_default=db.func.now(),
        server_onupdate=db.func.now(),
    )

    def __init__(self, slug, description, inventory):
        self.slug = slug
        self.description = description
        self.inventory = inventory

    @classmethod
    def find_by_slug(cls, _slug: str):
        """Return the first item with the given slug, or None."""
        return cls.query.filter_by(slug=_slug).first()

    def save_to_db(self) -> None:
        """Persist this item; roll back (and swallow the error) on failure.

        Fix: the original used a bare ``except:``, which also traps
        SystemExit/KeyboardInterrupt; narrowed to ``Exception``. The error
        is still swallowed after rollback to preserve the original
        best-effort behaviour.
        """
        try:
            db.session.add(self)
            db.session.commit()
        except Exception:
            db.session.rollback()

    def delete_from_db(self) -> None:
        """Delete this item and commit immediately."""
        db.session.delete(self)
        db.session.commit()
| [
"abelorihuelamendoza@hotmail.com"
] | abelorihuelamendoza@hotmail.com |
8cc46188aaa9c714c01b161566a49af267641640 | b860530cc9f14d23fefada526f36fe3eae34e42c | /tracnghiem/admin.py | 5e86c22bc9bdfb2b7ae7920c83df3a7a07080aee | [] | no_license | anhpt204/hvhc | fa0386f624f9699b56570ec06fa724894d04b60a | 0b8d2f48bc8068413b6e53b989205fef19358a80 | refs/heads/master | 2022-12-06T01:00:45.232069 | 2016-08-16T15:50:52 | 2016-08-16T15:50:52 | 42,731,930 | 0 | 0 | null | 2022-11-22T00:27:27 | 2015-09-18T15:54:38 | Python | UTF-8 | Python | false | false | 9,173 | py | # -*- encoding: utf-8 -*-
from django.contrib.admin.options import TabularInline, ModelAdmin
from tracnghiem.models import Answer, QuestionGroup, MCQuestion, TFQuestion, SinhDeConf, LogSinhDe,\
NganHangDe, KHThi, BaiThi, ImportMCQuestion
from django.http.response import HttpResponseRedirect
from django.contrib import admin
import json
# from django.contrib.auth.decorators import permission_required
#from permission.decorators import permission_required
from hvhc import PERM_BOC_DE, PERM_XEM_IN_DE
from daotao.models import SinhVien
from common.models import MyModelAdmin
#Override modeladmin
# class MyModelAdmin(admin.ModelAdmin):
# def get_form(self, request, obj=None, **kwargs):
# if hasattr(self, 'field_permissions'):
# user = request.user
# for _field in self.opts.fields:
# perm = self.field_permissions.get(_field.name)
# if perm and not user.has_perm(perm):
# if self.exclude:
# self.exclude.append(_field.name)
# else:
# self.exclude=[_field.name]
# return super(MyModelAdmin, self).get_form(request, obj, **kwargs)
class AnswerInLine(TabularInline):
    # Inline editor for the (exactly four) answer options of an MC question.
    model = Answer
    extra=4
    max_num=4
# class QuestionGroup_SettingInLine(TabularInline):
# model = QuestionGroup_Setting
# fields=('question_group', 'question_type', 'mark_per_question', 'num_of_questions')
class SinhDeConfInline(TabularInline):
    # Per-level / per-question-type quota rows for one exam-generation run.
    model = SinhDeConf
    fields = ('level', 'loaiCauHoi', 'soLuong')
# class Chapter_SettingInLine(TabularInline):
# model = Chapter_Setting
# fields=('chapter', 'num_of_questions')
# class CaThiAdmin(ModelAdmin):
# # form = CaThiAdminForm
# model = CaThi
# filter_horizontal =('ds_thisinh', 'ds_giamthi')
# # form = CaThiForm
# list_display = ('title', 'mon_thi', 'ngay_thi', 'description')
# fields=('title', 'mon_thi', 'ds_giamthi', 'ds_thisinh', 'ngay_thi',
# 'tg_bat_dau', 'tg_ket_thuc', 'pass_mark','tao_moi_de_thi',
# 'description')
# # exclude=('ds_sv_thi',)
#
# def add_view(self, request, form_url='', extra_context=None):
# self.inlines = []
# return ModelAdmin.add_view(self, request, form_url=form_url, extra_context=extra_context)
#
# def change_view(self, request, object_id, form_url='', extra_context=None):
# self.inlines = [QuestionGroup_SettingInLine, Chapter_SettingInLine]
# return ModelAdmin.change_view(self, request, object_id, form_url=form_url, extra_context=extra_context)
#
class QuestionGroupAdmin(ModelAdmin):
    # Plain admin for question groups; default options only.
    model = QuestionGroup
class MCQuestionAdmin(ModelAdmin):
    # Admin for multiple-choice questions; the four answers are edited inline.
    model=MCQuestion
    list_display = ('maCauHoi', 'monHoc', 'doiTuong', 'noiDung', 'thuocChuong', 'prior', 'diem')
    list_filter = ('monHoc', 'doiTuong')
    fields = ('maCauHoi', 'monHoc', 'doiTuong',
              'prior', 'thuocChuong', #'taoBoi',
              'noiDung', 'diem', 'figure', )#'audio', 'clip' )
    search_fields = ('noiDung',)
    # filter_horizontal = ('ca_thi',)
    inlines = [AnswerInLine]
class LogSinhDeAdmin(ModelAdmin):
    # Admin for exam-generation logs; stamps the creator and links to the generator view.
    model = LogSinhDe
    fields = ("monHoc", 'doiTuong', 'soLuong', 'ngayTao')
    list_display=("monHoc", 'doiTuong', 'ngayTao', 'nguoiTao', 'soLuong', 'sinhDe')
    inlines=[SinhDeConfInline]
    def save_model(self, request, obj, form, change):
        # Record the current admin user as the creator before saving.
        instance = form.save(commit=False)
        instance.nguoiTao = request.user
        instance.save()
    def sinhDe(self, obj):
        # Render a link to the view that generates exams for this log entry.
        # ds_dethi = obj.sinhDe()
        return u'<a href="%s">Sinh đề</a>' % ('/hvhc/tracnghiem/sinhde/'+str(obj.pk)+'/')
    sinhDe.allow_tags=True
    sinhDe.short_description="Sinh đề"
class TFQuestionAdmin(ModelAdmin):
    # Admin for true/false questions.
    model = TFQuestion
    list_display = ('monHoc', 'doiTuong', 'noiDung')
    fields = ('monHoc', 'doiTuong', 'prior', 'thuocChuong',
              'noiDung', 'figure', 'audio', 'clip', 'isTrue' )
    list_filter = ('monHoc',)
class NganHangDeAdmin(ModelAdmin):
    # Admin for the exam bank: approve/unapprove actions and a PDF export link.
    model=NganHangDe
    list_display=('maDeThi', 'get_monHoc', 'get_doiTuong', 'ngay_tao', 'daDuyet', 'export_pdf')
    list_filter=('logSinhDe__doiTuong', 'logSinhDe__monHoc', 'ngay_tao', 'daDuyet')
    actions=['duyet_deThi', 'boDuyet_deThi']
    def get_monHoc(self, obj):
        # Subject of the generation run this exam belongs to.
        return obj.logSinhDe.monHoc
    get_monHoc.short_description="Môn thi"
    def get_doiTuong(self, obj):
        # Target audience of the generation run.
        return obj.logSinhDe.doiTuong
    get_doiTuong.short_description="Đối tượng"
    def duyet_deThi(self, request, queryset):
        # Bulk-approve the selected exams.
        queryset.update(daDuyet=True)
    duyet_deThi.short_description = "Duyệt các đề đã chọn"
    def boDuyet_deThi(self, request, queryset):
        # Bulk-unapprove the selected exams.
        queryset.update(daDuyet=False)
    boDuyet_deThi.short_description = "Bỏ duyệt các đề đã chọn"
    def export_pdf(self, obj):
        # Link to the PDF export view for this exam.
        return u'<a href="%s">PDF</a>' % ('/hvhc/tracnghiem/export/dethi/'+str(obj.pk)+'/')
    export_pdf.allow_tags=True
    export_pdf.short_description="Đề thi"
class KHThiAdmin(ModelAdmin):
    """Admin for exam plans (KHThi).

    The change list conditionally shows a "draw exam" link and/or a
    "view exam" link depending on the user's permissions (directly held
    or via groups).
    """
    model=KHThi
    filter_horizontal =('ds_thisinh', 'ds_giamthi')
    fields = ['ten', 'mon_thi', 'nam_hoc', 'hoc_ky', 'doi_tuong',
              'ds_thisinh',
              'ds_giamthi',
              'ngay_thi', 'tg_bat_dau', 'tg_thi', 'trang_thai',
              # for test
              #'de_thi', 'dap_an'
              ]

    def boc_tron_de(self, obj):
        # Link to draw (or re-draw) the exam paper for this plan.
        dethi = json.loads(obj.de_thi)
        if len(dethi) == 0:
            return u'<a href="%s">Bốc đề</a>' % ('/hvhc/tracnghiem/khthi/boctrondethi/'+str(obj.pk)+'/')
        else:
            return u'Đã có, <a href="%s">Bốc lại</a>' % ('/hvhc/tracnghiem/khthi/boctrondethi/'+str(obj.pk)+'/')
    boc_tron_de.allow_tags=True
    boc_tron_de.short_description="Thực hiện"

    def xem_de(self, obj):
        # Link to view the drawn exam, or a "not yet" label.
        dethi = json.loads(obj.de_thi)
        if len(dethi) == 0:
            return u'Chưa có'
        else:
            return u'<a href="%s">Xem đề</a>' % ('/hvhc/tracnghiem/khthi/show/'+str(obj.pk)+'/')
    xem_de.allow_tags=True
    xem_de.short_description="Xem"

    def get_list_display(self, request):
        ld = ['ten', 'mon_thi', 'doi_tuong', 'nam_hoc', 'hoc_ky',
              'ngay_thi',
              'tg_bat_dau', 'tg_thi', 'trang_thai', 'nguoi_boc_de']
        # Collect every permission codename the user holds, directly and
        # through groups.  Bug fix: the original scanned with ``break``
        # after the first match, so a user holding both PERM_BOC_DE and
        # PERM_XEM_IN_DE could be shown only one of the two columns,
        # depending on iteration order.
        codenames = set()
        for perm in request.user.user_permissions.all():
            codenames.add(perm.codename)
        for group in request.user.groups.all():
            for perm in group.permissions.all():
                codenames.add(perm.codename)
        if PERM_BOC_DE in codenames:
            ld.append('boc_tron_de')
        if PERM_XEM_IN_DE in codenames:
            ld.append('xem_de')
        return ld
class DiemAdmin(ModelAdmin):
    # Admin for exam results (BaiThi): student columns plus a grade-sheet export.
    model = BaiThi
    list_display = ['get_ma_sv', 'get_ho_ten', 'get_lop', 'get_mon_thi', 'diem']
    list_filter = ['thi_sinh__lop', 'khthi']
    actions=['export_pdf']
    def get_ma_sv(self, obj):
        # Student id.
        return obj.thi_sinh.ma_sv
    get_ma_sv.short_description = 'Mã SV'
    def get_ho_ten(self, obj):
        # Full name = family/middle names + given name.
        return '%s %s' %(obj.thi_sinh.ho_dem, obj.thi_sinh.ten)
    get_ho_ten.short_description = 'Họ và tên'
    def get_lop(self, obj):
        # Student's class.
        return obj.thi_sinh.lop
    get_lop.short_description = 'Lớp'
    def get_mon_thi(self, obj):
        # Subject of the exam plan this result belongs to.
        return obj.khthi.mon_thi
    get_mon_thi.short_description='Môn thi'
    def export_pdf(self, request, queryset):
        # Redirect to the grade-sheet export view with the selected ids
        # joined by '-' in the URL.
        bts = '-'.join([str(obj.id) for obj in queryset])
        return HttpResponseRedirect('/hvhc/tracnghiem/export_bd/' + bts + '/')
    export_pdf.short_description = "Xuất bảng điểm"
class ImportMCQuestionAdmin(ModelAdmin):
    # Admin for question-import jobs; each row links to the import view.
    model = ImportMCQuestion
    list_display=['mon_thi', 'doi_tuong', 'import_file', 'import_data']
    def import_data(self, obj):
        # Render a link to the view that performs the import for this job.
        # obj.import_data()
        return u'<a href="%s">Import</a>' % ('/hvhc/tracnghiem/import/mc/'+str(obj.pk)+'/')
    import_data.allow_tags=True
    import_data.short_description="Import"
# Register the exam-bank models with their customised admin classes.
admin.site.register(LogSinhDe, LogSinhDeAdmin)
admin.site.register(NganHangDe, NganHangDeAdmin)
admin.site.register(QuestionGroup, QuestionGroupAdmin)
admin.site.register(MCQuestion, MCQuestionAdmin)
admin.site.register(TFQuestion, TFQuestionAdmin)
admin.site.register(KHThi, KHThiAdmin)
admin.site.register(BaiThi, DiemAdmin)
admin.site.register(ImportMCQuestion, ImportMCQuestionAdmin)
| [
"anh.pt204@gmail.com"
] | anh.pt204@gmail.com |
ccec29fd6ea83bdc111cb217e95734492d2579ad | 42348c0ff9785bbab18d87f277df791331bbc121 | /tests/test_pitches.py | 2d1420380d1934aca34db60e8b9154860a4255cc | [
"MIT"
] | permissive | vincentouma/thinkout | 783449834bd856d17e5273d9d3a50ecb6d79f6ef | 85306ccec7924ad4fd6fe7ffb75aa537d9fe97c0 | refs/heads/master | 2020-06-29T11:48:09.283309 | 2019-08-06T05:59:14 | 2019-08-06T05:59:14 | 200,524,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import unittest
from app.models import User
# NOTE(review): these look like unittest.TestCase methods but are defined at
# module level with a bare ``self`` and no enclosing class; ``self.new_user``
# is never created, so they cannot run as written — confirm intended structure.
def test_no_access_password(self):
    with self.assertRaises(AttributeError):
        self.new_user.password
def test_password_verification(self):
    self.assertTrue(self.new_user.verify_password ('banana'))
| [
"vinceoumah@gmail.com"
] | vinceoumah@gmail.com |
bc4f15d457c427a761e9a4b578cb3ad99200ad4b | cf794e72e2349e08250a3af9d31d7fe5627b3f12 | /src/mfs.py | acaf429dacf9a6b2922be411e74ad3a18de2869e | [] | no_license | chiiph/MusicFS | 257640947dabae0d903fda6c40faf69f04e76d26 | 34e7643ec1c67cc1678b54fae3538b26febf9124 | refs/heads/master | 2016-09-06T19:18:02.171223 | 2011-11-21T19:48:42 | 2011-11-21T19:48:42 | 2,802,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,713 | py | from query import *
import fuse
import argparse
import sys
from time import time
import stat
import os
import errno
fuse.fuse_python_api = (0, 2)
class Stat(fuse.Stat):
def __init__(self):
self.st_mode = stat.S_IFDIR | 0755
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 0
self.st_uid = 1000
self.st_gid = 1002
self.st_size = 4096
self.st_atime = 0
self.st_mtime = 0
self.st_ctime = 0
def getFileStat(size):
st = Stat()
st.st_size = size
st.st_mode = stat.S_IFREG | 0666
return st
def isDir(path):
if path == "/":
return True
path = path[1:]
parts = path.split(os.sep)
# artist/<band>/<record>/<song>
# album/<record>/<song>
# song/<song>
if len(parts) < 1:
raise Exception("Path incorrecto: %s", path)
if parts[0] == "artist":
if len(parts) == 1:
# artist/
return True
if len(parts) == 2:
# artist/<band>
return True
if len(parts) == 3:
# artist/<band>/<record>
return True
return False
if parts[0] == "album":
if len(parts) == 1:
# album/
return True
if len(parts) == 2:
# album/<record>
return True
return False
if parts[0] == "song":
if len(parts) == 1:
return True
return False
return False
class MusicFS(fuse.Fuse):
def __init__(self, dirs, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
self.q = Querier(dirs)
self.q.serialize("ejemplo.xml")
self.files = {}
def getattr(self, path):
st = Stat()
if isDir(path):
return st
path = path[1:]
parts = path.split(os.sep)
if parts < 1:
raise Exception("Path invalido")
if parts[0] == "artist":
if len(parts) == 4:
# artist/<band>/<record>/<song>
band = parts[1]
record = parts[2]
song = parts[3].replace(".mp3","")
song_ref = self.q.sourceByAAS(band, record, song)
if song_ref is None:
return None
return os.stat(str(song_ref))
else:
raise Exception("getattr invalido: %s", path)
elif parts[0] == "album":
if len(parts) == 3:
# album/<record>/<song>
record = parts[1]
song = parts[2].replace(".mp3","")
song_ref = self.q.sourceByAS(record, song)
if song_ref is None:
return None
return os.stat(str(song_ref))
else:
raise Exception("getattr invalido: %s", path)
elif parts[0] == "song":
if len(parts) == 2:
# song/<song>
song = parts[1].replace(".mp3", "")
song_ref = self.q.sourceByS(song)
if song_ref is None:
return None
return os.stat(str(song_ref))
else:
raise Exception("getattr invalido: %s", path)
return None
def readdir(self, path, offset):
yield fuse.Direntry(".")
yield fuse.Direntry("..")
if path == "/":
yield fuse.Direntry("artist")
yield fuse.Direntry("album")
yield fuse.Direntry("song")
else:
path = path[1:]
parts = path.split(os.sep)
if parts < 1:
raise Exception("Path invalido")
if parts[0] == "artist":
if len(parts) == 1:
# artist/
for a in self.q.artists():
yield fuse.Direntry(str(a[0]))
if len(parts) == 2:
# artist/<band>
band = parts[1]
for a in self.q.albumsByArtist(band):
yield fuse.Direntry(str(a[0]))
if len(parts) == 3:
# artist/<band>/<record>
band = parts[1]
record = parts[2]
for s in self.q.songsByArtistAlbum(band, record):
yield fuse.Direntry(str(s[0])+".mp3")
elif parts[0] == "album":
if len(parts) == 1:
# album/
for a in self.q.albums():
yield fuse.Direntry(str(a[0]))
if len(parts) == 2:
# album/<record>
record = parts[1]
for s in self.q.songsByAlbum(record):
yield fuse.Direntry(str(s[0])+".mp3")
elif parts[0] == "song":
if len(parts) == 1:
# song/
for a in self.q.songs():
yield fuse.Direntry(str(a[0])+".mp3")
else:
raise Exception("Path invalido")
def mknod(self, path, mode, dev):
return 0
def unlink(self, path):
return 0
def read(self, path, size, offset):
path = path[1:]
parts = path.split(os.sep)
if parts < 1:
raise Exception("Path invalido")
if parts[0] == "artist":
if len(parts) == 4:
# artist/<band>/<record>/<song>
self.files[path].seek(offset)
return self.files[path].read(size)
else:
raise Exception("Archivo invalido: %s", path)
elif parts[0] == "album":
if len(parts) == 3:
# album/<record>/<song>
self.files[path].seek(offset)
return self.files[path].read(size)
else:
raise Exception("Archivo invalido: %s", path)
elif parts[0] == "song":
if len(parts) == 2:
# song/<song>
self.files[path].seek(offset)
return self.files[path].read(size)
return ""
def write(self, path, buf, offset):
return 0
def release(self, path, flags):
if not path in self.files.keys():
return 1
self.files[path].close()
del(self.files[path])
return 0
def open(self, path, flags):
if path in self.files.keys():
return 0
path = path[1:]
parts = path.split(os.sep)
if parts < 1:
raise Exception("Path invalido")
if parts[0] == "artist":
if len(parts) == 4:
# artist/<band>/<record>/<song>
band = parts[1]
record = parts[2]
song = parts[3].replace(".mp3", "")
song_ref = self.q.sourceByAAS(band, record, song)
if song_ref is None:
return 1
self.files[path] = open(str(song_ref), "r")
return 0
else:
return 1
elif parts[0] == "album":
if len(parts) == 3:
# album/<record>/<song>
record = parts[1]
song = parts[2].replace(".mp3", "")
song_ref = self.q.sourceByAS(record, song)
if song_ref is None:
return 1
self.files[path] = open(str(song_ref), "r")
return 0
else:
return 1
elif parts[0] == "song":
if len(parts) == 2:
# song/<song>
song = parts[1].replace(".mp3", "")
song_ref = self.q.sourceByS(song)
if song_ref is None:
return 1
self.files[path] = open(str(song_ref), "r")
return 0
return 1
def truncate(self, path, size):
return 0
def utime(self, path, times):
return 0
def mkdir(self, path, mode):
return 0
def rmdir(self, path):
return 0
def rename(self, pathfrom, pathto):
return 0
def fsync(self, path, isfsyncfile):
return 0
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dirs', action="store", type=str)
parsed = parser.parse_args(args=sys.argv[-2:])
del(sys.argv[-2:])
usage="""
musicfs
""" + fuse.Fuse.fusage
server = MusicFS(parsed.dirs, version="%prog " + fuse.__version__,
usage=usage, dash_s_do='setsingle')
server.parse(errex=1)
server.main()
if __name__ == '__main__':
main()
| [
"chiiph@torproject.org"
] | chiiph@torproject.org |
25cb0158d1b95bf84553c1dce344cc5a099f7fb7 | d698301715313eac017d89953094c0cfd89631b1 | /shorty/user.py | 4a5cfa9fecb3e34ddae6f5f317db807973d22a27 | [
"BSD-2-Clause"
] | permissive | angelcarballo/shorty | 1885353e55ba9f728eac4fe9ca47601bad2df0b8 | c6ac565e7998f131ed48229ea859adfec87ed8aa | refs/heads/master | 2021-08-31T13:15:09.585024 | 2017-12-21T12:12:12 | 2017-12-21T12:12:12 | 114,754,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | import secrets
class User(object):
def __init__(self, email):
self.email = email
self.update_token()
def update_token(self):
self.secure_token = self.__generate_token()
def __generate_token(self):
return secrets.token_urlsafe()
| [
"angel.carballo@simplybusiness.co.uk"
] | angel.carballo@simplybusiness.co.uk |
a968556840fa07770885099bb261d3e2b9d5a6d1 | 8ff1801e8db0483706743675aba09361b4ccc0b3 | /tests/sandbox/.venv_ccf_sandbox/bin/wheel | 14901ac7e748890b4c61e98782c4e713bec86b47 | [
"Apache-2.0"
] | permissive | iLuSIAnn/testing | f8f08e8873ab01b49ad93288baa7742383cbb562 | b8b735c9e4141eb39562d142e61b06f89b23a475 | refs/heads/master | 2023-03-01T10:53:18.526180 | 2021-01-28T10:57:50 | 2021-01-28T10:57:50 | 333,730,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | #!/home/ubuntu/CCF/tests/sandbox/.venv_ccf_sandbox/bin/python3.8
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"ubuntu@ccfproject.e1c000lhmreereztz3ihdi2bsg.zx.internal.cloudapp.net"
] | ubuntu@ccfproject.e1c000lhmreereztz3ihdi2bsg.zx.internal.cloudapp.net | |
6aad1e54b8786ecb8e264520db3f9ee24f1bfb49 | 9ac99a99dc8f79f52fbbe3e8a5b311b518fe45d9 | /apps/hrm/models/employee_types.py | 35bc9fbf61826d7c5683608a6038c4e0d7ac01e7 | [] | no_license | nttlong/quicky-01 | eb61620e01f04909d564244c46a03ca2b69dfecc | 0f5610aa7027429bdd9ca9b45899a472c372c6cc | refs/heads/master | 2020-03-25T17:45:31.633347 | 2018-11-27T15:02:30 | 2018-11-27T15:02:30 | 143,994,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from qmongo import extends, extends_dict,define
from . commons import base
model_name = "employee_types"
extends(
model_name,
base.model_name,
[],
formular = ("text")
)
| [
"zugeliang2000@gmail.com"
] | zugeliang2000@gmail.com |
78628bb42876d7fa9c9b9a641465f23d3409700b | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptcapacity/l3usageperhist1d.py | 6ce6ba4d67fb03c133a1fb3f10af92213f8034cd | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 17,558 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3UsagePerHist1d(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.L3UsagePerHist1d", "Layer3 entries usage percentage")
counter = CounterMeta("normalizedv6", CounterCategory.GAUGE, "percentage", "Local v6 L3 entries usage percentage")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "normalizedv6Min"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "normalizedv6Max"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "normalizedv6Avg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "normalizedv6Spct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "normalizedv6Thr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "normalizedv6Tr"
meta._counters.append(counter)
counter = CounterMeta("normalizedv4", CounterCategory.GAUGE, "percentage", "Local v4 L3 entries usage percentage")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "normalizedv4Min"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "normalizedv4Max"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "normalizedv4Avg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "normalizedv4Spct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "normalizedv4Thr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "normalizedv4Tr"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityL3UsagePerHist1d"
meta.rnFormat = "HDeqptcapacityL3UsagePer1d-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Layer3 entries usage percentage stats in 1 day"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.eqptcapacity.L3UsagePerHist")
meta.rnPrefixes = [
('HDeqptcapacityL3UsagePer1d-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 27165, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "normalizedv4Avg", "normalizedv4Avg", 27199, PropCategory.IMPLICIT_AVG)
prop.label = "Local v4 L3 entries usage percentage average value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4Avg", prop)
prop = PropMeta("str", "normalizedv4Max", "normalizedv4Max", 27198, PropCategory.IMPLICIT_MAX)
prop.label = "Local v4 L3 entries usage percentage maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4Max", prop)
prop = PropMeta("str", "normalizedv4Min", "normalizedv4Min", 27197, PropCategory.IMPLICIT_MIN)
prop.label = "Local v4 L3 entries usage percentage minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4Min", prop)
prop = PropMeta("str", "normalizedv4Spct", "normalizedv4Spct", 27200, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Local v4 L3 entries usage percentage suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4Spct", prop)
prop = PropMeta("str", "normalizedv4Thr", "normalizedv4Thr", 27201, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Local v4 L3 entries usage percentage thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("normalizedv4Thr", prop)
prop = PropMeta("str", "normalizedv4Tr", "normalizedv4Tr", 27202, PropCategory.IMPLICIT_TREND)
prop.label = "Local v4 L3 entries usage percentage trend"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv4Tr", prop)
prop = PropMeta("str", "normalizedv6Avg", "normalizedv6Avg", 27214, PropCategory.IMPLICIT_AVG)
prop.label = "Local v6 L3 entries usage percentage average value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv6Avg", prop)
prop = PropMeta("str", "normalizedv6Max", "normalizedv6Max", 27213, PropCategory.IMPLICIT_MAX)
prop.label = "Local v6 L3 entries usage percentage maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv6Max", prop)
prop = PropMeta("str", "normalizedv6Min", "normalizedv6Min", 27212, PropCategory.IMPLICIT_MIN)
prop.label = "Local v6 L3 entries usage percentage minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv6Min", prop)
prop = PropMeta("str", "normalizedv6Spct", "normalizedv6Spct", 27215, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Local v6 L3 entries usage percentage suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv6Spct", prop)
prop = PropMeta("str", "normalizedv6Thr", "normalizedv6Thr", 27216, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Local v6 L3 entries usage percentage thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("normalizedv6Thr", prop)
prop = PropMeta("str", "normalizedv6Tr", "normalizedv6Tr", 27217, PropCategory.IMPLICIT_TREND)
prop.label = "Local v6 L3 entries usage percentage trend"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedv6Tr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
cda1a433ead7cc28c5869dbdca0b42da084aa990 | ff5a27af28042090966571a23274707b4d27a295 | /download2-8-2.py | 38bb0ccc736634ebaa599bf11239af35f30ccece | [] | no_license | marlonpark1/python_section2 | bec901c4344a1638f7599ed1f9e5b9d96ee00bf2 | 26af66f459fc4babaec4e15eb1f0a48312c28ecf | refs/heads/master | 2020-03-28T05:20:04.446657 | 2018-09-07T18:21:26 | 2018-09-07T18:21:26 | 147,769,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | from bs4 import BeautifulSoup
import urllib.request as req
import urllib.parse as rep
import sys
import io
import os
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')
# 403 Error 발생 방지 코드
opener = req.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
req.install_opener(opener)
base = "https://www.inflearn.com/"
quote = rep.quote_plus("추천-강좌")
url = base + quote
print(url)
res = req.urlopen(url)
savePath = "C:\\imagedown\\" # C:/imagedown/ 이미지 다운로드 폴더
try:
if not os.path.isdir(savePath):
os.makedirs(os.path.join(savePath))
except OSError as e:
if e.errno != e.EEXIST:
print("폴더만들기 실패!")
raise # 파이썬 에러발생 코드
soup = BeautifulSoup(res, "html.parser")
imge_list = soup.select("ul.slides")[0]
for i, e in enumerate(imge_list, 1):
with open(savePath+'test_'+str(i)+'.txt', 'wt') as f:
f.write(e.select_one("h4.block_title > a").string)
fullFileName = os.path.join(savePath, savePath+str(i)+'.jpg')
req.urlretrieve(e.select_one("div.block_media > a > img")['src'], fullFileName)
print("다운로드 완료")
| [
"marlonpark@daum.net"
] | marlonpark@daum.net |
096785213f7851045fcee133258e87384a4a1c78 | 1f2310e874c9b42809e1413005a6aa950eee6b8d | /homework_1/account.py | bfba83b886d15507c7c3256c1ca19d6bf53ad772 | [] | no_license | eakarpov/mail.ru_python | 17dd1703405b4a8ce5d2df15d8e3011c7cbe7c8e | fcf43458d0261ed85b17ad185dc12ce3e55da545 | refs/heads/master | 2021-01-21T01:20:25.522926 | 2017-05-06T23:56:31 | 2017-05-06T23:56:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,817 | py | import sys
from decimal import Decimal
def round_decorator(inner_func):
def wrapper(self):
return round(inner_func(self), 2)
return wrapper
class Charge:
def __init__(self, value):
self._value = value
def __str__(self):
return str(self.get_value)
@property
@round_decorator
def get_value(self):
return self._value
class Account:
def __init__(self, total=Decimal(0)):
if isinstance(total, (int, float, Decimal)):
if total < 0:
raise AttributeError("Constructor 'Account' parameter 'total'")
else:
self._total = Decimal(total)
self._charges = []
self._current = 0
else:
raise ValueError("Account(" + str(total) + ")")
def __iter__(self):
return iter(self._charges)
def __next__(self):
if self._current > len(self._charges):
self._current = 0
raise StopIteration
else:
self._current += 1
return self._current - 1
@property
@round_decorator
def get_total(self):
return self._total
def income(self, amount):
if isinstance(amount, (int, float, Decimal)):
if amount < 0:
raise AttributeError("Function 'income' parameter 'amount'")
else:
decimal_amount = Decimal(amount)
self._charges.append(Charge(decimal_amount))
self._total += decimal_amount
else:
raise ValueError("income(" + str(amount) + ")")
def outcome(self, amount):
if isinstance(amount, (int, float, Decimal)):
if amount < 0:
raise AttributeError("Function 'outcome' parameter 'amount'")
decimal_amount = Decimal(amount)
if decimal_amount - self._total > 0:
print("There is no requested amount of money: " + str(amount))
else:
self._charges.append(Charge(-decimal_amount))
self._total -= decimal_amount
else:
raise ValueError("outcome(" + str(amount) + ")")
if __name__ == "__main__":
try:
account = Account(2.342)
account.outcome(32)
account.income(3.3232876)
account.income(2.3272)
account.outcome(3.785335)
account.outcome(0.001)
account.income(1.323)
account.outcome(12.323)
print("\nOperation history:")
for elem in account:
print(elem)
print("\nTotal: " + str(account.get_total))
except ValueError:
print("Given parameter must be an instance of int, float or Decimal -", sys.exc_info()[1])
except AttributeError:
print(sys.exc_info()[1], "must be non-negative")
| [
"allxf95@gmail.com"
] | allxf95@gmail.com |
abdaeb2d9d684067ec9569099114518269904dcc | 58e8567e8e337cc3be55ffc60d30692b354d1a41 | /pennylane_sf/tf.py | 8bbbfcf839a5706ed99730b9c2df8f2f91df36a5 | [
"Apache-2.0"
] | permissive | albi3ro/pennylane-sf | 049f0710adae59dcb4931f144c5542b2e95908f9 | 5d306514401e7dd5a3d9a50b5e3c210e7c7d2bd4 | refs/heads/master | 2023-01-01T21:01:53.884960 | 2020-10-20T08:56:42 | 2020-10-20T08:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,913 | py | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Strawberry Fields TF backend for PennyLane.
"""
from collections import OrderedDict
from collections.abc import Sequence # pylint: disable=no-name-in-module
import uuid
import numpy as np
import tensorflow as tf
import strawberryfields as sf
from strawberryfields.backends.tfbackend.states import FockStateTF
# import state preparations
from strawberryfields.ops import (
# Catstate,
Coherent,
DensityMatrix,
DisplacedSqueezed,
Fock,
Ket,
Squeezed,
Thermal,
Gaussian,
)
# import gates
from strawberryfields.ops import (
BSgate,
CKgate,
CXgate,
CZgate,
Dgate,
Kgate,
Pgate,
Rgate,
S2gate,
Sgate,
Vgate,
Interferometer,
)
from pennylane.operation import Operator
from pennylane.wires import Wires
from pennylane.variable import Variable
from .expectations import mean_photon, number_expectation, homodyne, poly_xp
from .simulator import StrawberryFieldsSimulator
def identity(state, device_wires, params):
"""Computes the expectation value of ``qml.Identity``
observable in Strawberry Fields, corresponding to the trace.
Args:
state (strawberryfields.backends.states.BaseState): the quantum state
device_wires (Wires): the measured modes
params (Sequence): sequence of parameters (not used)
Returns:
float, float: trace and its variance
"""
# pylint: disable=unused-argument
N = state.num_modes
D = state.cutoff_dim
if N == len(device_wires):
# trace of the entire system
tr = state.trace()
return tr, tr - tr ** 2
# get the reduced density matrix
N = len(device_wires)
dm = state.reduced_dm(modes=device_wires.tolist())
# construct the standard 2D density matrix, and take the trace
new_ax = np.arange(2 * N).reshape([N, 2]).T.flatten()
tr = tf.math.real(tf.linalg.trace(tf.reshape(tf.transpose(dm, new_ax), [D ** N, D ** N])))
return tr, tr - tr ** 2
def fock_state(state, device_wires, params):
"""Computes the expectation value of the ``qml.FockStateProjector``
observable in Strawberry Fields.
Args:
state (strawberryfields.backends.states.BaseState): the quantum state
device_wires (Wires): the measured mode
params (Sequence): sequence of parameters
Returns:
float, float: Fock state probability and its variance
"""
# pylint: disable=unused-argument
n = params[0]
N = state.num_modes
if N == len(device_wires):
# expectation value of the entire system
ex = state.fock_prob(n)
return ex, ex - ex ** 2
dm = state.reduced_dm(modes=device_wires.tolist())
ex = tf.math.real(dm[tuple([n[i // 2] for i in range(len(n) * 2)])])
var = ex - ex ** 2
return ex, var
class StrawberryFieldsTF(StrawberryFieldsSimulator):
    r"""StrawberryFields TensorFlow device for PennyLane.

    For more details, see :doc:`/devices/tf`.

    Args:
        wires (int, Iterable[Number, str]]): Number of subsystems accessible on the device,
            or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)
            or strings (``['ancilla', 'q1', 'q2']``).
        analytic (bool): indicates if the device should calculate expectations
            and variances analytically
        cutoff_dim (int): Fock-space truncation dimension
        shots (int): Number of circuit evaluations/random samples used
            to estimate expectation values of observables. If ``analytic=True``,
            this setting is ignored when calculating expectation values.
        hbar (float): the convention chosen in the canonical commutation
            relation :math:`[x, p] = i \hbar`
    """

    name = "Strawberry Fields TensorFlow PennyLane plugin"
    short_name = "strawberryfields.tf"

    # CV model; supports TF passthru differentiation and provides
    # its own jacobian() implementation below
    _capabilities = {"model": "cv", "passthru_interface": "tf", "provides_jacobian": True}

    # PennyLane operation name -> Strawberry Fields gate/state preparation
    _operation_map = {
        # Cannot yet support catstates, since they still accept complex parameter
        # values in Strawberry Fields.
        # "CatState": Catstate,
        "CoherentState": Coherent,
        "FockDensityMatrix": DensityMatrix,
        "DisplacedSqueezedState": DisplacedSqueezed,
        "FockState": Fock,
        "FockStateVector": Ket,
        "SqueezedState": Squeezed,
        "ThermalState": Thermal,
        "GaussianState": Gaussian,
        "Beamsplitter": BSgate,
        "CrossKerr": CKgate,
        "ControlledAddition": CXgate,
        "ControlledPhase": CZgate,
        "Displacement": Dgate,
        "Kerr": Kgate,
        "QuadraticPhase": Pgate,
        "Rotation": Rgate,
        "TwoModeSqueezing": S2gate,
        "Squeezing": Sgate,
        "CubicPhase": Vgate,
        "Interferometer": Interferometer,
    }

    # PennyLane observable name -> measurement implementation
    _observable_map = {
        "NumberOperator": mean_photon,
        "TensorN": number_expectation,
        "X": homodyne(0),
        "P": homodyne(np.pi / 2),
        "QuadOperator": homodyne(),
        "PolyXP": poly_xp,
        "FockStateProjector": fock_state,
        "Identity": identity,
    }

    # operations whose parameters are matrices/arrays; these are not
    # registered as symbolic program parameters in apply() below
    matrix_gates = {
        "FockDensityMatrix",
        "GaussianState",
        "Interferometer",
        "FockStateVector",
    }

    _circuits = {}
    _asarray = staticmethod(tf.convert_to_tensor)

    def __init__(self, wires, *, cutoff_dim, analytic=True, shots=1000, hbar=2):
        super().__init__(wires, analytic=analytic, shots=shots, hbar=hbar)
        self.cutoff = cutoff_dim
        # maps symbolic program-parameter labels -> numeric/TF values
        self.params = dict()

    def apply(self, operation, wires, par):
        """Apply a quantum operation.

        Args:
            operation (str): name of the operation
            wires (Wires): subsystems the operation is applied on
            par (tuple): parameters for the operation
        """
        # convert PennyLane parameter conventions to
        # Strawberry Fields conventions

        # translate to consecutive wires used by device
        device_wires = self.map_wires(wires)

        if operation not in self.matrix_gates:
            # store parameters under fresh unique labels so they can be
            # fed in symbolically at engine run time (see pre_measure)
            param_labels = [str(uuid.uuid4()) for _ in range(len(par))]
            for l, v in zip(param_labels, par):
                self.params[l] = v
            par = self.prog.params(*param_labels)

        if not isinstance(par, Sequence):
            par = (par,)

        op = self._operation_map[operation](*par)
        op | [self.q[i] for i in device_wires.labels]  # pylint: disable=pointless-statement

    def pre_measure(self):
        # run the queued program on the TF backend, binding the stored
        # symbolic parameters to their (possibly tf.Variable) values
        self.eng = sf.Engine("tf", backend_options={"cutoff_dim": self.cutoff})
        results = self.eng.run(self.prog, args=self.params)
        self.state = results.state
        self.samples = results.samples

    def reset(self):
        """Reset the device"""
        self.params = dict()
        super().reset()

    def probability(self, wires=None):
        """Return the (marginal) probability of each computational basis
        state from the last run of the device.

        Args:
            wires (Iterable[Number, str], Number, str, Wires): wires to return
                marginal probabilities for. Wires not provided
                are traced out of the system.

        Returns:
            OrderedDict[tuple, float]: Dictionary mapping a tuple representing the state
            to the resulting probability. The dictionary should be sorted such that the
            state tuples are in lexicographical order.
        """
        wires = wires or self.wires
        # convert to a wires object
        wires = Wires(wires)
        # translate to wires used by device
        device_wires = self.map_wires(wires)

        N = len(wires)
        cutoff = getattr(self, "cutoff", 10)

        if N == self.state.num_modes:
            # probabilities of the entire system
            probs = tf.reshape(self.state.all_fock_probs(cutoff=cutoff), -1)
        else:
            # marginal: trace out the unmeasured modes, then rebuild a
            # Fock state object just to reuse all_fock_probs
            rdm = self.state.reduced_dm(modes=device_wires.tolist())
            new_state = FockStateTF(rdm, N, pure=False, cutoff_dim=cutoff)
            probs = tf.reshape(new_state.all_fock_probs(cutoff=cutoff), -1)

        # enumerate all basis-state tuples in lexicographical order
        ind = np.indices([cutoff] * N).reshape(N, -1).T
        probs = OrderedDict((tuple(k), v) for k, v in zip(ind, probs))
        return probs

    def jacobian(self, queue, observables, parameters):
        # pylint: disable=missing-function-docstring
        op_params = {}
        new_queue = []
        variables = []

        with tf.GradientTape(persistent=True) as tape:
            for operation in queue:
                # Copy the operation parameters to the op_params dictionary.
                # Note that these are the unwrapped parameters, so PennyLane
                # free parameters will be represented as Variable instances.
                op_params[operation] = operation.data[:]

            # Loop through the free parameter reference dictionary
            for _, par_dep_list in parameters.items():
                if not par_dep_list:
                    # parameter is not used within circuit
                    v = tf.Variable(0, dtype=tf.float64)
                    variables.append(v)
                    continue

                # get the first parameter dependency for each free parameter
                first = par_dep_list[0]

                # For the above parameter dependency, get the corresponding
                # operation parameter variable, and get the numeric value.
                # Convert the resulting value to a TensorFlow tensor.
                val = first.op.data[first.par_idx].val
                mult = first.op.data[first.par_idx].mult
                v = tf.Variable(val / mult, dtype=tf.float64)

                # Mark the variable to be watched by the gradient tape,
                # and append it to the variable list.
                variables.append(v)

                for p in par_dep_list:
                    # Replace the existing Variable free parameter in the op_params dictionary
                    # with the corresponding tf.Variable parameter.
                    # Note that the free parameter might be scaled by the
                    # variable.mult scaling factor.
                    mult = p.op.data[p.par_idx].mult
                    op_params[p.op][p.par_idx] = v * mult

            # check that no Variables remain in the op_params dictionary
            values = [item for sublist in op_params.values() for item in sublist]
            assert not any(
                isinstance(v, Variable) for v in values
            ), "A pennylane.Variable instance was not correctly converted to a tf.Variable"

            # flatten the variables list in case of nesting
            variables = tf.nest.flatten(variables)
            tape.watch(variables)

            for operation in queue:
                # Apply each operation, but instead of passing operation.parameters
                # (which contains the evaluated numeric parameter values),
                # pass op_params[operation], which contains numeric values
                # for fixed parameters, and tf.Variable objects for free parameters.
                try:
                    # turn off domain checking since PassthruQNode qfuncs can take any class as input
                    Operator.do_check_domain = False
                    # generate the new operation
                    new_op = operation.__class__(*op_params[operation], wires=operation.wires)
                finally:
                    Operator.do_check_domain = True

                new_queue.append(new_op)

            self.reset()
            # execute under the tape so gradients can flow to `variables`
            res = self.execute(new_queue, observables, parameters=parameters)
            res = tf.cast(tf.squeeze(tf.stack(res)), dtype=tf.float64)

        jac = tape.jacobian(res, variables, experimental_use_pfor=False)
        # observables independent of a variable yield None; replace with zeros
        jac = tf.stack([i if i is not None else tf.zeros(res.shape, dtype=tf.float64) for i in jac])
        return jac.numpy().T
| [
"noreply@github.com"
] | albi3ro.noreply@github.com |
9d20f3f16753593924c66dad24694eb7c72e00c0 | 795435350d2e4fe415acd1fb846abb1c0cf94911 | /client/code/do some researches on crawler/douban/douban.py | 8cac1d9078cbb947968dfe3d23aeff118fa4b940 | [] | no_license | gaoxinge/network | c3575c7f0d95f7458a4ec74880ca5b8a0bff773e | 68d307ec0756abff60914225fd38d69fa4b2a37c | refs/heads/master | 2021-06-06T05:49:36.521243 | 2021-06-02T15:42:39 | 2021-06-02T15:42:39 | 84,033,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | import requests
from lxml import etree
from Item import Item
import time
def http(url):
    """Fetch ``url`` with a desktop User-Agent header and return the response."""
    headers = {'User-Agent': 'Mozilla/5.0'}
    return requests.get(url, headers=headers)
def parse(response):
    """Yield Movie items scraped from one Douban tag-listing page."""
    # Item('Movie', [...]) builds a lightweight record type with these fields
    Movie = Item('Movie', ['title', 'rating', 'vote'])
    root = etree.HTML(response.text)
    # each movie entry lives inside a <div class="pl2"> element
    results = root.xpath('//div[@class=\'pl2\']')
    for result in results:
        movie = Movie()
        # the anchor text carries trailing separator chars; drop the last two
        movie['title'] = result.xpath('a/text()')[0][:-2].strip()
        movie['rating'] = float(result.xpath('.//span[@class=\'rating_nums\']/text()')[0])
        # vote text is presumably of the form "(12345...)"; the slices strip
        # the leading "(" and the trailing 4 characters — TODO confirm format
        movie['vote'] = int(result.xpath('.//span[@class=\'pl\']/text()')[0][1:][:-4])
        yield movie
def store(item):
    """Append one scraped item to the module-level output file ``f``."""
    line = '{}\n'.format(item)
    f.write(line)
def http_parse_store(url):
    """Fetch ``url``, parse the movie items from it, and store each one."""
    for item in parse(http(url)):
        store(item)
# Douban lists 20 movies per page; build URLs for the first 9 pages of 2016.
urls = ['https://movie.douban.com/tag/2016?start=' + str((i-1)*20) for i in range(1, 10)]
f = open('douban.txt', 'w')
start = time.time()
while urls:
    # sequential crawl: fetch, parse and persist one page at a time
    response = http(urls.pop(0))
    items = parse(response)
    for item in items:
        store(item)
# Python 2 print statement: report total elapsed seconds
print time.time() - start
f.close()
| [
"gaoxx5@gmail.com"
] | gaoxx5@gmail.com |
29ae2abcecf2e6ca20f9197d006cfee2c3be9612 | 030c7628b19142f3ec562f19575652b9ae2db5cc | /02_2018_Spring/ISYE6740_Computational Data Analysis _ Machien Learning/HW4_code/main.py | 1f6ad11caff6fdd8bd59c6db6b6d50b0871485da | [] | no_license | soo-pecialist/GeorgiaTech_CSE | 4dcdc2e3193d1ab304bd8ee183b0ef9634d1e30b | 5e3c22582dea92c8d9035ec9cf986942cc87d008 | refs/heads/master | 2021-07-20T05:37:40.641805 | 2018-07-19T18:28:02 | 2018-07-19T18:28:02 | 135,356,891 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,014 | py | #python2
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import prpy as pr
from rfdigits import loadData, trainRandomForest
#%% SECTION 1: DATA
#%% load images (if this is the first time, else see below)
Xtrain, Ytrain = loadData(num=1000)
Xtest, Ytest = loadData('test', num=200)
#%% save in numpy format for faster loading in future
with open('train1k.npz', 'wb') as f:
    np.savez(f, Xtrain=Xtrain, Ytrain=Ytrain)
with open('test.npz', 'wb') as f:
    np.savez(f, Xtest=Xtest, Ytest=Ytest)
#%% load data from npz files
trainfiles = np.load('train1k.npz')
print trainfiles['Xtrain'].shape
print trainfiles['Ytrain'].shape
testfiles = np.load('test.npz')
print testfiles['Xtest'].shape
print testfiles['Ytest'].shape
#%% load into X, Y
Xtrain, Ytrain = trainfiles['Xtrain'], trainfiles['Ytrain']
Xtest, Ytest = testfiles['Xtest'], testfiles['Ytest']
#%% print shapes
print 'Xtrain:', Xtrain.shape
print 'Ytrain:', Ytrain.shape
print ''
print 'Xtest:', Xtest.shape
print 'Ytest:', Ytest.shape
print ''
#%% visualize digits
N = Xtrain.shape[0]
# pick 12 random training indices
rand = [int(r) for r in np.random.rand(12) * N]
fig = plt.figure(figsize=(18, 6))
for i in xrange(12):
    ax = fig.add_subplot(2, 6, i+1)
    # images are stored flattened (784,); reshape back to 28x28 for display
    ax.imshow(Xtrain[rand[i], :].reshape(28, 28), cmap='gray')
    ax.set_title(Ytrain[rand[i]])
plt.axis('tight')
plt.show()
#%% SECTION 2: TRAIN AND TEST
#%% set verbose = False
pr.trees.kVerbose = False
#%% train on 1000 images per class
num_trees = 10
num_features = 28
tic = time.time()
forest = trainRandomForest(Xtrain, Ytrain, num_trees, num_features)
toc = time.time()
print "elapsed time: %.2f mins" % ((toc - tic) / 60) # 6.69 mins
# This is too long to run several experiments with. Therefore for
# further experiments we use 200 images per class.
#%% score
print 'Accuracy on training data is %.4f\n' % pr.datatools.accuracy(Xtrain, Ytrain, forest)
print 'Accuracy on testing data is %.4f\n' % pr.datatools.accuracy(Xtest, Ytest, forest)
# training accuracy = 0.7322
# testing accuracy = 0.6708
#%% visualize some incorrectly classified digits
perm = np.random.permutation(Xtest.shape[0])
i = 0
r = 0
fig = plt.figure(figsize=(18, 6))
for r in xrange(len(perm)):
    x = Xtest[perm[r], :]
    y = Ytest[perm[r]]
    yhat = forest.classify(x)
    if yhat != y:
        # show the misclassified digit with the (wrong) predicted label
        ax = fig.add_subplot(2, 6, i+1)
        ax.imshow(x.reshape(28, 28), cmap='gray')
        ax.set_title(yhat)
        plt.axis('tight')
        i += 1
    if i == 12:
        break
plt.show()
#%% SECTION 2B: VISUALIZING A TREE!
import graphviz as gv
from collections import deque
#%% a class for visualization nodes
class VizNode(object):
    """Pairs a decision-tree node with the id/parent/label needed to render it."""

    def __init__(self, node, id, parent):
        self.node = node
        self.id = id
        self.parent = parent
        if node.isLeaf():
            # Leaf label: class densities to two decimals, broken onto a
            # second row halfway through the list.
            break_at = len(node.density) / 2 - 1
            last = len(node.density) - 1
            pieces = []
            for pos, density in enumerate(node.density):
                pieces.append('%.2f' % density)
                if pos == break_at:
                    pieces.append('\n')
                elif pos != last:
                    pieces.append(' ')
            self.label = ''.join(pieces)
        else:
            # Internal node label: first few split-feature indices, then the
            # rounded split threshold tau on the next row.
            indices = str(node.q.ksum.indices[:4])
            threshold = str(round(node.q.tau, 2))
            self.label = indices + '\n' + threshold

    def get(self):
        """Return the (node-id, attribute-dict) pair expected by graphviz."""
        return (str(self.id), {'label': self.label})
#%% a class for visualization trees
class TreeViz(object):
    """Thin wrapper around a graphviz Digraph for rendering decision trees."""

    def __init__(self, format='png'):
        self.graph = gv.Digraph(format=format)

    def add_node(self, node):
        """Add a node; accepts either a plain name or a (name, attrs) tuple."""
        if isinstance(node, tuple):
            name = node[0]
            attrs = node[1]
            self.graph.node(name, **attrs)
        else:
            self.graph.node(node)

    def add_edge(self, edge):
        """Add an edge; accepts (tail, head) or ((tail, head), attrs)."""
        if isinstance(edge[0], tuple):
            endpoints = edge[0]
            attrs = edge[1]
            self.graph.edge(*endpoints, **attrs)
        else:
            self.graph.edge(*edge)

    def render(self, name):
        """Write the rendered graph to disk under ``name``."""
        self.graph.render(name)
#%% function for creating tree visualization
def visualize_tree(tree, name='tree'):
    """Render ``tree`` as an image (via graphviz) using a breadth-first walk."""
    print 'creating tree visualization...'
    viz = TreeViz()
    i = 0
    # root viz-node: id 0; parent -1 is the "no parent" sentinel
    root = VizNode(tree.root, 0, -1)
    vnodes = deque()
    vnodes.append(root)
    # BFS over the tree, assigning consecutive integer ids as we go
    while(vnodes):
        vnode = vnodes.popleft()
        viz.add_node(vnode.get())
        if vnode.parent != -1:
            viz.add_edge((str(vnode.parent), str(vnode.id)))
        if not vnode.node.isLeaf():
            i += 1
            lchild = VizNode(vnode.node.child[0], i, vnode.id)
            vnodes.append(lchild)
            i += 1
            rchild = VizNode(vnode.node.child[1], i, vnode.id)
            vnodes.append(rchild)
    viz.render(name)
#%% visualize one tree
# note: please set kDT_MaxDepth <= 3 for creating the visualization
# otherwise the tree is too big to fit into a reasonably small image
pr.trees.kDT_MaxDepth = 3
toy_forest = trainRandomForest(Xtrain, Ytrain, 2, 10)
# restore the default depth for the remaining experiments
pr.trees.kDT_MaxDepth = 5
for i, tree in enumerate(toy_forest.trees):
    visualize_tree(tree, 'tree%d' % i)
#%% SECTION 3: EXPLORE EFFECT of FOREST SIZE and NUMBER of FEATURES
#%% create new training set with n = 200 images per class
n = 200
Xtr = np.zeros((n*6, 784), dtype=np.uint8)
Ytr = np.zeros(n*6, dtype=np.uint8)
for i in xrange(6):
    # take the first n samples of each of the 6 classes
    # (the full training set stores 1000 images per class contiguously)
    Xtr[i*n: (i+1)*n, :] = Xtrain[i*1000: i*1000+n, :]
    Ytr[i*n: (i+1)*n] = Ytrain[i*1000: i*1000+n]
print 'Xtr:', Xtr.shape
print 'Ytr:', Ytr.shape
print ''
#%% train on n images per class
num_trees = 10
num_features = 28
tic = time.time()
forest = trainRandomForest(Xtr, Ytr, num_trees, num_features)
toc = time.time()
print "elapsed time: %.2f mins" % ((toc - tic) / 60) # 1.48 mins
# This is too long to run several experiments with. Therefore for
# further experiments we use 200 images per class.
#%% score
print 'Accuracy on training data is %.4f\n' % pr.datatools.accuracy(Xtr, Ytr, forest)
print 'Accuracy on testing data is %.4f\n' % pr.datatools.accuracy(Xtest, Ytest, forest)
# training accuracy = 0.7992
# testing accuracy = 0.6275
#%% evaluate forests on a grid of num_trees x num_features
num_trees = [10, 50, 100]
num_features = [10, 28, 64, 128]
acc_tr = np.zeros((len(num_trees), len(num_features)))
acc_te = np.zeros((len(num_trees), len(num_features)))
tic = time.time()
# train one forest per (M, K) grid cell and record train/test accuracy
for j, M in enumerate(num_trees):
    for k, K in enumerate(num_features):
        print M, K
        randforest = trainRandomForest(Xtr, Ytr, M, K)
        acc_tr[j, k] = pr.datatools.accuracy(Xtr, Ytr, randforest)
        acc_te[j, k] = pr.datatools.accuracy(Xtest, Ytest, randforest)
toc = time.time()
print "elapsed time: %.2f mins" % ((toc - tic) / 60) # 98.02 mins
#%%
print acc_tr
print acc_te
#%%
# Heatmaps of the accuracy grids, annotated with the numeric values.
fig = plt.figure(figsize=(9.0, 18.0))
ax = fig.add_subplot(211)
# color scale fixed to [chance (1/6), perfect (1)] so both plots compare
res = ax.imshow(acc_tr, vmin=1/6., vmax=6/6., cmap=cm.jet, interpolation='nearest')
cb = fig.colorbar(res, orientation='horizontal')
ax.set_yticks(range(3))
ax.set_yticklabels(num_trees)
ax.set_xticks(range(4))
ax.set_xticklabels(num_features)
ax.set_ylabel('number of trees')
ax.set_xlabel('number of features')
ax.set_title('training accuracy')
plt.axis('tight')
for j in xrange(len(num_trees)):
    for k in xrange(len(num_features)):
        ax.annotate('%.4g' % acc_tr[j][k], xy=(k, j),
                    horizontalalignment='center',
                    verticalalignment='center')
ax = fig.add_subplot(212)
res = ax.imshow(acc_te, vmin=1/6., vmax=6/6., cmap=cm.jet, interpolation='nearest')
plt.axis('tight')
ax.set_yticks(range(3))
ax.set_yticklabels(num_trees)
ax.set_xticks(range(4))
ax.set_xticklabels(num_features)
ax.set_ylabel('number of trees')
ax.set_xlabel('number of features')
ax.set_title('test accuracy')
plt.axis('tight')
for j in xrange(len(num_trees)):
    for k in xrange(len(num_features)):
        ax.annotate('%.4g' % acc_te[j][k], xy=(k, j),
                    horizontalalignment='center',
                    verticalalignment='center')
cb = fig.colorbar(res, orientation='horizontal')
plt.show()
#%% EOF | [
"metm.sean@gmail.com"
] | metm.sean@gmail.com |
401b58d62323b36552c64484f20ac5255b774779 | f7b3ae29e28b2e8917981d973a00d71300d0c42c | /assignments/classes_2/pair.py | f818280bf743e858790121c47601b5bc37296d2c | [] | no_license | aronlg/skoli | cdaed59ff3d67bb0d083a65347f7be63ab410c89 | 9ea7e6bc4829bd7c0edba9762d4e219ad49a5563 | refs/heads/main | 2023-09-05T13:56:28.807824 | 2021-11-09T08:58:41 | 2021-11-09T08:58:41 | 400,294,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | class Pair:
def __init__(self, val1=0, val2=0):
self.val1 = val1
self.val2 = val2
def __str__(self):
return "Value 1: {}, Value 2: {}".format(self.val1, self.val2)
def __repr__(self):
return self.__str__()
def __add__(self, other):
return Pair(self.val1 + other.val1, self.val2 + other.val2)
def __mul__(self, other):
return Pair(self.val1 * other.val1, self.val2 * other.val2) | [
"aronlgreen@gmail.com"
] | aronlgreen@gmail.com |
04ad4d19924cc49f42a7f6ac77847e9bb33362eb | 9f8fa29bb5a93f896862806157b10b55e9f26825 | /message_media_conversations/models/message_dto.py | f07f49fac18257112666287a40d5f2c106d9e2f8 | [
"Apache-2.0"
] | permissive | messagemedia/conversations-python-sdk | 1b245ca7f63ca0c6fdbcd17a9bd11565d421e2a0 | b53046540bd5c826de784228f838468c22b863cf | refs/heads/master | 2020-03-19T05:52:05.735297 | 2018-10-16T23:29:31 | 2018-10-16T23:29:31 | 135,969,313 | 0 | 0 | null | 2018-06-04T04:12:18 | 2018-06-04T04:09:34 | null | UTF-8 | Python | false | false | 2,057 | py | # -*- coding: utf-8 -*-
"""
message_media_conversations.models.message_dto
This file was automatically generated for MessageMedia by APIMATIC v2.0 ( https://apimatic.io )
"""
class MessageDto(object):
    """Plain data object for one conversation message.

    Attributes:
        channel (string): delivery channel identifier.
        id (string): message identifier.
        text (string): message body.
        timestamp (string): message timestamp.
    """

    # Maps model attribute names to the property names used by the API.
    _names = {
        "channel": 'channel',
        "id": 'id',
        "text": 'text',
        "timestamp": 'timestamp'
    }

    def __init__(self, channel=None, id=None, text=None, timestamp=None):
        """Constructor for the MessageDto class"""
        self.channel = channel
        self.id = id
        self.text = text
        self.timestamp = timestamp

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build a MessageDto from a deserialized API response dictionary.

        Args:
            dictionary (dict): keys must match the API property names.

        Returns:
            MessageDto or None: None when ``dictionary`` is None.
        """
        if dictionary is None:
            return None
        # missing keys simply become None via dict.get
        return cls(dictionary.get('channel'),
                   dictionary.get('id'),
                   dictionary.get('text'),
                   dictionary.get('timestamp'))
| [
"git@apimatic.io"
] | git@apimatic.io |
1227e2067f8c114470a88b026b4a6e6c16ee45bd | 4ea43f3f79ad483d83238d88572feb822f451372 | /philo/models/fields/__init__.py | efd315f9c3c5e11afe2ba9802508200ca1a0905c | [
"ISC"
] | permissive | kgodey/philo | c8c433d44b2f31121f13bd0ee101605be11fe9da | c19bf577d44606d2b284e6058d633f4a174b61cc | refs/heads/master | 2020-12-29T02:54:11.746966 | 2011-05-24T21:57:47 | 2011-05-24T21:57:47 | 686,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,547 | py | from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import validate_slug
from django.db import models
from django.utils import simplejson as json
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from philo.forms.fields import JSONFormField
from philo.validators import TemplateValidator, json_validator
#from philo.models.fields.entities import *
class TemplateField(models.TextField):
    """A :class:`TextField` which is validated with a :class:`.TemplateValidator`. ``allow``, ``disallow``, and ``secure`` will be passed into the validator's construction."""
    def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
        super(TemplateField, self).__init__(*args, **kwargs)
        # append (not replace) so TextField's own validators still run
        self.validators.append(TemplateValidator(allow, disallow, secure))
class JSONDescriptor(object):
    """Data descriptor that lazily decodes a JSON-encoded model attribute.

    The decoded Python object is cached on the instance under the field's
    name, while the raw JSON string lives under the field's attname.
    """

    def __init__(self, field):
        self.field = field

    def __get__(self, instance, owner):
        if instance is None:
            raise AttributeError  # class-level access is unsupported
        name = self.field.name
        cache = instance.__dict__
        if name not in cache:
            # decode the stored JSON string once and memoize the result
            raw = getattr(instance, self.field.attname)
            cache[name] = json.loads(raw)
        return cache[name]

    def __set__(self, instance, value):
        # keep the python object and its JSON serialization in sync
        instance.__dict__[self.field.name] = value
        setattr(instance, self.field.attname, json.dumps(value))

    def __delete__(self, instance):
        del instance.__dict__[self.field.name]
        # deleting resets the stored JSON to null rather than removing it
        setattr(instance, self.field.attname, json.dumps(None))
class JSONField(models.TextField):
    """A :class:`TextField` which stores its value on the model instance as a python object and stores its value in the database as JSON. Validated with :func:`.json_validator`."""
    default_validators = [json_validator]

    def get_attname(self):
        # the raw JSON string is stored under "<name>_json"; the decoded
        # python object is exposed under the plain name via JSONDescriptor
        return "%s_json" % self.name

    def contribute_to_class(self, cls, name):
        super(JSONField, self).contribute_to_class(cls, name)
        # install the lazy-decoding descriptor on the model class
        setattr(cls, name, JSONDescriptor(self))
        models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls)

    def fix_init_kwarg(self, sender, args, kwargs, **signal_kwargs):
        # Anything passed in as self.name is assumed to come from a serializer and
        # will be treated as a json string.
        if self.name in kwargs:
            value = kwargs.pop(self.name)

            # Hack to handle the xml serializer's handling of "null"
            if value is None:
                value = 'null'

            kwargs[self.attname] = value

    def formfield(self, *args, **kwargs):
        # always render/validate through the JSON-aware form field
        kwargs["form_class"] = JSONFormField
        return super(JSONField, self).formfield(*args, **kwargs)
class SlugMultipleChoiceField(models.Field):
    """Stores a selection of multiple items with unique slugs in the form of a comma-separated list."""
    __metaclass__ = models.SubfieldBase
    description = _("Comma-separated slug field")

    def get_internal_type(self):
        # persisted as a plain text column
        return "TextField"

    def to_python(self, value):
        # deserialize: "" / None -> [], lists pass through, "a,b" -> ["a", "b"]
        if not value:
            return []

        if isinstance(value, list):
            return value

        return value.split(',')

    def get_prep_value(self, value):
        # serialize the python list back to a comma-separated string
        return ','.join(value)

    def formfield(self, **kwargs):
        # This is necessary because django hard-codes TypedChoiceField for things with choices.
        defaults = {
            'widget': forms.CheckboxSelectMultiple,
            'choices': self.get_choices(include_blank=False),
            'label': capfirst(self.verbose_name),
            'required': not self.blank,
            'help_text': self.help_text
        }
        if self.has_default():
            if callable(self.default):
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()

        # drop any kwargs the form field doesn't understand
        for k in kwargs.keys():
            if k not in ('coerce', 'empty_value', 'choices', 'required',
                         'widget', 'label', 'initial', 'help_text',
                         'error_messages', 'show_hidden_initial'):
                del kwargs[k]

        defaults.update(kwargs)
        form_class = forms.TypedMultipleChoiceField
        return form_class(**defaults)

    def validate(self, value, model_instance):
        # collect all invalid slugs so the error reports every one of them
        invalid_values = []
        for val in value:
            try:
                validate_slug(val)
            except ValidationError:
                invalid_values.append(val)

        if invalid_values:
            # should really make a custom message.
            raise ValidationError(self.error_messages['invalid_choice'] % invalid_values)
try:
    from south.modelsinspector import add_introspection_rules
except ImportError:
    # South (migrations tool) not installed; skip rule registration
    pass
else:
    # register the custom fields so South can introspect them in migrations
    add_introspection_rules([], ["^philo\.models\.fields\.SlugMultipleChoiceField"])
    add_introspection_rules([], ["^philo\.models\.fields\.TemplateField"])
    add_introspection_rules([], ["^philo\.models\.fields\.JSONField"])
"stephen.r.burrows@gmail.com"
] | stephen.r.burrows@gmail.com |
191e06347b95e5824b343ec9e0f658e273017523 | a3c3781a870ec37872390466edb5f97a2c7ae445 | /demo.py | 422da52ac3ddc753c225cf8774881d8703e82fb9 | [] | no_license | cryer/poetry_generation | 8438b625dc72954a23c338bb1883739279bba696 | 004ef5e8c80e3cd398120aeee5f11241121aac8c | refs/heads/master | 2021-04-27T18:17:23.807073 | 2018-02-21T14:12:51 | 2018-02-21T14:12:51 | 122,337,277 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,802 | py | # _*_ coding:utf-8 _*_
import sys, os
import torch
from data import get_data
from model import PoetryModel
from torch.autograd import Variable
import numpy as np
import Config as cfg
opt = cfg.Config()
def generate(model, start_words, ix2word, word2ix, prefix_words=None):
    """Generate a poem that begins with ``start_words``.

    Args:
        model: trained char-level PoetryModel (RNN).
        start_words: string the poem must start with.
        ix2word / word2ix: vocabulary lookup tables.
        prefix_words: optional text fed through the model first to set the
            tone/style; not included in the output.

    Returns:
        list of generated characters (without the <EOP> marker).
    """
    results = list(start_words)
    start_word_len = len(start_words)
    # seed the RNN with the start-of-poem token
    input = Variable(torch.Tensor([word2ix['<START>']]).view(1, 1).long())
    if opt.use_gpu: input = input.cuda()
    hidden = None
    if prefix_words:
        # warm up the hidden state on the prefix text
        for word in prefix_words:
            output, hidden = model(input, hidden)
            input = Variable(input.data.new([word2ix[word]])).view(1, 1)
    for i in range(opt.max_gen_len):
        output, hidden = model(input, hidden)
        if i < start_word_len:
            # force-feed the user-provided start words
            w = results[i]
            input = Variable(input.data.new([word2ix[w]])).view(1, 1)
        else:
            # greedy decoding: take the most probable next character
            top_index = output.data[0].topk(1)[1][0]
            w = ix2word[top_index]
            results.append(w)
            input = Variable(input.data.new([top_index])).view(1, 1)
        if w == '<EOP>':
            # drop the end-of-poem marker from the returned text
            del results[-1]
            break
    return results
def gen_acrostic(model, start_words, ix2word, word2ix, prefix_words=None):
    """Generate an acrostic poem: each line begins with the next char of ``start_words``."""
    results = []
    start_word_len = len(start_words)
    input = Variable(torch.Tensor([word2ix['<START>']]).view(1, 1).long())
    if opt.use_gpu: input = input.cuda()
    hidden = None
    index = 0  # how many of the acrostic characters have been placed so far
    pre_word = '<START>'
    if prefix_words:
        for word in prefix_words:
            # skip characters not in the vocabulary (e.g. non-Chinese input)
            if word in word2ix:
                print("true..")
            else:
                print("false...please use Chinese input method")
                continue
            output, hidden = model(input, hidden)
            input = Variable(input.data.new([word2ix[word]])).view(1, 1)
    for i in range(opt.max_gen_len):
        output, hidden = model(input, hidden)
        top_index = output.data[0].topk(1)[1][0]
        w = ix2word[top_index]
        # at the start of a new line (previous char ended a sentence),
        # substitute the next acrostic character instead of the prediction
        if (pre_word in {u'。', u'!', '<START>'}):
            if index == start_word_len:
                # all acrostic characters placed: poem complete
                break
            else:
                w = start_words[index]
                index += 1
                input = Variable(input.data.new([word2ix[w]])).view(1, 1)
        else:
            input = Variable(input.data.new([word2ix[w]])).view(1, 1)
        results.append(w)
        pre_word = w
    return results
def gen(**kwargs):
    """CLI entry point: load the trained model and print a generated poem.

    Keyword arguments override attributes of the global ``opt`` config.
    """
    for k, v in kwargs.items():
        setattr(opt, k, v)
    data, word2ix, ix2word = get_data(opt)
    model = PoetryModel(len(word2ix), 128, 256)
    # load checkpoint onto CPU first; moved to GPU below if requested
    map_location = lambda s, l: s
    state_dict = torch.load(opt.model_path, map_location=map_location)
    model.load_state_dict(state_dict)
    if opt.use_gpu:
        model.cuda()
    # Python 2 and 3 deliver non-ASCII command-line text differently
    if sys.version_info.major == 3:
        if opt.start_words.isprintable():
            start_words = opt.start_words
            prefix_words = opt.prefix_words if opt.prefix_words else None
        else:
            # undo surrogate-escaped bytes back into proper UTF-8 text
            start_words = opt.start_words.encode('ascii', 'surrogateescape').decode('utf8')
            prefix_words = opt.prefix_words.encode('ascii', 'surrogateescape').decode(
                'utf8') if opt.prefix_words else None
    else:
        start_words = opt.start_words.decode('utf8')
        prefix_words = opt.prefix_words.decode('utf8') if opt.prefix_words else None
    # normalize ASCII punctuation to full-width Chinese punctuation
    start_words = start_words.replace(',', u',') \
        .replace('.', u'。') \
        .replace('?', u'?')
    gen_poetry = gen_acrostic if opt.acrostic else generate
    result = gen_poetry(model, start_words, ix2word, word2ix, prefix_words)
    print(''.join(result))
if __name__ == '__main__':
    # expose gen() and other module-level functions as CLI commands
    import fire
    fire.Fire()
"noreply@github.com"
] | cryer.noreply@github.com |
048997487a3581e46272c44e5cdb698450904fb8 | 7ae7c24093d6961d4cf3359441abf6140755ed77 | /api/exe/twitter-translate/en-seed.py | 27620ceafc978a7471ab62c82a567aceaf7a559b | [] | no_license | yuhonghong7035/YumaInaura | 5e518df2b983dc997fbc082bddf66cc260b8128e | 87ea64c1d2e359bbb9bb19e2622ed5b4e0d94eea | refs/heads/master | 2020-05-09T22:31:50.693862 | 2019-04-15T03:21:01 | 2019-04-15T03:21:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | #!/usr/bin/env python3
import sys, json, re, os
# Read translated tweets (a JSON array of objects) from stdin.
tweets = json.loads(sys.stdin.read())
results = []
for tweet in tweets:
    seed = {}
    # Twitter's hard limit is 280 characters; truncate the translation.
    seed['text'] = tweet['en_translated_text'][:280]
    seed['attachment_url'] = tweet['url']
    #seed['text'] = tweet['translated_text'][:240] + ' ' + tweet['url']
    results.append(seed)
print(json.dumps(results))
| [
"yuma.inaura@gmail.com"
] | yuma.inaura@gmail.com |
a68302e20767602cfb0f056fe02fe1c7984c0f8c | 738f9aac8c688fc97b7d4addfd8056c3102ad3ff | /sql_graphviz_hdon.py | b53418a85e39d0d11fae3f25118b805b25a2ab0e | [
"MIT"
] | permissive | dvisztempacct/sql_graphviz | 9c4ed370866c2887cdd73320c096f466ce2bc603 | fcf797eb51c30ab13f7f51f2aa470abed9a91319 | refs/heads/master | 2020-04-20T03:46:41.742990 | 2019-01-31T22:41:39 | 2019-01-31T22:41:39 | 168,608,058 | 1 | 0 | null | 2019-01-31T22:41:17 | 2019-01-31T22:41:17 | null | UTF-8 | Python | false | false | 6,474 | py | #!/usr/bin/env python
import sys, hashlib
from datetime import datetime
from pyparsing import alphas, alphanums, Literal, Word, Forward, OneOrMore, ZeroOrMore, CharsNotIn, Suppress, QuotedString, Optional, delimitedList, removeQuotes
def dprint(*args):
    """Debug print: write the space-joined str() of args to stderr, newline-terminated."""
    sys.stderr.write(" ".join(str(a) for a in args) + "\n")
def extract_name_from_field(field):
    """Return the first token of a field definition, minus backtick quoting."""
    name = field.split(' ')[0]
    if name[0] == '`':
        # `quoted identifier` -> quoted identifier
        name = name[1:-1]
    # debug trace to stderr
    dprint('field name', name)
    return name
def field_act(s, loc, tok):
    """pyparsing action: turn a parsed column definition into a port/label record."""
    # port: column name with double quotes removed (used as the DOT port id)
    port = extract_name_from_field(tok[0].replace('"', ''))
    # the_rest: remaining definition text with double quotes escaped for DOT
    rest = extract_name_from_field(tok[1].replace('"', '\\"'))
    return {'type': 'field', 'port': port, 'the_rest': rest}
def field_list_act(s, loc, tok):
    # join the parsed field rows, one per line (indented for the DOT source)
    return "\n ".join(tok)
def create_table_act(s, loc, tok):
    """pyparsing action: render one CREATE TABLE as a DOT node plus its FK edges."""
    tableName = tok['tableName']
    table_parts = tok['table_parts']
    #dprint('table_parts=', table_parts)
    # one HTML table row per column definition
    fields = '\n'.join([
        field_row(field) for field in table_parts if field['type'] == 'field'
    ])
    # one colored edge per inline FOREIGN KEY constraint
    fk_edges = '\n'.join([
        fk_edge(dict(tableName=tableName, **fk)) for fk in table_parts if fk['type'] == 'fk'
    ])
    #dprint('fields=', fields)
    #dprint('fk_edges=', fk_edges)
    return '''
"{tableName}" [
shape=none
label=<
<table border="0" cellspacing="0" cellborder="1">
<tr><td bgcolor="chartreuse1"><font face="Times-bold" point-size="20">{tableName}</font></td></tr>
{fields}
</table>
>];
{fk_edges}
'''.format(
        tableName = tableName,
        fields = fields,
        fk_edges = fk_edges,
    )
def edge_color(t1, f1, t2, f2):
    """Deterministically pick a dark-ish hex color for the edge t1.f1 -> t2.f2.

    The first 24 bits of the md5 of the joined names are masked with
    0x888888 so the RGB channels stay in the darker half of the palette.
    """
    key = '%s-%s-%s-%s' % (t1, f1, t2, f2)
    digest = hashlib.md5(key.encode('utf-8')).hexdigest()
    value = int(digest[:6], 16) & 0x888888
    return '#%06x' % value
def add_fkey_act(s, loc, tok):
    # ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY -> one colored DOT edge
    color = edge_color(tok['tableName'], tok['keyName'], tok['fkTable'], tok['fkCol'])
    return ' "{tableName}":{keyName} -> "{fkTable}":{fkCol} [color="{color}"]'.format(color=color, **tok)
def foreign_key_constraint_act(s, loc, tok):
    """pyparsing action: normalize an inline FK constraint into a dict.

    The column-name groups arrive wrapped in "(" ... ")" tokens, so the
    [1:-1] slices drop the parentheses and keep only the column names.
    """
    local_cols = tok['localColumnNames'][1:-1]
    foreign_cols = tok['foreignColumnNames'][1:-1]
    return dict(
        type='fk',
        keyName=local_cols,
        fkTable=tok['foreignTableName'],
        fkCol=foreign_cols,
    )
def other_statement_act(s, loc, tok):
    # statements we don't visualize contribute nothing to the DOT output
    return ""
def parens(x):
    # wrap a pyparsing expression in literal parentheses
    return Literal("(") + x + Literal(")")
def field_row(field_data):
    #dprint('lol', field_data)
    # one HTML <tr> per column: bold name in its own port slot, grey type info
    return '<tr><td bgcolor="grey96" align="left" port="{0}"><font face="Times-bold">{0}</font> <font color="#535353">{1}</font></td></tr>'.format(field_data['port'].replace('"', ''), field_data['the_rest'])
def fk_edge(fk_data):
    #dprint('lel', fk_data)
    # NOTE(review): edge_color receives the full keyName/fkCol lists while
    # the edge text below uses element [0]; add_fkey_act hashes scalars, so
    # the same relation may get different colors — confirm this is intended.
    color = edge_color(fk_data['tableName'], fk_data['keyName'], fk_data['fkTable'], fk_data['fkCol'])
    return ' "{tableName}":{keyName} -> "{fkTable}":{fkCol} [color="{color}"]'.format(
        color=color,
        tableName = fk_data['tableName'],
        keyName = fk_data['keyName'][0],
        fkTable = fk_data['fkTable'],
        fkCol = fk_data['fkCol'][0],
    )
def debugTap(f):
    """Wrap ``f`` so every call first logs its name and arguments to stderr."""
    def _debugTap(*args, **kwargs):
        shown = list(map(repr, args))
        shown += ['%s = %s' % (k, repr(v)) for k, v in kwargs.items()]
        dprint('%s(%s)' % (f.__name__, ', '.join(shown)))
        return f(*args, **kwargs)
    return _debugTap
def unquotedString(*args, **kwargs):
    """Thin alias for pyparsing's QuotedString.

    The name reflects the intended use: with pyparsing's defaults the
    delimiters are stripped from the parsed result, so e.g. a backtick-quoted
    identifier comes back without the backticks — presumably why this helper
    exists (TODO confirm against pyparsing's unquoteResults default).
    """
    parser = QuotedString(*args, **kwargs)
    return parser
def grammar():
    """Build the pyparsing grammar for a SQL dump.

    Returns a parser that recognises CREATE TABLE statements, standalone
    ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY statements, '--' line
    comments, and (as a catch-all) any other ';'-terminated statement.
    Each alternative's parse action renders it as graphviz text (or "" for
    statements that should be dropped).
    """
    identifier = Word(alphas + '_', alphanums + '_')
    rhs = Word(alphanums + '_')
    # Table/column names: bare identifiers or backtick-quoted (MySQL style).
    tablename_def = ( Word(alphas + "_") | unquotedString("`") )
    colname_def = ( Word(alphas + "_") | unquotedString("`") )
    collist_def = delimitedList(colname_def)
    # Balanced-parenthesis blob, used to skip over e.g. type args like (10,2).
    parenthesis = Forward()
    parenthesis <<= "(" + ZeroOrMore(CharsNotIn("()") | parenthesis) + ")"
    # Inline constraint: CONSTRAINT <name> FOREIGN KEY (<cols>) REFERENCES <tbl> (<cols>)
    foreign_key_constraint_def = (
        Literal("CONSTRAINT") +
        tablename_def +
        Literal("FOREIGN") +
        Literal("KEY") +
        parens(collist_def).setResultsName('localColumnNames') +
        Literal("REFERENCES") +
        tablename_def.setResultsName('foreignTableName') +
        parens(collist_def).setResultsName('foreignColumnNames')
    )
    foreign_key_constraint_def.setParseAction(foreign_key_constraint_act)
    # Anything else inside the column list is treated as a field definition.
    field_def = OneOrMore(Word(alphanums + "_\"'`:-") | parenthesis)
    field_def.setParseAction(field_act)
    field_list_def = delimitedList(field_def)
    field_list_def.setParseAction(field_list_act)
    # NOTE(review): key_def is constructed but never referenced below —
    # KEY/UNIQUE KEY lines currently fall through to field_def. Confirm
    # whether that is intentional.
    key_def = (
        Optional(Literal("UNIQUE") | Literal("PRIMARY")) + Literal("KEY") + ZeroOrMore(CharsNotIn(','))
    )
    # FK constraints must be tried before the greedy field_def alternative.
    table_parts_def = delimitedList(foreign_key_constraint_def | field_def)
    # Trailing table options such as ENGINE=InnoDB or bare keywords.
    table_option_def = (identifier + Literal('=') + rhs) | identifier
    table_options_def = ZeroOrMore(table_option_def)
    create_table_def = (
        Literal("CREATE TABLE") +
        Optional(Literal("IF NOT EXISTS")) +
        tablename_def.setResultsName("tableName") +
        "(" +
        table_parts_def.setResultsName("table_parts") +
        ")" +
        table_options_def +
        ";"
    )
    create_table_def.setParseAction(create_table_act)
    # Standalone FK added after table creation (PostgreSQL pg_dump style).
    add_fkey_def = Literal("ALTER") + "TABLE" + "ONLY" + tablename_def.setResultsName("tableName") + "ADD" + "CONSTRAINT" + Word(alphanums + "_") + "FOREIGN" + "KEY" + "(" + Word(alphanums + "_").setResultsName("keyName") + ")" + "REFERENCES" + Word(alphanums + "_").setResultsName("fkTable") + "(" + Word(alphanums + "_").setResultsName("fkCol") + ")" + Optional(Literal("DEFERRABLE")) + ";"
    add_fkey_def.setParseAction(add_fkey_act)
    # Catch-all: any other statement up to its terminating ';' renders as "".
    other_statement_def = OneOrMore(CharsNotIn(";")) + ";"
    other_statement_def.setParseAction(other_statement_act)
    # '--' comments to end of line are likewise swallowed.
    comment_def = "--" + ZeroOrMore(CharsNotIn("\n"))
    comment_def.setParseAction(other_statement_act)
    return OneOrMore(comment_def | create_table_def | add_fkey_def | other_statement_def)
preamble = '''/*
* Graphviz of '%(filename)s', created %(timestamp)s
* Generated from https://github.com/rm-hull/sql_graphviz
*/
digraph g {
graph [
rankdir="LR",
scale=false,
overlap=0,
splines=polyline,
concentrate=1,
pad="0.5",
nodesep="0.5",
ranksep="2"
];
'''
def graphviz(filename):
    """Parse *filename* (a path or an open file object) and print the
    complete graphviz document to stdout."""
    header = preamble % dict(filename=filename, timestamp=datetime.now())
    print(header)
    # Parse actions return either rendered graphviz text or "" for
    # statements that should be dropped from the output.
    for rendered in grammar().parseFile(filename):
        if rendered != "":
            print(rendered)
    print("}")
if __name__ == '__main__':
    # Read the SQL dump from the file named on the command line, or from
    # stdin when no argument is given.
    source = sys.argv[1] if len(sys.argv) > 1 else sys.stdin
    graphviz(source)
| [
"dviszneki@influential.co"
] | dviszneki@influential.co |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.