content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#####DONT CHANGE THIS########
######################
### Script By TRHACKNOnimous
### www.memanon.ml
### Don't Change This.!!!
######################
import os
import sys
os.system("clear")
os.system("mkdir TRHACKNOnimous")
os.system("mv TRHACKNOnimous/ /storage/emulated/0/")
os.system("chmod +x /storage/emulated/0/TRHACKNOnimous")
os.system("cp TRHACKNONscript.html /storage/emulated/0/TRHACKNOnimous/")
print
print("tu n'as plus qu'à utiliser un outil comme trhacktest, pour uploader le script que tu viens de creer.")
os.system("sleep 5")
print("script créé dans : /storage/emulated/0/TRHACKNOnimous/TRHACKNONscript.html")
os.system("sleep 2")
print("dont forget anonymous see everythink ;-)")
os.system("sleep 3")
print("[ Script en cours de chargement ]")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
4242,
2,
35,
35830,
5870,
27746,
12680,
7804,
198,
14468,
4242,
2235,
198,
21017,
12327,
2750,
7579,
39,
8120,
45,
2202,
320,
... | 2.710438 | 297 |
from django_filters import rest_framework as filters
from .models import Event
| [
6738,
42625,
14208,
62,
10379,
1010,
1330,
1334,
62,
30604,
355,
16628,
198,
198,
6738,
764,
27530,
1330,
8558,
628
] | 4.05 | 20 |
# coding=utf-8
# 实现主要思路
# 1. 获取网页教程的内容
# 2. 获取主页当中的ul-list
# 3. 根据获取的ul-list 当中的a 不断发送请求,获取数据,并写入
import os
import logging
import requests
import pickle
from weasyprint import HTML
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# global variable
INDEX_URL = 'https://facebook.github.io/react/docs/getting-started.html'
BASE_URL = 'https://facebook.github.io'
TRY_LIMITED = 5
# 配置日志模块,并且输出到屏幕和文件
logger = logging.getLogger('pdf_logger')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %('
'message)s')
fh = logging.FileHandler('../log/pdf.log')
sh = logging.StreamHandler()
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
# 配置浏览器选项,提高抓取速度
cap = dict(DesiredCapabilities.PHANTOMJS)
cap['phantomjs.page.settings.loadImages'] = False # 禁止加载图片
cap['phantomjs.page.settings.userAgent'] = ('Mozilla/5.0 (Windows NT 10.0; '
'WOW64) AppleWebKit/537.36 ('
'KHTML, like Gecko) '
'Chrome/45.0.2454.101 '
'Safari/537.36') # 设置useragent
cap['phantomjs.page.settings.diskCache'] = True # 设置浏览器开启缓存
# service_args = [
# '--proxy=127.0.0.1:1080',
# '--proxy-type=socks5',
# ]
# 设置忽略https
service_args=['--ignore-ssl-errors=true',
'--ssl-protocol=any',
'--proxy=127.0.0.1:1080',
'--proxy-type=socks5']
browser = webdriver.PhantomJS(desired_capabilities=cap, service_args=service_args)
browser.set_page_load_timeout(180) # 超时时间
def fetch_url_list():
"""
从react官网教程主页当中抓取页面的URL 列表
:return: 获取到的ul-list当中的所有li
"""
try:
page = requests.get(INDEX_URL, verify=True)
content = page.text
soup = BeautifulSoup(content, 'lxml')
url_list = [item['href'] for item in soup.select('.nav-docs-section ul li a')
if item['href'].find('https') == -1]
return url_list
except Exception as e:
logger.error('fetch url list failed')
logger.error(e)
def fetch_page(url, index):
"""
根据给定的URL抓取页面 即url_list当中的
:param url:要抓取页面的地址
:param index:页面地址在url_list当中的位置,调式时使用,方便查看哪个出错
:return:返回抓到页面的源代码,失败则返回none
"""
try:
browser.get(url)
return browser.page_source
except Exception as e:
logger.warning('get page %d %s failed' % (index, url))
logger.warning(e)
return None
def build_content():
"""
处理每一个url当中爬到页面,按顺序写入到文件当中
:return: None
"""
url_list = fetch_url_list()
print(url_list)
output = []
logger.info('there are %s pages' % len(url_list))
for url_index in range(len(url_list)):
# 爬页面时可能会因为网络等原因而失败,失败后可以尝试重新抓取,最多五次
try_count = 0
temp = BASE_URL + url_list[url_index]
html = fetch_page(temp, url_index)
while try_count < TRY_LIMITED and html is None:
html = fetch_page(BASE_URL + url_list[url_index], url_index)
try_count += 1
try:
if html is not None:
soup = BeautifulSoup(html, 'lxml')
title = soup.select(".inner-content")[0]
output.append(str(title))
logger.info('get page %s success' % url_index)
# 页面抓取比较耗时,且中途失败的几率较大,每抓取到页面可以把迄今为止的结果
# 序列化存储,程序异常退出后前面的结果不会丢失,可以反序列化后接着使用
# with open('output.dump', 'wb') as f:
# pickle.dump(output, f)
except Exception as e:
logger.warning('deal page %s %s failed' % (url_index,
url_list[url_index]))
logger.warning(e)
with open('../html/pages.html', 'w') as f:
f.write('<head><meta charset="utf-8"/></head><body>' + ''.join(
output) + '</body>')
if not os.path.exists('../html/pages.html'):
build_content()
if browser:
browser.quit()
css = [
'../css/codemirror.css',
'../css/react.css',
'../css/syntax.css'
]
HTML('../html/pages.html').write_pdf('../React教程.pdf', stylesheets=css)
| [
2,
19617,
28,
40477,
12,
23,
201,
198,
201,
198,
2,
10263,
106,
252,
163,
236,
108,
10310,
119,
17358,
223,
45250,
251,
164,
115,
107,
201,
198,
2,
352,
13,
5525,
236,
115,
20998,
244,
163,
121,
239,
165,
94,
113,
46763,
247,
16... | 1.579566 | 2,809 |
# -*- coding: utf-8 -*-
from django.conf.urls import url,patterns,include #antes: from django.conf.urls import url,patterns
from django.views.generic import TemplateView
from django.contrib import admin
from django.conf import settings
from . import views
from haystack.query import SearchQuerySet
from haystack.views import SearchView
from .forms import MainSearchForm
sqs = SearchQuerySet().all()
app_name= 'dynadb'
urlpatterns = [
url(r'^reset/$', views.reset_permissions, name="reset_permissions"),
#url(r'^prueba_varios/$', TemplateView.as_view(template_name='dynadb/pruebamult_template.html'), name="prueba_varios"),
#url(r'^profile_setting/$', views.profile_setting, name='profile_setting'),
#url(r'^sub_sim/$', views.sub_sim, name='sub_sim'),
#url(r'^name/$', views.get_name, name='name'),
# url(r'^dyndbfiles/$', views.get_DyndbFiles, name='dyndbfiles'),
url(r'^db_inputform/(?P<submission_id>[0-9]+)?/?$', views.db_inputformMAIN, name='db_inputform'),
url(r'^before_db_inputform_prev_moddb_inputform/(?P<submission_id>[0-9]+)?/?$', views.db_inputformMAIN, name='before_db_inputform_prev_mod'),
# url(r'^db_author_information/$', views.get_Author_Information, name='db_author_information'),
# url(r'^db_dynamics/$', views.get_Dynamics, name='db_dynamics'),
# url(r'^db_files/$', views.get_FilesCOMPLETE, name='db_files'),
# url(r'^db_protein/$', views.get_ProteinForm, name='db_protein'),
# url(r'^db_molecule/$', views.get_Molecule, name='db_molecule'),
# url(r'^db_molecule/$', views.get_Molecule, name='db_molecule'),
# url(r'^db_component/$', views.get_Component, name='db_component'),
# url(r'^db_model/$', views.get_Model, name='db_model'),
# url(r'^db_compoundform/$', views.get_CompoundForm, name='db_compoundform'),
# url(r'^your_name/$', views.get_name, name='your_name'),
# url(r'^thanks/$', views.get_name, name='thanks'),
# url(r'^admin/', admin.site.urls),
url(r'^protein/(?P<submission_id>[0-9]+)/$', views.PROTEINview, name='protein'),
url(r'^protein/(?P<submission_id>[0-9]+)/delete/$', views.delete_protein, name='delete_protein'),
url(r'^protein/get_data_upkb/?([A-Z0-9-]+)?$', views.protein_get_data_upkb, name='protein_get_data_upkb'),
url(r'^protein/download_specieslist/$', views.download_specieslist, name='protein_download_specieslist'),
url(r'^protein/get_specieslist/$', views.get_specieslist, name='protein_get_specieslist'),
url(r'^protein/get_mutations/$', views.get_mutations_view, name='protein_get_mutations'),
url(r'^protein/(?P<alignment_key>[0-9]+)/alignment/$', views.show_alig, name='show_alig'),
url(r'^protein/id/(?P<protein_id>[0-9]+)/$',views.query_protein, name='query_protein'),
url(r'^protein/id/(?P<protein_id>[0-9]+)/fasta$',views.query_protein_fasta, name='query_protein_fasta'),
url(r'^molecule/id/(?P<molecule_id>[0-9]+)/$',views.query_molecule, name='query_molecule'),
url(r'^molecule/id/(?P<molecule_id>[0-9]+)/sdf$',views.query_molecule_sdf,name='query_molecule_sdf'),
url(r'^compound/id/(?P<compound_id>[0-9]+)/$',views.query_compound, name='query_compound'),
url(r'^model/id/(?P<model_id>[0-9]+)/$',views.query_model, name='query_model'),
url(r'^dynamics/id/(?P<dynamics_id>[0-9]+)/$',views.query_dynamics, name='query_dynamics'),
url(r'^complex/id/(?P<complex_id>[0-9]+)/$',views.query_complex, name='query_complex'),
url(r'^references/$', views.REFERENCEview, name='references'),
url(r'^REFERENCEfilled/(?P<submission_id>[0-9]+)/$', views.REFERENCEview, name='REFERENCEfilled'),
url(r'^PROTEINfilled/(?P<submission_id>[0-9]+)/$', views.PROTEINview, name='PROTEINfilled'),
url(r'^submission_summary/(?P<submission_id>[0-9]+)/$', views.submission_summaryiew, name='submission_summary'),
url(r'^protein_summary/(?P<submission_id>[0-9]+)/$', views.protein_summaryiew, name='protein_summary'),
url(r'^molecule_summary/(?P<submission_id>[0-9]+)/$', views.molecule_summaryiew, name='molecule_summary'),
url(r'^model_summary/(?P<submission_id>[0-9]+)/$', views.model_summaryiew, name='model_summary'),
url(r'^molecule/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview, name='molecule'),
url(r'^molecule/(?P<submission_id>[0-9]+)/delete/$', views.delete_molecule, name='delete_molecule'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.SMALL_MOLECULEreuseview, name='moleculereuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?generate_properties/$', views.generate_molecule_properties, name='generate_molecule_properties_reuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?delete/$', views.delete_molecule, name='delete_molecule_reuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?get_compound_info_pubchem/$', views.get_compound_info_pubchem, name='get_compound_info_pubchem_reuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?get_compound_info_chembl/$', views.get_compound_info_chembl, name='get_compound_info_chembl_reuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?submitpost/$', views.submitpost_view, name='submitpost_reuse'),
#url(r'^moleculereuse/open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem_reuse'),
#url(r'^moleculereuse/open_chembl/$', views.open_chembl, name='molecule_open_chembl_reuse'),
url(r'^moleculereuse/(?:[0-9]+/)open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem_reuse'),
url(r'^moleculereuse/(?:[0-9]+/)open_chembl/$', views.open_chembl, name='molecule_open_chembl_reuse'),
url(r'^molecule/(?P<submission_id>[0-9]+)/submitpost/$', views.submitpost_view, name='submitpost'),
url(r'^molecule/(?P<submission_id>[0-9]+)/generate_properties/$', views.generate_molecule_properties, name='generate_molecule_properties'),
url(r'^molecule/(?P<submission_id>[0-9]+)/get_compound_info_pubchem/$', views.get_compound_info_pubchem, name='get_compound_info_pubchem'),
url(r'^molecule/(?P<submission_id>[0-9]+)/get_compound_info_chembl/$', views.get_compound_info_chembl, name='get_compound_info_chembl'),
url(r'^molecule/open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem'),
url(r'^molecule/open_chembl/$', views.open_chembl, name='molecule_open_chembl'),
url(r'^molecule2/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview2, name='molecule2'),
url(r'^MOLECULEfilled/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview, name='MOLECULEfilled'),
url(r'^MOLECULEfilled2/$', views.SMALL_MOLECULEview2, name='MOLECULEfilled2'),
url(r'^model/(?P<submission_id>[0-9]+)/$', views.MODELview, name='model'),
url(r'^(?P<form_type>model|dynamics)/(?P<submission_id>[0-9]+)/check_pdb_molecules/$', views.pdbcheck_molecule, name='pdbcheck_molecule'),
url(r'^(?P<form_type>dynamics)reuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?check_pdb_molecules/$', views.pdbcheck_molecule, name='pdbcheck_molecule'), #######
url(r'^(?P<form_type>model|dynamics)/(?P<submission_id>[0-9]+)/get_submission_molecule_info/$', views.get_submission_molecule_info, name='get_submission_molecule_info'),
url(r'^model/(?P<submission_id>[0-9]+)/ajax_pdbchecker/$', views.pdbcheck, name='pdbcheck'),
url(r'^model/(?P<submission_id>[0-9]+)/search_top/$',views.search_top,name='search_top'), #keep this one in a merge
url(r'^model/(?P<submission_id>[0-9]+)/upload_model_pdb/$', views.upload_model_pdb, name='upload_model_pdb'),
url(r'^modelreuse/(?P<submission_id>-?[0-9]+)/(?:[0-9]+/)?$', views.MODELreuseview, name='modelreuse'),
url(r'^proteinreuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?$', views.PROTEINreuseview, name='proteinreuse'),
# url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.SMALL_MOLECULEreuseview, name='moleculereuse'),
# url(r'^modelrow/$', views.MODELrowview, name='modelrow'),
url(r'^modelreuserequest/(?P<model_id>[0-9]+)/$', views.MODELreuseREQUESTview, name='modelreuserequest'),
url(r'^MODELfilled/(?P<submission_id>[0-9]+)/$', views.MODELview, name='MODELfilled'),
#url(r'^ajax_pdbchecker/(?P<submission_id>[0-9]+)/$', views.pdbcheck, name='pdbcheck'),
url(r'^search/$', SearchView(template='/protwis/sites/protwis/dynadb/templates/search/search.html', searchqueryset=sqs, form_class=MainSearchForm),name='haystack_search'),
url(r'^ajaxsearch/',views.ajaxsearcher,name='ajaxsearcher'),
url(r'^empty_search/',views.emptysearcher,name='emptysearcher'),
url(r'^autocomplete/',views.autocomplete,name='autocomplete'),
url(r'^advanced_search/$', views.NiceSearcher,name='NiceSearcher'),
#url(r'^search_top/(?P<submission_id>[0-9]+)/$',views.search_top,name='search_top'),
url(r'^dynamics/(?P<submission_id>[0-9]+)/$', views.DYNAMICSview, name='dynamics'),
url(r'^dynamics/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?upload_files/((?P<trajectory>traj)/)?$', views.upload_dynamics_files, name='dynamics_upload_files'),
url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?upload_files/((?P<trajectory>traj)/)?$', views.upload_dynamics_files, name='dynamics_upload_files'),
url(r'^dynamics/(?P<submission_id>[0-9]+)/check_trajectories/$', views.check_trajectories, name='dynamics_check_trajectories'),
url(r'^dynamics/do_analysis/$', views.do_analysis, name='do_analysis'),
# url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.DYNAMICSreuseview, name='dynamicsreuse'),
url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.DYNAMICSview, name='dynamicsreuse'),
url(r'^DYNAMICSfilled/(?P<submission_id>[0-9]+)/$', views.DYNAMICSview, name='DYNAMICSfilled'),
#url(r'^form/$', views.get_formup, name='form'),
url(r'^model/carousel/(?P<model_id>[0-9]+)/$', views.carousel_model_components, name='carousel_model_components'),
url(r'^dynamics/carousel/(?P<dynamics_id>[0-9]+)/$', views.carousel_dynamics_components, name='carousel_dynamics_components'),
#url(r'^files/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT,}), #this line shouldnt be here
url(r'^submitted/(?P<submission_id>[0-9]+)/$', views.SUBMITTEDview, name='submitted'),
url(r'^close_submission/(?P<submission_id>[0-9]+)/$', views.close_submission, name='close_submission'),
url(r'^datasets/$', views.datasets, name='datasets'),
url(r'^table/$', views.table, name='table'),
url(r'^blank/$', TemplateView.as_view(template_name="dynadb/blank.html"), name='blank'),]
# url(r'^some_temp/$', views.some_view, name='some_temp')
# url(r'^prueba_varios/$', views.profile_setting, name='PRUEBA_varios'),
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^files/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.STATIC_ROOT,
}),
)
else:
if settings.FILES_NO_LOGIN:
serve_files_func = views.serve_submission_files_no_login
else:
serve_files_func = views.serve_submission_files
urlpatterns += patterns('',
url(r'^files/(?P<obj_folder>[^/\\]+)/(?P<submission_folder>[^/\\]+)/(?P<path>.*)$', serve_files_func, name='serve_submission_files'),
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
33279,
82,
11,
17256,
1303,
39781,
25,
422,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
... | 2.305567 | 4,922 |
from .base import (
BetaFromHits,
Categorical,
LogNormalFromInterval,
NormalFromInterval,
bernoulli,
beta,
beta_from_hits,
categorical,
flip,
halfnormal,
halfnormal_from_interval,
lognormal,
lognormal_from_interval,
normal,
normal_from_interval,
random_choice,
random_integer,
uniform,
)
from .distribution import Distribution
from .histogram import HistogramDist
from .location_scale_family import Logistic, Normal
from .logistic_mixture import LogisticMixture
| [
6738,
764,
8692,
1330,
357,
198,
220,
220,
220,
17993,
4863,
39,
896,
11,
198,
220,
220,
220,
327,
2397,
12409,
11,
198,
220,
220,
220,
5972,
26447,
4863,
9492,
2100,
11,
198,
220,
220,
220,
14435,
4863,
9492,
2100,
11,
198,
220,
... | 2.617647 | 204 |
#!/usr/bin/python
#
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for ios_ospf_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: ios_ospf_interfaces
short_description: OSPF_Interfaces resource module
description: This module configures and manages the Open Shortest Path First (OSPF)
version 2 on IOS platforms.
version_added: 1.0.0
author: Sumit Jaiswal (@justjais)
notes:
- Tested against Cisco IOSv Version 15.2 on VIRL.
- This module works with connection C(network_cli).
See U(https://docs.ansible.com/ansible/latest/network/user_guide/platform_ios.html)
options:
config:
description: A dictionary of OSPF interfaces options.
type: list
elements: dict
suboptions:
name:
description:
- Full name of the interface excluding any logical unit number,
i.e. GigabitEthernet0/1.
type: str
required: true
address_family:
description:
- OSPF interfaces settings on the interfaces in address-family
context.
type: list
elements: dict
suboptions:
afi:
description:
- Address Family Identifier (AFI) for OSPF interfaces settings
on the interfaces.
type: str
choices:
- ipv4
- ipv6
required: true
process:
description: OSPF interfaces process config
type: dict
suboptions:
id:
description:
- Address Family Identifier (AFI) for OSPF interfaces settings
on the interfaces. Please refer vendor documentation of Valid
values.
type: int
area_id:
description:
- OSPF interfaces area ID as a decimal value. Please
refer vendor documentation of Valid values.
- OSPF interfaces area ID in IP address format(e.g.
A.B.C.D)
type: str
secondaries:
description:
- Include or exclude secondary IP addresses.
- Valid only with IPv4 config
type: bool
instance_id:
description:
- Set the OSPF instance based on ID
- Valid only with IPv6 OSPF config
type: int
adjacency:
description: Adjacency staggering
type: bool
authentication:
description: Enable authentication
type: dict
suboptions:
key_chain:
description: Use a key-chain for cryptographic
authentication keys
type: str
message_digest:
description: Use message-digest authentication
type: bool
'null':
description: Use no authentication
type: bool
bfd:
description:
- BFD configuration commands
- Enable/Disable BFD on this interface
type: bool
cost:
description: Interface cost
type: dict
suboptions:
interface_cost:
description: Interface cost or Route cost of this interface
type: int
dynamic_cost:
description:
- Specify dynamic cost options
- Valid only with IPv6 OSPF config
type: dict
suboptions:
default:
description: Specify default link metric value
type: int
hysteresis:
description: Specify hysteresis value for LSA dampening
type: dict
suboptions:
percent:
description: Specify hysteresis percent changed.
Please refer vendor documentation of Valid values.
type: int
threshold:
description: Specify hysteresis threshold value.
Please refer vendor documentation of Valid values.
type: int
weight:
description: Specify weight to be placed on individual
metrics
type: dict
suboptions:
l2_factor:
description:
- Specify weight to be given to L2-factor metric
- Percentage weight of L2-factor metric. Please refer
vendor documentation of Valid values.
type: int
latency:
description:
- Specify weight to be given to latency metric.
- Percentage weight of latency metric. Please refer
vendor documentation of Valid values.
type: int
oc:
description:
- Specify weight to be given to cdr/mdr for oc
- Give 100 percent weightage for current data rate(0
for maxdatarate)
type: bool
resources:
description:
- Specify weight to be given to resources metric
- Percentage weight of resources metric. Please refer
vendor documentation of Valid values.
type: int
throughput:
description:
- Specify weight to be given to throughput metric
- Percentage weight of throughput metric. Please refer
vendor documentation of Valid values.
type: int
database_filter:
description: Filter OSPF LSA during synchronization and flooding
type: bool
dead_interval:
description: Interval after which a neighbor is declared dead
type: dict
suboptions:
time:
description: time in seconds
type: int
minimal:
description:
- Set to 1 second and set multiplier for Hellos
- Number of Hellos sent within 1 second. Please refer
vendor documentation of Valid values.
- Valid only with IP OSPF config
type: int
demand_circuit:
description: OSPF Demand Circuit, enable or disable
the demand circuit'
type: dict
suboptions:
enable:
description: Enable Demand Circuit
type: bool
ignore:
description: Ignore demand circuit auto-negotiation requests
type: bool
disable:
description:
- Disable demand circuit on this interface
- Valid only with IPv6 OSPF config
type: bool
flood_reduction:
description: OSPF Flood Reduction
type: bool
hello_interval:
description:
- Time between HELLO packets
- Please refer vendor documentation of Valid values.
type: int
lls:
description:
- Link-local Signaling (LLS) support
- Valid only with IP OSPF config
type: bool
manet:
description:
- Mobile Adhoc Networking options
- MANET Peering options
- Valid only with IPv6 OSPF config
type: dict
suboptions:
cost:
description: Redundant path cost improvement required to peer
type: dict
suboptions:
percent:
description: Relative incremental path cost.
Please refer vendor documentation of Valid values.
type: int
threshold:
description: Absolute incremental path cost.
Please refer vendor documentation of Valid values.
type: int
link_metrics:
description: Redundant path cost improvement required to peer
type: dict
suboptions:
set:
description: Enable link-metrics
type: bool
cost_threshold:
description: Minimum link cost threshold.
Please refer vendor documentation of Valid values.
type: int
mtu_ignore:
description: Ignores the MTU in DBD packets
type: bool
multi_area:
description:
- Set the OSPF multi-area ID
- Valid only with IP OSPF config
type: dict
suboptions:
id:
description:
- OSPF multi-area ID as a decimal value. Please refer vendor
documentation of Valid values.
- OSPF multi-area ID in IP address format(e.g. A.B.C.D)
type: int
cost:
description: Interface cost
type: int
neighbor:
description:
- OSPF neighbor link-local IPv6 address (X:X:X:X::X)
- Valid only with IPv6 OSPF config
type: dict
suboptions:
address:
description: Neighbor link-local IPv6 address
type: str
cost:
description: OSPF cost for point-to-multipoint neighbor
type: int
database_filter:
description: Filter OSPF LSA during synchronization and flooding for point-to-multipoint neighbor
type: bool
poll_interval:
description: OSPF dead-router polling interval
type: int
priority:
description: OSPF priority of non-broadcast neighbor
type: int
network:
description: Network type
type: dict
suboptions:
broadcast:
description: Specify OSPF broadcast multi-access network
type: bool
manet:
description:
- Specify MANET OSPF interface type
- Valid only with IPv6 OSPF config
type: bool
non_broadcast:
description: Specify OSPF NBMA network
type: bool
point_to_multipoint:
description: Specify OSPF point-to-multipoint network
type: bool
point_to_point:
description: Specify OSPF point-to-point network
type: bool
prefix_suppression:
description: Enable/Disable OSPF prefix suppression
type: bool
priority:
description: Router priority. Please refer vendor documentation
of Valid values.
type: int
resync_timeout:
description: Interval after which adjacency is reset if oob-resync
is not started. Please refer vendor documentation of Valid values.
type: int
retransmit_interval:
description: Time between retransmitting lost link state
advertisements. Please refer vendor documentation of Valid values.
type: int
shutdown:
description: Set OSPF protocol's state to disable under
current interface
type: bool
transmit_delay:
description: Link state transmit delay.
Please refer vendor documentation of Valid values.
type: int
ttl_security:
description:
- TTL security check
- Valid only with IPV4 OSPF config
type: dict
suboptions:
set:
description: Enable TTL Security on all interfaces
type: bool
hops:
description:
- Maximum number of IP hops allowed
- Please refer vendor documentation of Valid values.
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the IOS
device by executing the command B(sh running-config | section
^interface).
- The state I(parsed) reads the configuration from C(running_config)
option and transforms it into Ansible structured data as per the
resource module's argspec and the value is then returned in the
I(parsed) key within the result.
type: str
state:
description:
- The state the configuration should be left in
- The states I(rendered), I(gathered) and I(parsed) does not perform any
change on the device.
- The state I(rendered) will transform the configuration in C(config)
option to platform specific CLI commands which will be returned in the
I(rendered) key within the result. For state I(rendered) active
connection to remote host is not required.
- The state I(gathered) will fetch the running configuration from device
and transform it into structured data in the format as per the resource
module argspec and the value is returned in the I(gathered) key within
the result.
- The state I(parsed) reads the configuration from C(running_config)
option and transforms it into JSON format as per the resource module
parameters and the value is returned in the I(parsed) key within the
result. The value of C(running_config) option should be the same format
as the output of command I(show running-config | include ip route|ipv6
route) executed on device. For state I(parsed) active connection to
remote host is not required.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- gathered
- rendered
- parsed
default: merged
"""
EXAMPLES = """
# Using deleted
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
- name: Delete provided OSPF Interface config
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/1",
# "no ipv6 ospf 55 area 105",
# "no ipv6 ospf adjacency stagger disable",
# "no ipv6 ospf priority 20",
# "no ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
# Using deleted without any config passed (NOTE: This will delete all OSPF Interfaces configuration from device)
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
- name: Delete all OSPF config from interfaces
cisco.ios.ios_ospf_interfaces:
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/2",
# "no ip ospf 10 area 20",
# "no ip ospf adjacency stagger disable",
# "no ip ospf cost 30",
# "no ip ospf priority 40",
# "no ip ospf ttl-security hops 50",
# "interface GigabitEthernet0/1",
# "no ipv6 ospf 55 area 105",
# "no ipv6 ospf adjacency stagger disable",
# "no ipv6 ospf priority 20",
# "no ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# interface GigabitEthernet0/2
# Using merged
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# router-ios#
- name: Merge provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv4
process:
id: 10
area_id: 30
adjacency: true
bfd: true
cost:
interface_cost: 5
dead_interval:
time: 5
demand_circuit:
ignore: true
network:
broadcast: true
priority: 25
resync_timeout: 10
shutdown: true
ttl_security:
hops: 50
- afi: ipv6
process:
id: 35
area_id: 45
adjacency: true
database_filter: true
manet:
link_metrics:
cost_threshold: 10
priority: 55
transmit_delay: 45
state: merged
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/1",
# "ip ospf 10 area 30",
# "ip ospf adjacency stagger disable",
# "ip ospf bfd",
# "ip ospf cost 5",
# "ip ospf dead-interval 5",
# "ip ospf demand-circuit ignore",
# "ip ospf network broadcast",
# "ip ospf priority 25",
# "ip ospf resync-timeout 10",
# "ip ospf shutdown",
# "ip ospf ttl-security hops 50",
# "ipv6 ospf 35 area 45",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf database-filter all out",
# "ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 55",
# "ipv6 ospf transmit-delay 45"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# Using overridden
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Override provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv6
process:
id: 55
area_id: 105
adjacency: true
priority: 20
transmit_delay: 30
- name: GigabitEthernet0/2
address_family:
- afi: ipv4
process:
id: 10
area_id: 20
adjacency: true
cost:
interface_cost: 30
priority: 40
ttl_security:
hops: 50
state: overridden
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/2",
# "ip ospf 10 area 20",
# "ip ospf adjacency stagger disable",
# "ip ospf cost 30",
# "ip ospf priority 40",
# "ip ospf ttl-security hops 50",
# "interface GigabitEthernet0/1",
# "ipv6 ospf 55 area 105",
# "no ipv6 ospf database-filter all out",
# "no ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 20",
# "ipv6 ospf transmit-delay 30",
# "no ip ospf 10 area 30",
# "no ip ospf adjacency stagger disable",
# "no ip ospf bfd",
# "no ip ospf cost 5",
# "no ip ospf dead-interval 5",
# "no ip ospf demand-circuit ignore",
# "no ip ospf network broadcast",
# "no ip ospf priority 25",
# "no ip ospf resync-timeout 10",
# "no ip ospf shutdown",
# "no ip ospf ttl-security hops 50"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
# Using replaced
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Replaced provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/2
address_family:
- afi: ipv6
process:
id: 55
area_id: 105
adjacency: true
priority: 20
transmit_delay: 30
state: replaced
# Commands Fired:
# ---------------
# "commands": [
# "interface GigabitEthernet0/2",
# "ipv6 ospf 55 area 105",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf priority 20",
# "ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# Using Gathered
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Gather OSPF Interfaces provided configurations
cisco.ios.ios_ospf_interfaces:
config:
state: gathered
# Module Execution Result:
# ------------------------
#
# "gathered": [
# {
# "name": "GigabitEthernet0/2"
# },
# {
# "address_family": [
# {
# "adjacency": true,
# "afi": "ipv4",
# "bfd": true,
# "cost": {
# "interface_cost": 5
# },
# "dead_interval": {
# "time": 5
# },
# "demand_circuit": {
# "ignore": true
# },
# "network": {
# "broadcast": true
# },
# "priority": 25,
# "process": {
# "area_id": "30",
# "id": 10
# },
# "resync_timeout": 10,
# "shutdown": true,
# "ttl_security": {
# "hops": 50
# }
# },
# {
# "adjacency": true,
# "afi": "ipv6",
# "database_filter": true,
# "manet": {
# "link_metrics": {
# "cost_threshold": 10
# }
# },
# "priority": 55,
# "process": {
# "area_id": "45",
# "id": 35
# },
# "transmit_delay": 45
# }
# ],
# "name": "GigabitEthernet0/1"
# },
# {
# "name": "GigabitEthernet0/0"
# }
# ]
# After state:
# ------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# Using Rendered
- name: Render the commands for provided configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv4
process:
id: 10
area_id: 30
adjacency: true
bfd: true
cost:
interface_cost: 5
dead_interval:
time: 5
demand_circuit:
ignore: true
network:
broadcast: true
priority: 25
resync_timeout: 10
shutdown: true
ttl_security:
hops: 50
- afi: ipv6
process:
id: 35
area_id: 45
adjacency: true
database_filter: true
manet:
link_metrics:
cost_threshold: 10
priority: 55
transmit_delay: 45
state: rendered
# Module Execution Result:
# ------------------------
#
# "rendered": [
# "interface GigabitEthernet0/1",
# "ip ospf 10 area 30",
# "ip ospf adjacency stagger disable",
# "ip ospf bfd",
# "ip ospf cost 5",
# "ip ospf dead-interval 5",
# "ip ospf demand-circuit ignore",
# "ip ospf network broadcast",
# "ip ospf priority 25",
# "ip ospf resync-timeout 10",
# "ip ospf shutdown",
# "ip ospf ttl-security hops 50",
# "ipv6 ospf 35 area 45",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf database-filter all out",
# "ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 55",
# "ipv6 ospf transmit-delay 45"
# ]
# Using Parsed
# File: parsed.cfg
# ----------------
#
# interface GigabitEthernet0/2
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/0
- name: Parse the provided configuration with the existing running configuration
cisco.ios.ios_ospf_interfaces:
running_config: "{{ lookup('file', 'parsed.cfg') }}"
state: parsed
# Module Execution Result:
# ------------------------
#
# "parsed": [
# },
# {
# "name": "GigabitEthernet0/2"
# },
# {
# "address_family": [
# {
# "adjacency": true,
# "afi": "ipv4",
# "bfd": true,
# "cost": {
# "interface_cost": 5
# },
# "dead_interval": {
# "time": 5
# },
# "demand_circuit": {
# "ignore": true
# },
# "network": {
# "broadcast": true
# },
# "priority": 25,
# "process": {
# "area_id": "30",
# "id": 10
# },
# "resync_timeout": 10,
# "shutdown": true,
# "ttl_security": {
# "hops": 50
# }
# },
# {
# "adjacency": true,
# "afi": "ipv6",
# "database_filter": true,
# "manet": {
# "link_metrics": {
# "cost_threshold": 10
# }
# },
# "priority": 55,
# "process": {
# "area_id": "45",
# "id": 35
# },
# "transmit_delay": 45
# }
# ],
# "name": "GigabitEthernet0/1"
# },
# {
# "name": "GigabitEthernet0/0"
# }
# ]
"""
RETURN = """
before:
description: The configuration prior to the model invocation.
returned: always
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: dict
after:
description: The resulting configuration model invocation.
returned: when changed
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: dict
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface GigabitEthernet0/1', 'ip ospf 10 area 30', 'ip ospf cost 5', 'ip ospf priority 25']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.argspec.ospf_interfaces.ospf_interfaces import (
Ospf_InterfacesArgs,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.config.ospf_interfaces.ospf_interfaces import (
Ospf_Interfaces,
)
def main():
"""
Main entry point for module execution
:returns: the result form module invocation
"""
required_if = [
("state", "merged", ("config",)),
("state", "replaced", ("config",)),
("state", "overridden", ("config",)),
("state", "rendered", ("config",)),
("state", "parsed", ("running_config",)),
]
mutually_exclusive = [("config", "running_config")]
module = AnsibleModule(
argument_spec=Ospf_InterfacesArgs.argument_spec,
required_if=required_if,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
)
result = Ospf_Interfaces(module).execute_module()
module.exit_json(**result)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
2297,
10983,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
198,
2,
357,
3826,
27975,
457... | 2.009593 | 17,305 |
from django.dispatch import Signal
user_logged_in = Signal(providing_args=['instance', 'request'])
| [
6738,
42625,
14208,
13,
6381,
17147,
1330,
26484,
198,
7220,
62,
6404,
2004,
62,
259,
796,
26484,
7,
15234,
2530,
62,
22046,
28,
17816,
39098,
3256,
705,
25927,
6,
12962,
198
] | 3.193548 | 31 |
import demistomock as demisto # noqa
import ExpanseAggregateAttributionIP
INPUT = [
{"src": "1.1.1.1", "count": 2},
{"src_ip": "8.8.8.8"},
{"src": "8.8.8.8", "count": 10}
]
CURRENT = [
{"ip": "1.1.1.1", "sightings": 1, "internal": False}
]
RESULT = [
{"ip": "1.1.1.1", "sightings": 3, "internal": False},
{"ip": "8.8.8.8", "sightings": 11, "internal": True}
]
def test_aggregate_command():
"""
Given:
- previous list aggregated IPs
- new data source with IP/sightings information
- merged aggregated data with new information
- list of internal ip networks
When
- merging new sightings to existing aggregated data
Then
- data is merged
- expected output is returned
"""
result = ExpanseAggregateAttributionIP.aggregate_command({
'input': INPUT,
'current': CURRENT,
'internal_ip_networks': "192.168.0.0/16,10.0.0.0/8,8.0.0.0/8"
})
assert result.outputs_prefix == "Expanse.AttributionIP"
assert result.outputs_key_field == "ip"
assert result.outputs == RESULT
| [
11748,
1357,
396,
296,
735,
355,
1357,
396,
78,
220,
1303,
645,
20402,
198,
198,
11748,
5518,
40054,
46384,
49373,
8086,
3890,
4061,
628,
198,
1268,
30076,
796,
685,
198,
220,
220,
220,
19779,
10677,
1298,
366,
16,
13,
16,
13,
16,
1... | 2.319415 | 479 |
import tensorflow.keras as keras
import tensorflow as tf
| [
11748,
11192,
273,
11125,
13,
6122,
292,
355,
41927,
292,
198,
11748,
11192,
273,
11125,
355,
48700,
198
] | 3.166667 | 18 |
from .case_decorators import *
| [
6738,
764,
7442,
62,
12501,
273,
2024,
1330,
1635,
198
] | 3.1 | 10 |
#!/usr/bin/python
#
#
#
#
# Kim Brugger (21 Oct 2015), contact: kbr@brugger.dk
import sys
import os
import pprint
pp = pprint.PrettyPrinter(indent=4)
import re
FLANK = 500
NR_PRIMERS = 4
ALLOWED_MISMATCHES = 4
MAX_MAPPINGS = 5
MAX_PRODUCT_SIZE = 800
MIN_PRODUCT_SIZE = 120
smalt_file = '8:96259936.smalt'
if ( sys.argv >= 1 ):
smalt_file = sys.argv[1]
region = smalt_file.rstrip(".smalt")
(chromo, pos) = region.split(":")
(start_pos, end_pos) = map(int, pos.split("-"))
primer_data = check_primers( smalt_file )
#pp.pprint( primer_data )
pcr_products = digital_PCR( primer_data )
pcr_products = check_PCR_products( pcr_products, chromo, start_pos, end_pos )
fwd_primer, rev_primer = pick_best_primers(primer_data, chromo, start_pos, end_pos)
print " Picked Primer Pair ( %s, %s )" % ( fwd_primer, rev_primer)
print "SMALT FILE :: %s " % smalt_file
| [
2,
48443,
14629,
14,
8800,
14,
29412,
220,
198,
2,
220,
198,
2,
220,
198,
2,
220,
198,
2,
220,
198,
2,
6502,
8274,
26679,
357,
2481,
2556,
1853,
828,
2800,
25,
479,
1671,
31,
65,
622,
26679,
13,
34388,
198,
198,
11748,
25064,
19... | 2.231707 | 410 |
"""Sensor platform for Kaleidescape integration."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import PERCENTAGE
from homeassistant.helpers.entity import EntityCategory
from .const import DOMAIN as KALEIDESCAPE_DOMAIN
from .entity import KaleidescapeEntity
if TYPE_CHECKING:
from collections.abc import Callable
from kaleidescape import Device as KaleidescapeDevice
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
@dataclass
class BaseEntityDescriptionMixin:
"""Mixin for required descriptor keys."""
value_fn: Callable[[KaleidescapeDevice], StateType]
@dataclass
class KaleidescapeSensorEntityDescription(
SensorEntityDescription, BaseEntityDescriptionMixin
):
"""Describes Kaleidescape sensor entity."""
SENSOR_TYPES: tuple[KaleidescapeSensorEntityDescription, ...] = (
KaleidescapeSensorEntityDescription(
key="media_location",
name="Media Location",
icon="mdi:monitor",
value_fn=lambda device: device.automation.movie_location,
),
KaleidescapeSensorEntityDescription(
key="play_status",
name="Play Status",
icon="mdi:monitor",
value_fn=lambda device: device.movie.play_status,
),
KaleidescapeSensorEntityDescription(
key="play_speed",
name="Play Speed",
icon="mdi:monitor",
value_fn=lambda device: device.movie.play_speed,
),
KaleidescapeSensorEntityDescription(
key="video_mode",
name="Video Mode",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_mode,
),
KaleidescapeSensorEntityDescription(
key="video_color_eotf",
name="Video Color EOTF",
icon="mdi:monitor-eye",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_color_eotf,
),
KaleidescapeSensorEntityDescription(
key="video_color_space",
name="Video Color Space",
icon="mdi:monitor-eye",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_color_space,
),
KaleidescapeSensorEntityDescription(
key="video_color_depth",
name="Video Color Depth",
icon="mdi:monitor-eye",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_color_depth,
),
KaleidescapeSensorEntityDescription(
key="video_color_sampling",
name="Video Color Sampling",
icon="mdi:monitor-eye",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_color_sampling,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_ratio",
name="Screen Mask Ratio",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.screen_mask_ratio,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_top_trim_rel",
name="Screen Mask Top Trim Rel",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=PERCENTAGE,
value_fn=lambda device: device.automation.screen_mask_top_trim_rel / 10.0,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_bottom_trim_rel",
name="Screen Mask Bottom Trim Rel",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=PERCENTAGE,
value_fn=lambda device: device.automation.screen_mask_bottom_trim_rel / 10.0,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_conservative_ratio",
name="Screen Mask Conservative Ratio",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.screen_mask_conservative_ratio,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_top_mask_abs",
name="Screen Mask Top Mask Abs",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=PERCENTAGE,
value_fn=lambda device: device.automation.screen_mask_top_mask_abs / 10.0,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_bottom_mask_abs",
name="Screen Mask Bottom Mask Abs",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=PERCENTAGE,
value_fn=lambda device: device.automation.screen_mask_bottom_mask_abs / 10.0,
),
KaleidescapeSensorEntityDescription(
key="cinemascape_mask",
name="Cinemascape Mask",
icon="mdi:monitor-star",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.cinemascape_mask,
),
KaleidescapeSensorEntityDescription(
key="cinemascape_mode",
name="Cinemascape Mode",
icon="mdi:monitor-star",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.cinemascape_mode,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the platform from a config entry."""
device: KaleidescapeDevice = hass.data[KALEIDESCAPE_DOMAIN][entry.entry_id]
async_add_entities(
KaleidescapeSensor(device, description) for description in SENSOR_TYPES
)
class KaleidescapeSensor(KaleidescapeEntity, SensorEntity):
"""Representation of a Kaleidescape sensor."""
entity_description: KaleidescapeSensorEntityDescription
def __init__(
self,
device: KaleidescapeDevice,
entity_description: KaleidescapeSensorEntityDescription,
) -> None:
"""Initialize sensor."""
super().__init__(device)
self.entity_description = entity_description
self._attr_unique_id = f"{self._attr_unique_id}-{entity_description.key}"
self._attr_name = f"{self._attr_name} {entity_description.name}"
@property
def native_value(self) -> StateType:
"""Return value of sensor."""
return self.entity_description.value_fn(self._device)
| [
37811,
47864,
3859,
329,
509,
1000,
1460,
36435,
11812,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
198,
198,
6738,
... | 2.559787 | 2,626 |
#!/usr/bin/env python3
# Copyright (C) 2017 - 2020 Alexandre Teyar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import signal
import sys
import time
from datetime import datetime, timedelta
from itertools import chain, product
import coloredlogs
import jwt
from tqdm import tqdm
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG', milliseconds=True)
def parse_args():
"""This function parses the command line.
Returns:
[object] -- The parsed arguments
"""
parser = argparse.ArgumentParser(
description="A CPU-based JSON Web Token (JWT) cracker",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
subparsers = parser.add_subparsers(
dest='attack_mode',
title="Attack-mode",
required=True
)
brute_force_subparser = subparsers.add_parser(
"brute-force",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
brute_force_subparser.add_argument(
"-c", "--charset",
default="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
dest="charset",
help="User-defined charset",
type=str,
required=False,
)
brute_force_subparser.add_argument(
"--increment-min",
default=1,
dest="increment_min",
help="Start incrementing at X",
type=int,
required=False,
)
brute_force_subparser.add_argument(
"--increment-max",
default=8,
dest="increment_max",
help="Stop incrementing at X",
type=int,
required=False,
)
cve_subparser = subparsers.add_parser(
"vulnerable",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
wordlist__subparser = subparsers.add_parser(
"wordlist",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Set the UTF-8 encoding and ignore error mode to avoid issues with the wordlist
wordlist__subparser.add_argument(
"-w", "--wordlist",
default=argparse.SUPPRESS,
dest="wordlist",
help="Wordlist of private key candidates",
required=True,
type=argparse.FileType(
'r',
encoding='UTF-8',
errors='ignore'
)
)
parser.add_argument(
"-lL", "--log-level",
default=logging.INFO,
dest="log_level",
# TODO: Improve how to retrieve all log levels
choices=[
'DEBUG',
'INFO',
],
help="Set the logging level",
type=str,
required=False,
)
parser.add_argument(
"-o", "--outfile",
dest="outfile",
help="Define outfile for recovered private keys",
required=False,
type=argparse.FileType(
'w+',
encoding='UTF-8',
errors='ignore'
)
)
parser.add_argument(
"--potfile-disable",
action='store_true',
default=False,
dest="potfile_disable",
help="Do not write potfile",
required=False,
)
parser.add_argument(
"--potfile-path",
default='jwtpot.potfile',
dest="potfile",
help="Specific path to potfile",
required=False,
type=argparse.FileType(
'a+',
encoding='UTF-8',
errors='ignore'
)
)
# parser.add_argument(
# "-tF", "--jwt-file",
# default=argparse.SUPPRESS,
# dest="token_file",
# help="File with JSON Web Tokens to attack",
# required=False,
# type=argparse.FileType(
# 'r',
# encoding='UTF-8',
# errors='ignore'
# )
# )
parser.add_argument(
default=argparse.SUPPRESS,
dest="token",
help="JSON Web Token to attack",
type=str
)
return parser.parse_args()
def bruteforce(charset, minlength, maxlength):
"""This function generates all the different possible combination in a given character space.
Arguments:
charset {string} -- The charset used to generate all possible candidates
minlength {integer} -- The minimum length for candiates generation
maxlength {integer} -- The maximum length for candiates generation
Returns:
[type] -- All the possible candidates
"""
return (''.join(candidate)
for candidate in chain.from_iterable(product(charset, repeat=i)
for i in range(minlength, maxlength + 1)))
def run(token, candidate):
"""This function checks if a candidate can decrypt a JWT token.
Arguments:
token {string} -- An encrypted JWT token to test
candidate {string} -- A candidate word for decoding
Returns:
[boolean] -- Result of the decoding attempt
"""
try:
payload = jwt.decode(token, candidate, algorithm='HS256')
return True
except jwt.exceptions.DecodeError:
logger.debug(f"DecodingError: {candidate}")
return False
except jwt.exceptions.InvalidTokenError:
logger.debug(f"InvalidTokenError: {candidate}")
return False
except Exception as ex:
logger.exception(f"Exception: {ex}")
sys.exit(1)
def is_vulnerable(args):
"""This function checks a JWT token against a well-known vulnerabilities.
Arguments:
args {object} -- The command-line arguments
"""
headers = jwt.get_unverified_header(args.token)
if headers['alg'] == "HS256":
logging.info("JWT vulnerable to HS256 guessing attacks")
elif headers['alg'] == "None":
logging.info("JWT vulnerable to CVE-2018-1000531")
def hs256_attack(args):
"""This function passes down different candidates to the run() function and is required
to handle different types of guessing attack.
Arguments:
args {object} -- The command-line arguments
"""
headers = jwt.get_unverified_header(args.token)
if not headers['alg'] == "HS256":
logging.error("JWT signed using an algorithm other than HS256.")
else:
tqdm_disable = True if args.log_level == "DEBUG" else False
if args.attack_mode == "brute-force":
# Count = ....
for candidate in tqdm(bruteforce(args.charset, args.increment_min, args.increment_max), disable=tqdm_disable):
if run(args.token, candidate):
return candidate
return None
elif args.attack_mode == "wordlist":
word_count = len(open(args.wordlist.name, "r",
encoding="utf-8").readlines())
for entry in tqdm(args.wordlist, disable=tqdm_disable, total=word_count):
if run(args.token, entry.rstrip()):
return entry.rstrip()
return None
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
220,
220,
220,
15069,
357,
34,
8,
2177,
532,
12131,
21000,
260,
1665,
88,
283,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
153... | 2.354677 | 3,186 |
#===========================================
# import modules, defs and variables
#===========================================
exec(open("./external.py").read())
exec(open("./defs.py").read())
exec(open("./config.py").read())
print('Finish modules, defs and variables import')
#===========================================
# L1.0 import data
#===========================================
df_pixel_rep = pd.read_csv(L0outputDir)
pixel_rep = df_pixel_rep.values.astype(np.float64)
print('Finish pixel raw data import')
#===========================================
# L1.0 data processing and manipulate
#===========================================
nPCs = retrace_columns(df_pixel_rep.columns.values, 'PC')
pcs = pixel_rep[:, 2:nPCs + 2]
# make folders for multivariate analysis
OutputFolder = locate_OutputFolder2(L0outputDir)
OutputFolder = locate_OutputFolder3(OutputFolder, 'multivariate clustering')
os.mkdir(OutputFolder)
# initiate a df for labels
df_pixel_label = pd.DataFrame(data=df_pixel_rep[['line_index', 'spectrum_index']].values.astype(int), columns = ['line_index','spectrum_index'])
print('Finish raw data processing')
#===========================================
# L1.0 GMM ensemble clustering
#===========================================
n_component = generate_nComponentList(n_components, span)
for i in range(repeat): # may repeat several times
for j in range(n_component.shape[0]): # ensemble with different n_component value
StaTime = time.time()
gmm = GMM(n_components = n_component[j], max_iter = 500) # max_iter does matter, no random seed assigned
labels = gmm.fit_predict(pcs)
# save data
index = j+1+i*n_component.shape[0]
title = 'No.' + str(index) + '_' +str(n_component[j]) + '_' + str(i)
df_pixel_label[title] = labels
SpenTime = (time.time() - StaTime)
# progressbar
print('{}/{}, finish classifying {}, running time is: {} s'.format(index, repeat*span, title, round(SpenTime, 2)))
print('Finish L1.0 GMM ensemble clustering, next step: L1.1 data process, plot and export data')
#===========================================
# L1.1 data processing and manipulate
#===========================================
pixel_label = relabel(df_pixel_label)
# parse dimension
NumLine = np.max(df_pixel_label.iloc[:,0])+1
NumSpePerLine = np.max(df_pixel_label.iloc[:,1])+1
# parameter for plotting
aspect = AspectRatio*NumSpePerLine/NumLine
# organize img
img = pixel_label.T.reshape(pixel_label.shape[1], NumLine, NumSpePerLine)
print('Finish L1.1 data process')
#===========================================
# L1.1 ensemble results in mosaic plot, save images
#===========================================
# mosaic img show
# parameters:
w_fig = 20 # default setting
ncols = ncols_L1
nrows = math.ceil((img.shape[0]-2)/ncols)
h_fig = w_fig * nrows * (AspectRatio + 0.16) / ncols # 0.2 is the space for title parameters
columns = df_pixel_label.columns.values
fig = plt.figure(figsize=(w_fig, h_fig))
fig.subplots_adjust(hspace= 0, wspace=0.01, right=0.95)
for i in range(1, img.shape[0]+1):
ax = fig.add_subplot(nrows, ncols, i)
im = ax.imshow(img[i-1], cmap=cm.tab20, aspect = aspect, vmin=0,vmax=19, interpolation='none')
ax.set_xticks([])
ax.set_yticks([])
# title
title = columns[i+1]
ax.set_title(title, pad=8, fontsize = 15)
# colorbar
cbar_ax = fig.add_axes([0.96,0.1,0.01,0.8])
cbar = fig.colorbar(im, cax=cbar_ax, ticks=[0.5,1.4,2.3,3.3,4.3,5.1,6.2,7,8.1,9,10,10.9,11.8,12.7,13.6,14.7,15.6,16.6,17.5,18.5])
cbar.ax.set_yticklabels([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]) #hard code
cbar.ax.tick_params(labelsize=10)
SaveDir = OutputFolder + '\\ensemble_clustering_plot.png'
plt.savefig(SaveDir, dpi=dpi)
plt.close()
print('Finish L1.1 GMM ensemble clustering result plotting, saving .csv file')
#===========================================
# save data
#===========================================
# organize a dataframe for relabel data
df_pixel_relabel = pd.DataFrame(pixel_label.astype(int), columns = df_pixel_label.columns.values[2:df_pixel_label.shape[1]])
df_pixel_relabel.insert(0, 'spectrum_index', df_pixel_label.iloc[:,1])
df_pixel_relabel.insert(0, 'line_index', df_pixel_label.iloc[:,0])
SaveDir = OutputFolder + '\\pixel_label.csv'
df_pixel_relabel.to_csv(SaveDir, index=False, sep=',')
print('L1 is done, please check output results at: \n{}'.format(OutputFolder))
| [
2,
10052,
2559,
18604,
198,
2,
1330,
13103,
11,
825,
82,
290,
9633,
198,
2,
10052,
2559,
18604,
198,
18558,
7,
9654,
7,
1911,
14,
22615,
13,
9078,
11074,
961,
28955,
198,
18558,
7,
9654,
7,
1911,
14,
4299,
82,
13,
9078,
11074,
961... | 2.676331 | 1,690 |
# coding: utf-8
""" Utility functions for Spectroscopy Made Hard """
__author__ = "Andy Casey <andy@astrowizici.st>"
# Standard library
import os
import logging
import platform
import string
import sys
import traceback
import tempfile
from six import string_types
from collections import Counter, OrderedDict
try:
from subprocess import getstatusoutput
except ImportError: # python 2
from commands import getstatusoutput
from hashlib import sha1 as sha
from random import choice
from socket import gethostname, gethostbyname
# Third party imports
import numpy as np
import astropy.table
from scipy import stats, integrate, optimize
common_molecule_name2Z = {
'Mg-H': 12,'H-Mg': 12,
'C-C': 6,
'C-N': 7, 'N-C': 7, #TODO
'C-H': 6, 'H-C': 6,
'O-H': 8, 'H-O': 8,
'Fe-H': 26,'H-Fe': 26,
'N-H': 7, 'H-N': 7,
'Si-H': 14,'H-Si': 14,
'Ti-O': 22,'O-Ti': 22,
'V-O': 23,'O-V': 23,
'Zr-O': 40,'O-Zr': 40
}
common_molecule_name2species = {
'Mg-H': 112,'H-Mg': 112,
'C-C': 606,
'C-N': 607,'N-C': 607,
'C-H': 106,'H-C': 106,
'O-H': 108,'H-O': 108,
'Fe-H': 126,'H-Fe': 126,
'N-H': 107,'H-N': 107,
'Si-H': 114,'H-Si': 114,
'Ti-O': 822,'O-Ti': 822,
'V-O': 823,'O-V': 823,
'Zr-O': 840,'O-Zr': 840
}
common_molecule_species2elems = {
112: ["Mg", "H"],
606: ["C", "C"],
607: ["C", "N"],
106: ["C", "H"],
108: ["O", "H"],
126: ["Fe", "H"],
107: ["N", "H"],
114: ["Si", "H"],
822: ["Ti", "O"],
823: ["V", "O"],
840: ["Zr", "O"]
}
__all__ = ["element_to_species", "element_to_atomic_number", "species_to_element", "get_common_letters", \
"elems_isotopes_ion_to_species", "species_to_elems_isotopes_ion", \
"find_common_start", "extend_limits", "get_version", \
"approximate_stellar_jacobian", "approximate_sun_hermes_jacobian",\
"hashed_id"]
logger = logging.getLogger(__name__)
def equilibrium_state(transitions, columns=("expot", "rew"), group_by="species",
    ycolumn="abundance", yerr_column=None):
    """
    Perform linear fits to the abundances provided in the transitions table
    with respect to x-columns.

    :param transitions:
        A table of atomic transitions with measured equivalent widths and
        abundances.

    :param columns: [optional]
        The names of the columns to make fits against.

    :param group_by: [optional]
        The name of the column in `transitions` to calculate states.

    :param ycolumn: [optional]
        NOTE(review): currently unused -- the body hard-codes the
        "abundance" column below; confirm before relying on this keyword.

    :param yerr_column: [optional]
        Name of a column of ordinate uncertainties; unit weights are used
        when this is None or the column is missing.

    :returns:
        A dict keyed by the `group_by` identifier; each value maps an
        x-column name to (slope, intercept, median_y, (std_y, std_slope), N).
    """

    lines = {}

    # Group once so each group's rows sit between consecutive group indices.
    transitions = transitions.group_by(group_by)
    for i, start_index in enumerate(transitions.groups.indices[:-1]):
        end_index = transitions.groups.indices[i + 1]

        # Do excitation potential first.
        group_lines = {}
        for x_column in columns:

            x = transitions[x_column][start_index:end_index]
            y = transitions["abundance"][start_index:end_index]
            if yerr_column is not None:
                try:
                    yerr = transitions[yerr_column][start_index:end_index]

                except KeyError:
                    logger.exception("Cannot find yerr column '{}':".format(
                        yerr_column))
                    yerr = np.ones(len(y))
            else:
                yerr = np.ones(len(y))

            # Only use finite values.
            finite = np.isfinite(x * y * yerr)
            try: # fix for masked arrays
                finite = finite.filled(False)
            except:
                # Plain ndarrays have no .filled(); keep the mask unchanged.
                pass
            if not np.any(finite):
                #group_lines[x_column] = (np.nan, np.nan, np.nan, np.nan, 0)
                continue

            # NOTE(review): fit_line is not defined in this chunk of the
            # file -- presumably a module-level helper defined elsewhere.
            m, b, medy, stdy, stdm, N = fit_line(x, y, None)
            group_lines[x_column] = (m, b, medy, (stdy, stdm), N)
            # x, y, yerr = np.array(x[finite]), np.array(y[finite]), np.array(yerr[finite])
            #
            # # Let's remove the covariance between m and b by making the mean of x = 0
            # xbar = np.mean(x)
            # x = x - xbar
            # # y = mx+b = m(x-xbar) + (b+m*xbar), so m is unchanged but b is shifted.
            #
            ## A = np.vstack((np.ones_like(x), x)).T
            ## C = np.diag(yerr**2)
            ## try:
            ## cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
            ## b, m = np.dot(cov, np.dot(A.T, np.linalg.solve(C, y)))
            ##
            ## except np.linalg.LinAlgError:
            ## #group_lines[x_column] \
            ## # = (np.nan, np.nan, np.median(y), np.std(y), len(x))
            ## None
            ##
            ## else:
            ## #group_lines[x_column] = (m, b, np.median(y), (np.std(y), np.sqrt(cov[1,1])), len(x))
            ## group_lines[x_column] = (m, b+m*xbar, np.median(y), (np.std(y), np.sqrt(cov[1,1])), len(x))
            # m, b, r, p, m_stderr = stats.linregress(x, y)
            # group_lines[x_column] = (m, b-m*xbar, np.median(y), (np.std(y), m_stderr), len(x))
        identifier = transitions[group_by][start_index]
        if group_lines:
            lines[identifier] = group_lines

    return lines
def spectral_model_conflicts(spectral_models, line_list):
    """
    Identify abundance conflicts in a list of spectral models.

    :param spectral_models:
        A list of spectral models to check for conflicts.

    :param line_list:
        A table of energy transitions.

    :returns:
        A list containing tuples of spectral model indices where there is a
        conflict about which spectral model to use for the determination of
        stellar parameters and/or composition.
    """

    line_list_hashes = line_list.compute_hashes()

    # Map each transition hash to the indices of every model that uses it.
    transition_hashes = {}
    for i, spectral_model in enumerate(spectral_models):
        for transition in spectral_model.transitions:
            transition_hash = line_list.hash(transition)
            transition_hashes.setdefault(transition_hash, [])
            transition_hashes[transition_hash].append(i)

    # Which of the transition_hashes appear more than once?
    conflicts = []
    # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError
    # under Python 3; .items() behaves identically on both (this module
    # already targets 2/3 compatibility via `six`).
    for transition_hash, indices in transition_hashes.items():
        if len(indices) < 2: continue

        # OK, what element is this transition?
        match = (line_list_hashes == transition_hash)
        element = line_list["element"][match][0].split()[0]

        # Of the spectral models that use this spectral hash, what are they
        # measuring?
        conflict_indices = []
        for index in indices:
            if element not in spectral_models[index].metadata["elements"]:
                # This transition is not being measured in this spectral model.
                continue

            else:
                # This spectral model is modeling this transition.

                # Does it say this should be used for the determination of
                # stellar parameters or composition?
                if spectral_models[index].use_for_stellar_parameter_inference \
                or spectral_models[index].use_for_stellar_composition_inference:
                    conflict_indices.append(index)

        if len(conflict_indices) > 1:
            conflicts.append(conflict_indices)

    return conflicts
# List the periodic table here so that we can use it outside of a single
# function scope (e.g., 'element in utils.periodic_table')
periodic_table = """H He
Li Be B C N O F Ne
Na Mg Al Si P S Cl Ar
K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr
Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe
Cs Ba Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn
Fr Ra Lr Rf"""

lanthanoids    =   "La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb"
actinoids      =   "Ac Th Pa U Np Pu Am Cm Bk Cf Es Fm Md No"

# Splice the lanthanoids in after Ba and the actinoids after Ra, then split
# into a flat list of symbols ordered by atomic number (index + 1 == Z).
periodic_table = periodic_table.replace(" Ba ", " Ba " + lanthanoids + " ") \
    .replace(" Ra ", " Ra " + actinoids + " ").split()
del actinoids, lanthanoids

# NOTE(review): no `hashed_id` function definition is visible above this call
# in this chunk of the file -- presumably it is defined in an omitted section;
# otherwise this rebinding raises NameError at import time.
hashed_id = hashed_id()
def approximate_stellar_jacobian(stellar_parameters, *args):
    """ Approximate the Jacobian of the stellar parameters and
        minimisation parameters, based on calculations from the Sun """
    # Expected parameter order is [teff, vt, logg, feh] (vt SECOND);
    # the *_2 variant below expects [teff, logg, vt, feh] instead.

    logger.info("Updated approximation of the Jacobian")

    teff, vt, logg, feh = stellar_parameters[:4]

    # This is the black magic.
    # Each entry is a linear fit of how one minimisation residual responds
    # to one stellar parameter; coefficients calibrated on the Sun.
    full_jacobian = np.array([
        [ 5.4393e-08*teff - 4.8623e-04, -7.2560e-02*vt + 1.2853e-01,  1.6258e-02*logg - 8.2654e-02,  1.0897e-02*feh - 2.3837e-02],
        [ 4.2613e-08*teff - 4.2039e-04, -4.3985e-01*vt + 8.0592e-02, -5.7948e-02*logg - 1.2402e-01, -1.1533e-01*feh - 9.2341e-02],
        [-3.2710e-08*teff + 2.8178e-04,  3.8185e-03*vt - 1.6601e-02, -1.2006e-02*logg - 3.5816e-03, -2.8592e-05*feh + 1.4257e-03],
        [-1.7822e-08*teff + 1.8250e-04,  3.5564e-02*vt - 1.1024e-01, -1.2114e-02*logg + 4.1779e-02, -1.8847e-02*feh - 1.0949e-01]
    ])
    # Transposed so parameters index the rows of the returned matrix.
    return full_jacobian.T
def approximate_sun_hermes_jacobian(stellar_parameters, *args):
    """
    Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations using the Sun
    and the HERMES atomic line list, after equivalent widths
    were carefully inspected.
    """
    # Expected parameter order is [teff, vt, logg, feh] (vt SECOND);
    # the *_2 variant below expects [teff, logg, vt, feh] instead.

    # logger.info("Updated approximation of the Jacobian")

    teff, vt, logg, feh = stellar_parameters[:4]

    # full_jacobian = np.array([
    #     [ 4.4973e-08*teff - 4.2747e-04, -1.2404e-03*vt + 2.4748e-02,  1.6481e-02*logg - 5.1979e-02,  1.0470e-02*feh - 8.5645e-03],
    #     [-9.3371e-08*teff + 6.9953e-04,  5.0115e-02*vt - 3.0106e-01, -6.0800e-02*logg + 6.7056e-02, -4.1281e-02*feh - 6.2085e-02],
    #     [-2.1326e-08*teff + 1.9121e-04,  1.0508e-03*vt + 1.1099e-03, -6.1479e-03*logg - 1.7401e-02,  3.4172e-03*feh + 3.7851e-03],
    #     [-9.4547e-09*teff + 1.1280e-04,  1.0033e-02*vt - 3.6439e-02, -9.5015e-03*logg + 3.2700e-02, -1.7947e-02*feh - 1.0383e-01]
    # ])

    # After culling abundance outliers,..
    full_jacobian = np.array([
        [ 4.5143e-08*teff - 4.3018e-04, -6.4264e-04*vt + 2.4581e-02,  1.7168e-02*logg - 5.3255e-02,  1.1205e-02*feh - 7.3342e-03],
        [-1.0055e-07*teff + 7.5583e-04,  5.0811e-02*vt - 3.1919e-01, -6.7963e-02*logg + 7.3189e-02, -4.1335e-02*feh - 6.0225e-02],
        [-1.9097e-08*teff + 1.8040e-04, -3.8736e-03*vt + 7.6987e-03, -6.4754e-03*logg - 2.0095e-02, -4.1837e-03*feh - 4.1084e-03],
        [-7.3958e-09*teff + 1.0175e-04,  6.5783e-03*vt - 3.6509e-02, -9.7692e-03*logg + 3.2322e-02, -1.7391e-02*feh - 1.0502e-01]
    ])
    # Transposed so parameters index the rows of the returned matrix.
    return full_jacobian.T
def approximate_stellar_jacobian_2(stellar_parameters, *args):
    """ Approximate the Jacobian of the stellar parameters and
        minimisation parameters, based on calculations from the Sun """
    # Same coefficients as approximate_stellar_jacobian, but the expected
    # parameter order here is [teff, logg, vt, feh] (logg SECOND), and the
    # matrix columns are permuted to match.

    logger.info("Updated approximation of the Jacobian {}".format(stellar_parameters))

    teff, logg, vt, feh = stellar_parameters[:4]

    #if np.isnan(teff): teff = 5000.; logger.info("jacobian: teff=nan->5000")
    #if np.isnan(logg): logg = 2.0; logger.info("jacobian: logg=nan->2.0")
    #if np.isnan(vt): vt = 1.75; logger.info("jacobian: vt=nan->1.75")
    #if np.isnan(feh): feh = -2.0; logger.info("jacobian: feh=nan->-2.0")

    # This is the black magic.
    full_jacobian = np.array([
        [ 5.4393e-08*teff - 4.8623e-04,  1.6258e-02*logg - 8.2654e-02, -7.2560e-02*vt + 1.2853e-01,  1.0897e-02*feh - 2.3837e-02],
        [ 4.2613e-08*teff - 4.2039e-04, -5.7948e-02*logg - 1.2402e-01, -4.3985e-01*vt + 8.0592e-02, -1.1533e-01*feh - 9.2341e-02],
        [-3.2710e-08*teff + 2.8178e-04, -1.2006e-02*logg - 3.5816e-03,  3.8185e-03*vt - 1.6601e-02, -2.8592e-05*feh + 1.4257e-03],
        [-1.7822e-08*teff + 1.8250e-04, -1.2114e-02*logg + 4.1779e-02,  3.5564e-02*vt - 1.1024e-01, -1.8847e-02*feh - 1.0949e-01]
    ])
    # Transposed so parameters index the rows of the returned matrix.
    return full_jacobian.T
def approximate_sun_hermes_jacobian_2(stellar_parameters, *args):
    """
    Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations using the Sun
    and the HERMES atomic line list, after equivalent widths
    were carefully inspected.
    """
    # Same coefficients as approximate_sun_hermes_jacobian, but the expected
    # parameter order here is [teff, logg, vt, feh] (logg SECOND), and the
    # matrix columns are permuted to match.

    # logger.info("Updated approximation of the Jacobian")

    teff, logg, vt, feh = stellar_parameters[:4]

    # full_jacobian = np.array([
    #     [ 4.4973e-08*teff - 4.2747e-04, -1.2404e-03*vt + 2.4748e-02,  1.6481e-02*logg - 5.1979e-02,  1.0470e-02*feh - 8.5645e-03],
    #     [-9.3371e-08*teff + 6.9953e-04,  5.0115e-02*vt - 3.0106e-01, -6.0800e-02*logg + 6.7056e-02, -4.1281e-02*feh - 6.2085e-02],
    #     [-2.1326e-08*teff + 1.9121e-04,  1.0508e-03*vt + 1.1099e-03, -6.1479e-03*logg - 1.7401e-02,  3.4172e-03*feh + 3.7851e-03],
    #     [-9.4547e-09*teff + 1.1280e-04,  1.0033e-02*vt - 3.6439e-02, -9.5015e-03*logg + 3.2700e-02, -1.7947e-02*feh - 1.0383e-01]
    # ])

    # After culling abundance outliers,..
    full_jacobian = np.array([
        [ 4.5143e-08*teff - 4.3018e-04,  1.7168e-02*logg - 5.3255e-02, -6.4264e-04*vt + 2.4581e-02,  1.1205e-02*feh - 7.3342e-03],
        [-1.0055e-07*teff + 7.5583e-04, -6.7963e-02*logg + 7.3189e-02,  5.0811e-02*vt - 3.1919e-01, -4.1335e-02*feh - 6.0225e-02],
        [-1.9097e-08*teff + 1.8040e-04, -6.4754e-03*logg - 2.0095e-02, -3.8736e-03*vt + 7.6987e-03, -4.1837e-03*feh - 4.1084e-03],
        [-7.3958e-09*teff + 1.0175e-04, -9.7692e-03*logg + 3.2322e-02,  6.5783e-03*vt - 3.6509e-02, -1.7391e-02*feh - 1.0502e-01]
    ])
    # Transposed so parameters index the rows of the returned matrix.
    return full_jacobian.T
def element_to_species(element_repr):
    """ Converts a string representation of an element and its ionization state
    to a floating point """

    if not isinstance(element_repr, string_types):
        raise TypeError("element must be represented by a string-type")

    # "Fe I" -> ("Fe", "I"); a bare "Fe" implies the neutral species.
    if " " in element_repr:
        element, ionization = element_repr.split()[:2]
    else:
        element, ionization = element_repr, "I"

    if element not in periodic_table:
        # Not a bare element: try the known-molecule table, otherwise assume
        # the caller passed a numeric species code as a string.
        try:
            return common_molecule_name2species[element]
        except KeyError:
            return float(element_repr)

    # Ionization encodes as the decimal part: "I" -> 0.0, "II" -> 0.1, ...
    ion_stage = max([0, ionization.upper().count("I") - 1]) / 10.
    return periodic_table.index(element) + 1 + ion_stage
def element_to_atomic_number(element_repr):
    """
    Converts a string representation of an element and its ionization state
    to a floating point.

    :param element_repr:
        A string representation of the element. Typical examples might be 'Fe',
        'Ti I', 'si'.

    :returns:
        The atomic number Z of the element, or the molecule's value from
        `common_molecule_name2Z`.

    :raises ValueError:
        If the element (or molecule) is not recognized.
    """

    if not isinstance(element_repr, string_types):
        raise TypeError("element must be represented by a string-type")

    # Normalize case and drop any ionization suffix (e.g. 'ti i' -> 'Ti').
    element = element_repr.title().strip().split()[0]
    try:
        # BUG FIX: list.index raises ValueError on a miss (never IndexError),
        # so the previous dead `except IndexError` clause has been removed.
        index = periodic_table.index(element)
    except ValueError:
        # Not a bare element; fall back on the known-molecule table.
        try:
            return common_molecule_name2Z[element]
        except KeyError:
            raise ValueError("unrecognized element '{}'".format(element_repr))

    return 1 + index
def species_to_element(species):
    """ Converts a floating point representation of a species to a string
    representation of the element and its ionization state """

    if not isinstance(species, (float, int)):
        raise TypeError("species must be represented by a floating point-type")

    # Truncate isotope digits: keep only one decimal place.
    if round(species, 1) != species:
        species = int(species * 10) / 10.

    if species + 1 >= len(periodic_table) or 1 > species:
        # Out of range for a bare element; probably a molecule code.
        molecule = common_molecule_species2elems.get(species)
        if molecule is not None:
            return "-".join(molecule)
        # No idea -- return the raw code as text.
        return str(species)

    symbol = periodic_table[int(species) - 1]
    # Decimal part encodes the ionization stage: .0 -> I, .1 -> II, ...
    ion_stage = int(round(10 * (species - int(species)) + 1))

    # The special cases: these are always quoted without an ionization state.
    if symbol in ("C", "H", "He"):
        return symbol
    return "%s %s" % (symbol, "I" * ion_stage)
def extend_limits(values, fraction=0.10, tolerance=1e-2):
    """ Extend the values of a list by a fractional amount """

    data = np.array(values)
    finite = np.isfinite(data)
    if np.sum(finite) == 0:
        raise ValueError("no finite values provided")

    lo = np.min(data[finite])
    hi = np.max(data[finite])
    span = np.ptp([lo, hi])
    lower, upper = lo - fraction * span, span * fraction + hi

    # Degenerate span: pad with either a unit offset (all-zero input) or a
    # fraction of the value itself.
    if np.abs(lower - upper) < tolerance:
        offset = 1 if np.abs(lower) < tolerance else np.abs(lower) * fraction
        lower, upper = lower - offset, offset + lower

    return np.array([lower, upper])
def get_version():
    """ Retrieves the version of Spectroscopy Made Hard based on the
        git version """

    # Without git on the PATH there is nothing useful to report.
    if getstatusoutput("which git")[0] != 0:
        return "Unknown"
    commands = ("git rev-parse --abbrev-ref HEAD",
                "git log --pretty=format:'%h' -n 1")
    # Branch name and short commit hash, colon-separated.
    suffix = ":".join(getstatusoutput(command)[1] for command in commands)
    return "0.1dev:" + suffix
def struct2array(x):
    """ Convert numpy structured array of simple type to normal numpy array

    :param x:
        A structured array whose fields all share one scalar dtype.

    :returns:
        A view of `x` with shape (len(x), number_of_fields).
    """
    ncol = len(x.dtype)
    # Renamed from `type`, which shadowed the builtin.
    base_type = x.dtype[0].type
    # The flat view is only valid when every field has the same scalar type.
    assert np.all([x.dtype[i].type == base_type for i in range(ncol)])
    return x.view(base_type).reshape((-1, ncol))
def process_session_uncertainties_lines(session, rhomat, minerr=0.001):
    """
    Using Sergey's estimator

    Build one table row per acceptable, non-upper-limit spectral model in the
    session, then estimate a per-species systematic error floor `s` and
    effective per-line weights from the total covariance.

    :param session: a session whose abundance uncertainties are computed.
    :param rhomat: 4x4 stellar-parameter correlation matrix ([T, g, v, M]).
    :param minerr: statistical error floor added in quadrature per line.
    :returns: an astropy table with per-line abundances, errors and weights.
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    from .photospheres.abundances import asplund_2009 as solar_composition
    cols = ["index","wavelength","species","expot","loggf",
            "logeps","e_stat","eqw","e_eqw","fwhm",
            "e_Teff","e_logg","e_vt","e_MH","e_sys",
            "e_tot","weight"]
    data = OrderedDict(zip(cols, [[] for col in cols]))
    for i, model in enumerate(session.spectral_models):
        if not model.is_acceptable: continue
        if model.is_upper_limit: continue
        wavelength = model.wavelength
        species = np.ravel(model.species)[0]
        expot = model.expot
        loggf = model.loggf
        if np.isnan(expot) or np.isnan(loggf):
            print(i, species, model.expot, model.loggf)
        try:
            logeps = model.abundances[0]
            staterr = model.metadata["1_sigma_abundance_error"]
            if isinstance(model, SpectralSynthesisModel):
                # Take the larger of the stored error and the fit covariance.
                (named_p_opt, cov, meta) = model.metadata["fitted_result"]
                if np.isfinite(cov[0,0]**0.5):
                    staterr = max(staterr, cov[0,0]**0.5)
            assert ~np.isnan(staterr)
            # apply minimum
            staterr = np.sqrt(staterr**2 + minerr**2)
            sperrdict = model.metadata["systematic_stellar_parameter_abundance_error"]
            e_Teff = sperrdict["effective_temperature"]
            e_logg = sperrdict["surface_gravity"]
            e_vt = sperrdict["microturbulence"]
            e_MH = sperrdict["metallicity"]
            e_all = np.array([e_Teff, e_logg, e_vt, e_MH])
            # Quadratic form e' R e gives the correlated systematic variance.
            syserr_sq = e_all.T.dot(rhomat.dot(e_all))
            syserr = np.sqrt(syserr_sq)
            fwhm = model.fwhm
        except Exception as e:
            print("ERROR!!!")
            print(i, species, model.wavelength)
            print("Exception:",e)
            logeps, staterr, e_Teff, e_logg, e_vt, e_MH, syserr = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
        if isinstance(model, ProfileFittingModel):
            eqw = model.equivalent_width or np.nan
            e_eqw = model.equivalent_width_uncertainty or np.nan
        else:
            # Sentinel for synthesis models, which have no single EQW.
            eqw = -999
            e_eqw = -999
        #toterr = np.sqrt(staterr**2 + syserr**2)
        input_data = [i, wavelength, species, expot, loggf,
                      logeps, staterr, eqw, e_eqw, fwhm,
                      e_Teff, e_logg, e_vt, e_MH, syserr,
                      np.nan, np.nan]
        for col, x in zip(cols, input_data):
            data[col].append(x)
    tab = astropy.table.Table(data)

    # Calculate systematic error and effective weights for each species
    tab["e_sys"] = np.nan
    for species in np.unique(tab["species"]):
        ix = np.where(tab["species"]==species)[0]
        t = tab[ix]
        # Estimate systematic error s
        s = s_old = 0.
        s_max = 2.
        delta = struct2array(t["e_Teff","e_logg","e_vt","e_MH"].as_array())
        ex = t["e_stat"]
        for i in range(35):
            # Total covariance: diagonal (s^2 + stat^2) plus correlated
            # stellar-parameter terms delta R delta'.
            sigma_tilde = np.diag(s**2 + ex**2) + (delta.dot(rhomat.dot(delta.T)))
            sigma_tilde_inv = np.linalg.inv(sigma_tilde)
            w = np.sum(sigma_tilde_inv, axis=1)
            xhat = np.sum(w*t["logeps"])/np.sum(w)
            dx = t["logeps"] - xhat
            # NOTE(review): `func` is used below but never defined in this
            # chunk of the file -- presumably a closure over s/dx that was
            # lost or lives in an omitted section; as written this raises
            # NameError at runtime. TODO confirm against the original source.
            if func(0) < func(s_max):
                s = 0
                break
            s = optimize.brentq(func, 0, s_max, xtol=.001)
            if np.abs(s_old - s) < 0.01:
                break
            s_old = s
        else:
            print(species,"s did not converge!")
        print("Final in {} iter: {:.1f} {:.3f}".format(i+1, species, s))
        tab["e_sys"][ix] = s
        tab["e_tot"][ix] = np.sqrt(s**2 + ex**2)
        sigma_tilde = np.diag(tab["e_tot"][ix]**2) + (delta.dot(rhomat.dot(delta.T)))
        sigma_tilde_inv = np.linalg.inv(sigma_tilde)
        w = np.sum(sigma_tilde_inv, axis=1)
        wb = np.sum(sigma_tilde_inv, axis=0)
        # Row and column sums of a symmetric inverse must agree.
        assert np.allclose(w,wb,rtol=1e-6), "Problem in species {:.1f}, Nline={}, e_sys={:.2f}".format(species, len(t), s)
        tab["weight"][ix] = w
    for col in tab.colnames:
        if col in ["index", "wavelength", "species", "loggf", "star"]: continue
        tab[col].format = ".3f"
    return tab
def process_session_uncertainties_calc_xfe_errors(summary_tab, var_X, cov_XY):
    """
    Compute [X/Fe] uncertainties against the Fe I and Fe II references:

        Var([X/Fe]) = Var(X) + Var(Fe) - 2 Cov(X, Fe)

    Covariances between ratios are not computed here, but can be derived,
    e.g. Cov([X/Fe], [Fe/H]) = Cov(X, Fe) - Cov(Fe, Fe).

    :param summary_tab: table-like with "species" and "[X/H]" columns.
    :param var_X: per-species [X/H] variances.
    :param cov_XY: per-species [X/H] covariance matrix.
    :returns: (feh1, exfe1, feh2, exfe2).
    """
    ix1 = None
    fe1_matches = np.where(summary_tab["species"] == 26.0)[0]
    if len(fe1_matches) == 0:
        print("No feh1: setting to nan")
        feh1 = np.nan
        exfe1 = np.nan
    else:
        ix1 = fe1_matches[0]
        feh1 = summary_tab["[X/H]"][ix1]
        # Var(X/Fe1) = Var(X) + Var(Fe1) - 2*Cov(X,Fe1)
        exfe1 = np.sqrt(var_X + var_X[ix1] - 2 * cov_XY[ix1, :])

    fe2_matches = np.where(summary_tab["species"] == 26.1)[0]
    if len(fe2_matches) == 0:
        print("No feh2: setting to feh1")
        feh2 = feh1
        # Scalar fallback from the Fe I variance when available.
        exfe2 = np.nan if ix1 is None else np.sqrt(var_X[ix1])
    else:
        ix2 = fe2_matches[0]
        feh2 = summary_tab["[X/H]"][ix2]
        # Var(X/Fe2) = Var(X) + Var(Fe2) - 2*Cov(X,Fe2)
        exfe2 = np.sqrt(var_X + var_X[ix2] - 2 * cov_XY[ix2, :])

    return feh1, exfe1, feh2, exfe2
def process_session_uncertainties_abundancesummary(tab, rhomat):
    """
    Take a table of lines and turn them into standard abundance table

    For every species: unweighted and weighted mean abundances with their
    scatter and standard errors, stellar-parameter error terms, the total
    correlated systematic error, and [X/H]; then [X/Fe] columns against the
    Fe I / Fe II references.

    :param tab: per-line table from process_session_uncertainties_lines.
    :param rhomat: 4x4 stellar-parameter correlation matrix ([T, g, v, M]).
    :returns: an astropy summary table with one row per species.
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    from .photospheres.abundances import asplund_2009 as solar_composition
    unique_species = np.unique(tab["species"])
    cols = ["species","elem","N",
            "logeps","sigma","stderr",
            "logeps_w","sigma_w","stderr_w",
            "e_Teff","e_logg","e_vt","e_MH","e_sys",
            "e_Teff_w","e_logg_w","e_vt_w","e_MH_w","e_sys_w",
            "[X/H]","e_XH","s_X"]
    data = OrderedDict(zip(cols, [[] for col in cols]))
    for species in unique_species:
        ttab = tab[tab["species"]==species]
        elem = species_to_element(species)
        N = len(ttab)
        # Unweighted statistics.
        logeps = np.mean(ttab["logeps"])
        stdev = np.std(ttab["logeps"])
        stderr = stdev/np.sqrt(N)
        w = ttab["weight"]
        finite = np.isfinite(w)
        if finite.sum() != N:
            print("WARNING: species {:.1f} N={} != finite weights {}".format(species, N, finite.sum()))
        x = ttab["logeps"]
        # Weighted statistics using the covariance-derived line weights.
        logeps_w = np.sum(w*x)/np.sum(w)
        stdev_w = np.sqrt(np.sum(w*(x-logeps_w)**2)/np.sum(w))
        stderr_w = np.sqrt(1/np.sum(w))
        sperrs = []
        sperrs_w = []
        for spcol in ["Teff","logg","vt","MH"]:
            # Shift each line by its stellar-parameter error and remeasure
            # the mean; the difference is that parameter's error term.
            x_new = x + ttab["e_"+spcol]
            e_sp = np.mean(x_new) - logeps
            sperrs.append(e_sp)
            #e_sp_w = np.sum(w*x_new)/np.sum(w) - logeps_w
            e_sp_w = np.sum(w*ttab["e_"+spcol])/np.sum(w)
            sperrs_w.append(e_sp_w)
        sperrs = np.array(sperrs)
        sperrs_w = np.array(sperrs_w)
        # Combine the four parameter terms through the correlation matrix.
        sperrtot = np.sqrt(sperrs.T.dot(rhomat.dot(sperrs)))
        sperrtot_w = np.sqrt(sperrs_w.T.dot(rhomat.dot(sperrs_w)))
        XH = logeps_w - solar_composition(species)
        #e_XH = np.sqrt(stderr_w**2 + sperrtot_w**2)
        e_XH = stderr_w
        # e_sys was assigned per species upstream, so all rows must agree.
        s_X = ttab["e_sys"][0]
        assert np.allclose(ttab["e_sys"], s_X), s_X
        input_data = [species, elem, N,
                      logeps, stdev, stderr,
                      logeps_w, stdev_w, stderr_w,
                      sperrs[0], sperrs[1], sperrs[2], sperrs[3], sperrtot,
                      sperrs_w[0], sperrs_w[1], sperrs_w[2], sperrs_w[3], sperrtot_w,
                      XH, e_XH, s_X
        ]
        assert len(cols) == len(input_data)
        for col, x in zip(cols, input_data):
            data[col].append(x)
    summary_tab = astropy.table.Table(data)

    ## Add in [X/Fe]
    # NOTE(review): process_session_uncertainties_covariance is not defined
    # in this chunk of the file -- presumably it lives in an omitted section.
    var_X, cov_XY = process_session_uncertainties_covariance(summary_tab, rhomat)
    feh1, efe1, feh2, efe2 = process_session_uncertainties_calc_xfe_errors(summary_tab, var_X, cov_XY)
    if len(summary_tab["[X/H]"]) > 0:
        summary_tab["[X/Fe1]"] = summary_tab["[X/H]"] - feh1
        summary_tab["e_XFe1"] = efe1
        summary_tab["[X/Fe2]"] = summary_tab["[X/H]"] - feh2
        summary_tab["e_XFe2"] = efe2
        # Ionized species (decimal part > 0) are referenced against Fe II.
        ixion = np.array([x - int(x) > .01 for x in summary_tab["species"]])
        summary_tab["[X/Fe]"] = summary_tab["[X/Fe1]"]
        summary_tab["e_XFe"] = summary_tab["e_XFe1"]
        summary_tab["[X/Fe]"][ixion] = summary_tab["[X/Fe2]"][ixion]
        summary_tab["e_XFe"][ixion] = summary_tab["e_XFe2"][ixion]
        for col in summary_tab.colnames:
            if col=="N" or col=="species" or col=="elem": continue
            summary_tab[col].format = ".3f"
    else:
        # Empty table: still create the [X/Fe] columns so callers can rely
        # on the schema.
        for col in ["[X/Fe]","[X/Fe1]","[X/Fe2]",
                    "e_XFe","e_XFe1","e_XFe2"]:
            summary_tab.add_column(astropy.table.Column(np.zeros(0),col))
            #summary_tab[col] = np.nan #.add_column(col)
    return summary_tab
def process_session_uncertainties(session,
    rho_Tg=0.0, rho_Tv=0.0, rho_TM=0.0, rho_gv=0.0, rho_gM=0.0, rho_vM=0.0):
    """
    Build the per-line measurement table and the per-species abundance
    summary (with uncertainties) for a session on which
    session.compute_all_abundance_uncertainties() has already been run.

    The rho_XY keywords are pairwise correlation coefficients between the
    stellar parameters, (X, Y) in [T, g, v, M]; all default to zero
    (uncorrelated parameters).

    :returns: (line_table, summary_table)
    """
    # Correlation matrix in [T, g, v, M] order; downstream it is combined
    # with the per-line parameter errors to form covariance terms.
    corr = _make_rhomat(rho_Tg, rho_Tv, rho_TM, rho_gv, rho_gM, rho_vM)

    # Per-line measurements (upper limits excluded at this stage).
    line_table = process_session_uncertainties_lines(session, corr)

    # Per-species abundance summary.
    summary_table = process_session_uncertainties_abundancesummary(line_table, corr)

    # Fold the upper limits back into both tables.
    line_table, summary_table = process_session_uncertainties_limits(
        session, line_table, summary_table, corr)
    return line_table, summary_table
def get_synth_eqw(model, window=1.0, wavelength=None,
                  get_spec=False):
    """
    Calculate the equivalent width associated with the synthetic line.
    This is done by synthesizing the line in absence of any other elements,
    then integrating the synthetic spectrum in a window around the central wavelength.
    The user can specify the size of the window (default +/-1A)
    and the central wavelength (default None -> model.wavelength)

    :param model: a SpectralSynthesisModel fitted to exactly one element.
    :param window: integration half-width in Angstroms.
    :param wavelength: central wavelength; defaults to model.wavelength.
    :param get_spec: if True, also return the synthetic dispersion/intensities.
    :returns: (eqw, eqw_all, blank_eqw, blank_eqw_all[, dispersion,
        intensities]) -- equivalent widths in milliangstroms for the windowed
        and full syntheses, with the fitted abundance and with the element
        blanked out.
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    assert isinstance(model, SpectralSynthesisModel)
    assert len(model.elements)==1, model.elements
    # Suppress every element except the one being measured.
    abundances = model.metadata["rt_abundances"].copy()
    for key in abundances:
        if key != model.elements[0]: abundances[key] = -9.0
    # BUG FIX: dict.values() returns a non-indexable view on Python 3, so
    # wrap it in list() to extract the fitted abundance (the first fitted
    # parameter of `named_p_opt`). Works identically on Python 2.
    abundances[model.elements[0]] = list(model.metadata["fitted_result"][0].values())[0]
    print(abundances)
    synth_dispersion, intensities, meta = model.session.rt.synthesize(
        model.session.stellar_photosphere, model.transitions,
        abundances,
        isotopes=model.session.metadata["isotopes"], twd=model.session.twd)[0]
    if wavelength is None: wavelength = model.wavelength
    ii = (synth_dispersion > wavelength - window) & (synth_dispersion < wavelength + window)
    # integrate with the trapezoid rule, get milliangstroms
    eqw = 1000.*integrate.trapz(1.0-intensities[ii], synth_dispersion[ii])
    # integrate everything with the trapezoid rule, get milliangstroms
    eqw_all = 1000.*integrate.trapz(1.0-intensities, synth_dispersion)

    # Re-synthesize with the target element suppressed as well, to measure
    # the blend/continuum contribution alone.
    for key in abundances:
        abundances[key] = -9.0
    blank_dispersion, blank_flux, blank_meta = model.session.rt.synthesize(
        model.session.stellar_photosphere, model.transitions,
        abundances,
        isotopes=model.session.metadata["isotopes"], twd=model.session.twd)[0]
    blank_eqw = 1000.*integrate.trapz(1.0-blank_flux[ii], blank_dispersion[ii])
    # integrate everything with the trapezoid rule, get milliangstroms
    blank_eqw_all = 1000.*integrate.trapz(1.0-blank_flux, blank_dispersion)
    if get_spec:
        return eqw, eqw_all, blank_eqw, blank_eqw_all, synth_dispersion, intensities
    return eqw, eqw_all, blank_eqw, blank_eqw_all
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
34030,
5499,
329,
13058,
45943,
11081,
14446,
6912,
37227,
198,
198,
834,
9800,
834,
796,
366,
35314,
21097,
1279,
10757,
31,
459,
808,
528,
44070,
13,
301,
24618,
198,
198,
2,
8997,
... | 2.041178 | 15,008 |
# -*- coding: utf-8 -*-
"""
Created on Sat May 18 16:04:58 2019
@author: Admin
"""
# -*- coding: utf-8 -*-
"""
Created on Mon July 8 17:30:45 2019
@author: Admin
"""
import pandas as pd
import numpy as np
# reading data
# One row per (order, product) with the cart position and a reordered flag;
# compact dtypes keep the memory footprint down.
order_products_prior_df = pd.read_csv('order_products_prior.csv', dtype={
            'order_id': np.int32,
            'product_id': np.int32,
            'add_to_cart_order': np.int16,
            'reordered': np.int8})
print('Loaded prior orders')
print('shape of Ordersproduct priors',order_products_prior_df.shape)
# Keep only a bounded range of order ids to limit memory usage.
order_products_prior_df=order_products_prior_df.loc[order_products_prior_df['order_id']<=2110720]

print('Loading orders')
orders_df = pd.read_csv( 'orders.csv', dtype={
        'order_id': np.int32,
        'user_id': np.int32,
        'eval_set': 'category',
        'order_number': np.int16,
        'order_dow': np.int8,
        'order_hour_of_day': np.int8,
        'days_since_prior_order': np.float32})
orders_df=orders_df.loc[orders_df['order_id']<=2110720]
print(orders_df.shape)

print('Loading aisles info')
# Despite the name, this frame maps product_id -> aisle_id (from products.csv).
aisles = pd.read_csv('products.csv', engine='c',
                       usecols = ['product_id','aisle_id'],
                       dtype={'product_id': np.int32, 'aisle_id': np.int32})

pd.set_option('display.float_format', lambda x: '%.3f' % x)
print("\n Checking the loaded CSVs")
print("Prior orders:", order_products_prior_df.shape)
print("Orders", orders_df.shape)
print("Aisles:", aisles.shape)

# Restrict everything to users who have a 'test' order (prediction targets).
test = orders_df[orders_df['eval_set'] == 'test' ]
user_ids = test['user_id'].values
orders_df = orders_df[orders_df['user_id'].isin(user_ids)]
print('test shape', test.shape)
print(orders_df.shape)
# Per-product order/reorder counts.
prior = pd.DataFrame(order_products_prior_df.groupby('product_id')['reordered'] \
                            .agg([('number_of_orders',len),('sum_of_reorders','sum')]))
print(prior.head())
# Laplace-smoothed reorder probability per product (the "informed prior").
prior['prior_p'] = (prior['sum_of_reorders']+1)/(prior['number_of_orders']+2) # Informed Prior
print(prior.head())
print('Here is The Prior: our first guess of how probable it is that a product be reordered once it has been ordered.')
#print(prior.head())

# merge everything into one dataframe and save any memory space
combined_features = pd.DataFrame()
combined_features = pd.merge(order_products_prior_df, orders_df, on='order_id', how='right')
# slim down comb -
combined_features.drop(['eval_set','order_dow','order_hour_of_day'], axis=1, inplace=True)
del order_products_prior_df
del orders_df
combined_features = pd.merge(combined_features, aisles, on ='product_id', how = 'left')
del aisles
prior.reset_index(inplace = True)
combined_features = pd.merge(combined_features, prior, on ='product_id', how = 'left')
del prior
#print(combined_features.head())

# Per-order count of reordered items, then discretized into buckets below.
recount = pd.DataFrame()
recount['reorder_c'] = combined_features.groupby(combined_features.order_id)['reordered'].sum().fillna(0)
#print(recount.head(20))
print('classification')
bins = [-0.1, 0, 2,4,6,8,11,14,19,71]
cat = ['None','<=2','<=4','<=6','<=8','<=11','<=14','<=19','>19']
recount['reorder_b'] = pd.cut(recount['reorder_c'], bins, labels = cat)
recount.reset_index(inplace = True)
#print(recount.head(20))
#We discretize reorder count into categories, 9 buckets, being sure to include 0 as bucket. These bins maximize mutual information with ['reordered'].
combined_features = pd.merge(combined_features, recount, how = 'left', on = 'order_id')
del recount
#print(combined_features.head(50))

# Discretize add-to-cart position into 8 buckets and drop the raw column.
bins = [0,2,3,5,7,9,12,17,80]
cat = ['<=2','<=3','<=5','<=7','<=9','<=12','<=17','>17']
combined_features['atco1'] = pd.cut(combined_features['add_to_cart_order'], bins, labels = cat)
del combined_features['add_to_cart_order']
#print(combined_features.head(50))
combined_features.to_csv('combined_features.csv', index=False)
# Likelihood factor P(atco1 | reordered): bucket counts per reordered class,
# normalized by the row totals.
atco_fac = pd.DataFrame()
atco_fac = combined_features.groupby(['reordered', 'atco1'])['atco1'].agg(np.count_nonzero).unstack('atco1')
#print(atco_fac.head(10))
tot = np.sum(atco_fac,axis=1)
print(tot.head(10))
atco_fac = atco_fac.iloc[:,:].div(tot, axis=0)
#print(atco_fac.head(10))
atco_fac = atco_fac.stack('atco1')
#print(atco_fac.head(20))
atco_fac = pd.DataFrame(atco_fac)
atco_fac.reset_index(inplace = True)
atco_fac.rename(columns = {0:'atco_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, atco_fac, how='left', on=('reordered', 'atco1'))
combined_features.head(50)

# Likelihood factor P(aisle_id | reordered, atco1), same normalize-and-merge
# pattern as above.
aisle_fac = pd.DataFrame()
aisle_fac = combined_features.groupby(['reordered', 'atco1', 'aisle_id'])['aisle_id']\
            .agg(np.count_nonzero).unstack('aisle_id')
print(aisle_fac.head(30))
#print(aisle_fac.head(30))
tot = np.sum(aisle_fac,axis=1)
print(tot.head(20))
aisle_fac = aisle_fac.iloc[:,:].div(tot, axis=0)
print(aisle_fac.head(20))
print('Stacking Aisle Fac')
aisle_fac = aisle_fac.stack('aisle_id')
print(aisle_fac.head(20))
aisle_fac = pd.DataFrame(aisle_fac)
aisle_fac.reset_index(inplace = True)
aisle_fac.rename(columns = {0:'aisle_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, aisle_fac, how = 'left', on = ('aisle_id','reordered','atco1'))

# Likelihood factor P(reorder_b | reordered, atco1).
recount_fac = pd.DataFrame()
recount_fac = combined_features.groupby(['reordered', 'atco1', 'reorder_b'])['reorder_b']\
            .agg(np.count_nonzero).unstack('reorder_b')
print(recount_fac.head(20))
tot = pd.DataFrame()
tot = np.sum(recount_fac,axis=1)
print(tot.head(20))
recount_fac = recount_fac.iloc[:,:].div(tot, axis=0)
print(recount_fac.head(20))
#print('after stacking***************************')
# NOTE(review): stack() is not in-place and this result is discarded; the
# unstack() chain below operates on the normalized frame -- confirm intended.
recount_fac.stack('reorder_b')
print(recount_fac.head(20))
recount_fac = pd.DataFrame(recount_fac.unstack('reordered').unstack('atco1')).reset_index()
#print(recount_fac.head(20))
recount_fac.rename(columns = {0:'recount_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, recount_fac, how = 'left', on = ('reorder_b', 'reordered', 'atco1'))
print(recount_fac.head(50))
print(combined_features.head(20))

# Naive-Bayes style product of the three likelihood factors.
p = pd.DataFrame()
p = (combined_features.loc[:,'atco_fac_p'] * combined_features.loc[:,'aisle_fac_p'] * combined_features.loc[:,'recount_fac_p'])
p.reset_index()
combined_features['p'] = p
print(combined_features.head(30))
# Split by the reordered flag to compute per-row Bayes factors.
comb0 = pd.DataFrame()
print(combined_features.shape)
comb0 = combined_features[combined_features['reordered']==0]
print(comb0.shape)
comb0.loc[:,'first_order'] = comb0['order_number']
# now every product that was ordered has a posterior in usr.
comb0.loc[:,'beta'] = 1
comb0.loc[:,'bf'] = (comb0.loc[:,'prior_p'] * comb0.loc[:,'p']/(1 - comb0.loc[:,'p'])) # bf1
# Small 'slight of hand' here. comb0.bf is really the first posterior and second prior.
#comb0.to_csv('comb0.csv', index=False)

# Calculate beta and BF1 for the reordered products
comb1 = pd.DataFrame()
comb1 = combined_features[combined_features['reordered']==1]
# Recency decay: discount by days since the prior order (up to 5% per 30 days).
comb1.loc[:,'beta'] = (1 - .05*comb1.loc[:,'days_since_prior_order']/30)
comb1.loc[:,'bf'] = (1 - comb1.loc[:,'p'])/comb1.loc[:,'p'] # bf0

# Recombine and keep only the columns needed for the posterior pass.
comb_last = pd.DataFrame()
comb_last = pd.concat([comb0, comb1], axis=0).reset_index(drop=True)
comb_last = comb_last[['reordered', 'user_id', 'order_id', 'product_id','reorder_c','order_number',
                   'bf','beta','atco_fac_p', 'aisle_fac_p', 'recount_fac_p']]
comb_last = comb_last.sort_values((['user_id', 'order_number', 'bf']))
pd.set_option('display.float_format', lambda x: '%.6f' % x)
comb_last.head()

# First/last order number per (user, product), merged back on.
first_order = pd.DataFrame()
first_order = comb_last[comb_last.reordered == 0]
first_order.rename(columns = {'order_number':'first_o'}, inplace = True)
first_order.to_csv('first_order_before_transform.csv', index=False)
first_order.loc[:,'last_o'] = comb_last.groupby(['user_id'])['order_number'].transform(max)
first_order.to_csv('first_order_transform.csv', index=False)
first_order = first_order[['user_id','product_id','first_o','last_o']]
comb_last = pd.merge(comb_last, first_order, on = ('user_id', 'product_id'), how = 'left')
comb_last.head()
# Round-trip through CSV (also serves as a checkpoint on disk).
comb_last.to_csv('comb_last.csv')
comb_last = pd.read_csv('comb_last.csv', index_col=0)
#comb_last.to_csv('comb_last.csv', index=False)
# Exploratory sanity check of the Bayes-factor pivot for a single user (786):
# one row per (user, product), one column per order number.
temp = pd.pivot_table(comb_last[(comb_last.user_id == 786
                       ) & (comb_last.first_o == comb_last.order_number)],
                     values = 'bf', index = ['user_id', 'product_id'],
                     columns = 'order_number', dropna=False)
#print (temp.head(10))
# Forward-fill across orders, defaulting untouched cells to a neutral 1.
temp = temp.fillna(method='pad', axis=1).fillna(1)
temp.head(10)
temp.to_csv('temp.csv')
#print(pd.pivot_table(comb_last[comb_last.first_o <= comb_last.order_number],
#                     values = 'bf', index = ['user_id', 'product_id'],
#                     columns = 'order_number').head(10))
# Overwrite cells from the first appearance onward with the actual factors.
temp.update(pd.pivot_table(comb_last[comb_last.first_o <= comb_last.order_number],
                     values = 'bf', index = ['user_id', 'product_id'],
                     columns = 'order_number'))
print(temp.head(10))
#temp.to_csv('temp.csv')
# --- Per-user posterior computation and top-10 product predictions ---
import logging
logging.basicConfig(filename='bayes.log',level=logging.DEBUG)
logging.debug("Started Posterior calculations")
print("Started Posterior calculations")
# Accumulator for the top-10 predicted products of each user.
pred = pd.DataFrame(columns=['user_id', 'product_id'])
pred['user_id'] = pred.user_id.astype(np.int32)
pred['product_id'] = pred.product_id.astype(np.int32)
for uid in comb_last.user_id.unique():
    # Progress breadcrumb for every 1000th user id.
    if uid % 1000 == 0:
        print("Posterior calculated until user %d" % uid)
        logging.debug("Posterior calculated until user %d" % uid)
    # del comb_last_temp
    comb_last_temp = pd.DataFrame()
    comb_last_temp = comb_last[comb_last['user_id'] == uid].reset_index()
    # del com
    com = pd.DataFrame()
    # Pivot: rows are (user, product) pairs, one Bayes-factor column per order.
    com = pd.pivot_table(comb_last_temp[comb_last_temp.first_o == comb_last_temp.order_number],
               values = 'bf', index = ['user_id', 'product_id'],
               columns = 'order_number', dropna=False)
    # Forward-fill across orders, then treat absent factors as neutral (1).
    com = com.fillna(method='pad', axis=1).fillna(1)
    com.update(pd.pivot_table(comb_last_temp[comb_last_temp.first_o <= comb_last_temp.order_number],
               values = 'bf', index = ['user_id', 'product_id'],
               columns = 'order_number'))
    com.reset_index(inplace=True)
    # NOTE(review): reset_index runs first, so user_id and product_id become
    # regular columns and are INCLUDED in this row product -- verify that the
    # posterior is meant to be scaled by the ids.
    com['posterior'] = com.product(axis=1)
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
    # prefer collecting frames in a list and concatenating once at the end.
    pred = pred.append(com.sort_values(by=['posterior'], ascending=False).head(10) \
        .groupby('user_id')['product_id'].apply(list).reset_index())
print("Posterior calculated for all users")
logging.debug("Posterior calculated for all users")
pred = pred.rename(columns={'product_id': 'products'})
print(pred.head())
pred.to_csv('Finalpredictions.csv', index=False)
# NOTE(review): `test` is defined earlier in the file (not visible here).
pred = pred.merge(test, on='user_id', how='left')[['order_id', 'products']]
# Render each product-id list as a space-separated string for submission.
pred['products'] = pred['products'].apply(lambda x: [int(i) for i in x]) \
    .astype(str).apply(lambda x: x.strip('[]').replace(',', ''))
print(pred.head())
pred.to_csv('Testpredictions.csv', index=False)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
7031,
1737,
1248,
1467,
25,
3023,
25,
3365,
13130,
201,
198,
201,
198,
31,
9800,
25,
32053,
201,
198,
37811,
201,
198,
201,
198,
2,
... | 2.196841 | 5,192 |
# Driver: run the sample input, then both parts of the real puzzle input.
# NOTE(review): `load_input_file`, `run_commands` and `alter_commands` are
# defined earlier in this file (not visible here).
import copy
testfile = "day8_test_input.txt"
testdata = load_input_file(testfile)
todaylist = load_input_file("day8input.txt")
# run_commands returns a sequence; element 0 is the part-1 answer.
part1 = run_commands(todaylist)[0]
print("part1:", part1)
part2 = alter_commands(todaylist)
print("part2:", part2)
| [
11748,
4866,
198,
198,
9288,
7753,
796,
366,
820,
23,
62,
9288,
62,
15414,
13,
14116,
1,
628,
628,
198,
9288,
7890,
796,
3440,
62,
15414,
62,
7753,
7,
9288,
7753,
8,
628,
628,
628,
198,
40838,
4868,
796,
3440,
62,
15414,
62,
7753,... | 2.581633 | 98 |
# Favourite fruits, and one message per recognised fruit.
Frutas_favoritas = ["Mangos", "Manzanas", "Bananas"]
_comentarios = [
    ("Mangos", "La neta si me gustan mucho los Manguitos"),
    ("Cocos", "En verdad me agradan los cocos"),
    ("Manzanas", "Me gustan mucho las manzanas"),
    ("Kiwis", "Comer kiwis esta chido"),
    ("Bananas", "Las bananas saben muy ricas"),
]
# Check the fruits in the original order so the printed lines match exactly.
for _fruta, _mensaje in _comentarios:
    if _fruta in Frutas_favoritas:
        print(_mensaje)
6732,
315,
292,
62,
69,
5570,
21416,
796,
14631,
44,
648,
418,
1600,
366,
5124,
15201,
292,
1600,
366,
30457,
15991,
8973,
198,
198,
361,
7203,
44,
648,
418,
1,
287,
1305,
315,
292,
62,
69,
5570,
21416,
2599,
198,
220,
220,
220,
3... | 2.329843 | 191 |
import sqlite3
import requests
import json
import time
"""
Input: doc from zhilian_doc.db
Aim:
    get the entities/knowledges in the doc.
    store them into entites.json/knowledges.json
entities.json:
{
    'name+position':List(entities),
}
konwledges.json:
{
    'entity':[
        ['relation', 'entity'],
        ...
    ],
}
"""
# Browser-like User-Agent, presumably to avoid being blocked upstream.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}
conn = sqlite3.connect('zhilian_doc.db')
cur = conn.cursor()
data = cur.execute('select * from zhilian_doc')
seen_entity = set()
# NOTE(review): `get_entity`, `flatten` and the *_store_to_json helpers are
# not defined in the visible part of this file.
name, pos, doc = next(data)
entities = get_entity(doc)
# Iterate the remaining rows; next() raises StopIteration (uncaught) once the
# cursor is exhausted, which is how this loop ends -- with a traceback.
while True:
    name, pos, doc = next(data)
    # Pause between rows, presumably to rate-limit an external service.
    time.sleep(3)
    entities = get_entity(doc)
    entities = list(flatten(entities))
    # knows = get_triple_tuple(entities)
    print(entities)
    # en_store_to_json(name, pos, entities)
    # konw_store_to_json(name, pos, knows)
| [
11748,
44161,
578,
18,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
640,
198,
37811,
198,
20560,
25,
2205,
422,
1976,
71,
35824,
62,
15390,
13,
9945,
198,
49945,
25,
198,
220,
220,
220,
651,
262,
12066,
14,
16275,
992,
3212,
287,
... | 2.361179 | 407 |
# Generated by Django 2.2.16 on 2021-04-16 19:46
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1433,
319,
33448,
12,
3023,
12,
1433,
678,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628,
198
] | 2.741935 | 31 |
"""
Tests for the Cudnn code.
"""
__author__ = "Francesco Visin"
__license__ = "3-clause BSD"
__credits__ = "Francesco Visin"
__maintainer__ = "Lisa Lab"
import theano
from theano import tensor
from theano.sandbox.cuda.dnn import dnn_available
from pylearn2.linear.conv2d import Conv2D
from pylearn2.linear.cudnn2d import Cudnn2D, make_random_conv2D
from pylearn2.space import Conv2DSpace
from pylearn2.utils import sharedX
from pylearn2.testing.skip import skip_if_no_gpu
import unittest
from nose.plugins.skip import SkipTest
import numpy as np
class TestCudnn(unittest.TestCase):
    """
    Tests for the Cudnn code.
    Parameters
    ----------
    Refer to unittest.TestCase.
    """
    def setUp(self):
        """
        Set up a test image and filter to re-use.
        """
        # These tests need a GPU with cuDNN; otherwise they are skipped.
        skip_if_no_gpu()
        if not dnn_available():
            raise SkipTest('Skipping tests cause cudnn is not available')
        # Force float32 for the test; restored in tearDown.
        self.orig_floatX = theano.config.floatX
        theano.config.floatX = 'float32'
        # One 3x3 single-channel image in ('b', 'c', 0, 1) layout.
        self.image = np.random.rand(1, 1, 3, 3).astype(theano.config.floatX)
        self.image_tensor = tensor.tensor4()
        self.input_space = Conv2DSpace((3, 3), 1, axes=('b', 'c', 0, 1))
        # A single 2x2 all-ones filter.
        self.filters_values = np.ones(
            (1, 1, 2, 2), dtype=theano.config.floatX
        )
        self.filters = sharedX(self.filters_values, name='filters')
        self.batch_size = 1
        self.cudnn2d = Cudnn2D(self.filters, self.batch_size, self.input_space)
    def tearDown(self):
        """
        After test clean up.
        """
        # Restore the floatX setting changed in setUp.
        theano.config.floatX = self.orig_floatX
    def test_value_errors(self):
        """
        Check correct errors are raised when bad input is given.
        """
        # A negative batch size must be rejected at construction time.
        with self.assertRaises(AssertionError):
            Cudnn2D(filters=self.filters, batch_size=-1,
                    input_space=self.input_space)
    def test_get_params(self):
        """
        Check whether the cudnn has stored the correct filters.
        """
        self.assertEqual(self.cudnn2d.get_params(), [self.filters])
    def test_get_weights_topo(self):
        """
        Check whether the cudnn has stored the correct filters.
        """
        # get_weights_topo uses ('b', 0, 1, 'c') ordering, hence the transpose.
        self.assertTrue(np.all(
            self.cudnn2d.get_weights_topo(borrow=True) ==
            np.transpose(self.filters.get_value(borrow=True), (0, 2, 3, 1))))
    def test_lmul(self):
        """
        Use conv2D to check whether the convolution worked correctly.
        """
        # The plain Conv2D implementation serves as the reference output.
        conv2d = Conv2D(self.filters, self.batch_size, self.input_space,
                        output_axes=('b', 'c', 0, 1),)
        f_co = theano.function([self.image_tensor],
                               conv2d.lmul(self.image_tensor))
        f_cu = theano.function([self.image_tensor],
                               self.cudnn2d.lmul(self.image_tensor))
        self.assertTrue(np.allclose(f_co(self.image), f_cu(self.image)))
    def test_set_batch_size(self):
        """
        Make sure that setting the batch size actually changes the property.
        """
        img_shape = self.cudnn2d._img_shape
        self.cudnn2d.set_batch_size(self.batch_size + 10)
        # Only the leading (batch) dimension may change.
        np.testing.assert_equal(self.cudnn2d._img_shape[0],
                                self.batch_size + 10)
        np.testing.assert_equal(self.cudnn2d._img_shape[1:], img_shape[1:])
    def test_axes(self):
        """
        Test different output axes.
        Use different output axes and see whether the output is what we
        expect.
        """
        default_axes = ('b', 'c', 0, 1)
        axes = (0, 'b', 1, 'c')
        another_axes = (0, 1, 'c', 'b')
        # 1, 3, 0, 2
        map_to_default = tuple(axes.index(axis) for axis in default_axes)
        # 2, 0, 3, 1
        map_to_another_axes = tuple(default_axes.index(axis) for
                                    axis in another_axes)
        input_space = Conv2DSpace((3, 3), num_channels=1, axes=another_axes)
        # Apply cudnn2d with `axes` as output_axes
        cudnn2d = Cudnn2D(self.filters, 1, input_space, output_axes=axes)
        f = theano.function([self.image_tensor],
                            cudnn2d.lmul(self.image_tensor))
        # Apply cudnn2d with default axes
        f_def = theano.function([self.image_tensor],
                                self.cudnn2d.lmul(self.image_tensor))
        # Apply f on the `another_axes`-shaped image
        output = f(np.transpose(self.image, map_to_another_axes))
        # Apply f_def on self.image (b,c,0,1)
        output_def = np.array(f_def(self.image))
        # transpose output to def
        output = np.transpose(output, map_to_default)
        np.testing.assert_allclose(output_def, output)
        np.testing.assert_equal(output_def.shape, output.shape)
    def test_channels(self):
        """
        Go from 2 to 3 channels and see whether the shape is correct.
        """
        input_space = Conv2DSpace((3, 3), num_channels=3)
        filters_values = np.ones(
            (2, 3, 2, 2), dtype=theano.config.floatX
        )
        filters = sharedX(filters_values)
        image = np.random.rand(1, 3, 3, 3).astype(theano.config.floatX)
        cudnn2d = Cudnn2D(filters, 1, input_space)
        f = theano.function([self.image_tensor],
                            cudnn2d.lmul(self.image_tensor))
        # A 3x3 input convolved with a 2x2 kernel gives a 2x2 map per filter.
        assert f(image).shape == (1, 2, 2, 2)
    def test_make_random_conv2D(self):
        """
        Test a random convolution.
        Create a random convolution and check whether the shape, axes and
        input space are all what we expect.
        """
        output_space = Conv2DSpace((2, 2), 1)
        cudnn2d = make_random_conv2D(1, self.input_space, output_space,
                                     (2, 2), 1)
        f = theano.function([self.image_tensor],
                            cudnn2d.lmul(self.image_tensor))
        assert f(self.image).shape == (1, 2, 2, 1)
        assert cudnn2d._input_space == self.input_space
        assert cudnn2d._output_axes == output_space.axes
| [
37811,
198,
51,
3558,
329,
262,
327,
463,
20471,
2438,
13,
198,
37811,
198,
198,
834,
9800,
834,
796,
366,
6732,
1817,
1073,
6911,
259,
1,
198,
834,
43085,
834,
796,
366,
18,
12,
565,
682,
347,
10305,
1,
198,
834,
66,
20696,
834,
... | 2.047764 | 2,952 |
import http.client
import logging
import math
import os
from dataclasses import dataclass
from enum import Enum
from hashlib import md5
from urllib.parse import urlparse
# Maximum number of results per listing page.
MAX_PAGE_SIZE = 1000
# Minimum size for one upload part: 5 MiB.
MIN_PART_SIZE = 5 * 1024 * 1024
UPLOAD_BASE_URL = 'upload.jwplayer.com'
# NOTE(review): mixes decimal (1000) and binary (1024) units; presumably ~25 GB.
MAX_FILE_SIZE = 25 * 1000 * 1024 * 1024
class UploadType(Enum):
    """
    This class stores the enum values for the different type of uploads.
    """
    # Single-request upload to a direct link.
    direct = "direct"
    # Upload split across multiple parts.
    multipart = "multipart"
@dataclass
class UploadContext:
    """
    This class stores the structure for an upload context so that it can be resumed later.
    """
    # NOTE(review): the dataclass fields and the resumability-check method
    # appear to have been elided from this copy; only the docstrings remain.
    """
    This method evaluates whether an upload can be resumed based on the upload context state
    """
class MultipartUpload:
    """
    This class manages the multi-part upload.
    """
    # NOTE(review): __init__, the upload_context property getter and the
    # helpers _upload_parts / _mark_upload_completion appear to have been
    # elided from this copy; the stacked bare decorators below will not
    # parse as-is.
    @property
    @upload_context.setter
    def upload(self):
        """
        This methods uploads the parts for the multi-part upload.
        Returns:
        """
        # Reject part sizes below the configured minimum up front.
        if self._target_part_size < MIN_PART_SIZE:
            raise ValueError(f"The part size has to be at least greater than {MIN_PART_SIZE} bytes.")
        filename = self._file.name
        file_size = os.stat(filename).st_size
        # Number of parts needed to cover the file at the requested size.
        part_count = math.ceil(file_size / self._target_part_size)
        if part_count > 10000:
            raise ValueError("The given file cannot be divided into more than 10000 parts. Please try increasing the "
                             "target part size.")
        # Upload the parts
        self._upload_parts(part_count)
        # Mark upload as complete
        self._mark_upload_completion()
class SingleUpload:
    """
    This class manages the operations related to the upload of a media file via a direct link.
    """
    # NOTE(review): __init__ and the upload_context property getter appear to
    # have been elided from this copy; the stacked bare decorators below will
    # not parse as-is.  _upload_to_s3, _get_returned_hash and _get_bytes_hash
    # are referenced but not defined in the visible part of this file.
    @property
    @upload_context.setter
    def upload(self):
        """
        Uploads the media file to the actual location as specified in the direct link.
        Returns:
        """
        self._logger.debug(f"Starting to upload file:{self._file.name}")
        # The file is sent in one shot; hash it first for the integrity check.
        bytes_chunk = self._file.read()
        computed_hash = _get_bytes_hash(bytes_chunk)
        retry_count = 0
        for _ in range(self._upload_retry_count):
            try:
                response = _upload_to_s3(bytes_chunk, self._upload_link)
                returned_hash = _get_returned_hash(response)
                # The returned hash is surrounded by '"' character
                if repr(returned_hash) != repr(f"\"{computed_hash}\""):
                    raise DataIntegrityError(
                        "The hash of the uploaded file does not match with the hash on the server.")
                self._logger.debug(f"Successfully uploaded file {self._file.name}.")
                return
            except (IOError, PartUploadError, DataIntegrityError, OSError) as err:
                self._logger.warning(err)
                self._logger.exception(err, stack_info=True)
                self._logger.warning(f"Encountered error uploading file {self._file.name}.")
                retry_count = retry_count + 1
                # Out of retries: rewind so a later attempt can re-read the file.
                if retry_count >= self._upload_retry_count:
                    self._file.seek(0, 0)
                    raise MaxRetriesExceededError(f"Max retries exceeded while uploading file {self._file.name}") \
                        from err
            except Exception as ex:
                # Unexpected failure: rewind, log, and propagate unchanged.
                self._file.seek(0, 0)
                self._logger.exception(ex)
                raise
class DataIntegrityError(Exception):
    """Raised when the hash of the uploaded data disagrees with the hash the server reports."""
class MaxRetriesExceededError(Exception):
    """Raised when the allowed number of upload retries for a part has been exhausted."""
class PartUploadError(Exception):
    """Raised when uploading a single part of a file fails."""
class S3UploadError(PartUploadError):
    """PartUploadError specialisation for uploads that go through S3."""
class UnrecoverableError(Exception):
    """Raised for failures that must not be retried or resumed from."""
| [
11748,
2638,
13,
16366,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
28686,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
12234,
8019,
1330,
45243,
20,
198,
6738,
2956,
297,
571,
... | 2.488643 | 1,717 |
# -*- coding:ascii -*-
# NOTE: auto-generated mako template module for 'users.html' (see
# _template_filename below); regenerate from the template, do not hand-edit.
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1425177385.390867
_enable_loop = True
_template_filename = '/Users/Nate/chf_dmp/account/templates/users.html'
_template_uri = 'users.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
"""
__M_BEGIN_METADATA
{"source_encoding": "ascii", "uri": "users.html", "filename": "/Users/Nate/chf_dmp/account/templates/users.html", "line_map": {"64": 32, "65": 37, "66": 37, "35": 1, "68": 38, "74": 68, "45": 3, "27": 0, "67": 38, "52": 3, "53": 12, "54": 12, "55": 16, "56": 16, "57": 20, "58": 20, "59": 24, "60": 24, "61": 28, "62": 28, "63": 32}}
__M_END_METADATA
"""
| [
2,
532,
9,
12,
19617,
25,
292,
979,
72,
532,
9,
12,
198,
6738,
285,
25496,
1330,
19124,
11,
16628,
11,
12940,
198,
198,
4944,
7206,
20032,
1961,
796,
19124,
13,
4944,
7206,
20032,
1961,
198,
834,
44,
62,
11600,
62,
18780,
259,
796... | 2.338323 | 334 |
from marshmallow import fields, Schema
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema, ModelSchema
from models import Saved_Posts, Post_Likes, User_Following, User, Post_Info, Post, Post_Tags, Comment_Likes, Reply_Likes
from sqlalchemy import and_
from .utilities import cleanhtml
import re
from app import db
# Module-level schema instance for serializing a single Post.
# NOTE(review): `PostSchema` is not defined in the visible part of this file.
PostSchemaOnly = PostSchema(many=False)
| [
6738,
22397,
42725,
1330,
7032,
11,
10011,
2611,
198,
6738,
22397,
42725,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
27722,
27054,
2611,
11,
9104,
27054,
2611,
198,
6738,
4981,
1330,
8858,
276,
62,
21496,
11,
2947,
62,
43,
7938,
1... | 3.392523 | 107 |
import paramiko
from time import sleep
import os
| [
11748,
5772,
12125,
198,
6738,
640,
1330,
3993,
198,
11748,
28686,
198,
220,
220,
220,
220,
220,
220,
220,
220
] | 2.85 | 20 |
'''This module manages all user endpoints(signup, login, logout etc)'''
from flask import jsonify, make_response
from flask_restful import Resource
from werkzeug.security import generate_password_hash
from .resources import Initialize
from ..models.user import User
from ..utils.users import Validation
class Signup(Resource, Initialize):
    '''Handles user registration'''

    @staticmethod
    def post():
        '''User signup endpoint: validate the payload, then persist the user.'''
        payload = Initialize.get_json_data()
        # Run the validation checks in the original order.
        checker = Validation(payload)
        checker.check_empty_keys()
        checker.check_empty_values()
        checker.check_number_of_fields()
        checker.check_signup_credentials()
        checker.check_already_exists()
        # Never store the raw password -- hash it first.
        hashed_password = generate_password_hash(
            payload["password"], method='sha256').strip()
        new_user = User(payload["username"].strip(),
                        payload["email"].lower().strip(), hashed_password)
        new_user.save()
        return make_response(jsonify({"message": "Account created successfully"}), 201)
| [
7061,
6,
1212,
8265,
15314,
477,
2836,
886,
13033,
7,
12683,
929,
11,
17594,
11,
2604,
448,
3503,
8,
7061,
6,
198,
6738,
42903,
1330,
33918,
1958,
11,
787,
62,
26209,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
198,
6738,
266,
958... | 2.724409 | 381 |
import torch
import wandb
from Trainer import Trainer
MAX_SUMMARY_IMAGES = 4
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Fail fast on CPU-only machines; training requires CUDA.
assert torch.cuda.is_available()
# LR = 2e-4
EPOCHS = 100
# BATCH_SIZE = 64
NUM_WORKERS = 4
# LAMBDA_L1 = 100
# Hyper-parameter search space for wandb's Bayesian sweep.
sweep_config = {
    'method': 'bayes', # grid, random
    'metric': {
        'name': 'loss_g',
        'goal': 'minimize'
    },
    'parameters': {
        'lambda_l1': {
            'values': [80, 90, 100, 110, 120, 130]
        },
        'batch_size': {
            'values': [64]
        },
        'learning_rate': {
            'values': [1e-5, 1e-4, 2e-4, 3e-4]
        }
    }
}
if __name__ == '__main__':
    sweep_id = wandb.sweep(sweep_config, project="poke-gan")
    # NOTE(review): `train_wrapper` is not defined in the visible part of
    # this file.
    wandb.agent(sweep_id, train_wrapper)
| [
11748,
28034,
198,
11748,
11569,
65,
198,
198,
6738,
31924,
1330,
31924,
198,
198,
22921,
62,
50,
5883,
44,
13153,
62,
3955,
25552,
796,
604,
198,
198,
7206,
27389,
796,
28034,
13,
25202,
10786,
66,
15339,
6,
611,
28034,
13,
66,
15339... | 1.974811 | 397 |
# -*- test-case-name: twisted.test.test_persisted -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Different styles of persisted objects.
"""
# System Imports
import types
import copy_reg
import copy
import inspect
import sys
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.python import log
from twisted.python import reflect
# Mapping from old module names to their new locations, consulted when
# unpickling references to modules that have since moved.
oldModules = {}
## First, let's register support for some stuff that really ought to
## be registerable...
# NOTE(review): Python-2-era code throughout this section (copy_reg,
# im_func/im_self/im_class, dict.has_key); it will not run unmodified
# under Python 3.
def pickleMethod(method):
    'support function for copy_reg to pickle method refs'
    return unpickleMethod, (method.im_func.__name__,
                            method.im_self,
                            method.im_class)
def unpickleMethod(im_name,
                   im_self,
                   im_class):
    'support function for copy_reg to unpickle method refs'
    try:
        unbound = getattr(im_class,im_name)
        if im_self is None:
            return unbound
        bound = types.MethodType(unbound.im_func, im_self, im_class)
        return bound
    except AttributeError:
        log.msg("Method",im_name,"not on class",im_class)
        assert im_self is not None,"No recourse: no instance to guess from."
        # Attempt a common fix before bailing -- if classes have
        # changed around since we pickled this method, we may still be
        # able to get it by looking on the instance's current class.
        unbound = getattr(im_self.__class__,im_name)
        log.msg("Attempting fixup with",unbound)
        if im_self is None:
            return unbound
        bound = types.MethodType(unbound.im_func, im_self, im_self.__class__)
        return bound
copy_reg.pickle(types.MethodType,
                pickleMethod,
                unpickleMethod)
def pickleModule(module):
    'support function for copy_reg to pickle module refs'
    return unpickleModule, (module.__name__,)
def unpickleModule(name):
    'support function for copy_reg to unpickle module refs'
    # Redirect modules that have been renamed since pickling.
    if oldModules.has_key(name):
        log.msg("Module has moved: %s" % name)
        name = oldModules[name]
        log.msg(name)
    return __import__(name,{},{},'x')
copy_reg.pickle(types.ModuleType,
                pickleModule,
                unpickleModule)
def pickleStringO(stringo):
    'support function for copy_reg to pickle StringIO.OutputTypes'
    return unpickleStringO, (stringo.getvalue(), stringo.tell())
# NOTE(review): unpickleStringO, pickleStringI and unpickleStringI are
# referenced below but not defined in the visible part of this file.
if hasattr(StringIO, 'OutputType'):
    copy_reg.pickle(StringIO.OutputType,
                    pickleStringO,
                    unpickleStringO)
if hasattr(StringIO, 'InputType'):
    copy_reg.pickle(StringIO.InputType,
                    pickleStringI,
                    unpickleStringI)
# NOTE(review): Ephemeral's method bodies appear to have been elided from
# this copy; only the class docstring remains.
class Ephemeral:
    """
    This type of object is never persisted; if possible, even references to it
    are eliminated.
    """
# Registry of Versioned instances pending upgrade (keyed by id()) and the
# ids already upgraded during the current pass.
versionedsToUpgrade = {}
upgraded = {}
def requireUpgrade(obj):
    """Require that a Versioned instance be upgraded completely first.
    """
    objID = id(obj)
    # Upgrade each registered instance at most once per pass.
    if objID in versionedsToUpgrade and objID not in upgraded:
        upgraded[objID] = 1
        obj.versionUpgrade()
    return obj
def _aybabtu(c):
    """
    Get all of the parent classes of C{c}, not including C{c} itself, which are
    strict subclasses of L{Versioned}.

    The name comes from "all your base are belong to us", from the deprecated
    L{twisted.python.reflect.allYourBase} function.

    @param c: a class
    @returns: list of classes
    """
    # Seed the accumulator with the two classes that must NOT appear in the
    # result; they are sliced back off before returning.
    collected = [c, Versioned]
    for ancestor in inspect.getmro(c):
        if ancestor not in collected and issubclass(ancestor, Versioned):
            collected.append(ancestor)
    # Drop the two seed entries, leaving only the strict Versioned ancestors.
    return collected[2:]
class Versioned:
    """
    This type of object is persisted with versioning information.
    I have a single class attribute, the int persistenceVersion. After I am
    unserialized (and styles.doUpgrade() is called), self.upgradeToVersionX()
    will be called for each version upgrade I must undergo.
    For example, if I serialize an instance of a Foo(Versioned) at version 4
    and then unserialize it when the code is at version 9, the calls::
      self.upgradeToVersion5()
      self.upgradeToVersion6()
      self.upgradeToVersion7()
      self.upgradeToVersion8()
      self.upgradeToVersion9()
    will be made. If any of these methods are undefined, a warning message
    will be printed.
    """
    persistenceVersion = 0
    persistenceForgets = ()
    # NOTE(review): Python-2-era code (dict.has_key); the `dict` parameter
    # below also shadows the builtin.
    def __getstate__(self, dict=None):
        """Get state, adding a version number to it on its way out.
        """
        dct = copy.copy(dict or self.__dict__)
        # Walk the Versioned ancestry, oldest first, plus this class itself.
        bases = _aybabtu(self.__class__)
        bases.reverse()
        bases.append(self.__class__) # don't forget me!!
        for base in bases:
            # Drop the attributes each class asked not to persist.
            if base.__dict__.has_key('persistenceForgets'):
                for slot in base.persistenceForgets:
                    if dct.has_key(slot):
                        del dct[slot]
            # Record each class's own version under a qualified key.
            if base.__dict__.has_key('persistenceVersion'):
                dct['%s.persistenceVersion' % reflect.qual(base)] = base.persistenceVersion
        return dct
    def versionUpgrade(self):
        """(internal) Do a version upgrade.
        """
        bases = _aybabtu(self.__class__)
        # put the bases in order so superclasses' persistenceVersion methods
        # will be called first.
        bases.reverse()
        bases.append(self.__class__) # don't forget me!!
        # first let's look for old-skool versioned's
        if self.__dict__.has_key("persistenceVersion"):
            # Hacky heuristic: if more than one class subclasses Versioned,
            # we'll assume that the higher version number wins for the older
            # class, so we'll consider the attribute the version of the older
            # class.  There are obviously possibly times when this will
            # eventually be an incorrect assumption, but hopefully old-school
            # persistenceVersion stuff won't make it that far into multiple
            # classes inheriting from Versioned.
            pver = self.__dict__['persistenceVersion']
            del self.__dict__['persistenceVersion']
            highestVersion = 0
            highestBase = None
            for base in bases:
                if not base.__dict__.has_key('persistenceVersion'):
                    continue
                if base.persistenceVersion > highestVersion:
                    highestBase = base
                    highestVersion = base.persistenceVersion
            if highestBase:
                self.__dict__['%s.persistenceVersion' % reflect.qual(highestBase)] = pver
        for base in bases:
            # ugly hack, but it's what the user expects, really
            if (Versioned not in base.__bases__ and
                not base.__dict__.has_key('persistenceVersion')):
                continue
            currentVers = base.persistenceVersion
            pverName = '%s.persistenceVersion' % reflect.qual(base)
            persistVers = (self.__dict__.get(pverName) or 0)
            if persistVers:
                del self.__dict__[pverName]
            assert persistVers <= currentVers, "Sorry, can't go backwards in time."
            # Apply each upgradeToVersionN hook in sequence up to the current
            # class version, warning when a step is missing.
            while persistVers < currentVers:
                persistVers = persistVers + 1
                method = base.__dict__.get('upgradeToVersion%s' % persistVers, None)
                if method:
                    log.msg( "Upgrading %s (of %s @ %s) to version %s" % (reflect.qual(base), reflect.qual(self.__class__), id(self), persistVers) )
                    method(self)
                else:
                    log.msg( 'Warning: cannot upgrade %s to version %s' % (base, persistVers) )
| [
2,
532,
9,
12,
1332,
12,
7442,
12,
3672,
25,
19074,
13,
9288,
13,
9288,
62,
19276,
6347,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
40006,
24936,
46779,
13,
198,
2,
4091,
38559,
24290,
329,
3307,
13,
628,
198,
198,
37811,
198,
40341... | 2.395916 | 3,281 |
from django.urls import path
from . import views
# CRUD routes for the Example model, all reversible by name.
urlpatterns = [
    path('example/', views.ExampleListView.as_view(), name='example_list'),
    # NOTE(review): unlike its siblings this route has no trailing slash;
    # confirm whether 'example/create/' was intended.
    path('example/create', views.ExampleCreateView.as_view(), name='example_create'),
    path('example/<int:pk>/update/', views.ExampleUpdateView.as_view(), name='example_update'),
    path('example/<int:pk>/delete/', views.ExampleDeleteView.as_view(), name='example_delete'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
20688,
14,
3256,
5009,
13,
16281,
8053,
7680,
13,
292,
62,
1177,
22784,
1438,
11639,
... | 2.958042 | 143 |
from threading import Thread
from selenium.webdriver import Remote
from selenium import webdriver
# start browser
# NOTE(review): `browser()` is defined elsewhere in this file (not visible
# here).  The string block below preserves an earlier multi-host variant
# that drove several Remote browsers in parallel threads.
"""
if __name__ == '__main__':
    host_list = {'127.0.0.1:4444': 'internet explorer', '127.0.0.1:5555': 'chrome'}
    threads = []
    files = range(len(host_list))
    for host_name, browser_name in host_list.items():
        t = Thread(target=browser, args=(host_name, browser_name))
        threads.append(t)
    for i in files:
        threads[i].start()
    for i in files:
        threads[i].join()
"""
if __name__ == '__main__':
    # Single-browser smoke test: open a page, then shut the driver down.
    driver = browser()
    driver.get("http://www.baidu.com")
    driver.quit()
| [
6738,
4704,
278,
1330,
14122,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
1330,
21520,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
628,
198,
2,
923,
6444,
198,
198,
37811,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
... | 2.472868 | 258 |
import numpy as np
from lagom.transform import geometric_cumsum
from lagom.utils import numpify
def bootstrapped_returns(gamma, rewards, last_V, reach_terminal):
    r"""Return (discounted) accumulated returns with bootstrapping for a
    batch of episodic transitions.

    Formally, suppose we have all rewards :math:`(r_1, \dots, r_T)`, it computes

    .. math::
        Q_t = r_t + \gamma r_{t+1} + \dots + \gamma^{T - t} r_T + \gamma^{T - t + 1} V(s_{T+1})

    .. note::

        The state values for terminal states are masked out as zero !

    """
    # Scalar bootstrap value for the state following the last transition.
    final_value = numpify(last_V, np.float32).item()
    # A terminal episode contributes no value beyond its last reward.
    tail = 0.0 if reach_terminal else final_value
    discounted = geometric_cumsum(gamma, np.append(rewards, tail))
    # Drop the appended bootstrap slot: one return per actual reward.
    return discounted[0, :-1].astype(np.float32)
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
19470,
296,
13,
35636,
1330,
38445,
62,
66,
5700,
388,
198,
6738,
19470,
296,
13,
26791,
1330,
299,
931,
1958,
628,
198,
198,
4299,
6297,
12044,
1496,
62,
7783,
82,
7,
28483,
2611,
11,
... | 2.297814 | 366 |
# Extract device credentials from a console save blob and emit the argument
# file consumed by the companion webserver.
from nintendo.dauth import LATEST_VERSION
username = None
password = None
with open("ConsoleData/8000000000000010", mode="rb") as file:
	data = file.read()
	# 8 bytes at 0x64020, reversed before hex-encoding -- presumably stored
	# little-endian; confirm against the console format.
	username_bytes = bytearray(data[0x00064020:0x00064028])
	username_bytes.reverse()
	username = "0x" + username_bytes.hex().upper()
	# The 40-byte ASCII password immediately follows.
	password = data[0x00064028:0x00064050].decode("ascii")
with open("webserver_args.json", mode="w") as file:
	args = """{
	"system_version": %d,
	"user_id": "%s",
	"password": "%s",
	"keys": "./ConsoleData/prod.keys",
	"prodinfo": "./ConsoleData/PRODINFO.dec",
	"ticket": "./ConsoleData/SUPER MARIO MAKER 2 v0 (01009B90006DC000) (BASE).tik"
}""" % (LATEST_VERSION, username, password)
	file.write(args)
6738,
299,
8773,
13,
6814,
1071,
1330,
42355,
6465,
62,
43717,
198,
198,
29460,
796,
6045,
198,
28712,
796,
6045,
198,
4480,
1280,
7203,
47581,
6601,
14,
23,
8269,
2388,
20943,
1600,
4235,
2625,
26145,
4943,
355,
2393,
25,
198,
197,
7... | 2.580524 | 267 |
"""
Deals with the world map, which submarines explore.
"""
import string
from functools import reduce
from ALTANTIS.utils.text import list_to_and_separated
from ALTANTIS.utils.direction import reverse_dir, directions
from ALTANTIS.utils.consts import X_LIMIT, Y_LIMIT
from ALTANTIS.world.validators import InValidator, NopValidator, TypeValidator, BothValidator, LenValidator, RangeValidator
from ALTANTIS.world.consts import ATTRIBUTES, WEATHER, WALL_STYLES
import random
from typing import List, Optional, Tuple, Any, Dict, Collection
# Module-level world grid, indexed [x][y].  NOTE(review): `Cell` is not
# defined in the visible part of this file.
undersea_map = [[Cell() for _ in range(Y_LIMIT)] for _ in range(X_LIMIT)]
def map_to_dict() -> Dict[str, Any]:
    """
    Converts our map to dict form. Since each of our map entries can be
    trivially converted into dicts, we just convert them individually.
    We also append a class identifier so they can be recreated correctly.
    """
    # Serialise every cell in place, preserving the [x][y] layout.
    serialised = [
        [undersea_map[x][y]._to_dict() for y in range(Y_LIMIT)]
        for x in range(X_LIMIT)
    ]
    return {"map": serialised, "x_limit": X_LIMIT, "y_limit": Y_LIMIT}
def map_from_dict(dictionary: Dict[str, Any]):
    """
    Takes a triple generated by map_to_dict and overwrites our map with it.
    """
    # NOTE(review): X_LIMIT/Y_LIMIT are imported from ALTANTIS.utils.consts,
    # so this rebinds only this module's copies; other modules that imported
    # those names directly will still see the old values.
    global X_LIMIT, Y_LIMIT, undersea_map
    X_LIMIT = dictionary["x_limit"]
    Y_LIMIT = dictionary["y_limit"]
    map_dicts = dictionary["map"]
    # Rebuild every cell via Cell._from_dict before swapping the grid in.
    undersea_map_new = [[Cell._from_dict(map_dicts[x][y]) for y in range(Y_LIMIT)] for x in range(X_LIMIT)]
    undersea_map = undersea_map_new
| [
37811,
198,
5005,
874,
351,
262,
995,
3975,
11,
543,
34031,
7301,
13,
198,
37811,
198,
11748,
4731,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
6738,
8355,
51,
8643,
1797,
13,
26791,
13,
5239,
1330,
1351,
62,
1462,
62,
392,
6... | 2.616613 | 626 |
from flask import Flask, render_template, request
from wtforms import Form, TextAreaField, validators
import os
import pickle
app = Flask(__name__)
######## Preparing the Predictor
cur_dir = os.path.dirname(__file__)
# NOTE(review): unpickling executes arbitrary code; only ship trusted
# pkl files alongside this app.
clf = pickle.load(open(os.path.join(cur_dir,'pkl_objects/diabetes.pkl'), 'rb'))
# NOTE(review): the view functions for these two routes appear to have been
# elided from this copy -- as written the decorators have nothing to
# decorate and the file will not parse.
@app.route('/')
@app.route('/results', methods=['POST'])
if __name__ == '__main__':
    app.run(debug=True)
#
#
#2,108,64,30.37974684,156.05084746,30.8,0.158,21
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
198,
6738,
266,
83,
23914,
1330,
5178,
11,
8255,
30547,
15878,
11,
4938,
2024,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
... | 2.463918 | 194 |
"""Module to test graph with maximum size that supports coloring algorithm."""
import sys
import os
from time import time
sys.path.append(os.getcwd())
from graph.graph_coloring import Graph
graph = Graph()
graph.create_graph_from_file('graph/graph_coloring_tests/max_size_graph.txt')
start = time()
colored_vertices = graph.color_graph(995)
end = time()
expected = [f'V{num}:{num}' for num in range(1, 995)]
print(expected == colored_vertices)
print('Time taken: ', end - start)
| [
37811,
26796,
284,
1332,
4823,
351,
5415,
2546,
326,
6971,
33988,
11862,
526,
15931,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
640,
1330,
640,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
1136,
66,
16993,
28955,
198,
... | 3.108974 | 156 |
import sys
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator
from matplotlib import gridspec
from mpl_toolkits.axes_grid.inset_locator import inset_axes
#majorLocatorX = MultipleLocator(2)
#minorLocatorX = MultipleLocator(1)
#majorLocatorY = MultipleLocator(0.05)
#minorLocatorY = MultipleLocator(0.025)
# Input data files; columns are parsed positionally in the loops below.
filename1 = '/home/sam/Documents/thesis/data/PA_EOM_COM.dat'
filename2 = '/home/sam/Documents/thesis/data/PR_EOM_COM.dat'
# Eight parallel (hw0, e0, hw1, e1, hwa) series -- one group per data block
# in the input files.
hw0_1 = []
e0_1 = []
hw1_1 = []
e1_1 = []
hwa_1 = []
hw0_2 = []
e0_2 = []
hw1_2 = []
e1_2 = []
hwa_2 = []
hw0_3 = []
e0_3 = []
hw1_3 = []
e1_3 = []
hwa_3 = []
hw0_4 = []
e0_4 = []
hw1_4 = []
e1_4 = []
hwa_4 = []
hw0_5 = []
e0_5 = []
hw1_5 = []
e1_5 = []
hwa_5 = []
hw0_6 = []
e0_6 = []
hw1_6 = []
e1_6 = []
hwa_6 = []
hw0_7 = []
e0_7 = []
hw1_7 = []
e1_7 = []
hwa_7 = []
hw0_8 = []
e0_8 = []
hw1_8 = []
e1_8 = []
hwa_8 = []
# Read both files whole and split into lines for positional parsing.
with open(filename1) as f1:
    data1 = f1.read()
data1 = data1.split('\n')
with open(filename2) as f2:
    data2 = f2.read()
data2 = data2.split('\n')
for num in range(len(data1)):
line = data1[num].split()
if( num - num%6 == 0 ):
hw0_1.append(float(line[0]))
e0_1.append(float(line[6]))
elif( num - num%6 == 6 ):
hw0_2.append(float(line[0]))
e0_2.append(float(line[6]))
elif( num - num%6 == 12 ):
hw0_3.append(float(line[0]))
e0_3.append(float(line[6]))
elif( num - num%6 == 18 ):
hw0_4.append(float(line[0]))
e0_4.append(float(line[6]))
if( num >= 24 and num%2 == 0 ):
line2 = data1[num+1].split()
if( num >= 24 and num < 36 ):
if( float(line[7]) < float(line2[7]) ):
hw1_1.append(float(line[0]))
e1_1.append(float(line[7]))
hwa_1.append(float(line[1]))
else:
hw1_1.append(float(line2[0]))
e1_1.append(float(line2[7]))
hwa_1.append(float(line2[1]))
if( num >= 36 and num < 48 ):
if( float(line[7]) < float(line2[7]) ):
hw1_2.append(float(line[0]))
e1_2.append(float(line[7]))
hwa_2.append(float(line[1]))
else:
hw1_2.append(float(line2[0]))
e1_2.append(float(line2[7]))
hwa_2.append(float(line2[1]))
if( num >= 48 and num < 60 ):
if( float(line[7]) < float(line2[7]) ):
hw1_3.append(float(line[0]))
e1_3.append(float(line[7]))
hwa_3.append(float(line[1]))
else:
hw1_3.append(float(line2[0]))
e1_3.append(float(line2[7]))
hwa_3.append(float(line2[1]))
if( num >= 60 and num < 72 ):
if( float(line[7]) < float(line2[7]) ):
hw1_4.append(float(line[0]))
e1_4.append(float(line[7]))
hwa_4.append(float(line[1]))
else:
hw1_4.append(float(line2[0]))
e1_4.append(float(line2[7]))
hwa_4.append(float(line2[1]))
for num in range(len(data2)):
line = data2[num].split()
if( num - num%6 == 0 ):
hw0_5.append(float(line[0]))
e0_5.append(float(line[6]))
elif( num - num%6 == 6 ):
hw0_6.append(float(line[0]))
e0_6.append(float(line[6]))
elif( num - num%6 == 12 ):
hw0_7.append(float(line[0]))
e0_7.append(float(line[6]))
elif( num - num%6 == 18 ):
hw0_8.append(float(line[0]))
e0_8.append(float(line[6]))
if( num >= 24 and num%2 == 0 ):
line2 = data2[num+1].split()
if( num >= 24 and num < 36 ):
if( float(line[7]) < float(line2[7]) ):
hw1_5.append(float(line[0]))
e1_5.append(float(line[7]))
hwa_5.append(float(line[1]))
else:
hw1_5.append(float(line2[0]))
e1_5.append(float(line2[7]))
hwa_5.append(float(line2[1]))
if( num >= 36 and num < 48 ):
if( float(line[7]) < float(line2[7]) ):
hw1_6.append(float(line[0]))
e1_6.append(float(line[7]))
hwa_6.append(float(line[1]))
else:
hw1_6.append(float(line2[0]))
e1_6.append(float(line2[7]))
hwa_6.append(float(line2[1]))
if( num >= 48 and num < 60 ):
if( float(line[7]) < float(line2[7]) ):
hw1_7.append(float(line[0]))
e1_7.append(float(line[7]))
hwa_7.append(float(line[1]))
else:
hw1_7.append(float(line2[0]))
e1_7.append(float(line2[7]))
hwa_7.append(float(line2[1]))
if( num >= 60 and num < 72 ):
if( float(line[7]) < float(line2[7]) ):
hw1_8.append(float(line[0]))
e1_8.append(float(line[7]))
hwa_8.append(float(line[1]))
else:
hw1_8.append(float(line2[0]))
e1_8.append(float(line2[7]))
hwa_8.append(float(line2[1]))
print(e0_1)
print(hw0_1)
print(e1_1)
print(hw1_1)
print(hwa_1)
print(e0_2)
print(hw0_2)
print(e1_2)
print(hw1_2)
print(hwa_2)
print(e0_3)
print(hw0_3)
print(e1_3)
print(hw1_3)
print(hwa_3)
print(e0_4)
print(hw0_4)
print(e1_4)
print(hw1_4)
print(hwa_4)
#hw0_1_1 = hw0_1[:-1]
#e0_1_1 = e0_1[:-1]
#hw0_2_1 = hw0_2[:-1]
#e0_2_1 = e0_2[:-1]
plt.rc('font', family='serif')
fig = plt.figure(figsize=(11, 10))
gs = gridspec.GridSpec(2, 2)
ax1 = plt.subplot(gs[0])
plt.plot(hw0_1, e0_1, '-', marker='o', color='k', linewidth=2.0, label=r'$\mathrm{{}^{17}O(5/2^{+})}$')
plt.plot(hw0_2, e0_2, '--', marker='s', color='r', linewidth=2.0, label=r'$\mathrm{{}^{17}F(5/2^{+})}$')
plt.plot(hw0_3, e0_3, ':', marker='^', color='b', linewidth=2.0, label=r'$\mathrm{{}^{23}O(1/2^{+})}$')
plt.plot(hw0_4, e0_4, '-.', marker='v', color='g', linewidth=2.0, label=r'$\mathrm{{}^{23}F(5/2^{+})}$')
plt.axis([6.0, 30.0, -0.5, 9.0])
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_ylabel(r'$\mathrm{E_{cm}(\omega)\ (MeV)}$', fontsize=15)
ax1.legend(bbox_to_anchor=(0.325,0.975), frameon=False, fontsize=11)
ax2 = plt.subplot(gs[1])
plt.plot(hw1_1, e1_1, '-', marker='o', color='k', linewidth=2.0, label=r'$\mathrm{{}^{17}O(5/2^{+})}$')
plt.plot(hw1_2, e1_2, '--', marker='s', color='r', linewidth=2.0, label=r'$\mathrm{{}^{17}F(5/2^{+})}$')
plt.plot(hw1_3, e1_3, ':', marker='^', color='b', linewidth=2.0, label=r'$\mathrm{{}^{23}O(1/2^{+})}$')
plt.plot(hw1_4, e1_4, '-.', marker='v', color='g', linewidth=2.0, label=r'$\mathrm{{}^{23}F(5/2^{+})}$')
plt.axis([6.0, 30.0, 0.0, 1.0])
plt.setp(ax2.get_xticklabels(), visible=False)
ax2.set_ylabel(r'$\mathrm{E_{cm}(\widetilde{\omega})\ (MeV)}$', fontsize=15)
inset_axes2 = inset_axes(ax2,width="50%",height=1.5,loc=1)
plt.plot(hw0_1, hwa_1, '-', marker='o', color='r', linewidth=2.0)
plt.plot(hw0_3, hwa_3, '-.', marker='v', color='b', linewidth=2.0)
plt.xlabel(r'$\mathrm{\hbar\omega\ (MeV)}$', fontsize=14)
plt.ylabel(r'$\mathrm{\hbar\widetilde{\omega}\ (MeV)}$', fontsize=14)
annotation_string = r'$\mathrm{^{17}O,^{17}F}$'
plt.annotate(annotation_string, fontsize=12, xy=(0.25, 0.75), xycoords='axes fraction')
annotation_string = r'$\mathrm{^{23}O,^{23}F}$'
plt.annotate(annotation_string, fontsize=12, xy=(0.50, 0.25), xycoords='axes fraction')
ax2.legend(bbox_to_anchor=(0.325,0.975), frameon=False, fontsize=11)
ax3 = plt.subplot(gs[2])
plt.plot(hw0_5, e0_5, '-', marker='o', color='k', linewidth=2.0, label=r'$\mathrm{{}^{15}N(1/2^{-})}$')
plt.plot(hw0_6, e0_6, '--', marker='s', color='r', linewidth=2.0, label=r'$\mathrm{{}^{15}O(1/2^{-})}$')
plt.plot(hw0_7, e0_7, ':', marker='^', color='b', linewidth=2.0, label=r'$\mathrm{{}^{21}N(1/2^{-})}$')
plt.plot(hw0_8, e0_8, '-.', marker='v', color='g', linewidth=2.0, label=r'$\mathrm{{}^{21}O(5/2^{+})}$')
plt.axis([6.0, 30.0, -0.5, 10.0])
ax3.set_xlabel(r'$\mathrm{\hbar\omega\ (MeV)}$', fontsize=15)
ax3.set_ylabel(r'$\mathrm{E_{cm}(\omega)\ (MeV)}$', fontsize=15)
ax3.legend(bbox_to_anchor=(0.325,0.975), frameon=False, fontsize=11)
ax4 = plt.subplot(gs[3])
plt.plot(hw1_5, e1_5, '-', marker='o', color='k', linewidth=2.0, label=r'$\mathrm{{}^{15}N(1/2^{-})}$')
plt.plot(hw1_6, e1_6, '--', marker='s', color='r', linewidth=2.0, label=r'$\mathrm{{}^{15}O(1/2^{-})}$')
plt.plot(hw1_7, e1_7, ':', marker='^', color='b', linewidth=2.0, label=r'$\mathrm{{}^{21}N(1/2^{-})}$')
plt.plot(hw1_8, e1_8, '-.', marker='v', color='g', linewidth=2.0, label=r'$\mathrm{{}^{21}O(5/2^{+})}$')
plt.axis([6.0, 30.0, -0.1, 1.0])
ax4.set_xlabel(r'$\mathrm{\hbar\omega\ (MeV)}$', fontsize=15)
ax4.set_ylabel(r'$\mathrm{E_{cm}(\widetilde{\omega})\ (MeV)}$', fontsize=15)
inset_axes4 = inset_axes(ax4,width="50%",height=1.5,loc=1)
plt.plot(hw0_5, hwa_5, '-', marker='o', color='r', linewidth=2.0)
plt.plot(hw0_7, hwa_7, '-.', marker='v', color='b', linewidth=2.0)
plt.xlabel(r'$\mathrm{\hbar\omega\ (MeV)}$', fontsize=14)
plt.ylabel(r'$\mathrm{\hbar\widetilde{\omega}\ (MeV)}$', fontsize=14)
annotation_string = r'$\mathrm{^{15}N,^{15}O}$'
plt.annotate(annotation_string, fontsize=12, xy=(0.25, 0.75), xycoords='axes fraction')
annotation_string = r'$\mathrm{^{21}N,^{21}O}$'
plt.annotate(annotation_string, fontsize=12, xy=(0.50, 0.25), xycoords='axes fraction')
ax4.legend(bbox_to_anchor=(0.325,0.975), frameon=False, fontsize=11)
#ax.xaxis.set_major_locator(majorLocatorX)
#ax.xaxis.set_minor_locator(minorLocatorX)
#ax.yaxis.set_major_locator(majorLocatorY)
#ax.yaxis.set_minor_locator(minorLocatorY)
plt.tight_layout()
plt.savefig('EOM-CoM.pdf', format='pdf', bbox_inches='tight')
plt.show()
| [
11748,
25064,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,
1330,
20401,
33711,
1352,
198,
6738,
2603,
29487,
... | 1.750894 | 5,592 |
r1 = float(input('Primeiro segmento: '))
r2 = float(input('segundo segmento: '))
r3 = float(input('terceiro segmento: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
print('É um triangulo:')
if r1 == r2 == r3:
print('Equilatero!')
elif r1 != r2 != r3 != r1:
print('Escaleno!')
else:
print('Isosceles!')
else:
print('Nao é um triangulo') | [
81,
16,
796,
12178,
7,
15414,
10786,
26405,
7058,
10618,
78,
25,
705,
4008,
198,
81,
17,
796,
12178,
7,
15414,
10786,
325,
70,
41204,
10618,
78,
25,
705,
4008,
198,
81,
18,
796,
12178,
7,
15414,
10786,
353,
344,
7058,
10618,
78,
2... | 2.042105 | 190 |
import numpy as np
def l2_regularization(W, reg_strength):
"""
Computes L2 regularization loss on weights and its gradient
Arguments:
W, np array - weights
reg_strength - float value
Returns:
loss, single value - l2 regularization loss
gradient, np.array same shape as W - gradient of weight by l2 loss
"""
# print(W.shape)
loss = reg_strength * (W ** 2).sum()
grad = 2 * reg_strength * W
return loss, grad
def softmax_with_cross_entropy(predictions, target_index):
"""
Computes softmax and cross-entropy loss for model predictions,
including the gradient
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss, single value - cross-entropy loss
dprediction, np array same shape as predictions - gradient of predictions by loss value
"""
sm = softmax(predictions)
# print("softmax count", softmax, e, "sum", sum(e).sum())
# Your final implementation shouldn't have any loops
target, ti = targets(target_index, predictions.shape)
loss = np.mean(-np.log(sm[ti]))
dpredictions = (sm - target) / sm.shape[0]
# print("predictions", predictions, "softmax", sm, "target", target, "loss", loss, "grad", dpredictions)
return loss, dpredictions.reshape(predictions.shape)
class Param:
"""
Trainable parameter of the model
Captures both parameter value and the gradient
"""
def softmax(predictions):
'''
Computes probabilities from scores
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
Returns:
probs, np array of the same shape as predictions -
probability for every class, 0..1
'''
if predictions.ndim > 1:
pred_scaled = predictions.T - predictions.max(axis=1)
e = np.exp(pred_scaled)
sm = (e / e.sum(axis=0)).T
else:
pred_scaled = predictions - np.max(predictions)
e = np.exp(pred_scaled)
sm = np.array(e / sum(e))
# print(np.array(sm))
# Your final implementation shouldn't have any loops
return sm
| [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
300,
17,
62,
16338,
1634,
7,
54,
11,
842,
62,
41402,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3082,
1769,
406,
17,
3218,
1634,
2994,
319,
19590,
290,
663,
31312,
628,
220,
... | 2.69871 | 853 |
import json
import os
import random
import string
import zipfile
from django.conf import settings
from django.http import Http404, JsonResponse, HttpResponseBadRequest
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.http import require_POST, require_GET
from django.views.generic import CreateView, DeleteView, ListView
from .models import Picture
from .noteshrink_module import AttrDict, notescan_main
from .response import JSONResponse, response_mimetype
from .serialize import serialize
@require_GET
# TODO: 1. Сделать чтобы сохранялись загруженные файлы по сессии - Make uploaded files save between session using session key
# DONE: 2. Удалять сразу не разрешенные файлы - не загружаются - Don't upload from file extensions
# TODO: 3. Проверять отсутсвующие параметры в shrink - Check for missing params in shrink
# DONE: 4. Проверять, существуют ли папки PNG_ROOT и PDF_ROOT - создавать если нет - Check for PNG_ROOT and PDF_ROOT
# TODO: 5. Проверять максимальную длину названий файлов - Check for maximum filename length
# DONE: 6. Сделать кнопку для резета - Make a reset button
# DONE: 7. Сделать view для загрузки ZIP-архива картинок - Make a zip-archive download view
# DONE: 8. Кнопка очистить очищает список загруженных файлов в window, деактивирует кнопку скачать - Clear button must clear window._uploadedFiles, deactivates download button
@require_POST
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
4731,
198,
11748,
19974,
7753,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
11,
449,
1559,
31077,
11,
367,
... | 1.980663 | 724 |
from . import database
import os.path as op
import shutil
from .freesurfer import parse_curv
import numpy as np
def import_subj(subject,
source_dir,
session=None,
sname=None):
"""Imports a subject from fmriprep-output.
See https://fmriprep.readthedocs.io/en/stable/
Parameters
----------
subject : string
Fmriprep subject name (without "sub-")
source_dir : string
Local directory that contains both fmriprep and freesurfer subfolders
session : string, optional
BIDS session that contains the anatomical data (leave to default if
not a specific session)
sname : string, optional
Pycortex subject name (These variable names should be changed). By default uses
the same name as the freesurfer subject.
"""
if sname is None:
sname = subject
database.db.make_subj(sname)
surfs = op.join(database.default_filestore, sname, "surfaces", "{name}_{hemi}.gii")
anats = op.join(database.default_filestore, sname, "anatomicals", "{name}.nii.gz")
surfinfo = op.join(database.default_filestore, sname, "surface-info", "{name}.npz")
fmriprep_dir = op.join(source_dir, 'fmriprep')
if session is not None:
fmriprep_dir = op.join(fmriprep_dir, 'ses-{session}')
session_str = '_ses-{session}'.format(session=session)
else:
session_str = ''
# import anatomical data
fmriprep_dir = op.join(fmriprep_dir, 'sub-{subject}', 'anat')
t1w = op.join(fmriprep_dir, 'sub-{subject}{session_str}_T1w_preproc.nii.gz')
aseg = op.join(fmriprep_dir, 'sub-{subject}{session_str}_T1w_label-aseg_roi.nii.gz')
for fmp_fn, out_fn in zip([t1w.format(subject=subject, session_str=session_str),
aseg.format(subject=subject, session_str=session_str)],
[anats.format(name='raw'),
anats.format(name='aseg')]):
shutil.copy(fmp_fn, out_fn)
#import surfaces
fmpsurf = op.join(fmriprep_dir,
'sub-{subject}{session_str}_T1w_').format(subject=subject,
session_str=session_str)
fmpsurf = fmpsurf + '{fmpname}.{fmphemi}.surf.gii'
for fmpname, name in zip(['smoothwm', 'pial', 'midthickness', 'inflated'],
['wm', 'pia', 'fiducial', 'inflated']):
for fmphemi, hemi in zip(['L', 'R'],
['lh', 'rh']):
source = fmpsurf.format(fmpname=fmpname,
fmphemi=fmphemi)
target = str(surfs.format(subj=sname, name=name, hemi=hemi))
shutil.copy(source, target)
#import surfinfo
curvs = op.join(source_dir,
'freesurfer',
'sub-{subject}',
'surf',
'{hemi}.{info}')
for curv, info in dict(sulc="sulcaldepth", thickness="thickness", curv="curvature").items():
lh, rh = [parse_curv(curvs.format(hemi=hemi, info=curv, subject=subject)) for hemi in ['lh', 'rh']]
np.savez(surfinfo.format(subj=sname, name=info), left=-lh, right=-rh)
database.db = database.Database()
| [
6738,
764,
1330,
6831,
198,
11748,
28686,
13,
6978,
355,
1034,
198,
11748,
4423,
346,
198,
6738,
764,
69,
6037,
333,
2232,
1330,
21136,
62,
22019,
85,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4299,
1330,
62,
7266,
73,
7,
32796,
... | 2.008511 | 1,645 |
try:
from ._radix import Radix as _Radix
except Exception as e:
from .radix import Radix as _Radix
__version__ = '1.0.0'
__all__ = ['Radix']
# This acts as an entrypoint to the underlying object (be it a C
# extension or pure python representation, pickle files will work)
| [
28311,
25,
198,
220,
220,
220,
422,
47540,
6335,
844,
1330,
5325,
844,
355,
4808,
15546,
844,
198,
16341,
35528,
355,
304,
25,
198,
220,
220,
220,
422,
764,
6335,
844,
1330,
5325,
844,
355,
4808,
15546,
844,
198,
198,
834,
9641,
834... | 3.053763 | 93 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
from paddle.fluid.dygraph.parallel import ParallelEnv
def setup_logger(output=None, name="hapi", log_level=logging.INFO):
"""
Initialize logger of hapi and set its verbosity level to "INFO".
Args:
output (str): a file name or a directory to save log. If None, will not save log file.
If ends with ".txt" or ".log", assumed to be a file name.
Otherwise, logs will be saved to `output/log.txt`.
name (str): the root module name of this logger. Default: 'hapi'.
log_level (enum): log level. eg.'INFO', 'DEBUG', 'ERROR'. Default: logging.INFO.
Returns:
logging.Logger: a logger
"""
logger = logging.getLogger(name)
logger.propagate = False
logger.setLevel(log_level)
format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# stdout logging: only local rank==0
local_rank = ParallelEnv().local_rank
if local_rank == 0 and len(logger.handlers) == 0:
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(log_level)
ch.setFormatter(logging.Formatter(format_str))
logger.addHandler(ch)
# file logging if output is not None: all workers
if output is not None:
if output.endswith(".txt") or output.endswith(".log"):
filename = output
else:
filename = os.path.join(output, "log.txt")
if local_rank > 0:
filename = filename + ".rank{}".format(local_rank)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
fh = logging.StreamHandler(filename)
fh.setLevel(log_level)
fh.setFormatter(logging.Formatter(format_str))
logger.addHandler(fh)
return logger
| [
2,
220,
220,
15069,
357,
66,
8,
12131,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 2.710753 | 930 |
import torch
import torch.nn as nn
import numpy as np
#********************模型训练*******************************#
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
for epoch in range(train_epochs):
for i,(images,labels) in enumerate(train_loader):
images = images.cuda()
labels = labels.cuda()
outs = model(images)
loss = criterion(outs,labels)
# 根据pytorch中backward()函数的计算,
# 当网络参量进行反馈时,梯度是累积计算而不是被替换,
# 但在处理每一个batch时并不需要与其他batch的梯度混合起来累积计算,
# 因此需要对每个batch调用一遍zero_grad()将参数梯度置0.
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f'Epoch:{epoch},Loss:{loss.item()}...')
#********************模型测试************************#
model.eval() #对于bn和drop_out 起作用
with torch.no_grad():
correct = 0
total = 0
for images,labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
pred = torch.argmax(outputs,1).item()
correct+= (torch.argmax(outputs,1)==labels).sum().cpu().data.numpy()
total += len(images)
print(f'acc:{correct/total:.3f}')
#****************自定义loss*************************#
#***************标签平滑,有很强的聚类效果???****************************#
# https://zhuanlan.zhihu.com/p/302843504 label smoothing 分析
# 写一个label_smoothing.py 的文件,然后再训练代码里面引用,用LSR代替交叉熵损失即可
import torch
import torch.nn as nn
# timm 库中有现成的接口
# PyTorchImageModels
# from timm.loss import LabelSmoothingCrossEntrophy
# from timm.loss import SoftTargetCrossEntrophy
# criterion = LabelSmoothingCrossEntrophy(smoothing=config.MODEL.LABEL_SMOOTHING)
# criterion = SoftTargetCrossEntrophy()
# 或者直接再训练过程中进行标签平滑
for images, labels in train_loader:
images, labels = images.cuda(), labels.cuda()
N = labels.size(0)
# C is the number of classes.
smoothed_labels = torch.full(size=(N, C), fill_value=0.1 / (C - 1)).cuda()
smoothed_labels.scatter_(dim=1, index=torch.unsqueeze(labels, dim=1), value=0.9)
score = model(images)
log_prob = torch.nn.functional.log_softmax(score, dim=1)
loss = -torch.sum(log_prob * smoothed_labels) / N
optimizer.zero_grad()
loss.backward()
optimizer.step()
#******************************Mixup训练,数据增强的一种方式***********************************#
# mixup采用对不同类别之间进行建模的方式实现数据增强,而通用数据增强方法则是针对同一类做变换。(经验风险最小->邻域风险最小),提升对抗样本及噪声样本的鲁棒性
# 思路非常简单:
# 从训练样本中随机抽取两个样本进行简单的随机加权求和,对于标签,相当于加权后的样本有两个label
# 求loss的时候,对两个label的loss进行加权,在反向求导更新参数。
# https://zhuanlan.zhihu.com/p/345224408
# distributions包含可参数化的概率分布和采样函数
# timm库有现成接口
# from timm.data import Mixup
# mixup_fn = Mixup(
# mixup_alpha=0.8,
# cutmix_alpha=1.0,
# cutmix_minmax=None,
# prob=1.0,
# switch_prob=0.5,
# mode='batch',
# label_smoothing=0.1,
# num_classes=1000)
# x,y = mixup_fn(x,y)
beta_distribution = torch.distributions.beta.Beta(alpha, alpha)
for images, labels in train_loader:
images, labels = images.cuda(), labels.cuda()
# Mixup images and labels.
lambda_ = beta_distribution.sample([]).item()
index = torch.randperm(images.size(0)).cuda()
mixed_images = lambda_ * images + (1 - lambda_) * images[index, :]
label_a, label_b = labels, labels[index]
# Mixup loss.
scores = model(mixed_images)
loss = (lambda_ * loss_function(scores, label_a)
+ (1 - lambda_) * loss_function(scores, label_b))
optimizer.zero_grad()
loss.backward()
optimizer.step()
#************************正则化***********************
# l1正则化
loss = nn.CrossEntropyLoss()
for param in model.parameters():
loss += torch.sum(torch.abs(param))
loss.backward()
# l2正则化,pytorch中的weight_decay相当于l2正则化
bias_list = (param for name, param in model.named_parameters() if name[-4:] == 'bias')
others_list = (param for name, param in model.named_parameters() if name[-4:] != 'bias')
parameters = [{'parameters': bias_list, 'weight_decay': 0},
{'parameters': others_list}]
optimizer = torch.optim.SGD(parameters, lr=1e-2, momentum=0.9, weight_decay=1e-4)
#*********************梯度裁剪*************************#
torch.nn.utils.clip_grad_norm_(model.parameters(),max_norm=20)
#********************得到当前学习率*********************#
# If there is one global learning rate (which is the common case).
lr = next(iter(optimizer.param_groups))['lr']
# If there are multiple learning rates for different layers.
all_lr = []
for param_group in optimizer.param_groups:
all_lr.append(param_group['lr'])
#在一个batch训练代码中,当前的lr是optimzer.param_groups[0]['lr']
#**********************学习率衰减************************#
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateaue(optimizer,mode='max',patience=5,verbose=True)
for epoch in range(num_epochs):
train_one_epoch(...)
val(...)
scheduler.step(val_acc)
# Cosine annealing learning rate
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=,T_max=80)
# Redule learning rate by 10 at given epochs
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,milestones=[50,70],gamma=0.1)
for t in range(0,80):
scheduler.step()
train(...)
val(...)
# learning rate warmup by 10 epochs
# torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
# 设置学习率为初始学习率乘以给定lr_lambda函数的值,lr_lambda一般输入为当前epoch
# https://blog.csdn.net/ltochange/article/details/116524264
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda=lambda t: t/10)
for t in range(0,10):
scheduler.step()
train(...)
val(...)
#**********************优化器链式更新******************************#
# 从pytorch1.4版本开始,torch.optim.lr_scheduler支持链式更新(chaining),即用户可以定义两个schedulers,并在训练过程中交替使用
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ExponentialLR,StepLR
model = [torch.nn.Parameter(torch.randn(2,2,requires_grad=True))]
optimizer = SGD(model,0.1)
scheduler1 = ExponentialLR(optimizer,gamma=0.9)
scheduler2 = StepLR(optimizer,step_size=3,gamma=0.1)
for epoch in range(4):
print(ecoch,scheduler2.get_last_lr()[0])
print(epoch,scheduler1.get_last_lr()[0])
optimizer.step()
scheduler1.step()
scheduler2.step()
#********************模型训练可视化*******************************#
# pytorch可以使用tensorboard来可视化训练过程
# pip install tensorboard
# tensorboard --logdir=runs
# 使用SummaryWriter类来收集和可视化相应的数据,为了方便查看,可以使用不同的文件夹,比如'loss/train'和'loss/test'
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
for n_iter in range(100):
writer.add_scalar('loss/train',np.random.random(),n_iter)
writer.add_scalar('loss/test',np.random.random(),n_iter)
writer.add_scalar('Accuracy/train',np.random.random(),n_iter)
writer.add_scalar('Accuracy/test',np.random.random(),n_iter)
#********************保存和加载检查点****************************#
start_epoch = 0
# Load checkpoint.
if resume: # resume为参数,第一次训练时设为0,中断再训练时设为1
model_path = os.path.join('model', 'best_checkpoint.pth.tar')
assert os.path.isfile(model_path)
checkpoint = torch.load(model_path)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('Load checkpoint at epoch {}.'.format(start_epoch))
print('Best accuracy so far {}.'.format(best_acc))
# Train the model
for epoch in range(start_epoch, num_epochs):
...
# Test the model
...
# save checkpoint
is_best = current_acc > best_acc
best_acc = max(current_acc, best_acc)
checkpoint = {
'best_acc': best_acc,
'epoch': epoch + 1,
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
model_path = os.path.join('model', 'checkpoint.pth.tar')
best_model_path = os.path.join('model', 'best_checkpoint.pth.tar')
torch.save(checkpoint, model_path)
if is_best:
shutil.copy(model_path, best_model_path)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
220,
198,
11748,
299,
32152,
355,
45941,
628,
198,
2,
8412,
2466,
162,
101,
94,
161,
252,
233,
164,
106,
255,
163,
119,
225,
8412,
46068,
8162,
2,
198,
22213,
28019,
796,
299... | 1.914749 | 4,258 |
from os import getenv
from dotenv import load_dotenv
load_dotenv()
BOT_TOKEN = getenv("TELEGRAM_API_TOKEN")
GROUP_CHAT_ID = getenv("GROUP_CHAT_ID")
CHANNEL_NAME = getenv("CHANNEL_NAME")
SUPER_USER_ID = getenv("SUPER_USER_ID") # sudo :)
GOOGLE_API_KEY = getenv('GOOGLE_API_KEY')
CSE_ID = getenv('CSE_ID')
SENTRY_DSN = getenv("SENTRY_SDK") | [
6738,
28686,
1330,
651,
24330,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
198,
2220,
62,
26518,
24330,
3419,
198,
198,
33,
2394,
62,
10468,
43959,
796,
651,
24330,
7203,
9328,
2538,
10761,
2390,
62,
17614,
62,
10468,
43... | 2.24183 | 153 |
#!/usr/bin/env python
import rospy
import actionlib
from mycobot_320_moveit.msg import *
if __name__ == '__main__':
rospy.init_node('move_client')
result = move_client()
print(result) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
686,
2777,
88,
198,
11748,
2223,
8019,
198,
6738,
616,
66,
672,
313,
62,
19504,
62,
21084,
270,
13,
19662,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12... | 2.255319 | 94 |
#!/bin/python
import os
import sys
import time
import numpy as np
import scipy as sp
from scipy.stats import norm as normal
from scipy.special import *
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
import scipy.linalg as linalg
from sklearn import metrics
import random
'''
This version deals with sparse features, VW format
'''
feature_off = 3
#d: dimension, rho: selection prior
# normal_PDF / normal_CDF
#batch training
#note, n is an array
#calculate the appearche of each features in the training data, used for the step-size of each approx. factor
#this version is the same as train_stochastic_multi_rate, except that at the beining, I will update all the prior factors
#this version keeps average likelihood for pos. and neg. samples separately, and also use n_pos and n_neg to update the full posterior
#enforce the same step-size
#this reads data from HDFS and keeps read the negative samples until it reaches the same amount with the postive samples
#then pass once
#in theory, go 1000 pass can process all 7 days' data, 150 iteraions can process 1day's data
#SEP training
#calculate the appearche of each features in the training data, for postive and negative samples
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'usage %s <tau0>'%sys.argv[0]
sys.exit(1)
np.random.seed(0)
tune_rcv1(float(sys.argv[1]))
| [
2,
48443,
8800,
14,
29412,
628,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
355,
599,
198,
6738,
629,
541,
88,
13,
34242,
1330,
2593,
355,
3487,
198,
6738,
629,
... | 2.962151 | 502 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LivyRequestBase(Model):
"""LivyRequestBase.
:param name:
:type name: str
:param file:
:type file: str
:param class_name:
:type class_name: str
:param args:
:type args: list[str]
:param jars:
:type jars: list[str]
:param files:
:type files: list[str]
:param archives:
:type archives: list[str]
:param conf:
:type conf: dict[str, str]
:param driver_memory:
:type driver_memory: str
:param driver_cores:
:type driver_cores: int
:param executor_memory:
:type executor_memory: str
:param executor_cores:
:type executor_cores: int
:param num_executors:
:type num_executors: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'file': {'key': 'file', 'type': 'str'},
'class_name': {'key': 'className', 'type': 'str'},
'args': {'key': 'args', 'type': '[str]'},
'jars': {'key': 'jars', 'type': '[str]'},
'files': {'key': 'files', 'type': '[str]'},
'archives': {'key': 'archives', 'type': '[str]'},
'conf': {'key': 'conf', 'type': '{str}'},
'driver_memory': {'key': 'driverMemory', 'type': 'str'},
'driver_cores': {'key': 'driverCores', 'type': 'int'},
'executor_memory': {'key': 'executorMemory', 'type': 'str'},
'executor_cores': {'key': 'executorCores', 'type': 'int'},
'num_executors': {'key': 'numExecutors', 'type': 'int'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 2.576623 | 770 |
import functools
from collections import Counter
import numpy as np
from numba import njit
from numba.typed import Dict
from tqdm import tqdm
from kernel.utils import memoize_id, normalize_kernel
TRANSLATION = {
"A": "T",
"T": "A",
"C": "G",
"G": "C"
}
@functools.lru_cache(None)
def complement(x: str):
"""Taking into account that the complement of a k-mer is supposed to be counted as the k-mer itself
projects upon the space of k-mers beginning either by 'A' or 'C'
e.g: ATAGCC == TATCGG
complement("ATAGCC")="ATAGCC"
complement("TATCGG")="ATAGCC"
"""
if x[0] in "AC":
return x
return x.translate(TRANSLATION)
@memoize_id
@functools.lru_cache(None) | [
11748,
1257,
310,
10141,
198,
6738,
17268,
1330,
15034,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
299,
45051,
198,
6738,
997,
7012,
13,
774,
9124,
1330,
360,
713,
198,
198,
6738,
256,
80,
36020,
1330,
256,
8... | 2.434343 | 297 |
# Copyright (c) 2003-2019 by Mike Jarvis
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import logging
import sys
import os
def get_from_wiki(file_name):
"""We host some larger files used for the test suite separately on the TreeCorr wiki repo
so people don't need to download them with the code when checking out the repo.
Most people don't run the tests after all.
"""
local_file_name = os.path.join('data',file_name)
url = 'https://github.com/rmjarvis/TreeCorr/wiki/' + file_name
if not os.path.isfile(local_file_name):
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
import shutil
print('downloading %s from %s...'%(local_file_name,url))
# urllib.request.urlretrieve(url,local_file_name)
# The above line doesn't work very well with the SSL certificate that github puts on it.
# It works fine in a web browser, but on my laptop I get:
# urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:600)>
# The solution is to open a context that doesn't do ssl verification.
# But that can only be done with urlopen, not urlretrieve. So, here is the solution.
# cf. http://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
# http://stackoverflow.com/questions/27835619/ssl-certificate-verify-failed-error
try:
import ssl
context = ssl._create_unverified_context()
u = urlopen(url, context=context)
except (AttributeError, TypeError):
# Note: prior to 2.7.9, there is no such function or even the context keyword.
u = urlopen(url)
with open(local_file_name, 'wb') as out:
shutil.copyfileobj(u, out)
u.close()
print('done.')
def which(program):
"""
Mimic functionality of unix which command
"""
if sys.platform == "win32" and not program.endswith(".exe"):
program += ".exe"
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def get_script_name(file_name):
"""
Check if the file_name is in the path. If not, prepend appropriate path to it.
"""
if which(file_name) is not None:
return file_name
else:
test_dir = os.path.split(os.path.realpath(__file__))[0]
root_dir = os.path.split(test_dir)[0]
script_dir = os.path.join(root_dir, 'scripts')
exe_file_name = os.path.join(script_dir, file_name)
print('Warning: The script %s is not in the path.'%file_name)
print(' Using explcit path for the test:',exe_file_name)
return exe_file_name
class CaptureLog(object):
"""A context manager that saves logging output into a string that is accessible for
checking in unit tests.
After exiting the context, the attribute `output` will have the logging output.
Sample usage:
>>> with CaptureLog() as cl:
... cl.logger.info('Do some stuff')
>>> assert cl.output == 'Do some stuff'
"""
# Replicate a small part of the nose package to get the `assert_raises` function/context-manager
# without relying on nose as a dependency.
import unittest
_t = Dummy('nop')
assert_raises = getattr(_t, 'assertRaises')
#if sys.version_info > (3,2):
if False:
# Note: this should work, but at least sometimes it fails with:
# RuntimeError: dictionary changed size during iteration
# cf. https://bugs.python.org/issue29620
# So just use our own (working) implementation for all Python versions.
assert_warns = getattr(_t, 'assertWarns')
else:
from contextlib import contextmanager
import warnings
@contextmanager
del Dummy
del _t
# Context to make it easier to profile bits of the code
def do_pickle(obj1, func = lambda x : x):
"""Check that the object is picklable. Also that it has basic == and != functionality.
"""
try:
import cPickle as pickle
except:
import pickle
import copy
print('Try pickling ',str(obj1))
#print('pickled obj1 = ',pickle.dumps(obj1))
obj2 = pickle.loads(pickle.dumps(obj1))
assert obj2 is not obj1
#print('obj1 = ',repr(obj1))
#print('obj2 = ',repr(obj2))
f1 = func(obj1)
f2 = func(obj2)
#print('func(obj1) = ',repr(f1))
#print('func(obj2) = ',repr(f2))
assert f1 == f2
# Check that == works properly if the other thing isn't the same type.
assert f1 != object()
assert object() != f1
obj3 = copy.copy(obj1)
assert obj3 is not obj1
f3 = func(obj3)
assert f3 == f1
obj4 = copy.deepcopy(obj1)
assert obj4 is not obj1
f4 = func(obj4)
assert f4 == f1
| [
2,
15069,
357,
66,
8,
5816,
12,
23344,
416,
4995,
46595,
198,
2,
198,
2,
12200,
10606,
81,
318,
1479,
3788,
25,
41425,
290,
779,
287,
2723,
290,
13934,
5107,
11,
198,
2,
351,
393,
1231,
17613,
11,
389,
10431,
2810,
326,
262,
1708,... | 2.593039 | 2,155 |
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from rest_framework import viewsets
from .models import *
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class PostViewSet(viewsets.ModelViewSet):
"""
API конечная точка для Постов для редактирования и т.д.
"""
queryset = Post.objects.prefetch_related('photos').all()
serializer_class = PostSerializer
class PhotoViewSet(viewsets.ModelViewSet):
"""
API конечная точка для Фото для редактирования и т.д.
"""
queryset = Photo.objects.all()
serializer_class = PhotoSerializer
| [
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
198,
6738,
42625,
14208,
13,
33571,
13,
12501,
273,
2024,
13,
23870,
1330,
1239,
62,
23870,
198,
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
198,
6738,
764,
27530,
1330,
163... | 2.251678 | 298 |
from socialite.helpers import get_config
from .base import BaseOAuth2
| [
6738,
1919,
578,
13,
16794,
364,
1330,
651,
62,
11250,
198,
6738,
764,
8692,
1330,
7308,
23621,
1071,
17,
628
] | 3.55 | 20 |
import sys
from pyswip import Prolog
helloworld(); | [
11748,
25064,
198,
6738,
279,
893,
86,
541,
1330,
1041,
6404,
198,
220,
220,
220,
220,
198,
12758,
322,
1764,
9783
] | 2.619048 | 21 |
import spacy
import ContactInfo
DIVIDER = "~" # CONSTANT which defines dividing str between card entries in a file
class BusinessCardParser:
""" Function getContactInfo
Input(s): document with text from one business card (string).
Output(s): A (ContactInfo) object that contains vital information about the card owner.
Description: Where the magic happens. Calls methods that identify vital info.
"""
""" Function isName
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a name, else false (boolean).
Runtime: > O(m), m = characters in entry. Takes long b/c of NLP machine learning
"""
""" Function isPhone
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a phone, else false (boolean).
Runtime: O(2m) => O(m), m = characters in entry
"""
""" Function isEmail
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a email, else false (boolean).
Runtime: O(2m) => O(m), m = characters in entry
"""
""" Function starter
* does the heavy lifting (I/O, calling methods)
Input(s): n/a
Output(s): a (dictionary) containing contacts with name (string) as key
Runtime: O(n), n = number of business cards
"""
if __name__ == '__main__':
main()
| [
11748,
599,
1590,
201,
198,
11748,
14039,
12360,
201,
198,
201,
198,
33569,
41237,
796,
366,
93,
1,
220,
1303,
7102,
2257,
8643,
543,
15738,
27241,
965,
1022,
2657,
12784,
287,
257,
2393,
201,
198,
201,
198,
201,
198,
4871,
7320,
1696... | 2.693309 | 538 |
"""Testsuite for vfgithook.pylint_check"""
from vfgithook import pylint_check
from . import util
# pylint: disable=protected-access
def test_is_python_file(gitrepo):
"""Test pylint_check.is_python_file"""
# Extension
file_a = util.write_file(gitrepo, 'a.py', '')
assert pylint_check._is_python_file(file_a)
# Empty
file_b = util.write_file(gitrepo, 'b', '')
assert not pylint_check._is_python_file(file_b)
# Shebang
file_c = util.write_file(gitrepo, 'b', '#!/usr/bin/env python')
assert pylint_check._is_python_file(file_c)
| [
37811,
14402,
2385,
578,
329,
410,
40616,
342,
566,
13,
79,
2645,
600,
62,
9122,
37811,
198,
198,
6738,
410,
40616,
342,
566,
1330,
279,
2645,
600,
62,
9122,
198,
198,
6738,
764,
1330,
7736,
628,
198,
2,
279,
2645,
600,
25,
15560,
... | 2.358025 | 243 |
from django_p.tasks import Pipe
| [
6738,
42625,
14208,
62,
79,
13,
83,
6791,
1330,
36039,
198
] | 2.909091 | 11 |
from xlab.data.calc.interface import RecursiveInputs
from xlab.data.calc.interface import SourceInputs
from xlab.data.calc.interface import CalcInputs
from xlab.data.calc.interface import CalcTimeSpecs
from xlab.data.calc.interface import CalcProducer
from xlab.data.calc.interface import CalcProducerFactory
| [
6738,
2124,
23912,
13,
7890,
13,
9948,
66,
13,
39994,
1330,
3311,
30753,
20560,
82,
198,
6738,
2124,
23912,
13,
7890,
13,
9948,
66,
13,
39994,
1330,
8090,
20560,
82,
198,
6738,
2124,
23912,
13,
7890,
13,
9948,
66,
13,
39994,
1330,
2... | 3.185567 | 97 |
from django import forms
from .models import *
| [
6738,
42625,
14208,
1330,
5107,
201,
198,
6738,
764,
27530,
1330,
1635,
201,
198,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
201
] | 2.4 | 25 |
# Copyright 2018 dhtech
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file
import lib
# vim: ts=4: sts=4: sw=4: expandtab
| [
2,
15069,
2864,
34590,
13670,
198,
2,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
198,
2,
5964,
326,
460,
307,
1043,
287,
262,
38559,
24290,
2393,
198,
11748,
9195,
628,
198,
2,
43907,
25,
40379,
... | 3.087719 | 57 |
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc # graphs etc
import dash_html_components as html # tags etc
app = dash.Dash() # dash can combine wth flask
app.layout = html.Div(children=[
dcc.Input(id = "Input", value = "Enter Something", type = "text"),
html.Div(id = "Output")
])
@app.callback(
Output(component_id="Output", component_property = "children"),
[Input(component_id="Input", component_property = "value")]
)
if __name__ == "__main__":
app.run_server(debug=True) | [
11748,
14470,
201,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
201,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
1303,
28770,
3503,
201,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
1303,
15940,... | 2.859375 | 192 |
from cachetclient.client import Client # noqa
___version__ = '3.0.0'
| [
6738,
40428,
3202,
16366,
13,
16366,
1330,
20985,
220,
1303,
645,
20402,
198,
198,
17569,
9641,
834,
796,
705,
18,
13,
15,
13,
15,
6,
198
] | 2.730769 | 26 |
'''
Main processor for v2 of the collocation between CALIOP and Himawari-8.
'''
import os
import sys
import traceback
from datetime import datetime
from pyhdf.SD import SD, SDC
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--list_of_files",
nargs="?",
type=str,
help="name of .txt file listing all the files to be downloaded"
)
parser.add_argument(
"-f",
"--filename",
nargs="?",
type=str,
help="name of file to be downloaded"
)
parser.add_argument(
"-d",
"--target_directory",
nargs="?",
default=os.getcwd(),
type=str,
help="full path to the directory where the files will be stored"
)
args = parser.parse_args()
if args.list_of_files is not None:
main(args.list_of_files, args.target_directory)
elif args.filename is not None:
full_collocation(args.filename, args.target_directory)
else:
raise Exception('Need to provide a filename or a text file containing a list of filenames')
| [
7061,
6,
198,
13383,
12649,
329,
410,
17,
286,
262,
2927,
5040,
1022,
33290,
40,
3185,
290,
10978,
707,
2743,
12,
23,
13,
198,
7061,
6,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
6738,
4818,
8079,
1330,
... | 2.34413 | 494 |
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.audio_mix_channel_type import AudioMixChannelType
import pprint
import six
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
2237,
1330,
4731,
62,
19199,
11,
11629,
23814,
198,
6738,
1643,
76,
709,
259,
62,
15042,
62,
21282,
74,
13,
11321,
13,
1930,
9122,
1330,
1426,
9122,
... | 3.088608 | 79 |
'''
Integration Test Teardown case
@author: Youyk
'''
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.clean_util as clean_util
import zstackwoodpecker.test_lib as test_lib
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
| [
7061,
6,
198,
198,
34500,
1358,
6208,
1665,
446,
593,
1339,
198,
198,
31,
9800,
25,
921,
48361,
198,
7061,
6,
198,
198,
11748,
1976,
25558,
8019,
13,
26791,
13,
23289,
355,
32639,
198,
11748,
1976,
25558,
8019,
13,
26791,
13,
4023,
... | 3.236641 | 131 |
from setuptools import setup
import os
import re
VERSION_REGEX = re.compile("__version__ = \"(.*?)\"")
CONTENTS = readfile(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"ringdown",
"__init__.py"
)
)
VERSION = VERSION_REGEX.findall(CONTENTS)[0]
setup(
name="ringdown",
author="Matthew Pitkin",
author_email="matthew.pitkin@ligo.org",
url="https://github.com/mattpitkin/ringdown",
version=VERSION,
packages=["ringdown"],
install_requires=readfile(
os.path.join(os.path.dirname(__file__), "requirements.txt")
),
license="MIT",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
) | [
6738,
900,
37623,
10141,
1330,
9058,
198,
11748,
28686,
198,
11748,
302,
628,
198,
198,
43717,
62,
31553,
6369,
796,
302,
13,
5589,
576,
7203,
834,
9641,
834,
796,
3467,
18109,
15885,
10091,
7879,
4943,
198,
37815,
15365,
796,
1100,
775... | 2.344214 | 337 |
import argparse
import cv2
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models, transforms
def deprocess_image(img):
""" see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """
img = img - np.mean(img)
img = img / (np.std(img) + 1e-5)
img = img * 0.1
img = img + 0.5
img = np.clip(img, 0, 1)
return np.uint8(img * 255)
if __name__ == '__main__':
""" python grad_cam.py <path_to_image>
1. Loads an image with opencv.
2. Preprocesses it for ResNet50 and converts to a pytorch variable.
3. Makes a forward pass to find the category index with the highest score,
and computes intermediate activations.
Makes the visualization. """
args = get_args()
model = models.resnet50(pretrained=True).to(args.device)
grad_cam = GradCam(model=model, feature_module=model.layer4)
img = cv2.imread(args.image_path, 1)
img = np.float32(img) / 255
# Opencv loads as BGR:
img = img[:, :, ::-1]
input_img = preprocess_image(img).to(args.device)
# If None, returns the map for the highest scoring category.
# Otherwise, targets the requested category.
target_category = None
grayscale_cam = grad_cam(input_img, target_category)
grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0]))
cam = show_cam_on_image(img, grayscale_cam)
gb_model = GuidedBackpropReLUModel(model=model)
gb = gb_model(input_img, target_category=target_category)
gb = gb.transpose((1, 2, 0))
cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
cam_gb = deprocess_image(cam_mask * gb)
gb = deprocess_image(gb)
cv2.imwrite("grad_cam.jpg", cam)
cv2.imwrite('gb.jpg', gb)
cv2.imwrite('grad_cam_gb.jpg', cam_gb)
| [
11748,
1822,
29572,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
15553,
198,
6738,
28034,
10178,
1330,
4981,
11,
31408,
628,
628,
628,
628,
198,
198,
... | 2.477551 | 735 |
raio = int(input())
pi = 3.14159
volume = float(4.0 * pi * (raio* raio * raio) / 3)
print("VOLUME = %0.3f" %volume)
| [
430,
952,
796,
493,
7,
15414,
28955,
198,
14415,
796,
513,
13,
1415,
19707,
198,
29048,
796,
12178,
7,
19,
13,
15,
1635,
31028,
1635,
357,
430,
952,
9,
2179,
952,
1635,
2179,
952,
8,
1220,
513,
8,
198,
4798,
7203,
44558,
38340,
79... | 2.148148 | 54 |
import unittest
import matplotlib
import random
import time
import matplotlib.pyplot as plt
from timsort import Timsort
#test time sorting an array of n elements
#Checking the sorting of arrays in which there are less than 64 elements
#array sorting test greater than 64
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
201,
198,
11748,
2603,
29487,
8019,
201,
198,
11748,
4738,
201,
198,
11748,
640,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
6738,
4628,
30619,
1330,
5045,
30619,
201,
198,
201,
1... | 2.64539 | 141 |
import pandas as pd
import pytest
from pandas_profiling import ProfileReport
# Generating dummy data
dummy_bool_data = generate_cat_data_series(pd.Series({True: 82, False: 36}))
dummy_cat_data = generate_cat_data_series(
pd.Series(
{
"Amadeou_plus": 75,
"Beta_front": 50,
"Calciumus": 20,
"Dimitrius": 1,
"esperagus_anonymoliumus": 75,
"FrigaTTTBrigde_Writap": 50,
"galgarartiy": 30,
"He": 1,
"I": 10,
"JimISGODDOT": 1,
}
)
)
# Unit tests
# - Test category frequency plots general options
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
# - Test category frequency plots color options
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
# - Test exceptions
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
198,
198,
6738,
19798,
292,
62,
5577,
4386,
1330,
13118,
19100,
628,
198,
2,
2980,
803,
31548,
1366,
628,
198,
67,
13513,
62,
30388,
62,
7890,
796,
7716,
62,
9246,
62,
7890,
... | 2.17193 | 570 |
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1SubjectAccessReviewStatusDict generated type."""
from typing import TypedDict
V1SubjectAccessReviewStatusDict = TypedDict(
"V1SubjectAccessReviewStatusDict",
{
"allowed": bool,
"denied": bool,
"evaluationError": str,
"reason": str,
},
total=False,
)
| [
2,
6127,
7560,
416,
4600,
28004,
6048,
713,
5235,
44646,
8410,
5626,
48483,
13,
198,
37811,
53,
16,
19776,
15457,
14832,
19580,
35,
713,
7560,
2099,
526,
15931,
198,
6738,
19720,
1330,
17134,
276,
35,
713,
198,
198,
53,
16,
19776,
154... | 2.485915 | 142 |
from ROOT_AND_MAIN.widgets import Root_and_main
import ROOT_AND_MAIN.USER_WINDOW.setup as user_window
import ROOT_AND_MAIN.SCHEDULE_WINDOW.setup as schedule_window
import ROOT_AND_MAIN.SUBJECT_WINDOW.setup as subject_window | [
6738,
15107,
2394,
62,
6981,
62,
5673,
1268,
13,
28029,
11407,
1330,
20410,
62,
392,
62,
12417,
198,
198,
11748,
15107,
2394,
62,
6981,
62,
5673,
1268,
13,
29904,
62,
28929,
3913,
13,
40406,
355,
2836,
62,
17497,
198,
11748,
15107,
23... | 2.698795 | 83 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
"""
Copyright 2011 Dmitry Nikulin
This file is part of Captchure.
Captchure is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Captchure is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Captchure. If not, see <http://www.gnu.org/licenses/>.
"""
import cv
from pyfann import libfann
from cvext import copyTo
from general import argmax
| [
37811,
198,
220,
220,
220,
15069,
2813,
45181,
11271,
11599,
628,
220,
220,
220,
770,
2393,
318,
636,
286,
6790,
354,
495,
13,
628,
220,
220,
220,
6790,
354,
495,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,... | 3.455696 | 237 |
import canopen
network = canopen.Network()
network.connect(channel='can0', bustype='socketcan')
node = network.add_node(6, '') | [
11748,
460,
9654,
198,
198,
27349,
796,
460,
9654,
13,
26245,
3419,
198,
198,
27349,
13,
8443,
7,
17620,
11639,
5171,
15,
3256,
13076,
2981,
11639,
44971,
5171,
11537,
198,
17440,
796,
3127,
13,
2860,
62,
17440,
7,
21,
11,
10148,
8
] | 3.047619 | 42 |
#%%
import numpy as np
import sys
import pylab as plt
sys.path.append('../')
from fiber_nlse.fiber_nlse import *
# Physical units & constants
nm = 1e-9
ps = 1e-12
km = 1e3
mW = 1e-3
GHz = 1e9
Thz = 1e12
m = 1
W = 1
c = 3e8
# Simulation metrics
N_t = 2000
N_z = 1000
# Physical parameters
# Source
T = 500*ps
λ = 1550 * nm
P0 = 490 * mW
f0 = 10 * GHz
# Fiber
α = 0.046 / km
γ = 10.1 / W / km
γ2 = 1.1 / W / km
L2 = 5000 * m
L = 0 * m
D = -0.8 * ps / nm /km
D2 = - 20 * ps / nm / km
β2 = - D*λ**2/(2*np.pi*c) # dispersion
β2_2 = - D2*λ**2/(2*np.pi*c) # dispersion
τ0 = 10*ps # pulse FWHM
fib = Fiber(L, α, β2, γ) # create fiber
sim = SegmentSimulation(fib, N_z, N_t, direct_modulation, T) # simulate on the fiber portion
t, U = sim.run() # perform simulation
Pmatrix = np.abs(U)**2
fib2 = Fiber(L2, α, β2_2, γ2)
sim2 = SegmentSimulation(fib2, N_z, N_t, lambda x : U[-1,:], T) # simulate on the fiber portion
t, U2 = sim2.run() # perform simulation
Pmatrix = np.abs(np.vstack((U, U2)))**2/mW # compute optical power matrix
#%%
plt.figure()
plt.title(r'Pulse progagation with dipsersion')
plt.imshow(Pmatrix, aspect='auto', extent=[-T/2/ps, T/2/ps, L/km, 0])
plt.tight_layout()
plt.xlabel(r'Local time [ns]')
plt.ylabel(r'Distance [km]')
cb = plt.colorbar()
cb.set_label(r'Optical power [mW]')
plt.show()
# %%
plt.figure()
plt.title(r'Pulse propagation with dispersion')
plt.plot(t/ps,np.unwrap(np.angle(np.fft.fftshift(np.fft.fft(U[0,:])))), label=r'Pulse at z={:.2f} km'.format(0))
plt.plot(t/ps,np.unwrap(np.angle(np.fft.fftshift(np.fft.fft(U[-1,:])))), label=r'Pulse at z={:.2f} km'.format(L/km))
plt.grid()
plt.legend()
plt.ylabel(r'Optical phase [rad]')
plt.xlabel(r'Local time [ns]')
plt.tight_layout()
plt.show()
# %%
plt.plot(Pmatrix[-1,:])
plt.plot(Pmatrix[0,:])
plt.show()
# %%
| [
2,
16626,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
279,
2645,
397,
355,
458,
83,
198,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
11537,
198,
198,
6738,
13608,
62,
21283,
325,
13,
69,
1856,
62,
21283,
325,... | 2.094077 | 861 |
data = [
("orange", "a sweet, orange, citrus fruit"),
("apple", "good for making cider"),
("lemon", "a sour, yellow citrus fruit"),
("grape", "a small, sweet fruit growing in bunches"),
("melon", "sweet and juicy"),
]
# Convert to ASCII chars
# print(ord("a"))
# print(ord("b"))
# print(ord("z"))
def simple_hash(s: str) -> int:
"""A ridiculously simple hashing function"""
basic_hash = ord(s[0])
return basic_hash % 10
def get(k: str) -> int:
"""
return value of the kry
:param k: the key
:return: `int if found else None`
"""
hash_code = simple_hash(k)
if values[hash_code]:
return values[hash_code]
else:
return None
for key, value in data:
h = simple_hash(key)
# h = hash(key)
print(key, h)
keys = [""] * 10
values = keys.copy()
for key, value in data:
h = simple_hash(key)
print(key, h)
# add in hash keys
keys[h] = key
values[h] = value
print(keys)
print(values)
print()
print(get('lemon'))
| [
7890,
796,
685,
198,
220,
220,
220,
5855,
43745,
1600,
366,
64,
6029,
11,
10912,
11,
35405,
8234,
12340,
198,
220,
220,
220,
5855,
18040,
1600,
366,
11274,
329,
1642,
36930,
12340,
198,
220,
220,
220,
5855,
293,
2144,
1600,
366,
64,
... | 2.462651 | 415 |
import numpy as np
import pandas as pd
import os
import datetime
# Absolute path to the processed EIA Form 714 load data under $HOME.
homedir = os.path.expanduser('~')
datadir = 'github/RIPS_kircheis/data/eia_form_714/processed/'
fulldir = '/'.join([homedir, datadir])
# li = []
# for d1 in os.listdir('.'):
# for fn in os.listdir('./%s' % d1):
# li.append(fn)
# dir_u = pd.Series(li).str[:-2].order().unique()
###### NPCC
# BECO: 54913 <- 1998
# BHE: 1179
# CELC: 1523 <- 2886
# CHGE: 3249
# CMP: 3266
# COED: 4226
# COEL: 4089 -> IGNORE
# CVPS: 3292
# EUA: 5618
# GMP: 7601
# ISONY: 13501
# LILC: 11171 <- 11172
# MMWE: 11806
# NEES: 13433
# NEPOOL: 13435
# NMPC: 13573
# NU: 13556
# NYPA: 15296
# NYPP: 13501
# NYS: 13511
# OR: 14154
# RGE: 16183
# UI: 19497
# Hourly load by FERC Form 714 respondent id for NPCC utilities.
# npcc[<respondent id>][<year>] -> 1-D array of hourly load readings.
# Each year uses a parser matched to that utility's filing format
# (fixed-width vs. delimited vs. Excel, varying header/footer rows).
# FIX(review): the original literal listed key 13501 twice (ISONY and
# NYPP); Python keeps only the last duplicate, which silently discarded
# the ISONY 2002-2004 data. The comment table above maps both names to
# 13501 (the same NY control area), so the two entries are merged here.
npcc = {
    54913 : {
        1993 : pd.read_fwf('%s/npcc/1993/BECO93' % (fulldir), header=None, skipfooter=1).loc[:, 2:].values.ravel(),
        1994 : pd.read_csv('%s/npcc/1994/BECO94' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[4].values,
        1995 : pd.read_csv('%s/npcc/1995/BECO95' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
        1996 : pd.read_csv('%s/npcc/1996/BECO96' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
        1997 : pd.read_csv('%s/npcc/1997/BECO97' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[4].values,
        1998 : pd.read_csv('%s/npcc/1998/BECO98' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
        1999 : pd.read_csv('%s/npcc/1999/BECO99' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2000 : pd.read_csv('%s/npcc/2000/BECO00' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2001 : pd.read_csv('%s/npcc/2001/BECO01' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2002 : pd.read_csv('%s/npcc/2002/BECO02' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2003 : pd.read_csv('%s/npcc/2003/BECO03' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2004 : pd.read_csv('%s/npcc/2004/BECO04' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values
    },
    1179 : {
        1993 : pd.read_csv('%s/npcc/1993/BHE93' % (fulldir), sep=' ', skiprows=2, skipinitialspace=True).loc[:, '0000':].values.ravel(),
        1994 : pd.read_csv('%s/npcc/1994/BHE94' % (fulldir)).dropna(how='all').loc[:729, '1/13':'12/24'].values.ravel(),
        # /10: BHE95 appears to store tenths -- presumably; verify against adjacent years.
        1995 : (pd.read_fwf('%s/npcc/1995/BHE95' % (fulldir)).loc[:729, '1/13':'1224'].astype(float)/10).values.ravel(),
        2001 : pd.read_excel('%s/npcc/2001/BHE01' % (fulldir), skiprows=2).iloc[:, 1:24].values.ravel(),
        2003 : pd.read_excel('%s/npcc/2003/BHE03' % (fulldir), skiprows=3).iloc[:, 1:24].values.ravel()
    },
    1523 : {
        1999 : pd.read_csv('%s/npcc/1999/CELC99' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2000 : pd.read_csv('%s/npcc/2000/CELC00' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2001 : pd.read_csv('%s/npcc/2001/CELC01' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2002 : pd.read_csv('%s/npcc/2002/CELC02' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2003 : pd.read_csv('%s/npcc/2003/CELC03' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2004 : pd.read_csv('%s/npcc/2004/CELC04' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values
    },
    3249 : {
        1993 : pd.read_csv('%s/npcc/1993/CHGE93' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[2].values,
        1994 : pd.read_fwf('%s/npcc/1994/CHGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(float).values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/CHGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/CHGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(float).values.ravel(),
        1997 : pd.read_csv('%s/npcc/1997/CHGE97' % (fulldir), sep ='\s', skipinitialspace=True, header=None, skipfooter=1).iloc[:, 4:].values.ravel(),
        1998 : pd.read_excel('%s/npcc/1998/CHGE98' % (fulldir), skipfooter=1, header=None).iloc[:, 2:].values.ravel(),
    },
    3266 : {
        1993 : pd.read_fwf('%s/npcc/1993/CMP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/npcc/1994/CMP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/CMP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/CMP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/npcc/1997/CMP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/npcc/1999/CMP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/npcc/2002/CMP02' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        2003 : pd.read_fwf('%s/npcc/2003/CMP03' % (fulldir), header=None).iloc[:, 1:].values.ravel()
    },
    4226 : {
        1993 : pd.read_csv('%s/npcc/1993/COED93' % (fulldir), skipfooter=1, skiprows=11, header=None, skipinitialspace=True, sep=' ')[2].values,
        1994 : pd.read_fwf('%s/npcc/1994/COED94' % (fulldir), skipfooter=1, header=None)[1].values,
        # 1995 is kept as a raw DataFrame here and flattened in the cleanup step below.
        1995 : pd.read_csv('%s/npcc/1995/COED95' % (fulldir), skiprows=3, header=None),
        1996 : pd.read_excel('%s/npcc/1996/COED96' % (fulldir)).iloc[:, -1].values.ravel(),
        1997 : pd.read_excel('%s/npcc/1997/COED97' % (fulldir), skiprows=1).iloc[:, -1].values.ravel(),
        1998 : pd.read_excel('%s/npcc/1998/COED98' % (fulldir), skiprows=1).iloc[:, -1].values.ravel(),
        1999 : pd.read_csv('%s/npcc/1999/COED99' % (fulldir), skiprows=1, sep='\t').iloc[:, -1].str.replace(',', '').astype(int).values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/COED00' % (fulldir), sep='\t')[' Load '].dropna().str.replace(',', '').astype(int).values.ravel(),
        2001 : pd.read_csv('%s/npcc/2001/COED01' % (fulldir), sep='\t', skipfooter=1)['Load'].dropna().str.replace(',', '').astype(int).values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/COED02' % (fulldir), sep='\t', skipfooter=1, skiprows=1)['Load'].dropna().str.replace(',', '').astype(int).values.ravel(),
        2003 : pd.read_csv('%s/npcc/2003/COED03' % (fulldir), sep='\t')['Load'].dropna().astype(int).values.ravel(),
        2004 : pd.read_csv('%s/npcc/2004/COED04' % (fulldir), header=None).iloc[:, -1].str.replace('[A-Z,]', '').str.replace('\s', '0').astype(int).values.ravel()
    },
    4089 : {
        1993 : pd.read_fwf('%s/npcc/1993/COEL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/COEL95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/npcc/1996/COEL96' % (fulldir), sep=' ', skipinitialspace=True, header=None)[3].values,
        1997 : pd.read_csv('%s/npcc/1997/COEL97' % (fulldir), sep=' ', skipinitialspace=True, header=None)[4].values,
        # 1998 is blanked to NaN in the cleanup step below.
        1998 : pd.read_csv('%s/npcc/1998/COEL98' % (fulldir), sep=' ', skipinitialspace=True, header=None)[4].values,
        1999 : pd.read_csv('%s/npcc/1999/COEL99' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2000 : pd.read_csv('%s/npcc/2000/COEL00' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2001 : pd.read_csv('%s/npcc/2001/COEL01' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2002 : pd.read_csv('%s/npcc/2002/COEL02' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2003 : pd.read_csv('%s/npcc/2003/COEL03' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2004 : pd.read_csv('%s/npcc/2004/COEL04' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values
    },
    3292 : {
        1995 : pd.read_fwf('%s/npcc/1995/CVPS95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/npcc/1996/CVPS96' % (fulldir), header=None, skipfooter=1)[1].values,
        1997 : pd.read_csv('%s/npcc/1997/CVPS97' % (fulldir), header=None)[2].values,
        1998 : pd.read_csv('%s/npcc/1998/CVPS98' % (fulldir), header=None, skipfooter=1)[4].values,
        1999 : pd.read_csv('%s/npcc/1999/CVPS99' % (fulldir))['Load'].values
    },
    5618 : {
        1993 : pd.read_fwf('%s/npcc/1993/EUA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/npcc/1994/EUA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/EUA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/EUA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/npcc/1997/EUA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/npcc/1999/EUA99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    7601 : {
        1993 : pd.read_csv('%s/npcc/1993/GMP93' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=4)[0].replace('MWH', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/npcc/1994/GMP94' % (fulldir), header=None)[0].values,
        1995 : pd.read_csv('%s/npcc/1995/GMP95' % (fulldir), sep=' ', skipinitialspace=True, header=None)[0].values,
        1996 : pd.read_csv('%s/npcc/1996/GMP96' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].values,
        1997 : pd.read_csv('%s/npcc/1997/GMP97' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].values,
        1998 : pd.read_csv('%s/npcc/1998/GMP98' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].astype(str).str[:3].astype(float).values,
        1999 : pd.read_csv('%s/npcc/1999/GMP99' % (fulldir), sep=' ', skipinitialspace=True, header=None, skipfooter=1).iloc[:8760, 0].values,
        2002 : pd.read_excel('%s/npcc/2002/GMP02' % (fulldir), skiprows=6, skipfooter=1).iloc[:, 0].values,
        2003 : pd.read_excel('%s/npcc/2003/GMP03' % (fulldir), skiprows=6, skipfooter=1).iloc[:, 0].values,
        2004 : pd.read_csv('%s/npcc/2004/GMP04' % (fulldir), skiprows=13, sep='\s').iloc[:, 0].values
    },
    # NYPP (1993) and ISONY (2002-2004) are the same respondent id; the
    # original file declared 13501 twice and lost the ISONY years.
    13501 : {
        1993 : pd.read_fwf('%s/npcc/1993/NYPP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/ISONY02' % (fulldir), sep='\t')['mw'].values,
        2003 : pd.read_excel('%s/npcc/2003/ISONY03' % (fulldir))['Load'].values,
        2004 : pd.read_excel('%s/npcc/2004/ISONY04' % (fulldir)).loc[:, 'HR1':].values.ravel()
    },
    11171 : {
        1994 : pd.read_fwf('%s/npcc/1994/LILC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/LILC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/npcc/1997/LILC97' % (fulldir), skiprows=4, widths=[8,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
    },
    11806 : {
        1998 : pd.read_fwf('%s/npcc/1998/MMWE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/npcc/1999/MMWE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/npcc/2000/MMWE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/npcc/2001/MMWE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/npcc/2002/MMWE02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2003 : pd.read_fwf('%s/npcc/2003/MMWE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2004 : pd.read_fwf('%s/npcc/2004/MMWE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel()
    },
    13433 : {
        1993 : pd.read_fwf('%s/npcc/1993/NEES93' % (fulldir), widths=(8,7), header=None, skipfooter=1)[1].values,
        1994 : pd.read_csv('%s/npcc/1994/NEES94' % (fulldir), header=None, skipfooter=1, sep=' ', skipinitialspace=True)[3].values
    },
    13435 : {
        1993 : pd.read_fwf('%s/npcc/1993/NEPOOL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/npcc/1994/NEPOOL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/NEPOOL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=3).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/npcc/1996/NEPOOL96' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
        1997 : pd.read_fwf('%s/npcc/1997/NEPOOL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_excel('%s/npcc/1998/NEPOOL98' % (fulldir), header=None).iloc[:, 5:17].values.ravel(),
        1999 : pd.read_csv('%s/npcc/1999/NEPOOL99' % (fulldir), engine='python', skiprows=1).iloc[:, 0].values,
        2000 : pd.read_fwf('%s/npcc/2000/NEPOOL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/npcc/2001/NEPOOL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/NEPOOL02' % (fulldir), sep='\t').iloc[:, 3:].values.ravel(),
        2003 : pd.read_fwf('%s/npcc/2003/NEPOOL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2004 : pd.read_csv('%s/npcc/2004/NEPOOL04' % (fulldir), sep='\t', header=None, skiprows=10).iloc[:, 5:].values.ravel()
    },
    13573 : {
        1993 : pd.read_csv('%s/npcc/1993/NMPC93' % (fulldir), skiprows=11, header=None, sep=' ', skipinitialspace=True).iloc[:, 3:27].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/NMPC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/NMPC96' % (fulldir), header=None).iloc[:, 2:14].astype(int).values.ravel(),
        1998 : pd.read_fwf('%s/npcc/1998/NMPC98' % (fulldir), header=None).iloc[:, 2:].astype(int).values.ravel(),
        1999 : pd.read_fwf('%s/npcc/1999/NMPC99' % (fulldir), header=None).iloc[:, 2:14].astype(int).values.ravel(),
        2000 : pd.read_excel('%s/npcc/2000/NMPC00' % (fulldir), sheetname=1, skiprows=10, skipfooter=3).iloc[:, 1:].values.ravel(),
        2002 : pd.read_excel('%s/npcc/2002/NMPC02' % (fulldir), sheetname=1, skiprows=2, header=None).iloc[:, 2:].values.ravel(),
        2003 : pd.concat([pd.read_excel('%s/npcc/2003/NMPC03' % (fulldir), sheetname=i, skiprows=1, header=None) for i in range(1,13)]).iloc[:, 2:].astype(str).apply(lambda x: x.str[:4]).astype(float).values.ravel()
    },
    13556 : {
        1993 : pd.read_fwf('%s/npcc/1993/NU93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_excel('%s/npcc/1994/NU94' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1995 : pd.read_excel('%s/npcc/1995/NU95' % (fulldir), header=None, skipfooter=5).dropna(how='any').iloc[:, 3:].values.ravel(),
        1996 : pd.read_excel('%s/npcc/1996/NU96' % (fulldir), header=None, skipfooter=1).iloc[:, 5:].values.ravel(),
        1997 : pd.read_excel('%s/npcc/1997/NU97' % (fulldir), header=None, skipfooter=4).iloc[:, 5:].values.ravel(),
        1998 : pd.read_excel('%s/npcc/1998/NU98' % (fulldir), header=None).iloc[:, 5:].values.ravel(),
        1999 : pd.read_excel('%s/npcc/1999/NU99' % (fulldir), header=None).iloc[:, 5:].values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/NU00' % (fulldir), sep='\t', header=None).iloc[:, 5:].values.ravel(),
        2001 : pd.read_excel('%s/npcc/2001/NU01' % (fulldir)).iloc[:, -1].values,
        2002 : pd.read_excel('%s/npcc/2002/NU02' % (fulldir)).iloc[:, -1].values,
        2003 : pd.read_excel('%s/npcc/2003/NU03' % (fulldir), skipfooter=1).iloc[:, -1].values
    },
    15296 : {
        1993 : pd.read_csv('%s/npcc/1993/NYPA93' % (fulldir), engine='python', header=None).values.ravel(),
        1994 : pd.read_csv('%s/npcc/1994/NYPA94' % (fulldir), engine='python', header=None).values.ravel(),
        1995 : pd.read_csv('%s/npcc/1995/NYPA95' % (fulldir), engine='python', header=None).values.ravel(),
        1996 : pd.read_csv('%s/npcc/1996/NYPA96' % (fulldir), engine='python', header=None).values.ravel(),
        1997 : pd.read_csv('%s/npcc/1997/NYPA97' % (fulldir), engine='python', header=None).values.ravel(),
        1998 : pd.read_csv('%s/npcc/1998/NYPA98' % (fulldir), engine='python', header=None).values.ravel(),
        1999 : pd.read_excel('%s/npcc/1999/NYPA99' % (fulldir), header=None).values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/NYPA00' % (fulldir), engine='python', header=None).values.ravel(),
        2001 : pd.read_csv('%s/npcc/2001/NYPA01' % (fulldir), engine='python', header=None).values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/NYPA02' % (fulldir), engine='python', header=None).values.ravel(),
        2003 : pd.read_csv('%s/npcc/2003/NYPA03' % (fulldir), engine='python', header=None).values.ravel()
    },
    13511 : {
        1996 : pd.read_fwf('%s/npcc/1996/NYS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/npcc/1997/NYS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_excel('%s/npcc/1999/NYS99' % (fulldir)).iloc[:, 1:].values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/NYS00' % (fulldir), sep='\t').iloc[:, -1].values,
        2001 : pd.read_csv('%s/npcc/2001/NYS01' % (fulldir), sep='\t', skiprows=3).dropna(how='all').iloc[:, -1].values,
        2002 : pd.read_csv('%s/npcc/2002/NYS02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=3).iloc[:, 2].values,
        2003 : pd.read_csv('%s/npcc/2003/NYS03' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).iloc[:, -1].values,
        2004 : pd.read_csv('%s/npcc/2004/NYS04' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).dropna(how='all').iloc[:, -1].values
    },
    14154 : {
        1993 : pd.read_csv('%s/npcc/1993/OR93' % (fulldir), skiprows=5, header=None).iloc[:, 2:26].values.ravel(),
        # /10: these three files appear to store tenths -- presumably; confirm.
        1995 : (pd.read_csv('%s/npcc/1995/OR95' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
        1996 : (pd.read_csv('%s/npcc/1996/OR96' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
        1997 : (pd.read_csv('%s/npcc/1997/OR97' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
        1998 : pd.read_fwf('%s/npcc/1998/OR98' % (fulldir), skiprows=1, header=None).dropna(axis=1, how='all').iloc[:, 1:].values.ravel(),
        1999 : pd.read_csv('%s/npcc/1999/OR99' % (fulldir), sep='\t', skiprows=1, header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/OR00' % (fulldir), sep='\t').iloc[:, -1].values.astype(int).ravel(),
        2002 : pd.read_csv('%s/npcc/2002/OR02' % (fulldir), sep='\t', skiprows=2).iloc[:, -1].dropna().values.astype(int).ravel(),
        2003 : pd.read_csv('%s/npcc/2003/OR03' % (fulldir), sep='\t').iloc[:, -1].dropna().values.astype(int).ravel(),
        2004 : pd.read_csv('%s/npcc/2004/OR04' % (fulldir), header=None).iloc[:, -1].values.astype(int).ravel()
    },
    16183 : {
        1994 : pd.read_fwf('%s/npcc/1994/RGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/RGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/RGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/RGE02' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values,
        2003 : pd.read_csv('%s/npcc/2003/RGE03' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values,
        2004 : pd.read_csv('%s/npcc/2004/RGE04' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values
    },
    19497 : {
        # /10: fixed-width UI filings appear to store tenths -- presumably; confirm.
        1993 : pd.read_fwf('%s/npcc/1993/UI93' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1994 : pd.read_fwf('%s/npcc/1994/UI94' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1995 : pd.read_fwf('%s/npcc/1995/UI95' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1996 : pd.read_fwf('%s/npcc/1996/UI96' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1997 : pd.read_fwf('%s/npcc/1997/UI97' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1998 : pd.read_excel('%s/npcc/1998/UI98' % (fulldir))['MW'].values,
        1999 : pd.read_excel('%s/npcc/1999/UI99' % (fulldir)).loc[:, 'HR1':'HR24'].values.ravel(),
        2001 : pd.read_excel('%s/npcc/2001/UI01' % (fulldir), sheetname=0).ix[:-2, 'HR1':'HR24'].values.ravel(),
        2002 : pd.read_excel('%s/npcc/2002/UI02' % (fulldir), sheetname=0).ix[:-2, 'HR1':'HR24'].values.ravel(),
        2003 : pd.read_excel('%s/npcc/2003/UI03' % (fulldir), sheetname=0, skipfooter=2).ix[:, 'HR1':'HR24'].values.ravel(),
        2004 : pd.read_excel('%s/npcc/2004/UI04' % (fulldir), sheetname=0, skipfooter=1).ix[:, 'HR1':'HR24'].values.ravel()
    }
}
# --- Post-read cleanup ----------------------------------------------------
# COED 1995 was kept as a raw DataFrame above; stitch its two usable
# columns into a single hourly vector.
npcc[4226][1995] = pd.concat([npcc[4226][1995][2].dropna(), npcc[4226][1995][6]]).values.ravel()
# Zero out readings outside per-utility plausibility thresholds
# (presumably parse artifacts rather than real loads -- thresholds were
# chosen by inspection).
npcc[3249][1994][npcc[3249][1994] > 5000] = 0
npcc[3249][1996][npcc[3249][1996] > 5000] = 0
npcc[15296][2000][npcc[15296][2000] > 5000] = 0
npcc[15296][2001][npcc[15296][2001] > 5000] = 0
# COEL 1998 is discarded entirely: replace the whole year with NaN.
npcc[4089][1998] = np.repeat(np.nan, len(npcc[4089][1998]))
npcc[13511][1996][npcc[13511][1996] < 500] = 0
npcc[13511][1997][npcc[13511][1997] < 500] = 0
npcc[13511][1999][npcc[13511][1999] < 500] = 0
npcc[13511][2000][npcc[13511][2000] < 500] = 0
npcc[14154][2002][npcc[14154][2002] > 2000] = 0
# --- Output ---------------------------------------------------------------
# One CSV per respondent id: concatenate all years onto an hourly
# DatetimeIndex; zeros become NaN so they read back as missing data.
if not os.path.exists('./npcc'):
    os.mkdir('npcc')
for k in npcc.keys():
    print(k)  # progress indicator; print() is valid in both Python 2 and 3
    s = pd.DataFrame(pd.concat([pd.Series(npcc[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(npcc[k][i]))) for i in npcc[k].keys()]).sort_index(), columns=['load'])
    s['load'] = s['load'].replace('.', '0').astype(float).replace(0, np.nan)
    s.to_csv('./npcc/%s.csv' % k)
###### ERCOT
# AUST: 1015
# CPL: 3278
# HLP: 8901
# LCRA: 11269
# NTEC: 13670
# PUB: 2409
# SRGT: 40233
# STEC: 17583
# TUEC: 44372
# TMPP: 18715
# TXLA: 18679
# WTU: 20404
# Hourly load by FERC Form 714 respondent id for ERCOT utilities.
# ercot[<respondent id>][<year>] -> 1-D array of hourly load readings.
# 1993-1997 come from per-utility fixed-width/CSV filings; 1998-2000 are
# pulled out of shared ERCOT workbooks (FERC714.xls, ERCOT99HRLD*.xls,
# ERCOT00HRLD.txt) by column, each divided by 1000 -- presumably a
# kWh -> MWh (or kW -> MW) conversion; confirm against the source files.
ercot = {
    1015 : {
        1993 : pd.read_fwf('%s/ercot/1993/AUST93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/AUST94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/AUST95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/AUST96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/AUST97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['AENX'].loc[2:].astype(float)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['AENX'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[3].str.replace(',', '').astype(float)/1000).values
    },
    3278 : {
        1993 : pd.read_fwf('%s/ercot/1993/CPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/CPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/CPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/CPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['CPLC'].loc[2:].astype(int)/1000).values
    },
    8901 : {
        1993 : pd.read_fwf('%s/ercot/1993/HLP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/HLP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/HLP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/HLP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/HLP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['HLPC'].loc[2:].astype(int)/1000).values
    },
    11269: {
        1993 : pd.read_fwf('%s/ercot/1993/LCRA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_csv('%s/ercot/1994/LCRA94' % (fulldir), skiprows=4).iloc[:, -1].values,
        1995 : pd.read_fwf('%s/ercot/1995/LCRA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/LCRA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/LCR97' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['LCRA'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['LCRA'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[6].str.replace(',', '').astype(float)/1000).values
    },
    13670 : {
        1993 : pd.read_csv('%s/ercot/1993/NTEC93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
        1994 : pd.read_fwf('%s/ercot/1994/NTEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/NTEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/NTEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/NTEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/ercot/2001/NTEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    2409 : {
        1993 : pd.read_fwf('%s/ercot/1993/PUB93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/PUB94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/PUB95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/PUB96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/PUB97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        # 1998/1999 values above 300 are zeroed in the cleanup step below.
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['PUBX'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['PUBX'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[7].str.replace(',', '').astype(float)/1000).values
    },
    40233 : {
        1993 : pd.read_csv('%s/ercot/1993/SRGT93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        1994 : pd.read_fwf('%s/ercot/1994/SRGT94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/SRGT95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/SRGT96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/SRGT97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    17583 : {
        1993 : pd.read_fwf('%s/ercot/1993/STEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['STEC'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['STEC'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[9].str.replace(',', '').astype(float)/1000).values
    },
    44372 : {
        1993 : pd.read_fwf('%s/ercot/1993/TUEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/TUEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/TUEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        # NOTE(review): 1996/1997 filings are named TUE96/TUE97 (not TUEC*) on disk.
        1996 : pd.read_fwf('%s/ercot/1996/TUE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TUE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['TUEC'].loc[2:].astype(int)/1000).values
    },
    18715 : {
        1993 : pd.read_csv('%s/ercot/1993/TMPP93' % (fulldir), skiprows=7, header=None, sep=' ', skipinitialspace=True).iloc[:, 3:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/TMPP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TMPP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['TMPP'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[10].str.replace(',', '').astype(float)/1000).values
    },
    18679 : {
        1993 : pd.read_csv('%s/ercot/1993/TEXLA93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        1995 : pd.read_fwf('%s/ercot/1995/TXLA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/TXLA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TXLA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['TXLA'].loc[2:].astype(int)/1000).values
    },
    20404 : {
        # 1993: blank fixed-width cells are rewritten to '0' before the float cast.
        1993 : pd.read_fwf('%s/ercot/1993/WTU93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(str).apply(lambda x: x.str.replace('\s', '0')).astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/WTU94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/WTU96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/WTU97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['WTUC'].loc[2:].astype(int)/1000).values
    }
}
ercot[2409][1998][ercot[2409][1998] > 300] = 0
ercot[2409][1999][ercot[2409][1999] > 300] = 0
if not os.path.exists('./ercot'):
os.mkdir('ercot')
for k in ercot.keys():
print k
s = pd.DataFrame(pd.concat([pd.Series(ercot[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(ercot[k][i]))) for i in ercot[k].keys()]).sort_index(), columns=['load'])
s['load'] = s['load'].astype(float).replace(0, np.nan)
s.to_csv('./ercot/%s.csv' % k)
###### FRCC
# GAIN: 6909
# LAKE: 10623
# FMPA: 6567
# FPC: 6455
# FPL: 6452
# JEA: 9617
# KUA: 10376
# OUC: 14610
# TECO: 18454
# SECI: 21554
# FRCC hourly load data, keyed by FERC-714 respondent ID (see the ID list
# above) -> {year: 1-D array of hourly load values}.  Each entry encodes the
# exact layout quirks of one source file (fixed widths, skipped header/footer
# rows, column slices), so the parser arguments intentionally differ per year.
# NOTE(review): `sheetname` is the pre-0.21 pandas spelling of `sheet_name`,
# consistent with this being a Python 2-era script.
frcc = {
    # 6909 = GAIN
    6909 : {
        1993 : pd.read_fwf('%s/frcc/1993/GAIN93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_csv('%s/frcc/1994/GAIN94' % (fulldir), header=None, sep=' ', skipinitialspace=True, skipfooter=2, skiprows=5).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/GAIN95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/frcc/1996/GAIN96' % (fulldir), sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/GAIN97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/GAIN98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=3, header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/GAIN99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/GAIN00' % (fulldir), header=None).iloc[:, 4:].values.ravel(),
        2002 : pd.read_excel('%s/frcc/2002/GAIN02' % (fulldir), sheetname=1, skiprows=3, header=None).iloc[:730, 8:20].values.ravel(),
        2003 : pd.read_excel('%s/frcc/2003/GAIN03' % (fulldir), sheetname=2, skiprows=3, header=None).iloc[:730, 8:20].values.ravel(),
        2004 : pd.read_excel('%s/frcc/2004/GAIN04' % (fulldir), sheetname=0, header=None).iloc[:, 8:].values.ravel()
    },
    # 10623 = LAKE
    10623: {
        1993 : pd.read_fwf('%s/frcc/1993/LAKE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/LAKE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/LAKE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/LAKE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/LAKE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/LAKE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/LAKE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/LAKE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/frcc/2001/LAKE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/LAKE02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    # 6567 = FMPA
    6567 : {
        1993 : pd.read_fwf('%s/frcc/1993/FMPA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/FMPA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/FMPA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/FMPA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/FMPA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/FMPA98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/FMPA99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=6).iloc[:, 1:].values.ravel(),
        2001 : pd.read_csv('%s/frcc/2001/FMPA01' % (fulldir), header=None, sep=' ', skipinitialspace=True, skiprows=6).iloc[:, 2:-1].values.ravel(),
        2002 : pd.read_csv('%s/frcc/2002/FMPA02' % (fulldir), header=None, sep='\t', skipinitialspace=True, skiprows=7).iloc[:, 1:].values.ravel(),
        2003 : pd.read_csv('%s/frcc/2003/FMPA03' % (fulldir), header=None, sep='\t', skipinitialspace=True, skiprows=7).iloc[:, 1:].values.ravel(),
        2004 : pd.read_csv('%s/frcc/2004/FMPA04' % (fulldir), header=None, sep=' ', skipinitialspace=True, skiprows=6, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    # 6455 = FPC
    6455 : {
        1993 : pd.read_csv('%s/frcc/1993/FPC93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
        1994 : pd.read_csv('%s/frcc/1994/FPC94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        1995 : pd.read_csv('%s/frcc/1995/FPC95' % (fulldir), engine='python', header=None)[0].values,
        1996 : pd.read_excel('%s/frcc/1996/FPC96' % (fulldir), header=None, skiprows=2, skipfooter=1).iloc[:, 6:].values.ravel(),
        1998 : pd.read_excel('%s/frcc/1998/FPC98' % (fulldir), header=None, skiprows=5).iloc[:, 7:].values.ravel(),
        1999 : pd.read_excel('%s/frcc/1999/FPC99' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2000 : pd.read_excel('%s/frcc/2000/FPC00' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2001 : pd.read_excel('%s/frcc/2001/FPC01' % (fulldir), header=None, skiprows=5).iloc[:, 7:].values.ravel(),
        2002 : pd.read_excel('%s/frcc/2002/FPC02' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2004 : pd.read_excel('%s/frcc/2004/FPC04' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel()
    },
    # 6452 = FPL -- tab-separated text parsed by hand: split each line on
    # tabs, strip CR/LF, quotes and thousands separators, then cast to float.
    6452 : {
        1993 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1993/FPL93' % (fulldir), 'r').readlines()]).iloc[:365, :24].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1994 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1994/FPL94' % (fulldir), 'r').readlines()]).iloc[3:, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1995 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1995/FPL95' % (fulldir), 'r').readlines()[3:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1996 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1996/FPL96' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1997 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1997/FPL97' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1998 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1998/FPL98' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1999 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1999/FPL99' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2000 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2000/FPL00' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2001 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2001/FPL01' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2002 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2002/FPL02' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2003 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2003/FPL03' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2004 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2004/FPL04' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel()
    },
    # 9617 = JEA
    9617 : {
        1993 : pd.read_csv('%s/frcc/1993/JEA93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
        1994 : pd.read_csv('%s/frcc/1994/JEA94' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
        1996 : pd.read_fwf('%s/frcc/1996/JEA96' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/JEA97' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/JEA98' % (fulldir), sep='\t', header=None)[2].values,
        1999 : pd.read_csv('%s/frcc/1999/JEA99' % (fulldir), sep='\t', header=None)[2].values,
        2000 : pd.read_excel('%s/frcc/2000/JEA00' % (fulldir), header=None)[2].values,
        2001 : pd.read_excel('%s/frcc/2001/JEA01' % (fulldir), header=None, skiprows=2)[2].values,
        2002 : pd.read_excel('%s/frcc/2002/JEA02' % (fulldir), header=None, skiprows=1)[2].values,
        2003 : pd.read_excel('%s/frcc/2003/JEA03' % (fulldir), header=None, skiprows=1)[2].values,
        2004 : pd.read_excel('%s/frcc/2004/JEA04' % (fulldir), header=None, skiprows=1)[2].values
    },
    # 10376 = KUA (sparse: only the years with usable source files)
    10376 : {
        1994 : pd.read_csv('%s/frcc/1994/KUA94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_csv('%s/frcc/1995/KUA95' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_csv('%s/frcc/1997/KUA97' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        2001 : pd.read_csv('%s/frcc/2001/KUA01' % (fulldir), skiprows=1, header=None, sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/frcc/2002/KUA02' % (fulldir), skipfooter=1, header=None, sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel()
    },
    # 14610 = OUC
    14610 : {
        1993 : pd.read_fwf('%s/frcc/1993/OUC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/OUC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/OUC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/OUC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/OUC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/OUC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/OUC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/OUC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/frcc/2001/OUC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/OUC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    # 18454 = TECO
    18454 : {
        1993 : pd.read_fwf('%s/frcc/1993/TECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/TECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/TECO98' % (fulldir), engine='python', skiprows=3, header=None)[0].values,
        1999 : pd.read_csv('%s/frcc/1999/TECO99' % (fulldir), engine='python', skiprows=3, header=None)[0].values,
        # 2000 file carries trailing characters after the load value; keep only
        # the first four characters of each field before casting to int.
        2000 : pd.read_csv('%s/frcc/2000/TECO00' % (fulldir), engine='python', skiprows=3, header=None)[0].str[:4].astype(int).values,
        2001 : pd.read_csv('%s/frcc/2001/TECO01' % (fulldir), skiprows=3, header=None)[0].values,
        2002 : pd.read_csv('%s/frcc/2002/TECO02' % (fulldir), sep='\t').loc[:, 'HR1':].values.ravel(),
        2003 : pd.read_csv('%s/frcc/2003/TECO03' % (fulldir), skiprows=2, header=None, sep=' ', skipinitialspace=True).iloc[:, 2:].values.ravel()
    },
    # 21554 = SECI
    21554 : {
        1993 : pd.read_fwf('%s/frcc/1993/SECI93' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/SECI94' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/SECI95' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/SECI96' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/SECI97' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/SECI99' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/SECI00' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/SECI02' % (fulldir), header=None).iloc[:, 3:].values.ravel(),
        2004 : pd.read_fwf('%s/frcc/2004/SECI04' % (fulldir), header=None).iloc[:, 3:].values.ravel()
    }
}
frcc[6455][1995][frcc[6455][1995] > 10000] = 0
frcc[9617][2002][frcc[9617][2002] > 10000] = 0
frcc[10376][1995][frcc[10376][1995] > 300] = 0
if not os.path.exists('./frcc'):
os.mkdir('frcc')
for k in frcc.keys():
print k
s = pd.DataFrame(pd.concat([pd.Series(frcc[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(frcc[k][i]))) for i in frcc[k].keys()]).sort_index(), columns=['load'])
s['load'] = s['load'].astype(float).replace(0, np.nan)
s.to_csv('./frcc/%s.csv' % k)
###### ECAR
# AEP: 829
# APS: 538
# AMPO: 40577
# BREC: 1692
# BPI: 7004
# CEI: 3755
# CGE: 3542
# CP: 4254
# DPL: 4922
# DECO: 5109
# DLCO: 5487
# EKPC: 5580
# HEC: 9267
# IPL: 9273
# KUC: 10171
# LGE: 11249
# NIPS: 13756
# OE: 13998
# OVEC: 14015
# PSI: 15470
# SIGE: 17633
# TE: 18997
# WVPA: 40211
# CINRGY: 3260 -> Now part of 3542
# FE: 32208
# MCCP:
ecar = {
829 : {
1993 : pd.read_fwf('%s/ecar/1993/AEP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/AEP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/AEP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/AEP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/AEP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/AEP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/AEP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/AEP00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/AEP01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/AEP02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/AEP03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/AEP04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
538 : {
1993 : pd.read_fwf('%s/ecar/1993/APS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/APS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/APS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
40577 : {
2001 : pd.read_fwf('%s/ecar/2001/AMPO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/AMPO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/AMPO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/AMPO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
1692 : {
1993 : pd.read_fwf('%s/ecar/1993/BREC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/BREC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/BREC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/BREC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/BREC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/BREC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/BREC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/BREC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/BREC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/BREC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/BREC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/BREC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
7004 : {
1994 : pd.read_fwf('%s/ecar/1994/BPI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/BPI99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/BPI00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/BPI01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/BPI02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/BPI03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/BPI04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
3755 : {
1993 : pd.read_fwf('%s/ecar/1993/CEI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CEI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CEI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CEI96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
3542 : {
1993 : pd.read_fwf('%s/ecar/1993/CEI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CEI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CEI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CIN96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/CIN97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/CIN98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/CIN99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/CIN00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/CIN01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/CIN02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/CIN03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/CIN04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
4254 : {
1993 : pd.read_fwf('%s/ecar/1993/CP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
4922 : {
1993 : pd.read_fwf('%s/ecar/1993/DPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DPL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DPL98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DPL99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DPL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DPL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DPL02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DPL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DPL04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5109 : {
1993 : pd.read_fwf('%s/ecar/1993/DECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DECO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DECO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DECO97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DECO98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DECO99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DECO00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DECO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DECO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DECO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DECO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5487 : {
1993 : pd.read_fwf('%s/ecar/1993/DLCO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DLCO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DLCO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DLCO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DLCO97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DLCO98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DLCO99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DLCO00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DLCO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DLCO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DLCO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DLCO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5580 : {
1993 : pd.read_fwf('%s/ecar/1993/EKPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/EKPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/EKPC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/EKPC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/EKPC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/EKPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/EKPC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/EKPC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/EKPC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/EKPC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/EKPC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/EKPC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9267 : {
1993 : pd.read_fwf('%s/ecar/1993/HEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/HEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/HEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/HEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/HEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/HEC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/HEC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/HEC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/HEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/HEC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/HEC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/HEC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9273 : {
1993 : pd.read_fwf('%s/ecar/1993/IPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/IPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/IPL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/IPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/IPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/IPL98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/IPL99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/IPL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/IPL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/IPL02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/IPL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/IPL04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
10171 : {
1993 : pd.read_fwf('%s/ecar/1993/KUC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/KUC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/KUC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/KUC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/KUC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
11249 : {
1993 : pd.read_fwf('%s/ecar/1993/LGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/LGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/LGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/LGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/LGE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/LGEE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/LGEE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/LGEE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/LGEE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/LGEE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/LGEE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/LGEE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13756 : {
1993 : pd.read_fwf('%s/ecar/1993/NIPS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/NIPS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/NIPS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/NIPS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/NIPS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/NIPS98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/NIPS99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/NIPS00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/NIPS01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/NIPS02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/NIPS03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/NIPS04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13998 : {
1993 : pd.read_fwf('%s/ecar/1993/OES93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/OES94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/OES95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/OES96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
14015 : {
1993 : pd.read_fwf('%s/ecar/1993/OVEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/OVEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/OVEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/OVEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/OVEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/OVEC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/OVEC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/OVEC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/OVEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/OVEC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/OVEC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/OVEC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
15470 : {
1993 : pd.read_fwf('%s/ecar/1993/PSI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/PSI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/PSI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
17633 : {
1993 : pd.read_fwf('%s/ecar/1993/SIGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/SIGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/SIGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/SIGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/SIGE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/SIGE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/SIGE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/SIGE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/SIGE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/SIGE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/SIGE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
18997 : {
1993 : pd.read_fwf('%s/ecar/1993/TECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/TECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/TECO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/TECO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
40211 : {
1994 : pd.read_fwf('%s/ecar/1994/WVPA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/ecar/2003/WVPA03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/WVPA04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
32208 : {
1997 : pd.read_fwf('%s/ecar/1997/FE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/FE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/FE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/FE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/FE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/FE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/FE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/FE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
'mccp' : {
1993 : pd.read_fwf('%s/ecar/1993/MCCP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/MCCP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/MCCP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/MCCP00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/MCCP01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/MCCP02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/MCCP03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/MCCP04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
}
}
# Export one CSV of hourly load per ECAR respondent ID.
# For each utility, the per-year hourly arrays in `ecar` are stitched into a
# single chronologically-sorted, hourly-indexed series and written to
# ./ecar/<id>.csv with a single 'load' column.
outdir = './ecar'  # single path literal; original mixed 'ecar' and './ecar'
if not os.path.exists(outdir):
    os.mkdir(outdir)
for k in ecar.keys():
    print(k)  # progress indicator; parenthesized form is valid Python 2 and 3
    # One Series per year, indexed hourly starting Jan 1 of that year.
    # NOTE(review): assumes each year's array begins at midnight Jan 1 and is
    # strictly hourly with no gaps -- confirm against the source files.
    yearly = [pd.Series(ecar[k][yr],
                        index=pd.date_range(start=datetime.date(yr, 1, 1),
                                            freq='h',
                                            periods=len(ecar[k][yr])))
              for yr in ecar[k].keys()]
    s = pd.DataFrame(pd.concat(yearly).sort_index(), columns=['load'])
    # Zeros in the raw filings denote missing observations, not true zero
    # demand, so they are converted to NaN.
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('%s/%s.csv' % (outdir, k))
###### MAIN
# CECO : 4110
# CILC: 3252 <- Looks like something is getting cut off from 1993-2000
# CIPS: 3253
# IPC: 9208
# MGE: 11479
# SIPC: 17632
# SPIL: 17828
# UE: 19436
# WEPC: 20847
# WPL: 20856
# WPS: 20860
# UPP: 19578
# WPPI: 20858
# AMER: 19436  (NOTE: same ID as UE above -- in the `main` dict literal below,
#               the later 19436 entry silently overwrites the earlier one)
# CWL: 4045
main = {
4110 : {
1993 : pd.read_fwf('%s/main/1993/CECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_csv('%s/main/1995/CECO95' % (fulldir), skiprows=3, header=None)[0].values,
1996 : pd.read_csv('%s/main/1996/CECO96' % (fulldir), skiprows=4, header=None)[1].values,
1997 : pd.read_csv('%s/main/1997/CECO97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=4, header=None)[3].values,
1998 : pd.read_csv('%s/main/1998/CECO98' % (fulldir), sep='\s', skipinitialspace=True, skiprows=5, header=None)[5].values,
1999 : pd.read_csv('%s/main/1999/CECO99' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
2000 : pd.read_csv('%s/main/2000/CECO00' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
2001 : pd.read_csv('%s/main/2001/CECO01' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
2002 : pd.read_csv('%s/main/2002/CECO02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None)[2].values
},
3252 : {
1993 : pd.read_fwf('%s/main/1993/CILC93' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1994 : pd.read_fwf('%s/main/1994/CILC94' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1995 : pd.read_fwf('%s/main/1995/CILC95' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1996 : pd.read_fwf('%s/main/1996/CILC96' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1997 : pd.read_fwf('%s/main/1997/CILC97' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1998 : pd.read_fwf('%s/main/1998/CILC98' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1999 : pd.read_fwf('%s/main/1999/CILC99' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
2000 : pd.read_excel('%s/main/2000/CILC00' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
2001 : pd.read_excel('%s/main/2001/CILC01' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
2002 : pd.read_excel('%s/main/2002/CILC02' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
2003 : pd.read_csv('%s/main/2003/CILC03' % (fulldir), skiprows=1, sep='\t').iloc[:, -1].values
},
3253 : {
1993 : pd.read_fwf('%s/main/1993/CIPS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1994 : pd.read_fwf('%s/main/1994/CIPS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/main/1995/CIPS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/main/1996/CIPS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/main/1997/CIPS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9208 : {
1993 : pd.read_csv('%s/main/1993/IPC93' % (fulldir), skipfooter=1, header=None)[2].values,
1994 : pd.read_csv('%s/main/1994/IPC94' % (fulldir), skipfooter=1, header=None)[2].values,
1995 : pd.read_csv('%s/main/1995/IPC95' % (fulldir), skipfooter=1, header=None)[4].astype(str).str.replace('.', '0').astype(float).values,
1996 : pd.read_csv('%s/main/1996/IPC96' % (fulldir)).iloc[:, -1].values,
1997 : pd.read_csv('%s/main/1997/IPC97' % (fulldir)).iloc[:, -1].values,
1998 : pd.read_excel('%s/main/1998/IPC98' % (fulldir)).iloc[:, -1].values,
1999 : pd.read_csv('%s/main/1999/IPC99' % (fulldir), skiprows=2, header=None)[1].values,
2000 : pd.read_excel('%s/main/2000/IPC00' % (fulldir), skiprows=1).iloc[:, -1].values,
2001 : pd.read_excel('%s/main/2001/IPC01' % (fulldir), skiprows=1).iloc[:, -1].values,
2002 : pd.read_excel('%s/main/2002/IPC02' % (fulldir), skiprows=4).iloc[:, -1].values,
2003 : pd.read_excel('%s/main/2003/IPC03' % (fulldir), skiprows=1).iloc[:, -1].values,
2004 : pd.read_excel('%s/main/2004/IPC04' % (fulldir), skiprows=1).iloc[:, -1].values
},
11479 : {
1993 : pd.read_fwf('%s/main/1993/MGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=4).iloc[:, 1:].dropna().astype(float).values.ravel(),
1995 : pd.read_csv('%s/main/1995/MGE95' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
1997 : pd.read_csv('%s/main/1997/MGE97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=12, header=None).iloc[:-1, 2].astype(float).values,
1998 : pd.read_csv('%s/main/1998/MGE98' % (fulldir), sep=' ', skipinitialspace=True).iloc[:-1]['LOAD'].astype(float).values,
1999 : pd.read_csv('%s/main/1999/MGE99' % (fulldir), sep=' ', skiprows=2, header=None, skipinitialspace=True).iloc[:-2, 2].astype(float).values,
2000 : pd.read_csv('%s/main/2000/MGE00' % (fulldir), sep=' ', skiprows=3, header=None, skipinitialspace=True, skipfooter=2).iloc[:, 2].astype(float).values,
2000 : pd.read_fwf('%s/main/2000/MGE00' % (fulldir), skiprows=2)['VMS_DATE'].iloc[:-2].str.split().str[-1].astype(float).values,
2001 : pd.read_fwf('%s/main/2001/MGE01' % (fulldir), skiprows=1, header=None).iloc[:-2, 2].values,
2002 : pd.read_fwf('%s/main/2002/MGE02' % (fulldir), skiprows=4, header=None).iloc[:-1, 0].str.split().str[-1].astype(float).values
},
17632 : {
1994 : pd.read_csv('%s/main/1994/SIPC94' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
1996 : pd.read_csv('%s/main/1996/SIPC96' % (fulldir), engine='python', header=None)[0].values,
1997 : pd.read_csv('%s/main/1997/SIPC97' % (fulldir), engine='python', header=None)[0].values,
1998 : pd.read_csv('%s/main/1998/SIPC98' % (fulldir), engine='python', header=None)[0].values,
1999 : pd.read_csv('%s/main/1999/SIPC99' % (fulldir), engine='python', header=None)[0].replace('no data', '0').astype(float).values,
2000 : pd.read_csv('%s/main/2000/SIPC00' % (fulldir), engine='python', header=None)[0].astype(str).str[:3].astype(float).values,
2001 : pd.read_csv('%s/main/2001/SIPC01' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values,
2002 : pd.read_csv('%s/main/2002/SIPC02' % (fulldir), sep='\t', skiprows=3, header=None)[1].values,
2003 : pd.read_csv('%s/main/2003/SIPC03' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values,
2004 : pd.read_csv('%s/main/2004/SIPC04' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values
},
17828 : {
1993 : pd.read_csv('%s/main/1993/SPIL93' % (fulldir), sep=' ', skipinitialspace=True, skiprows=4, header=None).iloc[:, 3:].values.ravel(),
1994 : pd.read_csv('%s/main/1994/SPIL94' % (fulldir), sep=' ', skipinitialspace=True, skiprows=6, header=None).iloc[:, 3:].values.ravel(),
1995 : pd.read_csv('%s/main/1995/SPIL95' % (fulldir), sep=' ', skipinitialspace=True, skiprows=7, header=None).iloc[:, 3:].values.ravel(),
1996 : pd.read_csv('%s/main/1996/SPIL96' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).iloc[:366, 3:].astype(float).values.ravel(),
1997 : pd.read_csv('%s/main/1997/SPIL97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=7, header=None).iloc[:, 3:].values.ravel(),
1998 : pd.read_csv('%s/main/1998/SPIL98' % (fulldir), sep='\t', skipinitialspace=True, skiprows=8, header=None).iloc[:, 4:].values.ravel(),
1999 : pd.read_csv('%s/main/1999/SPIL99' % (fulldir), skiprows=4, header=None)[0].values,
2000 : pd.read_csv('%s/main/2000/SPIL00' % (fulldir), skiprows=4, header=None)[0].values,
2001 : pd.read_csv('%s/main/2001/SPIL01' % (fulldir), sep='\t', skipinitialspace=True, skiprows=7, header=None).iloc[:, 5:-1].values.ravel(),
2002 : pd.read_excel('%s/main/2002/SPIL02' % (fulldir), sheetname=2, skiprows=5).iloc[:, 3:].values.ravel(),
2003 : pd.read_excel('%s/main/2003/SPIL03' % (fulldir), sheetname=2, skiprows=5).iloc[:, 3:].values.ravel(),
2004 : pd.read_excel('%s/main/2004/SPIL04' % (fulldir), sheetname=0, skiprows=5).iloc[:, 3:].values.ravel()
},
19436 : {
1995 : pd.read_fwf('%s/main/1995/UE95' % (fulldir), header=None)[2].values,
1996 : pd.read_fwf('%s/main/1996/UE96' % (fulldir), header=None)[2].values,
1997 : pd.read_fwf('%s/main/1997/UE97' % (fulldir), header=None)[2].values
},
20847 : {
1993 : pd.read_csv('%s/main/1993/WEPC93' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
1994 : pd.read_csv('%s/main/1994/WEPC94' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
1995 : pd.read_csv('%s/main/1995/WEPC95' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
1996 : pd.read_csv('%s/main/1996/WEPC96' % (fulldir), engine='python', header=None)[0].values,
1997 : pd.read_excel('%s/main/1997/WEPC97' % (fulldir), header=None)[0].astype(str).str.strip().replace('NA', '0').astype(float).values,
1998 : pd.read_csv('%s/main/1998/WEPC98' % (fulldir), engine='python', header=None)[0].str.strip().replace('NA', 0).astype(float).values,
1999 : pd.read_excel('%s/main/1999/WEPC99' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
2000 : pd.read_excel('%s/main/2000/WEPC00' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2001 : pd.read_excel('%s/main/2001/WEPC01' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_excel('%s/main/2002/WEPC02' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2003 : pd.read_excel('%s/main/2003/WEPC03' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2004 : pd.read_excel('%s/main/2004/WEPC04' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
20856 : {
1993 : pd.read_fwf('%s/main/1993/WPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/main/1994/WPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/main/1995/WPL95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
1996 : pd.read_csv('%s/main/1996/WPL96' % (fulldir), header=None, sep='\t').iloc[:, 1:].values.ravel(),
1997 : pd.read_csv('%s/main/1997/WPL97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=1, header=None)[2].str.replace(',', '').astype(float).values
},
20860 : {
1993 : pd.read_csv('%s/main/1993/WPS93' % (fulldir), sep=' ', header=None, skipinitialspace=True, skipfooter=1).values.ravel(),
1994 : (pd.read_csv('%s/main/1994/WPS94' % (fulldir), sep=' ', header=None, skipinitialspace=True, skipfooter=1).iloc[:, 1:-1]/100).values.ravel(),
1995 : pd.read_csv('%s/main/1995/WPS95' % (fulldir), sep=' ', skipinitialspace=True, skiprows=8, header=None, skipfooter=7)[2].values,
1996 : pd.read_csv('%s/main/1996/WPS96' % (fulldir), sep='\t', skiprows=2).loc[:365, '100':'2400'].astype(float).values.ravel(),
1997 : pd.read_csv('%s/main/1997/WPS97' % (fulldir), sep='\s', header=None, skipfooter=1)[2].values,
1998 : pd.read_csv('%s/main/1998/WPS98' % (fulldir), sep='\s', header=None)[2].values,
1999 : pd.read_excel('%s/main/1999/WPS99' % (fulldir), skiprows=8, skipfooter=8, header=None)[1].values,
2000 : pd.read_excel('%s/main/2000/WPS00' % (fulldir), sheetname=1, skiprows=5, skipfooter=8, header=None)[2].values,
2001 : pd.read_excel('%s/main/2001/WPS01' % (fulldir), sheetname=0, skiprows=5, header=None)[2].values,
2002 : pd.read_csv('%s/main/2002/WPS02' % (fulldir), sep='\s', header=None, skiprows=5)[2].values,
2003 : pd.read_excel('%s/main/2003/WPS03' % (fulldir), sheetname=1, skiprows=6, header=None)[2].values
},
19578 : {
1996 : pd.read_csv('%s/main/1996/UPP96' % (fulldir), header=None, skipfooter=1).iloc[:, -1].values,
2004 : pd.read_excel('%s/main/2004/UPP04' % (fulldir)).iloc[:, -1].values
},
20858 : {
1997 : pd.read_csv('%s/main/1997/WPPI97' % (fulldir), skiprows=5, sep=' ', skipinitialspace=True, header=None).iloc[:, 1:-1].values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/main/1999/WPPI99' % (fulldir)).readlines()[5:]]).iloc[:, 1:-1].astype(float).values.ravel(),
2000 : pd.DataFrame([i.split() for i in open('%s/main/2000/WPPI00' % (fulldir)).readlines()[5:]]).iloc[:, 1:-1].astype(float).values.ravel(),
2001 : pd.read_excel('%s/main/2001/WPPI01' % (fulldir), sheetname=1, skiprows=4).iloc[:, 1:-1].values.ravel(),
2002 : pd.read_excel('%s/main/2002/WPPI02' % (fulldir), sheetname=1, skiprows=4).iloc[:, 1:-1].values.ravel()
},
19436 : {
1998 : pd.read_csv('%s/main/1998/AMER98' % (fulldir), sep='\t').iloc[:, -1].str.strip().replace('na', 0).astype(float).values,
1999 : pd.read_csv('%s/main/1999/AMER99' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
2000 : pd.read_csv('%s/main/2000/AMER00' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
2001 : pd.read_csv('%s/main/2001/AMER01' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('n/a', 0).astype(float).values,
2002 : pd.read_csv('%s/main/2002/AMER02' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
2003 : pd.read_csv('%s/main/2003/AMER03' % (fulldir), sep='\t', skiprows=1).iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
2004 : pd.read_csv('%s/main/2004/AMER04' % (fulldir), sep='\t', skiprows=1).iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values
},
4045 : {
2000 : pd.read_excel('%s/main/2000/CWL00' % (fulldir), skiprows=2).iloc[:, 1:].values.ravel(),
2001 : pd.read_excel('%s/main/2001/CWL01' % (fulldir), skiprows=1).iloc[:, 0].values,
2002 : pd.read_excel('%s/main/2002/CWL02' % (fulldir), header=None).iloc[:, 0].values,
2003 : pd.read_excel('%s/main/2003/CWL03' % (fulldir), header=None).iloc[:, 0].values
}
}
# Scrub implausible spike values (> 9000) from utility 20847's
# 1994-1996 arrays before export: they look like sentinel/bad readings
# rather than real load -- TODO confirm against the source files.
# The arrays are numpy vectors, so boolean-mask assignment mutates
# them in place, exactly like the original per-year statements.
for _yr in (1994, 1995, 1996):
    _arr = main[20847][_yr]
    _arr[_arr > 9000] = 0
# Write each utility in the EEI `main` collection to ./main/<id>.csv.
# Per utility: concatenate the per-year hourly vectors onto an hourly
# DatetimeIndex starting Jan 1 of each year, sort chronologically, and
# rewrite zero readings as NaN (zeros denote missing data upstream).
if not os.path.exists('./main'):
    os.mkdir('main')
for util_id, by_year in main.items():
    print(util_id)
    pieces = []
    for yr, hourly in by_year.items():
        idx = pd.date_range(start=datetime.date(yr, 1, 1), freq='h',
                            periods=len(hourly))
        pieces.append(pd.Series(hourly, index=idx))
    frame = pd.DataFrame(pd.concat(pieces).sort_index(), columns=['load'])
    frame['load'] = frame['load'].astype(float).replace(0, np.nan)
    frame.to_csv('./main/%s.csv' % util_id)
# EEI
# Bizarre formatting until 1998
###### MAAC
# AE: 963
# BC: 1167
# DPL: 5027
# PU: 7088
# PN: 14715
# PE: 14940
# PEP: 15270
# PS: 15477
# PJM: 14725
# ALL UTILS
# Shared PJM (MAAC) member-company files for 1993-1996, read once here
# and sliced per utility in the `maac` dict below.
# 1993/94 are fixed-width: one 20-char label column followed by twelve
# 5-char value columns; 1995/96 are tab-delimited.  skipfooter=1 drops
# the last line of every file.
_pjm_widths = [20] + [5] * 12
maac93 = pd.read_fwf('%s/maac/1993/PJM93' % (fulldir), widths=_pjm_widths, header=None, skipfooter=1)
maac94 = pd.read_fwf('%s/maac/1994/PJM94' % (fulldir), widths=_pjm_widths, header=None, skipfooter=1)
maac95 = pd.read_csv('%s/maac/1995/PJM95' % (fulldir), sep='\t', header=None, skipfooter=1)
maac96 = pd.read_csv('%s/maac/1996/PJM96' % (fulldir), sep='\t', header=None, skipfooter=1)
# Hourly load per MAAC/PJM member utility, keyed by utility id (see the
# id legend in the comments above), then by year; values are flat numpy
# arrays.  For 1993-1996 a utility's rows are selected out of the shared
# maac93..maac96 frames by matching its label; the 1993/94 frames carry
# the label in column 0 with data from column 1 on, while the 1995/96
# frames have an extra leading column (label in column 1, data from 2).
# 1997 comes from per-utility sheets of the PJM97 workbook, taking 24
# hourly columns (1:25); most sheets are also capped at 366 rows,
# presumably to trim trailing non-data rows -- TODO confirm.
maac = {
    963 : {    # Atlantic City Electric (AE)
        1993 : maac93[maac93[0].str.contains('AE')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('AE')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('AE')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('AE')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='ACE_LOAD').iloc[:, 1:25].values.ravel()
    },
    1167 : {    # Baltimore Gas & Electric (BC)
        1993 : maac93[maac93[0].str.contains('BC')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('BC')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('BC')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('BC')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='BC_LOAD').iloc[:, 1:25].values.ravel()
    },
    5027 : {    # Delmarva Power & Light (DPL)
        1993 : maac93[maac93[0].str.contains('DP')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('DP')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('DP')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('DP')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='DPL_LOAD').iloc[:366, 1:25].values.ravel()
    },
    7088 : {    # GPU / Public Utility (PU)
        1993 : maac93[maac93[0].str.contains('PU')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PU')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PU')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PU')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='GPU_LOAD').iloc[:366, 1:25].values.ravel()
    },
    14715 : {    # Pennsylvania Electric (PN) -- only a 1997 sheet is available
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PN_LOAD').iloc[:366, 1:25].values.ravel()
    },
    14940 : {    # Philadelphia Electric (PE)
        # 'PE$' anchors the match at end-of-label so that 'PEP' rows
        # (utility 15270 below) are not swept into this series.
        1993 : maac93[maac93[0].str.contains('PE$')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PE$')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PE$')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PE$')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PE_Load').iloc[:366, 1:25].values.ravel()
    },
    15270 : {    # Potomac Electric Power (PEP)
        1993 : maac93[maac93[0].str.contains('PEP')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PEP')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PEP')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PEP')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PEP_LOAD').iloc[:366, 1:25].values.ravel()
    },
    15477 : {    # Public Service Electric & Gas (PS)
        1993 : maac93[maac93[0].str.contains('PS')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PS')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PS')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PS')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PS_Load').iloc[:366, 1:25].values.ravel()
    },
    14725 : {    # PJM pool total -- the only id with post-1997 files
        1993 : maac93[maac93[0].str.contains('PJM')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PJM')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PJM')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PJM')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PJM_LOAD').iloc[:366, 1:25].values.ravel(),
        1998 : pd.read_csv('%s/maac/1998/PJM98' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        1999 : pd.read_excel('%s/maac/1999/PJM99' % (fulldir), header=None)[2].values,
        2000 : pd.read_excel('%s/maac/2000/PJM00' % (fulldir), header=None)[2].values
    }
}
# Dump every MAAC utility's combined hourly record to ./maac/<id>.csv.
# Each year's vector is stamped with an hourly index starting Jan 1 of
# that year; the concatenated series is sorted, and zero readings are
# rewritten as NaN (zeros mark gaps in the raw files).
if not os.path.exists('./maac'):
    os.mkdir('maac')
for uid in maac.keys():
    print(uid)
    yearly = [
        pd.Series(maac[uid][yr],
                  index=pd.date_range(start=datetime.date(yr, 1, 1),
                                      freq='h',
                                      periods=len(maac[uid][yr])))
        for yr in maac[uid].keys()
    ]
    out = pd.DataFrame(pd.concat(yearly).sort_index(), columns=['load'])
    out['load'] = out['load'].astype(float).replace(0, np.nan)
    out.to_csv('./maac/%s.csv' % uid)
###### SERC
# AEC: 189
# CPL: 3046
# CEPC: 40218
# CEPB: 3408
# MEMP: 12293
# DUKE: 5416
# FPWC: 6235 *
# FLINT: 6411
# GUC: 7639
# LCEC: 10857
# NPL: 13204
# OPC: 13994
# SCEG: 17539
# SCPS: 17543
# SMEA: 17568
# TVA: 18642
# VIEP: 19876
# WEMC: 20065
# DU: 4958
# AECI: 924
# ODEC-D: 402290
# ODEC-V: 402291
# ODEC: 40229
# SOCO-APCO: 195
# SOCO-GPCO: 7140
# SOCO-GUCO: 7801
# SOCO-MPCO: 12686
# SOCO-SECO: 16687 *?
serc = {
189 : {
1993 : pd.read_csv('%s/serc/1993/AEC93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
1994 : pd.read_csv('%s/serc/1994/AEC94' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1995 : pd.read_csv('%s/serc/1995/AEC95' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=1).iloc[:, 1:].values.ravel(),
1996 : pd.read_csv('%s/serc/1996/AEC96' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1997 : pd.read_csv('%s/serc/1997/AEC97' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1998 : pd.read_csv('%s/serc/1998/AEC98' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
1999 : pd.read_csv('%s/serc/1999/AEC99' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=3).iloc[:, 1:].values.ravel(),
2000 : pd.read_csv('%s/serc/2000/AEC00' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
2001 : pd.read_csv('%s/serc/2001/AEC01' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
2002 : pd.read_csv('%s/serc/2002/AEC02' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=4).iloc[:, 1:].values.ravel(),
2004 : pd.read_csv('%s/serc/2004/AEC04' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=4).iloc[:, 1:].values.ravel()
},
3046 : {
1994 : pd.read_csv('%s/serc/1994/CPL94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
1995 : pd.read_csv('%s/serc/1995/CPL95' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=5)[1].values,
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/CEPL96' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/CPL97' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/CPL98' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/CPL99' % (fulldir)).readlines()[1:]])[2].astype(float).values,
2000 : pd.read_excel('%s/serc/2000/CPL00' % (fulldir))['Load'].values,
2001 : pd.read_excel('%s/serc/2001/CPL01' % (fulldir))['Load'].values,
2002 : pd.read_excel('%s/serc/2002/CPL02' % (fulldir))['Load'].values,
2003 : pd.read_excel('%s/serc/2003/CPL03' % (fulldir))['Load'].values,
2004 : pd.read_excel('%s/serc/2004/CPL04' % (fulldir))['Load'].values
},
40218 : {
1993 : pd.read_fwf('%s/serc/1993/CEPC93' % (fulldir), header=None).iloc[:, 1:-1].values.ravel(),
1994 : pd.read_csv('%s/serc/1994/CEPC94' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=1).iloc[:, 1:-1].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_csv('%s/serc/1995/CEPC95' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:-1].replace('.', '0').astype(float).values.ravel(),
1996 : (pd.read_fwf('%s/serc/1996/CEPC96' % (fulldir)).iloc[:-1, 1:]/1000).values.ravel(),
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/CEPC97' % (fulldir)).readlines()[5:]]).iloc[:-1, 1:].astype(float)/1000).values.ravel(),
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/CEPC98' % (fulldir)).readlines()]).iloc[:, 1:].astype(float)).values.ravel(),
2000 : pd.read_excel('%s/serc/2000/CEPC00' % (fulldir), sheetname=1, skiprows=3)['MW'].values,
2001 : pd.read_excel('%s/serc/2001/CEPC01' % (fulldir), sheetname=1, skiprows=3)['MW'].values,
2002 : pd.read_excel('%s/serc/2002/CEPC02' % (fulldir), sheetname=0, skiprows=5)['MW'].values,
2002 : pd.read_excel('%s/serc/2002/CEPC02' % (fulldir), sheetname=0, skiprows=5)['MW'].values
},
3408 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/CEPB93' % (fulldir)).readlines()[12:]])[1].astype(float)/1000).values,
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/CEPB94' % (fulldir)).readlines()[10:]])[1].astype(float)).values,
1995 : (pd.DataFrame([i.split() for i in open('%s/serc/1995/CEPB95' % (fulldir)).readlines()[6:]])[2].astype(float)).values,
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/CEPB96' % (fulldir)).readlines()[10:]])[2].astype(float)).values,
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/CEPB97' % (fulldir)).readlines()[9:]])[2].astype(float)).values,
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/CEPB98' % (fulldir)).readlines()[9:]])[2].astype(float)).values,
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/CEPB99' % (fulldir)).readlines()[8:]])[2].astype(float)).values,
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/CEPB00' % (fulldir)).readlines()[11:]])[2].astype(float)).values,
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/CEPB01' % (fulldir)).readlines()[8:]])[2].astype(float)).values,
2002 : (pd.DataFrame([i.split() for i in open('%s/serc/2002/CEPB02' % (fulldir)).readlines()[6:]])[4].astype(float)).values,
2003 : (pd.DataFrame([i.split() for i in open('%s/serc/2003/CEPB03' % (fulldir)).readlines()[6:]])[2].astype(float)).values
},
12293 : {
2000 : (pd.read_csv('%s/serc/2000/MEMP00' % (fulldir)).iloc[:, -1]/1000).values,
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/MEMP01' % (fulldir)).readlines()[1:]])[3].str.replace(',', '').astype(float)/1000).values,
2002 : (pd.read_csv('%s/serc/2002/MEMP02' % (fulldir), sep='\t').iloc[:, -1].str.replace(',', '').astype(float)/1000).values,
2003 : pd.read_csv('%s/serc/2003/MEMP03' % (fulldir)).iloc[:, -1].str.replace(',', '').astype(float).values
},
5416 : {
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/DUKE99' % (fulldir)).readlines()[4:]])[2].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/DUKE00' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/DUKE01' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/DUKE02' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/DUKE03' % (fulldir)).readlines()[5:-8]])[2].astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/DUKE04' % (fulldir)).readlines()[5:]])[2].astype(float).values
},
6411 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/FLINT93' % (fulldir)).readlines()])[6].astype(float)/1000).values,
1994 : ((pd.DataFrame([i.split() for i in open('%s/serc/1994/FLINT94' % (fulldir)).readlines()[:-1]])).iloc[:, -1].astype(float)/1000).values,
1995 : ((pd.DataFrame([i.split() for i in open('%s/serc/1995/FLINT95' % (fulldir)).readlines()[1:]]))[3].astype(float)/1000).values,
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/FLINT96' % (fulldir)).readlines()[3:-2]]))[2].astype(float).values,
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/FLINT97' % (fulldir)).readlines()[6:]]))[3].astype(float).values,
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/FLINT98' % (fulldir)).readlines()[4:]]))[2].astype(float).values,
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/FLINT99' % (fulldir)).readlines()[1:]]))[1].astype(float).values,
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/FLINT00' % (fulldir)).readlines()[2:]]))[4].astype(float).values
},
7639 : {
1993 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1993', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1993', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1994 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1994', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1994', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1995 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1995', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1995', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1996 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1996', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1996', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1997 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1997', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1997', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1998 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1998', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1998', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1999 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1999', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1999', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
2000 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='2000', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='2000', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
},
10857 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/LCEC93' % (fulldir)).readlines()[:-1]]).iloc[:, 3:].astype(float).values.ravel(),
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/LCEC94' % (fulldir)).readlines()[:-1]]).iloc[:, 3:].astype(float).values.ravel()
},
13204 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/NPL93' % (fulldir)).readlines()[6:]])[2].astype(float).values,
1994 : pd.read_fwf('%s/serc/1994/NPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
},
13994 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/OPC93' % (fulldir)).readlines()[4:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1995 : pd.DataFrame([i.split() for i in open('%s/serc/1995/OPC95' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/OPC96' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/OPC97' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/OPC98' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/OPC99' % (fulldir)).readlines()[18:]])[2].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/OPC00' % (fulldir)).readlines()[19:]])[2].astype(float).values
},
17539 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/SCEG93' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1995 : pd.DataFrame([i.split() for i in open('%s/serc/1995/SCEG95' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/SCEG96' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/SCEG97' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SCEG98' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SCEG99' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SCEG00' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/SCEG01' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values
},
17543 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/SCPS93' % (fulldir)).readlines()[:]]).iloc[:, 1:].astype(float).values.ravel(),
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/SCPS96' % (fulldir)).readlines()[:-1]]).astype(float).values.ravel(),
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/SCPS97' % (fulldir)).readlines()[1:-3]]).iloc[:, 4:-1].astype(float).values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SCPS98' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].replace('NA', '0').astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SCPS99' % (fulldir)).readlines()[1:-1]]).iloc[:, 2:-1].replace('NA', '0').astype(float).values.ravel(),
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SCPS00' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/SCPS01' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2002 : pd.read_excel('%s/serc/2002/SCPS02' % (fulldir), header=None).dropna(axis=1, how='all').iloc[:, 2:-1].values.ravel(),
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/SCPS03' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/SCPS04' % (fulldir)).readlines()[1:]]).iloc[:, 1:-1].replace('NA', '0').astype(float).values.ravel()
},
17568 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/SMEA93' % (fulldir)).readlines()[5:]])[2].astype(float)/1000).values.ravel(),
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/SMEA94' % (fulldir)).readlines()[5:]]).iloc[:, -1].astype(float)).values,
1996 : ((pd.DataFrame([i.split() for i in open('%s/serc/1996/SMEA96' % (fulldir)).readlines()[:]])).iloc[:, -24:].astype(float)/1000).values.ravel(),
1997 : pd.read_excel('%s/serc/1997/SMEA97' % (fulldir), sheetname=1, header=None, skiprows=1).iloc[:, 1:].values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SMEA98' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SMEA99' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SMEA00' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/SMEA02' % (fulldir)).readlines()[2:]])[2].astype(float).values.ravel(),
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/SMEA03' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel()
},
18642 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/TVA93' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/TVA94' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1995 : (pd.DataFrame([i.split() for i in open('%s/serc/1995/TVA95' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/TVA96' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/TVA97' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/TVA98' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/TVA99' % (fulldir)).iloc[:, 2].astype(float).values,
2000 : pd.read_excel('%s/serc/2000/TVA00' % (fulldir)).iloc[:, 2].astype(float).values,
2001 : pd.read_excel('%s/serc/2001/TVA01' % (fulldir), header=None, skiprows=3).iloc[:, 2].astype(float).values,
2003 : pd.read_excel('%s/serc/2003/TVA03' % (fulldir)).iloc[:, -1].values
},
19876 : {
1993 : pd.read_fwf('%s/serc/1993/VIEP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1994 : pd.read_fwf('%s/serc/1994/VIEP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/serc/1995/VIEP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/serc/1996/VIEP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1997 : pd.read_fwf('%s/serc/1997/VIEP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1998 : pd.read_fwf('%s/serc/1998/VIEP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1999 : pd.read_fwf('%s/serc/1999/VIEP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/VIEP00' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/VIEP01' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2002 : (pd.DataFrame([i.split() for i in open('%s/serc/2002/VIEP02' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2003 : (pd.DataFrame([i.split() for i in open('%s/serc/2003/VIEP03' % (fulldir)).readlines()[2:]])[3].astype(float)).values.ravel(),
2004 : (pd.DataFrame([i.split() for i in open('%s/serc/2004/VIEP04' % (fulldir)).readlines()[:]])[3].astype(float)).values.ravel()
},
20065 : {
1993 : pd.read_fwf('%s/serc/1993/WEMC93' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
1995 : (pd.read_csv('%s/serc/1995/WEMC95' % (fulldir), skiprows=1, header=None, sep=' ', skipinitialspace=True)[3]/1000).values,
1996 : (pd.read_excel('%s/serc/1996/WEMC96' % (fulldir))['Load']/1000).values,
1997 : pd.read_excel('%s/serc/1997/WEMC97' % (fulldir), skiprows=4)['MW'].values,
1998 : pd.concat([pd.read_excel('%s/serc/1998/WEMC98' % (fulldir), sheetname=i).iloc[:, -1] for i in range(12)]).values,
1999 : pd.read_excel('%s/serc/1999/WEMC99' % (fulldir))['mwh'].values,
2000 : (pd.read_excel('%s/serc/2000/WEMC00' % (fulldir)).iloc[:, -1]/1000).values,
2001 : (pd.read_excel('%s/serc/2001/WEMC01' % (fulldir), header=None)[0]/1000).values
},
4958 : {
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/DU99' % (fulldir)).readlines()[1:]]).iloc[:-1, 2:].apply(lambda x: x.str.replace('[,"]', '').str.strip()).astype(float)/1000).values.ravel(),
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/DU00' % (fulldir)).readlines()[1:]]).iloc[:-1, 2:].apply(lambda x: x.str.replace('[,"]', '').str.strip()).astype(float)/1000).values.ravel(),
2003 : pd.read_excel('%s/serc/2003/DU03' % (fulldir)).iloc[:, -1].values
},
924 : {
1999 : pd.read_excel('%s/serc/1999/AECI99' % (fulldir))['CALoad'].values,
2001 : pd.read_excel('%s/serc/2001/AECI01' % (fulldir)).iloc[:, -1].values,
2002 : pd.Series(pd.read_excel('%s/serc/2002/AECI02' % (fulldir), skiprows=3).loc[:, 'Jan':'Dec'].values.ravel(order='F')).dropna().values
},
402290 : {
1996 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1996/ODECD96' % (fulldir)).readlines()[3:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1997 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1997/ODECD97' % (fulldir)).readlines()[4:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1998 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1998/ODECD98' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1999 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1999/ODECD99' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/ODECD00' % (fulldir)).readlines()[3:]])[4].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/ODECD01' % (fulldir)).readlines()[3:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/ODECD02' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/ODECD03' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/ODECD04' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values
},
402291 : {
1996 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1996/ODECV96' % (fulldir)).readlines()[3:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1997 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1997/ODECV97' % (fulldir)).readlines()[4:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1998 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1998/ODECV98' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1999 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1999/ODECV99' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/ODECV00' % (fulldir)).readlines()[3:]])[4].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/ODECV01' % (fulldir)).readlines()[3:]])[4].dropna().str.replace('[N/A]', '').replace('', '0').astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/ODECV02' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/ODECV03' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/ODECV04' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values
},
195 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/APCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/APCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Alabama'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 2].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Alabama'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 2].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 2].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 1].values
},
7140 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/GPCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/GPCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).replace(np.nan, 0).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Georgia'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 3].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Georgia'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 3].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 3].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 2].values
},
7801 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/GUCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/GUCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Gulf'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 4].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Gulf'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 4].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 4].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 3].values
},
12686 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/MPCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/MPCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Mississippi'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 5].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Mississippi'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 5].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 5].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 4].values
},
16687 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/SECO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/SECO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Savannah'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 6].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Savannah'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 6].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 6].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 5].values
},
18195 : {
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['System'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 7].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Southern'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 7].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 8].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 7].values
}
}
# Merge the two 40229 sub-series (IDs 402290 and 402291) into a single
# combined utility entry keyed 40229, summing their hourly arrays per year.
serc.update({40229 : {}})
for i in serc[402290].keys():
    serc[40229][i] = serc[402290][i] + serc[402291][i]
# Zero out implausible readings (thresholds appear hand-picked per
# utility/year -- confirm against the raw filings).  Zeros are converted
# to NaN below, so these become missing-data markers.
serc[189][2001][serc[189][2001] > 2000] = 0
serc[3408][2002][serc[3408][2002] > 2000] = 0
serc[3408][2003][serc[3408][2003] > 2000] = 0
serc[7140][1999][serc[7140][1999] < 0] = 0
serc[7140][1994][serc[7140][1994] > 20000] = 0
# Write one CSV per utility ID: concatenate each year's hourly array onto
# an hourly DatetimeIndex starting Jan 1 of that year, sort, and map
# zero loads to NaN (missing data) before writing.
if not os.path.exists('./serc'):
    os.mkdir('serc')
for k in serc.keys():
    print k
    s = pd.DataFrame(pd.concat([pd.Series(serc[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(serc[k][i]))) for i in serc[k].keys()]).sort_index(), columns=['load'])
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./serc/%s.csv' % k)
###### SPP
# AECC: 807
# CAJN: 2777
# CLEC: 3265
# EMDE: 5860
# ENTR: 12506
# KCPU: 9996
# LEPA: 26253
# LUS: 9096
# GSU: 55936 <- 7806
# MPS: 12699
# OKGE: 14063
# OMPA: 14077
# PSOK: 15474
# SEPC: 18315
# WFEC: 20447
# WPEK: 20391
# CSWS: 3283
# SRGT: 40233
# GSEC: 7349
# Hourly load data for SPP member utilities, keyed by utility ID (see the
# ID legend in the comments above).  Each inner dict maps year -> 1-D
# numpy array of hourly loads parsed from that year's raw submission file.
spp = {
    807 : {
        1993 : pd.read_csv('%s/spp/1993/AECC93' % (fulldir), skiprows=6, skipfooter=1, header=None).iloc[:, -1].values,
        1994 : pd.read_csv('%s/spp/1994/AECC94' % (fulldir), skiprows=8, skipfooter=1, header=None).iloc[:, -1].values,
        1995 : pd.read_csv('%s/spp/1995/AECC95' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
        1996 : pd.read_csv('%s/spp/1996/AECC96' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
        1997 : pd.read_csv('%s/spp/1997/AECC97' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
        1998 : pd.read_csv('%s/spp/1998/AECC98' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
        1999 : pd.read_csv('%s/spp/1999/AECC99' % (fulldir), skiprows=5, skipfooter=1, header=None).iloc[:, -1].values,
        2003 : pd.read_csv('%s/spp/2003/AECC03' % (fulldir), skiprows=5, skipfooter=1, header=None).iloc[:, -2].values,
        2004 : pd.read_csv('%s/spp/2004/AECC04' % (fulldir), skiprows=5, header=None).iloc[:, -2].values
    },
    2777 : {
        1998 : pd.read_excel('%s/spp/1998/CAJN98' % (fulldir), skiprows=4).iloc[:365, 1:].values.ravel(),
        1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/CAJN99' % (fulldir)).readlines()[:]])[2].astype(float).values
    },
    3265 : {
        1994 : pd.read_fwf('%s/spp/1994/CLEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.DataFrame([i.split() for i in open('%s/spp/1996/CLEC96' % (fulldir)).readlines()[:]])[0].astype(float).values,
        1997 : pd.read_csv('%s/spp/1997/CLEC97' % (fulldir)).iloc[:, 2].str.replace(',', '').astype(float).values,
        1998 : pd.DataFrame([i.split() for i in open('%s/spp/1998/CLEC98' % (fulldir)).readlines()[:]])[1].astype(float).values,
        1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/CLEC99' % (fulldir)).readlines()[1:]]).iloc[:, 0].astype(float).values,
        2001 : pd.DataFrame([i.split() for i in open('%s/spp/2001/CLEC01' % (fulldir)).readlines()[:]])[4].replace('NA', '0').astype(float).values,
    },
    5860 : {
        1997 : pd.DataFrame([i.split() for i in open('%s/spp/1997/EMDE97' % (fulldir)).readlines()[:]])[3].astype(float).values,
        1998 : pd.DataFrame([i.split() for i in open('%s/spp/1998/EMDE98' % (fulldir)).readlines()[2:-2]])[2].astype(float).values,
        1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/EMDE99' % (fulldir)).readlines()[3:8763]])[2].astype(float).values,
        2001 : pd.read_excel('%s/spp/2001/EMDE01' % (fulldir))['Load'].dropna().values,
        2002 : pd.read_excel('%s/spp/2002/EMDE02' % (fulldir))['Load'].dropna().values,
        2003 : pd.read_excel('%s/spp/2003/EMDE03' % (fulldir))['Load'].dropna().values,
        2004 : pd.read_excel('%s/spp/2004/EMDE04' % (fulldir), skiprows=2).iloc[:8784, -1].values
    },
    12506 : {
        1994 : pd.DataFrame([i.split() for i in open('%s/spp/1994/ENTR94' % (fulldir)).readlines()[:]]).iloc[:, 1:-1].astype(float).values.ravel(),
        1995 : pd.DataFrame([i.split() for i in open('%s/spp/1995/ENTR95' % (fulldir)).readlines()[1:-2]]).iloc[:, 1:-1].astype(float).values.ravel(),
        1997 : pd.read_csv('%s/spp/1997/ENTR97' % (fulldir), header=None).iloc[:, 1:-1].astype(float).values.ravel(),
        1998 : pd.read_csv('%s/spp/1998/ENTR98' % (fulldir), header=None)[2].astype(float).values,
        1999 : pd.read_excel('%s/spp/1999/ENTR99' % (fulldir)).iloc[:, -1].values,
        2000 : pd.DataFrame([i.split() for i in open('%s/spp/2000/ENTR00' % (fulldir)).readlines()[4:]]).iloc[:, 3:].astype(float).values.ravel(),
        2001 : pd.read_fwf('%s/spp/2001/ENTR01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    9996 : {
        1994 : pd.read_fwf('%s/spp/1994/KCPU94' % (fulldir), skiprows=4, header=None).astype(str).apply(lambda x: x.str[-3:]).astype(float).values.ravel(),
        1997 : pd.read_csv('%s/spp/1997/KCPU97' % (fulldir), engine='python', header=None)[0].values,
        1998 : pd.read_csv('%s/spp/1998/KCPU98' % (fulldir), engine='python', header=None)[0].values,
        1999 : pd.read_csv('%s/spp/1999/KCPU99' % (fulldir), skiprows=1, engine='python', header=None)[0].values,
        2000 : pd.read_csv('%s/spp/2000/KCPU00' % (fulldir), engine='python', header=None)[0].values,
        2002 : pd.read_excel('%s/spp/2002/KCPU02' % (fulldir)).iloc[:, -1].values,
        2003 : pd.read_csv('%s/spp/2003/KCPU03' % (fulldir), engine='python', header=None)[0].values,
        2004 : pd.read_csv('%s/spp/2004/KCPU04' % (fulldir), engine='python', header=None)[0].values
    },
    26253 : {
        1993 : pd.read_csv('%s/spp/1993/LEPA93' % (fulldir), skiprows=3, header=None)[0].values,
        1994 : pd.read_csv('%s/spp/1994/LEPA94' % (fulldir), skiprows=3, header=None)[0].values,
        1995 : pd.read_csv('%s/spp/1995/LEPA95' % (fulldir), sep='\t', skiprows=1, header=None)[2].values,
        1996 : pd.read_csv('%s/spp/1996/LEPA96' % (fulldir), sep='\t', skiprows=1, header=None)[2].values,
        1997 : pd.read_csv('%s/spp/1997/LEPA97' % (fulldir), engine='python', header=None)[0].values,
        # NOTE(review): a dead duplicate 1998 entry (a raw DataFrame that was
        # silently overwritten by the entry below) was removed here.
        1998 : pd.Series(pd.read_csv('%s/spp/1998/LEPA98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None)[[1,3]].values.ravel(order='F')).dropna().values,
        1999 : pd.read_csv('%s/spp/1999/LEPA99' % (fulldir), sep='\t')['Load'].values,
        2001 : pd.read_csv('%s/spp/2001/LEPA01' % (fulldir), engine='python', sep='\t', header=None)[1].values,
        2002 : pd.read_csv('%s/spp/2002/LEPA02' % (fulldir), engine='python', sep='\t', header=None)[1].values,
        2003 : pd.read_excel('%s/spp/2003/LEPA03' % (fulldir), header=None)[1].values
    },
    9096 : {
        1993 : pd.DataFrame([i.split() for i in open('%s/spp/1993/LUS93' % (fulldir)).readlines()[3:-1]]).iloc[:, -1].astype(float).values,
        1994 : pd.DataFrame([i.split() for i in open('%s/spp/1994/LUS94' % (fulldir)).readlines()[3:-1]]).iloc[:, -1].astype(float).values,
        1995 : pd.DataFrame([i.split() for i in open('%s/spp/1995/LUS95' % (fulldir)).readlines()[4:-1]]).iloc[:, -1].astype(float).values,
        1996 : pd.DataFrame([i.split() for i in open('%s/spp/1996/LUS96' % (fulldir)).readlines()[4:-1]]).iloc[:, -1].astype(float).values,
        1997 : pd.DataFrame([i.split('\t') for i in open('%s/spp/1997/LUS97' % (fulldir)).readlines()[3:-2]]).iloc[:, -1].astype(float).values,
        1998 : pd.DataFrame([i.split('\t') for i in open('%s/spp/1998/LUS98' % (fulldir)).readlines()[4:]]).iloc[:, -1].astype(float).values,
        1999 : pd.DataFrame([i.split(' ') for i in open('%s/spp/1999/LUS99' % (fulldir)).readlines()[4:]]).iloc[:, -1].astype(float).values,
        2000 : pd.read_csv('%s/spp/2000/LUS00' % (fulldir), skiprows=3, skipfooter=1, header=None).iloc[:, -1].values,
        2001 : pd.read_csv('%s/spp/2001/LUS01' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
        2002 : pd.read_csv('%s/spp/2002/LUS02' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
        2003 : pd.read_csv('%s/spp/2003/LUS03' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
        2004 : pd.read_csv('%s/spp/2004/LUS04' % (fulldir), skiprows=4, header=None).iloc[:, -1].values
    },
    55936 : {
        1993 : pd.read_csv('%s/spp/1993/GSU93' % (fulldir), engine='python', header=None)[0].values
    },
    12699 : {
        1993 : pd.read_csv('%s/spp/1993/MPS93' % (fulldir), sep=' ', skipinitialspace=True)['TOTLOAD'].values,
        1996 : pd.read_excel('%s/spp/1996/MPS96' % (fulldir), skiprows=6, header=None).iloc[:, -1].values,
        1998 : pd.read_csv('%s/spp/1998/MPS98' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        2000 : pd.read_csv('%s/spp/2000/MPS00' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        2001 : pd.read_csv('%s/spp/2001/MPS01' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        2002 : pd.read_csv('%s/spp/2002/MPS02' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        2003 : pd.read_excel('%s/spp/2003/MPS03' % (fulldir)).iloc[:, 1:].values.ravel()
    },
    14063 : {
        1994 : pd.read_csv('%s/spp/1994/OKGE94' % (fulldir), header=None).iloc[:, 1:13].values.ravel()
    },
    # OMPA values are scaled by 1/1000 for 2000-2004 (presumably kW -> MW;
    # confirm against the raw filings).
    14077 : {
        1993 : pd.read_csv('%s/spp/1993/OMPA93' % (fulldir), skiprows=2, header=None, sep=' ', skipinitialspace=True, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_csv('%s/spp/1997/OMPA97' % (fulldir), engine='python', header=None)[0].values,
        1998 : pd.read_csv('%s/spp/1998/OMPA98' % (fulldir), skiprows=2, engine='python', header=None)[0].str.replace('\*', '').astype(float).values,
        2000 : pd.read_csv('%s/spp/2000/OMPA00' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
        2001 : pd.read_csv('%s/spp/2001/OMPA01' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
        2002 : pd.read_csv('%s/spp/2002/OMPA02' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
        2003 : pd.read_csv('%s/spp/2003/OMPA03' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
        2004 : pd.read_csv('%s/spp/2004/OMPA04' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000
    },
    15474 : {
        1993 : pd.read_fwf('%s/spp/1993/PSOK93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    18315 : {
        1993 : pd.read_csv('%s/spp/1993/SEPC93' % (fulldir), header=None).iloc[:, 1:].astype(str).apply(lambda x: x.str.replace('NA', '').str.strip()).replace('', '0').astype(float).values.ravel(),
        1997 : (pd.read_fwf('%s/spp/1997/SEPC97' % (fulldir), skiprows=1, header=None)[5]/1000).values,
        1999 : pd.read_csv('%s/spp/1999/SEPC99' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].str.strip().replace('#VALUE!', '0').astype(float).values,
        2000 : pd.read_csv('%s/spp/2000/SEPC00' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].apply(lambda x: 0 if len(x) > 3 else x).astype(float).values,
        2001 : pd.read_csv('%s/spp/2001/SEPC01' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].apply(lambda x: 0 if len(x) > 3 else x).astype(float).values,
        2002 : (pd.read_fwf('%s/spp/2002/SEPC02' % (fulldir), skiprows=1, header=None)[6]).str.replace('"', '').str.strip().astype(float).values,
        2004 : pd.read_csv('%s/spp/2004/SEPC04' % (fulldir), header=None, sep='\t')[5].values
    },
    20447 : {
        1993 : pd.read_csv('%s/spp/1993/WFEC93' % (fulldir)).iloc[:, 0].values,
        2000 : pd.read_csv('%s/spp/2000/WFEC00' % (fulldir), header=None, sep=' ', skipinitialspace=True)[0].values
    },
    20391 : {
        1993 : pd.DataFrame([i.split() for i in open('%s/spp/1993/WPEK93' % (fulldir)).readlines()[:]]).iloc[:365, 1:25].astype(float).values.ravel(),
        1996 : pd.read_excel('%s/spp/1996/WPEK96' % (fulldir), skiprows=2).dropna().iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/spp/1998/WPEK98' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
        2000 : pd.read_csv('%s/spp/2000/WPEK00' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
        2001 : pd.read_csv('%s/spp/2001/WPEK01' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
        2002 : pd.read_csv('%s/spp/2002/WPEK02' % (fulldir), header=None, sep=' ', skipinitialspace=True)[4].values
    },
    3283 : {
        1997 : pd.read_fwf('%s/spp/1997/CSWS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=6).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/spp/1998/CSWS98' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True, header=None)[2].values,
        1999 : pd.read_csv('%s/spp/1999/CSWS99' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[2].values,
        2000 : pd.read_csv('%s/spp/2000/CSWS00' % (fulldir), skiprows=5, sep=' ', skipinitialspace=True, header=None)[2].values
    },
    40233 : {
        2000 : pd.read_fwf('%s/spp/2000/SRGT00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2001 : pd.read_fwf('%s/spp/2001/SRGT01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
    },
    7349 : {
        1997 : pd.read_csv('%s/spp/1997/GSEC97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/spp/1998/GSEC98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_csv('%s/spp/1999/GSEC99' % (fulldir), sep='\s', skipinitialspace=True, skiprows=2, header=None)[17].dropna().values,
        2000 : pd.read_csv('%s/spp/2000/GSEC00' % (fulldir), skiprows=1, engine='python', header=None)[0].values,
        2001 : pd.DataFrame([i.split() for i in open('%s/spp/2001/GSEC01' % (fulldir)).readlines()[1:]])[0].astype(float).values,
        2002 : pd.read_csv('%s/spp/2002/GSEC02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None)[5].values,
        2003 : pd.read_csv('%s/spp/2003/GSEC03' % (fulldir), header=None)[2].values,
        2004 : (pd.read_csv('%s/spp/2004/GSEC04' % (fulldir), sep=' ', skipinitialspace=True, skiprows=1, header=None)[5]/1000).values
    }
}
# Zero out implausible readings for LUS 2003 (threshold appears
# hand-picked -- confirm against the raw filing); zeros become NaN below.
spp[9096][2003][spp[9096][2003] > 600] = 0
# Discard the KCPU 2002 and GSEC 2003 series entirely by replacing them
# with all-NaN arrays of the same length (treated as missing data).
spp[9996][2002] = np.repeat(np.nan, len(spp[9996][2002]))
spp[7349][2003] = np.repeat(np.nan, len(spp[7349][2003]))
# Write one CSV per utility ID: concatenate each year's hourly array onto
# an hourly DatetimeIndex starting Jan 1 of that year, sort, and map
# zero loads to NaN (missing data) before writing.
if not os.path.exists('./spp'):
    os.mkdir('spp')
for k in spp.keys():
    print k
    s = pd.DataFrame(pd.concat([pd.Series(spp[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(spp[k][i]))) for i in spp[k].keys()]).sort_index(), columns=['load'])
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./spp/%s.csv' % k)
###### MAPP
# CIPC: 3258
# CP: 4322
# CBPC: 4363
# DPC: 4716
# HUC: 9130
# IES: 9219
# IPW: 9417 <- 9392
# IIGE: 9438
# LES: 11018
# MPL: 12647
# MPC: 12658
# MDU: 12819
# MEAN: 21352
# MPW: 13143
# NPPD: 13337
# NSP: 13781
# NWPS: 13809
# OPPD: 14127
# OTP: 14232
# SMMP: 40580
# UPA: 19514
# WPPI: 20858
# MEC: 12341 <- 9435
# CPA: 4322
# MWPS: 23333
mapp = {
3258 : {
1998 : pd.read_fwf('%s/mapp/1998/CIPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
4322 : {
1993 : pd.read_fwf('%s/mapp/1993/CP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/CP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/CP96' % (fulldir), header=None).iloc[:, 2:].values.ravel()
},
4363 : {
1993 : pd.read_fwf('%s/mapp/1993/CBPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/CBPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/CBPC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/CBPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/CBPC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/CB02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
4716 : {
1993 : pd.read_fwf('%s/mapp/1993/DPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/DPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_csv('%s/mapp/1996/DPC96' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 6:].values.ravel()
},
9130 : {
1993 : pd.read_fwf('%s/mapp/1993/HUC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/HUC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/HUC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/HUC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/HUC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/HUC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/HUC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/HUC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9219 : {
1993 : pd.read_fwf('%s/mapp/1993/IESC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/IES94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/IESC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/IES97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/IESC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9417 : {
1993 : pd.read_fwf('%s/mapp/1993/IPW93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/IPW94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/IPW95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/IPW96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/IPW97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/IPW98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9438 : {
1993 : pd.read_fwf('%s/mapp/1993/IIGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/IIGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/IIGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
11018 : {
1993 : pd.read_fwf('%s/mapp/1993/LES93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/LES94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_csv('%s/mapp/1995/LES95' % (fulldir)).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/LES96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/LES97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/LES98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/LES99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_excel('%s/mapp/2000/LES00' % (fulldir), skipfooter=3).iloc[:, 1:].values.ravel(),
2001 : pd.read_excel('%s/mapp/2001/LES01' % (fulldir), skipfooter=3).iloc[:, 1:].values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/LES02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/LES03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
12647 : {
1995 : pd.read_fwf('%s/mapp/1995/MPL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_fwf('%s/mapp/2000/MPL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2001 : pd.read_fwf('%s/mapp/2001/MPL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
12658 : {
1993 : pd.read_fwf('%s/mapp/1993/MPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/MPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MPC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MPC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MPC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MPC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MPC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MPC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
12819 : {
1993 : pd.read_fwf('%s/mapp/1993/MDU93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/MDU94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MDU95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MDU96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MDU97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MDU98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MDU99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MDU02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MDU03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
21352 : {
1993 : pd.read_fwf('%s/mapp/1993/MEAN93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MEAN95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MEAN96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MEAN97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MEAN98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MEAN99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MEAN02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MEAN03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13143 : {
1993 : pd.read_fwf('%s/mapp/1993/MPW93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/MPW94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MPW95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MPW96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MPW97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MPW98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MPW99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MPW02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MPW03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13337 : {
1993 : pd.read_fwf('%s/mapp/1993/NPPD93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/NPPD94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/NPPD95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=6).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/NPPD96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/NPPD97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/NPPD98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/NPPD99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_fwf('%s/mapp/2000/NPPD00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=9, skipfooter=1).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2001 : pd.read_fwf('%s/mapp/2001/NPPD01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=9, skipfooter=1).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_csv('%s/mapp/2002/NPPD02' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/NPPD03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13781 : {
1993 : pd.read_fwf('%s/mapp/1993/NSP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/NSP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/NSP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/NSP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/NSP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/NSP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_csv('%s/mapp/2000/NSP00' % (fulldir), sep='\t', skipinitialspace=True, skiprows=2, header=None, skipfooter=1)[2].values
},
13809 : {
1993 : pd.read_fwf('%s/mapp/1993/NWPS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/NWPS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/NWPS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/NWPS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/NWPS98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/NWPS99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/NWPS02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/NWPS03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
14127 : {
1993 : pd.read_fwf('%s/mapp/1993/OPPD93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/OPPD94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_csv('%s/mapp/1995/OPPD95' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 7:].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/OPPD96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/OPPD97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/OPPD98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/OPPD99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/OPPD02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/OPPD03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
14232 : {
1993 : pd.read_fwf('%s/mapp/1993/OTP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/OTP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_csv('%s/mapp/1995/OTP95' % (fulldir), header=None).iloc[:, -2].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/OTP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/OTP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/OTP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/OTP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_fwf('%s/mapp/2000/OTP00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/OTP02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/OTP03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
40580 : {
1993 : pd.read_fwf('%s/mapp/1993/SMMP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/SMP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/SMMP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/SMMP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/SMMP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/SMMPA99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_csv('%s/mapp/2000/SMMP00' % (fulldir)).iloc[:-1, 3].values,
2001 : pd.read_csv('%s/mapp/2001/SMMP01' % (fulldir), header=None).iloc[:, 2].values,
2002 : pd.read_fwf('%s/mapp/2002/SMMPA02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/SMMPA03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
19514 : {
1993 : pd.read_fwf('%s/mapp/1993/UPA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/UPA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/UPA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/UPA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/UPA98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
20858 : {
1993 : pd.read_fwf('%s/mapp/1993/WPPI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/WPPI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/WPPI96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_csv('%s/mapp/1997/WPPI97' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:-1].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/WPPI98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/WPPI99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/WPPI02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/WPPI03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
12341 : {
1995 : pd.read_fwf('%s/mapp/1995/MEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MEC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MEC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_fwf('%s/mapp/2000/MEC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MEC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MEC_ALL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
4322 : {
1993 : pd.read_fwf('%s/mapp/1993/CP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/CP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/CP96' % (fulldir), header=None).iloc[:, 2:].values.ravel()
},
23333 : {
1993 : pd.read_fwf('%s/mapp/1993/MPSI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/MPSI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MPSI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
}
}
# Patch known-bad series before export:
# - 20858 (WPPI) 1997: blanked with NaN -- presumably invalid source data; TODO confirm
# - 21352 1995: negative load readings clamped to zero
# - 40580 (SMMP) 2000: blanked with NaN -- presumably invalid source data; TODO confirm
mapp[20858][1997] = np.repeat(np.nan, len(mapp[20858][1997]))
mapp[21352][1995][mapp[21352][1995] < 0] = 0
mapp[40580][2000] = np.repeat(np.nan, len(mapp[40580][2000]))
# Write one CSV per utility code: a single hourly 'load' column indexed by timestamp.
if not os.path.exists('./mapp'):
    os.mkdir('mapp')
for k in mapp.keys():
    print k
    # Each year's flat hourly array is placed on an hourly DatetimeIndex starting
    # Jan 1 of that year; all years are concatenated and sorted chronologically.
    s = pd.DataFrame(pd.concat([pd.Series(mapp[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(mapp[k][i]))) for i in mapp[k].keys()]).sort_index(), columns=['load'])
    # Zero readings are treated as missing data.
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./mapp/%s.csv' % k)
#################################
# WECC
#################################
import numpy as np
import pandas as pd
import os
import re
import datetime
import time
import pysal as ps
homedir = os.path.expanduser('~')
#basepath = '/home/akagi/Documents/EIA_form_data/wecc_form_714'
basepath = '%s/github/RIPS_kircheis/data/eia_form_714/active' % (homedir)
# Relative path (under basepath) to each year's raw WECC/WSCC FERC Form 714
# submission.  2006 points at the consolidated CSV database covering 2006-2013
# rather than a directory of per-utility .dat files.
path_d = {
    1993: '93WSCC1/WSCC',
    1994: '94WSCC1/WSCC1994',
    1995: '95WSCC1',
    1996: '96WSCC1/WSCC1996',
    1997: '97wscc1',
    1998: '98WSCC1/WSCC1',
    1999: '99WSCC1/WSCC1',
    2000: '00WSCC1/WSCC1',
    2001: '01WECC/WECC01/wecc01',
    2002: 'WECCONE3/WECC One/WECC2002',
    2003: 'WECC/WECC/WECC ONE/wecc03',
    2004: 'WECC_2004/WECC/WECC One/ferc',
    2006: 'form714-database_2006_2013/form714-database/Part 3 Schedule 2 - Planning Area Hourly Demand.csv'
}
#### GET UNIQUE UTILITIES AND UTILITIES BY YEAR
# Data files are named like 'APS93.DAT': lowercase, strip the extension and the
# trailing two year digits to recover the utility abbreviation.  2006 is skipped
# because that era ships as one consolidated CSV rather than per-utility files.
u_by_year = {}
for d in path_d:
    if d != 2006:
        full_d = basepath + '/' + path_d[d]
        l = [i.lower().split('.')[0][:-2] for i in os.listdir(full_d) if i.lower().endswith('dat')]
        u_by_year.update({d : sorted(l)})
# every abbreviation observed in any year
unique_u = np.unique(np.concatenate([np.array(i) for i in u_by_year.values()]))
#### GET EIA CODES OF WECC UTILITIES
# Per-year README file (relative to basepath).  Each README contains a table
# mapping data-file names to respondent codes and utility names.
rm_d = {1993: {'rm': '93WSCC1/README2'},
        1994: {'rm': '94WSCC1/README.TXT'},
        1995: {'rm': '95WSCC1/README.TXT'},
        1996: {'rm': '96WSCC1/README.TXT'},
        1997: {'rm': '97wscc1/README.TXT'},
        1998: {'rm': '98WSCC1/WSCC1/part.002'},
        1999: {'rm': '99WSCC1/WSCC1/README.TXT'},
        2000: {'rm': '00WSCC1/WSCC1/README.TXT'},
        2001: {'rm': '01WECC/WECC01/wecc01/README.TXT'},
        2002: {'rm': 'WECCONE3/WECC One/WECC2002/README.TXT'},
        2003: {'rm': 'WECC/WECC/WECC ONE/wecc03/README.TXT'},
        2004: {'rm': 'WECC_2004/WECC/WECC One/ferc/README.TXT'}}
# Locate the utility table inside each year's README: record the line index of
# the table header ('op' -> line containing "FILE NAME") and of the trailing
# note ('ed' -> line containing both "FERC" and "not").  When a string occurs
# on several lines, the last occurrence wins, as in the original loop.
for d in rm_d.keys():
    fn = basepath + '/' + rm_d[d]['rm']
    with open(fn, 'r') as f:
        r = f.readlines()
    for i, line in enumerate(r):
        if 'FILE NAME' in line:
            rm_d[d].update({'op': i})
        # BUG FIX: the original condition was `'FERC' and 'not' in r[i]`, which
        # parses as `'FERC' and ('not' in r[i])`; the non-empty string 'FERC'
        # is always truthy, so only the 'not' membership test was applied.
        # Require both substrings explicitly.
        if 'FERC' in line and 'not' in line:
            rm_d[d].update({'ed': i})
# Resolve each utility abbreviation to its respondent code and display name by
# scanning the yearly READMEs for a table row like
#   "  APS93.DAT  803  Arizona Public Service".
# The first matching row found (scanning years in dict order) wins; both inner
# loops break out as soon as the abbreviation has been resolved.
unique_u_ids = {}
for u in unique_u:
    # e.g. for u == 'aps' match lines beginning 'APS93.DAT', 'aps97.dat', ...
    regex = re.compile('^ *%s\d\d.dat' % u, re.IGNORECASE)
    for d in rm_d.keys():
        fn = basepath + '/' + rm_d[d]['rm']
        f = open(fn, 'r')
        r = f.readlines() #[rm_d[d]['op']:rm_d[d]['ed']]
        f.close()
        for line in r:
            result = re.search(regex, line)
            if result:
                # print line
                # second whitespace-separated token is the respondent code;
                # everything after the code is the utility's name
                code = line.split()[1]
                nm = line.split(code)[1].strip()
                unique_u_ids.update({u : {'code':code, 'name':nm}})
                break
            else:
                continue
        # stop scanning further years once this utility has been resolved
        if u in unique_u_ids:
            break
        else:
            continue
#id_2006 = pd.read_csv('/home/akagi/Documents/EIA_form_data/wecc_form_714/form714-database_2006_2013/form714-database/Respondent IDs.csv')
# Crosswalk from EIA code to FERC Form 714 respondent id for the 2006-2013 era.
id_2006 = pd.read_csv('%s/form714-database_2006_2013/form714-database/Respondent IDs.csv' % (basepath))
id_2006 = id_2006.drop_duplicates('eia_code').set_index('eia_code').sort_index()
# Codes scraped from the READMEs; '*' marks utilities without a numeric code.
ui = pd.DataFrame.from_dict(unique_u_ids, orient='index')
ui = ui.loc[ui['code'] != '*'].drop_duplicates('code')
ui['code'] = ui['code'].astype(int)
ui = ui.set_index('code')
# Align both tables on the numeric code; keep only utilities present in both.
eia_to_r = pd.concat([ui, id_2006], axis=1).dropna()
# util = {
# 'aps' : 803,
# 'srp' : 16572,
# 'ldwp' : 11208
# }
# util_2006 = {
# 'aps' : 116,
# 'srp' : 244,
# 'ldwp' : 194
# }
#resp_ids = '/home/akagi/Documents/EIA_form_data/wecc_form_714/form714-database_2006_2013/form714-database/Respondent IDs.csv'
resp_ids = '%s/form714-database_2006_2013/form714-database/Respondent IDs.csv' % (basepath)
df_path_d = {}
df_d = {}
# build_paths()/build_df() are defined elsewhere in this script -- TODO confirm:
# they appear to populate df_path_d/df_d and assemble one hourly dataframe per
# utility abbreviation.
build_paths()
#### Southern California Edison part of CAISO in 2006-2013: resp id 125
if not os.path.exists('./wecc'):
    os.mkdir('wecc')
# Write one CSV per utility: named by its numeric respondent code when one was
# resolved from the READMEs, otherwise by the raw file abbreviation.
for x in unique_u:
    out_df = build_df(x)
    if x in unique_u_ids.keys():
        if str.isdigit(unique_u_ids[x]['code']):
            out_df.to_csv('./wecc/%s.csv' % unique_u_ids[x]['code'])
        else:
            out_df.to_csv('./wecc/%s.csv' % x)
    else:
        out_df.to_csv('./wecc/%s.csv' % x)
#################################
from itertools import chain
# Inventory the CSVs written into the per-region output directories beneath the
# current directory and extract the numeric (EIA-code) part of their names.
li = []
for fn in os.listdir('.'):
    li.append(os.listdir('./%s' % (fn)))
s = pd.Series(list(chain(*li)))
s = s.str.replace('\.csv', '')
u = s[s.str.contains('\d+')].str.replace('[^\d]', '').astype(int).unique()
homedir = os.path.expanduser('~')
rid = pd.read_csv('%s/github/RIPS_kircheis/data/eia_form_714/active/form714-database/form714-database/Respondent IDs.csv' % homedir)
# Respondents with a real (nonzero) EIA code...
ridu = rid[rid['eia_code'] != 0]
# ...that have no output CSV yet.  NOTE(review): the result of this expression
# is discarded -- it looks like it was meant for interactive inspection.
ridu[~ridu['eia_code'].isin(u)]
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
198,
71,
12657,
343,
796,
28686,
13,
6978,
13,
11201,
392,
7220,
10786,
93,
11537,
198,
19608,
324,
343,
796,
705,
12... | 1.91709 | 99,988 |
# Smoke checks for is_power (defined elsewhere in this file), covering an exact
# power, a non-power, the 1/0 edge cases, and negative base/exponent pairs.
print (is_power(16, 2))
print (is_power(17, 2))
print (is_power(1, 1))
print (is_power(0, 0))
print (is_power(-8 , -2))
print (is_power(-27, -3))
| [
198,
4798,
357,
271,
62,
6477,
7,
1433,
11,
362,
4008,
198,
4798,
357,
271,
62,
6477,
7,
1558,
11,
362,
4008,
198,
4798,
357,
271,
62,
6477,
7,
16,
11,
352,
4008,
198,
4798,
357,
271,
62,
6477,
7,
15,
11,
657,
4008,
198,
4798,... | 2.144928 | 69 |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs an automated Cronet performance benchmark.
This script:
1. Sets up "USB reverse tethering" which allow network traffic to flow from
an Android device connected to the host machine via a USB cable.
2. Starts HTTP and QUIC servers on the host machine.
3. Installs an Android app on the attached Android device and runs it.
4. Collects the results from the app.
Prerequisites:
1. A rooted (i.e. "adb root" succeeds) Android device connected via a USB cable
to the host machine (i.e. the computer running this script).
2. quic_server has been built for the host machine, e.g. via:
gn gen out/Release --args="is_debug=false"
ninja -C out/Release quic_server
3. cronet_perf_test_apk has been built for the Android device, e.g. via:
./components/cronet/tools/cr_cronet.py gn -r
ninja -C out/Release cronet_perf_test_apk
4. If "sudo ufw status" doesn't say "Status: inactive", run "sudo ufw disable".
5. sudo apt-get install lighttpd
6. If the usb0 interface on the host keeps losing it's IPv4 address
(WaitFor(HasHostAddress) will keep failing), NetworkManager may need to be
told to leave usb0 alone with these commands:
sudo bash -c "printf \"\\n[keyfile]\
\\nunmanaged-devices=interface-name:usb0\\n\" \
>> /etc/NetworkManager/NetworkManager.conf"
sudo service network-manager restart
Invocation:
./run.py
Output:
Benchmark timings are output by telemetry to stdout and written to
./results.html
"""
import json
import optparse
import os
import shutil
import sys
import tempfile
import time
import urllib
# Walk five directory levels up from this file to reach the repository root.
REPOSITORY_ROOT = os.path.abspath(os.path.join(
    os.path.dirname(__file__), '..', '..', '..', '..', '..'))
# Make telemetry, the Android build tooling and component helpers importable.
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools', 'perf'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build', 'android'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'components'))
# pylint: disable=wrong-import-position
from chrome_telemetry_build import chromium_config
from devil.android import device_utils
from devil.android.sdk import intent
from core import benchmark_runner
from cronet.tools import android_rndis_forwarder
from cronet.tools import perf_test_utils
import lighttpd_server
from pylib import constants
from telemetry import android
from telemetry import benchmark
from telemetry import story
from telemetry.web_perf import timeline_based_measurement
# pylint: enable=wrong-import-position
# Android AppStory implementation wrapping CronetPerfTest app.
# Launches Cronet perf test app and waits for execution to complete
# by waiting for presence of DONE_FILE.
# For now AndroidStory's SharedAppState works only with
# TimelineBasedMeasurements, so implement one that just forwards results from
# Cronet perf test app.
# Entry point: main() is defined elsewhere in this file.
if __name__ == '__main__':
  main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
1853,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,... | 3.19893 | 935 |
# -*- coding: utf-8 -*-
import requests
import os
from threading import Thread
import website
import ai_request
import speech_recognition
import json
# Build the recognizer and calibrate it once against ambient room noise.
recognizer = speech_recognition.Recognizer()
with speech_recognition.Microphone() as source1:
    recognizer.adjust_for_ambient_noise(source1)
# Run the web UI in a background thread (startWebsite is defined elsewhere in
# this file) while the main thread blocks in the wake loop.
websiteThread = Thread(target=startWebsite)
websiteThread.start()
# Presumably blocks listening for the wake word -- TODO confirm it returns.
waitForBarvis()
#websiteThread.join()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
7007,
198,
11748,
28686,
198,
6738,
4704,
278,
1330,
14122,
198,
11748,
3052,
198,
11748,
257,
72,
62,
25927,
198,
11748,
4046,
62,
26243,
653,
198,
11748,
3... | 3.10687 | 131 |
from six.moves import http_client
from cloudframe.common import job
import logging
import time
LOG = logging.getLogger(__name__)
| [
198,
6738,
2237,
13,
76,
5241,
1330,
2638,
62,
16366,
198,
6738,
6279,
14535,
13,
11321,
1330,
1693,
198,
11748,
18931,
198,
11748,
640,
198,
198,
25294,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
628,
628
] | 3.317073 | 41 |
import csv
import os
import re
import subprocess
from mlperf.clustering.tools import dumpDataOnCleanCsv
from mlperf.tools.config import MATLAB_EXE, TEMPFOLDER, JAVA_EXE, R_BIN
from mlperf.tools.static import datasetOutFile, MATLAB_ALGO, matlabRedirectTempFolder, WEKA_ALGO, JAVA_CLASSPATH, \
SKLEARN_ALGO, R_ALGO, SHOGUN_ALGO
| [
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
850,
14681,
198,
198,
6738,
25962,
525,
69,
13,
565,
436,
1586,
13,
31391,
1330,
10285,
6601,
2202,
32657,
34,
21370,
198,
6738,
25962,
525,
69,
13,
31391,
13,
11250,
... | 2.507576 | 132 |
import array
import hashlib
import json
import os.path
import ctypes
from ctypes import *
import utils
# Shared logger for this test module.
logger = utils.get_logger('test_02_utils')
# Handle to the shared C library under test (load_shared_library is defined
# elsewhere in this file).
my_lib = load_shared_library()
| [
11748,
7177,
198,
11748,
12234,
8019,
198,
11748,
33918,
198,
11748,
28686,
13,
6978,
198,
11748,
269,
19199,
198,
6738,
269,
19199,
1330,
1635,
198,
11748,
3384,
4487,
198,
198,
6404,
1362,
796,
3384,
4487,
13,
1136,
62,
6404,
1362,
10... | 2.983333 | 60 |
#!/usr/bin/python
# -----------------------------------------------------------------------------
# Name: VHDL instantiation script
# Purpose: Using with VIM
#
# Author: BooZe
#
# Created: 25.03.2013
# Copyright: (c) BooZe 2013
# Licence: BSD
# -----------------------------------------------------------------------------
import re
import sys
# Entry point: forward argv to the CLI handler (command_line_interface is
# defined elsewhere in this file).
if __name__ == "__main__":
    command_line_interface(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
16529,
32501,
198,
2,
6530,
25,
220,
220,
220,
220,
220,
220,
220,
569,
10227,
43,
9113,
3920,
4226,
198,
2,
32039,
25,
220,
8554,
351,
569,
3955,
198,
2,
198,
2,
6434,
25,
220,
220,... | 3.345865 | 133 |
from qtpy.QtCore import QPoint, Qt
from qtpy.QtGui import QCursor
from qtpy.QtWidgets import QApplication, QDialog, QHBoxLayout, QLayout, QWidget
from . import helpers as hp
class QtDialog(QDialog):
    """Dialog base class.

    Subclasses implement `make_panel` to supply their layout and call
    `make_gui` to install it.  The ``show_*`` helpers position the popup
    relative to a widget or to the mouse cursor before showing it.
    """

    _icons = None
    # main layout installed by `make_gui`
    _main_layout = None

    def on_close(self):
        """Close window"""
        self.close()

    def _on_teardown(self):
        """Execute just before deletion; subclasses may override."""

    def closeEvent(self, event):
        """Close event: run teardown hooks, then defer to Qt."""
        self._on_teardown()
        return super().closeEvent(event)

    def make_panel(self) -> QLayout:
        """Make panel; must be provided by subclasses."""
        ...

    def make_gui(self):
        """Make and arrange main panel"""
        # make panel
        layout = self.make_panel()
        if layout is None:
            raise ValueError("Expected layout")
        # pack element
        self.setLayout(layout)
        self._main_layout = layout

    def show_above_widget(self, widget: QWidget, show: bool = True, y_offset: int = 14):
        """Show popup dialog above the widget"""
        rect = widget.rect()
        # BUG FIX: QPoint requires integer coordinates; `width() / 2` yields a
        # float under Python 3, so use floor division in all these helpers.
        pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() // 2, rect.top()))
        sz_hint = self.size()
        pos -= QPoint(sz_hint.width() // 2, sz_hint.height() + y_offset)
        self.move(pos)
        if show:
            self.show()

    def show_above_mouse(self, show: bool = True):
        """Show popup dialog above the mouse cursor position."""
        pos = QCursor().pos()  # mouse position
        sz_hint = self.sizeHint()
        pos -= QPoint(sz_hint.width() // 2, sz_hint.height() + 14)
        self.move(pos)
        if show:
            self.show()

    def show_below_widget(self, widget: QWidget, show: bool = True, y_offset: int = 14):
        """Show popup dialog below the widget"""
        rect = widget.rect()
        pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() // 2, rect.top()))
        sz_hint = self.size()
        pos -= QPoint(sz_hint.width() // 2, -y_offset)
        self.move(pos)
        if show:
            self.show()

    def show_below_mouse(self, show: bool = True):
        """Show popup dialog below the mouse cursor position."""
        pos = QCursor().pos()  # mouse position
        sz_hint = self.sizeHint()
        pos -= QPoint(sz_hint.width() // 2, -14)
        self.move(pos)
        if show:
            self.show()

    def show_right_of_widget(self, widget: QWidget, show: bool = True, x_offset: int = 14):
        """Show popup dialog to the right of the widget"""
        rect = widget.rect()
        pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() // 2, rect.top()))
        sz_hint = self.size()
        pos -= QPoint(-x_offset, sz_hint.height() // 4)
        self.move(pos)
        if show:
            self.show()

    def show_right_of_mouse(self, show: bool = True):
        """Show popup dialog on the right hand side of the mouse cursor position"""
        pos = QCursor().pos()  # mouse position
        sz_hint = self.sizeHint()
        pos -= QPoint(-14, sz_hint.height() // 4)
        self.move(pos)
        if show:
            self.show()

    def show_left_of_widget(self, widget: QWidget, show: bool = True, x_offset: int = 14):
        """Show popup dialog to the left of the widget"""
        rect = widget.rect()
        pos = widget.mapToGlobal(QPoint(rect.left(), rect.top()))
        sz_hint = self.size()
        # BUG FIX: x_offset was ignored (the offset was hard-coded to 14, the
        # parameter's default, so default behavior is unchanged).
        pos -= QPoint(sz_hint.width() + x_offset, sz_hint.height() // 4)
        self.move(pos)
        if show:
            self.show()

    def show_left_of_mouse(self, show: bool = True):
        """Show popup dialog on the left hand side of the mouse cursor position"""
        pos = QCursor().pos()  # mouse position
        sz_hint = self.sizeHint()
        pos -= QPoint(sz_hint.width() + 14, sz_hint.height() // 4)
        self.move(pos)
        if show:
            self.show()
class QtFramelessPopup(QtDialog):
    """Frameless dialog that can be repositioned by dragging.

    Dragging is initiated either from a dedicated move-handle widget (when
    one has been created via `_make_move_handle`) or, in its absence, with
    the right mouse button anywhere on the dialog.
    """

    # drag state: last press position and the optional drag-handle widget
    _old_window_pos, _move_handle = None, None

    def _make_move_handle(self) -> QHBoxLayout:
        """Create the move-handle label and return a layout holding it."""
        handle = hp.make_qta_label(
            self,
            "move",
            tooltip="Click here and drag the mouse around to move the window.",
        )
        handle.setCursor(Qt.PointingHandCursor)
        self._move_handle = handle
        row = QHBoxLayout()
        row.addStretch(1)
        row.addWidget(handle)
        return row

    def mousePressEvent(self, event):
        """Record the press position if this press may start a window drag."""
        super().mousePressEvent(event)
        if self._move_handle is None:
            # without a handle, only a right-button press starts a drag
            is_drag = event.button() == Qt.RightButton
            self._old_window_pos = (event.x(), event.y()) if is_drag else None
        elif self.childAt(event.pos()) == self._move_handle:
            # with a handle, dragging starts only on the handle itself
            self._old_window_pos = (event.x(), event.y())

    def mouseMoveEvent(self, event):
        """Follow the mouse with the window while a drag is in progress."""
        super().mouseMoveEvent(event)
        origin = self._old_window_pos
        if origin is not None:
            self.move(event.globalX() - origin[0], event.globalY() - origin[1])  # noqa

    def mouseReleaseEvent(self, event):
        """End any drag in progress when the mouse button is released."""
        super().mouseReleaseEvent(event)
        self._old_window_pos = None
class QtFramelessTool(QtFramelessPopup):
"""Frameless dialog that stays on top"""
| [
6738,
10662,
83,
9078,
13,
48,
83,
14055,
1330,
1195,
12727,
11,
33734,
198,
6738,
10662,
83,
9078,
13,
48,
83,
8205,
72,
1330,
36070,
21471,
198,
6738,
10662,
83,
9078,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
44204... | 2.275695 | 2,481 |
# Enumerate every coordinate triple [i, j, k] on the integer grid
# 0..x, 0..y, 0..z whose coordinate sum differs from n.
# Cleanup: the original assigned x, y and n twice and kept the superseded
# nested-append loop as commented-out dead code; both removed.
x = 2
y = 2
z = 2
n = 2
lst = [[i, j, k] for i in range(x + 1) for j in range(y + 1) for k in range(z + 1) if i + j + k != n]
print(lst)
87,
796,
362,
220,
198,
88,
796,
362,
198,
77,
796,
362,
628,
198,
2,
610,
796,
17635,
220,
198,
2,
279,
796,
657,
220,
198,
2,
329,
1312,
287,
2837,
357,
2124,
1343,
352,
1267,
1058,
220,
198,
2,
220,
220,
220,
220,
329,
474,... | 1.682243 | 214 |
from typing import Dict, List, Optional
from django.http import HttpRequest, HttpResponse
from zerver.lib.bot_storage import (
StateError,
get_bot_storage,
get_keys_in_bot_storage,
remove_bot_storage,
set_bot_storage,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import check_dict, check_list, check_string
from zerver.models import UserProfile
@has_request_variables
@has_request_variables
@has_request_variables
| [
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
18453,
11,
367,
29281,
31077,
198,
198,
6738,
1976,
18497,
13,
8019,
13,
13645,
62,
35350,
1330,
357,
198,
220,
220,
220,
1812... | 3 | 192 |
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configurable strategy implementation."""
from typing import Callable, List, Optional, Tuple
from flower.typing import Weights
from .aggregate import aggregate, weighted_loss_avg
from .strategy import Strategy
class DefaultStrategy(Strategy):
    """Configurable default strategy (FedAvg-style sampling and aggregation)."""

    # pylint: disable-msg=too-many-arguments
    def __init__(
        self,
        fraction_fit: float = 0.1,
        fraction_eval: float = 0.1,
        min_fit_clients: int = 1,
        min_eval_clients: int = 1,
        min_available_clients: int = 1,
        eval_fn: Optional[Callable[[Weights], Optional[Tuple[float, float]]]] = None,
    ) -> None:
        """Constructor."""
        super().__init__()
        self.fraction_fit = fraction_fit
        self.fraction_eval = fraction_eval
        self.min_fit_clients = min_fit_clients
        self.min_eval_clients = min_eval_clients
        self.min_available_clients = min_available_clients
        self.eval_fn = eval_fn

    def should_evaluate(self) -> bool:
        """Ask clients to evaluate only when no centralized eval_fn was given."""
        return self.eval_fn is None

    def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:
        """Sample a fraction of the available clients for training.

        Returns a ``(sample_size, min_available_clients)`` pair; the sample is
        never smaller than ``min_fit_clients``.
        """
        sample_size = int(num_available_clients * self.fraction_fit)
        if sample_size < self.min_fit_clients:
            sample_size = self.min_fit_clients
        return sample_size, self.min_available_clients

    def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:
        """Sample a fraction of the available clients for evaluation.

        Returns a ``(sample_size, min_available_clients)`` pair; the sample is
        never smaller than ``min_eval_clients``.
        """
        sample_size = int(num_available_clients * self.fraction_eval)
        if sample_size < self.min_eval_clients:
            sample_size = self.min_eval_clients
        return sample_size, self.min_available_clients

    def evaluate(self, weights: Weights) -> Optional[Tuple[float, float]]:
        """Centrally evaluate weights via eval_fn; None when no eval_fn is set."""
        if self.eval_fn is not None:
            return self.eval_fn(weights)
        return None

    def on_aggregate_fit(
        self, results: List[Tuple[Weights, int]], failures: List[BaseException]
    ) -> Optional[Weights]:
        """Combine fit results with an example-count-weighted average (FedAvg)."""
        return aggregate(results)

    def on_aggregate_evaluate(
        self, results: List[Tuple[int, float]], failures: List[BaseException]
    ) -> Optional[float]:
        """Combine per-client evaluation losses with a weighted average."""
        return weighted_loss_avg(results)
| [
2,
15069,
12131,
1215,
499,
402,
2022,
39,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 2.817368 | 1,117 |
# Brazilian league (Brasileirão) standings, ordered from first to last place.
times = ('Internacional', 'São Paulo', 'Flamengo', 'Atlético-MG', 'Palmeiras', 'Grêmio', 'Fluminense', 'Ceará',
         'Santos', 'Corinthians', 'Bragantino', 'Athletico', 'Atlético-GO', 'Sport', 'Vasco', 'Fortaleza', 'Bahia',
         'Goiás', 'Coritiba', 'Botafogo')

# The prints below were originally wrapped in a `while True: ... break` loop
# that executed exactly once; the useless loop was removed (output unchanged).
print()
print(f'Os 5 primeiros colocados são: {times[0:5]}')
print()
print(f'Os 4 últimos colocados são: {times[16:]}')
print()
print(f'Times: {sorted(times)}')
print()
print(f'O Bragantino está na posição: {times.index("Bragantino")+1}')
| [
2,
309,
9608,
64,
466,
39452,
576,
343,
28749,
201,
198,
22355,
796,
19203,
15865,
330,
1538,
3256,
705,
50,
28749,
34410,
3256,
705,
37,
2543,
1516,
78,
3256,
705,
25255,
25125,
3713,
12,
20474,
3256,
705,
11531,
1326,
343,
292,
3256... | 2.065068 | 292 |
from __future__ import absolute_import
import os.path as osp
import appdirs
from blazeutils.helpers import tolist
import flask
from pathlib import PurePath
import six
from werkzeug.utils import (
import_string,
ImportStringError
)
from keg.utils import app_environ_get, pymodule_fpaths_to_objects
# Short module-level alias so callers can write `config.substitute` instead of
# the full class name.
# NOTE(review): `SubstituteValue` is not visible in this excerpt -- presumably
# defined earlier in the file; confirm before relying on this alias.
substitute = SubstituteValue
# The following three classes are default configuration profiles
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
198,
11748,
598,
15908,
82,
198,
6738,
31259,
26791,
13,
16794,
364,
1330,
284,
4868,
198,
11748,
42903,
198,
6738,
3108,
8019,
1330,... | 3.368852 | 122 |
import tensorflow as tf
import numpy as np
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, LeakyReLU
from rdcnet.layers.nd_layers import get_nd_conv, get_nd_spatial_dropout, get_nd_conv_transposed
from rdcnet.layers.padding import DynamicPaddingLayer, DynamicTrimmingLayer
from rdcnet.layers.stacked_dilated_conv import StackedDilatedConv
def delta_loop(output_channels, recurrent_block, n_steps=3):
    '''Recursively applies a given block to refine its output.

    Args:
        output_channels: number of output channels.
        recurrent_block: a network taking (input_channels + output_channels) as
            input and outputting output_channels
        n_steps: number of times the block is applied

    '''
    # NOTE(review): `block` is not defined in this excerpt -- presumably a
    # nested callable (defined here in the full source) that applies
    # `recurrent_block` n_steps times to refine its own output; confirm
    # against the complete file.
    return block
def rdc_block(n_groups=16,
              dilation_rates=(1, 2, 4, 8, 16),
              channels_per_group=32,
              k_size=3,
              spatial_dims=2,
              dropout=0.1):
    '''Grouped conv with stacked dilated conv in each group and pointwise convolution for mixing

    Notes
    -----
    pre-activation to keep the residual path clear as described in:

    HE, Kaiming, et al. Identity mappings in deep residual networks.
    In: European conference on computer vision. Springer, Cham, 2016.
    S. 630-645.
    '''
    # Rank-appropriate (2D/3D) convolution layer class.
    Conv = get_nd_conv(spatial_dims)
    # Total feature channels across all groups.
    channels = channels_per_group * n_groups
    # Grouped, multi-dilation-rate convolution (the "stacked dilated conv").
    sd_conv = StackedDilatedConv(rank=spatial_dims,
                                 filters=channels,
                                 kernel_size=k_size,
                                 dilation_rates=dilation_rates,
                                 groups=n_groups,
                                 activation=LeakyReLU())
    # mixes ch/reduce from input_ch + channels_per_group*n_groups
    reduce_ch_conv = Conv(channels, 1)
    spatial_dropout = get_nd_spatial_dropout(spatial_dims)(dropout)

    # NOTE(review): `_call` is not defined in this excerpt -- presumably a
    # nested function (defined here in the full source) wiring sd_conv,
    # reduce_ch_conv and spatial_dropout into a residual block; confirm
    # against the complete file.
    return _call
def GenericRDCnetBase(input_shape,
                      downsampling_factor,
                      n_downsampling_channels,
                      n_output_channels,
                      n_groups=16,
                      dilation_rates=(1, 2, 4, 8, 16),
                      channels_per_group=32,
                      n_steps=5,
                      dropout=0.1):
    '''delta loop with input/output rescaling and atrous grouped conv recurrent block'''

    # Last axis of input_shape is the channel axis; the rest are spatial.
    ndim_spatial = len(input_shape) - 1
    # Normalize the factor to one entry per spatial dimension.
    factors = tuple(
        np.broadcast_to(np.array(downsampling_factor), ndim_spatial).tolist())

    # Recurrent refinement: an RDC block applied n_steps times in a delta loop.
    inner_block = rdc_block(n_groups,
                            dilation_rates,
                            channels_per_group,
                            spatial_dims=ndim_spatial,
                            dropout=dropout)
    refine = delta_loop(channels_per_group * n_groups, inner_block, n_steps)

    Conv = get_nd_conv(ndim_spatial)
    ConvTranspose = get_nd_conv_transposed(ndim_spatial)
    # Strided conv downscales the input; transposed conv restores resolution.
    downscale = Conv(n_downsampling_channels,
                     kernel_size=tuple(max(3, f) for f in factors),
                     strides=factors,
                     padding='same')
    upscale = ConvTranspose(n_output_channels,
                            kernel_size=tuple(max(3, 2 * f) for f in factors),
                            strides=factors,
                            padding='same')

    # Pad so the spatial size divides evenly, then trim back to the input size.
    pad = DynamicPaddingLayer(factors, ndim=ndim_spatial + 2)
    trim = DynamicTrimmingLayer(ndim=ndim_spatial + 2)

    inputs = Input(shape=input_shape)
    x = pad(inputs)
    x = downscale(x)
    x = refine(x)
    x = LeakyReLU()(x)
    x = upscale(x)
    x = trim([inputs, x])

    # Encode the hyperparameters into the model name for traceability.
    name = 'RDCNet-F{}-DC{}-OC{}-G{}-DR{}-GC{}-S{}-D{}'.format(
        _format_tuple(factors),
        n_downsampling_channels, n_output_channels, n_groups,
        _format_tuple(dilation_rates), channels_per_group, n_steps, dropout)
    return Model(inputs=inputs, outputs=[x], name=name)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
9104,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
23412,
11,
1004,
15492,
3041,
41596... | 2.0453 | 2,053 |
from googleplaces import GooglePlaces, types, lang
import googlemaps
import csv
from time import sleep
import requests
import sys
import re
from send_mail import *
# Script entry point.
# NOTE(review): `scrape` is imported/defined elsewhere in this file (not
# visible in this excerpt) -- confirm it drives the Google Places scrape.
if __name__ == '__main__':
    scrape()
| [
6738,
23645,
23625,
1330,
3012,
3646,
2114,
11,
3858,
11,
42392,
198,
11748,
23645,
31803,
198,
11748,
269,
21370,
198,
6738,
640,
1330,
3993,
198,
11748,
7007,
198,
11748,
25064,
198,
11748,
302,
198,
6738,
3758,
62,
4529,
1330,
1635,
... | 3.419355 | 62 |
""" XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# constants
# Shared commands
# Markers only commands
# Battle events
# Invalidation targets
# Spotted statuses
| [
37811,
1395,
15996,
357,
66,
8,
7324,
13,
4666,
87,
14761,
13,
785,
2211,
12,
5539,
37227,
198,
198,
29113,
29113,
4242,
2,
198,
2,
38491,
198,
198,
2,
39403,
9729,
198,
198,
2,
2940,
364,
691,
9729,
198,
198,
2,
5838,
2995,
198,
... | 4.017544 | 57 |
from onto.attrs import attribute
from onto.models.base import Serializable
from collections import namedtuple
# Descriptor for one GraphQL operation: its kind, field name, and object type.
graph_schema = namedtuple('graph_schema', 'op_type name graphql_object_type')
| [
6738,
4291,
13,
1078,
3808,
1330,
11688,
198,
198,
6738,
4291,
13,
27530,
13,
8692,
1330,
23283,
13821,
628,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
34960,
62,
15952,
2611,
796,
3706,
83,
29291,
10786,
34960,
62,
15952,
... | 3.311475 | 61 |
#!/usr/bin/env python
import sys
import os
import argparse
from encode_lib_common import (
assert_file_not_empty, get_num_lines, log, ls_l, mkdir_p, rm_f,
run_shell_cmd, strip_ext_ta)
from encode_lib_genomic import (
subsample_ta_pe, subsample_ta_se)
# Script entry point.
# NOTE(review): `main` is defined elsewhere in this file (not visible in this
# excerpt) -- confirm it parses args and runs the subsampling pipeline.
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
6738,
37773,
62,
8019,
62,
11321,
1330,
357,
198,
220,
220,
220,
6818,
62,
7753,
62,
1662,
62,
28920,
11,
651,
62,
22510,... | 2.451613 | 124 |
#!/usr/bin/env python3
import re
import yaml
from collections import namedtuple, defaultdict
from ipaddress import IPv4Network
from itertools import repeat, combinations
from functools import update_wrapper
import click
# Interface <if_no> of machine <host>, as declared in lab.conf.
BoundIface = namedtuple('BoundIface', 'host if_no')
# Interface with an IP address/netmask configured on top of the binding.
NetedIface = namedtuple('NetedIface', 'host if_no ip netmask')
# Association between an interface and the collision domain it attaches to.
DomainAsoc = namedtuple('DomainAsoc', 'iface domain')

# Matches lab.conf assignments like `pc1[0]="A"`: captures machine name,
# interface number, and the uppercase collision-domain letter.
# NOTE(review): the pattern captures only a single uppercase letter and does
# not anchor the closing quote -- confirm against the lab.conf syntax in use.
IFACE_STATEMENT_REGEXP = r'([a-z0-9_]+)\[(\d+)\]\s*=\s*"([A-Z])'

pass_data = click.make_pass_decorator(object)
@click.group()
@click.option(
"--labconf",
required=True,
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
help="Location of lab.conf",
)
@click.option(
"--netz",
required=True,
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
help="Location of netz.yml",
)
@click.pass_context
@click.command()
@pass_data
@click.command()
@pass_data
@click.command()
@pass_data
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
302,
198,
11748,
331,
43695,
198,
6738,
17268,
1330,
3706,
83,
29291,
11,
4277,
11600,
198,
6738,
20966,
21975,
1330,
25961,
19,
26245,
198,
6738,
340,
861,
10141,
1330,
9585,... | 2.570681 | 382 |
#! /usr/bin/env python
import sys
# Prefer the bundled libraries in ./libs over any system-installed versions;
# this must run before the `editor` import below.
sys.path.insert(0, 'libs')

# Launch the editor's main entry point.
from editor import main
main.main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
2,
4702,
6784,
329,
262,
3017,
9195,
82,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
705,
8019,
82,
11537,
198,
198,
6738,
5464,
1330,
1388,
198,
12417,
1... | 2.87234 | 47 |
from __future__ import absolute_import
import abc
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
from qrcode.image.base import BaseImage
from qrcode.main import ActiveWithNeighbors
class QRModuleDrawer(abc.ABC):
"""
QRModuleDrawer exists to draw the modules of the QR Code onto images.
For this, technically all that is necessary is a ``drawrect(self, box,
is_active)`` function which takes in the box in which it is to draw,
whether or not the box is "active" (a module exists there). If
``needs_neighbors`` is set to True, then the method should also accept a
``neighbors`` kwarg (the neighboring pixels).
It is frequently necessary to also implement an "initialize" function to
set up values that only the containing Image class knows about.
For examples of what these look like, see doc/module_drawers.png
"""
needs_neighbors = False
@abc.abstractmethod
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
450,
66,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
4479,
198,
198,
361,
41876,
62,
50084,
2751,
25,
198,
220,
220,
220,
422,
10662,
6015,
1098,
13,
9060,
13,
... | 3.302817 | 284 |
"""Desafio 80. Ler cinco valores númericos e ir colocando eles na lista de modo ordenado sem usar o método sort"""
numeros = list()
for cont in range(0, 5):
num = int(input("Escreva um número: "))
if cont == 0:
numeros.append(num)
elif cont == 1:
if num >= numeros[0]:
numeros.append(num)
else:
numeros.insert(0, num)
elif cont == 2:
if num >= numeros[1]:
numeros.append(num)
elif num <= numeros[0]:
numeros.insert(0, num)
else:
numeros.insert(1, num)
elif cont == 3:
if num >= numeros[2]:
numeros.append(num)
elif num <= numeros[0]:
numeros.insert(0, num)
elif num > numeros[0] and num <= numeros[1]:
numeros.insert(1, num)
else:
numeros.insert(2, num)
elif cont == 4:
if num >= numeros[3]:
numeros.append(num)
elif num <= numeros[0]:
numeros.insert(0, num)
elif num > numeros[0] and num <= numeros[1]:
numeros.insert(1, num)
elif num > numeros[1] and num <= numeros[2]:
numeros.insert(2, num)
else:
numeros.insert(3, num)
print(numeros)
| [
37811,
5960,
1878,
952,
4019,
13,
31831,
269,
259,
1073,
1188,
2850,
299,
21356,
946,
418,
304,
4173,
951,
420,
25440,
304,
829,
12385,
1351,
64,
390,
953,
78,
2760,
268,
4533,
5026,
514,
283,
267,
285,
25125,
24313,
3297,
37811,
198,... | 1.873512 | 672 |
import home
from ws.handler.event.enum import Handler as Parent
| [
11748,
1363,
198,
198,
6738,
266,
82,
13,
30281,
13,
15596,
13,
44709,
1330,
32412,
355,
16774,
628
] | 3.666667 | 18 |
# Find the 10001st prime (Project Euler #7) by trial division.
# Improvements over the original: only odd candidates are tested, divisibility
# is checked only against known primes up to sqrt(candidate) (the original
# divided by every integer in range(2, i), which is quadratic and very slow),
# and the crude flag variable was removed.
import math

simple_nums = [2]          # known primes, in ascending order
candidate = 3              # next odd number to test
while len(simple_nums) < 10001:
    limit = math.isqrt(candidate)
    for p in simple_nums:
        if p > limit:
            # No prime factor <= sqrt(candidate): candidate is prime.
            simple_nums.append(candidate)
            break
        if candidate % p == 0:
            # Composite: stop checking this candidate.
            break
    candidate += 2
print(simple_nums[-1])
72,
796,
513,
201,
198,
16211,
62,
521,
26407,
796,
657,
201,
198,
36439,
62,
77,
5700,
796,
685,
17,
60,
201,
198,
4514,
18896,
7,
36439,
62,
77,
5700,
8,
1279,
1802,
486,
25,
201,
198,
220,
220,
220,
329,
479,
287,
2837,
7,
... | 1.830508 | 177 |