content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import sys
import json
import copy
import mdtraj
import numpy as np
import time
import pandas as pd
import pickle
import mdtraj as md
import multiprocessing as mp
try:
import cupy as cp
cudaExists = True
import kernel
except ImportError as e:
cudaExists = False
print("Can't load CuPy, contact fingerprint will not run")
# sys.path.insert(1, os.path.join(sys.path[0], '../test_contacts/contacts/contacts/'))
import importlib
ligand_map = {'A': 0, 'C': 0, 'N': 1, 'NA': 1, 'O': 2, 'OA': 2, 'F': 3, 'P': 4, 'S': 5, 'SA': 5, 'CL': 6,
'BR': 7, 'I': 8, 'H': 9}
protein_map = {'A': 0, 'C': 0, 'N': 1, 'NA': 1, 'O': 2, 'OA': 2, 'S': 3, 'SA': 3, 'H': 4}
# import MDAnalysis as mda
# import MDAnalysis.analysis.rms
# self.trjLength = 0
# self.rmsd = []
# self.proteinCheck = TRJ.proteinCheck
# except:
# self.ligandTrajectory = None
# self.hasLigandTrajectory = False
# self.RMSD = TRJ.RMSD
# self.initialRMSD = []
# return np.array([self.numEM, self.frameEM, self.lengthEM, self.numQR, self.frameQR, self.lengthQR, self.numMD, self.frameMD, self.lengthMD],dtype=float)
# print(f'{self.sumMD:6.0f} molecular dynamics | {self.frameMD:8.0f} frames | {self.lengthMD:10.2f} nanoseconds')
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
4866,
198,
11748,
45243,
9535,
73,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2298,
293,
198,
11748,
45243,
9535,
... | 1.991736 | 726 |
#
# This file contains the Python code from Program 7.23 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm07_23.txt
#
| [
2,
198,
2,
770,
2393,
4909,
262,
11361,
2438,
422,
6118,
767,
13,
1954,
286,
198,
2,
366,
6601,
32112,
942,
290,
978,
7727,
907,
198,
2,
351,
9515,
12,
46,
380,
4714,
8495,
47020,
287,
11361,
1,
198,
2,
416,
31045,
371,
13,
3771... | 2.903846 | 104 |
__author__ = 'gkour'
import numpy as np
import torch
from abstractbrain import AbstractBrain
import os.path
from standardbrainnetwork import AbstractNetwork
import utils
torch.device('cuda' if torch.cuda.is_available() else 'cpu')
| [
834,
9800,
834,
796,
705,
70,
74,
454,
6,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
12531,
27825,
1330,
27741,
44687,
198,
11748,
28686,
13,
6978,
198,
6738,
3210,
27825,
27349,
1330,
27741,
26245,
198,
1174... | 3.405797 | 69 |
# ############## SWITH TO SSL ON TOKEN
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ServerFactory
from twisted.protocols.basic import LineReceiver
if __name__ == '__main__':
factory = ServerFactory()
factory.protocol = TLSServer
reactor.listenTCP(8000, factory)
reactor.run()
| [
2,
1303,
7804,
4242,
2,
12672,
10554,
5390,
25952,
6177,
5390,
43959,
198,
6738,
19074,
13,
37675,
1330,
21905,
11,
264,
6649,
198,
6738,
19074,
13,
37675,
13,
11235,
4668,
1330,
9652,
22810,
198,
6738,
19074,
13,
11235,
4668,
82,
13,
... | 3.194175 | 103 |
#!/usr/bin/env python
"""Extend regular notebook server to be aware of multiuser things."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
try:
from urllib.parse import quote
except ImportError:
# PY2 Compat
from urllib import quote
import requests
from jinja2 import ChoiceLoader, FunctionLoader
from tornado import ioloop
from tornado.web import HTTPError
try:
import notebook
except ImportError:
raise ImportError("JupyterHub single-user server requires notebook >= 4.0")
from traitlets import (
Bool,
Integer,
Unicode,
CUnicode,
)
from notebook.notebookapp import (
NotebookApp,
aliases as notebook_aliases,
flags as notebook_flags,
)
from notebook.auth.login import LoginHandler
from notebook.auth.logout import LogoutHandler
from notebook.utils import url_path_join
# Define two methods to attach to AuthenticatedHandler,
# which authenticate via the central auth server.
# register new hub related command-line aliases
aliases = dict(notebook_aliases)
aliases.update({
'user' : 'SingleUserNotebookApp.user',
'cookie-name': 'SingleUserNotebookApp.cookie_name',
'hub-prefix': 'SingleUserNotebookApp.hub_prefix',
'hub-host': 'SingleUserNotebookApp.hub_host',
'hub-api-url': 'SingleUserNotebookApp.hub_api_url',
'base-url': 'SingleUserNotebookApp.base_url',
})
flags = dict(notebook_flags)
flags.update({
'disable-user-config': ({
'SingleUserNotebookApp': {
'disable_user_config': True
}
}, "Disable user-controlled configuration of the notebook server.")
})
page_template = """
{% extends "templates/page.html" %}
{% block header_buttons %}
{{super()}}
<a href='{{hub_control_panel_url}}'
class='btn btn-default btn-sm navbar-btn pull-right'
style='margin-right: 4px; margin-left: 2px;'
>
Control Panel</a>
{% endblock %}
{% block logo %}
<img src='{{logo_url}}' alt='Jupyter Notebook'/>
{% endblock logo %}
"""
def _exclude_home(path_list):
"""Filter out any entries in a path list that are in my home directory.
Used to disable per-user configuration.
"""
home = os.path.expanduser('~')
for p in path_list:
if not p.startswith(home):
yield p
class SingleUserNotebookApp(NotebookApp):
"""A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""
user = CUnicode(config=True)
cookie_name = Unicode(config=True)
hub_prefix = Unicode(config=True)
hub_host = Unicode(config=True)
hub_api_url = Unicode(config=True)
aliases = aliases
flags = flags
open_browser = False
trust_xheaders = True
login_handler_class = JupyterHubLoginHandler
logout_handler_class = JupyterHubLogoutHandler
port_retries = 0 # disable port-retries, since the Spawner will tell us what port to use
disable_user_config = Bool(False, config=True,
help="""Disable user configuration of single-user server.
Prevents user-writable files that normally configure the single-user server
from being loaded, ensuring admins have full control of configuration.
"""
)
cookie_cache_lifetime = Integer(
config=True,
default_value=300,
allow_none=True,
help="""
Time, in seconds, that we cache a validated cookie before requiring
revalidation with the hub.
""",
)
def _log_datefmt_default(self):
"""Exclude date from default date format"""
return "%Y-%m-%d %H:%M:%S"
def _log_format_default(self):
"""override default log format to include time"""
return "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s"
@property
@property
def patch_templates(self):
"""Patch page templates to add Hub-related buttons"""
self.jinja_template_vars['logo_url'] = self.hub_host + url_path_join(self.hub_prefix, 'logo')
env = self.web_app.settings['jinja2_env']
env.globals['hub_control_panel_url'] = \
self.hub_host + url_path_join(self.hub_prefix, 'home')
# patch jinja env loading to modify page template
orig_loader = env.loader
env.loader = ChoiceLoader([
FunctionLoader(get_page),
orig_loader,
])
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
11627,
437,
3218,
20922,
4382,
284,
307,
3910,
286,
1963,
3754,
263,
1243,
526,
15931,
198,
198,
2,
15069,
357,
66,
8,
449,
929,
88,
353,
7712,
4816,
13,
198,
2,
4307,
6169,
... | 2.654077 | 1,668 |
from django.core.management.base import BaseCommand
from grievance.models import *
from django.contrib.auth.models import User
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
47645,
13,
27530,
1330,
1635,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198
] | 3.848485 | 33 |
#!/usr/bin/env python2
import os,sys,glob,copy,re,shutil,collections,string
import glob
import re
from os.path import join, dirname
import math
#IS_catching_
list_of_files = glob.glob("./textFiles/microtubule*")
number_of_files_tmp = len( list_of_files )
for file in list_of_files:
print( file )
number = file.split('_')[ 1 ].split('.')[ 0 ];
MTOC_file = """./textFiles/MTOC_%s.txt"""%(
number)
IS_capture_shrinkage = """./textFiles/IS_capture_shrinkage_%s.txt"""%(
number )
IS_cortical_sl = """./textFiles/IS_cortical_sl_%s.txt"""%(
number )
IS_catching_file = """./textFiles/IS_catching_%s.txt"""%(
number )
Dynein_surface_random_dist = """./textFiles/Dynein_surface_randomly_distributed_%s.txt"""%(
number )
Dynein_IS_capt = """./textFiles/Dynein_IS_capture_%s.txt"""%(
number )
dynein_abscissa = """./textFiles/dynein_abscissa_%s.txt"""%(
number )
dynein_abscissa_attachment = """./textFiles/dynein_abscissa_attachment_%s.txt"""%(
number )
for number in range( 0 , number_of_files_tmp ):
#print( number )
print("START OF FUNCTION")
micro_file = """./textFiles/microtubule_%s.txt"""%(
number)
MTOC_file = """./textFiles/MTOC_%s.txt"""%(
number)
IS_capture_shrinkage = """./textFiles/IS_capture_shrinkage_0.txt"""
IS_cortical_sl = """./textFiles/IS_cortical_sl_0.txt"""
IS_catching_file = """./textFiles/IS_catching_%s.txt"""%(
number )
Dynein_surface_random_dist = """./textFiles/Dynein_surface_randomly_distributed_%s.txt"""%(
number )
Dynein_IS_capt = """./textFiles/Dynein_IS_capture_%s.txt"""%(
number )
dynein_abscissa = """./textFiles/dynein_abscissa_%s.txt"""%(
number )
dynein_abscissa_attachment = """./textFiles/dynein_abscissa_attachment_%s.txt"""%(
number )
#print( IS_capture_shrinkage )
cytoskeleton( micro_file , MTOC_file , IS_catching_file , IS_capture_shrinkage , IS_cortical_sl , Dynein_surface_random_dist
, dynein_abscissa , dynein_abscissa_attachment , Dynein_IS_capt )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
11748,
28686,
11,
17597,
11,
4743,
672,
11,
30073,
11,
260,
11,
1477,
22602,
11,
4033,
26448,
11,
8841,
198,
11748,
15095,
198,
11748,
302,
198,
6738,
28686,
13,
6978,
1330,
4654,
... | 2.159681 | 1,002 |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import os
import datetime
import glob
from jams.argsort import argsort
import numpy as np
__all__ = ['fullnames', 'fullnames_dates', 'fullnames_dates_sizes', 'fullnames_sizes',
'fullnames_times', 'fullnames_times_sizes',
'last_fullname', 'last_fullname_date', 'last_fullname_date_size',
'last_fullname_size', 'last_fullname_time', 'last_fullname_time_size',
'last_name', 'last_name_date', 'last_name_date_size', 'last_name_size',
'last_name_time', 'last_name_time_size',
'names', 'names_dates', 'names_dates_sizes', 'names_sizes', 'names_times',
'names_times_sizes',
'newest_fullname', 'newest_fullname_date', 'newest_fullname_date_size',
'newest_fullname_size', 'newest_fullname_time', 'newest_fullname_time_size',
'newest_name', 'newest_name_date', 'newest_name_date_size',
'newest_name_size', 'newest_name_time', 'newest_name_date_size']
# --------------------------------------------------------------------
def fullnames(fname=None, dirs=None):
"""
Filenames with absolute paths in local directories.
Definition
----------
def fullnames(fname=None, dirs=None):
Optional Input
--------------
fname filename, filename globbing is possible such as '*.dat' (all files)
dirs list of or single directory names (default: '.')
Output
------
List of filenames incl. absolute paths.
Examples
--------
import os
# get .dat filenames with absolute paths in directories 2013, 2014, ...
fls = fullnames('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
# get .dat filenames in current directory
os.chdir('change/directory')
fls = fullnames('*.dat')
# get all filenames in current directory
fls = fullnames()
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Dec 2014
Modified, MC, Jun 2015 - dirs can be single directory
- dirs can be empty
"""
if dirs is None:
idirs = ['.']
else:
if isinstance(dirs, (list, tuple, np.ndarray, set)):
idirs = dirs
else:
idirs = [dirs]
lls = []
curdir = os.path.realpath(os.path.curdir)
for i in idirs:
if i != '.':
os.chdir(str(i))
if fname is None:
fnames = os.listdir('.')
else:
fnames = glob.glob(fname)
fnames = [ os.path.abspath(f) for f in fnames ]
lls += fnames
if i != '.':
os.chdir(curdir)
lls.sort()
return lls
# --------------------------------------------------------------------
def fullnames_dates(fname=None, dirs=None):
"""
Filenames with absolute paths and modification times in local directories.
Definition
----------
def fullnames_dates(fname=None, dirs=None):
Optional Input
--------------
fname filename, filename globbing is possible such as '*.dat' (all files)
dirs list of or single directory names (default: '.')
Output
------
List of filenames incl. absolute paths, List of modification times
Examples
--------
import os
# get .dat filenames with absolute paths and modification times in directories 2013, 2014, ...
fls, flsdate = fullnames_dates('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
# get .dat filenames and modification times in current directory
os.chdir('change/directory')
fls, flsdate = fullnames_dates('*.dat')
# get all filenames and modification times in current directory
fls, flsdate = fullnames_dates()
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Dec 2014
Modified, MC, Jun 2015 - dirs can be single directory
- dirs can be empty
"""
if dirs is None:
idirs = ['.']
else:
if isinstance(dirs, (list, tuple, np.ndarray, set)):
idirs = dirs
else:
idirs = [dirs]
lls = []
llsd = []
curdir = os.path.realpath(os.path.curdir)
for i in idirs:
if i != '.':
os.chdir(str(i))
if fname is None:
fnames = os.listdir('.')
else:
fnames = glob.glob(fname)
fnames = [ os.path.abspath(f) for f in fnames ]
lls += fnames
llsd += [ datetime.datetime.fromtimestamp(os.stat(n).st_mtime) for n in fnames ]
if i != '.':
os.chdir(curdir)
ii = argsort(lls)
lls = [ lls[i] for i in ii ]
llsd = [ llsd[i] for i in ii ]
return lls, llsd
# --------------------------------------------------------------------
def fullnames_dates_sizes(fname=None, dirs=None):
"""
Filenames with absolute paths, modification times, and file sizes in local directories.
Definition
----------
def fullnames_dates_sizes(fname=None, dirs=None):
Optional Input
--------------
fname filename, filename globbing is possible such as '*.dat' (all files)
dirs list of or single directory names (default: '.')
Output
------
List of filenames incl. absolute paths, List of modification times, List of file sizes
Examples
--------
import os
# get .dat filenames with absolute paths, modification times, and file sizes in directories 2013, 2014, ...
fls, flsdate, flssize = fullnames_dates_sizes('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
# get .dat filenames, modification times, and file sizes in current directory
os.chdir('change/directory')
fls, flsdate, flssize = fullnames_dates_sizes('*.dat')
# get all filenames, modification times, and file sizes in current directory
fls, flsdate, flssize = fullnames_dates_sizes()
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Dec 2014
Modified, MC, Jun 2015 - dirs can be single directory
- dirs can be empty
"""
if dirs is None:
idirs = ['.']
else:
if isinstance(dirs, (list, tuple, np.ndarray, set)):
idirs = dirs
else:
idirs = [dirs]
lls = []
llsd = []
llss = []
curdir = os.path.realpath(os.path.curdir)
for i in idirs:
if i != '.':
os.chdir(str(i))
if fname is None:
fnames = os.listdir('.')
else:
fnames = glob.glob(fname)
fnames = [ os.path.abspath(f) for f in fnames ]
lls += fnames
llsd += [ datetime.datetime.fromtimestamp(os.stat(n).st_mtime) for n in fnames ]
llss += [ int(os.stat(n).st_size) for n in fnames ]
if i != '.':
os.chdir(curdir)
ii = argsort(lls)
lls = [ lls[i] for i in ii ]
llsd = [ llsd[i] for i in ii ]
llss = [ llss[i] for i in ii ]
return lls, llsd, llss
# --------------------------------------------------------------------
def fullnames_sizes(fname=None, dirs=None):
"""
Filenames with absolute paths and file sizes in local directories.
Definition
----------
def fullnames_sizes(fname=None, dirs=None):
Optional Input
--------------
fname filename, filename globbing is possible such as '*.dat' (all files)
dirs list of or single directory names (default: '.')
Output
------
List of filenames incl. absolute paths, List of file sizes
Examples
--------
import os
# get .dat filenames with absolute paths and file sizes in directories 2013, 2014, ...
fls, flssize = fullnames_sizes('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
# get .dat filenames and file sizes in current directory
os.chdir('change/directory')
fls, flssize = fullnames_sizes('*.dat')
# get all filenames and file sizes in current directory
fls, flssize = fullnames_sizes()
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Dec 2014
Modified, MC, Jun 2015 - dirs can be single directory
- dirs can be empty
"""
if dirs is None:
idirs = ['.']
else:
if isinstance(dirs, (list, tuple, np.ndarray, set)):
idirs = dirs
else:
idirs = [dirs]
lls = []
llss = []
curdir = os.path.realpath(os.path.curdir)
for i in idirs:
if i != '.':
os.chdir(str(i))
if fname is None:
fnames = os.listdir('.')
else:
fnames = glob.glob(fname)
fnames = [ os.path.abspath(f) for f in fnames ]
lls += fnames
llss += [ int(os.stat(n).st_size) for n in fnames ]
if i != '.':
os.chdir(curdir)
ii = argsort(lls)
lls = [ lls[i] for i in ii ]
llss = [ llss[i] for i in ii ]
return lls, llss
# --------------------------------------------------------------------
def fullnames_times(*args, **kwargs):
"""
Wrapper for fullnames_dates:
def fullnames_dates(fname=None, dirs=None):
Examples
--------
import os
# get .dat filenames with absolute paths and modification times in directories 2013, 2014, ...
fls, flsdate = fullnames_times('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
# get .dat filenames and modification times in current directory
os.chdir('change/directory')
fls, flsdate = fullnames_times('*.dat')
# get all filenames and modification times in current directory
fls, flsdate = fullnames_times()
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Dec 2014
"""
return fullnames_dates(*args, **kwargs)
# --------------------------------------------------------------------
def fullnames_times_sizes(*args, **kwargs):
"""
Wrapper for fullnames_dates_sizes:
def fullnames_dates_sizes(fname=None, dirs=None):
Examples
--------
import os
# get .dat filenames with absolute paths, modification times, and file sizes in directories 2013, 2014, ...
fls, flsdate, flssize = fullnames_times_sizes('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
# get .dat filenames, modification times, and file sizes in current directory
os.chdir('change/directory')
fls, flsdate, flssize = fullnames_times_sizes('*.dat')
# get all filenames, modification times, and file sizes in current directory
fls, flsdate, flssize = fullnames_times_sizes()
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Dec 2014
"""
return fullnames_dates_sizes(*args, **kwargs)
# --------------------------------------------------------------------
def last_fullname(fname=None, dirs=None):
"""
Filename with absolute paths of last file in local directories.
Definition
----------
def last_fullname(fname=None, dirs=None):
Optional Input
--------------
fname filename, filename globbing is possible such as '*.dat' (all files)
dirs list of or single directory names (default: '.')
Output
------
Filename incl. absolute paths of last modified file
Examples
--------
import os
# get last .dat filename with absolute paths in directories 2013, 2014, ...
fls = last_fullname('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
# get last .dat filename in current directory
os.chdir('change/directory')
fls = last_fullname('*.dat')
# get last filename in current directory
fls = last_fullname()
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Dec 2014
"""
# Files
fls, fld = fullnames_dates(fname, dirs=dirs)
if len(fls) == 0: # nothing in there
return None
# Dates
fldec = []
for idat in fld:
dec = (idat.year*366. + idat.month*31. + idat.day*1. +
idat.hour/24. + idat.minute/1440. + idat.second/86400.)
fldec.append(dec)
ii = argsort(fldec)
return fls[ii[-1]]
# --------------------------------------------------------------------
def last_fullname_date(fname=None, dirs=None):
"""
Filename with absolute paths and modification time of last file in local directories.
Definition
----------
def last_fullname_date(fname=None, dirs=None):
Optional Input
--------------
fname filename, filename globbing is possible such as '*.dat' (all files)
dirs list of or single directory names (default: '.')
Output
------
Filename incl. absolute paths of last modified file, Modification time
Examples
--------
import os
# get last .dat filename with absolute paths and modification time in directories 2013, 2014, ...
fls, flsdate = last_fullname_date('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
# get last .dat filename and modification time in current directory
os.chdir('change/directory')
fls, flsdate = last_fullname_date('*.dat')
# get last filename and modification time in current directory
fls, flsdate = last_fullname_date()
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Dec 2014
"""
# Files
fls, fld = fullnames_dates(fname, dirs=dirs)
if len(fls) == 0: # nothing in there
return None
# Dates
fldec = []
for idat in fld:
dec = (idat.year*366. + idat.month*31. + idat.day*1. +
idat.hour/24. + idat.minute/1440. + idat.second/86400.)
fldec.append(dec)
ii = argsort(fldec)
return fls[ii[-1]], fld[ii[-1]]
# --------------------------------------------------------------------
def last_fullname_date_size(fname=None, dirs=None):
    """
    Filename with absolute path, modification time, and file size of the
    most recently modified file in local directories.


    Definition
    ----------
    def last_fullname_date_size(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    Filename incl. absolute path of last modified file, modification time
    (datetime.datetime), file size (int); None if no file matches.


    Examples
    --------
    import os
    # get last .dat filename with absolute path, modification time, and file size
    # in directories 2013, 2014, ...
    fls, flsdate, flssize = last_fullname_date_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename, modification time, and file size in current directory
    os.chdir('change/directory')
    fls, flsdate, flssize = last_fullname_date_size('*.dat')
    # get last filename, modification time, and file size in current directory
    fls, flsdate, flssize = last_fullname_date_size()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified,     bug fix: select the latest file by comparing datetime
                  objects directly; the former pseudo-decimal encoding
                  (year*366 + month*31 + ...) mis-ordered dates across
                  year boundaries (Dec 27-31 ranked after the following
                  Jan 1-5).
    """
    # Files and their metadata from the sibling helper.
    fls, fld, flss = fullnames_dates_sizes(fname, dirs=dirs)
    if len(fls) == 0:  # nothing in there
        return None
    # Index of the most recently modified file; datetime objects compare
    # chronologically. '>=' keeps the last index among exact ties.
    ilast = 0
    for i, idat in enumerate(fld):
        if idat >= fld[ilast]:
            ilast = i
    return fls[ilast], fld[ilast], flss[ilast]
# --------------------------------------------------------------------
def last_fullname_size(fname=None, dirs=None):
    """
    Filename with absolute path and file size of the most recently
    modified file in local directories.


    Definition
    ----------
    def last_fullname_size(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    Filename incl. absolute path of last modified file, file size (int);
    None if no file matches.


    Examples
    --------
    import os
    # get last .dat filename with absolute path and file size in directories 2013, 2014, ...
    fls, flssize = last_fullname_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename and file size in current directory
    os.chdir('change/directory')
    fls, flssize = last_fullname_size('*.dat')
    # get last filename and file size in current directory
    fls, flssize = last_fullname_size()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified,     docstring examples fixed (they showed last_fullname_date);
                  bug fix: select the latest file by comparing datetime
                  objects directly instead of the former pseudo-decimal
                  encoding, which mis-ordered dates across year boundaries.
    """
    # Files and their metadata from the sibling helper.
    fls, fld, flss = fullnames_dates_sizes(fname, dirs=dirs)
    if len(fls) == 0:  # nothing in there
        return None
    # Index of the most recently modified file; datetime objects compare
    # chronologically. '>=' keeps the last index among exact ties.
    ilast = 0
    for i, idat in enumerate(fld):
        if idat >= fld[ilast]:
            ilast = i
    return fls[ilast], flss[ilast]
# --------------------------------------------------------------------
def last_fullname_time(*args, **kwargs):
    """
    Wrapper for last_fullname_date:
        def last_fullname_date(fname=None, dirs=None):

    Returns filename with absolute path and modification time of the most
    recently modified file in local directories.


    Examples
    --------
    import os
    # get last .dat filename with absolute path and modification time
    # in directories 2013, 2014, ...
    fls, flstime = last_fullname_time('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename and modification time in current directory
    os.chdir('change/directory')
    fls, flstime = last_fullname_time('*.dat')
    # get last filename and modification time in current directory
    fls, flstime = last_fullname_time()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    """
    # Pure alias: hand everything through unchanged.
    return last_fullname_date(*args, **kwargs)
# --------------------------------------------------------------------
def last_fullname_time_size(*args, **kwargs):
    """
    Wrapper for last_fullname_date_size:
        def last_fullname_date_size(fname=None, dirs=None):

    Returns filename with absolute path, modification time, and file size
    of the most recently modified file in local directories.


    Examples
    --------
    import os
    # get last .dat filename with absolute path, modification time, and file size
    # in directories 2013, 2014, ...
    fls, flstime, flssize = last_fullname_time_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename, modification time, and file size in current directory
    os.chdir('change/directory')
    fls, flstime, flssize = last_fullname_time_size('*.dat')
    # get last filename, modification time, and file size in current directory
    fls, flstime, flssize = last_fullname_time_size()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    """
    # Pure alias: hand everything through unchanged.
    return last_fullname_date_size(*args, **kwargs)
# --------------------------------------------------------------------
def last_name(fname=None, dirs=None):
    """
    Filename of the most recently modified file in local directories.


    Definition
    ----------
    def last_name(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    Filename of last modified file (no directory prefix);
    None if no file matches.


    Examples
    --------
    import os
    # get last .dat filename in directories 2013, 2014, ...
    fls = last_name('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename in current directory
    os.chdir('change/directory')
    fls = last_name('*.dat')
    # get last filename in current directory
    fls = last_name()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified,     bug fix: select the latest file by comparing datetime
                  objects directly; the former pseudo-decimal encoding
                  mis-ordered dates across year boundaries.
    """
    # Files and their modification times from the sibling helper.
    fls, fld = names_dates(fname, dirs=dirs)
    if len(fls) == 0:  # nothing in there
        return None
    # Index of the most recently modified file; datetime objects compare
    # chronologically. '>=' keeps the last index among exact ties.
    ilast = 0
    for i, idat in enumerate(fld):
        if idat >= fld[ilast]:
            ilast = i
    return fls[ilast]
# --------------------------------------------------------------------
def last_name_date(fname=None, dirs=None):
    """
    Filename and modification time of the most recently modified file in
    local directories.


    Definition
    ----------
    def last_name_date(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    Filename of last modified file, modification time (datetime.datetime);
    None if no file matches.


    Examples
    --------
    import os
    # get last .dat filename and modification time in directories 2013, 2014, ...
    fls, flsdate = last_name_date('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename and modification time in current directory
    os.chdir('change/directory')
    fls, flsdate = last_name_date('*.dat')
    # get last filename and modification time in current directory
    fls, flsdate = last_name_date()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified,     bug fix: select the latest file by comparing datetime
                  objects directly; the former pseudo-decimal encoding
                  mis-ordered dates across year boundaries.
    """
    # Files and their modification times from the sibling helper.
    fls, fld = names_dates(fname, dirs=dirs)
    if len(fls) == 0:  # nothing in there
        return None
    # Index of the most recently modified file; datetime objects compare
    # chronologically. '>=' keeps the last index among exact ties.
    ilast = 0
    for i, idat in enumerate(fld):
        if idat >= fld[ilast]:
            ilast = i
    return fls[ilast], fld[ilast]
# --------------------------------------------------------------------
def last_name_date_size(fname=None, dirs=None):
    """
    Filename, modification time, and file size of the most recently
    modified file in local directories.


    Definition
    ----------
    def last_name_date_size(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    Filename of last modified file, modification time (datetime.datetime),
    file size (int); None if no file matches.


    Examples
    --------
    import os
    # get last .dat filename, modification time, and file size in directories 2013, 2014, ...
    fls, flsdate, flssize = last_name_date_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename, modification time, and file size in current directory
    os.chdir('change/directory')
    fls, flsdate, flssize = last_name_date_size('*.dat')
    # get last filename, modification time, and file size in current directory
    fls, flsdate, flssize = last_name_date_size()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified,     bug fix: select the latest file by comparing datetime
                  objects directly; the former pseudo-decimal encoding
                  mis-ordered dates across year boundaries.
    """
    # Files and their metadata from the sibling helper.
    fls, fld, flss = names_dates_sizes(fname, dirs=dirs)
    if len(fls) == 0:  # nothing in there
        return None
    # Index of the most recently modified file; datetime objects compare
    # chronologically. '>=' keeps the last index among exact ties.
    ilast = 0
    for i, idat in enumerate(fld):
        if idat >= fld[ilast]:
            ilast = i
    return fls[ilast], fld[ilast], flss[ilast]
# --------------------------------------------------------------------
def last_name_size(fname=None, dirs=None):
    """
    Filename and file size of the most recently modified file in local
    directories.


    Definition
    ----------
    def last_name_size(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    Filename of last modified file, file size (int);
    None if no file matches.


    Examples
    --------
    import os
    # get last .dat filename and file size in directories 2013, 2014, ...
    fls, flssize = last_name_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename and file size in current directory
    os.chdir('change/directory')
    fls, flssize = last_name_size('*.dat')
    # get last filename and file size in current directory
    fls, flssize = last_name_size()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified,     docstring examples fixed (they showed last_name_date);
                  bug fix: select the latest file by comparing datetime
                  objects directly instead of the former pseudo-decimal
                  encoding, which mis-ordered dates across year boundaries.
    """
    # Files and their metadata from the sibling helper.
    fls, fld, flss = names_dates_sizes(fname, dirs=dirs)
    if len(fls) == 0:  # nothing in there
        return None
    # Index of the most recently modified file; datetime objects compare
    # chronologically. '>=' keeps the last index among exact ties.
    ilast = 0
    for i, idat in enumerate(fld):
        if idat >= fld[ilast]:
            ilast = i
    return fls[ilast], flss[ilast]
# --------------------------------------------------------------------
def last_name_time(*args, **kwargs):
    """
    Wrapper for last_name_date:
        def last_name_date(fname=None, dirs=None):

    Returns filename and modification time of the most recently modified
    file in local directories.


    Examples
    --------
    import os
    # get last .dat filename and modification time in directories 2013, 2014, ...
    fls, flstime = last_name_time('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename and modification time in current directory
    os.chdir('change/directory')
    fls, flstime = last_name_time('*.dat')
    # get last filename and modification time in current directory
    fls, flstime = last_name_time()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    """
    # Pure alias: hand everything through unchanged.
    return last_name_date(*args, **kwargs)
# --------------------------------------------------------------------
def last_name_time_size(*args, **kwargs):
    """
    Wrapper for last_name_date_size:
        def last_name_date_size(fname=None, dirs=None):

    Returns filename, modification time, and file size of the most
    recently modified file in local directories.


    Examples
    --------
    import os
    # get last .dat filename, modification time, and file size in directories 2013, 2014, ...
    fls, flsdate, flssize = last_name_time_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get last .dat filename, modification time, and file size in current directory
    os.chdir('change/directory')
    fls, flsdate, flssize = last_name_time_size('*.dat')
    # get last filename, modification time, and file size in current directory
    fls, flsdate, flssize = last_name_time_size()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    """
    # Pure alias: hand everything through unchanged.
    return last_name_date_size(*args, **kwargs)
# --------------------------------------------------------------------
def names(fname=None, dirs=None):
    """
    Filenames in local directories.


    Definition
    ----------
    def names(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    Sorted list of filenames (no directory prefix)


    Examples
    --------
    import os
    # get .dat filenames in directories 2013, 2014, ...
    fls = names('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get .dat filenames in current directory
    os.chdir('change/directory')
    fls = names('*.dat')
    # get all filenames in current directory
    fls = names()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified, MC, Jun 2015 - dirs can be single directory
                           - dirs can be empty
    Modified,     restore the original working directory even if an error
                  occurs while visiting a directory
    """
    # Accept None, a single directory name, or any iterable of names.
    if dirs is None:
        idirs = ['.']
    elif isinstance(dirs, (list, tuple, np.ndarray, set)):
        idirs = dirs
    else:
        idirs = [dirs]
    lls = []
    curdir = os.path.realpath(os.path.curdir)
    try:
        for i in idirs:
            if i != '.':
                # chdir so that glob/listdir return bare filenames
                os.chdir(str(i))
            if fname is None:
                fnames = os.listdir('.')
            else:
                fnames = glob.glob(fname)
            lls += fnames
            if i != '.':
                # back to start so relative names in idirs keep resolving
                os.chdir(curdir)
    finally:
        # never leave the caller in a foreign working directory
        os.chdir(curdir)
    lls.sort()
    return lls
# --------------------------------------------------------------------
def names_dates(fname=None, dirs=None):
    """
    Filenames and modification times in local directories.


    Definition
    ----------
    def names_dates(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    List of filenames (sorted by name, no directory prefix),
    list of modification times (datetime.datetime, same order)


    Examples
    --------
    import os
    # get .dat filenames and modification times in directories 2013, 2014, ...
    fls, flsdate = names_dates('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get .dat filenames and modification times in current directory
    os.chdir('change/directory')
    fls, flsdate = names_dates('*.dat')
    # get all filenames and modification times in current directory
    fls, flsdate = names_dates()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified, MC, Jun 2015 - dirs can be single directory
                           - dirs can be empty
    Modified,     restore the original working directory even if an error
                  occurs while visiting a directory
    """
    # Accept None, a single directory name, or any iterable of names.
    if dirs is None:
        idirs = ['.']
    elif isinstance(dirs, (list, tuple, np.ndarray, set)):
        idirs = dirs
    else:
        idirs = [dirs]
    lls = []
    llsd = []
    curdir = os.path.realpath(os.path.curdir)
    try:
        for i in idirs:
            if i != '.':
                # chdir so that glob/listdir return bare filenames
                os.chdir(str(i))
            if fname is None:
                fnames = os.listdir('.')
            else:
                fnames = glob.glob(fname)
            lls += fnames
            # stat while still inside the directory: names are relative
            llsd += [ datetime.datetime.fromtimestamp(os.stat(n).st_mtime) for n in fnames ]
            if i != '.':
                # back to start so relative names in idirs keep resolving
                os.chdir(curdir)
    finally:
        # never leave the caller in a foreign working directory
        os.chdir(curdir)
    # sort both lists consistently by filename
    ii = argsort(lls)
    lls  = [ lls[i]  for i in ii ]
    llsd = [ llsd[i] for i in ii ]
    return lls, llsd
# --------------------------------------------------------------------
def names_dates_sizes(fname=None, dirs=None):
    """
    Filenames, modification times, and file sizes in local directories.


    Definition
    ----------
    def names_dates_sizes(fname=None, dirs=None):


    Optional Input
    --------------
    fname    filename, filename globbing is possible such as '*.dat' (all files)
    dirs     list of or single directory names (default: '.')


    Output
    ------
    List of filenames (sorted by name, no directory prefix),
    list of modification times (datetime.datetime, same order),
    list of file sizes in bytes (int, same order)


    Examples
    --------
    import os
    # get .dat filenames, modification times, and file sizes in directories 2013, 2014, ...
    fls, flsdate, flssize = names_dates_sizes('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # get .dat filenames, modification times, and file sizes in current directory
    os.chdir('change/directory')
    fls, flsdate, flssize = names_dates_sizes('*.dat')
    # get all filenames, modification times, and file sizes in current directory
    fls, flsdate, flssize = names_dates_sizes()


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the full license text at the top of the package files).

    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de


    History
    -------
    Written,  MC, Dec 2014
    Modified, MC, Jun 2015 - dirs can be single directory
                           - dirs can be empty
    Modified,     restore the original working directory even if an error
                  occurs while visiting a directory
    """
    # Accept None, a single directory name, or any iterable of names.
    if dirs is None:
        idirs = ['.']
    elif isinstance(dirs, (list, tuple, np.ndarray, set)):
        idirs = dirs
    else:
        idirs = [dirs]
    lls = []
    llsd = []
    llss = []
    curdir = os.path.realpath(os.path.curdir)
    try:
        for i in idirs:
            if i != '.':
                # chdir so that glob/listdir return bare filenames
                os.chdir(str(i))
            if fname is None:
                fnames = os.listdir('.')
            else:
                fnames = glob.glob(fname)
            lls += fnames
            # stat while still inside the directory: names are relative
            llsd += [ datetime.datetime.fromtimestamp(os.stat(n).st_mtime) for n in fnames ]
            llss += [ int(os.stat(n).st_size) for n in fnames ]
            if i != '.':
                # back to start so relative names in idirs keep resolving
                os.chdir(curdir)
    finally:
        # never leave the caller in a foreign working directory
        os.chdir(curdir)
    # sort all three lists consistently by filename
    ii = argsort(lls)
    lls  = [ lls[i]  for i in ii ]
    llsd = [ llsd[i] for i in ii ]
    llss = [ llss[i] for i in ii ]
    return lls, llsd, llss
# --------------------------------------------------------------------
def names_sizes(fname=None, dirs=None):
    """
    Filenames and file sizes in local directories.

    Definition
    ----------
    def names_sizes(fname=None, dirs=None):

    Optional Input
    --------------
    fname    filename or glob pattern such as '*.dat' (default: all files)
    dirs     single directory name or iterable of directory names
             (default: '.')

    Output
    ------
    List of filenames sorted by name, list of the matching file sizes in bytes

    Examples
    --------
    import os
    # .dat filenames and file sizes in directories 2013, 2014, ...
    fls, flssize = names_sizes('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # .dat filenames and file sizes in current directory
    os.chdir('change/directory')
    fls, flssize = names_sizes('*.dat')
    # all filenames and file sizes in current directory
    fls, flssize = names_sizes()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written,  MC, Dec 2014
    Modified, MC, Jun 2015 - dirs can be a single directory or empty
    """
    # Normalise dirs to a list of directories to visit.
    if dirs is None:
        search_dirs = ['.']
    elif isinstance(dirs, (list, tuple, np.ndarray, set)):
        search_dirs = dirs
    else:
        search_dirs = [dirs]

    names = []
    sizes = []
    startdir = os.path.realpath(os.path.curdir)
    for d in search_dirs:
        # Work inside each directory so that glob patterns stay relative.
        if d != '.':
            os.chdir(str(d))
        found = os.listdir('.') if fname is None else glob.glob(fname)
        names += found
        sizes += [int(os.stat(f).st_size) for f in found]
        if d != '.':
            os.chdir(startdir)

    # Sort both lists by filename; argsort is provided by the enclosing package.
    order = argsort(names)
    names = [names[i] for i in order]
    sizes = [sizes[i] for i in order]
    return names, sizes
# --------------------------------------------------------------------
def names_times(*args, **kwargs):
    """
    Filenames and modification times in local directories.

    Thin alias for names_dates; all arguments are passed through unchanged:
        names_dates(fname=None, dirs=None)

    Examples
    --------
    import os
    # .dat filenames and modification times in directories 2013, 2014, ...
    fls, flsdate = names_times('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # .dat filenames and modification times in current directory
    os.chdir('change/directory')
    fls, flsdate = names_times('*.dat')
    # all filenames and modification times in current directory
    fls, flsdate = names_times()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see names_dates for the implementation.
    return names_dates(*args, **kwargs)
# --------------------------------------------------------------------
def names_times_sizes(*args, **kwargs):
    """
    Filenames, modification times, and file sizes in local directories.

    Thin alias for names_dates_sizes; all arguments are passed through
    unchanged:
        names_dates_sizes(fname=None, dirs=None)

    Examples
    --------
    import os
    # .dat filenames, modification times, and file sizes in directories 2013, 2014, ...
    fls, flsdate, flssize = names_times_sizes('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # .dat filenames, modification times, and file sizes in current directory
    os.chdir('change/directory')
    fls, flsdate, flssize = names_times_sizes('*.dat')
    # all filenames, modification times, and file sizes in current directory
    fls, flsdate, flssize = names_times_sizes()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see names_dates_sizes for the implementation.
    return names_dates_sizes(*args, **kwargs)
# --------------------------------------------------------------------
def newest_fullname(*args, **kwargs):
    """
    Absolute-path filename of the most recently modified file.

    Thin alias for last_fullname; all arguments are passed through unchanged:
        last_fullname(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename with absolute path in directories 2013, 2014, ...
    fls = newest_fullname('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename in current directory
    os.chdir('change/directory')
    fls = newest_fullname('*.dat')
    # newest filename in current directory
    fls = newest_fullname()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_fullname for the implementation.
    return last_fullname(*args, **kwargs)
# --------------------------------------------------------------------
def newest_fullname_date(*args, **kwargs):
    """
    Absolute-path filename and modification time of the newest file.

    Thin alias for last_fullname_date; all arguments are passed through
    unchanged:
        last_fullname_date(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename with absolute path and modification time
    # in directories 2013, 2014, ...
    fls, flsdate = newest_fullname_date('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename and modification time in current directory
    os.chdir('change/directory')
    fls, flsdate = newest_fullname_date('*.dat')
    # newest filename and modification time in current directory
    fls, flsdate = newest_fullname_date()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_fullname_date for the implementation.
    return last_fullname_date(*args, **kwargs)
# --------------------------------------------------------------------
def newest_fullname_date_size(*args, **kwargs):
    """
    Absolute-path filename, modification time, and size of the newest file.

    Thin alias for last_fullname_date_size; all arguments are passed through
    unchanged:
        last_fullname_date_size(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename with absolute path, modification time, and file
    # size in directories 2013, 2014, ...
    fls, flsdate, flssize = newest_fullname_date_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename, modification time, and file size in current directory
    os.chdir('change/directory')
    fls, flsdate, flssize = newest_fullname_date_size('*.dat')
    # newest filename, modification time, and file size in current directory
    fls, flsdate, flssize = newest_fullname_date_size()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_fullname_date_size for the implementation.
    return last_fullname_date_size(*args, **kwargs)
# --------------------------------------------------------------------
def newest_fullname_size(*args, **kwargs):
    """
    Absolute-path filename and size of the most recently modified file.

    Thin alias for last_fullname_size; all arguments are passed through
    unchanged:
        last_fullname_size(fname=None, dirs=None)

    Optional Input
    --------------
    fname    filename or glob pattern such as '*.dat' (default: all files)
    dirs     single directory name or iterable of directory names
             (default: '.')

    Output
    ------
    Filename (incl. absolute path) of the newest modified file, its file size

    Examples
    --------
    import os
    # newest .dat filename with absolute path and file size
    # in directories 2013, 2014, ...
    fls, flssize = newest_fullname_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename and file size in current directory
    os.chdir('change/directory')
    fls, flssize = newest_fullname_size('*.dat')
    # newest filename and file size in current directory
    fls, flssize = newest_fullname_size()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_fullname_size for the implementation.
    # (The original docstring examples wrongly showed newest_fullname_date.)
    return last_fullname_size(*args, **kwargs)
# --------------------------------------------------------------------
def newest_fullname_time(*args, **kwargs):
    """
    Absolute-path filename and modification time of the newest file.

    Thin alias for last_fullname_time; all arguments are passed through
    unchanged:
        last_fullname_time(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename with absolute path and modification time
    # in directories 2013, 2014, ...
    fls, flstime = newest_fullname_time('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename and modification time in current directory
    os.chdir('change/directory')
    fls, flstime = newest_fullname_time('*.dat')
    # newest filename and modification time in current directory
    fls, flstime = newest_fullname_time()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_fullname_time for the implementation.
    # (The original docstring claimed this wraps last_fullname_date; the code
    # delegates to last_fullname_time.)
    return last_fullname_time(*args, **kwargs)
# --------------------------------------------------------------------
def newest_fullname_time_size(*args, **kwargs):
    """
    Absolute-path filename, modification time, and size of the newest file.

    Thin alias for last_fullname_date_size; all arguments are passed through
    unchanged:
        last_fullname_date_size(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename with absolute path, modification time, and file
    # size in directories 2013, 2014, ...
    fls, flstime, flssize = newest_fullname_time_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename, modification time, and file size in current directory
    os.chdir('change/directory')
    fls, flstime, flssize = newest_fullname_time_size('*.dat')
    # newest filename, modification time, and file size in current directory
    fls, flstime, flssize = newest_fullname_time_size()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_fullname_date_size for the implementation.
    return last_fullname_date_size(*args, **kwargs)
# --------------------------------------------------------------------
def newest_name(*args, **kwargs):
    """
    Filename of the most recently modified file in local directories.

    Thin alias for last_name; all arguments are passed through unchanged:
        last_name(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename in directories 2013, 2014, ...
    fls = newest_name('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename in current directory
    os.chdir('change/directory')
    fls = newest_name('*.dat')
    # newest filename in current directory
    fls = newest_name()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_name for the implementation.
    return last_name(*args, **kwargs)
# --------------------------------------------------------------------
def newest_name_date(*args, **kwargs):
    """
    Filename and modification time of the newest file in local directories.

    Thin alias for last_name_date; all arguments are passed through unchanged:
        last_name_date(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename and modification time in directories 2013, 2014, ...
    fls, flsdate = newest_name_date('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename and modification time in current directory
    os.chdir('change/directory')
    fls, flsdate = newest_name_date('*.dat')
    # newest filename and modification time in current directory
    fls, flsdate = newest_name_date()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Bug fix: the original body first recomputed names/dates via
    # names_dates(fname, dirs=dirs) using the locals 'fname' and 'dirs',
    # which do not exist in this *args/**kwargs signature -- every call
    # raised NameError -- and the computed sort order was discarded anyway.
    # The wrapper only needs to delegate.
    return last_name_date(*args, **kwargs)
# --------------------------------------------------------------------
def newest_name_date_size(*args, **kwargs):
    """
    Filename, modification time, and size of the newest file.

    Thin alias for last_name_date_size; all arguments are passed through
    unchanged:
        last_name_date_size(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename, modification time, and file size
    # in directories 2013, 2014, ...
    fls, flsdate, flssize = newest_name_date_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename, modification time, and file size in current directory
    os.chdir('change/directory')
    fls, flsdate, flssize = newest_name_date_size('*.dat')
    # newest filename, modification time, and file size in current directory
    fls, flsdate, flssize = newest_name_date_size()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_name_date_size for the implementation.
    return last_name_date_size(*args, **kwargs)
# --------------------------------------------------------------------
def newest_name_size(*args, **kwargs):
    """
    Filename and size of the most recently modified file.

    Thin alias for last_name_size; all arguments are passed through unchanged:
        last_name_size(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename and file size in directories 2013, 2014, ...
    fls, flssize = newest_name_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename and file size in current directory
    os.chdir('change/directory')
    fls, flssize = newest_name_size('*.dat')
    # newest filename and file size in current directory
    fls, flssize = newest_name_size()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see last_name_size for the implementation.
    # (The original docstring examples wrongly showed newest_name_date.)
    return last_name_size(*args, **kwargs)
# --------------------------------------------------------------------
def newest_name_time(*args, **kwargs):
    """
    Filename and modification time of the newest file in local directories.

    Thin alias for newest_name_date; all arguments are passed through
    unchanged:
        newest_name_date(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename and modification time in directories 2013, 2014, ...
    fls, flstime = newest_name_time('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename and modification time in current directory
    os.chdir('change/directory')
    fls, flstime = newest_name_time('*.dat')
    # newest filename and modification time in current directory
    fls, flstime = newest_name_time()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Pure delegation; see newest_name_date for the implementation.
    return newest_name_date(*args, **kwargs)
# --------------------------------------------------------------------
def newest_name_time_size(*args, **kwargs):
    """
    Filename, modification time, and size of the newest file.

    Thin alias for newest_name_date_size; all arguments are passed through
    unchanged:
        newest_name_date_size(fname=None, dirs=None)

    Examples
    --------
    import os
    # newest .dat filename, modification time, and file size
    # in directories 2013, 2014, ...
    fls, flsdate, flssize = newest_name_time_size('*.dat', dirs=glob.glob('[0-9][0-9][0-9][0-9]'))
    # newest .dat filename, modification time, and file size in current directory
    os.chdir('change/directory')
    fls, flsdate, flssize = newest_name_time_size('*.dat')
    # newest filename, modification time, and file size in current directory
    fls, flsdate, flssize = newest_name_time_size()

    License
    -------
    Part of the JAMS Python package, distributed under the MIT License
    (see package license text).
    Copyright (c) 2014-2015 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written, MC, Dec 2014
    """
    # Bug fix: this duplicate definition was (mis)named newest_name_date_size,
    # shadowing the real newest_name_date_size defined just above, and its
    # body returned newest_name_date_size(*args, **kwargs) -- i.e. it called
    # itself and recursed forever. Renamed to newest_name_time_size to match
    # the newest_fullname_time_size naming pattern of this module; it now
    # delegates to the (un-shadowed) newest_name_date_size.
    return newest_name_date_size(*args, **kwargs)
# Run the docstring examples as doctests when the module is executed
# directly; NORMALIZE_WHITESPACE makes the examples tolerant of spacing
# differences in their expected output.
if __name__ == '__main__':
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
4112,
62,
11748,
11,
3601,
62,
8818,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
11748,
15095,
198,
6738,
44147,
13,
22046,
419,
1330,
26498,... | 2.678761 | 38,641 |
#!/usr/bin/env python
# coding=utf-8
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import socket
import sys
import requests
CACERT_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
API_URL = "https://kubernetes.default:443"
CURRENT_POD = socket.gethostname()
CURRENT_NAMESPACE = read(NAMESPACE_PATH)
TOKEN = read(TOKEN_PATH)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
2,
357,
34,
8,
15069,
2177,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
1... | 3.156425 | 358 |
"""
Given a column title as appear in an Excel sheet, return its corresponding
column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
...
Example:
Input: "AB"
Output: 28
Example:
Input: "ZY"
Output: 701
Constraints:
- 1 <= s.length <= 7
- s consists only of uppercase English letters.
- s is between "A" and "FXSHRXW".
"""
#Difficulty: Easy
#1000 / 1000 test cases passed.
#Runtime: 28 ms
#Memory Usage: 13.7 MB
#Runtime: 28 ms, faster than 92.13% of Python3 online submissions for Excel Sheet Column Number.
#Memory Usage: 13.7 MB, less than 83.68% of Python3 online submissions for Excel Sheet Column Number.
| [
37811,
198,
220,
220,
220,
11259,
257,
5721,
3670,
355,
1656,
287,
281,
24134,
9629,
11,
1441,
663,
11188,
220,
198,
220,
220,
220,
5721,
1271,
13,
628,
220,
220,
220,
1114,
1672,
25,
628,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.314121 | 347 |
import math
hueStepSize = 2.34 # 600 lumens divided into 8 bit
| [
11748,
10688,
198,
71,
518,
8600,
10699,
796,
362,
13,
2682,
220,
220,
1303,
10053,
46390,
641,
9086,
656,
807,
1643,
628
] | 3 | 22 |
# Run this file by right clicking it within the Project tab in the left pane, and then choose "Run test"
print("Hello world!")
print("Did i do this right?") | [
2,
5660,
428,
2393,
416,
826,
12264,
340,
1626,
262,
4935,
7400,
287,
262,
1364,
37218,
11,
290,
788,
3853,
366,
10987,
1332,
1,
198,
198,
4798,
7203,
15496,
995,
2474,
8,
198,
4798,
7203,
11633,
1312,
466,
428,
826,
1701,
8
] | 3.738095 | 42 |
import versioneer
from setuptools import setup, find_packages
setup(
name='miniast',
url='https://github.com/cpcloud/miniast',
packages=find_packages(),
python_requires='>=3.5',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Lightweight macros for Python',
license='Apache License, Version 2.0',
author='Phillip Cloud',
author_email='cpcloud@gmail.com',
)
| [
11748,
2196,
28153,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
45313,
459,
3256,
198,
220,
220,
220,
19016,
11639,
5450,
1378,
12567,
13,
785,
14,
13155,
17... | 2.806452 | 155 |
import yaml
| [
11748,
331,
43695,
198
] | 3 | 4 |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import gettext
import warnings
import pkg_resources
try:
# First, try to get our version out of PKG-INFO. If we're installed,
# this'll let us find our version without pulling in pbr. After all, if
# we're installed on a system, we're not in a Git-managed source tree, so
# pbr doesn't really buy us anything.
__version__ = __canonical_version__ = pkg_resources.get_provider(
pkg_resources.Requirement.parse('swift')).version
except pkg_resources.DistributionNotFound:
# No PKG-INFO? We're probably running from a checkout, then. Let pbr do
# its thing to figure out a version number.
import pbr.version
_version_info = pbr.version.VersionInfo('swift')
__version__ = _version_info.release_string()
__canonical_version__ = _version_info.version_string()
_localedir = os.environ.get('SWIFT_LOCALEDIR')
_t = gettext.translation('swift', localedir=_localedir, fallback=True)
if (3, 0) <= sys.version_info[:2] <= (3, 5):
# In the development of py3, json.loads() stopped accepting byte strings
# for a while. https://bugs.python.org/issue17909 got fixed for py36, but
# since it was termed an enhancement and not a regression, we don't expect
# any backports. At the same time, it'd be better if we could avoid
# leaving a whole bunch of json.loads(resp.body.decode(...)) scars in the
# code that'd probably persist even *after* we drop support for 3.5 and
# earlier. So, monkey patch stdlib.
import json
if not getattr(json.loads, 'patched_to_decode', False):
json.loads = JsonLoadsPatcher(json.loads)
del JsonLoadsPatcher
warnings.filterwarnings('ignore', module='cryptography|OpenSSL', message=(
'Python 2 is no longer supported by the Python core team. '
'Support for it is now deprecated in cryptography, '
'and will be removed in a future release.'))
warnings.filterwarnings('ignore', module='cryptography|OpenSSL', message=(
'Python 2 is no longer supported by the Python core team. '
'Support for it is now deprecated in cryptography, '
'and will be removed in the next release.'))
| [
2,
15069,
357,
66,
8,
2211,
30446,
15503,
12,
11869,
446,
7712,
5834,
11,
406,
13,
47,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
239... | 3.183815 | 865 |
{
"name" : "ZK Biometric Device Integration Kware (ZKTECO) Demo (UDP)",
"version" : "1.0",
"author" : "JUVENTUD PRODUCTIVA VENEZOLANA",
"category" : "HR",
"website" : "https://www.youtube.com/channel/UCTj66IUz5M-QV15Mtbx_7yg",
"description": "Module for the connection between odoo and zkteco devices for the control of employee assistance. This module is a demo version to test the compatibility of your device with our module.d Odoo.",
'license': 'AGPL-3',
"depends" : ["base","hr"],
"data" : [
"views/biometric_machine_view.xml",
"secuirty/res_groups.xml",
"secuirty/ir.model.access.csv"
],
'images': ['static/images/zk_screenshot.jpg'],
"active": True,
"installable": True,
}
| [
90,
198,
220,
220,
220,
366,
3672,
1,
1058,
366,
57,
42,
8436,
16996,
16232,
38410,
509,
1574,
357,
57,
42176,
2943,
46,
8,
34588,
357,
52,
6322,
42501,
198,
220,
220,
220,
366,
9641,
1,
1058,
366,
16,
13,
15,
1600,
198,
220,
22... | 2.5 | 296 |
import STM32
import PikaPiZero
import PikaStdLib
time = STM32.Time()
uart = STM32.UART()
uart.init()
uart.setId(1)
uart.enable()
print('initing rgb...')
rgb = PikaPiZero.RGB()
rgb.init()
rgb.enable()
print('init rgb ok!')
mem = PikaStdLib.MemChecker()
print('mem max:')
mem.max()
while True:
time.sleep_ms(50)
rgb.flow()
| [
11748,
3563,
44,
2624,
198,
11748,
350,
9232,
38729,
28667,
198,
11748,
350,
9232,
1273,
67,
25835,
198,
2435,
796,
3563,
44,
2624,
13,
7575,
3419,
198,
19986,
796,
3563,
44,
2624,
13,
52,
7227,
3419,
198,
198,
19986,
13,
15003,
3419,... | 2.273973 | 146 |
import logging
import typing
import gym
import numpy as np
from DeepRL.Env import EnvAbstract, EnvState
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
| [
11748,
18931,
198,
11748,
19720,
198,
198,
11748,
11550,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
10766,
7836,
13,
4834,
85,
1330,
2039,
85,
23839,
11,
2039,
85,
9012,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
64... | 2.971429 | 70 |
# -*- coding: utf-8 -*-
#
# utils.py
#
# Copyright (C) libracore, 2017-2020
# https://www.libracore.com or https://github.com/libracore
#
# For information on ERPNext, refer to https://erpnext.org/
#
import frappe
import json
import urllib.parse
import six
import pdfkit, os
from frappe import _, attach_print
from frappe.utils.data import today, add_days
from frappe.contacts.doctype.address.address import get_address_display
import csv
from PyPDF2 import PdfFileWriter, PdfFileReader
import socket
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
# für Parsen von Histogrammdaten in Jinja
@frappe.whitelist()
# Prüfen auf Existenz eines Templates in Jinja
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
201,
198,
2,
3384,
4487,
13,
9078,
201,
198,
2,
201,
198,
2,
15069,
357,
34,
8,
300,
2889,
330,
382,
11,
2177,
12,
42334,
201,
198,
2,
3740,
1378,
2503,
... | 2.114286 | 525 |
import os
import random
from dataclasses import dataclass
from typing import Optional, List
VOICES_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), 'voices'))
@dataclass
@dataclass
__all__ = [
'VOICES_FOLDER',
'VoiceFile',
'Voice'
]
| [
11748,
28686,
198,
11748,
4738,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
32233,
11,
7343,
628,
198,
29516,
34444,
62,
37,
3535,
14418,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,... | 2.46789 | 109 |
#!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("source", help="A WAPO or NYT article to pull in from a URL.")
parser.add_argument("-d", "--dest", help="filename to save article as. defaults to print to the screen")
args = parser.parse_args()
r = requests.get(args.source)
soup = BeautifulSoup(r.text, "html.parser")
url_list = args.source.split(".")
if "nytimes" in url_list:
article = soup.find('section',{'name':'articleBody'}) # NYT
elif "washingtonpost" in url_list:
article = soup.find('div',{'class':'article-body'}) # WAPO
else:
print("WAPO and NYT are currently the only sites supported.")
sys.exit()
article_text = article.find_all(text=True)
article_links = article.find_all('a')
if not args.dest:
print("No destination provided, printing to screen")
print(soup.title)
for line in article_text:
print(line)
# for link in article_links:
# print([t for t in link.find_all(text=True) if t.parent.name == 'a'])
else:
# write some file handing stuff here and then print the text to it
pass | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
... | 2.976 | 375 |
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import os, re
_KNOWN_LOCATIONS = [
("/opt/sun/", re.compile(r"j2sdk(.+)/jre/lib/i386/client/libjvm.so") ),
("/usr/java/", re.compile(r"j2sdk(.+)/jre/lib/i386/client/libjvm.so") ),
("/usr/java/", re.compile(r"jdk(.+)/jre/lib/i386/client/libjvm.so") ),
]
JRE_ARCHS = [
"amd64/server/libjvm.so",
"i386/client/libjvm.so",
"i386/server/libjvm.so",
]
| [
2,
17174,
17174,
4557,
35625,
201,
198,
2,
220,
220,
15069,
5472,
12,
11528,
6542,
6065,
446,
201,
198,
2,
201,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
22... | 2.695455 | 440 |
import numpy as np
from scipy import fftpack
import tensorly as tl
from unittest import TestCase
from ..util import square_tensor_gen, TensorInfoBucket, RandomInfoBucket, eval_rerr
from ..sketch import Sketch
import time
from tensorly.decomposition import tucker
from ..recover_from_sketches import SketchTwoPassRecover
from ..recover_from_sketches import SketchOnePassRecover
from sklearn.utils.extmath import randomized_svd
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
277,
701,
8002,
198,
11748,
11192,
273,
306,
355,
256,
75,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
11485,
22602,
1330,
6616,
62,
83,
22854,
62,
5235,
... | 3.274809 | 131 |
"""Example driving LED matrix"""
import random
import time
from buildhat import Matrix
matrix = Matrix('C')
matrix.clear(("red", 10))
time.sleep(1)
matrix.clear()
time.sleep(1)
matrix.set_pixel((0, 0), ("blue", 10))
matrix.set_pixel((2, 2), ("red", 10))
time.sleep(1)
while True:
out = [[(int(random.uniform(0, 9)), 10) for x in range(3)] for y in range(3)]
matrix.set_pixels(out)
time.sleep(0.1)
| [
37811,
16281,
5059,
12365,
17593,
37811,
198,
198,
11748,
4738,
198,
11748,
640,
198,
198,
6738,
1382,
5183,
1330,
24936,
198,
198,
6759,
8609,
796,
24936,
10786,
34,
11537,
198,
198,
6759,
8609,
13,
20063,
7,
7203,
445,
1600,
838,
4008... | 2.447059 | 170 |
from collections import OrderedDict
from fence.jwt.keys import load_keypairs
from tests import utils
def test_reconstruct_keys_dict(app, client):
"""
Test reconstructing the dictionary mapping key ids to public keys from the
return value from the public keys endpoint.
"""
response = client.get("/jwt/keys")
public_keys_dict = OrderedDict(response.json["keys"])
assert public_keys_dict == app.jwt_public_keys[app.config["BASE_URL"]]
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
6738,
13990,
13,
73,
46569,
13,
13083,
1330,
3440,
62,
2539,
79,
3468,
198,
198,
6738,
5254,
1330,
3384,
4487,
628,
198,
4299,
1332,
62,
260,
41571,
62,
13083,
62,
11600,
7,
1324,
1... | 3.019481 | 154 |
# author: github/zabir-nabil | [
2,
1772,
25,
33084,
14,
89,
397,
343,
12,
77,
14991
] | 2.545455 | 11 |
"""
@author: Sriram Veturi
@title: SmartSearch - An Intelligent Search Engine.
@date: 05/06/2019
"""
import os
import math
import json
DATA_DIR = "./documents"
TF_IDF_FILES_DIR = "./tf_idf_files"
def generate_tf():
"""
Function to build the corpus for every document.
:return tf_dict: Ter Frequency Dictionary (Mapping of documents with term frequencies of words in it).
"""
# Dictionary to store term frequency of each term in each url.
tf_dict = dict()
i = 0
# Traverse through the 'documents' directory and get words for each url.
for json_file in os.listdir(DATA_DIR):
# Open the file and contents.
with open(os.path.join(DATA_DIR, json_file), "r") as jf:
# To store term frequency of words in each url.
tf_dict_each = dict()
# Load the dictionary
url_text_dict = dict(json.load(jf))
url_word_count_map = url_text_dict["WORD_COUNT_MAP"]
# "url_word_count_map" is dictionary with word and its frequency.
total_words = sum(list(url_word_count_map.values()))
for word, word_count in url_word_count_map.items():
tf_dict_each[word] = word_count / total_words
# Push individual tf_dict_each to main tf_dict
tf_dict[url_text_dict["URL"]] = tf_dict_each
return tf_dict
def generate_idf():
"""
Function to generate the inverse document frequency.
:return idf_dict: Inverse Document Frequency Mapping.
"""
# Total number of json files in the data directory.
total_pages = len(os.listdir(DATA_DIR))
# Dicitonary to store the IDF of each document.
idf_dict = dict()
#######################
# GENARATE VOCABULARY #
#######################
# Vocabulary of all the words in all the urls.
all_words_in_all_urls = list()
i = 0
# Traverse through the 'documents' directory and get words for each url.
for json_file in os.listdir(DATA_DIR):
if i >= 200:
break
i += 1
# Open the file and contents.
with open(os.path.join(DATA_DIR, json_file), "r") as jf:
# To store term frequency of words in each url.
tf_dict_each = dict()
# Load the dictionary
url_text_dict = dict(json.load(jf))
# Get all the words in this particular document/url.
all_words_in_this_url = list(url_text_dict["WORD_COUNT_MAP"].keys())
# Append it to main list
all_words_in_all_urls += all_words_in_this_url
# Vocabulary of all urls.
vocabulary = set(all_words_in_all_urls)
print(len(vocabulary))
#################
# IDF OPERATION #
#################
j = 0
for word in vocabulary:
print(j)
j += 1
# To track how many urls have this word of vocabulary.
word_count = 0
# Traverse through the 'documents' directory and get words for each url.
for json_file in os.listdir(DATA_DIR):
# Open the file and contents.
with open(os.path.join(DATA_DIR, json_file), "r") as jf:
# To store term frequency of words in each url.
tf_dict_each = dict()
# Load the dictionary
url_text_dict = dict(json.load(jf))
# Get all the words in this particular document/url.
all_words_in_this_url = list(url_text_dict["WORD_COUNT_MAP"].keys())
if word in all_words_in_this_url:
word_count += 1
# Now, we know how many urls contain the word.
# Let's calculate idf.
if word_count == 0:
continue
else:
idf_dict[word] = math.log(total_pages / word_count)
return idf_dict
def generate_tf_idf(tf_dict, idf_dict):
"""
Function to generate TF-IDF.
:return tf_idf_dict: TF-IDF Mapping.
"""
# To store TF-IDF dictionary.
tf_idf_dict = dict()
# Traverse through the 'documents' directory and get words for each url.
for json_file in os.listdir(DATA_DIR):
print(json_file)
# Open the file and contents.
with open(os.path.join(DATA_DIR, json_file), "r") as jf:
# To store term frequency of words in each url.
tf_dict_each = dict()
# Load the dictionary
url_text_dict = dict(json.load(jf))
# Get all the words in this particular document/url.
all_words_in_this_url = list(url_text_dict["WORD_COUNT_MAP"].keys())
this_url = url_text_dict["URL"]
tf_idf_dict_each = dict()
# Product of TF and IDF here.
for word in all_words_in_this_url:
try:
tf_idf_dict_each[word] = tf_dict[this_url][word] * idf_dict[word]
except:
continue
tf_idf_dict[this_url] = tf_idf_dict_each
return tf_idf_dict
def create_directory():
"""
Function to create a directory to store the documents.
:return True/False: Creation Successful Flag.
"""
# If it already exists, return True.
if os.path.isdir(TF_IDF_FILES_DIR) is True:
print("Directory to store the tf-idf already exists. Moving on.")
return True
else:
try:
os.mkdir(TF_IDF_FILES_DIR)
print("Directory created to store the tf-idf files.")
return True
except Exception as e:
print(e)
return False
def create_vector_space_model():
"""
Driver funciton to generate TF-IDF files for further use.
"""
# Check if directory already exists.
# If no, create one.
if create_directory() is True:
# Generate Term Frequency and save json.
tf_dict = generate_tf()
with open(os.path.join(TF_IDF_FILES_DIR, 'tf.json'), 'w') as fp:
json.dump(tf_dict, fp)
# Generate Inverse Document Frequency and save json.
idf_dict = generate_idf()
with open(os.path.join(TF_IDF_FILES_DIR, 'idf.json'), 'w') as fp:
json.dump(idf_dict, fp)
# Generate TF-IDF and save json.
tf_idf_dict = generate_tf_idf(tf_dict, idf_dict)
with open(os.path.join(TF_IDF_FILES_DIR, 'tf_idf.json'), 'w') as fp:
json.dump(tf_idf_dict, fp)
else:
raise Exception("DirectoryCreationError: Could not create directory to store TF-IDF related files.")
create_vector_space_model()
| [
37811,
198,
31,
9800,
25,
21714,
343,
321,
49744,
9900,
198,
31,
7839,
25,
10880,
18243,
532,
1052,
49452,
11140,
7117,
13,
198,
31,
4475,
25,
8870,
14,
3312,
14,
23344,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
10688,
198,
1174... | 2.628598 | 2,154 |
# https://git-scm.com/book/pt-br/v1/Primeiros-passos-No%C3%A7%C3%B5es-B%C3%A1sicas-de-Git
# Este é um progama para codificar em CN (Códogo Numerico)
alfabeto = {'\\n':'\\n','*':'*','-':'-','+':'+',' ':'=','!':'!','?':'?','.':'.',',':',',':':':',';':';','(':'(',')':')','a':'1','b':'2','c':'3','d':'4','e':'5','f':'6','g':'7','h':'8','i':'9','j':'*10*','k':'*11*','l':'*12*','m':'*13*','n':'*14*','o':'*15*','p':'*16*','q':'*17*','r':'*18*','s':'*19*','t':'*20*','u':'*21*','v':'*22*','w':'*23*','y':'*25*','z':'*26*','x':'*27*','ç':'*28*','ã':'*30*','é':'*31*','ê':'*32*','í':'*33*','â':'*34*','á':'*35*','õ':'*36*','ó':'*37*','ú':'*38*','ù':'*39*' }
codigo_pronto = ''
flag = 0
flag2 = True
while flag2 == True:
while flag == 0:
print('Digite a frase que quer codificar em CN (Codigo Numérico) sem caracteres especiais (=,@,#,$,%,\",\',\\):')
entrada = input().lower().strip()
if '@' in entrada or '#' in entrada or '$' in entrada or '%' in entrada or '"' in entrada or '\'' in entrada or '\\' in entrada or '/' in entrada or '=' in entrada:
print('Seu burro e cego não pode ter caracteres especiais (=,@,#,$,%,\",\',\\) na frase. Na proxima a MOMO vai te levar! \n')
entrada = ''
else:
flag = 1
for i in range(len(entrada)):
for k,v in alfabeto.items():
if k == entrada[i]:
codigo_pronto += v
print('\ncodigo pronto:')
print(codigo_pronto +'\n')
print('aperte qualquer tecla para continuar')
flag = 0
input()
| [
2,
3740,
1378,
18300,
12,
1416,
76,
13,
785,
14,
2070,
14,
457,
12,
1671,
14,
85,
16,
14,
26405,
72,
4951,
12,
6603,
418,
12,
2949,
4,
34,
18,
4,
32,
22,
4,
34,
18,
4,
33,
20,
274,
12,
33,
4,
34,
18,
4,
32,
16,
21383,
... | 1.902913 | 824 |
class BaseHTTPRequestError(Exception):
"""Base error for request responses."""
class NotFound(BaseHTTPRequestError):
"""Raised when the guild or user is not found."""
class InvalidToken(BaseHTTPRequestError):
"""Raised when the authentication key is invalid."""
class Ratelimited(BaseHTTPRequestError):
"""Raised when ratelimit responses are recieved."""
class AmariServerError(BaseHTTPRequestError):
"""Raised when their is an internal error in the Amari servers."""
| [
4871,
7308,
40717,
18453,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
14881,
4049,
329,
2581,
9109,
526,
15931,
628,
198,
4871,
1892,
21077,
7,
14881,
40717,
18453,
12331,
2599,
198,
220,
220,
220,
37227,
21762,
1417,
618,
262,
19... | 3.674074 | 135 |
"""Support for reading vehicle status from BMW connected drive portal."""
from __future__ import annotations
from copy import copy
from dataclasses import dataclass
import logging
from bimmer_connected.const import SERVICE_ALL_TRIPS, SERVICE_LAST_TRIP, SERVICE_STATUS
from bimmer_connected.state import ChargingState
from bimmer_connected.vehicle import ConnectedDriveVehicle
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_UNIT_SYSTEM_IMPERIAL,
DEVICE_CLASS_TIMESTAMP,
ENERGY_KILO_WATT_HOUR,
ENERGY_WATT_HOUR,
LENGTH_KILOMETERS,
LENGTH_MILES,
MASS_KILOGRAMS,
PERCENTAGE,
TIME_HOURS,
TIME_MINUTES,
VOLUME_GALLONS,
VOLUME_LITERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.icon import icon_for_battery_level
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import UnitSystem
from . import (
DOMAIN as BMW_DOMAIN,
BMWConnectedDriveAccount,
BMWConnectedDriveBaseEntity,
)
from .const import CONF_ACCOUNT, DATA_ENTRIES
_LOGGER = logging.getLogger(__name__)
@dataclass
class BMWSensorEntityDescription(SensorEntityDescription):
"""Describes BMW sensor entity."""
unit_metric: str | None = None
unit_imperial: str | None = None
SENSOR_TYPES: dict[str, BMWSensorEntityDescription] = {
# --- Generic ---
"charging_time_remaining": BMWSensorEntityDescription(
key="charging_time_remaining",
icon="mdi:update",
unit_metric=TIME_HOURS,
unit_imperial=TIME_HOURS,
),
"charging_status": BMWSensorEntityDescription(
key="charging_status",
icon="mdi:battery-charging",
),
# No icon as this is dealt with directly as a special case in icon()
"charging_level_hv": BMWSensorEntityDescription(
key="charging_level_hv",
unit_metric=PERCENTAGE,
unit_imperial=PERCENTAGE,
),
# LastTrip attributes
"date_utc": BMWSensorEntityDescription(
key="date_utc",
device_class=DEVICE_CLASS_TIMESTAMP,
),
"duration": BMWSensorEntityDescription(
key="duration",
icon="mdi:timer-outline",
unit_metric=TIME_MINUTES,
unit_imperial=TIME_MINUTES,
),
"electric_distance_ratio": BMWSensorEntityDescription(
key="electric_distance_ratio",
icon="mdi:percent-outline",
unit_metric=PERCENTAGE,
unit_imperial=PERCENTAGE,
entity_registry_enabled_default=False,
),
# AllTrips attributes
"battery_size_max": BMWSensorEntityDescription(
key="battery_size_max",
icon="mdi:battery-charging-high",
unit_metric=ENERGY_WATT_HOUR,
unit_imperial=ENERGY_WATT_HOUR,
entity_registry_enabled_default=False,
),
"reset_date_utc": BMWSensorEntityDescription(
key="reset_date_utc",
device_class=DEVICE_CLASS_TIMESTAMP,
entity_registry_enabled_default=False,
),
"saved_co2": BMWSensorEntityDescription(
key="saved_co2",
icon="mdi:tree-outline",
unit_metric=MASS_KILOGRAMS,
unit_imperial=MASS_KILOGRAMS,
entity_registry_enabled_default=False,
),
"saved_co2_green_energy": BMWSensorEntityDescription(
key="saved_co2_green_energy",
icon="mdi:tree-outline",
unit_metric=MASS_KILOGRAMS,
unit_imperial=MASS_KILOGRAMS,
entity_registry_enabled_default=False,
),
# --- Specific ---
"mileage": BMWSensorEntityDescription(
key="mileage",
icon="mdi:speedometer",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"remaining_range_total": BMWSensorEntityDescription(
key="remaining_range_total",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"remaining_range_electric": BMWSensorEntityDescription(
key="remaining_range_electric",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"remaining_range_fuel": BMWSensorEntityDescription(
key="remaining_range_fuel",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"max_range_electric": BMWSensorEntityDescription(
key="max_range_electric",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"remaining_fuel": BMWSensorEntityDescription(
key="remaining_fuel",
icon="mdi:gas-station",
unit_metric=VOLUME_LITERS,
unit_imperial=VOLUME_GALLONS,
),
# LastTrip attributes
"average_combined_consumption": BMWSensorEntityDescription(
key="average_combined_consumption",
icon="mdi:flash",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
),
"average_electric_consumption": BMWSensorEntityDescription(
key="average_electric_consumption",
icon="mdi:power-plug-outline",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
),
"average_recuperation": BMWSensorEntityDescription(
key="average_recuperation",
icon="mdi:recycle-variant",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
),
"electric_distance": BMWSensorEntityDescription(
key="electric_distance",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"saved_fuel": BMWSensorEntityDescription(
key="saved_fuel",
icon="mdi:fuel",
unit_metric=VOLUME_LITERS,
unit_imperial=VOLUME_GALLONS,
entity_registry_enabled_default=False,
),
"total_distance": BMWSensorEntityDescription(
key="total_distance",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
# AllTrips attributes
"average_combined_consumption_community_average": BMWSensorEntityDescription(
key="average_combined_consumption_community_average",
icon="mdi:flash",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_combined_consumption_community_high": BMWSensorEntityDescription(
key="average_combined_consumption_community_high",
icon="mdi:flash",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_combined_consumption_community_low": BMWSensorEntityDescription(
key="average_combined_consumption_community_low",
icon="mdi:flash",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_combined_consumption_user_average": BMWSensorEntityDescription(
key="average_combined_consumption_user_average",
icon="mdi:flash",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
),
"average_electric_consumption_community_average": BMWSensorEntityDescription(
key="average_electric_consumption_community_average",
icon="mdi:power-plug-outline",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_electric_consumption_community_high": BMWSensorEntityDescription(
key="average_electric_consumption_community_high",
icon="mdi:power-plug-outline",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_electric_consumption_community_low": BMWSensorEntityDescription(
key="average_electric_consumption_community_low",
icon="mdi:power-plug-outline",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_electric_consumption_user_average": BMWSensorEntityDescription(
key="average_electric_consumption_user_average",
icon="mdi:power-plug-outline",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
),
"average_recuperation_community_average": BMWSensorEntityDescription(
key="average_recuperation_community_average",
icon="mdi:recycle-variant",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_recuperation_community_high": BMWSensorEntityDescription(
key="average_recuperation_community_high",
icon="mdi:recycle-variant",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_recuperation_community_low": BMWSensorEntityDescription(
key="average_recuperation_community_low",
icon="mdi:recycle-variant",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
entity_registry_enabled_default=False,
),
"average_recuperation_user_average": BMWSensorEntityDescription(
key="average_recuperation_user_average",
icon="mdi:recycle-variant",
unit_metric=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_KILOMETERS}",
unit_imperial=f"{ENERGY_KILO_WATT_HOUR}/100{LENGTH_MILES}",
),
"chargecycle_range_community_average": BMWSensorEntityDescription(
key="chargecycle_range_community_average",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
entity_registry_enabled_default=False,
),
"chargecycle_range_community_high": BMWSensorEntityDescription(
key="chargecycle_range_community_high",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
entity_registry_enabled_default=False,
),
"chargecycle_range_community_low": BMWSensorEntityDescription(
key="chargecycle_range_community_low",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
entity_registry_enabled_default=False,
),
"chargecycle_range_user_average": BMWSensorEntityDescription(
key="chargecycle_range_user_average",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"chargecycle_range_user_current_charge_cycle": BMWSensorEntityDescription(
key="chargecycle_range_user_current_charge_cycle",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"chargecycle_range_user_high": BMWSensorEntityDescription(
key="chargecycle_range_user_high",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
),
"total_electric_distance_community_average": BMWSensorEntityDescription(
key="total_electric_distance_community_average",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
entity_registry_enabled_default=False,
),
"total_electric_distance_community_high": BMWSensorEntityDescription(
key="total_electric_distance_community_high",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
entity_registry_enabled_default=False,
),
"total_electric_distance_community_low": BMWSensorEntityDescription(
key="total_electric_distance_community_low",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
entity_registry_enabled_default=False,
),
"total_electric_distance_user_average": BMWSensorEntityDescription(
key="total_electric_distance_user_average",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
entity_registry_enabled_default=False,
),
"total_electric_distance_user_total": BMWSensorEntityDescription(
key="total_electric_distance_user_total",
icon="mdi:map-marker-distance",
unit_metric=LENGTH_KILOMETERS,
unit_imperial=LENGTH_MILES,
entity_registry_enabled_default=False,
),
"total_saved_fuel": BMWSensorEntityDescription(
key="total_saved_fuel",
icon="mdi:fuel",
unit_metric=VOLUME_LITERS,
unit_imperial=VOLUME_GALLONS,
entity_registry_enabled_default=False,
),
}
DEFAULT_BMW_DESCRIPTION = BMWSensorEntityDescription(
key="",
entity_registry_enabled_default=True,
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the BMW ConnectedDrive sensors from config entry.

    Creates one BMWConnectedDriveSensor per supported vehicle attribute,
    covering the status, last-trip and all-trips services of each vehicle
    on the account.
    """
    # pylint: disable=too-many-nested-blocks
    unit_system = hass.config.units
    # The account object was stored under this entry by the integration setup.
    account: BMWConnectedDriveAccount = hass.data[BMW_DOMAIN][DATA_ENTRIES][
        config_entry.entry_id
    ][CONF_ACCOUNT]
    entities: list[BMWConnectedDriveSensor] = []
    for vehicle in account.account.vehicles:
        for service in vehicle.available_state_services:
            if service == SERVICE_STATUS:
                # Plain status sensors: only drive-train attributes that are
                # both available on the vehicle and known in SENSOR_TYPES.
                entities.extend(
                    [
                        BMWConnectedDriveSensor(
                            account, vehicle, description, unit_system
                        )
                        for attribute_name in vehicle.drive_train_attributes
                        if attribute_name in vehicle.available_attributes
                        and (description := SENSOR_TYPES.get(attribute_name))
                    ]
                )
            if service == SERVICE_LAST_TRIP:
                entities.extend(
                    [
                        # mypy issues will be fixed in next release
                        # https://github.com/python/mypy/issues/9096
                        BMWConnectedDriveSensor(
                            account,
                            vehicle,
                            description,  # type: ignore[arg-type]
                            unit_system,
                            service,
                        )
                        for attribute_name in vehicle.state.last_trip.available_attributes
                        if attribute_name != "date"
                        and (description := SENSOR_TYPES.get(attribute_name))  # type: ignore[no-redef]
                    ]
                )
                # "date" is handled separately with a dedicated UTC description.
                if "date" in vehicle.state.last_trip.available_attributes:
                    entities.append(
                        BMWConnectedDriveSensor(
                            account,
                            vehicle,
                            SENSOR_TYPES["date_utc"],
                            unit_system,
                            service,
                        )
                    )
            if service == SERVICE_ALL_TRIPS:
                for attribute_name in vehicle.state.all_trips.available_attributes:
                    if attribute_name == "reset_date":
                        # Like "date" above: exposed as a UTC timestamp sensor.
                        entities.append(
                            BMWConnectedDriveSensor(
                                account,
                                vehicle,
                                SENSOR_TYPES["reset_date_utc"],
                                unit_system,
                                service,
                            )
                        )
                    elif attribute_name in (
                        "average_combined_consumption",
                        "average_electric_consumption",
                        "average_recuperation",
                        "chargecycle_range",
                        "total_electric_distance",
                    ):
                        # Composite attributes: one sensor per statistical
                        # sub-value (community and user aggregates).
                        entities.extend(
                            [
                                BMWConnectedDriveSensor(
                                    account,
                                    vehicle,
                                    SENSOR_TYPES[f"{attribute_name}_{attr}"],
                                    unit_system,
                                    service,
                                )
                                for attr in (
                                    "community_average",
                                    "community_high",
                                    "community_low",
                                    "user_average",
                                )
                            ]
                        )
                        # Two of the composite attributes carry extra
                        # attribute-specific sub-values.
                        if attribute_name == "chargecycle_range":
                            entities.extend(
                                BMWConnectedDriveSensor(
                                    account,
                                    vehicle,
                                    SENSOR_TYPES[f"{attribute_name}_{attr}"],
                                    unit_system,
                                    service,
                                )
                                for attr in ("user_current_charge_cycle", "user_high")
                            )
                        elif attribute_name == "total_electric_distance":
                            entities.extend(
                                [
                                    BMWConnectedDriveSensor(
                                        account,
                                        vehicle,
                                        SENSOR_TYPES[f"{attribute_name}_{attr}"],
                                        unit_system,
                                        service,
                                    )
                                    for attr in ("user_total",)
                                ]
                            )
                    else:
                        # Unknown attribute: fall back to a per-attribute copy
                        # of the default description keyed by the attribute
                        # name (copy() so the shared default is not mutated).
                        if (description := SENSOR_TYPES.get(attribute_name)) is None:
                            description = copy(DEFAULT_BMW_DESCRIPTION)
                            description.key = attribute_name
                        entities.append(
                            BMWConnectedDriveSensor(
                                account,
                                vehicle,
                                description,
                                unit_system,
                                service,
                            )
                        )
    async_add_entities(entities, True)
class BMWConnectedDriveSensor(BMWConnectedDriveBaseEntity, SensorEntity):
    """Representation of a BMW vehicle sensor."""
    # Narrow the base class attribute to the BMW-specific description type.
    entity_description: BMWSensorEntityDescription
    def __init__(
        self,
        account: BMWConnectedDriveAccount,
        vehicle: ConnectedDriveVehicle,
        description: BMWSensorEntityDescription,
        unit_system: UnitSystem,
        service: str | None = None,
    ) -> None:
        """Initialize BMW vehicle sensor.

        Args:
            account: The ConnectedDrive account wrapper.
            vehicle: The vehicle this sensor belongs to.
            description: Entity description (key, icon, units).
            unit_system: Home Assistant's configured unit system; decides
                whether the metric or imperial unit is reported.
            service: Optional service name (last trip / all trips); when set
                it is embedded in the entity name and unique id.
        """
        super().__init__(account, vehicle)
        self.entity_description = description
        self._service = service
        # Service-scoped sensors include the service name so that the same
        # description key stays unique across services.
        if service:
            self._attr_name = f"{vehicle.name} {service.lower()}_{description.key}"
            self._attr_unique_id = f"{vehicle.vin}-{service.lower()}-{description.key}"
        else:
            self._attr_name = f"{vehicle.name} {description.key}"
            self._attr_unique_id = f"{vehicle.vin}-{description.key}"
        if unit_system.name == CONF_UNIT_SYSTEM_IMPERIAL:
            self._attr_native_unit_of_measurement = description.unit_imperial
        else:
            self._attr_native_unit_of_measurement = description.unit_metric
    def update(self) -> None:
        """Read new state data from the library."""
        _LOGGER.debug("Updating %s", self._vehicle.name)
        vehicle_state = self._vehicle.state
        sensor_key = self.entity_description.key
        if sensor_key == "charging_status":
            # Enum-like attribute: expose its .value payload, not the enum.
            self._attr_native_value = getattr(vehicle_state, sensor_key).value
        elif self.unit_of_measurement == VOLUME_GALLONS:
            # Library reports litres; convert when imperial units are active.
            value = getattr(vehicle_state, sensor_key)
            value_converted = self.hass.config.units.volume(value, VOLUME_LITERS)
            self._attr_native_value = round(value_converted)
        elif self.unit_of_measurement == LENGTH_MILES:
            # Library reports kilometres; convert when imperial units are active.
            value = getattr(vehicle_state, sensor_key)
            value_converted = self.hass.config.units.length(value, LENGTH_KILOMETERS)
            self._attr_native_value = round(value_converted)
        elif self._service is None:
            # Plain status sensor: read the attribute straight off the state.
            self._attr_native_value = getattr(vehicle_state, sensor_key)
        elif self._service == SERVICE_LAST_TRIP:
            vehicle_last_trip = self._vehicle.state.last_trip
            if sensor_key == "date_utc":
                date_str = getattr(vehicle_last_trip, "date")
                if parsed_date := dt_util.parse_datetime(date_str):
                    self._attr_native_value = parsed_date.isoformat()
                else:
                    _LOGGER.debug(
                        "Could not parse date string for 'date_utc' sensor: %s",
                        date_str,
                    )
                    self._attr_native_value = None
            else:
                self._attr_native_value = getattr(vehicle_last_trip, sensor_key)
        elif self._service == SERVICE_ALL_TRIPS:
            vehicle_all_trips = self._vehicle.state.all_trips
            # Composite attributes: the description key is
            # "<attribute>_<sub_attr>"; resolve the sub-value dynamically.
            for attribute in (
                "average_combined_consumption",
                "average_electric_consumption",
                "average_recuperation",
                "chargecycle_range",
                "total_electric_distance",
            ):
                if sensor_key.startswith(f"{attribute}_"):
                    attr = getattr(vehicle_all_trips, attribute)
                    sub_attr = sensor_key.replace(f"{attribute}_", "")
                    self._attr_native_value = getattr(attr, sub_attr)
                    return
            if sensor_key == "reset_date_utc":
                date_str = getattr(vehicle_all_trips, "reset_date")
                if parsed_date := dt_util.parse_datetime(date_str):
                    self._attr_native_value = parsed_date.isoformat()
                else:
                    _LOGGER.debug(
                        "Could not parse date string for 'reset_date_utc' sensor: %s",
                        date_str,
                    )
                    self._attr_native_value = None
            else:
                self._attr_native_value = getattr(vehicle_all_trips, sensor_key)
        # Battery sensor icon reflects both charge level and charging state.
        vehicle_state = self._vehicle.state
        charging_state = vehicle_state.charging_status in [ChargingState.CHARGING]
        if sensor_key == "charging_level_hv":
            self._attr_icon = icon_for_battery_level(
                battery_level=vehicle_state.charging_level_hv, charging=charging_state
            )
| [
37811,
15514,
329,
3555,
4038,
3722,
422,
19339,
5884,
3708,
17898,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
4866,
1330,
4866,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
11748,
18931,
198,
198,... | 1.968484 | 12,343 |
# Programa para Clasificación de suelos mediante SUCS
# (Unified Soil Classification System, ASTM D2487).


def clasificar_sucs(gravas, arenas, finos, cu, cc, ll, lp, suelo_organico):
    """Return the SUCS (USCS) group symbol for a soil sample.

    Args:
        gravas: weight of the gravel fraction.
        arenas: weight of the sand fraction.
        finos: weight of the fines (silt/clay) fraction.
        cu: coefficient of uniformity, Cu = D60 / D10.
        cc: coefficient of curvature, Cc = D30**2 / (D60 * D10).
        ll: liquid limit.
        lp: plastic limit.
        suelo_organico: True when the soil is organic (O symbols).

    Returns:
        The group symbol (e.g. "SP", "GW", "CH", "SM-SC"), or the string
        "Caso de doble Frontera" when fines are in the 5%-12% border band.

    Raises:
        ValueError: if the sample weight is not positive.

    Fixes relative to the original script:
      * SW now requires Cu > 6 (the original used the gravel criterion
        Cu > 4 for sands as well; ASTM D2487 uses 6 for sands).
      * Boundary values IP == 4, IP == 7 and IP exactly on the A-line no
        longer fall through to an "Error" message; they are classified as
        the dual-symbol border cases the standard prescribes.
    """
    peso_muestra = gravas + arenas + finos
    if peso_muestra <= 0:
        raise ValueError("la muestra debe tener peso positivo")
    gruesos = gravas + arenas
    ip = ll - lp                       # índice de plasticidad
    pi_linea_a = 0.73 * (ll - 20)      # PI de la línea A para este LL
    if gruesos / peso_muestra > 0.5:
        # Suelo de grano grueso: grava (G...) o arena (S...).
        if gravas / gruesos > 0.5:
            prefijo, cu_minimo = "G", 4    # GW: Cu > 4
        else:
            prefijo, cu_minimo = "S", 6    # SW: Cu > 6 (ASTM D2487)
        fraccion_finos = finos / peso_muestra
        if fraccion_finos < 0.05:
            # Grava/arena limpia: bien (W) o mal (P) graduada.
            if cu > cu_minimo and 1 < cc < 3:
                return prefijo + "W"
            return prefijo + "P"
        if fraccion_finos < 0.12:
            # 5%-12% de finos: se requiere símbolo doble.
            return "Caso de doble Frontera"
        # Más del 12% de finos: clasificar por plasticidad de los finos.
        if ip < pi_linea_a or ip < 4:
            return prefijo + "M"                      # finos limosos
        if ip <= 7:
            return prefijo + "M-" + prefijo + "C"     # franja 4 <= IP <= 7
        return prefijo + "C"                          # finos arcillosos
    # Suelo de grano fino.
    if ll < 50:
        if ip < pi_linea_a or ip < 4:
            return "OL" if suelo_organico else "ML"
        if ip <= 7:
            return "ML-CL"
        return "CL"
    if ip < pi_linea_a:
        return "OH" if suelo_organico else "MH"
    return "CH"


# Datos de entrada de la muestra (idénticos al script original).
gravas, arenas, finos = 29, 67, 4
cu, cc = 3.4, 2.6
ll, lp = 52, 24
sueloOrganico = True
print(clasificar_sucs(gravas, arenas, finos, cu, cc, ll, lp, sueloOrganico))
| [
2,
6118,
64,
31215,
1012,
292,
811,
32009,
18840,
390,
424,
417,
418,
1117,
3014,
68,
13558,
7902,
13,
198,
198,
70,
4108,
292,
11,
46168,
11,
957,
418,
796,
2808,
11,
8275,
11,
604,
198,
48929,
274,
418,
796,
9067,
292,
1343,
461... | 1.679761 | 1,171 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'


# pip install Pillow
from PIL import Image


def darken_image(img, amount=50, background=(255, 255, 255)):
    """Darken every non-background pixel of *img* in place.

    Args:
        img: a Pillow image in 'RGBA' mode (raw pixel access is used).
        amount: value subtracted from each RGB channel.
        background: RGB triple left untouched (the backdrop color).

    Fixes relative to the original script: channel values are clamped at 0.
    The original computed ``x - 50`` unconditionally, which goes negative
    for channels darker than 50 and corrupts the stored 8-bit value; it
    also shadowed the pixel loop variable ``x`` inside the lambda.
    """
    pixdata = img.load()
    width, height = img.size
    for y in range(height):
        for x in range(width):
            rgb = pixdata[x, y][:3]
            if rgb != background:
                # Clamp at 0 so dark channels cannot underflow.
                darker = tuple(max(0, channel - amount) for channel in rgb)
                # Keep the alpha (and any extra) bands unchanged.
                pixdata[x, y] = darker + pixdata[x, y][3:]


if __name__ == '__main__':
    image_file = "Collapse all.png"
    img = Image.open(image_file).convert('RGBA')
    # Darken the picture while ignoring the (white) background.
    darken_image(img)
    img.save("Collapse all black.png")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
541,
21879,
1077,
6,
628,
198,
2,
7347,
2721,
19770,
322,
198,
6738,
350,
4146,
... | 1.957031 | 256 |
#! /usr/bin/env python
"""
read and write test for Storage System
liuyang1,mtime: 2012-12-04 13:18:11
"""
import os
from random import randint
import log
# Directory whose files are exercised by the read/write test.
g_path="/home/liuy/video/"
# Shared logger; `log` is a project-local helper module (not stdlib logging).
g_logger=log.initLogging("RWT")
if __name__=="__main__":
    # NOTE(review): main() is not defined in this chunk of the file -- the
    # test driver was presumably defined between the globals and this guard;
    # confirm against the full source.
    main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
961,
290,
3551,
1332,
329,
20514,
4482,
198,
4528,
4669,
648,
16,
11,
76,
2435,
25,
2321,
12,
1065,
12,
3023,
1511,
25,
1507,
25,
1157,
220,
198,
37811,
198,
198,
117... | 2.525253 | 99 |
from slackbot.bot import Bot, listen_to, respond_to, default_reply
import re
import logging
import random
# Log line format: timestamp followed by the message text.
LOG_FORMAT = '%(asctime)-15s %(message)s'
logger = None
# Template used to announce a received direct tweet to the team members.
announce = "Received direct tweet (@sae @brian @otherdavid @ken @tgiordonell0): {0} {1}"
# NOTE(review): the handler bodies are missing from this chunk -- each
# decorator below must be followed by a function definition in the full
# source; as shown here the module does not parse.
@listen_to('#slack2tweet (.*) (.*)', re.IGNORECASE)
@respond_to('^#slack2tweet$')
@listen_to('^#slack2tweet$')
@respond_to('tweet (.*) (.*)', re.IGNORECASE)
@respond_to('burninator$', re.IGNORECASE)
@respond_to('trogdor$', re.IGNORECASE)
@respond_to('burninator (.*)', re.IGNORECASE)
@respond_to('trogdor (.*)', re.IGNORECASE)
@listen_to('burninator$', re.IGNORECASE)
@listen_to('trogdor$', re.IGNORECASE)
@listen_to('burninator (.*)', re.IGNORECASE)
@listen_to('trogdor (.*)', re.IGNORECASE)
@default_reply
if __name__ == '__main__':
    logging.basicConfig(filename='slack.log',
                        level=logging.INFO,
                        format=LOG_FORMAT)
    bot = Bot()
    bot.run()
| [
6738,
30740,
13645,
13,
13645,
1330,
18579,
11,
6004,
62,
1462,
11,
3031,
62,
1462,
11,
4277,
62,
47768,
198,
11748,
302,
198,
11748,
18931,
198,
11748,
4738,
628,
198,
25294,
62,
21389,
1404,
796,
705,
4,
7,
292,
310,
524,
13219,
1... | 2.193622 | 439 |
from twitter import *
import pyttsx
import APIKEYS
''' MYCREDS.txt has the following format:
oauthtokenvalue
oauthsecretvalue
'''
if __name__ == "__main__":
    # NOTE(review): the helpers used below (getTwitterByConfig, getTimeline,
    # printTimeline, getSpeechEngine, speakTimeline) are not defined in this
    # chunk -- presumably defined earlier in the full source; confirm there.
    # Fetch the timeline, print it, then read it aloud via text-to-speech.
    t = getTwitterByConfig()
    timeline = getTimeline(t)
    printTimeline(timeline)
    engine = getSpeechEngine()
    speakTimeline(engine, timeline)
6738,
17044,
1330,
1635,
201,
198,
11748,
12972,
83,
912,
87,
201,
198,
11748,
7824,
7336,
16309,
201,
198,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
201,
198,
7061,
6,
17615,
9419,
... | 2.324503 | 151 |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines the Evaluator interface and related functions.
"""
from abc import abstractmethod
from typing import Any, Optional
from texar.torch import HParams
from forte.data.base_pack import PackType
from forte.pipeline_component import PipelineComponent
# Public API of this module for `from ... import *`.
__all__ = [
    "Evaluator",
]
class Evaluator(PipelineComponent[PackType]):
    r"""Base class for pipeline evaluators.

    A concrete evaluator is fed prediction/reference pack pairs through
    :meth:`consume_next` and reports its aggregated score through
    :meth:`get_result`.

    Args:
        config: The configuration of the evaluator.
    """

    @abstractmethod
    def consume_next(self, pred_pack: PackType, ref_pack: PackType):
        r"""Feed one prediction/reference pair into the evaluation.

        Args:
            pred_pack: Data pack holding the system's predicted results.
            ref_pack: Data pack holding the gold reference to score against.
        """
        raise NotImplementedError

    @abstractmethod
    def get_result(self) -> Any:
        r"""Return the evaluation score aggregated so far."""
        raise NotImplementedError
| [
2,
15069,
13130,
383,
6401,
68,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
2... | 3.012433 | 563 |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from monai.transforms import ScaleIntensity
from tests.utils import NumpyImageTestCase2D
if __name__ == '__main__':
    # NOTE(review): the TestCase class (built on NumpyImageTestCase2D and
    # ScaleIntensity) is not present in this chunk; as shown, discovery
    # finds no tests -- confirm against the full source.
    unittest.main()
| [
2,
15069,
12131,
25000,
20185,
42727,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,... | 3.694581 | 203 |
from __future__ import print_function
import argparse
import os
import stat
import sys
import re
import platform
import shutil
from lib import utils
from lib import compare
# test mode: create copy of reference files
# update mode: try to open file in p4 if necessary
# test mode: run perl script to compare reference and generated files
###########################################################################################################
# main
###########################################################################################################
# Command-line interface: "-test" switches the generator into testing mode
# (compare generated output against references instead of updating them).
parser = argparse.ArgumentParser(description='Generates meta data source files.')
parser.add_argument('-test', help='enables testing mode, internal only', action='store_true')
args = parser.parse_args()
scriptDir = os.path.dirname(os.path.realpath(__file__))
try:
    os.makedirs("temp")
except:
    # Best effort: the temp directory usually already exists; ignore errors.
    None
# find SDK_ROOT, EXTERNALS and PX_SHARED
sdkRoot = utils.find_root_path(scriptDir, "Source")
if os.path.isdir(os.path.join(sdkRoot, "../PhysX_3.4")):
    # Source-tree layout: siblings of the SDK root.
    externalsRoot = os.path.join(sdkRoot, "../Externals")
    pxSharedRoot = os.path.join(sdkRoot, "../PxShared")
else:
    # Packaged layout: locate externals and PxShared by walking upwards.
    externalsRoot = os.path.join(utils.find_root_path(scriptDir, "externals/clang"), "externals")
    pxSharedRoot = os.path.join(utils.find_root_path(scriptDir, "PxShared"), os.path.normpath("PxShared/1.0/trunk"))
print("testmode:", args.test)
print("root sdk:", sdkRoot)
print("root externals:", externalsRoot)
print("root shared:", pxSharedRoot)
# Boilerplate header handed to the meta-data clang pass.
boilerPlateFile = os.path.join(sdkRoot, os.path.normpath("Tools/PhysXMetaDataGenerator/PxBoilerPlate.h"))
# Assemble the include-path arguments handed to clang.  includeString()
# (defined elsewhere in this file) formats one directory as a single flag;
# the concatenation order below matches the original flag order exactly.
includes = ''
for sharedSub in ('/include',
                  '/src/foundation/include',
                  '/src/pvd/include',
                  '/src/fastxml/include'):
    includes += includeString(pxSharedRoot + sharedSub)
for sdkSub in ('/Include/common',
               '/Include/geometry',
               '/Include/pvd',
               '/Include/particles',
               '/Include/cloth',
               '/Include/gpu',
               '/Include',
               '/Include/foundation',
               '/Source/PhysXCommon/src',
               '/Source/GeomUtils/headers',
               '/Source/GeomUtils/src',
               '/Source/GeomUtils/Opcode',
               '/Source/PhysX/src',
               '/Source/PhysX/src/buffering',
               '/Source/PhysX/src/particles',
               '/Source/PhysX/src/cloth',
               '/Source/SimulationController/src',
               '/Source/SimulationController/src/framework',
               '/Source/SimulationController/include',
               '/Source/PhysXCooking/include',
               '/Source/SceneQuery',
               '/Source/PhysXMetaData/core/include',
               '/Source/PhysXGpu/include',
               '/Tools/PhysXMetaDataGenerator'):
    includes += includeString(sdkRoot + sdkSub)
print("platform:", platform.system())
commonFlags = '-DNDEBUG -DPX_GENERATE_META_DATA -x c++-header -std=c++0x -w -nobuiltininc -fms-extensions '
if platform.system() == "Windows":
# stddef.h doesn't compile with VS10 and -std=c++0x
# for some reason -cc1 needs to go first in commonFlags
commonFlags = '-cc1 ' + commonFlags
platformFlags = '-DPX_VC=11 -D_WIN32 ' + ' -isystem"' + os.environ['VS110COMNTOOLS'] + '/../../VC/include"'
clangExe = os.path.join(externalsRoot, os.path.normpath('clang/3.3.3/win32/bin/clang.exe'))
debugFile = open("temp/clangCommandLine_windows.txt", "a")
elif platform.system() == "Linux":
platformFlags = ''
clangExe = os.path.join(externalsRoot, os.path.normpath('clang/3.3.3/linux32/bin/clang'))
debugFile = open("temp/clangCommandLine_linux.txt", "a")
elif platform.system() == "Darwin":
platformFlags = ' -isysroot' + get_osx_platform_path()
clangExe = os.path.join(externalsRoot, os.path.normpath('clang/3.3.3/osx/bin/clang'))
debugFile = open("temp/clangCommandLine_osx.txt", "a")
else:
print("unsupported platform, aborting!")
sys.exit(1)
commonFlags += ' -boilerplate-file ' + boilerPlateFile
#some checks
if not os.path.isfile(clangExe):
print("didn't find,", clangExe, ", aborting!")
sys.exit(1)
clangExe = '"' + clangExe + '"'
# required for execution of clang.exe
os.environ["PWD"] = os.path.join(sdkRoot, os.path.normpath("Tools\PhysXMetaDataGenerator"))
###############################
#  PxPhysicsWithExtensions    #
###############################
# NOTE(review): setup_targetdir/test_targetdir are not defined in this chunk
# -- presumably helpers defined earlier in the full source; confirm there.
print("PxPhysicsWithExtensions:")
srcPath = "PxPhysicsWithExtensionsAPI.h"
metaDataDir = os.path.join(sdkRoot, os.path.normpath("Source/PhysXMetaData"))
targetDir = setup_targetdir(metaDataDir, args.test)
# Build the clang command; "-o" points at the generated-source directory.
cmd = " ".join(["", clangExe, commonFlags, "", platformFlags, includes, srcPath, "-o", '"'+targetDir+'"'])
print(cmd, file = debugFile)
(stdout, stderr) = utils.run_cmd(cmd)
if (stderr != "" or stdout != ""):
    print(stderr, "\n", stdout)
print("wrote meta data files in", targetDir)
test_targetdir(targetDir, metaDataDir, args.test)
###############################
#  PxVehicleExtension         #
###############################
print("PxVehicleExtension:")
srcPath = "PxVehicleExtensionAPI.h"
metaDataDir = os.path.join(sdkRoot, os.path.normpath("Source/PhysXVehicle/src/PhysXMetaData"))
# The vehicle pass needs a few additional include directories.
includes += includeString(sdkRoot + '/Include/vehicle')
includes += includeString(sdkRoot + '/Source/PhysXVehicle/src')
includes += includeString(pxSharedRoot + '/include/foundation')
targetDir = setup_targetdir(metaDataDir, args.test)
cmd = " ".join(["", clangExe, commonFlags, "", platformFlags, includes, srcPath, "-o", '"'+targetDir+'"'])
print(cmd, file = debugFile)
(stdout, stderr) = utils.run_cmd(cmd)
if (stderr != "" or stdout != ""):
    print(stderr, "\n", stdout)
print("wrote meta data files in", targetDir)
test_targetdir(targetDir, metaDataDir, args.test)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
1185,
198,
11748,
25064,
198,
11748,
302,
198,
11748,
3859,
198,
11748,
4423,
346,
198,
6738,
9195,
1330,
3384,
4487,
198,
6738,
... | 2.954202 | 2,118 |
import pickle
import warnings
from datetime import datetime
import os
from typing import Callable, Dict, Iterable
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torchvision
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from torch.utils.data import Dataset, Subset
from torchvision import transforms
import avalanche
from avalanche.benchmarks.generators import tensor_scenario
from avalanche.evaluation.metrics import (ExperienceForgetting,
StreamConfusionMatrix,
accuracy_metrics, cpu_usage_metrics,
disk_usage_metrics, loss_metrics,
timing_metrics)
from avalanche.logging import InteractiveLogger, TensorboardLogger, TextLogger
from avalanche.models import SimpleMLP
from avalanche.training.plugins import EvaluationPlugin
from avalanche.training.strategies import GEM
warnings.filterwarnings("ignore")
now = datetime.now()
cur_time = now.strftime("%d-%m-%Y::%H:%M:%S")
train_data_x = []
train_data_y = []
test_data_x= []
test_data_y = []
# Cache sentinel: if the first pickle exists we assume all four do.
cache_label = './data/train_data_x.pth'
if(os.path.exists(cache_label)):
    # Fast path: load the previously pickled per-class tensor lists.
    with open('./data/train_data_x.pth','rb') as f:
        train_data_x = pickle.load(f)
    with open('./data/train_data_y.pth','rb') as f:
        train_data_y = pickle.load(f)
    with open('./data/test_data_x.pth','rb') as f:
        test_data_x = pickle.load(f)
    with open('./data/test_data_y.pth','rb') as f:
        test_data_y = pickle.load(f)
    print('Data Loaded!!!')
else:
    # Slow path: build the per-class splits from the raw numpy arrays.
    data_path = './data/dataset.npy'
    labels_path = './data/labels.npy'
    ds = np.load(data_path)
    label = np.load(labels_path)
    # Append the label column so features and labels stay aligned.
    whole = np.concatenate((ds, label), axis=1)
    df = pd.DataFrame(whole,columns=[str(i) for i in range(whole.shape[1])])
    # Last column is the class id; shift to be 0-based.
    y = df.pop(df.columns[-1]).to_frame()-1
    # Normalsing Dataset
    X = (df-df.min())/(df.max()-df.min() + 1e-5)
    X_train, X_test, y_train, y_test = train_test_split(X, y,stratify=y, test_size=0.33)
    train_dict = {}
    train_label_dict = {}
    test_dict = {}
    test_label_dict = {}
    # Group samples by class; column '70' becomes a binary label
    # (0 for class 0, 1 for everything else) -- presumably
    # normal-vs-attack; confirm against the dataset docs.
    for i in range(y_train.iloc[:,-1].nunique()):
        train_dict["cat"+str(i)] = X_train[y_train.iloc[:,-1] == i]
        temp = y_train[y_train.iloc[:,-1]==i]
        if i==0:
            temp.loc[:,'70']=0
        else:
            temp.loc[:,'70']=1
        train_label_dict["cat"+str(i)] = temp
    for i in range(y_test.iloc[:,-1].nunique()):
        test_dict["cat"+str(i)] = X_test[y_test.iloc[:,-1] == i]
        temp = y_test[y_test.iloc[:,-1]==i]
        if i==0:
            temp.loc[:,'70']=0
        else:
            temp.loc[:,'70']=1
        test_label_dict["cat"+str(i)] = temp
    # One torch.Tensor per class, in dict insertion order.
    train_data_x = list(torch.Tensor(train_dict[key].to_numpy()) for key in train_dict)
    train_data_y = list(torch.Tensor(train_label_dict[key].to_numpy()) for key in train_label_dict)
    test_data_x = list(torch.Tensor(test_dict[key].to_numpy()) for key in test_dict)
    test_data_y = list(torch.Tensor(test_label_dict[key].to_numpy()) for key in test_label_dict)
    # Persist so subsequent runs hit the fast path above.
    with open('./data/train_data_x.pth','wb') as f:
        pickle.dump(train_data_x,f)
    with open('./data/train_data_y.pth','wb') as f:
        pickle.dump(train_data_y,f)
    with open('./data/test_data_x.pth','wb') as f:
        pickle.dump(test_data_x,f)
    with open('./data/test_data_y.pth','wb') as f:
        pickle.dump(test_data_y,f)
    print('Dumped into ./data/')
# Flatten the per-class tensor lists back into one big numpy matrix.
whole_x = [t.numpy() for t in train_data_x]
whole_x = np.vstack(whole_x)
whole_x = np.array(whole_x)
whole_y = [t.numpy() for t in train_data_y]
whole_y = np.vstack(whole_y)
whole_y = np.array(whole_y)
# Splitting test samples
sss = StratifiedShuffleSplit(n_splits=1000, test_size=0.75, random_state=0)
sss.get_n_splits(whole_x, whole_y)
# Only the first stratified split is used (note the break below).
for train_index, test_index in sss.split(whole_x, whole_y):
    print("TRAIN:", train_index, "TEST:", test_index)
    X_t, X_te = whole_x[train_index], whole_x[test_index]
    y_t, y_te = whole_y[train_index], whole_y[test_index]
    break
# Project the retained 25% subsample to 2-D for visualisation.
tsne = TSNE(n_components=2, random_state=0,verbose=1,perplexity=50,init='pca')
X_2d = tsne.fit_transform(X_t)
target_ids = [0,1]
y_t=y_t.astype('int')
plt.figure(figsize=(10,6))
colors = 'r', 'g'
target_names=['Normal','Attack']
# Color points by the binary label in the last column of y_t.
for i, c, label in zip(target_ids, colors, target_names):
    plt.scatter(X_2d[y_t[:,-1] == i,0], X_2d[y_t[:,-1] == i,1], c=c, label=label)
plt.axis('off')
plt.legend()
plt.savefig("./tSNE_train_dataset_plot.png")
plt.savefig("./tSNE_train.eps",format='eps')
| [
11748,
2298,
293,
198,
11748,
14601,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
28686,
220,
198,
6738,
19720,
1330,
4889,
540,
11,
360,
713,
11,
40806,
540,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,... | 2.107578 | 2,296 |
#!/usr/bin/env python
# encoding: UTF-8
import json
from pprint import pprint
import datetime
import re
import sys
def parseJson():
    ''' Main application logic of parser. Parses the JSON file which contains the Azure subscription configuration and
        it was generated by the Azurite Explorer. Converts the file to a JSON object that is understood by the AzuriteVisualizer.html
        in order to create the Graph representation of the Azure resources.
        Operates on the JSON input file provided by the user in the command line.
        Note: this is a Python 2 script (print statements, str.decode on byte strings).
    '''
    if not len(sys.argv) > 1:
        print "[!] Please provide input file. (Hint: Azurite Explorer output for Azure Subscription.)"
        print "[!] Usage: python AzuriteVisualizer.py <json_file>"
        sys.exit()
    # PowerShell generates the output in UTF-8 with BOM.
    # Convert to UTF-8 to be understood by the json module.
    inputFile = open(sys.argv[1])
    jsonIn = inputFile.read()
    unicodeJsonIn = jsonIn.decode("utf-8-sig")
    jsonIn = unicodeJsonIn.encode("utf-8")
    # Load the JSON object as string.
    data = json.loads(jsonIn)
    # Initialise variable for the id of the nodes in the final JSON object.
    # NOTE(review): 'id' shadows the Python builtin; kept as-is for fidelity.
    id = 0
    print "[*] Parsing data..."
    # Initialise the final JSON object.
    jsonOut = {"type": "NetworkGraph", "label": "Azure Subscription Configuration", "nodes": [], "links": []}
    # For each JSON array iterate through the values and after each step that is completed,
    # append the retrieved information in final JSON object.
    #print(type(data['subscriptionVNETs']))
    # NOTE(review): the loop variable 'vnet' is only read in the gateway branch
    # below; every other access goes through data['subscriptionVNETs'] directly.
    # Confirm the expected JSON shape (single VNET object vs. list of VNETs) —
    # if it is a dict, this loop iterates its keys and re-processes the same
    # VNET once per key.
    for vnet in data['subscriptionVNETs']:
        #Iterate through the Virtual Network properties.
        vnetNode = {}
        vnetProperties = {}
        vnetSourceNode = {}
        #print("Here is vnet")
        #print(data['subscriptionVNETs']['vnetName'])
        # Create the main values for the node.
        id, vnetNode['id'] = id + 1, id
        vnetProperties['nodeType'] = 'vnet'
        #vnetNode['label'] = vnet['vnetName']
        vnetNode['label'] = data['subscriptionVNETs']['vnetName']
        # Create the values for the node's properties.
        vnetProperties['location'] = data['subscriptionVNETs']['vnetLocation']
        vnetProperties['vnetAddressSpace'] = ', '.join(data['subscriptionVNETs']['vnetAddressSpaces']['AddressPrefixes'])
        vnetNode['properties'] = vnetProperties
        jsonOut['nodes'].append(vnetNode)
        for subnet in data['subscriptionVNETs']['vnetSubnets']:
            # Iterate through the Virtual Network's subnet properties.
            subnetNode = {}
            subnetProperties = {}
            subnetSourceNode = {}
            subnetDestinationNode = {}
            subnetToVnetLink = {}
            subnetToVnetLinkProperties = {}
            subnetNetworkSecurityGroupsStatus = 'OK'
            # Create the main values for the node.
            id, subnetNode['id'] = id + 1, id
            subnetProperties['nodeType'] = 'subnet'
            subnetNode['label'] = subnet['subnetName']
            # Create the values for the node's properties.
            subnetProperties['subnetAddressSpace'] = subnet['subnetAddressSpace']
            subnetProperties['vnetName'] = data['subscriptionVNETs']['vnetName']
            # Parse the subnet's Network Security Groups (NSGs) and retrieve the weak NSG rules.
            # Gateway Subnet cannot have NSGs.
            if subnet['subnetName'] != 'GatewaySubnet':
                if subnet['subnetNetworkSecurityGroups']:
                    # Check the configuration of the Subnet's NSGs.
                    subnetWeakCustomNetworkSecurityGroups = parseNetworkSecurityGroups(subnet['subnetNetworkSecurityGroups'])
                    if subnetWeakCustomNetworkSecurityGroups:
                        # Amend the node's properties in case weak NSGs were identified.
                        subnetProperties['nodeType'] = 'subnet-weak-nsg'
                        subnetProperties['weakNetworkSecurityGroupsCustomRules'] = ', '.join(subnetWeakCustomNetworkSecurityGroups)
                        subnetNetworkSecurityGroupsStatus = 'Weak'
                else:
                    # No NSGs at all on the subnet counts as a weak perimeter.
                    subnetProperties['nodeType'] = 'subnet-weak-nsg'
                    subnetProperties['weakNetworkSecurityGroupsCustomRules'] = 'Not defined'
                    subnetNetworkSecurityGroupsStatus = 'Weak'
            subnetNode['properties'] = subnetProperties
            jsonOut['nodes'].append(subnetNode)
            # Add link for each Subnet - Source is subnet, Destination is VNet.
            # Cost is always 1.
            subnetToVnetLink['source'] = subnetNode['id']
            subnetToVnetLink['target'] = vnetNode['id']
            subnetToVnetLink['cost'] = 1
            # Add link properties to determine the connection between the nodes.
            subnetToVnetLinkProperties['linkType'] = 'subnet-to-vnet'
            subnetToVnetLink['properties'] = subnetToVnetLinkProperties
            jsonOut['links'].append(subnetToVnetLink)
            if 'subnetItems' in subnet:
                # Iterate through the Subnet's items (VMs or VNet Gateways).
                for subnetItem in subnet['subnetItems']:
                    subnetItemNode = {}
                    subnetItemProperties = {}
                    vmPrivateIpAddresses = []
                    vmPublicIpAddresses = []
                    id, subnetItemNode['id'] = id + 1, id
                    if (subnetItem['itemType'] == 'Virtual Machine'):
                        vmToSubnetLink = {}
                        vmToSubnetLinkProperties = {}
                        subnetItemNode['label'] = subnetItem['vmName']
                        # Populate node's properties.
                        # Get the VM's private and public IP configuration.
                        for vmNetworkIpConfiguration in subnetItem['vmNetworkConfiguration']['vmNetworkConfigurationIpConfigurations']:
                            vmPrivateIpAddresses.append(vmNetworkIpConfiguration['vmNetworkConfigurationPrivateIpAddress'] + ' (' + vmNetworkIpConfiguration['vmNetworkConfigurationName'] + ')')
                            if 'vmNetworkConfigurationPublicIpAddress' in vmNetworkIpConfiguration:
                                vmPublicIpAddresses.append(vmNetworkIpConfiguration['vmNetworkConfigurationPublicIpAddress'] + ' (' + vmNetworkIpConfiguration['vmNetworkConfigurationName'] + ')')
                        subnetItemProperties['privateIpAddress'] = ', '.join(vmPrivateIpAddresses)
                        if vmPublicIpAddresses:
                            subnetItemProperties['publicIpAddress'] = ', '.join(vmPublicIpAddresses)
                        subnetItemProperties['vnetName'] = data['subscriptionVNETs']['vnetName']
                        subnetItemProperties['subnetName'] = subnet['subnetName']
                        subnetItemProperties['vmOsEncrypted'] = subnetItem['vmEncryption']['osVolumeEncryption']
                        subnetItemProperties['vmDiskEncrypted'] = subnetItem['vmEncryption']['dataVolumesEncryption']
                        subnetItemProperties['nodeType'] = 'vm'
                        # Parse the VM's Network Security Groups (NSGs) and retrieve the weak NSG rules.
                        if subnetItem['vmNetworkSecurityGroups']:
                            vmWeakCustomNetworkSecurityGroups = parseNetworkSecurityGroups(subnetItem['vmNetworkSecurityGroups'], subnetNetworkSecurityGroupsStatus)
                            if vmWeakCustomNetworkSecurityGroups:
                                # Amend the node's properties in case weak NSGs were identified.
                                subnetItemProperties['nodeType'] = 'vm-weak-nsg'
                                subnetItemProperties['weakNetworkSecurityGroupsCustomRules'] = ', '.join(vmWeakCustomNetworkSecurityGroups)
                        else:
                            subnetItemProperties['nodeType'] = 'vm-weak-nsg'
                            subnetItemProperties['weakNetworkSecurityGroupsCustomRules'] = "Not defined"
                        subnetItemNode['properties'] = subnetItemProperties
                        jsonOut['nodes'].append(subnetItemNode)
                        # Add link for each VM - Source is VM, Destination is the Subnet.
                        vmToSubnetLink['source'] = subnetItemNode['id']
                        vmToSubnetLink['target'] = subnetNode['id']
                        vmToSubnetLink['cost'] = 1
                        # Add link properties to determine the connection between the nodes.
                        vmToSubnetLinkProperties['linkType'] = 'vm-to-subnet'
                        vmToSubnetLink['properties'] = vmToSubnetLinkProperties
                        jsonOut['links'].append(vmToSubnetLink)
                    else:
                        gatewayToSubnetLink = {}
                        gatewayToSubnetLinkProperties = {}
                        subnetItemNode['label'] = subnetItem['virtualNetworkGatewayName']
                        # Populate node's properties.
                        subnetItemProperties['publicIpAddress'] = subnetItem['virtualNetworkGatewayNetworkConfiguration']['virtualNetworkGatewayPublicIpAddress']
                        # subnetItemProperties['privateIpAddress'] = subnetItem['virtualNetworkGatewayNetworkConfiguration']['virtualNetworkGatewayPrivateIpAddress']
                        subnetItemProperties['nodeType'] = 'gateway'
                        # NOTE(review): only use of the loop variable 'vnet' — inconsistent
                        # with the data['subscriptionVNETs']['vnetName'] access used above;
                        # verify against the actual Azurite Explorer JSON shape.
                        subnetItemProperties['vnetName'] = vnet['vnetName']
                        subnetItemProperties['subnetName'] = subnet['subnetName']
                        subnetItemNode['properties'] = subnetItemProperties
                        jsonOut['nodes'].append(subnetItemNode)
                        # Add link for each Gateway - Source is the Gateway, Destination is the Subnet.
                        gatewayToSubnetLink['source'] = subnetItemNode['id']
                        gatewayToSubnetLink['target'] = subnetNode['id']
                        gatewayToSubnetLink['cost'] = 1
                        # Add link properties to determine the connection between the nodes.
                        gatewayToSubnetLinkProperties['linkType'] = 'gateway-to-subnet'
                        gatewayToSubnetLink['properties'] = gatewayToSubnetLinkProperties
                        jsonOut['links'].append(gatewayToSubnetLink)
    if 'subscriptionLocalNetworkGateways' in data:
        # Iterate through the Local Network Gateway properties.
        for localNetworkGateway in data['subscriptionLocalNetworkGateways']:
            localNetworkGatewayNode = {}
            localNetworkGatewayProperties = {}
            id, localNetworkGatewayNode['id'] = id + 1, id
            # Populate node's properties.
            localNetworkGatewayNode['nodeType'] = 'local-network-gateway'
            localNetworkGatewayNode['label'] = localNetworkGateway['localNetworkGatewayName']
            localNetworkGatewayProperties['nodeType'] = 'gateway'
            localNetworkGatewayProperties['localGatewayAddressSpace'] = ', '.join(localNetworkGateway['localNetworkGatewayNetworkConfiguration']['localNetworkGatewayAddressSpace'])
            # NOTE(review): 'PubliIpAddress' typo in the key is preserved — the
            # HTML visualizer may depend on this exact key name; confirm before renaming.
            localNetworkGatewayProperties['localGatewayPubliIpAddress'] = localNetworkGateway['localNetworkGatewayNetworkConfiguration']['localNetworkGatewayPublicIpAddress']
            localNetworkGatewayNode['properties'] = localNetworkGatewayProperties
            jsonOut['nodes'].append(localNetworkGatewayNode)
    if 'subscriptionSqlServers' in data:
        # Iterate through the Azure SQL Server properties.
        for sqlServer in data['subscriptionSqlServers']:
            sqlServerNode = {}
            sqlServerProperties = {}
            id, sqlServerNode['id'] = id + 1, id
            # Populate node's properties.
            sqlServerNode['nodeType'] = 'azure-sql-server'
            sqlServerNode['label'] = sqlServer['sqlServerName']
            sqlServerProperties['sqlServerVersion'] = sqlServer['sqlServerVersion']
            sqlServerProperties['location'] = sqlServer['sqlServerLocation']
            # TODO: Retrieve audit state from the cmdlet and represent in human readable format.
            sqlServerProperties['auditState'] = sqlServer['sqlServerAuditingPolicy']['AuditState']
            # Retrieve the SQL Server's firewall rules.
            sqlServerFirewallRules = []
            for sqlServerFirewallRule in sqlServer['sqlServerFirewallRules']:
                # A rule covering a single address is rendered as "ip (name)",
                # a range as "start-end (name)".
                if sqlServerFirewallRule['StartIpAddress'] == sqlServerFirewallRule['EndIpAddress']:
                    sqlServerFirewallRules.append(sqlServerFirewallRule['StartIpAddress'] + " (" + sqlServerFirewallRule['FirewallRuleName'] + ")")
                else:
                    sqlServerFirewallRules.append(sqlServerFirewallRule['StartIpAddress'] + "-" + sqlServerFirewallRule['EndIpAddress'] + " (" + sqlServerFirewallRule['FirewallRuleName'] + ")")
            sqlServerProperties['sqlServerFirewallRules'] = ', '.join(sqlServerFirewallRules)
            sqlServerNode['properties'] = sqlServerProperties
            jsonOut['nodes'].append(sqlServerNode)
            if 'sqlServerDatabases' in sqlServer:
                # Iterate through the properties of the Azure SQL Databases that are hosted on the Azure SQL Server.
                for sqlServerDatabase in sqlServer['sqlServerDatabases']:
                    sqlServerDatabaseNode = {}
                    sqlServerDatabaseProperties = {}
                    azureSqlDatabaseToazureSqlServerLink = {}
                    azureSqlDatabaseToazureSqlServerLinkProperties = {}
                    id, sqlServerDatabaseNode['id'] = id + 1, id
                    # Populate node's properties.
                    sqlServerDatabaseNode['label'] = sqlServerDatabase['sqlDatabaseName']
                    sqlServerDatabaseProperties['location'] = sqlServerDatabase['sqlDatabaseLocation']
                    sqlServerDatabaseProperties['sqlDatabaseServerName'] = sqlServerDatabase['sqlDatabaseServerName']
                    sqlServerDatabaseProperties['nodeType'] = 'azure-sql-database'
                    sqlServerDatabaseProperties['auditState'] = sqlServerDatabase['sqlDatabaseAuditingPolicy']['AuditState']
                    # Retrieve the Azure SQL Database's data masking policy.
                    if 'sqlDatabaseDataMaskingPolicy' in sqlServerDatabase:
                        sqlServerDatabaseProperties['dataMaskingState'] = sqlServerDatabase['sqlDatabaseDataMaskingPolicy']['DataMaskingState']
                    sqlServerDatabaseProperties['ProxyDnsName'] = sqlServerDatabase['sqlDatabaseSecureConnectionPolicy']['ProxyDnsName']
                    sqlServerDatabaseProperties['ProxyPort'] = sqlServerDatabase['sqlDatabaseSecureConnectionPolicy']['ProxyPort']
                    sqlServerDatabaseProperties['ConnectionStrings'] = str(sqlServerDatabase['sqlDatabaseSecureConnectionPolicy']['ConnectionStrings'])
                    sqlServerDatabaseProperties['transparentDataEncryption'] = sqlServerDatabase['sqlDatabaseTransparentDataEncryption']
                    sqlServerDatabaseNode['properties'] = sqlServerDatabaseProperties
                    jsonOut['nodes'].append(sqlServerDatabaseNode)
                    # Add link for each Azure SQL Database - Source is the Azure SQL Database, Destination is the Azure SQL Server.
                    azureSqlDatabaseToazureSqlServerLink['source'] = sqlServerDatabaseNode['id']
                    azureSqlDatabaseToazureSqlServerLink['target'] = sqlServerNode['id']
                    azureSqlDatabaseToazureSqlServerLink['cost'] = 1
                    # Add link properties to determine the connection between the nodes.
                    azureSqlDatabaseToazureSqlServerLinkProperties['linkType'] = 'azure-sql-database-to-azure-sql-server'
                    azureSqlDatabaseToazureSqlServerLink['properties'] = azureSqlDatabaseToazureSqlServerLinkProperties
                    jsonOut['links'].append(azureSqlDatabaseToazureSqlServerLink)
    if 'subscriptionWebApps' in data:
        # Iterate through the Azure Web Application's properties.
        for webApp in data['subscriptionWebApps']:
            webAppNode = {}
            webAppProperties = {}
            id, webAppNode['id'] = id + 1, id
            # Populate node's properties.
            webAppNode['nodeType'] = 'web-app'
            webAppNode['label'] = webApp['webAppName']
            webAppProperties['webAppResourceGroupName'] = webApp['webAppResourceGroupName']
            webAppProperties['location'] = webApp['webAppLocation']
            webAppProperties['webAppHostNames'] = webApp['webAppHostNames']
            webAppProperties['webAppOutboundIpAddresses'] = webApp['webAppOutboundIpAddresses']
            # Retrieve Azure Web Application's SSL/TLS configuration.
            if 'webAppSSLCertificate' in webApp:
                webAppProperties['webAppSSLCertificateName'] = webApp['webAppSSLCertificate']['webAppSSLCertificateName']
                webAppProperties['webAppSSLCertificateSubjectName'] = webApp['webAppSSLCertificate']['webAppSSLCertificateSubjectName']
                webAppProperties['webAppSSLCertificateIssuer'] = webApp['webAppSSLCertificate']['webAppSSLCertificateIssuer']
                # Calculate the expiration date from the timestamp returned from the Azure PowerShell cmdlet.
                webAppProperties['webAppSSLCertificateExpirationDate'] = str(datetime.datetime.fromtimestamp(int(re.findall('\d+', str(webApp['webAppSSLCertificate']['webAppSSLCertificateExpirationDate']))[0].encode('ascii')[:10])))
            webAppNode['properties'] = webAppProperties
            jsonOut['nodes'].append(webAppNode)
    # Function call to create the connections between the Gateways in the subscription (if any);
    # It performs the connections: VNet Gateway-to-VNet Gateway and VNet Gateway-to-Local Network Gateway.
    jsonOut = connectGatewaysToGateways(data, jsonOut)
    # Create the final JSON object.
    jsonOutRaw = json.dumps(jsonOut)
    print "[+] The following JSON object was generated:\n"
    print jsonOutRaw
    # Export the final JSON object and save it to a file.
    jsonOutFile = open('azure-subscription-nodes.json', 'w')
    jsonOutFile.write(json.dumps(jsonOut, sort_keys=True, indent=4))
    jsonOutFile.close()
    print "\n\n"
    print "[*] Output was saved in the file: azure-subscription-nodes.json"
    print "[*] Please proceed to open the AzuriteVisualizer.html in Firefox to view the Graph."
def parseNetworkSecurityGroups(jsonNetworkSecurityGroups, subnetNetworkSecurityGroupCustomRulesStatus = 'OK'):
    ''' Parse the Network Security Groups (NSGs) for Subnets and Virtual Machines.
        For each weak NSG that is identified append to an array that is returned to the function call.
        Currently only basic business logic to retrieve the weak NSGs is supported. The following rules are used:
        * Direction - Inbound:
            - Source port range is 'ALL' (*) and Destination port range includes ports of the Management Interfaces.
            - Source IP address range 'ALL' (*) and Destination IP address range is 'ALL' (*)
            - Destination port range is 'ALL' (*)
        * Direction - Outbound:
            - Source IP address range 'ALL' (*) and Destination IP address range is 'ALL' (*)
            - Destination port range is 'ALL' (*)
        Attributes:
            jsonNetworkSecurityGroups: A JSON object of the NSGs of either a Subnet or a VM.
            subnetNetworkSecurityGroupCustomRulesStatus: This is used when the function is called for the NSGs of a VM.
            The attribute refers to the state of the NSG rules for the associated Subnet. This is used to determine whether the Subnet is secure or not.
            If the Subnet is not secure further parsing will be performed to determine the status of the NSG rules for the VM.
        Returns:
            A list of "<Direction>-<RuleName>" strings, one per weak rule found.
    '''
    nsgWeakCustomRules = []
    # Ports commonly exposed by management interfaces (FTP, SSH, Telnet, SNMP,
    # RDP, r-services, MSSQL, MySQL, Oracle, ...).
    # NOTE(review): the comparison below assumes 'DestinationPortRange' is an
    # integer in the JSON; if the cmdlet emits it as a string the membership
    # test never matches — confirm against Azurite Explorer output.
    managementPorts = [21, 22, 23, 161, 3389, 512, 513, 514, 1433, 3306, 1521, 4321]
    # Check the NSGs for the Subnet.
    if 'subnetNetworkSecurityGroupCustomRules' in jsonNetworkSecurityGroups:
        for subnetNetworkSecurityGroupCustomRule in jsonNetworkSecurityGroups['subnetNetworkSecurityGroupCustomRules']:
            # Only the NSG rules that have been successfully provisioned are reviewed.
            if subnetNetworkSecurityGroupCustomRule['ProvisioningState'] == 'Succeeded':
                if subnetNetworkSecurityGroupCustomRule['Direction'] == 'Inbound' and subnetNetworkSecurityGroupCustomRule['Access'] == 'Allow' and (((subnetNetworkSecurityGroupCustomRule['DestinationPortRange'] in managementPorts) and subnetNetworkSecurityGroupCustomRule['SourcePortRange'] == '*') or (subnetNetworkSecurityGroupCustomRule['SourceAddressPrefix'] == '*' and subnetNetworkSecurityGroupCustomRule['DestinationAddressPrefix'] == '*') or (subnetNetworkSecurityGroupCustomRule['DestinationPortRange'] == '*')):
                    nsgWeakCustomRules.append(subnetNetworkSecurityGroupCustomRule['Direction'] + "-" + subnetNetworkSecurityGroupCustomRule['Name'])
                elif subnetNetworkSecurityGroupCustomRule['Direction'] == 'Outbound' and subnetNetworkSecurityGroupCustomRule['Access'] == 'Allow' and ((subnetNetworkSecurityGroupCustomRule['SourceAddressPrefix'] == '*' and subnetNetworkSecurityGroupCustomRule['DestinationAddressPrefix'] == '*') or (subnetNetworkSecurityGroupCustomRule['DestinationPortRange'] == '*')):
                    nsgWeakCustomRules.append(subnetNetworkSecurityGroupCustomRule['Direction'] + "-" + subnetNetworkSecurityGroupCustomRule['Name'])
    # In case the NSGs for the Subnet of the associated VM are weak, perform checks on the VM's NSGs.
    # Otherwise the perimeter is considered secure.
    if subnetNetworkSecurityGroupCustomRulesStatus == 'Weak' and 'vmNICNetworkSecurityGroupCustomRules' in jsonNetworkSecurityGroups:
        for vmNetworkSecurityGroupCustomRule in jsonNetworkSecurityGroups['vmNICNetworkSecurityGroupCustomRules']:
            # Only the NSG rules that have been successfully provisioned are reviewed.
            if vmNetworkSecurityGroupCustomRule['ProvisioningState'] == 'Succeeded':
                # Unlike the subnet check, the VM inbound check flags a management
                # destination port regardless of the source port range.
                if vmNetworkSecurityGroupCustomRule['Direction'] == 'Inbound' and vmNetworkSecurityGroupCustomRule['Access'] == 'Allow' and (
                        (vmNetworkSecurityGroupCustomRule['DestinationPortRange'] in managementPorts) or (vmNetworkSecurityGroupCustomRule['SourceAddressPrefix'] == '*' and vmNetworkSecurityGroupCustomRule['DestinationAddressPrefix'] == '*') or (vmNetworkSecurityGroupCustomRule['DestinationPortRange'] == '*')):
                    nsgWeakCustomRules.append(vmNetworkSecurityGroupCustomRule['Direction'] + "-" + vmNetworkSecurityGroupCustomRule['Name'])
                elif vmNetworkSecurityGroupCustomRule['Direction'] == 'Outbound' and vmNetworkSecurityGroupCustomRule['Access'] == 'Allow' and ((vmNetworkSecurityGroupCustomRule['SourceAddressPrefix'] == '*' and vmNetworkSecurityGroupCustomRule['DestinationAddressPrefix'] == '*') or (vmNetworkSecurityGroupCustomRule['DestinationPortRange'] == '*')):
                    nsgWeakCustomRules.append(vmNetworkSecurityGroupCustomRule['Direction'] + "-" + vmNetworkSecurityGroupCustomRule['Name'])
    return nsgWeakCustomRules
def connectGatewaysToGateways(jsonData, jsonOut):
    ''' Function to create nodes and links for the connections between the Azure Gateways,
        VNet Gateway-to-VNet Gateway and VNet Gateway-to-Local Network Gateway.
        Attributes:
            jsonData: The original JSON object.
            jsonOut: A JSON object including only the links (connections) between the Gateways discovered in the network.
    '''
    # Iterate through the subscription components to identify the connections between the Gateways.
    # NOTE(review): the loop variable 'vnet' is never used; the body reads
    # jsonData['subscriptionVNETs'] directly, so if that value is a dict this
    # re-processes the same subnets once per key and may append duplicate
    # links — confirm the expected JSON shape.
    for vnet in jsonData['subscriptionVNETs']:
        for subnet in jsonData['subscriptionVNETs']['vnetSubnets']:
            if 'subnetItems' in subnet:
                for subnetItem in subnet['subnetItems']:
                    if (subnetItem['itemType'] == 'Virtual Network Gateway'):
                        # Add a link for each VNet Gateway to VNet Gateway connection.
                        if 'virtualNetworkGatewayConnections' in subnetItem:
                            vnetGatewayToVnetGatewayLink = {}
                            vnetGatewayToVnetGatewayLinkProperties = {}
                            # Retrieve the source and destination for the connection.
                            # The enumerate index is used as the node id: parseJson
                            # assigns ids sequentially in append order, so index == id.
                            # ('id' here also shadows the builtin; kept as-is.)
                            sourceId = next(id for (id, d) in enumerate(jsonOut['nodes']) if
                                            d["label"] == subnetItem['virtualNetworkGatewayConnections']['virtualNetworkGatewayConnectionGateway1'])
                            targetId = next(id for (id, d) in enumerate(jsonOut['nodes']) if
                                            d["label"] == subnetItem['virtualNetworkGatewayConnections']['virtualNetworkGatewayConnectionGateway2'])
                            # Check for bidirectional connections: an existing link between
                            # the same two nodes (either direction) marks this one as the
                            # complement of a previously added connection.
                            for d in jsonOut['links']:
                                if d.get("source") == sourceId and d.get("target") == targetId or d.get("source") == targetId and d.get("target") == sourceId:
                                    vnetGatewayToVnetGatewayLinkProperties['Bidirectional'] = 'True'
                                    vnetGatewayToVnetGatewayLinkProperties['Complementary connection name'] = d.get("properties").get("connectionName")
                            # Add link for each Gateway to Gateway connection.
                            vnetGatewayToVnetGatewayLink['source'] = sourceId
                            vnetGatewayToVnetGatewayLink['target'] = targetId
                            vnetGatewayToVnetGatewayLink['cost'] = 1
                            # Add link properties to determine the connection between the nodes.
                            vnetGatewayToVnetGatewayLinkProperties['linkType'] = 'vnet-gateway-to-vnet-gateway'
                            vnetGatewayToVnetGatewayLinkProperties['connectionName'] = subnetItem['virtualNetworkGatewayConnections']['virtualNetworkGatewayConnectionName']
                            vnetGatewayToVnetGatewayLink['properties'] = vnetGatewayToVnetGatewayLinkProperties
                            jsonOut['links'].append(vnetGatewayToVnetGatewayLink)
    return jsonOut
def banner():
    ''' Function to print the tool's banner and various details.
        The ASCII-art logo and the metadata lines below are runtime output and
        must be kept byte-for-byte.
    '''
    logo = """
     █████ ███████╗██╗ ██╗██████╗ ██╗████████╗███████╗
    ██╔══██╗╚══███╔╝██║ ██║██╔══██╗██║╚══██╔══╝██╔════╝
    ███████║ ███╔╝ ██║ ██║██████╔╝██║ ██║ █████╗
    ██╔══██║ ███╔╝ ██║ ██║██╔══██╗██║ ██║ ██╔══╝
    ██║ ██║███████╗╚██████╔╝██║ ██║██║ ██║ ███████╗
    ╚═╝ ╚═╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚══════╝
    ██╗ ██╗██╗███████╗██╗ ██╗ █████╗ ██╗ ██╗███████╗███████╗██████╗
    ██║ ██║██║██╔════╝██║ ██║██╔══██╗██║ ██║╚══███╔╝██╔════╝██╔══██╗
    ██║ ██║██║███████╗██║ ██║███████║██║ ██║ ███╔╝ █████╗ ██████╔╝
    ╚██╗ ██╔╝██║╚════██║██║ ██║██╔══██║██║ ██║ ███╔╝ ██╔══╝ ██╔══██╗
    ╚████╔╝ ██║███████║╚██████╔╝██║ ██║███████╗██║███████╗███████╗██║ ██║
    ╚═══╝ ╚═╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝╚══════╝╚══════╝╚═╝ ╚═╝
    Version: 0.6 Beta
    Author: Apostolos Mastoris (@Lgrec0)
    Email: apostolis.mastoris[at]mwrinfosecurity.com
    """
    print logo
def main():
    ''' Script entry point: print the banner, then parse the JSON input file
        given on the command line and emit the visualizer JSON.
    '''
    banner()
    parseJson()
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
41002,
12,
23,
198,
198,
11748,
33918,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
11748,
25064,
198,
198,
4299,
21136,
41,
1559,... | 2.881239 | 8,395 |
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
# reacher-v1 state feature
| [
11748,
299,
32152,
355,
45941,
198,
6738,
11550,
1330,
3384,
4487,
198,
6738,
11550,
13,
268,
14259,
13,
76,
23577,
25634,
1330,
285,
23577,
25634,
62,
24330,
628,
220,
220,
220,
1303,
302,
3493,
12,
85,
16,
1181,
3895,
198
] | 2.8 | 40 |
#!/usr/bin/env python
from __future__ import annotations
from arenasdk.nodes.gpu_exclusive_node import GPUExclusiveNode
from arenasdk.nodes.gpu_exclusive_node import build_gpu_exclusive_nodes
from arenasdk.nodes.gpu_topology_node import GPUTopologyNode
from arenasdk.nodes.gpu_topology_node import build_gpu_topology_nodes
from arenasdk.nodes.gpushare_node import GPUShareNode
from arenasdk.nodes.gpushare_node import build_gpushare_nodes
from arenasdk.nodes.normal_node import NormalNode
from arenasdk.nodes.normal_node import build_normal_nodes
from typing import List
from typing import Dict
from arenasdk.common.log import Log
logger = Log(__name__).get_logger()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
6738,
46168,
34388,
13,
77,
4147,
13,
46999,
62,
41195,
62,
17440,
1330,
11362,
3109,
5731,
19667,
198,
6738,
46168,
34388,
13,
77,
4147,
13,
... | 3.076577 | 222 |
"""Provides end-to-end flow to use machine learning models.
"""
__author__ = 'khanhtpd'
__date__ = '2021-12-01'
import math
from typing import Union
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def count_missing_columns_rows(data: pd.DataFrame, figsize: tuple = (12, 4)):
    """Count missing values according to columns and rows.

    Draws two annotated horizontal bar charts side by side: the distribution of
    the per-column missing fraction (left) and the per-row missing fraction
    (right), each binned into 10% intervals.

    Args:
        data (pd.DataFrame): Input data frame.
        figsize (tuple, optional): Size of figure. Defaults to (12, 4).
    """
    plt.figure(figsize=figsize)
    plt.subplot(1, 2, 1)
    # Fraction of missing cells per column, binned and counted.
    column_counts = _missing_interval_counts(data.isna().sum(axis=0) / data.shape[0])
    _plot_missing_counts(column_counts, 'Missing values - Columns')
    plt.subplots_adjust(wspace=0.5)
    plt.subplot(1, 2, 2)
    # Fraction of missing cells per row, binned and counted.
    row_counts = _missing_interval_counts(data.isna().sum(axis=1) / data.shape[1])
    _plot_missing_counts(row_counts, 'Missing values - Rows')


def _missing_interval_counts(fractions: pd.Series) -> pd.Series:
    """Bin missing-value fractions into 10% intervals and count each bin.

    Fractions of exactly 0 fall outside the open-left bins and are collected
    in an extra degenerate [0, 0] category.

    Args:
        fractions (pd.Series): Per-column or per-row missing fractions in [0, 1].

    Returns:
        pd.Series: Counts indexed by sorted interval, including the [0, 0] bin.
    """
    bins = np.linspace(0, 1, 11)
    binned = (
        pd.cut(fractions, bins=bins, include_lowest=False, right=True)
        .cat.add_categories(pd.Interval(0.0, 0.0, closed='both')))
    # Reassign instead of `inplace=True`: the inplace keyword of the `.cat`
    # accessor methods was deprecated and removed in modern pandas.
    binned = binned.cat.reorder_categories(binned.cat.categories.sortlevel()[0])
    return (
        binned
        .fillna(pd.Interval(0, 0, closed='both'))
        .value_counts()
        .sort_index()
    )


def _plot_missing_counts(counts: pd.Series, title: str):
    """Draw a horizontal bar chart of interval counts annotated with percentages.

    Args:
        counts (pd.Series): Output of ``_missing_interval_counts``.
        title (str): Title for the subplot.
    """
    ax = counts.plot(kind='barh')
    total = counts.sum()
    for p in ax.patches:
        percentage = 100 * p.get_width() / total
        percentage = f'{percentage:.1f}%'
        # Place the label just past the end of the bar.
        x = p.get_x() + p.get_width()
        y = p.get_y() + 0.05
        ax.annotate(percentage, (x, y))
    plt.xlabel('Count')
    plt.ylabel('Percentage of missing')
    plt.title(title, size=15)
def top_missing_columns(data: pd.DataFrame, ntop: int = 10, figsize: Union[tuple, str] = 'auto'):
    """Show the columns with the most missing values as a horizontal bar plot.

    Args:
        data (pd.DataFrame): Input data frame.
        ntop (int, optional): Number of top missing columns displayed.
            The string 'all' displays every column. Defaults to 10.
        figsize (Union[tuple, str], optional): Size of the whole plot.
            If 'auto', figsize = (12, ntop / 2). Defaults to 'auto'.
    """
    if ntop == 'all':
        ntop = data.shape[1]
    if figsize == 'auto':
        figsize = (12, ntop / 2)
    plt.figure(figsize=figsize)
    # Keep only the ntop worst columns, then flip to ascending order so the
    # column with the most missing values ends up at the top of the barh plot.
    worst = data.isna().sum().sort_values(ascending=False)[0: ntop]
    worst = worst.sort_values(ascending=True)
    ax = worst.plot(kind='barh')
    n_rows = len(data)
    for patch in ax.patches:
        # Annotate each bar with the missing share of the whole column.
        share = f'{patch.get_width() / n_rows * 100:.1f}%'
        ax.annotate(share, (patch.get_x() + patch.get_width(), patch.get_y() + 0.15))
    plt.title('Top missing values columns', size=15)
def displot(
    data: pd.DataFrame,
    columns: list[str] = None,
    kind: str = 'hist',
    nrows: Union[int, str] = 'auto',
    ncols: int = 2,
    figsize: Union[tuple[int], str] = 'auto',
    hspace: float = 0.7,
    wspace: float = 0.5,
    title: str = 'Distribution of numerical variables',
    y_title: float = 1
):
    """Draw one distribution plot per numerical column on a subplot grid.

    Args:
        data (pd.DataFrame): Input data frame.
        columns (list[str], optional): Numerical columns to plot; when None,
            all numeric columns are used. Defaults to None.
        kind (str, optional): Pandas plot kind. Defaults to 'hist'.
        nrows (Union[int, str], optional): Grid rows; 'auto' derives it from
            ncols. Defaults to 'auto'.
        ncols (int, optional): Grid columns. Defaults to 2.
        figsize (Union[tuple[int], str], optional): Figure size; 'auto' gives
            (12, 2 * nrows). Defaults to 'auto'.
        hspace (float, optional): Vertical spacing between subplots. Defaults to 0.7.
        wspace (float, optional): Horizontal spacing between subplots. Defaults to 0.5.
        title (str, optional): Overall title. Defaults to
            'Distribution of numerical variables'.
        y_title (float, optional): Vertical position of the title. Defaults to 1.
    """
    if columns is None:
        columns = data.select_dtypes(include=np.number).columns
    # A single column takes the full figure width.
    if len(columns) == 1:
        ncols = 1
    if nrows == 'auto':
        nrows = math.ceil(len(columns) / ncols)
    if figsize == 'auto':
        figsize = (12, 2 * nrows)
    plt.figure(figsize=figsize)
    for position, name in enumerate(columns, start=1):
        plt.subplot(nrows, ncols, position)
        plt.subplots_adjust(hspace=hspace, wspace=wspace)
        data[name].plot(kind=kind)
        plt.xlabel('')
        plt.ylabel('')
        plt.title(name)
    # A figure-level title only makes sense when there are several subplots.
    if len(columns) > 1:
        plt.suptitle(title, size=15, y=y_title)
def boxplot(
    data: pd.DataFrame,
    columns: list[str] = None,
    label: str = None,
    nrows: Union[int, str] = 'auto',
    ncols: int = 2,
    figsize: Union[tuple[int], str] = 'auto',
    hspace: float = 0.7,
    wspace: float = 0.5,
    y_title: float = 1,
    title: str = 'Distribution of numerical variables',
):
    """Draw one horizontal boxplot per numerical column, optionally split by label.

    Args:
        data (pd.DataFrame): Input data frame.
        columns (list[str], optional): Numerical columns to plot; when None,
            all numeric columns are used. Defaults to None.
        label (str, optional): Column used to group the boxplots. Defaults to None.
        nrows (Union[int, str], optional): Grid rows; 'auto' derives it from
            ncols. Defaults to 'auto'.
        ncols (int, optional): Grid columns. Defaults to 2.
        figsize (Union[tuple[int], str], optional): Figure size; 'auto' gives
            (12, 2 * nrows). Defaults to 'auto'.
        hspace (float, optional): Vertical spacing between subplots. Defaults to 0.7.
        wspace (float, optional): Horizontal spacing between subplots. Defaults to 0.5.
        y_title (float, optional): Vertical position of the title. Defaults to 1.
        title (str, optional): Overall title. Defaults to
            'Distribution of numerical variables'.
    """
    if columns is None:
        columns = data.select_dtypes(include=np.number).columns
    # A single column takes the full figure width.
    if len(columns) == 1:
        ncols = 1
    if nrows == 'auto':
        nrows = math.ceil(len(columns) / ncols)
    if figsize == 'auto':
        figsize = (12, 2 * nrows)
    plt.figure(figsize=figsize)
    for position, name in enumerate(columns, start=1):
        plt.subplot(nrows, ncols, position)
        plt.subplots_adjust(hspace=hspace, wspace=wspace)
        sns.boxplot(x=name, y=label, data=data, orient='h')
        plt.xlabel('')
        plt.ylabel('')
        plt.title(name)
    # A figure-level title only makes sense when there are several subplots.
    if len(columns) > 1:
        plt.suptitle(title, size=15, y=y_title)
def countplot(
    data: pd.DataFrame,
    columns: list[str] = None,
    label: str = None,
    nclass: Union[int, str] = 5,
    nrows: Union[int, str] = 'auto',
    ncols: int = 2,
    figsize: Union[tuple[int], str] = 'auto',
    sort_index: bool = False,
    sample: int = 10**6,
    hspace: float = 0.7,
    wspace: float = 0.5,
    title: str = 'Distribution of categorical variables',
    y_title: float = 1,
):
    """Count plot categorical variables.

    Args:
        data (pd.DataFrame): Input data frame.
        columns (list[str], optional): Name of categorical columns in the data frame. If None,
            categorical and string columns will be taken. Defaults to None.
        label (str, optional): Name of label column in the data frame. Defaults to None.
        nclass (Union[int, str], optional): Number of class displayed in the plot.
            If 'all', display all classes. Defaults to 5.
        nrows (Union[int, str], optional): Number of rows in the plot.
            If 'auto', will be automatically calulated based on ncols. Defaults to 'auto'.
        ncols (int, optional): Number of columns in the plot. Defaults to 2.
        figsize (Union[tuple[int], str], optional): Size of the whole plot. If 'auto',
            figsize = (12, 2 * nrows). Defaults to 'auto'.
        sort_index (bool, optional): Sort by index. Defaults to False.
        sample (int, optional): Number of drown samples if the dataset is too large.
            Defaults to 10**6.
        hspace (float, optional): Height space between sup plots. Defaults to 0.7.
        wspace (float, optional): Width space between sup plots. Defaults to 0.5.
        title (str, optional): Title. Defaults to 'Distribution of categorical variables'.
        y_title (float, optional): Position of title. Defaults to 1.
    """
    if columns is None:
        columns = data.select_dtypes(exclude=np.number).columns
    # Down-sample very large inputs (deterministically) to keep plotting fast.
    if len(data) > sample:
        print(f'Only {sample:,} random samples out of {len(data):,} are taken.')
        data = data.sample(sample, random_state=0)
    if nclass == 'all':
        nclass = data[columns].nunique().max()
    if len(columns) == 1:
        ncols = 1
    if nrows == 'auto':
        nrows = math.ceil(len(columns) / ncols)
    if figsize == 'auto':
        figsize = (12, 2 * nrows)
    plt.figure(figsize=figsize)
    for i, column in enumerate(columns):
        plt.subplot(nrows, ncols, i + 1)
        plt.subplots_adjust(hspace=hspace, wspace=wspace)
        # Pick the nclass most frequent classes; with sort_index the selected
        # classes are re-ordered by their index (descending) instead of by count.
        if sort_index:
            order = (
                data[column]
                .value_counts()
                .iloc[0: min(data[column].nunique(), nclass)]
                .sort_index(ascending=False)
                .index)
        else:
            order = (
                data[column]
                .value_counts()
                .iloc[0: min(data[column].nunique(), nclass)]
                .index)
        # Split bars by the label column via hue, except when plotting the
        # label column itself.
        if column == label:
            ax = sns.countplot(
                y=column,
                data=data,
                order=order)
        else:
            ax = sns.countplot(
                y=column,
                data=data,
                order=order,
                hue=label)
        # Annotate each bar with its share of all rows.
        total = data.shape[0]
        for p in ax.patches:
            percentage = 100 * p.get_width() / total
            percentage = f'{percentage:.1f}%'
            x = p.get_x() + p.get_width()
            # Bars are thinner when split by hue, so the label offset differs.
            if label is None:
                y_adjust = 0.55
            else:
                y_adjust = 0.35
            y = p.get_y() + y_adjust
            ax.annotate(percentage, (x, y))
        # Subplot title shows the column name and its number of distinct classes.
        plt.title(f'{column} ({data[column].nunique()})', size=12)
        plt.xlabel('')
        plt.ylabel('')
    if len(columns) > 1:
        plt.suptitle(title, size=15, y=y_title)
def correlation_matrix(data: pd.DataFrame, figsize: tuple = (7, 7)):
    """Plot the lower-triangle correlation heatmap of a data frame.

    Args:
        data (pd.DataFrame): Input data frame (numeric columns are correlated).
        figsize (tuple, optional): Size of the figure. Defaults to (7, 7).
    """
    # Compute the correlation matrix
    corr = data.corr()
    # Mask the upper triangle so each pair appears only once
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    # Set up the matplotlib figure.
    # BUG FIX: the original called plt.subplots(figsize=(7, 7)) here,
    # silently ignoring the ``figsize`` parameter.
    f, ax = plt.subplots(figsize=figsize)
    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap with the mask and correct aspect ratio
    sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1, center=0,
                square=True, linewidths=.5, cbar_kws={"shrink": .5})
    plt.title('Correlation among variables', size=15)
def categorical_to_numeric(
        data: pd.DataFrame,
        features: Union[str, list[str]],
        label: str = None,
        method: str = 'target',
        drop_first: bool = False,
        missing_value_integer: Union[int, str] = -1
) -> pd.DataFrame:
    """Convert categorical variables to numeric ones.

    Args:
        data (pd.DataFrame): Input data frame.
        features (Union[str, list[str]]): Name(s) of the categorical feature(s).
        label (str, optional): Name of the label column; required when
            method='target'. Defaults to None.
        method (str, optional): Encoding method: 'target' (mean of label per
            category), 'one_hot' or 'integer'. Defaults to 'target'.
        drop_first (bool, optional): If method='one_hot', drop the first
            one-hot column of each feature. Defaults to False.
        missing_value_integer (Union[int, str], optional): If method='integer',
            code for missing values: an int, np.nan, or 'max' to use the
            feature's own class count. Defaults to -1.

    Raises:
        ValueError: If method is not one of ['target', 'one_hot', 'integer'].

    Returns:
        pd.DataFrame: Converted copy of the input data frame (input unchanged).
    """
    if method not in ['target', 'one_hot', 'integer']:
        raise ValueError("method must be one of ['target', 'one_hot', 'integer'].")
    converted_data = data.copy()
    if not isinstance(features, list):  # allow passing a single feature name
        features = [features]
    if method == 'target':  # missing values remain missing values.
        for feature in features:
            # Mean of the label per category, then map category -> mean.
            feature_label = (
                converted_data
                .groupby(feature)
                .agg({label: np.mean})
                .reset_index())
            feature_label_dict = dict(zip(feature_label[feature], feature_label[label]))
            converted_data[feature] = converted_data[feature].replace(feature_label_dict)
        print('Missing values, if any, remain as missing values.')
    elif method == 'one_hot':  # missing values have 0 for all one-hot columns.
        for feature in features:
            dummies = pd.get_dummies(converted_data[feature], drop_first=drop_first)
            data_drop = converted_data.drop(feature, axis=1)
            converted_data = pd.concat([data_drop, dummies], axis=1)
        print('Missing values, if any, have 0 for all the converted one-hot columns.')
    elif method == 'integer':  # missing values are encoded as predetermined values.
        global categories_dict_list  # kept global for backward compatibility
        categories_dict_list = []
        for feature in features:
            codes = converted_data[feature].astype('category').cat.codes
            # BUG FIX: the original overwrote `missing_value_integer` with the
            # first feature's class count when it was 'max', so every later
            # feature reused the first feature's value. Compute a per-feature
            # fill value instead, leaving the sentinel untouched.
            if missing_value_integer == 'max':
                fill_value = codes.max() + 1
            else:
                fill_value = missing_value_integer
            categories = converted_data[feature].astype('category').cat.categories
            categories_dict = dict(zip(categories, range(len(categories))))
            categories_dict_list.append(categories_dict)
            # pandas encodes missing values as code -1; remap them to fill_value.
            converted_data[feature] = codes.replace(-1, fill_value)
        print('Categorical(s) have been encoded according to categories_dict_list.')
        print('Missing values, if any, are encoded as maximum classes, np.nan or -1 (defaulted).')
    return converted_data
| [
37811,
15946,
1460,
886,
12,
1462,
12,
437,
5202,
284,
779,
4572,
4673,
4981,
13,
198,
37811,
198,
834,
9800,
834,
796,
705,
74,
7637,
4352,
30094,
6,
198,
834,
4475,
834,
796,
705,
1238,
2481,
12,
1065,
12,
486,
6,
628,
198,
1174... | 2.314928 | 6,719 |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stats.py."""
from tests.common import testing
import datetime
import mock
import os
import webtest
from titan.common.lib.google.apputils import basetest
from titan import activities
from titan import wsgi
from titan.stats import stats
if __name__ == '__main__':
  # Run this test module directly via google.apputils' basetest runner.
  basetest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
2321,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
7... | 3.645418 | 251 |
#!usr/bin/python
# -*- coding: utf-8 -*-
import re
from torchvision.models.densenet import DenseNet, model_urls as imagenet_urls
from torchvision.models.utils import load_state_dict_from_url
from .utils import cnn_model
__all__ = ['densenet121', 'densenet169', 'densenet201', 'densenet161']
model_urls = {
'densenet121': 'https://srv-file7.gofile.io/download/XqHLBB/densenet121-binary-classification.pth'
}
model_cut = -1
def densenet121(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
                lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
    r"""Build a DenseNet-121 classifier from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): load weights pre-trained for this task
        progress (bool): show a download progress bar on stderr
        imagenet_pretrained (bool, optional): load ImageNet weights on the conv layers
        num_classes (int, optional): number of output classes
        lin_features (Union[int, list<int>], optional): node counts of the head's hidden layers
        dropout_prob (float, optional): dropout probability of the head FC layers
        bn_final (bool, optional): append a batch norm after the last layer
        concat_pool (bool, optional): replace pooling with :mod:`pyronear.nn.AdaptiveConcatPool2d`
        **kwargs: extra arguments forwarded to :mod:`torchvision.models.densenet.DenseNet`
    """
    # DenseNet-121 backbone configuration: growth rate, layers per dense
    # block, and number of initial convolution features.
    growth_rate, block_config, num_init_features = 32, (6, 12, 24, 16), 64
    return _densenet('densenet121', growth_rate, block_config, num_init_features,
                     pretrained, progress, imagenet_pretrained, num_classes,
                     lin_features, dropout_prob, bn_final, concat_pool, **kwargs)
def densenet161(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
                lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
    r"""Build a DenseNet-161 classifier from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): load weights pre-trained for this task
        progress (bool): show a download progress bar on stderr
        imagenet_pretrained (bool, optional): load ImageNet weights on the conv layers
        num_classes (int, optional): number of output classes
        lin_features (Union[int, list<int>], optional): node counts of the head's hidden layers
        dropout_prob (float, optional): dropout probability of the head FC layers
        bn_final (bool, optional): append a batch norm after the last layer
        concat_pool (bool, optional): replace pooling with :mod:`pyronear.nn.AdaptiveConcatPool2d`
        **kwargs: extra arguments forwarded to :mod:`torchvision.models.densenet.DenseNet`
    """
    # DenseNet-161 backbone configuration: growth rate, layers per dense
    # block, and number of initial convolution features.
    growth_rate, block_config, num_init_features = 48, (6, 12, 36, 24), 96
    return _densenet('densenet161', growth_rate, block_config, num_init_features,
                     pretrained, progress, imagenet_pretrained, num_classes,
                     lin_features, dropout_prob, bn_final, concat_pool, **kwargs)
def densenet169(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
                lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
    r"""Build a DenseNet-169 classifier from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): load weights pre-trained for this task
        progress (bool): show a download progress bar on stderr
        imagenet_pretrained (bool, optional): load ImageNet weights on the conv layers
        num_classes (int, optional): number of output classes
        lin_features (Union[int, list<int>], optional): node counts of the head's hidden layers
        dropout_prob (float, optional): dropout probability of the head FC layers
        bn_final (bool, optional): append a batch norm after the last layer
        concat_pool (bool, optional): replace pooling with :mod:`pyronear.nn.AdaptiveConcatPool2d`
        **kwargs: extra arguments forwarded to :mod:`torchvision.models.densenet.DenseNet`
    """
    # DenseNet-169 backbone configuration: growth rate, layers per dense
    # block, and number of initial convolution features.
    growth_rate, block_config, num_init_features = 32, (6, 12, 32, 32), 64
    return _densenet('densenet169', growth_rate, block_config, num_init_features,
                     pretrained, progress, imagenet_pretrained, num_classes,
                     lin_features, dropout_prob, bn_final, concat_pool, **kwargs)
def densenet201(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
                lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
    r"""Build a DenseNet-201 classifier from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): load weights pre-trained for this task
        progress (bool): show a download progress bar on stderr
        imagenet_pretrained (bool, optional): load ImageNet weights on the conv layers
        num_classes (int, optional): number of output classes
        lin_features (Union[int, list<int>], optional): node counts of the head's hidden layers
        dropout_prob (float, optional): dropout probability of the head FC layers
        bn_final (bool, optional): append a batch norm after the last layer
        concat_pool (bool, optional): replace pooling with :mod:`pyronear.nn.AdaptiveConcatPool2d`
        **kwargs: extra arguments forwarded to :mod:`torchvision.models.densenet.DenseNet`
    """
    # DenseNet-201 backbone configuration: growth rate, layers per dense
    # block, and number of initial convolution features.
    growth_rate, block_config, num_init_features = 32, (6, 12, 48, 32), 64
    return _densenet('densenet201', growth_rate, block_config, num_init_features,
                     pretrained, progress, imagenet_pretrained, num_classes,
                     lin_features, dropout_prob, bn_final, concat_pool, **kwargs)
| [
2,
0,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
302,
198,
6738,
28034,
10178,
13,
27530,
13,
67,
18756,
316,
1330,
360,
1072,
7934,
11,
2746,
62,
6371,
82,
355,
3... | 2.617135 | 2,241 |
import re
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
| [
11748,
302,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
628
] | 3.5 | 34 |
# Generated by Django 2.0.1 on 2018-02-08 13:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
16,
319,
2864,
12,
2999,
12,
2919,
1511,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import unittest
from visuanalytics.tests.analytics.transform.transform_test_helper import prepare_test
| [
11748,
555,
715,
395,
198,
198,
6738,
1490,
7258,
3400,
14094,
13,
41989,
13,
38200,
14094,
13,
35636,
13,
35636,
62,
9288,
62,
2978,
525,
1330,
8335,
62,
9288,
628
] | 3.5 | 30 |
import discord
import dispacher
import questions
import security
client = discord.Client()
@client.event
@client.event
client.run('<discord token>') | [
11748,
36446,
198,
11748,
4596,
3493,
198,
11748,
2683,
198,
11748,
2324,
198,
198,
16366,
796,
36446,
13,
11792,
3419,
628,
198,
31,
16366,
13,
15596,
198,
198,
31,
16366,
13,
15596,
628,
198,
16366,
13,
5143,
10786,
27,
15410,
585,
... | 3.5 | 44 |
import sys
import os
# Make the current working directory importable so the local Scraper
# package resolves when the script is run from its own folder.
o_path = os.getcwd()
sys.path.append(o_path)
import Scraper.HuabanScraper as HuabanScraper
from multiprocessing import Pool
import multiprocessing
import configparser
# Read scraping targets from config.ini: one [urlN] section per board,
# each expected to hold (in order) url, pic_max, path_name, type_filter.
config_file = 'config.ini'
con = configparser.ConfigParser()
con.read(config_file, encoding='utf-8')
section = con.sections()
num = len(section)
urls = []
pic_max = []
path_name = []
type_filters = []
for i in range(num):
    # NOTE(review): items are read positionally (item[0]..item[3]), so the
    # option order inside each [urlN] section matters — confirm config layout.
    item = con.items(f'url{i+1}')
    urls.append(item[0][1])
    pic_max.append(item[1][1])
    path_name.append(item[2][1])
    type_filters.append(item[3][1])
if __name__ == '__main__':
    # freeze_support() is required for multiprocessing in frozen Windows builds.
    multiprocessing.freeze_support()
    # main() is defined elsewhere in this file (not visible in this excerpt).
    main()
11748,
25064,
201,
198,
11748,
28686,
201,
198,
78,
62,
6978,
796,
28686,
13,
1136,
66,
16993,
3419,
201,
198,
17597,
13,
6978,
13,
33295,
7,
78,
62,
6978,
8,
201,
198,
11748,
1446,
38545,
13,
38202,
45094,
3351,
38545,
355,
11256,
... | 2.31 | 300 |
from __future__ import print_function # For Py2/3 compatibility
import async_eel
import asyncio
loop = asyncio.get_event_loop()
@async_eel.expose # Expose this function to Javascript
if __name__ == '__main__':
asyncio.run_coroutine_threadsafe(main(), loop)
loop.run_forever()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
197,
2,
1114,
9485,
17,
14,
18,
17764,
198,
11748,
30351,
62,
68,
417,
198,
11748,
30351,
952,
628,
198,
26268,
796,
30351,
952,
13,
1136,
62,
15596,
62,
26268,
3419,
628,
198,
31,
292,... | 2.52 | 125 |
from keras import Sequential
from keras.layers import Dense
from keras.activations import relu, linear
class PLAYER:
""" A trained agent that plays the game """
| [
6738,
41927,
292,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
198,
6738,
41927,
292,
13,
15791,
602,
1330,
823,
84,
11,
14174,
198,
198,
4871,
28180,
1137,
25,
628,
220,
220,
220,
37227,
317,
8776,
5797,
... | 3.5 | 48 |
import numpy as np
import matplotlib.pyplot as plt
from .single_unit import PSTH
def crosscorrelogram(target, reference, ROI=(-0.5,0.5), binsize=.01, shift=None, skip_plot=False):
    """
    Cross-correlogram between two units, optionally corrected by a shift predictor.

    arguments:
        - target: the target spike train as 1d numpy.array
        - reference: the reference spike train as 1d numpy.array
    keyword arguments:
        - shift: shift size (scalar) or list/array of shift sizes; if None the
          shift-predictor correction is skipped [default: None]
        - ROI: region of interest as tuple, in the same time unit as the
          spike trains [default: (-0.5, 0.5)]
        - binsize: the size of each bin [default: 0.01]
        - skip_plot: if True then skip auto plot crosscorrelogram [default: False]
    return:
        - crosscorrelogram: as in 1d numpy.array (shift-corrected when
          `shift` is given)
    """
    # Raw cross-correlogram: PSTH of target spikes around reference spikes.
    _xcorr, _ = PSTH(target, reference, ROI, binsize, True)
    if isinstance(shift, int) or isinstance(shift, float):
        # Single shift: subtract one shift-predictor estimate.
        # NOTE(review): shiftappend is not imported in this excerpt —
        # presumably defined elsewhere in this module; verify.
        _shift_reference = shiftappend(reference, shift)
        _xcorr_shift, _ = PSTH(target, _shift_reference, ROI, binsize, True)
        _xcorr = _xcorr - _xcorr_shift
    elif isinstance(shift, list) or isinstance(shift, np.ndarray):
        # Multiple shifts: average the shift predictors before subtracting.
        _xcorr_shift = np.zeros_like(_xcorr)
        for item in shift:
            _shift_reference = shiftappend(reference, item)
            _xcorr_shift_item, _ = PSTH(target, _shift_reference, ROI, binsize, True)
            _xcorr_shift = _xcorr_shift + _xcorr_shift_item/np.size(shift)
        _xcorr = _xcorr - _xcorr_shift
    else:
        # No correction requested (shift is None or unrecognized type).
        _xcorr_shift = None
    if not skip_plot:
        plt.figure(figsize=(16,4))
        # Right panel: the (corrected) cross-correlogram.
        plt.subplot(1,2,2)
        _tspec = np.linspace(ROI[0], ROI[1]-1/int((ROI[1]-ROI[0])/binsize), int((ROI[1]-ROI[0])/binsize))
        plt.bar(_tspec+binsize/2, _xcorr, width=binsize)
        plt.vlines([0], 0, np.max(_xcorr)*1.05, linestyle='--', alpha=0.5)
        plt.xlim((ROI[0], ROI[-1]))
        plt.title('crosscorrelogram')
        if not isinstance(_xcorr_shift, type(None)):
            # Left panel: the shift predictor that was subtracted.
            plt.subplot(1,2,1)
            plt.bar(_tspec+binsize/2, _xcorr_shift, width=binsize)
            plt.vlines([0], 0, np.max(_xcorr)*1.05, linestyle='--', alpha=0.5)
            plt.xlim((ROI[0], ROI[-1]))
            plt.title('shift predictor')
        plt.show()
    return _xcorr
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
764,
29762,
62,
20850,
1330,
28220,
39,
628,
198,
4299,
3272,
10215,
2411,
21857,
7,
16793,
11,
4941,
11,
15107,
40,
16193,
12,
15... | 2.120849 | 1,084 |
#!/usr/bin/env python3
"""
Basic uci implementation
"""
import sys
import time
from .board import Board
from .evaluation import Evaluator
from .search import Searcher
from .ttable import ttable
from .utils import move_from_san, san_from_move
if __name__ == "__main__":
    # Entry point of the UCI loop; main() is defined elsewhere in this module
    # (not visible in this excerpt).
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
26416,
334,
979,
7822,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
640,
198,
198,
6738,
764,
3526,
1330,
5926,
198,
6738,
764,
18206,
2288,
1330,
26439,
84,
13... | 3.097826 | 92 |
"""File selection dialog classes.
Classes:
- FileDialog
- LoadFileDialog
- SaveFileDialog
This module also presents tk common file dialogues, it provides interfaces
to the native file dialogues available w Tk 4.2 oraz newer, oraz the
directory dialogue available w Tk 8.3 oraz newer.
These interfaces were written by Fredrik Lundh, May 1997.
"""
z tkinter zaimportuj *
z tkinter.dialog zaimportuj Dialog
z tkinter zaimportuj commondialog
zaimportuj os
zaimportuj fnmatch
dialogstates = {}
klasa FileDialog:
"""Standard file selection dialog -- no checks on selected file.
Usage:
d = FileDialog(master)
fname = d.go(dir_or_file, pattern, default, key)
jeżeli fname jest Nic: ...canceled...
inaczej: ...open file...
All arguments to go() are optional.
The 'key' argument specifies a key w the global dictionary
'dialogstates', which keeps track of the values dla the directory
oraz pattern arguments, overriding the values dalejed w (it does
nie keep track of the default argument!). If no key jest specified,
the dialog keeps no memory of previous state. Note that memory jest
kept even when the dialog jest canceled. (All this emulates the
behavior of the Macintosh file selection dialogs.)
"""
title = "File Selection Dialog"
inaczej:
self.directory, default = os.path.split(dir_or_file)
self.set_filter(self.directory, pattern)
self.set_selection(default)
self.filter_command()
self.selection.focus_set()
self.top.wait_visibility() # window needs to be visible dla the grab
self.top.grab_set()
self.how = Nic
self.master.mainloop() # Exited by self.quit(how)
jeżeli key:
directory, pattern = self.get_filter()
jeżeli self.how:
directory = os.path.dirname(self.how)
dialogstates[key] = directory, pattern
self.top.destroy()
zwróć self.how
def quit(self, how=Nic):
self.how = how
self.master.quit() # Exit mainloop()
def dirs_double_event(self, event):
self.filter_command()
def dirs_select_event(self, event):
dir, pat = self.get_filter()
subdir = self.dirs.get('active')
dir = os.path.normpath(os.path.join(self.directory, subdir))
self.set_filter(dir, pat)
def files_double_event(self, event):
self.ok_command()
def files_select_event(self, event):
file = self.files.get('active')
self.set_selection(file)
def ok_event(self, event):
self.ok_command()
def ok_command(self):
self.quit(self.get_selection())
def filter_command(self, event=Nic):
dir, pat = self.get_filter()
spróbuj:
names = os.listdir(dir)
wyjąwszy OSError:
self.master.bell()
zwróć
self.directory = dir
self.set_filter(dir, pat)
names.sort()
subdirs = [os.pardir]
matchingfiles = []
dla name w names:
fullname = os.path.join(dir, name)
jeżeli os.path.isdir(fullname):
subdirs.append(name)
albo_inaczej fnmatch.fnmatch(name, pat):
matchingfiles.append(name)
self.dirs.delete(0, END)
dla name w subdirs:
self.dirs.insert(END, name)
self.files.delete(0, END)
dla name w matchingfiles:
self.files.insert(END, name)
head, tail = os.path.split(self.get_selection())
jeżeli tail == os.curdir: tail = ''
self.set_selection(tail)
def get_filter(self):
filter = self.filter.get()
filter = os.path.expanduser(filter)
jeżeli filter[-1:] == os.sep albo os.path.isdir(filter):
filter = os.path.join(filter, "*")
zwróć os.path.split(filter)
def get_selection(self):
file = self.selection.get()
file = os.path.expanduser(file)
zwróć file
self.filter.delete(0, END)
self.filter.insert(END, os.path.join(dir albo os.curdir, pat albo "*"))
def set_selection(self, file):
self.selection.delete(0, END)
self.selection.insert(END, os.path.join(self.directory, file))
klasa LoadFileDialog(FileDialog):
"""File selection dialog which checks that the file exists."""
title = "Load File Selection Dialog"
inaczej:
self.quit(file)
klasa SaveFileDialog(FileDialog):
"""File selection dialog which checks that the file may be created."""
title = "Save File Selection Dialog"
d = Dialog(self.top,
title="Overwrite Existing File Question",
text="Overwrite existing file %r?" % (file,),
bitmap='questhead',
default=1,
strings=("Yes", "Cancel"))
jeżeli d.num != 0:
zwróć
inaczej:
head, tail = os.path.split(file)
jeżeli nie os.path.isdir(head):
self.master.bell()
zwróć
self.quit(file)
# For the following classes oraz modules:
#
# options (all have default values):
#
# - defaultextension: added to filename jeżeli nie explicitly given
#
# - filetypes: sequence of (label, pattern) tuples. the same pattern
# may occur przy several patterns. use "*" jako pattern to indicate
# all files.
#
# - initialdir: initial directory. preserved by dialog instance.
#
# - initialfile: initial file (ignored by the open dialog). preserved
# by dialog instance.
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
# - multiple: jeżeli true user may select more than one file
#
# options dla the directory chooser:
#
# - initialdir, parent, title: see above
#
# - mustexist: jeżeli true, user must pick an existing directory
#
klasa _Dialog(commondialog.Dialog):
#
# file dialogs
klasa Open(_Dialog):
"Ask dla a filename to open"
command = "tk_getOpenFile"
jeżeli nie widget.tk.wantobjects() oraz "multiple" w self.options:
# Need to split result explicitly
zwróć self._fixresult(widget, widget.tk.splitlist(result))
zwróć _Dialog._fixresult(self, widget, result)
klasa SaveAs(_Dialog):
"Ask dla a filename to save as"
command = "tk_getSaveFile"
# the directory dialog has its own _fix routines.
klasa Directory(commondialog.Dialog):
"Ask dla a directory"
command = "tk_chooseDirectory"
#
# convenience stuff
# Convenience wrappers around the dialog classes above. Note: this file uses
# Polish-translated Python keywords (zwróć = return, jeżeli = if,
# dla ... w ... = for ... in ..., Nic = None).
def askopenfilename(**options):
    "Ask for a filename to open"
    zwróć Open(**options).show()
def asksaveasfilename(**options):
    "Ask for a filename to save as"
    zwróć SaveAs(**options).show()
def askopenfilenames(**options):
    """Ask for multiple filenames to open

    Returns a list of filenames, or an empty list if
    the cancel button was selected
    """
    options["multiple"]=1
    zwróć Open(**options).show()
# FIXME: are the following perhaps a bit too convenient?
def askopenfile(mode = "r", **options):
    "Ask for a filename to open, and return the opened file"
    filename = Open(**options).show()
    jeżeli filename:
        zwróć open(filename, mode)
    zwróć Nic
def askopenfiles(mode = "r", **options):
    """Ask for multiple filenames and return the open file
    objects

    Returns a list of open file objects, or an empty list if
    cancel was selected
    """
    files = askopenfilenames(**options)
    jeżeli files:
        ofiles=[]
        dla filename w files:
            ofiles.append(open(filename, mode))
        files=ofiles
    zwróć files
def asksaveasfile(mode = "w", **options):
    "Ask for a filename to save as, and return the opened file"
    filename = SaveAs(**options).show()
    jeżeli filename:
        zwróć open(filename, mode)
    zwróć Nic
def askdirectory (**options):
    "Ask for a directory, and return the chosen path"
    zwróć Directory(**options).show()
# --------------------------------------------------------------------
# test stuff
# dialog dla openening files
openfilename=askopenfilename(filetypes=[("all files", "*")])
spróbuj:
fp=open(openfilename,"r")
fp.close()
wyjąwszy:
print("Could nie open File: ")
print(sys.exc_info()[1])
print("open", openfilename.encode(enc))
# dialog dla saving files
saveasfilename=asksaveasfilename()
print("saveas", saveasfilename.encode(enc))
jeżeli __name__ == '__main__':
test()
| [
37811,
8979,
6356,
17310,
6097,
13,
198,
198,
9487,
274,
25,
198,
198,
12,
9220,
44204,
198,
12,
8778,
8979,
44204,
198,
12,
12793,
8979,
44204,
198,
198,
1212,
8265,
635,
10969,
256,
74,
2219,
2393,
17310,
947,
11,
340,
3769,
20314,
... | 2.391498 | 3,599 |
"""add booking image model
Revision ID: 9247b9acb4bd
Revises: 7a4e99ff303f
Create Date: 2018-05-28 18:08:48.285717
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9247b9acb4bd'
down_revision = '7a4e99ff303f'
branch_labels = None
depends_on = None
| [
37811,
2860,
25452,
2939,
2746,
198,
198,
18009,
1166,
4522,
25,
860,
23753,
65,
24,
330,
65,
19,
17457,
198,
18009,
2696,
25,
767,
64,
19,
68,
2079,
487,
22572,
69,
198,
16447,
7536,
25,
2864,
12,
2713,
12,
2078,
1248,
25,
2919,
... | 2.472 | 125 |
import os
import sys
from flexlogger.automation import Application
def main(project_path):
    """Launch FlexLogger, open the given project, and print one test property.

    Prompts the user for a property name, looks it up in the project's
    logging specification document, prints it, then waits for Enter before
    closing the project. Returns 0 on completion.
    """
    prompt = "Enter the name of the test property to get the value of: "
    with Application.launch() as app:
        opened_project = app.open_project(path=project_path)
        property_name = input(prompt)
        spec_document = opened_project.open_logging_specification_document()
        found_property = spec_document.get_test_property(property_name)
        print("Test property:")
        print(found_property)
        print("Press Enter to close the project...")
        input()
        opened_project.close()
    return 0
if __name__ == "__main__":
    argv = sys.argv
    if len(argv) < 2:
        print("Usage: %s <path of project to open>" % os.path.basename(__file__))
        # BUG FIX: exit with a non-zero status on a usage error; the original
        # called sys.exit() with no argument, which reports success (0) to
        # the calling shell.
        sys.exit(1)
    project_path_arg = argv[1]
    # Propagate main()'s return value as the process exit code.
    sys.exit(main(project_path_arg))
| [
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
7059,
6404,
1362,
13,
2306,
296,
341,
1330,
15678,
628,
198,
4299,
1388,
7,
16302,
62,
6978,
2599,
198,
220,
220,
220,
37227,
38296,
26719,
11187,
1362,
11,
1280,
257,
1628,
11,
290,
30... | 2.626404 | 356 |
import sqlite3
# Open (or create) the local updates database and ensure the schema exists.
conn = sqlite3.connect('oinpUpdatesDatabase.db')
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS updates (update_id blob, update_date text, update_data text)""")
# FIX: commit explicitly so table creation does not depend on sqlite3's
# implicit-transaction rules (no commit/close was issued anywhere in this
# script, risking loss of uncommitted work when the process exits).
conn.commit()
# c.execute("INSERT INTO updates VALUES (:id, :date, :dataInfo)",
#           {'id': update2.id, 'date': update2.date, 'dataInfo': update2.updatetext})
# c.execute("SELECT * FROM updates WHERE date=:date", {'date': 'March 5, 2020'})
# DELETE FROM table_name WHERE condition; c.execute("DELETE FROM updates WHERE update_date='March 19, 2020'")
# print(c.fetchall())
| [
11748,
44161,
578,
18,
198,
198,
37043,
796,
44161,
578,
18,
13,
8443,
10786,
36743,
79,
4933,
19581,
38105,
13,
9945,
11537,
198,
66,
796,
48260,
13,
66,
21471,
3419,
198,
198,
66,
13,
41049,
7203,
15931,
43387,
6158,
43679,
16876,
5... | 2.762376 | 202 |
#!/usr/bin/env python3
import geometry_msgs.msg
import rospy
import roslib
import tf_conversions
import tf2_ros
from std_msgs.msg import String
from scipy.optimize import minimize as fmin
# /* estimated camera2 pose */
# Estimated camera2 pose: quaternion components (x, y, z, w) followed by a
# translation vector, presumably in meters — TODO confirm frame and units.
m_rotx = -0.653
m_roty = 0.271
m_rotz = -0.271
m_rotw = 0.653
m_tranx = 0.073
m_trany = 0.073
m_tranz = 0
if __name__ == "__main__":
    # Register this process as a ROS node before any pub/sub activity.
    rospy.init_node('python_calibration')
    try:
        # DemoNode is defined elsewhere in this file (not visible in this
        # excerpt); constructing it presumably sets up publishers/subscribers.
        DemoNode()
        print("entering Try")
        # Block until the node is shut down.
        rospy.spin()
    except rospy.ROSInterruptException:
        print("exception thrown")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
22939,
62,
907,
14542,
13,
19662,
198,
11748,
686,
2777,
88,
198,
11748,
686,
6649,
571,
198,
11748,
48700,
62,
1102,
47178,
198,
11748,
48700,
17,
62,
4951,
198,
6738,
1436... | 2.266393 | 244 |
# Human-readable names for the behaviors detected by the analysis; `labels`
# collects them all in a fixed order (callers may rely on this ordering).
# --- database access ---
PREF_DB_QUERY = "Preferences DB Query Exec"
PREF_DB_LEAK = "Pref DB Leak"
DB_QUERY = "SQL-lite Query Exec"
DB_LEAK = "SQL-lite Leaks"
# --- connectivity, file and location access ---
CONN_LEAKS = "Connectivity Leaks"
FW = "File Writing"
FO = "File Opening"
LOC_LEAKS = "Location Leaks"
SOCKET = "Open Socket"
PREF_LOC_LEAKS = "Pref Location Leaks"
PREF_TM_LEAKS = "Pref TM Leak"
SMS = "Send SMS"
TM_LEAK = "TM Leaks"
PREF_CONN_LEAK = "Pref Connectivity Leaks"
# --- device and intent actions ---
TAKE_PICT = "Take Picture"
CALL = "Direct Call"
DIAL = "Call via Intent"
EMAIL_SMS = "Send Email or SMS Intent"
NEW_APP = "Start New App"
CALENDAR = "Edit Calendar"
PLAY = "Play Video or Audio"
SOCIAL = "Post to Social"
DOWNLOAD = "Save/Download Picture"
# --- reflection / dynamic code behaviors ---
F_CLASS = "Fetch Class"
CONSTR_INSTANCE = "Constructor Instance"
F_METHOD = "Fetch method"
M_PARAM = "Method Parameter"
SOP = "Frame Confusion"
F_CONSTR = "Fetch Constructor"
API = "Reflection"
RF = "File Reading"
UKNOWN_INTENT = "Uknown Intent"
labels = [PREF_DB_QUERY, PREF_DB_LEAK, DB_QUERY, DB_LEAK,
          CONN_LEAKS, FW, FO, LOC_LEAKS, SOCKET, PREF_LOC_LEAKS,
          PREF_TM_LEAKS, SMS, TM_LEAK, PREF_CONN_LEAK, TAKE_PICT,
          CALL, DIAL, EMAIL_SMS, NEW_APP, CALENDAR, PLAY, SOCIAL,
          DOWNLOAD, F_CLASS, CONSTR_INSTANCE, F_METHOD, M_PARAM,
          SOP, F_CONSTR, API, RF, UKNOWN_INTENT]
| [
47,
31688,
62,
11012,
62,
10917,
19664,
796,
366,
36698,
4972,
20137,
43301,
8393,
1,
198,
47,
31688,
62,
11012,
62,
2538,
10206,
796,
366,
36698,
20137,
1004,
461,
1,
198,
11012,
62,
10917,
19664,
796,
366,
17861,
12,
36890,
43301,
8... | 2.322878 | 542 |
from PIL import Image, ImageEnhance, ImageChops
import numpy as np, math
''' file che contiene le funzione per la manipolazione delle immaggini '''
# converte l'immagine da array di numpy ad oggeto di pillow
# converte l'immagine da oggetto di pillow ad array di numpy
# regola il contrasto dell'immagine
# regola la luminosità dell'immgine
# regola la saturazione dell'immagine
# applica filtro color seppia all'immagine
'''
Forumla reperita da: https://www.w3.org/TR/filter-effects-1/#sepiaEquivalent
'''
# crea un immagine dei colori e dimensioni passati come parametri
# crea gradiente gradiale dei colori passati come parametri
# crea gradiente lineare dei colori passati come parametri
# converte immagine da RGBA ad RGB
'''
Foruma reperita da: https://www.w3.org/TR/compositing-1/#blending
Cr = (αs - αs x αb) x Cs + αs x αb x B(Cb, Cs) + (αb - αs x αb) x Cb
Dove:
- Cr è l'immagine risultante
- B è la forula per il blending
- Cs è l'immagine originale
- Cb è il colore da applicare a Cs
- αs è il valore del canale alpha di Cs
- αb è il valore del canale alpha di Cb
'''
# riporta tutti i valori dell'immagine tra 0 e 255
'''
Formula reperita da: https://www.w3.org/TR/compositing-1/#blendingsoftlight
if(Cs <= 0.5)
B(Cb, Cs) = Cb - (1 - 2 x Cs) x Cb x (1 - Cb)
else
B(Cb, Cs) = Cb + (2 x Cs - 1) x (D(Cb) - Cb)
dove:
if(Cb <= 0.25)
D(Cb) = ((16 * Cb - 12) x Cb + 4) x Cb
else
D(Cb) = sqrt(Cb)
'''
'''
Formula reperita da: https://www.w3.org/TR/compositing-1/#blendinghardlight
if(Cs <= 0.5)
B(Cb, Cs) = Multiply(Cb, 2 x Cs)
else
B(Cb, Cs) = Screen(Cb, 2 x Cs -1)
'''
'''
Formula reperita da: https://www.w3.org/TR/compositing-1/#blendingoverlay
B(Cb, Cs) = HardLight(Cs, Cb)
'''
| [
6738,
350,
4146,
1330,
7412,
11,
7412,
35476,
590,
11,
7412,
1925,
2840,
198,
11748,
299,
32152,
355,
45941,
11,
10688,
198,
198,
7061,
6,
2393,
1125,
542,
72,
1734,
443,
1257,
89,
7935,
583,
8591,
7704,
349,
1031,
7935,
1619,
293,
... | 2.294118 | 765 |
from datetime import datetime
from models import Wallet
from pandas import DataFrame
from ohlcv_data import load_binance_data
from indicators import sma, ema, wma
from strategy_utils import save_strategy_data
def run_ma_strategy(df:DataFrame, ma:str = 'sma', span = 7) -> None:
'''
ma can be one of [sma, ema, 'wma]
'''
wallet = Wallet(0, 1000)
if ma == 'sma':
sma(df=df, span=span, column_name=ma)
elif ma == 'ema':
ema(df=df, span=span, column_name=ma)
elif ma == 'wma':
wma(df=df, span=span, column_name=ma)
else:
print(f"unkown mouving average 'ma = {ma}' ")
return
for i in range(span +1, df.shape[0]):
pi = i -1
if df['close'][i] > df[ma][i] and df['close'][pi] <= df[ma][pi]:
wallet.buy(df['close'][i], df['time'][i])
if df['close'][i] < df[ma][i] and df['close'][pi] >= df[ma][pi]:
wallet.sell(df['close'][i], df['time'][i])
last_row = df.shape[0] - 1
wallet.sell(df['close'][last_row], df['time'][last_row])
save_strategy_data(ma, df, [ma], wallet.orders_as_dataframe())
print(f'{len(wallet._orders)} order(s) executed')
print(f'base assets: {wallet._base_assets}')
print(f'quote assets: {wallet._quote_assets}')
print('--- vs ---')
wallet = Wallet(0, 1000)
wallet.buy(df['close'][0], df['time'][0])
wallet.sell(df['close'][last_row], df['time'][last_row])
print(f'quote assets: {wallet._quote_assets}')
# --- test function
symbol = 'BTC/USDT'
candel_interval = '1h'
since = int(datetime.timestamp(datetime.strptime('01/01/2021', '%d/%m/%Y')) * 1000)
span = 7
df = load_binance_data(symbol, candel_interval, since=since)
print(f'------------------------------SMA {candel_interval}------------------------------------------')
ma_name = 'sma'
run_ma_strategy(df, ma = ma_name, span= span)
print(f'------------------------------EMA {candel_interval}------------------------------------------')
ma_name = 'ema'
run_ma_strategy(df, ma = ma_name, span= span)
print(f'------------------------------WMA {candel_interval}------------------------------------------')
ma_name = 'wma'
run_ma_strategy(df, ma = ma_name, span= span) | [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
4981,
1330,
37249,
198,
6738,
19798,
292,
1330,
6060,
19778,
198,
6738,
11752,
75,
33967,
62,
7890,
1330,
3440,
62,
8800,
590,
62,
7890,
198,
6738,
21337,
1330,
895,
64,
11,
795,
64,
11,
... | 2.423414 | 914 |
import sys
import pytest
from ctypes import c_char_p, c_void_p, cast
import sdl2
from sdl2 import SDL_Init, SDL_Quit, SDL_QuitSubSystem, SDL_INIT_EVERYTHING
# NOTE: These tests are very incomplete and in need of a rewrite
# Test initializing event structs (is this actually useful?)
| [
11748,
25064,
198,
11748,
12972,
9288,
198,
6738,
269,
19199,
1330,
269,
62,
10641,
62,
79,
11,
269,
62,
19382,
62,
79,
11,
3350,
198,
11748,
264,
25404,
17,
198,
6738,
264,
25404,
17,
1330,
45417,
62,
31768,
11,
45417,
62,
4507,
27... | 3.119565 | 92 |
import pytest
from abuse_whois import schemas
@pytest.mark.parametrize(
"address,expected",
[
("http://example.com", "form"),
("abuse@example.com", "email"),
],
)
| [
11748,
12972,
9288,
198,
198,
6738,
5076,
62,
8727,
271,
1330,
3897,
5356,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
366,
21975,
11,
40319,
1600,
198,
220,
220,
220,
685,
198,
220,
220,
... | 2.309524 | 84 |
import os
import cv2
import json
import numpy as np
import torch
import pickle
import matplotlib.pyplot as plt
from tqdm import tqdm
from config import system_configs
from utils import crop_image, normalize_
| [
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
2298,
293,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
256,
80,
36020,
1330,
256,
... | 3.25 | 64 |
import pandas
import math
import numpy as np
import re
from sklearn.ensemble import RandomForestClassifier as Rforest
from sklearn import cross_validation as validate
from sklearn.feature_selection import SelectKBest, f_classif
import operator
# select best features
if __name__ == '__main__':
trainAndTest() | [
11748,
19798,
292,
201,
198,
11748,
10688,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
302,
201,
198,
6738,
1341,
35720,
13,
1072,
11306,
1330,
14534,
34605,
9487,
7483,
355,
371,
29623,
201,
198,
6738,
1341,
35720,
1330,
... | 3.292929 | 99 |
import twitter
import gevent
import click
import requests
from tweet_delete.util import td_format
from datetime import datetime
from dateutil import parser
| [
11748,
17044,
198,
11748,
4903,
1151,
198,
11748,
3904,
198,
11748,
7007,
198,
6738,
6126,
62,
33678,
13,
22602,
1330,
41560,
62,
18982,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3128,
22602,
1330,
30751,
628
] | 4.243243 | 37 |
# local
from .base_factory import SpiderClsFactory
from .crawl_spider_factory import CrawlSpiderClsFactory
| [
2,
1957,
198,
6738,
764,
8692,
62,
69,
9548,
1330,
12648,
2601,
82,
22810,
198,
6738,
764,
66,
13132,
62,
2777,
1304,
62,
69,
9548,
1330,
327,
13132,
41294,
2601,
82,
22810,
198
] | 3.242424 | 33 |
#!/usr/bin/env python3
n,m = map(int, input().split())
t1 = [0] * 100010
t2 = [0] * 100010
tt = int(0)
for i in range(n):
l,r = map(int, input().split())
t1[l] += 1
t2[r+1] -= 1
for i in range(1,100010):
t1[i] += t1[i-1]
t2[i] += t2[i-1]
for i in range(m):
l,r = map(int, input().split())
tt += t1[r] + t2[l]
print(tt) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
77,
11,
76,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
198,
83,
16,
796,
685,
15,
60,
1635,
1802,
20943,
198,
83,
17,
796,
685,
15,
60,
1635,
1802,
20943,
19... | 1.805128 | 195 |
""" ``sklearn_xarray.common.wrappers`` """
from types import MethodType
from sklearn.base import clone
from sklearn.utils.validation import check_X_y, check_array
from .base import (
partial_fit, predict, predict_proba, predict_log_proba, decision_function,
transform, inverse_transform, fit_transform, score,
_CommonEstimatorWrapper, _ImplementsPredictMixin,
_ImplementsScoreMixin, _ImplementsTransformMixin,
_ImplementsFitTransformMixin, _ImplementsInverseTransformMixin
)
from sklearn_xarray.utils import is_dataarray, is_dataset, is_target
# mapping from wrapped methods to wrapper methods
_method_map = {
'partial_fit': partial_fit,
'predict': predict,
'predict_proba': predict_proba,
'predict_log_proba': predict_log_proba,
'decision_function': decision_function,
'transform': transform,
'inverse_transform': inverse_transform,
'fit_transform': fit_transform,
'score': score
}
def wrap(estimator, reshapes=None, sample_dim=None, compat=False, **kwargs):
""" Wrap an sklearn estimator for xarray objects.
Parameters
----------
estimator : sklearn estimator class or instance
The estimator this instance wraps around.
reshapes : str or dict, optional
The dimension(s) reshaped by this estimator. Any coordinates in the
DataArray along these dimensions will be dropped. If the estimator drops
this dimension (e.g. a binary classifier returning a 1D vector), the
dimension itself will also be dropped.
You can specify multiple dimensions mapping to multiple new dimensions
with a dict whose items are lists of reshaped dimensions, e.g.
``{'new_feature': ['feature_1', 'feature_2'], ...}``
sample_dim : str, optional
The name of the dimension that represents the samples. By default,
the wrapper will assume that this is the first dimension in the array.
compat : bool, optional
If True, ``set_params``/``get_params`` will only use the wrapper's
actual parameters and not those of the wrapped estimator. This might
be necessary when the estimator defines parameters with the same name
as the wrapper.
Returns
-------
A wrapped estimator.
"""
return EstimatorWrapper(estimator, reshapes=reshapes,
sample_dim=sample_dim, compat=compat, **kwargs)
class EstimatorWrapper(_CommonEstimatorWrapper):
""" A wrapper around sklearn estimators compatible with xarray objects.
Parameters
----------
estimator : sklearn estimator
The estimator instance this instance wraps around.
reshapes : str or dict, optional
The dimension(s) reshaped by this estimator. Any coordinates in the
DataArray along these dimensions will be dropped. If the estimator drops
this dimension (e.g. a binary classifier returning a 1D vector), the
dimension itself will also be dropped.
You can specify multiple dimensions mapping to multiple new dimensions
with a dict whose items are lists of reshaped dimensions, e.g.
``{'new_feature': ['feature_1', 'feature_2'], ...}``
sample_dim : str, optional
The name of the dimension that represents the samples. By default,
the wrapper will assume that this is the first dimension in the array.
compat : bool, optional
If True, ``set`_params``/``get_params`` will only use the wrapper's
actual parameters and not those of the wrapped estimator. This might
be necessary when the estimator defines parameters with the same name
as the wrapper.
"""
def _decorate(self):
""" Decorate this instance with wrapping methods for the estimator. """
if hasattr(self.estimator, '_estimator_type'):
setattr(self, '_estimator_type', self.estimator._estimator_type)
for m in _method_map:
if hasattr(self.estimator, m):
try:
setattr(self, m,
MethodType(_method_map[m], self, EstimatorWrapper))
except TypeError:
setattr(self, m, MethodType(_method_map[m], self))
def fit(self, X, y=None, **fit_params):
""" A wrapper around the fitting function.
Parameters
----------
X : xarray DataArray, Dataset other other array-like
The training input samples.
y : xarray DataArray, Dataset other other array-like
The target values.
Returns
-------
Returns self.
"""
if self.estimator is None:
raise ValueError('You must specify an estimator instance to wrap.')
if is_target(y):
y = y(X)
if is_dataarray(X):
self.type_ = 'DataArray'
self.estimator_ = self._fit(X, y, **fit_params)
elif is_dataset(X):
self.type_ = 'Dataset'
self.estimator_dict_ = {
v: self._fit(X[v], y, **fit_params) for v in X.data_vars}
else:
self.type_ = 'other'
if y is None:
X = check_array(X)
else:
X, y = check_X_y(X, y)
self.estimator_ = clone(self.estimator).fit(X, y, **fit_params)
for v in vars(self.estimator_):
if v.endswith('_') and not v.startswith('_'):
setattr(self, v, getattr(self.estimator_, v))
return self
class TransformerWrapper(
EstimatorWrapper, _ImplementsTransformMixin,
_ImplementsFitTransformMixin, _ImplementsInverseTransformMixin):
""" A wrapper around sklearn transformers compatible with xarray objects.
Parameters
----------
estimator : sklearn estimator
The estimator this instance wraps around.
reshapes : str or dict, optional
The dimension reshaped by this estimator.
"""
class RegressorWrapper(
EstimatorWrapper, _ImplementsPredictMixin, _ImplementsScoreMixin):
""" A wrapper around sklearn regressors compatible with xarray objects.
Parameters
----------
estimator : sklearn estimator
The estimator this instance wraps around.
reshapes : str or dict, optional
The dimension reshaped by this estimator.
"""
_estimator_type = "regressor"
class ClassifierWrapper(
EstimatorWrapper, _ImplementsPredictMixin, _ImplementsScoreMixin):
""" A wrapper around sklearn classifiers compatible with xarray objects.
Parameters
----------
estimator : sklearn estimator
The estimator this instance wraps around.
reshapes : str or dict, optional
The dimension reshaped by this estimator.
"""
_estimator_type = "classifier"
| [
37811,
7559,
8135,
35720,
62,
87,
18747,
13,
11321,
13,
29988,
11799,
15506,
37227,
198,
198,
6738,
3858,
1330,
11789,
6030,
198,
198,
6738,
1341,
35720,
13,
8692,
1330,
17271,
198,
6738,
1341,
35720,
13,
26791,
13,
12102,
341,
1330,
21... | 2.616095 | 2,597 |
import numpy as np
import os
import pandas as pd
## read yuliang tces
f = open('../target_info/tces.csv')
tces_data = f.readlines()
f.close()
del tces_data[0]
yuliang_ticids = []
for row in tces_data:
ticid = row.split(',')[1]
yuliang_ticids.append(int(ticid))
yuliang_ticids = set(yuliang_ticids)
## create a script for download
for sector in range(1, 6):
idx = 0
num = 0
outfile = open('tces_sector{}.sh'.format(sector), 'w')
outfile.write('#!/bin/sh\n')
f = open('sectors_lc_sh/tesscurl_sector_{}_lc.sh'.format(sector))
data = f.readlines()
f.close()
del data[0]
for l in data:
ticid = l.split('-')[6]
if int(ticid) in yuliang_ticids:
outfile.write(l)
idx += 1
if idx % 100 == 0:
print(idx)
print(idx)
outfile.close()
## the number of ticids that can be discovered
lc_ticids = []
for sector in range(1, 6):
f = open('sectors_lc_sh/tesscurl_sector_{}_lc.sh'.format(sector))
data = f.readlines()
f.close()
del data[0]
for l in data:
lc_ticids.append(int(l.split('-')[6]))
lc_ticids = set(lc_ticids)
common = lc_ticids & yuliang_ticids
print(len(common))
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2235,
1100,
331,
377,
15483,
256,
728,
220,
198,
69,
796,
1280,
10786,
40720,
16793,
62,
10951,
14,
83,
728,
13,
40664,
11537,
198,
83,
... | 2.088235 | 578 |
import unittest
import datetime
from sqlalchemy.exc import NoResultFound, IntegrityError
from src.flight_model.model import create_database, Session, Airline, Flight
from src.flight_model.logic import create_airport
from src.flight_model.logic import create_airline
from src.flight_model.logic import create_flight
from tests.flight_model.utils import create_test_layout, \
create_test_seating_plan, create_test_passengers_on_flight
| [
11748,
555,
715,
395,
198,
11748,
4818,
8079,
198,
6738,
44161,
282,
26599,
13,
41194,
1330,
1400,
23004,
21077,
11,
39348,
12331,
198,
6738,
12351,
13,
22560,
62,
19849,
13,
19849,
1330,
2251,
62,
48806,
11,
23575,
11,
3701,
1370,
11,
... | 3.448819 | 127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
""" Misc tool functions """
import pythonzimbra
import pythonzimbra.tools.xmlserializer
import re
import hmac
import hashlib
from xml.dom import minidom
re_zuuid = re.compile(r'[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}')
def is_zuuid(s):
""" Is it a zimbraUUID ?
example zimbra UUID : d78fd9c9-f000-440b-bce6-ea938d40fa2d
"""
return re_zuuid.match(s)
def build_preauth_str(preauth_key, account_name, timestamp, expires,
admin=False):
""" Builds the preauth string and hmac it, following the zimbra spec.
Spec and examples are here http://wiki.zimbra.com/wiki/Preauth
"""
if admin:
s = '{0}|1|name|{1}|{2}'.format(account_name, expires, timestamp)
else:
s = '{0}|name|{1}|{2}'.format(account_name, expires, timestamp)
return hmac.new(preauth_key.encode('utf-8'), s.encode('utf-8'),
hashlib.sha1).hexdigest()
def get_content(obj):
""" Works arround (sometimes) non predictible results of pythonzimbra
Sometime, the content of an XML tag is wrapped in {'_content': foo},
sometime it is accessible directly.
"""
if isinstance(obj, dict):
return obj['_content']
else:
return obj
def auto_type(s):
""" Get a XML response and tries to convert it to Python base object
"""
if isinstance(s, bool):
return s
elif s is None:
return ''
elif s == 'TRUE':
return True
elif s == 'FALSE':
return False
else:
try:
try:
# telephone numbers may be wrongly interpretted as ints
if s.startswith('+'):
return s
else:
return int(s)
except ValueError:
return float(s)
except ValueError:
return s
def auto_untype(arg):
""" The oposite of auto_type : takes a python base object and tries to
convert it to XML typed string.
"""
if arg is True:
return 'TRUE'
elif arg is False:
return 'FALSE'
else:
return arg
def xml_str_to_dict(s):
""" Transforms an XML string it to python-zimbra dict format
For format, see:
https://github.com/Zimbra-Community/python-zimbra/blob/master/README.md
:param: a string, containing XML
:returns: a dict, with python-zimbra format
"""
xml = minidom.parseString(s)
return pythonzimbra.tools.xmlserializer.dom_to_dict(xml.firstChild)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
37811,
29882,
2891,
5499,
37227,
198,
198,
11748,
... | 2.225 | 1,160 |
# OpenNero will execute ModMain when this mod is loaded
from BlocksTower.client import ClientMain
| [
2,
4946,
45,
3529,
481,
12260,
3401,
13383,
618,
428,
953,
318,
9639,
198,
6738,
35111,
51,
789,
13,
16366,
1330,
20985,
13383,
198
] | 4.083333 | 24 |
from abc import abstractmethod, ABCMeta
| [
6738,
450,
66,
1330,
12531,
24396,
11,
9738,
48526,
628,
628
] | 3.909091 | 11 |
import FWCore.ParameterSet.Config as cms
OutALCARECOPromptCalibProdSiPixel_noDrop = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOPromptCalibProdSiPixel')
),
outputCommands=cms.untracked.vstring(
'keep *_ALCARECOPromptCalibProdSiPixel_*_*',
'keep *_siPixelStatusProducer_*_*')
)
import copy
OutALCARECOPromptCalibProdSiPixel=copy.deepcopy(OutALCARECOPromptCalibProdSiPixel_noDrop)
OutALCARECOPromptCalibProdSiPixel.outputCommands.insert(0, "drop *")
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
7975,
1847,
20034,
2943,
3185,
45700,
9771,
571,
2964,
67,
42801,
40809,
62,
3919,
26932,
796,
269,
907,
13,
3705,
316,
7,
198,
220,
220,
220,
9683,
37103,
796,... | 2.367713 | 223 |
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
from gspylib.inspection.common import SharedFuncs
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
12131,
43208,
21852,
1766,
1539,
43,
8671,
13,
198,
2,
198,
2,
1280,
35389,
1046,
318,
11971,
739,
17996,
272,
6599,
43,
410,
17,
13,
198,
2,
921,... | 3.198413 | 252 |
"""
Created on Wed Jul 8 21:03:54 2020
@author: ning
"""
import os
from glob import glob
from tqdm import tqdm
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
# matplotlib.pyplot.switch_backend('agg')
from matplotlib import pyplot as plt
from matplotlib.patches import Patch
from sklearn import preprocessing
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedShuffleSplit,cross_validate
from sklearn.utils import shuffle as sk_shuffle
from sklearn.inspection import permutation_importance
sns.set_style('white')
sns.set_context('poster',font_scale = 1.5,)
from matplotlib import rc
rc('font',weight = 'bold')
plt.rcParams['axes.labelsize'] = 45
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 45
plt.rcParams['axes.titleweight'] = 'bold'
plt.rcParams['ytick.labelsize'] = 32
plt.rcParams['xtick.labelsize'] = 32
paper_dir = '/export/home/nmei/nmei/properties_of_unconscious_processing/figures'
working_dir = '../../another_git/agent_models/results'
figure_dir = '../figures'
marker_factor = 10
marker_type = ['8','s','p','*','+','D','o']
alpha_level = .75
if not os.path.exists(figure_dir):
os.mkdir(figure_dir)
working_data = glob(os.path.join(working_dir,'*','*.csv'))
df = []
for f in working_data:
if ('inception' not in f):
temp = pd.read_csv(f)
k = f.split('/')[-2]
model,hidde_unit,hidden_ac,drop,out_ac = k.split('_')
if 'drop' not in temp.columns:
temp['drop'] = float(drop)
df.append(temp)
df = pd.concat(df)
n_noise_levels = 50
noise_levels = np.concatenate([[0],[item for item in np.logspace(-1,3,n_noise_levels)]])
x_map = {round(item,9):ii for ii,item in enumerate(noise_levels)}
inverse_x_map = {round(value,9):key for key,value in x_map.items()}
print(x_map,inverse_x_map)
df['x'] = df['noise_level'].round(9).map(x_map)
print(df['x'].values)
df['x'] = df['x'].apply(lambda x: [x + np.random.normal(0,0.1,size = 1)][0][0])
df = df.sort_values(['hidden_activation','output_activation'])
df['activations'] = df['hidden_activation'] + '_' + df['output_activation']
idxs = np.logical_or(df['model'] == 'CNN',df['model'] == 'linear-SVM')
df_picked = df.loc[idxs,:]
col_indp = ['hidden_units','hidden_activation','output_activation','noise_level','drop','model_name']
for col in col_indp:
print(col, pd.unique(df_picked[col]))
df_stat = {name:[] for name in col_indp}
df_stat['CNN_performance'] = []
df_stat['SVM_performance'] = []
df_stat['CNN_chance_performance']= []
df_stat['CNN_pval'] = []
df_stat['SVM_pval'] = []
for attri,df_sub in tqdm(df_picked.groupby(col_indp),desc='generate features'):
if df_sub.shape[0] == 1:
# [df_stat[col].append(df_sub[col].values[0]) for col in col_indp]
# df_stat['CNN_performance'].append(df_sub['score_mean'].values[0])
# df_stat['CNN_pval'].append(df_sub['pval'].values[0])
# df_stat['SVM_performance'].append(0)
# df_stat['SVM_pval'].append(1)
pass
elif df_sub.shape[0] > 1:
for model,df_sub_sub in df_sub.groupby('model'):
if model == 'CNN':
[df_stat[col].append(df_sub[col].values[0]) for col in col_indp]
df_stat['CNN_performance'].append(df_sub_sub['score_mean'].values[0])
df_stat['CNN_pval'].append(df_sub_sub['pval'].values[0])
df_stat['CNN_chance_performance'].append(df_sub_sub['chance_mean'].values[0])
elif model == 'linear-SVM':
df_stat['SVM_performance'].append(df_sub_sub['score_mean'].values[0])
df_stat['SVM_pval'].append(df_sub_sub['pval'].values[0])
else:
print('what?')
df_stat = pd.DataFrame(df_stat)
df_stat.to_csv(os.path.join(paper_dir,
'CNN_SVM_stats.csv'),index = False)
df_chance = df_stat[np.logical_or(
df_stat['CNN_pval'] > 0.05,
df_stat['CNN_performance'] < df_stat['CNN_chance_performance'])
]
| [
37811,
198,
41972,
319,
3300,
5979,
220,
807,
2310,
25,
3070,
25,
4051,
12131,
198,
198,
31,
9800,
25,
299,
278,
198,
37811,
198,
198,
11748,
28686,
198,
6738,
15095,
1330,
15095,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
... | 2.116201 | 2,074 |
# -----------------------------------------------------------
# CS229: Machine Learning Assignment 2
#
# author: Armand Sumo
#
# email: armandsumo@gmail.com
# -----------------------------------------------------------
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# -----------------------------------------------------------
#Logistic Regression
# -----------------------------------------------------------
try:
xrange
except NameError:
xrange = range
# -----------------------------------------------------------
#Naive Bayes
# -----------------------------------------------------------
### Bernouilli event model
if __name__ == '__main__':
main()
| [
2,
20368,
22369,
6329,
198,
2,
9429,
23539,
25,
10850,
18252,
50144,
362,
198,
2,
198,
2,
1772,
25,
7057,
392,
5060,
78,
198,
2,
220,
198,
2,
3053,
25,
3211,
1746,
43712,
31,
14816,
13,
785,
198,
2,
20368,
22369,
6329,
198,
198,
... | 4.733333 | 150 |
from nsm.model import NSM, NSMLightningModule, instruction_model_types
import nsm.datasets.synthetic as syn
import nsm.datasets.clevr as clevr
import pytorch_lightning as pl
from tqdm import tqdm
import torch
from itertools import product
import argparse
from pprint import pprint
import logging
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
# parser.add_argument("--q-len", type=int)
# parser.add_argument("--steps", required=True, type=int)
parser.add_argument(
"--nhops",
type=int,
nargs="+",
default=[],
help="use questions in clevr that require these number of hops only",
)
parser.add_argument("--batch-size", required=True, type=int)
parser.add_argument("--learn-rate", default=0.001, type=float)
parser.add_argument("--encoded-question-size", required=True, type=int)
parser.add_argument(
"--instruction-model-name",
required=True,
choices=list(instruction_model_types.keys()),
)
parser.add_argument("--w-instructions", action="store_true", default=False)
args = parser.parse_args()
if args.w_instructions:
assert args.instruction_model_name == "dummy"
main(args)
| [
6738,
299,
5796,
13,
19849,
1330,
10896,
44,
11,
10896,
5805,
432,
768,
26796,
11,
12064,
62,
19849,
62,
19199,
198,
11748,
299,
5796,
13,
19608,
292,
1039,
13,
1837,
429,
6587,
355,
6171,
198,
11748,
299,
5796,
13,
19608,
292,
1039,
... | 2.608247 | 485 |
import logging
from logging.handlers import QueueHandler, QueueListener
import time
from khlbot.config import LOGGING_CONSOLE_LEVEL, LOGGING_FILE_LEVEL, LOGGING_FILE_DIR
import sys
import os
class Logger:
"""
Config logging module
"""
@staticmethod
def listener_configure(_queue, dirpath=LOGGING_FILE_DIR, log_level=LOGGING_CONSOLE_LEVEL) -> QueueListener:
"""
Configuration log queue listener
:param _queue: Log queue
:param dirpath: The path used to save log file
:param log_level: Log level
:return: QueueListener object
"""
logger = logging.getLogger()
logger.setLevel(log_level)
formatter = logging.Formatter(f"%(asctime)s Process-%(process)s %(processName)s [%(levelname)s] : %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
streamHandler = logging.StreamHandler()
streamHandler.setLevel(log_level)
streamHandler.setFormatter(formatter)
listener = None
try:
os.makedirs(dirpath, exist_ok=True)
fileHandler = logging.FileHandler(dirpath + f"twprgbot_{int(time.time())}.log")
fileHandler.setLevel(log_level)
fileHandler.setFormatter(formatter)
listener = QueueListener(_queue, streamHandler, fileHandler)
except Exception as err:
logging.error(err)
logging.warning("Logging file cannot be created, will just print log to terminal")
listener = QueueListener(_queue, streamHandler)
logger.addHandler(QueueHandler(queue=_queue))
return listener
@staticmethod
def worker_configure(_queue, log_level=LOGGING_CONSOLE_LEVEL) -> None:
"""
Configure logger for worker process
:param _queue: Log queue
:param log_level: Log level
"""
logger = logging.getLogger()
logger.addHandler(QueueHandler(_queue))
logger.setLevel(log_level)
@staticmethod
@staticmethod
@staticmethod
@staticmethod
| [
11748,
18931,
198,
6738,
18931,
13,
4993,
8116,
1330,
4670,
518,
25060,
11,
4670,
518,
33252,
198,
11748,
640,
198,
6738,
479,
18519,
13645,
13,
11250,
1330,
41605,
38,
2751,
62,
10943,
15821,
2538,
62,
2538,
18697,
11,
41605,
38,
2751,... | 2.363326 | 878 |
"""Desenvolva um programa que as duas notas de um aluno. Calcule e mostre a sua média."""
from utilidadescev.dado import leia_real
from utilidadescev.numero import media
n1 = leia_real('Digite 1ª nota: ')
n2 = leia_real('Digite 2ª nota: ')
print(f'A média entre {n1} e {n2} é igual {media(n1, n2):.2f}')
| [
37811,
5960,
268,
10396,
6862,
23781,
1430,
64,
8358,
355,
7043,
292,
407,
292,
390,
23781,
435,
36909,
13,
2199,
23172,
304,
749,
260,
257,
424,
64,
285,
2634,
67,
544,
526,
15931,
198,
198,
6738,
7736,
312,
2367,
344,
85,
13,
67,
... | 2.201439 | 139 |
import configparser
from pathlib import Path
def add_layout(name, layout_props):
'''
EXAMPLE LAYOUT_PROPS
{
"title_2": [[("0", "e"), ("0", "e"), ("1", "w"), ("4", "h")]],
"images_2": [[("0", "e"), ("0", "e"), ("2", "w"), ("1", "h")]],
"text_2": [[("2", "w"), ("0", "e"), ("2", "w"), ("1", "h")]],
"title_3": [[("0", "e"), ("0", "e"), ("1", "w"), ("4", "h")]],
"images_3": [[("0", "e"), ("0", "e"), ("3", "w"), ("2", "h")],
[("3", "w"), ("2", "h"), ("3", "w"), ("2", "h")],
[("3*2", "w"), ("0", "e"), ("3", "w"), ("2", "h")]],
"text_3": [[("0", "e"), ("2", "h"), ("3", "w"), ("2", "h")],
[("3", "w"), ("0", "e"), ("3", "w"), ("2", "h")],
[("3*2", "w"), ("2", "h"), ("3", "w"), ("2", "h")]],
}
'''
cfg = get_layouts()
try:
cfg.add_section(name)
except configparser.DuplicateSectionError:
pass
for k in layout_props:
result_str = ""
for j in layout_props[k]:
result_str += f"{j[0][0]}-{j[0][1]},"
result_str += f"{j[1][0]}-{j[1][1]},"
result_str += f"{j[2][0]}-{j[2][1]},"
result_str += f"{j[3][0]}-{j[3][1]}"
if layout_props[k].index(j) != len(layout_props[k]) - 1:
result_str += "|"
cfg.set(name, k, result_str)
with open(Path.joinpath(Path(__file__).parent, 'layouts.ini'), "w") as config_file:
cfg.write(config_file)
| [
11748,
4566,
48610,
198,
6738,
3108,
8019,
1330,
10644,
628,
628,
628,
198,
198,
4299,
751,
62,
39786,
7,
3672,
11,
12461,
62,
1676,
862,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
7788,
2390,
16437,
406,
4792,
12425,
... | 1.803419 | 819 |
import os
import time
import requests
import io
import hashlib
from PIL import Image
from selenium import webdriver
import signal
from glob import glob
##############
# Parameters
##############
number_of_images = 400
GET_IMAGE_TIMEOUT = 2
SLEEP_BETWEEN_INTERACTIONS = 0.1
SLEEP_BEFORE_MORE = 5
IMAGE_QUALITY = 85
output_path = "/Users/ladvien/deep_arcane/images/0_raw/0_scraped/"
# Get terms already recorded.
dirs = glob(output_path + "*")
dirs = [dir.split("/")[-1].replace("_", " ") for dir in dirs]
search_terms = [
"black and white magic symbol icon",
"black and white arcane symbol icon",
"black and white mystical symbol",
"black and white useful magic symbols icon",
"black and white ancient magic sybol icon",
"black and white key of solomn symbol icon",
"black and white historic magic symbol icon",
"black and white symbols of demons icon",
"black and white magic symbols from book of enoch",
"black and white historical magic symbols icons",
"black and white witchcraft magic symbols icons",
"black and white occult symbols icons",
"black and white rare magic occult symbols icons",
"black and white rare medieval occult symbols icons",
"black and white alchemical symbols icons",
"black and white demonology symbols icons",
"black and white magic language symbols icon",
"black and white magic words symbols glyphs",
"black and white sorcerer symbols",
"black and white magic symbols of power",
"occult religious symbols from old books",
"conjuring symbols",
"magic wards",
"esoteric magic symbols",
"demon summing symbols",
"demon banishing symbols",
"esoteric magic sigils",
"esoteric occult sigils",
"ancient cult symbols",
"gypsy occult symbols",
"Feri Tradition symbols",
"Quimbanda symbols",
"Nagualism symbols",
"Pow-wowing symbols",
"Onmyodo symbols",
"Ku magical symbols",
"Seidhr And Galdr magical symbols",
"Greco-Roman magic symbols",
"Levant magic symbols",
"Book of the Dead magic symbols",
"kali magic symbols",
]
# Exclude terms already stored.
search_terms = [term for term in search_terms if term not in dirs]
##########
# Scrap
##########
wd = webdriver.Chrome()
# Credit:
# https://stackoverflow.com/a/22348885
for term in search_terms:
search_and_download(term, output_path, number_of_images)
| [
11748,
28686,
198,
11748,
640,
198,
198,
11748,
7007,
198,
11748,
33245,
198,
11748,
12234,
8019,
198,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
198,
11748,
6737,
198,
198,
6738,
15095,
1330,
... | 3.059569 | 789 |
import logging
from ozobotmapf.graphics.drawables import FullArrow, DrawableGroup, Line, Arc
from ozobotmapf.graphics.shapes import Rectangle
from ozobotmapf.simulator.path_position import PathPosition
from ozobotmapf.simulator.position_tile import PositionTile
from ozobotmapf.utils.constants import Directions
| [
11748,
18931,
198,
198,
6738,
15649,
672,
313,
8899,
69,
13,
70,
11549,
13,
19334,
2977,
1330,
6462,
3163,
808,
11,
15315,
540,
13247,
11,
6910,
11,
10173,
198,
6738,
15649,
672,
313,
8899,
69,
13,
70,
11549,
13,
1477,
7916,
1330,
4... | 3.376344 | 93 |
from .file import FileAdapter as file
from .memcached import MemcachedAdapterFactory as memcached
from .djangocache import DjangoCacheFactory as djcache
from .rediscache import RedisAdapterFactory as rediscache
| [
6738,
764,
7753,
1330,
9220,
47307,
355,
2393,
198,
6738,
764,
11883,
66,
2317,
1330,
4942,
66,
2317,
47307,
22810,
355,
1066,
66,
2317,
198,
6738,
764,
28241,
648,
420,
4891,
1330,
37770,
30562,
22810,
355,
42625,
23870,
198,
6738,
764... | 3.907407 | 54 |
# -*- coding: utf-8 -*-
from cdpy.common import CdpSdkBase, Squelch
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
22927,
9078,
13,
11321,
1330,
327,
26059,
50,
34388,
14881,
11,
5056,
417,
354,
628
] | 2.258065 | 31 |
import json, subprocess
from .. pyaz_utils import get_cli_name, get_params
| [
11748,
33918,
11,
850,
14681,
198,
6738,
11485,
12972,
1031,
62,
26791,
1330,
651,
62,
44506,
62,
3672,
11,
651,
62,
37266,
628
] | 3.304348 | 23 |
"""https://adventofcode.com/2021/day/3"""
data = open('day-03/input', 'r', encoding='utf-8').read().splitlines()
N = len(data[0])
gamma = 0
epsilon = 0
for n in range(N):
count0 = sum(1 for line in data if line[n] == '0')
count1 = len(data) - count0
gamma *= 2
epsilon *= 2
if count0 < count1:
gamma += 1
else:
epsilon += 1
print(gamma * epsilon)
# 3374136
| [
37811,
5450,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
1238,
2481,
14,
820,
14,
18,
37811,
198,
198,
7890,
796,
1280,
10786,
820,
12,
3070,
14,
15414,
3256,
705,
81,
3256,
21004,
11639,
40477,
12,
23,
27691,
961,
22446,
35312,
6615,
... | 2.149733 | 187 |
from rdflib import Graph as _Graph
from rdflib.namespace import (
Namespace, NamespaceManager as _NamespaceManager,
RDF, RDFS, OWL, XSD, FOAF, SKOS) # VOID
SIOC = Namespace('http://rdfs.org/sioc/ns#')
OA = Namespace('http://www.openannotation.org/ns/')
CATALYST = Namespace('http://purl.org/catalyst/core#')
IDEA = Namespace('http://purl.org/catalyst/idea#')
IBIS = Namespace('http://purl.org/catalyst/ibis#')
VOTE = Namespace('http://purl.org/catalyst/vote#')
VERSION = Namespace('http://purl.org/catalyst/version#')
ASSEMBL = Namespace('http://purl.org/assembl/core#')
TIME = Namespace('http://www.w3.org/2006/time#')
QUADNAMES = Namespace('http://purl.org/assembl/quadnames/')
DCTERMS = Namespace('http://purl.org/dc/terms/')
namespace_manager = _NamespaceManager(_Graph())
_name, _var = None, None
for _name, _var in locals().iteritems():
if _name[0] == '_':
continue
if isinstance(_var, Namespace):
namespace_manager.bind(_name.lower(), _var)
| [
6738,
374,
67,
2704,
571,
1330,
29681,
355,
4808,
37065,
198,
6738,
374,
67,
2704,
571,
13,
14933,
10223,
1330,
357,
198,
220,
220,
220,
28531,
10223,
11,
28531,
10223,
13511,
355,
4808,
36690,
10223,
13511,
11,
198,
220,
220,
220,
37... | 2.533505 | 388 |
from chainer.functions.activation import relu
from chainer.functions.activation import sigmoid
from chainer import link
from chainer.links.connection import linear
class Highway(link.Chain):
"""Highway module.
In highway network, two gates are added to the ordinal non-linear
transformation (:math:`H(x) = activate(W_h x + b_h)`).
One gate is the transform gate :math:`T(x) = \\sigma(W_t x + b_t)`, and the
other is the carry gate :math:`C(x)`.
For simplicity, the author defined :math:`C = 1 - T`.
Highway module returns :math:`y` defined as
.. math::
y = activate(W_h x + b_h) \\odot \\sigma(W_t x + b_t) +
x \\odot(1 - \\sigma(W_t x + b_t))
The output array has the same spatial size as the input. In order to
satisfy this, :math:`W_h` and :math:`W_t` must be square matrices.
Args:
in_out_size (int): Dimension of input and output vectors.
nobias (bool): If ``True``, then this function does not use the bias.
activate: Activation function of plain array. :math:`tanh` is also
available.
init_Wh (2-D array): Initial weight value of plain array.
If ``None``, the default initializer is used to
initialize the weight matrix.
May also be a callable that takes ``numpy.ndarray`` or
``cupy.ndarray`` and edits its value.
init_bh (1-D array): Initial bias value of plain array. If ``None``,
then this function uses it to initialize zero vector.
May also be a callable that takes ``numpy.ndarray`` or
``cupy.ndarray`` and edits its value.
init_Wt (2-D array): Initial weight value of transform array.
If ``None``, the default initializer is used to
initialize the weight matrix.
May also be a callable that takes ``numpy.ndarray`` or
``cupy.ndarray`` and edits its value.
init_bt (1-D array): Initial bias value of transform array.
Default value is -1 vector.
May also be a callable that takes ``numpy.ndarray`` or
``cupy.ndarray`` and edits its value.
Negative value is recommended by the author of the paper.
(e.g. -1, -3, ...).
See:
`Highway Networks <https://arxiv.org/abs/1505.00387>`_.
"""
def __call__(self, x):
"""Computes the output of the Highway module.
Args:
x (~chainer.Variable): Input variable.
Returns:
Variable: Output variable. Its array has the same spatial size and
the same minibatch size as the input array.
"""
out_plain = self.activate(self.plain(x))
out_transform = sigmoid.sigmoid(self.transform(x))
y = out_plain * out_transform + x * (1 - out_transform)
return y
| [
6738,
6333,
263,
13,
12543,
2733,
13,
48545,
1330,
823,
84,
198,
6738,
6333,
263,
13,
12543,
2733,
13,
48545,
1330,
264,
17225,
1868,
198,
6738,
6333,
263,
1330,
2792,
198,
6738,
6333,
263,
13,
28751,
13,
38659,
1330,
14174,
628,
198,... | 2.465278 | 1,152 |
def string_to_lowercase(input_string=None, **kwargs):
"""
Convert the provided string to all lowercase characters
Args:
input_string (CEF type: *): The string to convert to lowercase
Returns a JSON-serializable object that implements the configured data paths:
lowercase_string (CEF type: *): The lowercase string after conversion
"""
############################ Custom Code Goes Below This Line #################################
import json
import phantom.rules as phantom
try:
lowercase_string = input_string.lower()
except AttributeError:
raise ValueError('input_string must be a string or unicode')
outputs = {"lowercase_string": lowercase_string}
# Return a JSON-serializable object
assert json.dumps(outputs) # Will raise an exception if the :outputs: object is not JSON-serializable
return outputs
| [
4299,
4731,
62,
1462,
62,
21037,
7442,
7,
15414,
62,
8841,
28,
14202,
11,
12429,
46265,
22046,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
38240,
262,
2810,
4731,
284,
477,
2793,
7442,
3435,
198,
220,
220,
220,
220,
198,
22... | 3.087248 | 298 |
import sys
list = []
path = sys.argv[1]
a = sys.argv[2]
f = open(path, "r")
for line in f.readlines():
list.append(line.strip(', \n'))
for i in list:
if i[0] == a:
print(i)
f.close()
| [
11748,
25064,
198,
198,
4868,
796,
17635,
198,
6978,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
64,
796,
25064,
13,
853,
85,
58,
17,
60,
198,
69,
796,
1280,
7,
6978,
11,
366,
81,
4943,
198,
198,
1640,
1627,
287,
277,
13,
961,
6... | 2.076087 | 92 |
import torch
from torch import nn
from attrdict import AttrDict
from torch.nn import functional as F
from torch.distributions.normal import Normal
from torch.distributions.kl import kl_divergence
from .arch import arch
class ImageEncoderBg(nn.Module):
"""Background image encoder"""
def forward(self, x):
"""
Encoder image into a feature vector
Args:
x: (B, 3, H, W)
Returns:
enc: (B, D)
"""
return self.enc(x)
class PredictMask(nn.Module):
"""
Predict z_mask given states from rnn. Used in inference
"""
def forward(self, h):
"""
Predict z_mask given states from rnn. Used in inference
:param h: hidden state from rnn_mask
:return:
z_mask_loc: (B, D)
z_mask_scale: (B, D)
"""
x = self.fc(h)
z_mask_loc = x[:, :arch.z_mask_dim]
z_mask_scale = F.softplus(x[:, arch.z_mask_dim:]) + 1e-4
return z_mask_loc, z_mask_scale
class MaskDecoder(nn.Module):
"""Decode z_mask into mask"""
def forward(self, z_mask):
"""
Decode z_mask into mask
:param z_mask: (B, D)
:return: mask: (B, 1, H, W)
"""
B = z_mask.size(0)
# 1d -> 3d, (B, D, 1, 1)
z_mask = z_mask.view(B, -1, 1, 1)
mask = torch.sigmoid(self.dec(z_mask))
return mask
class CompEncoder(nn.Module):
"""
Predict component latent parameters given image and predicted mask concatenated
"""
def forward(self, x):
"""
Predict component latent parameters given image and predicted mask concatenated
:param x: (B, 3+1, H, W). Image and mask concatenated
:return:
z_comp_loc: (B, D)
z_comp_scale: (B, D)
"""
x = self.enc(x)
z_comp_loc = x[:, :arch.z_comp_dim]
z_comp_scale = F.softplus(x[:, arch.z_comp_dim:]) + 1e-4
return z_comp_loc, z_comp_scale
class SpatialBroadcast(nn.Module):
"""
Broadcast a 1-D variable to 3-D, plus two coordinate dimensions
"""
def forward(self, x, width, height):
"""
Broadcast a 1-D variable to 3-D, plus two coordinate dimensions
:param x: (B, L)
:param width: W
:param height: H
:return: (B, L + 2, W, H)
"""
B, L = x.size()
# (B, L, 1, 1)
x = x[:, :, None, None]
# (B, L, W, H)
x = x.expand(B, L, width, height)
xx = torch.linspace(-1, 1, width, device=x.device)
yy = torch.linspace(-1, 1, height, device=x.device)
yy, xx = torch.meshgrid((yy, xx))
# (2, H, W)
coords = torch.stack((xx, yy), dim=0)
# (B, 2, H, W)
coords = coords[None].expand(B, 2, height, width)
# (B, L + 2, W, H)
x = torch.cat((x, coords), dim=1)
return x
class CompDecoder(nn.Module):
"""
Decoder z_comp into component image
"""
def forward(self, z_comp):
"""
:param z_comp: (B, L)
:return: component image (B, 3, H, W)
"""
h, w = arch.img_shape
# (B, L) -> (B, L+2, H, W)
z_comp = self.spatial_broadcast(z_comp, h + 8, w + 8)
# -> (B, 3, H, W)
comp = self.decoder(z_comp)
comp = torch.sigmoid(comp)
return comp
class PredictComp(nn.Module):
"""
Predict component latents given mask latent
"""
def forward(self, h):
"""
:param h: (B, D) hidden state from rnn_mask
:return:
z_comp_loc: (B, D)
z_comp_scale: (B, D)
"""
x = self.mlp(h)
z_comp_loc = x[:, :arch.z_comp_dim]
z_comp_scale = F.softplus(x[:, arch.z_comp_dim:]) + 1e-4
return z_comp_loc, z_comp_scale
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
708,
4372,
713,
1330,
3460,
81,
35,
713,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
6738,
28034,
13,
17080,
2455,
507,
13,
11265,
1330,
14435,
198,
6738,
28034,
1... | 1.929061 | 2,044 |
#! /usr/bin/env python
"""
Script to test PandaArm and panda_robot package.
Run script interactively to test different functions.
python -i env.py
"""
import rospy
from panda_robot import PandaArm
poses = [[-8.48556818e-02, -8.88127666e-02, -6.59622769e-01, -1.57569726e+00, -4.82374882e-04, 2.15975946e+00, 4.36766917e-01],
[ 1.34695728e-01, -2.74474940e-01, -2.46027836e-01, -1.19805447e+00, -5.27289847e-05, 2.17926193e+00, 9.10497957e-01],
[ 1.81297444e-01, 3.94348774e-01, -2.25835923e-01, -1.19416311e+00, -7.51349249e-04, 2.79453565e+00, 8.36526167e-01],
[ 0.63068724, 0.86207321, -0.52113169, -0.95186331, 0.02450696, 2.64150352, 0.5074312 ]]
if __name__ == '__main__':
rospy.init_node("panda_env")
r = PandaArm(reset_frames = False) # handle to use methods from PandaArm class
fi = r.get_frames_interface() # frames interface object for the robot. Test switching EE frames
# How to test:
# 1) open rviz -> add RobotModel (topic 'robot_description')
# 2) set panda_link0 as global fixed frame
# 3) add tf -> disable visualisation of all links except panda_EE
# 4) run this script in terminal in interactive mode
# 5) type $ fi.set_EE_frame_to_link('panda_hand')
# to move the EE frame to the link. Try different link names.
# Test the same for the stiffness frame (set_K_frame_to_link)
cm = r.get_controller_manager() # controller manager object to get controller states and switch controllers, controllers don't have to be switched manually in most cases! The interface automatically chooses the right command depending on the control command sent (r.exec_position_cmd, r.exec_velocity_cmd, r.exec_torque_cmd, r.set_joint_position_velocity)
kin = r._kinematics() # to test the kinematics (not required, can directly query kinematics using methods in PandaArm)
g = r.get_gripper() # gripper object. Test using $ g.close(), $ g.open(), $ g.home_joints(), $g.move_joints(0.01), etc.
neutral = r.move_to_neutral
move_to = r.move_to_joint_position
mtc = r.move_to_cartesian_pose
# In interactive mode, for instance enter
# $ neutral()
# to make the robot move to neutral pose
# or type $ move_to(poses[0]) to move to the first joint pose from the list defined above (make sure robot workspace is free, visualise in panda_simulator or moveit planning before executing to be sure.)
# movegroup interface instance of the robot. See https://justagist.github.io/franka_ros_interface/DOC.html#pandamovegroupinterface for available methods.
mvt = r.get_movegroup_interface()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
220,
220,
220,
12327,
284,
1332,
41112,
26560,
290,
279,
5282,
62,
305,
13645,
5301,
13,
628,
220,
220,
220,
5660,
4226,
9427,
2280,
284,
1332,
1180,
5499,
13,
198... | 2.271605 | 1,296 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#!/usr/bin/env python
import time
import sys
import json
import tweepy
import urllib3
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
def main():
"""
Descripcion: usado solo para recolectar datos que se han mostrado en la presentacion del proyecto,
recolectar todos los tweets con el hashtag '#heatMAD' y que tengan una localizacion y meterlos
en un archivo csv.
"""
# Bot authentication.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_KEY = ''
ACCESS_SECRET = ''
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth_handler=auth)
hashtags = api.search(q="heatMAD")
with open('coord.csv', 'a') as f:
for status in hashtags:
if status.coordinates:
f.write(str(status.coordinates).split(':')[2][2:-2]+', '+status.user.name.encode('utf8')+', "'+status.text.encode('utf8')+'"\n')
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
quit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
640,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
4184,
538,
88,
198,
11748,
2956,
297,
571,
18,
198,
6738,
4184,
538,
88,
1330,
13860,
198,
6738,
4184,
538,
88,
1330,... | 2.604938 | 405 |
'''
Run Avida analyze mode
'''
import argparse, os, copy, errno, csv, subprocess, sys
output_dump_dir = "./avida_analysis_dump"
treatment_whitelist = ["change", "l9", "limres", "empty"]
avida_lineage_analysis_path="avida__analyze_fdom_lineages.cfg"
avida_genotypes_analysis_path="avida__analyze_all.cfg"
default_lineage_target_update = 200000
default_lineage_target_generation = 10000
def ParseTimeFile(detail_fpath):
"""
Given file pointer to detail file, extract information into form below:
return [{"detail":value, "detail":value, ...}, ...]
"""
orgs = []
with open(detail_fpath, "r") as detail_fp:
######################
# Step 1) Build Legend
###
# Travel to the legend.
for line in detail_fp:
if "1" in line: break
# Consume the legend.
details = []
for line in detail_fp:
if line == "\n": break
details.append(line.split(":")[-1].strip())
######################
# Step 2) Consume Organisms
###
for line in detail_fp:
org_dets = line.strip().split(" ")
org = {details[i].lower():org_dets[i] for i in range(0, len(org_dets))}
orgs.append(org)
return orgs
def mkdir_p(path):
"""
This is functionally equivalent to the mkdir -p [fname] bash command
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
if __name__ == "__main__":
main() | [
7061,
6,
198,
10987,
5184,
3755,
16602,
4235,
198,
7061,
6,
198,
11748,
1822,
29572,
11,
28686,
11,
4866,
11,
11454,
3919,
11,
269,
21370,
11,
850,
14681,
11,
25064,
198,
198,
22915,
62,
39455,
62,
15908,
796,
366,
19571,
615,
3755,
... | 2.253879 | 709 |
import shutil
import itertools
import unittest
from dampr import Dampr, BlockMapper, BlockReducer, Dataset, settings
from dampr.inputs import UrlsInput
if __name__ == '__main__':
unittest.main()
| [
11748,
4423,
346,
198,
11748,
340,
861,
10141,
198,
11748,
555,
715,
395,
198,
198,
6738,
21151,
81,
1330,
360,
696,
81,
11,
9726,
44,
11463,
11,
9726,
7738,
48915,
11,
16092,
292,
316,
11,
6460,
198,
6738,
21151,
81,
13,
15414,
82,... | 2.885714 | 70 |
#!/usr/bin/env python3
def is_leap_year(year):
"""
Report if a given year is a leap year.
The tricky thing here is that a leap year occurs:
on every year that is evenly divisible by 4
except every year that is evenly divisible by 100
unless the year is also evenly divisible by 400
"""
try:
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
except TypeError:
print("Error: '{}' is not a valid year. Please enter an integer.".format(year)) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
4299,
318,
62,
293,
499,
62,
1941,
7,
1941,
2599,
198,
197,
37811,
198,
197,
19100,
611,
257,
1813,
614,
318,
257,
16470,
614,
13,
198,
197,
464,
17198,
1517,
994,
318,
32... | 3.098684 | 152 |