content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
class InvalidCoinID(Exception):
"""
Exception raised when search query requested by user doesn't return a coin.
""" | [
4871,
17665,
24387,
2389,
7,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
35528,
4376,
618,
2989,
12405,
9167,
416,
2836,
1595,
470,
1441,
257,
10752,
13,
198,
220,
220,
220,
37227
] | 3.628571 | 35 |
# -*- coding: utf-8 -*-
# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pytransact.testsupport import BLMTests
from accounting.sie_export import sie_export, remove_dashes_from_date, \
print_program_name, print_format, print_sietype, print_orgtype, print_org_id, \
print_orgnum, print_industry_code, print_address, print_orgname, \
print_taxation_year, \
print_layout, print_currency, print_account, print_sru, print_unit, \
print_account_type, print_dim, print_subdim, print_accounting_object, \
print_opening_balance, print_closing_balance, print_turnover, \
print_verification, print_transaction
import blm
from decimal import Decimal
import pytest
from accounting.sie_import import SIEImporter
try:
import StringIO #py2
except ImportError:
from io import StringIO #py3
import codecs
class TestImported(BLMTests):
"""Do tests on an imported file"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
13130,
4946,
5268,
9564,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
7... | 3.228507 | 442 |
import ftplib
import requests
import tempfile
import json
import os
import sys
'''
Created on 07 Sep 2013
@author: rob dobson
'''
| [
11748,
10117,
489,
571,
198,
11748,
7007,
198,
11748,
20218,
7753,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
7061,
6,
198,
41972,
319,
8753,
8621,
2211,
198,
198,
31,
9800,
25,
3857,
466,
1443,
261,
198,
7061,
... | 2.711538 | 52 |
# convert decimal to binary
# Get the i-th digit of binary string x
# sort A using binary digit i
from random import choice
from string import uppercase
if __name__ == "__main__":
strings = []
for i in range(27):
strings.append(genstring(5))
print strings
strings = RADIX_SORT(strings)
print strings
| [
2,
10385,
32465,
284,
13934,
198,
198,
2,
3497,
262,
1312,
12,
400,
16839,
286,
13934,
4731,
2124,
198,
198,
2,
3297,
317,
1262,
13934,
16839,
1312,
198,
198,
6738,
4738,
1330,
3572,
198,
6738,
4731,
1330,
334,
39921,
589,
198,
198,
... | 3.047619 | 105 |
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
"""! @brief
<DCCsi>/azpy/dcc/marmoset/stub.py
stub is a sub-module placeholder, for the azpy.dcc.marmoset api.
"""
# -------------------------------------------------------------------------
# standard imports
import logging as _logging
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# global scope
from azpy.dcc.marmoset import _PKG_DCC_NAME
_MODULENAME = 'tools.dcc.{}.stub'.format(_PKG_DCC_NAME)
_LOGGER = _logging.getLogger(_MODULENAME)
_LOGGER.info('This stub is an api placeholder: {}'.format(_MODULENAME)) | [
2,
19617,
25,
40477,
12,
23,
198,
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
357,
66,
8,
25767,
669,
284,
262,
4946,
513,
35,
7117,
4935,
13,
198,
2,
1114,
1844,
6634,
290,
5964,
2846,
3387,
766,
262,
38559,
24... | 3.705882 | 255 |
marksheet=[]
scoresheet=[]
if __name__ == '__main__':
for _ in range(int(input())):
name = input()
score = float(input())
marksheet=marksheet+[[name,score]]
scoresheet+=[score]
x=sorted(set(scoresheet))[1]
for n, s in sorted(marksheet):
if s==x:
print(n)
| [
14306,
25473,
28,
21737,
198,
1416,
2850,
25473,
28,
21737,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
329,
4808,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
220,
220,
220,
220,
220,
220,
2... | 1.953488 | 172 |
# -*- coding: utf-8 -*-
"""
Created on 2021-03-02 23:40:37
---------
@summary:
---------
@author: Boris
"""
import feapder
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
33448,
12,
3070,
12,
2999,
2242,
25,
1821,
25,
2718,
198,
45537,
198,
31,
49736,
25,
198,
45537,
198,
31,
9800,
25,
25026,
198,
37811,
198,
198... | 2.5 | 50 |
#!/usr/bin/env python
################################################################################
# COPYRIGHT(c) 2018 STMicroelectronics #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# 3. Neither the name of STMicroelectronics nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
# DESCRIPTION
#
# This application example shows how to perform a Bluetooth Low Energy (BLE)
# scan, connect to a device, retrieve its exported features, and get push
# notifications from it.
# IMPORT
from __future__ import print_function
import sys
import os
import time
import datetime
import click
from abc import abstractmethod
from threading import Thread
from bluepy.btle import BTLEException
from blue_st_sdk.manager import Manager
from blue_st_sdk.manager import ManagerListener
from blue_st_sdk.node import NodeListener
from blue_st_sdk.feature import FeatureListener
from blue_st_sdk.features.feature_audio_adpcm import FeatureAudioADPCM
from blue_st_sdk.features.feature_audio_adpcm_sync import FeatureAudioADPCMSync
from blue_st_sdk.utils.number_conversion import LittleEndian
###Audio Stream#########################################################
import alsaaudio
###Audio Stream#########################################################
# PRECONDITIONS
#
# Please remember to add to the "PYTHONPATH" environment variable the parent
# folder of the "blue_st_sdk" package.
#
# On Linux:
# export PYTHONPATH=/home/<user>/.../<parent-of-blue_st_sdk>/
#
# Install the following packages:
# libasound:
# sudo apt-get install libasound2-dev
# pyalsaaudio:
# sudo pip install pyalsaaudio
#
# Troubleshooting
# Prevent audio out garbling caused by the audio out peripheral:
# sudo bash -c "echo disable_audio_dither=1 >> /boot/config.txt"
# sudo bash -c "echo pwm_mode=2 >> /boot/config.txt"
# CONSTANTS
INTRO = """##################
# BlueST Example #
##################"""
# Paths and File names
AUDIO_DUMPS_PATH = "/home/pi/audioDumps/"
AUDIO_DUMP_SUFFIX = "_audioDump.raw"
#Notifications per second
NPS = 200
#Number of channels
CHANNELS = 1
#Sampling frequency
SAMPLING_FREQ = 8000
# Global Audio Raw file
audioFile=None
saveAudioFlag = 0
# Bluetooth Scanning time in seconds.
SCANNING_TIME_s = 5
# Global stream control index
nIdx = 0
# Global audio features
audioFeature = None
audioSyncFeature = None
# FUNCTIONS
# Printing intro
# INTERFACES
#
# Implementation of the interface used by the Manager class to notify that a new
# node has been discovered or that the scanning starts/stops.
#
#
# This method is called whenever a discovery process starts or stops.
#
# @param manager Manager instance that starts/stops the process.
# @param enabled True if a new discovery starts, False otherwise.
#
#
# This method is called whenever a new node is discovered.
#
# @param manager Manager instance that discovers the node.
# @param node New node discovered.
#
#
# Implementation of the interface used by the Node class to notify that a node
# has updated its status.
#
#
# To be called whenever a node changes its status.
#
# @param node Node that has changed its status.
# @param new_status New node status.
# @param old_status Old node status.
#
#
# Implementation of the interface used by the Feature class to notify that a
# feature has updated its data.
#
#
# To be called whenever the feature updates its data.
#
# @param feature Feature that has updated.
# @param sample Data extracted from the feature.
#
#
# Implementation of the interface used by the Feature class to notify that a
# feature has updated its data.
#
#
# To be called whenever the feature updates its data.
#
# @param feature Feature that has updated.
# @param sample Data extracted from the feature.
#
# MAIN APPLICATION
# This application example connects to a Bluetooth Low Energy device, retrieves
# its exported features, and let the user get data from those supporting
# notifications.
if __name__ == "__main__":
main(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
29113,
29113,
14468,
201,
198,
2,
27975,
38162,
9947,
7,
66,
8,
2864,
3563,
13031,
9509,
20844,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.76526 | 2,228 |
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
#load .env variables
import os
from dotenv import load_dotenv
load_dotenv()
PWD = os.getenv('PWD')
import sys
sys.path.insert(1, PWD)
SQLALCHEMY_DATABASE_URL = f"sqlite:///{PWD}/data_base/sql_app.db"
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
print(SQLALCHEMY_DATABASE_URL)
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
| [
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
198,
2,
2220,
764,
24330,
9633,
198... | 2.633745 | 243 |
"""
defines:
- TableObject
- RealTableArray
- ComplexTableArray
these are used by:
- RealDisplacementArray
- RealVelocityArray
- RealAccelerationArray
- RealEigenvaluesArray
- RealSPCForcesArray
- RealMPCForcesArray
- RealAppliedLoadsArray
- ComplexDisplacementArray
- ComplexVelocityArray
- ComplexAccelerationArray
- ComplexEigenvaluesArray
- ComplexSPCForcesArray
- ComplexMPCForcesArray
- ComplexAppliedLoadsArray
"""
from __future__ import annotations
import copy
from struct import Struct, pack
import warnings
from typing import List
import numpy as np
#from numpy import float32
from pyNastran.bdf import MAX_32_BIT_INT
from pyNastran.op2.result_objects.op2_objects import ScalarObject
from pyNastran.f06.f06_formatting import write_floats_13e, write_imag_floats_13e, write_float_12e
from pyNastran.op2.errors import SixtyFourBitError
from pyNastran.op2.op2_interface.write_utils import set_table3_field, view_dtype, view_idtype_as_fdtype
from pyNastran.utils.numpy_utils import integer_types, float_types
from pyNastran.op2.writer.utils import fix_table3_types
SORT2_TABLE_NAME_MAP = {
# sort2_name : sort1_name
# displacement
'OUGATO2' : 'OUGATO1',
'OUGCRM2' : 'OUGCRM1',
'OUGNO2' : 'OUGNO1',
'OUGPSD2' : 'OUGPSD1',
'OUGRMS2' : 'OUGRMS1',
# velocity
'OVGATO2' : 'OVGATO1',
'OVGCRM2' : 'OVGCRM1',
'OVGNO2' : 'OVGNO1',
'OVGPSD2' : 'OVGPSD1',
'OVGRMS2' : 'OVGRMS1',
# acceleration
'OAGATO2' : 'OAGATO1',
'OAGCRM2' : 'OAGCRM1',
'OAGNO2' : 'OAGNO1',
'OAGPSD2' : 'OAGPSD1',
'OAGRMS2' : 'OAGRMS1',
# spc forces
'OQGATO2' : 'OQGATO1',
'OQGCRM2' : 'OQGCRM1',
'OQGNO2' : 'OQGNO1',
'OQGPSD2' : 'OQGPSD1',
'OQGRMS2' : 'OQGRMS1',
# mpc forces
'OQMATO2' : 'OQMATO1',
'OQMCRM2' : 'OQMCRM1',
'OQMNO2' : 'OQMNO1',
'OQMPSD2' : 'OQMPSD1',
'OQMRMS2' : 'OQMRMS1',
# load vectors
'OPGATO2' : 'OPGATO1',
'OPGCRM2' : 'OPGCRM1',
'OPGNO2' : 'OPGNO1',
'OPGPSD2' : 'OPGPSD1',
'OPGRMS2' : 'OPGRMS1',
# pressure
'OPRATO2' : 'OPRATO1',
'OPRCRM2' : 'OPRCRM1',
'OPRNO2' : 'OPRNO1',
'OPRPSD2' : 'OPRPSD1',
'OPRRMS2' : 'OPRRMS1',
#'OUG2' : 'OUG1',
'OUGV2' : 'OUGV1',
'OQG2' : 'OQG1',
'OQMG2' : 'OQMG1',
'OPG2' : 'OPG1',
'OPNL2' : 'OPNL1',
'OUXY2' : 'OUXY1',
'OQGGF2' : 'OQGGF1',
'OQGCF2' : 'OQGCF1',
'OUGF2' : 'OUGF1',
}
SORT1_TABLES = list(SORT2_TABLE_NAME_MAP.values())
SORT1_TABLES.extend([
'BOUGV1',
'OUG1F',
'BOUGF1',
'OUG1',
'OVG1',
'OAG1',
])
SORT2_TABLES = list(SORT2_TABLE_NAME_MAP.keys())
table_name_to_table_code = {
# displacement (msc/nx)
'OUGV1' : 1,
'BOUGV1' : 1,
# load vector (msc/nx)
'OPG1' : 2,
'BOPG1' : 2,
#'BOPHIG1' : 5, # ???
# spc/mpc forces
'OQG1' : 3,
}
def append_sort1_sort2(data1, data2, to_sort1=True):
"""
data1 : (ntimes, nnids, 6)
data2 : (nnids, ntimes, 6)
"""
assert len(data1.shape) == 3, data1.shape
assert len(data2.shape) == 3, data2.shape
ntimes1, nnids1 = data1.shape[:2]
nnids2, ntimes2 = data2.shape[:2]
unused_ntimes = ntimes1 + ntimes2
unused_nnids = nnids1 + nnids2
assert ntimes1 == ntimes2
if to_sort1:
out = np.hstack([
data1,
np.swapaxes(data2, 0, 1),])
else:
out = np.hstack([
np.swapaxes(data1, 0, 1),
data2,])
return out
class TableArray(ScalarObject): # displacement style table
"""
Base class for:
- RealTableArray
- ComplexTableArray
"""
@property
def build(self):
"""sizes the vectorized attributes of the TableArray"""
#print('_nnodes=%s ntimes=%s sort1?=%s ntotal=%s -> _nnodes=%s' % (self._nnodes, self.ntimes, self.is_sort1,
#self.ntotal, self._nnodes // self.ntimes))
# we have a SORT1 data array that will be (ntimes, nnodes, 6)
# we start by sizing the total number of entries (_nnodes = ntimes * nnodes)
# we also keep track of the number of times
# then we compute nnodes
#
# for sort1, we just use what was discussed above
# for sort2, we flip nnodes and ntimes
#
# note that in both cases, ntotal is the major dimension:
# - SORT1 - ntimes
# - SORT2 - nnodes
#print('ntotal=%s ntimes=%s _nnodes=%s' % (self.ntotal, self.ntimes, self._nnodes))
self._nnodes //= self.ntimes
#print('ntotal=%s ntimes=%s _nnodes=%s\n' % (self.ntotal, self.ntimes, self._nnodes))
#if self.ntimes > 1000:
# raise RuntimeError(self.ntimes)
self.itime = 0
self.itotal = 0
if self.is_sort1:
ntimes = self.ntimes
nnodes = self.ntotal
ntotal = self.ntotal
nx = ntimes
ny = nnodes
#print("SORT1 ntimes=%s nnodes=%s" % (ntimes, nnodes))
elif self.is_sort2:
# flip this to sort1
ntimes = self.ntotal
nnodes = self.ntimes
ntotal = nnodes
nx = ntimes
ny = nnodes
#print("***SORT2 ntotal=%s nnodes=%s ntimes=%s" % (ntotal, nnodes, ntimes))
else:
raise RuntimeError('expected sort1/sort2\n%s' % self.code_information())
self.build_data(ntimes, nnodes, ntotal, nx, ny, self._times_dtype)
def build_data(self, ntimes, nnodes, ntotal, nx, ny, float_fmt: str):
"""actually performs the build step"""
self.ntimes = ntimes
self._nnodes = nnodes
self.ntotal = ntotal
_times = np.zeros(ntimes, dtype=float_fmt)
int_fmt = 'int32' if self.size == 4 else 'int64'
node_gridtype = np.zeros((nnodes, 2), dtype=int_fmt)
#[t1, t2, t3, r1, r2, r3]
data = np.zeros((nx, ny, 6), self.data_type())
if self.load_as_h5:
group = self._get_result_group()
self._times = group.create_dataset('_times', data=_times)
self.node_gridtype = group.create_dataset('node_gridtype', data=node_gridtype)
self.data = group.create_dataset('data', data=data)
else:
self._times = _times
self.node_gridtype = node_gridtype
self.data = data
#print('ntimes=%s nnodes=%s; nx=%s ny=%s; ntotal=%s' % (
#ntimes, nnodes, nx, ny, self.ntotal))
def build_dataframe(self):
"""creates a pandas dataframe
works: 0.24.2
broken: 0.25.0
"""
import pandas as pd
#is_v25 = pd.__version__ >= '0.25'
headers = self.get_headers()
#headers = [0, 1, 2, 3, 4, 5]
#node_gridtype = [self.node_gridtype[:, 0], self.gridtype_str]
#letter_dims = [
#('G', 6),
#('E', 1),
#('S', 1),
#('H', 6),
#('L', 6),
#]
ntimes, nnodes = self.data.shape[:2]
ugridtype_str = np.unique(self.gridtype_str)
if self.nonlinear_factor not in (None, np.nan):
#if not self.is_sort1:
#print("skipping %s because it's not SORT1" % self.class_name)
#return
column_names, column_values = self._build_dataframe_transient_header()
#if is_v25:
# we start out like this...
#
# Mode 1 2 3
# EigenvalueReal -0.0 -0.0 -0.0
# EigenvalueImag -0.463393 0.463393 -1.705689
# Damping 0.0 0.0 0.0
# NodeID Type Item
# 1 G t1 (0.6558146+0j) (0.6558146+0j) (1.034078+0j)
# t2 0j 0j 0j
# t3 0j 0j 0j
# r1 0j 0j 0j
# r2 0j 0j 0j
# r3 0j 0j 0j
# ...
#
# then we call pandas_extract_rows to make it this...
#
# Mode 1 2 3
# EigenvalueReal -0.0 -0.0 -0.0
# EigenvalueImag -0.463393 0.463393 -1.705689
# Damping 0.0 0.0 0.0
# NodeID Item
# 1 t1 0.655815+0.0j 0.655815+0.0j 1.034078+0.0j
# t2 0.0+0.0j 0.0+0.0j 0.0+0.0j
# t3 0.0+0.0j 0.0+0.0j 0.0+0.0j
# r1 0.0+0.0j 0.0+0.0j 0.0+0.0j
# r2 0.0+0.0j 0.0+0.0j 0.0+0.0j
# r3 0.0+0.0j 0.0+0.0j 0.0+0.0j
# 2 t1 0.999141+0.0j 0.999141+0.0j -0.282216+0.0j
# t2 0.0+0.0j 0.0+0.0j 0.0+0.0j
# t3 0.0+0.0j 0.0+0.0j 0.0+0.0j
# r1 0.0+0.0j 0.0+0.0j 0.0+0.0j
# r2 0.0+0.0j 0.0+0.0j 0.0+0.0j
# r3 0.0+0.0j 0.0+0.0j 0.0+0.0j
# 1001 S 0.000859+0.0j 0.000859+0.0j -0.003323+0.0j
columns = pd.MultiIndex.from_arrays(column_values, names=column_names)
gridtype_str = self.gridtype_str
ugridtype_str = np.unique(gridtype_str)
if len(ugridtype_str) == 1 and gridtype_str[0] in ['S', 'M', 'E']:
nnodes = self.node_gridtype.shape[0]
node_gridtype = [self.node_gridtype[:, 0], [gridtype_str[0]] * nnodes]
names = ['NodeID', 'Item']
index = pd.MultiIndex.from_arrays(node_gridtype, names=names)
A = self.data[:, :, 0].T
data_frame = pd.DataFrame(A, columns=columns, index=index)
else:
node_gridtype_item = []
node_ids = self.node_gridtype[:, 0]
for nid, gridtype in zip(node_ids, gridtype_str):
node_gridtype_item.extend([[nid, gridtype, 't1']])
node_gridtype_item.extend([[nid, gridtype, 't2']])
node_gridtype_item.extend([[nid, gridtype, 't3']])
node_gridtype_item.extend([[nid, gridtype, 'r1']])
node_gridtype_item.extend([[nid, gridtype, 'r2']])
node_gridtype_item.extend([[nid, gridtype, 'r3']])
names = ['NodeID', 'Type', 'Item']
index = pd.MultiIndex.from_tuples(node_gridtype_item, names=names)
A = self.data.reshape(ntimes, nnodes*6).T
try:
data_frame = pd.DataFrame(A, columns=columns, index=index)
except ValueError: # pragma: no cover
print(f'data.shape={self.data.shape} A.shape={A.shape} '
f'ntimes={ntimes} nnodes*6={nnodes*6} ngrids={len(node_ids)}\n'
f'column_names={column_names} column_values={column_values} _times={self._times}')
raise
#print(data_frame.to_string())
data_frame = pandas_extract_rows(data_frame, ugridtype_str, ['NodeID', 'Item'])
#elif is_v25 and 0: # pragma: no cover
# t1 t2
# itime Mode EigenvalueReal EigenvalueImag Damping NodeID Type
# 0 1 G -0.0 -0.463393 0.0 1 0.655815+0.0j 0.0+0.0j
# G 2 0.999141+0.0j 0.0+0.0j
# G 3 1.0+0.0j 0.0+0.0j
# S 1001 0.000859+0.0j 0.0+0.0j
# 1 1 G -0.0 0.463393 0.0 2 0.655815+0.0j 0.0+0.0j
# G 2 0.999141+0.0j 0.0+0.0j
# G 3 1.0+0.0j 0.0+0.0j
# S 1001 0.000859+0.0j 0.0+0.0j
# 2 1 G -0.0 -1.705689 0.0 3 1.034078+0.0j 0.0+0.0j
# G 2 -0.282216+0.0j 0.0+0.0j
# G 3 -0.285539+0.0j 0.0+0.0j
# S 1001 -0.003323+0.0j 0.0+0.0j
#time_node_gridtype = []
#from itertools import count
#for itime in range(ntimes):
#column_values2 = [column_value[itime] for column_value in column_values]
#for nid, gridtype in zip(self.node_gridtype[:, 0], self.gridtype_str):
#time_node_gridtype.append([itime] + column_values2 + [nid, gridtype])
#names = ['itime'] + column_names + ['NodeID', 'Type']
#index = pd.MultiIndex.from_tuples(time_node_gridtype, names=names)
#A = self.data.reshape(ntimes*nnodes, 6)
#data_frame = pd.DataFrame(A, columns=headers, index=index)
##print(self.data_frame.index.names)
##data_frame = pandas_extract_rows(self.data_frame, ugridtype_str)
#print(data_frame)
#elif is_v25 and 0: # pragma: no cover
#node_gridtype2 = []
#NodeID Type t1 t2 ...
#1 G 0.655815+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#2 G 0.999141+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#3 G 1.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1001 S 0.000859+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1 G 0.655815+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#2 G 0.999141+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#3 G 1.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1001 S 0.000859+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1 G 1.034078+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#2 G -0.282216+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#3 G -0.285539+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1001 S -0.003323+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1 G 1.034078+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#2 G -0.282216+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#3 G -0.285539+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1001 S -0.003323+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1 G -0.001818+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#2 G -0.124197+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#3 G 0.625574+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1001 S 0.749771+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1 G 0.001011+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#2 G -0.200504+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#3 G 1.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1001 S 1.200504+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1 G 0.001011+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#2 G -0.200504+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#3 G 1.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#1001 S 1.200504+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#for itime in range(ntimes):
#column_values2 = [column_value[itime] for column_value in column_values]
#for nid, gridtype in zip(self.node_gridtype[:, 0], self.gridtype_str):
#node_gridtype2.append([itime, nid, gridtype])
#names = ['itime', 'NodeID', 'Type']
#index = pd.MultiIndex.from_tuples(node_gridtype2, names=names)
#A = self.data.reshape(ntimes*nnodes, 6)
#data_frame = pd.DataFrame(A, columns=headers, index=index)
##print(data_frame.index.names)
##data_frame = pandas_extract_rows(data_frame, ugridtype_str)
#print(data_frame)
#elif is_v25 and 0: # pragma: no cover
# t1 t2 t3 r1 r2 r3
# 0 0.655815+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
# 1 0.999141+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
# 2 1.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
# 3 0.000859+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
# 4 0.655815+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
# 5 0.999141+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
# 6 1.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j 0.0+0.0j
#index = pd.MultiIndex.from_arrays(node_gridtype, names=['NodeID', 'Type'])
#A = self.data.reshape(nnodes*ntimes, 6)
#data_frame = pd.DataFrame(A, columns=headers)
##data_frame = pd.DataFrame(A, columns=headers, index=index) # doesn't work
# doesn't turn into workable table
#else:
# old
# Mode 1 2 3
# EigenvalueReal -0.0 -0.0 -0.0
# EigenvalueImag -0.463393 0.463393 -1.705689
# Damping 0.0 0.0 0.0
# NodeID Type Item
# 1 G t1 (0.6558146+0j) (0.6558146+0j) (1.034078+0j)
# t2 0j 0j 0j
# t3 0j 0j 0j
# r1 0j 0j 0j
# r2 0j 0j 0j
# r3 0j 0j 0j
# mode 1 2 3
# freq 1.0 2.0 3.0
# nodeid
# 1 item 1.0 2.0 3.0
# t1 etc.
# t2
# t3
# ...
# 2
# t1
# t2
# t3
# ...
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=node_gridtype, minor_axis=headers).to_frame() # to_xarray()
#data_frame.columns.names = column_names
#data_frame.index.names = ['NodeID', 'Type', 'Item']
#print(column_names)
#print(data_frame)
#print(self.data_frame.index.names)
#data_frame = pandas_extract_rows(data_frame, ugridtype_str, ['NodeID', 'Item'])
self.data_frame = data_frame
#print(self.data_frame)
else:
#self.data_frame = pd.Panel(self.data[0, :, :], major_axis=node_gridtype, minor_axis=headers).to_frame()
#self.data_frame.columns.names = ['Static']
#self.data_frame.index.names = ['NodeID', 'Type', 'Item']
# NodeID Type t1 t2 t3 r1 r2 r3
# 0 1 G 0.0 0.000000 0.000000e+00 0.0 0.0 0.0
# 1 2 G 0.0 0.000000 0.000000e+00 0.0 0.0 0.0
# 2 3 G 0.0 0.000000 0.000000e+00 0.0 0.0 0.0
#self.data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.node_gridtype)
df1 = pd.DataFrame(self.node_gridtype[:, 0])
df1.columns = ['NodeID']
df2 = pd.DataFrame(self.gridtype_str)
df2.columns = ['Type']
df3 = pd.DataFrame(self.data[0])
df3.columns = headers
self.data_frame = df1.join([df2, df3])
#df1 = pd.DataFrame(self.node_gridtype)
#df1.columns = ['NodeID', 'Type']
#df2 = pd.DataFrame(self.data[0])
#df2.columns = headers
#self.data_frame = df1.join([df2])
#print(self.data_frame)
def finalize(self):
"""
Calls any OP2 objects that need to do any post matrix calcs
"""
self.set_as_sort1()
gridtypes = self.node_gridtype[:, 1]
nnodes = len(gridtypes)
self.gridtype_str = np.chararray((nnodes), unicode=True)
ugridtypes = np.unique(gridtypes)
for ugridtype in ugridtypes:
i = np.where(gridtypes == ugridtype)
self.gridtype_str[i] = self.recast_gridtype_as_string(ugridtype)
#del self.itotal, self.itime
def set_as_sort1(self):
"""changes the table into SORT1"""
#if not self.table_name != 'OQMRMS1':
#return
if self.is_sort1:
return
#print('set_as_sort1: table_name=%r' % self.table_name)
try:
analysis_method = self.analysis_method
except AttributeError:
print(self.code_information())
raise
#print(self.get_stats())
#print(self.node_gridtype)
#print(self.data.shape)
self.sort_method = 1
self.sort_bits[1] = 0
bit0, bit1, bit2 = self.sort_bits
self.table_name = SORT2_TABLE_NAME_MAP[self.table_name]
self.sort_code = bit0 + 2*bit1 + 4*bit2
#print(self.code_information())
assert self.is_sort1
if analysis_method != 'N/A':
self.data_names[0] = analysis_method
#print(self.table_name_str, analysis_method, self._times)
setattr(self, self.analysis_method + 's', self._times)
del self.analysis_method
def add_sort1(self, dt, node_id, grid_type, v1, v2, v3, v4, v5, v6):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(node_id, int) and node_id > 0, 'dt=%s node_id=%s' % (dt, node_id)
# itotal - the node number
# itime - the time/frequency step
# the times/freqs
self._times[self.itime] = dt
self.node_gridtype[self.itotal, :] = [node_id, grid_type]
self.data[self.itime, self.itotal, :] = [v1, v2, v3, v4, v5, v6]
self.itotal += 1
class RealTableArray(TableArray):
"""
displacement style table
"""
@classmethod
@classmethod
@classmethod
def __pos__(self) -> RealTableArray:
"""positive; +a"""
return self
def __neg__(self) -> RealTableArray:
"""negative; -a"""
new_table = copy.deepcopy(self)
new_table.data *= -1.0
return new_table
def __add__(self, table: RealTableArray) -> RealTableArray:
"""a + b"""
if isinstance(table, RealTableArray):
self._check_math(table)
new_data = self.data + table.data
elif isinstance(table, (integer_types, float_types)):
new_data = self.data + table
else:
raise TypeError(table)
new_table = copy.deepcopy(self)
new_table.data = new_data
return new_table
# __radd__: reverse order adding (b+a)
def __iadd__(self, table: RealTableArray) -> RealTableArray:
"""inplace adding; a += b"""
self._check_math(table)
self.data += table.data
return self
def __sub__(self, table: RealTableArray) -> RealTableArray:
"""a - b"""
if isinstance(table, RealTableArray):
self._check_math(table)
new_data = self.data - table.data
elif isinstance(table, (integer_types, float_types)):
new_data = self.data - table
else:
raise TypeError(table)
new_table = copy.deepcopy(self)
new_table.data = new_data
return new_table
def __mul__(self, table: RealTableArray) -> RealTableArray:
"""a * b"""
if isinstance(table, RealTableArray):
self._check_math(table)
new_data = self.data * table.data
elif isinstance(table, (integer_types, float_types)):
new_data = self.data * table
else:
raise TypeError(table)
new_table = copy.deepcopy(self)
new_table.data = new_data
return new_table
def __imul__(self, table: RealTableArray) -> RealTableArray:
"""a *= b"""
if isinstance(table, RealTableArray):
self._check_math(table)
self.data *= table.data
elif isinstance(table, (integer_types, float_types)):
self.data *= table
else:
raise TypeError(table)
return self
def __truediv__(self, table: RealTableArray) -> RealTableArray:
"""a / b"""
if isinstance(table, RealTableArray):
self._check_math(table)
new_data = self.data / table.data
elif isinstance(table, (integer_types, float_types)):
new_data = self.data / table
else:
raise TypeError(table)
new_table = copy.deepcopy(self)
new_table.data = new_data
return new_table
def _check_math(self, table: RealTableArray) -> None:
"""verifies that the shapes are the same"""
assert self.ntimes == table.ntimes, f'ntimes={self.ntimes} table.times={table.ntimes}'
assert self.ntotal == table.ntotal, f'ntotal={self.ntotal} table.ntotal={table.ntotal}'
assert self.node_gridtype.shape == table.node_gridtype.shape, f'node_gridtype.shape={self.node_gridtype.shape} table.node_gridtype.shape={table.node_gridtype.shape}'
assert self.data.shape == table.data.shape, f'data.shape={self.data.shape} table.data.shape={table.data.shape}'
@property
@property
    def write_op2(self, op2_file, fascii, itable, new_result,
                  date, is_mag_phase=False, endian='>'):
        """
        Writes this real table to an OP2 binary file.

        Parameters
        ----------
        op2_file : file-like
            open binary OP2 output stream
        fascii : file-like
            open text stream that receives a human-readable trace of what is written
        itable : int
            current (negative) OP2 subtable counter; decremented as records are written
        new_result : bool
            True for the first subtable of a result; cleared after the first time step
        date : tuple
            date stamp written into the table header
        is_mag_phase : bool
            unused here (real data); kept for interface parity with the complex writer
        endian : str
            unused here; byte order is baked into the pack formats below

        Returns
        -------
        itable : int
            the updated subtable counter for the caller to continue from
        """
        import inspect
        # Guard: only tables with a known OUG-style record layout may use this writer.
        allowed_tables = [
            'OUGV1', 'BOUGV1',
            'OPHIG', 'BOPHIG',
            'OUPV1', 'OUXY1',  # solution set
            'OQP1', 'OQMG1', 'OQG1', 'OQGV1', 'OPNL1',
            'OPG1', 'BOPG1',
            'OPHSA',
            'OPGV1',
            'OUGATO1', 'OUGCRM1', 'OUGNO1', 'OUGPSD1', 'OUGRMS1',  # disp/vel/acc/eigenvector
            'OVGATO1', 'OVGCRM1', 'OVGNO1',
            'OAGATO1', 'OAGCRM1', 'OAGNO1', 'OAGPSD1', 'OAGRMS1',  # acceleration
            'OPGNO1', 'OPGRMS1',  # load vector
            'OPRATO1', 'OPRCRM1', 'OPRNO1', 'OPRPSD1',  # pressure
            'OQGPSD1',
            'OCRPG', 'OCRUG', 'OUG1', 'OVG1', 'OAG1',
            'OUGV1PAT',
            'OUGF1',
            'OQGCF1', 'OQGGF1',
            'RADCONS', 'RADEATC', 'RADEFFM',
        ]
        assert self.table_name in allowed_tables, self.table_name
        # Trace which caller invoked this writer (for debugging the ascii log).
        frame = inspect.currentframe()
        call_frame = inspect.getouterframes(frame, 2)
        fascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
        if itable == -1:
            # First table of the file: emit the table header and skip to -3.
            self._write_table_header(op2_file, fascii, date)
            itable = -3
        #print('nonlinear_factor =', self.nonlinear_factor)
        if not self.is_sort1:
            raise NotImplementedError('SORT2')
        #op2_format = endian + b'2i6f'
        node = self.node_gridtype[:, 0]
        gridtype = self.node_gridtype[:, 1]
        # table 4 info
        #ntimes = self.data.shape[0]
        nnodes = self.data.shape[1]
        # OP2 convention: the stored id is node_id*10 + device_code.
        nnodes_device = self.node_gridtype[:, 0] * 10 + self.device_code
        nnodes = len(node)
        max_id = node.max()
        if max_id > MAX_32_BIT_INT:
            raise SixtyFourBitError(f'64-bit OP2 writing is not supported; max id={max_id}')
        fdtype = self.data.dtype
        if self.size == 4:
            pass
        else:
            # 64-bit data is downcast to float32 before writing.
            warnings.warn(f'downcasting {self.class_name}...this is buggy')
            #idtype = np.int32(1)
            fdtype = np.float32(1.0)
        # Reinterpret the (id, gridtype) integer columns as floats so they can be
        # hstacked with the float data and written in one shot.
        nodedevice_gridtype = np.column_stack([nnodes_device, gridtype])
        node_gridtype_floats = view_idtype_as_fdtype(nodedevice_gridtype, fdtype)
        #print(node_gridtype_floats)
        #node_gridtype_floats = nodedevice_gridtype.view(fdtype) # .reshape(nnodes, 2)
        #format_table4_1 = Struct(self._endian + b'15i')
        #format_table4_2 = Struct(self._endian + b'3i')
        #(2+6) => (node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i)
        ntotal = nnodes * (2 + 6)
        #print('shape = %s' % str(self.data.shape))
        #assert nnodes > 1, nnodes
        assert ntotal > 1, ntotal
        unused_device_code = self.device_code
        fascii.write(' ntimes = %s\n' % self.ntimes)
        #fmt = '%2i %6f'
        #print('ntotal=%s' % (ntotal))
        for itime in range(self.ntimes):
            # Subtable 3 carries the analysis metadata for this time step.
            self._write_table_3(op2_file, fascii, new_result, itable, itime)
            # record 4: data block, framed by word-count markers.
            itable -= 1
            header = [4, itable, 4,
                      4, 1, 4,
                      4, 0, 4,
                      4, ntotal, 4,
                      4*ntotal]
            op2_file.write(pack(b'%ii' % len(header), *header))
            fascii.write('r4 [4, 0, 4]\n')
            fascii.write('r4 [4, %s, 4]\n' % (itable))
            fascii.write('r4 [4, %i, 4]\n' % (4*ntotal))
            # Write all nodes for this time step as one contiguous float buffer.
            datai = view_dtype(self.data[itime, :, :], fdtype)
            node_gridtype_data = np.hstack([node_gridtype_floats, datai])
            op2_file.write(node_gridtype_data)
            assert ntotal == node_gridtype_data.size
            #print(self.data.shape, ntotal)
            itable -= 1
            header = [4 * ntotal,]
            op2_file.write(pack(b'i', *header))
            fascii.write('footer = %s\n' % header)
            # Only the first time step is flagged as a new result.
            new_result = False
        return itable
#def _write_sort2_as_sort2(self, f06_file, page_num, page_stamp, header, words):
#nodes = self.node_gridtype[:, 0]
#gridtypes = self.node_gridtype[:, 1]
#times = self._times
#for inode, (node_id, gridtypei) in enumerate(zip(nodes, gridtypes)):
#t1 = self.data[inode, :, 0]
#t2 = self.data[inode, :, 1]
#t3 = self.data[inode, :, 2]
#r1 = self.data[inode, :, 3]
#r2 = self.data[inode, :, 4]
#r3 = self.data[inode, :, 5]
#header[1] = ' POINT-ID = %10i\n' % node_id
#f06_file.write(''.join(header + words))
#for dt, t1i, t2i, t3i, r1i, r2i, r3i in zip(times, t1, t2, t3, r1, r2, r3):
#sgridtype = self.recast_gridtype_as_string(gridtypei)
#vals = [t1i, t2i, t3i, r1i, r2i, r3i]
#vals2 = write_floats_13e(vals)
#(dx, dy, dz, rx, ry, rz) = vals2
#if sgridtype in ['G', 'H', 'L']:
#f06_file.write('%14s %6s %-13s %-13s %-13s %-13s %-13s %s\n' % (
#write_float_12e(dt), sgridtype, dx, dy, dz, rx, ry, rz))
#elif sgridtype == 'S':
#f06_file.write('%14s %6s %s\n' % (node_id, sgridtype, dx))
#else:
#raise NotImplementedError(sgridtype)
#f06_file.write(page_stamp % page_num)
#page_num += 1
#return page_num
class ComplexTableArray(TableArray):
"""
complex displacement style table
"""
@classmethod
@property
@property
#def _write_f06_block(self, words, header, page_stamp, page_num, f06_file, is_mag_phase):
#self._write_f06_transient_block(words, header, page_stamp, page_num, f06_file, is_mag_phase, is_sort1)
#def write_sort2_as_sort2(self, f06_file, page_num, page_stamp, header, words, is_mag_phase):
#"""TODO: not validated"""
#node = self.node_gridtype[:, 0]
#gridtype = self.node_gridtype[:, 1]
#times = self._times
## print(self.data.shape)
#for inode, (node_id, gridtypei) in enumerate(zip(node, gridtype)):
## TODO: for SORT1 pretending to be SORT2
##t1 = self.data[:, inode, 0].ravel()
#t1 = self.data[:, inode, 0].ravel()
#t2 = self.data[:, inode, 1].ravel()
#t3 = self.data[:, inode, 2].ravel()
#r1 = self.data[:, inode, 3].ravel()
#r2 = self.data[:, inode, 4].ravel()
#r3 = self.data[:, inode, 5].ravel()
#if len(r3) != len(times):
#raise RuntimeError('len(d)=%s len(times)=%s' % (len(r3), len(times)))
#header[2] = ' POINT-ID = %10i\n' % node_id
#f06_file.write(''.join(header + words))
#for dt, t1i, t2i, t3i, r1i, r2i, r3i in zip(times, t1, t2, t3, r1, r2, r3):
#sgridtype = self.recast_gridtype_as_string(gridtypei)
#vals = [t1i, t2i, t3i, r1i, r2i, r3i]
#vals2 = write_imag_floats_13e(vals, is_mag_phase)
#[dxr, dyr, dzr, rxr, ryr, rzr,
#dxi, dyi, dzi, rxi, ryi, rzi] = vals2
#sdt = write_float_12e(dt)
##if not is_all_zeros:
#if sgridtype == 'G':
#f06_file.write('0 %12s %6s %-13s %-13s %-13s %-13s %-13s %-s\n'
#' %13s %6s %-13s %-13s %-13s %-13s %-13s %-s\n' % (
#sdt, sgridtype, dxr, dyr, dzr, rxr, ryr, rzr,
#'', '', dxi, dyi, dzi, rxi, ryi, rzi))
#elif sgridtype in ['S', 'M', 'E']:
#f06_file.write('0 %12s %6s %-13s\n'
#' %12s %6s %-13s\n' % (sdt, sgridtype, dxr, '', '', dxi))
#else:
#msg = 'nid=%s dt=%s type=%s dx=%s dy=%s dz=%s rx=%s ry=%s rz=%s' % (
#node_id, dt, sgridtype, t1i, t2i, t3i, r1i, r2i, r3i)
#raise NotImplementedError(msg)
#f06_file.write(page_stamp % page_num)
#page_num += 1
#return page_num
    def write_op2(self, op2_file, fascii, itable, new_result,
                  date, is_mag_phase=False, endian='>'):
        """
        Writes this complex table to an OP2 binary file.

        Parameters
        ----------
        op2_file : file-like
            open binary OP2 output stream
        fascii : file-like
            open text stream that receives a human-readable trace of what is written
        itable : int
            current (negative) OP2 subtable counter; decremented as records are written
        new_result : bool
            True for the first subtable of a result; cleared after the first time step
        date : tuple
            date stamp written into the table header
        is_mag_phase : bool
            accepted for interface parity; the data is always written as real/imag pairs here
        endian : str
            byte-order prefix for the struct pack format

        Returns
        -------
        itable : int
            the updated subtable counter for the caller to continue from
        """
        import inspect
        # Guard: only tables with a known complex OUG-style layout may use this writer.
        allowed_tables = [
            'OUGV1', 'BOUGV1',
            'OQG1', 'OQMG1',
            'OPG1',
            'OUXY1',
            'OUG1F',
            'OUG1',
            'OUGF1', 'BOUGF1',
            'OAG1', 'OVG1',
        ]
        assert self.table_name in allowed_tables, self.table_name
        # Trace which caller invoked this writer (for debugging the ascii log).
        frame = inspect.currentframe()
        call_frame = inspect.getouterframes(frame, 2)
        fascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
        if itable == -1:
            # First table of the file: emit the table header and skip to -3.
            self._write_table_header(op2_file, fascii, date)
            itable = -3
        #print('nonlinear_factor =', self.nonlinear_factor)
        if self.is_sort1:
            # 2 ints (id, gridtype) + 12 floats (6 real + 6 imaginary components).
            op2_format = endian + b'2i 12f'
        else:
            raise NotImplementedError('SORT2')
        s = Struct(op2_format)
        node = self.node_gridtype[:, 0]
        max_id = node.max()
        if max_id > 99999999:
            raise SixtyFourBitError(f'64-bit OP2 writing is not supported; max id={max_id}')
        gridtype = self.node_gridtype[:, 1]
        #format_table4_1 = Struct(self._endian + b'15i')
        #format_table4_2 = Struct(self._endian + b'3i')
        # table 4 info
        #ntimes = self.data.shape[0]
        nnodes = self.data.shape[1]
        # OP2 convention: the stored id is node_id*10 + device_code.
        nnodes_device = self.node_gridtype[:, 0] * 10 + self.device_code
        #fdtype = self.data.real.dtype
        #fdtype = 'float32' # we don't support complex data yet
        #nodedevice_gridtype = np.column_stack([nnodes_device, gridtype])
        #node_gridtype_bytes = nodedevice_gridtype.tobytes()
        #node_gridtype_floats = np.frombuffer(node_gridtype_bytes,
        #dtype=fdtype).reshape(nnodes, 2)
        #(2+6) => (node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i)
        # 2 id words + 12 data words per node.
        ntotal = nnodes * (2 + 12)
        #print('shape = %s' % str(self.data.shape))
        assert nnodes >= 1, nnodes
        assert ntotal > 1, ntotal
        unused_device_code = self.device_code
        fascii.write(' ntimes = %s\n' % self.ntimes)
        #fmt = '%2i %6f'
        #print('ntotal=%s' % (ntotal))
        for itime in range(self.ntimes):
            # Subtable 3 carries the analysis metadata for this time step.
            self._write_table_3(op2_file, fascii, new_result, itable, itime)
            # record 4: data block, framed by word-count markers.
            itable -= 1
            header = [4, itable, 4,
                      4, 1, 4,
                      4, 0, 4,
                      4, ntotal, 4,
                      4*ntotal]
            op2_file.write(pack(b'%ii' % len(header), *header))
            fascii.write('r4 [4, 0, 4]\n')
            fascii.write('r4 [4, %s, 4]\n' % (itable))
            fascii.write('r4 [4, %i, 4]\n' % (4*ntotal))
            #datai = self.data[itime, :, :]
            #node_gridtype_data = np.hstack([
            #node_gridtype_floats,
            #datai.real.astype('float32'),
            #datai.imag.astype('float32'),
            #])
            #node_gridtype_data_bytes = node_gridtype_data.tobytes()
            #op2_file.write(node_gridtype_data_bytes)
            # Write node-by-node: all 6 real components, then all 6 imaginary.
            t1 = self.data[itime, :, 0]
            t2 = self.data[itime, :, 1]
            t3 = self.data[itime, :, 2]
            r1 = self.data[itime, :, 3]
            r2 = self.data[itime, :, 4]
            r3 = self.data[itime, :, 5]
            for node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i in zip(nnodes_device, gridtype, t1, t2, t3, r1, r2, r3):
                data = [node_id, gridtypei,
                        t1i.real, t2i.real, t3i.real, r1i.real, r2i.real, r3i.real,
                        t1i.imag, t2i.imag, t3i.imag, r1i.imag, r2i.imag, r3i.imag]
                fascii.write(' nid, grid_type, dx, dy, dz, rx, ry, rz = %s\n' % data)
                op2_file.write(s.pack(*data))
            itable -= 1
            header = [4 * ntotal,]
            op2_file.write(pack(b'i', *header))
            fascii.write('footer = %s\n' % header)
            # Only the first time step is flagged as a new result.
            new_result = False
            #header = [
            #4, itable, 4,
            #4, 1, 4,
            #4, 0, 4,
            #]
            #op2_file.write(pack(b'%ii' % len(header), *header))
            #fascii.write('footer2 = %s\n' % header)
        return itable
#def write_sort2_as_sort2(self, f06_file, page_num, page_stamp, header, words, is_mag_phase):
#node = self.node_gridtype[:, 0]
#gridtype = self.node_gridtype[:, 1]
#times = self._times
#for inode, (node_id, gridtypei) in enumerate(zip(node, gridtype)):
## TODO: for SORT1 pretending to be SORT2
##t1 = self.data[:, inode, 0].ravel()
#t1 = self.data[inode, :, 0]
#t2 = self.data[inode, :, 1]
#t3 = self.data[inode, :, 2]
#r1 = self.data[inode, :, 3]
#r2 = self.data[inode, :, 4]
#r3 = self.data[inode, :, 5]
#if len(r3) != len(times):
#raise RuntimeError('len(d)=%s len(times)=%s' % (len(r3), len(times)))
#header[2] = ' POINT-ID = %10i\n' % node_id
#f06_file.write(''.join(header + words))
#for dt, t1i, t2i, t3i, r1i, r2i, r3i in zip(times, t1, t2, t3, r1, r2, r3):
#sgridtype = self.recast_gridtype_as_string(gridtypei)
#vals = [t1i, t2i, t3i, r1i, r2i, r3i]
#vals2 = write_imag_floats_13e(vals, is_mag_phase)
#[dxr, dyr, dzr, rxr, ryr, rzr,
#dxi, dyi, dzi, rxi, ryi, rzi] = vals2
#sdt = write_float_12e(dt)
##if not is_all_zeros:
#if sgridtype == 'G':
#f06_file.write('0 %12s %6s %-13s %-13s %-13s %-13s %-13s %-s\n'
#' %13s %6s %-13s %-13s %-13s %-13s %-13s %-s\n' % (
#sdt, sgridtype, dxr, dyr, dzr, rxr, ryr, rzr,
#'', '', dxi, dyi, dzi, rxi, ryi, rzi))
#elif sgridtype == 'S':
#f06_file.write('0 %12s %6s %-13s\n'
#' %12s %6s %-13s\n' % (sdt, sgridtype, dxr, '', '', dxi))
#else:
#msg = 'nid=%s dt=%s type=%s dx=%s dy=%s dz=%s rx=%s ry=%s rz=%s' % (
#node_id, dt, sgridtype, t1i, t2i, t3i, r1i, r2i, r3i)
#raise NotImplementedError(msg)
#f06_file.write(page_stamp % page_num)
#page_num += 1
#return page_num
def pandas_extract_rows(data_frame, ugridtype_str, index_names):
    """removes the t2-t6 for S and E points

    Parameters
    ----------
    data_frame : pd.DataFrame
        frame whose index level 1 holds the grid-type letter (G/E/S/H/L)
    ugridtype_str : str/iterable
        the grid-type letters actually present in this result
    index_names : list
        index level names used to rebuild the MultiIndex after filtering

    Returns
    -------
    pd.DataFrame
        the concatenated, filtered frame; on failure the original frame
        is returned unchanged

    Scalar point types (E, S) have only one meaningful component, so only
    every 6th row (the 't1' row) is kept and relabeled with the point letter.
    """
    import pandas as pd
    # (grid-type letter, number of meaningful components per point)
    letter_dims = [
        ('G', 6),
        ('E', 1),
        ('S', 1),
        ('H', 6),
        ('L', 6),
    ]
    cat_keys = []
    for (letter, dim) in letter_dims:
        if letter not in ugridtype_str:
            continue
        if dim == 1:
            # Note that I'm only keeping every 6th row
            eig = data_frame.xs(letter, level=1).iloc[0::6]
            eig = eig.reset_index()
            #print(eig.columns)
            #print(eig)
            #item = eig.loc[:, 1]
            #item = eig.loc[:, 'Item']
            #print(dir(eig))
            #print(eig.loc)
            #item = eig['Item']
            #print(item)
            try:
                # Relabel the surviving 't1' rows with the point letter and
                # restore the MultiIndex.
                eig = eig.replace({'Item' : {'t1' : letter}}).set_index(index_names)
            except (TypeError, NotImplementedError):
                # Best-effort cleanup: bail out with the unfiltered frame
                # rather than crashing the export.
                print(f'skipping pandas cleanup due to issue with complex {letter} points')
                return data_frame
                #continue
        elif dim == 6:
            # Full 6-component points are kept as-is.
            eig = data_frame.xs(letter, level=1)
        else:
            raise RuntimeError(dim)
        #log.info('eig = %s' % eig)
        cat_keys.append(eig)
    data_frame = pd.concat(cat_keys)
    return data_frame
#class StaticArrayNode(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
#class StaticArrayElement(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def element_ids(self):
#return self.node_gridtype[:, 0]
#class TimeArrayNodeSort1(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def times(self):
#return self._times
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
#class TimeArrayElementSort1(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def times(self):
#return self._times
#@property
#def element_ids(self):
#return self.node_gridtype[:, 0]
#class TimeArrayNodeSort2(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def times(self):
#return self._times
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
#class TimeArrayElementSort2(RealTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#RealTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def times(self):
#return self._times
#@property
#def element_ids(self):
#return self.node_gridtype[:, 0]
#class FrequencyArrayNodeSort2(ComplexTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#ComplexTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def frequencies(self):
#return self._times
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
#class FrequencyArrayElementSort2(ComplexTableArray):
#def __init__(self, data_code, is_sort1, isubcase, dt):
#ComplexTableArray.__init__(self, data_code, is_sort1, isubcase, dt)
#@property
#def frequencies(self):
#return self._times
#@property
#def node_ids(self):
#return self.node_gridtype[:, 0]
| [
37811,
198,
4299,
1127,
25,
198,
532,
8655,
10267,
198,
532,
6416,
10962,
19182,
198,
532,
19157,
10962,
19182,
198,
198,
27218,
389,
973,
416,
25,
198,
532,
6416,
7279,
489,
5592,
19182,
198,
532,
6416,
46261,
11683,
19182,
198,
532,
... | 1.658789 | 27,971 |
# Build a small list, show it, reverse it recursively, and show it again.
linkedList = LinkedList()
for value in (1, 4, 7, 8):
    linkedList.insert(value)
linkedList.printLinkedList()
print()
# Iterative variant kept for reference:
# linkedList.reverse_iteratively()
linkedList.reverse_recursively()
linkedList.printLinkedList()
| [
628,
198,
25614,
8053,
796,
7502,
276,
8053,
3419,
198,
25614,
8053,
13,
28463,
7,
16,
8,
198,
25614,
8053,
13,
28463,
7,
19,
8,
198,
25614,
8053,
13,
28463,
7,
22,
8,
198,
25614,
8053,
13,
28463,
7,
23,
8,
198,
25614,
8053,
13,... | 2.975904 | 83 |
# ----------------------------------------------------------------------------
# test17.py
#
# Copyright 2021 Daniel Tisza
# MIT License
#
# Taking raw image with Zybo and saving to NETCDF
#
# LED calibration set: C
# Wavelength: 584.577535
# Setpoint: 11127
#
# ----------------------------------------------------------------------------
# import all the stuff from mvIMPACT Acquire into the current scope
from mvIMPACT import acquire
# import all the mvIMPACT Acquire related helper function such as 'conditionalSetProperty' into the current scope
# If you want to use this module in your code feel free to do so but make sure the 'Common' folder resides in a sub-folder of your project then
from mvIMPACT.Common import exampleHelper

# Standard library
# For systems with NO mvDisplay library support
import ctypes
import datetime as dt
import sys

# Third-party
import numpy as np
import xarray as xr
import matplotlib
import matplotlib.image
import serial
import serial.tools.list_ports as list_ports
from serial import Serial
from spectracular.fpi_driver import detectFPIDevices, createFPIDevice
import fpipy as fp
# ------------------------------------------------
# LED control: enable LED set C before capturing.
# ledportstring = '/dev/ttyACM1'   # typical device name on Linux
ledportstring = 'COM10'

print('Trying to use ' + ledportstring + ' for LED control')
port = serial.Serial(ledportstring, 9600, timeout=0.5)
try:
    # Led Set C calibration row:
    # index Npeaks SP1 SP2 SP3 PeakWL1 PeakWL2 PeakWL3 Sinv11 Sinv12 Sinv13 Sinv21 Sinv22 Sinv23 Sinv31 Sinv32 Sinv33 FWHM1 FWHM2 FWHM3
    # 54 1 11127 0 0 584.577535 0 0 0.702570403 2.016995336 -4.32210678 0 0 0 0 0 0 15.84827112 0 0
    #C
    print('LEDs on for set C')
    input("Press to activate")
    # 'L0x40201' == led.L(0b000000001000000001000000001) -- presumably one LED
    # per bank for set C; confirm against the LED controller firmware.
    port.flushOutput()
    port.flushInput()
    port.write(str.encode('L0x40201\r\n'))
    port.flushOutput()
    port.flushInput()
finally:
    # Robustness fix: guarantee the serial port is released even if the
    # write/flush fails, so a retry does not hit a busy port.
    port.close()
# ------------------------------------------------
# Fabry-Perot interferometer (FPI) setup.
FPI_IDS = [
    # ( VID, PID) (and the same in decimal)
    ('1FC9', '0083'), (8137, 131),
]
"""Known VID:PID pairs of FPI devices."""
FPI_HWIDS = [
    # Strings returned by read_hardware_id
    'd02b012 af380065 5b5bbeab f50019c1'
]
print('Trying to create FPI device')
# NOTE(review): [0] assumes at least one FPI device was detected;
# raises IndexError otherwise.
fpi = createFPIDevice(detectFPIDevices(FPI_IDS, FPI_HWIDS)[0].device)
print(fpi)
#C
# 54 1 11127 0 0 584.577535 0 0 0.702570403 2.016995336 -4.32210678 0 0 0 0 0 0 15.84827112 0 0
# Setpoint 11127 is SP1 of the calibration row above, i.e. the
# 584.577535 nm peak for LED set C.
print('Setting MFPI setpoint')
fpi.set_setpoint(11127, wait=True)
input('Wait after setting setpoint')
fpi.close()
# ------------------------------------------------
# Select and open the camera device interactively.
devMgr = acquire.DeviceManager()
pDev = exampleHelper.getDeviceFromUserInput(devMgr)
if pDev is None:
    # No device was selected/found: acknowledge and abort.
    # BUG FIX: 'sys' was used here without ever being imported, so this
    # error path crashed with a NameError instead of exiting cleanly
    # (fixed together with the import block); '== None' -> 'is None'.
    exampleHelper.requestENTERFromUser()
    sys.exit(-1)
pDev.open()
#
# Set system settings
#
# RequestCount 10
#
ss = acquire.SystemSettings(pDev)
print("Old RequestCount:")
print(ss.requestCount.readS())
#
# Basic device settings
#
bdc = acquire.BasicDeviceSettings(pDev)
print("Old ImageRequestTimeout_ms:")
print(bdc.imageRequestTimeout_ms.readS())
#
# Set camera settings
#
# AcquisitionMode SingleFrame
# TriggerSource Line1
# TriggerMode Off
#
# Each property is printed before and after writing so the console log
# doubles as a record of the acquisition configuration.
ac = acquire.AcquisitionControl(pDev)
print("Old AcquisitionMode:")
print(ac.acquisitionMode.readS())
print("New AcquisitionMode:")
ac.acquisitionMode.writeS("SingleFrame")
print(ac.acquisitionMode.readS())
print("Old TriggerSource:")
print(ac.triggerSource.readS())
# print("New TriggerSource:")
# ac.triggerSource.writeS("Software")
# print(ac.triggerSource.readS())
print("Old TriggerMode:")
print(ac.triggerMode.readS())
# print("New TriggerMode:")
# ac.triggerMode.writeS("On")
# print(ac.triggerMode.readS())
# Automatic exposure disabled -- presumably for reproducible radiometry
# across captures; confirm with the measurement protocol.
print("Old ExposureAuto:")
print(ac.exposureAuto.readS())
print("New ExposureAuto:")
ac.exposureAuto.writeS("Off")
print(ac.exposureAuto.readS())
ifc = acquire.ImageFormatControl(pDev)
print("Old pixelformat:")
print(ifc.pixelFormat.readS())
print("New pixelformat:")
# 12-bit Bayer raw keeps unprocessed sensor values for the NETCDF export.
ifc.pixelFormat.writeS("BayerGB12")
# ifc.pixelFormat.writeS("RGB8")
print(ifc.pixelFormat.readS())
print("Old pixelColorFilter:")
print(ifc.pixelColorFilter.readS())
imgp = acquire.ImageProcessing(pDev)
# "Auto" originally
print("Old colorProcessing:")
print(imgp.colorProcessing.readS())
# Raw disables demosaicing in the driver.
imgp.colorProcessing.writeS("Raw")
print("New colorProcessing:")
print(imgp.colorProcessing.readS())
print("Old ExposureTime:")
print(ac.exposureTime.readS())
print("New ExposureTime:")
# Fixed 150 ms exposure (value is in microseconds).
ac.exposureTime.writeS("150000")
print(ac.exposureTime.readS())
anlgc = acquire.AnalogControl(pDev)
print("Old BalanceWhiteAuto:")
print(anlgc.balanceWhiteAuto.readS())
print("New BalanceWhiteAuto:")
anlgc.balanceWhiteAuto.writeS("Off")
print(anlgc.balanceWhiteAuto.readS())
print("Old Gamma:")
print(anlgc.gamma.readS())
print("New Gamma:")
# Gamma 1 = linear response.
anlgc.gamma.writeS("1")
print(anlgc.gamma.readS())
print("Old Gain:")
print(anlgc.gain.readS())
print("New Gain:")
# Fixed analog gain; the same value is stored in the NETCDF coords below.
anlgc.gain.writeS("1.9382002601")
print(anlgc.gain.readS())
print("Old GainAuto:")
print(anlgc.gainAuto.readS())
print("New GainAuto:")
anlgc.gainAuto.writeS("Off")
print(anlgc.gainAuto.readS())
#
# Taking image
#
fi = acquire.FunctionInterface(pDev)
statistics = acquire.Statistics(pDev)
# Queue exactly one request (SingleFrame mode) and start acquisition.
fi.imageRequestReset(0,0)
fi.imageRequestSingle()
exampleHelper.manuallyStartAcquisitionIfNeeded(pDev, fi)
# ac.triggerSoftware()
# Block up to 20 s for the frame to arrive.
requestNr = fi.imageRequestWaitFor(20000)
# Add this from SingleCapture.cpp
exampleHelper.manuallyStopAcquisitionIfNeeded(pDev, fi)
if fi.isRequestNrValid(requestNr):
    print("Request number valid!")
    pRequest = fi.getRequest(requestNr)
    print("Print request: " + str(pRequest))
    print("Print request result: " + str(pRequest.requestResult))
    print("Print request result readS: " + pRequest.requestResult.readS())
    if pRequest.isOK:
        print("Request OK!")
        print("Info from " + pDev.serial.read() +
              ": " + statistics.framesPerSecond.name() + ": " + statistics.framesPerSecond.readS() +
              ", " + statistics.errorCount.name() + ": " + statistics.errorCount.readS() +
              ", " + statistics.captureTime_s.name() + ": " + statistics.captureTime_s.readS())
        height = pRequest.imageHeight.read()
        width = pRequest.imageWidth.read()
        channelCount = pRequest.imageChannelCount.read()
        channelBitDepth = pRequest.imageChannelBitDepth.read()
        imageSize = pRequest.imageSize.read()
        print("Image height: " + str(height))
        print("Image width: " + str(width))
        print("Image channel count: " + str(channelCount))
        print("Image channel bit depth: " + str(channelBitDepth))
        print("Image size: " + str(imageSize))
        # For systems with NO mvDisplay library support
        # Wrap the driver-owned buffer as a ctypes array (no copy yet).
        cbuf = (ctypes.c_char * pRequest.imageSize.read()).from_address(int(pRequest.imageData.read()))
        #print(cbuf)
        # Handling in test_hsi6.py
        # self._pixel_format = "BayerGB12"
        # self._buffer_decoder = get_decoder(self._pixel_format)
        # data = self._buffer_decoder(cbuf, (height, width))
        # BayerGB12 arrives as 16-bit words; .copy() detaches the array from
        # the driver buffer before the request is unlocked below.
        data = np.frombuffer(
            cbuf,
            dtype=np.uint16
        ).reshape((height, width)).copy()
        print("Pixel data:")
        print(data)
        height, width = data.shape[0], data.shape[1]
        # Pixel-center coordinates plus a capture timestamp.
        coords = {
            "x": ("x", np.arange(0, width) + 0.5),
            "y": ("y", np.arange(0, height) + 0.5),
            "timestamp": dt.datetime.today().timestamp(),
        }
        dims = ('y', 'x')
        # Replace these hard-coded values by reading from camera!
        coords['Gain'] = "1.9382002601"
        coords['ExposureTime'] = 150000
        coords['PixelFormat'] = "BayerGB12"
        coords['PixelColorFilter'] = "BayerGB"
        frame = xr.DataArray(
            data,
            name="frame",
            dims=dims,
            coords=coords,
            #attrs={
            #    'valid_range': self._image_range,
            #}
        )
        print("Frame xarray:")
        print(frame)
        print("Save frame to netcdf:")
        frame.to_netcdf('testframe.nc')
        # Original code for shaping data
        # channelType = numpy.uint16 if channelBitDepth > 8 else numpy.uint8
        # arr = numpy.fromstring(cbuf, dtype = channelType)
        # arr.shape = (height, width, channelCount)
        # print(arr)
        # print("Start saving PNG image...")
        # matplotlib.image.imsave('testimage.png', arr)
        #if channelCount == 1:
        #    img = Image.fromarray(arr)
        #else:
        #    img = Image.fromarray(arr, 'RGBA' if alpha else 'RGB')
    # Hand the buffer back to the driver (we hold our own copy in `data`).
    fi.imageRequestUnlock(requestNr)
    exampleHelper.manuallyStopAcquisitionIfNeeded(pDev, fi)
    fi.imageRequestReset(0,0)
else:
    # Please note that slow systems or interface technologies in combination with high resolution sensors
    # might need more time to transmit an image than the timeout value which has been passed to imageRequestWaitFor().
    # If this is the case simply wait multiple times OR increase the timeout(not recommended as usually not necessary
    # and potentially makes the capture thread less responsive) and rebuild this application.
    # Once the device is configured for triggered image acquisition and the timeout elapsed before
    # the device has been triggered this might happen as well.
    # The return code would be -2119(DEV_WAIT_FOR_REQUEST_FAILED) in that case, the documentation will provide
    # additional information under TDMR_ERROR in the interface reference.
    # If waiting with an infinite timeout(-1) it will be necessary to call 'imageRequestReset' from another thread
    # to force 'imageRequestWaitFor' to return when no data is coming from the device/can be captured.
    print("imageRequestWaitFor failed (" + str(requestNr) + ", " + acquire.ImpactAcquireException.getErrorCodeAsString(requestNr) + ")")
    exampleHelper.manuallyStopAcquisitionIfNeeded(pDev, fi)
exampleHelper.requestENTERFromUser()
# ------------------------------------
# Turn off LED
print('Trying to use ' + ledportstring + ' for LED control')
port = serial.Serial(ledportstring, 9600, timeout=0.5)
try:
    print('LEDs turn off')
    input("Press to activate")
    # 'L0x0' extinguishes all LEDs.
    port.flushOutput()
    port.flushInput()
    port.write(str.encode('L0x0\r\n'))
    port.flushOutput()
    port.flushInput()
finally:
    # Robustness fix (mirrors the LED-on section): guarantee the serial
    # port is released even if the write/flush fails.
    port.close()
print('Finished!')
| [
2,
16529,
10541,
198,
2,
197,
9288,
1558,
13,
9078,
198,
2,
198,
2,
197,
15269,
33448,
7806,
309,
271,
4496,
198,
2,
197,
36393,
13789,
198,
2,
198,
2,
197,
26556,
8246,
2939,
351,
40905,
2127,
290,
8914,
284,
30502,
34,
8068,
198... | 2.62721 | 3,903 |
import json
from typing import Union
import mock
from openeo.rest.datacube import DataCube
from openeo.rest.imagecollectionclient import ImageCollectionClient
def get_download_graph(cube: Union[DataCube, ImageCollectionClient]) -> dict:
    """
    Fake-download a cube and capture the process graph it would send.

    :param cube: cube whose download process graph should be intercepted
    :return: the JSON-normalized process graph
    """
    with mock.patch.object(cube.connection, 'download') as fake_download:
        cube.download("out.geotiff", format="GTIFF")
        fake_download.assert_called_once()
        # First positional argument of the intercepted call is the graph.
        call_args, _kwargs = fake_download.call_args
        return _json_normalize(call_args[0])
def get_execute_graph(cube: Union[DataCube, ImageCollectionClient]) -> dict:
    """
    Fake-execute a cube and capture the process graph it would send.

    :param cube: cube whose execute process graph should be intercepted
    :return: the JSON-normalized process graph
    """
    with mock.patch.object(cube.connection, 'execute') as fake_execute:
        cube.execute()
        fake_execute.assert_called_once()
        # First positional argument of the intercepted call is the graph.
        call_args, _kwargs = fake_execute.call_args
        return _json_normalize(call_args[0])
def _json_normalize(x: dict) -> dict:
"""Normalize given graph to be JSON compatible (e.g. use lists instead of tuples)"""
return json.loads(json.dumps(x))
| [
11748,
33918,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
15290,
198,
198,
6738,
1280,
68,
78,
13,
2118,
13,
19608,
330,
3266,
1330,
6060,
29071,
198,
6738,
1280,
68,
78,
13,
2118,
13,
9060,
43681,
16366,
1330,
7412,
36307,
11792,
... | 2.937916 | 451 |
import os
import string
import sys
import math
# Three-letter codes of the fifteen supported languages; corpus file names
# start with these prefixes (see load_corpus: lang = target_file[:3]).
languages = ['dan','deu','dut','eng','fin','fra','gla','ita','nob','pol','por','spa','swe','swh','tgl']
def smoothing(corpus):
    '''
    Additive (add-one) smoothing.

    args: corpus(dict): word -> per-language occurrence counts plus a 'total' key
    returns: the corpus, modified in place

    Adds 1 to every per-language count (so zero-count words get count 1) and
    bumps 'total' once per language so it remains the sum of the counts.
    '''
    for word in corpus:
        counts = corpus[word]
        for lang in counts:
            if lang == 'total':
                # BUG FIX: the old loop also iterated the 'total' bookkeeping
                # key itself, inflating 'total' by an extra 2 per word.
                continue
            counts[lang] += 1
            counts['total'] += 1
    return corpus
def print_odds(odds, identifier, sentence, extra_credit):
    '''
    Print odds in correct format.

    args: odds(dict): language -> log10 probability for one sentence
          identifier(str): unique identifier from the input file
          sentence(str): the raw input sentence (trailing newline expected)
          extra_credit(int): 1 to report 'unk' when no language clearly wins
    returns: Nothing

    Prints the identifier and sentence, one line per language with its odds,
    then 'result <lang>' (or 'result unk' under the extra-credit threshold).
    '''
    print('{}\t{}'.format(identifier, sentence[:-1]))
    max_value = float('-inf')
    max_lang = 'unk'
    total_odds = 0.0
    for lang in odds:
        total_odds += odds[lang]
        if odds[lang] > max_value:
            max_value = odds[lang]
            max_lang = lang
        print('{}\t{}'.format(lang, odds[lang]))
    # Generalization: average over however many languages were scored instead
    # of the previous hard-coded /15 (identical for the standard 15-language
    # run); guard against an empty odds dict.
    average = total_odds / len(odds) if odds else 0.0
    difference = max_value - average
    if extra_credit == 1 and difference < 25:
        # Best language is not far enough above the mean: refuse to guess.
        print("result unk")
    else:
        print("result {}".format(max_lang))
def calculate_odds(corpus, sentence):
    '''
    Compute per-language log-odds for a tokenized sentence.

    args: corpus(dict): word -> per-language counts (with a 'total' key),
          sentence(list): tokens of the sentence to score
    returns: probabilities(dict): language -> log10 probability
             (higher = more likely)

    Naive-Bayes style scoring: sum log10(count[lang]/total) over the tokens;
    unseen words fall back to a uniform 1/15 probability.
    '''
    probabilities = {}
    for lang in languages:
        log_prob = float(0)
        for token in sentence:
            entry = corpus.get(token)
            if entry is None:
                # Unseen token: uniform probability across the 15 languages.
                log_prob += math.log10(0.06666666666666667)
            else:
                log_prob += math.log10(float(entry[lang]) / float(entry['total']))
        probabilities[lang] = log_prob
    return probabilities
def load_corpus(corpus_directory):
    '''
    Read a corpus directory and normalize word counts.

    args: corpus_directory(str): directory of per-language corpus files,
          each named '<lang>...' and containing 'word<TAB>count' lines
    returns: corpus(dict): word -> smoothed per-language counts plus 'total'

    Reads every corpus file, accumulates per-language occurrence counts,
    totals them, and applies additive smoothing.
    '''
    corpus = {}
    for target_file in os.listdir(corpus_directory):
        file_path = os.path.join(corpus_directory, target_file)
        # File name prefix encodes the language (e.g. 'eng...').
        lang = target_file[:3]
        with open(file_path, 'r') as f:
            for line in f:
                fields = line.split('\t')
                target_word = fields[0]
                target_value = int(fields[1])
                if target_word not in corpus:
                    # Consistency fix: build the zero-count template from the
                    # module-level `languages` list instead of a duplicated,
                    # hard-coded dict literal that can drift out of sync.
                    corpus[target_word] = {language: 0 for language in languages}
                corpus[target_word][lang] = target_value
    for word in corpus:
        # 'total' is the sum over the language keys only.
        corpus[word]['total'] = sum(corpus[word][language] for language in languages)
    return smoothing(corpus)
def main(corpus_location, target_file, extra_credit):
    '''
    Read a corpus and calculate the most probable language for given sentences.

    args: corpus_location(str): directory containing per-language corpus files
          target_file(str): file of tab-separated (identifier, sentence) lines
          extra_credit(int): 1 to enable the unknown-language threshold

    Loads a corpus, smooths it, and reports the most likely language for each
    sentence in target_file.
    '''
    # BUG FIX: the parameter was misspelled 'corpus_locaiton', so the body
    # silently read the module-level 'corpus_location' global instead of the
    # argument that was passed in.
    corpus = load_corpus(corpus_location)
    with open(target_file, 'r') as f:
        for line in f:
            fields = line.split('\t')
            sentence = fields[1].strip()
            # NOTE(review): str.translate(None, chars) is the Python 2 API;
            # under Python 3 this raises TypeError -- confirm target runtime.
            sentence = sentence.translate(None, string.punctuation)
            print_odds(calculate_odds(corpus, sentence.split(' ')), fields[0], fields[1], extra_credit)
if __name__ == "__main__":
    # argv[1] is the corpus directory, argv[2] the sentence file, argv[3] the
    # extra-credit flag.
    if len(sys.argv) != 4:
        # BUG FIX: the old usage string (copied from a DNA exercise) listed
        # the arguments in the wrong order and with the wrong names.
        print("Usage: main.py <corpus directory> <target sentence file> <extra credit flag>")
        exit(-1)
    else:
        corpus_location = sys.argv[1]
        target_file = sys.argv[2]
        extra_credit = int(sys.argv[3])
        main(corpus_location, target_file, extra_credit)
| [
11748,
28686,
198,
11748,
4731,
198,
11748,
25064,
198,
11748,
10688,
198,
75,
33213,
796,
37250,
25604,
41707,
2934,
84,
41707,
67,
315,
41707,
1516,
41707,
15643,
41707,
69,
430,
41707,
4743,
64,
41707,
5350,
41707,
34952,
41707,
16104,
... | 2.48311 | 1,865 |
# DESCRIPTION
# Implement a trie with insert, search, and startsWith methods.
# EXAMPLE:
# Trie trie = new Trie();
# trie.insert("apple");
# trie.search("apple"); // returns true
# trie.search("app"); // returns false
# trie.startsWith("app"); // returns true
# trie.insert("app");
# trie.search("app"); // returns true
# Note:
# You may assume that all inputs consist of lowercase letters a-z.
# All inputs are guaranteed to be non-empty strings.
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
| [
2,
22196,
40165,
198,
2,
48282,
257,
1333,
68,
351,
7550,
11,
2989,
11,
290,
4940,
3152,
5050,
13,
198,
198,
2,
7788,
2390,
16437,
25,
198,
2,
309,
5034,
1333,
68,
796,
649,
309,
5034,
9783,
198,
198,
2,
1333,
68,
13,
28463,
720... | 2.961905 | 210 |
# Stream grayscale webcam frames row-by-row over TCP to a receiving server.
import socket
import time
import cv2
import pickle
import zlib
import datetime

# Address of the receiving server.
HOST = '192.168.0.101'
PORT = 13327

# Default webcam; properties 3/4 request a 50x50 capture size -- presumably
# CAP_PROP_FRAME_WIDTH/HEIGHT; the camera may ignore the request.
video = cv2.VideoCapture(0)
video.set(3, 50)
video.set(4, 50)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))

# Wire protocol (inferred from the send/recv pairing -- confirm against the
# server side): each grayscale row is zlib-compressed, pickled and
# acknowledged; b'END' closes a frame; b'Again' asks to send the next one.
for i in range(5800):
    ret, frame = video.read()
    # NOTE(review): `ret` is never checked; a failed grab makes cvtColor raise.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for array in gray:
        s.send(pickle.dumps(zlib.compress(bytes(array))))
        Echo = s.recv(1024)
    s.send(b'END')
    Echo = s.recv(1024)
    if i == 100:
        # Despite the 5800 loop bound, only 101 frames are actually sent.
        s.send(b'END')
        break
    else:
        s.send(b'Again')
        Echo = s.recv(1024)
s.sendall(b'')
s.close()
video.release()
cv2.destroyAllWindows()
| [
11748,
17802,
198,
11748,
640,
198,
11748,
269,
85,
17,
198,
11748,
2298,
293,
198,
11748,
1976,
8019,
198,
11748,
4818,
8079,
198,
198,
39,
10892,
796,
705,
17477,
13,
14656,
13,
15,
13,
8784,
6,
198,
15490,
796,
22169,
1983,
198,
... | 2.042735 | 351 |
import xml_utilities
| [
198,
11748,
35555,
62,
315,
2410,
628,
628,
628,
628,
198
] | 2.727273 | 11 |
# normal libaries
import os
import gzip
import re
import collections
import zipfile
import operator
import logging
logger = logging.getLogger(os.path.basename(__file__))
logging.basicConfig(level=logging.INFO)
# 3rd party (all avalible trough pip!)
import numpy as np
import matplotlib as mp
mp.use("Agg")
from matplotlib import pyplot as plt
import lmfit
################################################################################
# CONSTANTS AND PATHS
################################################################################
DEBUGGING = False

# miRBase release; '%i' placeholders in the paths below are filled with this.
MIRBASE_VERSION = 20
# SCALING_FACTOR = 2.2275
SCALING_FACTOR = 1

# PREDICTION_FILES = ("miRanda.tsv", "TargetScan.tsv", "PicTar.tsv")

# PATHS
MASTER_DIR = "master_files"
ID_PATH = "id_dictionaries"
DATA_PATH = "data"
FIG_PATH = "figures"

# miRBase
MIR_MAPPING_ALIASES_PATH = os.path.join(ID_PATH, 'miRBase_%i_mir_aliases.tsv')
MIR_MATURE_MAPPING_ALIASES_PATH = os.path.join(ID_PATH, 'miRBase_%i_mir_aliases_only_mature.tsv')
MIRBASE_ALIASES_TXT_PATH = os.path.join(ID_PATH, "miRBase_%i_aliases.txt.gz")
MIRBASE_MIRNA_DAT_PATH = os.path.join(ID_PATH, "miRBase_%i_miRNA.dat.gz")
MIRBASE_BASE_URL = "ftp://mirbase.org/pub/mirbase/%i"
# MIR_MAPPING_UNIQUE_PATH = 'id_dictionaries/miRBase_%i_mir_unique.tsv'
# MIRBASE_MIR_ALIASES_MAPPING = 'id_dictionaries/miRBase_mir_aliases.tsv'
# STEM_LOOP_MAPPING_FILE = "id_dictionaries/miRBase_stem_loop_mapping.tsv"
# MIR_TO_STEM_LOOP_MAPPING_FILE = "id_dictionaries/miRBase_mir_to_stem_loop_mapping.tsv"

# STRING ENSP -- protein alias/species dumps for several STRING-DB releases.
STRING_ALIASES_100 = "http://string-db.org/newstring_download/protein.aliases.v10.txt.gz"
STRING_ALIASES_91 = "http://string91.embl.de/newstring_download/protein.aliases.v9.1.txt.gz"
STRING_ALIASES_83 = "http://string83.embl.de/newstring_download/protein.aliases.v8.3.txt.gz"
STRING_SPECIES_91 = 'http://string-db.org/newstring_download/species.v9.1.txt'
STRING_SPECIES_100 = "http://string-db.com/newstring_download/species.v10.txt"

# PubMed IDs of experiments integrated in StarBase 2.0
STARBASE_PMIDs = os.path.join(DATA_PATH, "starBase_2.0_Experiments_PubMedIDs.tsv")
################################################################################
# utility classes
################################################################################
class Interaction:
    """Simple interaction class to store an interaction, i.e., a line in a master file.

    Instances are expected to carry ``_org`` (taxonomy ID) and ``_ent1`` /
    ``_ent2`` (the two interacting entities); the constructor is defined
    elsewhere.  Equality ignores the order of the two entities.
    """

    def __eq__(self, other):
        """
        Two Interactions are equal if they are in the same organism and connect the same entities.
        :param other: The Interaction to compare to.
        :return: True if and only if the Interactions being compared are equal.
        """
        if isinstance(other, self.__class__):
            # Sort both entity pairs so comparison is order-independent.
            self_ent_sorted = sorted((self._ent1, self._ent2))
            other_ent_sorted = sorted((other._ent1, other._ent2))
            return (self._org == other._org) and (self_ent_sorted[0] == other_ent_sorted[0]) and \
                   (self_ent_sorted[1] == other_ent_sorted[1])
        else:
            return False

    def __ne__(self, other):
        # Python 2 does not derive != from ==; keep the two operators consistent.
        return not self.__eq__(other)

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ makes instances unhashable on
        # Python 3 and breaks the eq/hash contract on Python 2.  Hash on the
        # same order-independent key that __eq__ compares.
        return hash((self._org, tuple(sorted((self._ent1, self._ent2)))))
class EntityType:
    """Enumerator class representing different molecular entities."""
    # Integer codes (kept stable: Protein=0, miRNA=1, ncRNA=2).
    Protein = 0
    miRNA = 1
    ncRNA = 2
class InteractionType:
    """Enumerator class representing different types of interactions."""
    # Integer codes 0..3 for the four supported interaction kinds.
    Protein_miRNA, Protein_ncRNA, miRNA_ncRNA, ncRNA_ncRNA = range(4)
# NOTE(review): two dangling ``@staticmethod`` decorators followed this class
# with no function definition attached -- a decorator without a following
# ``def`` is a SyntaxError, so they were removed.  The decorated helpers were
# presumably lost in an earlier edit; restore them from version control if
# they are still needed.
################################################################################
# 'math' functions
################################################################################
################################################################################
# MAPPING FUNCTIONS
################################################################################
# general functions
# miRBase functions
# def infer_organism_from_mir_prefix(mir):
# prefix_to_organism = {
# 'dre': 7955,
# 'ath': 3702,
# 'bta': 9913,
# 'cel': 6239,
# 'dme': 7227,
# 'hsa': 9606,
# 'gga': 9031,
# 'mmu': 10090,
# 'rno': 10116,
# 'xla': 8364
# }
# return prefix_to_organism.get(mir.split('-')[0], 'NOT A MODDEL ORGANISM... DO WEE NEED THIS????')
################################################################################
# STRING mapping functions:
def get_alias_mir_mapper(version=MIRBASE_VERSION):
    """Return a dict mapping each miR alias to the sorted list of miRs it can
    stand for, i.e. mapping_hash[ambiguous_mir] -> [unambiguous_mir1, ...].

    Mostly the mapping is one-to-one, but a stem-loop identifier maps to all
    mature miRs derived from that stem.  A mature-only variant exists because
    some mature names are indistinguishable from their stems (no lower/upper
    case distinction): hsa-let-7c is a stem but also an old mature-miR name.
    """
    template = MIR_MAPPING_ALIASES_PATH
    # (Re)build the mapping file when it is missing, or always while debugging.
    if DEBUGGING or not os.path.exists(template % version):
        make_mir_mapping_files(template, version)
    raw_mapping = __load_mir_mapper_alias_file__(version)
    return {alias: sorted(targets) for alias, targets in raw_mapping.items()}
def get_unique_mir_mapper(version=MIRBASE_VERSION):
    """Return a dict mapping each unambiguous miR alias to its single target,
    i.e. mapping_hash[ambiguous_mir] -> unambiguous_mir.

    Aliases that resolve to more than one miR are dropped.  This exists because
    some mature names are indistinguishable from their stems (no lower/upper
    case distinction): hsa-let-7c is a stem but also an old mature-miR name.
    """
    alias_to_targets = get_alias_mir_mapper(version)
    # Keep only one-to-one aliases; each target entry is a tuple whose second
    # element is the miR identifier itself.
    return {alias: targets[0][1]
            for alias, targets in alias_to_targets.items()
            if len(targets) == 1}
def get_mir_id_to_tax_id_mapper(mirbase_version=MIRBASE_VERSION):
    """
    :param mirbase_version: miRBase version to be used
    :return: dict string -> string, mapping RNA identifiers to their respective taxonomy identifiers
    """
    mir_id_to_tax_id = {}
    # Fix: use a context manager so the alias file is closed deterministically
    # (the original iterated over a bare open() and leaked the handle).
    with open(MIR_MAPPING_ALIASES_PATH % mirbase_version, 'r') as alias_file:
        for line in alias_file:
            organism, target_mir, alias_mir, priority = line.rstrip().split('\t')
            mir_id_to_tax_id[target_mir] = organism
    return mir_id_to_tax_id
################################################################################
# ensp8_to_ensg = stringrnautils.get_string_to_alias_mapper(9606, 'ENSP', 'ENSG', 8)['9606']
def get_string_to_alias_mapper(organisms="9606", filter_string_id='ENSP',
                               filter_string_alias='ENSG', version=10, db='all', include_basename=True):
    """Parse the STRING alias files and build mapper[organism][string_id] = string_alias.

    All keys in the mapper are strings.
    - organisms: 'all' includes every organism; a list/tuple/set restricts to
      those; a single taxonomy ID restricts to that organism alone.
    - filter_string_id / filter_string_alias: substrings that must occur in the
      corresponding columns for an entry to be kept.
    - db: optional filter on the last column of the STRING alias file; the
      default 'all' ignores that column.
    """
    mapping_file = get_string_mapping_file(organisms, filter_string_id,
                                           filter_string_alias, version, db, include_basename)
    mapper = collections.defaultdict(dict)
    # Fix: context manager closes the mapping file deterministically (the
    # original iterated over a bare open() and leaked the handle).
    with open(mapping_file) as handle:
        for line in handle:
            organism, string_alias, string_id = line.rstrip().split('\t')
            mapper[organism][string_id] = string_alias
    return dict(mapper)
def get_alias_to_string_mapper(organisms="9606", filter_string_id='ENSP',
                               filter_string_alias='ENSG', version=10, db='all', include_basename=True):
    """Parse the STRING alias files and build mapper[organism][string_alias] = string_id.

    All keys in the mapper are strings.
    - organisms: 'all' includes every organism; a list/tuple/set restricts to
      those; a single taxonomy ID restricts to that organism alone.
    - filter_string_id / filter_string_alias: substrings that must occur in the
      corresponding columns for an entry to be kept.
    - db: optional filter on the last column of the STRING alias file; the
      default 'all' ignores that column.
    """
    mapping_file = get_string_mapping_file(organisms, filter_string_id,
                                           filter_string_alias, version, db, include_basename)
    mapper = collections.defaultdict(dict)
    # Fix: context manager closes the mapping file deterministically (the
    # original iterated over a bare open() and leaked the handle).
    with open(mapping_file) as handle:
        for line in handle:
            organism, string_alias, string_id = line.rstrip().split('\t')
            mapper[organism][string_alias] = string_id
    return dict(mapper)
#########################################################################
### RNA mapping functions
########################################################################
def get_non_coding_rna_alias_mapper():
    """
    Generates a dictionary mapping ncRNA aliases in different organisms to the
    corresponding RAIN ncRNA identifier.

    :return: a dictionary (str -> str -> str): taxonomy ID -> RNA alias -> RNA identifier
    """
    ncrna_file = os.path.join(ID_PATH, "ncRNAaliasfile.tsv.gz")
    # Pick the opener by extension; both support the context-manager protocol.
    opener = gzip.open if ncrna_file.endswith(".gz") else open
    ncrna_mapper = collections.defaultdict(dict)
    with opener(ncrna_file) as handle:
        for row in handle:
            tax, identifier, alias, source = row.strip("\n").split("\t")
            ncrna_mapper[tax][alias] = identifier
    return ncrna_mapper
def get_non_coding_rna_alias_mapper_including_source():
    """
    Generates a dictionary mapping ncRNA aliases in different organisms to the
    corresponding RAIN ncRNA identifier.  This version also keys on the source
    of the alias mapping.

    :return: a dictionary (str -> str -> str -> str): taxonomy ID -> source database -> RNA alias -> RNA identifier
    """
    ncrna_file = os.path.join(ID_PATH, "ncRNAaliasfile.tsv.gz")
    # Pick the opener by extension; both support the context-manager protocol.
    opener = gzip.open if ncrna_file.endswith(".gz") else open
    ncrna_mapper = collections.defaultdict(lambda: collections.defaultdict(dict))
    with opener(ncrna_file) as handle:
        for row in handle:
            tax, identifier, alias, source = row.strip("\n").split("\t")
            ncrna_mapper[tax][source][alias] = identifier
    return ncrna_mapper
def get_rna_identifiers_in_organism(rna_aliases_file):
    """
    :param rna_aliases_file: RNA alias file as created by script create_rna_aliases_file.py
    :return: a dictionary: taxonomy ID -> set of RNA identifiers
    """
    mapper = collections.defaultdict(set)
    # Fix: open in text mode ('rt').  The original used 'rb', which yields
    # bytes on Python 3 and makes rstrip('\n\r')/split('\t') raise TypeError.
    with gzip.open(rna_aliases_file, 'rt') as rna_file:
        # skip header
        next(rna_file)
        for line in rna_file:
            tax, rna_id, rna_alias, sources_string = line.rstrip('\n\r').split('\t')
            mapper[tax].add(rna_id)
    return mapper
################################################################################
# NPINTER mappers
################################################################################
######### UniProt to STRINGidentifier
######### RefSeq(NM) to STRING identifier
#RefSeq (NM_ mRNA) to ENSP
############ EnsemblID to GeneName
############# RefSeq (NR_ ncRNA) to GeneName (HGNC,FlyBase,?) (ENSEMBL conversion in between)
################ NONCODE 2 GeneName (NCv3->NCv4->(ENST or RefSeqncRNA(NR_))->ENSG->Genename)
################################################################################
# benchmark functions
################################################################################
def discrete_benchmark(organism_ids, rna_ids, protein_ids, assigned_bins, gold_standard_file,
                       out_file_name='test', filtered_assigned_bins=None):
    """
    Computes confidence for a set of interactions where each interactions is assigned to one or several bins.
    The confidence of each bin is the precision with respect to the gold standard but restricted to RNAs and proteins
    that also occur in the given gold standard set of interactions.
    Finally, the confidence of an interaction is the maximum confidence of all bins it is assigned to.
    :param organism_ids: collection of strings - taxonomy identifiers of the organism where the interaction was observed
    :param rna_ids: collection of strings - identifiers of the interacting RNAs
    :param protein_ids: collection of strings - identifiers of the interacting proteins
    :param assigned_bins: collection of collections of strings - the bins each interaction is assigned to
    :param gold_standard_file: string - name of the gold standard file to be used for scoring
    :param out_file_name: name of the output file, a diagnostic output is written to
    :param filtered_assigned_bins: the bins used for scoring, defaults to assigned_bins if unset
    :return: list of float - the confidence of each interaction or nan if no confidence could be computed
    """
    if filtered_assigned_bins is None:
        filtered_assigned_bins = assigned_bins
    # Maps each pair of interacting RNAs and proteins to a list of bins assigned to this interaction
    interactions_to_bins = {}
    # Maps each bin to the number of occurrences in this data set
    bin_to_occurrences = collections.defaultdict(int)
    for org, rna, protein, bins in zip(organism_ids, rna_ids, protein_ids, filtered_assigned_bins):
        # for org, rna, protein, bins in zip(organism_ids, rna_ids, protein_ids, assigned_bins):
        # Make sure that all assigned bins are a list, tuple or set
        if not isinstance(bins, (list, tuple, set)):
            bins_collection = [bins]
        else:
            bins_collection = bins
        for b in bins_collection:
            bin_to_occurrences[b] += 1
        if org not in interactions_to_bins:
            interactions_to_bins[org] = {(rna, protein): bins_collection}
        else:
            interactions_to_bins[org][rna, protein] = bins_collection
    # Returns a dict of interactions as described above for the gold standard and two additional dicts for the RNA and
    # protein identifiers that simply map the identifiers to themselves
    gold_interactions, gold_rnas, gold_proteins = parse_marster_file(gold_standard_file)
    # Restrict the benchmark to entities seen in BOTH this data set and the
    # gold standard, so precision is not diluted by unassessable pairs.
    common_rnas = get_common(gold_rnas, rna_ids)
    logger.info("The number of common ncRNAs is: " + str(len(common_rnas)))
    common_proteins = get_common(gold_proteins, protein_ids)
    logger.info("The number of common proteins is: " + str(len(common_proteins)))
    logger.info("Started benchmarking the data set")
    positive = 1
    negative = 0
    # Vector of two-element tuples of the form (this_bin, 0 or 1) where the second element is 1 if a TP interaction was found
    # for the respective this_bin and 0 if a FP interaction was seen
    vector = []
    positives = 0
    negatives = 0
    for org in interactions_to_bins.keys():
        for rna, protein in interactions_to_bins[org].keys():
            bins = interactions_to_bins[org][(rna, protein)]
            if (rna in common_rnas) and (protein in common_proteins):
                for curr_bin in bins:
                    # Bin "0" is skipped -- presumably a sentinel for
                    # "no assay/bin assigned"; TODO confirm with callers.
                    if curr_bin == "0":
                        continue
                    if (rna, protein) in gold_interactions[org]:
                        vector.append((curr_bin, positive))
                        positives += 1
                    else:
                        vector.append((curr_bin, negative))
                        negatives += 1
    vector.sort(key= lambda x: x[0])
    # Map each bin to the number of TP and the number of (TP+FP)
    bin_to_tps = collections.defaultdict(int)
    bin_to_total = collections.defaultdict(int)
    for bin_name, pos_or_neg in vector:
        bin_to_tps[bin_name] += pos_or_neg
        bin_to_total[bin_name] += 1
    bin_to_confidence = {}
    for bin_name, tps in sorted(bin_to_tps.items()):
        tps *= SCALING_FACTOR
        total = bin_to_total[bin_name]
        bin_to_confidence[bin_name] = min(tps / float(total), 0.9) # Highest possible confidence is 0.9
        logger.info('bin {0}: {1} / {2} = {3}'.format(bin_name, tps, total, bin_to_confidence[bin_name]))
    interaction_confidences = []
    for bins in assigned_bins:  # we use assigned_bins to add low throughput back when scoring the interactions
        if not isinstance(bins, (list, tuple, set)):
            bins_collection = [bins]
        else:
            bins_collection = bins
        # An interaction's confidence is the best confidence of any of its bins.
        max_conf = float('-inf')
        for curr_bin in bins_collection:
            if curr_bin in bin_to_confidence:
                curr_conf = bin_to_confidence[curr_bin]
                if curr_conf > max_conf:
                    max_conf = curr_conf
        if max_conf == float('-inf'):
            max_conf = float('nan')
        interaction_confidences.append(max_conf)
    # Print confidences to file and stderr for diagnosis
    out_file_name_full = os.path.join(FIG_PATH, out_file_name + '.txt')
    with open(out_file_name_full, 'w') as f_out:
        f_out.write('\t'.join(("Assay", "Occurrences", "TP", "TP+FP", "Precision")) + "\n")
        for this_bin in sorted(bin_to_occurrences.keys()):
            bin_occurrences = bin_to_occurrences[this_bin]
            if this_bin in bin_to_confidence:
                tps = bin_to_tps[this_bin]
                tot = bin_to_total[this_bin]
                conf = bin_to_confidence[this_bin]
            else:
                tps = 0
                tot = 0
                conf = float('nan')
            f_out.write("\t".join((this_bin, str(bin_occurrences), str(tps), str(tot), str(conf)))+ "\n")
    logger.info("Finished benchmarking the data set.")
    return interaction_confidences
def benchmark(organisms, rna_ids, protein_ids, scores, gold_standard_file,
              increases=True, window_size=100, fit_name='test', discrete=False, max_value=0.9,
              ignore_fraction=0.0, filtered_scores=None):
    """Benchmark interaction scores against a gold-standard master file.

    needs 4 args: organisms, rna_ids, protein_ids and scores are parallel vectors
    optional args:
      increases       - True if higher scores are better, otherwise False
      window_size     - used to estimate the overlap between scores and the gold standard
      gold_standard_file - file in master format to benchmark against
      discrete        - if True, delegate to discrete_benchmark (bin-based scoring)
      filtered_scores - the scores actually used while benchmarking; often the same
        as `scores`, but differs for miRTarBase/NPInter where low-throughput data is
        removed during benchmarking and reintroduced when scoring
    :return: list of 'confidence scores' parallel to the inputs
    """
    if discrete:
        return discrete_benchmark(organisms, rna_ids, protein_ids, scores,
                                  gold_standard_file,
                                  out_file_name=fit_name,
                                  filtered_assigned_bins=filtered_scores)
    # Continuous scoring
    original_scores = scores
    data = {}
    data_rnas, data_prots = rna_ids, protein_ids
    for org, rna_id, prot_id, score in zip(organisms, rna_ids, protein_ids, scores):
        if org not in data:
            data[org] = {(rna_id, prot_id): score}
        else:
            data[org][rna_id, prot_id] = score
    gold_data, gold_rnas, gold_prots = parse_marster_file(gold_standard_file)
    # Restrict to entities present in both this data set and the gold standard.
    common_rnas = get_common(gold_rnas, data_rnas)
    logger.info("The number of common ncRNAs is: " + str(len(common_rnas)))
    common_prots = get_common(gold_prots, data_prots)
    logger.info("The number of common Proteins is: " + str(len(common_prots)))
    logger.info("Benchmarking the dataset.")
    positive = 1
    negative = 0
    vector = []
    positives = 0
    negatives = 0
    for organism in data.keys():
        for rna, prot in data[organism].keys():
            if (rna in common_rnas) and (prot in common_prots):
                score = data[organism][(rna, prot)]
                # Fix: `dict.has_key` was removed in Python 3; the `in`
                # operator is equivalent and works on both Python 2 and 3.
                if (rna, prot) in gold_data[organism]:
                    vector.append((score, positive))
                    positives += 1
                else:
                    vector.append((score, negative))
                    negatives += 1
    vector.sort(key=lambda x: x[0])
    scores, vector = zip(*vector)
    # Smooth both the scores and the 0/1 labels over a sliding window before fitting.
    scores = moving_avg(scores, window_size)
    vector = moving_avg(vector, window_size)
    logger.info("Total positives: " + str(positives) + "," + " Total negatives: " + str(negatives))
    f = fit_to_sigmoid(np.array(scores), np.array(vector) * SCALING_FACTOR,
                       increases=increases, fit_name=fit_name, max_value=max_value,
                       window_size=window_size, ignore_fraction=ignore_fraction)
    # Fix: return a concrete list rather than a lazy `map` object so the
    # documented "list of confidence scores" contract also holds on Python 3
    # (identical to Python 2's map() result).
    return [f(s) for s in np.array(original_scores)]
########################################################
# combine master files
########################################################
# defcombine_masterfiles(prediction_files = PREDICTION_FILES, out_file=COMBINED_PREDICTION_FILE):
########################################################
# Misc
########################################################
def get_string_10_species():
    """
    :return: a sorted list of strings where each string is the taxonomy
             identifier of a species in STRING 10
    """
    # sorted() materialises and orders the dict values in one step.
    return sorted(species_name_to_taxonomy_id().values())
def starbase_exp_pmids():
    """
    :return: set of ints - the PubMed IDs of publication whose experimental data sets have been integrated in StarBase
    """
    pmids = set()
    with open(STARBASE_PMIDs, 'r') as f:
        for line in f:
            # skip comment lines
            if line.startswith("#"):
                continue
            # Fix: the original called line.rstrip() and discarded the result
            # (a no-op); strip explicitly before parsing.
            pmids.add(int(line.strip()))
    return pmids
def species_name_to_taxonomy_id():
    """
    :return: dictionary, string -> string, that maps species names to their
             taxonomy identifiers.  Based on the STRING species file.
    """
    name_to_tax = {}
    string_species_file = os.path.join(DATA_PATH, STRING_SPECIES_100.split('/')[-1])
    with open(string_species_file, 'r') as species_handle:
        next(species_handle)  # skip the header line
        for row in species_handle:
            taxonomy_id, string_type, string_name, ncbi_name = row.strip().split('\t')
            # Key on the lower-cased NCBI name for case-insensitive lookups.
            name_to_tax[ncbi_name.lower()] = taxonomy_id
    return name_to_tax
def qq_correct( input_dict, ofig="None", ref_tax = "9606" ):
    """
    qq_correct: Quantile normalization of organisms to the reference (human).

    Mutates and returns `input_dict`: scores of every non-reference organism
    are replaced by reference-organism scores at the matching quantile.

    Arguments   Type        Description
    -----------------------------------
    input_dict  Dictionary  Two layer dictionary of type input_dict[ tax_id ][ interaction_key ]
    ofig        str         Output file name for diagnostic histograms
                            ("None" disables plotting)
    ref_tax     str         Taxonomy id of reference organism, default 9606
    """
    #----------------
    # Load packages
    #----------------
    from scipy.interpolate import interp1d
    from scipy import stats
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages
    #----------------
    # Verify arguments
    #----------------
    if not isinstance(ref_tax, str):
        raise TypeError("func qq_correct, argument human_tax must be a string")
    if not isinstance(ofig, str):
        raise TypeError("func qq_correct, argument ofig must be a string")
    if not isinstance(input_dict, dict):
        raise TypeError("func qq_correct, argument input_dict must be a dictionary")
    if not ref_tax in input_dict.keys():
        raise ValueError( "Dictionary doesn't hold the ref taxonomy %s"%ref_tax )
    #----------------
    # Define human quantiles and quantile mapping function
    #----------------
    # NOTE(review): np.array(dict.values()) assumes Python 2 where values()
    # returns a list; on Python 3 this yields a 0-d object array -- confirm
    # this module targets Python 2 or wrap in list().
    ref_scores = np.sort( np.array( input_dict[ref_tax].values() ) )
    ref_scores_min = np.min(ref_scores)
    ref_scores_max = np.max(ref_scores)
    # Normalised ranks in (0, 1]; interp1d maps a rank back to a reference score.
    ref_rank_scores = stats.rankdata(ref_scores, "average")/len(ref_scores)
    ref_rank_scores_min = np.min(ref_rank_scores)
    ref_rank_scores_max = np.max(ref_rank_scores)
    qq_func = interp1d( ref_rank_scores, ref_scores, kind='linear')
    #----------------
    # perform quantile normalization
    #----------------
    pdf = None
    if not ofig=="None":
        pdf = PdfPages( os.path.join(FIG_PATH, ofig ) )
    for taxonomy in [ tax for tax in input_dict.keys() if not tax==ref_tax ]:
        keys, scores = zip(*input_dict[taxonomy].items())
        scores = np.array(scores)
        rank_scores = stats.rankdata(scores, "average")/len(scores)
        rank_scores_min = np.min(rank_scores)
        rank_scores_max = np.max(rank_scores)
        # Rescale this organism's ranks onto the reference rank interval so
        # they fall inside qq_func's interpolation domain.
        rank_scores = (rank_scores - rank_scores_min) * (ref_rank_scores_max - ref_rank_scores_min) / (rank_scores_max - rank_scores_min ) + ref_rank_scores_min
        new_scores = qq_func( rank_scores )
        new_scores[ rank_scores==ref_rank_scores_min ] = ref_scores_min # boundary issue
        new_scores[ rank_scores==ref_rank_scores_max ] = ref_scores_max # boundary issue
        # In-place update: replace this organism's scores with the quantile-mapped ones.
        input_dict[ taxonomy ] = dict([ (key,score) for key,score in zip(keys,new_scores) ])
        overall_min = np.min( (np.min(ref_scores), np.min(scores), np.min(new_scores)) )
        overall_max = np.max( (np.max(ref_scores), np.max(scores), np.max(new_scores)) )
        if not ofig=="None":
            #----------------
            # Generate histograms (before/after normalisation, one page per organism)
            #----------------
            f, axarr = plt.subplots(2, sharex=True)
            axarr[0].hist( ref_scores, color="red",alpha=0.4,normed=True,label="Taxonomy:%s"%ref_tax,log=True,bins=100)
            axarr[0].hist( scores, color="blue",alpha=0.4,normed=True,label="Taxonomy:%s"%taxonomy,log=True,bins=100)
            axarr[0].set_xlim( (overall_min,overall_max) )
            axarr[0].set_title('No Normalization')
            axarr[0].set_ylabel("Density")
            axarr[0].legend(loc='best',frameon=False)
            #
            axarr[1].hist( ref_scores, color="red",alpha=0.4,normed=True,label="Taxonomy:%s"%ref_tax,log=True,bins=100)
            axarr[1].hist( new_scores, color="blue",alpha=0.4,normed=True,label="Taxonomy:%s"%taxonomy,log=True,bins=100)
            axarr[1].set_title('Quantile Normalization')
            axarr[1].set_xlabel("Confidence Score")
            axarr[1].set_ylabel("Density")
            axarr[1].legend(loc='best',frameon=False)
            pdf.savefig(f)
            plt.close()
    if not ofig=="None":
        pdf.close()
    return input_dict
if __name__ == '__main__':
    # When run as a script, rebuild the miRBase alias mapping files for the
    # pinned release (make_mir_mapping_files is defined elsewhere in this module).
    version = 20
    path = MIR_MAPPING_ALIASES_PATH
    make_mir_mapping_files(path, version)
| [
2,
3487,
9195,
3166,
198,
11748,
28686,
198,
11748,
308,
13344,
198,
11748,
302,
198,
11748,
17268,
198,
11748,
19974,
7753,
198,
11748,
10088,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
418,
13,
6978... | 2.596798 | 10,305 |
# By Mostapha Sadeghipour Roudsari
# Sadeghipour@gmail.com
# Ladybug started by Mostapha Sadeghipour Roudsari is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
"""
Export Honeybee Objects to OpenStudio
-
Provided by Honeybee 0.0.53
Args:
openStudioLibFolder:
Returns:
readMe!: ...
"""
# check for libraries
# default is C:\\Ladybug\\OpenStudio
import os
import System
import scriptcontext as sc
import Rhino as rc
import Grasshopper.Kernel as gh
import time
import pprint
# IronPython 2 (Grasshopper) script body: locate the OpenStudio .NET
# assembly, load it via clr, and warn the user when it is missing/outdated.
# Only proceed when Honeybee has been initialised -- presumably the
# 'honeybee_release' sticky key is set by the Honeybee main component;
# TODO confirm.
if sc.sticky.has_key('honeybee_release'):
    openStudioLibFolder = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "OpenStudio")
    if os.path.isdir(openStudioLibFolder) and os.path.isfile(os.path.join(openStudioLibFolder, "openStudio.dll")):
        # openstudio is there
        # I need to add a function to check the version and compare with available version
        openStudioIsReady = True
        import clr
        clr.AddReferenceToFileAndPath(openStudioLibFolder+"\\openStudio.dll")
        import sys
        if openStudioLibFolder not in sys.path:
            sys.path.append(openStudioLibFolder)
        import OpenStudio as ops
    else:
        openStudioIsReady = False
        # let the user know that they need to download OpenStudio libraries
        msg = "Cannot find OpenStudio libraries. You can download the libraries from the link below. " + \
              "Unzip the file and copy it to " + openStudioLibFolder + " and try again. Click on the link to copy the address."
        ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
        link = "https://app.box.com/s/y2sx16k98g1lfd3r47zi"
        ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, link)
        #buttons = System.Windows.Forms.MessageBoxButtons.OK
        #icon = System.Windows.Forms.MessageBoxIcon.Warning
        #up = rc.UI.Dialogs.ShowMessageBox(msg + "\n" + link, "Duplicate Material Name", buttons, icon)
if openStudioIsReady and sc.sticky.has_key('honeybee_release') and \
    sc.sticky.has_key("isNewerOSAvailable") and sc.sticky["isNewerOSAvailable"]:
    # check if there is an update available
    msg = "There is a newer version of OpenStudio libraries available to download! " + \
          "We strongly recommend you to download the newer version from this link and replace it with current files at " + \
          openStudioLibFolder +".\n" + \
          "https://app.box.com/s/y2sx16k98g1lfd3r47zi"
    print msg
    ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
else:
    openStudioIsReady = False
# Grasshopper component metadata shown in the canvas/toolbar.
ghenv.Component.Name = "Honeybee_Get Annual SQL Data"
ghenv.Component.NickName = 'getAnnualSQLData'
ghenv.Component.Message = 'VER 0.0.55\nNOV_2_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "12 | WIP"
ghenv.Component.AdditionalHelpFromDocStrings = "2"
#ops.
#sqlPath = r'C:\Users\Administrator\Downloads\eplusout.sql'
# Read annual end-use totals from an EnergyPlus SQLite result file via the
# OpenStudio SqlFile API, converting units for reporting.
pp = pprint.PrettyPrinter(indent=4)
sqlPath = _sqlFilePath
sqlFile = ops.SqlFile(ops.Path(sqlPath))
# SqlFile reports GJ; 277.777778 kWh per GJ (routed through OptionalDouble to
# get a plain float out of the OpenStudio wrapper type).
conversionFactorElec = float(str(ops.OptionalDouble(277.777778)))
outputs={}
electricity={
    'units':'kilowatt-hours',
    'Electricity Interior Lights':float(str(sqlFile.electricityInteriorLighting())) * conversionFactorElec,
    'Electricity Exterior Lights':float(str(sqlFile.electricityExteriorLighting())) * conversionFactorElec,
    'Electricity Interior Equipment':float(str(sqlFile.electricityInteriorEquipment())) * conversionFactorElec,
    'Electricity Exterior Equipment':float(str(sqlFile.electricityExteriorEquipment())) * conversionFactorElec,
    'Electricity Heating':float(str(sqlFile.electricityHeating())) * conversionFactorElec,
    'Electricity Water Systems':float(str(sqlFile.electricityWaterSystems())) * conversionFactorElec,
    'Electricity Cooling':float(str(sqlFile.electricityCooling())) * conversionFactorElec,
    'Electricity Humidification':float(str(sqlFile.electricityHumidification())) * conversionFactorElec,
    'Electricity Heat Recovery':float(str(sqlFile.electricityHeatRecovery())) * conversionFactorElec,
    'Electricity Heat Rejection':float(str(sqlFile.electricityHeatRejection())) * conversionFactorElec,
    'Electricity Refrigeration':float(str(sqlFile.electricityRefrigeration())) * conversionFactorElec,
    'Electricity Generators':float(str(sqlFile.electricityGenerators())) * conversionFactorElec,
    'Fan Electricity':float(str(sqlFile.electricityFans())) * conversionFactorElec,
    'Pumps Electricity':float(str(sqlFile.electricityPumps())) * conversionFactorElec,
    'Electricity Total End Uses':float(str(sqlFile.electricityTotalEndUses())) * conversionFactorElec
    }
# 9.480434 therms per GJ.
conversionFactorNG = 9.480434
naturalgas={
    'units':'therms',
    'Natural Gas Heating':float(str(sqlFile.naturalGasHeating())) * conversionFactorNG,
    'Natural Gas Cooling':float(str(sqlFile.naturalGasCooling())) * conversionFactorNG,
    'Natural Gas Interior Lighting':float(str(sqlFile.naturalGasInteriorLighting())) * conversionFactorNG,
    'Natural Gas Exterior Lighting':float(str(sqlFile.naturalGasExteriorLighting())) * conversionFactorNG,
    'Natural Gas Interior Equipment':float(str(sqlFile.naturalGasInteriorEquipment())) * conversionFactorNG,
    'Natural Gas Exterior Equipment':float(str(sqlFile.naturalGasExteriorEquipment())) * conversionFactorNG,
    'Natural Gas Fans':float(str(sqlFile.naturalGasFans())) * conversionFactorNG,
    'Natural Gas Pumps':float(str(sqlFile.naturalGasPumps())) * conversionFactorNG,
    'Natural Gas Heat Rejection':float(str(sqlFile.naturalGasHeatRejection())) * conversionFactorNG,
    'Natural Gas Humidification':float(str(sqlFile.naturalGasHumidification())) * conversionFactorNG,
    'Natural Gas Heat Recovery':float(str(sqlFile.naturalGasHeatRecovery())) * conversionFactorNG,
    'Natural Gas Water Systems':float(str(sqlFile.naturalGasWaterSystems())) * conversionFactorNG,
    'Natural Gas Refrigeration':float(str(sqlFile.naturalGasRefrigeration())) * conversionFactorNG,
    'Natural Gas Generators':float(str(sqlFile.naturalGasGenerators())) * conversionFactorNG,
    'Natural Gas Total End Uses':float(str(sqlFile.naturalGasTotalEndUses())) * conversionFactorNG,
    }
outputs['elec']=electricity
outputs['ng']=naturalgas
pp.pprint(outputs)
print "GJ to kWy Conversion Factor: " + str(conversionFactorElec)
print "GH to therms Conversion Factor: " + str(conversionFactorNG)
# NOTE(review): checkVals and dictToClass are not defined in this view --
# presumably defined earlier in the component source; verify before reuse.
retval = checkVals(outputs['elec'],'Electricity Total End Uses')
print "Electricty test: " + retval
retval = checkVals(outputs['ng'],'Natural Gas Total End Uses')
print "Natural gas test: " + retval
allAnnualTotals = dictToClass(outputs)
annualElectricity = dictToClass(outputs['elec'])
annualNaturalgas = dictToClass(outputs['ng'])
2,
2750,
4042,
499,
3099,
311,
671,
456,
541,
454,
371,
2778,
82,
2743,
198,
2,
311,
671,
456,
541,
454,
31,
14816,
13,
785,
198,
2,
11182,
25456,
2067,
416,
4042,
499,
3099,
311,
671,
456,
541,
454,
371,
2778,
82,
2743,
318,
11... | 2.925302 | 2,316 |
import petsc4py, sys
# I like to be in control of the command line
petsc4py.init(sys.argv)
from dolfin import *
from petsc4py import PETSc
# FEniCS form-compiler settings: uflacs representation with aggressive C++
# optimisation flags; 'shared_facet' ghost mode is required for interior-facet
# (dS) integrals in parallel.
parameters['form_compiler']['representation'] = 'uflacs'
parameters['form_compiler']['cpp_optimize'] = True
parameters['form_compiler']['cpp_optimize_flags'] = '-O3 -ffast-math -march=native'
parameters['ghost_mode'] = 'shared_facet'
opts = PETSc.Options()
mesh_file = '2Dtest/cell_grid_2d.h5'
#mesh_file = '500Kdofs/cell_grid.h5'
#mesh_file = '8Mdofs/cell_grid.h5'
#mesh_file = 'cell_grid_2d.h5'
comm = MPI.comm_world
h5 = HDF5File(comm, mesh_file, 'r')
mesh = Mesh()
h5.read(mesh, 'mesh', False)
# The mesh comes in micro meters. Below it is more convenient to work in cm
mesh.coordinates()[:] *= 1E-4
# Facets in the mesh have tags 0, 1, 2. One is for interfaces between
# cells and cells and the exterior. Two is used for marking boundary facets
# of the domain - this is where typically zero DirichletBCs are applied
# for the potential
surfaces = MeshFunction('size_t', mesh, mesh.topology().dim()-1)
h5.read(surfaces, 'facet')
# The domain is split into 2 subdomains marked as 1 and 2 (cell interior,
# cell exterior). These differ by conductivities
volumes = MeshFunction('size_t', mesh, mesh.topology().dim())
h5.read(volumes, 'physical')
cell = mesh.ufl_cell()
# We have 3 spaces S for sigma = -kappa*grad(u) [~electric field]
#             U for potential u
#             Q for transmebrane potential p
Sel = FiniteElement('RT', cell, 1)
Vel = FiniteElement('DG', cell, 0)
Qel = FiniteElement('Discontinuous Lagrange Trace', cell, 0)
W = FunctionSpace(mesh, MixedElement([Sel, Vel, Qel]))
sigma, u, p = TrialFunctions(W)
tau, v, q = TestFunctions(W)
# Grounding for potential
bcs = [DirichletBC(W.sub(2), Constant(0), surfaces, 2)]
#file = File("Volumes.pvd")
#file << volumes
#file = File("Surfaces.pvd")
#file << surfaces
# Make measures aware of subdomains
dx = Measure('dx', domain=mesh, subdomain_data=volumes)
dS = Measure('dS', domain=mesh, subdomain_data=surfaces)
ds = Measure('ds', domain=mesh, subdomain_data=surfaces)
# Normal fo the INTERIOR surface. Note that 1, 2 marking of volume makes
# 2 cells the '+' cells w.r.t to surface and n('+') would therefore be their
# outer normal (that is an outer normal of the outside). ('-') makes the orientation
# right
n = FacetNormal(mesh)('-')
# Now onto the weak form
# Electric properties of membrane and interior/exterior
C_m = Constant(1)        # 1 mu F / cm^2
cond_int = Constant(5)   # 5 mS / cm
cond_ext = Constant(20)  # 20 mS / cm
# Time step
dt_fem = Constant(1E-3)  # ms
# The source term as a function Q is coming from ODE solver. Here it is
# just some random function
Q = FunctionSpace(mesh, Qel)
p0 = interpolate(Constant(1), Q)
# And additional source on the boundary is the ionic current. For simplicity
I_ion = p0
# The system: mixed saddle-point form in (sigma, u, p).  dx(1)/dx(2) terms use
# the interior/exterior conductivity; dS(1) terms couple the flux to the
# transmembrane potential on the membrane; the C_m/dt term is the implicit
# Euler discretisation of the membrane capacitance.
a = ((1/cond_int)*inner(sigma, tau)*dx(1)+(1/cond_ext)*inner(sigma, tau)*dx(2)
     - inner(div(tau), u)*dx(1) - inner(div(tau), u)*dx(2)
     + inner(p('+'), dot(tau('+'), n))*dS(1)
     - inner(div(sigma), v)*dx(1) - inner(div(sigma), v)*dx(2)
     + inner(q('+'), dot(sigma('+'), n))*dS(1)
     - (C_m/dt_fem)*inner(q('+'), p('+'))*dS(1))
L = inner(q('+'), I_ion('+')-(C_m/dt_fem)*p0('+'))*dS(1)
# Additional terms to set to zero the dofs of W.sub(2) which are not on
# the interfaces
a -= inner(p('+'), q('+'))*dS(0) + inner(p, q)*ds(2)
L -= inner(Constant(0)('+'), q('+'))*dS(0) + inner(Constant(0), q)*ds(2)
# In the presence of internal facet terms, the local to global maps have ghost cells (through shared facets)
# However, only one process insert values there -> we need to prune empty local rows/columns
# from the other processes. This can be done programmatically from PETSc, but it is currently
# in a development branch
opts.setValue("-test_matis_fixempty", None)
# Assemble system: matrix in unassembled (MATIS) format
A, b = PETScMatrix(), PETScVector()
as_backend_type(A).mat().setOptionsPrefix("test_")
as_backend_type(A).mat().setType("is")
as_backend_type(A).mat().setFromOptions()
assemble_system(a, L, bcs, A_tensor=A, b_tensor=b)
## test unassembled format
#Aij, bij = PETScMatrix(), PETScVector()
#assemble_system(a, L, bcs, A_tensor=Aij, b_tensor=bij)
#A1 = PETSc.Mat()
#PETSc.Sys.Print("CONVERTING")
#as_backend_type(A).mat().convert('aij',A1)
#A1.axpy(-1,as_backend_type(Aij).mat())
#PETSc.Sys.Print('Norm assembly: %f' % A1.norm(PETSc.NormType.INFINITY))
#del A1, Aij
## test reassembly
#A1 = as_backend_type(A).mat().duplicate(copy=True)
#as_backend_type(A).mat().zeroEntries()
#assemble_system(a, L, bcs, A_tensor=A, b_tensor=b)
#
#A1.axpy(-1,as_backend_type(A).mat())
#PETSc.Sys.Print('Norm reassembly: %f' % A1.norm(PETSc.NormType.INFINITY))
#A1.viewFromOptions("-diff_view")
as_backend_type(A).mat().viewFromOptions("-my_view")
# Create PETSc Krylov solver (from petsc4py)
ksp = PETSc.KSP()
ksp.create(PETSc.COMM_WORLD)
ksp.setOptionsPrefix("test_")
# Set the Krylov solver type and set tolerances
# We can use CG since DRT dofs will never be part of the interface between subdomains
# This is because DRT dofs are only coupled to RT dofs and not cell dofs
# Note: If DRT dofs were coupled to DG dofs, we could have put all the DRT dofs (see setBDDCPrimalVerticesIS) that lie on the
#       interface between subdomains in the primal space
# The coarse matrix will be diagonal on those DRT dofs (most of them) that are not on
# the intra/extra cellular interface
ksp.setType("cg")
ksp.setTolerances(rtol=1.0e-8, atol=1.0e-12, divtol=1.0e10, max_it=300)
ksp.setOperators(as_backend_type(A).mat())
# BDDC domain-decomposition preconditioner; requires the MATIS format set above.
pc = ksp.getPC()
pc.setType("bddc")
#pc.setBDDCPrimalVerticesIS(PETSc.IS().createGeneral(W.sub(2).dofmap().dofs(),PETSc.COMM_WORLD))
# Options
opts.setValue("-test_ksp_view", None)
opts.setValue("-test_ksp_converged_reason", None)
opts.setValue("-test_ksp_monitor_singular_value", None)
opts.setValue("-test_ksp_norm_type", "natural")
# Don't turn these off
opts.setValue("-test_pc_bddc_detect_disconnected", None)
opts.setValue("-test_pc_bddc_use_faces", None)
opts.setValue("-test_pc_bddc_benign_trick", None)
opts.setValue("-test_pc_bddc_nonetflux", None)
opts.setValue("-test_pc_bddc_schur_exact", None)
opts.setValue("-test_pc_bddc_use_deluxe_scaling", None)
opts.setValue("-test_pc_bddc_deluxe_zerorows", None)
opts.setValue("-test_pc_bddc_use_local_mat_graph", "0")
opts.setValue("-test_pc_bddc_adaptive_userdefined", None)
# Better off you have MUMPS or SUITESPARSE for the factorizations
# Sometimes MUMPS fails with error -9 (increase Schur workspace....this is annoying)
opts.setValue("-test_sub_schurs_mat_mumps_icntl_14",500)
opts.setValue("-mat_mumps_icntl_14",500)
# Local solvers (MUMPS)
opts.setValue("-test_pc_bddc_dirichlet_pc_type","cholesky") # This is actually LDL^T
opts.setValue("-test_pc_bddc_neumann_pc_type","cholesky") # This is actually LDL^T
opts.setValue("-test_pc_bddc_dirichlet_pc_factor_mat_solver_type","mumps")
opts.setValue("-test_pc_bddc_neumann_pc_factor_mat_solver_type","mumps")
# Alternative local factorizations with SUITESPARSE
#opts.setValue("-test_pc_bddc_dirichlet_pc_type","lu")
#opts.setValue("-test_pc_bddc_neumann_pc_type","lu")
#opts.setValue("-test_pc_bddc_dirichlet_pc_factor_mat_solver_type","umfpack")
#opts.setValue("-test_pc_bddc_neumann_pc_factor_mat_solver_type","umfpack")
# Alternative redundant coarse factorization with SUITESPARSE
#opts.setValue("-test_pc_bddc_coarse_pc_type","redundant")
#opts.setValue("-test_pc_bddc_coarse_redundant_pc_factor_mat_solver_type","umfpack")
# Number of additional levels : 0 means standard 2-level BDDC
nlevels = 1
# Coarse solver (MUMPS or BDDC)
opts.setValue("-test_pc_bddc_coarse_pc_factor_mat_solver_type","mumps")
if nlevels < 1:
opts.setValue("-test_pc_bddc_coarse_pc_type","cholesky") # This is actually LDL^T
opts.setValue("-test_pc_bddc_levels",nlevels)
opts.setValue("-test_pc_bddc_coarse_sub_schurs_mat_mumps_icntl_14",500)
opts.setValue("-test_pc_bddc_coarse_pc_bddc_use_deluxe_scaling",None)
opts.setValue("-test_pc_bddc_coarse_pc_bddc_deluxe_zerorows",None)
opts.setValue("-test_pc_bddc_coarse_pc_bddc_schur_exact",None)
opts.setValue("-test_pc_bddc_coarse_pc_bddc_use_local_mat_graph",None)
opts.setValue("-test_pc_bddc_coarse_check_ksp_monitor",None)
opts.setValue("-test_pc_bddc_coarse_check_ksp_converged_reason",None)
opts.setValue("-test_pc_bddc_coarse_check_ksp_type","cg")
opts.setValue("-test_pc_bddc_coarse_check_ksp_norm_type","natural")
#opts.setValue("test_pc_bddc_coarse_ksp_type","chebyshev")
#opts.setValue("test_pc_bddc_use_coarse_estimates",None)
#opts.setValue("test_pc_bddc_coarse_pc_bddc_use_coarse_estimates",None)
for j in range(0, nlevels):
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_sub_schurs_mat_mumps_icntl_14",500)
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_pc_bddc_use_deluxe_scaling",None)
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_pc_bddc_deluxe_zerorows",None)
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_pc_bddc_schur_exact",None)
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_pc_bddc_use_local_mat_graph",None)
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_check_ksp_type","cg")
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_check_ksp_norm_type","natural")
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_ksp_type","chebyshev")
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_check_ksp_monitor",None)
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_check_ksp_converged_reason",None)
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_check_ksp_type","cg")
opts.setValue("-test_pc_bddc_coarse_l" + str(j) + "_check_ksp_norm_type","natural")
#opts.setValue("test_pc_bddc_coarse_l" + str(j) + "_ksp_type","chebyshev")
#opts.setValue("test_pc_bddc_coarse_l" + str(j) + "_pc_bddc_use_coarse_estimates",None);
# prevent to have a bad coarse solver
opts.setValue("-test_pc_bddc_coarse_l3_redundant_pc_factor_mat_solver_package","mumps");
opts.setValue("-test_pc_bddc_coarse_l2_redundant_pc_factor_mat_solver_package","mumps");
opts.setValue("-test_pc_bddc_coarse_l1_redundant_pc_factor_mat_solver_package","mumps");
opts.setValue("-test_pc_bddc_coarse_redundant_pc_factor_mat_solver_package","mumps")
#Solve
ksp.setFromOptions()
sol = Function(W)
ksp.solve(as_backend_type(b).vec(), as_backend_type(sol.vector()).vec())
# prevent from deadlocks when garbage collecting
del pc, ksp
#as_backend_type(sol.vector()).update_ghost_values()
#(sols, solv, solq) = sol.split()
#file = File("electric.pvd")
#file << sols
#file = File("potential.pvd")
#file << solv
##file = File("tpotential.pvd")
##file << solq
| [
11748,
17252,
66,
19,
9078,
11,
25064,
198,
198,
2,
314,
588,
284,
307,
287,
1630,
286,
262,
3141,
1627,
198,
79,
1039,
66,
19,
9078,
13,
15003,
7,
17597,
13,
853,
85,
8,
198,
198,
6738,
288,
4024,
259,
1330,
1635,
198,
6738,
17... | 2.394489 | 4,464 |
from unittest import TestCase
from employees import Employee, Job, sort_employees_by_salary
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
4409,
1330,
36824,
11,
15768,
11,
3297,
62,
7033,
2841,
62,
1525,
62,
21680,
560,
628,
198
] | 3.518519 | 27 |
import numpy as np
import matplotlib.pyplot as plt
# remove cv2 before you submit to autograder
import cv2
import os
from PIL import Image
def img_tile(imgs, path, filename, save, aspect_ratio=1.0, border=1, border_color=0):
"""
Visualize the WGAN result for each step
:param imgs: Numpy array of the generated images
:param path: Path to save visualized results for each epoch
:param epoch: Epoch index
:param save: Boolean value to determine whether you want to save the result or not
"""
if imgs.ndim != 3 and imgs.ndim != 4:
raise ValueError('imgs has wrong number of dimensions.')
n_imgs = imgs.shape[0]
tile_shape = None
# Grid shape
img_shape = np.array(imgs.shape[1:3])
if tile_shape is None:
img_aspect_ratio = img_shape[1] / float(img_shape[0])
aspect_ratio *= img_aspect_ratio
tile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))
tile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))
grid_shape = np.array((tile_height, tile_width))
else:
assert len(tile_shape) == 2
grid_shape = np.array(tile_shape)
# Tile image shape
tile_img_shape = np.array(imgs.shape[1:])
tile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border
# Assemble tile image
tile_img = np.empty(tile_img_shape)
tile_img[:] = border_color
for i in range(grid_shape[0]):
for j in range(grid_shape[1]):
img_idx = j + i * grid_shape[1]
if img_idx >= n_imgs:
# No more images - stop filling out the grid.
break
# -1~1 to 0~1
img = (imgs[img_idx] + 1) / 2.0 # * 255.0
yoff = (img_shape[0] + border) * i
xoff = (img_shape[1] + border) * j
tile_img[yoff:yoff + img_shape[0], xoff:xoff + img_shape[1], ...] = img
# path_name = path + "/epoch_%03d" % (epoch) + ".jpg"
path_name = path + "/" + filename + ".jpg"
##########################################
# Change code below if you want to save results using PIL
##########################################
tile_img = cv2.resize(tile_img, (256, 256))
cv2.imshow("Results", tile_img)
cv2.waitKey(1)
if save:
cv2.imwrite(path_name, tile_img * 255)
print('Saving image')
def mnist_reader(numbers):
"""
Read MNIST dataset with specific numbers you needed
:param numbers: A list of number from 0 - 9 as you needed
:return: A tuple of a numpy array with specific numbers MNIST training dataset,
labels of the training set and the length of the training dataset.
"""
# Training Data
f = open('./data/train-images.idx3-ubyte')
loaded = np.fromfile(file=f, dtype=np.uint8)
trainX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float32) / 127.5 - 1
f = open('./data/train-labels.idx1-ubyte')
loaded = np.fromfile(file=f, dtype=np.uint8)
trainY = loaded[8:].reshape((60000)).astype(np.int32)
_trainX = []
for idx in range(0, len(trainX)):
if trainY[idx] in numbers:
_trainX.append(trainX[idx])
return np.array(_trainX), trainY, len(_trainX)
def BCE_loss(x, y):
"""
Binary Cross Entropy Loss for VAE
"""
epsilon = 10e-8
loss = np.sum(-y * np.log(x + epsilon) - (1 - y) * np.log(1 - x + epsilon))
return loss
def img_save(imgs, path, epoch):
"""
Save the generated images for each epoch for VAE
:param imgs: (batch_size, 28, 28)
:param path: path to save the imgs
:param epoch: # of epoch
:return:
"""
aspect_ratio = 1.0
border = 1
border_color = 0
if not os.path.exists(path):
os.mkdir(path)
img_num = imgs.shape[0] # 64 batch_size
# Grid-like images
img_shape = np.array(imgs.shape[1:3])
img_aspect_ratio = img_shape[1] / float(img_shape[0])
aspect_ratio *= img_aspect_ratio
tile_height = int(np.ceil(np.sqrt(img_num * aspect_ratio)))
tile_width = int(np.ceil(np.sqrt(img_num / aspect_ratio)))
grid_shape = np.array((tile_height, tile_width))
tile_img_shape = np.array(imgs.shape[1:])
tile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border
tile_img = np.empty(tile_img_shape)
tile_img[:] = border_color
for i in range(grid_shape[0]):
for j in range(grid_shape[1]):
img_idx = j + i * grid_shape[1]
if img_idx >= img_num:
break
img = imgs[img_idx]
yoff = (img_shape[0] + border) * i
xoff = (img_shape[1] + border) * j
tile_img[yoff:yoff + img_shape[0], xoff:xoff + img_shape[1], ...] = img
file_name = path + "/iteration_{}.png".format(epoch)
img = Image.fromarray(np.uint8(tile_img * 255), 'L')
img.save(file_name)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
2,
4781,
269,
85,
17,
878,
345,
9199,
284,
1960,
519,
81,
5067,
198,
11748,
269,
85,
17,
198,
11748,
28686,
198,
6738,
350,
4146,
1330... | 2.232934 | 2,168 |
from django.contrib import messages
from django.contrib.auth import authenticate, login, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.shortcuts import redirect, render
from .forms import EmailForm, RegistrationForm
def register(request):
"""
Handles RegistrationForm, login the new user,
and redirects to home page
"""
if request.user.is_authenticated():
return redirect('home')
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=password)
login(request, user)
return redirect('home')
else:
form = RegistrationForm()
return render(request, 'register.html', {'form':form})
@login_required
def account_settings(request):
"""
Displays EmailForm and PasswordChangeForm
"""
email_form = EmailForm(instance=request.user)
password_form = PasswordChangeForm(request.user)
return render(request,
'account-settings.html',
{'email_form':email_form,
'password_form':password_form})
@login_required
def change_email(request):
"""
Handles EmailForm and responds to user using messages
"""
if request.method == 'POST':
form = EmailForm(instance=request.user, data=request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Your Email was updated successfully!')
else:
messages.error(request, 'Please enter valid data.')
return redirect('account_settings')
@login_required
def change_password(request):
"""
Handles PasswordChangeForm and responds to user using messages
"""
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user)
messages.success(request, 'Your password was successfully updated!')
else:
messages.error(request, 'Please enter valid data')
return redirect('account_settings')
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
11,
17594,
11,
4296,
62,
29891,
62,
18439,
62,
17831,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
27... | 2.651498 | 901 |
#Dependencies
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#Boilerplate
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurements = Base.classes.measurements_table
Stations = Base.classes.stations_table
session = Session(engine)
#Start flask app
app = Flask(__name__)
#Create root directory with list of all available pages
@app.route("/")
#Precipitation page, finds all temperature data taken in the last year
@app.route("/api/v1.0/precipitation")
#Stations page, finds all stations
@app.route("/api/v1.0/stations")
def stations():
"""Return a list of all stations"""
# Query all Stations
results = session.query(Stations.station).all()
all_stations = list(results)
return jsonify(all_stations)
#Observed temperature page, takes all temperatures in the last year (identical to precipitation page)
@app.route("/api/v1.0/tobs")
#Queries min average and max temperature after a certain start date
@app.route("/api/v1.0/<start>")
#Queries min average and max temperature in a date range defined by a start and end date
@app.route("/api/v1.0/<start>/<end>")
#Runs the Flask App
if __name__ == '__main__':
app.run(debug=True)
| [
2,
35,
2690,
3976,
198,
11748,
4818,
8079,
355,
288,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
44161,
282,
26599,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
2306,
296,
499,
1330,
... | 3.029724 | 471 |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.series.qf_series import QFSeries
| [
2,
220,
220,
220,
220,
15069,
1584,
12,
25579,
327,
28778,
784,
3427,
12275,
329,
19229,
4992,
198,
2,
198,
2,
220,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
2... | 3.304167 | 240 |
import json
from os import wait
import time
import math
import threading
from .HWIO.LEDController import LEDController
from .HWIO.SwitchController import SwitchController
from .HWIO.ServoController import ServoController
from .KinematicsDB import getKinematicsDB
eRobotState_VOID = 0
eRobotState_LOAD = 1
eRobotState_WAKE = 2
eRobotState_RUN = 3
eRobotState_DROWZEE = 4
eRobotState_SLEEP = 5
cServoOffPosition = 360
| [
11748,
33918,
198,
6738,
28686,
1330,
4043,
198,
11748,
640,
198,
11748,
10688,
198,
11748,
4704,
278,
198,
198,
6738,
764,
39,
54,
9399,
13,
30465,
22130,
1330,
12365,
22130,
198,
6738,
764,
39,
54,
9399,
13,
38978,
22130,
1330,
14645,... | 2.957746 | 142 |
import sys
from torch.optim import Adam
from tqdm import tqdm
from src.dataset.ReviewDataset import ReviewDataset
from src.models.AttSeq2Seq import AttSeq2Seq
from src.models.Seq2Seq import Seq2Seq
from src.trainers.BaseTrainer import BaseTrainer
from src.utils.const import MAX_PADDING, CATE_MAX_PADDING, EARLY_STOPPING
from src.utils.metrics import compute_f1, compute_precision, compute_recall
import numpy as np
| [
11748,
25064,
198,
198,
6738,
28034,
13,
40085,
1330,
7244,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
12351,
13,
19608,
292,
316,
13,
14832,
27354,
292,
316,
1330,
6602,
27354,
292,
316,
198,
6738,
12351,
13,
27... | 2.85034 | 147 |
#!/usr/local/bin/python3
import os
import sys
import pprint
import logging
import json
from pyflowater import PyFlo
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
279,
4798,
198,
11748,
18931,
198,
11748,
33918,
198,
198,
6738,
12972,
11125,
729,
1330,
9485,
33574,
628,
198,
361,
11593,
367... | 2.872727 | 55 |
import pygame
import time
import draw
from window import Window
import event_handler
pygame.init()
ARRAY_SIZE,DELAY = event_handler.get_user_input()
if __name__ == "__main__":
running = True
clock = pygame.time.Clock()
FPS = 60
window = Window()
handler = event_handler.Handler(window.get_manager())
draw_to_window = draw.Draw(window,ARRAY_SIZE,DELAY)
#Links the event occurred with the draw.py class wich has the logic behind that event.
handler.set_button_event_logic_handler(draw_to_window)
while(running):
time_delta = clock.tick(FPS) / 1000.0
handler.check_for_events(pygame.event.get())
window.update_gui_manager(time_delta)
window.set_background()
window.display_buttons()
if draw_to_window.is_required_for_sorting:
draw_to_window.sort_visualization()
else:
draw_to_window.rectangle_bar_chart()
pygame.display.flip() | [
11748,
12972,
6057,
198,
11748,
640,
198,
11748,
3197,
198,
6738,
4324,
1330,
26580,
198,
11748,
1785,
62,
30281,
198,
198,
9078,
6057,
13,
15003,
3419,
198,
1503,
30631,
62,
33489,
11,
35,
3698,
4792,
796,
1785,
62,
30281,
13,
1136,
... | 2.39312 | 407 |
from pyglobe3d.core.icosalogic.grid_consts import Grid
from pyglobe3d.core.icosalogic.elements import ElementWithIndexAndLocationObjects
from pyglobe3d.core.icosalogic.triangle_attrs import TriangleIndex, TriangleLocation
class Triangle(ElementWithIndexAndLocationObjects):
"""
Describes a logical triangle located on the icosahedron
"""
@classmethod
@property
if __name__ == '__main__':
from pyglobe3d.core.icosalogic.grid_consts import Grid
tr45 = Triangle(
index_object=TriangleIndex(
grid=Grid(4),
index=45
)
)
for nd in tr45.triangle_nodes:
print(nd.index)
| [
6738,
12972,
4743,
5910,
18,
67,
13,
7295,
13,
291,
418,
11794,
291,
13,
25928,
62,
1102,
6448,
1330,
24846,
198,
6738,
12972,
4743,
5910,
18,
67,
13,
7295,
13,
291,
418,
11794,
291,
13,
68,
3639,
1330,
11703,
3152,
15732,
1870,
147... | 2.486692 | 263 |
import csv
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
if not os.getenv("DATABASE_URL"):
raise RuntimeError("DATABASE_URL is not set")
# Set up database
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
if __name__ == "__main__":
main()
| [
11748,
269,
21370,
198,
11748,
28686,
198,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
629,
19458,
62,
29891,
11,
6246,
10297,
198,
198,
361,
407,
28686,
13,
1136,
24330,
7203,
35,
... | 2.848 | 125 |
import argparse
import json
from _jsonnet import evaluate_file as jsonnet_evaluate_file
from sklearn.decomposition import TruncatedSVD
from scipy.sparse import csr_matrix
import torch
from tqdm import tqdm
from purano.io import read_tg_jsonl
from purano.training.models.tfidf import build_idf_vocabulary, get_tfidf_vector, SVDEmbedder
from purano.util import get_true_file
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config-file", type=str, required=True)
parser.add_argument("--input-file", type=str, required=True)
parser.add_argument("--output-file", type=str, required=True)
parser.add_argument("--svd-matrix-file", type=str, required=True)
args = parser.parse_args()
train_tfidf(**vars(args))
| [
11748,
1822,
29572,
198,
11748,
33918,
198,
198,
6738,
4808,
17752,
3262,
1330,
13446,
62,
7753,
355,
33918,
3262,
62,
49786,
62,
7753,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
833,
19524,
515,
50,
8898,
198,
6738,
629,
541,
... | 2.821168 | 274 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: jyao
Go over all n-grams up to length k (say 5) in a text, check which of these n-grams appear in the p(e|m) dictionary.
For each mention that appears, take the top scored entity e based on p(e|m) value,
and check its wikipedia page to see if it contains a DrugBank id.
If it does, link it, otherwise do not link it
"""
from .data_utils import generate_ngrams
from utils.wikibot import Wikibot | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
474,
3972,
78,
198,
5247,
625,
477,
299,
12,
4546,
82,
510,
284,
4129,
479,
357,
1670... | 2.974026 | 154 |
import bz2
import pytest
from io import TextIOWrapper
from pathlib import Path
from tests.mowgli_etl_test.http_client.mock_etl_http_client import MockEtlHttpClient
_sample_usf_file_path = Path(__file__).parent / "usf_test_data.xml"
_sample_usf_zip_file_path = Path(__file__).parent / "usf_test_data.zip"
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
275,
89,
17,
198,
11748,
12972,
9288,
198,
6738,
33245,
1330,
8255,
40,
3913,
430,
2848,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
5254,
13,
76,
322,
70,
4528,
62,
316,
75,
62,
9288,
13,
4023,
62,
16366,
13,
76,
735,
6... | 2.524476 | 143 |
import os
import sys
import subprocess
import csv
# When instantiating a trial class both of the inputs, name and trial, should be included.
#Their current defaults are for demonstration/testing purposes only.
def loop(que):
"""
desc: the loop used to manage talking to the trial interpreter.
possible commands:
'trial.start' -> runs the specified trial file and returns its outputs
arg inputs are it the order ('trialfile_path', 'subject_path', 'robot_path', {dict of subject information})
'trial.log -> writes event to log file. arg inputs are in the order ('time', 'event to log')
'trial.end' -> closes the open log file for the current trial, and stops trial
"""
current_trial = Trial()
if len(que) and que[0][0].startswith('trial'):
# fetch the message from the top of the que
addr, retaddr, args = que.pop(0)
# parse the adress into just the command by spitiling and disposing
# of the first item. the cmd is the address minus the module name
cmd = addr.split('.')[1:]
cmd_type = cmd[0] # convieneient for below
if cmd_type == 'start':
current_trial = Trial(args[0], args[1], arg[2])
que.append((retaddr, None, current_trial.run()))
elif cmd_type == 'log':
current_trial.write(args[0], args[1])
elif cmd_type == 'end':
current_trial.log.close()
que.append((retaddr, None, "Trial Ended"))
else:
que.append((retaddr, None, "error command not found"))
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
850,
14681,
198,
11748,
269,
21370,
198,
198,
2,
1649,
9113,
26336,
257,
4473,
1398,
1111,
286,
262,
17311,
11,
1438,
290,
4473,
11,
815,
307,
3017,
13,
198,
2,
14574,
1459,
26235,
389,
329... | 2.684932 | 584 |
"""VIMS ISIS header."""
import os
import numpy as np
import pvl
from .errors import ISISError
from .history import ISISHistory
from .labels import ISISLabels
from .tables import ISISTables
from .time import time as _dt
from .vars import BYTE_ORDERS, FIELD_TYPES
from ..misc import get_md5
class ISISCube:
"""VIMS ISIS header object.
Parameters
----------
filename: str
Input ISIS filename.
"""
@property
@filename.setter
@property
def is_file(self):
"""Check if the file exists."""
return os.path.exists(self.filename)
@property
def is_isis(self):
"""Check if the file is in ISIS format."""
with open(self.filename, 'rb') as f:
header = f.read(17)
return header == b'Object = IsisCube'
@property
def md5(self):
"""QUB MD5 hash."""
return get_md5(self.filename)
@property
def pvl(self):
"""Full ISIS header in PVL format."""
if self.__pvl is None:
self.__pvl = pvl.load(self.filename)
return self.__pvl
@property
def labels(self):
"""ISIS label labels."""
if self.__labels is None:
self.__labels = ISISLabels(self.pvl)
return self.__labels
@property
def tables(self):
"""ISIS tables."""
if self.__tables is None:
self.__tables = ISISTables(self.filename, self.pvl)
return self.__tables
@property
def history(self):
"""ISIS cube history."""
if self.__history is None:
self.__history = ISISHistory(self.filename, self.pvl['History'])
return self.__history
@property
def orig_lbl(self):
"""ISIS cube original labels."""
if self.__orig_lbl is None:
lbl = ISISHistory(self.filename, self.pvl['OriginalLabel'])['QUBE']
self.__orig_lbl = lbl
return self.__orig_lbl
def keys(self):
"""ISIS labels and tables keys."""
return set(
list(self.labels.keys())
+ list(self.tables.keys())
+ list(self.orig_lbl.keys())
)
@property
def header(self):
"""Main ISIS Cube header."""
return self.pvl['IsisCube']
@property
def _core(self):
"""ISIS core header."""
return self.header['Core']
@property
def _dim(self):
"""ISIS dimension header."""
return self._core['Dimensions']
@property
def NS(self):
"""Number of samples."""
return self._dim['Samples']
@property
def NL(self):
"""Number of lines."""
return self._dim['Lines']
@property
def NB(self):
"""Number of bands."""
return self._dim['Bands']
@property
def shape(self):
"""Cube shape."""
return (self.NB, self.NL, self.NS)
@property
def _pix(self):
"""ISIS core header."""
return self._core['Pixels']
@property
def dtype(self):
"""Cube data type."""
return np.dtype(BYTE_ORDERS[self._pix['ByteOrder']]
+ FIELD_TYPES[self._pix['Type']])
@property
def _start_byte(self):
"""Cube data start byte."""
return self._core['StartByte'] - 1
@property
def _nbytes(self):
"""Cube data bytes size."""
return self.NB * self.NL * self.NS * self.dtype.itemsize
@property
def _base(self):
"""Cube data base factor."""
return self._pix['Base']
@property
def _mult(self):
"""Cube data multiplication factor."""
return self._pix['Multiplier']
@property
def cube(self):
"""ISIS cube."""
if self.__cube is None:
self.__cube = self._load_data()
return self.__cube
def _load_data(self):
"""Load ISIS table data."""
with open(self.filename, 'rb') as f:
f.seek(self._start_byte)
data = f.read(self._nbytes)
data = np.frombuffer(data, dtype=self.dtype) * self._mult + self._base
data[self._is_null(data)] = np.nan
return self._reshape(data)
@property
def _underflow(self):
"""Data type underflow value."""
return np.finfo(self.dtype).min if self.dtype.char == 'f' \
else np.iinfo(self.dtype).min
@property
def _overflow(self):
"""Data type overflow value."""
return np.finfo(self.dtype).max if self.dtype.char == 'f' \
else np.iinfo(self.dtype).max
def _is_null(self, data, tol=1e-6):
"""Find NULL values.
Find the values lower than underflow or higher than overflow.
Parameters
----------
data: np.array
Input array to test.
tol: float
Relative tolerance factor
Returns
-------
np.array
Location of the null values.
"""
return (np.abs(data / self._underflow) >= tol) | \
(np.abs(data / self._overflow) >= tol)
@property
def _TL(self):
"""Number of tiles in the line direction."""
return self._core['TileLines']
@property
def _TS(self):
"""Number of tiles in the sample direction."""
return self._core['TileSamples']
def _reshape(self, data):
"""Reshape data based on tile size."""
if self._TS == self.NS and self._TL == self.NL:
return np.reshape(data, self.shape)
size = np.size(data)
shape = (size // (self._TL * self._TS), self._TL, self._TS)
tiled_data = np.reshape(data, shape)
# Stack in the samples direction
shape = (size // (self._TL * self.NS), self.NS, self._TL)
samples_stacked = np.moveaxis(
np.moveaxis(tiled_data, 1, 2).reshape(shape), 1, 2)
# Stack in the lines direction
return np.reshape(samples_stacked, self.shape)
@property
def _bands(self):
"""Cube band bin header."""
return self.header['BandBin']
@property
def bands(self):
"""Cube bands numbers."""
return np.array(self._bands['OriginalBand'])
@property
def wvlns(self):
"""Cube central wavelengths (um)."""
return np.array([float(w[:-1]) if isinstance(w, str) else w
for w in self._bands['Center']])
@property
def _inst(self):
"""Cube instrument header."""
return self.header['Instrument']
@property
def start(self):
"""Instrument start time (UTC)."""
return _dt(self._inst['StartTime'])
@property
def stop(self):
"""Instrument stop time (UTC)."""
return _dt(self._inst['StopTime'])
@property
def duration(self):
"""Instrument acquisition dureation."""
return self.stop - self.start
@property
def time(self):
"""Instrument mid time (UTC)."""
return self.start + self.duration / 2
@property
def _naif(self):
"""NAIF keywords stored in ISIS header."""
return self.pvl['NaifKeywords']
@property
def exposure(self):
"""ISIS header exposure duration."""
return self._inst['ExposureDuration']
@property
def kernels(self):
"""List of kernels cached by ISIS."""
if 'Kernels' not in self:
return None
kernels = []
for kernel in self['Kernels'].values():
if isinstance(kernel, str) and '$' in kernel:
kernels.append(kernel)
elif isinstance(kernel, list):
for k in kernel:
if '$' in k:
kernels.append(k)
return sorted(kernels)
@property
def target_name(self):
"""Main target name."""
return self._inst['TargetName']
@property
def target_radii(self):
"""Main target radii (km)."""
for k, v in self.pvl['NaifKeywords']:
if 'RADII' in k:
return v
raise ValueError('Target radii not found in the header.')
@property
def target_radius(self):
"""Main target mean radius (km)."""
return np.power(np.prod(self.target_radii), 1 / 3)
def dumps_header(self):
"""Dumps cube header."""
return pvl.dumps(self.header).decode()
| [
37811,
53,
3955,
50,
7768,
13639,
526,
15931,
198,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
279,
19279,
198,
198,
6738,
764,
48277,
1330,
7768,
12331,
198,
6738,
764,
23569,
1330,
7768,
18122,
198,
67... | 2.216525 | 3,764 |
# -*- coding: utf-8 -*-
import os
from pyfann import libfann
from test_opencv import PROJECT_ROOT
imgW = 83
imgH = 23
factor = 2
rimgW = imgW * factor
rimgH = imgH * factor
nSegs = 5
segW = 18
segH = 18
segSize = (segW, segH)
num_input = segW * segH
TRAIN_FILE = os.path.join(PROJECT_ROOT, "fann.train")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
198,
6738,
12972,
69,
1236,
1330,
9195,
69,
1236,
198,
198,
6738,
1332,
62,
9654,
33967,
1330,
21965,
23680,
62,
13252,
2394,
628,
198,
9600,
54,
796,... | 2.113333 | 150 |
#coding:utf-8
import matplotlib.pyplot as plt
import matplotlib
import platform
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
198,
11748,
3859,
628,
220,
220,
220,
220,
220,
220,
220,
220
] | 2.5 | 36 |
from __future__ import division
import numpy as np
import utils
import pdb
data = utils.load_dataset("mnist/mnist_test", npy=True)
Xvalid, yvalid = data['X'], data['y']
if __name__ == "__main__":
pdb.set_trace()
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
3384,
4487,
198,
11748,
279,
9945,
198,
198,
7890,
796,
3384,
4487,
13,
2220,
62,
19608,
292,
316,
7203,
10295,
396,
14,
10295,
396,
62,
9288,
1600,
... | 2.522727 | 88 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.898734 | 79 |
# -*- coding: utf-8 -*-
"""
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
"""
from __future__ import unicode_literals
from .calendar_request import CalendarRequest
from ..request_builder_base import RequestBuilderBase
from ..request import event_collection
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
198,
37811,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
220,
1439,
6923,
33876,
13,
220,
49962,
739,
262,
17168,
13789,
13,
220,
4091,
13789,
287,
262,
1628,
6808,
329... | 3.875 | 112 |
import torch
from rlpyt.models.conv2d import Conv2dModel
from rlpyt.models.mlp import MlpModel
from rlpyt.ul.models.dmlab_conv2d import DmlabConv2dModel
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims
def weight_init(m):
"""Kaiming_normal is standard for relu networks, sometimes."""
if isinstance(m, (torch.nn.Linear, torch.nn.Conv2d)):
torch.nn.init.kaiming_normal_(m.weight, mode="fan_in",
nonlinearity="relu")
torch.nn.init.zeros_(m.bias)
| [
198,
11748,
28034,
198,
198,
6738,
374,
75,
9078,
83,
13,
27530,
13,
42946,
17,
67,
1330,
34872,
17,
67,
17633,
198,
6738,
374,
75,
9078,
83,
13,
27530,
13,
4029,
79,
1330,
337,
34431,
17633,
198,
6738,
374,
75,
9078,
83,
13,
377,... | 2.318182 | 220 |
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import User, Job
from datetime import datetime
import bcrypt
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
764,
27530,
1330,
11787,
11,
15768,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
275,
29609,
198
] | 4 | 39 |
import __version
print(__version.version) | [
11748,
11593,
9641,
201,
198,
4798,
7,
834,
9641,
13,
9641,
8
] | 3.5 | 12 |
# -*- coding: utf-8 -*-
import wx
from src.ui import MakeInfoEntrySizer, AddField, MedFont, winSizes
import hashlib
from strings import trans
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
266,
87,
198,
6738,
12351,
13,
9019,
1330,
6889,
12360,
30150,
50,
7509,
11,
3060,
15878,
11,
2019,
23252,
11,
1592,
50,
4340,
198,
11748,
12234,
8019,
198,
... | 2.918367 | 49 |
# Create a function called uppercase_and_reverse that takes a little bit of text,
# uppercases it all, and then reverses it (flips all the letters around)
text = uppercase_and_reverse("Do not go gentle into that good night.") #"THGIN DOOG TAHT OTNI ELTNEG OG TON OD"
print(text) | [
2,
13610,
257,
2163,
1444,
334,
39921,
589,
62,
392,
62,
50188,
326,
2753,
257,
1310,
1643,
286,
2420,
11,
220,
198,
2,
334,
39921,
1386,
340,
477,
11,
290,
788,
10372,
274,
340,
357,
2704,
2419,
477,
262,
7475,
1088,
8,
198,
198,... | 3.157303 | 89 |
#!/usr/bin/env python3
"""
mypy-after-commit.py: Git post-commit script to type-check commits in the background.
Install with:
ln -rs ./contrib/hooks/mypy-after-commit.py .git/hooks/post-commit
"""
import sys
import subprocess
import os
from lib import announce, complain, file_link, in_acceptable_environment, check_to_cache, get_current_commit, is_rebase
if __name__ == "__main__":
sys.exit(main(len(sys.argv), sys.argv))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
1820,
9078,
12,
8499,
12,
41509,
13,
9078,
25,
15151,
1281,
12,
41509,
4226,
284,
2099,
12,
9122,
23463,
287,
262,
4469,
13,
198,
198,
15798,
351,
25,
198,
198,
... | 2.836601 | 153 |
import openml
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import iinc
# Measures
# Dataset list
openml_list = openml.datasets.list_datasets()
datalist = pd.DataFrame.from_dict(openml_list, orient='index')
filtered = datalist.query('NumberOfClasses > 2')
filtered = filtered.query('NumberOfInstances <= 3000')
filtered = filtered.query('NumberOfFeatures <= 30')
filtered = filtered.query('format == "ARFF"') # Avoid sparse data sets
filtered = filtered.query('NumberOfSymbolicFeatures <= 1') # The label is included in the count
filtered = filtered.query('MajorityClassSize/MinorityClassSize > 2')
filtered = filtered.query('did not in [1528, 1529, 1530, 1543, 1544, 1545, 1546]') # Almost duplicates
logger = []
for did in filtered.did:
try:
# Download dataset
dataset = openml.datasets.get_dataset(did)
X_in, y_in, categorical_indicator, _ = dataset.get_data(target=dataset.default_target_attribute, dataset_format='array')
X_in[np.isnan(X_in)] = -1 # Very simple missing value treatment
print('Dataset', dataset.name, did, flush=True) # For progress indication
# Split
X_train, X_test, y_train, y_test = train_test_split(X_in, y_in, test_size = 0.33, random_state = 42, stratify=y_in)
# Score
clf = KNeighborsClassifier(n_neighbors=3)
probs = clf.fit(X_train, y_train).predict_proba(X_test)
prediction = clf.fit(X_train, y_train).predict(X_test)
baseline_auc = metrics.roc_auc_score(y_test, probs, multi_class='ovo')
baseline_kappa = metrics.cohen_kappa_score(y_test, prediction)
baseline_brier = brier_multi(y_test, probs)
probs, prediction = sandbox_iinc.iinc(X_train, y_train, X_test, prior_weight='raw')
iinc_raw_auc = metrics.roc_auc_score(y_test, probs, multi_class='ovo')
iinc_raw_kappa = metrics.cohen_kappa_score(y_test, prediction)
iinc_raw_brier = brier_multi(y_test, probs)
probs, prediction = sandbox_iinc.iinc(X_train, y_train, X_test, prior_weight='ovo')
iinc_ovo_auc = metrics.roc_auc_score(y_test, probs, multi_class='ovo')
iinc_ovo_kappa = metrics.cohen_kappa_score(y_test, prediction)
iinc_ovo_brier = brier_multi(y_test, probs)
probs, prediction = sandbox_iinc.iinc(X_train, y_train, X_test, prior_weight='ovr')
iinc_ovr_auc = metrics.roc_auc_score(y_test, probs, multi_class='ovo')
iinc_ovr_kappa = metrics.cohen_kappa_score(y_test, prediction)
iinc_ovr_brier = brier_multi(y_test, probs)
logger.append([dataset.name, did, dataset.qualities.get('MajorityClassSize') / dataset.qualities.get('MinorityClassSize'), baseline_kappa, iinc_raw_kappa, iinc_ovo_kappa, iinc_ovr_kappa, baseline_auc, iinc_raw_auc, iinc_ovo_auc, iinc_ovr_auc, baseline_brier, iinc_raw_brier, iinc_ovo_brier, iinc_ovr_brier])
except Exception:
continue
result = pd.DataFrame(logger, columns=['dataset', 'did', 'class_ratio', 'baseline_kappa', 'raw_kappa', 'ovo_kappa', 'ovr_kappa', 'baseline_auc', 'raw_auc', 'ovo_auc', 'ovr_auc', 'baseline_brier', 'raw_brier', 'ovo_brier', 'ovr_brier'])
result.to_csv('~/Downloads/results.csv')
pd.set_option('display.width', 1600)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(result)
| [
11748,
1280,
4029,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
1330,
20731,
198,
6738,
1341,
35720,
13,
710,
394,
32289,
1330,
509,
46445,
32289,
9487,
7483,
198,
6738,
1341,
35720,
... | 2.462845 | 1,413 |
# Server Configuration
ip_address_server = "http://192.168.88.204"
url = "/epass2018/parking_outs/api_member_out"
timeout_connection = 30 # in second(s)
retry_connect = 3 # in second(s)
# HID Configuration
hid_name = "Sycreader RFID" | [
2,
9652,
28373,
198,
541,
62,
21975,
62,
15388,
796,
366,
4023,
1378,
17477,
13,
14656,
13,
3459,
13,
18638,
1,
198,
6371,
796,
12813,
538,
562,
7908,
14,
20928,
278,
62,
5269,
14,
15042,
62,
19522,
62,
448,
1,
198,
48678,
62,
386... | 2.819277 | 83 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import glob
import re
import warnings
from os.path import join as pjoin
from collections import OrderedDict, defaultdict
from typing import Any, Optional, Mapping, List, Tuple, Container, Union
from numpy.random import RandomState
import textworld
from textworld import g_rng
from textworld.utils import uniquify
from textworld.generator.data import KnowledgeBase
from textworld.textgen import TextGrammar
NB_EXPANSION_RETRIES = 20
class Grammar:
"""
Context-Free Grammar for text generation.
"""
_cache = {}
def __init__(self, options: Union[GrammarOptions, Mapping[str, Any]] = {}, rng: Optional[RandomState] = None):
"""
Arguments:
options:
For customizing text generation process (see
:py:class:`textworld.generator.GrammarOptions <textworld.generator.text_grammar.GrammarOptions>`
for the list of available options).
rng:
Random generator used for sampling tag expansions.
"""
self.options = GrammarOptions(options)
self.grammar = OrderedDict()
self.rng = g_rng.next() if rng is None else rng
self.allowed_variables_numbering = self.options.allowed_variables_numbering
self.unique_expansion = self.options.unique_expansion
self.all_expansions = defaultdict(list)
# The current used symbols
self.overflow_dict = OrderedDict()
self.used_names = set(self.options.names_to_exclude)
# Load the grammar associated to the provided theme.
self.theme = self.options.theme
# Load the object names file
path = pjoin(KnowledgeBase.default().text_grammars_path, glob.escape(self.theme) + "*.twg")
files = glob.glob(path)
if len(files) == 0:
raise MissingTextGrammar(path)
assert len(files) == 1, "TODO"
for filename in files:
self._parse(filename)
def _parse(self, path: str):
"""
Parse lines and add them to the grammar.
"""
with open(path) as f:
self.grammar = TextGrammar.parse(f.read(), filename=path)
# if path not in self._cache:
# with open(path) as f:
# self._cache[path] = TextGrammar.parse(f.read(), filename=path)
# for name, rule in self._cache[path].rules.items():
# self.grammar["#" + name + "#"] = rule
def has_tag(self, tag: str) -> bool:
"""
Check if the grammar has a given tag.
"""
return tag in self.grammar
def get_random_expansion(self, tag: str, rng: Optional[RandomState] = None, game=None) -> str:
"""
Return a randomly chosen expansion for the given tag.
Parameters
----------
tag :
Grammar tag to be expanded.
rng : optional
Random generator used to chose an expansion when there is many.
By default, it used the random generator of this grammar object.
Returns
-------
expansion :
An expansion chosen randomly for the provided tag.
"""
rng = rng or self.rng
if not self.has_tag(tag):
raise ValueError("Tag: {} does not exist!".format(tag))
for _ in range(NB_EXPANSION_RETRIES):
expansion = rng.choice(self.grammar[tag].alternatives)
expansion = expansion.full_form(game)
if not self.unique_expansion or expansion not in self.all_expansions[tag]:
break
self.all_expansions[tag].append(expansion)
return expansion
def expand(self, text: str, rng: Optional[RandomState] = None, game = None) -> str:
"""
Expand some text until there is no more tag to expand.
Parameters
----------
text :
Text potentially containing grammar tags to be expanded.
rng : optional
Random generator used to chose an expansion when there is many.
By default, it used the random generator of this grammar object.
Returns
-------
expanded_text :
Resulting text in which there is no grammar tag left to be expanded.
"""
rng = self.rng if rng is None else rng
while "#" in text:
to_replace = re.findall(r'[#][^#]*[#]', text)
tag = self.rng.choice(to_replace)
replacement = self.get_random_expansion(tag, rng, game)
text = text.replace(tag, replacement)
return text
def split_name_adj_noun(self, candidate: str, include_adj: bool) -> Optional[Tuple[str, str, str]]:
"""
Extract the full name, the adjective and the noun from a string.
Parameters
----------
candidate :
String that may contain one adjective-noun sperator '|'.
include_adj : optional
If True, the name can contain a generated adjective.
If False, any generated adjective will be discarded.
Returns
-------
name :
The whole name, i.e. `adj + " " + noun`.
adj :
The adjective part of the name.
noun :
The noun part of the name.
"""
parts = candidate.split("|")
noun = parts[-1].strip()
if len(parts) == 1 or not include_adj:
adj = None
elif len(parts) == 2:
adj = parts[0].strip()
else:
raise ValueError("Too many separators '|' in '{}'".format(candidate))
name = adj + " " + noun if adj is not None else noun
return name, adj, noun
def generate_name(self, obj_type: str, room_type: str = "",
include_adj: Optional[bool] = None, exclude: Container[str] = []) -> Tuple[str, str, str]:
"""
Generate a name given an object type and the type room it belongs to.
Parameters
----------
obj_type :
Type of the object for which we will generate a name.
room_type : optional
Type of the room the object belongs to.
include_adj : optional
If True, the name can contain a generated adjective.
If False, any generated adjective will be discarded.
Default: use value grammar.options.include_adj
exclude : optional
List of names we should avoid generating.
Returns
-------
name :
The whole name, i.e. `adj + " " + noun`.
adj :
The adjective part of the name.
noun :
The noun part of the name.
"""
if include_adj is None:
include_adj = self.options.include_adj
# Get room-specialized name, if possible.
symbol = "#{}_({})#".format(room_type, obj_type)
if not self.has_tag(symbol):
# Otherwise, fallback on the generic object names.
symbol = "#({})#".format(obj_type)
# We don't want to generate a name that is in `exclude`.
found_candidate = False
for i in range(50): # We default to fifty attempts
candidate = self.expand(symbol)
name, adj, noun = self.split_name_adj_noun(candidate, include_adj)
if name not in exclude:
found_candidate = True
break
if not found_candidate:
# Not enough variation for the object we want to name.
# Warn the user and fall back on adding an adjective if we can.
if not include_adj:
name, adj, noun = self.generate_name(obj_type, room_type, include_adj=True, exclude=exclude)
msg = ("Not enough variation for '{}'. Falling back on using adjective '{}'."
" To avoid this message you can add more variation in the '{}'"
" related grammar files located in '{}'.")
msg = msg.format(symbol, adj, self.theme, KnowledgeBase.default().text_grammars_path)
warnings.warn(msg, textworld.GenerationWarning)
return name, adj, noun
# Still not enough variation for the object we want to name.
if not self.allowed_variables_numbering:
msg = ("Not enough variation for '{}'. You can add more variation"
" in the '{}' related grammar files located in '{}'"
" or turn on the 'include_adj=True' grammar flag."
" In last resort, you could always turn on the"
" 'allowed_variables_numbering=True' grammar flag"
" to append unique number to object name.")
msg = msg.format(symbol, self.theme, KnowledgeBase.default().text_grammars_path)
raise ValueError(msg)
if obj_type not in self.overflow_dict:
self.overflow_dict[obj_type] = []
# Append unique (per type) number to the noun.
suffix = " {}".format(len(self.overflow_dict[obj_type]))
noun += suffix
name += suffix
self.overflow_dict[obj_type].append(name)
return name, adj, noun
def get_all_expansions_for_tag(self, tag: str, max_depth: int = 500) -> List[str]:
"""
Get all possible expansions for a grammar tag.
Parameters
----------
tag :
Grammar tag to be expanded.
max_depth : optional
Maximum recursion depth when expanding tag.
Returns
-------
expansions :
All possible expansions.
"""
if tag not in self.grammar:
return []
variants = []
# Recursively get all symbol possibilities
_iterate(tag, 0)
return variants
def get_all_expansions_for_type(self, type: str):
"""
Get all possible expansions for a given object type.
Parameters
----------
type :
Object type.
Returns
-------
names :
All possible names.
"""
expansions = self.get_all_expansions_for_tag("#({})#".format(type))
for room_type in self.grammar["#room_type#"].alternatives:
expansions += self.get_all_expansions_for_tag("#{}_({})#".format(room_type.full_form(), type))
return uniquify(expansions)
def get_all_names_for_type(self, type: str, include_adj: True):
"""
Get all possible names for a given object type.
Parameters
----------
type :
Object type.
include_adj : optional
If True, names can contain generated adjectives.
If False, any generated adjectives will be discarded.
Returns
-------
names :
All possible names sorted in alphabetical order.
"""
expansions = self.get_all_expansions_for_type(type)
names = [self.split_name_adj_noun(expansion, include_adj)[0] for expansion in expansions]
return sorted(set(names))
def get_all_adjective_for_type(self, type: str):
"""
Get all possible adjectives for a given object type.
Parameters
----------
type :
Object type.
Returns
-------
adjectives :
All possible adjectives sorted in alphabetical order.
"""
expansions = self.get_all_expansions_for_type(type)
adjectives = [self.split_name_adj_noun(expansion, include_adj=True)[1] for expansion in expansions]
return sorted(set(adjectives))
def get_all_nouns_for_type(self, type: str):
"""
Get all possible nouns for a given object type.
Parameters
----------
type :
Object type.
Returns
-------
nouns :
All possible nouns sorted in alphabetical order.
"""
expansions = self.get_all_expansions_for_type(type)
nouns = [self.split_name_adj_noun(expansion, include_adj=False)[2] for expansion in expansions]
return sorted(set(nouns))
def check(self) -> bool:
"""
Check if this grammar is valid.
TODO: use logging mechanism to report warnings and errors.
"""
errors_found = False
for symbol in self.grammar:
if len(self.grammar[symbol].alternatives) == 0:
print("[Warning] Symbol {} has empty tags".format(symbol))
for tag in self.grammar[symbol].alternatives:
tag = tag.full_form()
if tag == "":
print("[Warning] Symbol {} has empty tags".format(symbol))
for symb in re.findall(r'[#][^#]*[#]', tag):
if symb not in self.grammar:
print("[Error] Symbol {} not found in grammar (Occurs in expansion of {})".format(symb, symbol))
errors_found = True
return not errors_found
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
628,
198,
11748,
15095,
198,
11748,
302,
198,
11748,
14601,
198,
6738,
28686,
13,
6978,
1330,
4654,
355,
279,
22179,
198,
6738,
... | 2.274615 | 5,779 |
# pylint: disable=unused-wildcard-import,c-extension-no-member,too-many-instance-attributes,no-self-use,raise-missing-from,no-member,wildcard-import
"""Provides a wrapper for Windows Process defined by PID"""
from ctypes import *
from ctypes.wintypes import *
from wmempy.wmem_system import WinSys as WSys
from wmempy.wmem_structs import MEMORY_BASIC_INFORMATION, MODULEINFO
from wmempy.wmem_scannable import ProcPage, ProcModule
from wmempy.wmem_scanner import ProcScanner
from wmempy.wmem_memory import ProcReader, ProcWriter
import numpy as np
import win32api
import win32process
import win32con
import pywintypes
class WinProc:
"""
Represents a single Windows Process.
"""
def __init__(self, process_name, process_id = -1):
"""
:param process_name: name of the process to wrap around
:param process_id: pid of the process to wrap around
:type process_name: string
:type process_id: int
"""
self.modules = []
self.pages = []
# Get all processes that match PID or name
filtered_proc_list = self.__filter_processes(process_name, process_id)
# If there is only one, this class can become a wrapper of such process
if len(filtered_proc_list) == 1:
self.proc_id = filtered_proc_list[0][0]
self.proc_name = filtered_proc_list[0][1]
# Try opening handle to the process, this can and will fail if we do not have enough
# access rights. Usually (aka on some Windows configurations based on your antivirus
# /UAC/DEP/...........) you need at least the same rights as the running process (when
# process is running as admin the app has to as well). Some kernel anticheats (EAC,
# BattleEye, ...) also hook OpenProcess and strip access if the app is not on their
# white list (csrss.exe/dwm.exe should always have full access handle to every process,
# even System). This is usually heavily guarded as you can just dupe the handle. To
# make it short, if anticheat decides, it can strip access from your handle and while
# the handle will appear as valid, all functions will simply fail (they are also hooked)
# More info on this (and bypass) here:
# https://www.unknowncheats.me/forum/anti-cheat-bypass/212113-hleaker.html
try:
self.handle = win32api.OpenProcess(WSys.REQUIRED_ACCESS, 0, self.proc_id)
except pywintypes.error:
raise Exception('Access denied.', filtered_proc_list)
# If process does not exist at all, fail.
elif len(filtered_proc_list) == 0:
raise Exception('Process not found.')
else:
# If there are multiple processes, we cannot just take first one, the process has
# to be specified by PID.
raise Exception('Unable to determine unique process from name.', filtered_proc_list)
# Create scanner and create a link between them
self.scanner = ProcScanner(self)
# Create reader
self.reader = ProcReader(self)
# Create writer
self.writer = ProcWriter(self)
# Post init setup (methods and imports)
self.__post_init__()
# Grab info about the process (modules and pages)
self.gather_info()
def __post_init__(self):
"""
Make sure ctypes functions behave like we want them to
"""
ctypes.windll.kernel32.VirtualQueryEx.argtypes = [HANDLE, LPCVOID, c_size_t, c_size_t]
ctypes.windll.psapi.GetModuleInformation.argtypes = [HANDLE, HMODULE,
ctypes.POINTER(MODULEINFO), DWORD]
ctypes.windll.kernel32.ReadProcessMemory.argtypes = [HANDLE, LPCVOID, LPCVOID, c_size_t,
ctypes.POINTER(c_size_t)]
def __filter_processes(self, process_name, process_id):
"""
Goes through all processes in the system and filters them based on provided criteria
"""
proc_list = WSys.process_list()
# If we were not given PID, we have to filter by name, does not have to be unique
if process_id == -1:
filtered_proc_list = [proc for proc in proc_list if proc[1] == process_name]
# If we have PID, try finding one process that has such PID
else:
filtered_proc_list = [proc for proc in proc_list if proc[0] == process_id]
return filtered_proc_list
def print_process(self):
"""
Prints basic info about the process
"""
print(f'{self.proc_name}')
print(f'PID: {self.proc_id}')
print(f'Handle: {self.get_handle()}')
def print_modules(self):
"""
Prints all loaded modules
"""
print('Module list:')
print('-------------------')
for module in self.modules:
module.print()
print('')
print('-------------------')
def print_pages(self):
"""
Prints all valid memory pages
"""
print('Memory page list:')
print('-------------------')
for page in self.pages:
page.print()
print('-------------------')
def print_process_detailed(self):
"""
Prints full process information, this can be very long
"""
self.print_process()
self.print_modules()
self.print_pages()
def gather_info(self):
"""
Retrieves all modules and memory pages of given process
"""
if not self.process_valid():
raise Exception('Process no longer exists.')
self.get_modules()
self.get_pages()
def process_valid(self):
"""
Process is valid if we have an open handle and it is still running
:returns: whether the process is valid or not
:rtype: boolean
"""
return self.handle and win32process.GetExitCodeProcess(self.handle) == WSys.PROCESS_RUNNING
def get_modules(self):
"""
Fills self.modules with currently loaded modules of the process. Modules (typically
.dll/.so) can be loaded by the app itself or injected by others into the process. This
method will not detect stealth injection techniques of modules (manual mapping, scrambling)
:returns: ProcModule list that is used by the process
:rtype: list of ProcModules
"""
self.modules = []
# https://docs.microsoft.com/en-us/windows/win32/api/psapi/nf-psapi-enumprocessmodulesex
# win32py provides very nice wrapper which returns the module list through return value
for module in win32process.EnumProcessModulesEx(self.handle, win32process.LIST_MODULES_ALL):
self.modules.append(ProcModule(self, module))
def get_pages(self):
"""
Fills self.pages with currently valid virtual memory pages of the process. This is useful
for full memory scans, since the address space is limited only by the architecture and
this way, we can only scan used address space that we have access to
:returns: ProcPage list that is used by the process
:rtype: list of ProcPages
"""
self.pages = []
current_base = 0
mbi = MEMORY_BASIC_INFORMATION()
# https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualqueryex
# Iterates over memory regions and adds the valid ones into our list of pages
while ctypes.windll.kernel32.VirtualQueryEx(self.get_handle(), current_base,
ctypes.addressof(mbi), ctypes.sizeof(mbi)) > 0:
if (mbi.State == win32con.MEM_COMMIT and mbi.Protect != win32con.PAGE_NOACCESS and
mbi.Protect != win32con.PAGE_GUARD):
self.pages.append(ProcPage(self, mbi.BaseAddress, mbi.RegionSize))
current_base += mbi.RegionSize
def get_handle(self):
"""
Since handle is wrapped in PyHandle so that it is automatically closed (CloseHandle)
upon destruction we need to provide direct access to the handle for ctypes functions
that cannot work with the wrapper
:returns: OpenProcess handle of the process
:rtype: int
"""
return self.handle.__int__()
def compare(self, other):
"""
Compare two processes against each other using common strings
"""
# Stack of first process
first_entry = [module for module in self.modules
if module.get_name().lower() == self.proc_name.lower()]
# List all long enough strings and convert them to numpy array for speed
first = np.array(self.scanner.ASCII_list_arr(first_entry, True, 12))
# Same thing for second process
second_entry = [module for module in other.modules
if module.get_name().lower() == other.proc_name.lower()]
second = np.array(other.scanner.ASCII_list_arr(second_entry, True, 12))
# A - B set diff (unique values only)
first_diff = np.setdiff1d(first, second)
# B - A set diff (unique values only)
second_diff = np.setdiff1d(second, first)
# Print the results, the percentage will almost always be different between
# (A to B) vs (B to A) due to the sizes
print(f'{self.proc_name} is {len(first_diff)}/{len(first)} ('
f'{"{:.2f}".format(len(first_diff) / len(first) * 100)} %) '
f'different from {other.proc_name}')
print(f'{other.proc_name} is {len(second_diff)}/{len(second)} ('
f'{"{:.2f}".format(len(second_diff) / len(second) * 100)} %)'
f' different from {self.proc_name}')
| [
2,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
21992,
9517,
12,
11748,
11,
66,
12,
2302,
3004,
12,
3919,
12,
19522,
11,
18820,
12,
21834,
12,
39098,
12,
1078,
7657,
11,
3919,
12,
944,
12,
1904,
11,
40225,
12,
45688,
12,
6738,
... | 2.431561 | 4,062 |
from setuptools import setup
setup(
name='patho_toolbox',
version='',
packages=[''],
url='',
license='',
author='vankhoa',
author_email='vankhoa21991@gmail.com',
description='pathology toolbox'
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
6978,
78,
62,
25981,
3524,
3256,
198,
220,
220,
220,
2196,
11639,
3256,
198,
220,
220,
220,
10392,
41888,
7061,
4357,
198,
220,
220,
220,
19016,... | 2.410526 | 95 |
#! /usr/bin/env python3
"""
Plot the compostion of the giant component in time from a set of snapshots located in a directory.
``` {.text}
usage: kappa_snapshot_largest_complex_time
[-h] Show detailed help.
[-d DIRECTORY] Directory where snapshots are stored, default is <.>
[-p PATTERN] Pattern that groups desired snapshots names; default 'snap*.ka'.
[-a [...]] Patterns that should be plotted; omiting plots sum formula.
[-o OUTPUT_NAME] The common file name for saving figures; shown if not given.
[-fs WIDTH HEIGHT] Size of the resulting figure, in inches.
[--lin_log] If specified, produce an additional plot with linear X-axis and logarithmic Y-axis.
[--log_lin] If specified, produce an additional plot with logarithmic X-axis and linear Y-axis.
[--log_log] If specified, produce an additional plot with logarithmic X-axis and logarithmic Y-axis.
[--un_stacked] If given, produce regular non-stacked plot.
[--mt THREADS] Launch multiple threads for reading snapshots. Safe, but always less performant: WIP.
```
"""
import argparse
import matplotlib as mpl
import matplotlib.pyplot as plt
from pathlib import Path
from KaSaAn.functions import find_snapshot_names
from KaSaAn.functions.graph_largest_complex_composition import snapshot_list_to_plot_matrix, _make_figure
def main() -> None:
    """Plot the evolution of the giant component in time from a set of snapshots located in a directory, showing only
    the subset of patterns specified."""
    # NOTE: the docstring above doubles as the CLI description (see the
    # ArgumentParser call below), so its wording is user-visible.
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument('-d', '--directory', type=str, default='.',
                        help='Name of the directory where snapshots are stored; default is current directory.')
    parser.add_argument('-p', '--pattern', type=str, default='snap*.ka',
                        help='Pattern that should be used to get the snapshot names; default is as produced by KaSim,'
                             ' `snap*.ka`')
    parser.add_argument('-a', '--agent-patterns', type=str, default=None, nargs='*',
                        help='Patterns whose number of symmetry-adjusted embeddings into the giant component'
                             ' should be plotted; leave blank or omit to plot all agent types (i.e. sum formula)'
                             ' instead.')
    parser.add_argument('-o', '--output_name', type=str,
                        help='If specified, the name of the file where the figure should be saved. If not given,'
                             ' figure will be shown instead. If alternate scale options are given, a "_log_lin" or'
                             ' similar will be inserted between the file-name and the extension requested to'
                             ' distinguish the additional requested files.')
    # Default figure size falls back to the user's matplotlib rcParams.
    parser.add_argument('-fs', '--figure_size', type=float, default=mpl.rcParams['figure.figsize'], nargs=2,
                        help='Size of the resulting figure, in inches, specified as two elements, width and height'
                             ' (text size is specified in points, so this affects the size of text relative to other'
                             ' graph elements).')
    parser.add_argument('--lin_log', action='store_true',
                        help='If specified, produce an additional plot with linear X-axis and logarithmic Y-axis.')
    parser.add_argument('--log_lin', action='store_true',
                        help='If specified, produce an additional plot with logarithmic X-axis and linear Y-axis.')
    parser.add_argument('--log_log', action='store_true',
                        help='If specified, produce an additional plot with logarithmic X-axis and logarithmic Y-axis.')
    parser.add_argument('--un_stacked', action='store_true',
                        help='If given, produce a conventional plot rather than a filled stacked plot (meant for sum'
                             ' formulae). Useful when plotting patterns that may overlap, ergo whose stacking would not be'
                             ' as intuitive.')
    parser.add_argument('-mt', '--multi_thread', type=int, default=1,
                        help='Number of threads for the concurrent pool of workers to read-in snapshots. Default uses'
                             ' 1, so a single-threaded for-loop.')
    args = parser.parse_args()
    # Resolve snapshot file names, then reduce them to a time vector plus a
    # (pattern x time) abundance matrix via the KaSaAn helpers.
    snap_name_list = find_snapshot_names(target_directory=args.directory, name_pattern=args.pattern)
    s_times, p_matrix, pattern_list = snapshot_list_to_plot_matrix(snapshot_names=snap_name_list,
                                                                   agent_patterns_requested=args.agent_patterns,
                                                                   thread_number=args.multi_thread)
    # scale plot
    # The lin/lin figure is always built; the alternate-scale figures only on
    # request (each flag is independent, so up to four figures can exist).
    fig_lin_lin = _make_figure(s_times, p_matrix, pattern_list, args.figure_size, 'linear', 'linear', args.un_stacked)
    if args.lin_log:
        fig_lin_log = _make_figure(s_times, p_matrix, pattern_list, args.figure_size, 'linear', 'log', args.un_stacked)
    if args.log_lin:
        fig_log_lin = _make_figure(s_times, p_matrix, pattern_list, args.figure_size, 'log', 'linear', args.un_stacked)
    if args.log_log:
        fig_log_log = _make_figure(s_times, p_matrix, pattern_list, args.figure_size, 'log', 'log', args.un_stacked)
    # save or display?
    # When saving, alternate-scale figures get a suffix (e.g. "_log_lin")
    # spliced between the requested stem and extension.
    if args.output_name:
        save_path = Path(args.output_name)
        fig_lin_lin.savefig(save_path)
        if args.lin_log:
            fig_lin_log.savefig(save_path.parents[0] / Path(save_path.stem + '_lin_log' + save_path.suffix))
        if args.log_lin:
            fig_log_lin.savefig(save_path.parents[0] / Path(save_path.stem + '_log_lin' + save_path.suffix))
        if args.log_log:
            fig_log_log.savefig(save_path.parents[0] / Path(save_path.stem + '_log_log' + save_path.suffix))
    else:
        plt.show()
if __name__ == '__main__':
    main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
43328,
262,
36541,
295,
286,
262,
6175,
7515,
287,
640,
422,
257,
900,
286,
47787,
5140,
287,
257,
8619,
13,
198,
198,
15506,
63,
1391,
13,
5239,
92,
198,
26060,
... | 2.391843 | 2,501 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from datetime import datetime
from tempest.test import attr
import requests
import yaml
import time
import os
import re
import pdb
LOG = logging.getLogger(__name__)
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
... | 3.354839 | 279 |
import json
import argparse
from mlcl.init_implementation import init_implementation
if __name__ == '__main__':
    # CLI entry point: run one (implementation, dataset, phase) combination
    # under the requested profiler and write the measurements to a JSON log.
    # Exit status: 0 on success, 1 on any failure (details go into the log).
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('--profiling-method', help='Type of profiling that shall be run.')
        parser.add_argument('--implementation', help='Implemented method.')
        parser.add_argument('--config', help='Config for implementation.')
        parser.add_argument('--datafile', help='Data for profiling.')
        parser.add_argument('--train-apply', help='Indicates whether to run training or application.')
        parser.add_argument('--log-file', help='Name of log file to write.')
        args = parser.parse_args()

        # load implementation and data
        implementation = init_implementation(args.implementation)
        with open(args.config, 'r', encoding='utf-8') as clf:
            config = json.load(clf)
        implementation.prepare(config)
        data_handler = implementation.ds_class(args.datafile)
        data_info = data_handler.info()
        data_info['logfile'] = args.log_file

        if args.profiling_method == 'evaluate':
            # Evaluation needs no profiler: score the model's predictions on
            # the complete apply split (training has nothing to evaluate, so
            # an empty record is logged for the 'train' phase).
            if args.train_apply == 'train':
                prof = {}
            else:
                data_apply, _ = data_handler.get_apply(complete=True)
                prof = data_handler.evaluate(implementation.apply(data_apply, data_info))
        else:
            # Wrap the phase to be profiled in a zero-argument callable so
            # every profiler can run it uniformly.
            if args.train_apply == 'train':
                data_train, data_size = data_handler.get_train()
                func = lambda: implementation.train(data_train, data_info)
            elif args.train_apply == 'apply':
                data_apply, data_size = data_handler.get_apply()
                func = lambda: implementation.apply(data_apply, data_info)
            else:
                raise RuntimeError(f'"{args.train_apply}" is not valid, please pass "train" or "apply"!')
            # run and log profiling
            # ('evaluate' can never reach this branch -- it is fully handled
            # above -- so the former no-op elif for it was removed as dead
            # code.)
            if args.profiling_method == 'energy':
                try:
                    from .energy import PyRaplEnergyProfiling
                    profiling = PyRaplEnergyProfiling()
                except ModuleNotFoundError:
                    # TODO add more Energy profilers here, e.g. for Windows
                    from .energy import DummyEnergyProfiling
                    profiling = DummyEnergyProfiling()
            elif args.profiling_method == 'runtime':
                from .runtime import RuntimeProfiling
                profiling = RuntimeProfiling()
            elif args.profiling_method == 'memory':
                from .memory import MemoryProfiling
                profiling = MemoryProfiling(data_size)
            elif args.profiling_method == 'gpu':
                from .gpu_prof import GpuMonitoringProcess
                profiling = GpuMonitoringProcess()
            else:
                raise RuntimeError(f'Profiling {args.profiling_method} not implemented!')
            prof, result = profiling.run(func)
        success = 0
    except Exception as exc:
        # Record the failure (message + traceback) instead of crashing, so the
        # calling pipeline can inspect what went wrong from the log file.
        # NOTE(review): if argument parsing itself raised, ``args`` is
        # undefined and the log write below will fail -- pre-existing
        # behaviour, kept as-is.
        import traceback
        prof = {
            'ERROR': str(exc),
            'DETAILS': str(traceback.format_exc())
        }
        success = 1
    with open(args.log_file, 'w', encoding='utf-8') as lf:
        json.dump(prof, lf)
    import sys
    sys.exit(success)
| [
11748,
33918,
198,
11748,
1822,
29572,
198,
198,
6738,
25962,
565,
13,
15003,
62,
320,
32851,
1330,
2315,
62,
320,
32851,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
1949,
25,
198,
220,
220,... | 2.224902 | 1,534 |
#coding:utf-8
#
# id: bugs.core_4653
# title: Crash on infinite mutual SP calls (instead of "Too many concurrent executions of the same request.")
# decription: 20150108: crach every even run, WI-T3.0.0.31529, Win XP SP3
# tracker_id: CORE-4653
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = [('=.*', ''), ('line.*', ''), ('col.*', '')]
init_script_1 = """
set term ^;
create or alter procedure p03(a_i int) returns (z int) as
begin
z = 0 ;
suspend;
end^
commit^
create or alter procedure p02(a_i int) returns (z int) as
begin
z = (select z from p03(:a_i)) + 1;
suspend;
end^
commit^
create or alter procedure p03(a_i int) returns (z int) as
begin
z = (select z from p02(:a_i)) + 1;
suspend;
end^
commit^
create or alter procedure p01(a_i int) returns (z int) as
begin
z = (select z from p02(:a_i)) + 1;
suspend;
end^
commit^
set term ;^
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
-- 07.03.2015: updated expected_stderr - it was changed and now is identical in 2.5 & 3.0
-- Old stderr:
-- Statement failed, SQLSTATE = HY001
-- Stack overflow. The resource requirements of the runtime stack have exceeded the memory available to it.
select * from p01(1);
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Z
============
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 54001
Too many concurrent executions of the same request
-At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At procedure 'P02' line: 3, col: 3
At procedure 'P03' line: 3, col: 3
At p...
"""
@pytest.mark.version('>=3.0')
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
2,
198,
2,
4686,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
11316,
13,
7295,
62,
42018,
18,
198,
2,
3670,
25,
220,
220,
220,
220,
220,
220,
220,
23653,
319,
15541,
13584,
6226,
3... | 2.3576 | 1,250 |
import functools
## these code solution is from: https://github.com/danielgaylord/coding-exercises/blob/main/Advent%20of%20Code/2021-Day16.py
# Read puzzle input and return as usable data structure
# Find value of a literal value subpacket
# Recursive function to decode packets and subpackets
if __name__ == "__main__":
    # NOTE(review): ``packet_decoder(path, mode)`` must be defined above /
    # imported before this runs; presumably mode 0 = sum of packet versions
    # and mode 1 = evaluated expression value (AoC 2021 day 16) -- confirm
    # against the upstream solution linked at the top of the file.
    # Part 1 solution
    # print(packet_decoder('Advent of Code/test.txt', 0))
    print(packet_decoder('day16/day16_1_input.txt', 0))
    # Part 2 solution
    # print(packet_decoder('Advent of Code/test.txt', 1))
    print(packet_decoder('day16/day16_1_input.txt', 1))
| [
11748,
1257,
310,
10141,
198,
198,
2235,
777,
2438,
4610,
318,
422,
25,
3740,
1378,
12567,
13,
785,
14,
67,
6321,
22744,
10572,
14,
66,
7656,
12,
1069,
2798,
2696,
14,
2436,
672,
14,
12417,
14,
2782,
1151,
4,
1238,
1659,
4,
1238,
... | 2.757991 | 219 |
# Fit a polynomial to parameter-sweep scores, pick one model parameter from
# the single-player games, and emit per-player initial positions with that
# parameter to ``model-pars.csv``.
# NOTE(review): ``np`` and ``predict`` are not imported here directly; they
# presumably arrive via the ``from ... import *`` lines below -- confirm.
import os
import pandas as pd
from fit_simple import *
from social_heuristic import *
from baseline_model import *
from test_model import *
from sklearn import gaussian_process
data = []
in_dir = '../../new-processed/'
out_dir = '../../modeling/'
light_fields = ['0-1en01', '1-1en01', '2-1en01', '3-1en01']
pars = None
smoothed = {}
x = []
y = []
# Pool (log-parameter, score) pairs across all light fields; every sweep is
# asserted to cover the same parameter grid.
for light_field in light_fields:
    par_sweep = pd.read_csv(out_dir + light_field + '_parameters.csv')
    if pars is not None:
        assert set(pars) == set(par_sweep['par'])
    else:
        pars = par_sweep['par']
    x += list(np.log(pars))
    y += list(par_sweep['score'])
x = np.array(x)
y = np.array(y)
dim = 3
# np.polyfit returns highest-degree-first; reverse so index i is the x**i
# coefficient, then evaluate the cubic on the log-parameter grid.
fit = np.polyfit(x, y, dim)[::-1]
x = np.log(pars)
smoothed = np.dot(np.transpose([x**i for i in range(dim+1)]), fit)
#smoothed[light_field] = gaussian_process.GaussianProcess(nugget = 1e-12)
#smoothed[light_field].fit(np.transpose(np.array([par_sweep['score']])), par_sweep['par'])
#max(min(smoothed[background].predict(score, eval_MSE=True)[0][0], 1.0), 0.0)
# Mean second-half background value of solo, full-length (2880-tick) games
# determines the shared model parameter.
scores = []
for game in os.listdir(in_dir):
    if game.split('.')[-1] != 'csv' or game.split('_')[-2][2] != '1':
        continue
    background = game.split('_')[-2]
    df = pd.read_csv(in_dir + game)
    players = list(set(df['pid'].dropna()))
    if len(players) == 1 and len(df) == 2880:
        # was ``print game`` (Python-2 statement form): parenthesised call is
        # identical on Python 2 for a single argument and also valid Python 3.
        print(game)
        pid = players[0]
        scores += [np.mean(df.loc[1440:,'bg_val'])]
par = predict(np.mean(scores))
# Second pass: record every player's tick-0 position together with the
# fitted parameter.
scores = []
for game in os.listdir(in_dir):
    if game.split('.')[-1] != 'csv' or game.split('_')[-2][2] != '1':
        continue
    background = game.split('_')[-2]
    df = pd.read_csv(in_dir + game)
    players = list(set(df['pid'].dropna()))
    for pid in players:
        pos = np.array(df[(df['tick'] == 0)&(df['pid'] == pid)][['x_pos','y_pos']])[0]
        data += [[game, len(players), pid, par, pos[0], pos[1]]]
out_df = pd.DataFrame(data)
out_df.columns = ['game','n_players','pid','par','x_pos','y_pos']
out_df.to_csv(out_dir + '/model-pars.csv', index = False)
| [
198,
11748,
28686,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
4197,
62,
36439,
1330,
1635,
198,
6738,
1919,
62,
258,
27915,
1330,
1635,
198,
6738,
14805,
62,
19849,
1330,
1635,
198,
6738,
1332,
62,
19849,
1330,
1635,
1... | 2.143293 | 984 |
# Compare per-pulsar red-noise amplitude estimates from two pulsar-timing
# analyses (with and without a common red-noise process) and plot them as
# paired error bars; the figure is written next to the CRN noise files.
import os
import json
import glob
import numpy as np
from matplotlib import pyplot as plt
from enterprise_warp.results import suitable_estimator
crn_lvl_dir = '/fred/oz002/bgoncharov/correlated_noise_pta_2020_out/dr2_timing_20200607/20200908_20200917_cpl_vargam_x1psr/noisefiles/'
pe_lvl_dir = '/fred/oz002/bgoncharov/correlated_noise_pta_2020_out/dr2_timing_20200607/20200908_v1/noisefiles/'
# Credible levels from the common-red-noise (CRN) run: one shared JSON.
with open(crn_lvl_dir+'_credlvl.json', 'r') as fin:
  crn_lvl = json.load(fin)
# Per-pulsar credible levels; pulsar name is embedded in the file name
# (second underscore-separated token, 13-char suffix stripped).
pe_lvl = {}
lvlfiles = sorted(glob.glob(pe_lvl_dir + '*_credlvl.json'))
for lf in lvlfiles:
  psr_name = os.path.basename(lf)[:-13].split('_')[1]
  pe_lvl[psr_name] = {}
  with open(lf, 'r') as fin:
    pe_lvl[psr_name] = json.load(fin)
# NOTE(review): the four bare string literals below are inert no-op
# expressions -- apparently kept as examples of parameter-name formats.
'red_noise'
'system_noise_XXX_SYSTEM'
'band_noise_by_B_1020CM'
'band_noise_XXX_20CM'
keystr_red = ['red_noise','system_noise','band_noise']
yy_1 = list()
yy_met_1 = list()
yy_err_1 = list()
yy_2 = list()
yy_met_2 = list()
yy_err_2 = list()
xx = list()
xx_lbl_1 = list()
xx_lbl_2 = list()
count = 0
# Collect one point per (pulsar, red-noise log10_A parameter): center value
# plus asymmetric 16/84-percentile error bars, for both analyses.
for psr in pe_lvl.keys():
  for param in pe_lvl[psr].keys():
    #if any(map(param.__contains__, keystr_red)) and 'log10_A' in param:
    if 'red_noise' in param and 'log10_A' in param:
    #if ('band_noise' in param or 'system_noise' in param) and 'log10_A' in param:
    #if 'chromatic_gp' in param and 'log10_A' in param:
      (center_val, method) = suitable_estimator(pe_lvl[psr][param])
      yy_1.append(center_val)
      yy_met_1.append(method)
      yy_err_1.append([pe_lvl[psr][param]['84'] - center_val, \
                       center_val - pe_lvl[psr][param]['16']])
      xx.append(count)
      xx_lbl_1.append(param)
      # Secondary x-labels: maximum-posterior spectral index (gamma) of the
      # matching parameter.
      xx_lbl_2.append(pe_lvl[psr][param.replace('log10_A','gamma')]['maximum'])
      count += 1
      # Common red noise case
      if param in crn_lvl.keys():
        (center_val, method) = suitable_estimator(crn_lvl[param])
        yy_2.append(center_val)
        yy_met_2.append(method)
        yy_err_2.append([crn_lvl[param]['84'] - center_val, \
                         center_val - crn_lvl[param]['16']])
      else:
        # Keep the two series index-aligned by padding with NaN.
        yy_2.append(np.nan)
        yy_met_2.append(np.nan)
        yy_err_2.append([np.nan,np.nan])
        print('Warning: parameter ', param, 'not in CRN!')
xx_lbl_2 = [str("{:.1f}".format(xx2)) for xx2 in xx_lbl_2]
# The two series are offset by +/-0.05 so the error bars do not overlap.
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(111)
plt.errorbar(np.array(xx)-0.05, yy_1, yerr=np.array(yy_err_1).T, linestyle='', fmt='.', color='red', label='No CRN')
plt.errorbar(np.array(xx)+0.05, yy_2, yerr=np.array(yy_err_2).T, linestyle='', fmt='.', color='blue', label='CNR')
plt.xticks(xx, xx_lbl_1, rotation='vertical')
#ax1.set_xticks(xx)
#ax1.set_xticklabels(xx_lbl_1)
#plt.setp( ax1.xaxis.get_majorticklabels(), rotation=70 )
plt.xlabel('Red noise parameter')
plt.ylabel('$\log_{10}A$')
plt.legend()
# Twin x-axis on top carries the gamma labels for the same tick positions.
ax2 = ax1.twiny()
ax2.set_xticks(xx)
ax2.set_xticklabels(xx_lbl_2)
ax2.set_xlabel('$\gamma$')
ax1.set_xlim([np.min(xx)-0.5, np.max(xx)+0.5])
ax2.set_xlim([np.min(xx)-0.5, np.max(xx)+0.5])
plt.tight_layout()
plt.savefig(crn_lvl_dir + 'compare_red_crn_levels.png')
plt.close()
# NOTE(review): interactive debugger breakpoint left in on purpose? It will
# halt any non-interactive run of this script.
import ipdb; ipdb.set_trace()
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
6738,
13953,
62,
86,
5117,
13,
43420,
1330,
11080,
62,
395,
320,
1352,
198... | 2.034528 | 1,535 |
""" Weapon serializers. """
from rest_framework import serializers
from common.serializers import OwnedModelSerializer
from equipment.models import Weapon, WeaponProperty
class WeaponSerializer(OwnedModelSerializer):
    """Serializer for the Weapon model.

    Extends OwnedModelSerializer (presumably adds owner handling -- see
    ``common.serializers``); exposes weapon properties as a writable list
    of primary keys.
    """
    # many=True: a weapon may reference several WeaponProperty rows; the
    # queryset is what validates submitted ids on write.
    properties = serializers.PrimaryKeyRelatedField(
        queryset=WeaponProperty.objects.all(), many=True)
| [
37811,
13072,
11389,
11341,
13,
37227,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
2219,
13,
46911,
11341,
1330,
11744,
276,
17633,
32634,
7509,
198,
6738,
5112,
13,
27530,
1330,
13072,
11,
13072,
21746,
628,
198,
4871... | 3.747475 | 99 |
# Code generator: for each event name in ``l`` it prints the OCaml
# (gen_js_api style) signature of a typed listener module, then the shared
# plumbing that dispatches a polymorphic-variant listener to the underlying
# JS ``on``/``addListener``/... methods.
# NOTE(review): ``l`` is left empty here, so running this emits only the
# static scaffolding; populate it with event names to generate the
# per-event cases.
l = []
for el in l:
    print(
        f"""
module {el.capitalize().replace(".", "_")}Listener : sig
  type t = ???
  val t_to_js : t -> Ojs.t
  val t_of_js : Ojs.t -> t
end
"""
    )
# Open the variant type listing all supported listeners.
print(
    """
type listener =
  ([
"""
)
for el in l:
    print(
        f"""
  | `{el.capitalize().replace(".", "_")} of {el.capitalize().replace(".", "_")}Listener.t
"""
    )
# Close the variant and emit the JS-call implementations plus the
# dispatcher skeleton (``with_listener_fn``).
print(
    """
  ]
  [@js.union])
[@@@js.stop]
val on : t -> listener -> unit
val add_listener : t -> listener -> unit
val once : t -> listener -> unit
val prepend_listener : t -> listener -> unit
val prepend_once_listener : t -> listener -> unit
[@@@js.start]
[@@@js.implem
  val on : t -> string -> Ojs.t -> unit [@@js.call "on"]
  val add_listener : t -> string -> Ojs.t -> unit [@@js.call "addListener"]
  val once : t -> string -> Ojs.t -> unit [@@js.call "once"]
  val prepend_listener : t -> string -> Ojs.t -> unit
    [@@js.call "prependListener"]
  val prepend_once_listener : t -> string -> Ojs.t -> unit
    [@@js.call "prependOnceListener"]
  let with_listener_fn fn t = function
"""
)
# One dispatch case per event name.
for el in l:
    print(
        f"""
    | `{el.capitalize()} f ->
      fn t "{el}" @@ [%js.of: {el.capitalize().replace(".", "_")}Listener.t] f
"""
    )
print(
    f"""
  let on = with_listener_fn on
  let add_listener = with_listener_fn add_listener
  let once = with_listener_fn once
  let prepend_listener = with_listener_fn prepend_listener
  let prepend_once_listener = with_listener_fn prepend_once_listener]
"""
)
| [
75,
796,
17635,
198,
198,
1640,
1288,
287,
300,
25,
198,
220,
220,
220,
3601,
7,
198,
220,
220,
220,
220,
220,
220,
220,
277,
37811,
198,
21412,
1391,
417,
13,
27544,
1096,
22446,
33491,
7203,
33283,
45434,
4943,
92,
33252,
1058,
43... | 2.399031 | 619 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
YEAR: 2019 - 2021
@author: ADA GJERMUNDSEN
This script will reproduce FIGURE 5 in Gjermundsen et. al 2021
The data used for plotting is generated by scripts
contained in the same folder as this (FIGURE5)
"""
import xarray as xr
import warnings
warnings.simplefilter('ignore')
import numpy as np
import matplotlib.pyplot as plt
import cmocean.cm as cmo
from matplotlib import colors
from scipy.stats import linregress
from mpl_toolkits.basemap import Basemap
if __name__ == '__main__':
    # Build Figure 5: top row = initial (years 1-5) SST anomaly maps on a
    # south-polar orthographic projection; bottom row = observed/modelled
    # historical SST trend maps. Output is shown via the active backend.
    mm = .1/2.54  # millimeters in inches
    fig = plt.figure(figsize=(180*mm, 100*mm))
    gs = fig.add_gridspec(2, 2*16)
    #############################################################################################################
    path = 'path_to_data/'
    #######################################################################################################
    # Experiment -> [forced run file, matching control run file].
    sstlist={'NorESM2-hosing':['tos_Omon_NorESM2-LM_hosing_r1i1p1f1_5_5yravg.nc',
                               'tos_Omon_NorESM2-LM_piControl-hosing_r1i1p1f1_5_5yravg.nc'],
             'NorESM2-LM':['tos_Omon_NorESM2-LM_abrupt-4xCO2_r1i1p1f1_5_5yravg.nc',
                           'tos_Omon_NorESM2-LM_piControl_r1i1p1f1_5_5yravg.nc']}
    figlist=[r"$\bf{a)}$", r"$\bf{b)}$", r"$\bf{c)}$", r"$\bf{d)}$"]
    lats = [ -80, -60, -60, -80 ]
    lons = [ -60, -60, 60, 60 ]
    sstval=[-1.,2.5]
    sstlevels = np.arange(sstval[0],sstval[1]+.25,.25)
    cmap=cmo.balance
    n=0
    nn=0
    # Panels a/b: anomaly (forced minus control) for each experiment.
    for expid, file in sstlist.items():
        m = Basemap(projection='ortho',lat_0=-90,lon_0=0,resolution='c')
        ax = fig.add_subplot(gs[0,2*n:2*(n+3)])
        ax.cla()
        m.drawcoastlines(linewidth = .5,color='black')
        var = 'tos'
        case = xr.open_mfdataset(path + sstlist[expid][0])
        case = case[var]
        case = case.where(case.lat > -80)
        ctrl = xr.open_mfdataset(path + sstlist[expid][1])
        ctrl = ctrl[var]
        ctrl = ctrl.where(ctrl.lat > -80)
        casevals = case.values - ctrl.values
        # Append a cyclic longitude column so contours close at the seam.
        casevalscycl = np.zeros((len(case.lat.values),len(case.lon.values)+1))
        casevalscycl[:,:-1] = casevals
        casevalscycl[:,-1] = casevals[:,0]
        loncycl = np.append(case.lon.values, case.lon.values[-1]+np.diff(case.lon.values[0:2]))
        x, y = np.meshgrid(loncycl,case.lat.values )
        px,py = m(x, y)
        # NOTE(review): colors.DivergingNorm was renamed TwoSlopeNorm in
        # matplotlib >= 3.2; keep an older matplotlib or rename on upgrade.
        m.contourf(px,py,casevalscycl, levels = sstlevels, cmap=cmap,
                   extend="both", norm = colors.DivergingNorm(vmin=sstval[0], vcenter=0., vmax=sstval[1]))
        m.fillcontinents(color='lightgrey')
        m.drawcoastlines(linewidth = .5)
        parallels = np.arange(-60,0,30)
        cax4=m.drawparallels(parallels, linewidth = 0.4)
        # NOTE(review): np.str is deprecated (removed in NumPy 1.24); plain
        # str() is equivalent here.
        for i in np.arange(len(parallels)):
            plt.annotate(np.str(np.abs(parallels[i]))+'°S',xy=m(220,parallels[i]+5),xycoords='data', fontsize=8)
        if nn in [0,2]:
            meridian = np.arange(0,360,60)
            cax5=m.drawmeridians(meridian,labels=[1,1,1,1], linewidth = 0.4, fontsize=8)
        else:
            meridian = np.arange(0,360,90)
            cax5=m.drawmeridians(meridian,labels=[1,1,1,1], linewidth = 0.4, fontsize=8)
        plt.clim(sstval[0],sstval[1])
        # Hosing panel: dashed box marking the freshwater-hosing region;
        # also keep the field for the difference panel below.
        if expid == 'NorESM2-hosing':
            norhos5=case
            lons = np.linspace(-60,60)
            lats = np.linspace(-60,-60)
            x,y = m(lons,lats)
            m.plot(x,y, linewidth=1.5, color='black',linestyle='--')
            lons = np.linspace(-60,-60)
            lats = np.linspace(-90,-60)
            x,y = m(lons,lats)
            m.plot(x,y, linewidth=1.5, color='black',linestyle='--')
            lons = np.linspace(60,60)
            lats = np.linspace(-90,-60)
            x,y = m(lons,lats)
            m.plot(x,y, linewidth=1.5, color='black',linestyle='--')
        if expid == 'NorESM2-LM':
            noresm2 = case
        ax.set_title(figlist[nn]+ ' ' +expid +'\n', fontsize=9)#+ ' (5 years)')
        # Dotted reference circles at 62S and 45S.
        lons = np.linspace(-180,180)
        lats = np.linspace(-62,-62)
        x,y = m(lons,lats)
        m.plot(x,y, linewidth=1.5, color='yellow',linestyle='dotted')
        lats = np.linspace(-45,-45)
        x,y = m(lons,lats)
        m.plot(x,y, linewidth=1.5, color='yellow',linestyle='dotted')
        n=n+4
        nn = nn+1
    # Panel c: difference between the two experiments (reuses px/py grid
    # from the loop above -- both fields share it).
    ax = fig.add_subplot(gs[0,2*n:2*(n+3)])
    ax.cla()
    m.drawcoastlines(linewidth = .5,color='black')
    casevals = noresm2.values - norhos5.values
    casevalscycl = np.zeros((len(case.lat.values),len(case.lon.values)+1))
    casevalscycl[:,:-1] = casevals
    casevalscycl[:,-1] = casevals[:,0]
    m.contourf(px,py,casevalscycl, levels = sstlevels, cmap=cmap,
               extend="both", norm = colors.DivergingNorm(vmin=sstval[0], vcenter=0., vmax=sstval[1]))
    m.fillcontinents(color='lightgrey')
    m.drawcoastlines(linewidth = .5)
    parallels = np.arange(-60,0,30)
    cax4=m.drawparallels(parallels, linewidth = 0.4)
    for i in np.arange(len(parallels)):
        plt.annotate(np.str(np.abs(parallels[i]))+'°S',xy=m(220,parallels[i]+5),xycoords='data', fontsize=8)
    if nn in [0,2]:
        meridian = np.arange(0,360,60)
        cax5=m.drawmeridians(meridian,labels=[1,1,1,1], linewidth = 0.4, fontsize=8)
    else:
        meridian = np.arange(0,360,90)
        cax5=m.drawmeridians(meridian,labels=[1,1,1,1], linewidth = 0.4, fontsize=8)
    plt.clim(sstval[0],sstval[1])
    lons = np.linspace(-180,180)
    lats = np.linspace(-62,-62)
    x,y = m(lons,lats)
    m.plot(x,y, linewidth=1.5, color='yellow',linestyle='dotted')
    lats = np.linspace(-45,-45)
    x,y = m(lons,lats)
    m.plot(x,y, linewidth=1.5, color='yellow',linestyle='dotted')
    ax.set_title(figlist[nn]+ ' ' +'NorESM2-LM -\nNorESM2-hosing', backgroundcolor= 'white', fontsize = 9)
    n=n+4
    nn=nn+1
    # Panel d: CESM2 abrupt-4xCO2 anomaly, same recipe as panels a/b.
    ax = fig.add_subplot(gs[0,2*n:2*(n+3)])
    case = xr.open_mfdataset(path + 'tos_Omon_CESM2_abrupt-4xCO2_r1i1p1f1_5_5yravg.nc')
    case = case[var]
    ctrl = xr.open_mfdataset(path + 'tos_Omon_CESM2_piControl_r1i1p1f1_5_5yravg.nc')
    ctrl = ctrl[var]
    casevals = case.values - ctrl.values
    casevalscycl = np.zeros((len(case.lat.values),len(case.lon.values)+1))
    casevalscycl[:,:-1] = casevals
    casevalscycl[:,-1] = casevals[:,0]
    loncycl = np.append(case.lon.values, case.lon.values[-1]+np.diff(case.lon.values[0:2]))
    x, y = np.meshgrid(loncycl,case.lat.values )
    px,py = m(x, y)
    m.contourf(px,py,casevalscycl, levels = sstlevels, cmap=cmap,
               extend="both", norm = colors.DivergingNorm(vmin=sstval[0], vcenter=0., vmax=sstval[1]))
    m.fillcontinents(color='lightgrey')
    m.drawcoastlines(linewidth = .5)
    parallels = np.arange(-60,0,30)
    cax4=m.drawparallels(parallels, linewidth = 0.4)
    for i in np.arange(len(parallels)):
        plt.annotate(np.str(np.abs(parallels[i]))+'°S',xy=m(220,parallels[i]+5),xycoords='data', fontsize=8)
    if nn in [0,2]:
        meridian = np.arange(0,360,60)
        cax5=m.drawmeridians(meridian,labels=[1,1,1,1], linewidth = 0.4, fontsize=8)
    else:
        meridian = np.arange(0,360,90)
        cax5=m.drawmeridians(meridian,labels=[1,1,1,1], linewidth = 0.4, fontsize=8)
    plt.clim(sstval[0],sstval[1])
    lats = np.linspace(-62,-62)
    x,y = m(lons,lats)
    m.plot(x,y, linewidth=1.5, color='yellow',linestyle='dotted')
    lats = np.linspace(-45,-45)
    x,y = m(lons,lats)
    m.plot(x,y, linewidth=1.5, color='yellow',linestyle='dotted')
    ax.set_title(figlist[nn]+ ' ' +'CESM2\n', fontsize = 9)#' (5 years)')
    # Shared colorbar for the top row.
    cax = fig.add_axes([0.9, 0.61, 0.013, 0.2 ])
    cb = plt.colorbar(cax=cax,ticks=sstlevels[::4], orientation='vertical' )
    cb.set_label(label='SST change [K]', fontsize=10)
    cb.ax.tick_params(labelsize=11)
    #%%
    # Bottom row (panels e-h): 1960-2014 SST trends, HadISST obs first then
    # the three models; model files live under ``path``, obs under
    # ``pathobs``.
    pathobs = 'path_to_obs_trend_data/'
    filelist =['sst_HadISST_historical_1960_2014_trend.nc',
               'tos_NorESM2-LM_historical_1960_2014_ssttrend.nc',
               'tos_NorESM2-MM_historical_1960_2014_ssttrend.nc',
               'tos_CESM2_historical_1960_2014_ssttrend.nc']
    vval=[-0.1,0.4]
    dv = 0.025
    levels = np.arange(vval[0],vval[1]+dv,dv)
    figlist=[r"$\bf{e)}$", r"$\bf{f)}$", r"$\bf{g)}$", r"$\bf{h)}$"]
    cmap=cmo.balance
    n=0
    nn=0
    for file in filelist:
        ax = fig.add_subplot(gs[1,2*n:2*(n+3)])
        ax.cla()
        m.drawcoastlines(linewidth = .5,color='black')
        # Model files are named 'tos...' and use a 'tos_trend' variable;
        # normalise it to 'sst_trend' so the plotting below is uniform.
        if file[:3]=='tos':
            var = 'tos'
            ds = xr.open_dataset(path + file)
            ds = ds.rename({'tos_trend':'sst_trend'})
        else:
            ds = xr.open_dataset(pathobs + file)
            var = 'sst'
        m = Basemap(projection='ortho',lat_0=-90,lon_0=0,resolution='c')
        casevalscycl = np.zeros((len(ds.lat.values),len(ds.lon.values)+1))
        casevalscycl[:,:-1] = ds.sst_trend.values
        casevalscycl[:,-1] = ds.sst_trend.values[:,0]
        loncycl = np.append(ds.lon.values, ds.lon.values[-1]+np.diff(ds.lon.values[0:2]))
        x, y = np.meshgrid(loncycl,ds.lat.values )
        px,py = m(x, y)
        m.contourf(px,py, casevalscycl, levels = levels, cmap=cmap,
                   extend="both", norm = colors.DivergingNorm(vmin=vval[0], vcenter=0., vmax=vval[1]))
        m.plot(x,y, linewidth=1.5, color='r',linestyle='--')
        m.drawcoastlines(linewidth = .5)
        m.fillcontinents(color='lightgrey')
        m.drawmapboundary(linewidth = .5)
        parallels = np.arange(-60,0,30)
        cax4=m.drawparallels(parallels, linewidth = 0.4)
        for i in np.arange(len(parallels)):
            plt.annotate(np.str(np.abs(parallels[i]))+'°S',xy=m(240,parallels[i]+5),xycoords='data', fontsize=8)
        if nn in [0,2]:
            meridian = np.arange(0,360,60)
            cax5=m.drawmeridians(meridian,labels=[1,1,1,1], linewidth = 0.4, fontsize=8)
        else:
            meridian = np.arange(0,360,90)
            cax5=m.drawmeridians(meridian,labels=[1,1,1,1], linewidth = 0.4, fontsize=8)
        ax.set_title(figlist[nn]+ ' ' + file.split('_')[1] +'\n', fontsize=9)
        plt.clim(vval[0],vval[1])
        n=n+4
        nn = nn+1
    # Shared colorbar for the bottom row, then row titles and final layout.
    cax = fig.add_axes([0.9, 0.1, 0.013, 0.2])
    cb = plt.colorbar(cax=cax,ticks=levels[::4], orientation='vertical' )
    cb.set_label(label='SST trend [K/dec.]', fontsize=10)
    cb.ax.tick_params(labelsize=9)
    plt.text(-23,2.21,'Initial SST anomalies (1-5 years) ', fontsize = 11)
    plt.text(-23,.85,'Historical SST trends (1960 - 2014)', fontsize = 11)
    plt.subplots_adjust(left=0.05, bottom=-0.05, right=0.9, top=.95, wspace=0.5, hspace=0.)
| [
72,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
56,
17133,
25,
13130,
532,
33448,
198,
198,
31,
9800,
25,
46460,
402,
41,
1137,
44,
4944,
5258,
... | 1.87952 | 5,669 |
import os
import numpy as np
import pandas as pd
import json
import base64
# from netCDF4 import Dataset,chartostring
from netcdf import NetCDF
from s3netcdfapi import S3NetCDFAPI
# import binpy
import scipy.io as sio
from mbtilesapi import getTile,getVT,readVT,send,VT2Tile,Points2VT,getVTfromBinary
from s3netcdfapi.data import getData
import s3netcdfapi.export as export
input={
"name":"s3netcdfapi_test",
"cacheLocation":"../s3",
"localOnly":True,
"verbose":True,
"maxPartitions":40,
"autoRemove":False,
}
# def test_slf():
# with S3NetCDFAPI(input) as netcdf:
# obj=netcdf.prepareInput({"export":"slf","variable":"u,v","inode":[0,1,2],"itime":[0,1]})
# export.to_slf(obj,getData(netcdf,obj))
# def test_binary():
# with S3NetCDFAPI(input) as netcdf:
# obj=netcdf.prepareInput({"export":"bin","variable":"mesh"})
# export.to_binary(netcdf,obj,getData(netcdf,obj))
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# np.testing.assert_array_almost_equal(results['elem'],netcdf['elem','elem'])
# np.testing.assert_array_almost_equal(results['x'],netcdf['node','x'])
# np.testing.assert_array_almost_equal(results['y'],netcdf['node','y'])
# obj=netcdf.prepareInput({"export":"bin","variable":"time"})
# export.to_binary(netcdf,obj,getData(netcdf,obj))
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# np.testing.assert_array_equal(results['time'],netcdf['time','time'])
# obj=netcdf.prepareInput({"export":"bin","variable":"freq"})
# export.to_binary(netcdf,obj,getData(netcdf,obj))
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# np.testing.assert_array_equal(results['freq'],netcdf['freq','freq'])
# obj=netcdf.prepareInput({"export":"bin","variable":"u","itime":0})
# export.to_binary(netcdf,obj,getData(netcdf,obj),0,10301)
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# image=results['u_s_0'].reshape(netcdf.res()*netcdf.res(),2)
# nnode=netcdf._meta['dimensions']['nnode']
# np.testing.assert_array_almost_equal(np.round(export.decode(image,0,10301)[:nnode]),np.squeeze(netcdf['s','u',0]))
# obj=netcdf.prepareInput({"export":"bin","variable":"u","inode":0})
# export.to_binary(netcdf,obj,getData(netcdf,obj))
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# np.testing.assert_array_almost_equal(results['u_t_0'],np.squeeze(netcdf['t','u',0]))
# obj=netcdf.prepareInput({"export":"bin","variable":"u","x":-159.0,"y":40.0})
# export.to_binary(netcdf,obj,getData(netcdf,obj))
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# np.testing.assert_array_almost_equal(results['u_t_-159.0_40.0'],np.squeeze(netcdf['t','u',10]))
# obj=netcdf.prepareInput({"export":"bin","variable":"spectra","isnode":0,"itime":0})
# export.to_binary(netcdf,obj,getData(netcdf,obj))
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# np.testing.assert_array_almost_equal(results['spectra_0_0'],np.squeeze(netcdf['spc','spectra',0,0]))
# obj=netcdf.prepareInput({"export":"bin","variable":"spectra","itime":0,"x":-159.0,"y":40.0})
# export.to_binary(netcdf,obj,getData(netcdf,obj))
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# np.testing.assert_array_almost_equal(results['spectra_-159.0_40.0_0'],np.squeeze(netcdf['spc','spectra',5,0]))
# obj=netcdf.prepareInput({"export":"bin","variable":"spectra","start":"2000-01-01T02","end":"2000-01-01T02","x":-159.0,"y":40.0})
# export.to_binary(netcdf,obj,getData(netcdf,obj))
# with open(obj["filepath"]+".bin","rb") as f:results=binpy.read(f)
# np.testing.assert_array_almost_equal(results['spectra_-159.0_40.0_2000-01-01T02:00:00'],np.squeeze(netcdf['spc','spectra',5,2]))
# def test_mat():
# export.mat()
# def test_shapefile():
# export.shapefile()
# def test_tri():
# export.tri()
if __name__ == "__main__":
test_table()
test_csv()
test_json()
test_geojson()
test_netcdf()
test_mbtiles()
test_mat()
# test_binary()
# test_slf()
# test_mat()
# test_shapefile()
# test_tri() | [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
33918,
198,
11748,
2779,
2414,
198,
2,
422,
2010,
34,
8068,
19,
1330,
16092,
292,
316,
11,
40926,
455,
1806,
198,
6738,
2010,
66,
7568... | 2.21658 | 1,930 |
# The MIT License (MIT)
#
# Copyright (c) 2016 Litrin Jiang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from abc import ABCMeta
__all__ = [
'Normal',
'NormalS',
'MA',
'EMA',
]
| [
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
1584,
25659,
12769,
32294,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
... | 3.549708 | 342 |
# coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
FileConfirmed=open('source/GlobalConfirm.csv', 'r')
lines_confirm=FileConfirmed.readlines()
lines_confirm.pop(0)
FileRecovered=open('source/GlobalRecovered.csv', 'r')
lines_recovered=FileRecovered.readlines()
lines_recovered.pop(0)
FileDeath=open('source/GlobalDeath.csv', 'r')
lines_death=FileDeath.readlines()
lines_death.pop(0)
XChina=[]
YChina=[]
XAmecria=[]
YAmecria=[]
XIran=[]
YIran=[]
XItaly=[]
YItaly=[]
XSpain=[]
YSpain=[]
XGermany=[]
YGermany=[]
XFrance=[]
YFrance=[]
XUK=[]
YUK=[]
for i in range(4,94,1):
XChina.append(i)
XAmecria.append(i)
XIran.append(i)
XItaly.append(i)
XSpain.append(i)
XGermany.append(i)
XFrance.append(i)
XUK.append(i)
valueChina=0
valueAmerica=0
valueItaly=0
valueIran=0
valueSpain=0
valueGermany=0
valueFrance=0
valueUK=0
dvalueChina = 0
dvalueAmerica = 0
dvalueItaly = 0
dvalueIran = 0
dvalueSpain = 0
dvalueGermany = 0
dvalueFrance = 0
dvalueUK = 0
for line in lines_confirm:
lineMessage=line.split(',')
if lineMessage[1]=='China':
valueChina += int(lineMessage[i])
if lineMessage[1]=='US':
valueAmerica += int(lineMessage[i])
if lineMessage[1] == 'Iran':
valueIran += int(lineMessage[i])
if lineMessage[1] == 'Italy':
valueItaly += int(lineMessage[i])
if lineMessage[1] == 'Spain':
valueSpain+= int(lineMessage[i])
if lineMessage[1] == 'Germany':
valueGermany += int(lineMessage[i])
if lineMessage[1] == 'France':
valueFrance += int(lineMessage[i])
if lineMessage[1] == 'United Kingdom':
valueUK += int(lineMessage[i])
for line in lines_death:
lineMessage = line.split(',')
if lineMessage[1] == 'China':
dvalueChina += int(lineMessage[i])
if lineMessage[1] == 'US':
dvalueAmerica += int(lineMessage[i])
if lineMessage[1] == 'Iran':
dvalueIran += int(lineMessage[i])
if lineMessage[1] == 'Italy':
dvalueItaly += int(lineMessage[i])
if lineMessage[1] == 'Spain':
dvalueSpain += int(lineMessage[i])
if lineMessage[1] == 'Germany':
dvalueGermany += int(lineMessage[i])
if lineMessage[1] == 'France':
dvalueFrance += int(lineMessage[i])
if lineMessage[1] == 'United Kingdom':
dvalueUK += int(lineMessage[i])
for line in lines_recovered:
lineMessage = line.split(',')
if lineMessage[1] == 'China':
dvalueChina += int(lineMessage[i])
if lineMessage[1] == 'US':
dvalueAmerica += int(lineMessage[i])
if lineMessage[1] == 'Iran':
dvalueIran += int(lineMessage[i])
if lineMessage[1] == 'Italy':
dvalueItaly += int(lineMessage[i])
if lineMessage[1] == 'Spain':
dvalueSpain += int(lineMessage[i])
if lineMessage[1] == 'Germany':
dvalueGermany += int(lineMessage[i])
if lineMessage[1] == 'France':
dvalueFrance += int(lineMessage[i])
if lineMessage[1] == 'United Kingdom':
dvalueUK += int(lineMessage[i])
YChina.append(valueChina-dvalueChina)
YAmecria.append(valueAmerica-dvalueAmerica)
YIran.append(valueIran-dvalueIran)
YItaly.append(valueItaly - dvalueItaly)
YSpain.append(valueSpain - dvalueSpain)
YGermany.append(valueGermany - dvalueGermany)
YFrance.append(valueFrance - dvalueFrance)
YUK.append(valueUK - dvalueUK)
x=np.arange(0,100)
plt.plot(XChina,YChina,label='China')
plt.plot(XAmecria,YAmecria,label='America')
plt.plot(XIran,YIran,label='Iran')
plt.plot(XItaly,YItaly,label='Italy')
plt.plot(XSpain,YSpain,label='Spain')
plt.plot(XGermany,YGermany,label='Germany')
plt.plot(XFrance,YFrance,label='France')
plt.plot(XUK,YUK,label='United Kingdom')
plt.title('Global Net Confirmed')
plt.xlabel('Days')
plt.ylabel('Net Confirmed')
plt.legend()
plt.savefig('plt/GlobalNetConfirm.png',dpi=300)
plt.show()
plt.plot(XChina,YChina,label='China')
plt.plot(XAmecria,YAmecria,label='America')
plt.plot(XIran,YIran,label='Iran')
plt.plot(XItaly,YItaly,label='Italy')
plt.plot(XSpain,YSpain,label='Spain')
plt.plot(XGermany,YGermany,label='Germany')
plt.plot(XFrance,YFrance,label='France')
plt.plot(XUK,YUK,label='United Kingdom')
plt.title('Global Net Confirmed As Log')
plt.xlabel('Days')
plt.ylabel('Net Confirmed')
plt.legend()
plt.semilogy()
plt.savefig('plt/GlobalNetConfirmForLog.png',dpi=300)
plt.show()
| [
2,
19617,
25,
40477,
12,
23,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
8979,
18546,
15491,
28,
9654,
10786,
10459,
14,
22289,
18546,
2533,
13,
40664,
3256,
705,
81,
115... | 2.231981 | 2,095 |
import torch
import torch.nn as nn
import numpy as np
from einops import rearrange
#from nystrom_attention import Nystromformer
import gc
import collections
import torch.nn.functional as F
class ConvBlock(nn.Module):
"""Some Information about ConvBlock"""
if __name__ == '__main__':
x = torch.rand((8,48,56,56))
net = SelfAttention().to(device="cuda")
y = net(x)
print(y.shape) | [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
304,
259,
2840,
1330,
37825,
858,
198,
2,
6738,
299,
88,
20282,
62,
1078,
1463,
1330,
17735,
20282,
16354,
198,
11748,
308,
66,
1... | 2.715232 | 151 |
import sys
sys.path.append("..")
from utils import utils, responses
from django.http import HttpResponse, HttpRequest
from django.views import View
from rest_framework.views import APIView # For swagger
from documentapp.models import Document
from profileapp.models import Profile
from .models import Comment
# Create your views here.
| [
11748,
25064,
198,
198,
17597,
13,
6978,
13,
33295,
7203,
492,
4943,
198,
6738,
3384,
4487,
1330,
3384,
4487,
11,
9109,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
367,
29281,
18453,
198,
6738,
42625,
14208,
13,
3357... | 3.766667 | 90 |
from pytest_kind import KindCluster
from kluctl.e2e.conftest import recreate_namespace
from kluctl.e2e.kluctl_test_project import KluctlTestProject
from kluctl.e2e.kluctl_test_project_helpers import add_configmap_deployment
from kluctl.utils.dict_utils import get_dict_value
from kluctl.utils.yaml_utils import yaml_load
| [
6738,
12972,
9288,
62,
11031,
1330,
14927,
2601,
5819,
198,
198,
6738,
479,
2290,
34168,
13,
68,
17,
68,
13,
1102,
701,
395,
1330,
32049,
62,
14933,
10223,
198,
6738,
479,
2290,
34168,
13,
68,
17,
68,
13,
74,
2290,
34168,
62,
9288,
... | 2.833333 | 114 |
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ('*',)
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
INTERNAL_IPS = ('127.0.0.1',)
# Email settings for Mailhog.
# These need to match the settings in docker-compose.yml.
#
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
| [
6738,
764,
8692,
1330,
1635,
198,
198,
2,
10729,
4261,
9050,
39410,
25,
836,
470,
1057,
351,
14257,
2900,
319,
287,
3227,
0,
198,
30531,
796,
6407,
198,
198,
7036,
3913,
1961,
62,
39,
10892,
50,
796,
19203,
9,
3256,
8,
198,
198,
7... | 2.560241 | 166 |
import os
import win32com.client as client
text = """
This sample document is generated by WdBibTeX.
Some text with dummy citation \\cite{enArticle1} will be converted \
to [1] by executing wdbibtex.
The list of bibliography is placed to the thebibliography command as follows:
\\thebibliography
"""
ap = client.Dispatch('Word.Application')
ap.Visible = True
dc = ap.Documents.Add()
sl = ap.Selection
sl.InsertBefore(text)
dc.SaveAs2(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'sample.docx'
)
)
# Close document
dc.Close()
# Quit Word application if no other opened document
if len(ap.Documents) == 0:
ap.Quit()
| [
11748,
28686,
198,
11748,
1592,
2624,
785,
13,
16366,
355,
5456,
198,
198,
5239,
796,
37227,
198,
1212,
6291,
3188,
318,
7560,
416,
370,
36077,
571,
49568,
13,
198,
198,
4366,
2420,
351,
31548,
27860,
26867,
66,
578,
90,
268,
14906,
1... | 2.805907 | 237 |
# Generated by Django 3.0.6 on 2020-06-16 13:03
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
21,
319,
12131,
12,
3312,
12,
1433,
1511,
25,
3070,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2018. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from fn_geocoding.util.request_common import execute_call
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
def selftest_function(opts):
"""
Simple api test to confirm access
"""
options = opts.get("fn_geocoding", {})
url = options['url']
payload = { "key": options['api_key'],
"latlng": "42.3656119,-71.0805841"
}
response = execute_call(log, "get", url, None, None, payload, True, None, None)
if response and response['status'] == "OK":
return {
"state": "success",
"response": response
}
else:
log.error(response)
return {
"state": "failure"
} | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
357,
66,
8,
15069,
19764,
11421,
13,
2864,
13,
1439,
6923,
33876,
13,
198,
2,
23864,
2611,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
49140,
11,
645,
12,
944,
... | 2.333333 | 396 |
from .lib import eth_wallet
| [
6738,
764,
8019,
1330,
220,
4555,
62,
44623,
201,
198
] | 3 | 10 |
from unittest import skip
from unittest.mock import Mock, patch
from django.core import mail
from django.urls import reverse
from django.test import TestCase
from guardian.shortcuts import assign_perm, get_user_perms
from django.db.models import Count
from feder.cases.factories import CaseFactory
from feder.cases.models import Case
from feder.domains.factories import DomainFactory
from feder.institutions.factories import InstitutionFactory
from feder.letters.factories import IncomingLetterFactory, DraftLetterFactory
from feder.letters.factories import OutgoingLetterFactory
from feder.main.tests import PermissionStatusMixin
from feder.monitorings.filters import MonitoringFilter
from feder.parcels.factories import IncomingParcelPostFactory, OutgoingParcelPostFactory
from feder.teryt.factories import JSTFactory
from feder.records.factories import RecordFactory
from feder.users.factories import UserFactory
from .factories import MonitoringFactory
from .forms import MonitoringForm
from .models import Monitoring
from .tasks import send_letter_for_mass_assign, handle_mass_assign
EXAMPLE_DATA = {
"name": "foo-bar-monitoring",
"description": "xyz",
"notify_alert": True,
"subject": "example subject",
"template": "xyz {{EMAIL}}",
"email_footer": "X",
"domain": 1,
}
| [
6738,
555,
715,
395,
1330,
14267,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
11,
8529,
198,
198,
6738,
42625,
14208,
13,
7295,
1330,
6920,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
9288,
... | 3.458115 | 382 |
from test_in_prod import track_class
@track_class(thorough=True)
class HelperClass(object):
'''
def another_func(self, str_val):
return str_val
'''
| [
6738,
1332,
62,
259,
62,
1676,
67,
1330,
2610,
62,
4871,
198,
198,
31,
11659,
62,
4871,
7,
400,
7985,
28,
17821,
8,
198,
4871,
5053,
525,
9487,
7,
15252,
2599,
628,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
825,
1194,
62,
20... | 2.394366 | 71 |
import numpy as np
import pandas as pd
from tqdm import tqdm
def get_percentiles(x, n_percentiles=5):
""" Utility function to divide a particular variable up into
percentiles. """
return pd.qcut(x, q=n_percentiles, retbins=False, labels=False)
def reduce_repeats(X, y, categorical=False, use_index=False):
""" Removes the repeated trials from the target (y) and
design matrix (X) by taking the mean (if continuous) or
argmax (if categorical) across trial repetitions.
Parameters
----------
X : DataFrame
A pandas DataFrame with the intended predictors in columns and
observations in rows
y : Series
A pandas Series with the dependent variable
categorical : bool
Whether the dependent variable (y) is continuous or categorical
use_index : bool
In determining which trials are repeats, use the index instead of the
actual rows (not useful for 99% of the usecases)
Returns
-------
X_reduced : DataFrame
A pandas DataFrame with the repeats "reduced"
y_reduced : Series
A pandas Series with the repeats "reduced"
"""
X_, y = _check_Xy(X, y, categorical=categorical, use_index=use_index)
if use_index:
X_ = _use_index(X)
# Find repeated trials (keep all!)
rep_id, X_reduced = _find_repeats(X_)
# Add repetition ID to y so we can use groupby
y = y.to_frame().assign(rep_id=rep_id)
### FROM HERE ON, IT IS SPECIFIC TO THIS FUNCTION
if categorical:
opt = _y2opt(y)
# opt is a DataFrame with classes in columns, repetition IDs
# in rows and the fraction of class labels as values
# To get the reduced labels, simply take the argmax
y_reduced = opt.idxmax(axis=1)
else:
# If y is continuous, simply average the values
# across trials with the same repetition ID
y_reduced = y.groupby('rep_id').mean()
y_reduced = y_reduced.iloc[:, 0] # series to index
# Make sure indices match
y_reduced.index = X_reduced.index
if use_index: # stupid but necessary
# Want our original features (X) back, not the ones we used
# to determine repeats (X_)
uniq_idx = X_.duplicated(keep='first')
X_reduced = X.loc[~uniq_idx.to_numpy(), :]
# Check whether indices align & X does not contain repeats anymore
assert(y_reduced.index.equals(X_reduced.index))
assert((~X_reduced.duplicated()).any())
return X_reduced, y_reduced
def _check_Xy(X, y, categorical=False, use_index=False):
""" Some preemptive checks. """
nx, ny = X.shape[0], y.shape[0]
if nx != ny:
raise ValueError(f"Number of samples in X ({nx}) does not match "
f"the number of samples in y ({ny})!")
if not categorical:
# Make sure y are floating point values
# (crashes when "object")
y = y.astype(float)
else:
# Need to make sure the Series name is
# always the same
y = y.astype("category")
y = y.rename("target")
if not use_index:
# Make sure indices align
y.index = range(y.size)
X.index = range(X.shape[0])
else:
# Just checking!
assert(y.index.equals(X.index))
if X.duplicated().sum() == 0:
raise ValueError("There are no repeats in your data.")
else:
if categorical:
for cat in y.unique():
if X.loc[y == cat, :].duplicated().sum() == 0:
raise ValueError(f"There are no repeats for class {cat}!")
return X, y
def _find_repeats(X, progress_bar=False):
""" Finds repeats in DataFrame by checking each unique
row against all others.
Parameters
----------
X : DataFrame
A pandas DataFrame with predictors in columns and observations
("trials") in rows
Returns
-------
rep_id : np.ndarray
A numpy array of size X.shape[0] with indices that indicate
which trials are repeats of each other
X_uniq : DataFrame
A pandas DataFrame from which the repeats are removed (only
the first occurence is kept)
"""
# Within all repeated trials, get all unique trial configs
X_uniq = X.drop_duplicates(keep='first')
rep_id = np.zeros(X.shape[0]) # store indices
# Loop over unique rows to see which match other rows!
to_iter = range(X_uniq.shape[0])
if progress_bar:
to_iter = tqdm(to_iter)
for i in to_iter:
# ALL columns should match
idx = (X_uniq.iloc[i, :] == X).all(axis=1).to_numpy()
rep_id[idx] = i + 1 # each repeated trial gets a "repetition ID"
# After the loop, there should be no trials with ID 0 ...
if np.sum(rep_id == 0) != 0:
raise ValueError("Something went wrong in determining repeats ...")
return rep_id, X_uniq
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
628,
198,
4299,
651,
62,
25067,
2915,
7,
87,
11,
299,
62,
25067,
2915,
28,
20,
2599,
198,
220,
220,
220,
37227,
... | 2.45954 | 2,002 |
# Copyright (c) 2010 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import naclimc
if __name__ == "__main__":
Main(sys.argv[1:])
| [
2,
15069,
357,
66,
8,
3050,
383,
12547,
20985,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
... | 3.125 | 80 |
"""Input uuid handler."""
import re
from typing import Optional
from InquirerPy import inquirer # type: ignore
from redbrick.cli.cli_base import CLIInputParams
class CLIInputUUID(CLIInputParams):
"""Input uuid handler."""
def __init__(self, entity: Optional[str], name: str) -> None:
"""Init handlers."""
self.entity = entity
self.error_message = "Invalid " + name
self.name = name
def filtrator(self, entity: str) -> str:
"""Filter input entity."""
return entity.strip().lower()
def validator(self, entity: str) -> bool:
"""Validate input entity."""
uuid = self.filtrator(entity)
return re.match(r"^[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}$", uuid) is not None
def get(self) -> str:
"""Get filtered uuid value post validation."""
self.entity = self.from_args()
if self.entity is None:
self.entity = inquirer.text(
qmark=">",
amark=">",
message=self.name + ":",
transformer=self.filtrator,
filter=self.filtrator,
validate=self.validator,
invalid_message=self.error_message,
).execute()
return self.entity
| [
37811,
20560,
334,
27112,
21360,
526,
15931,
198,
11748,
302,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
17193,
557,
81,
20519,
1330,
38212,
81,
220,
1303,
2099,
25,
8856,
198,
198,
6738,
2266,
1671,
624,
13,
44506,
13,
44506,
62,
... | 2.178082 | 584 |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from simpleranker import SimpleRanker
@pytest.mark.docker
| [
834,
22163,
4766,
834,
796,
366,
15269,
357,
66,
8,
33448,
449,
1437,
9552,
15302,
13,
1439,
2489,
10395,
526,
198,
834,
43085,
834,
796,
366,
25189,
4891,
12,
17,
13,
15,
1,
198,
198,
11748,
850,
14681,
198,
198,
11748,
299,
32152,... | 3.2875 | 80 |
# coding=utf-8
# coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for recsim.agents.cluster_bandit_agent."""
from gym import spaces
import numpy as np
from recsim.agents import cluster_bandit_agent
from recsim.environments import interest_exploration as ie
import recsim.testing.test_base as test_base
if __name__ == '__main__':
test_base.main()
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
13130,
383,
3311,
8890,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
7... | 3.51938 | 258 |
from distutils.core import setup
setup(
name = 'steamprofile',
packages = ['steamprofile'],
version = '0.1.2',
license='MIT',
description = 'Gather Information about Profiles on Steam.',
author = 'Aaron Levi Can (aaronlyy)',
author_email = 'aaronlevican@gmail.com',
url = 'https://github.com/aaronlyy/steamprofile',
download_url = 'https://github.com/aaronlyy/steamprofile/archive/v0.1.2.tar.gz',
keywords = ['steam', 'steamcommunity'],
install_requires=[
"requests",
"unidecode"
],
classifiers=[
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
) | [
220,
220,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
40406,
7,
198,
220,
1438,
796,
705,
4169,
696,
305,
7753,
3256,
198,
220,
10392,
796,
37250,
4169,
696,
305,
7753,
6,
4357,
198,
220,
2196,
796,
705,
15,
13,
16,
13,
17,... | 2.847411 | 367 |
#!coding=utf8
from __future__ import unicode_literals
import datetime
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
from django_datajsonar.models import Catalog, Node, Distribution
from series_tiempo_ar_api.apps.analytics.models import Query
from series_tiempo_ar_api.apps.management.models import Indicator
from series_tiempo_ar_api.libs.datajsonar_repositories.distribution_repository import DistributionRepository
from series_tiempo_ar_api.libs.indexing.report import attachments
from series_tiempo_ar_api.libs.indexing.report.indicators_generator import IndicatorsGenerator
from series_tiempo_ar_api.libs.indexing.report.node_admins import GlobalAdmins, NodeAdmins
from series_tiempo_ar_api.libs.indexing.report.report_mail_sender import ReportMailSender
| [
2,
0,
66,
7656,
28,
40477,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
4818,
8079,
198,
198,
6738,
3128,
22602,
13,
2411,
265,
1572,
12514,
1330,
48993,
1572,
12514,
198,
6738,
42625,
14208,
13,
10... | 3.238095 | 273 |
from .actor_critics import ActorCritic
from .actor_critics import ActorCriticWithTargets
from .actor_critics import ActorTwinCriticWithTargets
from .actors import Actor
from .actors import DetachedScaleGaussianPolicyHead
from .actors import DeterministicPolicyHead
from .actors import GaussianPolicyHead
from .actors import SquashedMultivariateNormalDiag
from .critics import Critic, ValueHead
from .encoders import ObservationActionEncoder, ObservationEncoder
from .utils import MLP, trainable_variables
__all__ = [
MLP, trainable_variables, ObservationActionEncoder,
ObservationEncoder, SquashedMultivariateNormalDiag,
DetachedScaleGaussianPolicyHead, GaussianPolicyHead,
DeterministicPolicyHead, Actor, ValueHead, Critic, ActorCritic,
ActorCriticWithTargets, ActorTwinCriticWithTargets]
| [
6738,
764,
11218,
62,
22213,
873,
1330,
27274,
18559,
291,
198,
6738,
764,
11218,
62,
22213,
873,
1330,
27274,
18559,
291,
3152,
51,
853,
1039,
198,
6738,
764,
11218,
62,
22213,
873,
1330,
27274,
5080,
259,
18559,
291,
3152,
51,
853,
... | 3.344262 | 244 |
"""
Tests to be run against production servers and APIs to make sure nothing broke in production.
"""
| [
37811,
198,
198,
51,
3558,
284,
307,
1057,
1028,
3227,
9597,
290,
23113,
284,
787,
1654,
2147,
6265,
287,
3227,
13,
198,
198,
37811,
198
] | 4.16 | 25 |
"""
Nearest neighbor accuracy.
"""
import os
import torch
import numpy as np
from tqdm import tqdm
from copy import deepcopy
from dotmap import DotMap
from src.utils import utils
from torchvision import transforms
import torch.nn.functional as F
from torch.utils.data import DataLoader
from src.systems.simclr import SimCLRSystem, TaU_SimCLRSystem
from src.systems.mocov2 import MoCoV2System, TaU_MoCoV2System
from src.systems.hib import HIBSystem
from src.systems.transfer import Pretrained_TaU_SimCLRSystem
@torch.no_grad()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('exp_dir', type=str, help='experiment directory')
parser.add_argument('checkpoint_name', type=str, help='checkpoint name')
parser.add_argument('--dataset', type=str, default=None)
parser.add_argument('--one-view', action='store_true', default=False)
parser.add_argument('--gpu-device', type=int, default=-1)
args = parser.parse_args()
test_acc = get_nearest_neighbor_accuracy(args)
print('--------------------------')
print(f'Test Accuracy: {test_acc}')
| [
37811,
198,
8199,
12423,
4780,
9922,
13,
198,
37811,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
16605,
8899,
... | 2.963255 | 381 |
from cached_property import cached_property
from ..data import panda_urdfpath
from ..model import RobotModel
from .urdf import RobotModelFromURDF
class Panda(RobotModelFromURDF):
"""Panda Robot Model.
https://frankaemika.github.io/docs/control_parameters.html
"""
@cached_property
@cached_property
| [
6738,
39986,
62,
26745,
1330,
39986,
62,
26745,
198,
198,
6738,
11485,
7890,
1330,
279,
5282,
62,
2799,
69,
6978,
198,
6738,
11485,
19849,
1330,
16071,
17633,
198,
6738,
764,
2799,
69,
1330,
16071,
17633,
4863,
4261,
8068,
628,
198,
487... | 3.037383 | 107 |
"""This module contains the general information for ChassisPowerMonitor ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ChassisPowerMonitor(ManagedObject):
"""This is ChassisPowerMonitor class."""
consts = ChassisPowerMonitorConsts()
naming_props = set([])
mo_meta = {
"modular": MoMeta("ChassisPowerMonitor", "chassisPowerMonitor", "pwrmonitor", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], ['equipmentChassis'], [], ["Get"])
}
prop_meta = {
"modular": {
"average": MoPropertyMeta("average", "average", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"current": MoPropertyMeta("current", "current", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"maximum": MoPropertyMeta("maximum", "maximum", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"minimum": MoPropertyMeta("minimum", "minimum", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"period": MoPropertyMeta("period", "period", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
},
}
prop_map = {
"modular": {
"average": "average",
"childAction": "child_action",
"current": "current",
"dn": "dn",
"maximum": "maximum",
"minimum": "minimum",
"period": "period",
"rn": "rn",
"status": "status",
},
}
| [
37811,
1212,
8265,
4909,
262,
2276,
1321,
329,
609,
20297,
13434,
35479,
1869,
1886,
10267,
526,
15931,
198,
198,
6738,
2644,
320,
66,
5908,
1330,
1869,
1886,
10267,
198,
6738,
2644,
320,
535,
382,
28961,
1330,
4270,
21746,
48526,
11,
4... | 2.519467 | 976 |
"""
Word Ladder
Given two words beginWord and endWord, and a dictionary wordList, return the length of the shortest transformation sequence from beginWord to endWord, such that:
Only one letter can be changed at a time.
Each transformed word must exist in the word list.
Return 0 if there is no such transformation sequence.
Example 1:
Input: beginWord = "hit", endWord = "cog", wordList = ["hot","dot","dog","lot","log","cog"]
Output: 5
Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog", return its length 5.
Example 2:
Input: beginWord = "hit", endWord = "cog", wordList = ["hot","dot","dog","lot","log"]
Output: 0
Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
Constraints:
1 <= beginWord.length <= 100
endWord.length == beginWord.length
1 <= wordList.length <= 5000
wordList[i].length == beginWord.length
beginWord, endWord, and wordList[i] consist of lowercase English letters.
beginWord != endWord
All the strings in wordList are unique.
"""
# approach: BFS with queue
# memory: O(n)
# runtime: O(n)
# passes 43/43 test cases, but takes too long
| [
37811,
198,
26449,
12862,
1082,
198,
198,
15056,
734,
2456,
2221,
26449,
290,
886,
26449,
11,
290,
257,
22155,
1573,
8053,
11,
1441,
262,
4129,
286,
262,
35581,
13389,
8379,
422,
2221,
26449,
284,
886,
26449,
11,
884,
326,
25,
628,
22... | 3.214092 | 369 |
"""
Trolls are attacking your comment section!
A common way to deal with this situation is to remove all of the vowels from the trolls' comments,
neutralizing the threat.
Your task is to write a function that takes a string and return a new string with all vowels removed.
For example, the string "This website is for losers LOL!" would become "Ths wbst s fr lsrs LL!".
Note: for this kata y isn't considered a vowel.
"""
delete_vowel('asdasdasdA asaaa! LOL')
| [
37811,
198,
51,
2487,
82,
389,
9274,
534,
2912,
2665,
0,
198,
198,
32,
2219,
835,
284,
1730,
351,
428,
3074,
318,
284,
4781,
477,
286,
262,
23268,
1424,
422,
262,
24727,
6,
3651,
11,
198,
29797,
2890,
262,
2372,
13,
198,
198,
7120... | 3.511278 | 133 |
#! coding: utf-8
from django.db import migrations
| [
2,
0,
19617,
25,
3384,
69,
12,
23,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.833333 | 18 |
import arrow
import discord
from apollo import emojis as emoji
from apollo.constants import EMBED_COLOR
from apollo.translate import t
| [
11748,
15452,
198,
11748,
36446,
198,
198,
6738,
2471,
15578,
1330,
795,
13210,
271,
355,
44805,
198,
6738,
2471,
15578,
13,
9979,
1187,
1330,
412,
10744,
1961,
62,
46786,
198,
6738,
2471,
15578,
13,
7645,
17660,
1330,
256,
628
] | 3.512821 | 39 |
# Copyright (c) 2010 - 2020, Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
# Constants
KB = 1024
BOOTLOADER_FLASH_SIZE = 22*KB
BOOTLOADER_RAM_SIZE = 768
RESERVED_FLASH_PAGES_COUNT = 2
| [
2,
15069,
357,
66,
8,
3050,
532,
12131,
11,
35834,
311,
5314,
40990,
49599,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
... | 3.407186 | 501 |
import datetime
from common.variables import *
from sqlalchemy import create_engine, Table, Column, Integer, String, Text, MetaData, DateTime
from sqlalchemy.orm import mapper, sessionmaker
import os
class ClientDatabase:
'''
Класс - оболочка для работы с базой данных клиента.
Использует SQLite базу данных, реализован с помощью
SQLAlchemy ORM и используется классический подход.
'''
class KnownUsers:
'''
Класс - отображение для таблицы всех пользователей.
'''
class MessageStat:
'''
Класс - отображение для таблицы статистики переданных сообщений.
'''
class Contacts:
'''
Класс - отображение для таблицы контактов.
'''
# Конструктор класса:
def add_contact(self, contact):
'''Метод добавляющий контакт в базу данных.'''
if not self.session.query(
self.Contacts).filter_by(
name=contact).count():
contact_row = self.Contacts(contact)
self.session.add(contact_row)
self.session.commit()
def contacts_clear(self):
'''Метод очищающий таблицу со списком контактов.'''
self.session.query(self.Contacts).delete()
def del_contact(self, contact):
'''Метод удаляющий определённый контакт.'''
self.session.query(self.Contacts).filter_by(name=contact).delete()
def add_users(self, users_list):
'''Метод заполняющий таблицу известных пользователей.'''
self.session.query(self.KnownUsers).delete()
for user in users_list:
user_row = self.KnownUsers(user)
self.session.add(user_row)
self.session.commit()
def save_message(self, contact, direction, message):
'''Метод сохраняющий сообщение в базе данных.'''
message_row = self.MessageStat(contact, direction, message)
self.session.add(message_row)
self.session.commit()
def get_contacts(self):
'''Метод возвращающий список всех контактов.'''
return [contact[0]
for contact in self.session.query(self.Contacts.name).all()]
def get_users(self):
'''Метод возвращающий список всех известных пользователей.'''
return [user[0]
for user in self.session.query(self.KnownUsers.username).all()]
def check_user(self, user):
'''Метод проверяющий существует ли пользователь.'''
if self.session.query(
self.KnownUsers).filter_by(
username=user).count():
return True
else:
return False
def check_contact(self, contact):
'''Метод проверяющий существует ли контакт.'''
if self.session.query(self.Contacts).filter_by(name=contact).count():
return True
else:
return False
def get_history(self, contact):
'''Метод возвращающий историю сообщений с определённым пользователем.'''
query = self.session.query(
self.MessageStat).filter_by(
contact=contact)
return [(history_row.contact,
history_row.direction,
history_row.message,
history_row.date) for history_row in query.all()]
# отладка
if __name__ == '__main__':
test_db = ClientDatabase('test1')
# for i in ['test3', 'test4', 'test5']:
# test_db.add_contact(i)
# test_db.add_contact('test4')
# test_db.add_users(['test1', 'test2', 'test3', 'test4', 'test5'])
# test_db.save_message('test2', 'in', f'Привет! я тестовое сообщение от {datetime.datetime.now()}!')
# test_db.save_message('test2', 'out', f'Привет! я другое тестовое сообщение от {datetime.datetime.now()}!')
# print(test_db.get_contacts())
# print(test_db.get_users())
# print(test_db.check_user('test1'))
# print(test_db.check_user('test10'))
print(sorted(test_db.get_history('test2'), key=lambda item: item[3]))
# test_db.del_contact('test4')
# print(test_db.get_contacts())
| [
11748,
4818,
8079,
198,
6738,
2219,
13,
25641,
2977,
1330,
1635,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
11,
8655,
11,
29201,
11,
34142,
11,
10903,
11,
8255,
11,
30277,
6601,
11,
7536,
7575,
198,
6738,
44161,
282,
26599,
... | 1.633929 | 2,464 |
# coding : utf-8
# created by wyj
import pandas as pd
# TERMINALNO, TIME, TRIP_ID, LONGITUDE, LATITUDE, DIRECTION, HEIGHT, SPEED, CALLSTATE, Y
| [
2,
19617,
1058,
3384,
69,
12,
23,
198,
2,
2727,
416,
266,
88,
73,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
2,
28994,
44,
17961,
15285,
11,
20460,
11,
7579,
4061,
62,
2389,
11,
44533,
2043,
52,
7206,
11,
42355,
2043,
52,
7... | 2.416667 | 60 |
import sys
import os
import re
import binascii
#Adding directory to the path where Python searches for modules
cmd_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/Crypto/modules/')
sys.path.insert(0, cmd_folder)
#Importing common crypto module
import block
if __name__ == "__main__":
userdata= 'testy stringteddX;adminXtrue'
prepended_text= 'comment1=cooking%20MCs;userdata='
appended_text = ';comment2=%20like%20a%20pound%20of%20bacon'
encrypted_tampered= process_and_encrypt_string(prepended_text, userdata, appended_text)
decrypted_tampered,is_admin_string_present= process_and_decrypt_string(encrypted_tampered)
t1= []
for j in range(0,len(decrypted_tampered)):
t1.append(decrypted_tampered[j])
if len(t1) % 16 == 0:
print ''.join(t1)
t1= []
else:
continue
'''
#This bit is useful don't delete it. Uncomment it when you still haven't found out which exact character you need for your solution.
t1= []
for i in range(0,256):
decrypted_tampered, is_admin_string_present= process_and_decrypt_string(encrypted_tampered[i])
if decrypted_tampered[48] == ';':
print "Key for ; is:","\t",i
print '-'*100
for j in range(0,len(decrypted_tampered)):
t1.append(decrypted_tampered[j])
if len(t1) % 16 == 0:
print ''.join(t1)
t1= []
else:
continue
break
for i in range(0,256):
decrypted_tampered, is_admin_string_present= process_and_decrypt_string(encrypted_tampered[i])
if decrypted_tampered[54] == '=':
print "Key for ; is:","\t",i
print '-'*100
for j in range(0,len(decrypted_tampered)):
t1.append(decrypted_tampered[j])
if len(t1) % 16 == 0:
print ''.join(t1)
t1= []
else:
continue
break
'''
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
9874,
292,
979,
72,
198,
2,
32901,
8619,
284,
262,
3108,
810,
11361,
15455,
329,
13103,
198,
28758,
62,
43551,
796,
28686,
13,
6978,
13,
15908,
3672,
10786,
14,
11195,
14,
... | 2.005814 | 1,032 |
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = 'Yannic Schneider (v@vendetta.ch)'
__copyright__ = 'Copyright (c) 20xx Yannic Schneider'
__license__ = 'WTFPL'
__vcs_id__ = '$Id$'
__version__ = '0.1' #Versioning: http://www.python.org/dev/peps/pep-0386/
#
## Code goes here.
#
import colorama
from colorama import Fore, Back, Style
LINE = '-' * 80
HEAD = '#' * 80
def SetTitle(nr):
""" Testing Docstring"""
print(Fore.MAGENTA + HEAD)
print('>> Cryptopals crypto challenge :: SET:%d ' % nr)
print(Fore.MAGENTA + HEAD + Style.RESET_ALL)
def ChallengeTitle(snr, cnr, subtitle=None):
""" Testing Docstring"""
sub = ' ' if subtitle == None else subtitle
print(LINE)
if sub == ' ' :
print('>> Cryptopals :: SET:%d :: Challenge: %d' % (snr, cnr))
else:
print('>> Cryptopals :: SET:%d :: Challenge: %d :: Sub: %s'
% (snr, cnr, sub))
print(LINE + Style.RESET_ALL)
def PrintResult(btest, bresult):
""" Testing Docstring"""
print(Fore.CYAN + LINE)
print('>> Result bytes :: %s' % bresult)
check = True if btest == bresult else False
if check:
print(Fore.GREEN + '>> Result check :: %s' % 'CORRECT'
+ Style.RESET_ALL)
else:
print(Fore.RED + '>> Result check :: %s' % 'WRONG'
+ Style.RESET_ALL)
| [
37811,
198,
26796,
14432,
8841,
198,
23579,
37336,
25,
2638,
1378,
2503,
13,
29412,
13,
2398,
14,
7959,
14,
431,
862,
14,
431,
79,
12,
15,
28676,
14,
198,
37811,
198,
198,
834,
9800,
834,
796,
705,
56,
1236,
291,
26039,
357,
85,
3... | 2.216383 | 647 |
#
# @lc app=leetcode id=725 lang=python3
#
# [725] Split Linked List in Parts
#
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from typing import List
from LeetCode.Python.BaseListNode import MakeListNodes, PrintListNode, ListNode
if __name__ == '__main__':
for part in Solution().splitListToParts(root=MakeListNodes([1, 2, 3, 4, 5, 6, 7, 8, 9]), k=10):
PrintListNode(part)
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
45151,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
45151,
60,
27758,
7502,
276,
7343,
287,
22349,
198,
2,
198,
2,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
198,
2,
1398... | 2.479167 | 192 |
__all__ = ['show_research_popup',
'show_research_popup_once']
HEADER = _('Keep Digsby Free')
MINOR = _("Digsby will use your computer's free time using it to conduct both free and paid research.")
| [
834,
439,
834,
796,
37250,
12860,
62,
34033,
62,
12924,
929,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
12860,
62,
34033,
62,
12924,
929,
62,
27078,
20520,
198,
198,
37682,
1137,
796,
4808,
10786,
15597,
7367,
... | 2.876712 | 73 |
'''
github.com/uxai/gridlock
Thanks for your interest in GRIDLOCK
I plan to do slow improvements here and there on methods, or enabling files in the near future.
If you have suggestions or improvements, let me know! I'm new to Python so I'm still learning ^^;
TABLE OF CONTENTS
-------------------------------------------------------------
Line # Description
-------------------------------------------------------------
22 Imports and global variables
34 Clear screen function
44 About grid encryption function
55 Merge string
63 Read in message to be encrypted/decrypted
87 Print final details for encrypted messages
106 Encrypt message function
152 Decrypt message function
198 Main function
'''
#!/usr/bin/env python3
import os
import secrets
# Colors used in the text interface
COLORS = {"cyan": '\033[1;36;48m', "green": '\033[1;32;48m', "red": '\033[1;31;48m', "end": '\033[0m'}
# ALl characters, you can add more as needed.
ALPHA = ("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "~", "!", "@", "#", "$", "%", "^", "&", "*", "(", ")", "-", "_", "+", "=", "[", "]", "{", "}", "\\", "|", ";", ":", "\"", "\'", "<", ">", ",", ".", "?", "/", "`", "£", "¥", " ")
grid_height = range(-1, 2)
grid_width = range(-1, 2)
clear()
# About GRIDENCRYPTION
# Merge the final output to a single string
# Get the content for encryption or decryption
# Print final output of encryption
# Encrypt the given message in the matrix list
if __name__ == '__main__':
main()
| [
7061,
6,
198,
12567,
13,
785,
14,
2821,
1872,
14,
25928,
5354,
198,
198,
9690,
329,
534,
1393,
287,
10863,
2389,
36840,
198,
40,
1410,
284,
466,
3105,
8561,
994,
290,
612,
319,
5050,
11,
393,
15882,
3696,
287,
262,
1474,
2003,
13,
... | 2.590071 | 705 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import utils
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 3.459459 | 185 |