Schema (column | dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path, max_stars_repo_name, max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime, max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path, max_issues_repo_name, max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime, max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path, max_forks_repo_name, max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime, max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| all other qsc_*_quality_signal columns | float64 |
| qsc_code_frac_words_unique, qsc_code_frac_lines_string_concat | null |
| all other qsc_* hit-counter columns (same names, no _quality_signal suffix) | int64 |
| effective | string |
| hits | int64 |

The qsc_code_*_quality_signal group comprises: num_words, num_chars, mean_word_length, frac_words_unique, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, frac_chars_dupe_5grams through frac_chars_dupe_10grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert. The qsc_codepython_*_quality_signal group comprises: cate_ast, frac_lines_func_ratio, cate_var_zero, frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print. Each quality signal has a matching hit-counter column without the _quality_signal suffix.
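
The records below follow this schema. A minimal loading sketch, assuming the rows are available as a local Parquet file (the file name `records.parquet` is hypothetical, not part of this dump):

```python
# Sketch: load rows shaped like the schema above and filter on quality signals.
import pandas as pd

df = pd.read_parquet("records.parquet")  # hypothetical file name

# Keep Python files that parse (cate_ast == 1) and are not dominated by
# duplicated 5-grams or very long lines.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
    & (df["max_line_length"] < 200)
)
for _, row in df[mask].iterrows():
    print(row["hexsha"], row["size"], row["max_stars_repo_name"])
```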

---

hexsha 9dde6bfc80676d7c23483dae2cdebeb48e518e09 | size 6,801 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | spinel_wisun_utils.py | LaudateCorpus1/ti-wisunfan-pyspinel | 5b911ef8319115fb2ef20a57358dd44733bed30a | ["Apache-2.0"] | 2 | 2021-03-22T21:42:03.000Z | 2021-09-01T09:12:43.000Z |
| max_issues | spinel_wisun_utils.py | LaudateCorpus1/ti-wisunfan-pyspinel | 5b911ef8319115fb2ef20a57358dd44733bed30a | ["Apache-2.0"] | 1 | 2021-11-11T16:18:51.000Z | 2021-11-11T16:18:51.000Z |
| max_forks | spinel_wisun_utils.py | LaudateCorpus1/ti-wisunfan-pyspinel | 5b911ef8319115fb2ef20a57358dd44733bed30a | ["Apache-2.0"] | 5 | 2021-08-18T03:15:32.000Z | 2022-01-20T05:19:41.000Z |

content:

```python
#!/usr/bin/env python
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ipaddress
def set_bit_at_position(position, data):
mask = 1 << position
return data | mask
def change_format_input_string(original_input):
byte_array_str_list = str(original_input)
byte_array_input_string = ''
count = 0
for value in original_input:
if(count != 0):
byte_array_input_string += ':'
append_str = value[2:]
# if only one digit is present, add another 0
if(len(append_str) == 1):
append_str = '0' + append_str
byte_array_input_string += append_str
count += 1
return byte_array_input_string
def format_display_string(line):
display_string = ''
count = 0
for char in line:
if(count % 2 == 0) and count != 0:
display_string += ':'
display_string += char
count += 1
return display_string
def convert_to_chan_num_list(line):
values = line.split(':')
total_num_channels = 129
hex_string = ''
binary_string = ''
count = 0
for v in values:
if(' ' in v):
v = v.split(' ')[0]
# Step 2: Convert the string to a hex number
hex_num = int(v, 16)
# Step 3: Invert the binary values if needed (we do not use this feature)
new_value_first = 0
new_value_last = 0
inverted_hex_num = hex_num
# inverted_hex_num = hex_num ^ 0b11111111
new_value = inverted_hex_num
# Step 4: Combine the inverted values
hex_string += str(hex(new_value))[2:]
string_to_reverse = str('{0:08b}'.format(new_value))
reversedstring=''.join(reversed(string_to_reverse))
binary_string += reversedstring
channel_num = 0
channel_list = list()
# 1110 1111 inverse is 0001 0000
# Step 5: Loop through binary string and add channels
for c in (binary_string):
if(channel_num == total_num_channels):
break
if(c == '1'):
# add this channel
channel_list.append(channel_num)
channel_num+=1
channel_list_display_string = ''
lst = channel_list
result = str(lst[0])
end = None
for index, num in enumerate(lst[1:]):
if num - 1 == lst[index]: # the slice shifts the index by 1 for us
end = str(num)
else:
if end:
result += '-' + end
end = None
result += ':' + str(num)
# Catch the last term
if end:
result += '-' + str(num)
channel_list_display_string = result
return channel_list_display_string
def convert_to_bitmask(input_line='0-128'):
included_ch_list = (input_line.split(':')) # 0-10, 15-20 etc
real_channel_list = list()
for each_entry in included_ch_list:
start_channel = int(each_entry.split('-')[0]) # 0
try:
end_channel = int(each_entry.split('-')[1]) # 10
except Exception:
# in the case of no end channel specified, it means only one channel selected
end_channel = start_channel
pass
for current_channel in range(start_channel, end_channel + 1):
real_channel_list.append(current_channel)
count = 0
channel_mask_byte = 0
channel_mask_byte_inverted = 0
eight_multiple = 8
# convert channel list from right to left
while(count in range(0, len(real_channel_list))):
if(count == 129):
break
channel_mask_byte = set_bit_at_position(real_channel_list[count], channel_mask_byte)
if(count+1 == len(real_channel_list)):
break
if(int(real_channel_list[count+1]) >= eight_multiple):
eight_multiple += 8
count += 1
final_channel = int(real_channel_list[-1])
mask = 0b1
channel_mask_byte_inverted = channel_mask_byte
# increment by 1 to include the last channel
final_channel += 1
while(final_channel % 8 != 0):
# make sure you have an even number of bytes
final_channel += 1
# invert every single bit
"""for bit in range(0, final_channel):
channel_mask_byte_inverted ^= (mask)
# shift the mask to the left by 1
mask = mask << 1"""
value = (hex(channel_mask_byte_inverted)[2:].strip())
# make sure 17 byte pairs are used
value = value.zfill(34)
channel_mask_correct_endian = value
if len(str(value)) > 34:
# if length is greater than 34, only use the first 17 bytes
value = value[0:34]
channel_mask_inverted_hex = bytearray.fromhex(value)
channel_mask_inverted_hex.reverse()
channel_mask_correct_endian = channel_mask_inverted_hex.hex()
return channel_mask_correct_endian, channel_mask_inverted_hex
# Helper util function to parse received PROP_ROUTING_TABLE_UPDATE property info
def parse_routingtable_property(propRoutingTableAddrInfo):
"""
Internal utility function to convert Routing Table Addr Info into structure
Returns changed_type and dictionary entry
"""
routingTableEntry = {}
update_type = -1
dst_ipv6_addr = ""
try:
# 2 bytes = length of structure; 1 byte = change type; 16 bytes Dest IPv6 address;
# 1 byte = prefix len ; 16 bytes = next hop IPv6 address; 4 bytes = lifetime
routingTableStruct = propRoutingTableAddrInfo[0:len(propRoutingTableAddrInfo)]
changed_info = routingTableStruct[2:3] # C
dst_addr = routingTableStruct[3:19] # 6
prefix_length = routingTableStruct[19:20] # C
next_hop_addr = routingTableStruct[20:36] # 6
lifetime = routingTableStruct[36:40] # L
update_type = int.from_bytes(changed_info, "little", signed=False)
dst_ipv6_addr = ipaddress.IPv6Address(dst_addr)
routingTableEntry["prefixLen"] = int.from_bytes(prefix_length, "little", signed=False)
routingTableEntry["nextHopAddr"] = ipaddress.IPv6Address(next_hop_addr)
routingTableEntry["lifetime"] = int.from_bytes(lifetime, "little", signed=False)
except Exception as es:
print("Exception raised during Parsing Routing Table")
print(es)
return(update_type, dst_ipv6_addr, routingTableEntry)
```

avg_line_length 32.54067 | max_line_length 94 | alphanum_fraction 0.642847

qsc_code_*_quality_signal: num_words 897 | num_chars 6,801 | mean_word_length 4.654404 | frac_words_unique 0.285396
frac_chars_top_{2,3,4}grams 0.039521 | 0.028743 | 0.019162
frac_chars_dupe_{5..10}grams 0.068982 | 0.035928 | 0.022036 | 0.022036 | 0 | 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.031445 | frac_chars_whitespace 0.270548
size_file_byte 6,801 | num_lines 208 | num_chars_line_max 95 | num_chars_line_mean 32.697115
frac_chars_alphabet 0.810119 | frac_chars_comments 0.241435 | cate_xml_start 0 | frac_lines_dupe_lines 0.140625 | cate_autogen 0
frac_lines_long_string 0 | frac_chars_string_length 0.023463 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.046875 | cate_var_zero false | frac_lines_pass 0.007813 | frac_lines_import 0.007813 | frac_lines_simplefunc 0 | score_lines_no_logic 0.09375 | frac_lines_print 0.015625

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0
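
The helpers in this file round-trip: a channel-range string encodes to the 17-byte mask and the mask decodes back to the range. A short usage sketch, assuming the functions above are in scope:

```python
# Usage sketch: encode channels 0-7 into the 17-byte channel mask, render it
# as colon-separated byte pairs, then decode it back to a range string.
mask_hex, mask_bytes = convert_to_bitmask('0-7')
display = format_display_string(mask_hex)   # 'ff:00:00:...:00'
print(convert_to_chan_num_list(display))    # '0-7'
```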

---

hexsha 9ddef4eb6e5502bd565a0db158fed8bdc6d939f1 | size 1,383 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | src/ModularChess/movements/EnPassant.py | ferranSanchezLlado/ModularChess | 896fa192fd49f86062ea79dd0d3cbe7e5cdc9d6b | ["MIT"] | null | null | null |
| max_issues | src/ModularChess/movements/EnPassant.py | ferranSanchezLlado/ModularChess | 896fa192fd49f86062ea79dd0d3cbe7e5cdc9d6b | ["MIT"] | null | null | null |
| max_forks | src/ModularChess/movements/EnPassant.py | ferranSanchezLlado/ModularChess | 896fa192fd49f86062ea79dd0d3cbe7e5cdc9d6b | ["MIT"] | null | null | null |

content:

```python
from typing import TYPE_CHECKING, Optional
from ModularChess.movements.Movement import Movement, MovementData
if TYPE_CHECKING:
from ModularChess.pieces.Piece import Piece
from ModularChess.utils.Position import Position
class EnPassant(Movement):
def __init__(self, piece: "Piece", new_position: "Position", captured_piece: "Piece",
is_valid_move: Optional[bool] = None):
moves = [MovementData(captured_piece, captured_piece.position, None),
MovementData(piece, piece.position, new_position)]
super().__init__(moves, piece=piece, destination=new_position, is_valid_move=is_valid_move)
def __str__(self) -> str:
if self.piece.board.dimensions == 2:
move = self.piece.abbreviation()
same_pieces = self.piece.board.pieces[self.player][type(self.piece)]
if self.movements[-1].destination_position is not None and \
len([piece for piece in same_pieces if
piece.check_piece_valid_move(self.movements[-1].destination_position)]) \
> 1:
move += str(self.movements[-1].initial_position)
if len(self) == 2: # Capture
move += "x"
return move + str(self.movements[-1].destination_position) + ("+" if self.is_check else "")
return super().__str__()
```

avg_line_length 44.612903 | max_line_length 103 | alphanum_fraction 0.642805

qsc_code_*_quality_signal: num_words 160 | num_chars 1,383 | mean_word_length 5.30625 | frac_words_unique 0.30625
frac_chars_top_{2,3,4}grams 0.053004 | 0.06596 | 0.088339
frac_chars_dupe_{5..10}grams 0.149588 | 0 | 0 | 0 | 0 | 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.006757 | frac_chars_whitespace 0.250904
size_file_byte 1,383 | num_lines 30 | num_chars_line_max 104 | num_chars_line_mean 46.1
frac_chars_alphabet 0.812741 | frac_chars_comments 0.005061 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0
frac_lines_long_string 0 | frac_chars_string_length 0.014556 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.083333 | cate_var_zero false | frac_lines_pass 0.041667 | frac_lines_import 0.166667 | frac_lines_simplefunc 0 | score_lines_no_logic 0.375 | frac_lines_print 0

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0
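
Signals like frac_chars_dupe_5grams above measure how much of a file is covered by repeated word n-grams. Below is a rough sketch of how such a fraction could be computed; the exact tokenization and normalization behind these particular numbers is not specified here, so treat it as the general idea only:

```python
# Sketch: fraction of n-gram characters belonging to word n-grams that occur
# more than once. Whitespace tokenization is an assumption.
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    words = text.split()
    if len(words) < n:
        return 0.0
    grams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(grams)

    def chars(gram):
        return sum(len(w) for w in gram)

    dupe = sum(chars(g) * c for g, c in counts.items() if c > 1)
    total = sum(chars(g) * c for g, c in counts.items())
    return dupe / total if total else 0.0
```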

---

hexsha 9ddfecdb8db0a77645be766083a1ef5b0e142f16 | size 2,387 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | pytemperaturectrl/julabo.py | jenrei/pytemperaturectrl | eabfbf8a6d732cda72c5cd8397a85b0d8960da78 | ["MIT"] | null | null | null |
| max_issues | pytemperaturectrl/julabo.py | jenrei/pytemperaturectrl | eabfbf8a6d732cda72c5cd8397a85b0d8960da78 | ["MIT"] | null | null | null |
| max_forks | pytemperaturectrl/julabo.py | jenrei/pytemperaturectrl | eabfbf8a6d732cda72c5cd8397a85b0d8960da78 | ["MIT"] | null | null | null |

content:

```python
# -*- coding: utf-8 -*-
"""
julabo.py
Contains Julabo temperature control
see documentation http://www.julabo.com/sites/default/files/downloads/manuals/french/19524837-V2.pdf at section 10.2.
:copyright: (c) 2015 by Maxime DAUPHIN
:license: MIT, see LICENSE for details
"""
import serial
import time
from .pytemperaturectrl import TemperatureControl
class Julabo(TemperatureControl):
"""Julabo Temperature control implementation"""
# see Julabo doc
MIN_TIME_INTERVAL = 0.250
def __init__(self, *args, **kwargs):
        # fixed: the original called super(TemperatureControl, self), which skips
        # TemperatureControl's own __init__ in the MRO
        super(Julabo, self).__init__()
self.serial = None
def checkIfOpen(self):
""" Check if serial port is open """
        if self.serial is None:
raise Exception("Please call open function before all communication")
def open(self, com_port, baudrate=4800):
""" Open serial communication"""
self.serial = serial.Serial(com_port,
baudrate=baudrate,
bytesize=serial.SEVENBITS,
parity=serial.PARITY_EVEN,
stopbits=serial.STOPBITS_ONE,
timeout=1,
xonxoff=False,
rtscts=True,
dsrdtr=False)
def close(self):
""" Close serial communication"""
self.checkIfOpen()
        if self.serial is not None:
self.serial.close()
def power(self, on):
"""set power to on or off"""
self.checkIfOpen()
time.sleep(self.MIN_TIME_INTERVAL)
value = 1 if on else 0
self.serial.write(b'f"out_mode_05 {value}\r\n"')
def getVersion(self):
"""retrieve engine version"""
self.checkIfOpen()
time.sleep(self.MIN_TIME_INTERVAL)
self.serial.write(b'version\r\n')
return self.serial.readline()
def getStatus(self):
"""retrieve engine status"""
self.checkIfOpen()
time.sleep(self.MIN_TIME_INTERVAL)
self.serial.write(b'status\r\n')
return self.serial.readline()
def setWorkTemperature(self, temperature_in_degree):
"""set setpoint temperature"""
self.checkIfOpen()
time.sleep(self.MIN_TIME_INTERVAL)
self.serial.write(b'f"out_sp_00 {temperature_in_degree}\r\n"')
def getWorkTemperature(self):
"""get setpoint temperature"""
self.checkIfOpen()
time.sleep(self.MIN_TIME_INTERVAL)
self.serial.write(b'in_sp_00\r\n')
return float(self.serial.readline())
def getCurrentTemperature(self):
"""get current tank temperature"""
self.checkIfOpen()
time.sleep(self.MIN_TIME_INTERVAL)
self.serial.write(b'in_pv_00\r\n')
return float(self.serial.readline())
```

avg_line_length 26.820225 | max_line_length 118 | alphanum_fraction 0.714286

qsc_code_*_quality_signal: num_words 321 | num_chars 2,387 | mean_word_length 5.193146 | frac_words_unique 0.386293
frac_chars_top_{2,3,4}grams 0.089982 | 0.062987 | 0.086383
frac_chars_dupe_{5..10}grams 0.323335 | 0.323335 | 0.308938 | 0.274145 | 0.208758 | 0.208758
frac_chars_replacement_symbols 0 | frac_chars_digital 0.017813 | frac_chars_whitespace 0.153331
size_file_byte 2,387 | num_lines 88 | num_chars_line_max 119 | num_chars_line_mean 27.125
frac_chars_alphabet 0.807026 | frac_chars_comments 0.233347 | cate_xml_start 0 | frac_lines_dupe_lines 0.314815 | cate_autogen 0
frac_lines_long_string 0 | frac_chars_string_length 0.091063 | frac_chars_long_word_length 0.015837 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.185185 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.055556 | frac_lines_simplefunc 0 | score_lines_no_logic 0.351852 | frac_lines_print 0

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0
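
The two serial.write fixes above come down to one pitfall: prefixing a bytes literal with `b` does not make it an f-string, so no interpolation happens. A small illustration in plain Python, no serial port needed:

```python
# b'f"..."' is just the literal bytes f"..." - the b prefix wins and {value}
# is never substituted. Format the text first, then encode it.
value = 1
broken = b'f"out_mode_05 {value}\r\n"'               # literal f"out_mode_05 {value}"
fixed = 'out_mode_05 {}\r\n'.format(value).encode()  # b'out_mode_05 1\r\n'
assert broken != fixed
print(broken)  # b'f"out_mode_05 {value}\r\n"'
print(fixed)   # b'out_mode_05 1\r\n'
```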

---

hexsha 9de15ad674c58a2528581460d79721fcb5a0883c | size 1,143 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | algorithms/matrix.py | rkistner/contest-algorithms | 8133f8ddce4f257386c7bcf55589d559854c1955 | ["Apache-2.0"] | 4 | 2015-03-08T15:38:45.000Z | 2018-04-08T02:13:54.000Z |
| max_issues | algorithms/matrix.py | rkistner/contest-algorithms | 8133f8ddce4f257386c7bcf55589d559854c1955 | ["Apache-2.0"] | 1 | 2017-11-29T01:15:55.000Z | 2017-11-29T01:17:40.000Z |
| max_forks | algorithms/matrix.py | rkistner/contest-algorithms | 8133f8ddce4f257386c7bcf55589d559854c1955 | ["Apache-2.0"] | 4 | 2015-11-08T03:39:54.000Z | 2020-11-06T10:42:53.000Z |

content:

```python
"""
Some basic matrix-related functionality.
"""
def cumulative2d(grid):
"""
>>> cumulative2d([[2, 5, 4], [3, 8, 1]])
[[0, 0, 0, 0], [0, 2, 7, 11], [0, 5, 18, 23]]
"""
rows = []
for row in grid:
rrr = [0]
last = 0
for col in row:
last += col
rrr.append(last)
rows.append(rrr)
blocks = []
last = [0]*len(rows[0])
blocks.append(last)
for row in rows:
last = list(map(sum, zip(last, row)))
blocks.append(last)
return blocks
def transpose(grid):
"""
Switches rows and columns.
>>> transpose([[1, 2, 3], [4, 5, 6]])
[[1, 4], [2, 5], [3, 6]]
"""
R = len(grid)
C = len(grid[0])
inverted = []
for r in range(C):
row = [c[r] for c in grid]
inverted.append(row)
return inverted
def moment(array):
"""
>>> moment([5, 6, 7, 2, 4])
[0, 6, 14, 6, 16]
"""
return list(map(lambda i_v: i_v[0]*i_v[1], enumerate(array)))
def moment2d(grid):
"""
>>> moment2d([[5, 6, 7, 2, 4]])
[[0, 6, 14, 6, 16]]
"""
return list(map(moment, grid))
```

avg_line_length 21.166667 | max_line_length 65 | alphanum_fraction 0.464567

qsc_code_*_quality_signal: num_words 165 | num_chars 1,143 | mean_word_length 3.2 | frac_words_unique 0.315152
frac_chars_top_{2,3,4}grams 0.015152 | 0.017045 | 0.015152
frac_chars_dupe_{5..10}grams 0.094697 | 0.094697 | 0.094697 | 0.094697 | 0.094697 | 0.094697
frac_chars_replacement_symbols 0 | frac_chars_digital 0.089239 | frac_chars_whitespace 0.333333
size_file_byte 1,143 | num_lines 54 | num_chars_line_max 66 | num_chars_line_mean 21.166667
frac_chars_alphabet 0.603675 | frac_chars_comments 0.276465 | cate_xml_start 0 | frac_lines_dupe_lines 0.071429 | cate_autogen 0
frac_lines_long_string 0 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.142857 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0 | frac_lines_simplefunc 0 | score_lines_no_logic 0.285714 | frac_lines_print 0

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0
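
The prefix table built by cumulative2d is typically used for constant-time submatrix sums via inclusion-exclusion. A short usage sketch, assuming `cumulative2d` from the file above is in scope; the helper name `region_sum` is mine, not part of the file:

```python
# Usage sketch for cumulative2d: O(1) submatrix sums by inclusion-exclusion
# on the (R+1) x (C+1) prefix table.
def region_sum(prefix, r0, r1, c0, c1):
    """Sum of grid[r0:r1][c0:c1] given prefix = cumulative2d(grid)."""
    return prefix[r1][c1] - prefix[r0][c1] - prefix[r1][c0] + prefix[r0][c0]

prefix = cumulative2d([[2, 5, 4], [3, 8, 1]])
assert region_sum(prefix, 0, 2, 1, 3) == 5 + 4 + 8 + 1  # columns 1-2 of both rows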

---

hexsha 9de39c8afb68ea723afe9738f2762762838a1e06 | size 5,361 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | douyu_spider/douyu_spider_v2.py | DH-JQ/WebSpider-DH | 71603c85cc5327ce7a0a864db145f3c650fa13a5 | ["MIT"] | null | null | null |
| max_issues | douyu_spider/douyu_spider_v2.py | DH-JQ/WebSpider-DH | 71603c85cc5327ce7a0a864db145f3c650fa13a5 | ["MIT"] | null | null | null |
| max_forks | douyu_spider/douyu_spider_v2.py | DH-JQ/WebSpider-DH | 71603c85cc5327ce7a0a864db145f3c650fa13a5 | ["MIT"] | null | null | null |

content:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from lxml import etree
import json
class DouyuSpider:
def __init__(self):
""" 初始化
"""
start_url = 'https://www.douyu.com/g_LOL'
self.browser = webdriver.Chrome()
self.browser.get(start_url)
def get_one_page(self):
self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
path = '//*[@id="listAll"]/div[2]/ul/li[8]/div/a[1]/div[2]/div[2]/span'
method = EC.presence_of_element_located((By.XPATH, path))
wait = WebDriverWait(self.browser, 10)
# self.browser.refresh()
        wait.until(method, message='load timed out')
self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
html = etree.HTML(self.browser.page_source)
li_list = html.xpath('//ul[@class="layout-Cover-list"]/li')
li_num = len(li_list)
# //*[@id="listAll"]/div[2]/ul/li[1]/div/a[1]/div[2]/div[1]/h3
title_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[1]/h3/@title'
# hot_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/h2'
hot_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/span/text()'
room_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/@href'
user_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/h2/text()'
        items = []
        for num in range(li_num):
            item = {}  # fixed: a fresh dict per row; one shared dict made every appended item identical
item['title'] = html.xpath(title_path.format(num+1))
# item['title'] = html.xpath(title_path.format(num+1)).get_attribute('title')
item['hot'] = html.xpath(hot_path.format(num+1))
item['room_url'] = html.xpath(room_path.format(num+1))
item['user'] = html.xpath(user_path.format(num+1))
if num % 20 == 0:
                print(f'finished item {num+1}')
print(item)
items.append(item)
return items
def fetch_one_page(self):
path = '//*[@id="listAll"]/div[2]/ul/li[8]/div/a[1]/div[2]/div[2]/span'
method = EC.presence_of_element_located((By.XPATH, path))
wait = WebDriverWait(self.browser, 10)
# self.browser.refresh()
        wait.until(method, message='load timed out')
self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
li_list = self.browser.find_elements_by_xpath('//ul[@class="layout-Cover-list"]/li')
li_num = len(li_list)
# //*[@id="listAll"]/div[2]/ul/li[1]/div/a[1]/div[2]/div[1]/h3
title_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[1]/h3'
# hot_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/h2'
hot_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/span'
room_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]'
user_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/h2'
items = []
for num in range(li_num):
item = {}
item['title'] = self.browser.find_element_by_xpath(title_path.format(num+1)).get_attribute('title')
item['hot'] = self.browser.find_element_by_xpath(hot_path.format(num+1)).text
item['room_url'] = self.browser.find_element_by_xpath(room_path.format(num+1)).get_attribute('href')
item['user'] = self.browser.find_element_by_xpath(user_path.format(num+1)).text
if num % 20 == 0:
                print(f'finished item {num+1}')
print(item)
items.append(item)
return items
def save_content(self, items):
with open('douyu.json', 'a+', encoding='utf-8') as f:
for item in items:
# print(item)
json.dump(item, f, ensure_ascii=False)
f.write('\n')
def get_next_url(self, num):
self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
max_num = self.browser.find_element_by_xpath('//*[@id="listAll"]/div[2]/div/ul/li[last()-1]/a').text
if num < int(max_num):
self.browser.find_element_by_xpath('//*[@id="listAll"]/div[2]/div/span/span/input').clear()
self.browser.find_element_by_xpath('//*[@id="listAll"]/div[2]/div/span/span/input').send_keys(num+1)
self.browser.find_element_by_xpath('//*[@id="listAll"]/div[2]/div/span/span/span').click()
next_flag = True
if num >= int(max_num):
next_flag = False
return next_flag, max_num
def run(self):
next_flag = True
num = 0
while next_flag:
items = self.get_one_page()
self.save_content(items)
if num % 2 == 0:
self.browser.implicitly_wait(5)
num += 1
            print('*'*10 + f'finished page {num}' + '*'*10)
next_flag, max_num = self.get_next_url(num)
print(max_num)
if __name__ == '__main__':
dou_spider = DouyuSpider()
dou_spider.run()
```

avg_line_length 43.233871 | max_line_length 112 | alphanum_fraction 0.564447

qsc_code_*_quality_signal: num_words 761 | num_chars 5,361 | mean_word_length 3.823916 | frac_words_unique 0.172142
frac_chars_top_{2,3,4}grams 0.052234 | 0.074227 | 0.080412
frac_chars_dupe_{5..10}grams 0.681787 | 0.668729 | 0.58488 | 0.571134 | 0.571134 | 0.552234
frac_chars_replacement_symbols 0 | frac_chars_digital 0.02616 | frac_chars_whitespace 0.244171
size_file_byte 5,361 | num_lines 123 | num_chars_line_max 113 | num_chars_line_mean 43.585366
frac_chars_alphabet 0.692004 | frac_chars_comments 0.078157 | cate_xml_start 0 | frac_lines_dupe_lines 0.354839 | cate_autogen 0
frac_lines_long_string 0.086022 | frac_chars_string_length 0.251218 | frac_chars_long_word_length 0.201907 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.064516 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.064516 | frac_lines_simplefunc 0 | score_lines_no_logic 0.172043 | frac_lines_print 0.064516

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0

---

hexsha 9de3f58e34c1bda2a31dcb16e2481f3de5ab6ad2 | size 960 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | setup.py | fungibit/bitcoinscript | ced6fb37dfa40eac7341826c758842e0ed7e7475 | ["MIT"] | 1 | 2017-10-25T17:11:44.000Z | 2017-10-25T17:11:44.000Z |
| max_issues | setup.py | fungibit/bitcoinscript | ced6fb37dfa40eac7341826c758842e0ed7e7475 | ["MIT"] | 3 | 2017-03-10T05:27:29.000Z | 2017-04-07T16:06:28.000Z |
| max_forks | setup.py | fungibit/bitcoinscript | ced6fb37dfa40eac7341826c758842e0ed7e7475 | ["MIT"] | null | null | null |

content:

```python
#!/usr/bin/env python
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Read version info from bitcoinscript/version.py
version_vars = {}
with open("bitcoinscript/version.py") as fp:
exec(fp.read(), version_vars)
version_string = version_vars['__version_string__']
setup(
name='bitcoinscript',
description='Bitcoin Script Debugger and Interactive Shell',
long_description=long_description,
version=version_string,
author='fungibit',
author_email='fungibit@yandex.com',
url='https://github.com/fungibit/bitcoinscript',
license='MIT',
packages=find_packages(exclude=['tests*', 'bin']),
platforms = ["POSIX", "Windows"],
keywords='bitcoin, script, bitcoin-script, blockchain',
)
```

avg_line_length 28.235294 | max_line_length 64 | alphanum_fraction 0.726042

qsc_code_*_quality_signal: num_words 122 | num_chars 960 | mean_word_length 5.54918 | frac_words_unique 0.52459
frac_chars_top_{2,3,4}grams 0.088626 | 0.064993 | 0.070901
frac_chars_dupe_{5..10}grams 0 | 0 | 0 | 0 | 0 | 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.001217 | frac_chars_whitespace 0.14375
size_file_byte 960 | num_lines 33 | num_chars_line_max 65 | num_chars_line_mean 29.090909
frac_chars_alphabet 0.822384 | frac_chars_comments 0.11875 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0
frac_lines_long_string 0 | frac_chars_string_length 0.29656 | frac_chars_long_word_length 0.02847 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.130435 | frac_lines_simplefunc 0 | score_lines_no_logic 0.130435 | frac_lines_print 0

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0

---

hexsha 9de4af217717d87a01fd3c8f160faa869464539b | size 29,850 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | discretize/tree_mesh.py | ngodber/discretize | 2329c9e9552b5c05f40ebf62f0bb207267bd2f92 | ["MIT"] | 123 | 2017-01-09T04:59:25.000Z | 2022-03-29T08:06:43.000Z |
| max_issues | discretize/tree_mesh.py | ngodber/discretize | 2329c9e9552b5c05f40ebf62f0bb207267bd2f92 | ["MIT"] | 246 | 2017-01-09T17:20:12.000Z | 2022-03-01T22:05:20.000Z |
| max_forks | discretize/tree_mesh.py | ngodber/discretize | 2329c9e9552b5c05f40ebf62f0bb207267bd2f92 | ["MIT"] | 26 | 2018-03-27T19:24:46.000Z | 2021-11-11T20:28:09.000Z |

content:

```python
# ___ ___ ___ ___ ___ ___
# /\ \ /\ \ /\ \ /\ \ /\ \ /\ \
# /::\ \ /::\ \ \:\ \ /::\ \ /::\ \ /::\ \
# /:/\:\ \ /:/\:\ \ \:\ \ /:/\:\ \ /:/\:\ \ /:/\:\ \
# /:/ \:\ \ /:/ \:\ \ /::\ \ /::\~\:\ \ /::\~\:\ \ /::\~\:\ \
# /:/__/ \:\__\/:/__/ \:\__\ /:/\:\__\/:/\:\ \:\__\/:/\:\ \:\__\/:/\:\ \:\__\
# \:\ \ /:/ /\:\ \ \/__//:/ \/__/\/_|::\/:/ /\:\~\:\ \/__/\:\~\:\ \/__/
# \:\ /:/ / \:\ \ /:/ / |:|::/ / \:\ \:\__\ \:\ \:\__\
# \:\/:/ / \:\ \ \/__/ |:|\/__/ \:\ \/__/ \:\ \/__/
# \::/ / \:\__\ |:| | \:\__\ \:\__\
# \/__/ \/__/ \|__| \/__/ \/__/
#
#
#
# .----------------.----------------.
# /| /| /|
# / | / | / |
# / | 6 / | 7 / |
# / | / | / |
# .----------------.----+-----------. |
# /| . ---------/|----.----------/|----.
# / | /| / | /| / | /|
# / | / | 4 / | / | 5 / | / |
# / | / | / | / | / | / |
# . -------------- .----------------. |/ |
# | . ---+------|----.----+------|----. |
# | /| .______|___/|____.______|___/|____.
# | / | / 2 | / | / 3 | / | /
# | / | / | / | / | / | /
# . ---+---------- . ---+---------- . | /
# | |/ | |/ | |/ z
# | . ----------|----.-----------|----. ^ y
# | / 0 | / 1 | / | /
# | / | / | / | /
# | / | / | / o----> x
# . -------------- . -------------- .
#
#
# Face Refinement:
#
# 2_______________3 _______________
# | | | | |
# ^ | | | 2 | 3 |
# | | | | | |
# | | x | ---> |-------+-------|
# t1 | | | | |
# | | | 0 | 1 |
# |_______________| |_______|_______|
# 0 t0--> 1
#
#
# Face and Edge naming conventions:
#
# fZp
# |
# 6 ------eX3------ 7
# /| | / |
# /eZ2 . / eZ3
# eY2 | fYp eY3 |
# / | / fXp|
# 4 ------eX2----- 5 |
# |fXm 2 -----eX1--|---- 3 z
# eZ0 / | eY1 ^ y
# | eY0 . fYm eZ1 / | /
# | / | | / | /
# 0 ------eX0------1 o----> x
# |
# fZm
#
#
# fX fY
# 2___________3 2___________3
# | e1 | | e1 |
# | | | |
# e0 | x | e2 z e0 | x | e2 z
# | | ^ | | ^
# |___________| |___> y |___________| |___> x
# 0 e3 1 0 e3 1
# fZ
# 2___________3
# | e1 |
# | |
# e0 | x | e2 y
# | | ^
# |___________| |___> x
# 0 e3 1
from discretize.base import BaseTensorMesh
from discretize.operators import InnerProducts, DiffOperators
from discretize.mixins import InterfaceMixins, TreeMeshIO
from discretize.utils import as_array_n_by_dim
from discretize._extensions.tree_ext import _TreeMesh, TreeCell
import numpy as np
import scipy.sparse as sp
import warnings
from discretize.utils.code_utils import deprecate_property
class TreeMesh(
_TreeMesh, BaseTensorMesh, InnerProducts, DiffOperators, TreeMeshIO, InterfaceMixins
):
"""Class for QuadTree (2D) and OcTree (3D) meshes.
Tree meshes are numerical grids where the dimensions of each cell are powers of 2
larger than some base cell dimension. Unlike the :class:`~discretize.TensorMesh`
class, gridded locations and numerical operators for instances of ``TreeMesh``
    cannot be simply constructed using tensor products. Furthermore, each cell
    of a ``TreeMesh`` is an instance of
    :class:`~discretize.tree_mesh.TreeCell`.
Parameters
----------
h : (dim) iterable of int, numpy.ndarray, or tuple
Defines the cell widths of the *underlying tensor mesh* along each axis. The
length of the iterable object is equal to the dimension of the mesh (2 or 3).
For a 3D mesh, the list would have the form *[hx, hy, hz]*. The number of cells
along each axis **must be a power of 2** .
Along each axis, the user has 3 choices for defining the cells widths for the
underlying tensor mesh:
- :class:`int` -> A unit interval is equally discretized into `N` cells.
    - :class:`numpy.ndarray` -> The widths are explicitly given for each cell
- the widths are defined as a :class:`list` of :class:`tuple` of the form *(dh, nc, [npad])*
where *dh* is the cell width, *nc* is the number of cells, and *npad* (optional)
is a padding factor denoting exponential increase/decrease in the cell width
for each cell; e.g. *[(2., 10, -1.3), (2., 50), (2., 10, 1.3)]*
origin : (dim) iterable, default: 0
Define the origin or 'anchor point' of the mesh; i.e. the bottom-left-frontmost
corner. By default, the mesh is anchored such that its origin is at [0, 0, 0].
For each dimension (x, y or z), The user may set the origin 2 ways:
- a ``scalar`` which explicitly defines origin along that dimension.
- **{'0', 'C', 'N'}** a :class:`str` specifying whether the zero coordinate along
each axis is the first node location ('0'), in the center ('C') or the last
node location ('N') (see Examples).
Examples
--------
Here we generate a basic 2D tree mesh.
>>> from discretize import TreeMesh
>>> import numpy as np
>>> import matplotlib.pyplot as plt
Define base mesh (domain and finest discretization),
>>> dh = 5 # minimum cell width (base mesh cell width)
>>> nbc = 64 # number of base mesh cells
>>> h = dh * np.ones(nbc)
>>> mesh = TreeMesh([h, h])
    Define corner points for a rectangular box, and subdivide the mesh within the box
to the maximum refinement level.
>>> x0s = [120.0, 80.0]
>>> x1s = [240.0, 160.0]
>>> levels = [mesh.max_level]
>>> mesh.refine_box(x0s, x1s, levels)
>>> mesh.plot_grid()
>>> plt.show()
"""
_meshType = "TREE"
_aliases = {
**BaseTensorMesh._aliases,
**DiffOperators._aliases,
**{
"ntN": "n_total_nodes",
"ntEx": "n_total_edges_x",
"ntEy": "n_total_edges_y",
"ntEz": "n_total_edges_z",
"ntE": "n_total_edges",
"ntFx": "n_total_faces_x",
"ntFy": "n_total_faces_y",
"ntFz": "n_total_faces_z",
"ntF": "n_total_faces",
"nhN": "n_hanging_nodes",
"nhEx": "n_hanging_edges_x",
"nhEy": "n_hanging_edges_y",
"nhEz": "n_hanging_edges_z",
"nhE": "n_hanging_edges",
"nhFx": "n_hanging_faces_x",
"nhFy": "n_hanging_faces_y",
"nhFz": "n_hanging_faces_z",
"nhF": "n_hanging_faces",
"gridhN": "hanging_nodes",
"gridhFx": "hanging_faces_x",
"gridhFy": "hanging_faces_y",
"gridhFz": "hanging_faces_z",
"gridhEx": "hanging_edges_x",
"gridhEy": "hanging_edges_y",
"gridhEz": "hanging_edges_z",
},
}
_items = {"h", "origin", "cell_state"}
# inheriting stuff from BaseTensorMesh that isn't defined in _QuadTree
def __init__(self, h=None, origin=None, **kwargs):
if "x0" in kwargs:
origin = kwargs.pop("x0")
super().__init__(h=h, origin=origin)
cell_state = kwargs.pop("cell_state", None)
cell_indexes = kwargs.pop("cell_indexes", None)
cell_levels = kwargs.pop("cell_levels", None)
if cell_state is None:
if cell_indexes is not None and cell_levels is not None:
cell_state = {}
cell_state["indexes"] = cell_indexes
cell_state["levels"] = cell_levels
if cell_state is not None:
indexes = cell_state["indexes"]
levels = cell_state["levels"]
self.__setstate__((indexes, levels))
def __repr__(self):
"""Plain text representation."""
mesh_name = "{0!s}TreeMesh".format(("Oc" if self.dim == 3 else "Quad"))
top = "\n" + mesh_name + ": {0:2.2f}% filled\n\n".format(self.fill * 100)
# Number of cells per level
level_count = self._count_cells_per_index()
non_zero_levels = np.nonzero(level_count)[0]
cell_display = ["Level : Number of cells"]
cell_display.append("-----------------------")
for level in non_zero_levels:
cell_display.append("{:^5} : {:^15}".format(level, level_count[level]))
cell_display.append("-----------------------")
cell_display.append("Total : {:^15}".format(self.nC))
extent_display = [" Mesh Extent "]
extent_display.append(" min , max ")
extent_display.append(" ---------------------------")
dim_label = {0: "x", 1: "y", 2: "z"}
for dim in range(self.dim):
n_vector = getattr(self, "nodes_" + dim_label[dim])
extent_display.append(
"{}: {:^13},{:^13}".format(dim_label[dim], n_vector[0], n_vector[-1])
)
for i, line in enumerate(extent_display):
if i == len(cell_display):
cell_display.append(" " * (len(cell_display[0]) - 3 - len(line)))
cell_display[i] += 3 * " " + line
h_display = [" Cell Widths "]
h_display.append(" min , max ")
h_display.append("-" * (len(h_display[0])))
h_gridded = self.h_gridded
mins = np.min(h_gridded, axis=0)
maxs = np.max(h_gridded, axis=0)
for dim in range(self.dim):
h_display.append("{:^10}, {:^10}".format(mins[dim], maxs[dim]))
for i, line in enumerate(h_display):
if i == len(cell_display):
cell_display.append(" " * len(cell_display[0]))
cell_display[i] += 3 * " " + line
return top + "\n".join(cell_display)
def _repr_html_(self):
"""html representation"""
mesh_name = "{0!s}TreeMesh".format(("Oc" if self.dim == 3 else "Quad"))
level_count = self._count_cells_per_index()
non_zero_levels = np.nonzero(level_count)[0]
dim_label = {0: "x", 1: "y", 2: "z"}
h_gridded = self.h_gridded
mins = np.min(h_gridded, axis=0)
maxs = np.max(h_gridded, axis=0)
style = " style='padding: 5px 20px 5px 20px;'"
# Cell level table:
cel_tbl = "<table>\n"
cel_tbl += "<tr>\n"
cel_tbl += "<th" + style + ">Level</th>\n"
cel_tbl += "<th" + style + ">Number of cells</th>\n"
cel_tbl += "</tr>\n"
for level in non_zero_levels:
cel_tbl += "<tr>\n"
cel_tbl += "<td" + style + ">{}</td>\n".format(level)
cel_tbl += "<td" + style + ">{}</td>\n".format(level_count[level])
cel_tbl += "</tr>\n"
cel_tbl += "<tr>\n"
cel_tbl += (
"<td style='font-weight: bold; padding: 5px 20px 5px 20px;'> Total </td>\n"
)
cel_tbl += "<td" + style + "> {} </td>\n".format(self.nC)
cel_tbl += "</tr>\n"
cel_tbl += "</table>\n"
det_tbl = "<table>\n"
det_tbl += "<tr>\n"
det_tbl += "<th></th>\n"
det_tbl += "<th" + style + " colspan='2'>Mesh extent</th>\n"
det_tbl += "<th" + style + " colspan='2'>Cell widths</th>\n"
det_tbl += "</tr>\n"
det_tbl += "<tr>\n"
det_tbl += "<th></th>\n"
det_tbl += "<th" + style + ">min</th>\n"
det_tbl += "<th" + style + ">max</th>\n"
det_tbl += "<th" + style + ">min</th>\n"
det_tbl += "<th" + style + ">max</th>\n"
det_tbl += "</tr>\n"
for dim in range(self.dim):
n_vector = getattr(self, "nodes_" + dim_label[dim])
det_tbl += "<tr>\n"
det_tbl += "<td" + style + ">{}</td>\n".format(dim_label[dim])
det_tbl += "<td" + style + ">{}</td>\n".format(n_vector[0])
det_tbl += "<td" + style + ">{}</td>\n".format(n_vector[-1])
det_tbl += "<td" + style + ">{}</td>\n".format(mins[dim])
det_tbl += "<td" + style + ">{}</td>\n".format(maxs[dim])
det_tbl += "</tr>\n"
det_tbl += "</table>\n"
full_tbl = "<table>\n"
full_tbl += "<tr>\n"
full_tbl += "<td style='font-weight: bold; font-size: 1.2em; text-align: center;'>{}</td>\n".format(
mesh_name
)
full_tbl += "<td style='font-size: 1.2em; text-align: center;' colspan='2'>{0:2.2f}% filled</td>\n".format(
100 * self.fill
)
full_tbl += "</tr>\n"
full_tbl += "<tr>\n"
full_tbl += "<td>\n"
full_tbl += cel_tbl
full_tbl += "</td>\n"
full_tbl += "<td>\n"
full_tbl += det_tbl
full_tbl += "</td>\n"
full_tbl += "</tr>\n"
full_tbl += "</table>\n"
return full_tbl
@BaseTensorMesh.origin.setter
def origin(self, value):
# first use the BaseTensorMesh to set the origin to handle "0, C, N"
BaseTensorMesh.origin.fset(self, value)
# then update the TreeMesh with the hidden value
self._set_origin(self._origin)
@property
def vntF(self):
"""
Vector number of total faces along each axis
This property returns the total number of hanging and
non-hanging faces along each axis direction. The returned
quantity is a list of integers of the form [nFx,nFy,nFz].
Returns
-------
list of int
Vector number of total faces along each axis
"""
return [self.ntFx, self.ntFy] + ([] if self.dim == 2 else [self.ntFz])
@property
def vntE(self):
"""
Vector number of total edges along each axis
This property returns the total number of hanging and
non-hanging edges along each axis direction. The returned
quantity is a list of integers of the form [nEx,nEy,nEz].
Returns
-------
list of int
Vector number of total edges along each axis
"""
return [self.ntEx, self.ntEy] + ([] if self.dim == 2 else [self.ntEz])
@property
def stencil_cell_gradient(self):
if getattr(self, "_stencil_cell_gradient", None) is None:
self._stencil_cell_gradient = sp.vstack(
[self.stencil_cell_gradient_x, self.stencil_cell_gradient_y]
)
if self.dim == 3:
self._stencil_cell_gradient = sp.vstack(
[self._stencil_cell_gradient, self.stencil_cell_gradient_z]
)
return self._stencil_cell_gradient
@property
def cell_gradient(self):
if getattr(self, "_cell_gradient", None) is None:
i_s = self.face_boundary_indices
ix = np.ones(self.nFx)
ix[i_s[0]] = 0.0
ix[i_s[1]] = 0.0
Pafx = sp.diags(ix)
iy = np.ones(self.nFy)
iy[i_s[2]] = 0.0
iy[i_s[3]] = 0.0
Pafy = sp.diags(iy)
MfI = self.get_face_inner_product(invMat=True)
if self.dim == 2:
Pi = sp.block_diag([Pafx, Pafy])
elif self.dim == 3:
iz = np.ones(self.nFz)
iz[i_s[4]] = 0.0
iz[i_s[5]] = 0.0
Pafz = sp.diags(iz)
Pi = sp.block_diag([Pafx, Pafy, Pafz])
self._cell_gradient = (
-Pi * MfI * self.face_divergence.T * sp.diags(self.cell_volumes)
)
return self._cell_gradient
@property
def cell_gradient_x(self):
if getattr(self, "_cell_gradient_x", None) is None:
nFx = self.nFx
i_s = self.face_boundary_indices
ix = np.ones(self.nFx)
ix[i_s[0]] = 0.0
ix[i_s[1]] = 0.0
Pafx = sp.diags(ix)
MfI = self.get_face_inner_product(invMat=True)
MfIx = sp.diags(MfI.diagonal()[:nFx])
self._cell_gradient_x = (
-Pafx * MfIx * self.face_x_divergence.T * sp.diags(self.cell_volumes)
)
return self._cell_gradient_x
@property
def cell_gradient_y(self):
if getattr(self, "_cell_gradient_y", None) is None:
nFx = self.nFx
nFy = self.nFy
i_s = self.face_boundary_indices
iy = np.ones(self.nFy)
iy[i_s[2]] = 0.0
iy[i_s[3]] = 0.0
Pafy = sp.diags(iy)
MfI = self.get_face_inner_product(invMat=True)
MfIy = sp.diags(MfI.diagonal()[nFx : nFx + nFy])
self._cell_gradient_y = (
-Pafy * MfIy * self.face_y_divergence.T * sp.diags(self.cell_volumes)
)
return self._cell_gradient_y
@property
def cell_gradient_z(self):
if self.dim == 2:
raise TypeError("z derivative not defined in 2D")
if getattr(self, "_cell_gradient_z", None) is None:
nFx = self.nFx
nFy = self.nFy
i_s = self.face_boundary_indices
iz = np.ones(self.nFz)
iz[i_s[4]] = 0.0
iz[i_s[5]] = 0.0
Pafz = sp.diags(iz)
MfI = self.get_face_inner_product(invMat=True)
MfIz = sp.diags(MfI.diagonal()[nFx + nFy :])
self._cell_gradient_z = (
-Pafz * MfIz * self.face_z_divergence.T * sp.diags(self.cell_volumes)
)
return self._cell_gradient_z
@property
def face_x_divergence(self):
if getattr(self, "_face_x_divergence", None) is None:
self._face_x_divergence = self.face_divergence[:, : self.nFx]
return self._face_x_divergence
@property
def face_y_divergence(self):
if getattr(self, "_face_y_divergence", None) is None:
self._face_y_divergence = self.face_divergence[
:, self.nFx : self.nFx + self.nFy
]
return self._face_y_divergence
@property
def face_z_divergence(self):
if getattr(self, "_face_z_divergence", None) is None:
self._face_z_divergence = self.face_divergence[:, self.nFx + self.nFy :]
return self._face_z_divergence
def point2index(self, locs):
"""Finds cells that contain the given points.
Returns an array of index values of the cells that contain the given
points
Parameters
----------
locs: (N, dim) array_like
points to search for the location of
Returns
-------
(N) array_like of int
Cell indices that contain the points
"""
locs = as_array_n_by_dim(locs, self.dim)
inds = self._get_containing_cell_indexes(locs)
return inds
def cell_levels_by_index(self, indices):
"""Fast function to return a list of levels for the given cell indices
Parameters
----------
index: (N) array_like
Cell indexes to query
Returns
-------
(N) numpy.ndarray of int
Levels for the cells.
"""
return self._cell_levels_by_indexes(indices)
def get_interpolation_matrix(
self, locs, location_type="CC", zeros_outside=False, **kwargs
):
"""Produces interpolation matrix
Parameters
----------
loc : (N, dim) array_like
Location of points to interpolate to
location_type: str, optional
What to interpolate
location_type can be:
- 'CC' -> scalar field defined on cell centers
- 'Ex' -> x-component of field defined on edges
- 'Ey' -> y-component of field defined on edges
- 'Ez' -> z-component of field defined on edges
- 'Fx' -> x-component of field defined on faces
- 'Fy' -> y-component of field defined on faces
- 'Fz' -> z-component of field defined on faces
- 'N' -> scalar field defined on nodes
Returns
-------
(N, n_loc_type) scipy.sparse.csr_matrix
the interpolation matrix
"""
if "locType" in kwargs:
warnings.warn(
"The locType keyword argument has been deprecated, please use location_type. "
"This will be removed in discretize 1.0.0",
DeprecationWarning,
)
location_type = kwargs["locType"]
if "zerosOutside" in kwargs:
warnings.warn(
"The zerosOutside keyword argument has been deprecated, please use zeros_outside. "
"This will be removed in discretize 1.0.0",
DeprecationWarning,
)
zeros_outside = kwargs["zerosOutside"]
locs = as_array_n_by_dim(locs, self.dim)
if location_type not in ["N", "CC", "Ex", "Ey", "Ez", "Fx", "Fy", "Fz"]:
raise Exception(
"location_type must be one of N, CC, Ex, Ey, Ez, Fx, Fy, or Fz"
)
if self.dim == 2 and location_type in ["Ez", "Fz"]:
raise Exception("Unable to interpolate from Z edges/face in 2D")
locs = np.require(np.atleast_2d(locs), dtype=np.float64, requirements="C")
if location_type == "N":
Av = self._getNodeIntMat(locs, zeros_outside)
elif location_type in ["Ex", "Ey", "Ez"]:
Av = self._getEdgeIntMat(locs, zeros_outside, location_type[1])
elif location_type in ["Fx", "Fy", "Fz"]:
Av = self._getFaceIntMat(locs, zeros_outside, location_type[1])
elif location_type in ["CC"]:
Av = self._getCellIntMat(locs, zeros_outside)
return Av
@property
def permute_cells(self):
"""Permutation matrix re-ordering of cells sorted by x, then y, then z
Returns
-------
(n_cells, n_cells) scipy.sparse.csr_matrix
"""
# TODO: cache these?
P = np.lexsort(self.gridCC.T) # sort by x, then y, then z
return sp.identity(self.nC).tocsr()[P]
@property
def permute_faces(self):
"""Permutation matrix re-ordering of faces sorted by x, then y, then z
Returns
-------
(n_faces, n_faces) scipy.sparse.csr_matrix
"""
# TODO: cache these?
Px = np.lexsort(self.gridFx.T)
Py = np.lexsort(self.gridFy.T) + self.nFx
if self.dim == 2:
P = np.r_[Px, Py]
else:
Pz = np.lexsort(self.gridFz.T) + (self.nFx + self.nFy)
P = np.r_[Px, Py, Pz]
return sp.identity(self.nF).tocsr()[P]
@property
def permute_edges(self):
"""Permutation matrix re-ordering of edges sorted by x, then y, then z
Returns
-------
(n_edges, n_edges) scipy.sparse.csr_matrix
"""
# TODO: cache these?
Px = np.lexsort(self.gridEx.T)
Py = np.lexsort(self.gridEy.T) + self.nEx
if self.dim == 2:
P = np.r_[Px, Py]
if self.dim == 3:
Pz = np.lexsort(self.gridEz.T) + (self.nEx + self.nEy)
P = np.r_[Px, Py, Pz]
return sp.identity(self.nE).tocsr()[P]
@property
def cell_state(self):
""" The current state of the cells on the mesh.
This represents the x, y, z indices of the cells in the base tensor mesh, as
well as their levels. It can be used to reconstruct the mesh.
Returns
-------
dict
dictionary with two entries:
- ``"indexes"``: the indexes of the cells
- ``"levels"``: the levels of the cells
"""
indexes, levels = self.__getstate__()
return {"indexes": indexes.tolist(), "levels": levels.tolist()}
def validate(self):
return self.finalized
def equals(self, other):
try:
if self.finalized and other.finalized:
return super().equals(other)
except AttributeError:
pass
return False
def __reduce__(self):
return TreeMesh, (self.h, self.origin), self.__getstate__()
cellGrad = deprecate_property("cell_gradient", "cellGrad", removal_version="1.0.0", future_warn=False)
cellGradx = deprecate_property(
"cell_gradient_x", "cellGradx", removal_version="1.0.0", future_warn=False
)
cellGrady = deprecate_property(
"cell_gradient_y", "cellGrady", removal_version="1.0.0", future_warn=False
)
cellGradz = deprecate_property(
"cell_gradient_z", "cellGradz", removal_version="1.0.0", future_warn=False
)
cellGradStencil = deprecate_property(
"cell_gradient_stencil", "cellGradStencil", removal_version="1.0.0", future_warn=False
)
nodalGrad = deprecate_property(
"nodal_gradient", "nodalGrad", removal_version="1.0.0", future_warn=False
)
nodalLaplacian = deprecate_property(
"nodal_laplacian", "nodalLaplacian", removal_version="1.0.0", future_warn=False
)
faceDiv = deprecate_property("face_divergence", "faceDiv", removal_version="1.0.0", future_warn=False)
faceDivx = deprecate_property(
"face_x_divergence", "faceDivx", removal_version="1.0.0", future_warn=False
)
faceDivy = deprecate_property(
"face_y_divergence", "faceDivy", removal_version="1.0.0", future_warn=False
)
faceDivz = deprecate_property(
"face_z_divergence", "faceDivz", removal_version="1.0.0", future_warn=False
)
edgeCurl = deprecate_property("edge_curl", "edgeCurl", removal_version="1.0.0", future_warn=False)
maxLevel = deprecate_property("max_used_level", "maxLevel", removal_version="1.0.0", future_warn=False)
vol = deprecate_property("cell_volumes", "vol", removal_version="1.0.0", future_warn=False)
areaFx = deprecate_property("face_x_areas", "areaFx", removal_version="1.0.0", future_warn=False)
areaFy = deprecate_property("face_y_areas", "areaFy", removal_version="1.0.0", future_warn=False)
areaFz = deprecate_property("face_z_areas", "areaFz", removal_version="1.0.0", future_warn=False)
area = deprecate_property("face_areas", "area", removal_version="1.0.0", future_warn=False)
edgeEx = deprecate_property("edge_x_lengths", "edgeEx", removal_version="1.0.0", future_warn=False)
edgeEy = deprecate_property("edge_y_lengths", "edgeEy", removal_version="1.0.0", future_warn=False)
edgeEz = deprecate_property("edge_z_lengths", "edgeEz", removal_version="1.0.0", future_warn=False)
edge = deprecate_property("edge_lengths", "edge", removal_version="1.0.0", future_warn=False)
permuteCC = deprecate_property(
"permute_cells", "permuteCC", removal_version="1.0.0", future_warn=False
)
permuteF = deprecate_property("permute_faces", "permuteF", removal_version="1.0.0", future_warn=False)
permuteE = deprecate_property("permute_edges", "permuteE", removal_version="1.0.0", future_warn=False)
faceBoundaryInd = deprecate_property(
"face_boundary_indices", "faceBoundaryInd", removal_version="1.0.0", future_warn=False
)
cellBoundaryInd = deprecate_property(
"cell_boundary_indices", "cellBoundaryInd", removal_version="1.0.0", future_warn=False
)
_aveCC2FxStencil = deprecate_property(
"average_cell_to_total_face_x", "_aveCC2FxStencil", removal_version="1.0.0", future_warn=False
)
_aveCC2FyStencil = deprecate_property(
"average_cell_to_total_face_y", "_aveCC2FyStencil", removal_version="1.0.0", future_warn=False
)
_aveCC2FzStencil = deprecate_property(
"average_cell_to_total_face_z", "_aveCC2FzStencil", removal_version="1.0.0", future_warn=False
)
_cellGradStencil = deprecate_property(
"stencil_cell_gradient", "_cellGradStencil", removal_version="1.0.0", future_warn=False
)
_cellGradxStencil = deprecate_property(
"stencil_cell_gradient_x", "_cellGradxStencil", removal_version="1.0.0", future_warn=False
)
_cellGradyStencil = deprecate_property(
"stencil_cell_gradient_y", "_cellGradyStencil", removal_version="1.0.0", future_warn=False
)
_cellGradzStencil = deprecate_property(
"stencil_cell_gradient_z", "_cellGradzStencil", removal_version="1.0.0", future_warn=False
)
```

avg_line_length 38.665803 | max_line_length 115 | alphanum_fraction 0.514807

qsc_code_*_quality_signal: num_words 3,410 | num_chars 29,850 | mean_word_length 4.243695 | frac_words_unique 0.158944
frac_chars_top_{2,3,4}grams 0.007187 | 0.007878 | 0.037592
frac_chars_dupe_{5..10}grams 0.437565 | 0.388017 | 0.320434 | 0.279179 | 0.180914 | 0.158455
frac_chars_replacement_symbols 0 | frac_chars_digital 0.017913 | frac_chars_whitespace 0.337956
size_file_byte 29,850 | num_lines 771 | num_chars_line_max 116 | num_chars_line_mean 38.715953
frac_chars_alphabet 0.714351 | frac_chars_comments 0.313936 | cate_xml_start 0 | frac_lines_dupe_lines 0.293706 | cate_autogen 0
frac_lines_long_string 0.009324 | frac_chars_string_length 0.171697 | frac_chars_long_word_length 0.018119 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0.003891 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.055944 | cate_var_zero false | frac_lines_pass 0.002331 | frac_lines_import 0.020979 | frac_lines_simplefunc 0.004662 | score_lines_no_logic 0.219114 | frac_lines_print 0

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0
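
The class docstring's example can be extended one step: after refining, `point2index` (defined in this file) maps query points to the cells that contain them. A sketch built on the same mesh as the docstring, assuming `discretize` is installed:

```python
# Sketch: build the QuadTree mesh from the class docstring, refine a box,
# then look up which cells contain a few query points.
import numpy as np
from discretize import TreeMesh

dh, nbc = 5, 64                  # base cell width, number of base cells
h = dh * np.ones(nbc)
mesh = TreeMesh([h, h])
mesh.refine_box([120.0, 80.0], [240.0, 160.0], [mesh.max_level])

pts = np.array([[130.0, 90.0], [10.0, 10.0]])
inds = mesh.point2index(pts)     # one containing-cell index per query point
print(inds, mesh.cell_levels_by_index(inds))
```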

---

hexsha 9de60149d3083eff4951fd6c5511316e856edce5 | size 4,053 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | BLOX/Examples/DayTrader/data_downloader.py | linearlabstech/blox | 6a5c8a28fcfcb17731be89939284e7ac13a047d7 | ["Apache-2.0"] | 17 | 2019-03-31T18:37:35.000Z | 2020-08-17T18:14:40.000Z |
| max_issues | BLOX/Examples/DayTrader/data_downloader.py | linearlabstech/blox | 6a5c8a28fcfcb17731be89939284e7ac13a047d7 | ["Apache-2.0"] | null | null | null |
| max_forks | BLOX/Examples/DayTrader/data_downloader.py | linearlabstech/blox | 6a5c8a28fcfcb17731be89939284e7ac13a047d7 | ["Apache-2.0"] | 1 | 2019-04-02T07:02:08.000Z | 2019-04-02T07:02:08.000Z |

content:

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019, Linear Labs Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from yahoo_finance_api2 import share
import torch.tensor as tt
import torch,random
def get_targets(rows):
"""
This is where you can build your target.
For the tutorial we're only concerned about whether we should buy or sell.
So we'll create a rule for this, we'll need to set this up as regression ( on the open set (-1,1) ). Our rule is as follows:
If the stock closes below the open, we should have sold (t-1 = sell at close, t = buy at close).
If the stock closes above the open, we should have bought (t-1 = buy at close, t = sell at close).
While under the constraint of maximizing our profit
    We can obviously make this more complex, even with the data we have, but for this tutorial we keep the rule simple.
If you want to create your own targets, this is where you should do it. Below is the accessible data structure passed to this function.
rows = {
"timestamp": [
1557149400000,
],
"open": [
126.38999938964844,
],
"high": [
128.55999755859375,
],
"low": [
126.11000061035156,
],
"close": [
128.14999389648438,
],
"volume": [
24239800,
]
}
"""
# targets: sell = -1, buy = +1
# set to sell at beginning of the trading day
    # we assume that unless it's going down, we buy.
# later we'll add some business logic to determine the actual action of purchasing
# return [ tt([0.]) ] + [ tt([ 0 if (rows['close'][i-2] > rows['open'][i-2]) and (rows['close'][i] > rows['open'][i]) else (1 if random.random() > .7 else 2 )]) for i in range(2,len(rows['open'])) ]
return [ tt( [ [ [ rows['high'][i] ] ] ] ) for i in range(1,len(rows['open'])) ]
def get_inputs(rows):
# you could also use a pandas DataFrame
return [ tt( [ [ [ rows['open'][i],rows['close'][i],rows['volume'][i],rows['low'][i],rows['high'][i] ] ] ]) for i in range(len(rows['open'])-1 ) ]
def main(args):
# default grab the last 75 days
import datetime
if args.csv:
import pandas as pd
data = pd.read_csv(args.csv)
else:
today = datetime.date.today()
ticker = share.Share(args.ticker)
data = ticker.get_historical(share.PERIOD_TYPE_DAY,args.start,share.FREQUENCY_TYPE_MINUTE,int(60/args.frequency))
torch.save({
'inputs':get_inputs(data),
'targets':get_targets(data)
},args.output_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t','--ticker',help="enter the stock ticker symbol",required=True)
parser.add_argument('-s','--start',help="start date of data to grab. default is 75 days ago",default=75,type=int)
parser.add_argument('-o','--output_file',help="name of the output file to save the dataset",default='trader.ds')
parser.add_argument('-f','--frequency',help='how frequent to sample each day of trading (in hourly fractions)',type=int,default=1)
parser.add_argument('--csv',help='the csv file to load instead of downloading fresh data',default=None)
main( parser.parse_args() )
```

avg_line_length 40.939394 | max_line_length 203 | alphanum_fraction 0.606464

qsc_code_*_quality_signal: num_words 566 | num_chars 4,053 | mean_word_length 4.293286 | frac_words_unique 0.411661
frac_chars_top_{2,3,4}grams 0.024691 | 0.034979 | 0.01358
frac_chars_dupe_{5..10}grams 0.032099 | 0.016461 | 0.016461 | 0 | 0 | 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.042838 | frac_chars_whitespace 0.280039
size_file_byte 4,053 | num_lines 99 | num_chars_line_max 204 | num_chars_line_mean 40.939394
frac_chars_alphabet 0.789925 | frac_chars_comments 0.555885 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0
frac_lines_long_string 0 | frac_chars_string_length 0.229826 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.103448 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.206897 | frac_lines_simplefunc 0.034483 | score_lines_no_logic 0.37931 | frac_lines_print 0

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0
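
The get_targets docstring above describes a close-above-open buy/sell rule, while the shipped code returns plain high prices as regression targets. As a sketch, the described rule could be written out like this; the labels and helper name are illustrative, not from the repo:

```python
# Sketch of the rule described in the docstring: label each day by whether it
# closed above its open. +1 = buy, -1 = sell; purely illustrative.
def simple_targets(rows):
    return [
        1.0 if rows['close'][i] > rows['open'][i] else -1.0
        for i in range(1, len(rows['open']))
    ]

rows = {'open': [126.4, 128.0], 'close': [128.1, 127.2]}
print(simple_targets(rows))  # [-1.0]: the second day closed below its open
```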

---

hexsha 9de667e4365b4429b64b9267a959ad26b53a85c3 | size 790 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | services/db-api/project/config.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | ["Apache-2.0"] | 2 | 2018-05-30T08:40:26.000Z | 2018-09-06T15:37:25.000Z |
| max_issues | services/db-api/project/config.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | ["Apache-2.0"] | 1 | 2021-06-01T22:37:55.000Z | 2021-06-01T22:37:55.000Z |
| max_forks | services/db-api/project/config.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | ["Apache-2.0"] | 2 | 2018-05-31T14:55:04.000Z | 2018-08-29T09:38:31.000Z |

content:

```python
import os
from project import app, db
class BaseConfig:
"""Base configuration"""
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
print('Running through config')
class DevelopmentConfig(BaseConfig):
"""Development configuration"""
SQLALCHEMY_DATABASE_URI = os.environ.get('POSTGRES_URL')
MASTER_STATION = os.environ.get('MASTER_STATION')
MASTER_ELEVATOR = os.environ.get('MASTER_ELEVATOR')
MONGO_URI = os.environ.get('MONGO_URI')
MONGO_DBNAME = 'eva_dev'
class TestingConfig(BaseConfig):
"""Testing configuration"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_TEST_URL')
class ProductionConfig(BaseConfig):
"""Production configuration"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
```

avg_line_length 27.241379 | max_line_length 65 | alphanum_fraction 0.734177

qsc_code_*_quality_signal: num_words 88 | num_chars 790 | mean_word_length 6.363636 | frac_words_unique 0.443182
frac_chars_top_{2,3,4}grams 0.096429 | 0.128571 | 0.107143
frac_chars_dupe_{5..10}grams 0.251786 | 0.251786 | 0.251786 | 0 | 0 | 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0 | frac_chars_whitespace 0.155696
size_file_byte 790 | num_lines 29 | num_chars_line_max 66 | num_chars_line_mean 27.241379
frac_chars_alphabet 0.83958 | frac_chars_comments 0.11519 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0
frac_lines_long_string 0 | frac_chars_string_length 0.159057 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0
frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0

qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.117647 | frac_lines_simplefunc 0 | score_lines_no_logic 0.941176 | frac_lines_print 0.058824

hit counters (no _quality_signal suffix): all 0; frac_words_unique and frac_lines_string_concat null | effective 1 | hits 0
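
For context, configuration classes like these are normally activated with Flask's `config.from_object`. A minimal sketch; the factory wiring is an assumption, not shown in this file:

```python
# Sketch: how a Flask app would typically select one of the config classes
# above. The create_app factory is assumed, not from this repo.
from flask import Flask

def create_app(config_object='project.config.DevelopmentConfig'):
    app = Flask(__name__)
    app.config.from_object(config_object)  # reads the class attributes
    return app
```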

---

hexsha 9de9e23fa22dfbdc836f630d94dbe82b7f2350bd | size 1,259 | ext py | lang Python

| | path | repo | head hexsha | licenses | count | first event | last event |
|---|---|---|---|---|---|---|---|
| max_stars | src/sample.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | ["MIT"] | 14 | 2021-01-28T07:13:25.000Z | 2022-02-10T06:41:32.000Z |
| max_issues | src/sample.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | ["MIT"] | 2 | 2021-04-14T15:24:30.000Z | 2021-05-06T07:02:08.000Z |
| max_forks | src/sample.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | ["MIT"] | 1 | 2021-07-09T02:52:59.000Z | 2021-07-09T02:52:59.000Z |

content:

```python
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 11:17:21 2020

@author: eilxaix
"""
import pandas as pd
import re


def remove_hashtag(t):
    # Despite the name, this replaces hyphens with spaces and collapses
    # repeated whitespace; it does not touch '#' characters.
    t = re.sub('-', ' ', t)
    t = ' '.join(t.split())
    return t


def read_csv_data(df):
    title = [remove_hashtag(i) for i in df['Document Title']]
    abstract = [remove_hashtag(i) for i in df['Abstract']]
    doc = [title[i] + '. ' + abstract[i] for i in range(len(df))]
    inspec_controlled = [remove_hashtag(i) for i in df['INSPEC Controlled Terms']]
    inspec_uncontrolled = [remove_hashtag(i) for i in df['INSPEC Non-Controlled Terms']]
    for i in range(len(inspec_uncontrolled)):
        inspec_uncontrolled[i] = [k.lower() for k in inspec_uncontrolled[i].split(';')]
    for i in range(len(inspec_controlled)):
        inspec_controlled[i] = [k.lower() for k in inspec_controlled[i].split(';')]
    data = {'title': title, 'abstract': abstract, 'title+abs': doc,
            'inspec_controlled': inspec_controlled,
            'inspec_uncontrolled': inspec_uncontrolled}
    return data

# =============================================================================
# data = read_csv_data(pd.read_csv('../../dataset/ieee_xai/ieee_xai.csv'))
# =============================================================================
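A minimal, self-contained check of the reader above (the column values are made up for illustration):

    df = pd.DataFrame({'Document Title': ['Explainable-AI survey'],
                       'Abstract': ['A short overview.'],
                       'INSPEC Controlled Terms': ['learning (artificial intelligence)'],
                       'INSPEC Non-Controlled Terms': ['XAI;interpretability']})
    data = read_csv_data(df)
    print(data['title+abs'])  # ['Explainable AI survey. A short overview.']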
| 38.151515
| 155
| 0.576648
| 163
| 1,259
| 4.306748
| 0.306748
| 0.039886
| 0.059829
| 0.049858
| 0.273504
| 0.253561
| 0.196581
| 0.079772
| 0
| 0
| 0
| 0.01223
| 0.155679
| 1,259
| 32
| 156
| 39.34375
| 0.648166
| 0.242256
| 0
| 0
| 0
| 0
| 0.145281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9deccc7336bb4388cafa2a33d6c4aebd562a78e9
| 936
|
py
|
Python
|
tests/test_estimators/test_probweight_regression.py
|
janvdvegt/scikit-lego
|
774e557c4d19f67ef54f3f0d1622c64ef9903b63
|
[
"MIT"
] | null | null | null |
tests/test_estimators/test_probweight_regression.py
|
janvdvegt/scikit-lego
|
774e557c4d19f67ef54f3f0d1622c64ef9903b63
|
[
"MIT"
] | null | null | null |
tests/test_estimators/test_probweight_regression.py
|
janvdvegt/scikit-lego
|
774e557c4d19f67ef54f3f0d1622c64ef9903b63
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest

from sklego.common import flatten
from sklego.linear_model import ProbWeightRegression
from tests.conftest import nonmeta_checks, regressor_checks, general_checks


@pytest.mark.parametrize("test_fn", flatten([
    nonmeta_checks,
    general_checks,
    regressor_checks
]))
def test_estimator_checks(test_fn):
    regr_min_zero = ProbWeightRegression(non_negative=True)
    test_fn(ProbWeightRegression.__name__ + '_min_zero_true', regr_min_zero)

    regr_not_min_zero = ProbWeightRegression(non_negative=False)
    test_fn(ProbWeightRegression.__name__ + '_min_zero_false', regr_not_min_zero)


def test_shape_trained_model(random_xy_dataset_regr):
    X, y = random_xy_dataset_regr
    mod_no_intercept = ProbWeightRegression()
    assert mod_no_intercept.fit(X, y).coefs_.shape == (X.shape[1], )
    np.testing.assert_approx_equal(mod_no_intercept.fit(X, y).coefs_.sum(), 1.0, significant=4)
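The shape test encodes the model's defining constraint: the fitted coefficients form a probability vector (they sum to 1, and are non-negative when non_negative=True). To run just this module from a scikit-lego checkout (the fixtures in tests/conftest.py must be importable):

    pytest tests/test_estimators/test_probweight_regression.py -q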
| 36
| 95
| 0.794872
| 130
| 936
| 5.269231
| 0.415385
| 0.061314
| 0.061314
| 0.087591
| 0.30073
| 0.189781
| 0.189781
| 0
| 0
| 0
| 0
| 0.004843
| 0.117521
| 936
| 25
| 96
| 37.44
| 0.824455
| 0
| 0
| 0
| 0
| 0
| 0.043803
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.25
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9df0774506aa365c6756ee8a870b647ac6699146
| 8,284
|
py
|
Python
|
GEM/plt_resukts_GEM.py
|
Webbah/sec-for-reinforcement-learning
|
19db622dce4963d25cb1b6e4ae12ddf98b6d27d2
|
[
"MIT"
] | 2
|
2021-12-16T12:49:26.000Z
|
2022-01-28T19:18:43.000Z
|
GEM/plt_resukts_GEM.py
|
Webbah/sec-for-reinforcement-learning
|
19db622dce4963d25cb1b6e4ae12ddf98b6d27d2
|
[
"MIT"
] | null | null | null |
GEM/plt_resukts_GEM.py
|
Webbah/sec-for-reinforcement-learning
|
19db622dce4963d25cb1b6e4ae12ddf98b6d27d2
|
[
"MIT"
] | null | null | null |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
save_results = False
def plot_stored_GEM_reults(interval_x=None, interval_y=None):
if interval_x is None:
#interval_list_x = [0.499, 0.506] # 1
interval_list_x = [0, 1]
#interval_list_x = [0.299, 0.305] # 2
#interval_list_x = [0.949, 0.953] # 3
#interval_list_x = [0.049, 0.052]
else:
interval_list_x = interval_x
if interval_y is None:
interval_list_y = [80, 345]
else:
interval_list_y = interval_y
folder_name = 'GEM/data'
df_DDPG = pd.read_pickle('GEM/data/DDPG_data')
df_DDPG_I = pd.read_pickle('GEM/data/SEC_DDPG_data')
df_PI = pd.read_pickle('GEM/data/GEM_PI_a4.pkl')
ts = 1e-4
t_test = np.arange(0, len(df_DDPG['i_d_mess'][0]) * ts, ts).tolist()
t_PI_2 = np.arange(-ts, len(df_PI['i_d_mess']) * ts - ts, ts).tolist()
t_reward = np.arange(-ts - ts, round(len(df_DDPG['v_d_mess'][0]) * ts - ts - ts, 4), ts).tolist()
reward_sec = df_DDPG_I['Reward_test'].tolist()[0]
reward = df_DDPG['Reward_test'].tolist()[0]
reward_PI = df_PI['Reward'].tolist()
if save_results:
params = {'backend': 'ps',
'text.latex.preamble': [r'\usepackage{gensymb}'
r'\usepackage{amsmath,amssymb,mathtools}'
r'\newcommand{\mlutil}{\ensuremath{\operatorname{ml-util}}}'
r'\newcommand{\mlacc}{\ensuremath{\operatorname{ml-acc}}}'],
'axes.labelsize': 12.5, # fontsize for x and y labels (was 10)
'axes.titlesize': 12.5,
'font.size': 12.5, # was 10
'legend.fontsize': 12.5, # was 10
'xtick.labelsize': 12,
'ytick.labelsize': 12,
'text.usetex': True,
'figure.figsize': [5.2, 5.625],#[4.5, 7.5],
'font.family': 'serif',
'lines.linewidth': 1.2
}
matplotlib.rcParams.update(params)
fig, axs = plt.subplots(3, 1)
axs[1].plot(t_test, [i * 160 * 1.41 for i in df_DDPG_I['i_q_mess'].tolist()[0]], 'r', label='$\mathrm{SEC}$')
axs[1].plot(t_test, [i * 160 * 1.41 for i in df_DDPG['i_q_mess'].tolist()[0]], '-.r',
label='$\mathrm{DDPG}_\mathrm{}$')
axs[1].plot(t_test, [i * 160 * 1.41 for i in df_PI['i_q_mess'].tolist()], '--r',
label='$\mathrm{PI}_\mathrm{}$')
axs[1].plot(t_test, [i * 160 * 1.41 for i in df_DDPG_I['i_q_ref'].tolist()[0]], ':', color='gray',
label='$\mathrm{i}_\mathrm{q}^*$', linewidth=2)
axs[1].plot(t_test, [i * 160 * 1.41 for i in df_PI['i_q_ref'].tolist()], ':', color='gray',
label='$\mathrm{i}_\mathrm{q}^*$', linewidth=2)
axs[1].grid()
# axs[1].legend()
axs[1].set_xlim(interval_list_x)
axs[1].set_ylim([-0.5 * 160 * 1.41, 0.55 * 160 * 1.41]) # 1
#axs[1].set_ylim([-0 * 160 * 1.41, 0.4 * 160 * 1.41]) # 2
#axs[1].set_ylim([0.37 * 160 * 1.41, 0.52 * 160 * 1.41]) # 3
# axs[0].set_xlabel(r'$t\,/\,\mathrm{s}$')
axs[1].tick_params(axis='x', colors='w')
axs[1].set_ylabel("$i_{\mathrm{q}}\,/\,{\mathrm{A}}$")
axs[1].tick_params(direction='in')
axs[0].plot(t_test, [i * 160 * 1.41 for i in df_DDPG_I['i_d_mess'].tolist()[0]], 'b',
label='$\mathrm{SEC}_\mathrm{}$')
axs[0].plot(t_test, [i * 160 * 1.41 for i in df_DDPG['i_d_mess'].tolist()[0]], '-.b',
label='$\mathrm{DDPG}_\mathrm{}$')
axs[0].plot(t_test, [i * 160 * 1.41 for i in df_PI['i_d_mess'].tolist()], '--b',
label='$\mathrm{PI}_\mathrm{}$')
axs[0].plot(t_test, [i * 160 * 1.41 for i in df_DDPG_I['i_d_ref'].tolist()[0]], ':', color='gray',
label='$i_\mathrm{}^*$', linewidth=2)
axs[0].grid()
axs[0].legend(bbox_to_anchor = (0, 1.02, 1, 0.2), loc="lower left",mode="expand", borderaxespad=0, ncol=4)
axs[0].set_xlim(interval_list_x)
axs[0].set_ylim([-0.78 * 160 * 1.41, 0.05 * 160 * 1.41]) #
axs[0].set_ylim([-0.78 * 160 * 1.41, 0.05 * 160 * 1.41]) # 1
#axs[0].set_ylim([-0.9 * 160 * 1.41, 0.005 * 160 * 1.41]) # 2
#axs[0].set_ylim([-1 * 160 * 1.41, -0.2 * 160 * 1.41]) # 3
axs[0].tick_params(axis='x', colors='w')
axs[0].set_ylabel("$i_{\mathrm{d}}\,/\,{\mathrm{A}}$")
axs[0].tick_params(direction='in')
fig.subplots_adjust(wspace=0, hspace=0.05)
axs[2].plot(t_reward, [i * 200 for i in df_DDPG_I['v_q_mess'].tolist()[0]], 'r', label='$\mathrm{SEC}$')
axs[2].plot(t_reward, [i * 200 for i in df_DDPG['v_q_mess'].tolist()[0]], '-.r',
label='$\mathrm{DDPG}_\mathrm{}$')
axs[2].plot(t_PI_2, [i * 200 for i in df_PI['v_q_mess'].tolist()], '--r',
label='$\mathrm{PI}_\mathrm{}$')
axs[2].plot(t_reward, [i * 200 for i in df_DDPG_I['v_d_mess'].tolist()[0]], 'b', label='$\mathrm{SEC}$')
axs[2].plot(t_reward, [i * 200 for i in df_DDPG['v_d_mess'].tolist()[0]], '-.b',
label='$\mathrm{DDPG}_\mathrm{}$')
axs[2].plot(t_PI_2, [i * 200 for i in df_PI['v_d_mess'].tolist()], '--b',
label='$\mathrm{PI}_\mathrm{}$')
#axs[2].plot(t_reward, df_DDPG_I['v_q_mess'].tolist()[0], 'r', label='$\mathrm{SEC}$')
#axs[2].plot(t_reward, df_DDPG['v_q_mess'].tolist()[0], '-.r',
# label='$\mathrm{DDPG}_\mathrm{}$')
#axs[2].plot(t_reward, df_PI['v_q_mess'].tolist(), '--r',
# label='$\mathrm{PI}_\mathrm{}$')
# axs[2].plot(t_reward, df_DDPG_I['v_d_mess'].tolist()[0], 'b', label='$\mathrm{SEC}$')
# axs[2].plot(t_reward, df_DDPG['v_d_mess'].tolist()[0], '--b', label='$\mathrm{DDPG}_\mathrm{}$')
# axs[2].plot(t_PI_3, df_PI['v_d_mess'].tolist(), '--b', label='$\mathrm{PI}_\mathrm{}$')
axs[2].grid()
# axs[1].legend()
axs[2].set_xlim(interval_list_x)
#axs[2].set_ylim([-100, 100])
# axs[0].set_xlabel(r'$t\,/\,\mathrm{s}$')
#axs[2].set_xlabel(r'$t\,/\,\mathrm{s}$')
#axs[2].tick_params(axis='x', colors='w')
axs[2].set_xlabel(r'$t\,/\,\mathrm{s}$')
axs[2].set_ylabel("$v_{\mathrm{dq}}\,/\,{\mathrm{V}}$")
#axs[2].set_ylabel("$u_{\mathrm{dq}}\,/\, v_\mathrm{DC}\,/\,2$")
axs[2].tick_params(direction='in')
"""
axs[3].plot(t_test, reward_sec, 'b', label=f' SEC-DDPG: '
f'{round(sum(reward_sec[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
axs[3].plot(t_test, reward, 'r', label=f'DDPG: '
f'{round(sum(reward[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
axs[3].plot(t_PI_2, reward_PI, '--r', label=f'PI: '
f'{round(sum(reward_PI[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
axs[3].grid()
axs[3].set_xlim(interval_list_x)
#axs[3].legend()
axs[3].set_ylabel("Reward")
plt.show()
"""
plt.show()
if save_results:
fig.savefig(f'{folder_name}/GEM_DDPG_I_noI_idq1.pgf')
fig.savefig(f'{folder_name}/GEM_DDPG_I_noI_idq1.png')
fig.savefig(f'{folder_name}/GEM_DDPG_I_noI_idq1.pdf')
plt.plot(t_test, reward_sec, 'b', label=f' SEC-DDPG: '
f'{round(sum(reward_sec[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
plt.plot(t_test, reward, 'r', label=f'DDPG: '
f'{round(sum(reward[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
plt.plot(t_test, reward_PI, '--r', label=f'PI: '
f'{round(sum(reward_PI[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
plt.grid()
plt.xlim(interval_list_x)
plt.legend()
plt.ylabel("Reward")
plt.show()
plot_stored_GEM_reults()
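The module renders the full [0, 1] s window on import via the final call. A zoomed view of one of the commented reference windows can be requested explicitly (note that interval_y is assigned but never used inside the function as written; the y-limits are hard-coded per axis):

    plot_stored_GEM_reults(interval_x=[0.299, 0.305])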
| 49.017751
| 185
| 0.543457
| 1,342
| 8,284
| 3.131893
| 0.129657
| 0.105639
| 0.108256
| 0.056626
| 0.656674
| 0.57911
| 0.544611
| 0.526053
| 0.519867
| 0.490126
| 0
| 0.065848
| 0.233704
| 8,284
| 169
| 186
| 49.017751
| 0.596251
| 0.154515
| 0
| 0.166667
| 0
| 0.027778
| 0.268337
| 0.143418
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009259
| false
| 0
| 0.037037
| 0
| 0.046296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9df19b2f9979610a8ed9bef79a44747496f8dd2a
| 3,725
|
py
|
Python
|
Adhesion/Interactions/PowerLaw.py
|
ContactEngineering/Adhesion
|
acc46ad9bfe49fec667cb9a116ebde426faa38c4
|
[
"MIT"
] | null | null | null |
Adhesion/Interactions/PowerLaw.py
|
ContactEngineering/Adhesion
|
acc46ad9bfe49fec667cb9a116ebde426faa38c4
|
[
"MIT"
] | 4
|
2021-08-18T07:30:57.000Z
|
2022-03-05T11:05:09.000Z
|
Adhesion/Interactions/PowerLaw.py
|
ContactEngineering/Adhesion
|
acc46ad9bfe49fec667cb9a116ebde426faa38c4
|
[
"MIT"
] | null | null | null |
#
# Copyright 2020 Antoine Sanner
# 2020 Lars Pastewka
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np

from NuMPI import MPI

from Adhesion.Interactions import Potential, SoftWall


class PowerLaw(Potential):
    r"""Polynomial interaction whose value and first and second derivatives
    are 0 at the cutoff radius :math:`r_c`:

    .. math ::

        V(r) = - w \left(1 - r / r_c\right)^p \quad (r < r_c),
        \qquad 0 \text{ otherwise},

    with work of adhesion :math:`w` and exponent :math:`p \geq 3`.
    """
    name = "PowerLaw"

    def __init__(self, work_of_adhesion, cutoff_radius, exponent=3,
                 communicator=MPI.COMM_WORLD):
        """
        Parameters
        ----------
        work_of_adhesion: float or ndarray
            surface energy at perfect contact
        cutoff_radius: float or ndarray
            distance :math:`r_c` at which the potential has decayed to 0
        """
        self.cutoff_radius = self.rho = cutoff_radius
        self.work_of_adhesion = work_of_adhesion
        self.exponent = exponent
        SoftWall.__init__(self, communicator=communicator)

    def __repr__(self, ):
        return (
            "Potential '{0.name}': "
            "work_of_adhesion = {0.work_of_adhesion}, "
            "cutoff_radius = {0.cutoff_radius}, exponent = {0.exponent}"
        ).format(self)

    def __getstate__(self):
        state = super().__getstate__(), \
            self.exponent, self.rho, self.work_of_adhesion
        return state

    def __setstate__(self, state):
        superstate, self.exponent, self.rho, self.work_of_adhesion = state
        super().__setstate__(superstate)

    @property
    def has_cutoff(self):
        return True

    @property
    def r_min(self):
        return None

    @property
    def r_infl(self):
        return None

    @property
    def max_tensile(self):
        return - self.work_of_adhesion / self.rho * self.exponent

    def evaluate(self, gap, potential=True, gradient=False, curvature=False,
                 mask=None):
        r = np.asarray(gap)
        if mask is None:
            mask = (slice(None), ) * len(r.shape)
        w = self.work_of_adhesion if np.isscalar(self.work_of_adhesion) \
            else self.work_of_adhesion[mask]
        rc = self.rho if np.isscalar(self.rho) else self.rho[mask]
        p = self.exponent
        g = (1 - r / rc)
        V = dV = ddV = None
        gpm2 = g ** (p - 2)
        gpm1 = gpm2 * g
        if potential:
            V = np.where(g > 0, - w * gpm1 * g, 0)
        if gradient:
            dV = np.where(g > 0, p * w / rc * gpm1, 0)
        if curvature:
            ddV = np.where(g > 0, - p * (p - 1) * w / rc ** 2 * gpm2, 0)
        return V, dV, ddV
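A small usage sketch for the potential above (the numeric values are illustrative; NuMPI supplies the default communicator):

    import numpy as np

    pot = PowerLaw(work_of_adhesion=0.01, cutoff_radius=2.0, exponent=3)
    gap = np.linspace(0.0, 3.0, 7)
    V, dV, ddV = pot.evaluate(gap, potential=True, gradient=True, curvature=True)
    # V equals -w at zero gap, decays to 0 at the cutoff radius, and is 0 beyond it.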
| 32.391304
| 79
| 0.628725
| 497
| 3,725
| 4.577465
| 0.374245
| 0.031648
| 0.073846
| 0.063297
| 0.086154
| 0.032527
| 0.032527
| 0.032527
| 0
| 0
| 0
| 0.012304
| 0.28
| 3,725
| 114
| 80
| 32.675439
| 0.835943
| 0.395168
| 0
| 0.105263
| 0
| 0
| 0.059925
| 0.009831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.052632
| 0.087719
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9df5471ca3ddddaa94bd6982c624b686b6a66f95
| 677
|
py
|
Python
|
Python3/Books/Douson/chapter09/simple_game.py
|
neon1ks/Study
|
5d40171cf3bf5e8d3a95539e91f5afec54d1daf3
|
[
"MIT"
] | null | null | null |
Python3/Books/Douson/chapter09/simple_game.py
|
neon1ks/Study
|
5d40171cf3bf5e8d3a95539e91f5afec54d1daf3
|
[
"MIT"
] | null | null | null |
Python3/Books/Douson/chapter09/simple_game.py
|
neon1ks/Study
|
5d40171cf3bf5e8d3a95539e91f5afec54d1daf3
|
[
"MIT"
] | 2
|
2018-07-31T23:25:43.000Z
|
2019-07-03T14:26:18.000Z
|
# Simple Game
# Demonstrates importing modules

import games, random

print("Welcome to the world's simplest game!\n")

again = None
while again != "n":
    players = []
    num = games.ask_number(question="How many players? (2 - 5): ",
                           low=2, high=5)
    for i in range(num):
        name = input("Player name: ")
        score = random.randrange(100) + 1
        player = games.Player(name, score)
        players.append(player)

    print("\nHere are the game results:")
    for player in players:
        print(player)

    again = games.ask_yes_no("\nDo you want to play again? (y/n): ")

input("\n\nPress the enter key to exit.")
| 26.038462
| 68
| 0.589365
| 92
| 677
| 4.304348
| 0.608696
| 0.040404
| 0.075758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0.285081
| 677
| 25
| 69
| 27.08
| 0.801653
| 0.062038
| 0
| 0
| 0
| 0
| 0.278481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.176471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9df5c3a1c0529a4f203b0c8d4d096dd4cd43ed68
| 10,873
|
py
|
Python
|
izzoLambertSolver.py
|
tylera277/voyagerTrajectoryCalculator
|
fded6356e670fbc2b182cac2bfcc98e7223e2b80
|
[
"MIT"
] | null | null | null |
izzoLambertSolver.py
|
tylera277/voyagerTrajectoryCalculator
|
fded6356e670fbc2b182cac2bfcc98e7223e2b80
|
[
"MIT"
] | null | null | null |
izzoLambertSolver.py
|
tylera277/voyagerTrajectoryCalculator
|
fded6356e670fbc2b182cac2bfcc98e7223e2b80
|
[
"MIT"
] | null | null | null |
""" A module hosting all algorithms devised by Izzo """
import time
import numpy as np
from numpy import cross, pi
from numpy.linalg import norm
from scipy.special import hyp2f1
def izzo2015(
mu,
r1,
r2,
tof,
M=0,
prograde=True,
low_path=True,
maxiter=35,
atol=1e-5,
rtol=1e-7,
full_output=False,
):
r"""
Solves the Lambert problem using the algorithm devised by Izzo.
Parameters
----------
mu: float
Gravitational parameter, equivalent to :math:`GM` of attractor body.
r1: numpy.array
Initial position vector.
r2: numpy.array
Final position vector.
tof: float
Time of flight.
M: int
Number of revolutions. Must be greater than or equal to 0.
prograde: bool
If `True`, specifies prograde motion. Otherwise, retrograde motion is imposed.
low_path: bool
If two solutions are available, it selects between high or low path.
maxiter: int
Maximum number of iterations.
atol: float
Absolute tolerance.
rtol: float
Relative tolerance.
full_output: bool
If True, the number of iterations is also returned.
Returns
-------
v1: numpy.array
Initial velocity vector.
v2: numpy.array
Final velocity vector.
numiter: list
Number of iterations.
Notes
-----
This is the algorithm devised by Dario Izzo[1] in 2015. It inherits from
the one developed by Lancaster[2] during the 60s, following the universal
formulae approach. It is one of the most modern solvers: a complete
Lambert's problem solver (zero- and multiple-revolution solutions). It shows
high performance and robustness while requiring no more than four iterations
to reach a solution.
All credits of the implementation go to Juan Luis Cano Rodríguez and the
poliastro development team, from which this routine inherits. Some changes
were made to adapt it to `lamberthub` API. In addition, the hypergeometric
function is the one from SciPy.
Copyright (c) 2012-2021 Juan Luis Cano Rodríguez and the poliastro development team
References
----------
[1] Izzo, D. (2015). Revisiting Lambert’s problem. Celestial Mechanics
and Dynamical Astronomy, 121(1), 1-15.
[2] Lancaster, E. R., & Blanchard, R. C. (1969). A unified form of
Lambert's theorem (Vol. 5368). National Aeronautics and Space
Administration.
"""
# Check that input parameters are safe
#assert_parameters_are_valid(mu, r1, r2, tof, M)
# Chord
c = r2 - r1
c_norm, r1_norm, r2_norm = norm(c), norm(r1), norm(r2)
# Semiperimeter
s = (r1_norm + r2_norm + c_norm) * 0.5
# Versors
i_r1, i_r2 = r1 / r1_norm, r2 / r2_norm
i_h = cross(i_r1, i_r2)
i_h = i_h / norm(i_h)
# Geometry of the problem
ll = np.sqrt(1 - min(1.0, c_norm / s))
# Compute the fundamental tangential directions
if i_h[2] < 0:
ll = -ll
i_t1, i_t2 = cross(i_r1, i_h), cross(i_r2, i_h)
else:
i_t1, i_t2 = cross(i_h, i_r1), cross(i_h, i_r2)
# Correct transfer angle parameter and tangential vectors regarding orbit's
# inclination
ll, i_t1, i_t2 = (-ll, -i_t1, -i_t2) if prograde is False else (ll, i_t1, i_t2)
# Non dimensional time of flight
T = np.sqrt(2 * mu / s ** 3) * tof
# Find solutions and filter them
x, y, numiter, tpi = _find_xy(ll, T, M, maxiter, atol, rtol, low_path)
# Reconstruct
gamma = np.sqrt(mu * s / 2)
rho = (r1_norm - r2_norm) / c_norm
sigma = np.sqrt(1 - rho ** 2)
# Compute the radial and tangential components at initial and final
# position vectors
V_r1, V_r2, V_t1, V_t2 = _reconstruct(x, y, r1_norm, r2_norm, ll, gamma, rho, sigma)
# Solve for the initial and final velocity
v1 = V_r1 * (r1 / r1_norm) + V_t1 * i_t1
v2 = V_r2 * (r2 / r2_norm) + V_t2 * i_t2
return (v1, v2, numiter, tpi) if full_output is True else (v1, v2)
def _reconstruct(x, y, r1, r2, ll, gamma, rho, sigma):
"""Reconstruct solution velocity vectors."""
V_r1 = gamma * ((ll * y - x) - rho * (ll * y + x)) / r1
V_r2 = -gamma * ((ll * y - x) + rho * (ll * y + x)) / r2
V_t1 = gamma * sigma * (y + ll * x) / r1
V_t2 = gamma * sigma * (y + ll * x) / r2
return [V_r1, V_r2, V_t1, V_t2]
def _find_xy(ll, T, M, maxiter, atol, rtol, low_path):
"""Computes all x, y for given number of revolutions."""
# For abs(ll) == 1 the derivative is not continuous
assert abs(ll) < 1
M_max = np.floor(T / pi)
T_00 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2) # T_xM
# Refine maximum number of revolutions if necessary
if T < T_00 + M_max * pi and M_max > 0:
_, T_min = _compute_T_min(ll, M_max, maxiter, atol, rtol)
if T < T_min:
M_max -= 1
# Check if a feasible solution exist for the given number of revolutions
# This departs from the original paper in that we do not compute all solutions
if M > M_max:
raise ValueError("No feasible solution, try lower M!")
# Initial guess
x_0 = _initial_guess(T, ll, M, low_path)
# Start Householder iterations from x_0 and find x, y
x, numiter, tpi = _householder(x_0, T, ll, M, atol, rtol, maxiter)
y = _compute_y(x, ll)
return x, y, numiter, tpi
def _compute_y(x, ll):
"""Computes y."""
return np.sqrt(1 - ll ** 2 * (1 - x ** 2))
def _compute_psi(x, y, ll):
"""Computes psi.
"The auxiliary angle psi is computed using Eq.(17) by the appropriate
inverse function"
"""
if -1 <= x < 1:
# Elliptic motion
# Use arc cosine to avoid numerical errors
return np.arccos(x * y + ll * (1 - x ** 2))
elif x > 1:
# Hyperbolic motion
# The hyperbolic sine is bijective
return np.arcsinh((y - x * ll) * np.sqrt(x ** 2 - 1))
else:
# Parabolic motion
return 0.0
def _tof_equation(x, T0, ll, M):
"""Time of flight equation."""
return _tof_equation_y(x, _compute_y(x, ll), T0, ll, M)
def _tof_equation_y(x, y, T0, ll, M):
"""Time of flight equation with externally computated y."""
if M == 0 and np.sqrt(0.6) < x < np.sqrt(1.4):
eta = y - ll * x
S_1 = (1 - ll - x * eta) * 0.5
Q = 4 / 3 * hyp2f1(3, 1, 5 / 2, S_1)
T_ = (eta ** 3 * Q + 4 * ll * eta) * 0.5
else:
psi = _compute_psi(x, y, ll)
T_ = np.divide(
np.divide(psi + M * pi, np.sqrt(np.abs(1 - x ** 2))) - x + ll * y,
(1 - x ** 2),
)
return T_ - T0
def _tof_equation_p(x, y, T, ll):
# TODO: What about derivatives when x approaches 1?
return (3 * T * x - 2 + 2 * ll ** 3 * x / y) / (1 - x ** 2)
def _tof_equation_p2(x, y, T, dT, ll):
return (3 * T + 5 * x * dT + 2 * (1 - ll ** 2) * ll ** 3 / y ** 3) / (1 - x ** 2)
def _tof_equation_p3(x, y, _, dT, ddT, ll):
return (7 * x * ddT + 8 * dT - 6 * (1 - ll ** 2) * ll ** 5 * x / y ** 5) / (
1 - x ** 2
)
def _compute_T_min(ll, M, maxiter, atol, rtol):
"""Compute minimum T."""
if ll == 1:
x_T_min = 0.0
T_min = _tof_equation(x_T_min, 0.0, ll, M)
else:
if M == 0:
x_T_min = np.inf
T_min = 0.0
else:
# Set x_i > 0 to avoid problems at ll = -1
x_i = 0.1
T_i = _tof_equation(x_i, 0.0, ll, M)
x_T_min = _halley(x_i, T_i, ll, atol, rtol, maxiter)
T_min = _tof_equation(x_T_min, 0.0, ll, M)
return [x_T_min, T_min]
def _initial_guess(T, ll, M, low_path):
"""Initial guess."""
if M == 0:
# Single revolution
T_0 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2) + M * pi # Equation 19
T_1 = 2 * (1 - ll ** 3) / 3 # Equation 21
if T >= T_0:
x_0 = (T_0 / T) ** (2 / 3) - 1
elif T < T_1:
x_0 = 5 / 2 * T_1 / T * (T_1 - T) / (1 - ll ** 5) + 1
else:
# This is the real condition, which is not exactly equivalent
# elif T_1 < T < T_0
x_0 = (T_0 / T) ** (np.log2(T_1 / T_0)) - 1
return x_0
else:
# Multiple revolution
x_0l = (((M * pi + pi) / (8 * T)) ** (2 / 3) - 1) / (
((M * pi + pi) / (8 * T)) ** (2 / 3) + 1
)
x_0r = (((8 * T) / (M * pi)) ** (2 / 3) - 1) / (
((8 * T) / (M * pi)) ** (2 / 3) + 1
)
# Filter out the solution
x_0 = np.max([x_0l, x_0r]) if low_path is True else np.min([x_0l, x_0r])
return x_0
def _halley(p0, T0, ll, atol, rtol, maxiter):
"""Find a minimum of time of flight equation using the Halley method.
Note
----
This function is private because it assumes a calling convention specific to
this module and is not really reusable.
"""
for ii in range(1, maxiter + 1):
y = _compute_y(p0, ll)
fder = _tof_equation_p(p0, y, T0, ll)
fder2 = _tof_equation_p2(p0, y, T0, fder, ll)
if fder2 == 0:
raise RuntimeError("Derivative was zero")
fder3 = _tof_equation_p3(p0, y, T0, fder, fder2, ll)
# Halley step (cubic)
p = p0 - 2 * fder * fder2 / (2 * fder2 ** 2 - fder * fder3)
if abs(p - p0) < rtol * np.abs(p0) + atol:
return p
p0 = p
raise RuntimeError("Failed to converge")
def _householder(p0, T0, ll, M, atol, rtol, maxiter):
"""Find a zero of time of flight equation using the Householder method.
Note
----
This function is private because it assumes a calling convention specific to
this module and is not really reusable.
"""
# The clock starts together with the iteration
tic = time.perf_counter()
for numiter in range(1, maxiter + 1):
y = _compute_y(p0, ll)
fval = _tof_equation_y(p0, y, T0, ll, M)
T = fval + T0
fder = _tof_equation_p(p0, y, T, ll)
fder2 = _tof_equation_p2(p0, y, T, fder, ll)
fder3 = _tof_equation_p3(p0, y, T, fder, fder2, ll)
# Householder step (quartic)
p = p0 - fval * (
(fder ** 2 - fval * fder2 / 2)
/ (fder * (fder ** 2 - fval * fder2) + fder3 * fval ** 2 / 6)
)
if abs(p - p0) < rtol * np.abs(p0) + atol:
# Stop the clock and compute the time per iteration
tac = time.perf_counter()
tpi = (tac - tic) / numiter
return p, numiter, tpi
p0 = p
raise RuntimeError("Failed to converge")
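A quick end-to-end sketch (Earth's gravitational parameter in km^3/s^2; the position vectors and time of flight are illustrative values, not a validated reference case):

    import numpy as np

    mu_earth = 3.986004418e5                    # km^3 / s^2
    r1 = np.array([15945.34, 0.0, 0.0])         # km
    r2 = np.array([12214.839, 10249.467, 0.0])  # km
    tof = 76.0 * 60.0                           # s
    v1, v2 = izzo2015(mu_earth, r1, r2, tof)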
| 32.360119
| 89
| 0.54925
| 1,660
| 10,873
| 3.464458
| 0.216867
| 0.030603
| 0.003652
| 0.005216
| 0.239611
| 0.196835
| 0.166927
| 0.116501
| 0.100504
| 0.067119
| 0
| 0.049364
| 0.334866
| 10,873
| 336
| 90
| 32.360119
| 0.745852
| 0.363469
| 0
| 0.132075
| 0
| 0
| 0.014306
| 0
| 0
| 0
| 0
| 0.002976
| 0.006289
| 1
| 0.08805
| false
| 0
| 0.031447
| 0.018868
| 0.226415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9df6b818788dd513c66a7c66d4ffd98206ba31ae
| 2,688
|
py
|
Python
|
integrationtest/vm/virtualrouter/regression/delete_sg_with_2_attached_nics.py
|
sherry546/zstack-woodpecker
|
54a37459f2d72ce6820974feaa6eb55772c3d2ce
|
[
"Apache-2.0"
] | 2
|
2016-03-23T08:45:44.000Z
|
2017-06-26T02:40:46.000Z
|
integrationtest/vm/virtualrouter/regression/delete_sg_with_2_attached_nics.py
|
KevinDavidMitnick/zstack-woodpecker
|
96257faaf3c362168d008bdb47002025ad669b24
|
[
"Apache-2.0"
] | null | null | null |
integrationtest/vm/virtualrouter/regression/delete_sg_with_2_attached_nics.py
|
KevinDavidMitnick/zstack-woodpecker
|
96257faaf3c362168d008bdb47002025ad669b24
|
[
"Apache-2.0"
] | 2
|
2020-03-12T03:11:28.000Z
|
2021-07-26T01:57:58.000Z
|
'''
Test deleting SG with 2 attached NICs.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_security_group as test_sg_header
import zstackwoodpecker.zstack_test.zstack_test_sg_vm as test_sg_vm_header
import apibinding.inventory as inventory
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
Port = test_state.Port
def test():
'''
Test image requirements:
1. have nc to check the network port
2. have "nc" to open any port
3. it doesn't include a default firewall
The VR image is a good candidate to be the guest image.
'''
test_util.test_dsc("Create 3 VMs with vlan VR L3 network and using VR image.")
vm1 = test_stub.create_sg_vm()
test_obj_dict.add_vm(vm1)
vm2 = test_stub.create_sg_vm()
test_obj_dict.add_vm(vm2)
vm1.check()
vm2.check()
test_util.test_dsc("Create security groups.")
sg1 = test_stub.create_sg()
sg_vm = test_sg_vm_header.ZstackTestSgVm()
test_obj_dict.set_sg_vm(sg_vm)
l3_uuid = vm1.vm.vmNics[0].l3NetworkUuid
vr_vm = test_lib.lib_find_vr_by_vm(vm1.vm)[0]
vm2_ip = test_lib.lib_get_vm_nic_by_l3(vm2.vm, l3_uuid).ip
rule1 = test_lib.lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
rule2 = test_lib.lib_gen_sg_rule(Port.rule2_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
rule3 = test_lib.lib_gen_sg_rule(Port.rule3_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
sg1.add_rule([rule1])
sg1.add_rule([rule2])
sg1.add_rule([rule3])
sg_vm.check()
nic_uuid1 = vm1.vm.vmNics[0].uuid
nic_uuid2 = vm2.vm.vmNics[0].uuid
# nic_uuid3 = vm2.vm.vmNics[0].uuid
vm1_nics = (nic_uuid1, vm1)
vm2_nics = (nic_uuid2, vm2)
# vm3_nics = (nic_uuid3, vm3)
#test_stub.lib_add_sg_rules(sg1.uuid, [rule0, rule1])
test_util.test_dsc("Add nic to security group 1.")
test_util.test_dsc("Allowed ingress ports: %s" % test_stub.rule1_ports)
#sg_vm.attach(sg1, [vm1_nics, vm2_nics, vm3_nics])
sg_vm.attach(sg1, [vm1_nics, vm2_nics])
sg_vm.check()
sg_vm.delete_sg(sg1)
sg_vm.check()
vm1.destroy()
test_obj_dict.rm_vm(vm1)
vm2.destroy()
test_obj_dict.rm_vm(vm2)
test_util.test_pass('Delete Security Group with 2 attached NICs Success')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
| 33.6
| 97
| 0.696801
| 430
| 2,688
| 4.039535
| 0.248837
| 0.032239
| 0.040299
| 0.034542
| 0.302821
| 0.24928
| 0.17559
| 0.070236
| 0.039148
| 0.039148
| 0
| 0.034644
| 0.205357
| 2,688
| 79
| 98
| 34.025316
| 0.778558
| 0.175223
| 0
| 0.0625
| 0
| 0
| 0.087753
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0.020833
| 0.125
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9df90abf6f95f0cc5563b0534b8331a0e2b2223e
| 15,365
|
py
|
Python
|
examples/deep_architect.py
|
negrinho/sane_tikz
|
fd6f291d9815613594d724678cb91ac9d412fbb7
|
[
"MIT"
] | 274
|
2020-02-13T20:24:50.000Z
|
2022-03-23T01:51:20.000Z
|
examples/deep_architect.py
|
negrinho/sane_tikz
|
fd6f291d9815613594d724678cb91ac9d412fbb7
|
[
"MIT"
] | null | null | null |
examples/deep_architect.py
|
negrinho/sane_tikz
|
fd6f291d9815613594d724678cb91ac9d412fbb7
|
[
"MIT"
] | 19
|
2020-02-14T01:07:42.000Z
|
2022-02-28T11:42:36.000Z
|
# Figure 5 in https://arxiv.org/pdf/1909.13404.pdf (towards modular and programmable architecture search)
import sane_tikz.core as stz
import sane_tikz.formatting as fmt
frame_height = 9.5
frame_width = 10.0
frame_spacing = 0.2
frame_roundness = 0.6
frame_line_width = 4.5 * fmt.standard_line_width
module_height = 1.6
module_width = 2.8
io_height = 0.40
io_long_side = 0.9
io_short_side = 1.0 * io_long_side
io_spacing = 0.12
p_height = 1.2 * io_height
p_width = 1.2
p_spacing = io_spacing / 2.0
h_width = 1 * p_width
h_height = 1.3 * p_height
h_spacing = io_spacing / 2.0
io_corner_roundness = 0.0
module_roundness = 0.0
line_width = 2.0 * fmt.standard_line_width
module_inner_vertical_spacing = 0.1
delta_increment = 0.0
horizontal_module_spacing = 0.2
vertical_module_spacing = 0.2
spacing_between_module_and_hyperp = 0.8
spacing_between_hyperp_and_hyperp = 0.4
arrow_length = vertical_module_spacing
name2color = fmt.google_slides_named_colors()
connect_s_fmt = fmt.combine_tikz_strs(
[fmt.arrow_heads("end"), fmt.line_width(line_width)])
input_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
output_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
property_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
module_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
hyperp_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
frame_s_fmt = fmt.combine_tikz_strs([
fmt.rounded_corners(frame_roundness),
fmt.line_width(frame_line_width),
])
unassigned_h_s_fmt = fmt.combine_tikz_strs([
fmt.anchor("left_center"),
])
assigned_h_s_fmt = fmt.combine_tikz_strs([
fmt.anchor("left_center"),
])
def input(name):
x1 = io_short_side / 2.0
x2 = io_long_side / 2.0
r = stz.closed_path([[-x1, io_height], [x1, io_height], [x2, 0], [-x2, 0]],
input_s_fmt)
l = stz.latex(stz.center_coords(r), name)
return [r, l]
def output(name):
x1 = io_long_side / 2.0
x2 = io_short_side / 2.0
r = stz.closed_path([[-x1, io_height], [x1, io_height], [x2, 0], [-x2, 0]],
output_s_fmt)
l = stz.latex(stz.center_coords(r), name)
return [r, l]
def property(name, width_scale=1.0, height_scale=1.0):
e = stz.ellipse([0, 0], width_scale * p_width / 2.0,
height_scale * p_height / 2.0, property_s_fmt)
l = stz.latex(stz.center_coords(e), name)
return [e, l]
def module(module_name,
input_names,
output_names,
hyperp_names,
p_width_scale=1.0):
i_lst = [input(s) for s in input_names]
o_lst = [output(s) for s in output_names]
m = stz.rectangle([0, 0], [module_width, -module_height], module_s_fmt)
l = stz.latex(stz.center_coords(m), "\\textbf{%s}" % module_name)
stz.distribute_horizontally_with_spacing(i_lst, io_spacing)
stz.translate_bbox_top_left_to_coords(
i_lst, [module_inner_vertical_spacing, -module_inner_vertical_spacing])
stz.distribute_horizontally_with_spacing(o_lst, io_spacing)
stz.translate_bbox_bottom_left_to_coords(o_lst, [
module_inner_vertical_spacing,
-module_height + module_inner_vertical_spacing
])
if len(hyperp_names) > 0:
h_lst = [property(s, p_width_scale) for s in hyperp_names]
stz.distribute_vertically_with_spacing(h_lst, p_spacing)
stz.translate_bbox_top_right_to_coords(h_lst, [
module_width - module_inner_vertical_spacing,
-module_inner_vertical_spacing - delta_increment
])
return [[m, l], i_lst, o_lst, h_lst]
else:
return [[m, l], i_lst, o_lst]
def independent_hyperparameter(name, values_expr, value=None):
e = stz.ellipse([0, 0], h_width / 2.0, h_height / 2.0, hyperp_s_fmt)
l = stz.latex(stz.center_coords(e), "\\textbf{%s}" % name)
fn_cs = stz.coords_from_bbox_with_fn(e, stz.right_center_coords)
if value is None:
l_vs = stz.latex(fn_cs, "\\textbf{[%s]}" % (values_expr,),
unassigned_h_s_fmt)
return [e, l, l_vs]
else:
v_cs = stz.coords_from_bbox_with_fn(e, stz.right_center_coords)
l_v = stz.latex(v_cs, "\\textbf{%s}" % value, assigned_h_s_fmt)
return [e, l, l_v]
def dependent_hyperparameter(name, hyperp_names, fn_expr, value=None):
e = stz.ellipse([0, 0], h_width / 2.0, h_height / 2.0, hyperp_s_fmt)
if value is None:
e["horizontal_radius"] *= 2.1 * e["horizontal_radius"]
l_cs = stz.center_coords(e)
if value is None:
l_cs = stz.translate_coords_horizontally(l_cs, 0.1)
l = stz.latex(l_cs, "\\textbf{%s}" % name)
if value is None:
fn_cs = stz.coords_from_bbox_with_fn(e, stz.right_center_coords)
l_fn = stz.latex(fn_cs, "\\textbf{fn: %s}" % (fn_expr,),
unassigned_h_s_fmt)
p = property("x", 0.25, 0.7)
p_cs = stz.translate_coords_horizontally(
stz.coords_from_bbox_with_fn(e, stz.left_center_coords), 0.1)
stz.translate_bbox_left_center_to_coords(p, p_cs)
return [e, l, l_fn, p]
else:
v_cs = stz.coords_from_bbox_with_fn(e, stz.right_center_coords)
l_v = stz.latex(v_cs, "\\textbf{%s}" % value, assigned_h_s_fmt)
return [e, l, l_v]
def dense(idx):
return module("Dense-%d" % idx, ["in"], ["out"], ["units"])
def conv2d(idx):
return module("Conv2D-%d" % idx, ["in"], ["out"], ["filters"], 1.1)
def dropout(idx):
return module("Dropout-%d" % idx, ["in"], ["out"], ["prob"], 0.9)
def optional(idx):
return module("Optional-%d" % idx, ["in"], ["out"], ["opt"])
def concat(idx):
return module("Concat-%d" % idx, ["in0", "in1"], ["out"], [])
def repeat(idx):
return module("Repeat-%d" % idx, ["in"], ["out"], ["k"], 0.5)
def connect_modules(m_from, m_to, output_idx, input_idx):
return stz.line_segment(
stz.coords_from_bbox_with_fn(m_from[2][output_idx],
stz.bottom_center_coords),
stz.coords_from_bbox_with_fn(m_to[1][input_idx], stz.top_center_coords),
connect_s_fmt)
def connect_hyperp_to_module(h, m, property_idx):
return stz.line_segment(
stz.coords_from_bbox_with_fn(h[:2], stz.left_center_coords),
stz.coords_from_bbox_with_fn(m[3][property_idx],
stz.right_center_coords), connect_s_fmt)
def connect_hyperp_to_hyperp(h_from, h_to):
return stz.line_segment(
stz.coords_from_bbox_with_fn(h_from[:2], stz.right_center_coords),
stz.coords_from_bbox_with_fn(h_to[3], stz.top_center_coords),
connect_s_fmt)
def frame(frame_idx):
assert frame_idx >= 0 and frame_idx <= 3
c1 = conv2d(1)
o = optional(1)
r1 = repeat(1)
r2 = repeat(2)
cc = concat(1)
c2 = conv2d(2)
c3 = conv2d(3)
c4 = conv2d(4)
d = dropout(1)
stz.distribute_horizontally_with_spacing([r1, r2],
horizontal_module_spacing)
stz.distribute_horizontally_with_spacing([c2, [c3, c4]],
horizontal_module_spacing)
modules = []
if frame_idx == 0:
stz.distribute_vertically_with_spacing([cc, [r1, r2], o, c1],
vertical_module_spacing)
stz.align_centers_horizontally([cc, [r1, r2], o, c1], 0)
modules.extend([c1, o, r1, r2, cc])
else:
stz.distribute_vertically_with_spacing([c4, c3],
vertical_module_spacing)
stz.distribute_horizontally_with_spacing([c2, [c3, c4]],
horizontal_module_spacing)
stz.align_centers_vertically([[c3, c4], c2], 0)
if frame_idx == 1:
stz.distribute_vertically_with_spacing([cc, [c2, c3, c4], o, c1],
vertical_module_spacing)
stz.align_centers_horizontally([cc, [c2, c3, c4], o, c1], 0)
modules.extend([c1, o, c2, c3, c4, cc])
else:
stz.distribute_vertically_with_spacing([cc, [c2, c3, c4], d, c1],
vertical_module_spacing)
stz.align_centers_horizontally([cc, [c2, c3, c4], d, c1], 0)
modules.extend([c1, d, c2, c3, c4, cc])
module_connections = []
if frame_idx == 0:
module_connections.extend([
connect_modules(c1, o, 0, 0),
connect_modules(o, r1, 0, 0),
connect_modules(o, r2, 0, 0),
connect_modules(r1, cc, 0, 0),
connect_modules(r2, cc, 0, 1),
])
else:
if frame_idx == 1:
module_connections.extend([
connect_modules(c1, o, 0, 0),
connect_modules(o, c2, 0, 0),
connect_modules(o, c3, 0, 0),
])
else:
module_connections.extend([
connect_modules(c1, d, 0, 0),
connect_modules(d, c2, 0, 0),
connect_modules(d, c3, 0, 0),
])
module_connections.extend([
connect_modules(c3, c4, 0, 0),
connect_modules(c2, cc, 0, 0),
connect_modules(c4, cc, 0, 1),
])
# # hyperparameters
if frame_idx <= 1:
h_o = independent_hyperparameter("IH-2", "0, 1")
else:
h_o = independent_hyperparameter("IH-2", "0, 1", "1")
if frame_idx <= 0:
h_r1 = dependent_hyperparameter("DH-1", ["x"], "2*x")
h_r2 = independent_hyperparameter("IH-3", "1, 2, 4")
else:
h_r1 = dependent_hyperparameter("DH-1", ["x"], "2*x", "2")
h_r2 = independent_hyperparameter("IH-3", "1, 2, 4", "1")
if frame_idx <= 2:
h_c1 = independent_hyperparameter("IH-1", "64, 128")
h_c2 = independent_hyperparameter("IH-4", "64, 128")
h_c3 = independent_hyperparameter("IH-5", "64, 128")
h_c4 = independent_hyperparameter("IH-6", "64, 128")
h_d = independent_hyperparameter("IH-7", "0.25, 0.5")
else:
h_c1 = independent_hyperparameter("IH-1", "64, 128", "64")
h_c2 = independent_hyperparameter("IH-4", "64, 128", "128")
h_c3 = independent_hyperparameter("IH-5", "64, 128", "128")
h_c4 = independent_hyperparameter("IH-6", "64, 128", "64")
h_d = independent_hyperparameter("IH-7", "0.25, 0.5", "0.5")
def place_hyperp_right_of(h, m):
y_p = stz.center_coords(m[3])[1]
stz.align_centers_vertically([h], y_p)
stz.place_to_the_right(h, m, spacing_between_module_and_hyperp)
hyperparameters = []
place_hyperp_right_of(h_c1, c1)
if frame_idx in [0, 1]:
place_hyperp_right_of(h_o, o)
hyperparameters.append(h_o)
if frame_idx == 0:
place_hyperp_right_of(h_r1, r2)
stz.place_above_and_align_to_the_right(h_r2, h_r1, 0.8)
hyperparameters.extend([h_r1, h_r2, h_c1])
else:
place_hyperp_right_of(h_c1, c1)
place_hyperp_right_of(h_c3, c3)
place_hyperp_right_of(h_c4, c4)
stz.place_below(h_c2, h_c1, 3.0)
hyperparameters.extend([h_c1, h_c2, h_c3, h_c4])
if frame_idx in [2, 3]:
place_hyperp_right_of(h_d, d)
hyperparameters.extend([h_d])
unreachable_hyperps = []
if frame_idx == 1:
stz.distribute_vertically_with_spacing([h_r1, h_r2], 0.2)
unreachable_hyperps.extend([h_r1, h_r2])
if frame_idx >= 2:
stz.distribute_vertically_with_spacing([h_o, h_r1, h_r2], 0.2)
unreachable_hyperps.extend([h_r1, h_r2, h_o])
hyperparameters.extend(unreachable_hyperps)
cs_fn = lambda e: stz.coords_from_bbox_with_fn(e, stz.left_center_coords)
if frame_idx == 0:
stz.translate_bbox_left_center_to_coords(h_r2, cs_fn([h_o, h_r1]))
elif frame_idx == 1:
stz.translate_bbox_left_center_to_coords(h_c2, cs_fn([h_o, h_c3]))
else:
stz.translate_bbox_left_center_to_coords(h_c2, cs_fn([h_d, h_c3]))
hyperp_connections = [
connect_hyperp_to_module(h_c1, c1, 0),
]
if frame_idx in [0, 1]:
hyperp_connections.extend([connect_hyperp_to_module(h_o, o, 0)])
if frame_idx == 0:
hyperp_connections.extend([
connect_hyperp_to_module(h_r1, r2, 0),
connect_hyperp_to_module(h_r2, r1, 0),
connect_hyperp_to_hyperp(h_r2, h_r1)
])
else:
hyperp_connections.extend([
connect_hyperp_to_module(h_c2, c2, 0),
connect_hyperp_to_module(h_c3, c3, 0),
connect_hyperp_to_module(h_c4, c4, 0),
])
if frame_idx in [2, 3]:
hyperp_connections.append(connect_hyperp_to_module(h_d, d, 0))
f = stz.rectangle_from_width_and_height([0, 0], frame_height, frame_width,
frame_s_fmt)
e = [modules, module_connections, hyperparameters, hyperp_connections]
stz.translate_bbox_center_to_coords(
f, stz.translate_coords_horizontally(stz.center_coords(e), 0.8))
if len(unreachable_hyperps) > 0:
stz.translate_bbox_bottom_right_to_coords(unreachable_hyperps,
stz.bbox(e)[1])
# frame id
s = ["a", "b", "c", "d"][frame_idx]
label = [stz.latex([0, 0], "\\Huge \\textbf %s" % s)]
stz.translate_bbox_top_left_to_coords(
label,
stz.translate_coords_antidiagonally(
stz.coords_from_bbox_with_fn(f, stz.top_left_coords), 0.6))
return e + [f, label]
def search_space_transition():
e0 = frame(0)
e1 = frame(1)
e2 = frame(2)
e3 = frame(3)
e = [e0, e1, e2, e3]
def get_idx(e_frame, indices):
e = e_frame
for idx in indices:
e = e[idx]
return e
def highlight(e_frame, indices, idx, color):
e = get_idx(e_frame, indices)
s_fmt = fmt.combine_tikz_strs([e["tikz_str"], fmt.fill_color(color)])
e['tikz_str'] = s_fmt
# highlight new modules
highlight(e1, [0, 2, 0, 0], 0, "light_green_2")
highlight(e1, [0, 3, 0, 0], 0, "light_green_2")
highlight(e1, [0, 4, 0, 0], 0, "light_green_2")
highlight(e2, [0, 1, 0, 0], 0, "light_green_2")
# highlight new hyperparameters
highlight(e1, [2, 2, 0], 0, "light_green_2")
highlight(e1, [2, 3, 0], 0, "light_green_2")
highlight(e1, [2, 4, 0], 0, "light_green_2")
highlight(e2, [2, 4, 0], 0, "light_green_2")
# highlight assigned hyperparameters
highlight(e1, [2, 5, 0], 0, "light_red_2")
highlight(e1, [2, 6, 0], 0, "light_red_2")
highlight(e2, [2, 7, 0], 0, "light_red_2")
highlight(e3, [2, 0, 0], 0, "light_red_2")
highlight(e3, [2, 1, 0], 0, "light_red_2")
highlight(e3, [2, 2, 0], 0, "light_red_2")
highlight(e3, [2, 3, 0], 0, "light_red_2")
highlight(e3, [2, 4, 0], 0, "light_red_2")
# arrange the four frames
stz.align_tops(e, 0.0)
stz.distribute_horizontally_with_spacing([e0, e1], frame_spacing)
stz.distribute_horizontally_with_spacing([e2, e3], frame_spacing)
stz.distribute_vertically_with_spacing([[e2, e3], [e0, e1]], frame_spacing)
stz.draw_to_tikz_standalone(e, "deep_architect.tex", name2color)
search_space_transition()
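The call above writes the four-frame figure to deep_architect.tex. A single frame can be rendered the same way (the output file name is illustrative):

    e = frame(0)
    stz.draw_to_tikz_standalone(e, "frame0.tex", name2color)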
| 33.918322
| 105
| 0.615164
| 2,300
| 15,365
| 3.778696
| 0.091304
| 0.009895
| 0.01841
| 0.025429
| 0.606489
| 0.483604
| 0.418134
| 0.378783
| 0.283512
| 0.204695
| 0
| 0.05074
| 0.252197
| 15,365
| 452
| 106
| 33.993363
| 0.705657
| 0.01562
| 0
| 0.268156
| 0
| 0
| 0.045779
| 0
| 0
| 0
| 0
| 0
| 0.002793
| 1
| 0.055866
| false
| 0
| 0.005587
| 0.02514
| 0.117318
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dfa3001c3ff293c70ee1d697f313a0584e7ea7e
| 25,801
|
py
|
Python
|
pytests/epengine/basic_ops.py
|
pavithra-mahamani/TAF
|
ff854adcc6ca3e50d9dc64e7756ca690251128d3
|
[
"Apache-2.0"
] | null | null | null |
pytests/epengine/basic_ops.py
|
pavithra-mahamani/TAF
|
ff854adcc6ca3e50d9dc64e7756ca690251128d3
|
[
"Apache-2.0"
] | null | null | null |
pytests/epengine/basic_ops.py
|
pavithra-mahamani/TAF
|
ff854adcc6ca3e50d9dc64e7756ca690251128d3
|
[
"Apache-2.0"
] | null | null | null |
import time
import json
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import DurabilityHelper, \
DurableExceptions
from couchbase_helper.tuq_generators import JsonGenerator
from membase.api.rest_client import RestConnection
from mc_bin_client import MemcachedClient, MemcachedError
from remote.remote_util import RemoteMachineShellConnection
from table_view import TableView
"""
Capture basic get and set operations, plus the meta operations.
This is based on a 4.1.1 test which covered separate bugs with incr and
delete-with-meta that did not have an obvious home elsewhere.
This file is small for now, but we will add to it reactively.
These may be parameterized by:
- full and value eviction
- DGM and non-DGM
"""
class basic_ops(BaseTestCase):
def setUp(self):
super(basic_ops, self).setUp()
self.key = 'test_docs'.rjust(self.key_size, '0')
nodes_init = self.cluster.servers[1:self.nodes_init] \
if self.nodes_init != 1 else []
self.task.rebalance([self.cluster.master], nodes_init, [])
self.cluster.nodes_in_cluster.extend([self.cluster.master]+nodes_init)
self.bucket_util.create_default_bucket(
replica=self.num_replicas, compression_mode=self.compression_mode,
bucket_type=self.bucket_type)
self.bucket_util.add_rbac_user()
self.src_bucket = self.bucket_util.get_all_buckets()
self.durability_helper = DurabilityHelper(
self.log, len(self.cluster.nodes_in_cluster),
durability=self.durability_level,
replicate_to=self.replicate_to,
persist_to=self.persist_to)
# Reset active_resident_threshold to avoid further data load as DGM
self.active_resident_threshold = 0
self.cluster_util.print_cluster_stats()
self.bucket_util.print_bucket_stats()
self.log.info("==========Finished Basic_ops base setup========")
def tearDown(self):
super(basic_ops, self).tearDown()
def do_basic_ops(self):
KEY_NAME = 'key1'
KEY_NAME2 = 'key2'
self.log.info('Starting basic ops')
rest = RestConnection(self.cluster.master)
default_bucket = self.bucket_util.get_all_buckets()[0]
# NOTE: VBucketAwareMemcached is not imported at the top of this file;
# in testrunner-derived frameworks it lives in a data-helper module
# (an assumption -- verify against this repo's layout).
smart_client = VBucketAwareMemcached(rest, default_bucket)
sdk_client = smart_client.get_client()
# mcd = client.memcached(KEY_NAME)
# MB-17231 - incr with full eviction
rc = sdk_client.incr(KEY_NAME, delta=1)
self.log.info('rc for incr: {0}'.format(rc))
# MB-17289 del with meta
rc = sdk_client.set(KEY_NAME, 0, 0,
json.dumps({'value': 'value2'}))
self.log.info('set is: {0}'.format(rc))
# cas = rc[1]
# wait for it to persist
persisted = 0
while persisted == 0:
opaque, rep_time, persist_time, persisted, cas = sdk_client.observe(KEY_NAME)
try:
rc = sdk_client.evict_key(KEY_NAME)
except MemcachedError as exp:
self.fail("Exception with evict meta - {0}".format(exp))
CAS = 0xabcd
try:
# key, exp, flags, seqno, cas
# NOTE: `mcd` is only defined if the commented-out
# `mcd = client.memcached(KEY_NAME)` line above is restored; as written
# this raises a NameError instead of exercising del_with_meta.
rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
except MemcachedError as exp:
self.fail("Exception with del_with meta - {0}".format(exp))
# Reproduce test case for MB-28078
def do_setWithMeta_twice(self):
mc = MemcachedClient(self.cluster.master.ip, 11210)
mc.sasl_auth_plain(self.cluster.master.rest_username,
self.cluster.master.rest_password)
mc.bucket_select('default')
try:
mc.setWithMeta('1', '{"Hello":"World"}', 3600, 0, 1,
0x1512a3186faa0000)
except MemcachedError as error:
self.log.info("<MemcachedError #%d ``%s''>"
% (error.status, error.message))
self.fail("Error on First setWithMeta()")
stats = mc.stats()
self.log.info('curr_items: {0} and curr_temp_items:{1}'
.format(stats['curr_items'], stats['curr_temp_items']))
self.log.info("Sleeping for 5 and checking stats again")
time.sleep(5)
stats = mc.stats()
self.log.info('curr_items: {0} and curr_temp_items:{1}'
.format(stats['curr_items'], stats['curr_temp_items']))
try:
mc.setWithMeta('1', '{"Hello":"World"}', 3600, 0, 1,
0x1512a3186faa0000)
except MemcachedError as error:
stats = mc.stats()
self.log.info('After 2nd setWithMeta(), curr_items: {} and curr_temp_items:{}'
.format(stats['curr_items'],
stats['curr_temp_items']))
if int(stats['curr_temp_items']) == 1:
self.fail("Error on second setWithMeta(), expected curr_temp_items to be 0")
else:
self.log.info("<MemcachedError #%d ``%s''>"
% (error.status, error.message))
def generate_docs_bigdata(self, docs_per_day, start=0,
document_size=1024000):
json_generator = JsonGenerator()
return json_generator.generate_docs_bigdata(
start=start, end=docs_per_day, value_size=document_size)
def test_doc_size(self):
def check_durability_failures():
self.log.error(task.sdk_acked_curd_failed.keys())
self.log.error(task.sdk_exception_crud_succeed.keys())
self.assertTrue(
len(task.sdk_acked_curd_failed) == 0,
"Durability failed for docs: %s" % task.sdk_acked_curd_failed.keys())
self.assertTrue(
len(task.sdk_exception_crud_succeed) == 0,
"Durability failed for docs: %s" % task.sdk_acked_curd_failed.keys())
"""
Basic tests for document CRUD operations using JSON docs
"""
doc_op = self.input.param("doc_op", None)
def_bucket = self.bucket_util.buckets[0]
ignore_exceptions = list()
retry_exceptions = list()
# Stat validation reference variables
verification_dict = dict()
ref_val = dict()
ref_val["ops_create"] = 0
ref_val["ops_update"] = 0
ref_val["ops_delete"] = 0
ref_val["rollback_item_count"] = 0
ref_val["sync_write_aborted_count"] = 0
ref_val["sync_write_committed_count"] = 0
one_less_node = self.nodes_init == self.num_replicas
if self.durability_level:
pass
#ignore_exceptions.append(
# "com.couchbase.client.core.error.RequestTimeoutException")
if self.target_vbucket and type(self.target_vbucket) is not list:
self.target_vbucket = [self.target_vbucket]
self.log.info("Creating doc_generator..")
# Load basic docs into bucket
doc_create = doc_generator(
self.key, 0, self.num_items, doc_size=self.doc_size,
doc_type=self.doc_type, target_vbucket=self.target_vbucket,
vbuckets=self.vbuckets)
self.log.info("Loading {0} docs into the bucket: {1}"
.format(self.num_items, def_bucket))
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, doc_create, "create", 0,
batch_size=self.batch_size, process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
ryow=self.ryow,
check_persistence=self.check_persistence)
self.task.jython_task_manager.get_task_result(task)
if self.ryow:
check_durability_failures()
# Retry doc_exception code
self.log.info("Validating failed doc's (if any) exceptions")
doc_op_info_dict = dict()
doc_op_info_dict[task] = self.bucket_util.get_doc_op_info_dict(
def_bucket, "create", exp=0, replicate_to=self.replicate_to,
persist_to=self.persist_to, durability=self.durability_level,
timeout=self.sdk_timeout, time_unit="seconds",
ignore_exceptions=ignore_exceptions,
retry_exceptions=retry_exceptions)
self.bucket_util.verify_doc_op_task_exceptions(doc_op_info_dict,
self.cluster)
if len(doc_op_info_dict[task]["unwanted"]["fail"].keys()) != 0:
self.fail("Failures in retry doc CRUDs: {0}"
.format(doc_op_info_dict[task]["unwanted"]["fail"]))
self.log.info("Wait for ep_all_items_remaining to become '0'")
self.bucket_util._wait_for_stats_all_buckets()
# Update ref_val
ref_val["ops_create"] = self.num_items + len(task.fail.keys())
ref_val["sync_write_committed_count"] = self.num_items
# Validate vbucket stats
verification_dict["ops_create"] = ref_val["ops_create"]
verification_dict["rollback_item_count"] = \
ref_val["rollback_item_count"]
if self.durability_level:
verification_dict["sync_write_aborted_count"] = \
ref_val["sync_write_aborted_count"]
verification_dict["sync_write_committed_count"] = \
ref_val["sync_write_committed_count"]
failed = self.durability_helper.verify_vbucket_details_stats(
def_bucket, self.cluster_util.get_kv_nodes(),
vbuckets=self.vbuckets, expected_val=verification_dict,
one_less_node=one_less_node)
if failed:
self.fail("Cbstat vbucket-details verification failed")
# Verify initial doc load count
self.log.info("Validating doc_count in buckets")
self.bucket_util.verify_stats_all_buckets(self.num_items)
self.log.info("Creating doc_generator for doc_op")
num_item_start_for_crud = int(self.num_items / 2)
doc_update = doc_generator(
self.key, 0, num_item_start_for_crud,
doc_size=self.doc_size, doc_type=self.doc_type,
target_vbucket=self.target_vbucket, vbuckets=self.vbuckets)
expected_num_items = self.num_items
num_of_mutations = 1
if doc_op == "update":
self.log.info("Performing 'update' mutation over the docs")
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, doc_update, "update", 0,
batch_size=self.batch_size, process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
ryow=self.ryow,
check_persistence=self.check_persistence)
self.task.jython_task_manager.get_task_result(task)
ref_val["ops_update"] = (doc_update.end - doc_update.start
+ len(task.fail.keys()))
if self.durability_level:
ref_val["sync_write_committed_count"] += \
(doc_update.end - doc_update.start)
if self.ryow:
check_durability_failures()
# Read all the values to validate update operation
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, doc_update, "read", 0,
batch_size=self.batch_size, process_concurrency=self.process_concurrency,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
op_failed_tbl = TableView(self.log.error)
op_failed_tbl.set_headers(["Update failed key", "CAS", "Value"])
for key, value in task.success.items():
if json.loads(str(value["value"]))["mutated"] != 1:
op_failed_tbl.add_row([key, value["cas"], value["value"]])
op_failed_tbl.display("Update failed for keys:")
if len(op_failed_tbl.rows) != 0:
self.fail("Update failed for few keys")
elif doc_op == "delete":
self.log.info("Performing 'delete' mutation over the docs")
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, doc_update, "delete", 0,
batch_size=self.batch_size, process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
ryow=self.ryow, check_persistence=self.check_persistence)
self.task.jython_task_manager.get_task_result(task)
expected_num_items = self.num_items \
- (self.num_items - num_item_start_for_crud)
ref_val["ops_delete"] = (doc_update.end - doc_update.start
+ len(task.fail.keys()))
if self.durability_level:
ref_val["sync_write_committed_count"] += \
(doc_update.end - doc_update.start)
if self.ryow:
check_durability_failures()
# Read all the values to validate update operation
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, doc_update, "read", 0,
batch_size=10, process_concurrency=8,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
op_failed_tbl = TableView(self.log.error)
op_failed_tbl.set_headers(["Delete failed key", "CAS", "Value"])
for key, value in task.success.items():
op_failed_tbl.add_row([key, value["cas"], value["value"]])
op_failed_tbl.display("Delete failed for keys:")
if len(op_failed_tbl.rows) != 0:
self.fail("Delete failed for few keys")
else:
self.log.warning("Unsupported doc_operation")
self.log.info("Wait for ep_all_items_remaining to become '0'")
self.bucket_util._wait_for_stats_all_buckets()
# Validate vbucket stats
verification_dict["ops_create"] = ref_val["ops_create"]
verification_dict["ops_update"] = ref_val["ops_update"]
verification_dict["ops_delete"] = ref_val["ops_delete"]
verification_dict["rollback_item_count"] = \
ref_val["rollback_item_count"]
if self.durability_level:
verification_dict["sync_write_aborted_count"] = \
ref_val["sync_write_aborted_count"]
verification_dict["sync_write_committed_count"] = \
ref_val["sync_write_committed_count"]
failed = self.durability_helper.verify_vbucket_details_stats(
def_bucket, self.cluster_util.get_kv_nodes(),
vbuckets=self.vbuckets, expected_val=verification_dict,
one_less_node=one_less_node)
if failed:
self.fail("Cbstat vbucket-details verification failed")
self.log.info("Validating doc_count")
self.bucket_util.verify_stats_all_buckets(expected_num_items)
def test_large_doc_size(self):
        # Bucket size=256MB; the test starts failing once ~236MB is filled.
        # Document size=2MB, so ~221 docs fit; we load 250 docs.
        # Generate docs with size >= 1MB. See MB-29333
self.doc_size *= 1024000
gens_load = self.generate_docs_bigdata(
docs_per_day=self.num_items, document_size=self.doc_size)
for bucket in self.bucket_util.buckets:
task = self.task.async_load_gen_docs(
self.cluster, bucket, gens_load, "create", 0,
batch_size=10, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
        # Check whether all 250 documents are loaded within the default timeout
self.bucket_util.verify_stats_all_buckets(self.num_items)
def test_large_doc_20MB(self):
# test reproducer for MB-29258,
# Load a doc which is greater than 20MB
# with compression enabled and check if it fails
# check with compression_mode as active, passive and off
val_error = DurableExceptions.ValueTooLargeException
gens_load = self.generate_docs_bigdata(
docs_per_day=1, document_size=(self.doc_size * 1024000))
for bucket in self.bucket_util.buckets:
task = self.task.async_load_gen_docs(
self.cluster, bucket, gens_load, "create", 0,
batch_size=10, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
if self.doc_size > 20:
if len(task.fail.keys()) == 0:
self.log_failure("No failures during large doc insert")
for doc_id, doc_result in task.fail.items():
if val_error not in str(doc_result["error"]):
self.log_failure("Invalid exception for key %s: %s"
% (doc_id, doc_result))
else:
if len(task.success.keys()) == 0:
self.log_failure("Failures during large doc insert")
for bucket in self.bucket_util.buckets:
if self.doc_size > 20:
# failed with error "Data Too Big" when document size > 20MB
self.bucket_util.verify_stats_all_buckets(0)
else:
self.bucket_util.verify_stats_all_buckets(1)
gens_update = self.generate_docs_bigdata(
docs_per_day=1, document_size=(21 * 1024000))
task = self.task.async_load_gen_docs(
self.cluster, bucket, gens_update, "create", 0,
batch_size=10,
process_concurrency=8,
replicate_to=self.replicate_to,
persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
if len(task.success.keys()) != 0:
self.log_failure("Large docs inserted for keys: %s"
% task.success.keys())
if len(task.fail.keys()) == 0:
self.log_failure("No failures during large doc insert")
for doc_id, doc_result in task.fail.items():
if val_error not in str(doc_result["error"]):
self.log_failure("Invalid exception for key %s: %s"
% (doc_id, doc_result))
self.bucket_util.verify_stats_all_buckets(1)
self.validate_test_failure()
def test_diag_eval_curl(self):
        # Check whether diag/eval is restricted to localhost
self.disable_diag_eval_on_non_local_host = \
self.input.param("disable_diag_eval_non_local", False)
port = self.cluster.master.port
        # First verify that diag/eval works from localhost
cmd = []
cmd_base = 'curl http://{0}:{1}@localhost:{2}/diag/eval ' \
.format(self.cluster.master.rest_username,
self.cluster.master.rest_password, port)
command = cmd_base + '-X POST -d \'os:cmd("env")\''
cmd.append(command)
command = cmd_base + '-X POST -d \'case file:read_file("/etc/passwd") of {ok, B} -> io:format("~p~n", [binary_to_term(B)]) end.\''
cmd.append(command)
shell = RemoteMachineShellConnection(self.cluster.master)
for command in cmd:
output, error = shell.execute_command(command)
self.assertNotEquals("API is accessible from localhost only", output[0])
        # Enable allow_nonlocal_eval unless the test wants it kept disabled
if not self.disable_diag_eval_on_non_local_host:
command = cmd_base + '-X POST -d \'ns_config:set(allow_nonlocal_eval, true).\''
_, _ = shell.execute_command(command)
        # With allow_nonlocal_eval enabled, diag/eval via the node's IP should
        # work; when it stays disabled, the request must be rejected
cmd = []
cmd_base = 'curl http://{0}:{1}@{2}:{3}/diag/eval ' \
.format(self.cluster.master.rest_username,
self.cluster.master.rest_password,
self.cluster.master.ip, port)
command = cmd_base + '-X POST -d \'os:cmd("env")\''
cmd.append(command)
command = cmd_base + '-X POST -d \'case file:read_file("/etc/passwd") of {ok, B} -> io:format("~p~n", [binary_to_term(B)]) end.\''
cmd.append(command)
for command in cmd:
output, error = shell.execute_command(command)
if self.disable_diag_eval_on_non_local_host:
self.assertEquals("API is accessible from localhost only",
output[0])
else:
self.assertNotEquals("API is accessible from localhost only",
output[0])
def verify_stat(self, items, value="active"):
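        # Validate compression-related stats straight from memcached: the
        # configured compression mode, the number of compressed items, and
        # that the compressed/uncompressed memory figures actually differ.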
mc = MemcachedClient(self.cluster.master.ip, 11210)
mc.sasl_auth_plain(self.cluster.master.rest_username,
self.cluster.master.rest_password)
mc.bucket_select('default')
stats = mc.stats()
self.assertEquals(stats['ep_compression_mode'], value)
self.assertEquals(int(stats['ep_item_compressor_num_compressed']),
items)
self.assertNotEquals(int(stats['vb_active_itm_memory']),
int(stats['vb_active_itm_memory_uncompressed']))
def test_compression_active_and_off(self):
"""
test reproducer for MB-29272,
Load some documents with compression mode set to active
get the cbstats
change compression mode to off and wait for minimum 250ms
Load some more documents and check the compression is not done
epengine.basic_ops.basic_ops.test_compression_active_and_off,items=10000,compression_mode=active
:return:
"""
# Load some documents with compression mode as active
gen_create = doc_generator("eviction1_",
start=0,
end=self.num_items,
doc_size=self.doc_size)
gen_create2 = doc_generator("eviction2_",
start=0,
end=self.num_items,
doc_size=self.doc_size)
def_bucket = self.bucket_util.get_all_buckets()[0]
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, gen_create, "create", 0,
batch_size=10, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(self.num_items)
remote = RemoteMachineShellConnection(self.cluster.master)
for bucket in self.bucket_util.buckets:
# change compression mode to off
output, _ = remote.execute_couchbase_cli(
cli_command='bucket-edit', cluster_host="localhost:8091",
user=self.cluster.master.rest_username,
password=self.cluster.master.rest_password,
options='--bucket=%s --compression-mode off' % bucket.name)
self.assertTrue(' '.join(output).find('SUCCESS') != -1,
'compression mode set to off')
            # Sleep for 10 sec (the mode change needs a minimum of 250ms to take effect)
time.sleep(10)
# Load data and check stats to see compression
# is not done for newly added data
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, gen_create2, "create", 0,
batch_size=10, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(self.num_items*2)
def do_get_random_key(self):
        # MB-31548: get_random_key sometimes hangs.
mc = MemcachedClient(self.cluster.master.ip, 11210)
mc.sasl_auth_plain(self.cluster.master.rest_username,
self.cluster.master.rest_password)
mc.bucket_select('default')
count = 0
while count < 1000000:
count += 1
try:
mc.get_random_key()
except MemcachedError as error:
self.fail("<MemcachedError #%d ``%s''>"
% (error.status, error.message))
if count % 1000 == 0:
self.log.info('The number of iteration is {}'.format(count))
| 46.155635
| 138
| 0.609744
| 3,137
| 25,801
| 4.745298
| 0.141218
| 0.028819
| 0.023512
| 0.016929
| 0.63993
| 0.5833
| 0.5395
| 0.515384
| 0.491603
| 0.478772
| 0
| 0.016267
| 0.294756
| 25,801
| 558
| 139
| 46.238351
| 0.801825
| 0.07178
| 0
| 0.509302
| 0
| 0
| 0.122727
| 0.01985
| 0
| 0
| 0.001793
| 0
| 0.02093
| 1
| 0.030233
| false
| 0.02093
| 0.023256
| 0
| 0.05814
| 0.004651
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dfb7758cdce3c78cd800cea3cdddc3f4635fbfc
| 1,025
|
py
|
Python
|
plot/different_optimal_classifier_scale_for_different_classes.py
|
ZGCTroy/guided-diffusion
|
af987bb2b65db2875148a5466df79736ea5ae6a1
|
[
"MIT"
] | null | null | null |
plot/different_optimal_classifier_scale_for_different_classes.py
|
ZGCTroy/guided-diffusion
|
af987bb2b65db2875148a5466df79736ea5ae6a1
|
[
"MIT"
] | null | null | null |
plot/different_optimal_classifier_scale_for_different_classes.py
|
ZGCTroy/guided-diffusion
|
af987bb2b65db2875148a5466df79736ea5ae6a1
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import yaml
import os
workspace = "/workspace/mnt/storage/guangcongzheng/zju_zgc/guided-diffusion"
num_samples = 192
log = os.path.join(workspace, 'log/imagenet1000_classifier256x256_channel128_upperbound/predict/model500000_imagenet1000_stepsddim25_sample{}_selectedClass'.format(num_samples))
legends = []
plt.figure()
for class_id in range(3):
fid = []
for scale in range(1,21):
result_name = 'result_scale{}.0_class{}_stepsddim25_sample{}.yaml'.format(scale, class_id, num_samples)
result_path = os.path.join(log,result_name)
with open(result_path, "r") as stream:
try:
result_dict = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
fid.append(result_dict['fid'])
print(result_dict)
plt.plot(fid)
plt.xlabel('classifier scale')
plt.ylabel('fid')
legends.append('sample{}_class{}'.format(num_samples, class_id))
plt.legend(legends)
plt.show()
| 25
| 177
| 0.68878
| 132
| 1,025
| 5.136364
| 0.492424
| 0.058997
| 0.029499
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042373
| 0.194146
| 1,025
| 40
| 178
| 25.625
| 0.77845
| 0
| 0
| 0
| 0
| 0
| 0.267191
| 0.231827
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.115385
| 0
| 0.115385
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dfef6c55764a02f7c38cb42e6e52c30df77aaec
| 2,882
|
py
|
Python
|
main.py
|
swapmali/WalliScrapper
|
b7853f7d25da594045039847ad76eddd8d1204d8
|
[
"MIT"
] | null | null | null |
main.py
|
swapmali/WalliScrapper
|
b7853f7d25da594045039847ad76eddd8d1204d8
|
[
"MIT"
] | null | null | null |
main.py
|
swapmali/WalliScrapper
|
b7853f7d25da594045039847ad76eddd8d1204d8
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import urllib.request
from datetime import datetime
import time
from PIL import Image, ImageDraw, ImageFont
import ctypes
import os
import shutil
import socket
import sys
def is_connected(hostname):
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname(hostname)
# connect to the host -- tells us if the host is actually
# reachable
s = socket.create_connection((host, 80), 2)
return True
    except OSError:
        pass
return False
if __name__ == "__main__":
# check internet connection
while True:
#
if not is_connected("www.google.com"):
print("@author: Swapnil Mali \nPlease check your internet connection, will try again after 30 seconds..")
time.sleep(30)
continue
# move shortcut to main.exe to startup folder
try:
# get user name
user = os.getlogin()
path = r'C:\Users\{}\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup\main - Shortcut.lnk'.format(user)
# print(path)
shutil.move(r'main - Shortcut.lnk', path)
except FileNotFoundError:
pass
# just credit and copyright stuff
print('@author: Swapnil Mali \n\n(Note: New wallpaper is available everyday after 2.00 pm)')
print("Downloading Today's Wallpaper...please wait!!")
# get image link from the website page
res = requests.get('https://bing.wallpaper.pics/')
soup = BeautifulSoup(res.text, 'lxml')
image_box = soup.find('a', {'class': 'cursor_zoom'})
image = image_box.find('img')
link = image['src']
# download and save the image
filename = datetime.now().strftime('%d-%m-%y')
urllib.request.urlretrieve(link, '{}.jpg'.format(filename))
# for copyright overlaying text over the image
image = Image.open('{}.jpg'.format(filename))
font_type = ImageFont.truetype('fonts/Quicksand-Bold.otf', 44)
draw = ImageDraw.Draw(image)
draw.text(xy=(800, 1000), text='© Swapnil Mali', fill=(0, 0, 0), font=font_type)
# image.show()
image.save('{}.jpg'.format(filename))
print("\n\n-------------------------------------------\nDone..New wallpaper saved as '{}.jpg'\n-------------------------------------------".format(filename))
time.sleep(1)
# set new image as desktop background
directory = os.getcwd()
        image_path = os.path.join(directory, '{}.jpg'.format(filename))
print("\nSetting new Wallpaper..".format(filename))
ctypes.windll.user32.SystemParametersInfoW(20, 0, image_path, 3)
time.sleep(2)
print("Done..Closing this window")
time.sleep(2)
sys.exit()
| 34.722892
| 165
| 0.597155
| 348
| 2,882
| 4.896552
| 0.514368
| 0.04108
| 0.02993
| 0.025822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014995
| 0.259542
| 2,882
| 82
| 166
| 35.146341
| 0.783037
| 0.148508
| 0
| 0.109091
| 0
| 0.036364
| 0.273585
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0.036364
| 0.2
| 0
| 0.254545
| 0.109091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dff88c39b7da4ce4056ad2977600b1620da0183
| 9,401
|
py
|
Python
|
model_rnn_attention.py
|
zhzhx2008/keras_text_classification
|
e10565fb82ffbfa8b1d685be8b162c26f1429784
|
[
"MIT"
] | 2
|
2019-07-11T17:01:17.000Z
|
2019-07-11T17:01:19.000Z
|
model_rnn_attention.py
|
zhzhx2008/keras_text_classification
|
e10565fb82ffbfa8b1d685be8b162c26f1429784
|
[
"MIT"
] | null | null | null |
model_rnn_attention.py
|
zhzhx2008/keras_text_classification
|
e10565fb82ffbfa8b1d685be8b162c26f1429784
|
[
"MIT"
] | 1
|
2019-12-24T01:03:47.000Z
|
2019-12-24T01:03:47.000Z
|
# coding=utf-8
# @Author : zhzhx2008
# @Time : 18-10-9
import os
import warnings
import jieba
import numpy as np
from keras import Input
from keras import Model
from keras import backend as K
from keras import initializers, regularizers, constraints
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.engine.topology import Layer
from keras.layers import Dropout, Bidirectional
from keras.layers import Embedding, Dense
from keras.layers import LSTM, SpatialDropout1D
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
warnings.filterwarnings("ignore")
seed = 2019
np.random.seed(seed)
def get_labels_datas(input_dir):
datas_word = []
datas_char = []
labels = []
label_dirs = os.listdir(input_dir)
for label_dir in label_dirs:
txt_names = os.listdir(os.path.join(input_dir, label_dir))
for txt_name in txt_names:
with open(os.path.join(input_dir, label_dir, txt_name), 'r') as fin:
                content = fin.readline()  # read only the first line
content = content.strip().replace(' ', '')
datas_word.append(' '.join(jieba.cut(content)))
datas_char.append(' '.join(list(content)))
labels.append(label_dir)
return labels, datas_word, datas_char
def get_label_id_map(labels):
labels = set(labels)
id_label_map = {}
label_id_map = {}
for index, label in enumerate(labels):
id_label_map[index] = label
label_id_map[label] = index
return id_label_map, label_id_map
# 《Feed-Forward Networks with Attention Can Solve Some Long-Term Memory Problems》
# [https://arxiv.org/abs/1512.08756]
# https://www.kaggle.com/qqgeogor/keras-lstm-attention-glove840b-lb-0-043
class Attention(Layer):
def __init__(self, step_dim,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
"""
Keras Layer that implements an Attention mechanism for temporal data.
Supports Masking.
Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
:param kwargs:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Example:
model.add(LSTM(64, return_sequences=True))
model.add(Attention())
"""
self.supports_masking = True
# self.init = initializations.get('glorot_uniform')
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.step_dim = step_dim
self.features_dim = 0
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight((input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
self.features_dim = input_shape[-1]
if self.bias:
self.b = self.add_weight((input_shape[1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
# eij = K.dot(x, self.W) TF backend doesn't support it
# features_dim = self.W.shape[0]
# step_dim = x._keras_shape[1]
features_dim = self.features_dim
step_dim = self.step_dim
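        # Attention scores: e = tanh(x . W + b), computed per timestep by
        # flattening (batch, steps, features) -> (batch*steps, features),
        # projecting onto the single weight vector W, and reshaping back
        # to (batch, steps)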
eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
if self.bias:
eij += self.b
eij = K.tanh(eij)
a = K.exp(eij)
# apply mask after the exp. will be re-normalized next
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
a *= K.cast(mask, K.floatx())
# in some cases especially in the early stages of training the sum may be almost zero
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
# print(weighted_input.shape)
# return weighted_input
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
# return input_shape[0], input_shape[1], self.features_dim
return input_shape[0], self.features_dim
input_dir = './data/THUCNews'
labels, datas_word, datas_char = get_labels_datas(input_dir)
id_label_map, label_id_map = get_label_id_map(labels)
labels, labels_test, datas_word, datas_word_test, datas_char, datas_char_test = train_test_split(labels, datas_word, datas_char, test_size=0.3, shuffle=True, stratify=labels)
labels_train, labels_dev, datas_word_train, datas_word_dev, datas_char_train, datas_char_dev = train_test_split(labels, datas_word, datas_char, test_size=0.1, shuffle=True, stratify=labels)
y_train = [label_id_map.get(x) for x in labels_train]
y_dev = [label_id_map.get(x) for x in labels_dev]
y_test = [label_id_map.get(x) for x in labels_test]
num_classes = len(set(y_train))
y_train_index = to_categorical(y_train, num_classes)
y_dev_index = to_categorical(y_dev, num_classes)
y_test_index = to_categorical(y_test, num_classes)
# keras extract feature
tokenizer = Tokenizer()
tokenizer.fit_on_texts(datas_word_train)
# feature5: word index for deep learning
x_train_word_index = tokenizer.texts_to_sequences(datas_word_train)
x_dev_word_index = tokenizer.texts_to_sequences(datas_word_dev)
x_test_word_index = tokenizer.texts_to_sequences(datas_word_test)
max_word_length = max([len(x) for x in x_train_word_index])
x_train_word_index = pad_sequences(x_train_word_index, maxlen=max_word_length)
x_dev_word_index = pad_sequences(x_dev_word_index, maxlen=max_word_length)
x_test_word_index = pad_sequences(x_test_word_index, maxlen=max_word_length)
input = Input(shape=(max_word_length,))
embedding = Embedding(len(tokenizer.word_index) + 1, 128)(input)
embedding = SpatialDropout1D(0.2)(embedding)
# rnn = SimpleRNN(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(SimpleRNN(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = GRU(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(GRU(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = CuDNNGRU(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(CuDNNGRU(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = LSTM(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
rnn = Bidirectional(LSTM(100, return_sequences=True))(embedding)
rnn = Attention(max_word_length)(rnn) # metrics value=0.38647342980771826
# rnn = GlobalMaxPool1D()(rnn)# 0.33816425149567464
# rnn = GlobalAvgPool1D()(rnn)# 0.20772946881499268
# rnn = Flatten()(rnn) # 0.3140096618357488
# rnn = concatenate([GlobalMaxPool1D()(rnn), GlobalAvgPool1D()(rnn)])# 0.24396135280097742
# rnn = CuDNNLSTM(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(CuDNNLSTM(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
drop = Dropout(0.2)(rnn)
output = Dense(num_classes, activation='softmax')(drop)
model = Model(inputs=input, outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model_weight_file = './model_rnn_attention.h5'
model_file = './model_rnn_attention.model'
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_checkpoint = ModelCheckpoint(model_weight_file, save_best_only=True, save_weights_only=True)
model.fit(x_train_word_index,
y_train_index,
batch_size=32,
epochs=1000,
verbose=2,
callbacks=[early_stopping, model_checkpoint],
validation_data=(x_dev_word_index, y_dev_index),
shuffle=True)
model.load_weights(model_weight_file)
model.save(model_file)
evaluate = model.evaluate(x_test_word_index, y_test_index, batch_size=32, verbose=2)
print('loss value=' + str(evaluate[0]))
print('metrics value=' + str(evaluate[1]))
# loss value=1.562715420647273
# metrics value=0.2936507960160573
| 37.454183
| 189
| 0.688118
| 1,286
| 9,401
| 4.787714
| 0.233281
| 0.021926
| 0.031671
| 0.035732
| 0.264739
| 0.229657
| 0.199123
| 0.169888
| 0.148936
| 0.136268
| 0
| 0.032802
| 0.20551
| 9,401
| 251
| 190
| 37.454183
| 0.791538
| 0.271886
| 0
| 0.014184
| 0
| 0
| 0.026655
| 0.011231
| 0
| 0
| 0
| 0
| 0.007092
| 1
| 0.049645
| false
| 0
| 0.120567
| 0.014184
| 0.212766
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3b00d75cd611416080f44811a3c1f126a3ad61da
| 6,731
|
py
|
Python
|
fastfold/model/fastnn/ops.py
|
hpcaitech/FastFold
|
a65d5009279ef84c1518081344db5c02213c387a
|
[
"Apache-2.0"
] | 303
|
2022-03-03T01:59:47.000Z
|
2022-03-31T07:46:42.000Z
|
fastfold/model/fastnn/ops.py
|
hpcaitech/FastFold
|
a65d5009279ef84c1518081344db5c02213c387a
|
[
"Apache-2.0"
] | 6
|
2022-03-03T22:17:03.000Z
|
2022-03-17T06:09:11.000Z
|
fastfold/model/fastnn/ops.py
|
hpcaitech/FastFold
|
a65d5009279ef84c1518081344db5c02213c387a
|
[
"Apache-2.0"
] | 35
|
2022-03-03T01:58:56.000Z
|
2022-03-29T21:21:06.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from fastfold.model.fastnn.kernel import scale_mask_softmax, scale_mask_bias_softmax
from fastfold.model.fastnn.kernel import LayerNorm
from .initializer import glorot_uniform_af
from fastfold.model.fastnn.kernel import bias_sigmod_ele
from fastfold.distributed import gather, scatter
from fastfold.distributed.comm_async import gather_async, gather_async_opp
class DropoutRowwise(nn.Module):
def __init__(self, p):
super(DropoutRowwise, self).__init__()
self.p = p
self.dropout = nn.Dropout(p=p)
def forward(self, x):
dropout_mask = torch.ones_like(x[:, 0:1, :, :])
dropout_mask = self.dropout(dropout_mask)
return dropout_mask * x
class DropoutColumnwise(nn.Module):
def __init__(self, p):
super(DropoutColumnwise, self).__init__()
self.p = p
self.dropout = nn.Dropout(p=p)
def forward(self, x):
dropout_mask = torch.ones_like(x[:, :, 0:1, :])
dropout_mask = self.dropout(dropout_mask)
return dropout_mask * x
class Transition(nn.Module):
def __init__(self, d, n=4):
super(Transition, self).__init__()
self.norm = LayerNorm(d)
self.linear1 = Linear(d, n * d, initializer='relu')
self.linear2 = Linear(n * d, d, initializer='zeros')
def forward(self, src):
x = self.norm(src)
x = self.linear2(F.relu(self.linear1(x)))
return src + x
class OutProductMean(nn.Module):
def __init__(self, n_feat=64, n_feat_out=128, n_feat_proj=32):
super(OutProductMean, self).__init__()
self.layernormM = LayerNorm(n_feat)
self.linear_a = Linear(n_feat, n_feat_proj)
self.linear_b = Linear(n_feat, n_feat_proj)
self.o_linear = Linear(n_feat_proj * n_feat_proj,
n_feat_out,
initializer='zero',
use_bias=True)
def forward(self, M, M_mask):
M = self.layernormM(M)
right_act = self.linear_b(M)
right_act_all, work = gather_async(right_act, dim=2)
# right_act_all = gather(right_act, dim=2)
left_act = self.linear_a(M)
M_mask = M_mask.unsqueeze(-1)
M_mask_col = scatter(M_mask, dim=2)
left_act = M_mask_col * left_act
norm = torch.einsum('bsid,bsjd->bijd', M_mask_col, M_mask)
right_act_all = gather_async_opp(right_act_all, work, dim=2)
right_act_all = M_mask * right_act_all
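        # Outer product summed over the sequence dimension s:
        # O[b,i,j,d,e] = sum_s left[b,s,i,d] * right[b,s,j,e], later
        # normalized by the pairwise mask count accumulated in `norm`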
O = torch.einsum('bsid,bsje->bijde', left_act, right_act_all)
O = rearrange(O, 'b i j d e -> b i j (d e)')
Z = self.o_linear(O)
Z /= (1e-3 + norm)
return Z
class Linear(nn.Linear):
"""
A Linear layer with built-in nonstandard initializations. Called just
like torch.nn.Linear.
Implements the initializers in 1.11.4, plus some additional ones found
in the code.
"""
def __init__(
self,
feature_in: int,
feature_out: int,
initializer: str = 'linear',
use_bias: bool = True,
bias_init: float = 0.,
):
super(Linear, self).__init__(feature_in, feature_out, bias=use_bias)
self.use_bias = use_bias
if initializer == 'linear':
glorot_uniform_af(self.weight, gain=1.0)
elif initializer == 'relu':
glorot_uniform_af(self.weight, gain=2.0)
elif initializer == 'zeros':
nn.init.zeros_(self.weight)
if self.use_bias:
with torch.no_grad():
self.bias.fill_(bias_init)
class SelfAttention(nn.Module):
"""
Multi-Head SelfAttention dealing with [batch_size1, batch_size2, len, dim] tensors
"""
def __init__(self, qkv_dim, c, n_head, out_dim, gating=True, last_bias_fuse=False):
super(SelfAttention, self).__init__()
self.qkv_dim = qkv_dim
self.c = c
self.n_head = n_head
self.out_dim = out_dim
self.gating = gating
self.last_bias_fuse = last_bias_fuse
self.scaling = self.c**(-0.5)
self.to_qkv = Linear(qkv_dim, 3 * n_head * c, initializer='linear', use_bias=False)
# self.to_q = Linear(qkv_dim, n_head * c, initializer='linear', use_bias=False)
# self.to_k = Linear(qkv_dim, n_head * c, initializer='linear', use_bias=False)
# self.to_v = Linear(qkv_dim, n_head * c, initializer='linear', use_bias=False)
if gating:
self.gating_bias = nn.parameter.Parameter(data=torch.ones((n_head * c,)))
self.gating_linear = Linear(qkv_dim, n_head * c, initializer='zero', use_bias=False)
self.o_linear = Linear(n_head * c,
out_dim,
initializer='zero',
use_bias=(not last_bias_fuse))
def forward(self, in_data, mask, nonbatched_bias=None):
"""
:param in_data: [batch_size1, batch_size2, len_qkv, qkv_dim]
:param bias: None or [batch_size1, batch_size2, n_head, len_q, len_kv]
:param nonbatched_bias: None or [batch_size1, n_head, len_q, len_kv]
"""
qkv = self.to_qkv(in_data).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b1 b2 n (h d) -> b1 b2 h n d', h=self.n_head), qkv)
# q = self.to_q(in_data)
# k = self.to_k(in_data)
# v = self.to_k(in_data)
# q, k, v = map(lambda t: rearrange(t, 'b1 b2 n (h d) -> b1 b2 h n d', h=self.n_head), [q, k, v])
# q = q * self.scaling
logits = torch.matmul(q, k.transpose(-1, -2))
# logits += mask
if nonbatched_bias is not None:
# logits += nonbatched_bias.unsqueeze(1)
bias = gather_async_opp(*nonbatched_bias, dim=1)
bias = rearrange(bias, 'b q k h -> b h q k')
weights = scale_mask_bias_softmax(logits, mask, bias.unsqueeze(1), self.scaling)
else:
weights = scale_mask_softmax(logits, mask, self.scaling)
# weights = torch.softmax(logits, dim=-1)
# weights = softmax(logits)
weighted_avg = torch.matmul(weights, v)
weighted_avg = rearrange(weighted_avg, 'b1 b2 h n d -> b1 b2 n (h d)')
if self.gating:
gate_values = self.gating_linear(in_data)
weighted_avg = bias_sigmod_ele(gate_values, self.gating_bias, weighted_avg)
output = self.o_linear(weighted_avg)
return output
| 34.875648
| 106
| 0.592334
| 930
| 6,731
| 4.022581
| 0.182796
| 0.018712
| 0.020583
| 0.022721
| 0.311147
| 0.233627
| 0.182572
| 0.148623
| 0.148623
| 0.148623
| 0
| 0.013108
| 0.297281
| 6,731
| 192
| 107
| 35.057292
| 0.777801
| 0.154658
| 0
| 0.117647
| 0
| 0
| 0.032754
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092437
| false
| 0
| 0.084034
| 0
| 0.268908
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3b031f123e10590a23278a8471646c084f1f967a
| 1,093
|
py
|
Python
|
src/input/__init__.py
|
huyingjun/PyAgent
|
ff7096634aa8deb617d2fe9d47fd2c6fbf8ff9a4
|
[
"MIT"
] | 1
|
2021-12-23T11:56:19.000Z
|
2021-12-23T11:56:19.000Z
|
src/input/__init__.py
|
huyingjun/PyAgent
|
ff7096634aa8deb617d2fe9d47fd2c6fbf8ff9a4
|
[
"MIT"
] | null | null | null |
src/input/__init__.py
|
huyingjun/PyAgent
|
ff7096634aa8deb617d2fe9d47fd2c6fbf8ff9a4
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
__init__.py
~~~~~~~~
Data-collection plugin: input
:author: Fufu, 2021/6/7
"""
from abc import abstractmethod
from asyncio import create_task, sleep
from typing import Any
from loguru import logger
from ..libs.plugin import BasePlugin
class InputPlugin(BasePlugin):
"""数据采集插件基类"""
module = 'input'
async def run(self):
"""定时执行收集"""
logger.debug(f'{self.module}.{self.name} is working')
while not self.is_closed():
create_task(self.gather())
await sleep(self.get_interval(60))
logger.debug(f'{self.module}.{self.name} is closed')
@abstractmethod
async def gather(self) -> Any:
"""获取数据"""
pass
def is_closed(self):
"""检查当前插件是否该关闭 (名称不在开启的插件中)"""
if self.name in self.conf.plugins_open:
return False
        # Send the plugin-close signal (a special Metric)
self.out_queue.put_nowait(self.metric(None, tag='__CLOSE_SIGNAL__'))
self.conf.plugins_working.discard(self.name)
logger.info(f'Plugin {self.name} is closed')
return True
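# A minimal illustrative subclass (hypothetical; not part of this module),
# mirroring how metric()/out_queue are used in is_closed() above. Treating
# `name` as a class attribute is an assumption about BasePlugin.
# class DemoInput(InputPlugin):
#     name = 'demo'
#
#     async def gather(self) -> Any:
#         # A real plugin would poll a device or an API here
#         self.out_queue.put_nowait(self.metric({'value': 1}))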
| 22.770833
| 76
| 0.610247
| 134
| 1,093
| 4.843284
| 0.552239
| 0.061633
| 0.046225
| 0.049307
| 0.098613
| 0.098613
| 0.098613
| 0.098613
| 0
| 0
| 0
| 0.011057
| 0.255261
| 1,093
| 47
| 77
| 23.255319
| 0.786241
| 0.123513
| 0
| 0
| 0
| 0
| 0.133185
| 0.055494
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0.043478
| 0.217391
| 0
| 0.434783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3b03f07ac42f24043a890f0020944e25aecce786
| 1,933
|
py
|
Python
|
repositorybots/bots/Librarian.py
|
conda/conda-bots
|
a68cff7b0318093328e355e18871518c050f5493
|
[
"BSD-3-Clause"
] | 2
|
2021-09-27T02:29:26.000Z
|
2021-10-20T19:10:39.000Z
|
repositorybots/bots/Librarian.py
|
conda/conda-bots
|
a68cff7b0318093328e355e18871518c050f5493
|
[
"BSD-3-Clause"
] | 14
|
2021-09-09T21:16:05.000Z
|
2022-03-28T09:31:09.000Z
|
repositorybots/bots/Librarian.py
|
conda/conda-bots
|
a68cff7b0318093328e355e18871518c050f5493
|
[
"BSD-3-Clause"
] | 2
|
2021-09-09T12:11:48.000Z
|
2022-01-28T20:25:26.000Z
|
import yaml
import re
from .SummonableBot import SummonableBot
class Librarian(SummonableBot):
def __init__(self, bot_name, event):
self.help_command = 'help'
self.help_preamble = "Here are my available responses"
self.event = event
with open('./responses.yml') as file:
response_list = yaml.load(file, Loader=yaml.FullLoader)
available_responses = response_list.get('responses').keys()
regex_for_responses = "\\s*|".join(available_responses)
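        # The summoning regex below matches "@<bot_name>" followed by either
        # one of the configured response names or the literal help command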
self.summoning_regex = r'(@' + bot_name + r')\s*' + f'({regex_for_responses}\\s*|{self.help_command})'
def __prepare_new_issue_text(self, top_message, links):
s = top_message + """\n\n- """
s += "\n- ".join('['+ l.get('title') + '](' + l.get('url') +')' for l in links)
return s
def __prepare_help_response(self, top_message, responses):
s = top_message + """:\n\n- """
s += "\n- ".join(response for response in responses)
return s
def has_been_summoned(self, comment_body):
return re.search(self.summoning_regex, comment_body, re.MULTILINE)
async def check_library(self, user_help_match):
message = None
with open('./responses.yml') as file:
response_list = yaml.load(file, Loader=yaml.FullLoader)
response_to_fetch = user_help_match.group(2).strip()
if response_to_fetch == self.help_command:
message = self.__prepare_help_response(
self.help_preamble, response_list.get('responses').keys())
else:
requested_response = response_list.get('responses').get(response_to_fetch, '')
message = self.__prepare_new_issue_text(
requested_response.get('message', ''), requested_response.get('helpful_links', []))
if message:
await self.event.add_comment(message)
| 42.021739
| 114
| 0.621314
| 232
| 1,933
| 4.892241
| 0.331897
| 0.035242
| 0.039648
| 0.063436
| 0.206167
| 0.156828
| 0.156828
| 0.156828
| 0.123348
| 0.123348
| 0
| 0.000689
| 0.249353
| 1,933
| 45
| 115
| 42.955556
| 0.78153
| 0
| 0
| 0.162162
| 0
| 0
| 0.105018
| 0.024315
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.081081
| 0.027027
| 0.297297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3b04c0970a5c74618f4b5ea5a958ded0e0f252eb
| 8,201
|
py
|
Python
|
word_game_helper.py
|
avendesora/wordle-helper
|
651c1eddca14f56be798e0fe242c1f2cf98ae7ba
|
[
"MIT"
] | null | null | null |
word_game_helper.py
|
avendesora/wordle-helper
|
651c1eddca14f56be798e0fe242c1f2cf98ae7ba
|
[
"MIT"
] | null | null | null |
word_game_helper.py
|
avendesora/wordle-helper
|
651c1eddca14f56be798e0fe242c1f2cf98ae7ba
|
[
"MIT"
] | null | null | null |
import pprint
import statistics
from contextlib import suppress
from dataclasses import dataclass
from enum import Enum
from typing import Optional
@dataclass
class ValidCharacter:
definite_locations: set[int]
definite_not_locations: set[int]
class CharacterStatus(Enum):
GRAY = "gray"
GREEN = "green"
YELLOW = "yellow"
@dataclass
class CharacterGuess:
character: str
status: CharacterStatus
@dataclass
class GroupStats:
answer: str
is_potential_solution: bool
number_of_groups: int
average_group_size: float
largest_group: int
class WordGameHelper:
_eliminated_characters: set[str]
_included_characters: dict[str, ValidCharacter]
_original_possible_common_words: set[str]
possible_words: set[str]
possible_common_words: set[str]
def __init__(
self,
possible_words: Optional[set[str]],
possible_common_words: Optional[set[str]],
used_words: Optional[set[str]],
):
self._eliminated_characters = set()
self._included_characters = {}
self.possible_words = possible_words or set()
self.possible_common_words = possible_common_words or set()
self._original_possible_common_words = possible_common_words.copy()
if used_words:
self.possible_words = self.possible_words - used_words
self.possible_common_words = self.possible_common_words - used_words
def make_guess(self, guess: list[CharacterGuess]):
for index, character_guess in enumerate(guess):
self._update_characters(index, character_guess)
self._update_possible_words()
def print_possible_answers(self):
if len(self.possible_words) == 1:
print(f"The answer is {self.possible_words.pop().upper()}.")
return
# possible_answers: list[str] = list(self.possible_words)
# possible_answers.sort()
# print(f"There are {len(possible_answers)} possible answers.")
# print("\n".join(possible_answers))
# print()
if len(self.possible_common_words) == 1:
print(f"The answer is probably {self.possible_common_words.pop().upper()}.")
return
possible_common_answers: list[str] = list(self.possible_common_words)
possible_common_answers.sort()
print(f"There are {len(possible_common_answers)} common possible answers.")
if len(possible_common_answers) < 5:
print("\n".join(possible_common_answers))
if len(possible_common_answers) > 2:
self._get_best_guess()
def _get_best_guess(self):
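        # Strategy: for every candidate answer, simulate all 3^5
        # gray/green/yellow feedback patterns, record how the remaining
        # common words split into groups, and rank guesses by the smallest
        # average group size (fewest words left after the guess, on average)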
answer_groups = {}
statuses = [CharacterStatus.GRAY, CharacterStatus.GREEN, CharacterStatus.YELLOW]
stats: list[GroupStats] = []
for index, answer in enumerate(self._original_possible_common_words):
answer_groups[answer] = []
group_lengths = []
for status1 in statuses:
for status2 in statuses:
for status3 in statuses:
for status4 in statuses:
for status5 in statuses:
helper = WordGameHelper(
self.possible_common_words,
self.possible_common_words,
set(),
)
helper.make_guess(
[
CharacterGuess(answer[0], status1),
CharacterGuess(answer[1], status2),
CharacterGuess(answer[2], status3),
CharacterGuess(answer[3], status4),
CharacterGuess(answer[4], status5),
]
)
if len(helper.possible_words) > 0:
group = helper.possible_common_words
answer_groups[answer].append(group)
group_lengths.append(len(group))
average_length = statistics.mean(group_lengths)
group_stats = GroupStats(
answer=answer,
is_potential_solution=answer in self.possible_common_words,
number_of_groups=len(group_lengths),
average_group_size=average_length,
largest_group=max(group_lengths),
)
# pprint.pprint(group_stats)
stats.append(group_stats)
stats.sort(key=lambda x: x.average_group_size)
print(f" The best guesses statistically are:")
count: int = 0
for stat in stats:
if stat.average_group_size > stats[0].average_group_size:
continue
if count > 10:
break
print(
f" {stat.answer}, "
f"is_potential_solution = {stat.is_potential_solution}, "
f"number_of_groups = {stat.number_of_groups}, "
f"average_group_size = {stat.average_group_size}, "
f"largest_group = {stat.largest_group}"
)
count += 1
print(f" The best, possibly-correct guesses statistically are:")
potential_solution_stats = [
stat for stat in stats if stat.is_potential_solution
]
for stat in potential_solution_stats[:10]:
# if stat.average_group_size > potential_solution_stats[0].average_group_size:
# continue
print(
f" {stat.answer}, "
f"is_potential_solution = {stat.is_potential_solution}, "
f"number_of_groups = {stat.number_of_groups}, "
f"average_group_size = {stat.average_group_size}, "
f"largest_group = {stat.largest_group}"
)
def _update_characters(self, position: int, guess: CharacterGuess):
value = self._included_characters.get(
guess.character, ValidCharacter(set(), set())
)
if (
guess.status == CharacterStatus.GRAY
and guess.character not in self._included_characters
):
value.definite_not_locations.add(position)
self._eliminated_characters.add(guess.character)
return
with suppress(KeyError):
self._eliminated_characters.remove(guess.character)
if guess.status in (CharacterStatus.YELLOW, CharacterStatus.GRAY):
value.definite_not_locations.add(position)
else:
value.definite_locations.add(position)
self._included_characters[guess.character] = value
def _update_possible_words(self):
updated_possible_words: set[str] = set()
updated_possible_common_words: set[str] = set()
for word in self.possible_words:
if len(set(word).intersection(self._eliminated_characters)) > 0:
continue
is_valid: bool = True
for character, valid_character in self._included_characters.items():
if not is_valid:
break
if character not in word:
is_valid = False
break
for invalid_location in valid_character.definite_not_locations:
if word[invalid_location] == character:
is_valid = False
break
for valid_location in valid_character.definite_locations:
if word[valid_location] != character:
is_valid = False
break
if not is_valid:
continue
updated_possible_words.add(word)
if word in self.possible_common_words:
updated_possible_common_words.add(word)
self.possible_words = updated_possible_words
self.possible_common_words = updated_possible_common_words
| 34.603376
| 90
| 0.573954
| 818
| 8,201
| 5.46577
| 0.156479
| 0.087676
| 0.093491
| 0.056587
| 0.33885
| 0.261463
| 0.139566
| 0.129278
| 0.070678
| 0.070678
| 0
| 0.00545
| 0.351177
| 8,201
| 236
| 91
| 34.75
| 0.834805
| 0.036703
| 0
| 0.210227
| 0
| 0
| 0.089216
| 0.039159
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034091
| false
| 0
| 0.034091
| 0
| 0.210227
| 0.056818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d178683893b91fd9a85c22e3c3785427e4b51812
| 2,249
|
py
|
Python
|
876.middle-of-the-linked-list.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
876.middle-of-the-linked-list.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
876.middle-of-the-linked-list.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
# coding=utf-8
#
# @lc app=leetcode id=876 lang=python
#
# [876] Middle of the Linked List
#
# https://leetcode.com/problems/middle-of-the-linked-list/description/
#
# algorithms
# Easy (64.97%)
# Likes: 593
# Dislikes: 42
# Total Accepted: 76.4K
# Total Submissions: 117.5K
# Testcase Example: '[1,2,3,4,5]'
#
# Given a non-empty, singly linked list with head node head, return a middle
# node of linked list.
#
# If there are two middle nodes, return the second middle node.
#
#
#
#
# Example 1:
#
#
# Input: [1,2,3,4,5]
# Output: Node 3 from this list (Serialization: [3,4,5])
# The returned node has value 3. (The judge's serialization of this node is
# [3,4,5]).
# Note that we returned a ListNode object ans, such that:
# ans.val = 3, ans.next.val = 4, ans.next.next.val = 5, and ans.next.next.next
# = NULL.
#
#
#
# Example 2:
#
#
# Input: [1,2,3,4,5,6]
# Output: Node 4 from this list (Serialization: [4,5,6])
# Since the list has two middle nodes with values 3 and 4, we return the second
# one.
#
#
#
#
# Note:
#
#
# The number of nodes in the given list will be between 1 and 100.
#
#
#
#
#
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
return "<ListNode %s -> %s>" % (self.val, self.next)
class Solution(object):
def middleNode(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return
elif not head.next:
return head
fast = low = head
while fast:
if fast and fast.next and fast.next.next:
fast = fast.next.next
else:
break
low = low.next
return low if not fast.next else low.next
# if __name__ == '__main__':
#     s = Solution()
#     print(s.middleNode(None))
#     head = ListNode(1)
#     print(s.middleNode(head))
#     head.next = ListNode(2)
#     print(s.middleNode(head))
#     head.next.next = ListNode(3)
#     print(s.middleNode(head))
#     head.next.next.next = ListNode(4)
#     print(s.middleNode(head))
#     head.next.next.next.next = ListNode(5)
#     print(s.middleNode(head))
| 21.834951
| 79
| 0.595376
| 329
| 2,249
| 4.021277
| 0.337386
| 0.066516
| 0.072562
| 0.075586
| 0.150416
| 0.11489
| 0.078609
| 0.054422
| 0
| 0
| 0
| 0.040954
| 0.272566
| 2,249
| 102
| 80
| 22.04902
| 0.767726
| 0.656292
| 0
| 0
| 0
| 0
| 0.028316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0
| 0.05
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d17e6c29b97301453dbf67266605a0471b95c7b0
| 3,442
|
py
|
Python
|
ava_asd/vis.py
|
tuanchien/asd
|
190c1c6d155b16a27717596d6350598e5cd4ffac
|
[
"Apache-2.0",
"MIT"
] | 18
|
2020-06-19T01:18:13.000Z
|
2022-03-21T10:42:13.000Z
|
ava_asd/vis.py
|
tuanchien/asd
|
190c1c6d155b16a27717596d6350598e5cd4ffac
|
[
"Apache-2.0",
"MIT"
] | 8
|
2020-12-17T06:09:59.000Z
|
2021-07-10T02:07:41.000Z
|
ava_asd/vis.py
|
tuanchien/asd
|
190c1c6d155b16a27717596d6350598e5cd4ffac
|
[
"Apache-2.0",
"MIT"
] | 4
|
2020-06-20T01:05:01.000Z
|
2021-08-05T13:45:48.000Z
|
# New BSD License
#
# Copyright (c) 2007-2019 The scikit-learn developers.
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Scikit-learn Developers nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues, dpi=70):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
np.set_printoptions(precision=2)
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
# classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots()
fig.set_dpi(dpi)
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
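# A minimal usage sketch (hypothetical labels/data, not part of this module):
#   y_true = [0, 1, 1, 0, 1]
#   y_pred = [0, 1, 0, 0, 1]
#   ax = plot_confusion_matrix(y_true, y_pred,
#                              classes=np.array(['neg', 'pos']),
#                              normalize=True)
#   plt.show()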
| 40.494118
| 107
| 0.691458
| 481
| 3,442
| 4.914761
| 0.498961
| 0.044416
| 0.007614
| 0.01269
| 0.098985
| 0.07868
| 0.05753
| 0.05753
| 0.05753
| 0.05753
| 0
| 0.007496
| 0.224869
| 3,442
| 84
| 108
| 40.97619
| 0.878561
| 0.570017
| 0
| 0.058824
| 0
| 0
| 0.098384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.088235
| 0
| 0.147059
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1802643daf10062a7ef847447ff5fef65abb757
| 1,757
|
py
|
Python
|
tests/state_tests.py
|
Alasdair-Macindoe/TuringMachineEmulator
|
4c2639876bd94209b170232b2f33ea1409a61a45
|
[
"MIT"
] | null | null | null |
tests/state_tests.py
|
Alasdair-Macindoe/TuringMachineEmulator
|
4c2639876bd94209b170232b2f33ea1409a61a45
|
[
"MIT"
] | null | null | null |
tests/state_tests.py
|
Alasdair-Macindoe/TuringMachineEmulator
|
4c2639876bd94209b170232b2f33ea1409a61a45
|
[
"MIT"
] | null | null | null |
import pytest
import sys
sys.path.append('.')
from turingmachine import Transition, Direction, State
def test_create_transition():
q0 = State()
q1 = State()
    # In q0, upon reading 'a', move to q1, output 'b', and move the tape one cell right
q0.create_transition('a', q1, 'b', Direction.RIGHT)
assert q0.transitions['a'].new_state == q1
assert q0.transitions['a'].output_letter == 'b'
assert q0.transitions['a'].movement_direction == Direction.RIGHT
def test_create_multiple_transitions():
q0 = State()
q1 = State()
q2 = State()
q0.create_transition('a', q1, 'b', Direction.RIGHT)
q1.create_transition('c', q2, 'd', Direction.LEFT)
with pytest.raises(KeyError):
q0.transitions['b'] is None
assert q0.transitions['a'].new_state.transitions['c'].new_state == q2
assert q1.transitions['c'].new_state == q2
assert q0.transitions['a'].new_state.transitions['c'].output_letter == 'd'
assert q1.transitions['c'].output_letter == 'd'
assert q0.transitions['a'].new_state.transitions['c'].movement_direction == Direction.LEFT
assert q1.transitions['c'].movement_direction == Direction.LEFT
def test_add_transition():
q0 = State()
q1 = State()
t = Transition(q1, 'b', Direction.RIGHT)
q0.add_transition('a', t)
assert q0.transitions['a'] == t
def test_create_with_transitions():
q0 = State()
t1 = Transition(q0, 'c', Direction.LEFT)
t2 = Transition(q0, 'd', Direction.RIGHT)
q1 = State({'a': t1, 'b' : t2})
assert q1.transitions['a'] == t1
assert q1.transitions['b'] == t2
def test_calc():
q0 = State()
t1 = Transition(q0, 'a', Direction.RIGHT)
q1 = State()
q1.add_transition('b', t1)
res = q1.calc('b')
assert res == t1
| 31.375
| 94
| 0.650541
| 243
| 1,757
| 4.588477
| 0.201646
| 0.093274
| 0.119283
| 0.125561
| 0.416144
| 0.335426
| 0.172197
| 0.172197
| 0
| 0
| 0
| 0.036466
| 0.188389
| 1,757
| 55
| 95
| 31.945455
| 0.745442
| 0.038702
| 0
| 0.244444
| 0
| 0
| 0.020735
| 0
| 0
| 0
| 0
| 0
| 0.288889
| 1
| 0.111111
| false
| 0
| 0.066667
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1813862bfc10545d923154e8ce565b2682d6c7b
| 558
|
py
|
Python
|
Python3/0041-First-Missing-Positive/soln-1.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0041-First-Missing-Positive/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0041-First-Missing-Positive/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
class Solution:
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# constant space
# [1, len(nums) + 1]
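        # Encoding trick: clamp out-of-range values to 0, set n = len + 1,
        # then for each original value v (recovered via num % n) add n to
        # nums[v - 1]. The first 1-based index whose cell was never
        # incremented (num // n == 0) is the missing positive; if every
        # cell was hit, the answer is n itself.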
n = len(nums)
for i, num in enumerate(nums):
if num < 0 or num > n:
nums[i] = 0
n += 1
for i, num in enumerate(nums):
idx = num % n
if idx:
nums[idx - 1] += n
for i, num in enumerate(nums, 1):
if num // n == 0:
return i
return n
| 26.571429
| 41
| 0.405018
| 68
| 558
| 3.323529
| 0.367647
| 0.053097
| 0.09292
| 0.119469
| 0.292035
| 0.292035
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.483871
| 558
| 21
| 42
| 26.571429
| 0.756944
| 0.121864
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d18168c2e3ac9ceaadcf572633e69461bbc92841
| 294
|
py
|
Python
|
default/modules/cwd.py
|
AshlynnInWonderland/zsh-powerline
|
e6f3326b3e15d8a89a0ea959314ea0ea5768ea86
|
[
"MIT"
] | null | null | null |
default/modules/cwd.py
|
AshlynnInWonderland/zsh-powerline
|
e6f3326b3e15d8a89a0ea959314ea0ea5768ea86
|
[
"MIT"
] | null | null | null |
default/modules/cwd.py
|
AshlynnInWonderland/zsh-powerline
|
e6f3326b3e15d8a89a0ea959314ea0ea5768ea86
|
[
"MIT"
] | null | null | null |
import os
def returnText():
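    # Collapse the cwd (with $HOME shown as '~') to its last three path
    # components, prefixing '+' when deeper levels were truncated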
cwd = os.getcwd().replace(os.environ['HOME'],'~')
lstCwd = str.split(cwd, '/')
if len(lstCwd) > 3:
lstCwd.reverse()
lstCwd = lstCwd[0:3]
lstCwd.append('+')
lstCwd.reverse()
strCwd = '/'.join(lstCwd)
return strCwd
| 22.615385
| 53
| 0.547619
| 34
| 294
| 4.735294
| 0.617647
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013953
| 0.268707
| 294
| 12
| 54
| 24.5
| 0.734884
| 0
| 0
| 0.181818
| 0
| 0
| 0.027211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d185040fe764c47e88800452a2deca88a3ec3079
| 13,255
|
py
|
Python
|
edna2/tasks/Is4aTasks.py
|
gsantoni/edna2
|
0aad63a3ea8091ce62118f0b2c8ac78a2286da9e
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
edna2/tasks/Is4aTasks.py
|
gsantoni/edna2
|
0aad63a3ea8091ce62118f0b2c8ac78a2286da9e
|
[
"CC0-1.0",
"MIT"
] | 2
|
2020-04-06T10:39:50.000Z
|
2021-04-14T19:24:37.000Z
|
edna2/tasks/Is4aTasks.py
|
gsantoni/edna2
|
0aad63a3ea8091ce62118f0b2c8ac78a2286da9e
|
[
"CC0-1.0",
"MIT"
] | 5
|
2019-06-14T07:28:38.000Z
|
2021-04-28T13:10:39.000Z
|
#
# Copyright (c) European Synchrotron Radiation Facility (ESRF)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__authors__ = ["O. Svensson"]
__license__ = "MIT"
__date__ = "10/05/2019"
import json
import shutil
import pprint
from edna2.tasks.AbstractTask import AbstractTask
from edna2.tasks.ISPyBTasks import GetListAutoprocessingResults
class FindHklAsciiForMerge(AbstractTask):
"""
This task receives a list of data collection IDs and returns a
json schema for EXI2
"""
def getInDataSchema(self):
return {
"type": "object",
"properties": {
"token": {"type": "string"},
"proposal": {"type": "string"},
"dataCollectionId": {
"type": "array",
"items": {
"type": "integer",
}
}
}
}
# def getOutDataSchema(self):
# return {
# "type": "object",
# "required": ["dataForMerge"],
# "properties": {
# "dataForMerge": {
# "type": "object",
# "items": {
# "type": "object",
# "properties": {
# "spaceGroup": {"type": "string"}
# }
# }
# }
# }
# }
def run(self, inData):
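        # Flow: fetch autoprocessing results for the given data collections
        # from ISPyB, then build a JSON-schema form offering one XDS_ASCII
        # attachment picker and one minimum I/SIGMA field per collection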
urlError = None
token = inData['token']
proposal = inData['proposal']
listDataCollectionId = inData['dataCollectionId']
inDataGetListAutoprocessingResults = {
'token': token,
'proposal': proposal,
'dataCollectionId': listDataCollectionId
}
getListAutoprocessingResults = GetListAutoprocessingResults(
inData=inDataGetListAutoprocessingResults
)
getListAutoprocessingResults.execute()
outDataAutoprocessing = getListAutoprocessingResults.outData
if 'error' in outDataAutoprocessing:
urlError = outDataAutoprocessing['error']
else:
index = 1
properties = {}
listOrder = []
for dataCollection in outDataAutoprocessing['dataCollection']:
dataCollectionId = dataCollection['dataCollectionId']
dictEntry = {}
listEnumNames = []
listEnumValues = []
proteinAcronym = None
blSampleName = None
if 'error' in dataCollection['autoprocIntegration']:
urlError = dataCollection['autoprocIntegration']['error']
else:
for autoProcResult in dataCollection['autoprocIntegration']:
if proteinAcronym is None:
proteinAcronym = autoProcResult['Protein_acronym']
blSampleName = autoProcResult['BLSample_name']
for autoProcAttachment in autoProcResult['autoprocAttachment']:
if 'XDS_ASCII' in autoProcAttachment['fileName']:
fileName = autoProcAttachment['fileName']
program = autoProcResult['v_datacollection_processingPrograms']
attachmentId = autoProcAttachment['autoProcProgramAttachmentId']
enumName = '{0:30s} {1}'.format(program, fileName)
listEnumNames.append(enumName)
enumValue = attachmentId
listEnumValues.append(enumValue)
if urlError is None:
entryKey = 'hkl_' + str(dataCollectionId)
if entryKey not in properties:
                        dictEntry['title'] = 'Select HKL for data Collection #{0} {1}-{2}'.format(
index,
proteinAcronym,
blSampleName
)
dictEntry['enum'] = listEnumValues
dictEntry['enumNames'] = listEnumNames
properties[entryKey] = dictEntry
listOrder.append(entryKey)
entryKey = 'minimum_I/SIGMA_' + str(dataCollectionId)
if entryKey not in properties:
# Minimum sigma
dictEntry = {
'integer': 'string',
'type': 'string',
                            'title': 'minimum_I/SIGMA for data Collection #{0} {1}-{2}'.format(
index,
proteinAcronym,
blSampleName
)
}
properties[entryKey] = dictEntry
listOrder.append(entryKey)
index += 1
if urlError is None:
schema = {
'properties': properties,
'type': 'object',
'title': 'User input needed'
}
uiSchema = {
'ui:order': listOrder
}
outData = {
"schema": schema,
"uiSchema": uiSchema
}
else:
outData = {
'error': urlError
}
return outData
class FindPipelineForMerge(AbstractTask):
"""
This task receives a list of data collection IDs and returns a
json schema for EXI2
"""
def getInDataSchema(self):
return {
"type": "object",
"properties": {
"token": {"type": "string"},
"proposal": {"type": "string"},
"dataCollectionId": {
"type": "array",
"items": {
"type": "integer",
}
}
}
}
# def getOutDataSchema(self):
# return {
# "type": "object",
# "required": ["dataForMerge"],
# "properties": {
# "dataForMerge": {
# "type": "object",
# "items": {
# "type": "object",
# "properties": {
# "spaceGroup": {"type": "string"}
# }
# }
# }
# }
# }
def run(self, inData):
urlError = None
token = inData['token']
proposal = inData['proposal']
listDataCollectionId = inData['dataCollectionId']
inDataGetListAutoprocessingResults = {
'token': token,
'proposal': proposal,
'dataCollectionId': listDataCollectionId
}
getListAutoprocessingResults = GetListAutoprocessingResults(
inData=inDataGetListAutoprocessingResults
)
getListAutoprocessingResults.execute()
outDataAutoprocessing = getListAutoprocessingResults.outData
if 'error' in outDataAutoprocessing:
urlError = outDataAutoprocessing['error']
else:
index = 1
properties = {}
listOrder = []
dictEntry = {}
for dataCollection in outDataAutoprocessing['dataCollection']:
dataCollectionId = dataCollection['dataCollectionId']
listEnumValues = []
proteinAcronym = None
blSampleName = None
if 'error' in dataCollection['autoprocIntegration']:
urlError = dataCollection['autoprocIntegration']['error']
else:
for autoProcResult in dataCollection['autoprocIntegration']:
if len(autoProcResult['autoprocAttachment']) > 0:
if proteinAcronym is None:
proteinAcronym = autoProcResult['Protein_acronym']
blSampleName = autoProcResult['BLSample_name']
if '1' in autoProcResult['anomalous']:
anom = True
else:
anom = False
for autoProcAttachment in autoProcResult['autoprocAttachment']:
if 'XDS_ASCII' in autoProcAttachment['fileName']:
fileName = autoProcAttachment['fileName']
program = autoProcResult['v_datacollection_processingPrograms']
attachmentId = autoProcAttachment['autoProcProgramAttachmentId']
if anom:
entryKey = program + '_anom'
else:
entryKey = program + '_noanom'
if entryKey not in dictEntry:
dictEntry[entryKey] = []
dictEntry[entryKey].append({'id': attachmentId, 'fileName': fileName})
if urlError is None:
listEnumNames = []
dictInput = {}
for entryKey, listAttachment in dictEntry.items():
if len(listAttachment) == len(outDataAutoprocessing['dataCollection']):
listEnumNames.append(entryKey)
dictInput[entryKey] = listAttachment
index += 1
if len(listEnumNames) > 0:
dictSchema = {
'title': 'Select processing pipeline for data Collection {0}-{1}'.format(
proteinAcronym,
blSampleName
),
'type': 'string',
'enum': listEnumNames,
'enumNames': listEnumNames
}
key = "pipeline"
properties[key] = dictSchema
listOrder.append(key)
# Minimum sigma
dictSchema = {
'integer': 'string',
'type': 'string',
'title': 'minimum_I/SIGMA for data Collection {0}-{1}'.format(
proteinAcronym,
blSampleName
)
}
key = 'minimum_I/SIGMA'
properties[key] = dictSchema
listOrder.append(key)
if urlError is None:
schema = {
'properties': properties,
'type': 'object',
'title': 'User input needed'
}
uiSchema = {
'ui:order': listOrder
}
outData = {
'schema': {
"schema": schema,
"uiSchema": uiSchema
},
'input': dictInput
}
else:
outData = {
'error': urlError
}
return outData
class MergeUtls(AbstractTask):
"""
This task will run the Merge_utls.py program written by Shibom Basu
"""
def run(self, inData):
listHklLp = inData['listHklLp']
workingDir = self.getWorkingDirectory()
index = 1
for hklLp in listHklLp:
dataDir = workingDir / "data{0}".format(index)
dataDir.mkdir(exist_ok=True)
shutil.copy(hklLp['hkl'], str(dataDir / 'XDS_ASCII.HKL'))
index += 1
commandLine = 'Merge_utls.py --root {0} --expt serial-xtal'.format(str(workingDir))
self.runCommandLine(commandLine, logPath=None)
# Find Mergeing_results.json
resultPath = self.getWorkingDirectory() / 'adm_serial-xtal' / 'adm_3' / 'Mergeing_results.json'
        outData = {}  # initialise so a missing result file cannot raise a NameError
        if resultPath.exists():
            with open(str(resultPath)) as f:
                mergeResult = json.loads(f.read())
            outData = {'mergeResult': mergeResult}
        return outData
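# Illustrative usage sketch (not part of the original file): an EXI2-style
# caller might drive FindHklAsciiForMerge like this; the token and proposal
# values below are hypothetical placeholders.
# task = FindHklAsciiForMerge(inData={
#     'token': '<ispyb-token>',
#     'proposal': 'mx0000',
#     'dataCollectionId': [123456, 123457],
# })
# task.execute()
# print(task.outData)  # {'schema': ..., 'uiSchema': ...} or {'error': ...}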
| 40.045317
| 106
| 0.485704
| 908
| 13,255
| 7.048458
| 0.276432
| 0.015625
| 0.00875
| 0.0125
| 0.592813
| 0.592813
| 0.564375
| 0.5375
| 0.493125
| 0.493125
| 0
| 0.005282
| 0.428668
| 13,255
| 330
| 107
| 40.166667
| 0.839826
| 0.167107
| 0
| 0.649194
| 0
| 0
| 0.137212
| 0.013264
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020161
| false
| 0
| 0.020161
| 0.008065
| 0.072581
| 0.004032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d187fe47d5524b63a0f74b45076d6dc9c23a3d02
| 1,556
|
py
|
Python
|
predict.py
|
SuperbTUM/RAW-image-denoising
|
9f81be8da6a576f641022707d98b8c37f5c599ab
|
[
"MIT"
] | 4
|
2021-10-18T04:13:52.000Z
|
2022-03-10T14:10:46.000Z
|
predict.py
|
SuperbTUM/computational-photography
|
9f81be8da6a576f641022707d98b8c37f5c599ab
|
[
"MIT"
] | 2
|
2021-12-10T02:59:30.000Z
|
2022-03-10T03:32:09.000Z
|
predict.py
|
SuperbTUM/computational-photography
|
9f81be8da6a576f641022707d98b8c37f5c599ab
|
[
"MIT"
] | 1
|
2021-12-10T02:57:34.000Z
|
2021-12-10T02:57:34.000Z
|
import numpy as np
from tqdm import tqdm
from utils import DataLoaderX
from dataset import collate
from math import ceil
def prediction(data, model, batch_size, cuda):
data_loader = DataLoaderX(data, batch_size=batch_size, collate_fn=collate, num_workers=0)
model.training = False
iterator = tqdm(data_loader)
out = []
for sample in iterator:
sample['data'] = sample['data'].float()
if cuda:
out += model(sample['data']).cpu()
else:
out += model(sample['data'])
return out
def recovery(ori_shape, output, size):
if size[0] >= ori_shape[1] or size[1] >= ori_shape[2]:
# de-padding
output = output[0].detach().numpy()
diff_x = size[0] - ori_shape[1]
diff_y = size[1] - ori_shape[2]
return output[:, diff_x // 2:-(diff_x - diff_x // 2),
diff_y // 2:-(diff_y - diff_y // 2)]
h, w = size[0], size[1]
cols = ceil(ori_shape[2] / w)
rows = ceil(ori_shape[1] / h)
assert rows * cols == len(output)
results = np.zeros((ori_shape[0], rows * size[0], cols * size[1]))
for i, out in enumerate(output):
out = out.detach().numpy()
out = out[:, 8:-8, 8:-8]
end_col = (i + 1) % cols * size[1] if (i + 1) % cols > 0 else cols * size[1]
results[:, i // cols * size[0]:(i // cols + 1) * size[0],
i % cols * size[1]:end_col] = out
return results[:, 0:ori_shape[1], 0:ori_shape[2]]
if __name__ == '__main__':
a = np.zeros((4, 3, 3))
print(a[:, 0:-1, 0:-1].shape)
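# Hedged usage sketch (added for illustration; `dataset` and `model` are
# hypothetical): prediction() runs the model tile by tile, and recovery()
# stitches the tiles back to the original image shape, dropping the 8-pixel
# border each tile loses via out[:, 8:-8, 8:-8].
# tiles = prediction(dataset, model, batch_size=4, cuda=False)
# image = recovery(ori_shape=(3, 512, 512), output=tiles, size=(128, 128))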
| 32.416667
| 93
| 0.561697
| 237
| 1,556
| 3.540084
| 0.28692
| 0.095352
| 0.042908
| 0.035757
| 0.066746
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039648
| 0.270566
| 1,556
| 47
| 94
| 33.106383
| 0.699559
| 0.006427
| 0
| 0
| 0
| 0
| 0.015544
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 1
| 0.051282
| false
| 0
| 0.128205
| 0
| 0.25641
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d188ece94a0b8fbdf8e8a6257addd7cf8fc804b8
| 3,318
|
py
|
Python
|
token_shift_gpt/autoregressive_wrapper.py
|
fwcwmc/token-shift-gpt
|
58c946a8a59976681a90424be5db85ed9a034a59
|
[
"MIT"
] | null | null | null |
token_shift_gpt/autoregressive_wrapper.py
|
fwcwmc/token-shift-gpt
|
58c946a8a59976681a90424be5db85ed9a034a59
|
[
"MIT"
] | null | null | null |
token_shift_gpt/autoregressive_wrapper.py
|
fwcwmc/token-shift-gpt
|
58c946a8a59976681a90424be5db85ed9a034a59
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from math import ceil  # used by top_k below; missing from the original imports
from tqdm import tqdm
from entmax import entmax_bisect
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top p (nucleus) filtering
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = -float("Inf")
logits[probs >= limit] = 1
return logits
ENTMAX_ALPHA = 1.3
entmax = entmax_bisect
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, min_p_pow=2.0, min_p_ratio=0.02, **kwargs):
device = start_tokens.device
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
out = start_tokens
for _ in tqdm(range(seq_len)):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is entmax:
probs = entmax(logits / temperature, alpha = ENTMAX_ALPHA, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
return out
def forward(self, x, **kwargs):
xi, xo = x[:, :-1], x[:, 1:]
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
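# Usage sketch (not in the original file; `net` is a hypothetical token-shift
# GPT module exposing a .seq_len attribute, as __init__ expects):
# wrapper = AutoregressiveWrapper(net)
# prime = torch.randint(0, 256, (1, 8))
# out = wrapper.generate(prime, seq_len=64, temperature=1.0,
#                        filter_logits_fn=top_p, filter_thres=0.9)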
| 30.440367
| 171
| 0.622664
| 472
| 3,318
| 4.125
| 0.252119
| 0.020544
| 0.043143
| 0.053929
| 0.174114
| 0.13662
| 0.085259
| 0.085259
| 0.085259
| 0.085259
| 0
| 0.019894
| 0.257685
| 3,318
| 108
| 172
| 30.722222
| 0.770605
| 0.012658
| 0
| 0.08
| 0
| 0
| 0.003363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106667
| false
| 0
| 0.066667
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d18def4fbac29199069d5db3991e15a8d8b23343
| 2,225
|
py
|
Python
|
rl_multi_agent/experiments/furnmove_grid_marginal_nocl_rot_config.py
|
allenai/cordial-sync
|
4005fdc4816c86f6489e5f4b9252fa66b79602be
|
[
"MIT"
] | 28
|
2020-07-07T16:21:10.000Z
|
2021-11-15T11:15:20.000Z
|
rl_multi_agent/experiments/furnmove_grid_marginal_nocl_rot_config.py
|
allenai/cordial-sync
|
4005fdc4816c86f6489e5f4b9252fa66b79602be
|
[
"MIT"
] | 5
|
2020-09-29T07:54:43.000Z
|
2022-01-04T22:33:02.000Z
|
rl_multi_agent/experiments/furnmove_grid_marginal_nocl_rot_config.py
|
allenai/cordial-sync
|
4005fdc4816c86f6489e5f4b9252fa66b79602be
|
[
"MIT"
] | 2
|
2022-02-01T19:50:27.000Z
|
2022-03-21T12:23:16.000Z
|
from typing import Optional
from torch import nn
from rl_multi_agent.experiments.furnmove_grid_marginal_nocl_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveGridExperimentConfig(FurnMoveExperimentConfig):
# Increasing the params of marginal to match mixture
final_cnn_channels = 288
@classmethod
def get_init_train_params(cls):
init_train_params = FurnMoveExperimentConfig.get_init_train_params()
init_train_params["environment_args"] = {"min_steps_between_agents": 2}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
**{
**dict(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
return _create_model(**kwargs)
def get_experiment():
return FurnMoveGridExperimentConfig()
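# Minimal usage sketch (illustrative only; the training framework is assumed
# to consume the config object returned by get_experiment()):
# config = get_experiment()
# model = config.create_model()  # A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN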
| 38.362069
| 86
| 0.621573
| 214
| 2,225
| 6.03271
| 0.420561
| 0.059644
| 0.058095
| 0.024787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006645
| 0.323596
| 2,225
| 57
| 87
| 39.035088
| 0.851163
| 0.022472
| 0
| 0.086957
| 0
| 0
| 0.018408
| 0.011045
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.086957
| 0.065217
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d191008bf7777b6c542e5bf7e0d000e40eac38e6
| 3,101
|
py
|
Python
|
apps/roster/views.py
|
dulrich15/spot
|
5fa57dbb9c0c9a010b4dc153f832b2d130bc8f73
|
[
"MIT"
] | null | null | null |
apps/roster/views.py
|
dulrich15/spot
|
5fa57dbb9c0c9a010b4dc153f832b2d130bc8f73
|
[
"MIT"
] | null | null | null |
apps/roster/views.py
|
dulrich15/spot
|
5fa57dbb9c0c9a010b4dc153f832b2d130bc8f73
|
[
"MIT"
] | null | null | null |
from __future__ import division
from __future__ import unicode_literals
import re
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import RequestContext
from django.template import loader
from models import *
from apps.core.views import get_bg_color
def list_students(request, classroom_slug):
if not request.user.is_staff:
return redirect('show_page', classroom_slug)
try:
classroom = Classroom.objects.get(slug=classroom_slug)
except:
return redirect('core_index')
context = {
'classroom': classroom,
'bg_color': get_bg_color(request),
}
template = 'roster/list_students.html'
c = RequestContext(request, context)
t = loader.get_template(template)
return HttpResponse(t.render(c))
def edit_student_list(request, classroom_slug):
if not request.user.is_staff:
return redirect('show_page', classroom_slug)
try:
classroom = Classroom.objects.get(slug=classroom_slug)
except:
return redirect('core_index')
students = Student.objects.filter(classroom=classroom)
student_list_csv = ''
for student in students:
student_csv = ','.join([student.last_name,student.first_name,''])
student_list_csv += student_csv + '\n'
context = {
'student_list_csv': student_list_csv,
'classroom': classroom,
'bg_color': get_bg_color(request),
}
template = 'roster/edit_student_list.html'
c = RequestContext(request, context)
t = loader.get_template(template)
return HttpResponse(t.render(c))
def post_student_list(request, classroom_slug):
if not request.user.is_staff:
return redirect('show_page', classroom_slug)
try:
classroom = Classroom.objects.get(slug=classroom_slug)
except:
return redirect('core_index')
students = Student.objects.filter(classroom=classroom)
if 'submit' in request.POST:
for student in students: # really should only delete those not in POST...
student.delete()
student_list = request.POST['student_list_csv'].splitlines()
for line in student_list:
[last_name, first_name, password] = [x.strip() for x in line.split(',')]
username = first_name[0].lower()
username += re.sub(r'[^a-z]', '', last_name.lower())[:7]
try:
student_user = User.objects.get(username=username)
except:
student_user = User()
student_user.username = username
student_user.last_name = last_name
student_user.first_name = first_name
student_user.set_password(password)
student_user.save()
student = Student()
student.classroom = classroom
student.user = student_user
student.save()
return redirect('list_students', classroom_slug)
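# Illustrative note (not in the original file): post_student_list expects one
# "last,first,password" triple per line in request.POST['student_list_csv'],
# and builds usernames from the first initial plus up to 7 letters of the
# last name, e.g. "Doe,Jane,s3cret" -> username "jdoe".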
| 28.449541
| 84
| 0.653015
| 361
| 3,101
| 5.368421
| 0.221607
| 0.068111
| 0.03612
| 0.034056
| 0.516512
| 0.516512
| 0.516512
| 0.516512
| 0.447368
| 0.447368
| 0
| 0.000863
| 0.252822
| 3,101
| 108
| 85
| 28.712963
| 0.835563
| 0.014834
| 0
| 0.538462
| 0
| 0
| 0.067519
| 0.017699
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0.025641
| 0.115385
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d191a9aab35b5cf9693c2d15e10ff5f31d5411f3
| 6,635
|
py
|
Python
|
mac-changer.py
|
Hiiirad/Mac-Changer
|
df23de01dde3f55b45a8f0bacb065cf2170feb06
|
[
"MIT"
] | 1
|
2020-08-06T13:39:50.000Z
|
2020-08-06T13:39:50.000Z
|
mac-changer.py
|
Hiiirad/Mac-Changer
|
df23de01dde3f55b45a8f0bacb065cf2170feb06
|
[
"MIT"
] | null | null | null |
mac-changer.py
|
Hiiirad/Mac-Changer
|
df23de01dde3f55b45a8f0bacb065cf2170feb06
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from subprocess import call
from re import search
from random import sample, choice
from csv import reader
from os import popen
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
'''
The strings, input and output of this program is in lowercase. => case-insensitive
List of standard OUI:
http://standards-oui.ieee.org/oui/oui.txt
http://standards-oui.ieee.org/oui/oui.csv
'''
# Validating mac address
def mac_validation(mac):
if search(string=mac, pattern=r"^([0-9a-f]{2}:){5}[0-9a-f]{2}$"):
return "Valid mac"
else:
print("Invalid mac. Check it and try again")
quit()
# Validating Interface
def interface_validation(interface):
if search(string=interface, pattern=r"^(eth|wlan)\d{1}$"):
return "Valid interface"
else:
print("Invalid Interface. Check it and try again")
quit()
hex_characters = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"]
# Checking if user wants to choose new mac address randomly or not
random_or_not = prompt("Do you want your mac address to change randomly? [(Y)es or (N)o]\nOr\nDo you want to choose first part of your mac address based on other manufacturers mac address? [(O)UI]\nOr\nDo you want your mac address back to original one? [(R)everse]\nYour answer: ").lower()
interface = prompt("Please insert name of the interface you want to change its mac: [wlan* or eth*] ").lower()
interface_validation(interface)
if random_or_not == "y" or random_or_not == "yes":
# random mac
random_mac = []
for i in range(6):
random_mac.append("".join(sample(hex_characters, 2)))
random_mac = ":".join(random_mac)
print("Your new mac address will be {0}".format(random_mac))
elif random_or_not == "n" or random_or_not == "no":
# user's new mac
mac = prompt("Please insert your new mac: ").lower()
mac_validation(mac)
elif random_or_not == "r" or random_or_not == "reverse":
# back to normal
if search(string=interface, pattern=r"^eth\d{1}$"):
with open(file="/tmp/eth-old-mac.txt", mode="r", encoding="utf-8") as old_mac:
mac = old_mac.readline()
elif search(string=interface, pattern=r"^wlan\d{1}$"):
with open(file="/tmp/wlan-old-mac.txt", mode="r", encoding="utf-8") as old_mac:
mac = old_mac.readline()
elif random_or_not == "o" or random_or_not == "oui":
oui = {}
# Creating Template of our dictionary (OUI)
with open(file="oui.csv", mode="r", encoding="utf-8") as csvfile:
csvreader = reader(csvfile)
next(csvreader) # ignore first row of csv which is header
for row in csvreader:
        oui[str(row[2]).replace("  ", " ")] = []  # collapse double spaces; the original's same-character replace looks garbled
# Fill values of dictionary (OUI)
with open(file="oui.csv", mode="r", encoding="utf-8") as csvfile:
csvreader = reader(csvfile)
next(csvreader) # ignore first row of csv which is header
for row in csvreader:
        value = oui[str(row[2]).replace("  ", " ")]  # same key normalisation as above
        if len(str(row[1])) > 6:
            continue
        else:
            value.append(str(row[1]))
        oui[str(row[2]).replace("  ", " ")] = value  # keep the key consistent with the lookup
# Deleting keys with empty values []
# 273 keys were deleted from list.
for key, value in list(oui.items()):
if value == []:
del oui[key]
random_organization = prompt("Do you want to choose your mac address from specific manufacturer? [(Y)es or (N)o] ").lower()
if random_organization == "y" or random_organization == "yes":
organizations = WordCompleter(list(oui.keys()), ignore_case=True)
organization = prompt("Please select an organization name: ", completer=organizations)
print("You will be using mac address of '{0}' organization.".format(organization))
random_oui = choice(oui.get("{0}".format(organization)))
character_need = 12 - len(random_oui)
mac_without_colon = random_oui + str("".join(sample(hex_characters, character_need)))
mac = mac_without_colon[0:2] + ":" + mac_without_colon[2:4] + ":" + mac_without_colon[4:6] + ":" + mac_without_colon[6:8] + ":" + mac_without_colon[8:10] + ":" + mac_without_colon[10:12]
mac = mac.lower()
print("Your new mac address will be {0}".format(mac))
elif random_organization == "n" or random_organization == "no":
organization = choice(list(oui.keys()))
print("You will be using mac address of '{0}' organization.".format(organization))
random_oui = choice(oui.get("{0}".format(organization)))
character_need = 12 - len(random_oui)
mac_without_colon = random_oui + str("".join(sample(hex_characters, character_need)))
mac = mac_without_colon[0:2] + ":" + mac_without_colon[2:4] + ":" + mac_without_colon[4:6] + ":" + mac_without_colon[6:8] + ":" + mac_without_colon[8:10] + ":" + mac_without_colon[10:12]
mac = mac.lower()
print("Your new mac address will be {0}".format(mac))
else:
print("Please choose your answer correctly!")
quit()
else:
print("Please check your answer!")
quit()
# Saving old mac addresses | delete text files in reverse mode
if random_or_not == "r" or random_or_not == "reverse":
delete = prompt("Do you want to delete files related to your old mac address? [(Y)es or (N)o] ").lower()
if delete == "y" or delete =="yes":
call("rm /tmp/eth-old-mac.txt /tmp/wlan-old-mac.txt", shell=True)
elif delete == "n" or delete =="no":
pass
else:
print("Please check your answer! What do you want to do with old mac address text files?!")
quit()
else:
call("ip addr | grep -E 'ether' | cut --delimiter=' ' -f 6 | sed -n '1p' > /tmp/eth-old-mac.txt", shell=True)
call("ip addr | grep -E 'ether' | cut --delimiter=' ' -f 6 | sed -n '2p' > /tmp/wlan-old-mac.txt", shell=True)
# Checking kernel version to call different commands
kernel_version = popen("uname -r").read()
if float(".".join(kernel_version.split(".")[:2])) < 4.15:
# Start changing mac address for kernel versions lower than 4.15
call("ifconfig {0} down".format(interface), shell=True)
call("ifconfig {0} hw ether {1}".format(interface, mac), shell=True)
call("ifconfig {0} up".format(interface), shell=True)
else:
# Start changing mac address for kernel versions higher than 4.15
call("ip link set {0} down".format(interface), shell=True)
call("ip link set {0} address {1}".format(interface, mac), shell=True)
call("ip link set {0} up".format(interface), shell=True)
print("Done :)")
| 46.398601
| 289
| 0.64009
| 980
| 6,635
| 4.239796
| 0.232653
| 0.038508
| 0.050542
| 0.015644
| 0.51432
| 0.454874
| 0.408905
| 0.304452
| 0.304452
| 0.280626
| 0
| 0.018692
| 0.209797
| 6,635
| 142
| 290
| 46.725352
| 0.773794
| 0.091937
| 0
| 0.342593
| 0
| 0.055556
| 0.273963
| 0.016004
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0.009259
| 0.074074
| 0
| 0.111111
| 0.101852
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d194460e4175a7a303de85ee742fccf7806780cb
| 3,029
|
py
|
Python
|
scripts/check_status.py
|
frangiz/walter-server
|
0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639
|
[
"MIT"
] | null | null | null |
scripts/check_status.py
|
frangiz/walter-server
|
0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639
|
[
"MIT"
] | 21
|
2019-09-16T08:08:17.000Z
|
2020-05-27T06:49:34.000Z
|
scripts/check_status.py
|
frangiz/walter-server
|
0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639
|
[
"MIT"
] | 1
|
2019-10-16T11:23:38.000Z
|
2019-10-16T11:23:38.000Z
|
import datetime
import json
import os
import requests
import smtplib
import ssl
def check_status(config):
new_state = get_current_state()
last_known_state = get_last_known_state()
activated = get_activated(new_state, last_known_state)
deactivated = get_deactivated(new_state, last_known_state)
save_state(new_state)
if len(activated) == 0 and len(deactivated) == 0:
print("No change in the state, will not send any email.")
return
send_email(config, create_msg(activated, deactivated))
def send_email(config, msg):
context = ssl.create_default_context()
with smtplib.SMTP_SSL(config["host"], config["port"], context=context) as server:
server.login(config["sender_email"], config["password"])
server.sendmail(config["sender_email"], config["recipients"], msg)
server.quit()
print("Email sent.")
def create_msg(activated_sensors, deactivated_sensors):
msg = ["Subject: Sensors have changed state", ""]
if len(deactivated_sensors) > 0:
msg.append(
"I am sorry to inform you that one or more sensors might not be"
" active anymore. I have failed to receive status from:"
)
[msg.append("* " + sensor) for sensor in deactivated_sensors]
msg.append("")
if len(activated_sensors) > 0:
msg.append("Some sensors have been activated again:")
[msg.append("* " + sensor) for sensor in activated_sensors]
msg.append("")
msg.append("This message was generated {}".format(datetime.datetime.utcnow()))
msg.append("")
msg.append("Yours sincerely,")
msg.append("Walter")
return "\r\n".join(msg)
def get_activated(new_state, old_state):
result = []
for sensor, value in new_state.items():
if value is True and sensor in old_state and old_state[sensor] is False:
result.append(sensor)
return result
def get_deactivated(new_state, old_state):
result = []
for sensor, value in new_state.items():
if value is False and sensor in old_state and old_state[sensor] is True:
result.append(sensor)
return result
def get_last_known_state():
state_file_path = os.path.join("scripts", "check_status_state.json")
if not os.path.exists(state_file_path):
return {}
with open(state_file_path, "r") as f:
data = f.read()
if data == "":
return {}
return json.loads(data)
def save_state(state):
state_file_path = os.path.join("scripts", "check_status_state.json")
with open(state_file_path, "w+") as f:
json.dump(state, f, ensure_ascii=False, indent=4)
def get_current_state():
return {sensor["name"]: sensor["is_active"] for sensor in get_sensors()}
def get_sensors():
return requests.get("http://localhost:5000/api/sensors").json()
if __name__ == "__main__":
with open(os.path.join("scripts", "check_status_config.json"), "r") as f:
data = f.read()
config = json.loads(data)
check_status(config)
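# Illustrative shape of scripts/check_status_config.json (keys inferred from
# the send_email() accesses above; all values here are placeholders):
# {
#     "host": "smtp.example.com",
#     "port": 465,
#     "sender_email": "walter@example.com",
#     "password": "...",
#     "recipients": ["ops@example.com"]
# }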
| 30.908163
| 85
| 0.665896
| 415
| 3,029
| 4.66506
| 0.286747
| 0.046488
| 0.036157
| 0.029442
| 0.294421
| 0.25
| 0.195248
| 0.158058
| 0.158058
| 0.158058
| 0
| 0.003775
| 0.212942
| 3,029
| 97
| 86
| 31.226804
| 0.808305
| 0
| 0
| 0.226667
| 0
| 0
| 0.167382
| 0.02311
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0.013333
| 0.08
| 0.026667
| 0.32
| 0.026667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d19579492873b16f25e4b138c45496b98a9c1bd3
| 5,340
|
py
|
Python
|
mngSettings.py
|
guidanoli/fibonaccibot
|
fead3a151835648f7140945b94afdd0f32aa55ce
|
[
"MIT"
] | null | null | null |
mngSettings.py
|
guidanoli/fibonaccibot
|
fead3a151835648f7140945b94afdd0f32aa55ce
|
[
"MIT"
] | null | null | null |
mngSettings.py
|
guidanoli/fibonaccibot
|
fead3a151835648f7140945b94afdd0f32aa55ce
|
[
"MIT"
] | null | null | null |
# Settings Manager
# guidanoli
DEFAULT_STGS = {
"commas": "true",
"comments": "true",
"tknlistpath": "tknlist.tk",
"tokenpath": "token.tk"
}
SETTINGS_PATH = "fibonacci.cfg"
TYPE_STR = type("")
TYPE_LIST = type([])
def _validateFilename( filename , extension = "" ):
    from re import match
    # re.match() returns None on failure; the original called .groups() on it,
    # which raised AttributeError for invalid names instead of returning False
    return match("[^<>:\"\\/|?*]+" + extension, filename) is not None
def _validateEdit( label , new_value ):
if label == "commas":
return new_value in ["true","false"]
elif label == "comment":
return new_value in ["true", "false"]
elif label == "tknlistpath":
return _validateFilename(new_value,".tk")
elif label == "tokenpath":
return _validateFilename(new_value,".tk")
def _validateString( s ):
# returns True if OK, False if invalid
assert(type(s)==TYPE_STR)
return not( True in [ (c in ['=','\n']) for c in s ] )
def _writeSettings( settings_list ):
assert(type(settings_list)==TYPE_LIST)
try:
f = open(SETTINGS_PATH,"w")
f.write( "\n".join([ "=".join(s) for s in settings_list ]) )
f.close()
except IOError:
print("Could not write cfg file.")
return False
return True
def _getSettingsList():
# returns settings as
# <dict> "ok":boolean
# if ok == True , TYPE_LIST:list
try:
f = open(SETTINGS_PATH,"r")
l = [ p.strip().split('=') for p in f ]
f.close()
except FileNotFoundError:
print("Could not find cfg file. Creating default cfg file...")
if _generateSettingsFile():
print("The default cfg file was created successfully. Re-run me.")
return None
except IOError:
print("Could not read cfg file.")
return None
return l
def _generateSettingsFile():
# generates cfg file according to default settings
# returns True if successful and False if error occurred on I/O
return _writeSettings([ [k,v] for k,v in DEFAULT_STGS.items() ])
def _validateSettingFormat( s ):
if type(s) != TYPE_LIST:
print("Setting isn't table.")
return False
if len(s) != 2:
print("Setting table size is wrong.")
return False
if True in [ type(x) != TYPE_STR for x in s]:
print("Settings variables aren't string.")
return False
if False in [ _validateString(x) for x in s]:
print("Settings variables are invalid.")
return False
return True
def _getSettingLabel( s ):
assert(_validateSettingFormat(s))
return s[0]
def _getSettingValue( s ):
assert(_validateSettingFormat(s))
return s[1]
def _formatSetting( label, new_value ):
return [label,new_value]
def _getSettingValueFromLabel( settings_list , label ):
assert(type(settings_list)==TYPE_LIST)
assert(type(label)==TYPE_STR)
for s in settings_list:
if _getSettingLabel(s) == label:
return _getSettingValue(s)
return None
def _printSettings( settings_list ):
assert(type(settings_list)==TYPE_LIST)
print("{:<20}{:<20}".format("Label","Value"))
print("-"*40)
for s in settings_list:
if not _validateSettingFormat(s):
return
print("{:<20}{:<20}".format(_getSettingLabel(s),_getSettingValue(s)))
if len(settings_list) == 0:
print("No settings found.")
def _editSetting( settings_list , label , new_value ):
# saves the new value in the cfg file
assert(type(settings_list)==TYPE_LIST)
assert(type(label)==TYPE_STR)
assert(type(new_value)==TYPE_STR)
if len(new_value) == 0 or not _validateString(new_value):
print("\nInvalid string for new value.")
return False
lbl_list = [ _getSettingLabel(s) for s in settings_list ]
if not label in lbl_list:
print("\nUnexpected error occurred. Label not in list.")
return False
if not _validateEdit(label,new_value):
print("\nNew value does not meet label requirementes. Check README.")
return False
idx = lbl_list.index(label)
settings_list[idx] = _formatSetting(label,new_value)
return _writeSettings(settings_list)
def getSetting( label ):
# returns setting value through label
# returns None if error occurrs
assert(type(label)==TYPE_STR)
slist = _getSettingsList()
if slist == None:
return None
return _getSettingValueFromLabel(slist,label)
def launch( cmd ):
assert(type(cmd)==TYPE_STR)
if cmd == 'sd':
#resets settings to default
if _generateSettingsFile():
print("Settings were set to default.")
elif cmd in ['se','sv']:
#print settings list
slist = _getSettingsList()
if slist == None:
print("Could not print settings list.\n")
return
_printSettings(slist)
if cmd == 'se':
print()
lbl = input("Label: ")
curr_value = _getSettingValueFromLabel(slist,lbl)
if curr_value == None:
print("Label not recognized.\n")
return
print("Current value for '"+lbl+"': "+curr_value)
new_value = input("Setting new value: ")
if _editSetting(slist,lbl,new_value):
print("New value set successfully.")
else:
print("Command '"+cmd+"' not recognized.")
print()
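# Usage sketch (not part of the original file): launch() dispatches on
# 'sd' (reset to defaults), 'sv' (view settings) and 'se' (edit a setting):
# launch('sv')                    # prints the current settings table
# print(getSetting('tokenpath'))  # -> 'token.tk' with default settings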
| 31.597633
| 78
| 0.618914
| 649
| 5,340
| 4.949153
| 0.237288
| 0.047323
| 0.024284
| 0.027397
| 0.257783
| 0.160648
| 0.11208
| 0.079701
| 0.032379
| 0.032379
| 0
| 0.003797
| 0.260112
| 5,340
| 168
| 79
| 31.785714
| 0.809162
| 0.073408
| 0
| 0.335766
| 0
| 0
| 0.162173
| 0
| 0
| 0
| 0
| 0
| 0.087591
| 1
| 0.109489
| false
| 0
| 0.007299
| 0.014599
| 0.343066
| 0.182482
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d19832a8ebc406b607f9daf11bbd5483f8a533f1
| 632
|
py
|
Python
|
core/commands/public/staff.py
|
Smashulica/nebula8
|
010df165e3cc61e0154d20310fa972482ec0e7be
|
[
"Apache-2.0"
] | null | null | null |
core/commands/public/staff.py
|
Smashulica/nebula8
|
010df165e3cc61e0154d20310fa972482ec0e7be
|
[
"Apache-2.0"
] | null | null | null |
core/commands/public/staff.py
|
Smashulica/nebula8
|
010df165e3cc61e0154d20310fa972482ec0e7be
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright SquirrelNetwork
from core import decorators
from telegram.utils.helpers import mention_markdown
@decorators.public.init
@decorators.delete.init
def init(update,context):
bot = context.bot
administrators = update.effective_chat.get_administrators()
chat = update.effective_chat.id
string = "Group Staff:\n"
for admin in administrators:
user = admin.user
user_first = user.first_name
string += "👮 {}\n".format(mention_markdown(user.id, user_first, version=2))
bot.send_message(chat,string,parse_mode='MarkdownV2')
| 31.6
| 85
| 0.705696
| 80
| 632
| 5.4625
| 0.6125
| 0.061785
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005792
| 0.18038
| 632
| 20
| 86
| 31.6
| 0.835907
| 0.107595
| 0
| 0
| 0
| 0
| 0.053381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d19cf55d1eed29f5e017627cc562825403fd7101
| 2,937
|
py
|
Python
|
方法二/無LM算capacity.py
|
jell0213/MUNIT_DataHiding
|
75cb80a7ee5175c0a2235336e230ce3759f5b296
|
[
"Unlicense"
] | null | null | null |
方法二/無LM算capacity.py
|
jell0213/MUNIT_DataHiding
|
75cb80a7ee5175c0a2235336e230ce3759f5b296
|
[
"Unlicense"
] | null | null | null |
方法二/無LM算capacity.py
|
jell0213/MUNIT_DataHiding
|
75cb80a7ee5175c0a2235336e230ce3759f5b296
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
#######################################################
'''
input
    path
    number of images
    MOD value
    embedding rate
processing
    takes the data of one image at a time, including:
    1. folder name
    2. file name (image), recorded in the xlsx file only
    3. output path (xlsx)
    4. embedding mod value
    5. embedding rate
output
    produces an xlsx file for the input images (each image's data is
    written to the xlsx in order), including the total execution time
'''
#######################################################
from skimage import io
from openpyxl import Workbook
import openpyxl
import os
import math
import time
def cal_capacity(in_dir,
num_image,
num_mod,
embed_ratio):
wb = Workbook()
ws = wb.active
ws.append(["無LM","mod="+str(num_mod),str(embed_ratio)+"%","256*256"])
ws.append(["檔名","嵌密量","bpp"])
    a = []  # accumulates the per-column totals used for the averages
for i in range(2):
a.append(0)
for i in range(num_image):
        f_code = open(in_dir+"/output{:08d}".format(i)+"/output{:08d}_code.txt".format(i), 'r')  # open the location-map/code .txt to compute the capacity
        words = f_code.read()
        num_words = len(words)
        num_words *= math.log(num_mod, 2)  # capacity
        bpp = num_words/(256*256)  # embedding rate (%) (same for txt and png)
        ws.append(["output{:08d}".format(i),
                   float('%.2f' % round(num_words, 2)),  # round to the given number of decimal places
                   float('%.2f' % round(bpp, 2))])
a[0]+=num_words
a[1]+=bpp
if i % 250 == 0 :
print(i)
for i in range(2):
a[i]/=num_image
ws.append(["檔名","嵌密量","bpp"])
ws.append([
"",
float('%.2f'%round(a[0],2)),
float('%.2f'%round(a[1],2)),
])
    wb.save(in_dir+"/NLM-mod{:d}_capacity".format(num_mod)+"({:d}%).xlsx".format(embed_ratio))  # write and save the workbook
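# Worked example (added for clarity): with num_mod = 3, a code file of
# 10000 characters embeds 10000 * log2(3) ~= 15849.6 bits, giving
# bpp ~= 15849.6 / (256*256) ~= 0.24.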
# --------------------------------------------------------------------------- settings
in_dir = "D:\\108resercher\\====######RESEARCH######====\\GAN-research\\12.8\\無LM嵌密結果\\100%MOD3"
num_image = 5000
num_mod = 3
embed_ratio = 100
# --------------------------------------------------------------------------- settings
tStart = time.time()  # start timing
cal_capacity(in_dir, num_image, num_mod, embed_ratio)  # run the program
tEnd = time.time()  # stop timing
wb = openpyxl.load_workbook(in_dir+"/NLM-mod{:d}_capacity".format(num_mod)+"({:d}%).xlsx".format(embed_ratio))
ws = wb['Sheet']
ws.append(["total time", str(round(tEnd-tStart, 2))+" s"])
wb.save(in_dir+"/NLM-mod{:d}_capacity".format(num_mod)+"({:d}%).xlsx".format(embed_ratio))  # write and save the workbook
| 40.232877
| 138
| 0.405856
| 301
| 2,937
| 3.82392
| 0.355482
| 0.041703
| 0.041703
| 0.028671
| 0.27715
| 0.249348
| 0.226759
| 0.226759
| 0.226759
| 0.226759
| 0
| 0.033762
| 0.364658
| 2,937
| 72
| 139
| 40.791667
| 0.583065
| 0.172625
| 0
| 0.12
| 0
| 0
| 0.128696
| 0.073913
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.12
| 0
| 0.14
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d19f00e9b0507a59fe36f2031d52bc840d7e8792
| 2,592
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/test_hashi_vault_option_group_base.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/test_hashi_vault_option_group_base.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/test_hashi_vault_option_group_base.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Brian Scholer (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
from ansible_collections.community.hashi_vault.tests.unit.compat import mock
from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_common import (
HashiVaultOptionGroupBase,
HashiVaultOptionAdapter,
)
PREREAD_OPTIONS = {
'opt1': 'val1',
'opt2': None,
'opt3': 'val3',
'opt4': None,
# no opt5
'opt6': None,
}
LOW_PREF_DEF = {
'opt1': dict(env=['_ENV_1A'], default='never'),
'opt2': dict(env=['_ENV_2A', '_ENV_2B']),
'opt4': dict(env=['_ENV_4A', '_ENV_4B', '_ENV_4C']),
'opt5': dict(env=['_ENV_5A']),
'opt6': dict(env=['_ENV_6A'], default='mosdefault'),
}
@pytest.fixture
def preread_options():
return PREREAD_OPTIONS.copy()
@pytest.fixture
def adapter(preread_options):
return HashiVaultOptionAdapter.from_dict(preread_options)
@pytest.fixture
def option_group_base(adapter):
return HashiVaultOptionGroupBase(adapter)
@pytest.fixture(params=[
# first dict is used to patch the environment vars
# second dict is used to patch the current options to get them to the expected state
#
# envpatch, expatch
({}, {'opt6': 'mosdefault'}),
({'_ENV_1A': 'alt1a'}, {'opt6': 'mosdefault'}),
({'_ENV_3X': 'noop3x'}, {'opt6': 'mosdefault'}),
({'_ENV_2B': 'alt2b'}, {'opt2': 'alt2b', 'opt6': 'mosdefault'}),
({'_ENV_2A': 'alt2a', '_ENV_2B': 'alt2b'}, {'opt2': 'alt2a', 'opt6': 'mosdefault'}),
({'_ENV_4B': 'alt4b', '_ENV_6A': 'defnot', '_ENV_4C': 'alt4c'}, {'opt4': 'alt4b', 'opt6': 'defnot'}),
({'_ENV_1A': 'alt1a', '_ENV_4A': 'alt4a', '_ENV_1B': 'noop1b', '_ENV_4C': 'alt4c'}, {'opt4': 'alt4a', 'opt6': 'mosdefault'}),
({'_ENV_5A': 'noop5a', '_ENV_4C': 'alt4c', '_ENV_2A': 'alt2a'}, {'opt2': 'alt2a', 'opt4': 'alt4c', 'opt6': 'mosdefault'}),
])
def with_env(request, preread_options):
envpatch, expatch = request.param
expected = preread_options.copy()
expected.update(expatch)
with mock.patch.dict(os.environ, envpatch):
yield expected
class TestHashiVaultOptionGroupBase(object):
def test_process_late_binding_env_vars(self, option_group_base, with_env, preread_options):
option_group_base.process_late_binding_env_vars(LOW_PREF_DEF)
assert preread_options == with_env, "Expected: %r\nGot: %r" % (with_env, preread_options)
| 31.609756
| 129
| 0.669367
| 321
| 2,592
| 5.099688
| 0.411215
| 0.085522
| 0.062309
| 0.037874
| 0.10507
| 0.074527
| 0
| 0
| 0
| 0
| 0
| 0.03604
| 0.154321
| 2,592
| 81
| 130
| 32
| 0.710766
| 0.121914
| 0
| 0.057692
| 0
| 0
| 0.21438
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 1
| 0.096154
| false
| 0
| 0.096154
| 0.057692
| 0.269231
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d19f23d69e8c497f2703e0ce9519ea14add2903f
| 2,695
|
py
|
Python
|
app/client.py
|
akakou/privacy-enhanced-antivirus
|
4cd32b27374016dd489eb13ac196c2c044912933
|
[
"MIT"
] | null | null | null |
app/client.py
|
akakou/privacy-enhanced-antivirus
|
4cd32b27374016dd489eb13ac196c2c044912933
|
[
"MIT"
] | null | null | null |
app/client.py
|
akakou/privacy-enhanced-antivirus
|
4cd32b27374016dd489eb13ac196c2c044912933
|
[
"MIT"
] | null | null | null |
from kivy.lang import Builder
import array
import scipy
import os
import syft as sy
import tensorflow as tf
import numpy
import time
import sys
from dataset import get_dataset
from cluster import get_cluster
from PIL import Image
import leargist
from skimage import transform
from imageio import imsave
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.core.window import Window
from kivy.uix.label import Label
# from kivy.uix.label import Label
from kivy.uix.screenmanager import CardTransition
THRESHOLD = 0
MAX_PATH_SIZE = 22
def read_file(filepath):
with open(filepath, 'rb') as f:
ln = os.path.getsize(filepath)
width = 256
rem = ln % width
a = array.array("B")
a.fromfile(f, ln-rem)
g = numpy.reshape(a, (int(len(a) / width), width))
g = numpy.uint8(g)
print(g)
imsave('/tmp/tmp.png', g)
pilimg = Image.open('/tmp/tmp.png')
img_resized = pilimg.resize((64, 64))
desc = leargist.color_gist(img_resized)
data = desc[0:1024]
data = numpy.resize(data, 1024)
data = data.reshape(32, 32, 1)
return data
def run(filepath):
hook = sy.KerasHook(tf.keras)
client = sy.TFEWorker()
cluster = get_cluster()
client.connect_to_model((1, 32, 32, 1), ((1, 25)), cluster)
_, test_X, _, test_Y = get_dataset()
# time.sleep(5)
data = read_file(filepath)
result = client.query_model(numpy.array([data]))
result = numpy.mean(result)
print("result:", result)
return result > THRESHOLD
class MainScreen(Screen):
pass
class SubScreen(Screen):
def __init__(self, title, img, **kwargs):
self.img = img
self.title = title
super(SubScreen, self).__init__(**kwargs)
class AntivirusApp(App):
def build(self):
self.main = MainScreen(name='main')
self.sm = ScreenManager()
self.sm.switch_to(self.main)
# self.sm.add_widget()
# self.sm.add_widget()
Window.bind(on_dropfile=self._on_file_drop)
return self.sm
def _on_file_drop(self, window, file_path):
result = run(file_path)
file_path = file_path.decode()
if len(file_path) > MAX_PATH_SIZE:
file_path = file_path[:MAX_PATH_SIZE] + "..."
if result:
title = f"Danger! \"{file_path}\" is malware :("
img = "malware"
else:
title = f"Safe! \"{file_path}\" is not malware :)"
img = "doc2"
self.sub = SubScreen(title, f"assets/img/{img}.png", name='sub')
self.sm.switch_to(self.sub)
# Builder.load_file('assets/main.kv')
AntivirusApp().run()
| 21.910569
| 72
| 0.638219
| 373
| 2,695
| 4.474531
| 0.345845
| 0.04314
| 0.026363
| 0.02876
| 0.112642
| 0.038945
| 0.038945
| 0.038945
| 0.038945
| 0
| 0
| 0.017604
| 0.241187
| 2,695
| 122
| 73
| 22.090164
| 0.798533
| 0.046011
| 0
| 0.025
| 0
| 0
| 0.047953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.0125
| 0.2625
| 0
| 0.4
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1a1b2cdca7fb838822d102ce1eb3031bc813ef4
| 5,910
|
py
|
Python
|
network/vgg16.py
|
CamilaAlvarez/tensorflow-demo
|
57f576bafe97054046610ded7a9ce39caa7e84b4
|
[
"MIT"
] | null | null | null |
network/vgg16.py
|
CamilaAlvarez/tensorflow-demo
|
57f576bafe97054046610ded7a9ce39caa7e84b4
|
[
"MIT"
] | null | null | null |
network/vgg16.py
|
CamilaAlvarez/tensorflow-demo
|
57f576bafe97054046610ded7a9ce39caa7e84b4
|
[
"MIT"
] | null | null | null |
from network.network import Network
import tensorflow as tf
import numpy as np
class VGG16(Network):
def __init__(self, input_shape, class_number, x, y, train=False, learning_rate=0.001):
super().__init__()
self.loss = None
self.accuracy = None
self._build_network(input_shape, class_number, train, learning_rate, x, y)
def _build_network(self, network_input_shape, class_number, train, starter_learning_rate, x, y):
self.x = x
if train:
self.keep_prob = 0.5
self.y_ = y
self.y = tf.one_hot(self.y_, class_number, 1.0, 0.0)
self.conv1_1 = self.conv_layer('conv1_1', layer_input=self.x, shape=[3, 3, self.x.get_shape()[3].value,
64])
self.conv1_2 = self.conv_layer('conv1_2', layer_input=self.conv1_1, shape=[3, 3, 64, 64])
self.max_pool1 = self.max_pool(self.conv1_2)
self.conv2_1 = self.conv_layer('conv2_1', layer_input=self.max_pool1, shape=[3, 3, 64, 128])
self.conv2_2 = self.conv_layer('conv2_2', layer_input=self.conv2_1, shape=[3, 3, 128, 128])
self.max_pool2 = self.max_pool(self.conv2_2)
self.conv3_1 = self.conv_layer('conv3_1', layer_input=self.max_pool2, shape=[3, 3, 128, 256])
self.conv3_2 = self.conv_layer('conv3_2', layer_input=self.conv3_1, shape=[3, 3, 256, 256])
self.conv3_3 = self.conv_layer('conv3_3', layer_input=self.conv3_2, shape=[3, 3, 256, 256])
self.max_pool3 = self.max_pool(self.conv3_3)
self.conv4_1 = self.conv_layer('conv4_1', layer_input=self.max_pool3, shape=[3, 3, 256, 512])
self.conv4_2 = self.conv_layer('conv4_2', layer_input=self.conv4_1, shape=[3, 3, 512, 512])
self.conv4_3 = self.conv_layer('conv4_3', layer_input=self.conv4_2, shape=[3, 3, 512, 512])
self.max_pool4 = self.max_pool(self.conv4_3)
self.conv5_1 = self.conv_layer('conv5_1', layer_input=self.max_pool4, shape=[3, 3, 512, 512])
self.conv5_2 = self.conv_layer('conv5_2', layer_input=self.conv5_1, shape=[3, 3, 512, 512])
self.conv5_3 = self.conv_layer('conv5_3', layer_input=self.conv5_2, shape=[3, 3, 512, 512])
self.max_pool5 = self.max_pool(self.conv5_3)
self.flat_max_pool5 = tf.reshape(self.max_pool5, shape=[-1, 7*7*512])
self.fc6 = self.fully_connected('fc6', self.flat_max_pool5, 4096)
self.fc6 = tf.nn.relu(self.fc6)
self.fc6 = tf.nn.dropout(self.fc6, keep_prob=self.keep_prob)
self.fc7 = self.fully_connected('fc7', self.fc6, 4096)
self.fc7 = tf.nn.relu(self.fc7)
self.fc7 = tf.nn.dropout(self.fc7, keep_prob=self.keep_prob)
self.fc8 = self.fully_connected('fc8', self.fc7, class_number)
if train:
self.global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step,
decay_steps=100000, decay_rate=0.1, staircase=True)
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.fc8))
self.train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.loss)
correct_prediction = tf.equal(tf.argmax(self.fc8,1), tf.argmax(self.y,1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def train(self, session):
if self.loss is None:
raise RuntimeError('Training a testing network!!')
_, loss_value, accuracy_value = session.run([self.train_step, self.loss, self.accuracy])
print('Loss {:.2f} Accuracy {:.2f}'.format(loss_value, accuracy_value))
def test(self, session, batch, labels):
if self.accuracy is None:
raise RuntimeError('Cannot compute accuracy!!')
accuracy = np.mean([session.run(self.accuracy, feed_dict={self.x: [batch[i]],
self.y_: [labels[i]],
self.keep_prob: 1.0})
for i in range(len(batch))])
print('Accuracy: {:.2f}'.format(accuracy))
def _restore_state(self, session):
self.conv1_1 = self._restore_conv(session, 'conv1_1', layer_input=self.x)
self.conv1_2 = self._restore_conv(session, 'conv1_2', layer_input=self.conv1_1)
self.conv2_1 = self._restore_conv(session, 'conv2_1', layer_input=self.max_pool1)
self.conv2_2 = self._restore_conv(session, 'conv2_2', layer_input=self.conv2_1)
self.conv3_1 = self._restore_conv(session, 'conv3_1', layer_input=self.max_pool2)
self.conv3_2 = self._restore_conv(session, 'conv3_2', layer_input=self.conv3_1)
self.conv3_3 = self._restore_conv(session, 'conv3_3', layer_input=self.conv3_2)
self.conv4_1 = self._restore_conv(session, 'conv4_1', layer_input=self.max_pool3)
self.conv4_2 = self._restore_conv(session, 'conv4_2', layer_input=self.conv4_1)
self.conv4_3 = self._restore_conv(session, 'conv4_3', layer_input=self.conv4_2)
self.conv5_1 = self._restore_conv(session, 'conv5_1', layer_input=self.max_pool4)
self.conv5_2 = self._restore_conv(session, 'conv5_2', layer_input=self.conv5_1)
self.conv5_3 = self._restore_conv(session, 'conv5_3', layer_input=self.conv5_2)
self.fc6 = self._restore_fully_connected(session, 'fc6', self.flat_max_pool5)
self.fc6 = tf.nn.relu(self.fc6)
self.fc6 = tf.nn.dropout(self.fc6, keep_prob=self.keep_prob)
self.fc7 = self._restore_fully_connected(session,'fc7', self.fc6)
self.fc7 = tf.nn.relu(self.fc7)
self.fc7 = tf.nn.dropout(self.fc7, keep_prob=self.keep_prob)
self.fc8 = self._restore_fully_connected(session,'fc8', self.fc7)
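# Hedged construction sketch (illustrative; assumes TF1-style placeholders,
# which this graph-mode code implies):
# x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
# y = tf.placeholder(tf.int64, shape=[None])
# net = VGG16(input_shape=(224, 224, 3), class_number=1000, x=x, y=y, train=True)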
| 57.941176
| 111
| 0.644501
| 885
| 5,910
| 4.030508
| 0.136723
| 0.07289
| 0.102047
| 0.080179
| 0.454163
| 0.335015
| 0.295206
| 0.099243
| 0.087468
| 0.087468
| 0
| 0.073856
| 0.22335
| 5,910
| 101
| 112
| 58.514851
| 0.703268
| 0
| 0
| 0.119048
| 0
| 0
| 0.05011
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059524
| false
| 0
| 0.035714
| 0
| 0.107143
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1a66059d6aa2f43be85ef5e0f0969dc1f348e3f
| 4,299
|
py
|
Python
|
preprocessing/gen_clustering.py
|
HaowenWeiJohn/CV_Project
|
8e2414796f60a8c3fe452f3721e4a6ef7edfdb11
|
[
"MIT"
] | null | null | null |
preprocessing/gen_clustering.py
|
HaowenWeiJohn/CV_Project
|
8e2414796f60a8c3fe452f3721e4a6ef7edfdb11
|
[
"MIT"
] | null | null | null |
preprocessing/gen_clustering.py
|
HaowenWeiJohn/CV_Project
|
8e2414796f60a8c3fe452f3721e4a6ef7edfdb11
|
[
"MIT"
] | null | null | null |
import yaml
import os
import sys
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from utils import load_poses, load_calib, load_files, load_vertex
from preprocessing.utils import *
from example.laserscan import *
from PC_cluster.ScanLineRun_cluster.build import ScanLineRun_Cluster
# data_path = '../data/sequences/08/velodyne/000030.bin'
# label_path = '../data/sequences/08/labels/000030.label'
CFG = yaml.safe_load(open('../config/semantic-kitti-mos.yaml', 'r'))
config_filename = '../config/mask_preparing.yaml'
if len(sys.argv) > 1:
config_filename = sys.argv[1]
if yaml.__version__ >= '5.1':
config = yaml.load(open(config_filename), Loader=yaml.FullLoader)
else:
config = yaml.load(open(config_filename))
# ground truth info
color_dict = CFG["color_map"]
label_transfer_dict = CFG["learning_map"]
nclasses = len(color_dict)
# mask config
data_folder = config['data_folder']
debug = config['debug']
visualize = config['visualize']
range_image_params = config['range_image']
sequences = config['sequences']
sem_scan = LaserScan(project=True,
flip_sign=False,
H=range_image_params['height'],
W=range_image_params['width'],
fov_up=range_image_params['fov_up'],
fov_down=range_image_params['fov_down'])
cluster=ScanLineRun_Cluster.ScanLineRun_Cluster(0.5, 1)
# create mask folder
for sequence in sequences:
sequence_folder = os.path.join(data_folder, sequence)
visualization_folder = config['visualization_folder']
scan_folder = config['scan_folder']
label_folder = config['label_folder']
mask_image_folder = config['mask_image_folder']
visualization_folder = os.path.join(sequence_folder, visualization_folder)
scan_folder = os.path.join(sequence_folder, scan_folder)
label_folder = os.path.join(sequence_folder, label_folder)
mask_image_folder = os.path.join(sequence_folder, mask_image_folder)
# if not os.path.exists(mask_image_folder):
# os.makedirs(mask_image_folder)
#
# # create mask image visualization folder
# if visualize:
# if not os.path.exists(visualization_folder):
# os.makedirs(visualization_folder)
# load labels
scan_paths = load_files(scan_folder)
# label_paths = load_files(label_folder)
# create scan object
# index_range = list(range(0,len(scan_paths)))
print('Clustering:', sequence, 'Frames: ', str(len(scan_paths)))
for frame_idx in tqdm(range(len(scan_paths))):
cluster_file_name = os.path.join(mask_image_folder, str(frame_idx).zfill(6))
sem_scan.open_scan(scan_paths[frame_idx])
# x_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
# y_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
# z_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
instance_label = cluster.ScanLineRun_cluster(sem_scan.proj_xyz[:,:,0],
sem_scan.proj_xyz[:,:,1],
sem_scan.proj_xyz[:,:,2],
sem_scan.proj_mask,
range_image_params['height'],
range_image_params['width']
)
instance_label = np.array(instance_label)
# ground removal
# clustering
# if visualize:
# fig = plt.figure(frameon=False, figsize=(16, 10))
# fig.set_size_inches(20.48, 0.64)
# ax = plt.Axes(fig, [0., 0., 1., 1.])
# ax.set_axis_off()
# fig.add_axes(ax)
# img = label_new.copy()
# img[img<2]=0
# ax.imshow(img, vmin=0, vmax=1)
# image_name = os.path.join(visualization_folder, str(frame_idx).zfill(6))
# plt.savefig(image_name)
# plt.close()
#
# # save to npy file
# label_new_one_hot = depth_onehot(matrix=label_new, category=[0, 1, 2], on_value=1, off_value=0, channel_first=True)
#
# np.save(mask_file_name, [label_new, label_new_one_hot, sem_scan.proj_idx])
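# A hedged addition (not in the original script): since the mask-saving block
# above is commented out, this is a small helper one might call inside the
# frame loop to persist the clustering result; the names mirror the script's
# variables and are assumptions.
def save_instance_label(path, instance_label, proj_idx):
    # np.savez keeps arrays of different shapes without object-array pickling
    np.savez(path, instance_label=instance_label, proj_idx=proj_idx)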
| 34.119048
| 125
| 0.623168
| 545
| 4,299
| 4.631193
| 0.282569
| 0.036054
| 0.04794
| 0.033281
| 0.172345
| 0.14065
| 0.049525
| 0.049525
| 0.039223
| 0.039223
| 0
| 0.017666
| 0.262619
| 4,299
| 125
| 126
| 34.392
| 0.778549
| 0.296813
| 0
| 0.035088
| 0
| 0
| 0.082969
| 0.020826
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.192982
| 0
| 0.192982
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1ac4c4b62c7a69033fe73553cd10cf79ee11495
| 638
|
py
|
Python
|
MyThread.py
|
hectorpadin1/Computer-Vision-Algorithms
|
4ef66353f2453ec1be764787e23260f6ef402e0f
|
[
"MIT"
] | null | null | null |
MyThread.py
|
hectorpadin1/Computer-Vision-Algorithms
|
4ef66353f2453ec1be764787e23260f6ef402e0f
|
[
"MIT"
] | null | null | null |
MyThread.py
|
hectorpadin1/Computer-Vision-Algorithms
|
4ef66353f2453ec1be764787e23260f6ef402e0f
|
[
"MIT"
] | null | null | null |
import threading
import sys
class ReturnValueThread(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.result = None
def run(self):
if self._target is None:
return # could alternatively raise an exception, depends on the use case
try:
self.result = self._target(*self._args, **self._kwargs)
        except Exception as exc:
            print(f'{type(exc).__name__}: {exc}', file=sys.stderr)  # log the error; result stays None so join() signals failure
def join(self, *args, **kwargs):
super().join(*args, **kwargs)
return self.result
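# A hedged usage sketch (an addition, not part of the original file): run a
# function on a worker thread and collect its return value via join().
if __name__ == '__main__':
    import time

    def slow_square(x):
        time.sleep(0.1)
        return x * x

    t = ReturnValueThread(target=slow_square, args=(6,))
    t.start()
    print(t.join())  # prints 36; prints None instead if the target raised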
| 31.9
| 99
| 0.617555
| 77
| 638
| 4.909091
| 0.545455
| 0.10582
| 0.074074
| 0.100529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261755
| 638
| 20
| 100
| 31.9
| 0.802548
| 0.145768
| 0
| 0
| 0
| 0
| 0.049724
| 0.038674
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.125
| 0
| 0.5
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1ae965b8719af361c251c7b3021070130bbaa7e
| 5,653
|
py
|
Python
|
LDA/lda.py
|
wimpykid26/Evolutionary-Classification
|
0a78cbebc252c0a13703aee20dac9fa234f07b08
|
[
"Apache-2.0"
] | 3
|
2019-11-10T08:51:11.000Z
|
2020-08-05T14:23:27.000Z
|
LDA/lda.py
|
wimpykid26/Evolutionary-Classification
|
0a78cbebc252c0a13703aee20dac9fa234f07b08
|
[
"Apache-2.0"
] | null | null | null |
LDA/lda.py
|
wimpykid26/Evolutionary-Classification
|
0a78cbebc252c0a13703aee20dac9fa234f07b08
|
[
"Apache-2.0"
] | 2
|
2017-12-12T13:35:41.000Z
|
2017-12-28T10:00:56.000Z
|
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import math
from sklearn.preprocessing import LabelEncoder
feature_dict = {i:label for i,label in zip(
range(4),
('sepal length in cm',
'sepal width in cm',
'petal length in cm',
'petal width in cm', ))}
df = pd.io.parsers.read_csv(
filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
header=None,
sep=','
)
print(feature_dict.items())
df.columns = [l for i,l in sorted(feature_dict.items())] + ['class label']
df.dropna(how="all", inplace=True) # to drop the empty line at file-end
df.tail()
X = df[['sepal length in cm','sepal width in cm', 'petal length in cm', 'petal width in cm']].values
y = df['class label'].values
enc = LabelEncoder()
label_encoder = enc.fit(y)
y = label_encoder.transform(y) + 1
label_dict = {1: 'Setosa', 2: 'Versicolor', 3:'Virginica'}
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12,6))
for ax,cnt in zip(axes.ravel(), range(4)):
# set bin sizes
min_b = math.floor(np.min(X[:,cnt]))
max_b = math.ceil(np.max(X[:,cnt]))
bins = np.linspace(min_b, max_b, 25)
    # plotting the histograms
for lab,col in zip(range(1,4), ('blue', 'red', 'green')):
ax.hist(X[y==lab, cnt],
color=col,
label='class %s' %label_dict[lab],
bins=bins,
alpha=0.5,)
ylims = ax.get_ylim()
# plot annotation
leg = ax.legend(loc='upper right', fancybox=True, fontsize=8)
leg.get_frame().set_alpha(0.5)
ax.set_ylim([0, max(ylims)+2])
ax.set_xlabel(feature_dict[cnt])
ax.set_title('Iris histogram #%s' %str(cnt+1))
# hide axis ticks
    ax.tick_params(axis="both", which="both", bottom=False, top=False,
            labelbottom=True, left=False, right=False, labelleft=True)
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
axes[0][0].set_ylabel('count')
axes[1][0].set_ylabel('count')
fig.tight_layout()
plt.show()
np.set_printoptions(precision=4)
mean_vectors = []
for cl in range(1,4):
mean_vectors.append(np.mean(X[y==cl], axis=0))
print('Mean Vector class %s: %s\n' %(cl, mean_vectors[cl-1]))
S_W = np.zeros((4,4))
for cl,mv in zip(range(1,4), mean_vectors):
class_sc_mat = np.zeros((4,4)) # scatter matrix for every class
for row in X[y == cl]:
row, mv = row.reshape(4,1), mv.reshape(4,1) # make column vectors
class_sc_mat += (row-mv).dot((row-mv).T)
S_W += class_sc_mat # sum class scatter matrices
print('within-class Scatter Matrix:\n', S_W)
overall_mean = np.mean(X, axis=0)
S_B = np.zeros((4,4))
for i,mean_vec in enumerate(mean_vectors):
n = X[y==i+1,:].shape[0]
mean_vec = mean_vec.reshape(4,1) # make column vector
overall_mean = overall_mean.reshape(4,1) # make column vector
S_B += n * (mean_vec - overall_mean).dot((mean_vec - overall_mean).T)
print('between-class Scatter Matrix:\n', S_B)
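# (Added note, standard LDA theory) The linear discriminants solve the
# generalized eigenproblem
#     S_W^{-1} S_B v = lambda v
# with S_W = sum_c sum_{x in class c} (x - m_c)(x - m_c)^T   (within-class)
# and  S_B = sum_c n_c (m_c - m)(m_c - m)^T                  (between-class),
# which is exactly what np.linalg.eig(np.linalg.inv(S_W).dot(S_B)) computes next.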
eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
for i in range(len(eig_vals)):
eigvec_sc = eig_vecs[:,i].reshape(4,1)
print('\nEigenvector {}: \n{}'.format(i+1, eigvec_sc.real))
print('Eigenvalue {:}: {:.2e}'.format(i+1, eig_vals[i].real))
for i in range(len(eig_vals)):
eigv = eig_vecs[:,i].reshape(4,1)
np.testing.assert_array_almost_equal(np.linalg.inv(S_W).dot(S_B).dot(eigv),
eig_vals[i] * eigv,
decimal=6, err_msg='', verbose=True)
print('ok')
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs = sorted(eig_pairs, key=lambda k: k[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in decreasing order:\n')
for i in eig_pairs:
print(i[0])
print('Variance explained:\n')
eigv_sum = sum(eig_vals)
for i,j in enumerate(eig_pairs):
print('eigenvalue {0:}: {1:.2%}'.format(i+1, (j[0]/eigv_sum).real))
W = np.hstack((eig_pairs[0][1].reshape(4,1), eig_pairs[1][1].reshape(4,1)))
print('Matrix W:\n', W.real)
X_lda = X.dot(W)
assert X_lda.shape == (150,2), "The matrix is not 150x2 dimensional."
def plot_step_lda():
ax = plt.subplot(111)
for label,marker,color in zip(
range(1,4),('^', 's', 'o'),('blue', 'red', 'green')):
plt.scatter(x=X_lda[:,0].real[y == label],
y=X_lda[:,1].real[y == label],
marker=marker,
color=color,
alpha=0.5,
label=label_dict[label]
)
plt.xlabel('LD1')
plt.ylabel('LD2')
leg = plt.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.title('LDA: Iris projection onto the first 2 linear discriminants')
# hide axis ticks
    plt.tick_params(axis="both", which="both", bottom=False, top=False,
            labelbottom=True, left=False, right=False, labelleft=True)
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.grid()
    plt.tight_layout()
plt.show()
plot_step_lda()
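# A hedged cross-check (an addition, not in the original script): sklearn's
# LDA should produce an equivalent 2-D projection, up to the sign and scaling
# of each axis.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

sklearn_lda = LinearDiscriminantAnalysis(n_components=2)
X_lda_sklearn = sklearn_lda.fit_transform(X, y)
assert X_lda_sklearn.shape == (150, 2)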
| 30.722826
| 101
| 0.613833
| 894
| 5,653
| 3.763982
| 0.288591
| 0.00951
| 0.035661
| 0.030312
| 0.305201
| 0.261218
| 0.199406
| 0.168202
| 0.157504
| 0.157504
| 0
| 0.0217
| 0.217407
| 5,653
| 183
| 102
| 30.89071
| 0.738924
| 0.081196
| 0
| 0.144
| 0
| 0.008
| 0.145837
| 0
| 0
| 0
| 0
| 0
| 0.016
| 1
| 0.008
| false
| 0
| 0.048
| 0
| 0.056
| 0.104
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1b01bba827b8c38a0f0739fb791912ffc9c1b74
| 29,968
|
py
|
Python
|
gentex/texmeas.py
|
NPann/GenTex
|
8a2c7cc746abefd252613f4ddf0d7f70d7ff26f8
|
[
"BSD-3-Clause"
] | 3
|
2019-04-26T00:48:01.000Z
|
2020-07-06T19:10:17.000Z
|
gentex/texmeas.py
|
NPann/GenTex
|
8a2c7cc746abefd252613f4ddf0d7f70d7ff26f8
|
[
"BSD-3-Clause"
] | null | null | null |
gentex/texmeas.py
|
NPann/GenTex
|
8a2c7cc746abefd252613f4ddf0d7f70d7ff26f8
|
[
"BSD-3-Clause"
] | 2
|
2019-01-10T18:38:05.000Z
|
2021-05-19T16:54:01.000Z
|
""" gentex.texmeas package
"""
import numpy as np
class Texmeas:
"""Class texmeas for generating texture measures from co-occurrence matrix
Parameters
----------
comat: ndarray
Non-normalized co-occurrence matrix - chi-squared conditional distribution
comparisons require the actual number of counts so don't normalize this before
sending in
measure: string
Texture measure (default = 'Statistical Complexity'). Choice of:
* 'CM Entropy'
* 'EM Entropy'
* 'Statistical Complexity'
* 'Energy Uniformity'
* 'Maximum Probability'
* 'Contrast'
* 'Inverse Difference Moment'
* 'Correlation'
* 'Probability of Run Length'
* 'Epsilon Machine Run Length'
* 'Run Length Asymmetry'
* 'Homogeneity'
* 'Cluster Tendency'
* 'Multifractal Spectrum Energy Range'
* 'Multifractal Spectrum Entropy Range'
    coordmom: int
        Moment of coordinate differences in the co-occurrence matrix,
        needed for calculating 'Contrast' and 'Inverse Difference Moment' (default=0)
    probmom: int
        Moment of the individual co-occurrence probabilities,
        needed for calculating 'Contrast' and 'Inverse Difference Moment' (default=0)
    rllen: int
        Length of run used for generating the probability
        of a run length (the higher this probability, the
        larger the constant patches on the scale used for generating
        the co-occurrence matrix) or the epsilon machine run length (default=0)
clusmom: int
        Moment used for generating co-occurrence cluster tendency (default=0)
    samelev: bool
        Whether to treat the rows and columns in the co-occurrence
        matrix as identical 'states' (the methods are very general
        so this needn't be the case, e.g. different template shapes
        from different images with different quantization levels
        could be used to generate the co-occurrence matrix, which could
        be of arbitrary shape)
        default = True assumes the co-occurrence matrix is square
        and the rows and columns correspond to the same 'state'
betas: array
An array of 3 values, the lower limit, the upper limit and
the number of steps to use as the 'inverse temperature' range
for estimating the multifractal spectrum from an epsilon machine
- getting the range right for an 'arbitrary' epsilon machine is
tricky and is expected to be reset over a number of trials before
getting a full spectrum estimate. For details on the rationale
and algorithm see:
K. Young and J. P. Crutchfield, 'Fluctuation Spectroscopy',
Chaos, Solitons, and Fractals 4 (1993) 5-39.
Attributes
----------
emclus: int
Number of clusters ('states') found when estimating an epsilon machine from the co-occurrence matrix.
emest: bool
Whether or not an epsilon machine has been estimated yet
emmat: float
The estimated epsilon machine as a standard Markov process transition matrix.
condo: 2d-array
        Co-occurrence matrix renormalized as a row-wise matrix of conditional probabilities - built as part of
epsilon machine estimation
emclasses: list
        List of which of the values in emclus each row in condo (and hence the co-occurrence matrix) belongs to
clusp: float
        Chi-squared p value to use for clustering epsilon machine rows
val: float
Value of most recently calculated texture measure
mfsspec: array
Array containing the multifractal spectral estimates obtained
over the range of 'inverse temperatures' provided in betas
currval: string
One of the listed measures method which constitutes the current value in val
"""
def __init__(self, comat, measure="Statistical Complexity", coordmom=0, probmom=0, rllen=0, clusmom=0, clusp=0.001,
samelev=True, betas=[-20, 20, 40]):
self.comat = comat
self.totcount = np.sum(comat) # to get back histo after norm
self.measure = measure
self.coordmom = coordmom
self.probmom = probmom
self.rllen = rllen
self.clusmom = clusmom
        self.clusp = clusp  # chi-squared p value to use for conditional
                            # distribution similarity
self.emclus = 0 # record the actual number of clusters
# found for the epsilon machine
self.emest = False # whether or not epsilon machine has been
# estimated
self.mfsest = False # whether or not multifractal spectrum has
# been estimated
self.emmat = np.array([]) # epsilon machine pre-array
        self.condo = np.array([])  # raw em transition matrix (i.e. the
                                   # array of conditional distributions)
self.emclasses = np.array([]) # list of which class each row
# of self.emmat belongs to
self.samelev = samelev # Boolean for whether pre and post
# epsilon machine states should be
# treated as the same
if self.comat.shape[0] != self.comat.shape[1]:
self.samelev = False # - should automatically be set here
# to false if # rows != #cols in
# co-occurence matrix
self.betas = betas # "inverse temperature" range and
# step for estimating multifractal
# spectrum from epsilon machine
self.val = 0.0
self.currval = ""
self.cme = np.nan # CM Entropy
self.eme = np.nan # EM Entropy
self.stc = np.nan # Statistical Complexity
self.enu = np.nan # Energy Uniformity
self.map = np.nan # Maximum Probability
self.con = np.nan # Contrast
self.idm = np.nan # Inverse Difference Moment
self.cor = np.nan # Correlation
self.prl = np.nan # Probability of Run Length
self.erl = np.nan # Epsilon Machine Run Length
self.rla = np.nan # Run Length Asymmetry
self.hom = np.nan # Homogeneity
self.clt = np.nan # Cluster Tendency
self.mfu = np.nan # Multifractal max,min energy diff.
self.mfs = np.nan # Multifractal max,min entropy diff.
        # initial empty array for the multifractal spectrum
        # with size equal to the number of steps specified in self.betas
        self.mfsspec = np.array([])
        # Normalize the co-occurrence matrix in case it isn't already
if np.sum(self.comat) != 1.0:
self.comat = np.float_(self.comat) / np.sum(self.comat)
# Actually normalize row vectors... -- NO !! --
# if np.sum(self.comat) != self.comat.shape[0]:
# self.comat = np.transpose(np.transpose(np.float_(self.comat))/np.float_(np.sum(self.comat,axis=1)))
# Calculate an initial texture measure
self.calc_measure(self.measure)
def calc_measure(self, measure='Statistical Complexity', coordmom=0, probmom=0, rllen=0, clusmom=0, samelev=True):
"""Calculates the appropriate texture measure and puts the value in the class variable val and
updates the class variable currval with the passed string
For a discussion of Haralick co-occurrence style texture measures see:
R. M. Haralick, 'Statistical and structural approaches to texture'. Proceedings of the IEEE May 1979, 67(5).
786-804.
Parameters
----------
measure: string
One of the following measure methods (default = 'Statistical Complexity'):
- 'CM Entropy'
- 'EM Entropy'
- 'Statistical Complexity'
- 'Energy Uniformity'
- 'Maximum Probability'
- 'Contrast'
- 'Inverse Difference Moment'
- 'Correlation'
- 'Probability of Run Length'
- 'Epsilon Machine Run Length'
- 'Run Length Asymmetry'
- 'Homogeneity'
- 'Cluster Tendency'
- 'Multifractal Spectrum Energy Range'
- 'Multifractal Spectrum Entropy Range'
"""
self.measure = measure
# Allow for changed values of the following class variables
# to be passed to calc measure
if coordmom != 0:
self.coordmom = coordmom
if probmom != 0:
self.probmom = probmom
if rllen != 0:
self.rllen = rllen
if clusmom != 0:
self.clusmom = clusmom
        if not samelev:
            self.samelev = False
if self.measure == "CM Entropy":
if np.isnan(self.cme):
self.cme = np.sum(
-np.where(self.comat > 0.0, self.comat, 1.0) * np.where(self.comat > 0.0, np.log2(self.comat), 0.0))
self.val = self.cme
self.currval = "CM Entropy"
elif self.measure == "EM Entropy":
if np.isnan(self.eme):
import scipy.linalg as L
if not self.emest:
self.est_em()
                # get left eigenvector associated with lambda = 1
                # (largest eigenvalue)
                [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)
                # Node probabilities are elements of the normalized left
                # eigenvector associated with eigenvalue 1 (assumes Scipy
                # convention of returning sorted eigenvalues, so eigenvalue 1
                # would be the first element of the returned eigenvalue array)
# nodep = v[:,0]/sum(v[:,0])
# ---- no longer make the above assumption
# found it was wrong - now specifically ask for eigenvector
# associated with eigenvalue 1 (greatest real part)
maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]
nodep = v[:, maxind] / sum(v[:, maxind])
self.eme = -np.sum(
np.transpose(nodep * np.ones(self.emmat.shape)) * (self.emmat * np.nan_to_num(np.log2(self.emmat))))
self.val = self.eme
self.currval = "EM Entropy"
elif self.measure == "Statistical Complexity":
if np.isnan(self.stc):
import scipy.linalg as L
# estimate epsilon machine if it hasn't been made
if not self.emest:
self.est_em()
                # get left eigenvector associated with lambda = 1
                # (largest eigenvalue)
                [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)
                # Node probabilities are elements of the normalized left
                # eigenvector associated with eigenvalue 1 (assumes Scipy
                # convention of returning sorted eigenvalues, so eigenvalue 1
                # would be the first element of the returned eigenvalue array)
# nodep = v[:,0]/sum(v[:,0])
# ---- no longer make the above assumption
# found it was wrong - now specifically ask for eigenvector
# associated with eigenvalue 1 (greatest real part)
maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]
nodep = v[:, maxind] / sum(v[:, maxind])
self.stc = -np.sum(nodep * np.log2(nodep))
self.val = self.stc
self.currval = "Statistical Complexity"
elif self.measure == "Energy Uniformity":
if np.isnan(self.enu):
self.enu = np.sum(np.where(self.comat > 0.0, self.comat * self.comat, 0.0))
self.val = self.enu
self.currval = "Energy Uniformity"
elif self.measure == "Maximum Probability":
            if np.isnan(self.map):
self.map = np.max(self.comat)
self.val = self.map
self.currval = "Maximum Probability"
elif self.measure == "Contrast":
if np.isnan(self.con):
if self.coordmom == 0 or self.probmom == 0:
if self.coordmom == 0:
print("Nonzero coordinate moment is required for calculating Contrast")
if self.probmom == 0:
print("Nonzero probability moment is required for calculating Contrast")
else:
crows = np.zeros(self.comat.shape)
ccols = np.zeros(self.comat.shape)
for i in range(self.comat.shape[0]):
crows[i, :] = i
ccols[:, i] = i
self.con = np.sum((np.abs(crows - ccols) ** self.coordmom) * (self.comat ** self.probmom))
self.val = self.con
self.currval = "Contrast"
elif self.measure == "Inverse Difference Moment":
if np.isnan(self.idm):
if self.coordmom == 0 or self.probmom == 0:
if self.coordmom == 0:
print("Nonzero coordinate moment is required for calculating Inverse Difference Moment")
if self.probmom == 0:
print("Nonzero probability moment is required for calculating Inverse Difference Moment")
else:
crows = np.zeros(self.comat.shape)
ccols = np.zeros(self.comat.shape)
for i in range(self.comat.shape[0]):
crows[i, :] = i
ccols[:, i] = i
codiffs = np.abs(crows - ccols) ** self.coordmom
# Set minimum coordinate difference for which you allow
# probability to be calculated
codiff_eps = 0.0000001
                    # Do the following so the test divides don't blow up and
                    # generate a warning
codiffs_ok = np.where(codiffs > codiff_eps, codiffs, 1.0)
self.idm = np.sum(np.where(codiffs > codiff_eps, (self.comat ** self.probmom) / codiffs_ok, 0.0))
self.val = self.idm
self.currval = "Inverse Difference Moment"
elif self.measure == "Correlation":
if np.isnan(self.cor):
import scipy.stats as ss
crows = np.zeros(self.comat.shape)
ccols = np.zeros(self.comat.shape)
for i in range(self.comat.shape[0]):
crows[i, :] = i + 1 # need to start at 1 for Correlation calcs.
ccols[:, i] = i + 1
rowmom = np.sum(crows * self.comat)
colmom = np.sum(ccols * self.comat)
comatvar = np.var(np.ravel(self.comat * crows))
self.cor = np.sum((crows - rowmom) * (ccols - colmom) * self.comat) / comatvar
self.val = self.cor
self.currval = "Correlation"
elif self.measure == "Probability of Run Length":
if np.isnan(self.prl):
if self.rllen == 0:
print("Nonzero run length is required for calculating Probability of Run Length")
else:
colprobs = np.zeros(self.comat.shape[0])
for i in range(self.comat.shape[0]):
colprobs[i] = np.sum(self.comat[i, :])
self.prl = 0.0
for i in range(self.comat.shape[0]):
if colprobs[i] != 0.0:
self.prl += ((colprobs[i] - self.comat[i, i]) ** 2 * (
self.comat[i, i] ** (self.rllen - 1))) / (colprobs[i] ** self.rllen)
self.val = self.prl
self.currval = "Probability of Run Length"
elif self.measure == "Epsilon Machine Run Length":
if np.isnan(self.erl):
if self.rllen == 0:
print("Nonzero run length is required for calculating Epsilon Machine Run Length")
else:
if not self.emest:
self.est_em()
self.erl = 0.0
colprobs = np.zeros(self.emmat.shape[0])
for i in range(self.emmat.shape[0]):
colprobs[i] = np.sum(self.emmat[i, :])
for i in range(self.emmat.shape[0]):
self.erl += ((colprobs[i] - self.emmat[i, i]) ** 2 * (self.emmat[i, i] ** (self.rllen - 1))) / (
colprobs[i] ** self.rllen)
self.val = self.erl
self.currval = "Epsilon Machine Run Length"
elif self.measure == "Run Length Asymmetry":
if np.isnan(self.rla):
if self.rllen == 0:
print("Nonzero run length is required for calculating Run Length Asymmetry")
else:
colprobs = np.zeros(self.comat.shape[0])
rowprobs = np.zeros(self.comat.shape[0])
for i in range(self.comat.shape[0]):
colprobs[i] = np.sum(self.comat[i, :])
rowprobs[i] = np.sum(self.comat[:, i])
colval = 0.0
rowval = 0.0
for i in range(self.comat.shape[0]):
if colprobs[i] != 0.0:
colval += ((colprobs[i] - self.comat[i, i]) ** 2 * (
self.comat[i, i] ** (self.rllen - 1))) / (colprobs[i] ** self.rllen)
if rowprobs[i] != 0.0:
rowval += ((rowprobs[i] - self.comat[i, i]) ** 2 * (
self.comat[i, i] ** (self.rllen - 1))) / (rowprobs[i] ** self.rllen)
self.rla = np.abs(colval - rowval)
self.val = self.rla
self.currval = "Run Length Asymmetry"
elif self.measure == "Homogeneity":
if np.isnan(self.hom):
crows = np.zeros(self.comat.shape)
ccols = np.zeros(self.comat.shape)
for i in range(self.comat.shape[0]):
crows[i, :] = i
ccols[:, i] = i
self.hom = np.sum((self.comat) / (1 + np.abs(crows - ccols)))
self.val = self.hom
self.currval = "Homogeneity"
elif self.measure == "Cluster Tendency":
if np.isnan(self.clt):
if self.clusmom == 0:
print("Nonzero cluster moment is required for calculating Cluster Tendency")
else:
crows = np.zeros(self.comat.shape)
ccols = np.zeros(self.comat.shape)
for i in range(self.comat.shape[0]):
crows[i, :] = i + 1 # need to start at 1 for Correlation calcs.
ccols[:, i] = i + 1
rowmom = np.sum(crows * self.comat)
colmom = np.sum(ccols * self.comat)
self.clt = np.sum(((crows + ccols - rowmom - colmom) ** self.clusmom) * self.comat)
self.val = self.clt
self.currval = "Cluster Tendency"
elif self.measure == "Multifractal Spectrum Energy Range":
if not self.emest: # estimate epsilon machine
self.est_em()
if not self.mfsest: # estimate multifractal spectrum
self.est_multi_frac_spec()
if self.mfsspec.size != 0:
self.mfu = np.max(self.mfsspec[:, 0]) - np.min(self.mfsspec[:, 0])
else:
self.mfu = 0.0
self.val = self.mfu
self.currval = "Multifractal Spectrum Energy Range"
elif self.measure == "Multifractal Spectrum Entropy Range":
if not self.emest: # estimate epsilon machine
self.est_em()
if not self.mfsest: # estimate multifractal spectrum
self.est_multi_frac_spec()
if self.mfsspec.size != 0:
self.mfs = np.max(self.mfsspec[:, 1]) - np.min(self.mfsspec[:, 1])
else:
self.mfs = 0.0
self.val = self.mfs
self.currval = "Multifractal Spectrum Entropy Range"
else:
"Sorry don't know about texture measure ", self.measure
def est_multi_frac_spec(self):
"""TODO"""
import scipy.linalg as L
self.mfsspec = []
if not self.emest:
self.est_em()
# print "Epsilon machine",self.emmat
if self.betas[2] == 1:
print(
"Only 1 step asked for re. calculating multifractal spectrum, using lower limit specified, i.e. betas[0]")
step = 0
else:
            step = (float(self.betas[1]) - float(self.betas[0])) / (float(self.betas[2]) - 1)  # np.float was removed in NumPy 1.24
        for i in range(self.betas[2]):
            if i == 0:  # in case self.betas[2] = 1 => step = 0
                cb = float(self.betas[0])
            else:
                cb = float(self.betas[0] + i * step)
if cb == 1.0:
                # in this case just do the standard metric entropy calc.
                # (e.g. see the EM Entropy calculation above for comments)
                # as both u and s(u) are equal to the metric entropy here
[e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)
maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]
nodep = v[:, maxind] / sum(v[:, maxind])
su = -np.sum(
np.transpose(nodep * np.ones(self.emmat.shape)) * (self.emmat * np.nan_to_num(np.log2(self.emmat))))
self.mfsspec.append([su, su])
# print i,cb,su,su
elif cb == 0.0:
# skip it for now - need to re-figure out beta -> 0 limit
# need placeholder though
                pass  # placeholder
else: # cb != 0,1
# get betafied epsilon machine
a = np.where(self.emmat > 0.0, np.exp(cb * np.log(self.emmat)), 0.0)
                # get the maximum eigenvalue and take the log
# ("inv. temp." times "free energy")
[eb, vb] = L.eig(np.nan_to_num(a), left=False, right=True)
maxind = np.where(np.real(eb) == np.max(np.real(eb)))[0][0]
fe = np.log2(np.real(eb[maxind]))
# stochastisize betafied epsilon machine
b = np.dot((1 / eb[maxind]) * np.diag((1 / vb[:, maxind])), np.dot(a, (np.diag(vb[:, maxind]))))
# get metric entropy of stochasticized machine
# - same as "entropy" s(u) as func. of "energy" u
# - i.e. multifractal spectrum is analogue of
# - thermodynamic spectrum s(u) vs. u
[e, v] = L.eig(np.nan_to_num(b), left=True, right=False)
maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]
nodep = v[:, maxind] / sum(v[:, maxind])
# make sure they're real - sometimes linalg spits
# out complex values with 0 imaginary part
su = abs(-np.sum(np.transpose(nodep * np.ones(b.shape)) * (b * np.nan_to_num(np.log2(b)))))
# then get energy - i.e. "temperature" normalized
# difference between "entropy" and "free energy"
u = abs((su - fe) / cb)
self.mfsspec.append([u, su])
# print i,cb,u,su
self.mfsspec = np.array(np.real(self.mfsspec))
# waste the nan's - e.g. when the range wasn't quite right
self.mfsspec = np.delete(self.mfsspec, np.where(np.isnan(self.mfsspec))[0], 0)
self.mfsest = True
def est_em(self):
"""Estimate an epsilon machine from a co-occurrence matrix with #rows = #cols, done implicitly whenever one
of the related complexity/entropy measures (EM Entropy, Statistical Complexity, Epsilon Machine Run Length)
are calculated.
For info on epsilon machines and the related measures see:
- K. Young, Y. Chen, J. Kornak, G. B. Matson, N. Schuff, 'Summarizing complexity in high dimensions', \
Phys Rev Lett. (2005) Mar 11;94(9):098701.
- C. R. Shalizi and J. P. Crutchfield, 'Computational Mechanics: Pattern and Prediction, Structure and \
Simplicity', Journal of Statistical Physics 104 (2001) 819--881.
- K. Young and J. P. Crutchfield, 'Fluctuation Spectroscopy', Chaos, Solitons, and Fractals 4 (1993) 5-39.
- J. P. Crutchfield and K. Young, 'Computation at the Onset of Chaos', in Entropy, Complexity, and Physics \
of Information, W. Zurek, editor, SFI Studies in the Sciences of Complexity, VIII, Addison-Wesley, Reading,\
Massachusetts (1990) 223-269.
"""
import scipy.stats as ss
# Make conditional distribution matrix, i.e. epsilon machine
# (row probabilities)
self.condo = np.transpose(np.transpose(self.comat) / np.sum(self.comat, axis=1))
# the following is n^2 - need to figure a better way
found = []
self.emclasses = np.zeros(self.condo.shape[0], int)
onclass = 0
for i in range(self.condo.shape[0]):
if i not in found:
found.append(i)
# if it's dinky just tack it on to class 0
# code below will just combine it in
if np.sum(self.condo[i, :]) < 0.00000001:
self.emclasses[i] = 0
else:
# it's a new one
self.emclasses[i] = onclass
for j in range(i + 1, self.condo.shape[0]):
if j not in found:
                    # check if rows ("distributions") are "close",
                    # i.e. p value in chi-squared test < self.clusp
tester = ss.chisquare(self.totcount * self.condo[i, :], self.totcount * self.condo[j, :])[1]
if tester < self.clusp: # they're different
found.append(j)
onclass += 1
self.emclasses[j] = onclass
else: # they're not
found.append(j)
self.emclasses[j] = onclass
self.emclus = onclass + 1
for i in range(self.emclus):
rowinds = tuple(np.where(self.emclasses == i)[0])
if i == 0:
a = np.add.reduce(self.comat[rowinds, :], axis=0)
else:
a = np.vstack((a, np.add.reduce(self.comat[rowinds, :], axis=0)))
# If initial/final states are the same need to also combine columns
if self.samelev:
if len(a.shape) > 1:
for i in range(self.emclus):
colinds = tuple(np.where(self.emclasses == i)[0])
# seems like it has to be done rowise first...
if i == 0:
b = np.add.reduce(a[:, colinds], axis=1)
else:
b = np.vstack((b, np.add.reduce(a[:, colinds], axis=1)))
# ... then transposed
else:
for i in range(a.shape[0]):
if i == 0:
b = a
else:
b = np.vstack([b, a])
self.emmat = np.transpose(b)
else: # do it all over again for columns
found = []
self.emclasses = np.zeros(self.condo.shape[1], int)
onclass = 0
for i in range(self.condo.shape[1]):
if i not in found:
found.append(i)
# if it's dinky just tack it on to class 0
# code below will just combine it in
if np.sum(self.condo[:, i]) < 0.00000001:
self.emclasses[i] = 0
else:
# it's a new one
self.emclasses[i] = onclass
                        for j in range(i + 1, self.condo.shape[1]):  # (args were swapped, yielding an empty range)
if j not in found:
                            # check if rows ("distributions") are "close",
                            # i.e. p value in chi-squared test < self.clusp
tester = \
ss.chisquare(self.totcount * self.condo[:, i], self.totcount * self.condo[:, j])[1]
if tester < self.clusp: # they're different
found.append(j)
onclass += 1
self.emclasses[j] = onclass
else: # they're not
found.append(j)
self.emclasses[j] = onclass
self.emclus = onclass + 1
for i in range(self.emclus):
                colinds = tuple(np.where(self.emclasses == i)[0])  # np.where on a 1-D array has no index [1]
if i == 0:
a = np.add.reduce(self.comat[:, colinds], axis=1)
else:
a = np.vstack((a, np.add.reduce(self.comat[:, colinds], axis=1)))
self.emmat = np.transpose(a)
# and finally turned into a Markov matrix...
self.emmat = np.transpose(np.transpose(self.emmat) / np.sum(self.emmat, axis=1))
self.emest = True
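# A hedged usage sketch (an addition, not part of the original module): build a
# toy co-occurrence count matrix and query two of the simpler measures. The 2x2
# counts are purely illustrative.
if __name__ == '__main__':
    toy_comat = np.array([[8., 2.],
                          [2., 8.]])  # raw counts; Texmeas normalizes internally
    tm = Texmeas(toy_comat, measure='CM Entropy')
    print('CM Entropy:', tm.val)
    tm.calc_measure('Energy Uniformity')
    print('Energy Uniformity:', tm.val)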
| 45.613394
| 154
| 0.525894
| 3,572
| 29,968
| 4.399216
| 0.152296
| 0.038946
| 0.022273
| 0.0126
| 0.465763
| 0.418799
| 0.392453
| 0.381698
| 0.36337
| 0.357134
| 0
| 0.017308
| 0.375334
| 29,968
| 656
| 155
| 45.682927
| 0.822115
| 0.341464
| 0
| 0.436464
| 0
| 0.002762
| 0.069908
| 0
| 0
| 0
| 0
| 0.001524
| 0
| 1
| 0.01105
| false
| 0
| 0.016575
| 0
| 0.030387
| 0.024862
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1b22c20857895713f38d86719437c73c6f5f5b7
| 3,373
|
py
|
Python
|
AutoSketcher/utils/dataio.py
|
D1anaGreen/essaykiller
|
75311a23dc1f5dc8b5040114fdeda67248700f7a
|
[
"Apache-2.0"
] | 4,551
|
2020-09-29T14:50:03.000Z
|
2022-03-31T00:40:45.000Z
|
AutoSketcher/utils/dataio.py
|
D1anaGreen/essaykiller
|
75311a23dc1f5dc8b5040114fdeda67248700f7a
|
[
"Apache-2.0"
] | 28
|
2020-10-01T08:03:23.000Z
|
2022-03-30T15:40:40.000Z
|
AutoSketcher/utils/dataio.py
|
D1anaGreen/essaykiller
|
75311a23dc1f5dc8b5040114fdeda67248700f7a
|
[
"Apache-2.0"
] | 809
|
2020-10-01T05:34:58.000Z
|
2022-03-31T00:40:48.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zk
@contact: kun.zhang@nuance.com
@file: dataio.py
@time: 8/27/2019 4:31 PM
@desc:
"""
import os
def load_txt_data(path, mode='utf-8-sig', origin=False):
"""
    This func is used to read a txt file
:param origin:
:param path: path where file stored
:param mode:
:type path: str
:return: string lines in file in a list
:rtype: list
"""
if type(path) != str:
raise TypeError
res = []
file = open(path, 'rb')
lines = file.read().decode(mode, 'ignore')
for line in lines.split('\n'):
line = line.strip()
if origin:
res.append(line)
else:
if line:
res.append(line)
file.close()
return res
def load_excel_data(path):
"""
    This func is used to read an excel file
:param path: path where file stored
:type path: str
:return: data saved in a pandas DataFrame
:rtype: pandas.DataFrame
"""
if type(path) != str:
raise TypeError
import pandas as pd
return pd.read_excel(path).loc[:]
def load_variable(path):
"""
:param path:
:return:
"""
import pickle
return pickle.load(open(path, 'rb'))
def save_txt_file(data, path, end='\n'):
"""
    This func is used to save data to a txt file
    supported data types:
        list: fully supported
        dict: only the dict keys are saved
        str: each character is written to its own line
        tuple: fully supported
        set: fully supported
:param data: data
:param path: path to save
:type path: str
:param end:
:type end: str
:return: None
"""
if type(data) not in [list, dict, str, tuple, set] or type(path) != str:
raise TypeError
remove_old_file(path)
with open(path, 'a', encoding='utf-8') as f:
for item in data:
f.write(str(item) + end)
def save_variable(variable, path):
"""
:param variable:
:param path:
:return:
"""
import pickle
return pickle.dump(variable, open(path, 'wb'))
def load_file_name(path):
"""
    Return (root, dirs, files) for the top level of path only (the first yield of os.walk)
:param path:
:type path:str
:return:
"""
for root, dirs, files in os.walk(path):
return root, dirs, files
def load_all_file_name(path, list_name, suffix='', not_include='.py'):
"""
Load all file name including sub folder
:param path:
:param list_name:
:param suffix:
:param not_include:
:return:
"""
for file in os.listdir(path):
file_path = os.path.join(path, file)
if os.path.isdir(file_path) and not_include not in file_path:
load_all_file_name(file_path, list_name, suffix, not_include)
elif os.path.splitext(file_path)[1] == suffix:
list_name.append(file_path)
def check_dir(path):
"""
    Check whether a path exists (used for files as well as dirs)
:param path:
:type path:str
:return:
:rtype: bool
"""
return os.path.exists(path)
def mkdir(path):
"""
:param path:
:type path: str
:return: None
"""
path = path.strip()
if not check_dir(path):
os.makedirs(path)
def remove_old_file(path):
"""
:param path:
:type path: str
:return:
"""
if check_dir(path):
os.remove(path)
def delete_file(path):
os.remove(path)
if __name__ == '__main__':
pass
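# A hedged usage sketch (an addition, not in the original): round-trip a list
# of strings through save_txt_file/load_txt_data; 'demo.txt' is illustrative.
if __name__ == '__main__':
    demo = ['alpha', 'beta', 'gamma']
    save_txt_file(demo, 'demo.txt')
    assert load_txt_data('demo.txt') == demo
    delete_file('demo.txt')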
| 20.319277
| 76
| 0.590276
| 472
| 3,373
| 4.116525
| 0.286017
| 0.04632
| 0.056613
| 0.052496
| 0.227998
| 0.206897
| 0.071024
| 0
| 0
| 0
| 0
| 0.005824
| 0.287281
| 3,373
| 165
| 77
| 20.442424
| 0.802413
| 0.341536
| 0
| 0.192982
| 0
| 0
| 0.022129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192982
| false
| 0.017544
| 0.070175
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1b6e00f1b7c8a15539c5d29a89c356e88a3f73c
| 20,511
|
py
|
Python
|
music_maker.py
|
kenanbit/loopsichord
|
d02e021a68333c52adff38cc869bf217deebfc5c
|
[
"MIT"
] | null | null | null |
music_maker.py
|
kenanbit/loopsichord
|
d02e021a68333c52adff38cc869bf217deebfc5c
|
[
"MIT"
] | null | null | null |
music_maker.py
|
kenanbit/loopsichord
|
d02e021a68333c52adff38cc869bf217deebfc5c
|
[
"MIT"
] | null | null | null |
from constants import *
import pygame as pg
from time import sleep
from metronome import *
import math
import numpy as np
from copy import deepcopy
from audio import *
from instructions_panel import *
from loop import *
class MusicMaker:
def __init__(self, screen):
self.pitch = 0
self.screen = screen
self.pitch_range = PITCH_RANGE
self.b_left = 0
self.b_middle = 0
self.b_right = 0
self.saved = None
self.events = set()
self.metronome = Metronome(BUFFERS_PER_MEASURE)
self.is_measure = False
self.using_scales = list(range(1,6))
self.scale = self.using_scales[3]
self.scale_height = SCREEN_DIM[1] / len(self.using_scales)
self.background = None
self.background_needs_update = True
self.instructions = InstructionsPanel()
self.audio_player = None
self.audio_player = AudioPlayer(self)
self.audio_player.run()
def do_step(self):
## Avoid the race condition
        while self.audio_player is None:
sleep(.1)
## Gather information from metronome, mouse, and keyboard
is_beat = self.metronome.is_beat(self.audio_player.loop_buffer_index)
self.is_measure = self.metronome.is_measure(self.audio_player.loop_buffer_index)
(m_x, m_y) = pygame.mouse.get_pos()
(last_b_left, last_b_middle, last_b_right) = (self.b_left, self.b_middle, self.b_right)
(self.b_left, self.b_middle, self.b_right) = pygame.mouse.get_pressed()
last_keys = keys[:]
keys.clear()
keys.extend(pygame.key.get_pressed())
## Center scales around mouse
if self.b_middle and not last_b_middle:
self.background_needs_update = True
m_x, m_y = self.center_scales_around(m_x, m_y)
## Run events scheduled for the beginning of the step
for e in sorted(list(self.events), key=lambda e: e[0]):
if e[2] == BEGIN_STEP:
if e[1] == NEXT_BUFFER or ( is_beat and e[1] == NEXT_BEAT ) or ( self.is_measure and e[1] == NEXT_MEASURE ):
self.audio_player.do_action(e[0])
self.events.remove(e)
###########################
## Keyboard and mouse input
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
## These events aren't caught by the pygame.mouse methods
elif event.type == pygame.MOUSEBUTTONDOWN:
## Scroll down
if event.button == 5:
self.audio_player.decrease_volume()
## Scroll up
if event.button == 4:
self.audio_player.increase_volume()
## Window resize
elif event.type == pygame.VIDEORESIZE:
w,h = event.size
min_w, min_h = MIN_DIM
w = max(min_w, w)
h = max(min_h, h)
update_screen_size((w,h))
self.background_needs_update = True
self.scale_height = SCREEN_DIM[1] / len(self.using_scales)
self.screen = pygame.display.set_mode(SCREEN_DIM, pygame.RESIZABLE)
## Get the exact pitch from the mouse x coordinate
self.mouse_pitch = self.coord_to_pitch(m_x, coord=0, reverse=False)
## Close the application
if is_key_mod(ESCAPE, None):
self.audio_player.stop_stream()
print("Ending stream...")
## Start and stop recording
if not keys[SPACE] and self.audio_player.loop_recording:
self.events.add(EVENT_STOP_LOOP_REC)
if keys[SPACE] and not self.audio_player.loop_recording:
self.events.add(EVENT_START_LOOP_REC)
## Start and stop playing of all loops
if is_key_mod(K_P, None) and not last_keys[K_P]:
if self.audio_player.loop_playing:
self.events.add(EVENT_STOP_LOOP_PLAY)
else:
self.events.add(EVENT_START_LOOP_PLAY)
## If a loop is selected:
if self.audio_player.active_loops[0] >= 0 and not self.audio_player.loop_recording:
## Move the active loops left/right by one beat (with wrapping)
if is_key_mod(LEFT, None) and not last_keys[LEFT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(-1*self.metronome.beat_len)
if is_key_mod(RIGHT, None) and not last_keys[RIGHT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(self.metronome.beat_len)
## Move the active loops left/right by one buffer (with wrapping)
if is_key_mod(LEFT, SHIFT) and not last_keys[LEFT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(-1)
if is_key_mod(RIGHT, SHIFT) and not last_keys[RIGHT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(1)
## Toggle mute on the active loops
if is_key_mod(K_M, None) and not last_keys[K_M]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].toggle_mute()
## Increase and decrease volume of the active loops
if keys[EQUALS] or keys[PLUS] or keys[KP_PLUS]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].adjust_volume(.02)
if keys[MINUS] or keys[KP_MINUS]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].adjust_volume(-.02)
## Copy the active loops below them as a group, and mute the copies
if is_key_mod(K_C, CTRL) and not last_keys[K_C]:
loop_copies = [self.audio_player.loops[i].get_copy() for i in self.audio_player.active_loops]
for i,loop in enumerate(loop_copies):
loop.set_mute(True)
self.audio_player.loops.insert(self.audio_player.active_loops[-1]+1+i, loop)
self.audio_player.active_loops = [x+len(loop_copies) for x in self.audio_player.active_loops]
## Move the active loops up and down in the lineup
other_index = -1
loops = self.audio_player.loops
if is_key_mod(UP, ALT) and not last_keys[UP] and self.audio_player.active_loops[0] > 0:
for index in self.audio_player.active_loops:
other_index = (index-1)%len(self.audio_player.loops)
loops[index], loops[other_index] = loops[other_index], loops[index]
self.audio_player.active_loops = [x-1 for x in self.audio_player.active_loops]
elif is_key_mod(DOWN, ALT) and not last_keys[DOWN] and self.audio_player.active_loops[-1] < len(loops)-1:
for index in self.audio_player.active_loops[::-1]:
other_index = (index+1)%len(self.audio_player.loops)
loops[index], loops[other_index] = loops[other_index], loops[index]
self.audio_player.active_loops = [x+1 for x in self.audio_player.active_loops]
## Add the selected loops
if is_key_mod(K_A, None) and not last_keys[K_A]:
while len(self.audio_player.active_loops) > 1:
i = self.audio_player.active_loops[0]
other = self.audio_player.active_loops.pop()
self.audio_player.loops[i].combine(self.audio_player.loops[other])
del self.audio_player.loops[other]
## Pitch shift the selected loops UP/DOWN
if is_key_mod(UP, CTRL) and is_key_mod(UP, SHIFT) and not last_keys[UP]:
for index in self.audio_player.active_loops:
#Shift up one eighth of a tone
self.audio_player.loops[index].pitch_shift(.25)
elif is_key_mod(UP, CTRL) and not last_keys[UP]:
for index in self.audio_player.active_loops:
#Shift up one semitone
self.audio_player.loops[index].pitch_shift(1)
elif is_key_mod(DOWN, CTRL) and is_key_mod(DOWN, SHIFT) and not last_keys[DOWN]:
                for index in self.audio_player.active_loops:
                    #Shift down one eighth of a tone
                    self.audio_player.loops[index].pitch_shift(-.25)
elif is_key_mod(DOWN, CTRL) and not last_keys[DOWN]:
                for index in self.audio_player.active_loops:
                    #Shift down one semitone
                    self.audio_player.loops[index].pitch_shift(-1)
## Delete the current loop with backspace or delete
if (is_key_mod(BACKSPACE, None) and not last_keys[BACKSPACE]) or (is_key_mod(DELETE, None) and not last_keys[DELETE]):
for i in self.audio_player.active_loops[::-1]:
del self.audio_player.loops[i]
self.audio_player.active_loops = [self.audio_player.active_loops[0]]
if self.audio_player.active_loops[0] >= len(self.audio_player.loops):
self.audio_player.active_loops[0] -= 1
else: ## Metronome selected (index -1)
##Only allow changes to the metronome when there are no loops:
if len(self.audio_player.loops) == 0:
## Add or subtract from the metronome length
if is_key_mod(LEFT, None) and not last_keys[LEFT]:
self.metronome.change_measure_length(-self.metronome.beats)
if is_key_mod(RIGHT, None) and not last_keys[RIGHT]:
self.metronome.change_measure_length(self.metronome.beats)
## Add or subtract from the metronome beat count
if is_key_mod(LEFT, SHIFT) and not last_keys[LEFT]:
self.metronome.change_beat_count(-1)
if is_key_mod(RIGHT, SHIFT) and not last_keys[RIGHT]:
self.metronome.change_beat_count(1)
## Toggle justify pitch
if is_key_mod(K_J, None) and not last_keys[K_J]:
self.audio_player.justify_pitch = not self.audio_player.justify_pitch
self.background_needs_update = True
for loop in self.audio_player.loops:
loop.recalculate_buffers()
if not self.audio_player.loop_recording:
## Move the active loop indicator up and down
if is_key_mod(UP, None) and not last_keys[UP]:
self.audio_player.active_loops = [ self.audio_player.active_loops[0] % (len(self.audio_player.loops)+1) - 1 ]
if is_key_mod(DOWN, None) and not last_keys[DOWN]:
self.audio_player.active_loops = [ (self.audio_player.active_loops[-1]+2) % (len(self.audio_player.loops)+1) - 1 ]
## Select a range of loops
if is_key_mod(UP, SHIFT) and not is_key_mod(UP, CTRL) and not last_keys[UP] and self.audio_player.active_loops[0] > 0:
self.audio_player.active_loops.insert(0, self.audio_player.active_loops[0]-1)
if is_key_mod(DOWN, SHIFT) and not is_key_mod(DOWN, CTRL) and not last_keys[DOWN] and self.audio_player.active_loops[0] >= 0 and self.audio_player.active_loops[-1] < len(self.audio_player.loops) - 1:
self.audio_player.active_loops.append(self.audio_player.active_loops[-1]+1)
## Multiply metronome and loops a given number of times
for num in range(0,10):
if is_key_mod(NUMS[num], None) and not last_keys[NUMS[num]]:
self.audio_player.multiply_tracks(num)
## Articulating and continuing a note playing
if self.b_left:
if not self.audio_player.playing:
self.audio_player.articulate()
else:
self.audio_player.settle_to_volume()
## Allowing a note to fade away when not left clicking
if not self.b_left:
self.audio_player.volume_decay()
## Identify the current scale by mouse position
self.scale_index = (self.using_scales[0] + int(m_y / SCREEN_DIM[1] * len(self.using_scales))) %12
self.scale = SCALES[self.scale_index]
## Temporarily align to the chromatic scale on the current scale
if (self.b_right):
self.scale = CHROMATIC_SCALE
## Show and hide the instructions (really for QUESTION_MARK, but SLASH is more accepting)
if (keys[SLASH] and not last_keys[SLASH]):
self.instructions.minimized = not self.instructions.minimized
#######################
## Pitch decisionmaking
## Get scale degree of closest pitch
self.closest_pitch = sorted(self.scale, key=lambda x: min(abs((self.mouse_pitch%12)-x), 12 - abs((self.mouse_pitch%12)-x))) [0]
## Put closest pitch in correct octave
self.closest_pitch += math.floor(self.mouse_pitch / 12) * 12
## Correct an error by rounding up if self.mouse_pitch > 11.5
if abs(self.mouse_pitch - self.closest_pitch) > 10:
self.closest_pitch += 12
## In case we switched scales for the chromatic scale, switch back now that we decided on a closest pitch
self.scale = SCALES[self.scale_index]
## Decide whether to align to the closest pitch, or use the mouse pitch
#if not last_b_middle:
if self.b_left or self.audio_player.volume == 0:
if is_key_mod(K_S, None):
self.pitch = self.mouse_pitch
else:
self.pitch = self.closest_pitch
## Run events scheduled for the end of the step
for e in sorted(list(self.events), key=lambda e: e[0]):
if e[2] == END_STEP:
if e[1] == NEXT_BUFFER or ( is_beat and e[1] == NEXT_BEAT ) or ( self.is_measure and e[1] == NEXT_MEASURE ):
self.audio_player.do_action(e[0])
self.events.remove(e)
self.paint_screen()
def center_scales_around(self, m_x, m_y):
range_width = self.pitch_range[1] - self.pitch_range[0]
range_middle = self.pitch_range[1] - range_width // 2
diff = self.closest_pitch - range_middle
self.pitch_range = (self.pitch_range[0]+diff, self.pitch_range[1]+diff)
y_diff = self.scale_index - self.using_scales[len(self.using_scales)//2]
self.using_scales = [(i+y_diff)%12 for i in self.using_scales]
new_m_x = self.pitch_to_coord(self.mouse_pitch)
new_m_y = m_y-y_diff*self.scale_height
pygame.mouse.set_pos(new_m_x, new_m_y)
return new_m_x, new_m_y
def paint_screen(self):
## Draw the mostly unchanging buffered background
        if self.background is None or self.background_needs_update:
self.background = self.redraw_background()
self.screen.blit(self.background, (0,0))
## Draw the active notes
y=0
notes = [l.recorded_notes[self.audio_player.loop_buffer_index] for l in self.audio_player.loops if not l.muted]
self.recorded_notes_to_draw = [rn for sublist in notes for rn in sublist]
for i in self.using_scales:
s = SCALES[i]
self.draw_scale_activity(s, y, self.scale is s)
y += self.scale_height
## Draw metronome
self.metronome.paint_self(self.screen, self.audio_player.loop_buffer_index, -1 in self.audio_player.active_loops)
## Draw the loops
y = 60
x = 10
w = self.metronome.measure_len * self.metronome.visual_buffer_width
h = 30
v_margin = 10
for i in range(len(self.audio_player.loops)):
loop = self.audio_player.loops[i]
loop.paint_self(self.screen, (x,y,w,h), i in self.audio_player.active_loops, self.audio_player.loop_recording)
y += h + v_margin
## Draw the instruction panel
self.instructions.paint_self(self.screen)
pygame.display.flip()
'''
Draws the active elements of a scale (row of notes) on the screen.
'''
def draw_scale_activity(self, scale, y, is_active):
notes_to_draw = [rn for rn in self.recorded_notes_to_draw if rn.scale==scale]
if self.scale == scale:
notes_to_draw.append(RecordedNote(-1, self.pitch, self.audio_player.volume, None, self.scale, None, None))
for p in range(self.pitch_range[0], self.pitch_range[1]+1):
p_i = p % 12
if p_i in scale:
x = self.pitch_to_coord(p, coord=0, reverse=False, scale=scale[0])
color = ACTIVE_COLORS[p_i] if is_active and self.closest_pitch == p else INACTIVE_COLORS[p_i]
##Determine line width based on notes_to_draw:
on_this_pitch = [rn for rn in notes_to_draw if rn.pitch == p]
notes_to_draw = [rn for rn in notes_to_draw if not rn in on_this_pitch]
if len(on_this_pitch) > 0:
sum_volume = sum(map(lambda rn: rn.get_loudness(), on_this_pitch))
line_width = max(INACTIVE_NOTE_WIDTH, int(sum_volume*ACTIVE_NOTE_STRETCH))
pygame.draw.line(self.screen, color, (x,y), (x,y+self.scale_height), line_width)
if get_font() and p_i == scale[0]:
l1 = get_font().render(NOTE_NAMES[p_i], 1, color)
self.screen.blit(l1, (x+10, y+self.scale_height-30))
if is_active:
color = INACTIVE_COLORS[scale[0]]
pygame.draw.line(self.screen, color, (0,y), (SCREEN_DIM[0],y), 4)
pygame.draw.line(self.screen, color, (0,y+self.scale_height), (SCREEN_DIM[0],y+self.scale_height), 4)
## The remaining pitches in notes_to_draw are not on a bar
for rn in notes_to_draw:
line_width = max(INACTIVE_NOTE_WIDTH, int(rn.get_loudness() * ACTIVE_NOTE_STRETCH))
x = self.pitch_to_coord(rn.pitch)
pygame.draw.line(self.screen, FREE_NOTE_COLOR, (x, y), (x,y+self.scale_height), line_width)
'''
Draws the inactive scale elements into a buffer image
'''
def redraw_background(self):
self.background_needs_update = False
screen = pygame.Surface(SCREEN_DIM)
screen.fill(BACK_COLOR)
y=0
for i in self.using_scales:
self.draw_scale_background(screen, SCALES[i], y)
y += self.scale_height
return screen
'''
Draws the inactive elements of one scale onto an image
'''
def draw_scale_background(self, screen, scale, y):
pygame.draw.rect(screen, DARK_COLORS[scale[0]], (0,y,SCREEN_DIM[0],self.scale_height))
pygame.draw.line(screen, SCALE_INACTIVE_SEPARATOR_COLOR, (0,y), (SCREEN_DIM[0],y), 1)
pygame.draw.line(screen, SCALE_INACTIVE_SEPARATOR_COLOR, (0,y+self.scale_height), (SCREEN_DIM[0],y+self.scale_height), 1)
for p in range(self.pitch_range[0], self.pitch_range[1]+1):
p_i = p % 12
if p_i in scale:
x = self.pitch_to_coord(p, coord=0, reverse=False, scale=scale[0])
pygame.draw.line(screen, INACTIVE_COLORS[p_i], (x,y), (x,y+self.scale_height), INACTIVE_NOTE_WIDTH)
if get_font() and p_i == scale[0]:
l1 = get_font().render(NOTE_NAMES[p_i], 1, INACTIVE_COLORS[p_i])
screen.blit(l1, (x+10, y+self.scale_height-30))
def coord_to_pitch(self, y, coord=0, reverse=False):
if reverse:
return (self.pitch_range[1] - self.pitch_range[0]) / SCREEN_DIM[coord] * (SCREEN_DIM[coord] - y) + self.pitch_range[0]
else:
return (self.pitch_range[1] - self.pitch_range[0]) / SCREEN_DIM[coord] * y + self.pitch_range[0]
def pitch_to_coord(self, p, coord=0, reverse=False, scale=None):
        if scale is not None and self.audio_player.justify_pitch:
p = pitch_to_just_pitch(p, scale)
if reverse:
return SCREEN_DIM[coord] - (p - self.pitch_range[0]) / (self.pitch_range[1] - self.pitch_range[0]) * SCREEN_DIM[coord]
else:
return (p - self.pitch_range[0]) / (self.pitch_range[1] - self.pitch_range[0]) * SCREEN_DIM[coord]
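# A hedged standalone sketch (an addition, not part of the original class):
# coord_to_pitch and pitch_to_coord above are inverse linear maps over
# pitch_range (with justify_pitch off), illustrated here with made-up constants.
def _coord_to_pitch(x, width=1280, pitch_range=(36, 84)):
    lo, hi = pitch_range
    return (hi - lo) / width * x + lo

def _pitch_to_coord(p, width=1280, pitch_range=(36, 84)):
    lo, hi = pitch_range
    return (p - lo) / (hi - lo) * width

assert abs(_pitch_to_coord(_coord_to_pitch(640)) - 640) < 1e-9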
| 49.305288
| 211
| 0.610502
| 2,927
| 20,511
| 4.058763
| 0.11411
| 0.07803
| 0.130051
| 0.079545
| 0.54697
| 0.469949
| 0.386027
| 0.349158
| 0.298485
| 0.283333
| 0
| 0.013119
| 0.290186
| 20,511
| 415
| 212
| 49.424096
| 0.802871
| 0.106236
| 0
| 0.211604
| 0
| 0
| 0.000891
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030717
| false
| 0
| 0.03413
| 0
| 0.088737
| 0.003413
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1b7d1521d980a52988abbf6e1742ba50379f867
| 10,084
|
py
|
Python
|
danmu/danmaku/egame.py
|
simplecelery/zhibo
|
f1b69dabfde6cd2fc8a8a7fc4112da99feaf778f
|
[
"Apache-2.0"
] | 4
|
2021-11-21T15:30:32.000Z
|
2022-03-11T02:49:30.000Z
|
danmu/danmaku/egame.py
|
simplecelery/zhibo
|
f1b69dabfde6cd2fc8a8a7fc4112da99feaf778f
|
[
"Apache-2.0"
] | 1
|
2021-11-11T15:44:44.000Z
|
2021-11-11T15:44:44.000Z
|
danmu/danmaku/egame.py
|
simplecelery/zhibo
|
f1b69dabfde6cd2fc8a8a7fc4112da99feaf778f
|
[
"Apache-2.0"
] | 9
|
2021-09-24T03:26:21.000Z
|
2022-03-23T01:32:15.000Z
|
import aiohttp
import struct
import json
import re
class eGame:
heartbeat = b'\x00\x00\x00\x12\x00\x12\x00\x01\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00'
heartbeatInterval = 60
@staticmethod
async def get_ws_info(url):
rid = url.split('/')[-1]
page_id = aid = rid
headers = {
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'
}
async with aiohttp.ClientSession() as session:
async with session.get('https://m.egame.qq.com/live?anchorid' + rid, headers=headers) as resp:
res = await resp.text()
res_ = re.findall(r'"videoInfo":(.*),"h5Url"', res)[0]
str_id = json.loads(res_)['pid']
params = {
'param': json.dumps({"0":{"module":"pgg.ws_token_go_svr.DefObj","method":"get_token","param":{"scene_flag":16,"subinfo":{"page":{"scene":1,"page_id":int(page_id),"str_id":str(str_id),"msg_type_list":[1,2]}},"version":1,"message_seq":-1,"dc_param":{"params":{"info":{"aid":aid}},"position":{"page_id":"QG_HEARTBEAT_PAGE_LIVE_ROOM"},"refer":{}},"other_uid":0}}})
}
async with session.post('https://share.egame.qq.com/cgi-bin/pgg_async_fcgi', data=params, headers=headers) as resp:
res = json.loads(await resp.text())
token = res['data']['0']['retBody']['data']['token']
            # start assembling reg_datas
reg_datas = []
tokenbuf = token.encode('ascii')
bodybuf = struct.pack('!Bi', 7, len(tokenbuf)) + tokenbuf
headerbuf = struct.pack('!ihhhihh', 18 + len(bodybuf), 18, 1, 1, 0, 0, 0)
data = headerbuf + bodybuf
reg_datas.append(data)
reg_datas.append(eGame.heartbeat)
return 'wss://barragepush.egame.qq.com/sub', reg_datas
@staticmethod
def decode_msg(data):
"""
        type: 0, 3, 9 user chat; 7, 33 gift info; 29, 35 welcome info; 24, 31 system notice; 23 follow info
"""
msgs = []
msg = {}
s = MessageDecode(data)
body = s.v()['body']
if body:
bin_datas = body['bin_data']
for bin_data in bin_datas:
# if bin_data['type'] in (0, 3, 9):
if bin_data.get('type', '') in (0, 3, 9):
msg['name'] = bin_data['nick']
msg['content'] = bin_data['content']
msg['msg_type'] = 'danmaku'
else:
msg = {'name': '', 'content': '', 'msg_type': 'other'}
msgs.append(msg.copy())
return msgs
else:
msg = {'name': '', 'content': '', 'msg_type': 'None'}
msgs.append(msg.copy())
return msgs
class MessageDecode:
"""
    Unpack the binary payload, mirroring the decoding steps of the original JS client
"""
def __init__(self, data):
self.data = data
self.ie = {
'event_id': 0,
'msg_type': 1,
'bin_data': 2,
'params': 3,
'start_tm': 4,
'data_list': 6,
'end_tm': 5,
'message_seq': 7,
}
self.ne = {
'uid': 0,
'msgid': 1,
'nick': 2,
'content': 3,
'tm': 4,
'type': 5,
'scenes_flag': 6,
'ext': 7,
'send_scenes': 8
}
self.oe = {
'event_id': 0,
'event_name': 1,
'info': 2,
'params': 3,
'bin_data': 4
}
def v(self):
data = self.data
startPosition = 18
endPosition, = struct.unpack_from('!i', data, 0)
seq, = struct.unpack_from('!i', data, 10)
operation, = struct.unpack_from('!h', data, 8)
if endPosition != len(data):
raise Exception('The received packet length is abnormal')
return {
'seq': seq,
'operation': operation,
'body': self.w(operation, startPosition, endPosition, data)
}
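    # (Added note, inferred from v() above) Wire format of a frame:
    #   bytes 0-3   total packet length ('!i')
    #   bytes 8-9   operation code ('!h'); 3 carries a message body
    #   bytes 10-13 sequence number ('!i')
    #   byte  18    start of the body (the header is 18 bytes)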
def w(self, operation, startPosition, endPosition, data):
if operation == 3:
return self.x(startPosition, endPosition, data)
else:
return None
def x(self, startPosition, endPosition, data):
i, = struct.unpack_from('!i', data, startPosition)
n = data[startPosition: endPosition]
if len(n) >= (4 + i):
o = n[4:(4 + i)]
a = self.S(o)
y = self.ye(a)
return y
else:
return None
def ye(self, e):
return self.T({
'resultObj': e,
'template': self.ie,
'afterChange': 1,
})
def afterChange(self, e, t, i, n, o):
if t == 'bin_data':
v = []
ve = {}
for m in n:
a = self.S(e, m['ext'])
b = o['msg_type']
if b == 1:
ve = self.T({
'resultObj': a,
'template': self.ne
})
elif b == 2:
ve = self.T({
'resultObj': a,
'template': self.oe
})
v.append(ve.copy())
return v
else:
return n
def T(self, e):
i = e['resultObj']
n = e['template']
o = e.get('beforeChange', '')
r = e.get('afterChange', '')
a = {}
for s in n.keys():
for t in i[0]:
if t['tag'] == n[s]:
q = t
p = q['value']
c = q['ext']
if r:
a[s] = self.afterChange(i[1], s, c, p, a)
else:
a[s] = p
break
return a
def S(self, e, t=0):
if t == '':
t = 0
i = []
n = len(e)
while t < n:
o = self.m(e, t)
dict_ = {
'value': o['value'],
'lastPosition': o['position'],
'ext': o['ext'],
'tag': o['tag']
}
i.append(dict_.copy())
t = o['position']
return i, e
    def m(self, e, t):
        # Read one tag/type byte and dispatch to the matching field decoder.
        value = position = ext = ''
        i = e
        a, = struct.unpack_from('!B', i, t)
        tag = (240 & a) >> 4   # high nibble: field tag
        wire_type = 15 & a     # low nibble: wire type
        s_position = t + 1
        if wire_type == 0:
            value, position = self.f0(i, s_position)
        elif wire_type == 1:
            value, position = self.f1(i, s_position)
        elif wire_type == 2:
            value, position = self.f2(i, s_position)
        elif wire_type == 3:
            value, position = self.f3(i, s_position)
        elif wire_type == 6:
            value, position, ext = self.f6(i, s_position)
        elif wire_type == 7:
            value, position, ext = self.f7(i, s_position)
        elif wire_type == 8:
            value, position = self.f8(i, s_position)
        elif wire_type == 9:
            value, position = self.f9(i, s_position)
        elif wire_type == 12:
            value, position = self.f12(i, s_position)
        elif wire_type == 13:
            value, position = self.f13(i, s_position)
        i = ''
        return {
            'i': i,
            'tag': tag,
            'type': wire_type,
            'value': value,
            'position': position,
            'ext': ext
        }
def f0(self, e, t):
o = 1
try:
n, = struct.unpack_from('!B', e, t)
        except struct.error:
n = ''
return n, t + o
def f1(self, e, t):
o = 2
try:
n, = struct.unpack_from('!H', e, t)
        except struct.error:
n = ''
return n, t + o
def f2(self, e, t):
o = 4
try:
n, = struct.unpack_from('!I', e, t)
        except struct.error:
n = ''
return n, t + o
def f3(self, e, t):
e = struct.unpack('!8B', e[t:t + 8])
i = (e[0] << 24) + (e[1] << 16) + (e[2] << 8) + e[3]
o = (e[4] << 24) + (e[5] << 16) + (e[6] << 8) + e[7]
value = (i << 32) + o
position = t + 8
return value, position
def f4(self, e, t):
o = 4
try:
n, = struct.unpack_from('!f', e, t)
        except struct.error:
n = ''
return n, t + o
def f5(self, e, t):
o = 8
try:
n, = struct.unpack_from('!d', e, t)
        except struct.error:
n = ''
return n, t + o
def f6(self, e, t):
n, = struct.unpack_from('!B', e, t)
r = t + 1
s = r + n
value = (e[r:s]).decode('utf8', errors='ignore')
return value, s, r
def f7(self, e, t):
n, = struct.unpack_from('!I', e, t)
r = t + 4
s = r + n
value = (e[r:s]).decode('utf8', errors='ignore')
return value, s, r
def f8(self, e, t):
i = {}
b = self.m(e, t)
o = b['value']
r = b['position']
while o > 0:
a = self.m(e, r)
s = self.m(e, a['position'])
if a['tag'] == 0 and s['tag'] == 1:
i[a['value']] = s['value']
r = s['position']
o -= 1
return i, r
def f9(self, e, t):
i = self.m(e, t)
n = i['value']
o = i['position']
r = []
while n > 0:
a = self.m(e, o)
r.append(a.copy())
o = a['position']
n -= 1
return r, o
def f10(self, e, t):
i = []
while True:
n = self.m(e, t)
t = n['position']
if n['type'] == 11:
return i, t
i.append(n['value'].copy())
def f11(self, e, t):
return '', t
def f12(self, e, t):
return 0, t
def f13(self, e, t):
i = self.m(e, t)
return e[(t + i['position']):i['value']], t + i['position'] + i['value']
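The pack/unpack calls above pin down the wire framing of the barrage socket: the registration packet and the decoder agree on an 18-byte big-endian header, with the total length at offset 0, the operation code at offset 8, and the sequence number at offset 10. A minimal sketch of that framing, derived only from the struct format strings above (the meaning of the two trailing shorts is an assumption):

import struct

def build_frame(body: bytes, operation: int = 1, seq: int = 0) -> bytes:
    """Frame a payload the same way the registration packet above is framed.

    Header layout ('!ihhhihh', byte offsets):
      0  int32  total packet length (header + body)
      4  int16  header length, always 18
      6  int16  version (1 in the code above)
      8  int16  operation (the decoder treats 3 as a message payload)
      10 int32  sequence number
      14 int16  reserved (assumption)
      16 int16  reserved (assumption)
    """
    return struct.pack('!ihhhihh', 18 + len(body), 18, 1, operation, seq, 0, 0) + body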
| 28.485876
| 380
| 0.416501
| 1,231
| 10,084
| 3.340374
| 0.192526
| 0.015078
| 0.024805
| 0.030642
| 0.218872
| 0.126459
| 0.101167
| 0.069796
| 0.062986
| 0.037451
| 0
| 0.040636
| 0.426517
| 10,084
| 353
| 381
| 28.566572
| 0.670413
| 0.011503
| 0
| 0.207921
| 0
| 0.006601
| 0.128122
| 0.018433
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079208
| false
| 0
| 0.013201
| 0.009901
| 0.19802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1bd46a35b1176540180e5d836f7a6d20314a7dc
| 3,703
|
py
|
Python
|
lib/cogs/reactionpolls.py
|
pille1842/gerfroniabot
|
291dc8f3cf9fb00f3f5e89e36b066660a410026f
|
[
"MIT"
] | null | null | null |
lib/cogs/reactionpolls.py
|
pille1842/gerfroniabot
|
291dc8f3cf9fb00f3f5e89e36b066660a410026f
|
[
"MIT"
] | null | null | null |
lib/cogs/reactionpolls.py
|
pille1842/gerfroniabot
|
291dc8f3cf9fb00f3f5e89e36b066660a410026f
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from discord import Embed
from discord.ext.commands import Cog
from discord.ext.commands import command
import logging
class Reactionpolls(Cog):
NUMBERS = [
"1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣", "🔟"
]
def __init__(self, bot):
self.bot = bot
self.log = logging.getLogger("gerfroniabot.reactionpolls")
self.polls = []
@Cog.listener()
async def on_ready(self):
if not self.bot.ready:
self.bot.cogs_ready.ready_up("reactionpolls")
self.log.info("Reactionpolls cog ready")
@command(name="umfrage", aliases=["umf"], brief="Erstelle eine offene Umfrage")
async def make_poll(self, ctx, minutes: int, question: str, *options):
"""
Erstelle eine offene Umfrage, auf die alle anderen Mitglieder mit Emojis reagieren können, um abzustimmen.
Der erste Parameter ist die Dauer in Minuten, nach der der Bot das Ergebnis bekanntgeben wird. Der zweite
Parameter, der gegebenenfalls in "Anführungszeichen" gesetzt werden muss, wenn er Leerzeichen enthält, ist
die Frage, die du den Mitgliedern stellen möchtest. Alle weiteren Parameter (durch Leerzeichen getrennt)
werden als Antwortmöglichkeiten hinzugefügt. Du kannst höchstens zehn Optionen angeben.
"""
if minutes < 1 or minutes > 120:
await ctx.send(":ballot_box_with_check: Die Umfragedauer muss zwischen 1 und 120 Minuten liegen.")
return
if len(options) > 10:
await ctx.send(":ballot_box_with_check: Du kannst nicht mehr als 10 Antwortmöglichkeiten festlegen.")
return
embed = Embed(
title=f":ballot_box_with_check: {question}",
description=f"Umfrage von {ctx.author.display_name}",
timestamp=datetime.utcnow(),
colour=ctx.author.colour
)
run_until = datetime.now() + timedelta(minutes=minutes)
fields= [("Antwortmöglichkeiten", "\n".join([f"{self.NUMBERS[idx]} {option}" for idx, option in enumerate(options)]), False),
("Hilfe", f"Reagiere mit der entsprechenden Zahl auf diese Nachricht, um abzustimmen. "
f"Die Umfrage läuft bis {run_until.strftime('%H:%M')} Uhr.", False)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
message = await ctx.send(embed=embed)
for emoji in self.NUMBERS[:len(options)]:
await message.add_reaction(emoji)
self.polls.append(message.id)
self.bot.scheduler.add_job(self.complete_poll, "date", run_date=run_until, args=[message.channel.id, message.id])
    async def complete_poll(self, channel_id, message_id):
        message = await self.bot.get_channel(channel_id).fetch_message(message_id)
        most_voted = max(message.reactions, key=lambda r: r.count)
        await message.channel.send(f":ballot_box_with_check: Die Abstimmung ist beendet. Option {most_voted.emoji} hat mit {most_voted.count-1} Stimmen gewonnen.")
        # Stop enforcing single votes once the poll has ended
        self.polls.remove(message_id)
@Cog.listener()
async def on_raw_reaction_add(self, payload):
if payload.message_id in self.polls:
message = await self.bot.get_channel(payload.channel_id).fetch_message(payload.message_id)
for reaction in message.reactions:
if (not payload.member.bot
and payload.member in await reaction.users().flatten()
and reaction.emoji != payload.emoji.name):
await message.remove_reaction(reaction.emoji, payload.member)
def setup(bot):
bot.add_cog(Reactionpolls(bot))
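For reference, a hedged sketch of how the command is invoked from Discord (the "!" prefix is an assumption; the actual prefix depends on the bot's configuration):

# !umfrage 30 "Wohin fahren wir?" Berlin Hamburg Bremen
#
# This creates a poll that runs for 30 minutes with three numbered reaction
# options. When run_until passes, the scheduler fires complete_poll, which
# announces the reaction with the highest count, minus one for the bot's
# own seed reaction.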
| 44.614458
| 163
| 0.654604
| 475
| 3,703
| 5.04
| 0.412632
| 0.020468
| 0.021721
| 0.030075
| 0.108605
| 0.04929
| 0.025063
| 0
| 0
| 0
| 0
| 0.007788
| 0.237105
| 3,703
| 82
| 164
| 45.158537
| 0.83292
| 0
| 0
| 0.068966
| 0
| 0.017241
| 0.215311
| 0.054864
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.086207
| 0
| 0.189655
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1be71acaff6d8c302bc2e4dd7fae486925372c6
| 5,975
|
py
|
Python
|
scripts/visualize_image_dataset.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | 1
|
2021-11-02T09:54:41.000Z
|
2021-11-02T09:54:41.000Z
|
scripts/visualize_image_dataset.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | null | null | null |
scripts/visualize_image_dataset.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | 2
|
2020-04-17T10:50:06.000Z
|
2021-11-02T09:54:47.000Z
|
"""
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Sergei Vostrikov, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Basic libraries
import argparse
import numpy as np
import sys
from os.path import dirname, abspath
from pybf.pybf.io_interfaces import ImageLoader
from pybf.pybf.visualization import plot_image
def visualize_image_dataset(path_to_img_dataset,
save_path=None,
save_visualized_images=False,
show_images=True,
frames_to_plot=None,
low_res_img_to_plot=None,
db_range=None):
# Load beamformed images
imgLoader = ImageLoader(path_to_img_dataset)
# Check path to save images
if save_path is None:
# Construct save path (save to dataset folder)
len_to_cut = len(path_to_img_dataset.split('/')[-1])
save_path = path_to_img_dataset[:-1 - len_to_cut]
# Check simulation flag
if imgLoader._simulation_flag:
scs_coords_xz = imgLoader.get_scatters_coords()[[0,1],:]
else:
scs_coords_xz = None
# Get the coordinates of transducer elements
elements_coord = imgLoader.get_elements_coords()
# Calculate image sizes
pixels_coords = imgLoader.get_pixels_coords()
image_size_x_0 = pixels_coords[0, :].min()
image_size_x_1 = pixels_coords[0, :].max()
image_size_z_0 = pixels_coords[1, :].min()
image_size_z_1 = pixels_coords[1, :].max()
# Check the frames_to_plot list
if frames_to_plot is not None:
        if len(frames_to_plot) == 0:
frames_to_plot = imgLoader.frame_indices
else:
frames_to_plot = []
# Check the low_res_img_to_plot list
if low_res_img_to_plot is not None:
        if len(low_res_img_to_plot) == 0:
low_res_img_to_plot = imgLoader.lri_indices
else:
low_res_img_to_plot = []
    # Iterate over frames and low resolution images
for n_frame in frames_to_plot:
# Plot Low Resolution Images
for n_lri in low_res_img_to_plot:
# Get data
img_data = imgLoader.get_low_res_image(n_frame, n_lri)
# Extract envelope
img_data = np.abs(img_data)
plot_image(img_data,
elements_coords_xz=elements_coord,
title='Frame ' + str(n_frame) +' LRI ' + str(n_lri),
image_x_range=[image_size_x_0, image_size_x_1],
image_z_range=[image_size_z_0, image_size_z_1],
db_range=db_range,
scatters_coords_xz=scs_coords_xz,
framework='plotly',
save_fig=save_visualized_images,
show=show_images,
path_to_save=save_path)
# Plot High Resolution Image
# Get data
img_data = imgLoader.get_high_res_image(n_frame)
# Extract envelope
img_data = np.abs(img_data)
plot_image(img_data,
elements_coords_xz=elements_coord,
title='Frame ' + str(n_frame) +' HRI',
image_x_range=[image_size_x_0, image_size_x_1],
image_z_range=[image_size_z_0, image_size_z_1],
db_range=db_range,
scatters_coords_xz=scs_coords_xz,
framework='plotly',
save_fig=save_visualized_images,
show=show_images,
path_to_save=save_path)
# Close the file with beamformed images
imgLoader.close_file()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--path_to_img_dataset',
type=str,
default='',
help='Path to the image dataset file.')
def str2bool(v):
if isinstance(v, bool):
return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
# Parameters for visualization
parser.add_argument(
'--save_visualized_images',
type=str2bool,
nargs='?',
const=True,
default=False,
help='Flag to save visualized images.')
parser.add_argument(
'--frames_to_plot',
type=int,
nargs="+",
default=None,
help='Space separated list of frames to plot.\
"[]" - plot all frames. "None" - plot none.')
parser.add_argument(
'--low_res_img_to_plot',
type=int,
nargs="+",
default=None,
help='Space separated list of low resolution images to plot.\
"[]" - plot all frames. "None" - plot none.')
parser.add_argument(
'--db_range',
type=float,
default=None,
        help='Decibels range for log compression of images.')
FLAGS, unparsed = parser.parse_known_args()
# Run main function
    visualize_image_dataset(FLAGS.path_to_img_dataset,
                            save_visualized_images=FLAGS.save_visualized_images,
                            frames_to_plot=FLAGS.frames_to_plot,
                            low_res_img_to_plot=FLAGS.low_res_img_to_plot,
                            db_range=FLAGS.db_range)
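A hedged invocation sketch (the dataset path is hypothetical; str2bool accepts the usual yes/no spellings for the boolean flag):

# python scripts/visualize_image_dataset.py \
#     --path_to_img_dataset /data/experiment_1/image_dataset.hdf5 \
#     --save_visualized_images true \
#     --frames_to_plot 0 1 2 \
#     --low_res_img_to_plot 0 \
#     --db_range 40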
| 33.757062
| 75
| 0.59364
| 748
| 5,975
| 4.433155
| 0.26738
| 0.036188
| 0.036188
| 0.029855
| 0.308504
| 0.264777
| 0.241255
| 0.229192
| 0.229192
| 0.229192
| 0
| 0.008441
| 0.325858
| 5,975
| 177
| 76
| 33.757062
| 0.814796
| 0.186611
| 0
| 0.347826
| 0
| 0
| 0.06414
| 0.013744
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017391
| false
| 0
| 0.052174
| 0
| 0.104348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1bef0f9641ed3b8503a1d2834c347e28d936599
| 4,725
|
py
|
Python
|
tests/ut/python/dataset/test_datasets_get_dataset_size.py
|
unseenme/mindspore
|
4ba052f0cd9146ac0ccc4880a778706f1b2d0af8
|
[
"Apache-2.0"
] | 7
|
2020-05-24T03:19:26.000Z
|
2020-05-24T03:20:00.000Z
|
tests/ut/python/dataset/test_datasets_get_dataset_size.py
|
liyong126/mindspore
|
930a1fb0a8fa9432025442c4f4732058bb7af592
|
[
"Apache-2.0"
] | 7
|
2020-03-30T08:31:56.000Z
|
2020-04-01T09:54:39.000Z
|
tests/ut/python/dataset/test_datasets_get_dataset_size.py
|
liyong126/mindspore
|
930a1fb0a8fa9432025442c4f4732058bb7af592
|
[
"Apache-2.0"
] | 1
|
2020-03-30T17:07:43.000Z
|
2020-03-30T17:07:43.000Z
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import mindspore.dataset as ds
IMAGENET_RAWDATA_DIR = "../data/dataset/testImageNetData2/train"
IMAGENET_TFFILE_DIR = ["../data/dataset/test_tf_file_3_images2/train-0000-of-0001.data",
"../data/dataset/test_tf_file_3_images2/train-0000-of-0002.data",
"../data/dataset/test_tf_file_3_images2/train-0000-of-0003.data",
"../data/dataset/test_tf_file_3_images2/train-0000-of-0004.data"]
MNIST_DATA_DIR = "../data/dataset/testMnistData"
MANIFEST_DATA_FILE = "../data/dataset/testManifestData/test.manifest"
CIFAR10_DATA_DIR = "../data/dataset/testCifar10Data"
CIFAR100_DATA_DIR = "../data/dataset/testCifar100Data"
def test_imagenet_rawdata_dataset_size():
ds_total = ds.ImageFolderDatasetV2(IMAGENET_RAWDATA_DIR)
assert ds_total.get_dataset_size() == 6
ds_shard_1_0 = ds.ImageFolderDatasetV2(IMAGENET_RAWDATA_DIR, num_shards=1, shard_id=0)
assert ds_shard_1_0.get_dataset_size() == 6
ds_shard_2_0 = ds.ImageFolderDatasetV2(IMAGENET_RAWDATA_DIR, num_shards=2, shard_id=0)
assert ds_shard_2_0.get_dataset_size() == 3
ds_shard_3_0 = ds.ImageFolderDatasetV2(IMAGENET_RAWDATA_DIR, num_shards=3, shard_id=0)
assert ds_shard_3_0.get_dataset_size() == 2
def test_imagenet_tf_file_dataset_size():
ds_total = ds.TFRecordDataset(IMAGENET_TFFILE_DIR)
assert ds_total.get_dataset_size() == 12
ds_shard_1_0 = ds.TFRecordDataset(IMAGENET_TFFILE_DIR, num_shards=1, shard_id=0)
assert ds_shard_1_0.get_dataset_size() == 12
ds_shard_2_0 = ds.TFRecordDataset(IMAGENET_TFFILE_DIR, num_shards=2, shard_id=0)
assert ds_shard_2_0.get_dataset_size() == 6
ds_shard_3_0 = ds.TFRecordDataset(IMAGENET_TFFILE_DIR, num_shards=3, shard_id=0)
assert ds_shard_3_0.get_dataset_size() == 4
def test_mnist_dataset_size():
ds_total = ds.MnistDataset(MNIST_DATA_DIR)
assert ds_total.get_dataset_size() == 10000
ds_shard_1_0 = ds.MnistDataset(MNIST_DATA_DIR, num_shards=1, shard_id=0)
assert ds_shard_1_0.get_dataset_size() == 10000
ds_shard_2_0 = ds.MnistDataset(MNIST_DATA_DIR, num_shards=2, shard_id=0)
assert ds_shard_2_0.get_dataset_size() == 5000
ds_shard_3_0 = ds.MnistDataset(MNIST_DATA_DIR, num_shards=3, shard_id=0)
assert ds_shard_3_0.get_dataset_size() == 3334
def test_manifest_dataset_size():
ds_total = ds.ManifestDataset(MANIFEST_DATA_FILE)
assert ds_total.get_dataset_size() == 4
ds_shard_1_0 = ds.ManifestDataset(MANIFEST_DATA_FILE, num_shards=1, shard_id=0)
assert ds_shard_1_0.get_dataset_size() == 4
ds_shard_2_0 = ds.ManifestDataset(MANIFEST_DATA_FILE, num_shards=2, shard_id=0)
assert ds_shard_2_0.get_dataset_size() == 2
ds_shard_3_0 = ds.ManifestDataset(MANIFEST_DATA_FILE, num_shards=3, shard_id=0)
assert ds_shard_3_0.get_dataset_size() == 2
def test_cifar10_dataset_size():
ds_total = ds.Cifar10Dataset(CIFAR10_DATA_DIR)
assert ds_total.get_dataset_size() == 10000
ds_shard_1_0 = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_shards=1, shard_id=0)
assert ds_shard_1_0.get_dataset_size() == 10000
ds_shard_2_0 = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_shards=2, shard_id=0)
assert ds_shard_2_0.get_dataset_size() == 5000
ds_shard_3_0 = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_shards=3, shard_id=0)
assert ds_shard_3_0.get_dataset_size() == 3334
ds_shard_7_0 = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_shards=7, shard_id=0)
assert ds_shard_7_0.get_dataset_size() == 1429
def test_cifar100_dataset_size():
ds_total = ds.Cifar100Dataset(CIFAR100_DATA_DIR)
assert ds_total.get_dataset_size() == 10000
ds_shard_1_0 = ds.Cifar100Dataset(CIFAR100_DATA_DIR, num_shards=1, shard_id=0)
assert ds_shard_1_0.get_dataset_size() == 10000
ds_shard_2_0 = ds.Cifar100Dataset(CIFAR100_DATA_DIR, num_shards=2, shard_id=0)
assert ds_shard_2_0.get_dataset_size() == 5000
ds_shard_3_0 = ds.Cifar100Dataset(CIFAR100_DATA_DIR, num_shards=3, shard_id=0)
assert ds_shard_3_0.get_dataset_size() == 3334
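Every sharded expectation in these tests follows the same rule: the per-shard size is the total size divided by num_shards, rounded up. A small sketch that reproduces the expected values (plain ceiling division; this mirrors the asserts above and is not MindSpore API):

import math

def sharded_size(total, num_shards):
    # e.g. 10000 over 3 shards -> 3334, 10000 over 7 shards -> 1429
    return math.ceil(total / num_shards)

assert sharded_size(10000, 3) == 3334
assert sharded_size(10000, 7) == 1429
assert sharded_size(12, 2) == 6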
| 41.447368
| 90
| 0.748995
| 754
| 4,725
| 4.278515
| 0.155172
| 0.082455
| 0.108493
| 0.082455
| 0.735276
| 0.641971
| 0.614073
| 0.590515
| 0.424675
| 0.424675
| 0
| 0.073147
| 0.137778
| 4,725
| 113
| 91
| 41.814159
| 0.718704
| 0.13545
| 0
| 0.212121
| 0
| 0
| 0.104423
| 0.104423
| 0
| 0
| 0
| 0
| 0.378788
| 1
| 0.090909
| false
| 0
| 0.015152
| 0
| 0.106061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1bfe6581b046ee9479ce7089c84c5e5bea00961
| 4,651
|
py
|
Python
|
tobiko/shell/iperf/_interface.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | 1
|
2022-01-11T20:50:06.000Z
|
2022-01-11T20:50:06.000Z
|
tobiko/shell/iperf/_interface.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | null | null | null |
tobiko/shell/iperf/_interface.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from oslo_log import log
import tobiko
from tobiko.shell import sh
LOG = log.getLogger(__name__)
def get_iperf_command(parameters, ssh_client):
interface = get_iperf_interface(ssh_client=ssh_client)
return interface.get_iperf_command(parameters)
def get_iperf_interface(ssh_client):
manager = tobiko.setup_fixture(IperfInterfaceManager)
interface = manager.get_iperf_interface(ssh_client=ssh_client)
tobiko.check_valid_type(interface, IperfInterface)
return interface
class IperfInterfaceManager(tobiko.SharedFixture):
def __init__(self):
super(IperfInterfaceManager, self).__init__()
self.client_interfaces = {}
self.interfaces = []
self.default_interface = IperfInterface()
def add_iperf_interface(self, interface):
LOG.debug('Register iperf interface %r', interface)
self.interfaces.append(interface)
def get_iperf_interface(self, ssh_client):
try:
return self.client_interfaces[ssh_client]
except KeyError:
pass
LOG.debug('Assign default iperf interface to SSH client %r',
ssh_client)
self.client_interfaces[ssh_client] = self.default_interface
return self.default_interface
class IperfInterface(object):
def get_iperf_command(self, parameters):
command = sh.shell_command(['iperf3'] +
self.get_iperf_options(parameters))
LOG.debug(f'Got iperf command: {command}')
return command
def get_iperf_options(self, parameters):
options = []
port = parameters.port
if port:
options += self.get_port_option(port)
timeout = parameters.timeout
if timeout and parameters.mode == 'client':
options += self.get_timeout_option(timeout)
output_format = parameters.output_format
if output_format:
options += self.get_output_format_option(output_format)
bitrate = parameters.bitrate
if bitrate and parameters.mode == 'client':
options += self.get_bitrate_option(bitrate)
download = parameters.download
if download and parameters.mode == 'client':
options += self.get_download_option(download)
protocol = parameters.protocol
if protocol and parameters.mode == 'client':
options += self.get_protocol_option(protocol)
options += self.get_mode_option(parameters)
return options
@staticmethod
def get_mode_option(parameters):
mode = parameters.mode
if not mode or mode not in ('client', 'server'):
raise ValueError('iperf mode values allowed: [client|server]')
elif mode == 'client' and not parameters.ip:
raise ValueError('iperf client mode requires a destination '
'IP address')
elif mode == 'client':
return ['-c', parameters.ip]
else: # mode == 'server'
return ['-s', '-D'] # server mode is executed with daemon mode
@staticmethod
def get_download_option(download):
if download:
return ['-R']
else:
return []
@staticmethod
def get_protocol_option(protocol):
if protocol == 'tcp':
return []
elif protocol == 'udp':
return ['-u']
else:
raise ValueError('iperf protocol values allowed: [tcp|udp]')
@staticmethod
def get_timeout_option(timeout):
return ['-t', timeout]
@staticmethod
def get_output_format_option(output_format):
if output_format == 'json':
return ['-J']
else:
raise ValueError('iperf output format values allowed: '
'[json]')
@staticmethod
def get_port_option(port):
return ['-p', port]
@staticmethod
def get_bitrate_option(bitrate):
return ['-b', bitrate]
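To make the option assembly concrete, a hedged sketch of what get_iperf_options returns for a hypothetical client parameters object; the field names mirror the attributes read above, and options are appended in the order the method checks them:

from types import SimpleNamespace

# Hypothetical parameters object (not a real tobiko fixture).
params = SimpleNamespace(mode='client', ip='10.0.0.2', port=5201, timeout=10,
                         output_format='json', bitrate=None, protocol='udp',
                         download=True)
# IperfInterface().get_iperf_options(params) would then return, in check order:
#   ['-p', 5201, '-t', 10, '-J', '-R', '-u', '-c', '10.0.0.2']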
| 31.425676
| 78
| 0.641797
| 522
| 4,651
| 5.538314
| 0.289272
| 0.024905
| 0.033898
| 0.031823
| 0.141128
| 0.098236
| 0.075406
| 0
| 0
| 0
| 0
| 0.002656
| 0.27134
| 4,651
| 147
| 79
| 31.639456
| 0.850398
| 0.14212
| 0
| 0.128713
| 0
| 0
| 0.090383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138614
| false
| 0.009901
| 0.039604
| 0.029703
| 0.356436
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1c1735ef2cb4649ea44c8972cfcfb01cf792d82
| 512
|
py
|
Python
|
Tensorflow_official/cnn/.ipynb_checkpoints/test.py
|
starkidstory/OmegaTensor
|
2a80d38236a7ce6d6460be59528b33227d98b93b
|
[
"MIT"
] | 2
|
2020-04-07T03:01:03.000Z
|
2020-04-16T14:33:21.000Z
|
Tensorflow_official/cnn/.ipynb_checkpoints/test.py
|
starkidstory/OmegaTensor
|
2a80d38236a7ce6d6460be59528b33227d98b93b
|
[
"MIT"
] | null | null | null |
Tensorflow_official/cnn/.ipynb_checkpoints/test.py
|
starkidstory/OmegaTensor
|
2a80d38236a7ce6d6460be59528b33227d98b93b
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#print(np.version.version)
#np.set_printoptions(precision=4)
dataset = tf.data.Dataset.from_tensor_slices([8, 3, 0, 8, 2, 1])
num = np.arange(5)
numT = tf.convert_to_tensor(num)
numF = tf.cast(numT, dtype=tf.float32)
print(numT)
print(numF)
print(dataset)
mat = tf.convert_to_tensor(np.zeros([3, 3]))
print(mat)
small_list = tf.convert_to_tensor([1, 2, 3], dtype=tf.float64)
print(small_list)
print(np.random.randint(0, 5))
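For orientation, the dtypes this snippet is expected to print (a sketch; NumPy's default integer width varies by platform):

# numT inherits NumPy's default integer dtype, e.g. tf.Tensor([0 1 2 3 4], dtype=int64)
# numF holds the same values after the cast:     tf.Tensor([0. 1. 2. 3. 4.], dtype=float32)
# mat and small_list are created directly as float64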
| 24.380952
| 57
| 0.785156
| 96
| 512
| 4.072917
| 0.46875
| 0.069054
| 0.084399
| 0.130435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039666
| 0.064453
| 512
| 21
| 58
| 24.380952
| 0.776618
| 0.111328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.294118
| 0
| 0.294118
| 0.352941
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1c51e9ef39f4d3feeb1e7c57ea1abdeb37eef20
| 18,815
|
py
|
Python
|
src/data/SatelitteSolarPowerSystemV4.py
|
j1996/EPM_UCC
|
cf2218c7681966963a179aea043328a2343f92fb
|
[
"MIT"
] | null | null | null |
src/data/SatelitteSolarPowerSystemV4.py
|
j1996/EPM_UCC
|
cf2218c7681966963a179aea043328a2343f92fb
|
[
"MIT"
] | null | null | null |
src/data/SatelitteSolarPowerSystemV4.py
|
j1996/EPM_UCC
|
cf2218c7681966963a179aea043328a2343f92fb
|
[
"MIT"
] | null | null | null |
import numpy as np
import trimesh
try:
from Satellite_Panel_Solar import Panel_Solar
from SatelitteActitud import SatelitteActitud
except:
from src.data.Satellite_Panel_Solar import Panel_Solar
from src.data.SatelitteActitud import SatelitteActitud
# noinspection SpellCheckingInspection
"""Satelitte Solar Power System
Es una clase donde se incluye todo el sistema de potencia del satelitte, permite incluir un modelo en CAD
y realizar su analisis de potencia dependiento de un vector utilizado como la direccion del sol hacia el satelite
Example:
Para llamar a esta clase solo hace falta, una linea como la de abajo:
$ Sat = SatelitteSolarPowerSystem(direccion='models/12U.stl')
Esta clase tiene varios atributos incluidos como la caracteristica de cada panel solar
para ello solo se necesita llamar a la clase con:
$ from Satellite_Panel_Solar import Panel_Solar
$ Sat.caracteristicas_panel_solar=[Panel_solar()]
Para ver como se configura cada Panel_solar hay que remitirse a su documentacion
Finalmente notar que el atributo mesh incluye todos aquellos del paquete trimesh
"""
class SatelitteSolarPowerSystem(object):
def __init__(self, direccion, SatelitteActitud, panel_despegable_dual=True, Despegables_orientables=False):
"""Se inicia la clase con el analisis de la figura para
encontrar paneles despegables, sombra, etc.
Args:
direccion: string con la direcion del archivo y con el tipo del archivo Ex. .STL, .OBJ, .PLY
panel_despegable_dual: Default(True)
"""
self.mesh = self.cargar_modelo(direccion)
self.numero_caras = len(self.mesh.facets)
        # Face normals at time 0
self.Normales_caras = self.mesh.facets_normal
self.Area_caras = self.mesh.facets_area
self.caracteristicas_panel_solar = [
Panel_Solar('Estandar')] * self.numero_caras
self.Caras_Despegables = self.caras_despegables()
self.sombra = self.posible_sombra()
self.mesh.vertices -= self.mesh.centroid
self.sun_plane = self.puntos_sol()
self.panel_despegable_dual = panel_despegable_dual
self.name = self.nombrar_caras()
self.actitud = SatelitteActitud
self.Despegables_orientables = Despegables_orientables
def cargar_modelo(self, direccion):
"""
cargar_modelo
Args:
direccion: string con la direcion del y con el tipo del archivo Ex. .STL, .OBJ, .PLY
Returns:
trimesh.mesh
"""
return trimesh.load_mesh(direccion)
def nombrar_caras(self):
"""
nombrar_caras
Nombra las caras del modelo para poder utilizarlas se realiza al principio porque si se gira cambiara
Simplemente nombra las caras con X, Y, Z
Returns:
name: devuelve el nombre de las caras de manera X, Y, Z
"""
name = []
j = 0
o = 0
for i in self.mesh.facets_normal:
i = np.round(i)
if (i == [1, 0, 0]).all():
name.append('X+')
elif (i == [-1, 0, 0]).all():
name.append('X-')
elif (i == [0, 1, 0]).all():
name.append('Y+')
elif (i == [0, -1, 0]).all():
name.append('Y-')
elif (i == [0, 0, -1]).all():
name.append('Z-')
elif (i == [0, 0, 1]).all():
name.append('Z+')
else:
name.append(f'Panel direction {i}')
if j in self.Caras_Despegables:
name[j] = name[j] + f' Panel Despegable {o}'
o += 1
j += 1
return name
def caras_despegables(self):
"""
caras_despegables
Localiza los paneles despegables, es un metodo bastante dificil
Returns:
caras_despeables: es el numero de las caras
"""
caras_despegables = []
        # Faces that meet others without enclosing volume, such as the panels,
        # are reported as broken by trimesh, which lets us locate them
for i in np.arange(0, len(trimesh.repair.broken_faces(self.mesh))):
caras_despegables.append(
np.array(np.where(self.mesh.facets == trimesh.repair.broken_faces(self.mesh)[i])).flatten()[0])
        # these are the deployable faces
        # remove duplicates
caras_despegables = list(set(caras_despegables))
return caras_despegables
def posible_sombra(self):
"""
posible_sombra
buscar la cara mas cercana a los paneles que puede dar sombra
Returns:
sombra:numero de las caras que pueden tener sombra
"""
sombra = np.array(
np.where(self.mesh.facets_on_hull == False)).flatten()
return sombra
def puntos_sol(self):
"""
puntos_sol
Crea un conjunto de puntos de aquellos que darian sombra
con los centros de los paneles
Returns:
trimesh.mesh : Plano de puntos
"""
p = self.mesh.facets[self.sombra].flatten()
sun_plane = self.mesh.triangles_center[p]
return sun_plane
def celdas_activas(self, sun_vector):
"""
celdas_activas
Localiza las celdas activas de un mallado al buscarse los puntos donde golpearia un rayo en la malla des los puntos_sol
Args:
sun_vector (array(,3)) : Vector sol
Returns:
index_tri [array(n)]: El numero de triangulo que esta activo al ser golpeado por el sol
"""
sun_planeAux = self.puntos_sol()+5000*sun_vector
ray_origins = sun_planeAux
ray_directions = np.array([-sun_vector] * len(sun_planeAux))
        if trimesh.ray.has_embree:  # the embree library only works on Linux, but runs ~50x faster
index_tri = self.mesh.ray.intersects_first(
ray_origins=ray_origins, ray_directions=ray_directions)
else:
locations, index_ray, index_tri = self.mesh.ray.intersects_location(ray_origins=ray_origins,
ray_directions=ray_directions,
multiple_hits=False)
index_tri = list(set(index_tri))
return index_tri
def Add_prop_Panel(self, e):
"""
Add_prop_Panel
Añade propiedades al panel
Args:
e (Panel_Solar): Panel Solar
"""
self.caracteristicas_panel_solar.append(e)
def power_panel_solar(self, index_tri, Sun_vector, WSun):
"""
power_panel_solar
Obtiene la potencia producida por el satelite con actitud fija
Args:
index_tri (array(,:)): Celdas activas por el rayo
Sun_vector (array(,3)): Vector sol en LVLH
WSun (float): Potencia irradiada por el sol
Returns:
W (array(,n)) : Potencia generada
area_potencia (array(,n)) : Areas que generan potencia
ang (array(,n)) : Angulo de incidencia del vector sol con las caras
n : numero de caras
"""
        # Dot product
ang = list(map(Sun_vector.dot, self.mesh.facets_normal))
        # Initialise the variables
area_potencia = []
W = []
for i in np.arange(0, len(self.mesh.facets)):
            # If deployable panels are treated as double-sided, a panel lit from behind still produces power
if (i in self.Caras_Despegables) & (self.panel_despegable_dual == True) & (ang[i] < 0):
ang_inc = -ang[i]
else:
ang_inc = ang[i]
            # In the regions where shade is possible, find the proportional area actually lit
if i in self.sombra:
o = np.isin(index_tri, self.mesh.facets[i])
o = o[o == True]
area = (
len(o) / len(self.mesh.facets[i])) * self.mesh.facets_area[i] / (1000 ** 2)
area_potencia.append(area)
else:
                area = self.mesh.facets_area[i] / (1000 ** 2)  # facets_area is in mm^2; convert to m^2
area_potencia.append(area)
            # Drop areas that violate the 15-degree rule: incidence shallower than 15 degrees above the panel (cos 75 deg cutoff) produces no energy
if (ang_inc >= 0) & (ang_inc > (np.cos((np.pi / 180) * 75))):
W.append(
area * self.caracteristicas_panel_solar[i].psolar_rendimiento * WSun * ang_inc)
else:
W.append(0.)
return W, area_potencia, ang
def power_panel_con_actitud(self, Sun_vector, WSun):
"""
power_panel_con_actitud
Obtiene la potencia producida por el satelite con actitud apuntando al sol
Args:
Sun_vector (array(,3)): Vector sol en LVLH
WSun (float): Potencia irradiada por el sol
Returns:
W (array(,n)) : Potencia generada
area_potencia (array(,n)) : Areas que generan potencia
ang (array(,n)) : Angulo de incidencia del vector sol con las caras
angulo_giro (array(,n)) : Angulo de giro del satelite
n : numero de caras
"""
        # If the panels are fixed to the satellite
        if not self.Despegables_orientables:
            if self.actitud.apuntado_sol:
                # here the magic begins:
                # the idea is to form two planes, one between the spin axis and the sun vector
                # and another between the spin axis and a principal direction of the solar panels,
                # so that the angle the satellite should rotate between the two planes can be computed
direcion_principal = self.mesh.facets_normal[self.Caras_Despegables[0]]
plano0 = np.cross(Sun_vector, self.actitud.eje_de_spin)
plano0 = plano0/np.linalg.norm(plano0)
plano1 = np.cross(direcion_principal, self.actitud.eje_de_spin)
plano1 = plano1/np.linalg.norm(plano1)
angulo_giro = np.arccos(np.absolute(
np.dot(plano0, plano1)))/(np.linalg.norm(plano0)*np.linalg.norm(plano1))
if np.isnan(angulo_giro):
angulo_giro = 0.0
if angulo_giro == 0:
pass
else:
                    # Check whether the transformation would make the two rotations coincide
prim = trimesh.transform_points(plano1.reshape(1, 3), trimesh.transformations.rotation_matrix(
angulo_giro, self.actitud.eje_de_spin, [0, 0, 0]))
if not np.allclose(prim, plano0):
angulo_giro = -angulo_giro
self.mesh = self.mesh.apply_transform(trimesh.transformations.rotation_matrix(
angulo_giro, self.actitud.eje_de_spin, [0, 0, 0]))
else:
angulo_giro = 0.0
index_tri = self.celdas_activas(Sun_vector)
W, area_potencia, ang = self.power_panel_solar(
index_tri, Sun_vector, WSun)
return W, area_potencia, ang, angulo_giro
else:
            if self.actitud.apuntado_sol:
                # more magic here, now projecting onto a set of axes so the rotation
                # can be applied directly; this works quite well, the problem arises
                # when crossing the equator
direcion_principal = self.mesh.facets_normal[self.Caras_Despegables[0]]
direcion_principal = np.round(
direcion_principal/np.linalg.norm(direcion_principal), 5)
matrix_projection = trimesh.transformations.projection_matrix(
[0, 0, 0], self.actitud.eje_de_spin)[0:3, 0:3]
proyeccion = np.dot(matrix_projection, Sun_vector)
proyeccion = proyeccion/np.linalg.norm(proyeccion)
ver = np.arccos(np.dot(proyeccion, direcion_principal))
if np.isnan(ver):
ver = 0.0
if ver < 0.1e-4:
angulo_giro = 0.0
pass
else:
# print("proyeccion",proyeccion)
# print("direprinci",direcion_principal)
#angulo_giro=np.arccos(np.absolute(np.dot(direcion_principal, proyeccion)))/(np.linalg.norm(direcion_principal)*np.linalg.norm(proyeccion))
transforma = trimesh.geometry.align_vectors(
direcion_principal, proyeccion)
# posicion_eje=np.array(np.where(np.array(self.actitud.eje_de_spin)==1)).flatten().max()
angulo_giro = trimesh.transformations.rotation_from_matrix(transforma)[
0]
dir = trimesh.transform_points(
direcion_principal.reshape(1, 3), transforma)
if np.absolute(angulo_giro) > 0.05:
transforma2 = np.round(trimesh.geometry.align_vectors(
direcion_principal, -proyeccion), 5)
angulo_giro2 = trimesh.transformations.rotation_from_matrix(transforma2)[
0]
dir = trimesh.transform_points(
direcion_principal.reshape(1, 3), transforma)
if np.absolute(angulo_giro2) < np.absolute(angulo_giro):
transforma = transforma2
angulo_giro = angulo_giro2
else:
pass
# if plano1[posicion_eje]==0:
# angulo_giro=0.0
if np.isnan(angulo_giro):
angulo_giro = 0.0
pass
else:
self.mesh.apply_transform(transforma)
else:
angulo_giro = 0.0
ang = list(map(Sun_vector.dot, self.mesh.facets_normal))
area_potencia = []
W = []
angulo_giro = [angulo_giro]
for i in np.arange(0, len(self.mesh.facets)):
area = self.mesh.facets_area[i] / (1000 ** 2)
area_potencia.append(area)
if (i in self.Caras_Despegables):
angulo_giro.append(np.arccos(ang[i]))
ang[i] = 1
if (ang[i] >= 0) & (ang[i] > (np.cos((np.pi / 180) * 75))):
W.append(
area * self.caracteristicas_panel_solar[i].psolar_rendimiento * WSun * ang[i])
else:
W.append(0.)
return W, area_potencia, ang, angulo_giro
def Calculo_potencia(self, Sun_vector, WSun=1310):
"""
Calculo_potencia
Funcion general para llamar a las distintas funciones para calcular la potencia
Args:
Sun_vector ([type]): [description]
WSun (int, optional): [description]. Defaults to 1310.
Returns:
W (array(,n)) : Potencia generada
area_potencia (array(,n)) : Areas que generan potencia
ang (array(,n)) : Angulo de incidencia del vector sol con las caras
angulo_giro (array(,n)) : Angulo de giro del satelite
n : numero de caras
"""
        if not self.actitud.control_en_actitud:
index_tri = self.celdas_activas(Sun_vector)
W, area_potencia, ang = self.power_panel_solar(
index_tri, Sun_vector, WSun)
            # No rotation happens here, but the caller expects one, so fill it with NaNs
            angulo_giro = [np.nan] * len(self.Caras_Despegables)
else:
W, area_potencia, ang, angulo_giro = self.power_panel_con_actitud(
Sun_vector, WSun)
return W, area_potencia, ang, angulo_giro
def apply_transform(self, matrix):
"""
apply_transform
creada para hacer coincidir correctamente las caras
aplica una transformacion al satelite y reinicia los nombres
Args:
matrix (array(4,4)): matriz de transformacion
"""
self.mesh = self.mesh.apply_transform(matrix)
self.name = []
self.name = self.nombrar_caras()
self.Normales_caras = np.round(self.mesh.facets_normal)
def visual(self):
"""
visual
Crea una imagen visual del satelite con unos ejes funciona muy bien en notebook
y en linux tambien deberia de poder funcionar
Returns:
(scene): retoma una escena con los ejes
"""
ax = trimesh.creation.axis(axis_radius=25, axis_length=200)
scene = trimesh.Scene([self.mesh.apply_scale(1), ax])
return scene.show()
def separar_satelite(self):
"""
separar_satelite
Separa el satelite en mallas
Returns:
[type]: [description]
"""
y = np.array(
np.where(np.isin(self.sombra, self.Caras_Despegables) == False)).flatten()
despiece = []
despiece.append(self.mesh.split()[0])
for i in self.sombra[y]:
normal = self.mesh.facets_normal[i]
despiece.append(trimesh.intersections.slice_mesh_plane(self.mesh,
self.mesh.facets_normal[i],
self.mesh.facets_origin[i]+0.0001*self.mesh.facets_normal[i]))
return despiece
if __name__ == '__main__':
filename = '12Unuv.stl'
actitud = SatelitteActitud(eje_de_spin=[0, 1, 0], control=True)
d = SatelitteSolarPowerSystem(
filename, actitud, Despegables_orientables=True)
d.apply_transform(trimesh.transformations.rotation_matrix(
np.pi/2, [0, 1, 0], [0, 0, 0]))
Sun_vector = np.array([-0.10486044, 0.91244007, 0.39554696])
print(d.mesh.facets_normal)
W, area_potencia, ang, angulo_giro = d.power_panel_con_actitud(
Sun_vector, 1)
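The per-face power model used by both power_panel_solar and power_panel_con_actitud reduces to a cosine law with a 75-degree cutoff. A minimal standalone sketch (area assumed already converted to m^2 and WSun in W/m^2, as in the code above):

import numpy as np

def face_power(area_m2, efficiency, w_sun, cos_incidence):
    # A face generates power only while the sun is within 75 degrees of its
    # normal, i.e. more than 15 degrees above the panel plane.
    if cos_incidence >= 0 and cos_incidence > np.cos(np.deg2rad(75)):
        return area_m2 * efficiency * w_sun * cos_incidence
    return 0.0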
| 39.116424
| 160
| 0.557906
| 2,171
| 18,815
| 4.690926
| 0.199908
| 0.030636
| 0.031618
| 0.019639
| 0.38698
| 0.318637
| 0.26522
| 0.228594
| 0.204635
| 0.178515
| 0
| 0.016862
| 0.360138
| 18,815
| 480
| 161
| 39.197917
| 0.829056
| 0.254531
| 0
| 0.281385
| 0
| 0
| 0.006685
| 0
| 0
| 0
| 0
| 0.00625
| 0
| 1
| 0.060606
| false
| 0.017316
| 0.025974
| 0
| 0.142857
| 0.004329
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1c5cfefd7363489dfaa63b0a0dd5fcfd287ee0f
| 5,037
|
py
|
Python
|
Corrfunc/bases.py
|
dfm/suave
|
51c192f450821d9ebb0f3e7eef7461dfb1b2af5f
|
[
"MIT"
] | 7
|
2021-03-03T15:44:35.000Z
|
2021-03-21T09:01:12.000Z
|
Corrfunc/bases.py
|
dfm/suave
|
51c192f450821d9ebb0f3e7eef7461dfb1b2af5f
|
[
"MIT"
] | 3
|
2020-07-17T01:06:48.000Z
|
2021-01-20T02:59:26.000Z
|
Corrfunc/bases.py
|
dfm/suave
|
51c192f450821d9ebb0f3e7eef7461dfb1b2af5f
|
[
"MIT"
] | 2
|
2021-03-20T00:47:51.000Z
|
2021-03-21T09:01:03.000Z
|
"""
Helper routines for basis functions for the continuous-function estimator.
"""
import numpy as np
from scipy.interpolate import BSpline
from colossus.cosmology import cosmology
################
# Spline basis #
################
def spline_bases(rmin, rmax, projfn, ncomponents, ncont=2000, order=3):
'''
Compute a set of spline basis functions for the given order.
Parameters
----------
rmin : double
Minimum r-value for basis functions
rmax : double
Maximum r-value for basis functions
projfn : string, default=None
Path to projection file if necessary
ncomponents : int
Number of components (basis functions)
ncont : int, default=2000
Number of continuous r-values at which to write the basis function file
order : int, default=3
Order of spline to use; default is cubic spline
Returns
-------
bases: array-like, double
2-d array of basis function values; first column is r-values
'''
    if ncomponents < order * 2:
raise ValueError("ncomponents must be at least twice the order")
kvs = _get_knot_vectors(rmin, rmax, ncomponents, order)
rcont = np.linspace(rmin, rmax, ncont)
bases = np.empty((ncont, ncomponents+1))
bases[:,0] = rcont
for n in range(ncomponents):
kv = kvs[n]
b = BSpline.basis_element(kv)
bases[:,n+1] = [b(r) if kv[0]<=r<=kv[-1] else 0 for r in rcont]
np.savetxt(projfn, bases)
return bases
def _get_knot_vectors(rmin, rmax, ncomponents, order):
nknots = order+2
kvs = np.empty((ncomponents, nknots))
width = (rmax-rmin)/(ncomponents-order)
for i in range(order):
val = i+1
kvs[i,:] = np.concatenate((np.full(nknots-val, rmin), np.linspace(rmin+width, rmin+width*val, val)))
kvs[ncomponents-i-1] = np.concatenate((np.linspace(rmax-width*val, rmax-width, val), np.full(nknots-val, rmax)))
for j in range(ncomponents-2*order):
idx = j+order
kvs[idx] = rmin+width*j + np.arange(0,nknots)*width
return kvs
#############
# BAO basis #
#############
def bao_bases(rmin, rmax, projfn, cosmo_base=None, ncont=2000,
redshift=0.0, alpha_guess=1.0, dalpha=0.001, bias=1.0,
k0=0.1, k1=10.0, k2=0.1, k3=0.001):
'''
Compute the 5-component BAO basis functions based on a cosmological model and
linearized around the scale dilation parameter alpha.
Parameters
----------
rmin : double
Minimum r-value for basis functions
rmax : double
Maximum r-value for basis functions
projfn : string, default=None
Path to projection file if necessary
cosmo_base : nbodykit cosmology object, default=nbodykit.cosmology.Planck15
Cosmology object for the BAO model.
ncont : int, default=2000
Number of continuous r-values at which to write the basis function file
redshift : double, default=0.0
Redshift at which to compute power spectrum
alpha_guess : double, default=1.0
The alpha (scale dilation parameter) at which to compute the model (alpha=1.0 is no scale shift)
dalpha : double, default=0.001
The change in alpha (scale dilation parameter) used to calculate the numerical partial derivative
bias : double, default=1.0
The bias parameter by which to scale the model amplitude (bias=1.0 indicates no bias)
k0 : double, default=0.1
The initial magnitude of the derivative term
k1 : double, default=1.0
The initial magnitude of the s^2 nuisance parameter term
k2 : double, default=0.1
The initial magnitude of the s nuisance parameter term
k3 : double, default=0.001
The initial magnitude of the constant nuisance parameter term
Returns
-------
bases: array-like, double
2-d array of basis function values; first column is r-values
'''
if cosmo_base is None:
print("cosmo_base not provided, defaulting to Planck 2015 cosmology ('planck15')")
cosmo_base = cosmology.setCosmology('planck15')
cf = cosmo_base.correlationFunction
def cf_model(r):
return bias * cf(r, z=redshift)
rcont = np.linspace(rmin, rmax, ncont)
bs = _get_bao_components(rcont, cf_model, dalpha, alpha_guess, k0=k0, k1=k1, k2=k2, k3=k3)
nbases = len(bs)
bases = np.empty((ncont, nbases+1))
bases[:,0] = rcont
bases[:,1:nbases+1] = np.array(bs).T
    np.savetxt(projfn, bases)
    return bases
def _get_bao_components(r, cf_func, dalpha, alpha, k0=0.1, k1=10.0, k2=0.1, k3=0.001):
b1 = k1/r**2
b2 = k2/r
b3 = k3*np.ones(len(r))
cf = cf_func(alpha*r)
b4 = cf
cf_dalpha = cf_func((alpha+dalpha)*r)
dcf_dalpha = _partial_derivative(cf, cf_dalpha, dalpha)
b5 = k0*dcf_dalpha
return b1,b2,b3,b4,b5
def _partial_derivative(f1, f2, dv):
df = f2-f1
deriv = df/dv
return deriv
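A brief usage sketch for the spline basis (the output file name and r-range are hypothetical): write eight cubic-spline basis functions, sampled at 2000 r-values, to a projection file.

bases = spline_bases(40.0, 180.0, 'cubic_spline.dat', ncomponents=8)
# bases[:, 0] holds the 2000 r-values; bases[:, 1:9] the basis functions.
# ncomponents must be at least 2*order, so >= 6 for the default cubic spline.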
| 29.115607
| 120
| 0.638674
| 724
| 5,037
| 4.392265
| 0.243094
| 0.03522
| 0.02673
| 0.01761
| 0.309434
| 0.274214
| 0.248428
| 0.224528
| 0.224528
| 0.2
| 0
| 0.03839
| 0.250149
| 5,037
| 172
| 121
| 29.284884
| 0.803548
| 0.419893
| 0
| 0.133333
| 0
| 0
| 0.049271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0.016667
| 0.25
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1c635399d92b3e1526049c9830b5922d5577a91
| 17,587
|
py
|
Python
|
src/data/tree_matches.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | null | null | null |
src/data/tree_matches.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | null | null | null |
src/data/tree_matches.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | 1
|
2021-08-19T15:21:50.000Z
|
2021-08-19T15:21:50.000Z
|
import glob
import os
import pandas as pd
import json
import ast
import re
from tqdm import tqdm
import click
import pickle
from multiprocessing import Pool, cpu_count, Queue
from functools import partial
import itertools
import sys
sys.setrecursionlimit(15000)
import logging
logpath = "./tree_matches.log"
logger = logging.getLogger('log')
logger.setLevel(logging.INFO)
ch = logging.FileHandler(logpath)
# ch.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(ch)
def replace_function_subtrees(coral_repr):
ignore = []
new_tree = []
for i in range(len(coral_repr)):
node = coral_repr[i]
if i in ignore:
#ignore the children too:
ignore = ignore + node.get("children",[])
continue
elif node["type"] == "Call":
ignore = ignore + node.get("children",[])[1:]
new_tree.append(node)
return new_tree
class Snippet(object):
def __init__(self,slug,version_id,source,competition = None,
max_size=512):
self.slug = slug
self.max_size = max_size
self.version_id = version_id
self.source = source
self.coral_repr = parse_string(source)[:self.max_size]
self.function_args_removed_repr = replace_function_subtrees(self.coral_repr)
self.python_ast = ast.parse(source)
def coral_diff(self,other,key = None,attr="coral_repr"):
a_attr = getattr(self,attr)
b_attr = getattr(other,attr)
return self.tree_diff(a_attr, b_attr, key = key)
def tree_diff(self,a,b,key=None):
a,b = make_same_length(a,b)
if not key:
key = lambda aa,bb: not aa == bb
return sum([key(aa,bb) for (aa,bb) in zip(a,b)])
def to_dict(self):
return {"slug":self.slug, "version_id" : self.version_id,
"source":self.source}
def rear_pad_list(a,n):
m = len(a)
return a + [None for i in range(n-m)]
def make_same_length(a,b):
n = max(len(a),len(b))
a = rear_pad_list(a,n)
b = rear_pad_list(b,n)
return (a,b)
def tree_diff(a,b,key=None):
a,b = make_same_length(a,b)
if not key:
key = lambda aa,bb: not aa == bb
return sum([key(aa,bb) for (aa,bb) in zip(a,b)])
def looks_like_string(node):
node_type = node.get("type")
if node_type == "Constant":
try:
float(node.get("value"))
return False
except (ValueError,TypeError):
return True
else:
return False
def dont_count_strings(a,b):
if a is None or b is None:
return True
if looks_like_string(a) and looks_like_string(b):
return False
else:
return (not a == b)
def remove_duplicate_matches(matches):
to_return = []
record = set()
for match in matches:
if not (match[0].source,match[1].source) in record:
record.add((match[0].source,match[1].source))
to_return.append(match)
return to_return
def sort_versions_by_version_id(dictionary):
tuples = list(dictionary.items())
return sorted(tuples, key=lambda x : int(x[0]))
def get_sequential_matching( kernel_trees, key=None, attr="coral_repr"):
matches = []
for slug,versions in kernel_trees.items():
sorted_versions = sort_versions_by_version_id(versions)
for a,b in zip(sorted_versions, sorted_versions[1:]):
a_version_id, a_cells = a
b_version_id, b_cells = b
for a_cell in a_cells:
for b_cell in b_cells:
diff = a_cell.coral_diff(b_cell, key=key, attr=attr)
if diff == 1:
matches.append((a_cell, b_cell))
return matches
def get_matching_cells(kernel_trees,diff_versions = False, key = None,attr="coral_repr"):
matches = []
all_cells = []
for slug,versions in kernel_trees.items():
all_version_cells = []
for version_id, cells in versions.items():
if cells:
for cell in cells:
all_version_cells.append(cell)
n = len(all_version_cells)
if n == 1:
continue
for i in range(n):
for j in range(i+1,n):
cell_i = all_version_cells[i]
cell_j = all_version_cells[j]
if diff_versions:
if cell_i.version_id == cell_j.version_id:
continue
diff = cell_i.coral_diff(cell_j,key=key,attr=attr)
if diff == 1:
matches.append((cell_i,cell_j))
all_cells = all_cells + all_version_cells
return matches
def parse_string(string):
    tree = ast.parse(string)
json_tree = []
def gen_identifier(identifier, node_type = 'identifier'):
pos = len(json_tree)
json_node = {}
json_tree.append(json_node)
json_node['type'] = node_type
json_node['value'] = identifier
return pos
def traverse_list(l, node_type = 'list'):
pos = len(json_tree)
json_node = {}
json_tree.append(json_node)
json_node['type'] = node_type
children = []
for item in l:
children.append(traverse(item))
if (len(children) != 0):
json_node['children'] = children
return pos
def traverse(node):
pos = len(json_tree)
json_node = {}
json_tree.append(json_node)
json_node['type'] = type(node).__name__
children = []
if isinstance(node, ast.Name):
json_node['value'] = node.id
elif isinstance(node, ast.Num):
json_node['value'] = str(node.n)
elif isinstance(node, ast.Str):
json_node['value'] = node.s
elif isinstance(node, ast.alias):
json_node['value'] = str(node.name)
if node.asname:
children.append(gen_identifier(node.asname))
elif isinstance(node, ast.FunctionDef):
json_node['value'] = str(node.name)
elif isinstance(node, ast.ClassDef):
json_node['value'] = str(node.name)
elif isinstance(node, ast.ImportFrom):
if node.module:
json_node['value'] = str(node.module)
elif isinstance(node, ast.Global):
for n in node.names:
children.append(gen_identifier(n))
elif isinstance(node, ast.keyword):
json_node['value'] = str(node.arg)
# Process children.
if isinstance(node, ast.For):
children.append(traverse(node.target))
children.append(traverse(node.iter))
children.append(traverse_list(node.body, 'body'))
if node.orelse:
children.append(traverse_list(node.orelse, 'orelse'))
elif isinstance(node, ast.If) or isinstance(node, ast.While):
children.append(traverse(node.test))
children.append(traverse_list(node.body, 'body'))
if node.orelse:
children.append(traverse_list(node.orelse, 'orelse'))
elif isinstance(node, ast.With):
children.append(traverse(node.context_expr))
if node.optional_vars:
children.append(traverse(node.optional_vars))
children.append(traverse_list(node.body, 'body'))
elif isinstance(node, ast.Try):
children.append(traverse_list(node.body, 'body'))
children.append(traverse_list(node.handlers, 'handlers'))
if node.orelse:
children.append(traverse_list(node.orelse, 'orelse'))
elif isinstance(node, ast.arguments):
children.append(traverse_list(node.args, 'args'))
children.append(traverse_list(node.defaults, 'defaults'))
if node.vararg:
children.append(gen_identifier(node.vararg, 'vararg'))
if node.kwarg:
children.append(gen_identifier(node.kwarg, 'kwarg'))
elif isinstance(node, ast.ExceptHandler):
if node.type:
children.append(traverse_list([node.type], 'type'))
if node.name:
children.append(traverse_list([node.name], 'name'))
children.append(traverse_list(node.body, 'body'))
elif isinstance(node, ast.ClassDef):
children.append(traverse_list(node.bases, 'bases'))
children.append(traverse_list(node.body, 'body'))
children.append(traverse_list(node.decorator_list, 'decorator_list'))
elif isinstance(node, ast.FunctionDef):
children.append(traverse(node.args))
children.append(traverse_list(node.body, 'body'))
children.append(traverse_list(node.decorator_list, 'decorator_list'))
else:
# Default handling: iterate over children.
for child in ast.iter_child_nodes(node):
if isinstance(child, ast.expr_context) or isinstance(child, ast.operator) or isinstance(child, ast.boolop) or isinstance(child, ast.unaryop) or isinstance(child, ast.cmpop):
# Directly include expr_context, and operators into the type instead of creating a child.
json_node['type'] = json_node['type'] + type(child).__name__
else:
children.append(traverse(child))
if isinstance(node, ast.Attribute):
children.append(gen_identifier(node.attr, 'attr'))
if (len(children) != 0):
json_node['children'] = children
return pos
traverse(tree)
return json_tree
def get_param_from_filename(param,filename):
template = "\?{}=(.*)\.|\?"
query_regex = re.compile(template.format(param))
try:
return re.findall(query_regex,filename)[0]
except IndexError:
return None
def get_slug_from_file(filename):
return re.split("\?|\.",filename)[0]
def load_cell_as_snippets(slug,version_id,path,max_size=512):
with open(path) as kernel_file:
cells = []
try:
res = json.load(kernel_file)
except ValueError:
return cells
if not (type(res) is dict) or not "cells" in res:
return cells
for cell in res["cells"]:
if not cell.get("source"):
continue
if type(cell["source"]) is list:
cell["source"] = "".join(cell["source"])
try:
cells.append(Snippet(slug,version_id,cell["source"],max_size=max_size))
except (SyntaxError, AttributeError):
continue
return cells
def get_slug_matches(competition_path,slug,ignore_function_args=False,
remove_exact_duplicates=False,
length_threshold=None, ignore_strings=False,max_size=512,
sequential_matches=False):
# in_path is a slug directory
kernel_version_snippets = {slug:{}}
for version_path in glob.glob(os.path.join(competition_path,slug,"*.json")):
filename = os.path.basename(version_path)
version_id = os.path.splitext(filename)[0]
if not version_id:
continue
version_snippets = load_cell_as_snippets(slug,version_id,version_path,max_size=max_size)
kernel_version_snippets[slug][version_id] = version_snippets
if ignore_function_args:
match_attr = "function_args_removed_repr"
else:
match_attr = "coral_repr"
if ignore_strings:
key = dont_count_strings
else:
key = None
if sequential_matches:
matches = get_sequential_matching(kernel_version_snippets,key=key, attr=match_attr)
else:
matches = get_matching_cells(kernel_version_snippets, diff_versions = True,
key=key, attr=match_attr)
    if length_threshold:
        matches = [x for x in matches if len(x[0].source.split("\n")) > int(length_threshold)]
if remove_exact_duplicates:
matches = remove_duplicate_matches(matches)
return matches
def get_competition_matches(ignore_function_args,length_threshold,remove_exact_duplicates,
ignore_strings, max_size, sequential_matches, competition_path):
slugs = [os.path.basename(x) for x in glob.glob(os.path.join(competition_path,"*"))]
matches = []
for slug in tqdm(slugs):
matches = matches + get_slug_matches(competition_path,slug,ignore_function_args,
remove_exact_duplicates, length_threshold, ignore_strings,
max_size,sequential_matches)
logger.info("Done with {}".format(competition_path))
return matches
def write_matches(out_path,matches):
with open(os.path.join(out_path,"matches.jsonl"), 'w') as the_file:
for match in matches:
the_file.write(json.dumps([match[0].to_dict(),match[1].to_dict()]))
the_file.write("\n")
@click.command()
@click.argument('in_path', type=click.Path())
@click.argument('out_path', type = click.Path())
@click.option('--ignore_function_args', is_flag = True, default=False, show_default=True)
@click.option('--length_threshold', default=None, show_default=True)
@click.option('--remove_exact_duplicates',is_flag = True, default=False, show_default=True)
@click.option('--ignore_strings', is_flag = True,default=False, show_default=True)
@click.option('--max_size', default=512, show_default=True)
@click.option('--sequential_matches', is_flag=True, default=False,show_default=True)
def main(in_path,
out_path,
ignore_function_args,
length_threshold,
remove_exact_duplicates,
ignore_strings,
max_size,
sequential_matches):
all_comp_paths = glob.glob(os.path.join(in_path,"*"))[1:2]
n = len(all_comp_paths)
# all_matches = map(get_competition_matches,all_comp_paths)
all_matches = []
comp_matcher = partial(get_competition_matches,ignore_function_args,
length_threshold,
remove_exact_duplicates,
ignore_strings,
max_size,
sequential_matches)
all_matches = [comp_matcher(all_comp_paths[0])]
# with Pool(16) as pool:
# for result in tqdm(pool.imap_unordered(comp_matcher,all_comp_paths),total =n):
# all_matches.append(result)
# pool.join()
# pool.close()
# with Pool(8) as worker_pool:
# all_matches = tqdm(worker_pool.imap_unordered(get_competition_matches,all_comp_paths),total =n)
all_matches = itertools.chain.from_iterable(all_matches)
write_matches(out_path,all_matches)
if __name__ == '__main__':
main()
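# Example invocation (hypothetical script name and paths):
#   python kernel_matches.py ./competitions ./out --ignore_function_args --remove_exact_duplicates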
| 36.112936
| 189
| 0.591289
| 2,139
| 17,587
| 4.635344
| 0.122955
| 0.043772
| 0.05769
| 0.047201
| 0.52708
| 0.443873
| 0.408573
| 0.393343
| 0.386082
| 0.371457
| 0
| 0.003745
| 0.301643
| 17,587
| 487
| 190
| 36.112936
| 0.803534
| 0.14778
| 0
| 0.321839
| 0
| 0
| 0.039917
| 0.004889
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077586
| false
| 0
| 0.04023
| 0.005747
| 0.20977
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1c81880771dc78be0ce9b1719c11a105c654a6c
| 663
|
py
|
Python
|
examples/accessibility/test_sa11y.py
|
echo2477/demo-python
|
adc55aa8075dbd46f94d1ae68f2acfd8f20720d5
|
[
"MIT"
] | 42
|
2019-02-27T03:28:52.000Z
|
2022-01-25T21:18:45.000Z
|
examples/accessibility/test_sa11y.py
|
echo2477/demo-python
|
adc55aa8075dbd46f94d1ae68f2acfd8f20720d5
|
[
"MIT"
] | 12
|
2019-05-10T23:43:55.000Z
|
2021-11-05T21:20:02.000Z
|
examples/accessibility/test_sa11y.py
|
echo2477/demo-python
|
adc55aa8075dbd46f94d1ae68f2acfd8f20720d5
|
[
"MIT"
] | 38
|
2019-02-27T03:28:52.000Z
|
2022-02-17T07:27:08.000Z
|
import os
from selenium import webdriver
from sa11y.analyze import Analyze
import urllib3
urllib3.disable_warnings()
class TestAccessibilitySa11y(object):
def test_analysis(self):
capabilities = {
'browserName': 'chrome',
'sauce:options': {
'username': os.environ["SAUCE_USERNAME"],
'accesskey': os.environ["SAUCE_ACCESS_KEY"],
}
}
sauce_url = 'https://ondemand.us-west-1.saucelabs.com/wd/hub'
driver = webdriver.Remote(sauce_url, capabilities)
driver.get('https://www.saucedemo.com/')
Analyze(driver).results()
driver.quit()
| 22.862069
| 69
| 0.612368
| 67
| 663
| 5.955224
| 0.656716
| 0.065163
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014403
| 0.266968
| 663
| 28
| 70
| 23.678571
| 0.806584
| 0
| 0
| 0
| 0
| 0
| 0.226244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1ca40f0376f7b0e97f60f4e474395644c035a44
| 653
|
py
|
Python
|
275_hindex_ii.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | 2
|
2018-04-24T19:17:40.000Z
|
2018-04-24T19:33:52.000Z
|
275_hindex_ii.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | null | null | null |
275_hindex_ii.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | 3
|
2020-06-17T05:48:52.000Z
|
2021-01-02T06:08:25.000Z
|
# 275. H-Index II
# Follow up for H-Index: What if the citations array is sorted in ascending order? Could you optimize your algorithm?
class Solution(object):
# http://blog.csdn.net/titan0427/article/details/50650006
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
n = len(citations)
start, end = 1, n
while start <= end:
h = (start + end) // 2
if citations[n-h] < h:
end = h-1
elif n-h-1 >= 0 and citations[n-h-1] > h:
start = h+1
else:
return h
return 0
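# Quick usage sketch (hypothetical citations list, sorted ascending as the problem requires):
#   Solution().hIndex([0, 1, 3, 5, 6])  # -> 3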
| 29.681818
| 117
| 0.509954
| 86
| 653
| 3.872093
| 0.604651
| 0.024024
| 0.066066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056931
| 0.381317
| 653
| 21
| 118
| 31.095238
| 0.767327
| 0.347626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1cad5eb72fd592bce4b7879f6c49c197729b99c
| 6,172
|
py
|
Python
|
base/site-packages/news/templatetags/news_tags.py
|
edisonlz/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | 285
|
2019-12-23T09:50:21.000Z
|
2021-12-08T09:08:49.000Z
|
base/site-packages/news/templatetags/news_tags.py
|
jeckun/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | null | null | null |
base/site-packages/news/templatetags/news_tags.py
|
jeckun/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | 9
|
2019-12-23T12:59:25.000Z
|
2022-03-15T05:12:11.000Z
|
from django.conf import settings
from django import template
from news.models import NewsItem, NewsAuthor, NewsCategory
register = template.Library()
@register.tag
def get_news(parser, token):
"""
{% get_news 5 as news_items %}
"""
bits = token.split_contents()
if len(bits) == 3:
limit = None
elif len(bits) == 4:
try:
limit = abs(int(bits[1]))
except ValueError:
raise template.TemplateSyntaxError("If provided, second argument to `get_news` must be a positive whole number.")
if bits[-2].lower() != 'as':
raise template.TemplateSyntaxError("Missing 'as' from 'get_news' template tag. Format is {% get_news 5 as news_items %}.")
return NewsItemNode(bits[-1], limit)
class NewsItemNode(template.Node):
"""
Returns a QuerySet of published NewsItems based on the lookup parameters.
"""
def __init__(self, varname, limit=None, author=None, category_slug=None, filters=None):
self.varname = varname
self.limit = limit
self.filters = filters
# author is either a literal NewsAuthor slug,
# or a template variable containing a NewsAuthor slug.
self.author = author
self.category = category_slug
def render(self, context):
# Base QuerySet, which will be filtered further if necessary.
news = NewsItem.on_site.published()
# Do we filter by author? If so, first attempt to resolve `author` as
# a template.Variable. If that doesn't work, use `author` as a literal
# NewsAuthor.slug lookup.
if self.author is not None:
try:
author_slug = template.Variable(self.author).resolve(context)
except template.VariableDoesNotExist:
author_slug = self.author
news = news.filter(author__slug=author_slug)
if self.category is not None:
try:
category_slug = template.Variable(self.category).resolve(context)
except template.VariableDoesNotExist:
category_slug = self.category
news = news.filter(category__slug=category_slug)
# Apply any additional lookup filters
if self.filters:
news = news.filter(**self.filters)
# Apply a limit.
if self.limit:
news = news[:self.limit]
context[self.varname] = news
return u''
def parse_token(token):
"""
Parses a token into 'slug', 'limit', and 'varname' values.
Token must follow format {% tag_name <slug> [<limit>] as <varname> %}
"""
bits = token.split_contents()
if len(bits) == 5:
# A limit was passed in -- try to parse / validate it.
try:
limit = abs(int(bits[2]))
except (ValueError, TypeError):
limit = None
elif len(bits) == 4:
# No limit was specified.
limit = None
else:
# Syntax is wrong.
raise template.TemplateSyntaxError("Wrong number of arguments: format is {%% %s <slug> [<limit>] as <varname> %%}" % bits[0])
if bits[-2].lower() != 'as':
raise template.TemplateSyntaxError("Missing 'as': format is {%% %s <slug> [<limit>] as <varname> %%}" % bits[0])
return (bits[1], limit, bits[-1])
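# For example (hypothetical tag usage), the token of
# "{% get_posts_by_author foo 5 as news_items %}" parses to ('foo', 5, 'news_items').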
@register.tag
def get_posts_by_author(parser,token):
"""
{% get_posts_by_author <slug> [<limit>] as <varname> %}
{% get_posts_by_author foo 5 as news_items %} # 5 articles
{% get_posts_by_author foo as news_items %} # all articles
"""
author_slug, limit, varname = parse_token(token)
return NewsItemNode(varname, limit, author=author_slug)
@register.tag
def get_posts_by_category(parser,token):
"""
{% get_posts_by_category <slug> [<limit>] as <varname> %}
{% get_posts_by_category foo 5 as news_items %} # 5 articles
{% get_posts_by_category foo as news_items %} # all articles
"""
category_slug, limit, varname = parse_token(token)
return NewsItemNode(varname, limit, category_slug=category_slug)
@register.tag
def get_news_by_category(parser,token):
"""
Alias for get_posts_by_category; added after repeatedly debugging issues caused by typing one name or the other.
"""
return get_posts_by_category(parser,token)
@register.tag
def get_posts_by_tag(parser,token):
"""
{% get_posts_by_tag <tag> [<limit>] as <varname> %}
"""
tag, limit, varname = parse_token(token)
return NewsItemNode(varname, limit, filters={'tags__contains':tag})
@register.tag
def months_with_news(parser, token):
"""
{% months_with_news 4 as months %}
"""
bits = token.split_contents()
if len(bits) == 3:
limit = None
elif len(bits) == 4:
try:
limit = abs(int(bits[1]))
except ValueError:
raise template.TemplateSyntaxError("If provided, second argument to `months_with_news` must be a positive whole number.")
if bits[-2].lower() != 'as':
raise template.TemplateSyntaxError("Missing 'as' from 'months_with_news' template tag. Format is {% months_with_news 5 as months %}.")
return MonthNode(bits[-1], limit=limit)
class MonthNode(template.Node):
def __init__(self,varname,limit=None):
self.varname = varname
self.limit = limit # for MonthNode inheritance
def render(self, context):
try:
months = NewsItem.on_site.published().dates('date', 'month', order="DESC")
except Exception:
months = None
if self.limit is not None:
months = list(months)
months = months[:self.limit]
context[self.varname] = months
return ''
@register.tag
def get_categories(parser,token):
"""
{% get_categories as <varname> %}
{% get_categories 5 as <varname> %}
"""
bits = token.split_contents()
if len(bits) == 3:
limit = None
elif len(bits) == 4:
try:
limit = abs(int(bits[1]))
except ValueError:
raise template.TemplateSyntaxError("If provided, second argument to `get_categories` must be a positive whole number.")
if bits[-2].lower() != 'as':
raise template.TemplateSyntaxError("Missing 'as' from 'get_categories' template tag. Format is {% get_categories 5 as categories %}.")
return CategoryNode(bits[-1], limit=limit)
class CategoryNode(template.Node):
def __init__(self,varname,limit=None):
self.varname = varname
self.limit = limit
def render(self, context):
categories = NewsCategory.on_site.all()
if self.limit is not None:
categories = list(categories)
categories = categories[:self.limit]
context[self.varname] = categories
return ''
@register.inclusion_tag('news/news_ul.html')
def news_ul(slug):
try:
return {'category': NewsCategory.objects.get(slug=slug)}
except NewsCategory.DoesNotExist:
return {}
| 30.107317
| 137
| 0.70431
| 856
| 6,172
| 4.950935
| 0.191589
| 0.020765
| 0.025956
| 0.024068
| 0.495517
| 0.391222
| 0.321378
| 0.29967
| 0.29967
| 0.259556
| 0
| 0.006426
| 0.168017
| 6,172
| 205
| 138
| 30.107317
| 0.81889
| 0.222132
| 0
| 0.461538
| 0
| 0.007692
| 0.151554
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.023077
| 0
| 0.261538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1d19c31d7a08cd05475c969fbf2328d027248cd
| 15,337
|
py
|
Python
|
zed-align.py
|
zyndagj/zed-align
|
143b0043b0bfc88f553dc141f4873715bfabc379
|
[
"BSD-3-Clause"
] | 1
|
2017-03-17T15:57:04.000Z
|
2017-03-17T15:57:04.000Z
|
zed-align.py
|
zyndagj/ZED-bsmap-align
|
143b0043b0bfc88f553dc141f4873715bfabc379
|
[
"BSD-3-Clause"
] | null | null | null |
zed-align.py
|
zyndagj/ZED-bsmap-align
|
143b0043b0bfc88f553dc141f4873715bfabc379
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from math import ceil
import os
import sys
import argparse
import multiprocessing
import subprocess as sp
import re
#from pprint import pprint
from array import array
from yaml import load, dump
contexts = ('CG','CHG','CHH')
def main():
fCheck = fileCheck() #class for checking parameters
parser = argparse.ArgumentParser(description="Wrapper for Bisulfite Methylation Alignment.")
parser.add_argument('-R', metavar='FASTA', help='Reference for alignment', required=True, type=fCheck.fasta)
parser.add_argument('-r1', metavar='FASTQ', help='Single or first fastq from pair', required=True, type=fCheck.fastq)
parser.add_argument('-r2', metavar='FASTQ', help='Second read', type=fCheck.fastq)
parser.add_argument('-O', metavar='STR', help='Output directory (Default: %(default)s)', default='.', type=str)
parser.add_argument('-N', '--name', metavar='STR', help='Name for run')
parser.add_argument('-U', '--uniq', action='store_true', help="Only use unique alignments")
parser.add_argument('-q', help="Fastq Quality Encoding (Default: %(default)s)", default=33, type=int)
parser.add_argument('-C', metavar='Chrom', help="Chromosome to use for checking bisulfite conversion rate")
parser.add_argument('-S', dest='tileSize', metavar='N', type=int, help="Window size (Default: %(default)s)", default=100)
parser.add_argument('-d', metavar='N', type=int, help="Minimum coverage in tile for methylation to be printed (Default: %(default)s - all)", default=1)
parser.add_argument('--CG', metavar='N', type=int, help="Minimum sites per tile (Default: %(default)s)", default=3)
parser.add_argument('--CHG', metavar='N', type=int, help="Minimum sites per tile (Default: %(default)s)", default=3)
parser.add_argument('--CHH', metavar='N', type=int, help="Minimum sites per tile (Default: %(default)s)", default=6)
args = parser.parse_args()
######################################################
# Path Section
######################################################
if not args.name:
args.name = os.path.splitext(args.r1)[0]
if not os.path.exists(args.O): os.makedirs(args.O)
outPrefix = os.path.join(args.O, args.name)
######################################################
# Arguments Section
######################################################
config = {'bsmap':{}, 'methratio':{}, 'tiles':{}}
#-----------------------------------------------------
# Arguments for running BSMAP
#-----------------------------------------------------
config['bsmap']['-a'] = {'value':args.r1, 'description':'R1 input'}
config['bsmap']['-z'] = {'value':str(args.q), 'description':'Fastq quality encoding'}
config['bsmap']['-p'] = {'value':str(multiprocessing.cpu_count()), 'description':'Number of threads'}
config['bsmap']['-q'] = {'value':'20', 'description':"Quality threshold for trimming 3' ends of reads"}
config['bsmap']['-d'] = {'value':args.R, 'description':'Reference'}
config['bsmap']['-S'] = {'value':'77345', 'description':'Hardcoded random seed for mapping reproducibility'}
config['bsmap']['-w'] = {'value':'10000', 'description':'Number of candidate seeds to align against'}
#config['bsmap']['-V'] = {'value':'1', 'description':'Print major messages'}
#config['bsmap']['-o'] = {'value':args.name+".sam", 'description':'Output BAM'} # default SAM stdout is piped to samtools
#-----------------------------------------------------
# Arguments for methratio.py
#-----------------------------------------------------
#config['methratio']['-q'] = {'value':'', 'description':'Quiet'}
config['methratio']['-z'] = {'value':'', 'description':'Report locations with zero methylation'}
config['methratio']['-r'] = {'value':'', 'description':'Remove duplicate reads'}
config['methratio']['-d'] = {'value':args.R, 'description':'Reference'}
config['methratio']['-o'] = {'value':outPrefix+"_methratio.txt", 'description':'Output methylation ratio file'}
#-----------------------------------------------------
# Paired specific arguments
#-----------------------------------------------------
if args.r2:
config['bsmap']['-b'] = {'value':args.r2, 'description':'R2 input'}
config['methratio']['-p'] = {'value':'', 'description':'Require proper pairings'}
if args.uniq:
config['bsmap']['-r'] = {'value':'0', 'description':'No non-unique hits reported'}
config['methratio']['-u'] = {'value':'', 'description':'Only use unique alignments'}
else:
config['bsmap']['-r'] = {'value':'2', 'description':'non-unique hits reported'}
config['bsmap']['-w'] = {'value':'20', 'description':'Only 20 equal best hits reported'}
#-----------------------------------------------------
# Tile Section
#-----------------------------------------------------
config['tiles']['size'] = {'value':args.tileSize, 'description':'Size of tiles for summarizing methylation'}
config['tiles']['minCoverage'] = {'value':args.d, 'description':'Minimum Coverage'}
config['tiles']['CG'] = {'value':args.CG, 'description':'Minimum number of sites per tile'}
config['tiles']['CHG'] = {'value':args.CHG, 'description':'Minimum number of sites per tile'}
config['tiles']['CHH'] = {'value':args.CHH, 'description':'Minimum number of sites per tile'}
######################################################
# Check for Dependencies
######################################################
for d in ('bsmap','samtools','methratio.py','bedGraphToBigWig'):
if not which(d):
sys.exit("Please add %s to your path\n"%(d))
# Parse FAI
fai = args.R+'.fai'
if not os.path.exists(fai):
os.system("samtools faidx %s"%(args.R))
######################################################
# Run workflow
######################################################
faiDict = ParseFai(fai)
#-----------------------------------------------------
# run BSMAP
#-----------------------------------------------------
runBSMAP(config, outPrefix, args.r2)
#-----------------------------------------------------
# run methratio.py and calculate conversion rate
#-----------------------------------------------------
runRatio(config)
if args.C:
calcConversion(config, args.C, faiDict)
#-----------------------------------------------------
# Make Tiles and Bedgraphs
#-----------------------------------------------------
makeTile(config, outPrefix, faiDict)
#-----------------------------------------------------
# Make bigWig
#-----------------------------------------------------
makeBigWig(config,fai)
#-----------------------------------------------------
# Write YAML
#-----------------------------------------------------
dump(config, open(outPrefix+'.yaml','w'), default_flow_style=False, width=1000)
def calcConversion(config, chrom, faiDict):
if not chrom in faiDict:
chromStr = '\n - '.join(faiDict.keys())
sys.exit("Chromosome: %s not in reference. Please choose a chromosome from:\n - %s"%(chrom, chromStr))
ratioFile = config['methratio']['-o']['value']
p = sp.Popen(["grep", "^%s\s"%chrom, ratioFile], stdout=sp.PIPE).stdout
cSum = 0
ctSum = 0
for line in p:
tmp = line.split('\t')
cSum += int(tmp[6])
ctSum += int(tmp[7])
percent = round((1.0-float(cSum)/(float(ctSum)+1.0))*100.0, 2)
config['conversion'] = {}
config['conversion']['Chromosome'] = {'value':chrom, 'description':'Chromosome to calculate conversion efficiency from. No methylation should be expected on this chromosome.'}
config['conversion']['C'] = {'value':cSum, 'description':'Number of methylated cytosines'}
config['conversion']['CT'] = {'value':ctSum, 'description':'Number of un/methylated cytosines'}
config['conversion']['percent'] = {'value':percent, 'description':'Conversion rate: (1-C/CT)*100'}
p.close()
def runRatio(config):
ratioCMD = makeCMD('methratio.py', config, 'methratio')+[config['bsmap_stats']['output']['value']]
ratioOUT = sp.check_output(ratioCMD, stderr=sp.STDOUT)
statLine = ratioOUT.split('\n')[-2]
m = re.match(r".+total\s([0-9]+)\s.+,\s([0-9]+)\s.+age:\s(\w+\.\w+) fold", statLine)
mappings, covered, coverage = m.groups()
config['methratio_stats'] = {}
config['methratio_stats']['mappings'] = {'value':mappings, 'description':'Number of valid mappings'}
config['methratio_stats']['covered'] = {'value':covered, 'description':'Number of cytosines covered'}
config['methratio_stats']['coverage'] = {'value':coverage, 'description':'Average coverage fold'}
def runBSMAP(config, outPrefix, r2):
bsmapCMD = makeCMD('bsmap', config, 'bsmap')
bsP = sp.Popen(bsmapCMD, stderr=sp.PIPE, stdout=sp.PIPE)
cpus = str(multiprocessing.cpu_count())
samP = sp.Popen('samtools view -uS - | samtools sort -m 200M -@ %s -O bam -o %s.bam -T %s_tmp'%(cpus, outPrefix, outPrefix), shell=True, stdin=bsP.stdout, stdout=open(outPrefix+'.bam','wb'), stderr=sp.PIPE)
bsP.stdout.close()
bsOUT = bsP.stderr.read()
samP.wait()
if r2:
total, aligned, unique, mult = map(int, re.findall(r'pairs:\s+([0-9]+)', bsOUT))
unit='pairs'
else:
total, aligned, unique, mult = map(int, re.findall(r'reads:\s+([0-9]+)', bsOUT))
unit='reads'
config['bsmap_stats'] = {}
config['bsmap_stats']['output'] = {'value':outPrefix+".bam", 'description':'Output BAM'}
config['bsmap_stats']['input'] = {'value':total, 'description':'Total number of %s in input'%(unit)}
config['bsmap_stats']['aligned'] = {'value':aligned, 'description':'Total number of %s aligned'%(unit)}
config['bsmap_stats']['unique'] = {'value':unique, 'description':'Total number of %s uniquely aligned'%(unit)}
config['bsmap_stats']['mult'] = {'value':mult, 'description':'Total number of %s with multiple alignments'%(unit)}
def makeCMD(baseBin, config, section):
outCMD = [baseBin]
cSec = config[section]
for key in cSec.keys():
outCMD.append(key)
v = cSec[key]['value']
if v: outCMD.append(v)
return outCMD
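# Sketch of the argv this builds (values hypothetical; flag order follows dict
# iteration, and flags whose value is '' are emitted bare):
#   makeCMD('bsmap', config, 'bsmap') -> ['bsmap', '-a', 'reads.fq', '-z', '33', ...]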
def ParseFai(inFile):
'''
Parses a fa.fai into a python dictionary
Parameters
================================
inFile FILE fai file
'''
return dict(map(lambda y: (y[0], int(y[1])), map(lambda y: y.split('\t'), open(inFile,'r').readlines())))
class fileCheck:
def check(self, file, exts):
ext = os.path.splitext(file)[1][1:]
fName = os.path.split(file)[1]
if not ext in exts:
raise argparse.ArgumentTypeError("%s not a %s"%(fName, exts[0]))
if not os.path.exists(file):
raise argparse.ArgumentTypeError("%s does not exist"%(file))
def fastq(self, file):
self.check(file, ['fastq','fq'])
return file
def fasta(self, file):
self.check(file, ['fasta','fa'])
return file
def makeBigWig(config,fai):
bedgraphs = config['tiles']['output']['bedgraphs']['value']
pool = []
bws = []
for bg in bedgraphs:
bw = os.path.splitext(bg)[0]+'.bw'
bws.append(bw)
pool.append(sp.Popen(['bedGraphToBigWig',bg,fai,bw]))
for p in pool:
p.wait()
config['bigwigs'] = {'value':bws,'description':'Bigwig versions of bedgraph files for jbrowse to load'}
def makeTile(config, outPrefix, faiDict):
# Make sure to do something with the coverage variable
bgNames = map(lambda x: outPrefix+'_'+x+'.bedgraph', contexts)
config['tiles']['output'] = {\
'bedgraphs':{'value':bgNames, 'description':'Methylation ratios for each methylation motif {CG, CHG, CHH} in bedgraph format.'},\
'tab':{'value':outPrefix+'.tab', 'description':'Tab delimited file of methylation ratios and coverage for each tile.'}}
buffer = 100000
bGs = map(lambda x: open(x, 'w', buffer), bgNames)
tab = open(outPrefix+'.tab', 'w', buffer)
# Write header
#headStr = '\t'.join(['Chr','Start','End']+[ c+'_'+t for c in contexts for t in ('ratio','C','CT')]) ## old out format
headStr = '\t'.join(['Chr','Start','End']+[ c+'_'+t for c in contexts for t in ('ratio','C','CT','sites')]) ## new out format
tab.write(headStr+'\n')
#######################################
# Get parameters
#######################################
tileSize = config['tiles']['size']['value']
ratioFile = config['methratio']['-o']['value']
nSitesT = map(lambda y: config['tiles'][y]['value'], contexts)
sortedChroms = sorted(faiDict.keys())
#######################################
# start writing by chromosome
#######################################
for chrom in sortedChroms:
#----------------------------------
# Create data arrays
#----------------------------------
offset = int(ceil(faiDict[chrom]/float(tileSize))) # number of tiles
C, CT, nSites = makeDataArrays(offset)
#----------------------------------
# Read Chrom and populate arrays
#----------------------------------
p = sp.Popen(["grep", "^%s\s"%chrom, ratioFile], stdout=sp.PIPE).stdout
for line in p:
chr, pos, cIndex, c, ct = formatLine(line)
index = offset*cIndex+pos/tileSize
C[index] += c
CT[index] += ct
nSites[index] += 1
p.close()
# zCheck is true if loc-1 had zero methylation
zCheck = [False, False, False]
for posIndex in xrange(offset): # tile index
start = posIndex*tileSize
end = min(start+tileSize, faiDict[chrom])
tabStr = '%s\t%i\t%i'%(chrom,start,end)
for cIndex in range(3):
loc = offset*cIndex+posIndex # data index
tabStr += makeTabStr(C[loc], CT[loc], nSites[loc])
#-------------------------
# Generate BG
#-------------------------
if C[loc]: # if methylated
if nSites[loc] < nSitesT[cIndex]:
if not zCheck[cIndex]:
bgStr = '%s\t%i\t'%(chrom,start)
zCheck[cIndex] = True
bGs[cIndex].write(bgStr)
else:
if zCheck[cIndex]: # if previous was 0
bgStr = '%i\t0\n'%(start,)
zCheck[cIndex] = False
bGs[cIndex].write(bgStr)
ratio = float(C[loc])/float(CT[loc])
bgStr = '%s\t%i\t%i\t%.2f\n'%(chrom,start,end,ratio)
bGs[cIndex].write(bgStr)
else:
if not zCheck[cIndex]:
bgStr = '%s\t%i\t'%(chrom,start)
zCheck[cIndex] = True
bGs[cIndex].write(bgStr)
#-------------------------
tab.write(tabStr+'\n')
#---------------------------------
# Write out orphaned zeros
#---------------------------------
for cIndex in range(3):
if zCheck[cIndex]:
bgStr = '%i\t0\n'%(end,)
bGs[cIndex].write(bgStr)
######################################
# Close files
######################################
for bg in bGs:
bg.close()
tab.close()
def makeTabStr(C, CT, nSites):
'''
Generates a tab-separated string for the .tab file.
'''
if C:
ratio = float(C)/float(CT)
return '\t%.2f\t%i\t%i\t%i'%(ratio, C, CT, nSites)
return '\t0\t%i\t%i\t%i'%(C, CT, nSites)
def formatLine(line):
tmp = line.split('\t')
chr = tmp[0]
pos = int(tmp[1])-1
cIndex = contexts.index(tmp[3])
c = int(tmp[6])
ct = int(tmp[7])
return (chr, pos, cIndex, c, ct)
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
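# Note: Python 3.3+ ships shutil.which() for the same PATH lookup; this local
# version presumably remains for Python 2 compatibility.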
def makeDataArrays(offset):
'''
Function for creating arrays that keep track of data from
methratio.py output.
>>> makeDataArrays(1)
(array('H', [0, 0, 0]), array('H', [0, 0, 0]), array('H', [0, 0, 0]))
'''
C = array('H', [0]*(offset*3))
CT = array('H', [0]*(offset*3))
nSites = array('H', [0]*(offset*3)) # max is tile size
return (C, CT, nSites)
if __name__ == "__main__":
main()
| 43.447592
| 207
| 0.57671
| 1,893
| 15,337
| 4.645536
| 0.209192
| 0.027519
| 0.025131
| 0.01501
| 0.194678
| 0.117808
| 0.098817
| 0.086081
| 0.086081
| 0.066295
| 0
| 0.00885
| 0.123297
| 15,337
| 352
| 208
| 43.571023
| 0.645173
| 0.172328
| 0
| 0.128514
| 0
| 0.008032
| 0.309755
| 0.004358
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064257
| false
| 0
| 0.036145
| 0.004016
| 0.15261
| 0.004016
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1d212dc12933a4a0f21c68d34b67d74f7e46ad2
| 4,316
|
py
|
Python
|
tests/test_metadata_model.py
|
statisticsnorway/microdata-validator
|
c6b6788ab3ba7a3dad889db9120ad2decc598e76
|
[
"Apache-2.0"
] | 1
|
2022-03-23T09:15:51.000Z
|
2022-03-23T09:15:51.000Z
|
tests/test_metadata_model.py
|
statisticsnorway/microdata-validator
|
c6b6788ab3ba7a3dad889db9120ad2decc598e76
|
[
"Apache-2.0"
] | 4
|
2022-02-17T08:41:30.000Z
|
2022-02-28T14:08:47.000Z
|
tests/test_metadata_model.py
|
statisticsnorway/microdata-validator
|
c6b6788ab3ba7a3dad889db9120ad2decc598e76
|
[
"Apache-2.0"
] | null | null | null |
import json
import pytest
from microdata_validator import Metadata, PatchingError
RESOURCE_DIR = 'tests/resources/metadata_model'
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described.json') as f:
TRANSFORMED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_update.json') as f:
UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated.json') as f:
ENUMERATED_TRANSFORMED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated_update.json') as f:
ENUMERATED_UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated_patched.json') as f:
PATCHED_ENUMERATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_patched.json') as f:
PATCHED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_illegal_update.json') as f:
# New variable name on line 18
ILLEGALLY_UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_deleted_object.json') as f:
# Deleted keyType object line 34
DELETED_OBJECT_METADATA = json.load(f)
def test_object():
transformed_metadata = Metadata(TRANSFORMED_METADATA)
assert (
transformed_metadata.get_identifier_key_type_name()
== 'SYKDOMSTILFELLE'
)
assert transformed_metadata.to_dict() == TRANSFORMED_METADATA
def test_patch_described():
transformed_metadata = Metadata(TRANSFORMED_METADATA)
updated_metadata = Metadata(UPDATED_METADATA)
transformed_metadata.patch(updated_metadata)
assert transformed_metadata.to_dict() == PATCHED_METADATA
def test_patch_enumerated():
transformed_metadata = Metadata(ENUMERATED_TRANSFORMED_METADATA)
updated_metadata = Metadata(ENUMERATED_UPDATED_METADATA)
transformed_metadata.patch(updated_metadata)
assert transformed_metadata.to_dict() == PATCHED_ENUMERATED_METADATA
def test_patch_with_deleted_object():
with pytest.raises(PatchingError) as e:
transformed_metadata = Metadata(TRANSFORMED_METADATA)
updated_metadata = Metadata(DELETED_OBJECT_METADATA)
transformed_metadata.patch(updated_metadata)
assert 'Can not delete KeyType' in str(e)
def test_patch_with_None():
with pytest.raises(PatchingError) as e:
transformed_metadata = Metadata(TRANSFORMED_METADATA)
transformed_metadata.patch(None)
assert 'Can not patch with NoneType Metadata' in str(e)
def test_illegally_patch():
with pytest.raises(PatchingError) as e:
transformed_metadata = Metadata(TRANSFORMED_METADATA)
illegally_updated_metadata = Metadata(ILLEGALLY_UPDATED_METADATA)
transformed_metadata.patch(illegally_updated_metadata)
assert (
'Illegal change to one of these variable fields: '
'[name, dataType, format, variableRole]'
) in str(e)
def test_patch_metadata_with_code_list():
updated = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated_update.json')
original = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated.json')
expected = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated_patched.json')
orig = Metadata(original)
orig.patch(Metadata(updated))
assert orig.to_dict() == expected
def test_patch_metadata_without_code_list():
updated = load_file(f'{RESOURCE_DIR}/SYNT_PERSON_INNTEKT_described_update.json')
original = load_file(f'{RESOURCE_DIR}/SYNT_PERSON_INNTEKT_described.json')
expected = load_file(f'{RESOURCE_DIR}/SYNT_PERSON_INNTEKT_described_patched.json')
orig = Metadata(original)
orig.patch(Metadata(updated))
assert orig.to_dict() == expected
def test_patch_metadata_illegal_fields_changes():
"""
The "updated" contains randomly chosen fields that are not allowed to be changed.
"""
updated = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated_illegal_update.json')
original = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated.json')
with pytest.raises(PatchingError) as e:
orig = Metadata(original)
orig.patch(Metadata(updated))
assert 'Can not change these metadata fields [name, temporality, languageCode]' in str(e)
def load_file(file_name: str):
with open(file_name) as f:
source = json.load(f)
return source
| 37.530435
| 96
| 0.765524
| 553
| 4,316
| 5.652803
| 0.168174
| 0.145873
| 0.06142
| 0.043506
| 0.664107
| 0.595969
| 0.574216
| 0.557262
| 0.492962
| 0.419706
| 0
| 0.001086
| 0.146432
| 4,316
| 114
| 97
| 37.859649
| 0.847448
| 0.032901
| 0
| 0.292683
| 0
| 0
| 0.266298
| 0.21121
| 0
| 0
| 0
| 0
| 0.121951
| 1
| 0.121951
| false
| 0
| 0.036585
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1d273fedbebba3a9ba1430c685e07560c2562dd
| 680
|
py
|
Python
|
tests/platforms/macOS/dmg/test_mixin.py
|
chuckyQ/briefcase
|
06e84e7b1c3af016c828a5a640d277809de6644b
|
[
"BSD-3-Clause"
] | 3
|
2020-09-29T15:32:35.000Z
|
2021-11-08T09:41:04.000Z
|
tests/platforms/macOS/dmg/test_mixin.py
|
CuPidev/briefcase
|
35619cbe4b512c8521ad3733341e6bc3422efb58
|
[
"BSD-3-Clause"
] | null | null | null |
tests/platforms/macOS/dmg/test_mixin.py
|
CuPidev/briefcase
|
35619cbe4b512c8521ad3733341e6bc3422efb58
|
[
"BSD-3-Clause"
] | 1
|
2021-03-26T11:52:02.000Z
|
2021-03-26T11:52:02.000Z
|
import sys
import pytest
from briefcase.platforms.macOS.dmg import macOSDmgCreateCommand
if sys.platform != 'darwin':
pytest.skip("requires macOS", allow_module_level=True)
def test_binary_path(first_app_config, tmp_path):
command = macOSDmgCreateCommand(base_path=tmp_path)
binary_path = command.binary_path(first_app_config)
assert binary_path == tmp_path / 'macOS' / 'First App' / 'First App.app'
def test_distribution_path(first_app_config, tmp_path):
command = macOSDmgCreateCommand(base_path=tmp_path)
distribution_path = command.distribution_path(first_app_config)
assert distribution_path == tmp_path / 'macOS' / 'First App-0.0.1.dmg'
| 29.565217
| 76
| 0.772059
| 92
| 680
| 5.402174
| 0.347826
| 0.112676
| 0.096579
| 0.144869
| 0.539235
| 0.370221
| 0.273642
| 0.273642
| 0.273642
| 0.273642
| 0
| 0.005093
| 0.133824
| 680
| 22
| 77
| 30.909091
| 0.83871
| 0
| 0
| 0.153846
| 0
| 0
| 0.104412
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.153846
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1d4630b4a1d77b92aebe2079bfb6cc0bd824f76
| 674
|
py
|
Python
|
meutils/clis/conf.py
|
Jie-Yuan/MeUtils
|
2bb191b0d35b809af037c0f65b37570b8828bea3
|
[
"Apache-2.0"
] | 3
|
2020-12-03T07:30:02.000Z
|
2021-02-07T13:37:33.000Z
|
meutils/clis/conf.py
|
Jie-Yuan/MeUtils
|
2bb191b0d35b809af037c0f65b37570b8828bea3
|
[
"Apache-2.0"
] | null | null | null |
meutils/clis/conf.py
|
Jie-Yuan/MeUtils
|
2bb191b0d35b809af037c0f65b37570b8828bea3
|
[
"Apache-2.0"
] | 1
|
2021-02-07T13:37:38.000Z
|
2021-02-07T13:37:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : conf
# @Time : 2021/1/31 10:20 PM
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from meutils.pipe import *
# Define the parameters
class TrainConf(BaseConfig):
epoch = 10
batch_size = 128
def train(**kwargs):
logger.info("开始训练")
time.sleep(3)
# Use the parameters
def run(**kwargs):
logger.info(f"输入参数: {kwargs}")
c = TrainConf.parse_obj(kwargs)
logger.info(f"使用参数: {c.dict()}")
train(**c.dict())
# Pass the parameters in
conf_cli = lambda: fire.Fire(run) # <conf_cli> --epoch 11 --batch_size 111
# fire.Fire() needs to be given a command object
| 18.216216
| 75
| 0.587537
| 89
| 674
| 4.393258
| 0.674157
| 0.092072
| 0.122762
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04501
| 0.24184
| 674
| 36
| 76
| 18.722222
| 0.720157
| 0.440653
| 0
| 0
| 0
| 0
| 0.093664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1d82814692baf55384c0af692ceedac9c370b19
| 4,517
|
py
|
Python
|
edualgo/circular-linked-list.py
|
VaishnaviNandakumar/eduAlgo
|
5eb24058d969ab6dae2cbd19f9048ea1a353b48e
|
[
"MIT"
] | 22
|
2021-02-25T04:35:57.000Z
|
2022-02-14T13:33:19.000Z
|
edualgo/circular-linked-list.py
|
VaishnaviNandakumar/eduAlgo
|
5eb24058d969ab6dae2cbd19f9048ea1a353b48e
|
[
"MIT"
] | 40
|
2021-02-26T06:59:41.000Z
|
2021-11-10T07:40:29.000Z
|
edualgo/circular-linked-list.py
|
VaishnaviNandakumar/eduAlgo
|
5eb24058d969ab6dae2cbd19f9048ea1a353b48e
|
[
"MIT"
] | 17
|
2021-02-25T00:58:57.000Z
|
2021-11-08T23:46:06.000Z
|
from __init__ import print_msg_box
class Node:
def __init__(self, dataValue=None):
self.dataValue = dataValue
self.next = None
class singleLinkedList:
def __init__(self):
self.headValue = None
self.temp = None
def insertLast(self, *elements):
for data in elements:
if self.headValue is None:
self.headValue = Node(data)
self.temp = self.headValue
else:
self.temp.next = Node(data)
self.temp = self.temp.next
self.temp.next = self.headValue
pass
def insertFirst(self, *elements):
if self.headValue is not None:
prevheadValue = self.headValue
self.headValue = None
else:
prevheadValue = None
for data in elements:
if self.headValue is None:
self.headValue = Node(data)
self.temp = self.headValue
else:
self.temp.next = Node(data)
self.temp = self.temp.next
if prevheadValue is not None:
self.temp.next = prevheadValue
self.temp = self.temp.next
while self.temp.next != prevheadValue:
self.temp = self.temp.next
self.temp.next = self.headValue
def insertMiddle(self, arg1: "data", arg2: "position"):
node = self.headValue
for i in range(1,arg2-1):
if node.next is None:
return
node = node.next
prev = node.next
node.next = Node(arg1)
node = node.next
node.next = prev
while node.next != self.headValue:
node = node.next
node.next = self.headValue
def delete(self, position: "Position to be deleted"):
#[data|next] --> [data|next] --> [data|next] --> [data|next]
# ^_______________^
node = self.headValue
for i in range(position-2):
node = node.next
node.next = node.next.next
while node.next != self.headValue:
node = node.next
node.next = self.headValue
def display(self):
printValue = self.headValue
if printValue is None:
print("list is empty")
while printValue is not None:
print (printValue.dataValue)
printValue = printValue.next
pass
def hint(self):
message=""""
Create a node class to have two variables
1. Store data (datavalue)
2. Next data address in last it is usually null in circular (next)
linked list
Create another class to perform manipulation in list
Insert First:
*To insert first element we need to have the data to whether any
data exist before if so then we have to store it safely
* Storing the data in headval
* Taking previous value to set next value of another node
* It repeats until it reaches the previous head value
* Setting the last value to head node
Insert last:
*To insert last element we need to have the data to whether any
data exist before if so then we have to store it safely
* It repeats until it reaches the head value is occurred
* Setting the last node next value to head node
Insert Middle:
*To insert middle element we need to have the data to whether any
data exist before if so then we have to store it safely
* Taking previous value to set next value of another node
* It repeats until it reaches the previous head value
* Setting the last next value to head node
Display:
Display will take next value of node repeatedly so the list is
infinite loop
"""
#creating object
#list = singleLinkedList()
#list.insertLast(50, 60,70)
#list.display()
'''
It shows the entered things at last
output:
=======
50
60
70
50...
'''
#list.insertFirst(10,20,30)
#list.display()
'''
It shows the entered things at first then remaining
output:
=======
10
20
30
50
60
70
10...
'''
#print(list.insertMiddle.__annotations__)
#list.insertMiddle(40,4)
#list.display()
'''
It shows the inserted element at nth position
output:
=======
10
20
30
40
50
60
70
10...
'''
#list.delete(6)
#list.display()
'''
It shows the list after deleting it
output:
=======
10
20
30
40
50
60
10...
'''
| 23.404145
| 75
| 0.579367
| 572
| 4,517
| 4.517483
| 0.208042
| 0.095588
| 0.04644
| 0.037152
| 0.53483
| 0.458978
| 0.436533
| 0.400929
| 0.373065
| 0.345975
| 0
| 0.025856
| 0.340713
| 4,517
| 192
| 76
| 23.526042
| 0.84184
| 0.072172
| 0
| 0.427083
| 0
| 0
| 0.378244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.020833
| 0.010417
| 0
| 0.125
| 0.072917
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1e1bcedb2edbb2d5f4a7e0929b4350832d56cb6
| 1,280
|
py
|
Python
|
keypoints_SIFT_Descriptor.py
|
praxitelisk/OpenCV_Image_Mining
|
8fb6af58a677e9acd9711164080910e4f62f7de8
|
[
"MIT"
] | null | null | null |
keypoints_SIFT_Descriptor.py
|
praxitelisk/OpenCV_Image_Mining
|
8fb6af58a677e9acd9711164080910e4f62f7de8
|
[
"MIT"
] | null | null | null |
keypoints_SIFT_Descriptor.py
|
praxitelisk/OpenCV_Image_Mining
|
8fb6af58a677e9acd9711164080910e4f62f7de8
|
[
"MIT"
] | null | null | null |
#import Libraries
import cv2
import sys
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
##################################################
'''
This example illustrates how to extract interesting key points
as features from an image
Usage:
keypoints_SIFT_Descriptor.py [<image_name>]
image argument defaults to img/home.jpg
'''
#Read from input
try:
fn = sys.argv[1]
except IndexError:
fn = "img/home.jpg"
##################################################
#Read image and plot it
img_original = mpimg.imread(fn)
img = mpimg.imread(fn)
plt.subplot(121), plt.imshow(img)
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
#grayscale it
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
##################################################
#use SIFT descriptor for image key points feature extraction
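# Note: cv2.xfeatures2d requires the opencv-contrib-python build; since OpenCV
# 4.4.0 SIFT is patent-free and also available as cv2.SIFT_create() in the main module.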
sift = cv2.xfeatures2d.SIFT_create()
(kps, descs) = sift.detectAndCompute(gray, None)
##################################################
#draw the keypoints
img = cv2.drawKeypoints(gray,kps,None,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.subplot(122), plt.imshow(img)
plt.title('Image with extracted keypoints'), plt.xticks([]), plt.yticks([])
plt.show()
##################################################
| 28.444444
| 92
| 0.603906
| 153
| 1,280
| 4.993464
| 0.522876
| 0.02356
| 0.034031
| 0.039267
| 0.052356
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013135
| 0.107813
| 1,280
| 45
| 93
| 28.444444
| 0.655867
| 0.110938
| 0
| 0
| 0
| 0
| 0.081042
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1e232b6f4bcb98d057d8080fd878bcc9a488c24
| 1,103
|
py
|
Python
|
lib/getHostInfoResponse.py
|
jacksitlab/esxi-client
|
0d9c815a2638fb9ed2c559a6ec9bdeb6ff9f033e
|
[
"MIT"
] | null | null | null |
lib/getHostInfoResponse.py
|
jacksitlab/esxi-client
|
0d9c815a2638fb9ed2c559a6ec9bdeb6ff9f033e
|
[
"MIT"
] | null | null | null |
lib/getHostInfoResponse.py
|
jacksitlab/esxi-client
|
0d9c815a2638fb9ed2c559a6ec9bdeb6ff9f033e
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET
from .baseVmWareXmlResponse import BaseVmWareXmlResponse
class GetHostInfoResponse(BaseVmWareXmlResponse):
def __str__(self):
return ('GetHostInfoResponse[vendor={} model={} vCPUs={} memory={}]').format(
self.vendor, self.model, self.vCPUs, self.memory)
def toDict(self):
return dict(vendor=self.vendor, model=self.model, vCPUs=self.vCPUs, memory=self.memory)
def __init__(self, response):
data = ET.fromstring(response)
innerData = self.getSubTreeByTree(
data, ['Body', 'RetrievePropertiesExResponse', 'returnval', 'objects'])
dataSet = self.findPropertySetValue(innerData,'summary.hardware',False)
if dataSet is None:
print(response)
raise ValueError('no known response data found')
self.vendor = self.getSubTree(dataSet,'vendor').text
self.model = self.getSubTree(dataSet,'model').text
self.vCPUs = int(self.getSubTree(dataSet,'numCpuThreads').text)
self.memory = int(self.getSubTree(dataSet,'memorySize').text)
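# Usage sketch (the raw SOAP/XML response string is environment-specific):
#   info = GetHostInfoResponse(raw_xml)
#   print(info)  # GetHostInfoResponse[vendor=... model=... vCPUs=... memory=...]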
| 40.851852
| 95
| 0.676337
| 114
| 1,103
| 6.473684
| 0.438596
| 0.075881
| 0.113821
| 0.065041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201269
| 1,103
| 26
| 96
| 42.423077
| 0.837684
| 0
| 0
| 0
| 0
| 0
| 0.166062
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.1
| 0.1
| 0.4
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1e50fb8283a579fbdd6f28ea13ffe7026e7416d
| 1,651
|
py
|
Python
|
pyefriend_api/app/v1/setting/router.py
|
softyoungha/pyefriend
|
43a9db224be50308458f0b939ac0181b3bd63d0b
|
[
"MIT"
] | 8
|
2021-11-26T14:22:21.000Z
|
2022-03-26T03:32:51.000Z
|
pyefriend_api/app/v1/setting/router.py
|
softyoungha/pyefriend
|
43a9db224be50308458f0b939ac0181b3bd63d0b
|
[
"MIT"
] | 1
|
2021-12-19T13:08:26.000Z
|
2021-12-19T13:22:28.000Z
|
pyefriend_api/app/v1/setting/router.py
|
softyoungha/pyefriend
|
43a9db224be50308458f0b939ac0181b3bd63d0b
|
[
"MIT"
] | 5
|
2022-01-12T17:54:40.000Z
|
2022-03-25T10:22:36.000Z
|
import os
from typing import Optional, List
from fastapi import APIRouter, Request, Response, status, Depends
from pyefriend_api.models.setting import Setting as SettingModel
from pyefriend_api.app.auth import login_required
from .schema import SettingOrm, SettingUpdate
r = APIRouter(prefix='/setting',
tags=['setting'])
@r.get('/', response_model=List[SettingOrm])
async def get_settings(user=Depends(login_required)):
"""### 세팅 가능한 값 전부 조회 """
return [SettingOrm.from_orm(item) for item in SettingModel.list()]
@r.post('/', status_code=status.HTTP_200_OK)
async def initialize_settings(user=Depends(login_required)):
"""
### Reset settings to their defaults
- force: when True, existing values are reinitialized
"""
SettingModel.initialize(first=False)
return Response('Success', status_code=status.HTTP_200_OK)
@r.get('/{section}/{key}', response_model=SettingOrm)
async def get_a_setting(section: str, key: str, user=Depends(login_required)):
"""
### Fetch a single setting
- section: section to look up in the setting table
- key: key to look up within that section
"""
return SettingOrm.from_orm(SettingModel.get(section=section, key=key))
@r.put('/{section}/{key}', status_code=status.HTTP_200_OK)
async def change_setting(section: str,
key: str,
request: SettingUpdate,
user=Depends(login_required)):
"""
### Update a setting
- section: section to look up in the setting table
- key: key to look up within that section
"""
SettingModel.update(section=section,
key=key,
value=request.value)
return Response('Success', status_code=status.HTTP_200_OK)
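# Illustrative request sketch (hypothetical host, section, and key):
#   PUT http://localhost:8000/setting/{section}/{key} with body {"value": "..."}
#   updates a single entry via SettingModel.update().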
| 30.018182
| 78
| 0.65536
| 207
| 1,651
| 5.091787
| 0.342995
| 0.056926
| 0.060721
| 0.091082
| 0.393738
| 0.235294
| 0.235294
| 0.235294
| 0.172676
| 0.085389
| 0
| 0.009375
| 0.224712
| 1,651
| 54
| 79
| 30.574074
| 0.814063
| 0
| 0
| 0.074074
| 0
| 0
| 0.045785
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1e88bdba0945c9b9cc4455b24e5747284f786b4
| 368
|
py
|
Python
|
circular_rings.py
|
irahorecka/Diffraction-Simulations--Angular-Spectrum-Method
|
c2eb1de944685018f887c7861301f7098354e9f5
|
[
"MIT"
] | 1
|
2021-01-04T17:04:55.000Z
|
2021-01-04T17:04:55.000Z
|
circular_rings.py
|
irahorecka/Diffraction-Simulations--Angular-Spectrum-Method
|
c2eb1de944685018f887c7861301f7098354e9f5
|
[
"MIT"
] | null | null | null |
circular_rings.py
|
irahorecka/Diffraction-Simulations--Angular-Spectrum-Method
|
c2eb1de944685018f887c7861301f7098354e9f5
|
[
"MIT"
] | null | null | null |
from simulator import PolychromaticField, cf, mm
F = PolychromaticField(
spectrum=1.5 * cf.illuminant_d65,
extent_x=12.0 * mm,
extent_y=12.0 * mm,
Nx=1200,
Ny=1200,
)
F.add_aperture_from_image(
"./apertures/circular_rings.jpg", pad=(9 * mm, 9 * mm), Nx=1500, Ny=1500
)
rgb = F.compute_colors_at(z=1.5)
F.plot(rgb, xlim=[-8, 8], ylim=[-8, 8])
| 23
| 76
| 0.649457
| 63
| 368
| 3.650794
| 0.619048
| 0.017391
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112583
| 0.179348
| 368
| 15
| 77
| 24.533333
| 0.649007
| 0
| 0
| 0
| 0
| 0
| 0.081522
| 0.081522
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1efcc031c8bf6f3a8fed9857aad8b4235615828
| 897
|
py
|
Python
|
merge-sort.py
|
bauluk/algorithms
|
9020d2a6150e58ad26d18b8fede32ded966f8a8b
|
[
"MIT"
] | null | null | null |
merge-sort.py
|
bauluk/algorithms
|
9020d2a6150e58ad26d18b8fede32ded966f8a8b
|
[
"MIT"
] | null | null | null |
merge-sort.py
|
bauluk/algorithms
|
9020d2a6150e58ad26d18b8fede32ded966f8a8b
|
[
"MIT"
] | null | null | null |
import random
def mergeSort(numbers):
if len(numbers) <= 1:
return numbers
left = numbers[:len(numbers)//2]
right = numbers[len(numbers)//2:]
left = mergeSort(left)
right = mergeSort(right)
numbers = merge(left, right, numbers)
return numbers
def merge(left, right, numbers):
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] <= right[j]:
numbers[k] = left[i]
i += 1
else:
numbers[k] = right[j]
j += 1
k += 1
# process any leftovers
while i < len(left):
numbers[k] = left[i]
i += 1
k += 1
while j < len(right):
numbers[k] = right[j]
j += 1
k += 1
return numbers
numbers = []
for i in range(0, 100):
numbers.append(random.randint(1, 100))
numbers = mergeSort(numbers)
print(numbers)
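# Optional sanity check (not part of the original script): the output must be sorted.
assert numbers == sorted(numbers)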
| 19.933333
| 43
| 0.518395
| 120
| 897
| 3.875
| 0.25
| 0.129032
| 0.019355
| 0.077419
| 0.141935
| 0.141935
| 0.077419
| 0.077419
| 0
| 0
| 0
| 0.035897
| 0.347826
| 897
| 44
| 44
| 20.386364
| 0.758974
| 0.023411
| 0
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.027778
| 0
| 0.166667
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f02ab69517e03a599a2beb69e3009f8624f7cc
| 1,586
|
py
|
Python
|
W2/task4.py
|
mcv-m6-video/mcv-m6-2021-team6
|
701fc1420930342f3b3733e8f8fc4675c21d8f3f
|
[
"Unlicense"
] | null | null | null |
W2/task4.py
|
mcv-m6-video/mcv-m6-2021-team6
|
701fc1420930342f3b3733e8f8fc4675c21d8f3f
|
[
"Unlicense"
] | 2
|
2021-03-23T10:34:33.000Z
|
2021-03-23T18:54:28.000Z
|
W2/task4.py
|
mcv-m6-video/mcv-m6-2021-team6
|
701fc1420930342f3b3733e8f8fc4675c21d8f3f
|
[
"Unlicense"
] | 1
|
2021-03-08T21:13:15.000Z
|
2021-03-08T21:13:15.000Z
|
from utilsw2 import *
from Reader import *
from Adapted_voc_evaluation import *
import glob
path_to_video = 'datasets/AICity_data/train/S03/c010/vdo.avi'
path_to_frames = 'datasets/frames/'
results_path = 'Results/Task1_1'
def task4(color_space=cv2.COLOR_BGR2GRAY, mu_file="W2/task1_1/mu.pkl", sigma_file="W2/task1_1/sigma.pkl"):
video_n_frames = len(glob.glob1(path_to_frames, "*.jpg"))
mu, sigma = GetGaussianModel(path_to_frames, video_n_frames,color_space,mu_file,sigma_file)
lowLimit = int(video_n_frames * 0.25)
highLimit = int(video_n_frames)
det_bb = remove_background(mu,
sigma,
6,
path_to_frames,
lowLimit,
highLimit,
animation=True,
color_space=color_space)
reader = AICityChallengeAnnotationReader(path='datasets/AICity_data/train/S03/c010/gt/gt.txt',initFrame=int(video_n_frames * 0.25), finalFrame=int(video_n_frames))
gt = reader.get_annotations(classes=['car'], only_not_parked=True)
bb_gt = []
# for frame in gt.keys():
for frame in range(int(video_n_frames * 0.25), int(video_n_frames)):
annotations = gt.get(frame, [])
bb_gt.append(annotations)
ap, prec, rec = mean_average_precision(bb_gt , det_bb)
print(ap)
if __name__ == '__main__':
colors = [cv2.COLOR_BGR2HSV, cv2.COLOR_BGR2RGB, cv2.COLOR_BGR2YCrCb, cv2.COLOR_BGR2LAB]
for c in colors:
task4(c,f"W2/task4_1/mu{str(c)}.pkl",f"W2/task4_1/sigma{str(c)}.pkl")
| 38.682927
| 167
| 0.655107
| 225
| 1,586
| 4.315556
| 0.386667
| 0.049434
| 0.098867
| 0.092688
| 0.144181
| 0.117405
| 0
| 0
| 0
| 0
| 0
| 0.039152
| 0.226986
| 1,586
| 40
| 168
| 39.65
| 0.752855
| 0.014502
| 0
| 0
| 0
| 0
| 0.144138
| 0.090327
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.125
| 0
| 0.15625
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f0cff2e554ccf456ca71299fa80fb9f25a8ffe
| 3,207
|
py
|
Python
|
src/dictstore/file_handler.py
|
sampathbalivada/dictstore
|
d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6
|
[
"Apache-2.0"
] | 1
|
2021-12-21T14:23:50.000Z
|
2021-12-21T14:23:50.000Z
|
src/dictstore/file_handler.py
|
sampathbalivada/dictstore
|
d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6
|
[
"Apache-2.0"
] | null | null | null |
src/dictstore/file_handler.py
|
sampathbalivada/dictstore
|
d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Sai Sampath Kumar Balivada
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
file handler reads and writes datastore entries to and from the disk.
file paths are case sensitive.
"""
import os.path
import datetime
from pathlib import Path
from dictstore.exceptions import InvalidFileExtension
def generate_file_header_string() -> str:
"""Generates file header string for the data file"""
header = '// Python Dictstore File\n'
date_string = str(datetime.datetime.now())
header += '// Last Rewrite: ' + date_string + '\n'
return header
class FileHandler:
"""
handles the dictstore datastore file(s)
"""
def __has_valid_file_extension(self):
"""Checks if the given file path ends with .dictstore"""
return self.file_path.endswith('.dictstore')
def __init__(self, file_path) -> None:
"""
creates a file handler for the datastore file.
Exceptions:
OSError
InvalidFileExtension
"""
# store the given file path
self.file_path = file_path
# check if the filename is valid
if not self.__has_valid_file_extension():
raise InvalidFileExtension()
# check if file exists at path
# and create a datastore file if it doesn't exist
if not os.path.exists(self.file_path):
Path(os.path.dirname(self.file_path)).mkdir(
parents=True,
exist_ok=True
)
with open(self.file_path, 'w', encoding='utf-8') as data_file:
data_file.write(generate_file_header_string())
# open the file and read its contents
with open(self.file_path, 'r', encoding='utf-8') as data_file:
self.file_contents = data_file.read()
def rewrite_to_file(self, lines) -> None:
"""Writes the given lines to data file"""
with open(self.file_path, 'w', encoding='utf-8') as data_file:
data_file.write(generate_file_header_string())
data_file.writelines(lines)
def append_to_file(self, string: str) -> None:
"""Appends the given string to data file"""
with open(self.file_path, 'a', encoding='utf-8') as data_file:
data_file.write(string)
def read_from_file(self) -> str:
"""
Reads the contents of data file and
returns all the contents of file
without the first two lines
"""
with open(self.file_path, 'r', encoding='utf-8') as data_file:
data_file.readline()
data_file.readline()
return data_file.readlines()
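# Minimal usage sketch (hypothetical path; the '.dictstore' extension is required):
#   handler = FileHandler('data/example.dictstore')
#   handler.append_to_file('key = value\n')
#   print(handler.read_from_file())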
| 33.061856
| 74
| 0.640474
| 426
| 3,207
| 4.687793
| 0.35446
| 0.068102
| 0.06009
| 0.04006
| 0.164747
| 0.164747
| 0.164747
| 0.164747
| 0.140711
| 0.123185
| 0
| 0.00557
| 0.272217
| 3,207
| 96
| 75
| 33.40625
| 0.850043
| 0.394138
| 0
| 0.205128
| 0
| 0
| 0.047222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.102564
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f1be9cfd0e8788923ad96d397bd4e298d8a339
| 2,432
|
py
|
Python
|
tests/mappers/test_action_mapper.py
|
mik-laj/oozie-to-airflow
|
c04952ddc8354bcafa340703b30f7ff33f844f4e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/mappers/test_action_mapper.py
|
mik-laj/oozie-to-airflow
|
c04952ddc8354bcafa340703b30f7ff33f844f4e
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-07-01T21:57:45.000Z
|
2019-07-01T21:57:45.000Z
|
tests/mappers/test_action_mapper.py
|
mik-laj/oozie-to-airflow
|
c04952ddc8354bcafa340703b30f7ff33f844f4e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests action mapper"""
import unittest
from o2a.converter.relation import Relation
from o2a.converter.task import Task
from o2a.mappers.action_mapper import ActionMapper
TEST_MAPPER_NAME = "mapper_name"
TEST_DAG_NAME = "dag_name"
class TestActionMapper(unittest.TestCase):
def test_prepend_task_no_tasks(self):
task_1 = Task(task_id=TEST_MAPPER_NAME + "_1", template_name="pig.tpl")
with self.assertRaises(IndexError):
ActionMapper.prepend_task(task_to_prepend=task_1, tasks=[], relations=[])
def test_prepend_task_empty_relations(self):
task_1 = Task(task_id=TEST_MAPPER_NAME + "_1", template_name="pig.tpl")
task_2 = Task(task_id=TEST_MAPPER_NAME + "_2", template_name="pig.tpl")
tasks, relations = ActionMapper.prepend_task(task_to_prepend=task_1, tasks=[task_2], relations=[])
self.assertEqual([task_1, task_2], tasks)
self.assertEqual([Relation(from_task_id="mapper_name_1", to_task_id="mapper_name_2")], relations)
def test_prepend_task_some_relations(self):
task_1 = Task(task_id=TEST_MAPPER_NAME + "_1", template_name="pig.tpl")
task_2 = Task(task_id=TEST_MAPPER_NAME + "_2", template_name="pig.tpl")
task_3 = Task(task_id=TEST_MAPPER_NAME + "_3", template_name="pig.tpl")
tasks, relations = ActionMapper.prepend_task(
task_to_prepend=task_1,
tasks=[task_2, task_3],
relations=[Relation(from_task_id="mapper_name_2", to_task_id="mapper_name_3")],
)
self.assertEqual([task_1, task_2, task_3], tasks)
self.assertEqual(
[
Relation(from_task_id="mapper_name_1", to_task_id="mapper_name_2"),
Relation(from_task_id="mapper_name_2", to_task_id="mapper_name_3"),
],
relations,
)
| 41.931034
| 106
| 0.702303
| 348
| 2,432
| 4.600575
| 0.29023
| 0.099938
| 0.059963
| 0.07995
| 0.517177
| 0.480949
| 0.434728
| 0.434728
| 0.434728
| 0.405996
| 0
| 0.021839
| 0.190378
| 2,432
| 57
| 107
| 42.666667
| 0.791265
| 0.240954
| 0
| 0.147059
| 0
| 0
| 0.09688
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 1
| 0.088235
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f1e91e085496f9d5527679e19a038eaba7f62a
| 1,265
|
py
|
Python
|
euclidean_gcd/Python/euclidean_gcd.py
|
parammittal16/Algorithms
|
b9c3b6086ebf9f96bacaa55c2c29961be42676f6
|
[
"MIT"
] | 1
|
2018-10-04T13:10:23.000Z
|
2018-10-04T13:10:23.000Z
|
euclidean_gcd/Python/euclidean_gcd.py
|
Rajeev00021/Algorithms
|
2aeeff13b63f17bae2145ffc9583dacbe2070994
|
[
"MIT"
] | 2
|
2019-10-15T06:31:33.000Z
|
2019-10-15T06:32:19.000Z
|
euclidean_gcd/Python/euclidean_gcd.py
|
Rajeev00021/Algorithms
|
2aeeff13b63f17bae2145ffc9583dacbe2070994
|
[
"MIT"
] | 1
|
2019-10-05T18:24:04.000Z
|
2019-10-05T18:24:04.000Z
|
def euclidean_gcd(first, second):
"""
Calculates GCD of two numbers using the division-based Euclidean Algorithm
:param first: First number
:param second: Second number
"""
    while second:
first, second = second, first % second
return first
def euclidean_gcd_recursive(first, second):
"""
Calculates GCD of two numbers using the recursive Euclidean Algorithm
:param first: First number
:param second: Second number
"""
if not second:
return first
return euclidean_gcd_recursive(second, first % second)
def main():
first, second = map(int, input('Enter 2 integers: ').split())
print('Division-based: GCD of {} and {} is: {}'.format(first,
second,
euclidean_gcd(
first, second)))
print('Recursive: GCD of {} and {} is: {}'.format(first,
second,
euclidean_gcd_recursive(
first, second)))
if __name__ == '__main__':
main()
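# --- Editor's note: a worked example of the algorithm above, added for
# illustration; not part of the original file.
# gcd(48, 18): 48 % 18 = 12 -> 18 % 12 = 6 -> 12 % 6 = 0, so the GCD is 6.
assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == 6
assert euclidean_gcd(7, 13) == 1   # coprime inputs give 1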
| 34.189189
| 79
| 0.480632
| 112
| 1,265
| 5.285714
| 0.303571
| 0.185811
| 0.086149
| 0.077703
| 0.559122
| 0.489865
| 0.489865
| 0.489865
| 0.489865
| 0.209459
| 0
| 0.001403
| 0.436364
| 1,265
| 36
| 80
| 35.138889
| 0.828892
| 0.207115
| 0
| 0.3
| 0
| 0
| 0.103556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f402dc0bcbd7349f6046e391a89f06ba005aeb
| 1,627
|
py
|
Python
|
util/metrics/covariance.py
|
jamesoneill12/LayerFusion
|
99cba1030ed8c012a453bc7715830fc99fb980dc
|
[
"Apache-2.0"
] | null | null | null |
util/metrics/covariance.py
|
jamesoneill12/LayerFusion
|
99cba1030ed8c012a453bc7715830fc99fb980dc
|
[
"Apache-2.0"
] | null | null | null |
util/metrics/covariance.py
|
jamesoneill12/LayerFusion
|
99cba1030ed8c012a453bc7715830fc99fb980dc
|
[
"Apache-2.0"
] | null | null | null |
""" Distances metrics based on the covariance matrix (mostly in the context of merging and compress)"""
import torch
import numpy as np
import torch.nn.functional as F
np.random.seed(0)
def cov(m, y=None):
"""computes covariance of m"""
if y is not None:
m = torch.cat((m, y), dim=0)
m_exp = torch.mean(m, dim=1)
x = m - m_exp[:, None]
cov = 1 / (x.size(1) - 1) * x.mm(x.t())
return cov
def cov_norm(m, y):
"""computes similarity of x, y covariance matrices"""
m = (m - m.mean(dim=0)) / m.std(dim=0)
y = (y - y.mean(dim=0)) / y.std(dim=0)
# print(m.size())
# print(y.size())
m = cov(m)
y = cov(y)
return torch.norm(m) - torch.norm(y)
def get_svd(m, y):
m = (m - m.mean(dim=0)) / m.std(dim=0)
y = (y - y.mean(dim=0)) / y.std(dim=0)
u1, s1, v1 = torch.svd(m)
u2, s2, v2 = torch.svd(y)
return s1, s2
def cov_eig(m, y, k=None):
"""computes similarity of x, y covariance matrices"""
s1, s2 = get_svd(m, y)
d = (s1 - s2) if k is None else (s1[:k] - s2[:k])
d = d.sum().abs()
return d
def cov_eig_kl(m, y, k=None):
"""computes similarity of x, y covariance matrices"""
s1, s2 = get_svd(m, y)
    if k is not None:
        s1, s2 = s1[:k], s2[:k]   # truncate to the top-k singular values
    d = F.kl_div(F.log_softmax(s1, dim=0), F.softmax(s2, dim=0))
return d
def cov_kl(m, y, k=None):
"""computes similarity of x, y covariance matrices"""
    # k is accepted but unused here; kept for a uniform signature
    m_p = F.log_softmax(m.flatten(), dim=0)   # kl_div expects log-probabilities first
    y_p = F.softmax(y.flatten(), dim=0)
    d = F.kl_div(m_p, y_p)
return d
if __name__ == "__main__":
x = torch.randn((100, 20))
y = torch.randn((100, 50))
print(cov_norm(x, y))
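    # --- Editor's sanity check, added for illustration; not part of the
    # original file: cov() above should agree with numpy's covariance for the
    # same 2-D input (rows are variables, columns are observations).
    m = torch.randn((5, 40))
    expected = torch.from_numpy(np.cov(m.numpy())).float()
    assert torch.allclose(cov(m), expected, atol=1e-4)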
| 23.926471
| 103
| 0.562999
| 303
| 1,627
| 2.940594
| 0.244224
| 0.022447
| 0.089787
| 0.094276
| 0.3367
| 0.320988
| 0.320988
| 0.320988
| 0.274972
| 0.274972
| 0
| 0.037829
| 0.252612
| 1,627
| 67
| 104
| 24.283582
| 0.694901
| 0.212661
| 0
| 0.214286
| 0
| 0
| 0.006421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.357143
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f4b4fbb3b683f57ba6d1034a8a600f1e9bf050
| 3,415
|
py
|
Python
|
tfhub_context.py
|
thingumajig/simple_flask_tfhub
|
75daae03299b43310b674664d41c273b6e3994c0
|
[
"Apache-2.0"
] | null | null | null |
tfhub_context.py
|
thingumajig/simple_flask_tfhub
|
75daae03299b43310b674664d41c273b6e3994c0
|
[
"Apache-2.0"
] | 6
|
2020-01-28T22:42:39.000Z
|
2022-02-10T00:10:23.000Z
|
tfhub_context.py
|
thingumajig/simple_flask_tfhub
|
75daae03299b43310b674664d41c273b6e3994c0
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
class TFHubContext:
def __init__(self, url="https://tfhub.dev/google/universal-sentence-encoder-large/3") -> None:
super().__init__()
print('Initialize graph:')
# Create graph and finalize (finalizing optional but recommended).
self.g = tf.Graph()
with self.g.as_default():
# We will be feeding 1D tensors of text into the graph.
self.text_input = tf.placeholder(dtype=tf.string, shape=[None])
self.embed = hub.Module(url)
self.embedded_text = self.get_embedded_text()
self.init_op = tf.group([tf.global_variables_initializer(), tf.tables_initializer()])
self.g.finalize()
def get_embedded_text(self):
return self.embed(self.text_input)
def get_embedding(self, texts):
# Reduce logging output.
# tf.logging.set_verbosity(tf.logging.ERROR)
with tf.Session(graph=self.g) as session:
session.run(self.init_op)
texts_embeddings = session.run(self.embedded_text, feed_dict={self.text_input: texts})
# for i, message_embedding in enumerate(np.array(texts_embeddings).tolist()):
# print("Message: {}".format(texts[i]))
# print("Embedding size: {}".format(len(message_embedding)))
# message_embedding_snippet = ", ".join(
# (str(x) for x in message_embedding[:3]))
# print("Embedding: [{}, ...]\n".format(message_embedding_snippet))
return texts_embeddings
def close(self):
print('TFHubContext closed')
class ElmoTFHubContext(TFHubContext):
def __init__(self, url="https://tfhub.dev/google/elmo/2", type='elmo') -> None:
super().__init__(url)
self.type = type
def get_embedded_text(self):
return self.embed(self.text_input, signature='default', as_dict=True)
def get_embedding(self, texts):
# Reduce logging output.
# tf.logging.set_verbosity(tf.logging.ERROR)
with tf.Session(graph=self.g) as session:
session.run(self.init_op)
texts_embeddings = session.run(self.embedded_text, feed_dict={self.text_input: texts})[self.type]
# for i, message_embedding in enumerate(np.array(texts_embeddings).tolist()):
# print("Message: {}".format(texts[i]))
# print("Embedding size: {}".format(len(message_embedding)))
# message_embedding_snippet = ", ".join(
# (str(x) for x in message_embedding[:3]))
# print("Embedding: [{}, ...]\n".format(message_embedding_snippet))
return texts_embeddings
def get_use_embedding(texts):
use_embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-large/3")
# Reduce logging output.
# tf.logging.set_verbosity(tf.logging.ERROR)
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
texts_embeddings = session.run(use_embed(texts))
for i, message_embedding in enumerate(np.array(texts_embeddings).tolist()):
print("Message: {}".format(texts[i]))
print("Embedding size: {}".format(len(message_embedding)))
message_embedding_snippet = ", ".join(
(str(x) for x in message_embedding[:3]))
print("Embedding: [{}, ...]\n".format(message_embedding_snippet))
return texts_embeddings
if __name__ == '__main__':
emb = ElmoTFHubContext(type='default')
tt = emb.get_embedding(['This is a sentence.', 'This is another sentence.'])
print(tt.shape)
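# --- Editor's note (added; not part of the original file): the module above
# uses TF1-era APIs (tf.placeholder, tf.Session, hub.Module). On TensorFlow
# 2.x it would presumably need a compat shim such as the following, executed
# before the module's own imports -- an assumption, since no TF version is
# pinned here.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()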
| 36.72043
| 103
| 0.685505
| 444
| 3,415
| 5.065315
| 0.245496
| 0.106714
| 0.061361
| 0.025345
| 0.698088
| 0.698088
| 0.698088
| 0.656292
| 0.656292
| 0.581147
| 0
| 0.002467
| 0.16896
| 3,415
| 93
| 104
| 36.72043
| 0.789993
| 0.286384
| 0
| 0.22
| 0
| 0
| 0.127431
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.06
| 0.04
| 0.36
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f8ab1e5dcd509c7bb1c75102e032a178319bb7
| 1,020
|
py
|
Python
|
src/genemap/main/map_ids.py
|
jrderuiter/genemap
|
0413474294cae9e17252d88c8b9ff1382e4a2f0f
|
[
"MIT"
] | null | null | null |
src/genemap/main/map_ids.py
|
jrderuiter/genemap
|
0413474294cae9e17252d88c8b9ff1382e4a2f0f
|
[
"MIT"
] | 2
|
2018-05-25T17:28:21.000Z
|
2019-01-07T19:14:01.000Z
|
src/genemap/main/map_ids.py
|
jrderuiter/genemap
|
0413474294cae9e17252d88c8b9ff1382e4a2f0f
|
[
"MIT"
] | 3
|
2018-05-25T16:49:13.000Z
|
2018-05-25T16:51:45.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
from genemap.mappers import get_mappers
def main(args):
"""Main function."""
mapper = args.mapper.from_args(args)
mapped = mapper.map_ids(args.ids)
print(' '.join(mapped))
def configure_subparser(subparser):
"""Configures subparser for subcommand."""
parser = subparser.add_parser('map_ids')
parser.set_defaults(main=main)
mapper_subparser = parser.add_subparsers(dest='mapper')
mapper_subparser.required = True
mappers = get_mappers(with_command_line=True).items()
for name, class_ in mappers:
mapper_parser = mapper_subparser.add_parser(name)
class_.configure_parser(mapper_parser)
mapper_parser.add_argument('ids', nargs='+')
mapper_parser.set_defaults(mapper=class_)
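# --- Editor's note: a hedged wiring sketch, added for illustration; not part
# of the original file. It shows how configure_subparser above is presumably
# mounted on a top-level argparse parser; everything beyond this module's own
# names is an assumption.
import argparse

parser = argparse.ArgumentParser(prog='genemap')
subparsers = parser.add_subparsers(dest='command')
configure_subparser(subparsers)   # registers 'map_ids' plus mapper subparsers

# Dispatch would then look like:
#   args = parser.parse_args()    # e.g. `genemap map_ids <mapper> ID1 ID2`
#   args.main(args)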
| 28.333333
| 74
| 0.732353
| 125
| 1,020
| 5.728
| 0.408
| 0.078212
| 0.064246
| 0.083799
| 0.150838
| 0.150838
| 0.150838
| 0.150838
| 0
| 0
| 0
| 0.001157
| 0.152941
| 1,020
| 35
| 75
| 29.142857
| 0.827546
| 0.214706
| 0
| 0
| 0
| 0
| 0.022843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.166667
| 0
| 0.277778
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f8f6e84f58dfa799a34b9718329b0459fc7d49
| 3,463
|
py
|
Python
|
project_gendl/splice42.py
|
KorfLab/datacore
|
f6eb04650d8257a8e2eecd44928a60368d374d38
|
[
"MIT"
] | null | null | null |
project_gendl/splice42.py
|
KorfLab/datacore
|
f6eb04650d8257a8e2eecd44928a60368d374d38
|
[
"MIT"
] | null | null | null |
project_gendl/splice42.py
|
KorfLab/datacore
|
f6eb04650d8257a8e2eecd44928a60368d374d38
|
[
"MIT"
] | null | null | null |
import gzip
import random
import subprocess
import sys
def get_acceptors(filename):
accs = []
with gzip.open(filename, 'rt') as fp:
for line in fp.readlines():
(exon1, intron, exon2, expression, gene) = line.split()
s1 = intron[-22:-2]
s2 = intron[-2:]
s3 = exon2[0:20]
accs.append((s1, s2, s3, expression))
random.shuffle(accs)
return accs
def get_donors(filename):
dons = []
with gzip.open(filename, 'rt') as fp:
for line in fp.readlines():
(exon1, intron, exon2, expression, gene) = line.split()
s1 = exon1[-20:]
s2 = intron[0:2]
s3 = intron[2:22]
dons.append((s1, s2, s3, expression))
return dons
def write_fasta(filename, name, seqs):
with open(filename, 'w') as fp:
n = 1
for s1, s2, s3, x in seqs:
fp.write(f'>{name}-{n} {x}\n')
fp.write(f'{s1}{s2}{s3}\n')
n += 1
def randomseq(size, contents='ACGT'):
seq = ''
for i in range(size):
seq += random.choice(contents)
return seq
def make_negative1(seqs):
neg = []
for i in range(len(seqs)):
s1 = randomseq(20)
s2 = seqs[0][1] # either GT or AG
s3 = randomseq(20)
x = 0
neg.append((s1, s2, s3, x))
return neg
def make_negative2(seqs):
s1seq = '' # composition of part 1
s3seq = '' # composition of part 2
for s1, s2, s3, x in seqs:
s1seq += s1
s3seq += s3
neg = []
for i in range(len(seqs)):
s1 = randomseq(20, s1seq)
s2 = seqs[0][1] # either GT or AG
s3 = randomseq(20, s3seq)
x = 0
neg.append((s1, s2, s3, x))
return neg
def make_negative3(seqs):
col1 = [[] for i in range(20)]
col3 = [[] for i in range(20)]
for s1, s2, s3, x in seqs:
for i in range(20):
col1[i].append(s1[i])
col3[i].append(s3[i])
neg = []
for i in range(len(seqs)):
s1 = ''
s3 = ''
for j in range(20):
s1 += random.choice(col1[j])
s3 += random.choice(col3[j])
s2 = seqs[0][1] # either GT or AG
x = 0
neg.append((s1, s2, s3, x))
return neg
def make_negative4(filename):  # takes the eie file path, not a parsed seq list
comp = str.maketrans('ACGTRYMKWSBDHV', 'TGCAYRKMWSVHDB')
neg = []
with gzip.open(filename, 'rt') as fp:
for line in fp.readlines():
(exon1, intron, exon2, expression, gene) = line.split()
seq = exon1 + intron + exon2
anti = seq.translate(comp)[::-1]
for i in range(20, len(seq) -20):
if anti[i:i+2] == 'GT':
pass # this is actually completed elsewhere and not checked in...
#############
# 42 nt set # 20 nt upstream and downstream of canonical GT|AG
#############
genomes = ('at', 'ce', 'dm')
for gen in genomes:
# observed
eie = f'eie.{gen}.txt.gz'
dons = get_donors(eie)
accs = get_acceptors(eie)
write_fasta(f'splice42/{gen}.don.fa', 'don', dons)
write_fasta(f'splice42/{gen}.acc.fa', 'acc', accs)
# negative 1 - totally random
nd = make_negative1(dons)
na = make_negative1(accs)
write_fasta(f'splice42/{gen}.n1don.fa', 'n1don', nd)
write_fasta(f'splice42/{gen}.n1acc.fa', 'n1acc', na)
# negative 2 - compositional but not positional
nd = make_negative2(dons)
na = make_negative2(accs)
write_fasta(f'splice42/{gen}.n2don.fa', 'n2don', nd)
write_fasta(f'splice42/{gen}.n2acc.fa', 'n2acc', na)
# negative 3 - compositional and positional
nd = make_negative3(dons)
na = make_negative3(accs)
write_fasta(f'splice42/{gen}.n3don.fa', 'n3don', nd)
write_fasta(f'splice42/{gen}.n3acc.fa', 'n3acc', na)
write_fasta(f'data42/{gen}.n3don.fa', 'n3don', nd)
write_fasta(f'data42/{gen}.n3acc.fa', 'n3acc', na)
# negative 4 - sequences from the opposite strand
# NOTE: make_negative4 is incomplete upstream (its loop body is a `pass`) and
# returns nothing yet, so this call will fail until the function is finished.
nd, na = make_negative4(eie)
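# --- Editor's note: a toy check of the completed negative-set generators
# above, added for illustration; not part of the original file. The sequence
# content is made up.
toy_dons = [('A' * 20, 'GT', 'C' * 20, 1.0)]
toy_neg = make_negative1(toy_dons)   # random 20-mers flank the same 'GT'
assert toy_neg[0][1] == 'GT'
assert len(toy_neg[0][0]) == len(toy_neg[0][2]) == 20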
| 24.913669
| 70
| 0.634421
| 562
| 3,463
| 3.862989
| 0.231317
| 0.050668
| 0.050668
| 0.040534
| 0.446799
| 0.357439
| 0.295716
| 0.273607
| 0.228006
| 0.228006
| 0
| 0.061922
| 0.188565
| 3,463
| 138
| 71
| 25.094203
| 0.710676
| 0.110598
| 0
| 0.279279
| 0
| 0
| 0.119079
| 0.073026
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072072
| false
| 0.009009
| 0.036036
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1f924e262151141ecf3892ae5654b295df1f760
| 1,300
|
py
|
Python
|
old-stuff/crimes/atividade.py
|
paulopieczarka/DataScience-Uni
|
4013fe97f2a40da8923f11a8ce5907423ed8addd
|
[
"MIT"
] | null | null | null |
old-stuff/crimes/atividade.py
|
paulopieczarka/DataScience-Uni
|
4013fe97f2a40da8923f11a8ce5907423ed8addd
|
[
"MIT"
] | null | null | null |
old-stuff/crimes/atividade.py
|
paulopieczarka/DataScience-Uni
|
4013fe97f2a40da8923f11a8ce5907423ed8addd
|
[
"MIT"
] | null | null | null |
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def get_columns(db, col1, col2):
inputs = db[[col1, col2]]
    coords = inputs.as_matrix(columns=None)  # .as_matrix() is removed in modern pandas; use .to_numpy()
return np.array(coords)
def plot_colored_graph(inputs, kmeans_result):
x = inputs.transpose()
df = pd.DataFrame(dict(
crime=x[0],
dias_para_completar=x[1],
color=x[0]
))
sns.lmplot('crime', 'dias_para_completar', data=df, hue='color', fit_reg=False)
    plt.title('Tempo para finalizar um crime')  # "Time to complete a crime"
clusterX = [row[0] for row in kmeans_result]
clusterY = [row[1] for row in kmeans_result]
plt.plot(clusterX, clusterY, 'rs')
plt.show()
def find_elbow(inputs, max_k):
distorsions = []
for k in max_k:
kmeans = KMeans(n_clusters=k)
kmeans.fit(inputs)
distorsions.append(kmeans.inertia_)
plt.plot(max_k, distorsions)
plt.title('Elbow curve')
def main():
# Load dataset
crimes_db = pd.read_csv('base/result_min.csv')
inputs = get_columns(crimes_db, 'description', 'clearance_days')
# find best k
find_elbow(inputs, range(2, 20))
# run k-means
kmeans = KMeans(n_clusters=8, random_state=0).fit(inputs)
print(kmeans.cluster_centers_)
plot_colored_graph(inputs, kmeans.cluster_centers_)
main()
| 23.636364
| 81
| 0.707692
| 198
| 1,300
| 4.479798
| 0.464646
| 0.040586
| 0.022548
| 0.049605
| 0.10823
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012927
| 0.166923
| 1,300
| 54
| 82
| 24.074074
| 0.806094
| 0.027692
| 0
| 0
| 0
| 0
| 0.09127
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.131579
| 0
| 0.263158
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1fb7ac3548bddd8881f407edfa6134b66678d18
| 19,216
|
py
|
Python
|
search_sampler/__init__.py
|
gserapio/search_sampler
|
38c8a5c7414edb21126e767ea70e7cd355223f2a
|
[
"MIT"
] | 1
|
2021-02-09T19:50:17.000Z
|
2021-02-09T19:50:17.000Z
|
search_sampler/__init__.py
|
gserapio/search_sampler
|
38c8a5c7414edb21126e767ea70e7cd355223f2a
|
[
"MIT"
] | null | null | null |
search_sampler/__init__.py
|
gserapio/search_sampler
|
38c8a5c7414edb21126e767ea70e7cd355223f2a
|
[
"MIT"
] | null | null | null |
import os
import pandas
import time
from datetime import datetime, timedelta
from collections import defaultdict
from copy import deepcopy
from googleapiclient.discovery import build
"""
All functions that are used for querying, processing, and saving
the data are located here.
"""
VALID_PERIOD_LENGTHS = ["day", "week", "month"]
class SearchSampler(object):
"""
    SearchSampler contains all functions required to sample the Google Health API
:param api_key: The API key you received from Google
:param search_name: A suffix for your output file. It will be placed in the `{output_path}/{region}`\
folder with the filename `{region}-{search_name}.csv`.
:param search_params: A dictionary containing parameters. Must contain keys with:\
`search_term, region, period_start, period_end, period_length`\
Example: {\
"region": "US-DC",\
"search_term": "test",\
"period_start": "2017-01-01",\
"period_end": "2017-01-31",\
"period_length": "week"\
}\
The `search_term` can be a single string, or a list of strings. It can also include Boolean logic.\
See the report methodology for more details. The `region` can be a country, state, or DMA.\
States are formatted like `US-CA`, DMAs are a 3-digit code (see Nielsen for info).\
The `period_start` and `period_end` parameters need to be in the format `YYYY-MM-DD`.\
The `period_length` can be "day", "week", or "month" - but we have only tested this extensively\
with week.
:param server: The endpoint to which requests will be made (default is "https://www.googleapis.com")
:param version: The API version to use (default is `v1beta`)
:param output_path: The path to the folder where query results will be saved (folder will be created\
if it doesn't already exist.)
:Example:
>>> params = {
'search_term': ['cough', 'sneeze', 'fever'],
'region': 'US-DC',
'period_start': '2017-01-01',
'period_end': '2017-02-01',
'period_length': 'week'
}
>>> search_name = "flu_symptoms"
>>> output_path = "data"
>>> num_samples = 5
>>> from SearchSampler.sampler import SearchSampler
>>> sampler = SearchSampler(api_key, search_name, params, output_path=output_path)
>>> df_results = sampler.pull_rolling_window(num_samples=num_samples)
>>> sampler.save_file(df_results, append=True)
"""
def __init__(
self,
api_key,
search_name,
search_params,
server="https://www.googleapis.com",
version="v1beta",
output_path="data"
):
# Basic variables
if not api_key:
raise SystemError('ERROR: Must provide an api_key as the first parameter')
self._search_name = search_name
self._server = server
self._version = version
self.service = self._get_service(api_key)
# Below exception is to ensure that people actually provide something for an output_path
if output_path == "":
raise ValueError("Please provide an output path")
self.output_path = output_path
## Search parameters
# Initialize a dictionary with default parameters
self.params = {
"search_term": None,
"region": None,
"period_start": None,
"period_end": None,
"period_length": "week"
}
# Force search_term to be a dictionary
if not isinstance(search_params, dict):
raise ValueError('ERROR: search_params needs to be a dictionary')
        if isinstance(search_params.get("search_term"), str):
            search_params["search_term"] = [search_params["search_term"]]
self.params.update(search_params)
for k, v in self.params.items():
if not v:
raise SystemError('ERROR: Must provide a {}'.format(k))
# Check that start date is before end date
if self.params['period_end'] < self.params['period_start']:
raise ValueError('ERROR: start of period must be before end of period')
def _get_service(self, api_key):
"""
Sets up the connection to the Google Trends Health API
:param api_key: API Key
:return: Properly configured API object
"""
url = "/".join([
str(self._server),
'discovery/v1/apis/trends',
str(self._version),
"rest"
])
service = build(
'trends',
self._version,
developerKey=api_key,
discoveryServiceUrl=url
)
return service
def _get_file_path(self):
"""
:return: 2-tuple containing the file path and file name
"""
str_path = os.path.join(str(self.output_path), str(self.params["region"]))
str_file_name = '{region}-{identifier}.csv'.format(
region=self.params['region'],
identifier=self._search_name
)
return (str_path, str_file_name)
def load_file(self):
"""
Loads a csv file for later analysis, based on naming scheme used within class
:return: Pandas dataframe
"""
load_path, load_filename = self._get_file_path()
full_file_path = os.path.join(str(load_path), str(load_filename))
print('Attempting to load local file: {}'.format(full_file_path))
return pandas.read_csv(full_file_path)
def save_file(self, df, append=True):
"""
Saves data in df to folder, based on the following structure\:
`{output_path}/{region}/{region}-{search_identifier}.csv`
:param df: Dataframe to save. Expects format\: Period, value (though names don't matter)
:param append: Whether or not to add the new results to an existing file with the same name.\
Setting this to `False` will overwrite any existing file.
:return: None
"""
# set up paths and file name
load_path, load_filename = self._get_file_path()
# Verify the directory exists; if not, create
if not os.path.exists(load_path):
os.makedirs(load_path)
# If appending results, load previous results and join
else:
if append:
try:
df_prev_results = self.load_file()
except FileNotFoundError:
print('No previous data found. Will save to new file')
else:
df = pandas.concat([df_prev_results, df])
full_file_path = os.path.join(str(load_path), str(load_filename))
print('Saving local file: {}'.format(full_file_path))
df.to_csv(full_file_path, encoding='utf-8', index=False)
def _perform_pull(self, graph_object, attempt=0, sleep_minutes=1, limit=20):
"""
Given a connection object to the API, return a set of unformatted data. This method
accommodates API connection problems up to the specified limit (default 20).
        :param graph_object: A properly formatted API request object
        :param attempt: Internal, do not use. Tracks retries when the API call fails.
        :param sleep_minutes: Minutes to sleep between ordinary retries (default 1)
        :param limit: Maximum number of attempts before giving up (default 20)
:return: Unformatted data from API
"""
# Call API
# Enclosed in a try/except block because the API will randomly return a Rate Limit exceeded error
# Usually as an HTTPError
try:
response_health = graph_object.execute()
except Exception as msg:
attempt += 1
if attempt <= limit:
if attempt % 5 == 0:
print(
'WARNING: Attempt #{}. This may require an extended period. Sleeping for 5 minutes. \
Error message:\n {}'.format(attempt, str(msg))
)
# Sleep for 5 minutes
time.sleep(5 * 60)
else:
print(
'WARNING: Attempt #{}. Sleeping for just 1 minute. \
Error message:\n {}'.format(attempt, str(msg))
)
                # Sleep for sleep_minutes minutes (default 1)
time.sleep(sleep_minutes * 60)
response_health = self._perform_pull(graph_object, attempt)
else:
# Give up entirely
raise SystemError("Attempted query 5 times and couldn't connect")
response_health = None
return response_health
def pull_data_from_api(self, params=None, format='dict'):
"""
Pulls data from the API given a set of search terms and other restrictions.
:param params: Set of search parameters. Uses the object-level search params (from __init__) if empty.
:return: Dataframe with results from API that match parameters.
"""
# set local parameters to class parameters if necessary
if not params:
params = deepcopy(self.params)
# Check period_length
if params['period_length'] not in VALID_PERIOD_LENGTHS:
raise SystemError('Period length {} is of the wrong type.'.format(params['period_length']))
# Check region type. Because this changes the parameters in the API call, this sets up the API call
# See the difference between geoRestriction_region, _country, and _dma
if isinstance(params['region'], list):
test_region = str(params['region'][0])
params['region'] = "'{}'".format("', '".join(str(params['region'])))
else:
test_region = str(params['region'])
if test_region[:2] == 'US':
# nation-wide
if test_region == 'US':
graph_health = self.service.getTimelinesForHealth(
terms=params['search_term'],
geoRestriction_country=params['region'],
time_startDate=params['period_start'],
time_endDate=params['period_end'],
timelineResolution=params['period_length']
)
# Can only use multiple values for states and DMAs
# Cannot mix national, state or DMA in the same call, unfortunately
# Valid options are ISO-3166-2
else:
graph_health = self.service.getTimelinesForHealth(
terms=params['search_term'],
geoRestriction_region=params['region'],
time_startDate=params['period_start'],
time_endDate=params['period_end'],
timelineResolution=params['period_length']
)
else:
# This assumes a DMA
# To properly retrieve data, it needs to be a number, so test for this first
# For more, see: https://support.google.com/richmedia/answer/2745487
if not isinstance(params['region'], int):
raise ValueError('Region "{}" is not an integer, but looks like it is meant to be a DMA' \
.format(params['region']))
# otherwise
graph_health = self.service.getTimelinesForHealth(
terms=params['search_term'],
geoRestriction_dma=params['region'],
time_startDate=params['period_start'],
time_endDate=params['period_end'],
timelineResolution=params['period_length']
)
# Now, finally, call the API
print('INFO: Running period {} - {}'.format(params['period_start'], params['period_end']))
response_health = self._perform_pull(graph_health)
if not response_health:
return None
else:
d_results = {}
for results in response_health['lines']:
curr_term = results['term']
df = pandas.DataFrame(results['points'])
# re-format date into actual date objects
try:
df['period'] = pandas.to_datetime(df.date, format='%b %d %Y')
                except ValueError:  # monthly resolution dates use the '%b %Y' format
df['period'] = pandas.to_datetime(df.date, format='%b %Y')
d_results[curr_term] = df
if format == 'dataframe':
# process of saving is slightly different when asking for multiple
# search terms than for just one
# Need to convert from a dictionary of dataframes
if len(d_results) > 1:
df = pandas.concat(d_results).reset_index()[['level_0', 'date', 'value', 'period']]
df = df.rename(columns={'level_0':'search_term'})
else:
df = pandas.DataFrame(d_results)
return df
elif format == 'dict':
return d_results
else:
raise ValueError("Please provide a proper format for results. Available options are: dict, dataframe.")
def _serialize_period_values(self, df, dd_periods=None, lst_periods=None):
"""
Converts sample into period specific list of values. Assumes dd_periods is a defaultdict
:param df: Dataframe with sample values. Must at least have the columns [period, value]
:param dd_periods: A dictionary, with periods as keys and lists of query results as values
:param lst_periods: A list of valid periods
:return: dd_periods with added values
"""
if not lst_periods:
lst_periods = []
if not dd_periods:
dd_periods = defaultdict(list)
for index, row in df.iterrows():
# If a list of periods was provided, we only expand dd_periods for the ones that were specified
if len(lst_periods) > 0:
if row['period'] in lst_periods:
dd_periods[row['period']].append(row['value'])
else:
dd_periods[row['period']].append(row['value'])
return dd_periods
def pull_rolling_window(self, num_samples=5):
"""
Separates pull into a rolling set of samples to get multiple samples in the same run.
This takes advantage of the fact that the API does not cache results if you change the length of time
in the search
:param num_samples: Amount of samples to pull
:return: Dataframe with results from API. Does not include information about the sample frame.
"""
query_time = datetime.now()
# First we run a single query, so we can get the dates for each period from the API.
# Could do this logic locally, but this is easier
local_params = deepcopy(self.params)
local_params['search_term'] = local_params['search_term'][0]
samples_taken = 0
d_range_all = self.pull_data_from_api(local_params)
lst_periods = list(d_range_all.values())[0]['period'].tolist()
d_periods = {}
# Next, we pull each week individually. This will always get saved.
print("INFO: Running Search Term: {}".format(self.params['search_term']))
for period in lst_periods:
curr_date = datetime.strftime(period, '%Y-%m-%d')
local_params = deepcopy(self.params)
local_params['period_start'] = curr_date
local_params['period_end'] = curr_date
d_single = self.pull_data_from_api(local_params)
if not d_single:
raise ValueError('Problems with period {}'.format(curr_date))
for term, result in d_single.items():
if term in d_periods:
d_periods[term] = self._serialize_period_values(result, dd_periods=d_periods[term])
else:
d_periods[term] = self._serialize_period_values(result, dd_periods=defaultdict(list))
# Increment samples taken by 1 - since each period has been sampled individually
samples_taken += 1
# Now do the rolling sample
# Using some logic to figure out the window size and how far back to go
# First, we get the window size
window_size = num_samples - samples_taken
print("INFO: window_size: {}".format(str(window_size)))
# If in the above samples we've already gotten all that we've asked for, no need to do the rest
if window_size > 0:
# There's a weird race condition in which window_size = 1, but we've already done the single period samples
# So we just change this to a 2 period window size and they get an extra sample
if window_size == 1:
window_size = 2
# Calculate days before and after, erring on the side of having more periods...
# So that we have symmetry between sides if there are an odd number of weeks
local_params = deepcopy(self.params)
days_diff = window_size * 7
# Get the starting period, specifying that the first window is window_size before the first date
starting_period = lst_periods[0] - timedelta(days=days_diff)
# Get the ending period, specifying that the last window is window_size after the last date
ending_period = lst_periods[-1] + timedelta(days=days_diff)
# Set up the loop
# Initial window is (starting_period) to (starting_period + window_size)
curr_start = starting_period
curr_end = curr_start + timedelta(days=days_diff)
# Loop until each window is done
while curr_end <= ending_period:
# Set up query params
local_params['period_start'] = datetime.strftime(curr_start, '%Y-%m-%d')
local_params['period_end'] = datetime.strftime(curr_end, '%Y-%m-%d')
# Call the API
d_window = self.pull_data_from_api(local_params)
# Save the results
for term, result in d_window.items():
d_periods[term] = self._serialize_period_values(
result,
dd_periods=d_periods[term],
lst_periods=lst_periods
)
# Increment the window by one week
curr_start += timedelta(days=7)
curr_end += timedelta(days=7)
rows = []
for term, timestamps in d_periods.items():
for timestamp, samples in timestamps.items():
for i, sample in enumerate(samples):
if i < num_samples:
# Due to the sampling method, we sometimes draw an extra sample
# This will skip over that
rows.append({
"term": term,
"period": timestamp,
"sample": i,
"value": sample,
"query_time": query_time
})
return pandas.DataFrame(rows)
| 40.454737
| 119
| 0.585762
| 2,308
| 19,216
| 4.728336
| 0.214905
| 0.020893
| 0.014661
| 0.005498
| 0.170805
| 0.141299
| 0.124072
| 0.10263
| 0.090351
| 0.076239
| 0
| 0.007116
| 0.327175
| 19,216
| 474
| 120
| 40.540084
| 0.836956
| 0.344921
| 0
| 0.177686
| 0
| 0
| 0.116843
| 0.00411
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03719
| false
| 0
| 0.028926
| 0
| 0.107438
| 0.033058
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1fdd3005698252bde84e97c3ad5be6bf947e18b
| 3,620
|
py
|
Python
|
google-cloud-sdk/lib/surface/compute/users/delete.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/surface/compute/users/delete.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/surface/compute/users/delete.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 3
|
2017-07-27T18:44:13.000Z
|
2020-07-25T17:48:53.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting users."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute.users import client as users_client
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Delete(base.DeleteCommand):
"""Delete Google Compute Engine users.
*{command}* deletes one or more Google Compute Engine users.
## EXAMPLES
To delete one or more users by name, run:
$ {command} example-user-1 example-user-2
To delete all users for one or more owners, run:
$ {command} example-owner-1@gmail.com example-owner-2@gmail.com --owners
"""
@staticmethod
def Args(parser):
parser.add_argument(
'--owners',
action='store_true',
help=('The owner of the user to be created. The owner must be an email '
'address associated with a Google account'))
parser.add_argument(
'names',
metavar='NAME',
nargs='+',
help='The names of the users to delete.')
def GetOwnerAccounts(self, client, owners):
"""Look up all users on the current project owned by the list of owners."""
requests = []
for owner in owners:
requests += lister.FormatListRequests(
client.users,
properties.VALUES.core.project.GetOrFail(), None, None,
'owner eq ' + owner)
errors = []
responses = request_helper.MakeRequests(
requests=requests,
http=client.http,
batch_url='https://www.googleapis.com/batch/',
errors=errors)
if errors:
utils.RaiseException(errors, users_client.UserException, error_message=(
'Could not get users for owners:'))
return [response.name for response in responses]
def Run(self, args):
"""Issues requests necessary for deleting users."""
holder = base_classes.ComputeUserAccountsApiHolder(self.ReleaseTrack())
client = holder.client
if args.owners:
names = self.GetOwnerAccounts(client, args.names)
else:
names = args.names
user_refs = [holder.resources.Parse(
user,
params={'project': properties.VALUES.core.project.GetOrFail},
collection='clouduseraccounts.users') for user in names]
utils.PromptForDeletion(user_refs)
requests = []
for user_ref in user_refs:
request = client.MESSAGES_MODULE.ClouduseraccountsUsersDeleteRequest(
project=user_ref.project,
user=user_ref.Name())
requests.append((client.users, 'Delete', request))
errors = []
responses = list(
request_helper.MakeRequests(
requests=requests,
http=client.http,
batch_url='https://www.googleapis.com/batch/',
errors=errors))
if errors:
utils.RaiseToolException(
errors, error_message='Could not fetch resource:')
return responses
| 33.518519
| 80
| 0.690331
| 448
| 3,620
| 5.520089
| 0.386161
| 0.05095
| 0.042459
| 0.048524
| 0.193692
| 0.152042
| 0.092196
| 0.092196
| 0.092196
| 0.092196
| 0
| 0.004231
| 0.216575
| 3,620
| 107
| 81
| 33.831776
| 0.867772
| 0.286464
| 0
| 0.212121
| 0
| 0
| 0.130966
| 0.009073
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.106061
| 0
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d1fff7908412416073cac969804d096355f1b2f7
| 3,195
|
py
|
Python
|
hexomino-core/gen_hexos/gen.py
|
chmnchiang/hexomino
|
483a86c11bc0ccf9cdaae4ad6e102168be3cf320
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
hexomino-core/gen_hexos/gen.py
|
chmnchiang/hexomino
|
483a86c11bc0ccf9cdaae4ad6e102168be3cf320
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
hexomino-core/gen_hexos/gen.py
|
chmnchiang/hexomino
|
483a86c11bc0ccf9cdaae4ad6e102168be3cf320
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from dataclasses import dataclass
from functools import total_ordering
from collections import Counter
import typing
import textwrap
@dataclass(frozen=True)
@total_ordering
class Point:
x: int
y: int
def __add__(self, they):
return Point(self.x + they.x, self.y + they.y)
def __sub__(self, they):
return Point(self.x - they.x, self.y - they.y)
def reflect(self):
return Point(-self.x, self.y)
def rotate(self):
return Point(-self.y, self.x)
def __lt__(self, they):
return (self.x, self.y) < (they.x, they.y)
Poly = typing.Tuple[Point, ...]
def reflect(poly: Poly) -> Poly:
return tuple(p.reflect() for p in poly)
def rotate(poly: Poly) -> Poly:
return tuple(p.rotate() for p in poly)
def minimal_repr(poly: Poly) -> Poly:
points = sorted(poly)
return tuple(p - points[0] for p in points)
def normalize(poly: Poly) -> Poly:
def all_repr(poly):
for i in range(4):
yield poly
yield reflect(poly)
poly = rotate(poly)
min_repr = min(minimal_repr(r) for r in all_repr(poly))
return min_repr
def generate_from_poly(poly) -> typing.Generator[Poly, None, None]:
points = set(poly)
for p in poly:
for df in ((0, 1), (0, -1), (1, 0), (-1, 0)):
q = p + Point(df[0], df[1])
if q in points:
continue
new_poly = normalize((*poly, q))
yield new_poly
def generate(n: int) -> typing.List[Poly]:
if n == 1:
return [(Point(0, 0),)]
prev_results = generate(n - 1)
results = set()
for prev_poly in prev_results:
results.update(generate_from_poly(prev_poly))
return list(results)
def hexo_borders(poly: Poly) -> typing.List[typing.Tuple[Point, Point]]:
dfs = tuple(Point(x, y) for x, y in ((0, 0), (0, 1), (1, 1), (1, 0)))
counter = Counter()
for tile in poly:
for i in range(4):
d1 = dfs[i]
d2 = dfs[(i+1) % 4]
if d1 < d2:
d1, d2 = d2, d1
border = (tile + d1, tile + d2)
counter[border] += 1
outer_borders = [border for border, cnt in counter.items() if cnt == 1]
return outer_borders
def hexo_to_repr(poly: Poly) -> str:
assert len(poly) == 6
tiles_str = ', '.join(f'Pos {{ x: {p.x}, y: {p.y} }}' for p in poly)
borders = hexo_borders(poly)
borders_str = ', '.join(
f'(Pos {{ x: {p1.x}, y: {p1.y} }}, Pos {{ x: {p2.x}, y: {p2.y} }})'
for (p1, p2) in borders)
return (
f'''__Hexo {{
tiles: [{tiles_str}],
borders: &[{borders_str}],
}}''')
if __name__ == '__main__':
codegen_template = textwrap.dedent(
'''\
#[cfg(not(test))]
pub const N_HEXOS: usize = {n_hexos};
#[cfg(not(test))]
pub const HEXOS: [__Hexo; {n_hexos}] = [
{hexos}
];
'''
)
I = tuple(Point(0, y) for y in range(6))
hexos = [poly for poly in generate(6) if poly != I]
hexos_str = ',\n '.join(hexo_to_repr(hexo) for hexo in hexos)
print(codegen_template.format(n_hexos = len(hexos), hexos = hexos_str))
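    # --- Editor's sanity check, added for illustration; not part of the
    # original file. generate(n) enumerates free polyominoes (rotations and
    # reflections identified); known counts are 1, 1, 2, 5, 12, 35 for n=1..6.
    assert [len(generate(n)) for n in range(1, 5)] == [1, 1, 2, 5]
    assert len(hexos) == 34   # 35 free hexominoes minus the excluded I piece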
| 27.782609
| 75
| 0.553678
| 466
| 3,195
| 3.66309
| 0.203863
| 0.056239
| 0.017575
| 0.023433
| 0.146456
| 0.096075
| 0.049209
| 0.049209
| 0.049209
| 0.049209
| 0
| 0.021768
| 0.295462
| 3,195
| 114
| 76
| 28.026316
| 0.736562
| 0
| 0
| 0.023529
| 0
| 0.011765
| 0.068
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 1
| 0.164706
| false
| 0
| 0.058824
| 0.082353
| 0.411765
| 0.011765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0603e6bbd9ecddad191163178ca4161b1b3decfd
| 1,064
|
py
|
Python
|
digsby/src/oscar/snac/family_x0a.py
|
ifwe/digsby
|
f5fe00244744aa131e07f09348d10563f3d8fa99
|
[
"Python-2.0"
] | 35
|
2015-08-15T14:32:38.000Z
|
2021-12-09T16:21:26.000Z
|
digsby/src/oscar/snac/family_x0a.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 4
|
2015-09-12T10:42:57.000Z
|
2017-02-27T04:05:51.000Z
|
digsby/src/oscar/snac/family_x0a.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 15
|
2015-07-10T23:58:07.000Z
|
2022-01-23T22:16:33.000Z
|
import logging
import oscar
x0a_name="User lookup"
log = logging.getLogger('oscar.snac.x0a')
subcodes = {}
def x0a_init(o, sock, cb):
log.info('initializing')
cb()
log.info('finished initializing')
def x0a_x01(o, sock, data):
'''
SNAC (xa, x1): User lookup Family Error
reference: U{http://iserverd.khstu.ru/oscar/snac_0a_01.html}
'''
errcode, errmsg, subcode = oscar.snac.error(data)
submsg = subcodes.setdefault(subcode, 'Unknown') if subcode else None
raise oscar.snac.SnacError(0x0a, (errcode, errmsg), (subcode, submsg))
def x0a_x02(email):
'''
SNAC (xa, x2): Search by email
reference: U{http://iserverd.khstu.ru/oscar/snac_0a_02.html}
'''
return 0x0a, 0x02, email
def x0a_x03(o, sock, data):
'''
SNAC (xa, x3): Search response
reference: U{http://iserverd.khstu.ru/oscar/snac_0a_03.html}
'''
fmt = (('tlvs', 'tlv_list'),)
name_tlvs, data = oscar.unpack(fmt, data)
assert not data
names = [tlv.v for tlv in name_tlvs]
| 25.95122
| 75
| 0.62594
| 147
| 1,064
| 4.435374
| 0.469388
| 0.082822
| 0.064417
| 0.101227
| 0.230061
| 0.184049
| 0.184049
| 0.184049
| 0.184049
| 0
| 0
| 0.037713
| 0.227444
| 1,064
| 40
| 76
| 26.6
| 0.755474
| 0.269737
| 0
| 0
| 0
| 0
| 0.113402
| 0
| 0
| 0
| 0.017673
| 0
| 0.05
| 1
| 0.2
| false
| 0
| 0.1
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
060485709baa0b9492d85e40f90068c48154acf0
| 2,928
|
py
|
Python
|
setup.py
|
rochacon/punch
|
7f6fb81221049ab74ef561fb40a4174bdb3e77ef
|
[
"MIT"
] | null | null | null |
setup.py
|
rochacon/punch
|
7f6fb81221049ab74ef561fb40a4174bdb3e77ef
|
[
"MIT"
] | null | null | null |
setup.py
|
rochacon/punch
|
7f6fb81221049ab74ef561fb40a4174bdb3e77ef
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""setup.py
Defines the setup instructions for the punch framework
Copyright (C) 2016 Rodrigo Chacon
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
extra_kwargs = {'tests_require': ['pytest']}
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
sys.exit(pytest.main())
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
readme = ''
setup(name='punch',
version='0.0.1',
description='A Python framework focused (but not limited) in JSON APIs.',
long_description=readme,
author='Rodrigo Chacon',
author_email='rochacon@gmail.com',
url='https://github.com/rochacon/punch',
license='MIT',
packages=['punch'],
requires=['webob'],
install_requires=['webob'],
cmdclass={'test': PyTest},
keywords='Web, Python, Python3, Refactoring, REST, Framework, RPC',
classifiers=['Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'Natural Language :: English',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'],
**PyTest.extra_kwargs)
| 39.04
| 112
| 0.663934
| 353
| 2,928
| 5.475921
| 0.541076
| 0.045525
| 0.0776
| 0.067253
| 0.027936
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008119
| 0.242828
| 2,928
| 74
| 113
| 39.567568
| 0.86378
| 0.39071
| 0
| 0
| 0
| 0
| 0.373874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0607341543b37f814977e95ae2726476134dd618
| 2,745
|
py
|
Python
|
manage.py
|
Zauberer2/touchresume
|
c558f6383722f289cf8087a15f6e049b4213c010
|
[
"MIT"
] | 3
|
2020-02-25T04:18:22.000Z
|
2021-12-25T17:03:50.000Z
|
manage.py
|
Zauberer2/touchresume
|
c558f6383722f289cf8087a15f6e049b4213c010
|
[
"MIT"
] | 3
|
2019-09-02T07:49:35.000Z
|
2021-12-19T17:46:31.000Z
|
manage.py
|
Zauberer2/touchresume
|
c558f6383722f289cf8087a15f6e049b4213c010
|
[
"MIT"
] | 1
|
2021-12-23T18:11:07.000Z
|
2021-12-23T18:11:07.000Z
|
#!/usr/bin/env python
import os
import re
import unittest
from git import Repo
from semver import match
from click import option, argument, echo, ClickException
from touchresume.cli import cli
from touchresume import __version__
@cli.command(with_appcontext=False)
@option('-d', '--dir', default='tests', help='Directory with tests')
def test(dir):
"""Discover and run unit tests."""
testsuite = unittest.TestLoader().discover(dir)
unittest.TextTestRunner(verbosity=2, buffer=True).run(testsuite)
@cli.command(with_appcontext=False)
@option('-d', '--dev', default='dev', help='Develop branch (dev)')
@option('-m', '--master', default='master', help='Master branch (master)')
@argument('version')
def release(dev, master, version, app_path='touchresume'):
"""Make Git release."""
if not match(version, f'>{__version__}'):
raise ClickException(f'Version must be greater than {__version__}')
repo = Repo()
release = f'release/{version}'
echo(f'Create {release} branch')
repo.head.ref = repo.heads[dev]
repo.head.ref = repo.create_head(release)
echo(f'Bump version - {version}')
version_file = os.path.join(app_path, '__init__.py')
with open(version_file, 'r+') as f:
content = f.read()
target = f"__version__ = '{__version__}'"
value = f"__version__ = '{version}'"
f.seek(0)
f.write(content.replace(target, value))
repo.index.add([version_file])
repo.index.commit(f'bump version - v{version}')
diff = repo.head.commit.diff(None)
cf = re.compile(r'^change[s|log].*')
changelog_files = [d.a_path for d in diff if cf.match(d.a_path.lower())]
if changelog_files:
echo(f'Commit {", ".join(changelog_files)}')
repo.index.add(changelog_files)
repo.index.commit(f'update changelog - v{version}')
rf = 'readme'
readme_files = [d.a_path for d in diff if d.a_path.lower().startswith(rf)]
if readme_files:
echo(f'Commit {", ".join(readme_files)}')
repo.index.add(readme_files)
repo.index.commit(f'update readme - v{version}')
echo(f'Merge {release} into {master}')
repo.head.ref = repo.heads[master]
parents = (repo.branches[release].commit, repo.branches[master].commit)
repo.index.commit(f'merge {release}', parent_commits=parents)
echo(f'Create v{version} tag')
repo.create_tag(f'v{version}')
echo(f'Merge {release} back into {dev}')
repo.head.ref = repo.heads[dev]
dev_parents = (repo.branches[release].commit, repo.branches[dev].commit)
repo.index.commit(f'merge {release} back', parent_commits=dev_parents)
echo(f'Delete {release} branch')
repo.delete_head(release)
if __name__ == '__main__':
cli()
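# --- Editor's note: a small illustration of the version gate used by
# `release` above; added, not part of the original file. `match` is the same
# helper the module already imports from `semver`.
from semver import match
assert match('1.2.3', '>1.2.2')        # a strictly greater version passes
assert not match('1.2.3', '>1.2.3')    # re-releasing the same version fails
# Hypothetical CLI usage:
#   python manage.py release -d dev -m master 1.2.3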
| 32.294118
| 78
| 0.668488
| 378
| 2,745
| 4.690476
| 0.298942
| 0.022561
| 0.042301
| 0.045121
| 0.283136
| 0.239143
| 0.15454
| 0.025945
| 0.025945
| 0
| 0
| 0.000882
| 0.17377
| 2,745
| 84
| 79
| 32.678571
| 0.780864
| 0.024408
| 0
| 0.064516
| 0
| 0
| 0.236595
| 0.016873
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.129032
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06076fc2131eb37f5f2f55c95d8358153da24655
| 485
|
py
|
Python
|
reb/scrape.py
|
vibya/Economic-Downturn
|
03df854f4c314d5a944cd99474b980a95f088f39
|
[
"MIT"
] | 1
|
2018-09-18T01:07:53.000Z
|
2018-09-18T01:07:53.000Z
|
reb/scrape.py
|
aidinhass/reb
|
33fc9d9781e2c0fce8faa6240ec2d56899ee2c07
|
[
"MIT"
] | null | null | null |
reb/scrape.py
|
aidinhass/reb
|
33fc9d9781e2c0fce8faa6240ec2d56899ee2c07
|
[
"MIT"
] | 3
|
2018-09-18T01:08:01.000Z
|
2019-03-10T10:06:41.000Z
|
from reb.src import pynyt
from reb.conf import APIKEY_NYT_ARTICLE
nyt = pynyt.ArticleSearch(APIKEY_NYT_ARTICLE)
nytArchive = pynyt.ArchiveApi(APIKEY_NYT_ARTICLE)
# # get 1000 news articles from the Foreign newsdesk from 1987
# results_obama = nyt.query(
# q='obama',
# begin_date="20170101",
# end_date="20170102",
# # facet_field=['source', 'day_of_week'],
# # facet_filter = True,
# verbose=True)
arch = nytArchive.query(
year="2012",
month="1"
)
| 23.095238
| 62
| 0.692784
| 64
| 485
| 5.046875
| 0.671875
| 0.083591
| 0.148607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073232
| 0.183505
| 485
| 21
| 63
| 23.095238
| 0.742424
| 0.494845
| 0
| 0
| 0
| 0
| 0.021645
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
060a86f44e032bdb0deaf25d27674c930c7491c8
| 3,385
|
py
|
Python
|
hooks/relations.py
|
projectcalico/charm-bird
|
3224e887329c527f6bed2520346e66fb4e795fe8
|
[
"Apache-2.0"
] | null | null | null |
hooks/relations.py
|
projectcalico/charm-bird
|
3224e887329c527f6bed2520346e66fb4e795fe8
|
[
"Apache-2.0"
] | null | null | null |
hooks/relations.py
|
projectcalico/charm-bird
|
3224e887329c527f6bed2520346e66fb4e795fe8
|
[
"Apache-2.0"
] | 1
|
2022-03-16T16:12:32.000Z
|
2022-03-16T16:12:32.000Z
|
# -*- coding: utf-8 -*-
'''
Relations for BIRD.
'''
import socket
import netaddr
import netifaces
from charmhelpers.core import hookenv
from charmhelpers.core.services.helpers import RelationContext
def router_id():
'''
Determine the router ID that should be used.
This function uses the common logic of finding the IPv4 addresses
assigned on all interfaces and picking the numerically lowest of
them (that is not in the 127.0.0.0/8 block).
'''
def get_assigned_ips():
ifs = netifaces.interfaces()
for interface in ifs:
if_addrs = netifaces.ifaddresses(interface)
ip4_data = if_addrs.get(netifaces.AF_INET, [])
for ip4 in ip4_data:
yield netaddr.IPAddress(ip4['addr'])
excluded_net = netaddr.IPNetwork('127.0.0.0/8')
for addr in sorted(get_assigned_ips()):
if addr not in excluded_net:
return str(addr)
def resolve_domain_name(name, ip_version=4):
'''
Takes a domain name and resolves it to an IP address
of a given version.
Currently only ever returns one address.
'''
results = socket.getaddrinfo(name, None)
addresses = (netaddr.IPAddress(r[4][0]) for r in results)
filtered = (a for a in addresses if a.version == ip_version)
try:
        addr = next(filtered)  # built-in next() works on both Python 2 and 3
except StopIteration:
addr = ''
return str(addr)
def local_ipv6_address():
'''
Determines the IPv6 address to use to contact this machine. Excludes
link-local addresses.
Currently only returns the first valid IPv6 address found.
'''
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
for addr in addresses.get(netifaces.AF_INET6, []):
# Make sure we strip any interface specifier from the address.
addr = netaddr.IPAddress(addr['addr'].split('%')[0])
if not (addr.is_link_local() or addr.is_loopback()):
return str(addr)
class BgpRRRelation(RelationContext):
'''
Relation context for the BGP Route Reflector interface.
'''
name = 'bgp-route-reflector'
interface = 'bgp-route-reflector'
required_keys = []
def is_ready(self):
return True
def _is_ready(self, data):
return set(data.keys()).issuperset(set(['addr', 'addr6']))
def get_data(self):
peers = []
peers6 = []
for rid in hookenv.relation_ids(self.name):
for unit in hookenv.related_units(rid):
rel = hookenv.relation_get(attribute='addr',
rid=rid,
unit=unit)
if rel is not None:
addr = resolve_domain_name(rel)
if addr:
peers.append(addr)
rel6 = hookenv.relation_get(attribute='addr6',
rid=rid,
unit=unit)
if rel6 is not None:
peers6.append(rel6)
self['bgp_peers'] = peers
self['bgp_peers6'] = peers6
self['router_id'] = router_id()
return
def provide_data(self):
return {
'addr': hookenv.unit_get('private-address'),
'addr6': local_ipv6_address()
}
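# A quick smoke test of the pure helpers, assuming netifaces and netaddr are
# installed; hookenv calls only work inside a charm hook context, so the
# relation class itself is not exercised here:
# print(router_id())            # lowest non-loopback IPv4 on this host
# print(local_ipv6_address())   # first non-link-local, non-loopback IPv6
# print(resolve_domain_name('localhost'))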
| 27.298387
| 74
| 0.578139
| 399
| 3,385
| 4.802005
| 0.370927
| 0.016701
| 0.020355
| 0.006263
| 0.024008
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017113
| 0.326736
| 3,385
| 123
| 75
| 27.520325
| 0.823607
| 0.191137
| 0
| 0.104478
| 0
| 0
| 0.048466
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119403
| false
| 0
| 0.074627
| 0.044776
| 0.358209
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
060b2a571442e70a179db487667f330e3647e19a
| 1,136
|
py
|
Python
|
common/cache.py
|
govtrack/django-lorien-common
|
27241ff72536b442dfd64fad8589398b8a6e9f4d
|
[
"BSD-3-Clause"
] | 1
|
2020-08-17T06:24:56.000Z
|
2020-08-17T06:24:56.000Z
|
common/cache.py
|
govtrack/django-lorien-common
|
27241ff72536b442dfd64fad8589398b8a6e9f4d
|
[
"BSD-3-Clause"
] | null | null | null |
common/cache.py
|
govtrack/django-lorien-common
|
27241ff72536b442dfd64fad8589398b8a6e9f4d
|
[
"BSD-3-Clause"
] | null | null | null |
from hashlib import sha1
from django.core.cache import cache
from django.utils.encoding import smart_str
def cached(key=None, timeout=300):
"""
Cache the result of a function call.
Args:
key: the key under which the value will be saved. If key is None
then it is derived automatically from the function and its arguments.
timeout: number of seconds after which the cached value will be purged.
"""
_key = key
def func_wrapper(func):
def args_wrapper(*args, **kwargs):
# workaround for Python's closure scoping: rebind the outer key locally
key = _key
if key is None:
# Not sure that this will work correctly in all cases
key = sha1((str(func.__module__) + str(func.__name__) +
smart_str(args) +
smart_str(frozenset(kwargs.items()))).encode('utf-8')).hexdigest()  # encode so hashlib accepts it on Python 3
value = cache.get(key)
if value:
return value
else:
value = func(*args, **kwargs)
cache.set(key, value)
return value
return args_wrapper
return func_wrapper
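# Usage sketch (hypothetical function; assumes a configured Django cache
# backend):
# @cached(timeout=60)
# def expensive_lookup(pk):
#     return Model.objects.get(pk=pk)
# The first call computes and stores the result; repeat calls within 60
# seconds return the cached value. Falsy results are recomputed every time,
# because args_wrapper tests `if value:` rather than `if value is not None:`.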
| 32.457143
| 80
| 0.564261
| 136
| 1,136
| 4.588235
| 0.477941
| 0.038462
| 0.022436
| 0.035256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006935
| 0.365317
| 1,136
| 34
| 81
| 33.411765
| 0.85853
| 0.285211
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.47619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
060d03c63bb8152f4e45ecb98502c75a5900990a
| 1,417
|
py
|
Python
|
dtecsv.py
|
varnav/dte-usage-plotter
|
cfeca2db8ccb4c4f0564d9f0b493edd26f68e1ca
|
[
"MIT"
] | null | null | null |
dtecsv.py
|
varnav/dte-usage-plotter
|
cfeca2db8ccb4c4f0564d9f0b493edd26f68e1ca
|
[
"MIT"
] | null | null | null |
dtecsv.py
|
varnav/dte-usage-plotter
|
cfeca2db8ccb4c4f0564d9f0b493edd26f68e1ca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
1. Go to:
https://usage.dteenergy.com/?interval=hour
2. Download CSV
3. Run:
python dtecsv.py .\electric_usage_report_05-31-2021_to_06-05-2021.csv
"""
import csv
import datetime
import click
import matplotlib.pyplot as plt
x = []
y = []
@click.command()
@click.argument('file', type=click.Path(exists=True))
def main(file):
"""
Plot hourly usage data from a DTE Energy CSV export.
:param file: DTE CSV file
"""
with open(file, 'r') as file:
lines = csv.reader(file)
next(lines)  # skip the header row
for row in lines:
rawdate = row[1] + ' ' + row[2] # 05/15/2021 11:00 AM
# date = datetime.datetime.strptime(rawdate, "%m/%d/%Y %I:00 %p").strftime("%Y-%m-%d %H:00")
date = datetime.datetime.strptime(rawdate, "%m/%d/%Y %I:00 %p").strftime("%b %d %H:00")
x.append(date)
y.append(float(row[3]))
# Resize the figure (optional)
plt.figure(figsize=(18, 9))
# Plot the x and y values on the graph
plt.plot(x, y)
# Here you specify the ticks you want to display
# You can also specify rotation for the tick labels in degrees or with keywords.
plt.xticks(x[::2], rotation='vertical')
# Add margins (padding) so that markers don't get clipped by the axes
plt.margins(0.2)
# Display the graph
plt.show()
if __name__ == '__main__':
main()
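# For reference, the strptime/strftime round trip above turns a raw CSV
# timestamp such as "05/15/2021 11:00 AM" into the tick label "May 15 11:00".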
| 23.616667
| 104
| 0.614679
| 221
| 1,417
| 3.882353
| 0.565611
| 0.006993
| 0.04662
| 0.065268
| 0.11655
| 0.11655
| 0.11655
| 0.11655
| 0.11655
| 0.11655
| 0
| 0.045539
| 0.240649
| 1,417
| 59
| 105
| 24.016949
| 0.751859
| 0.460127
| 0
| 0
| 0
| 0
| 0.068587
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.166667
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
060ddb65bbe8989145f472ee9db47a8d7aff5843
| 12,598
|
py
|
Python
|
model_navigator/model_analyzer/profiler.py
|
triton-inference-server/model_navigator
|
ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 49
|
2021-04-09T18:32:07.000Z
|
2022-03-29T07:32:24.000Z
|
model_navigator/model_analyzer/profiler.py
|
triton-inference-server/model_navigator
|
ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2021-07-13T09:00:12.000Z
|
2021-11-15T17:16:35.000Z
|
model_navigator/model_analyzer/profiler.py
|
triton-inference-server/model_navigator
|
ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2021-04-09T18:31:56.000Z
|
2022-03-01T08:08:04.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
from distutils.version import LooseVersion
from pathlib import Path
from typing import List, Optional
import yaml
from model_navigator.converter import DatasetProfileConfig
from model_navigator.exceptions import ModelNavigatorProfileException
from model_navigator.kubernetes.yaml import CustomDumper
from model_navigator.model_analyzer import ModelAnalyzer, ModelAnalyzerProfileConfig
from model_navigator.model_analyzer.config import BaseConfigGenerator, ModelAnalyzerTritonConfig
from model_navigator.model_analyzer.model_analyzer import ModelAnalyzerMode
from model_navigator.model_analyzer.model_analyzer_config import ModelAnalyzerConfig
from model_navigator.perf_analyzer import PerfMeasurementConfig
from model_navigator.triton import DeviceKind
from model_navigator.triton.model_config import TritonModelConfigGenerator
from model_navigator.triton.utils import get_shape_params
from model_navigator.utils import Workspace
LOGGER = logging.getLogger(__name__)
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
import pkg_resources
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
class Profiler:
def __init__(
self,
*,
workspace: Workspace,
triton_docker_image: str,
gpus: List[str],
verbose: bool = False,
profile_config: ModelAnalyzerProfileConfig,
triton_config: ModelAnalyzerTritonConfig,
perf_measurement_config: PerfMeasurementConfig,
dataset_profile_config: Optional[DatasetProfileConfig] = None,
profiling_data_path: Optional[Path] = None,
):
self._workspace = workspace
self._triton_config = triton_config
self._triton_docker_image = triton_docker_image
self._profile_config = profile_config
self._dataset_profile_config = dataset_profile_config
self._profiling_data_path = profiling_data_path
self._perf_measurement_config = perf_measurement_config
self._config_generator: ProfileConfigGenerator = ProfileConfigGenerator(
workspace=self._workspace,
profile_config=self._profile_config,
triton_config=triton_config,
triton_docker_image=triton_docker_image,
verbose=verbose,
dataset_profile_config=dataset_profile_config,
profiling_data_path=profiling_data_path,
perf_measurement_config=perf_measurement_config,
gpus=gpus,
)
self._profile_config_path = self._config_generator.analyzer_path / "config-profile.yaml"
self._verbose = verbose
self._prepare_catalogs()
def run(self) -> Path:
config = self._config_generator.generate_config()
self._profile_config_path.parent.mkdir(parents=True, exist_ok=True)
with self._profile_config_path.open("w") as config_file:
config_content = yaml.dump(config, Dumper=CustomDumper)
LOGGER.debug("Triton Model Analyzer profile config:\n" f"{config_content}")
config_file.write(config_content)
analyzer_config = ModelAnalyzerConfig()
analyzer_config["config-file"] = self._profile_config_path.as_posix()
analyzer = ModelAnalyzer(config=analyzer_config)
analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=self._verbose)
latest_checkpoint_path = self._find_latest_checkpoint()
LOGGER.info(f"Triton Model Analyzer profiling done. Results are stored in {latest_checkpoint_path}")
return latest_checkpoint_path
def _find_latest_checkpoint(self):
checkpoints_paths = sorted(
self._config_generator.checkpoints_dir_path.glob("*.ckpt"),
key=lambda path: int(path.stem),
)
latest_checkpoint_path = checkpoints_paths[-1] if checkpoints_paths else None
return latest_checkpoint_path
def _prepare_catalogs(self):
def _remove_and_create_dir(dir_path: Path):
if dir_path.is_dir():
LOGGER.debug(f"Removing {dir_path}")
shutil.rmtree(dir_path)
dir_path.mkdir(parents=True)
_remove_and_create_dir(self._config_generator.analyzer_path)
class ProfileConfigGenerator(BaseConfigGenerator):
def __init__(
self,
*,
workspace: Workspace,
profile_config: ModelAnalyzerProfileConfig,
triton_config: ModelAnalyzerTritonConfig,
perf_measurement_config: PerfMeasurementConfig,
gpus: List[str],
triton_docker_image: Optional[str] = None,
verbose: int = 0,
dataset_profile_config: Optional[DatasetProfileConfig] = None,
profiling_data_path: Optional[Path] = None,
):
super().__init__(workspace=workspace, verbose=verbose)
self._analyzer_triton_log_path = self._analyzer_path / "triton.log"
self._triton_config = triton_config
self._triton_docker_image = triton_docker_image
self._verbose = verbose
self._profile_config = profile_config
self._dataset_profile_config = dataset_profile_config
self._profiling_data_path = profiling_data_path
self._perf_measurement_config = perf_measurement_config
self._gpus = gpus
@property
def triton_log_path(self) -> Path:
return self._analyzer_triton_log_path.resolve()
def generate_config(self):
model_repository = self._triton_config.model_repository
models_list = [model_dir.name for model_dir in model_repository.glob("*") if model_dir.is_dir()]
LOGGER.info(f"Prepare profiling for {len(models_list)} models from {model_repository}:")
for model_name in models_list:
LOGGER.info(f"\t- {model_name}")
model_names_with_profile_config = {
model_name: self._get_profile_config_for_model(model_name) for model_name in models_list
}
if any(profile_config for model_name, profile_config in model_names_with_profile_config.items()):
models_list = model_names_with_profile_config
if self._profile_config.config_search_max_preferred_batch_size > 0:
max_preferred_batch_size = self._profile_config.config_search_max_preferred_batch_size
else:
max_preferred_batch_size = 1
manual_config_search = all(
isinstance(models_list, dict) and models_list[model_name].get("model_config_parameters")
for model_name in models_list
)
# https://github.com/triton-inference-server/model_analyzer/blob/r21.12/docs/config.md
config = {
"run_config_search_disable": manual_config_search,
"profile_models": models_list,
"triton_docker_image": self._triton_docker_image,
"triton_launch_mode": self._triton_config.triton_launch_mode.value,
"model_repository": model_repository.resolve().as_posix(),
"checkpoint_directory": self._analyzer_checkpoints_dir_path.as_posix(),
"output_model_repository_path": self.output_model_repository_path.as_posix(),
"export_path": self._analyzer_path.resolve().as_posix(),
"triton_server_flags": {"strict-model-config": False},
"run_config_search_max_concurrency": self._profile_config.config_search_max_concurrency,
"run_config_search_max_instance_count": self._profile_config.config_search_max_instance_count,
"run_config_search_max_preferred_batch_size": max_preferred_batch_size,
"perf_analyzer_timeout": self._perf_measurement_config.perf_analyzer_timeout,
"perf_analyzer_flags": self._get_perf_analyzer_flags(),
"triton_server_path": self._triton_config.triton_server_path,
"override_output_model_repository": True,
"gpus": list(self._gpus),
"summarize": self._verbose,
"verbose": self._verbose,
"perf_output": self._verbose,
"triton_output_path": self.triton_log_path.as_posix(),
}
return config
def _get_perf_analyzer_flags(self):
configuration = {}
if self._profiling_data_path:
if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
configuration["input-data"] = [self._profiling_data_path.as_posix()]
else:
configuration["input-data"] = self._profiling_data_path.as_posix()
elif self._dataset_profile_config and self._dataset_profile_config.max_shapes:
shapes = get_shape_params(self._dataset_profile_config)
if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
configuration["shape"] = shapes
else:
configuration["shape"] = " ".join(shapes)
configuration["measurement-interval"] = self._perf_measurement_config.perf_measurement_interval
configuration["measurement-mode"] = self._perf_measurement_config.perf_measurement_mode
configuration["measurement-request-count"] = self._perf_measurement_config.perf_measurement_request_count
return configuration
def _get_profile_config_for_model(self, model_dir_name):
original_model_config_path = self._triton_config.model_repository / model_dir_name / "config.pbtxt"
original_model_config = TritonModelConfigGenerator.parse_triton_config_pbtxt(original_model_config_path)
model_config = {}
if self._profile_config.config_search_instance_counts:
mapping = {DeviceKind.GPU: "KIND_GPU", DeviceKind.CPU: "KIND_CPU"}
model_config["instance_group"] = [
{"kind": mapping[kind], "count": counts}
for kind, counts in self._profile_config.config_search_instance_counts.items()
]
if self._profile_config.config_search_max_batch_sizes:
model_config["max_batch_size"] = self._profile_config.config_search_max_batch_sizes
if self._profile_config.config_search_preferred_batch_sizes:
model_config["dynamic_batching"] = {
"preferred_batch_size": self._profile_config.config_search_preferred_batch_sizes
}
if self._profile_config.config_search_backend_parameters:
original_backend_parameters = original_model_config.backend_parameters_config.triton_backend_parameters
original_backend_parameters = {
param_name: {"string_value": [param_value]}
for param_name, param_value in original_backend_parameters.items()
}
model_config["parameters"] = {
**original_backend_parameters,
**{
param_name: {"string_value": list(map(str, param_values))}
for param_name, param_values in self._profile_config.config_search_backend_parameters.items()
},
}
configuration = {}
if model_config:
configuration["model_config_parameters"] = model_config
if self._profile_config.config_search_concurrency:
configuration["parameters"] = {"concurrency": self._profile_config.config_search_concurrency}
engine_count_per_device = original_model_config.instances_config.engine_count_per_device
if self._profile_config.config_search_max_instance_count and engine_count_per_device:
if len(set(engine_count_per_device)) > 1:
raise ModelNavigatorProfileException(
"Triton Model config instance group have more than 1 device kind. "
"Use manual profile to swipe over instance group count"
)
elif DeviceKind.CPU in engine_count_per_device:
configuration["cpu_only"] = True
return configuration
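# A minimal construction sketch, assuming a Workspace and the three config
# objects are assembled elsewhere (their constructors are outside this file);
# the docker image tag and gpus value below are illustrative only:
#
# profiler = Profiler(
#     workspace=workspace,
#     triton_docker_image="nvcr.io/nvidia/tritonserver:21.12-py3",
#     gpus=["all"],
#     profile_config=profile_config,
#     triton_config=triton_config,
#     perf_measurement_config=perf_measurement_config,
# )
# checkpoint_path = profiler.run()  # returns the path to the newest .ckpt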
| 44.992857
| 115
| 0.710192
| 1,420
| 12,598
| 5.88169
| 0.183803
| 0.0716
| 0.04478
| 0.041307
| 0.380747
| 0.288673
| 0.230723
| 0.204382
| 0.139488
| 0.117098
| 0
| 0.002736
| 0.216542
| 12,598
| 279
| 116
| 45.154122
| 0.843465
| 0.052945
| 0
| 0.218182
| 0
| 0
| 0.101544
| 0.029708
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.095455
| 0.004545
| 0.177273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
061117f2066d00451f5045f7338796a6dddd1a21
| 906
|
py
|
Python
|
IOPool/Input/test/PrePool2FileInputTest_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
IOPool/Input/test/PrePool2FileInputTest_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
IOPool/Input/test/PrePool2FileInputTest_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
# The following comments couldn't be translated into the new config version:
# Test storing OtherThing as well
# Configuration file for PrePoolInputTest
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST2ND")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
#process.maxEvents = cms.untracked.PSet(
# input = cms.untracked.int32(11)
#)
#process.Thing = cms.EDProducer("ThingProducer")
process.output = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('keep *',
'drop *_Thing_*_*'),
fileName = cms.untracked.string('PoolInput2FileTest.root')
)
process.OtherThing = cms.EDProducer("OtherThingProducer")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("file:PoolInputOther.root") )
process.p = cms.Path(process.OtherThing)
process.ep = cms.EndPath(process.output)
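# To exercise this configuration inside a CMSSW environment, after the first
# step of the test chain has produced PoolInputOther.root, run:
#   cmsRun PrePool2FileInputTest_cfg.py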
| 28.3125
| 91
| 0.733996
| 100
| 906
| 6.61
| 0.59
| 0.090772
| 0.057489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007742
| 0.144592
| 906
| 31
| 92
| 29.225806
| 0.845161
| 0.296909
| 0
| 0
| 0
| 0
| 0.261563
| 0.145136
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0611b8f8b1f08d15f75771f8b58463a12ef35fc0
| 24,165
|
py
|
Python
|
scripts/old_scripts/compare_svo_multiple.py
|
noambuckman/mpc-multiple-vehicles
|
a20949c335f1af97962569eed112e6cef46174d9
|
[
"MIT"
] | 1
|
2021-11-02T15:16:17.000Z
|
2021-11-02T15:16:17.000Z
|
scripts/old_scripts/compare_svo_multiple.py
|
noambuckman/mpc-multiple-vehicles
|
a20949c335f1af97962569eed112e6cef46174d9
|
[
"MIT"
] | 5
|
2021-04-14T17:08:59.000Z
|
2021-05-27T21:41:02.000Z
|
scripts/old_scripts/compare_svo_multiple.py
|
noambuckman/mpc-multiple-vehicles
|
a20949c335f1af97962569eed112e6cef46174d9
|
[
"MIT"
] | 2
|
2022-02-07T08:16:05.000Z
|
2022-03-09T23:30:17.000Z
|
import datetime
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import casadi as cas
##### For viewing the videos in Jupyter Notebook
import io
import base64
from IPython.display import HTML
# from ..src import car_plotting
# from .import src.car_plotting
PROJECT_PATH = '/home/nbuckman/Dropbox (MIT)/DRL/2020_01_cooperative_mpc/mpc-multiple-vehicles/'
sys.path.append(PROJECT_PATH)
import src.MPC_Casadi as mpc
import src.car_plotting as cplot
import src.TrafficWorld as tw
np.set_printoptions(precision=2)
import src.IterativeBestResponseMPCMultiple as mibr
import pickle
SAVE = False
PLOT = False
rounds_ibr = 225
n_other_cars = 4
N = 50
###### LaTeX Dimensions (not currently working)
fig_width_pt = 246.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27 # Convert pt to inches
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height =fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
fig_size = [6, 4]
##################
def find_t_final(x, goal_x):
i_upper = np.searchsorted(x[0,:], goal_x)
i_lower = i_upper - 1
dt = 0.2
# if i_upper >= x.shape[1]:
# print(i_upper, x[0,i_lower])
# print("Check: %.03f < %.03f"%(x[0,i_lower], goal_x))
t_lower = i_lower*dt
x_lower = x[0, i_lower]
x_remaining = goal_x - x_lower
v_x = np.cos(x[2, i_lower]) * x[4, i_lower]
t_remaining = x_remaining / v_x
t_final = t_lower + t_remaining
# print("%.03f %.03f"%(t_lower, t_final))
return t_final
#### STEP 1: Sort all the files into the correct SVO
all_subdir = [
"20200301_215332random_ego",
"20200301_215346random_pro",
"20200301_215432random_altru",
"20200301_215520random_pro",
"20200301_215526random_altru",
"20200301_215537random_ego",
"20200301_215551random_pro",
"20200301_215602random_altru",
"20200301_215608random_ego",
"20200301_215623random_pro",
"20200301_215629random_altru",
"20200301_215636random_ego",
"20200301_215652random_pro",
"20200301_215658random_altru",
"20200301_215703random_ego",
"20200301_215713random_pro",
"20200301_215724random_altru",
"20200301_215742random_ego",
"20200301_215751random_pro",
"20200301_215757random_altru",
"20200301_215806random_ego",
"20200302_104840random_1p",
"20200302_104913random_2p",
"20200302_104916random_3p",
"20200302_104920random_4p",
"20200302_104926random_1e",
"20200302_104941random_2e",
"20200302_104946random_3e",
"20200302_105002random_4e",
"20200302_105059random_1a",
"20200302_105101random_2a",
"20200302_105104random_3a",
"20200302_105108random_4a",
"20200302_114834random_5e",
"20200302_114839random_6e",
"20200302_114841random_7e",
"20200302_114844random_8e",
"20200302_114853random_5p",
"20200302_114856random_6p",
"20200302_114859random_7p",
"20200302_114902random_8p",
"20200302_114909random_5a",
"20200302_114912random_6a",
"20200302_114914random_7a",
"20200302_114916random_8a",
"20200227_133704less_kxdotlarger",
"20200228_114359random_pro",
"20200228_114437random_pro",
"20200228_114440random_pro",
"20200228_114443random_pro",
"20200228_114448random_pro",
"20200228_114450random_pro",
"20200228_114913random_pro",
"20200228_114914random_pro",
"20200228_114916random_pro",
"20200228_114917random_pro",
"20200227_142916pi_01_ego",
"20200228_114517random_ego",
"20200228_114518random_ego",
"20200228_114528random_ego",
"20200228_114532random_ego",
"20200228_114547random_ego",
"20200228_114551random_ego",
"20200228_114803random_ego",
"20200228_114805random_ego",
"20200228_114806random_ego",
"20200227_141954pi2_5altru",
"20200228_114501random_altru",
"20200228_114503random_altru",
"20200228_114505random_altru",
"20200228_114506random_altru",
"20200228_114507random_altru",
"20200228_114509random_altru",
"20200228_114850random_altru",
"20200228_114851random_altru",
"20200228_114852random_altru",
]
subdir_name_prosocial_list = []
subdir_name_ego_list = []
subdir_name_altruistic_list = []
altr_theta = []
ego_theta = []
pro_theta = []
NO_GRASS = False
world = tw.TrafficWorld(2, 0, 1000)
for subdir in all_subdir:
try:
file_name = "results/" + subdir+"/data/"+"mpc3.p"
mpc = pickle.load(open(file_name, 'rb'))  # note: rebinds (shadows) the src.MPC_Casadi import above
if mpc.min_y < -999999 or mpc.max_y > 9999999:
print("Messed up ymin/max", file_name)
continue
elif mpc.min_y > world.y_min + 0.000001:
print("Grass is NOT allowed!", file_name)
if not NO_GRASS:
print("Too grass lmmited, ignored", file_name)
continue
elif mpc.min_y <= world.y_min + 0.00001:
print("Grass is allowed!", file_name)
if NO_GRASS:
print("NO Grass, dataset ignored", file_name)
continue
if mpc.theta_iamb > np.pi/3:
subdir_name_altruistic_list += [subdir]
altr_theta += [mpc.theta_iamb]
elif mpc.theta_iamb <= np.pi/6.0:
subdir_name_ego_list += [subdir]
ego_theta += [mpc.theta_iamb]
else:
subdir_name_prosocial_list += [subdir]
pro_theta += [mpc.theta_iamb]
except FileNotFoundError:
print("Not found:", file_name)
print("Atruistic np.pi/2 = 1.5ish")
print(subdir_name_altruistic_list)
print(altr_theta)
print("Egoistic 0")
print(subdir_name_ego_list)
print(ego_theta)
print("Pro-Social", np.pi/2)
print(subdir_name_prosocial_list)
print(pro_theta)
# subdir_name_prosocial_list = [
# "20200227_133704less_kxdotlarger",
# "20200228_114359random_pro",
# "20200228_114437random_pro",
# "20200228_114440random_pro",
# "20200228_114443random_pro",
# "20200228_114448random_pro",
# "20200228_114450random_pro",
# "20200228_114913random_pro",
# "20200228_114914random_pro",
# "20200228_114916random_pro",
# "20200228_114917random_pro",
# ]
# subdir_name_prosocial = "20200227_133704less_kxdotlarger"
# folder_prosocial = "results/" + subdir_name_prosocial + "/"
# subdir_name_ego_list = [
# "20200227_142916pi_01_ego",
# "20200228_114517random_ego",
# "20200228_114518random_ego",
# "20200228_114528random_ego",
# "20200228_114532random_ego",
# "20200228_114547random_ego",
# "20200228_114551random_ego",
# "20200228_114803random_ego",
# "20200228_114805random_ego",
# "20200228_114806random_ego",
# ]
# subdir_name_ego = "20200227_142916pi_01_ego"
# folder_ego = "results/" + subdir_name_ego + "/"
# subdir_name_altruistic_list = [
# "20200227_141954pi2_5altru",
# "20200228_114501random_altru",
# "20200228_114503random_altru",
# "20200228_114505random_altru",
# "20200228_114506random_altru",
# "20200228_114507random_altru",
# "20200228_114509random_altru",
# "20200228_114850random_altru",
# "20200228_114851random_altru",
# "20200228_114852random_altru"]
# subdir_name_altruistic = "20200227_141954pi2_5altru"
# folder_altruistic = "results/" + subdir_name_altruistic + "/"
################ Analyze Results
all_xamb_pro = []
all_uamb_pro = []
all_other_x_pro = []
all_other_u_pro = []
ibr_brounds_array_pro = []
all_xamb_ego = []
all_uamb_ego = []
all_other_x_ego = []
all_other_u_ego = []
ibr_brounds_array_ego = []
all_xamb_altru = []
all_uamb_altru = []
all_other_x_altru = []
all_other_u_altru = []
ibr_brounds_array_altru = []
all_tfinalamb_pro = []
all_tfinalamb_ego = []
all_tfinalamb_altru = []
for sim_i in range(3):
if sim_i==0:
subdir_name_list = subdir_name_prosocial_list
elif sim_i==1:
subdir_name_list = subdir_name_ego_list
else:
subdir_name_list = subdir_name_altruistic_list
for folder in subdir_name_list:
n_full_rounds = 0 # rounds in which the ambulance planned
n_all_rounds = 0
all_xamb = np.zeros((6, N+1, rounds_ibr))
all_uamb = np.zeros((2, N, rounds_ibr))
all_xcost = np.zeros((3, rounds_ibr))
all_tfinalamb = np.zeros((1, rounds_ibr))
all_other_x = [np.zeros((6, N+1, rounds_ibr)) for i in range(n_other_cars)]
all_other_u = [np.zeros((2, N, rounds_ibr)) for i in range(n_other_cars)]
all_other_cost = [np.zeros((3, rounds_ibr)) for i in range(n_other_cars)]
all_other_tfinal = [np.zeros((1, rounds_ibr)) for i in range(n_other_cars)]
for amb_ibr_i in range(rounds_ibr):
if (amb_ibr_i % (n_other_cars + 1) == 1) and amb_ibr_i>51: # We only look at sims when slack activated
ibr_prefix = '%03d'%amb_ibr_i
try:
xamb, uamb, xamb_des, xothers, uothers, xothers_des = mibr.load_state("results/" + folder + "/" + "data/" + ibr_prefix, n_other_cars)
all_xamb[:,:,n_full_rounds] = xamb
all_uamb[:,:,n_full_rounds] = uamb
x_goal = 130
all_tfinalamb[:, n_full_rounds] = find_t_final(xamb, x_goal)
for i in range(n_other_cars):
all_other_x[i][:,:,n_full_rounds] = xothers[i]
all_other_u[i][:,:,n_full_rounds] = uothers[i]
# all_other_tfinal[i][:,n_full_rounds] = find_t_final(xothers[i], 120)
n_full_rounds += 1
except FileNotFoundError:
# print("amb_ibr_i %d missing"%amb_ibr_i)
pass
n_all_rounds += 1
### Clip the extra dimension
all_xamb = all_xamb[:,:,:n_full_rounds]
all_uamb = all_uamb[:,:,:n_full_rounds]
all_tfinalamb = all_tfinalamb[:,:n_full_rounds]
for i in range(n_other_cars):
all_other_x[i] = all_other_x[i][:,:,:n_full_rounds]
all_other_u[i] = all_other_u[i][:,:,:n_full_rounds]
ibr_brounds_array = np.array(range(1, n_full_rounds +1))
if n_full_rounds > 0: # only save runs that meet the slack requirement
if sim_i==0: #prosocial directory
all_xamb_pro += [all_xamb]
all_uamb_pro += [all_uamb]
all_other_x_pro += [all_other_x]
all_other_u_pro += [all_other_u]
ibr_brounds_array_pro += [ibr_brounds_array]
all_tfinalamb_pro += [all_tfinalamb]
elif sim_i==1: #egoistic directory
all_xamb_ego += [all_xamb]
all_uamb_ego += [all_uamb]
all_other_x_ego += [all_other_x]
all_other_u_ego += [all_other_u]
ibr_brounds_array_ego += [ibr_brounds_array]
all_tfinalamb_ego += [all_tfinalamb]
else: #altruistic directory
all_xamb_altru += [all_xamb]
all_uamb_altru += [all_uamb]
all_other_x_altru += [all_other_x]
all_other_u_altru += [all_other_u]
ibr_brounds_array_altru += [ibr_brounds_array]
all_tfinalamb_altru += [all_tfinalamb]
else:
print("No slack eligible", folder)
### SAVING IN PROSOCIAL'S DIRECTORY
folder = "random" #<----
fig_trajectory, ax_trajectory = plt.subplots(1,1)
ax_trajectory.set_title("Ambulance Trajectories")
# fig_trajectory.set_figheight(fig_height)
# fig_trajectory.set_figwidth(fig_width)
fig_trajectory.set_size_inches((8,6))
print(len(all_xamb_pro))
print(all_xamb_pro[0].shape)
ax_trajectory.plot(all_xamb_pro[0][0,:,-1], all_xamb_pro[0][1,:,-1], '-o', label="Prosocial")
ax_trajectory.plot(all_xamb_ego[0][0,:,-1], all_xamb_ego[0][1,:,-1], '-o', label="Egoistic")
ax_trajectory.plot(all_xamb_altru[0][0,:,-1], all_xamb_altru[0][1,:,-1], '-o', label="Altruistic")
ax_trajectory.set_xlabel("X [m]")
ax_trajectory.set_ylabel("Y [m]")
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig1_amb_trajectory.eps'
fig_trajectory.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
##########################################
svo_labels = ["Egoistic", "Prosocial", "Altruistic"]
fig_uamb, ax_uamb = plt.subplots(3,1)
fig_uamb.set_size_inches((8,8))
fig_uamb.suptitle("Ambulance Control Input over IBR Iterations")
# ax_uamb[0].plot(ibr_brounds_array, np.sum(all_uamb[0,:,:] * all_uamb[0,:,:], axis=0), '-o')
ax_uamb[0].bar(range(3), [
np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_ego]),
np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_pro]),
np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_altru])]
)
# ax_uamb[0].set_xlabel("IBR Iteration")
ax_uamb[0].set_ylabel(r"$\sum u_{\delta}^2$")
ax_uamb[0].set_xticks(range(3))
ax_uamb[0].set_xticklabels(svo_labels)
ax_uamb[1].bar(range(3), [
np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_ego]),
np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_pro]),
np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_altru])]
)
# ax_uamb[1].set_xlabel("IBR Iteration")
ax_uamb[1].set_ylabel(r"$\sum u_{v}^2$")
ax_uamb[1].set_xticks(range(3))
ax_uamb[1].set_xticklabels(svo_labels)
# ax_uamb[2].bar(range(3), [
# np.sum(all_uamb_ego[0,:,-1] * all_uamb_ego[0,:,-1],axis=0) + np.sum(all_uamb_ego[1,:,-1] * all_uamb_ego[1,:,-1],axis=0),
# np.sum(all_uamb_pro[0,:,-1] * all_uamb_pro[1,:,-1], axis=0) + np.sum(all_uamb_pro[1,:,-1] * all_uamb_pro[1,:,-1], axis=0),
# np.sum(all_uamb_altru[0,:,-1] * all_uamb_altru[0,:,-1],axis=0) + np.sum(all_uamb_altru[1,:,-1] * all_uamb_altru[1,:,-1],axis=0)],)
# ax_uamb[2].set_xlabel("Vehicles' Social Value Orientation")
# ax_uamb[2].set_ylabel("$\sum ||u||^2$")
ax_uamb[1].set_xticks(range(3))
ax_uamb[1].set_xticklabels(svo_labels)
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig2_amb_ctrl_iterations.eps'
fig_uamb.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
##########################################################
#### Convergence
#########################################################
fig_reluamb, ax_reluamb = plt.subplots(2,1)
# fig_reluamb.set_figheight(fig_height)
# fig_reluamb.set_figwidth(fig_width)
fig_reluamb.set_size_inches((8,6))
for sim_i in range(3):
if sim_i==0: #prosocial directory
all_uamb = all_uamb_ego
label = "Egoistic"
ibr_brounds_array = ibr_brounds_array_ego
elif sim_i==1: #egoistic directory
all_uamb = all_uamb_pro
label = "Prosocial"
ibr_brounds_array = ibr_brounds_array_pro
else: #altruistic directory
all_uamb = all_uamb_altru
all_other_u = all_other_u_altru
label = "Altruistic"
ibr_brounds_array = ibr_brounds_array_altru
ax_reluamb[0].plot(ibr_brounds_array[0][1:], np.sum((all_uamb[0][0,:,1:]-all_uamb[0][0,:,0:-1])*(all_uamb[0][0,:,1:]-all_uamb[0][0,:,0:-1]), axis=0), '-o', label=label)
ax_reluamb[1].plot(ibr_brounds_array[0][1:], np.sum((all_uamb[0][1,:,1:]-all_uamb[0][1,:,0:-1])*(all_uamb[0][1,:,1:]-all_uamb[0][1,:,0:-1]), axis=0), '-o', label=label)
ax_reluamb[0].set_ylabel("$\sum (u_{v\delta,t}-u_{\delta,t-1})^2$")
ax_reluamb[1].set_xlabel("IBR Iteration")
ax_reluamb[1].set_ylabel("$\sum (u_{v,t}-u_{v,t-1})^2$")
ax_reluamb[0].legend()
ax_reluamb[1].legend()
fig_reluamb.suptitle("Change in Ambulance Control Input over IBR Iterations")
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig3_change_amb_ctrl_iterations.eps'
fig_reluamb.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
###################################################################
##################################################################
fig_xfinal, ax_xfinal = plt.subplots(2,1)
fig_xfinal.suptitle("Final Ambulance State Over Iterations")
fig_xfinal.set_size_inches((8,6))
# fig_xfinal.set_figheight(fig_height)
# fig_xfinal.set_figwidth(fig_width)
for sim_i in range(3):
if sim_i==0: #prosocial directory
all_uamb = all_uamb_ego
all_xamb = all_xamb_ego
all_other_x = all_other_x_ego
label = "Egoistic"
ibr_brounds_array = ibr_brounds_array_ego
elif sim_i==1: #egoistic directory
all_uamb = all_uamb_pro
all_xamb = all_xamb_pro
all_other_x = all_other_x_pro
label = "Prosocial"
ibr_brounds_array = ibr_brounds_array_pro
else: #altruistic directory
all_uamb = all_uamb_altru
all_xamb = all_xamb_altru
all_other_x = all_other_x_altru
all_other_u = all_other_u_altru
label = "Altruistic"
ibr_brounds_array = ibr_brounds_array_altru
ax_xfinal[0].plot(ibr_brounds_array[0], all_xamb[0][0,-1,:], '-o', label=label)
ax_xfinal[1].plot(ibr_brounds_array[0], all_xamb[0][2,-1,:], '-o', label=label)
# ax_reluamb[0].set_xlabel("IBR Iteration")
ax_xfinal[0].set_ylabel("$x_{final}$")
ax_xfinal[0].legend()
ax_xfinal[1].set_xlabel("IBR Iteration")
ax_xfinal[1].set_ylabel(r"$\Theta_{final}$")
ax_xfinal[1].legend()
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig4_iterations_ambperformance.eps'
fig_xfinal.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
################################################################################
###################### NOW PLOTTING THE OTHER VEHICLES #########################
fig_xfinal_all, ax_xfinal_all = plt.subplots(3,1)
fig_xfinal_all.suptitle("Comparing Distance Travel for the Vehicles")
fig_xfinal_all.set_size_inches((8,8))
# fig_xfinal_all.set_figheight(fig_height)
# fig_xfinal_all.set_figwidth(fig_width)
for sim_i in range(3):
if sim_i==0: #prosocial directory
all_uamb = all_uamb_ego
all_xamb = all_xamb_ego
all_other_x = all_other_x_ego
label = "Egoistic"
ibr_brounds_array = ibr_brounds_array_ego
elif sim_i==1: #egoistic directory
all_uamb = all_uamb_pro
all_xamb = all_xamb_pro
all_other_x = all_other_x_pro
label = "Prosocial"
ibr_brounds_array = ibr_brounds_array_pro
else: #altruistic directory
all_uamb = all_uamb_altru
all_xamb = all_xamb_altru
all_other_x = all_other_x_altru
all_other_u = all_other_u_altru
label = "Altruistic"
ibr_brounds_array = ibr_brounds_array_altru
bar_width = 0.5
inter_car_width = 2*bar_width
width_offset = bar_width*sim_i
ticks = [width_offset + (2*bar_width + inter_car_width)*c for c in range(n_other_cars + 1)]
# print(len(all_ither_x))
# ax_xfinal_all[0].bar(ticks,
# [np.mean([all_x[0, -1, -1] - all_x[0, 0, -1] for all_x in all_xamb])] + [np.mean(all_o_x[i][0,-1,-1] - all_o_x[i][0,0,-1]) for i in range(n_other_cars) for all_o_x in all_other_x],
# bar_width, label=label)
# ax_xfinal_all[0].set_xticks(range(n_other_cars + 1))
# ax_xfinal_all[0].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
# ax_xfinal_all[1].bar(ticks,
# [all_xamb[-1, -1, -1] - all_xamb[-1, 0, -1]] + [all_other_x[i][-1,-1,-1] - all_other_x[i][-1,0,-1] for i in range(n_other_cars)],
# bar_width, label=label)
# # ax_xfinal_all[1].set_xticks(range(n_other_cars + 1))
# # ax_xfinal_all[1].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
# ax_xfinal_all[2].bar(ticks,
# [np.sum(all_xamb[2,:,-1]*all_xamb[2,:,-1])] + [np.sum(all_other_x[i][2,:,-1]*all_other_x[i][2,:,-1]) for i in range(n_other_cars)],
# bar_width, label=label)
width_offset = bar_width*1
ticks = [width_offset + (2*bar_width + inter_car_width)*c for c in range(n_other_cars + 1)]
ax_xfinal_all[2].legend()
ax_xfinal_all[2].set_xticks(ticks)
ax_xfinal_all[2].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[0].set_ylabel("Horizontal Displacement $\Delta x$")
ax_xfinal_all[0].legend()
ax_xfinal_all[0].set_xticks(ticks)
ax_xfinal_all[0].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[1].set_ylabel("Total Distance $s_f - s_i$")
ax_xfinal_all[1].legend()
ax_xfinal_all[1].set_xticks(ticks)
ax_xfinal_all[1].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[2].set_ylabel("Angular Deviation $\sum_{t} \Theta_t^2$")
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig5_vehicles_comparison.eps'
fig_xfinal_all.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
######################### Let's Reproduce the Table #########################
print("Amb X Final Avg. Min. Max. ")
final_metric_ego = [all_x[0,-1,-1] for all_x in all_xamb_ego]
final_metric_pro = [all_x[0,-1,-1] for all_x in all_xamb_pro]
final_metric_altru = [all_x[0,-1,-1] for all_x in all_xamb_altru]
# print("Egoistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_ego[0,-1,-1], np.mean(all_xamb_ego[0,-1,:]), np.min(all_xamb_ego[0,-1,:]), np.max(all_xamb_ego[0,-1,:])))
# print("Prosocial & %.02f & %.02f & %.02f & %.02f"%(all_xamb_pro[0,-1,-1], np.mean(all_xamb_pro[0,-1,:]), np.min(all_xamb_pro[0,-1,:]), np.max(all_xamb_pro[0,-1,:])))
# print("Altruistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_altru[0,-1,-1], np.mean(all_xamb_altru[0,-1,:]), np.min(all_xamb_altru[0,-1,:]), np.max(all_xamb_altru[0,-1,:])))
print("Egoistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_ego), np.std(final_metric_ego), np.min(final_metric_ego), np.max(final_metric_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_pro), np.std(final_metric_pro), np.min(final_metric_pro), np.max(final_metric_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_altru), np.std(final_metric_altru), np.min(final_metric_altru), np.max(final_metric_altru)))
final_metric_ego = [t_final[:,-1] for t_final in all_tfinalamb_ego]
final_metric_pro = [t_final[:,-1] for t_final in all_tfinalamb_pro]
final_metric_altru = [t_final[:,-1] for t_final in all_tfinalamb_altru]
# print(all_tfinalamb_ego[0].shape)
# print(final_metric_ego)
# print(final_metric_ego.shape)
# print("Egoistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_ego[0,-1,-1], np.mean(all_xamb_ego[0,-1,:]), np.min(all_xamb_ego[0,-1,:]), np.max(all_xamb_ego[0,-1,:])))
# print("Prosocial & %.02f & %.02f & %.02f & %.02f"%(all_xamb_pro[0,-1,-1], np.mean(all_xamb_pro[0,-1,:]), np.min(all_xamb_pro[0,-1,:]), np.max(all_xamb_pro[0,-1,:])))
# print("Altruistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_altru[0,-1,-1], np.mean(all_xamb_altru[0,-1,:]), np.min(all_xamb_altru[0,-1,:]), np.max(all_xamb_altru[0,-1,:])))
print("Time To "+str(x_goal)+"m")
print("Egoistic & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_ego), np.std(final_metric_ego), np.min(final_metric_ego), np.max(final_metric_ego),len(final_metric_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_pro), np.std(final_metric_pro), np.min(final_metric_pro), np.max(final_metric_pro),len(final_metric_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_altru), np.std(final_metric_altru), np.min(final_metric_altru), np.max(final_metric_altru),len(final_metric_altru)))
print("Veh 1 Final Avg. Min. Max. ")
i = 0
veh_displace_ego = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_ego]
veh_displace_pro = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_pro]
veh_displace_altru = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_altru]
print(" ")
print("Egoistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(veh_displace_ego), np.std(veh_displace_ego), np.min(veh_displace_ego), np.max(veh_displace_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f "%(np.mean(veh_displace_pro), np.std(veh_displace_pro), np.min(veh_displace_pro), np.max(veh_displace_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f "%( np.mean(veh_displace_altru), np.std(veh_displace_altru), np.min(veh_displace_altru), np.max(veh_displace_altru)))
if PLOT:
plt.show()
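# To write the .eps figures to disk and show the plots interactively, set the
# SAVE and PLOT flags defined near the top of this script to True before running.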
| 39.679803
| 192
| 0.665798
| 3,690
| 24,165
| 3.989431
| 0.11084
| 0.009374
| 0.025678
| 0.012227
| 0.595408
| 0.515114
| 0.458529
| 0.433734
| 0.416752
| 0.38727
| 0
| 0.109347
| 0.165901
| 24,165
| 608
| 193
| 39.745066
| 0.621006
| 0.228595
| 0
| 0.210526
| 0
| 0.002392
| 0.197289
| 0.12214
| 0.002392
| 0
| 0
| 0
| 0
| 1
| 0.002392
| false
| 0.002392
| 0.0311
| 0
| 0.035885
| 0.088517
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0613ddb7599b3120261ade10d3011d5c27649921
| 2,082
|
py
|
Python
|
AI_maker/celule_leucemie.py
|
pamintandrei/Tiroidaptinfoed
|
2671f219de2ef8ecf68ae7a932ed82462365d889
|
[
"MIT"
] | 5
|
2019-06-10T10:42:22.000Z
|
2019-07-10T14:05:13.000Z
|
AI_maker/celule_leucemie.py
|
pamintandrei/Tiroidaptinfoed
|
2671f219de2ef8ecf68ae7a932ed82462365d889
|
[
"MIT"
] | null | null | null |
AI_maker/celule_leucemie.py
|
pamintandrei/Tiroidaptinfoed
|
2671f219de2ef8ecf68ae7a932ed82462365d889
|
[
"MIT"
] | 2
|
2018-08-30T14:36:20.000Z
|
2019-06-17T13:07:18.000Z
|
import numpy as np
from tensorflow.keras.callbacks import TensorBoard
import cv2
import sys
import threading
import keras
from keras.layers import Conv2D,Dense,MaxPooling2D,Flatten,BatchNormalization,Dropout
from IPython.display import display
from PIL import Image
import tensorflow as tf
np.random.seed(1)
with tf.device('/gpu:0'):
keras_data=keras.preprocessing.image.ImageDataGenerator()
path1="D:\\tiroida\\celule\\leucemie_train"
date1 = keras_data.flow_from_directory(path1, target_size = (450, 450),batch_size=32, classes = ["normal","leucemie"], class_mode = "binary")
path2="D:\\tiroida\\celule\\leucemie_test"
date2 = keras_data.flow_from_directory(path2, target_size = (450, 450),batch_size=100, classes = ["normal","leucemie"], class_mode = "binary")
tfmodel=keras.models.Sequential()
tfmodel.add(Conv2D(filters=4,kernel_size=(3,3), padding='same',activation="relu",input_shape=(450,450,3)))
tfmodel.add(MaxPooling2D(pool_size=(2,2)))
tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
tfmodel.add(BatchNormalization())
tfmodel.add(MaxPooling2D(pool_size=(2,2)))
tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
tfmodel.add(Conv2D(filters=16, kernel_size=(3,3), activation="relu",padding='same'))
tfmodel.add(BatchNormalization())
tfmodel.add(MaxPooling2D(pool_size=(2,2)))
tfmodel.add(Flatten())
tfmodel.add(Dense(16, activation="relu"))
tfmodel.add(Dense(1, activation="sigmoid"))
tfmodel.compile(optimizer='Adam',loss="binary_crossentropy", metrics=["accuracy"])
checkpoint = keras.callbacks.ModelCheckpoint(filepath='leucemie.h5', save_best_only=True,monitor='val_acc')
tfmodel.fit_generator(date1,validation_data=date2,epochs=10,steps_per_epoch=100,validation_steps=1,callbacks=[checkpoint])
model=keras.models.load_model('leucemie.h5')
print(model.evaluate_generator(date2,steps=1))
input()
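# A small inference sketch: score one batch of 100 test images with the
# checkpointed model (class indices: 0 = "normal", 1 = "leucemie").
x_batch, y_batch = next(date2)
probs = model.predict(x_batch)
print((probs > 0.5).astype(int)[:5])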
| 50.780488
| 146
| 0.739193
| 280
| 2,082
| 5.371429
| 0.378571
| 0.086436
| 0.053191
| 0.076463
| 0.384973
| 0.350399
| 0.269282
| 0.269282
| 0.269282
| 0.269282
| 0
| 0.042941
| 0.105187
| 2,082
| 41
| 147
| 50.780488
| 0.764359
| 0
| 0
| 0.216216
| 0
| 0
| 0.108497
| 0.033125
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.27027
| 0
| 0.27027
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06155bb97d79c4a708e108ac4d37d0955dc2bd9c
| 3,002
|
py
|
Python
|
test.py
|
mricaldone/Gramatica
|
a7e2ff933fe875f5b8a95338c2c312f403ba5679
|
[
"MIT"
] | null | null | null |
test.py
|
mricaldone/Gramatica
|
a7e2ff933fe875f5b8a95338c2c312f403ba5679
|
[
"MIT"
] | null | null | null |
test.py
|
mricaldone/Gramatica
|
a7e2ff933fe875f5b8a95338c2c312f403ba5679
|
[
"MIT"
] | null | null | null |
import Gramatica
def testSeparadorDeSilabas(entrada, esperado):
try:
salida = Gramatica.separarEnSilabas(entrada)
except Gramatica.NoHayVocal:
print("[ERROR]","Salida esperada:", "\"" + esperado + "\"", "|", "Salida obtenida:", "Excepcion: No hay vocal")
return
if esperado != salida:
print("[ERROR]","Salida esperada:", "\"" + esperado + "\"", "|", "Salida obtenida:", "\"" + salida + "\"")
else:
print("[OK]","Entrada:", "\"" + entrada + "\"", "|", "Salida:", "\"" + salida + "\"")
testSeparadorDeSilabas("AprEnDer", "A-prEn-Der")
testSeparadorDeSilabas("ÉpiCo", "É-pi-Co")
testSeparadorDeSilabas("PÓDIO", "PÓ-DIO")
testSeparadorDeSilabas("aprender", "a-pren-der")
testSeparadorDeSilabas("tabla", "ta-bla")
testSeparadorDeSilabas("ratón", "ra-tón")
testSeparadorDeSilabas("épico", "é-pi-co")
testSeparadorDeSilabas("brocha", "bro-cha") # grupos consonanticos br, cr, dr, gr, fr, kr, tr, bl, cl, gl, fl, kl, pl son inseparables
testSeparadorDeSilabas("abrazo", "a-bra-zo")
testSeparadorDeSilabas("submarino", "sub-ma-ri-no") # los prefijos pueden o no separarse
testSeparadorDeSilabas("perspicacia", "pers-pi-ca-cia") # 3 consonantes consecutivas, 2 van a la silaba anterior y 1 a la siguiente
testSeparadorDeSilabas("conspirar", "cons-pi-rar")
testSeparadorDeSilabas("obscuro", "obs-cu-ro")
testSeparadorDeSilabas("irreal", "i-rre-al") # no se pueden separar las rr
testSeparadorDeSilabas("acallar", "a-ca-llar") # no se pueden separar las ll
testSeparadorDeSilabas("abstracto", "abs-trac-to") # 4 consonantes consecutivas, 2 van a la silaba anterior y 2 a la siguiente
testSeparadorDeSilabas("rubia", "ru-bia") # los diptongos no se separan
testSeparadorDeSilabas("labio", "la-bio")
testSeparadorDeSilabas("caigo", "cai-go")
testSeparadorDeSilabas("oigo", "oi-go")
testSeparadorDeSilabas("descafeinado", "des-ca-fei-na-do")
testSeparadorDeSilabas("diurno", "diur-no")
testSeparadorDeSilabas("ruido", "rui-do")
testSeparadorDeSilabas("pódio", "pó-dio")
testSeparadorDeSilabas("aplanar", "a-pla-nar")
testSeparadorDeSilabas("ocre", "o-cre")
testSeparadorDeSilabas("archi", "ar-chi")
testSeparadorDeSilabas("leer", "le-er")
testSeparadorDeSilabas("caos", "ca-os")
testSeparadorDeSilabas("baúl", "ba-úl")
testSeparadorDeSilabas("ambiguo", "am-bi-guo")
testSeparadorDeSilabas("antifaz", "an-ti-faz")
testSeparadorDeSilabas("transplantar", "trans-plan-tar")
testSeparadorDeSilabas("substraer", "subs-tra-er")
testSeparadorDeSilabas("abstraer", "abs-tra-er")
testSeparadorDeSilabas("abstracto", "abs-trac-to")
testSeparadorDeSilabas("pingüino", "pin-güi-no")
testSeparadorDeSilabas("vergüenza", "ver-güen-za")
testSeparadorDeSilabas("bilingüe", "bi-lin-güe")
testSeparadorDeSilabas("baúl ocre", "ba-úl o-cre")
testSeparadorDeSilabas("", "")
testSeparadorDeSilabas(" ", " ")
testSeparadorDeSilabas(" ", " ")
testSeparadorDeSilabas("k", "k")
testSeparadorDeSilabas("1", "1")
testSeparadorDeSilabas("abstraer abstracto", "abs-tra-er abs-trac-to")
| 50.033333
| 134
| 0.72052
| 320
| 3,002
| 6.759375
| 0.54375
| 0.005548
| 0.012483
| 0.022191
| 0.264448
| 0.17938
| 0.084142
| 0.041609
| 0.041609
| 0
| 0
| 0.002956
| 0.098601
| 3,002
| 60
| 135
| 50.033333
| 0.796378
| 0.118255
| 0
| 0.087719
| 0
| 0
| 0.324621
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017544
| false
| 0
| 0.017544
| 0
| 0.052632
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
061561270f389e6138b7861cea448dfbc7f9b7ae
| 1,201
|
py
|
Python
|
web/scripts/minify_json.py
|
albertomh/SqueezeCompass
|
30365fd6f1bf8ceca2c2fa7e4c8e15d4d9a85f1f
|
[
"MIT"
] | null | null | null |
web/scripts/minify_json.py
|
albertomh/SqueezeCompass
|
30365fd6f1bf8ceca2c2fa7e4c8e15d4d9a85f1f
|
[
"MIT"
] | null | null | null |
web/scripts/minify_json.py
|
albertomh/SqueezeCompass
|
30365fd6f1bf8ceca2c2fa7e4c8e15d4d9a85f1f
|
[
"MIT"
] | null | null | null |
#
# Minify JSON data files in the `/dist` directory.
# Script invoked by the npm postbuild script after building the project with `npm run build`.
#
from os import (
path,
listdir,
fsdecode
)
import json
from datetime import datetime
class JSONMinifier:
# Note: both constants currently resolve to the same `/dist/assets/data` directory.
DIST_CONSTITUENT_DATA_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..', 'dist', 'assets', 'data'))
DIST_SNAPSHOT_DATA_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..', 'dist', 'assets', 'data'))
def minify_json(self, directory):
for file in listdir(directory):
filename = fsdecode(file)
if filename.endswith(".json"):
with open(path.join(directory, filename), "r+") as f:
data = json.loads(f.read())
f.seek(0)
f.write(json.dumps(data, separators=(',', ':')))
f.truncate()
print(f"{datetime.now().strftime('%Y/%m/%d %H:%M:%S')} | Minified {filename}")
if __name__ == '__main__':
minifier = JSONMinifier()
minifier.minify_json(minifier.DIST_CONSTITUENT_DATA_DIRECTORY)
minifier.minify_json(minifier.DIST_SNAPSHOT_DATA_DIRECTORY)
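# Hypothetical package.json wiring for the postbuild hook mentioned in the
# header (the exact script path in this repo may differ):
#   "postbuild": "python scripts/minify_json.py"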
| 34.314286
| 117
| 0.623647
| 141
| 1,201
| 5.092199
| 0.460993
| 0.05571
| 0.052925
| 0.077994
| 0.253482
| 0.169916
| 0.169916
| 0.169916
| 0.169916
| 0.169916
| 0
| 0.001094
| 0.238968
| 1,201
| 34
| 118
| 35.323529
| 0.784464
| 0.11657
| 0
| 0
| 0
| 0.041667
| 0.110795
| 0.032197
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.291667
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ae044bb52fdc9d56a4ae83f40e90c43b75adb5a4
| 13,751
|
py
|
Python
|
CPU-Name.py
|
acidburn0zzz/CPU-Name
|
2322da712a9ac47f38f22a43bf9bcbc0240e062b
|
[
"MIT"
] | 1
|
2021-11-30T18:35:46.000Z
|
2021-11-30T18:35:46.000Z
|
CPU-Name.py
|
acidburn0zzz/CPU-Name
|
2322da712a9ac47f38f22a43bf9bcbc0240e062b
|
[
"MIT"
] | null | null | null |
CPU-Name.py
|
acidburn0zzz/CPU-Name
|
2322da712a9ac47f38f22a43bf9bcbc0240e062b
|
[
"MIT"
] | null | null | null |
import subprocess
import platform
from Scripts import plist, utils
class CPUName:
def __init__(self, **kwargs):
self.u = utils.Utils("CPU-Name")
self.plist_path = None
self.plist_data = {}
self.clear_empty = True
self.detected = self.detect_cores()
self.cpu_model = self.detect_cpu_model()
def ensure_path(self, plist_data, path_list, final_type = list):
if not path_list: return plist_data
last = plist_data
for index,path in enumerate(path_list):
if not path in last:
if index >= len(path_list)-1:
last[path] = final_type()
else:
last[path] = {}
last = last[path]
return plist_data
def select_plist(self):
while True:
self.u.head("Select Plist")
print("")
print("M. Return To Menu")
print("Q. Quit")
print("")
plist_path = self.u.grab("Please drag and drop your config.plist here: ")
if not len(plist_path): continue
elif plist_path.lower() == "m": return
elif plist_path.lower() == "q": self.u.custom_quit()
path_checked = self.u.check_path(plist_path)
if not path_checked: continue
# Got a valid path here - let's try to load it
try:
with open(path_checked,"rb") as f:
plist_data = plist.load(f)
if not isinstance(plist_data,dict):
raise Exception("Plist root is not a dictionary")
except Exception as e:
self.u.head("Error Loading Plist")
print("\nCould not load {}:\n\n{}\n\n".format(path_checked,repr(e)))
self.u.grab("Press [enter] to return...")
continue
# Got valid plist data - let's store the vars and return
self.plist_path = path_checked
self.plist_data = plist_data
return (path_checked,plist_data)
def get_value(self, plist_data, search="revcpuname"):
boot_args = plist_data.get("NVRAM",{}).get("Add",{}).get("7C436110-AB2A-4BBB-A880-FE41995C9F82",{}).get("boot-args","")
nvram_val = plist_data.get("NVRAM",{}).get("Add",{}).get("4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102",{}).get(search,"")
boota_val = ""
for arg in boot_args.split():
if not arg.startswith(search+"="): continue
boota_val = arg.split("=")[-1]
break # Only take the first instance
return (boota_val,nvram_val)
def get_cpu_name(self, plist_data):
return self.get_value(plist_data,"revcpuname")
def get_rev_cpu(self, plist_data):
return self.get_value(plist_data,"revcpu")
def get_proc_type(self, plist_data):
return plist_data.get("PlatformInfo",{}).get("Generic",{}).get("ProcessorType",0)
def get_kext(self, plist_data):
kext_list = plist_data.get("Kernel",{}).get("Add",[])
found = enabled = False
for kext in kext_list:
if kext.get("ExecutablePath","").lower() == "contents/macos/restrictevents":
found = True
if kext.get("Enabled"):
enabled = True
break
return (found,enabled)
def get_new_proc_type(self, plist_data):
while True:
p_type = self.get_proc_type(plist_data)
p_label = " (8+ Core)" if p_type == 3841 else " (1, 2, 4, or 6 Core)" if p_type == 1537 else " (Must be 0x0601 or 0x0F01 to work)"
self.u.head("ProcessorType")
print("")
print("Current Processor Type: {}{}".format(self.get_hex(p_type),p_label))
print("")
print("1. Set to 0x0601 for 1, 2, 4, or 6 Core")
print("2. Set to 0x0F01 for 8+ Core")
print("3. Reset to the default 0x00")
print("")
if self.detected != -1:
print("L. Use Local Machine's Value ({:,} Core{} = {})".format(self.detected, "" if self.detected==1 else "s", "0x0601" if self.detected < 8 else "0x0F01"))
print("M. Return To Menu")
print("Q. Quit")
print("")
proc = self.u.grab("Please select an option: ")
if not len(proc): continue
if proc.lower() == "m": return None
elif proc.lower() == "q": self.u.custom_quit()
elif proc == "1": return 1537
elif proc == "2": return 3841
elif proc == "3": return 0
elif self.detected != -1 and proc.lower() == "l": return 1537 if self.detected < 8 else 3841
def detect_cpu_model(self):
try:
_platform = platform.system().lower()
if _platform == "darwin":
return subprocess.check_output(["sysctl", "-n", "machdep.cpu.brand_string"]).decode().strip()
elif _platform == "windows":
return subprocess.check_output(["wmic", "cpu", "get", "Name"]).decode().split("\n")[1].strip()
elif _platform == "linux":
data = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode().split("\n")
for line in data:
if line.startswith("model name"):
return ": ".join([x for x in line.split(": ")[1:]])
except:
pass
return ""
def detect_cores(self):
try:
_platform = platform.system().lower()
if _platform == "darwin":
return int(subprocess.check_output(["sysctl", "-a", "machdep.cpu.core_count"]).decode().split(":")[1].strip())
elif _platform == "windows":
return int(subprocess.check_output(["wmic", "cpu", "get", "NumberOfCores"]).decode().split("\n")[1].strip())
elif _platform == "linux":
data = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode().split("\n")
for line in data:
if line.startswith("cpu cores"):
return int(line.split(":")[1].strip())
except:
pass
return -1
def set_values(self, revcpu, cpuname, proctype, plist_data):
# Clear any prior values and ensure pathing
plist_data = self.clear_values(plist_data)
plist_data = self.ensure_path(plist_data,["NVRAM","Add","4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"],dict)
plist_data = self.ensure_path(plist_data,["PlatformInfo","Generic","ProcessorType"],int)
# Set our new values
plist_data["NVRAM"]["Add"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"]["revcpu"] = revcpu
plist_data["NVRAM"]["Add"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"]["revcpuname"] = cpuname
plist_data["PlatformInfo"]["Generic"]["ProcessorType"] = proctype
return plist_data
def clear_values(self, plist_data):
# Ensure Delete values exist so we can prevent old values from sticking
plist_data = self.ensure_path(plist_data,["NVRAM","Delete","4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"],list)
plist_data = self.ensure_path(plist_data,["NVRAM","Delete","7C436110-AB2A-4BBB-A880-FE41995C9F82"],list)
# Gather our values
boot_args = plist_data["NVRAM"].get("Add",{}).get("7C436110-AB2A-4BBB-A880-FE41995C9F82",{}).get("boot-args","")
nv_a_val = plist_data["NVRAM"].get("Add",{}).get("4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102",{})
nv_d_val = plist_data["NVRAM"]["Delete"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"]
# Walk boot args to see if we use any revcpu* values and remove them
if any(x in boot_args for x in ("revcpu=","revcpuname=")):
boot_args = " ".join([x for x in boot_args.split() if not x.startswith(("revcpu=","revcpuname="))])
plist_data["NVRAM"]["Add"]["7C436110-AB2A-4BBB-A880-FE41995C9F82"]["boot-args"] = boot_args
# Remove them from the NVRAM -> Add section
if any(x in nv_a_val for x in ("revcpu","revcpuname")):
for x in ("revcpu","revcpuname"):
nv_a_val.pop(x,None)
if nv_a_val:
plist_data["NVRAM"]["Add"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"] = nv_a_val
elif self.clear_empty:
# Clean out the UUID if empty
plist_data["NVRAM"]["Add"].pop("4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102",None)
# Ensure they remain in the NVRAM -> Delete section to prevent stuck values
for x in ("revcpu","revcpuname"):
if x in nv_d_val: continue
nv_d_val.append(x)
# Make sure we override boot-args to avoid any stickage too
if not "boot-args" in plist_data["NVRAM"]["Delete"]["7C436110-AB2A-4BBB-A880-FE41995C9F82"]:
plist_data["NVRAM"]["Delete"]["7C436110-AB2A-4BBB-A880-FE41995C9F82"].append("boot-args")
plist_data["NVRAM"]["Delete"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"] = nv_d_val
if plist_data.get("PlatformInfo",{}).get("Generic",{}).get("ProcessorType",0) != 0:
plist_data["PlatformInfo"]["Generic"]["ProcessorType"] = 0
return plist_data
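    # Net effect of clear_values (summary): revcpu/revcpuname are stripped from
    # boot-args and from NVRAM -> Add, then listed under NVRAM -> Delete (along
    # with boot-args) so OpenCore clears any copies already stuck in NVRAM, and
    # ProcessorType is reset to 0.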
def get_hex(self, value, pad_to=2):
if not isinstance(value,int): return ""
        h = hex(value)[2:]
        # Left-pad with zeros so the hex digit count is a multiple of pad_to
        return "0x"+("0"*((pad_to-len(h)%pad_to)%pad_to))+h.upper()
def get_new_cpu_name(self, plist_data):
while True:
cpu_nam = self.get_cpu_name(plist_data)
self.u.head("New CPU Name")
print("")
print("Current CPU Name: {}".format(cpu_nam[0]+" (boot-arg)" if cpu_nam[0] else cpu_nam[1] if cpu_nam[1] else "Not Set"))
print("")
if self.cpu_model:
print("L. Use Local Machine's Value ({})".format(self.cpu_model))
print("M. Return To Menu")
print("Q. Quit")
print("")
name = self.u.grab("Please enter a new CPU name: ")
if not len(name): continue
elif name.lower() == "m": return
elif name.lower() == "q": self.u.custom_quit()
elif self.cpu_model and name.lower() == "l": return self.cpu_model
return name
def save_plist(self):
try:
with open(self.plist_path,"wb") as f:
plist.dump(self.plist_data,f)
except Exception as e:
self.u.head("Error Saving Plist")
print("\nCould not save {}:\n\n{}\n\n".format(self.plist_path,repr(e)))
self.u.grab("Press [enter] to return...")
return False
return True
def main(self):
while True:
cpu_rev = self.get_rev_cpu(self.plist_data)
cpu_nam = self.get_cpu_name(self.plist_data)
p_type = self.get_proc_type(self.plist_data)
p_label = " (8+ Core)" if p_type == 3841 else " (1, 2, 4, or 6 Core)" if p_type == 1537 else " (Must be 0x0601 or 0x0F01 to work!)"
f,e = self.get_kext(self.plist_data)
k_label = "Not Found (Must be present and Enabled to work!)" if not f else "Disabled (Must be Enabled to work!)" if not e else "Found and Enabled"
self.u.head()
print("")
print("Selected Plist: {}".format(self.plist_path))
print("Rev CPU Name: {}".format("" if not self.plist_path else cpu_nam[0]+" (boot-arg)" if cpu_nam[0] else cpu_nam[1] if cpu_nam[1] else "Not Set"))
print("Rev CPU: {}".format("" if not self.plist_path else cpu_rev[0]+" (boot-arg)" if cpu_rev[0] else cpu_rev[1] if cpu_rev[1] else "Not Set"))
print("Processor Type: {}{}".format("" if not self.plist_path else self.get_hex(p_type),"" if not self.plist_path else p_label))
print("RestrictEvents: {}".format("" if not self.plist_path else k_label))
print("")
print("Note: Changes are saved to the target plist immediately.")
print(" Make sure you keep a backup!")
print("")
print("1. Change CPU Name")
print("2. Change Processor Type")
print("3. Clear CPU Name, Rev CPU, and Processor Type")
print("4. Select Plist")
print("")
print("Q. Quit")
print("")
menu = self.u.grab("Please select an option: ")
if not len(menu): continue
elif menu.lower() == "q": self.u.custom_quit()
if menu in ("1","2","3") and not self.plist_path:
self.select_plist()
if not self.plist_path: continue
p_type = self.get_proc_type(self.plist_data) # Gather new proc type after loading
if menu == "1":
if not p_type in (3841,1537):
new_type = self.get_new_proc_type(self.plist_data)
if new_type is None: continue
p_type = new_type
new_name = self.get_new_cpu_name(self.plist_data)
if new_name is None: continue
self.plist_data = self.set_values(1,new_name,p_type,self.plist_data)
self.save_plist()
elif menu == "2":
new_type = self.get_new_proc_type(self.plist_data)
if new_type is None: continue
self.plist_data = self.ensure_path(self.plist_data,["PlatformInfo","Generic","ProcessorType"],int)
self.plist_data["PlatformInfo"]["Generic"]["ProcessorType"] = new_type
self.save_plist()
elif menu == "3":
self.plist_data = self.clear_values(self.plist_data)
self.save_plist()
elif menu == "4":
self.select_plist()
c = CPUName()
c.main()
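A minimal standalone sketch of the same kind of nested-plist edit the tool performs, using only the standard library's plistlib; the UUID is the OpenCore NVRAM GUID used above, while the CPU name is a made-up example:

import plistlib

uuid = "4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"
config = {"NVRAM": {"Add": {uuid: {}}}}
config["NVRAM"]["Add"][uuid]["revcpu"] = 1
config["NVRAM"]["Add"][uuid]["revcpuname"] = "My Custom CPU"
print(plistlib.dumps(config).decode())  # XML plist snippet, ready to merge by hand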
| 49.464029 | 172 | 0.563304 | 1,784 | 13,751 | 4.190022 | 0.142377 | 0.084281 | 0.046957 | 0.026756 | 0.504883 | 0.405485 | 0.342074 | 0.296187 | 0.265953 | 0.125753 | 0 | 0.046117 | 0.296706 | 13,751 | 277 | 173 | 49.642599 | 0.726812 | 0.042251 | 0 | 0.270161 | 0 | 0 | 0.201125 | 0.049483 | 0 | 0 | 0.003953 | 0 | 0 | 1 | 0.068548 | false | 0.008065 | 0.012097 | 0.012097 | 0.169355 | 0.165323 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ae046c38a2e79a1620b18d8e95f3afd8af8e8031 | 3,853 | py | Python | solvcon/parcel/gasplus/probe.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | ["BSD-3-Clause"] | 16 | 2015-12-09T02:54:42.000Z | 2021-04-20T11:26:39.000Z | solvcon/parcel/gasplus/probe.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | ["BSD-3-Clause"] | 95 | 2015-12-09T00:49:40.000Z | 2022-02-14T13:34:55.000Z | solvcon/parcel/gasplus/probe.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | ["BSD-3-Clause"] | 13 | 2015-05-08T04:16:42.000Z | 2021-01-15T09:28:06.000Z |
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016, Yung-Yu Chen <yyc@solvcon.net>
# BSD 3-Clause License, see COPYING
import os
import numpy as np
import solvcon as sc
class Probe(object):
"""
Represent a point in the mesh.
"""
def __init__(self, *args, **kw):
self.speclst = kw.pop('speclst')
self.name = kw.pop('name', None)
self.crd = np.array(args, dtype='float64')
self.pcl = -1
self.vals = list()
def __str__(self):
crds = ','.join(['%g'%val for val in self.crd])
return 'Pt/%s#%d(%s)%d' % (self.name, self.pcl, crds, len(self.vals))
def locate_cell(self, svr):
icl, ifl, jcl, jfl = svr.alg.locate_point(self.crd)
self.pcl = icl
def __call__(self, svr, time):
ngstcell = svr.ngstcell
vlist = [time]
for spec in self.speclst:
arr = None
if isinstance(spec, str):
arr = svr.der[spec] # FIXME: translate to qty
elif isinstance(spec, int):
if spec >= 0 and spec < svr.neq:
arr = svr.sol.so0n.F[:,spec]
elif spec < 0 and -1-spec < svr.neq:
spec = -1-spec
arr = svr.sol.so0c.F[:,spec]
if arr is None:
raise IndexError('spec %s incorrect'%str(spec))
vlist.append(arr[ngstcell+self.pcl])
self.vals.append(vlist)
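    # Assumed reading of the spec dispatch above: a str picks a derived array
    # from svr.der, an int in [0, neq) picks a conserved-variable column from
    # so0n, and a negative int -1-n picks column n from so0c; e.g.
    # speclst=['p', 0, -1] (the names here are illustrative only).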
class ProbeAnchor(sc.MeshAnchor):
"""
Anchor for probe.
"""
def __init__(self, svr, **kw):
speclst = kw.pop('speclst')
self.points = list()
for data in kw.pop('coords'):
pkw = {'speclst': speclst, 'name': data[0]}
self.points.append(Probe(*data[1:], **pkw))
super(ProbeAnchor, self).__init__(svr, **kw)
def preloop(self):
for point in self.points: point.locate_cell(self.svr)
for point in self.points: point(self.svr, self.svr.time)
def postfull(self):
for point in self.points: point(self.svr, self.svr.time)
class ProbeHook(sc.MeshHook):
"""
Point probe.
"""
def __init__(self, cse, **kw):
self.name = kw.pop('name', 'ppank')
super(ProbeHook, self).__init__(cse, **kw)
self.ankkw = kw
self.points = None
def drop_anchor(self, svr):
ankkw = self.ankkw.copy()
ankkw['name'] = self.name
self._deliver_anchor(svr, ProbeAnchor, ankkw)
def _collect(self):
cse = self.cse
if cse.is_parallel:
dom = cse.solver.domainobj
dealer = cse.solver.dealer
allpoints = list()
for iblk in range(dom.nblk):
dealer[iblk].cmd.pullank(self.name, 'points', with_worker=True)
allpoints.append(dealer[iblk].recv())
npt = len(allpoints[0])
points = [None]*npt
for rpoints in allpoints:
ipt = 0
while ipt < npt:
                    if points[ipt] is None and rpoints[ipt].pcl >= 0:
points[ipt] = rpoints[ipt]
ipt += 1
else:
svr = self.cse.solver.solverobj
points = [pt for pt in svr.runanchors[self.name].points
if pt.pcl >= 0]
self.points = points
def postmarch(self):
psteps = self.psteps
istep = self.cse.execution.step_current
if istep%psteps != 0: return False
self._collect()
return True
def postloop(self):
for point in self.points:
ptfn = '%s_pt_%s_%s.npy' % (
self.cse.io.basefn, self.name, point.name)
ptfn = os.path.join(self.cse.io.basedir, ptfn)
np.save(ptfn, np.array(point.vals, dtype='float64'))
# vim: set ff=unix fenc=utf8 ft=python nobomb et sw=4 ts=4 tw=79:
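A small sketch of consuming ProbeHook's output; the file name is hypothetical, and the layout follows Probe.__call__ above, where each record is [time, *probed values]:

import numpy as np

vals = np.load('case_pt_ppank_p1.npy')  # shape: (num_records, 1 + len(speclst))
time = vals[:, 0]                       # first column is the solver time
series = vals[:, 1:]                    # remaining columns follow speclst order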
| 30.101563 | 79 | 0.534908 | 497 | 3,853 | 4.062374 | 0.336016 | 0.031204 | 0.019812 | 0.027737 | 0.10847 | 0.070827 | 0.05894 | 0.042595 | 0.042595 | 0.042595 | 0 | 0.01165 | 0.33169 | 3,853 | 127 | 80 | 30.338583 | 0.772427 | 0.066701 | 0 | 0.022222 | 0 | 0 | 0.033004 | 0 | 0 | 0 | 0 | 0.007874 | 0 | 1 | 0.133333 | false | 0 | 0.033333 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ae059eac36d79675fbab914a2bbf4174d3306bb6 | 8,600 | py | Python | data/dataset.py | 1chimaruGin/EfficientDet | 8adf636db1f7c5c64b65c1e897a0d18f682e6251 | ["Apache-2.0"] | 9 | 2020-09-02T09:53:04.000Z | 2022-01-16T11:16:57.000Z | data/dataset.py | 1chimaruGin/EfficientDet | 8adf636db1f7c5c64b65c1e897a0d18f682e6251 | ["Apache-2.0"] | null | null | null | data/dataset.py | 1chimaruGin/EfficientDet | 8adf636db1f7c5c64b65c1e897a0d18f682e6251 | ["Apache-2.0"] | 1 | 2021-06-15T15:55:46.000Z | 2021-06-15T15:55:46.000Z |
""" COCO dataset (quick and dirty)
Hacked together by Ross Wightman
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import os
import cv2
import random
import torch
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
class CocoDetection(data.Dataset):
"""`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
ann_file (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
"""
def __init__(self, root, ann_file, transform=None):
super(CocoDetection, self).__init__()
        if isinstance(root, str):  # torch._six was removed in newer PyTorch; str is the py3 equivalent
root = os.path.expanduser(root)
self.root = root
self.transform = transform
self.yxyx = True # expected for TF model, most PT are xyxy
self.include_masks = False
self.include_bboxes_ignore = False
self.has_annotations = 'image_info' not in ann_file
self.coco = None
self.cat_ids = []
self.cat_to_label = dict()
self.img_ids = []
self.img_ids_invalid = []
self.img_infos = []
self._load_annotations(ann_file)
def _load_annotations(self, ann_file):
assert self.coco is None
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
img_ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for img_id in sorted(self.coco.imgs.keys()):
info = self.coco.loadImgs([img_id])[0]
valid_annotation = not self.has_annotations or img_id in img_ids_with_ann
if valid_annotation and min(info['width'], info['height']) >= 32:
self.img_ids.append(img_id)
self.img_infos.append(info)
else:
self.img_ids_invalid.append(img_id)
def _parse_img_ann(self, img_id, img_info):
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
bboxes = []
bboxes_ignore = []
cls = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if self.include_masks and ann['area'] <= 0:
continue
if w < 1 or h < 1:
continue
# To subtract 1 or not, TF doesn't appear to do this so will keep it out for now.
if self.yxyx:
#bbox = [y1, x1, y1 + h - 1, x1 + w - 1]
bbox = [y1, x1, y1 + h, x1 + w]
else:
#bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
if self.include_bboxes_ignore:
bboxes_ignore.append(bbox)
else:
bboxes.append(bbox)
cls.append(self.cat_to_label[ann['category_id']] if self.cat_to_label else ann['category_id'])
if bboxes:
bboxes = np.array(bboxes, dtype=np.float32)
cls = np.array(cls, dtype=np.int64)
else:
bboxes = np.zeros((0, 4), dtype=np.float32)
cls = np.array([], dtype=np.int64)
if self.include_bboxes_ignore:
if bboxes_ignore:
bboxes_ignore = np.array(bboxes_ignore, dtype=np.float32)
else:
bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
ann = dict(img_id=img_id, bbox=bboxes, cls=cls, img_size=(img_info['width'], img_info['height']))
if self.include_bboxes_ignore:
ann['bbox_ignore'] = bboxes_ignore
return ann
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, annotations (target)).
"""
img_id = self.img_ids[index]
img_info = self.img_infos[index]
if self.has_annotations:
ann = self._parse_img_ann(img_id, img_info)
else:
ann = dict(img_id=img_id, img_size=(img_info['width'], img_info['height']))
path = img_info['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
img, ann = self.transform(img, ann)
return img, ann
def __len__(self):
return len(self.img_ids)
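# Example usage of CocoDetection (paths are hypothetical):
#   dataset = CocoDetection(root='coco/train2017',
#                           ann_file='coco/annotations/instances_train2017.json')
#   img, ann = dataset[0]  # ann carries 'img_id', 'bbox' (yxyx), 'cls', 'img_size'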
class Custom_Dataset(data.Dataset):
def __init__(self, root, data, image_ids, transform=None, test=False):
self.root = root
self.data = data
self.image_ids = image_ids
self.transform = transform
self.test = test
def _load_data(self, index):
image_id = self.image_ids[index]
image = cv2.imread(f'{self.root}/{image_id}.jpg', cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
record = self.data[self.data['image_id'] == image_id]
boxes = record[['x', 'y', 'w', 'h']].values
boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
return image, boxes
def _load_cutmix_data(self, index, imgsize=1024):
w, h = imgsize, imgsize
s = imgsize // 2
xc, yc = [int(random.uniform(imgsize * .25, imgsize * .75)) for _ in range(2)]
indexes = [index] + [random.randint(0, self.image_ids.shape[0] - 1) for _ in range(3)]
result_image = np.full((imgsize, imgsize, 3), 1, dtype=np.float32)
result_boxes = []
for i, index in enumerate(indexes):
image, boxes = self._load_data(index)
if i == 0:
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
result_image[y1a:y2a, x1a:x2a] = image[y1b:y2b, x1b:x2b]
padw = x1a - x1b
padh = y1a - y1b
boxes[:, 0] += padw
boxes[:, 1] += padh
boxes[:, 2] += padw
boxes[:, 3] += padh
result_boxes.append(boxes)
result_boxes = np.concatenate(result_boxes, 0)
np.clip(result_boxes[:, 0:], 0, 2 * s, out=result_boxes[:, 0:])
result_boxes = result_boxes.astype(np.int32)
result_boxes = result_boxes[np.where((result_boxes[:, 2] - result_boxes[:, 0]) * (result_boxes[:, 3] - result_boxes[:, 1]) > 0)]
return result_image, result_boxes
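    # Summary of the mosaic above: each source image is pasted into one quadrant
    # of the 2s x 2s canvas around the random center (xc, yc); boxes are shifted
    # by the paste offset (padw, padh), clipped to the canvas, and zero-area
    # boxes are dropped.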
def __getitem__(self, index: int):
image_id = self.image_ids[index]
        if self.test or random.random() > 0.35:
            image, boxes = self._load_data(index)
        else:
            # The original elif/else branches were identical, so they collapse
            # into a single cutmix path (~35% of training samples).
            image, boxes = self._load_cutmix_data(index)
labels = torch.ones((boxes.shape[0]), dtype=torch.int64)
target = {}
target['boxes'] = boxes
target['labels'] = labels
target['image_id'] = torch.tensor(index)
if self.transform:
for i in range(10):
sample = self.transform(**{
'image': image,
'bboxes': target['boxes'],
'labels': labels
})
if len(sample['bboxes']) > 0:
image = sample['image']
target['boxes'] = torch.stack(tuple(map(torch.tensor, zip(*sample['bboxes'])))).permute(1, 0)
target['boxes'][:, [0, 1, 2, 3]] = target['boxes'][:, [1, 0, 3, 2]]
break
return image, target, image_id
def __len__(self) -> int:
return self.image_ids.shape[0]
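A minimal sketch of wiring Custom_Dataset into a DataLoader, assuming it runs in the same module as the class above; the CSV layout ('image_id', 'x', 'y', 'w', 'h' columns) and the file paths are assumptions, not taken from this repo:

import pandas as pd
import torch.utils.data

df = pd.read_csv('train.csv')                      # hypothetical annotation table
ids = df['image_id'].unique()
ds = Custom_Dataset('images', df, ids, test=True)  # test=True bypasses cutmix
loader = torch.utils.data.DataLoader(
    ds, batch_size=4, collate_fn=lambda batch: tuple(zip(*batch)))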
| 37.391304 | 136 | 0.546395 | 1,136 | 8,600 | 3.964789 | 0.206866 | 0.036634 | 0.013321 | 0.020426 | 0.160524 | 0.108348 | 0.062167 | 0.031528 | 0.008881 | 0 | 0 | 0.034287 | 0.325116 | 8,600 | 230 | 137 | 37.391304 | 0.74173 | 0.087209 | 0 | 0.132184 | 0 | 0 | 0.029385 | 0.003351 | 0 | 0 | 0 | 0 | 0.005747 | 1 | 0.057471 | false | 0 | 0.063218 | 0.011494 | 0.172414 | 0.005747 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ae0b04625ca9a862eb715fd13d3b553a6fb19211 | 12,715 | py | Python | test/abstract_lut_test.py | sgtm/ColorPipe-tools | 971b546f77b0d1a6e5ee3aa7e4077a9d41c6e59b | ["BSD-3-Clause"] | 1 | 2021-06-21T13:35:20.000Z | 2021-06-21T13:35:20.000Z | test/abstract_lut_test.py | sgtm/ColorPipe-tools | 971b546f77b0d1a6e5ee3aa7e4077a9d41c6e59b | ["BSD-3-Clause"] | null | null | null | test/abstract_lut_test.py | sgtm/ColorPipe-tools | 971b546f77b0d1a6e5ee3aa7e4077a9d41c6e59b | ["BSD-3-Clause"] | null | null | null |
""" Testing Abstract LUT model
"""
import unittest
import os
import shutil
import tempfile
from PyOpenColorIO.Constants import INTERP_LINEAR, INTERP_TETRAHEDRAL
from utils import lut_presets as presets
from utils.lut_presets import PresetException, OUT_BITDEPTH
import utils.abstract_lut_helper as alh
from utils.colorspaces import REC709, SGAMUTSLOG, ALEXALOGCV3
from utils.csp_helper import CSP_HELPER
from utils.cube_helper import CUBE_HELPER
from utils.threedl_helper import THREEDL_HELPER, SHAPER, MESH
from utils.spi_helper import SPI_HELPER
from utils.ascii_helper import ASCII_HELPER, AsciiHelperException
from utils.clcc_helper import CLCC_HELPER
from utils.json_helper import JSON_HELPER
from utils.ocio_helper import create_ocio_processor
from utils.lut_utils import get_input_range
DISPLAY = False
class AbstractLUTTest(unittest.TestCase):
""" Test export of different type of LUTs
"""
def setUp(self):
test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
self.tmp_dir = os.path.join(tempfile.gettempdir(), 'testCoPipe')
if not os.path.exists(self.tmp_dir):
os.mkdir(self.tmp_dir)
# create OCIO processor
lut1d = os.path.join(test_dir, 'CineonToLin_1D.csp')
lut3d = os.path.join(test_dir, 'saturation.3dl')
self.processor_1d = create_ocio_processor(lut1d,
interpolation=INTERP_LINEAR)
self.processor_3d = create_ocio_processor(lut3d,
interpolation=INTERP_TETRAHEDRAL)
self.helpers_1d_to_test = [
(CUBE_HELPER, '.cube'),
            (SPI_HELPER, '.spi1d'),
(CSP_HELPER, '.csp'),
]
self.helpers_3d_to_test = [
(CUBE_HELPER, '.cube', True),
            (SPI_HELPER, '.spi3d', True),
(CSP_HELPER, '.csp', True),
(THREEDL_HELPER, '.3dl', True),
(CLCC_HELPER, '.cc', False),
(JSON_HELPER, '.json', False)
]
def test_default_1d_lut(self):
""" Test a default 1d LUT export
"""
outlutfiles = []
for helper, ext in self.helpers_1d_to_test:
outlutfile = os.path.join(self.tmp_dir, "default_1D" + ext)
args_1d = helper.get_default_preset()
helper.write_1d_lut(self.processor_1d.applyRGB, outlutfile,
args_1d)
# create a processor and try it
proc = create_ocio_processor(outlutfile,
interpolation=INTERP_LINEAR)
proc.applyRGB([0, 0, 0])
proc.applyRGB([1, 1, 1])
outlutfiles.append(outlutfile)
if DISPLAY:
import plot_that_lut
plot_that_lut.plot_that_lut(outlutfiles)
def test_default_3d_lut(self):
""" Test a default 3d LUT export
"""
for helper, ext, ocio_compatible in self.helpers_3d_to_test:
outlutfile = os.path.join(self.tmp_dir, "default_3D" + ext)
args_3d = helper.get_default_preset()
helper.write_3d_lut(self.processor_3d.applyRGB,
outlutfile,
args_3d)
if ocio_compatible:
# create a processor and try it
proc = create_ocio_processor(outlutfile,
interpolation=INTERP_LINEAR)
proc.applyRGB([0, 0, 0])
proc.applyRGB([1, 1, 1])
if DISPLAY:
import plot_that_lut
plot_that_lut.plot_that_lut(outlutfile)
def test_check_attributes(self):
""" Test preset check function
"""
outlutfile = os.path.join(self.tmp_dir, "test.cube")
default_preset = presets.get_default_preset()
CUBE_HELPER.check_preset(default_preset)
# test missing attr
cust_preset = {}
self.assertRaises(presets.PresetException,
CUBE_HELPER.check_preset, cust_preset)
for attr in presets.BASIC_ATTRS:
cust_preset[attr] = default_preset[attr]
self.assertRaises(presets.PresetException,
CUBE_HELPER.check_preset, cust_preset)
## test specific attr
# change type to 1D
cust_preset[presets.TYPE] = '1D'
self.assertRaises(presets.PresetException,
CUBE_HELPER.check_preset, cust_preset)
cust_preset[presets.OUT_BITDEPTH] = 12
CUBE_HELPER.check_preset(cust_preset)
# try to write a 3D LUT with a 1D preset
self.assertRaises(alh.AbstractLUTException,
CUBE_HELPER.write_3d_lut,
self.processor_1d,
outlutfile,
cust_preset)
        # change type to 3D
cust_preset[presets.TYPE] = '3D'
self.assertRaises(presets.PresetException,
CUBE_HELPER.check_preset, cust_preset)
cust_preset[presets.CUBE_SIZE] = 17
CUBE_HELPER.check_preset(cust_preset)
# try to write a 1D LUT with a 3D preset
self.assertRaises(alh.AbstractLUTException,
CUBE_HELPER.write_1d_lut,
self.processor_1d,
outlutfile,
cust_preset)
        ## test value type
# cube size
cust_preset[presets.CUBE_SIZE] = presets.CUBE_SIZE_MAX_VALUE + 1
self.assertRaises(presets.PresetException,
CUBE_HELPER.check_preset, cust_preset)
cust_preset[presets.CUBE_SIZE] = default_preset[presets.CUBE_SIZE]
# range
tests = 'test', ['a', 'a'], [0.0, 0.5, 1.0], 0.1
for test in tests:
cust_preset[presets.IN_RANGE] = test
self.assertRaises(presets.PresetException,
CUBE_HELPER.check_preset,
cust_preset)
cust_preset[presets.IN_RANGE] = 0.1, 1
CUBE_HELPER.check_preset(cust_preset)
cust_preset[presets.IN_RANGE] = (0.1, 1)
CUBE_HELPER.check_preset(cust_preset)
def test_float_luts(self):
""" Test float LUT transparency
"""
helpers_float_to_test = [(CSP_HELPER, '.csp'),
(SPI_HELPER, '.spi1d')]
colorspace_to_test = [REC709, SGAMUTSLOG, ALEXALOGCV3]
delta = 0.00001
for helper, ext in helpers_float_to_test:
for colorspace in colorspace_to_test:
# define file name
name = colorspace.__class__.__name__
encode_filename = "linTo{0}_1D{1}".format(name, ext)
decode_filename = "{0}ToLin_1D{1}".format(name, ext)
encode_filepath = os.path.join(self.tmp_dir, encode_filename)
decode_filepath = os.path.join(self.tmp_dir, decode_filename)
# set preset
args_1d = CSP_HELPER.get_default_preset()
args_1d[presets.OUT_BITDEPTH] = 16
decode_min = colorspace.decode_gradation(0)
decode_max = colorspace.decode_gradation(1)
args_1d[presets.IN_RANGE] = get_input_range(colorspace,
"encode",
10)
# write encode LUT
helper.write_2d_lut(colorspace.encode_gradation,
encode_filepath,
args_1d)
# write decode LUT
args_1d[presets.IN_RANGE] = get_input_range(colorspace,
"decode",
10)
helper.write_2d_lut(colorspace.decode_gradation,
decode_filepath,
args_1d)
# test transparency
proc = create_ocio_processor(encode_filepath,
postlutfile=decode_filepath,
interpolation=INTERP_LINEAR)
test_values = [[decode_min] * 3,
[decode_max] * 3,
[0] * 3,
[0.5] * 3,
[1] * 3]
for rgb in test_values:
res = proc.applyRGB(rgb)
abs_value = abs(rgb[0] - res[0])
self.assertTrue(abs_value < delta,
"{0} transparency test failed : {1:8f} >"
" acceptable delta ({2:8f})".format(name,
abs_value,
delta)
)
def test_3dl_preset(self):
""" Test 3dl preset
"""
preset = presets.get_default_preset()
# test type must be 3D
self.assertRaises(presets.PresetException,
THREEDL_HELPER.check_preset,
preset
)
preset[presets.TYPE] = '3D'
# test shaper attr exists
self.assertRaises(presets.PresetException,
THREEDL_HELPER.check_preset,
preset
)
preset[SHAPER] = True
# test mesh attr exists
self.assertRaises(presets.PresetException,
THREEDL_HELPER.check_preset,
preset
)
preset[MESH] = True
# test preset is ok
THREEDL_HELPER.check_preset(preset)
# test ranges are int
outlutfile = os.path.join(self.tmp_dir, "test.3dl")
self.assertRaises(PresetException,
THREEDL_HELPER.write_3d_lut,
self.processor_3d.applyRGB,
outlutfile,
preset)
def test_ascii_lut(self):
""" Test ascii 1D / 2D export
"""
colorspace = REC709
# 2D LUT
outlutfile = os.path.join(self.tmp_dir, "default_2D.lut")
preset = ASCII_HELPER.get_default_preset()
ASCII_HELPER.write_2d_lut(colorspace.decode_gradation,
outlutfile,
preset)
# 1D LUT
outlutfile = os.path.join(self.tmp_dir, "default_1D.lut")
preset = ASCII_HELPER.get_default_preset()
ASCII_HELPER.write_1d_lut(colorspace.decode_gradation,
outlutfile,
preset)
# test out bit depth inadequate with output range
preset[OUT_BITDEPTH] = 12
self.assertRaises(AsciiHelperException, ASCII_HELPER.write_1d_lut,
colorspace.decode_gradation, outlutfile, preset)
def test_complete_attributes(self):
""" Test preset complete function
"""
colorspace = REC709
outlutfile = os.path.join(self.tmp_dir, "default_ascii_1D.lut")
default_preset = ASCII_HELPER.get_default_preset()
cust_preset = {}
cust_preset = ASCII_HELPER.complete_preset(cust_preset)
expression = set(default_preset).issubset(set(cust_preset))
self.assertTrue(expression,
("Something went wrong in preset completion :\n"
"Completed preset:\n{0}\nDefault one:\n{1}"
).format(cust_preset, default_preset))
ASCII_HELPER.check_preset(cust_preset)
# try to write a float ascii lut without forcing float mode
cust_preset[presets.IN_RANGE] = [0, 1.0]
self.assertRaises(PresetException, ASCII_HELPER.write_1d_lut,
colorspace.decode_gradation,
outlutfile,
cust_preset)
# force float mode
cust_preset[presets.IS_FLOAT] = True
ASCII_HELPER.write_1d_lut(colorspace.decode_gradation,
outlutfile,
cust_preset)
def tearDown(self):
# Remove test directory
shutil.rmtree(self.tmp_dir)
if __name__ == '__main__':
unittest.main()
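# How these tests are typically run (assuming the repo's utils package and the
# test/test_files fixtures are importable/present):
#   python test/abstract_lut_test.py
# or, via unittest discovery from the repo root:
#   python -m unittest test.abstract_lut_test
# Set DISPLAY = True above to also plot the exported LUTs with plot_that_lut.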
| 42.811448 | 83 | 0.533464 | 1,273 | 12,715 | 5.05813 | 0.146897 | 0.05125 | 0.047212 | 0.035875 | 0.462649 | 0.397733 | 0.382979 | 0.342289 | 0.307346 | 0.259823 | 0 | 0.020316 | 0.392214 | 12,715 | 296 | 84 | 42.956081 | 0.812888 | 0.073614 | 0 | 0.320175 | 0 | 0 | 0.034209 | 0.001796 | 0 | 0 | 0 | 0 | 0.070175 | 1 | 0.039474 | false | 0 | 0.087719 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |