Dataset schema (column name and dtype):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
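As a minimal sketch of how a local snapshot of this table could be inspected, assuming it has been saved as a hypothetical `sample.parquet` file (the file name and storage format are assumptions, not part of the dump itself):

```python
import pandas as pd

# Load a local snapshot of the table; "sample.parquet" is a hypothetical path.
df = pd.read_parquet("sample.parquet")

# Confirm the dtypes roughly match the schema listed above.
print(df.dtypes.head(10))

# The 'content' column holds the raw source file for each row.
first_file = df.loc[0, "content"]
print(first_file.splitlines()[0])

# Quality signals can be used to filter rows, e.g. drop heavily duplicated code.
clean = df[df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.3]
print(len(clean), "rows pass the 5-gram duplication filter")
```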
---
hexsha: 3c988a3bbfa24fe5c3273607b2e3a5909c559524 | size: 2,241 | ext: py | lang: Python
max_stars: controlimcap/models/flatattn.py @ SikandarBakht/asg2cap (head d8a6360eaccdb8c3add5f9c4f6fd72764e47e762), licenses ["MIT"], count 169, events 2020-03-15T08:41:39.000Z to 2022-03-30T09:36:17.000Z
max_issues: controlimcap/models/flatattn.py @ wtr850/asg2cap (head 97a1d866d4a2b86c1f474bb168518f97eb2f8b96), licenses ["MIT"], count 25, events 2020-05-23T15:14:00.000Z to 2022-03-10T06:20:31.000Z
max_forks: controlimcap/models/flatattn.py @ wtr850/asg2cap (head 97a1d866d4a2b86c1f474bb168518f97eb2f8b96), licenses ["MIT"], count 25, events 2020-04-02T10:08:01.000Z to 2021-12-09T12:10:10.000Z
content:
```python
import torch
import torch.nn as nn

import framework.configbase
import caption.encoders.vanilla
import caption.decoders.attention
import caption.models.attention
import controlimcap.encoders.flat

from caption.models.attention import MPENCODER, ATTNENCODER, DECODER


class NodeBUTDAttnModel(caption.models.attention.BUTDAttnModel):
    def forward_encoder(self, input_batch):
        attn_embeds = self.submods[ATTNENCODER](input_batch['attn_fts'])
        graph_embeds = torch.sum(attn_embeds * input_batch['attn_masks'].unsqueeze(2), 1)
        graph_embeds = graph_embeds / torch.sum(input_batch['attn_masks'], 1, keepdim=True)
        enc_states = self.submods[MPENCODER](
            torch.cat([input_batch['mp_fts'], graph_embeds], 1))
        return {'init_states': enc_states, 'attn_fts': attn_embeds}


class NodeRoleBUTDAttnModelConfig(caption.models.attention.AttnModelConfig):
    def __init__(self):
        super().__init__()
        self.subcfgs[ATTNENCODER] = controlimcap.encoders.flat.EncoderConfig()


class NodeRoleBUTDAttnModel(caption.models.attention.BUTDAttnModel):
    def build_submods(self):
        submods = {}
        submods[MPENCODER] = caption.encoders.vanilla.Encoder(self.config.subcfgs[MPENCODER])
        submods[ATTNENCODER] = controlimcap.encoders.flat.Encoder(self.config.subcfgs[ATTNENCODER])
        submods[DECODER] = caption.decoders.attention.BUTDAttnDecoder(self.config.subcfgs[DECODER])
        return submods

    def prepare_input_batch(self, batch_data, is_train=False):
        outs = super().prepare_input_batch(batch_data, is_train=is_train)
        outs['node_types'] = torch.LongTensor(batch_data['node_types']).to(self.device)
        outs['attr_order_idxs'] = torch.LongTensor(batch_data['attr_order_idxs']).to(self.device)
        return outs

    def forward_encoder(self, input_batch):
        attn_embeds = self.submods[ATTNENCODER](input_batch['attn_fts'],
                                                input_batch['node_types'], input_batch['attr_order_idxs'])
        graph_embeds = torch.sum(attn_embeds * input_batch['attn_masks'].unsqueeze(2), 1)
        graph_embeds = graph_embeds / torch.sum(input_batch['attn_masks'], 1, keepdim=True)
        enc_states = self.submods[MPENCODER](
            torch.cat([input_batch['mp_fts'], graph_embeds], 1))
        return {'init_states': enc_states, 'attn_fts': attn_embeds}
```
quality signals: 43.096154 | 95 | 0.764391 | 287 | 2,241 | 5.714286 | 0.236934 | 0.085366 | 0.068293 | 0.046341 | 0.417683 | 0.373171 | 0.373171 | 0.373171 | 0.373171 | 0.373171 | 0 | 0.00402 | 0.112004 | 2,241 | 51 | 96 | 43.941176 | 0.820101 | 0 | 0 | 0.3 | 0 | 0 | 0.080804 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.2 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 3c99a84bfa19cff4eef1b2a7eb8aeb82d35b63a6 | size: 5,169 | ext: py | lang: Python
max_stars: pywincffi/kernel32/console.py @ opalmer/pycffiwin32 (head 39210182a92e93c37a9f1c644fd5fcc1aa32f6d1), licenses ["MIT"], count 10, events 2015-11-19T12:39:50.000Z to 2021-02-21T20:15:29.000Z
max_issues: pywincffi/kernel32/console.py @ opalmer/pycffiwin32 (head 39210182a92e93c37a9f1c644fd5fcc1aa32f6d1), licenses ["MIT"], count 109, events 2015-06-15T05:03:33.000Z to 2018-01-14T10:18:48.000Z
max_forks: pywincffi/kernel32/console.py @ opalmer/pycffiwin32 (head 39210182a92e93c37a9f1c644fd5fcc1aa32f6d1), licenses ["MIT"], count 8, events 2015-07-29T04:18:27.000Z to 2018-11-02T17:15:40.000Z
content:
"""
Console
-------
A module containing functions for interacting with a Windows
console.
"""
from six import integer_types
from pywincffi.core import dist
from pywincffi.core.checks import NON_ZERO, NoneType, input_check, error_check
from pywincffi.exceptions import WindowsAPIError
from pywincffi.wintypes import HANDLE, SECURITY_ATTRIBUTES, wintype_to_cdata
def SetConsoleTextAttribute(hConsoleOutput, wAttributes):
"""
Sets the attributes of characters written to a console buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute
:param pywincffi.wintypes.HANDLE hConsoleOutput:
A handle to the console screen buffer. The handle must have the
``GENERIC_READ`` access right.
:param int wAttributes:
The character attribute(s) to set.
"""
input_check("hConsoleOutput", hConsoleOutput, HANDLE)
input_check("wAttributes", wAttributes, integer_types)
ffi, library = dist.load()
# raise Exception(type(wAttributes))
# info = ffi.new("PCHAR_INFO")
code = library.SetConsoleTextAttribute(
wintype_to_cdata(hConsoleOutput),
ffi.cast("ATOM", wAttributes)
)
error_check("SetConsoleTextAttribute", code=code, expected=NON_ZERO)
def GetConsoleScreenBufferInfo(hConsoleOutput):
"""
Retrieves information about the specified console screen buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo
:param pywincffi.wintypes.HANDLE hConsoleOutput:
A handle to the console screen buffer. The handle must have the
``GENERIC_READ`` access right.
:returns:
Returns a ffi data structure with attributes corresponding to
the fields on the ``PCONSOLE_SCREEN_BUFFER_INFO`` struct.
"""
input_check("hConsoleOutput", hConsoleOutput, HANDLE)
ffi, library = dist.load()
info = ffi.new("PCONSOLE_SCREEN_BUFFER_INFO")
code = library.GetConsoleScreenBufferInfo(
wintype_to_cdata(hConsoleOutput), info)
error_check("GetConsoleScreenBufferInfo", code, expected=NON_ZERO)
return info
def CreateConsoleScreenBuffer(
dwDesiredAccess, dwShareMode, lpSecurityAttributes=None, dwFlags=None):
"""
Creates a console screen buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/createconsolescreenbuffer
:type dwDesiredAccess: int or None
:param dwDesiredAccess:
The access to the console screen buffer. If `None` is provided
then the Windows APIs will use a default security descriptor.
:type dwShareMode: int or None
:param dwShareMode:
Controls the options for sharing the resulting handle. If `None` or
0 then the resulting buffer cannot be shared.
:keyword pywincffi.wintypes.SECURITY_ATTRIBUTES lpSecurityAttributes:
Extra security attributes that determine if the resulting handle
can be inherited. If `None` is provided, which is the default, then
the handle cannot be inherited.
:keyword int dwFlags:
The type of console buffer to create. The flag is superficial because
it only accepts None or ``CONSOLE_TEXTMODE_BUFFER`` as inputs. If no
value is provided, which is the default, then
``CONSOLE_TEXTMODE_BUFFER`` is automatically used.
:rtype: :class:`pywincffi.wintypes.HANDLE``
:returns:
Returns the handle created by the underlying C function.
:func:`pywincffi.kernel32.CloseHandle` should be called on the handle
when you are done with it.
"""
ffi, library = dist.load()
if dwDesiredAccess is None:
dwDesiredAccess = ffi.NULL
if dwShareMode is None:
dwShareMode = 0
if dwFlags is None:
dwFlags = library.CONSOLE_TEXTMODE_BUFFER
input_check(
"dwDesiredAccess", dwDesiredAccess, allowed_values=(
ffi.NULL,
library.GENERIC_READ,
library.GENERIC_WRITE,
library.GENERIC_READ | library.GENERIC_WRITE
))
input_check(
"dwShareMode", dwShareMode, allowed_values=(
0,
library.FILE_SHARE_READ,
library.FILE_SHARE_WRITE,
library.FILE_SHARE_READ | library.FILE_SHARE_WRITE,
))
input_check(
"dwFlags", dwFlags,
allowed_values=(
library.CONSOLE_TEXTMODE_BUFFER,
))
input_check(
"lpSecurityAttributes", lpSecurityAttributes,
allowed_types=(NoneType, SECURITY_ATTRIBUTES))
if lpSecurityAttributes is None:
lpSecurityAttributes = ffi.NULL
handle = library.CreateConsoleScreenBuffer(
ffi.cast("DWORD", dwDesiredAccess),
ffi.cast("DWORD", dwShareMode),
lpSecurityAttributes,
ffi.cast("DWORD", dwFlags),
ffi.NULL # _reserved_
)
if handle == library.INVALID_HANDLE_VALUE: # pragma: no cover
raise WindowsAPIError(
"CreateConsoleScreenBuffer", "Invalid Handle",
library.INVALID_HANDLE_VALUE,
expected_return_code="not INVALID_HANDLE_VALUE")
return HANDLE(handle)
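The three wrappers above follow one pattern: validate inputs with `input_check`, call the C function through the cffi `library`, then verify the return value. A hedged, Windows-only usage sketch; it assumes `pywincffi` is installed and that `CloseHandle` is importable from `pywincffi.kernel32`, which is not shown in this file (`0x0004` is the standard Windows `FOREGROUND_RED` attribute value):

```python
# Windows-only sketch; CloseHandle is assumed to live in pywincffi.kernel32.
from pywincffi.core import dist
from pywincffi.kernel32 import CloseHandle
from pywincffi.kernel32.console import (
    CreateConsoleScreenBuffer, GetConsoleScreenBufferInfo, SetConsoleTextAttribute)

ffi, library = dist.load()

# Create a new text-mode screen buffer with read/write access, unshared.
handle = CreateConsoleScreenBuffer(
    dwDesiredAccess=library.GENERIC_READ | library.GENERIC_WRITE,
    dwShareMode=0)

# Inspect the buffer, then colour subsequent writes red.
info = GetConsoleScreenBufferInfo(handle)
print(info.dwSize.X, info.dwSize.Y)
SetConsoleTextAttribute(handle, 0x0004)  # FOREGROUND_RED

CloseHandle(handle)
```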
quality signals: 32.923567 | 83 | 0.691623 | 562 | 5,169 | 6.240214 | 0.291815 | 0.022812 | 0.027089 | 0.01882 | 0.250927 | 0.201312 | 0.15854 | 0.140861 | 0.117479 | 0.117479 | 0 | 0.001259 | 0.231573 | 5,169 | 156 | 84 | 33.134615 | 0.881672 | 0.433933 | 0 | 0.176471 | 0 | 0 | 0.092115 | 0.037214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.073529 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 3c9a8ed33f779646dc17846360f63018c12812e8 | size: 2,568 | ext: py | lang: Python
max_stars: src/extractClimateObservations.py @ bcgov/nr-rfc-ensweather (head 5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6), licenses ["Apache-2.0"], count 1, events 2021-03-23T15:32:39.000Z to 2021-03-23T15:32:39.000Z
max_issues: src/extractClimateObservations.py @ bcgov/nr-rfc-ensweather (head 5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6), licenses ["Apache-2.0"], count 7, events 2021-02-05T00:52:08.000Z to 2022-03-01T21:37:43.000Z
max_forks: src/extractClimateObservations.py @ bcgov/nr-rfc-ensweather (head 5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6), licenses ["Apache-2.0"], count 2, events 2021-02-24T20:29:39.000Z to 2021-03-23T15:32:44.000Z
content:
"""
extracts the climate observation data from the xlsx spreadsheet to a csv file
so that ens weather scripts can consume it.
Looks in the folder os.environ["ENS_CLIMATE_OBS"]
determines the relationship between the xlsx source and the csv destinations
deleteds any csv's and regenerates them by exporting the ALL_DATa sheet
from the corresponding xlsx file
"""
import csv
import glob
import logging.config
import openpyxl
import os
import pandas as pd
import config.logging_config
logging.config.dictConfig(config.logging_config.LOGGING_CONFIG)
LOGGER = logging.getLogger(__name__)
excelFileDir = os.environ["ENS_CLIMATE_OBS"]
excelFileGlobPattern = "ClimateDataOBS_*.xlsx"
csvFileNamePattern = "climate_obs_{year}.csv"
sheetName = 'ALL_DATA'
def convertCsvXlrd(excelFile, sheetName, csvFile):
# print(f"sheetname: {sheetName}") read_only=True
wb = openpyxl.load_workbook(excelFile, data_only=True, keep_vba=True, read_only=True)
sh = wb[sheetName]
if sh.calculate_dimension() == "A1:A1":
sh.reset_dimensions()
with open(csvFile, "w", newline="") as f:
c = csv.writer(f)
cnt = 0
for r in sh.iter_rows(): # generator; was sh.rows
c.writerow([cell.value for cell in r])
#print(cnt)
cnt += 1
def convertCsvPandas(excelFile, csvFileFullPath):
"""
Doesn't work for some reason
"""
data_xls = pd.read_excel(excelFile, sheet_name="ALL_DATA")
data_xls.to_csv(csvFileFullPath, encoding="utf-8", index=False, header=True)
if __name__ == "__main__":
globDir = os.path.join(excelFileDir, excelFileGlobPattern)
LOGGER.debug(f"glob pattern: {excelFileGlobPattern}")
excelClimateObservationFiles = glob.glob(globDir)
for excelFile in excelClimateObservationFiles:
LOGGER.info(f"input excelFile: {excelFile}")
# extract the year from the filename
excelFileBasename = os.path.basename(excelFile)
year = os.path.splitext(excelFileBasename)[0].split("_")[1]
LOGGER.debug(f"year from excel file parse: {year}")
csvFileName = csvFileNamePattern.format(year=year)
LOGGER.info(f"output csv file: {csvFileName}")
csvFileFullPath = os.path.join(excelFileDir, csvFileName)
if os.path.exists(csvFileFullPath):
LOGGER.info(f"deleting the csv file: {csvFileFullPath}")
os.remove(csvFileFullPath)
LOGGER.info(f"dumping the sheet: {sheetName} from the file {excelFile} to {csvFileFullPath}")
convertCsvXlrd(excelFile, sheetName, csvFileFullPath)
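`convertCsvPandas` is flagged in its docstring as not working. One common cause with macro-enabled workbooks is the Excel reader engine: xlrd 2.0 dropped `.xlsx` support, so an explicit engine is often needed. A hedged sketch of that variant (the `engine="openpyxl"` argument is an assumption here, not something the original file used):

```python
import pandas as pd


def convert_csv_pandas(excel_file, csv_file_full_path):
    # Explicitly select the openpyxl engine; older pandas defaulted to xlrd,
    # which cannot read .xlsx files in recent releases.
    data_xls = pd.read_excel(excel_file, sheet_name="ALL_DATA", engine="openpyxl")
    data_xls.to_csv(csv_file_full_path, encoding="utf-8", index=False, header=True)
```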
quality signals: 35.178082 | 101 | 0.712227 | 320 | 2,568 | 5.59375 | 0.428125 | 0.036313 | 0.042458 | 0.021229 | 0.060335 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003352 | 0.186916 | 2,568 | 72 | 102 | 35.666667 | 0.853927 | 0.195093 | 0 | 0 | 0 | 0 | 0.166258 | 0.031878 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.159091 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 3c9d412ce7e720587944a183ef63dc8c3a37cb1a | size: 2,437 | ext: py | lang: Python
max_stars: server/server/parsing/session.py @ PixelogicDev/zoom_attendance_check (head 7c47066d006ae2205ccb04371115904ec48e3bda), licenses ["MIT"], count 1, events 2020-12-30T19:39:56.000Z to 2020-12-30T19:39:56.000Z
max_issues: server/server/parsing/session.py @ PixelogicDev/zoom_attendance_check (head 7c47066d006ae2205ccb04371115904ec48e3bda), licenses ["MIT"], count null, events null
max_forks: server/server/parsing/session.py @ PixelogicDev/zoom_attendance_check (head 7c47066d006ae2205ccb04371115904ec48e3bda), licenses ["MIT"], count null, events null
content:
```python
import pandas as pd


class Session:

    def __init__(self, students_df, df_session_chat, meta_data):
        self._first_message_time = df_session_chat["time"].sort_values().iloc[0]
        self._relevant_chat = self.get_participants_in_session(students_df, df_session_chat, meta_data)

    @staticmethod
    def get_participants_in_session(df_students, df_chat, meta_data):
        """
        Finds students that attended the session. Runs over each mode, where a mode
        represents a different way for a student to declare attendance (for example:
        phone number, ID), and merges this data into the csv table with the zoom
        name that added it.
        :param df_chat: the table of the chat for the specific session
        :return: df of the attendance in the session
        """
        final_df = None
        for mode in meta_data.filter_modes:
            merged_df = pd.merge(df_students, df_chat.reset_index(), left_on=mode, right_on="message", how="left")
            final_df = pd.concat([merged_df, final_df])
        final_df.sort_values(by="time", inplace=True)
        df_participated = final_df.groupby("zoom_name").first().reset_index()
        df_participated["index"] = df_participated["index"].astype(int)
        df_participated = df_participated.loc[:, ["id", "zoom_name", "time", "message", "index"]].set_index("index")

        filt = df_chat['zoom_name'].str.contains('|'.join(meta_data.zoom_names_to_ignore))
        df_relevant_chat = pd.merge(df_chat[~filt], df_participated, how="left")
        df_relevant_chat["relevant"] = df_relevant_chat["id"].apply(lambda x: 1 if x == x else 0)
        df_relevant_chat["id"] = df_relevant_chat["id"].apply(lambda x: int(x) if x == x else -1)
        return df_relevant_chat

    def zoom_names_table(self, session_id):
        zoom_df = self._relevant_chat.loc[:, ["zoom_name", "id"]].rename(columns={"zoom_name": "name", "id": "student_id"})
        zoom_df['session_id'] = pd.Series([session_id] * zoom_df.shape[0])
        return zoom_df.sort_values(by="student_id", ascending=False).groupby("name").first().reset_index()

    def chat_table(self, zoom_df):
        relevant_chat = self._relevant_chat.drop(columns=["id"])
        chat_session_table = pd.merge(relevant_chat, zoom_df, left_on="zoom_name", right_on="name")
        return chat_session_table.drop(columns=["zoom_name", "name", "session_id", "student_id"]).rename(columns={"id": "zoom_names_id"})
```
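A minimal sketch of driving `Session` with toy data; the `meta_data` object only needs `filter_modes` and `zoom_names_to_ignore` attributes, so a `SimpleNamespace` stands in for whatever class the real project uses (all values here are illustrative):

```python
from types import SimpleNamespace
import pandas as pd

students = pd.DataFrame({'id': [1, 2], 'phone': ['050', '052']})
chat = pd.DataFrame({
    'zoom_name': ['alice', 'bob'],
    'time': ['10:00', '10:01'],
    'message': ['050', 'hello'],   # alice declared attendance via phone number
})
meta = SimpleNamespace(filter_modes=['phone'], zoom_names_to_ignore=['host'])

session = Session(students, chat, meta)
print(session.zoom_names_table(session_id=7))  # alice mapped to student 1, bob to -1
```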
quality signals: 55.386364 | 137 | 0.684858 | 356 | 2,437 | 4.379213 | 0.283708 | 0.08467 | 0.062861 | 0.030789 | 0.07569 | 0.07569 | 0.07569 | 0 | 0 | 0 | 0 | 0.002515 | 0.184243 | 2,437 | 44 | 137 | 55.386364 | 0.78169 | 0.140336 | 0 | 0 | 0 | 0 | 0.108333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.035714 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 3c9f1d64c05ce80fd3ad121b31d428afa01b9e36 | size: 4,538 | ext: py | lang: Python
max_stars: project/image.py @ Mandrenkov/SVBRDF-Texture-Synthesis (head 7e7282698befd53383cbd6566039340babb0a289), licenses ["MIT"], count 2, events 2021-04-26T14:41:11.000Z to 2021-08-20T09:13:03.000Z
max_issues: project/image.py @ Mandrenkov/SVBRDF-Texture-Synthesis (head 7e7282698befd53383cbd6566039340babb0a289), licenses ["MIT"], count null, events null
max_forks: project/image.py @ Mandrenkov/SVBRDF-Texture-Synthesis (head 7e7282698befd53383cbd6566039340babb0a289), licenses ["MIT"], count null, events null
content:
```python
import imageio      # type: ignore
import logging
import numpy        # type: ignore
import os
import pathlib
import torch
import torchvision  # type: ignore

import utils

from torch import Tensor
from typing import Callable


def load(path: str, encoding: str = 'RGB') -> Tensor:
    '''
    Loads the image at the given path using the supplied encoding.

    Args:
        path: Path to the image.
        encoding: Encoding of the image.

    Returns:
        Tensor [R, C, X] representing the normalized pixel values in the image.
    '''
    assert path, "Path cannot be empty or set to None."
    array = imageio.imread(path)
    device = utils.get_device_name()
    image = torchvision.transforms.ToTensor()(array).to(device).permute(1, 2, 0)[:, :, :3]
    if encoding == 'sRGB':
        image = convert_sRGB_to_RGB(image)
    elif encoding == 'Greyscale':
        image = convert_RGB_to_greyscale(image)
    elif encoding != 'RGB':
        raise Exception(f'Image encoding "{encoding}" is not supported."')
    logging.debug('Loaded image from "%s"', path)
    return image


def save(path: str, image: Tensor, encoding: str = 'RGB') -> None:
    '''
    Saves the given image to the specified path using the supplied encoding.

    Args:
        path: Path to the image.
        image: Tensor [R, C, X] of normalized pixel values in the image.
        encoding: Encoding of the image.
    '''
    assert path, "Path cannot be empty or set to None."
    assert torch.all(0 <= image) and torch.all(image <= 1), "Image values must fall in the closed range [0, 1]."
    if encoding == 'sRGB':
        image = convert_RGB_to_sRGB(image)
    elif encoding == 'Greyscale':
        image = convert_greyscale_to_RGB(image)
    elif encoding != 'RGB':
        raise Exception(f'Image encoding "{encoding}" is not supported."')
    pathlib.Path(os.path.dirname(path)).mkdir(parents=True, exist_ok=True)
    imageio.imwrite(path, torch.floor(255 * image).detach().cpu().numpy().astype(numpy.uint8))
    logging.debug('Saved image to "%s"', path)


def clamp(function: Callable[[Tensor], Tensor]) -> Callable:
    '''
    Decorator which clamps an image destined for the given function to the range [ϵ, 1]. Note that ϵ is used in favour
    of 0 to enable differentiation through fractional exponents.

    Args:
        function: Function that accepts an image Tensor as input.

    Returns:
        Wrapper which implements the aforementioned behaviour.
    '''
    return lambda image: function(image.clamp(1E-8, 1))


@clamp
def convert_sRGB_to_RGB(image: Tensor) -> Tensor:
    '''
    Converts an sRGB image into a linear RGB image.

    Args:
        image: Tensor [R, C, 3] of an sRGB image.

    Returns:
        Tensor [R, C, 3] of a linear RGB image.
    '''
    assert len(image.shape) >= 3 and image.size(-1) == 3, 'sRGB image must have dimensionality [*, R, C, 3].'
    below = (image <= 0.04045) * image / 12.92
    above = (image > 0.04045) * ((image + 0.055) / 1.055)**2.4
    return below + above


@clamp
def convert_RGB_to_sRGB(image: Tensor) -> Tensor:
    '''
    Converts a linear RGB image into an sRGB image.

    Args:
        image: Tensor [R, C, 3] of a linear RGB image.

    Returns:
        Tensor [R, C, 3] of an sRGB image.
    '''
    assert len(image.shape) >= 3 and image.size(-1) == 3, 'RGB image must have dimensionality [*, R, C, 3].'
    below = (image <= 0.0031308) * image * 12.92
    above = (image > 0.0031308) * (1.055 * image**(1 / 2.4) - 0.055)
    return below + above


def convert_RGB_to_greyscale(image: Tensor) -> Tensor:
    '''
    Converts a linear RGB image into a greyscale image.

    Args:
        image: Tensor [R, C, 3] of an RGB image.

    Returns:
        Tensor [R, C, 1] of a greyscale image.
    '''
    assert len(image.shape) == 3 and (image.size(2) == 1 or image.size(2) == 3), 'RGB image must have dimensionality [R, C, 1] or [R, C, 3].'
    if image.size(2) == 3:
        assert torch.all((image[:, :, 0] == image[:, :, 1]) & (image[:, :, 0] == image[:, :, 2])), 'RGB image must have the same value across each colour channel.'
        return image[:, :, [0]]
    return image


def convert_greyscale_to_RGB(image: Tensor) -> Tensor:
    '''
    Converts a greyscale image into a linear RGB image.

    Args:
        image: Tensor [R, C, 1] of a greyscale image.

    Returns:
        Tensor [R, C, 3] of a linear RGB image.
    '''
    assert len(image.shape) == 3 and image.size(2) == 1, 'Greyscale image must have dimensionality [R, C, 1].'
    return image.expand(-1, -1, 3)
```
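Since `convert_sRGB_to_RGB` and `convert_RGB_to_sRGB` are intended to be mutual inverses on [ϵ, 1], a quick round-trip check makes the relationship concrete. A self-contained sketch that assumes only the two conversion functions above:

```python
import torch

# Random linear-RGB image in [0, 1); shape [R, C, 3] as the functions expect.
image = torch.rand(4, 4, 3)

encoded = convert_RGB_to_sRGB(image)
decoded = convert_sRGB_to_RGB(encoded)

# Encoding then decoding should reproduce the (clamped) input up to float error.
assert torch.allclose(image.clamp(1E-8, 1), decoded, atol=1e-5)
print("max round-trip error:", (image - decoded).abs().max().item())
```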
quality signals: 32.884058 | 163 | 0.628691 | 654 | 4,538 | 4.321101 | 0.227829 | 0.010616 | 0.028309 | 0.037155 | 0.521585 | 0.458599 | 0.374735 | 0.342887 | 0.305732 | 0.254069 | 0 | 0.032324 | 0.243279 | 4,538 | 137 | 164 | 33.124088 | 0.790623 | 0.313134 | 0 | 0.275862 | 0 | 0 | 0.194185 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 1 | 0.12069 | false | 0 | 0.172414 | 0 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: b1af4bb14846eb251b39a1c7a18e1ee46ffce810 | size: 12,611 | ext: py | lang: Python
max_stars: node_graph.py @ JasonZhuGit/py_path_planner (head e045a076c2c69284f1f977420ad93a966161e012), licenses ["Apache-2.0"], count null, events null
max_issues: node_graph.py @ JasonZhuGit/py_path_planner (head e045a076c2c69284f1f977420ad93a966161e012), licenses ["Apache-2.0"], count null, events null
max_forks: node_graph.py @ JasonZhuGit/py_path_planner (head e045a076c2c69284f1f977420ad93a966161e012), licenses ["Apache-2.0"], count null, events null
content:
```python
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from vertex import Vertex
from heap import PriorityQueue


class NodeGraph():
    '''
    The NodeGraph conception comes from computer science textbooks that work on
    graphs in the mathematical sense: a set of vertices with edges connecting them.
    It contrasts with GridGraph, which looks like a tiled game map.
    '''
    pass


class LNodeGraph(NodeGraph):  # stored as linked lists
    def __init__(self, vertices=None, positions=None, weights=None, heuristic=None):  # edges
        self.vertices = vertices
        self.positions = positions
        self.weights = weights
        self.heuristic = heuristic

    @property
    def vertices(self):
        return self._vertices

    @vertices.setter
    def vertices(self, vertices=None):
        self._vertices = {}
        if isinstance(vertices, list):
            for chain in vertices:
                head = Vertex(chain[0])
                head.weight = 0
                for sub_vertex in chain[-1:0:-1]:
                    head.insert(sub_vertex, weight=1)
                self._vertices[chain[0]] = head

    @property
    def weights(self):  # weight saved in sub/copied vertex, returned with edges
        return self.edges

    @weights.setter
    def weights(self, weights):
        if isinstance(weights, dict):
            for from_u, head in self.vertices.items():
                to_v = head.succ
                while to_v:
                    edge = (from_u, to_v.name)
                    if edge in weights:
                        to_v.weight = weights[edge]
                    else:
                        to_v.weight = 0
                    to_v = to_v.succ

    @property
    def positions(self):
        return self._positions

    @positions.setter
    def positions(self, positions):
        self._positions = positions

    @property
    def heuristic(self):
        _heuristic = {}
        for name, ver in self.vertices.items():
            _heuristic[name] = ver.heur
        return _heuristic

    @heuristic.setter
    def heuristic(self, heuristic):
        if isinstance(heuristic, dict):
            for name, ver in self.vertices.items():
                if name in heuristic:
                    ver.heur = heuristic[name]
                else:
                    ver.heur = float('inf')

    @property
    def edges(self):
        if not hasattr(self, "_edges"):
            self._edges = {}
            for from_u, chain in self.vertices.items():
                to_v = chain.succ
                while to_v:
                    self._edges[(from_u, to_v.name)] = to_v.weight
                    to_v = to_v.succ
        return self._edges

    def check_edge(self, from_u, to_v):
        if from_u not in self.vertices or to_v not in self.vertices:
            return False
        succ = self.vertices[from_u].succ
        while succ:
            if succ.name == to_v:
                return True
            succ = succ.succ  # advance to the next adjacent vertex
        return False

    def BFS_reset_vertices(self):
        for v in self.vertices.values():
            v.reset()
            v.dist = float("inf")

    def BFS(self, s):
        if not s in self.vertices:
            return False
        self.BFS_reset_vertices()
        self.vertices[s].visited = 1
        self.vertices[s].dist = 0
        self.vertices[s].weight = 0
        queue = []
        queue.append(s)
        while queue:
            from_u = queue.pop(0)
            succ_ver = self.vertices[from_u].succ
            while succ_ver:
                to_v = succ_ver.name
                if self.vertices[to_v].visited == 0:
                    self.vertices[to_v].visited = 1
                    self.vertices[to_v].prec = from_u
                    # BFS measures hop count, so the unweighted update is used here;
                    # the weighted form would be dist + succ_ver.weight instead.
                    self.vertices[to_v].dist = self.vertices[from_u].dist + 1
                    queue.append(to_v)
                succ_ver = succ_ver.succ
            self.vertices[from_u].visited = 2
        return True

    def DFS_reset_vertices(self):
        for v in self.vertices.values():
            v.reset()
            v.dist = float("inf")

    def DFS_trackback(self, from_u):
        self._steps += 1
        self.vertices[from_u].entry = self._steps
        self.vertices[from_u].visited = 1
        succ_v = self.vertices[from_u].succ
        while succ_v:
            to_v = succ_v.name
            if self.vertices[to_v].visited == 0:
                self.vertices[to_v].prec = from_u
                self.DFS_trackback(succ_v.name)
            succ_v = succ_v.succ
        self._steps += 1
        self.vertices[from_u].back = self._steps
        self.vertices[from_u].visited = 2

    def DFS(self):
        self.DFS_reset_vertices()
        self._steps = 0
        for from_u in self.vertices.keys():
            if self.vertices[from_u].visited == 0:
                self.DFS_trackback(from_u)

    def Dijkstra_reset_vertices(self):
        for vertex in self.vertices.values():
            vertex.dist = float('inf')
            vertex.prec = None
            # vertex.visited = 0  # not used

    def Dijkstra(self, start):
        self.Dijkstra_reset_vertices()
        self.vertices[start].dist = 0
        # All vertices can be enqueued up front or incrementally; here the full set is
        # enqueued at once. Incremental insertion corresponds to the OPEN/CLOSED/UNUSED
        # scheme and keeps the queue smaller.
        priQueue = PriorityQueue(list(self.vertices.values()), sortby='dist')
        while priQueue:
            from_u = priQueue.dequeue()
            to_v = from_u.succ
            while to_v:
                new_dist = from_u.dist + to_v.weight
                if new_dist < self.vertices[to_v.name].dist:
                    self.vertices[to_v.name].dist = new_dist
                    self.vertices[to_v.name].prec = from_u.name
                to_v = to_v.succ

    def AStar_reset_vertex(self):
        for vertex in self.vertices.values():
            vertex.dist = float('inf')
            vertex.prec = None
            # vertex.visited = 0  # not used

    def AStar(self, start, goal):
        self.AStar_reset_vertex()
        self.vertices[start].dist = 0
        preQueue = PriorityQueue([self.vertices[start]], sortby=['dist', 'heur'])  # ordered by dist + heur
        # preQueue is on behalf of OPEN
        while preQueue:
            from_u = preQueue.dequeue()  # pick the entry with the smallest dist + heur
            if from_u.name == goal:
                return self.AStar_reconstruct_path(start, goal)  # rebuild the path by reversing it
            else:
                to_v = from_u.succ
                while to_v:
                    tentative_dist = from_u.dist + to_v.weight
                    to_v_name = to_v.name
                    if tentative_dist < self.vertices[to_v_name].dist:
                        self.vertices[to_v_name].dist = tentative_dist
                        self.vertices[to_v_name].prec = from_u.name
                        if not to_v in preQueue:
                            # Re-insertion handles repeated visits (dequeued first, enqueued
                            # again). When the heuristic is consistent, each node is visited
                            # at most once; could this otherwise fall into an endless loop?
                            preQueue.enqueue(self.vertices[to_v_name])
                    to_v = to_v.succ
        return False  # goal not found

    def AStar_reconstruct_path(self, start, goal):
        path = [goal]
        prec_u = self.vertices[goal].prec
        while prec_u:
            path.append(prec_u)
            if prec_u == start:
                break
            prec_u = self.vertices[prec_u].prec
        path = path[-1::-1]
        return path

    @property
    def fig(self):
        if not hasattr(self, "_fig"):
            self._fig = plt.gcf()
            self._fig.set_figheight(6)
            self._fig.set_figwidth(12)
            self._fig.gca().axis("off")
        return self._fig

    def draw_init(self):
        return self.fig

    def draw_vertices(self, heuristic=False, color='blue'):
        pos_array = np.array(list(self.positions.values()))
        plt.scatter(pos_array[:, 0], pos_array[:, 1],
                    s=1000, c=color, marker='o', alpha=0.9)
        for name, pos in self.positions.items():
            plt.annotate(name, (pos[0]-0.009, pos[1]-0.015),
                         fontsize=20, color='white', multialignment='center')
            if heuristic:
                plt.annotate("h="+str(self.vertices[name].heur), (pos[0]-0.02, pos[1]+0.09),
                             fontsize=15, color='black', backgroundcolor='white')

    def draw_edges(self, weight=False, color='blue'):
        for edge in self.edges.keys():
            from_u = self.positions[edge[0]]
            to_v = self.positions[edge[1]]
            plt.plot([from_u[0], to_v[0]], [from_u[1], to_v[1]],
                     color=color, linewidth=2, alpha=0.9)
            # edges' labels
            if weight:
                center = [(from_u[0] + to_v[0])/2-0.009, (from_u[1] + to_v[1])/2-0.015]
                plt.annotate(self.edges[edge], center,
                             fontsize=15, color='black', backgroundcolor='white')

    def draw_graph(self, node=True, edge=True, node_head=True, edge_label=True):
        self.draw_vertices()
        self.draw_edges()

    def draw_tree(self, color='black'):
        for to_v, head in self.vertices.items():
            if head.prec:
                from_u = self.positions[head.prec]
                to_v = self.positions[to_v]
                dx = from_u[0] - to_v[0]
                dy = from_u[1] - to_v[1]
                plt.arrow(to_v[0], to_v[1], dx, dy, length_includes_head=True,
                          head_width=0.03, head_length=0.03, shape='full', color=color)

    def draw_BFS_tree(self, color='red'):
        self.draw_tree(color=color)

    def draw_DFS_forest(self, color='green'):
        self.draw_tree(color=color)

    def draw_Dijkstra_tree(self, color='magenta'):  # 'cyan' 'magenta'
        self.draw_tree(color=color)

    def draw_A_star_path(self, start, goal, color='cyan'):
        self.draw_tree(color='magenta')
        to_v = goal
        while to_v:
            from_u = self.vertices[to_v].prec
            if from_u:
                to_pos = self.positions[to_v]
                from_pos = self.positions[from_u]
                dx = from_pos[0] - to_pos[0]
                dy = from_pos[1] - to_pos[1]
                plt.arrow(to_pos[0], to_pos[1], dx, dy, length_includes_head=True,
                          head_width=0.03, head_length=0.03, shape='full', color=color)
            if from_u == start:
                break
            to_v = from_u

    def show(self):
        plt.show()

    def save(self, name='graph.jpg'):
        plt.savefig(name)


class MNodeGraph(NodeGraph):  # stored as a matrix
    def __init__(self):
        pass


if __name__ == "__main__":
    vertices = [['S', 'A', 'B', 'C'],
                ['A', 'S', 'D', 'E'],
                ['B', 'S', 'E', 'F'],
                ['C', 'S', 'K'],
                ['D', 'A', 'G'],
                ['E', 'A', 'B', 'G'],
                ['F', 'B', 'K', 'G'],
                ['K', 'C', 'F', 'G'],
                ['G', 'D', 'E', 'F', 'K']]
    positions = {"S": [0.05, 0.5],   # 0
                 "A": [0.3, 0.8],    # 1
                 "B": [0.3, 0.5],    # 2
                 "C": [0.3, 0.2],    # 3
                 "D": [0.6, 0.95],   # 4
                 "E": [0.6, 0.65],   # 5
                 "F": [0.6, 0.4],    # 6
                 "K": [0.8, 0.2],    # 7
                 "G": [0.99, 0.5]}   # 8
    weights = {('S', 'A'): 9, ('S', 'B'): 6, ('S', 'C'): 8, ('A', 'S'): 9, ('B', 'S'): 6, ('C', 'S'): 8,
               ('A', 'D'): 7, ('A', 'E'): 9, ('D', 'A'): 7, ('E', 'A'): 9,
               ('B', 'E'): 8, ('B', 'F'): 8, ('E', 'B'): 8, ('F', 'B'): 8,
               ('C', 'K'): 20, ('K', 'C'): 20,
               ('D', 'G'): 16, ('G', 'D'): 16,
               ('E', 'G'): 13, ('G', 'E'): 13,
               ('F', 'G'): 13, ('F', 'K'): 5, ('G', 'F'): 13, ('K', 'F'): 5,
               ('K', 'G'): 6, ('G', 'K'): 6}
    heuristic = {"S": 20,  # 0
                 "A": 15,  # 1
                 "B": 17,  # 2
                 "C": 15,  # 3
                 "D": 11,  # 4
                 "E": 12,  # 5
                 "F": 10,  # 6
                 "K": 5,   # 7
                 "G": 0}   # 8
    lgraph = LNodeGraph(vertices, positions, weights, heuristic)
    lgraph.BFS('S')
    lgraph.draw_init()
    lgraph.draw_vertices(heuristic=True)
    lgraph.draw_edges(weight=True)
    # lgraph.draw_BFS_tree()
    # lgraph.DFS()
    # lgraph.draw_DFS_forest()
    # lgraph.Dijkstra('S')
    # lgraph.draw_Dijkstra_tree()
    lgraph.AStar('S', 'G')
    lgraph.draw_A_star_path('S', 'G')
    lgraph.show()
```
quality signals: 35.624294 | 135 | 0.505273 | 1,622 | 12,611 | 3.770037 | 0.145499 | 0.030908 | 0.034342 | 0.036795 | 0.311365 | 0.251513 | 0.22502 | 0.15045 | 0.128536 | 0.128536 | 0 | 0.027197 | 0.358576 | 12,611 | 353 | 136 | 35.725212 | 0.728644 | 0.062089 | 0 | 0.20678 | 0 | 0 | 0.021087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115254 | false | 0.00678 | 0.013559 | 0.013559 | 0.189831 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: b1b2da34505536ccd8a8d170d37deaec68c901e7 | size: 1,534 | ext: py | lang: Python
max_stars: Y2018/Day09.py @ dnsdhrj/advent-of-code-haskell (head 160257960c7995f3e54f889b3d893894bc898005), licenses ["BSD-3-Clause"], count 7, events 2020-11-28T10:29:45.000Z to 2022-02-03T07:37:54.000Z
max_issues: Y2018/Day09.py @ sonowz/advent-of-code-haskell (head 160257960c7995f3e54f889b3d893894bc898005), licenses ["BSD-3-Clause"], count null, events null
max_forks: Y2018/Day09.py @ sonowz/advent-of-code-haskell (head 160257960c7995f3e54f889b3d893894bc898005), licenses ["BSD-3-Clause"], count null, events null
content:
```python
import re


class Doubly:
    def __init__(self, value, prev=None, next=None):
        self.value = value
        self.prev = prev or self
        self.next = next or self

    def move(self, n):
        curr = self
        for i in range(abs(n)):
            if n < 0:
                curr = curr.prev
            else:
                curr = curr.next
        return curr

    def insert(self, v):
        prev = self.prev
        new_node = Doubly(v, prev, self)
        prev.next = new_node
        self.prev = new_node
        return new_node

    # Make sure 'del' this too
    def delete(self):
        self.prev.next = self.next
        self.next.prev = self.prev
        return self.value, self.next


def put_marble(t, c):
    return c.move(2).insert(t)


def put_marble_23(n_player, t, c, s):
    player = t % n_player
    p1 = t
    (p2, nc) = c.move(-7).delete()
    del c
    s[player] += p1 + p2
    return nc, s


def game(n_player, max_turn):
    c = Doubly(0)
    s = [0 for i in range(n_player + 1)]
    for t in range(1, max_turn + 1):
        if t % 23 != 0:
            c = put_marble(t, c)
        else:
            (c, s) = put_marble_23(n_player, t, c, s)
    return s


def solve1(n_player, turn):
    return max(game(n_player, turn))


def solve2(n_player, turn):
    return max(game(n_player, turn * 100))


with open('09.txt') as f:
    line = f.read()
[n_player, turn] = [int(x) for x in re.search(r'(\d+)[^\d]*(\d+).*$', line).groups()]
print(solve1(n_player, turn))
print(solve2(n_player, turn))
```
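The hand-rolled `Doubly` class gives O(1) insertion and deletion at the cursor, which is exactly what this marble game needs. The same puzzle is often solved with `collections.deque`, keeping the current marble at the right end and rotating; a minimal sketch of that equivalent variant using only the standard library:

```python
from collections import deque


def play(n_player, max_turn):
    circle = deque([0])               # current marble is circle[-1]
    scores = [0] * (n_player + 1)
    for t in range(1, max_turn + 1):
        if t % 23 == 0:
            circle.rotate(7)          # step 7 marbles counter-clockwise
            scores[t % n_player] += t + circle.pop()
            circle.rotate(-1)         # marble clockwise of the removed one is current
        else:
            circle.rotate(-1)         # skip one marble clockwise
            circle.append(t)          # inserted marble becomes the current marble
    return max(scores)
```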
quality signals: 23.96875 | 89 | 0.541069 | 243 | 1,534 | 3.300412 | 0.279835 | 0.104738 | 0.09601 | 0.027431 | 0.139651 | 0.139651 | 0.139651 | 0.139651 | 0.087282 | 0 | 0 | 0.026975 | 0.323338 | 1,534 | 63 | 90 | 24.349206 | 0.745665 | 0.015645 | 0 | 0.039216 | 0 | 0 | 0.016578 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.019608 | 0.058824 | 0.372549 | 0.039216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: b1b3c90a89f10cc3abca5ea3c241070e29f4d3b5 | size: 628 | ext: py | lang: Python
max_stars: examples/consulta_preco.py @ deibsoncarvalho/tabela-fipe-api (head 2890162e4436611326f0b878f647f344a8d52626), licenses ["Apache-2.0"], count null, events null
max_issues: examples/consulta_preco.py @ deibsoncarvalho/tabela-fipe-api (head 2890162e4436611326f0b878f647f344a8d52626), licenses ["Apache-2.0"], count null, events null
max_forks: examples/consulta_preco.py @ deibsoncarvalho/tabela-fipe-api (head 2890162e4436611326f0b878f647f344a8d52626), licenses ["Apache-2.0"], count null, events null
content:
```python
from fipeapi import CARRO, CAMINHAO, MOTO, consulta_preco_veiculo, pega_anos_modelo, pega_modelos
from time import sleep


def consulta_preco(marca="HONDA"):
    modelo = pega_modelos(tipo_veiculo=CAMINHAO, marca=marca)[0]['modelo']
    print(f"\nAnos do Modelo {modelo} da {marca}:")
    sleep(2)
    anos = pega_anos_modelo(marca=marca, modelo=modelo, tipo_veiculo=CAMINHAO)[0]
    preco = consulta_preco_veiculo(tipo_veiculo=CAMINHAO, marca=marca, modelo=modelo,
                                   ano_do_modelo=anos['ano'], combustivel=anos['combustivel'])
    print(preco)


if __name__ == '__main__':
    consulta_preco()
```
quality signals: 36.941176 | 97 | 0.705414 | 81 | 628 | 5.160494 | 0.382716 | 0.124402 | 0.136364 | 0.114833 | 0.138756 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005803 | 0.176752 | 628 | 16 | 98 | 39.25 | 0.802708 | 0 | 0 | 0 | 0 | 0 | 0.111465 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: b1b47b065e5504e7082a3670697994dcf84ff418 | size: 853 | ext: py | lang: Python
max_stars: isubscribe/management/commands/announce.py @ ilavender/sensu_drive (head e874024aa157c7076ccc9465e9d6ae00a4f19fd0), licenses ["MIT"], count 71, events 2016-12-25T12:06:07.000Z to 2021-02-21T21:14:48.000Z
max_issues: isubscribe/management/commands/announce.py @ ilavender/sensu_drive (head e874024aa157c7076ccc9465e9d6ae00a4f19fd0), licenses ["MIT"], count 7, events 2016-12-23T23:18:45.000Z to 2021-06-10T18:58:14.000Z
max_forks: isubscribe/management/commands/announce.py @ ilavender/sensu_drive (head e874024aa157c7076ccc9465e9d6ae00a4f19fd0), licenses ["MIT"], count 30, events 2017-01-01T16:18:19.000Z to 2021-04-21T15:06:47.000Z
content:
```python
from django.core.management.base import BaseCommand, CommandError
from channels import Channel, Group, channel_layers
import json
from builtins import str


class Command(BaseCommand):

    help = 'Send text announcement on notifications channel (events view)'

    def add_arguments(self, parser):
        parser.add_argument(
            '-m',
            '--message',
            dest='message',
            required=True,
            help='announcement message text',
            metavar="MESSAGE"
        )

    def handle(self, *args, **options):
        Group("announcement").send({
            "text": json.dumps({'announce': True,
                                'message': options['message']
                                })
        })
        self.stdout.write('announcement done\n')
```
quality signals: 27.516129 | 74 | 0.52755 | 75 | 853 | 5.96 | 0.613333 | 0.035794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.37163 | 853 | 31 | 75 | 27.516129 | 0.833955 | 0 | 0 | 0.090909 | 0 | 0 | 0.196721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: b1b9101a00a5671a8a714dcff7906632b6da9851 | size: 849 | ext: py | lang: Python
max_stars: jcms/models/generic_menu_item.py @ jessielaf/jcms-pip (head ba0580c7cf229b099c17f0286d148018dabf8aa8), licenses ["MIT"], count null, events null
max_issues: jcms/models/generic_menu_item.py @ jessielaf/jcms-pip (head ba0580c7cf229b099c17f0286d148018dabf8aa8), licenses ["MIT"], count null, events null
max_forks: jcms/models/generic_menu_item.py @ jessielaf/jcms-pip (head ba0580c7cf229b099c17f0286d148018dabf8aa8), licenses ["MIT"], count null, events null
content:
```python
from typing import List

from django.template.defaultfilters import slugify

from jcms.models.single_menu_item import SingleMenuItem


class GenericMenuItem:
    """
    Generic menu item that can be seen in the left bar in the cms
    """

    def __init__(self, title: str, single_menu_items: List[SingleMenuItem], slug: str = False):
        """
        :param slug: The slug the single menu items will have in front of them
        :type slug: str
        :param title: Display name for the MenuItem
        :type title: str
        :param single_menu_items: SingleMenuItems that are shown as children
        :type single_menu_items: List[SingleMenuItem]
        """
        if slug:
            self.slug = slug
        else:
            self.slug = slugify(title)

        self.title = title
        self.single_menu_items = single_menu_items
```
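A short usage sketch of the slug fallback; it assumes Django is installed so that `slugify` imports, and passes an empty child list since constructing a `SingleMenuItem` is out of scope here (the 'User pages' title is illustrative):

```python
# Derived slug: slugify(title) is used when no explicit slug is given.
item = GenericMenuItem(title='User pages', single_menu_items=[])
print(item.slug)  # 'user-pages'

# Explicit slug: any truthy slug argument wins over the derived one.
item = GenericMenuItem(title='User pages', single_menu_items=[], slug='accounts')
print(item.slug)  # 'accounts'
```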
quality signals: 30.321429 | 95 | 0.657244 | 110 | 849 | 4.927273 | 0.472727 | 0.129151 | 0.166052 | 0.070111 | 0.121771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.277974 | 849 | 27 | 96 | 31.444444 | 0.884176 | 0.381625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: b1ba9b4717e2cdd9d9bb6e7e1745006030876674 | size: 9,572 | ext: py | lang: Python
max_stars: SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py @ davegutz/myStateOfCharge (head d03dc5e92a9561d4b28be271d4eabe40b48b32ce), licenses ["MIT"], count 1, events 2021-12-03T08:56:33.000Z to 2021-12-03T08:56:33.000Z
max_issues: SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py @ davegutz/myStateOfCharge (head d03dc5e92a9561d4b28be271d4eabe40b48b32ce), licenses ["MIT"], count null, events null
max_forks: SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py @ davegutz/myStateOfCharge (head d03dc5e92a9561d4b28be271d4eabe40b48b32ce), licenses ["MIT"], count null, events null
content:
```python
# Hysteresis class to model battery charging / discharge hysteresis
# Copyright (C) 2022 Dave Gutz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# See http://www.fsf.org/licensing/licenses/lgpl.txt for full license text.

__author__ = 'Dave Gutz <davegutz@alum.mit.edu>'
__version__ = '$Revision: 1.1 $'
__date__ = '$Date: 2022/01/08 13:15:02 $'

import numpy as np
from pyDAGx.lookup_table import LookupTable


class Hysteresis():
    # Use variable resistor to create hysteresis from an RC circuit
    def __init__(self, t_dv=None, t_soc=None, t_r=None, cap=3.6e6, scale=1.):
        # Defaults
        if t_dv is None:
            t_dv = [-0.09, -0.07, -0.05, -0.03, 0.000, 0.03, 0.05, 0.07, 0.09]
        if t_soc is None:
            t_soc = [0, .5, 1]
        if t_r is None:
            t_r = [1e-7, 0.0064, 0.0050, 0.0036, 0.0015, 0.0024, 0.0030, 0.0046, 1e-7,
                   1e-7, 1e-7, 0.0050, 0.0036, 0.0015, 0.0024, 0.0030, 1e-7, 1e-7,
                   1e-7, 1e-7, 1e-7, 0.0036, 0.0015, 0.0024, 1e-7, 1e-7, 1e-7]
        for i in range(len(t_dv)):
            t_dv[i] *= scale
            t_r[i] *= scale
        self.lut = LookupTable()
        self.lut.addAxis('x', t_dv)
        self.lut.addAxis('y', t_soc)
        self.lut.setValueTable(t_r)
        self.cap = cap / scale  # maintain time constant = R*C
        self.res = 0.
        self.soc = 0.
        self.ib = 0.
        self.ioc = 0.
        self.voc_stat = 0.
        self.voc = 0.
        self.dv_hys = 0.
        self.dv_dot = 0.
        self.saved = Saved()

    def __str__(self, prefix=''):
        s = prefix + "Hysteresis:\n"
        res = self.look_hys(dv=0., soc=0.8)
        s += "  res(median) = {:6.4f}  // Null resistance, Ohms\n".format(res)
        s += "  cap = {:10.1f}  // Capacitance, Farads\n".format(self.cap)
        s += "  tau = {:10.1f}  // Null time constant, sec\n".format(res*self.cap)
        s += "  ib = {:7.3f}  // Current in, A\n".format(self.ib)
        s += "  ioc = {:7.3f}  // Current out, A\n".format(self.ioc)
        s += "  voc_stat = {:7.3f}  // Battery model voltage input, V\n".format(self.voc_stat)
        s += "  voc = {:7.3f}  // Discharge voltage output, V\n".format(self.voc)
        s += "  soc = {:7.3f}  // State of charge input, dimensionless\n".format(self.soc)
        s += "  res = {:7.3f}  // Variable resistance value, ohms\n".format(self.res)
        s += "  dv_dot = {:7.3f}  // Calculated voltage rate, V/s\n".format(self.dv_dot)
        s += "  dv_hys = {:7.3f}  // Delta voltage state, V\n".format(self.dv_hys)
        return s

    def calculate_hys(self, ib, voc_stat, soc):
        self.ib = ib
        self.voc_stat = voc_stat
        self.soc = soc
        self.res = self.look_hys(self.dv_hys, self.soc)
        self.ioc = self.dv_hys / self.res
        self.dv_dot = -self.dv_hys / self.res / self.cap + self.ib / self.cap

    def init(self, dv_init):
        self.dv_hys = dv_init

    def look_hys(self, dv, soc):
        self.res = self.lut.lookup(x=dv, y=soc)
        return self.res

    def save(self, time):
        self.saved.time.append(time)
        self.saved.soc.append(self.soc)
        self.saved.res.append(self.res)
        self.saved.dv_hys.append(self.dv_hys)
        self.saved.dv_dot.append(self.dv_dot)
        self.saved.ib.append(self.ib)
        self.saved.ioc.append(self.ioc)
        self.saved.voc_stat.append(self.voc_stat)
        self.saved.voc.append(self.voc)

    def update(self, dt):
        self.dv_hys += self.dv_dot * dt
        self.voc = self.voc_stat + self.dv_hys
        return self.voc


class Saved:
    # For plot savings. A better way is the 'Saver' class in pyfilter helpers, which requires making a __dict__
    def __init__(self):
        self.time = []
        self.dv_hys = []
        self.dv_dot = []
        self.res = []
        self.soc = []
        self.ib = []
        self.ioc = []
        self.voc = []
        self.voc_stat = []


if __name__ == '__main__':
    import sys
    import doctest
    from datetime import datetime
    from unite_pictures import unite_pictures_into_pdf
    import os

    doctest.testmod(sys.modules['__main__'])
    import matplotlib.pyplot as plt

    def overall(hys=Hysteresis().saved, filename='', fig_files=None, plot_title=None, n_fig=None, ref=None):
        if fig_files is None:
            fig_files = []
        if ref is None:
            ref = []
        plt.figure()
        n_fig += 1
        plt.subplot(221)
        plt.title(plot_title)
        plt.plot(hys.time, hys.soc, color='red', label='soc')
        plt.legend(loc=3)
        plt.subplot(222)
        plt.plot(hys.time, hys.res, color='black', label='res, Ohm')
        plt.legend(loc=3)
        plt.subplot(223)
        plt.plot(hys.time, hys.ib, color='blue', label='ib, A')
        plt.plot(hys.time, hys.ioc, color='green', label='ioc, A')
        plt.legend(loc=2)
        plt.subplot(224)
        plt.plot(hys.time, hys.dv_hys, color='red', label='dv_hys, V')
        plt.legend(loc=2)
        fig_file_name = filename + "_" + str(n_fig) + ".png"
        fig_files.append(fig_file_name)
        plt.savefig(fig_file_name, format="png")
        plt.figure()
        n_fig += 1
        plt.subplot(111)
        plt.title(plot_title)
        plt.plot(hys.soc, hys.voc, color='red', label='voc vs soc')
        plt.legend(loc=2)
        fig_file_name = filename + "_" + str(n_fig) + ".png"
        fig_files.append(fig_file_name)
        plt.savefig(fig_file_name, format="png")
        return n_fig, fig_files

    class Pulsar:
        def __init__(self):
            self.time_last_hold = 0.
            self.time_last_rest = -100000.
            self.holding = False
            self.resting = True
            self.index = -1
            self.amp = [100., 0., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
                        100., 100., 100., 100., 100., 100., 100., 100., 100., 100.]
            self.dur = [16000., 0., 600., 600., 600., 600., 600., 600., 600., 600., 600., 600.,
                        600., 600., 600., 600., 600., 600., 600., 600., 600., 600.]
            self.rst = [600., 7200., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 7200.,
                        3600., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 46800.]
            self.pulse_value = self.amp[0]
            self.end_time = self.time_end()

        def calculate(self, time):
            if self.resting and time >= self.time_last_rest + self.rst[self.index]:
                if time < self.end_time:
                    self.index += 1
                    self.resting = False
                    self.holding = True
                    self.time_last_hold = time
                    self.pulse_value = self.amp[self.index]
            elif self.holding and time >= self.time_last_hold + self.dur[self.index]:
                self.index += 0  # only advance after resting
                self.resting = True
                self.holding = False
                self.time_last_rest = time
                self.pulse_value = 0.
            return self.pulse_value

        def time_end(self):
            time = 0
            for du in self.dur:
                time += du
            for rs in self.rst:
                time += rs
            return time

    def main():
        # Setup to run the transients
        dt = 10
        # time_end = 2
        # time_end = 500000
        pull = Pulsar()
        time_end = pull.time_end()

        hys = Hysteresis()

        # Executive tasks
        t = np.arange(0, time_end + dt, dt)
        soc = 0.2
        current_in_s = []

        # time loop
        for i in range(len(t)):
            if t[i] < 10000:
                current_in = 0
            elif t[i] < 20000:
                current_in = 40
            elif t[i] < 30000:
                current_in = -40
            elif t[i] < 80000:
                current_in = 8
            elif t[i] < 130000:
                current_in = -8
            elif t[i] < 330000:
                current_in = 2
            elif t[i] < 440000:
                current_in = -2
            else:
                current_in = 0
            current_in = pull.calculate(t[i])
            init_ekf = (t[i] <= 1)
            if init_ekf:
                hys.init(0.0)

            # Models
            soc = min(max(soc + current_in / 100. * dt / 20000., 0.), 1.)
            voc_stat = 13. + (soc - 0.5)
            hys.calculate_hys(ib=current_in, voc_stat=voc_stat, soc=soc)
            hys.update(dt=dt)

            # Plot stuff
            current_in_s.append(current_in)
            hys.save(t[i])

        # Data
        print('hys:  ', str(hys))

        # Plots
        n_fig = 0
        fig_files = []
        date_time = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
        filename = sys.argv[0].split('/')[-1]
        plot_title = filename + '  ' + date_time
        n_fig, fig_files = overall(hys.saved, filename, fig_files, plot_title=plot_title, n_fig=n_fig, ref=current_in_s)
        plt.show()

    main()
```
quality signals: 35.191176 | 120 | 0.531446 | 1,355 | 9,572 | 3.612546 | 0.219926 | 0.023289 | 0.033095 | 0.041675 | 0.231869 | 0.178958 | 0.11665 | 0.093769 | 0.090705 | 0.090705 | 0 | 0.081977 | 0.332219 | 9,572 | 271 | 121 | 35.321033 | 0.683824 | 0.099561 | 0 | 0.130435 | 0 | 0 | 0.094296 | 0.002678 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062802 | false | 0 | 0.038647 | 0 | 0.144928 | 0.004831 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: b1bc9799f169be42f1deb800510f1f294b2fb871 | size: 3,822 | ext: py | lang: Python
max_stars: src/google.com/get_website.py @ IRE-Project/Data-Collector (head 9ca3efc32afe068682d334c8f833cb97ff2af36d), licenses ["MIT"], count null, events null
max_issues: src/google.com/get_website.py @ IRE-Project/Data-Collector (head 9ca3efc32afe068682d334c8f833cb97ff2af36d), licenses ["MIT"], count null, events null
max_forks: src/google.com/get_website.py @ IRE-Project/Data-Collector (head 9ca3efc32afe068682d334c8f833cb97ff2af36d), licenses ["MIT"], count null, events null
content:
"""@file
This file is responsible for extracting website from google search results and formatting them for later use.
"""
import json
from urllib.parse import urlparse
import nltk
import os
tc = 0
cp = 0
def find_website(raw_data):
"""
Uses several rule based techniques to find candidate websites for a company
:param raw_data:
:return: list of candidate websites
"""
if raw_data["context"] != []:
print(raw_data["context"])
website = set()
removed_tokens = ["ltd", "ltd.", "co", "co.", "limited", "services", "private", "govt", "government", "industries"
,"incorporation", "public", "pvt", "and", "&"]
c_name = [tok for tok in raw_data["query"].lower().strip().split() if tok not in removed_tokens]
for ele in raw_data["top_urls"]:
try:
domain = urlparse(ele["url"]).netloc
if "official" in ele["description"] and "website" in ele["description"]:
website.add(domain)
else:
abbreviation = "".join([tok[0] for tok in c_name])
webname = domain.split(".")
if len(webname) < 2:
continue
elif len(webname) == 2:
webname = webname[0]
else:
if webname[1] == "co":
webname = webname[0]
else:
webname = webname[1]
if nltk.edit_distance(webname, abbreviation) <= 2:
website.add(domain)
elif any((tok in domain) and (len(tok) > 4) for tok in c_name):
website.add(domain)
except Exception as e:
print(str(e), ele)
if len(website) > 0:
global tc, cp
cp += 1
tc += len(website)
# if len(website) > 1:
# print(c_name, website)
return list(website)
def get_websites(raw):
"""
get all candidate websites for all search results in raw
:param raw: google search results
:return: dict with company name and candidate websites
"""
count = 0
data = {}
for key,val in raw.items():
data[key] = {
"Company": val["query"],
"website": find_website(val)
}
count += 1
print(f"\rProgress: {count}", end="")
return data
def reformat(data, links):
"""
Reformat data to better suit the global data paradigm
:param data: unformatted data
:param links: the exhaustive linkslist used
:return: the formatted data
"""
rev_map = {}
for ele in links["data"]:
rev_map[ele[1].lower().strip()] = ele[0]
new_data = {}
for key, val in data.items():
cin = rev_map[val["Company"].lower().strip()]
new_data[cin] = val["website"]
print(len(new_data))
return new_data
def get_all_websites(dir_path):
"""
Get all websites for all files in a directory
:param dir_path: path to directory
:return: dict of unformatted comany names and candidate websites
"""
data = {}
for file_name in os.listdir(dir_path):
if file_name.endswith(".json") and file_name != "final_data.json":
file = open(dir_path + file_name)
raw = json.load(file)
file.close()
websites = get_websites(raw)
for key, val in websites.items():
data[key] = val
return data
if __name__ == "__main__":
data = get_all_websites("../../data/google.com/")
print("\n", cp, tc)
file = open("../../data/zaubacorp.com/linkslist.json")
links = json.load(file)
file.close()
data = reformat(data, links)
file = open("../../data/google.com/final_data.json", "w+")
json.dump(data, file, indent=4)
file.close()
quality signals: 27.695652 | 118 | 0.554422 | 471 | 3,822 | 4.399151 | 0.305732 | 0.02027 | 0.011583 | 0.015927 | 0.047297 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00728 | 0.317111 | 3,822 | 137 | 119 | 27.89781 | 0.78659 | 0.193878 | 0 | 0.180723 | 0 | 0 | 0.114363 | 0.032963 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.048193 | 0 | 0.144578 | 0.060241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: b1c12120eb1970800352a4b0dd3d40166babaf18 | size: 2,354 | ext: py | lang: Python
max_stars: api/serializers.py @ openjobs-cinfo/openjobs-api (head b902d41fc20167727bd058a77906ddb9a83fd52f), licenses ["MIT"], count null, events null
max_issues: api/serializers.py @ openjobs-cinfo/openjobs-api (head b902d41fc20167727bd058a77906ddb9a83fd52f), licenses ["MIT"], count null, events null
max_forks: api/serializers.py @ openjobs-cinfo/openjobs-api (head b902d41fc20167727bd058a77906ddb9a83fd52f), licenses ["MIT"], count null, events null
content:
from rest_framework.serializers import ModelSerializer
from .models import Degree, Job, Skill, DataOrigin, Address, Qualification, User
class DegreeSerializer(ModelSerializer):
class Meta:
model = Degree
fields = ('id', 'name', 'description')
class AddressSerializer(ModelSerializer):
class Meta:
model = Address
fields = ('id', 'zip_code', 'country', 'state', 'city', 'street', 'street_number')
class QualificationSerializer(ModelSerializer):
class Meta:
model = Qualification
fields = ('id', 'name', 'description', 'degree_id')
class SkillRelationSerializer(ModelSerializer):
class Meta:
model = Skill
fields = ('id', 'name', 'color')
class DataOriginSerializer(ModelSerializer):
class Meta:
model = DataOrigin
fields = ('id', 'name', 'url')
class DataOriginRelationSerializer(ModelSerializer):
class Meta:
model = DataOrigin
fields = ('id', 'name')
class JobSerializer(ModelSerializer):
skills = SkillRelationSerializer(many=True, read_only=True)
origin_id = DataOriginRelationSerializer(read_only=True)
class Meta:
model = Job
fields = (
'id', 'original_id', 'url', 'number', 'title', 'state', 'created_at', 'closed_at', 'description',
'location', 'origin_id', 'skills'
)
class SkillSerializer(ModelSerializer):
class Meta:
model = Skill
fields = ('id', 'original_id', 'url', 'name', 'color', 'description', 'origin_id')
class UserSerializer(ModelSerializer):
skills = SkillRelationSerializer(many=True, read_only=True)
qualifications = QualificationSerializer(many=True, read_only=True)
class Meta:
ref_name = 'User'
model = User
fields = ('id', 'email', 'name', 'avatar_url', 'address_id', 'birth_date', 'skills', 'qualifications')
class UserCreationSerializer(ModelSerializer):
class Meta:
ref_name = 'UserCreation'
model = User
fields = (
'id', 'email', 'name', 'password', 'avatar_url', 'address_id', 'birth_date', 'skills', 'qualifications'
)
def create(self, validated_data):
instance = super().create(validated_data)
instance.set_password(validated_data['password'])
instance.save()
return instance
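# Example (not part of the file): how these serializers are typically used in
# a view or shell. Assumes Django is configured and the models module is
# importable as api.models; `job` here is hypothetical.
from api.serializers import JobSerializer
from api.models import Job

job = Job.objects.select_related('origin_id').prefetch_related('skills').first()
print(JobSerializer(job).data)  # dict with nested skill and origin entries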
b1c1b0752a916c3d0a0607d4658e6692c2c8187f | 506 bytes | Python | naive_program.py | silentShadow/Python-3.5 | MIT
import urllib.request
urls = [ "https://www.google.com","httpr://www.python.org" ]
for link in urls:
    request = urllib.request.Request(link)
    response = urllib.request.urlopen(request)
'''
action here
'''
'''\
NORMAL: sloooow
[][][] [][] [][]{}{} {}{}{} {}{}{} {}
THREADING: still sloow
google: [] [] [] [][] [][][][] []
python: {}{}{} {} {}{} {} {}{}
ASYNCIO: Event Loop: fastest
[] {} [] {} [] {} {}{}{} [][][] {}{} [][]
'''
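# A minimal asyncio version of the loop above, matching the "ASYNCIO: Event
# Loop: fastest" sketch in the comment. urllib is blocking, so each request
# is pushed onto a worker thread via run_in_executor; a non-blocking client
# such as aiohttp would avoid the threads entirely.
import asyncio

async def fetch(url):
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, urllib.request.urlopen, url)

async def main():
    responses = await asyncio.gather(*(fetch(u) for u in urls))
    for response in responses:
        print(response.status)

asyncio.run(main())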
b1c431a1f0a698ee3cb88df0ac882e928a41cf16 | 1,133 bytes | Python | CS303/lab4-6/work/algorithm_ncs/ncs_client.py | Wycers/Codelib | MIT | 22 stars
import json
from algorithm_ncs import ncs_c as ncs
import argparse
parser = argparse.ArgumentParser(description="This is a NCS solver")
parser.add_argument("-c", "--config", default="algorithm_ncs/parameter.json", type=str, help="a json file that contains parameter")
parser.add_argument("-d", "--data", default="6", type=int, help="the problem dataset that need to be solved")
args = parser.parse_args()
"""
how to use it?
example:
python3 -m algorithm_ncs.ncs_client -d 12 -c algorithm_ncs/parameter.json
good luck!
"""
if __name__ == '__main__':
config_file = args.config
p = args.data
with open(config_file) as file:
try:
ncs_para = json.loads(file.read())
        except json.JSONDecodeError:
            raise Exception("not a JSON format file")
_lambda = ncs_para["lambda"]
r = ncs_para["r"]
epoch = ncs_para["epoch"]
n= ncs_para["n"]
ncs_para = ncs.NCS_CParameter(tmax=300000, lambda_exp=_lambda, r=r, epoch=epoch, N=n)
print("************ start problem %d **********" % p)
ncs_c = ncs.NCS_C(ncs_para, p)
ncs_res = ncs_c.loop(quiet=False, seeds=0)
print(ncs_res)
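# Grounded in the keys the script reads above ("lambda", "r", "epoch", "n"),
# a parameter.json along these lines would satisfy it; the values are
# placeholders, not the course's tuned settings.
import json
params = {"lambda": 1.0, "r": 0.99, "epoch": 10, "n": 10}
with open("algorithm_ncs/parameter.json", "w") as f:
    json.dump(params, f, indent=4)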
b1c525fad1b20ec7dd22a4699a9e0a34d0093f34 | 1,999 bytes | Python | src/setup.py | umedoblock/fugou | BSD-3-Clause
# name
# name of the package short string (1)
# version
# version of this release short string (1)(2)
# author
# package author’s name short string (3)
# author_email
# email address of the package author email address (3)
# maintainer
# package maintainer’s name short string (3)
# maintainer_email
# email address of the package maintainer email address (3)
# url
# home page for the package URL (1)
# description
# short, summary description of the package short string
# long_description
# longer description of the package long string (5)
# download_url
# location where the package may be downloaded URL (4)
# classifiers
# a list of classifiers list of strings (4)
# platforms
# a list of platforms list of strings
# license
# license for the package short string (6)
from distutils.core import setup, Extension
import sys
# print('sys.argv =', sys.argv)
# print('type(sys.argv) =', type(sys.argv))
if '--pg' in sys.argv:
suffix = '_pg'
sys.argv.remove('--pg')
else:
suffix = ''
# print('suffix =', suffix)
ext_name = '_par2' + suffix
module_par2 = \
Extension(ext_name, sources=[
'par2/par2/pypar2.c',
'par2/par2/libpar2.c'
],
)
ext_name = '_gcdext' + suffix
module_gcdext = \
Extension(ext_name, sources = ['ecc/ecc/_gcdext.c'],
)
ext_name = '_montgomery' + suffix
module_montgomery = \
Extension(ext_name, sources = ['montgomery/pymontgomery.c'])
ext_name = '_camellia' + suffix
module_camellia = \
Extension(ext_name, sources = ['camellia/pycamellia.c',
'camellia/camellia.c',
'libfugou.c'])
setup( name = 'fugou',
version = '8.0',
author = '梅濁酒(umedoblock)',
author_email = 'umedoblock@gmail.com',
url = 'empty',
description = 'This is a gcdext() package',
ext_modules = [
module_montgomery, module_gcdext, module_camellia
])
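# Usage note (not part of the file): a build along the lines of
#   python3 setup.py build        (or: python3 setup.py --pg build)
# compiles the extensions; the custom --pg flag is stripped from sys.argv
# before distutils sees it and renames each extension with a _pg suffix.
# As written, module_par2 is defined but never listed in ext_modules, so the
# par2 extension is not built.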
b1ca7d47ebdd386eeb55838e16468d553751ab0a | 2,910 bytes | Python | DeleteBackupFiles/deletebackupfile.py | Liuzkai/PythonScript | MIT | 1 star
# -*- coding: utf-8 -*-
# https://oldj.net/
u"""
同步两个文件夹
用法:
python syncdir.py source_dir target_dir
执行后,source_dir 中的文件将被同步到 target_dir 中
这个同步是单向的,即只将 source_dir 中更新或新增的文件拷到 target_dir 中,
如果某个文件在 source_dir 中不存在而在 target_dir 中存在,本程序不会删除那个文件,
也不会将其拷贝到 source_dir 中
判断文件是否更新的方法是比较文件最后修改时间以及文件大小是否一致
"""
import os
import sys
import shutil
def errExit(msg):
print("-" * 50)
print("ERROR:")
print(msg)
sys.exit(1)
def main(source_dir, target_dir):
print("synchronize '%s' >> '%s'..." % (source_dir, target_dir))
print("=" * 50)
sync_file_count = 0
sync_file_size = 0
for root, dirs, files in os.walk(source_dir):
if "backup" not in root and ".git" not in root:
relative_path = root.replace(source_dir, "")
            if len(relative_path) > 0 and relative_path[0] in ("/", "\\"):
relative_path = relative_path[1:]
dist_path = os.path.join(target_dir, relative_path)
            if not os.path.isdir(dist_path):
os.makedirs(dist_path)
last_copy_folder = ""
for fn0 in files:
fn = os.path.join(root, fn0)
fn2 = os.path.join(dist_path, fn0)
is_copy = False
if not os.path.isfile(fn2):
is_copy = True
else:
statinfo = os.stat(fn)
statinfo2 = os.stat(fn2)
is_copy = (
round(statinfo.st_mtime, 3) != round(statinfo2.st_mtime, 3)
or statinfo.st_size != statinfo2.st_size
)
if is_copy:
if dist_path != last_copy_folder:
print("[ %s ]" % dist_path)
last_copy_folder = dist_path
print("copying '%s' ..." % fn0)
shutil.copy2(fn, fn2)
sync_file_count += 1
sync_file_size += os.stat(fn).st_size
if sync_file_count > 0:
print("-" * 50)
print("%d files synchronized!" % sync_file_count)
if sync_file_size > 0:
print("%d bytes." % sync_file_size)
print("done!")
if __name__ == "__main__":
# if len(sys.argv) != 3:
# if "-h" in sys.argv or "--help" in sys.argv:
# print(__doc__)
# sys.exit(1)
# errExit(u"invalid arguments!")
# source_dir, target_dir = sys.argv[1:]
# if not os.path.isdir(source_dir):
# errExit(u"'%s' is not a folder!" % source_dir)
# elif not os.path.isdir(target_dir):
# errExit(u"'%s' is not a folder!" % target_dir)
source_dir = "D:\\UGit\\HoudiniDigitalAssetSet"
target_dir = "D:\\NExTWorkSpace\\ArkWorkSpace\\Projects\\Ark2019\\Trunk\\UE4NEXT_Stable\\Engine\\Binaries\\ThirdParty\\Houdini\\HoudiniDigitalAssetSet"
main(source_dir, target_dir)
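# The commented-out block above preserves the script's original command-line
# interface; a minimal restoration of it (replacing the hard-coded paths)
# would look like this.
if len(sys.argv) != 3:
    if "-h" in sys.argv or "--help" in sys.argv:
        print(__doc__)
        sys.exit(1)
    errExit(u"invalid arguments!")
source_dir, target_dir = sys.argv[1:]
if not os.path.isdir(source_dir):
    errExit(u"'%s' is not a folder!" % source_dir)
elif not os.path.isdir(target_dir):
    errExit(u"'%s' is not a folder!" % target_dir)
main(source_dir, target_dir)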
b1cc39d59dda967c7dcf371addd5df5990b99e23 | 5,004 bytes | Python | enkube/util.py | rfairburn/enkube-1 | Apache-2.0
# Copyright 2018 SpiderOak, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import yaml
import pyaml
import threading
from functools import wraps
from collections import OrderedDict
from pprint import pformat
from pygments import highlight, lexers, formatters
import curio
from curio.meta import (
curio_running, _from_coroutine, _isasyncgenfunction, finalize)
from curio.monitor import Monitor
def load_yaml(stream, Loader=yaml.SafeLoader, object_pairs_hook=OrderedDict, load_doc=False):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
if load_doc:
return list(yaml.load_all(stream, OrderedLoader))
return yaml.load(stream, OrderedLoader)
def format_json(obj, sort_keys=True):
return highlight(
json.dumps(obj, sort_keys=sort_keys, indent=2),
lexers.JsonLexer(),
formatters.TerminalFormatter()
)
def format_yaml(obj, prefix='---\n'):
return highlight(
prefix + pyaml.dumps(obj, safe=True).decode('utf-8'),
lexers.YamlLexer(),
formatters.TerminalFormatter()
)
def format_diff(diff):
return highlight(diff, lexers.DiffLexer(), formatters.TerminalFormatter())
def format_python(obj):
return highlight(
pformat(obj),
lexers.PythonLexer(),
formatters.TerminalFormatter()
)
def flatten_kube_lists(items):
for obj in items:
if obj.get('kind', '').endswith('List'):
for obj in flatten_kube_lists(obj['items']):
yield obj
else:
yield obj
_locals = threading.local()
def get_kernel():
try:
return _locals.curio_kernel
except AttributeError:
_locals.curio_kernel = k = curio.Kernel()
if 'CURIOMONITOR' in os.environ:
m = Monitor(k)
k._call_at_shutdown(m.close)
return k
def set_kernel(kernel):
_locals.curio_kernel = kernel
def close_kernel():
try:
k = _locals.curio_kernel
except AttributeError:
return
k.run(shutdown=True)
del _locals.curio_kernel
def sync_wrap(asyncfunc):
if _isasyncgenfunction(asyncfunc):
def _gen(*args, **kwargs):
k = get_kernel()
it = asyncfunc(*args, **kwargs)
f = finalize(it)
sentinal = object()
async def _next():
try:
return await it.__anext__()
except StopAsyncIteration:
return sentinal
k.run(f.__aenter__)
try:
while True:
item = k.run(_next)
if item is sentinal:
return
yield item
finally:
k.run(f.__aexit__, *sys.exc_info())
@wraps(asyncfunc)
def wrapped(*args, **kwargs):
if _from_coroutine() or curio_running():
return asyncfunc(*args, **kwargs)
else:
return _gen(*args, **kwargs)
else:
@wraps(asyncfunc)
def wrapped(*args, **kwargs):
if _from_coroutine() or curio_running():
return asyncfunc(*args, **kwargs)
else:
return get_kernel().run(asyncfunc(*args, **kwargs))
wrapped._awaitable = True
return wrapped
class AsyncInstanceType(curio.meta.AsyncInstanceType):
__call__ = sync_wrap(curio.meta.AsyncInstanceType.__call__)
class AsyncObject(metaclass=AsyncInstanceType):
pass
class SyncIterWrapper:
_sentinel = object()
def __init__(self, aiter):
self._aiter = aiter
@sync_wrap
async def _anext(self):
try:
return await self._aiter.__anext__()
except StopAsyncIteration:
return self._sentinel
def __next__(self):
item = self._anext()
if item is self._sentinel:
raise StopIteration()
return item
class SyncIter:
def __iter__(self):
return SyncIterWrapper(self.__aiter__())
class SyncContextManager:
@sync_wrap
async def __enter__(self):
return await self.__aenter__()
@sync_wrap
async def __exit__(self, typ, val, tb):
return await self.__aexit__(typ, val, tb)
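# Usage sketch for sync_wrap (the coroutine below is hypothetical): one
# definition is callable from both worlds. A synchronous caller runs it on
# the thread-local kernel; inside a curio task it is awaited natively.
@sync_wrap
async def fetch_answer():
    await curio.sleep(0.1)  # stand-in for real async work
    return 42

print(fetch_answer())  # synchronous call: runs on the thread-local kernel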
b1d089298e5f4bb67268690bc90d7e531a39929b | 7,710 bytes | Python | aleph/model/document.py | gazeti/aleph | MIT | 1 star
import logging
from datetime import datetime, timedelta
from normality import ascii_text
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from aleph.core import db
from aleph.model.metadata import Metadata
from aleph.model.validate import validate
from aleph.model.collection import Collection
from aleph.model.reference import Reference
from aleph.model.common import DatedModel
from aleph.model.document_record import DocumentRecord
from aleph.model.document_tag import DocumentTag
from aleph.text import index_form
log = logging.getLogger(__name__)
class Document(db.Model, DatedModel, Metadata):
_schema = 'document.json#'
SCHEMA = 'Document'
TYPE_TEXT = 'text'
TYPE_TABULAR = 'tabular'
TYPE_OTHER = 'other'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True)
type = db.Column(db.Unicode(10), nullable=False, index=True)
status = db.Column(db.Unicode(10), nullable=True, index=True)
meta = db.Column(JSONB, default={})
crawler = db.Column(db.Unicode(), index=True)
crawler_run = db.Column(db.Unicode())
error_type = db.Column(db.Unicode(), nullable=True)
error_message = db.Column(db.Unicode(), nullable=True)
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True) # noqa
children = db.relationship('Document', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
def update(self, data):
validate(data, self._schema)
self.title = data.get('title')
self.summary = data.get('summary')
self.languages = data.get('languages')
self.countries = data.get('countries')
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
# pq.delete(synchronize_session='fetch')
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
# pq.delete(synchronize_session='fetch')
pq.delete()
db.session.flush()
def delete_references(self, origin=None):
pq = db.session.query(Reference)
pq = pq.filter(Reference.document_id == self.id)
if origin is not None:
pq = pq.filter(Reference.origin == origin)
# pq.delete(synchronize_session='fetch')
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_references()
self.delete_records()
db.session.delete(self)
def insert_records(self, sheet, iterable, chunk_size=1000):
chunk = []
for index, data in enumerate(iterable):
chunk.append({
'document_id': self.id,
'index': index,
'sheet': sheet,
'data': data
})
if len(chunk) >= chunk_size:
db.session.bulk_insert_mappings(DocumentRecord, chunk)
chunk = []
if len(chunk):
db.session.bulk_insert_mappings(DocumentRecord, chunk)
def text_parts(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
for record in pq.yield_per(1000):
for text in record.text_parts():
yield text
@classmethod
def crawler_last_run(cls, crawler_id):
q = db.session.query(func.max(cls.updated_at))
q = q.filter(cls.crawler == crawler_id)
return q.scalar()
@classmethod
def is_crawler_active(cls, crawler_id):
# TODO: add a function to see if a particular crawl is still running
# this should be defined as having "pending" documents.
last_run_time = cls.crawler_last_run(crawler_id)
if last_run_time is None:
return False
return last_run_time > (datetime.utcnow() - timedelta(hours=1))
@classmethod
def crawler_stats(cls, crawler_id):
# Check if the crawler was active very recently, if so, don't
# allow the user to execute a new run right now.
stats = {
'updated': cls.crawler_last_run(crawler_id),
'running': cls.is_crawler_active(crawler_id)
}
q = db.session.query(cls.status, func.count(cls.id))
q = q.filter(cls.crawler == crawler_id)
q = q.group_by(cls.status)
for (status, count) in q.all():
stats[status] = count
return stats
@classmethod
def by_keys(cls, parent_id=None, collection=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
if collection is not None:
q = q.filter(Document.collection_id == collection.id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.type = cls.TYPE_OTHER
document.collection_id = collection.id
document.collection = collection
document.parent_id = parent_id
document.foreign_id = foreign_id
document.content_hash = content_hash
document.status = document.STATUS_PENDING
db.session.add(document)
return document
def to_dict(self):
data = self.to_meta_dict()
try:
from flask import request # noqa
data['public'] = request.authz.collection_public(self.collection_id) # noqa
        except Exception:
data['public'] = None
data.update({
'id': self.id,
'type': self.type,
'status': self.status,
'parent_id': self.parent_id,
'foreign_id': self.foreign_id,
'content_hash': self.content_hash,
'crawler': self.crawler,
'crawler_run': self.crawler_run,
'error_type': self.error_type,
'error_message': self.error_message,
'collection_id': self.collection_id,
'created_at': self.created_at,
'updated_at': self.updated_at
})
return data
def to_index_dict(self):
data = self.to_dict()
data['text'] = index_form(self.text_parts())
data['schema'] = self.SCHEMA
data['schemata'] = [self.SCHEMA]
data['name_sort'] = ascii_text(data.get('title'))
data['title_latin'] = ascii_text(data.get('title'))
data['summary_latin'] = ascii_text(data.get('summary'))
data.pop('tables')
return data
def __repr__(self):
return '<Document(%r,%r,%r)>' % (self.id, self.type, self.title)
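# Usage sketch (hypothetical collection and session state): get-or-create a
# document by its foreign id, then bulk-load tabular rows in chunks of 1000.
doc = Document.by_keys(collection=collection, foreign_id='case-42.csv')
doc.type = Document.TYPE_TABULAR
db.session.flush()  # ensure doc.id is assigned before the bulk insert
doc.insert_records(sheet=0, iterable=({'row': i} for i in range(5000)))
db.session.commit()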
b1d542377c13c57ca40f0aad4217a57a0a2f3e27 | 5,438 bytes | Python | tests/test_filters.py | maniospas/pygrank | Apache-2.0 | 19 stars
import networkx as nx
import pygrank as pg
import pytest
from .test_core import supported_backends
def test_zero_personalization():
assert pg.sum(pg.PageRank()(next(pg.load_datasets_graph(["graph9"])), {}).np) == 0
def test_abstract_filter_types():
graph = next(pg.load_datasets_graph(["graph5"]))
with pytest.raises(Exception):
pg.GraphFilter().rank(graph)
with pytest.raises(Exception):
pg.RecursiveGraphFilter().rank(graph)
with pytest.raises(Exception):
pg.ClosedFormGraphFilter().rank(graph)
with pytest.raises(Exception):
pg.Tuner().rank(graph)
def test_filter_invalid_parameters():
graph = next(pg.load_datasets_graph(["graph5"]))
with pytest.raises(Exception):
pg.HeatKernel(normalization="unknown").rank(graph)
with pytest.raises(Exception):
pg.HeatKernel(coefficient_type="unknown").rank(graph)
def test_convergence_string_conversion():
# TODO: make convergence trackable from wrapping objects
graph = next(pg.load_datasets_graph(["graph5"]))
ranker = pg.PageRank()
ranker(graph)
assert str(ranker.convergence.iteration)+" iterations" in str(ranker.convergence)
def test_pagerank_vs_networkx():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
ranker = pg.Normalize("sum", pg.PageRank(normalization='col', tol=1.E-9))
test_result = ranker(graph)
test_result2 = nx.pagerank(graph, tol=1.E-9)
# TODO: assert that 2.5*epsilon is indeed a valid limit
assert pg.Mabs(test_result)(test_result2) < 2.5*pg.epsilon()
def test_prevent_node_lists_as_graphs():
graph = next(pg.load_datasets_graph(["graph5"]))
with pytest.raises(Exception):
pg.PageRank().rank(list(graph))
def test_non_convergence():
graph = next(pg.load_datasets_graph(["graph9"]))
with pytest.raises(Exception):
pg.PageRank(max_iters=5).rank(graph)
def test_custom_runs():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
ranks1 = pg.Normalize(pg.PageRank(0.85, tol=pg.epsilon(), max_iters=1000, use_quotient=False)).rank(graph, {"A": 1})
ranks2 = pg.Normalize(pg.GenericGraphFilter([0.85**i*len(graph) for i in range(80)], tol=pg.epsilon())).rank(graph, {"A": 1})
assert pg.Mabs(ranks1)(ranks2) < 1.E-6
def test_completion():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
pg.PageRank().rank(graph)
pg.HeatKernel().rank(graph)
pg.AbsorbingWalks().rank(graph)
pg.HeatKernel().rank(graph)
assert True
def test_quotient():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
test_result = pg.PageRank(normalization='symmetric', tol=max(1.E-9, pg.epsilon()), use_quotient=True).rank(graph)
norm_result = pg.PageRank(normalization='symmetric', tol=max(1.E-9, pg.epsilon()), use_quotient=pg.Normalize("sum")).rank(graph)
assert pg.Mabs(test_result)(norm_result) < pg.epsilon()
def test_automatic_graph_casting():
graph = next(pg.load_datasets_graph(["graph9"]))
for _ in supported_backends():
signal = pg.to_signal(graph, {"A": 1})
test_result1 = pg.PageRank(normalization='col').rank(signal, signal)
test_result2 = pg.PageRank(normalization='col').rank(personalization=signal)
assert pg.Mabs(test_result1)(test_result2) < pg.epsilon()
with pytest.raises(Exception):
pg.PageRank(normalization='col').rank(personalization={"A": 1})
with pytest.raises(Exception):
pg.PageRank(normalization='col').rank(graph.copy(), signal)
def test_absorbing_vs_pagerank():
graph = next(pg.load_datasets_graph(["graph9"]))
personalization = {"A": 1, "B": 1}
for _ in supported_backends():
pagerank_result = pg.PageRank(normalization='col').rank(graph, personalization)
absorbing_result = pg.AbsorbingWalks(0.85, normalization='col', max_iters=1000).rank(graph, personalization)
assert pg.Mabs(pagerank_result)(absorbing_result) < pg.epsilon()
def test_kernel_locality():
graph = next(pg.load_datasets_graph(["graph9"]))
personalization = {"A": 1, "B": 1}
for _ in supported_backends():
for kernel_algorithm in [pg.HeatKernel, pg.BiasedKernel]:
pagerank_result = pg.Normalize("sum", pg.PageRank(max_iters=1000)).rank(graph, personalization)
kernel_result = pg.Normalize("sum", kernel_algorithm(max_iters=1000)).rank(graph, personalization)
assert pagerank_result['A'] < kernel_result['A']
assert pagerank_result['I'] > kernel_result['I']
def test_optimization_dict():
from timeit import default_timer as time
graph = next(pg.load_datasets_graph(["bigraph"]))
personalization = {str(i): 1 for i in range(200)}
preprocessor = pg.preprocessor(assume_immutability=True)
preprocessor(graph)
tic = time()
for _ in range(10):
pg.ParameterTuner(preprocessor=preprocessor, tol=1.E-9).rank(graph, personalization)
unoptimized = time()-tic
optimization = dict()
tic = time()
for _ in range(10):
pg.ParameterTuner(optimization_dict=optimization, preprocessor=preprocessor, tol=1.E-9).rank(graph, personalization)
optimized = time() - tic
assert len(optimization) == 20
assert unoptimized > optimized
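# Sketch of how a new test would slot into this suite's backend-loop pattern:
# load a graph once, then repeat the assertion under every available backend.
def test_pagerank_sums_to_one():
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        ranks = pg.Normalize("sum", pg.PageRank())(graph)
        assert abs(pg.sum(ranks.np) - 1.) < 1.E-6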
b1d7b3ea3f8d942998560e953fec761fcb002a45 | 2,433 bytes | Python | procgen.py | tredfern/rdl2021-tutorial | MIT
# Copyright (c) 2021 Trevor Redfern
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from __future__ import annotations
from typing import Tuple, Iterator, List, TYPE_CHECKING
import random
import tcod
from game_map import GameMap
import tile_types
if TYPE_CHECKING:
from entity import Entity
class RectangularRoom:
def __init__(self, x: int, y: int, width: int, height: int) -> None:
self.x1 = x
self.y1 = y
self.x2 = x + width
self.y2 = y + height
@property
def center(self) -> Tuple[int, int]:
centerX = int((self.x1 + self.x2) / 2)
centerY = int((self.y1 + self.y2) / 2)
return centerX, centerY
@property
def inner(self) -> Tuple[slice, slice]:
return slice(self.x1 + 1, self.x2), slice(self.y1 + 1, self.y2)
def intersects(self, other: RectangularRoom) -> bool:
return (
self.x1 <= other.x2 and
self.x2 >= other.x1 and
self.y1 <= other.y2 and
self.y2 >= other.y1
)
def generateDungeon(
maxRooms: int,
roomMinSize: int,
roomMaxSize: int,
mapWidth: int,
mapHeight: int,
player: Entity) -> GameMap:
dungeon = GameMap(mapWidth, mapHeight)
rooms: List[RectangularRoom] = []
    for _ in range(maxRooms):
roomWidth = random.randint(roomMinSize, roomMaxSize)
roomHeight = random.randint(roomMinSize, roomMaxSize)
x = random.randint(0, dungeon.width - roomWidth - 1)
y = random.randint(0, dungeon.height - roomHeight - 1)
newRoom = RectangularRoom(x, y, roomWidth, roomHeight)
if any(newRoom.intersects(otherRoom) for otherRoom in rooms):
continue
dungeon.tiles[newRoom.inner] = tile_types.floor
if len(rooms) == 0:
player.x, player.y = newRoom.center
else:
for x, y in tunnelBetween(rooms[-1].center, newRoom.center):
dungeon.tiles[x, y] = tile_types.floor
rooms.append(newRoom)
return dungeon
def tunnelBetween(start: Tuple[int, int], end: Tuple[int, int]) -> Iterator[Tuple[int, int]]:
x1, y1 = start
x2, y2 = end
if random.random() < 0.5:
cornerX, cornerY = x2, y1
else:
cornerX, cornerY = x1, y2
for x, y in tcod.los.bresenham((x1, y1), (cornerX, cornerY)).tolist():
yield x, y
for x, y in tcod.los.bresenham((cornerX, cornerY), (x2, y2)).tolist():
yield x, y
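# Usage sketch: generate a dungeon and place the player in its first room.
# The Entity constructor arguments here are assumptions based on the usual
# roguelike-tutorial signature, not taken from this repository's entity.py.
player = Entity(x=0, y=0, char="@", color=(255, 255, 255))
dungeon = generateDungeon(
    maxRooms=30, roomMinSize=6, roomMaxSize=10,
    mapWidth=80, mapHeight=45, player=player,
)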
b1d821122ad47a7fa47c073b2ce27f383a3871d3 | 1,492 bytes | Python | examples/plot_simulate_bo.py | pmdaly/supereeg | MIT | 1 star
# -*- coding: utf-8 -*-
"""
=============================
Simulating a brain object
=============================
In this example, we demonstrate the simulate_bo function.
First, we'll simulate some example locations. Then we'll simulate a
brain object, specifying a noise parameter and the correlational structure
of the data (here a random covariance). We'll then subsample 90 of the
original brain object's locations.
"""
# Code source: Lucy Owen & Andrew Heusser
# License: MIT
import supereeg as se
from supereeg.helpers import _corr_column
import numpy as np
# simulate 100 locations
locs = se.simulate_locations(n_elecs=100)
# simulate brain object
bo = se.simulate_bo(n_samples=1000, sample_rate=100, cov='random', locs=locs, noise=.1)
# sample 90 locations, and get indices
sub_locs = locs.sample(90, replace=False).sort_values(['x', 'y', 'z']).index.values.tolist()
# index brain object to get sample patient
bo_sample = bo[:, sub_locs]
# plot sample patient locations
bo_sample.plot_locs()
# plot sample patient data
bo_sample.plot_data()
# make model from brain object
r_model = se.Model(data=bo, locs=locs)
# predict
bo_s = r_model.predict(bo_sample, nearest_neighbor=False)
# find indices for reconstructed locations
recon_labels = np.where(np.array(bo_s.label) != 'observed')
# find correlations between predicted and actual data
corrs = _corr_column(bo.get_data().values, bo_s.get_data().values)  # .values replaces the deprecated .as_matrix()
# index reconstructed correlations
corrs[recon_labels].mean()
b1d8cc75992fcd005adcc90ea90aa099fbd29007 | 5,031 bytes | Python | examples/fmanipulator.py | mateusmoutinho/python-cli-args | MIT
from io import TextIOWrapper
from typing import Optional
from cli_args_system import Args, FlagsContent
from sys import exit
HELP = """this is a basic file manipulator to demonstrate
args_system usage with file flags
-------------------flags----------------------------
-join: join the files passed and save in the --out flag
-replace: replace the text on file and save in the --out flag
if there is no out flag, it will save in the same file
-remove: remove the given text in the file
-------------------usage----------------------------
$ python3 fmanipulator.py -join a.txt b.txt -out c.txt
will join the content on a.txt and b.txt, and save in c.txt
$ python3 fmanipulator.py a.txt -replace a b
will replace the char a for char b in the a.txt file
$ python3 fmanipulator.py a.txt -replace a b -out b.txt
will replace the char a for char b and save in b.txt
$ python3 fmanipulator.py a.txt -r test
will remove the text: test in the file a.txt
$ python3 fmanipulator.py a.txt -r test -out b.txt
will remove the text: test in the file a.txt and save in b.txt"""
def exit_with_mensage(mensage:str):
"""kills the aplcation after printing the mensage \n
mensage: the mensage to print"""
print(mensage)
exit(1)
def get_file_text(args:Args) ->str:
"""returns the file text of args[0] (argv[0]) \n
args:The args Object"""
try:
with open(args[0],'r') as f:
return f.read()
except (FileNotFoundError,IndexError):
        # if the file text cannot be found, kill the application
exit_with_mensage(mensage='no file')
def get_out_wraper(args: Args, destroy_if_dont_find=True) -> Optional[TextIOWrapper]:
    """returns the out wrapper for the out[0] flag\n
    args: The args Object \n
    destroy_if_dont_find: if True it will destroy the application
    when it doesn't find the out[0] flag"""
out = args.flags_content('out','o','out-file','outfile','out_file')
if out.filled():
return open(out[0],'w')
else:
        # check whether to exit
if destroy_if_dont_find:
exit_with_mensage(mensage='not out file')
def write_text_in_out_file_or_same_file(text:str,args:Args):
"""write text in out flag if exist,
otherwhise write on same file args(0)\n
text: the text to write \n
args: The args Object \n
"""
out = get_out_wraper(args,destroy_if_dont_find=False)
#if out is not passed it replace in the same file
if out is None:
open(args[0],'w').write(text)
else:
#otherwise write in the out file
out.write(text)
def join_files(join:FlagsContent,args:Args):
"""join the files of join flag, in the out flag content
join: the join FlagsContent \n
args: The args Object"""
if len(join) < 2:
        print('must be at least 2 files')
exit(1)
full_text = ''
#make a iteration on join flag
for file_path in join:
try:
#try to open and add in the full text, the content of
#file path
with open(file_path,'r') as file:
full_text+=file.read()
except FileNotFoundError:
print(f'file {file_path} not exist')
exit(1)
#write the changes in the out file
get_out_wraper(args).write(full_text)
def replace_elements(replace:FlagsContent,args:Args):
"""replace in file (args[0) with replace[0] to replace[1]
replace: the replace FlagsContent
args: The args Object
"""
if len(replace) != 2:
        exit_with_mensage(mensage='must be two elements to replace')
#get the file of args[0]
file = get_file_text(args)
#make the replace
replaced_text = file.replace(replace[0],replace[1])
write_text_in_out_file_or_same_file(text=replaced_text,args=args)
def remove_text(remove:FlagsContent,args:Args):
"""this function remove the text in passed in the remove flags \n
remove: the remove FlagsContent \n
args: The args Object """
if not remove.filled():
exit_with_mensage('not text to remove')
text_file = get_file_text(args)
#goes in a iteration in remove flags
for text in remove:
text_file = text_file.replace(text,'')
write_text_in_out_file_or_same_file(text=text_file,args=args)
if __name__ == '__main__':
#construct the args
args = Args(convert_numbers=False)
#for help flag
help = args.flags_content('h','help')
if help.exist():
        print(HELP)
        exit(0)
join = args.flags_content('join','j')
#if join flag exist, call the join_files
if join.exist():
join_files(join,args)
replace = args.flags_content('replace','substitute')
#if replace flag exist call the replace_elements function
if replace.exist():
replace_elements(replace,args)
remove = args.flags_content('r','remove','pop')
#if remove flag exist call the remove_text
if remove.exist():
remove_text(remove,args)
b1dc9ba592a6ef41c372eaa2cd477c8b9c68c9a0 | 7,289 bytes | Python | src/Navigate.py | Qu-Xiangjun/CQU_NK_Research_Project | MIT | 1 star
"""
@Author: Qu Xiangjun
@Time: 2021.01.26
@Describe: 此文件负责根据雷达数据进行导航的线程类定义
"""
import socket
import time
from threading import Thread
import threading
import numpy as np
# Python 3.8.0 64-bit (32-bit Python requires the 32-bit DLL)
from ctypes import *
from Navigation_help import *
from Can_frame_help import *
VCI_USBCAN2 = 4  # device type: USBCAN-2A, USBCAN-2C or CANalyst-II
STATUS_OK = 1
# Data type for initializing the CAN interface
class VCI_INIT_CONFIG(Structure):
    _fields_ = [("AccCode", c_uint),   # acceptance code for receive filtering
                ("AccMask", c_uint),   # acceptance mask for receive filtering
                ("Reserved", c_uint),
                ("Filter", c_ubyte),   # filter mode: 0/1 accept all frames, 2 standard-frame filter, 3 extended-frame filter
                # 500 kbps: Timing0=0x00 Timing1=0x1C
                ("Timing0", c_ubyte),  # baud rate parameter 0 (see the SDK manual for details)
                ("Timing1", c_ubyte),  # baud rate parameter 1
                ("Mode", c_ubyte)      # mode: 0 normal, 1 listen-only, 2 self-test
                ]
# Data type for a CAN message frame
class VCI_CAN_OBJ(Structure):
    _fields_ = [("ID", c_uint),
                ("TimeStamp", c_uint),    # timestamp
                ("TimeFlag", c_ubyte),    # whether the timestamp is used
                ("SendType", c_ubyte),    # send flag (reserved, unused)
                ("RemoteFlag", c_ubyte),  # remote frame?
                ("ExternFlag", c_ubyte),  # extended frame?
                ("DataLen", c_ubyte),     # data length
                ("Data", c_ubyte*8),      # data
                ("Reserved", c_ubyte*3)   # reserved
                ]
CanDLLName = './ControlCAN.dll'  # place the DLL in this directory
canDLL = windll.LoadLibrary('./ControlCAN.dll')
# On Linux use the line below instead; run with: python3 python3.8.0.py
#canDLL = cdll.LoadLibrary('./libcontrolcan.so')
class Navigate_Thread(threading.Thread):
"""
导航线程
"""
def __init__(self,thread_draw_lidar, socket_server_thread):
"""
:param thread_draw_lidar: 绘画雷达图线程类实例
:param socket_server_thread: 远程Socket传输数据类实例
"""
threading.Thread.__init__(self) # 初始化父类
# 绘制雷达
self.thread_draw_lidar = thread_draw_lidar
# 改变雷达数据远程传输线程内容
self.socket_server_thread = socket_server_thread
def run(self):
"""
Can接口连接scout——mini 底盘
"""
# 打开设备
ret = canDLL.VCI_OpenDevice(VCI_USBCAN2, 0, 0)
if ret == STATUS_OK:
print('调用 VCI_OpenDevice成功\r\n')
if ret != STATUS_OK:
print('调用 VCI_OpenDevice出错\r\n')
# 初始0通道
vci_initconfig = VCI_INIT_CONFIG(0x80000008, 0xFFFFFFFF, 0,
0, 0x00, 0x1C, 0) # 波特率500k,正常模式
ret = canDLL.VCI_InitCAN(VCI_USBCAN2, 0, 0, byref(vci_initconfig))
if ret == STATUS_OK:
print('调用 VCI_InitCAN0成功\r\n')
if ret != STATUS_OK:
print('调用 VCI_InitCAN0出错\r\n')
# 开启通道
ret = canDLL.VCI_StartCAN(VCI_USBCAN2, 0, 0)
if ret == STATUS_OK:
print('调用 VCI_StartCAN0成功\r\n')
if ret != STATUS_OK:
print('调用 VCI_StartCAN0出错\r\n')
# 设置底盘为指令控制模式
ret = canDLL.VCI_Transmit(
VCI_USBCAN2, 0, 0, byref(get_start_controller_inst()), 1)
if ret == STATUS_OK:
print('CAN1通道发送成功\r\n')
if ret != STATUS_OK:
print('CAN1通道发送失败\r\n')
'''
socket配置
'''
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("localhost", 8888)) # 服务器端,将Socket与网络地址和端口绑定起来,
server.listen(0) # backlog 指定最大的连接数
connection, address = server.accept()
print("socket connect:", connection)
print("socket ip address:", address)
global lidar_data_list
lidar_data_list = [0 for i in range(1536)] # 初始化
register_direct = 0 # 记忆上一次转动方向,1位左,0位前进,-1位右
        '''
        Run the navigation loop
        '''
while True:
# get lidar data
            try:
                recv_str = connection.recv(9216)  # 1536 readings, 6 bytes each
            except ConnectionResetError:
                print("[ConnectionResetError] Lost lidar socket connection.")
                break
            # recv_str = str(recv_str) does not work here: it keeps the b'' prefix
            recv_str = recv_str.decode("GBK")  # type(recv_str) = str
            lidar_data_bytes = recv_str.split(",")
            lidar_data_bytes = lidar_data_bytes[0:-1]
            dirty_count = 0
            for i in range(len(lidar_data_bytes)):  # 1536 readings
                lidar_data_bytes[i] = int(lidar_data_bytes[i])  # unit: millimetres
                if lidar_data_bytes[i] == 0:
                    if i == 0:  # ignore the very first reading
                        lidar_data_bytes[i] = 0
                    else:
                        lidar_data_bytes[i] = lidar_data_bytes[i-1]
                        dirty_count += 1
            for i in range(125):
                lidar_data_bytes[i] = 0
            for i in range(1411, 1536):
                lidar_data_bytes[i] = 0
            lidar_data_list = lidar_data_bytes
            # if dirty_count > 200:  # too many dirty points; raise an error past this limit
            #     print("[WARNING] Lidar is very dirty.")
            #     exit(1)
            print("lidar_data_list", lidar_data_list)
            # report malformed frames
            if len(lidar_data_list) != 1536:
                print("[ERROR] Lidar frame's length is not 1536*6 bytes.")
                continue
            # write to a file to inspect the data
            # f = open('test.txt', 'w')
            # f.write(str(lidar_data_list))
            # f.close()
            self.thread_draw_lidar.lidar_data_list = lidar_data_list  # update the plotting thread's lidar data
            self.socket_server_thread.lidar_data_list = lidar_data_list  # update the sender thread's lidar data
# get direction
            best_direction = navigate(lidar_data_list)  # direction produced by the navigator
            print("best_direction", best_direction)
            # time.sleep(1)
            # send the control command to the vehicle
            if best_direction is None:
                # no viable direction: rotate in place to search for one
                best_direction = 5
                register_direct = 1
                ret = canDLL.VCI_Transmit(
                    VCI_USBCAN2, 0, 0, get_move_inst(best_direction, 0), 1)
                if ret == STATUS_OK:
                    print('CAN1 channel send succeeded\r\n')
                if ret != STATUS_OK:
                    print('CAN1 channel send failed\r\n')
                continue
            # remember the turn direction
            if register_direct == -1:  # was turning right
                if best_direction == 0:
                    register_direct = 0
                else:
                    best_direction = -5
            elif register_direct == 1:  # was turning left
                if best_direction == 0:
                    register_direct = 0
                else:
                    best_direction = 5
            else:
                if best_direction < 0:
                    register_direct = -1
                    best_direction = -5
                elif best_direction > 0:
                    register_direct = 1
                    best_direction = 5
                else:
                    register_direct = 0
            for i in range(1):  # one send is enough; loop more times here for stronger control
                ret = canDLL.VCI_Transmit(VCI_USBCAN2, 0, 0, get_move_inst(
                    best_direction, best_speed=default_best_speed), 1)
                if ret == STATUS_OK:
                    print('CAN1 channel send succeeded\r\n')
                if ret != STATUS_OK:
                    print('CAN1 channel send failed\r\n')
connection.close()
b1e3076f57089de6bfe7eeff45ef0b802cbca8fa | 5,057 bytes | Python | superviselySDK/supervisely_lib/geometry/bitmap_base.py | nicehuster/mmdetection-supervisely-person-datasets | Apache-2.0 | 40 stars
# coding: utf-8
import numpy as np
from supervisely_lib.geometry.constants import DATA, ORIGIN
from supervisely_lib.geometry.geometry import Geometry
from supervisely_lib.geometry.point_location import PointLocation
from supervisely_lib.geometry.rectangle import Rectangle
from supervisely_lib.imaging.image import resize_inter_nearest, restore_proportional_size
# TODO: rename to resize_bitmap_and_origin
def resize_origin_and_bitmap(origin: PointLocation, bitmap: np.ndarray, in_size, out_size):
new_size = restore_proportional_size(in_size=in_size, out_size=out_size)
row_scale = new_size[0] / in_size[0]
col_scale = new_size[1] / in_size[1]
# TODO: Double check (+restore_proportional_size) or not? bitmap.shape and in_size are equal?
# Make sure the resulting size has at least one pixel in every direction (i.e. limit the shrinkage to avoid having
# empty bitmaps as a result).
scaled_rows = max(round(bitmap.shape[0] * row_scale), 1)
scaled_cols = max(round(bitmap.shape[1] * col_scale), 1)
scaled_origin = PointLocation(row=round(origin.row * row_scale), col=round(origin.col * col_scale))
scaled_bitmap = resize_inter_nearest(bitmap, (scaled_rows, scaled_cols))
return scaled_origin, scaled_bitmap
class BitmapBase(Geometry):
def __init__(self, data: np.ndarray, origin: PointLocation=None, expected_data_dims=None):
"""
:param origin: PointLocation
:param data: np.ndarray
"""
if origin is None:
origin = PointLocation(row=0, col=0)
if not isinstance(origin, PointLocation):
raise TypeError('BitmapBase "origin" argument must be "PointLocation" object!')
if not isinstance(data, np.ndarray):
raise TypeError('BitmapBase "data" argument must be numpy array object!')
data_dims = len(data.shape)
if expected_data_dims is not None and data_dims != expected_data_dims:
raise ValueError('BitmapBase "data" argument must be a {}-dimensional numpy array. '
'Instead got {} dimensions'.format(expected_data_dims, data_dims))
self._origin = origin.clone()
self._data = data.copy()
@classmethod
def _impl_json_class_name(cls):
"""Descendants must implement this to return key string to look up serialized representation in a JSON dict."""
raise NotImplementedError()
@staticmethod
def base64_2_data(s: str) -> np.ndarray:
raise NotImplementedError()
@staticmethod
def data_2_base64(data: np.ndarray) -> str:
raise NotImplementedError()
def to_json(self):
return {
self._impl_json_class_name(): {
ORIGIN: [self.origin.col, self.origin.row],
DATA: self.data_2_base64(self.data)
}
}
@classmethod
def from_json(cls, json_data):
json_root_key = cls._impl_json_class_name()
if json_root_key not in json_data:
raise ValueError(
'Data must contain {} field to create MultichannelBitmap object.'.format(json_root_key))
if ORIGIN not in json_data[json_root_key] or DATA not in json_data[json_root_key]:
raise ValueError('{} field must contain {} and {} fields to create MultichannelBitmap object.'.format(
json_root_key, ORIGIN, DATA))
col, row = json_data[json_root_key][ORIGIN]
data = cls.base64_2_data(json_data[json_root_key][DATA])
return cls(data=data, origin=PointLocation(row=row, col=col))
@property
def origin(self) -> PointLocation:
return self._origin.clone()
@property
def data(self) -> np.ndarray:
return self._data.copy()
def translate(self, drow, dcol):
translated_origin = self.origin.translate(drow, dcol)
return self.__class__(data=self.data, origin=translated_origin)
def fliplr(self, img_size):
flipped_mask = np.flip(self.data, axis=1)
flipped_origin = PointLocation(row=self.origin.row, col=(img_size[1] - flipped_mask.shape[1] - self.origin.col))
return self.__class__(data=flipped_mask, origin=flipped_origin)
def flipud(self, img_size):
flipped_mask = np.flip(self.data, axis=0)
flipped_origin = PointLocation(row=(img_size[0] - flipped_mask.shape[0] - self.origin.row), col=self.origin.col)
return self.__class__(data=flipped_mask, origin=flipped_origin)
def scale(self, factor):
new_rows = round(self._data.shape[0] * factor)
new_cols = round(self._data.shape[1] * factor)
mask = self._resize_mask(self.data, new_rows, new_cols)
origin = self.origin.scale(factor)
return self.__class__(data=mask, origin=origin)
@staticmethod
def _resize_mask(mask, out_rows, out_cols):
        return resize_inter_nearest(mask.astype(np.uint8), (out_rows, out_cols)).astype(bool)  # np.bool was removed from numpy
def to_bbox(self):
return Rectangle.from_array(self._data).translate(drow=self._origin.row, dcol=self._origin.col)
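# Sketch of the minimal subclass contract (not from the SDK): descendants
# supply the JSON key plus the two base64 codecs. The raw-bytes codecs below
# are illustrative placeholders; Supervisely's real Bitmap uses compressed
# image encoding instead.
import base64

class ToyBitmap(BitmapBase):
    @classmethod
    def _impl_json_class_name(cls):
        return 'toyBitmap'

    @staticmethod
    def base64_2_data(s: str) -> np.ndarray:
        return np.frombuffer(base64.b64decode(s), dtype=np.uint8)

    @staticmethod
    def data_2_base64(data: np.ndarray) -> str:
        return base64.b64encode(data.tobytes()).decode('utf-8')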
b1edfb7e986ee60ac0da1a869a4e400f7398c3fe | 1,492 bytes | Python | app/display_modules/ags/tests/test_tasks.py | MetaGenScope/metagenscope-server | MIT
"""Test suite for Average Genome Size tasks."""
from app.display_modules.ags.ags_tasks import boxplot, ags_distributions
from app.samples.sample_models import Sample
from app.tool_results.microbe_census.tests.factory import create_microbe_census
from tests.base import BaseTestCase
class TestAverageGenomeSizeTasks(BaseTestCase):
"""Test suite for Average Genome Size tasks."""
def test_boxplot(self):
"""Ensure boxplot method creates correct boxplot."""
values = [37, 48, 30, 53, 3, 83, 19, 71, 90, 16, 19, 7, 11, 43, 43]
result = boxplot(values)
self.assertEqual(3, result['min_val'])
self.assertEqual(17.5, result['q1_val'])
self.assertEqual(37, result['mean_val'])
self.assertEqual(50.5, result['q3_val'])
self.assertEqual(90, result['max_val'])
def test_ags_distributions(self):
"""Ensure ags_distributions task works."""
def create_sample(i):
"""Create test sample."""
metadata = {'foo': f'bar{i}'}
return Sample(name=f'SMPL_{i}',
metadata=metadata,
microbe_census=create_microbe_census())
samples = [create_sample(i).fetch_safe() for i in range(15)]
result = ags_distributions.delay(samples).get()
self.assertIn('foo', result)
self.assertIn('bar0', result['foo'])
self.assertIn('bar1', result['foo'])
self.assertIn('min_val', result['foo']['bar0'])
b1efcf80cebb01dff50a1e2a45ff4368cec1958a | 4,428 bytes | Python | metrics.py | efratkohen/Project | MIT | 1 star
import traceback
import numpy as np
from matplotlib import pyplot, pyplot as plt
from sklearn.metrics import (
mean_squared_error,
median_absolute_error,
roc_curve,
auc,
f1_score,
precision_recall_curve,
r2_score,
)
from sklearn.metrics import confusion_matrix
import column_labeler as clabel
from math import sqrt
def calc_best_f1(Ytest, Yhat, selected_value=clabel.AMMONIA):
max_val = 0
best_i = 0
for i in range(1, 100):
accuracy = f1_score(Ytest, (Yhat > 0.01 * i).astype(int))
if accuracy > max_val:
max_val = accuracy
best_i = i
f1_score(Ytest, (Yhat > 0.01 * best_i).astype(int))
return max_val
def calc_rmse(Ytest, Yhat, graph=(20, 15)):
rmse = sqrt(mean_squared_error(Ytest, Yhat))
if graph:
print("RMSE", rmse)
pyplot.figure(figsize=graph)
pyplot.plot(Yhat, label="predictions")
pyplot.plot(Ytest, label="real")
pyplot.legend()
# import datetime
pyplot.show()
# pyplot.savefig("Images\\%s" % str(datetime.datetime.now()))
return rmse
def calc_mape(Ytest, Yhat, graph=True):
return np.mean(np.abs((Ytest - Yhat) / Ytest)) * 100
def calc_mae(Ytest, Yhat, graph=True):
return median_absolute_error(Ytest, Yhat)
def calc_rsquared(Ytest, Yhat, graph=True):
# R-squared
return r2_score(Ytest, Yhat)
def calc_tp_fp_rate(Ytest, Yhat, selected_value, binary=False, graph=True):
global y_not_bad_real, y_not_bad_hat
if binary:
y_not_bad_hat = Yhat.astype(int)
y_not_bad_real = Ytest.astype(int)
else:
mdict = clabel.limits[selected_value]
good_limit = mdict[clabel.GOOD]
not_bad_limit = mdict[clabel.NOT_BAD]
y_good_hat = Yhat > good_limit
y_good_real = Ytest > good_limit
y_not_bad_hat = Yhat > not_bad_limit
y_not_bad_real = Ytest > not_bad_limit
if graph:
print(confusion_matrix(y_not_bad_real, y_not_bad_hat))
res = confusion_matrix(y_not_bad_real, y_not_bad_hat).ravel()
if len(res) > 1:
return res
return res[0], 0, 0, 0
def calc_best_accuracy(Ytest, Yhat, selected_value=clabel.AMMONIA):
max_val = 0
best_i = 0
for i in range(1, 100):
tn, fp, fn, tp = calc_tp_fp_rate(
Ytest,
(Yhat > 0.01 * i).astype(int),
selected_value=selected_value,
binary=True,
graph=False,
)
accuracy = (tn + tp) / (tn + fp + fn + tp)
if accuracy > max_val:
max_val = accuracy
best_i = i
calc_tp_fp_rate(
Ytest,
(Yhat > 0.01 * best_i).astype(int),
selected_value=selected_value,
binary=True,
graph=True,
)
return max_val
def roc(Ytest, Yhat, graph=False):
fpr, tpr, threshold = roc_curve(Ytest, Yhat)
roc_auc = auc(fpr, tpr)
# method I: plt
if graph:
pyplot.title("Receiver Operating Characteristic")
pyplot.plot(fpr, tpr, "b", label="AUC = %0.2f" % roc_auc)
pyplot.legend(loc="lower right")
pyplot.plot([0, 1], [0, 1], "r--")
pyplot.xlim([0, 1])
pyplot.ylim([0, 1])
pyplot.ylabel("True Positive Rate")
pyplot.xlabel("False Positive Rate")
pyplot.show()
return fpr, tpr, threshold, roc_auc
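# A minimal usage sketch (hypothetical labels and scores) for roc(); the values
# below are the classic four-point example, whose AUC is 0.75:
# >>> fpr, tpr, thr, roc_auc = roc(np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8]))
# >>> round(roc_auc, 2)
# 0.75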
def calc_histogram(Ytest, Yhat):
plt.figure(figsize=(15, 4))
plt.hist(Ytest.flatten(), bins=100, color="orange", alpha=0.5, label="true")
plt.hist(Yhat.flatten(), bins=100, color="green", alpha=0.5, label="pred")
plt.legend()
plt.title("value distribution")
plt.show()
def calc_precision_recall(Ytest, Yhat, threshold=0.002, graph=True):
lr_precision, lr_recall, _ = precision_recall_curve(Ytest, Yhat)
try:
lr_f1 = f1_score(Ytest, (Yhat > threshold).astype(int))
except Exception:
traceback.print_exc()
lr_f1 = 1
lr_auc = auc(lr_recall, lr_precision)
if graph:
pyplot.title("Receiver Operating Characteristic")
pyplot.plot(
lr_recall,
lr_precision,
"b",
label="F1 = %0.2f , AUC = %0.2f" % (lr_f1, lr_auc),
)
pyplot.legend(loc="lower right")
pyplot.xlim([0, 1])
pyplot.ylim([0, 1])
pyplot.ylabel("Precision")
pyplot.xlabel("Recall")
pyplot.show()
return lr_f1, lr_auc
| 28.203822
| 80
| 0.613144
| 622
| 4,428
| 4.155949
| 0.215434
| 0.073114
| 0.027079
| 0.021277
| 0.34352
| 0.301741
| 0.288201
| 0.255706
| 0.217408
| 0.17176
| 0
| 0.026535
| 0.268067
| 4,428
| 156
| 81
| 28.384615
| 0.771058
| 0.022358
| 0
| 0.275591
| 0
| 0
| 0.054579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07874
| false
| 0
| 0.055118
| 0.023622
| 0.212598
| 0.023622
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1f203c60f7518be9918994e126f2868a0f76ed4
| 30,681
|
py
|
Python
|
main.py
|
RohiBaner/Beijing-Air-Quality-Prediction
|
4ec823ceacef1b61e1c1e5689a97a1335e4b5867
|
[
"MIT"
] | 3
|
2019-09-23T10:04:05.000Z
|
2021-03-10T12:12:28.000Z
|
main.py
|
RohiBaner/Beijing-Air-Quality-Prediction
|
4ec823ceacef1b61e1c1e5689a97a1335e4b5867
|
[
"MIT"
] | null | null | null |
main.py
|
RohiBaner/Beijing-Air-Quality-Prediction
|
4ec823ceacef1b61e1c1e5689a97a1335e4b5867
|
[
"MIT"
] | null | null | null |
''' --------------------------------------------IMPORTING NECESSARY LIBRARIES------------------------------------------- '''
import numpy as np
import pandas as pd
from math import radians, cos, sin, asin, sqrt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from itertools import cycle
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import time
start_time = time.time()
pd.options.mode.chained_assignment = None # default='warn'
''' ---------------------------FUNCTIONS TO FIND NEAREST DISTANCE BETWEEN ALL NECESSARY STATIONS------------------------ '''
# Function to find nearest station between two points using Haversine Distance
def haversine_dist(lon1, lat1, lon2, lat2):
# Calculate the great circle distance between two points on the earth
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # Convert to radians
# Haversine distance formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371  # Radius of the Earth in kilometers
return c * r
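# A quick sanity check (hypothetical coordinates): two points one degree of
# latitude apart on the same meridian are roughly 111 km apart. Note that the
# argument order is (lon1, lat1, lon2, lat2):
# >>> round(haversine_dist(116.0, 39.0, 116.0, 40.0))
# 111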
# Find nearest AQ to AQ station
def near_aq_to_aq(lat, long):
distances = station_aq.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
distance = distances[distances!=0]
return station_aq.loc[distance.idxmin(), 'station']
# Find nearest GW to GW station
def near_gw_to_gw(lat, long):
distances = gw_station.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
distance = distances[distances!=0]
return gw_station.loc[distance.idxmin(), 'station_id']
# Find nearest OBW to OBW station
def near_obw_to_obw(lat, long):
distances = obw_station.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
distance = distances[distances!=0]
return obw_station.loc[distance.idxmin(), 'station_id']
# Find nearest AQ to OBW station
def near_aq_to_obw(lat, long):
distances = obw_station.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
return obw_station.loc[distances.idxmin(), 'station_id']
# Find nearest AQ to GW station
def near_aq_to_gw(lat, long):
distances = gw_station.apply(lambda row: haversine_dist(long, lat, row['longitude'], row['latitude']), axis=1)
return gw_station.loc[distances.idxmin(), 'station_id']
# Function to calculate the model error via SMAPE
def smape(actual, predicted):
dividend = np.abs(np.array(actual, dtype=float) - np.array(predicted, dtype=float))
denominator = np.array(actual, dtype=float) + np.array(predicted, dtype=float)
return 2 * np.mean(np.divide(dividend, denominator, out=np.zeros_like(dividend), where=denominator != 0))
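# A small worked example (hypothetical values) for smape(): with
# actual = [100, 200] and predicted = [110, 180] the terms are 10/210 and
# 20/380, so the result is 2 * mean(0.04762, 0.05263) ~= 0.1003 (about 10%):
# >>> round(smape([100, 200], [110, 180]), 4)
# 0.1003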
''' ------------------------------------------TRAIN: AIR QUALITY PREPROCESSING------------------------------------------ '''
print('Preprocessing and cleaning the train Air Quality Dataset!')
# Read all the air quality datasets
aq_2017 = pd.read_csv("airQuality_201701-201801.csv")
aq_2018 = pd.read_csv("airQuality_201802-201803.csv")
aq_2018a = pd.read_csv("aiqQuality_201804.csv")
# Renaming the header of April AQ dataset to match the other AQ datasets
aq_2018a.rename(columns={'station_id': 'stationId', 'time': 'utc_time', 'PM25_Concentration':'PM2.5'\
,'PM10_Concentration':'PM10','NO2_Concentration':'NO2'\
,'CO_Concentration':'CO', 'O3_Concentration':'O3'\
,'SO2_Concentration':'SO2'}, inplace=True)
aq_2018a= aq_2018a.drop(columns=['id'], axis=1)
# Merge all AQ datasets together into a single dataframe
aq_train = aq_2017.append(aq_2018, ignore_index=True)
aq_train = aq_train.append(aq_2018a, ignore_index=True)
# Convert the entire 'utc_time' column into the same format
aq_train["utc_time"] = pd.to_datetime(aq_train["utc_time"])
# Delete unnecessary dataframes to save space
del(aq_2017)
del(aq_2018)
del(aq_2018a)
# Set the time column as the index of the dataframe
aq_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the AQ dataframe
min_date=aq_train.index.min()
max_date=aq_train.index.max()
# Drop any duplicates present in the AQ dataframe
aq_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the AQ station location file and find nearest station for each AQ station
# This dataset was created by us
station_aq = pd.read_csv("Beijing_AirQuality_Stations.csv")
station_aq["nearest_station"] = station_aq.apply(lambda row: near_aq_to_aq(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
time_hours = pd.DataFrame({"date": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all AQ stations and the above dataframe
aq_all_time = pd.merge(time_hours.assign(key=0), station_aq.assign(key=0), on='key').drop('key', axis=1)
# Join the AQ dataset with the dataframe containing all the timestamps for each AQ station
aq_train1 = pd.merge(aq_train, aq_all_time, how='right', left_on=['stationId','utc_time'], right_on = ['station','date'])
aq_train1 = aq_train1.drop('stationId', axis=1)
aq_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest AQ station (same time interval)
aq_train_copy = aq_train1.copy()
aq_train_copy = aq_train_copy.drop(['nearest_station','longitude', 'latitude', 'type'], axis=1)
aq_train_copy.rename(columns={'PM2.5': 'n_PM2.5','PM10': 'n_PM10', "NO2":"n_NO2","CO":"n_CO","O3":"n_O3",
"SO2":"n_SO2", "date":"n_date", "station":"n_station" }, inplace=True)
# Merge original AQ data and the copy AQ data to get all attributes of a particular AQ station and its nearest AQ station
aq_train2 = pd.merge(aq_train1, aq_train_copy, how='left', left_on=['nearest_station','date'], right_on = ['n_station','n_date'])
# Sort the final dataframe based on AQ station and then time
aq_train2 = aq_train2.sort_values(by=['n_station', 'date'], ascending=[True,True])
aq_train2 = aq_train2.reset_index(drop=True)
# Drop all unnecessary attributes
aq_train2.drop(['n_station', 'longitude', 'latitude', 'n_date'], axis=1, inplace=True)
# Create two attributes - month and hour
aq_train2['month'] = pd.DatetimeIndex(aq_train2['date']).month
aq_train2['hour'] = pd.DatetimeIndex(aq_train2['date']).hour
# Fill in missing values of attributes with their corresponding values in the nearest AQ station (within same time)
aq_train2['PM10'].fillna(aq_train2['n_PM10'], inplace=True)
aq_train2['PM2.5'].fillna(aq_train2['n_PM2.5'], inplace=True)
aq_train2['NO2'].fillna(aq_train2['n_NO2'], inplace=True)
aq_train2['CO'].fillna(aq_train2['n_CO'], inplace=True)
aq_train2['O3'].fillna(aq_train2['n_O3'], inplace=True)
aq_train2['SO2'].fillna(aq_train2['n_SO2'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
aq_train2[['PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']] = aq_train2.groupby(["station","month","hour"])[['PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']].transform(lambda x: x.fillna(x.mean()))
# Create final AQ dataset after dropping all unnecessary attributes
aq_train_final = aq_train2.drop(['type','nearest_station','n_PM2.5','n_PM10','n_NO2','n_CO','n_O3','n_SO2'],axis=1)
# Delete unnecessary dataframes to save space
del(aq_train1)
del(aq_train2)
del(aq_train_copy)
del(aq_all_time)
print('Done!')
print('-'*50)
''' ------------------------------------------TRAIN: GRID DATASET PREPROCESSING------------------------------------------ '''
print('Preprocessing and cleaning the train Grid Weather Dataset!')
# Read all the grid weather train datasets
gw_2017 = pd.read_csv("gridWeather_201701-201803.csv")
gw_2018 = pd.read_csv("gridWeather_201804.csv")
# Renaming the headers of the GW data to match each other
gw_2017.rename(columns={'stationName': 'station_id', 'wind_speed/kph': 'wind_speed'}, inplace=True)
gw_2018.rename(columns={'station_id':'station_id', 'time':'utc_time'}, inplace=True)
# Merge all GW train datasets into a single dataframe
gw_train = gw_2017.append(gw_2018, ignore_index=True)
gw_train = gw_train.drop(columns=['id','weather'], axis=1)
# Delete unnecessary dataframes to save space
del(gw_2017)
del(gw_2018)
# Set the time column as the index of the dataframe
gw_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the GW dataframe
min_date = gw_train.index.min()
max_date = gw_train.index.max()
# Drop any duplicates present in the GW dataframe
gw_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the GW station location file and find nearest station for each GW station
gw_station = pd.read_csv("Beijing_grid_weather_station.csv", header=None, names=['station_id','latitude','longitude'])
gw_station["nearest_station"] = gw_station.apply(lambda row: near_gw_to_gw(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
gw_time_hours = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all GW stations and the above dataframe
gw_all_time = pd.merge(gw_time_hours.assign(key=0), gw_station.assign(key=0), on='key').drop('key', axis=1)
gw_all_time['time'] = gw_all_time['time'].astype(str) # Make all time stamps in the same format
# Join the GW dataset with the dataframe containing all the timestamps for each GW station
gw_train1 = pd.merge(gw_train, gw_all_time, how='right', left_on=['station_id','utc_time'], right_on = ['station_id','time'])
gw_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest GW station (same time interval)
gw_train_copy = gw_train1.copy()
gw_train_copy.drop(['nearest_station','longitude_x', 'latitude_y','latitude_x','longitude_y'], axis=1, inplace=True)
gw_train_copy.rename(columns={'humidity': 'n_humidity','pressure': 'n_pressure', "temperature":"n_temperature",\
"wind_direction":"n_wind_dir","wind_speed":"n_wind_speed",\
"time":"n_time", "station_id":"n_station_id" }, inplace=True)
# Merge original GW data and the copy GW data to get all attributes of a particular GW station and its nearest GW station
gw_train2 = pd.merge(gw_train1, gw_train_copy, how='left', left_on=['nearest_station','time'], right_on = ['n_station_id','n_time'])
# Sort the final dataframe based on GW station and then time
gw_train2 = gw_train2.sort_values(by=['station_id', 'time'], ascending=[True,True])
gw_train2 = gw_train2.reset_index(drop=True)
# Drop all unnecessary attributes
gw_train2.drop(['n_station_id', 'n_time','longitude_x', 'latitude_y','latitude_x','longitude_y'], axis=1, inplace=True)
# Create two attributes - month and hour
gw_train2['month'] = pd.DatetimeIndex(gw_train2['time']).month
gw_train2['hour'] = pd.DatetimeIndex(gw_train2['time']).hour
# Fill in missing values of attributes with their corresponding values in the nearest GW station (within same time)
gw_train2['humidity'].fillna(gw_train2['n_humidity'], inplace=True)
gw_train2['pressure'].fillna(gw_train2['n_pressure'], inplace=True)
gw_train2['temperature'].fillna(gw_train2['n_temperature'], inplace=True)
gw_train2['wind_speed'].fillna(gw_train2['n_wind_speed'], inplace=True)
gw_train2['wind_direction'].fillna(gw_train2['n_wind_dir'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
gw_train2[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']] = gw_train2.groupby(["station_id","month","hour"])[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']].transform(lambda x: x.fillna(x.mean()))
# Create final GW dataset after dropping all unnecessary attributes
gw_train_final = gw_train2.drop(['nearest_station','n_humidity','n_pressure','n_temperature','n_wind_dir','n_wind_speed'],axis=1)
# Delete unnecessary dataframes to save space
del(gw_train1)
del(gw_train2)
del(gw_train_copy)
del(gw_all_time)
print('Done!')
print('-'*50)
''' -----------------------------------TRAIN: OBSERVED WEATHER DATASET PREPROCESSING------------------------------------ '''
print('Preprocessing and cleaning the train Observed Weather Dataset!')
# Read all the observed weather train datasets
obw_2017 = pd.read_csv("observedWeather_201701-201801.csv")
obw_2018 = pd.read_csv("observedWeather_201802-201803.csv")
obw_2018a = pd.read_csv("observedWeather_201804.csv")
obw_2018a.rename(columns={'time': 'utc_time'}, inplace=True)
# Read the time stamp in the April observed weather data in the same format as the other datasets
#obw_2018a['utc_time'] = pd.to_datetime(obw_2018a['utc_time'], format='%d-%m-%Y %H:%M:%S')
obw_2018a['utc_time'] = obw_2018a['utc_time'].astype(str)
# Merge all OBW train datasets into a single dataframe
obw_train = obw_2017.append(obw_2018, ignore_index=True)
obw_train = obw_train.append(obw_2018a, ignore_index=True)
obw_train.drop(['id','weather'],axis=1, inplace=True) # Drop unnecessary columns
# Delete unnecessary dataframes to save space
del(obw_2017)
del(obw_2018)
del(obw_2018a)
# Set the time column as the index of the dataframe
obw_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the OBW dataframe
min_date = obw_train.index.min()
max_date = obw_train.index.max()
# Drop any duplicates present in the OBW dataframe
obw_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the OBW station location file
obw_station = obw_train[["station_id","latitude","longitude"]]
obw_station = obw_station.drop_duplicates().dropna()
obw_station = obw_station.reset_index(drop=True)
# Find nearest station for each OBW station
obw_station["nearest_station"] = obw_station.apply(lambda row: near_obw_to_obw(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
obw_time_hours = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all OBW stations and the above dataframe
obw_all_time = pd.merge(obw_time_hours.assign(key=0), obw_station.assign(key=0), on='key').drop('key', axis=1)
obw_all_time['time'] = obw_all_time['time'].astype(str) # Make all time stamps in the same format
# Join the OBW dataset with the dataframe containing all the timestamps for each OBW station
obw_train1 = pd.merge(obw_train, obw_all_time, how='right', left_on=['station_id','utc_time'], right_on = ['station_id','time'])
obw_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest OBW station (same time interval)
obw_train_copy = obw_train1.copy()
obw_train_copy.drop(['nearest_station','longitude_x', 'latitude_x','longitude_y', 'latitude_y'], axis=1, inplace=True)
obw_train_copy.rename(columns={'humidity': 'n_humidity','pressure': 'n_pressure', "temperature":"n_temperature",\
"wind_direction":"n_wind_dir","wind_speed":"n_wind_speed",\
"time":"n_time", "station_id":"n_station_id" }, inplace=True)
# Merge original OBW data and the copy OBW data to get all attributes of a particular OBW station and its nearest OBW station
obw_train2 = pd.merge(obw_train1, obw_train_copy, how='left', left_on=['nearest_station','time'], right_on = ['n_station_id','n_time'])
# Sort the final dataframe based on OBW station and then time
obw_train2 = obw_train2.sort_values(by=['station_id', 'time'], ascending=[True,True] )
obw_train2.drop(['n_station_id', 'n_time'], axis=1, inplace=True)
obw_train2 = obw_train2.reset_index(drop=True)
# Create two attributes - month and hour
obw_train2['month'] = pd.DatetimeIndex(obw_train2['time']).month
obw_train2['hour'] = pd.DatetimeIndex(obw_train2['time']).hour
# Fill in missing values of attributes with their corresponding values in the nearest OBW station (within same time)
obw_train2['humidity'].fillna(obw_train2['n_humidity'], inplace=True)
obw_train2['pressure'].fillna(obw_train2['n_pressure'], inplace=True)
obw_train2['temperature'].fillna(obw_train2['n_temperature'], inplace=True)
obw_train2['wind_speed'].fillna(obw_train2['n_wind_speed'], inplace=True)
obw_train2['wind_direction'].fillna(obw_train2['n_wind_dir'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
obw_train2[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']] = obw_train2.groupby(["station_id","month","hour"])[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']].transform(lambda x: x.fillna(x.mean()))
# Create final OBW dataset after dropping all unnecessary attributes
obw_train_final = obw_train2.drop(['longitude_x', 'latitude_x','longitude_y', 'latitude_y','nearest_station',\
'n_humidity','n_pressure','n_temperature','n_wind_dir','n_wind_speed'],axis=1)
# Delete unnecessary dataframes to save space
del(obw_train1)
del(obw_train2)
del(obw_train_copy)
del(obw_all_time)
print('Done!')
print('-'*50)
''' --------------------------MERGING ALL TRAINING DATASETS AND GETTING READY FOR MODEL TRAINING------------------------- '''
aq_train_final['date'] = aq_train_final['date'].astype(str)
print('Getting the training model ready!')
# Convert wind speed in grid weather data from kmph to m/s (observed weather data is already in m/s)
gw_train_final['wind_speed'] = (gw_train_final['wind_speed']*5)/18
# Make all start and end times equal for the training datasets
gw_train_final = gw_train_final[gw_train_final['time']>='2017-01-30 16:00:00']
aq_train_final = aq_train_final[aq_train_final['date']>='2017-01-30 16:00:00']
# Replace noise values with previous hours value in both Observed and Grid datasets
obw_train_final.replace(999999,np.NaN,inplace=True)
obw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']] = obw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']].fillna(method='ffill')
gw_train_final.replace(999999,np.NaN,inplace=True)
gw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']] = gw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']].fillna(method='ffill')
# Replace wind direction with the noise value '999017' when wind speed is less than 0.5m/s
# This value will then be replaced with data from the nearest observed or grid station for the same timestamp
obw_train_final.loc[obw_train_final.wind_speed < 0.5, 'wind_direction'] = 999017
gw_train_final.loc[gw_train_final.wind_speed < 0.5, 'wind_direction'] = 999017
# Find nearest OBW and GW station for every AQ station for proper joining of attributes
obw_station.drop(['nearest_station'],axis=1, inplace=True)
station_aq["near_obw"] = station_aq.apply(lambda row: near_aq_to_obw(row['latitude'], row['longitude']), axis=1)
gw_station.drop(['nearest_station'],axis=1, inplace=True)
station_aq["near_gw"] = station_aq.apply(lambda row: near_aq_to_gw(row['latitude'], row['longitude']), axis=1)
# Merge the AQ training dataset with the nearest OBW and GW stations for every time stamp
aq_train1 = pd.merge(aq_train_final, station_aq, how='left', on='station')
aq_train1.drop(['type','nearest_station'], axis=1, inplace=True)
# Append all GW data attributes with the AQ training set based on nearest GW station and time stamp
aq_train2 = pd.merge(aq_train1, gw_train_final, how='left', left_on=['near_gw','date'], right_on=['station_id','time'])
# Remove unnecessary columns and rename columns to prepare for merging of OBW data
aq_train2.drop(['station_id','time','month_y','hour_y'],axis=1, inplace=True)
aq_train2 = aq_train2.rename(columns={'month_x': 'month_aq', 'hour_x': 'hour_aq', 'longitude':'longitude_aq',\
'latitude':'latitude_aq', 'humidity': 'humidity_gw','pressure': 'pressure_gw',\
'wind_direction': 'wind_dir_gw', 'wind_speed':'wind_speed_gw',\
'temperature': 'temperature_gw'})
# Append all OBW data attributes with the AQ training set based on nearest OBW station and time stamp
TRAIN = pd.merge(aq_train2, obw_train_final, how='left', left_on=['near_obw','date'], right_on=['station_id','time'])
TRAIN.drop(['station_id','time','month','hour'],axis=1, inplace=True)
TRAIN = TRAIN.rename(columns={'humidity': 'humidity_obw','pressure': 'pressure_obw',\
'wind_direction': 'wind_dir_obw', 'wind_speed':'wind_speed_obw',\
'temperature': 'temperature_obw'})
# Final clean of all 999017 noise from the OBW and GW for wind direction
TRAIN.loc[TRAIN.wind_dir_gw == 999017, 'wind_dir_gw'] = TRAIN['wind_dir_obw']
TRAIN.loc[TRAIN.wind_dir_obw == 999017, 'wind_dir_obw'] = TRAIN['wind_dir_gw']
# Some observed data points are extreme outliers (probably recorded incorrectly by humans)
TRAIN.loc[TRAIN.humidity_obw > 100, 'humidity_obw'] = TRAIN['humidity_gw']
TRAIN.loc[TRAIN.pressure_obw > 1040, 'pressure_obw'] = TRAIN['pressure_gw']
TRAIN.loc[TRAIN.temperature_obw > 50, 'temperature_obw'] = TRAIN['temperature_gw']
TRAIN.loc[TRAIN.wind_dir_obw > 360, 'wind_dir_obw'] = TRAIN['wind_dir_gw']
TRAIN.loc[TRAIN.wind_speed_obw > 20, 'wind_speed_obw'] = TRAIN['wind_speed_gw']
# Sort the final train set based on station and then timestamp
TRAIN = TRAIN.sort_values(by=['station', 'date'], ascending=[True,True])
print('Ready to be trained by the model!')
print('-'*50)
''' ----------------------TEST DATA: CLEANING, PREPROCESSING AND GETTING READY FOR MODEL-------------------------------- '''
print('Getting the testing data ready for the model!')
# Read the AQ test dataset for test data - This dataset was found from the Beijing meteorological datasets
# This dataset helps in getting the values for the NO2, SO2 and CO attributes for the test data timestamps
test_aq = pd.read_csv('MAY_AQ.csv')
test_aq['Time'] = pd.to_datetime(test_aq['Time'], format='%d-%m-%Y %H:%M')
test_aq['Time'] = test_aq['Time'].astype(str)
# Merge the dataset with nearest GW and OBW stations with the AQ test dataset
test1 = pd.merge(test_aq, station_aq, how='left', left_on='station_id', right_on='station').drop(['station','longitude','latitude','type','nearest_station','AQI'],axis=1)
# Find time stamp range for test data: from 1st May 00:00 to 2nd May 23:00
test1.set_index("Time", inplace = True)
min_date_test = test1.index.min()
max_date_test = test1.index.max()
test1.reset_index(inplace=True)
# Grid Test Data Preprocessing
test_gw = pd.read_csv('gridWeather_20180501-20180502.csv') # Read GW test data
test_gw.drop(['id','weather'],axis=1, inplace=True)
# Create new dataframe with all timestamps for all GW stations
test_gw1 = pd.DataFrame({"time": pd.date_range(min_date_test, max_date_test, freq='H')})
test_gw2 = pd.merge(test_gw1.assign(key=0), gw_station.assign(key=0), on='key').drop('key', axis=1)
test_gw2['time'] = test_gw2['time'].astype(str) # Convert time in correct format
gw_test_final = pd.merge(test_gw2, test_gw, how='left', left_on=['station_id','time'], right_on = ['station_id','time'])
# Observed Test Data Preprocessing
test_obw = pd.read_csv('observedWeather_20180501-20180502.csv') # Read OBW test data
test_obw.drop(['id','weather'],axis=1, inplace=True)
# Create new dataframe with all timestamps for all OBW stations
test_obw1 = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
test_obw2 = pd.merge(test_obw1.assign(key=0), obw_station.assign(key=0), on='key').drop('key', axis=1)
test_obw2['time'] = test_obw2['time'].astype(str) # Convert time in correct format
obw_test_final = pd.merge(test_obw2, test_obw, how='left', left_on=['station_id','time'], right_on = ['station_id','time'])
# Join AQ Test dataframe with test GW dataframe
test_aq1 = pd.merge(test1, gw_test_final, how='left', left_on=['near_gw','Time'], right_on=['station_id','time'])
test_aq1.drop(['station_id_y','latitude','longitude'],axis=1, inplace=True)
# Rename certain columns to prepare for joining the OBW test dataframe
test_aq1 = test_aq1.rename(columns={'station_id_x':'station_id_aq',\
'humidity': 'humidity_gw',\
'pressure': 'pressure_gw',\
'wind_direction': 'wind_dir_gw',\
'wind_speed':'wind_speed_gw',\
'temperature': 'temperature_gw'})
# Join the updated AQ Test dataframe with test OBW dataframe
TEST = pd.merge(test_aq1, obw_test_final, how='left', left_on=['near_obw','time'], right_on=['station_id','time'])
TEST.drop(['station_id','latitude','longitude','time'],axis=1, inplace=True)
# Rename certain columns
TEST = TEST.rename(columns={'humidity': 'humidity_obw',\
'pressure': 'pressure_obw',\
'wind_direction': 'wind_dir_obw',\
'wind_speed':'wind_speed_obw',\
'temperature': 'temperature_obw'})
# Create attributes for month and hour - to be taken as input parameters
TEST['month'] = pd.DatetimeIndex(TEST['Time']).month
TEST['hour'] = pd.DatetimeIndex(TEST['Time']).hour
# Fill missing values from the nearest GW data (as very few values are missing in OBW data)
TEST = TEST.sort_values(by=['station_id_aq', 'Time'], ascending=[True,True])
TEST['humidity_obw'] = TEST['humidity_obw'].fillna(TEST['humidity_gw'])
TEST['temperature_obw'] = TEST['temperature_obw'].fillna(TEST['temperature_gw'])
TEST['pressure_obw'] = TEST['pressure_obw'].fillna(TEST['pressure_gw'])
TEST['wind_speed_obw'] = TEST['wind_speed_obw'].fillna(TEST['wind_speed_gw'])
TEST['wind_dir_obw'] = TEST['wind_dir_obw'].fillna(TEST['wind_dir_gw'])
# Take care of noise 999017 when wind speed is less than 0.5m/s
TEST.loc[TEST.wind_dir_gw == 999017, 'wind_dir_gw'] = TEST['wind_dir_obw']
TEST.loc[TEST.wind_dir_obw == 999017, 'wind_dir_obw'] = TEST['wind_dir_gw']
print('Ready to be tested by the model!')
''' ---------------------------------TRAINING THE MODEL AND PREDICTING REQUIRED OUTPUT----------------------------------- '''
# Train the model with only April, May and June's data
TRAIN = TRAIN.loc[TRAIN['month_aq'].isin([4,5,6])]
# Extract output columns for training the model
Y = TRAIN[['PM2.5','PM10','O3']].values
# Input parameters for the model
X = TRAIN.drop(['PM2.5','PM10','O3','latitude_aq','longitude_aq'], axis=1)
# Create new features for the model
X['AQ'] = (X['SO2']*X['NO2']*X['CO'])
X['wind'] = X['wind_dir_gw']/X['wind_speed_gw']
# Final input parameters after feature engineering
X_train = X[['station','month_aq','hour_aq','temperature_gw','AQ','humidity_gw','wind','pressure_gw']].values
# One Hot encode the station column and normalize the entire input data
le = LabelEncoder()
ohe = OneHotEncoder(categorical_features=[0])
scaler = MinMaxScaler()
X_train[:,0] = le.fit_transform(X_train[:,0])
X_train = ohe.fit_transform(X_train).toarray()
X_train_sc = scaler.fit_transform(X_train)
# Use Random Forest Regressor to predict the values
model_rf = RandomForestRegressor(random_state=42)
# Use K Fold Cross Validation to check the efficiency of the model
print('-------Printing the Cross Validation SMAPE errors-------')
kf = KFold(n_splits=10, shuffle=True, random_state=42)
for train_index, test_index in kf.split(X_train_sc):
x_train, x_val = X_train_sc[train_index], X_train_sc[test_index]
y_train, y_val = Y[train_index], Y[test_index]
model_rf.fit(x_train, y_train)
pred_val = model_rf.predict(x_val)
print(smape(y_val,pred_val))
# Get the Test data ready for the model by following the above steps
TEST['AQ'] = (TEST['CO']*TEST['SO2']*TEST['NO2'])
TEST['wind'] = TEST['wind_dir_gw']/TEST['wind_speed_gw']
# Final test data input features
X_test = TEST[['station_id_aq','month','hour','temperature_gw','AQ','humidity_gw','wind','pressure_gw']].values
# One-hot encode and normalize, similar to the train data
X_test[:,0] = le.transform(X_test[:,0])
X_test = ohe.transform(X_test).toarray()
X_test_sc = scaler.transform(X_test)
# Predict the results after training the model on the whole final train dataset
model_rf.fit(X_train_sc,Y)
pred = model_rf.predict(X_test_sc)
''' --------------------------EXPORTING THE PREDICTED RESULTS INTO THE SPECIFIED FORMAT---------------------------------- '''
index_test = TEST[['station_id_aq']]
index = list(range(0,48)) # Create a list with all the values in the range (each for one hour over a period of two days)
# Turn the above numbers into a continuous cycle
index1 = cycle(index)
index_test['index'] = [next(index1) for i in range(len(index_test))]
# Create a column with all 35 AQ station names and all time indexes
index_test['test_id'] = index_test['station_id_aq']+'#'+index_test['index'].astype(str)
# Extract the required column and join it with the predicted output
# Both test and train data are sorted by station name and time - hence predicted output will be in arranged order
index_test.drop(['index','station_id_aq'],axis=1, inplace=True)
index_test1 = index_test.values
output = np.concatenate((index_test1, pred), axis=1)
np.savetxt('submission.csv', output, delimiter=',', header='test_id,PM2.5,PM10,O3', fmt='%s,%f,%f,%f', comments='')
print('The code is complete - please find your results in the "submission.csv" file!')
print("--- %s seconds ---" % (time.time() - start_time))
'''-------------------------------------------------------END-------------------------------------------------------------'''
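# Note: the script above relies on OneHotEncoder(categorical_features=[0]), an
# argument that was removed in scikit-learn 0.22, and on DataFrame.append,
# which was removed in pandas 2.0 (pd.concat([...]) is the modern equivalent).
# A minimal sketch of the equivalent encoding/scaling step with the modern API
# (assuming scikit-learn >= 1.2 and that the station name is still column 0):
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder

preprocess = Pipeline([
    # One-hot encode column 0 (the station); pass the numeric columns through
    ("encode", ColumnTransformer(
        [("station", OneHotEncoder(handle_unknown="ignore", sparse_output=False), [0])],
        remainder="passthrough",
    )),
    # Then scale everything to [0, 1], as the original script does
    ("scale", MinMaxScaler()),
])
# Hypothetical usage, mirroring X_train / X_test from the script:
# X_train_sc = preprocess.fit_transform(X_train)
# X_test_sc = preprocess.transform(X_test)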
| 57.671053
| 249
| 0.702031
| 4,597
| 30,681
| 4.484011
| 0.09876
| 0.027749
| 0.009315
| 0.012419
| 0.514481
| 0.433901
| 0.392519
| 0.358463
| 0.308398
| 0.283462
| 0
| 0.026379
| 0.14126
| 30,681
| 531
| 250
| 57.779661
| 0.755987
| 0.268831
| 0
| 0.070513
| 0
| 0
| 0.267038
| 0.018116
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022436
| false
| 0
| 0.032051
| 0
| 0.076923
| 0.057692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1f22c9adbe507763be9a3e8cffbcec89c6b45a4
| 234
|
py
|
Python
|
examples/SortTimeDemo.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | null | null | null |
examples/SortTimeDemo.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 4
|
2019-11-07T12:32:19.000Z
|
2020-07-19T14:04:44.000Z
|
examples/SortTimeDemo.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 5
|
2019-12-04T15:56:55.000Z
|
2022-01-14T06:19:18.000Z
|
import random
import time
n = int(input("Enter the number of elements to sort: "))
lst = list(range(n))
random.shuffle(lst)
startTime = time.time()
lst.sort()
print("Sort time in Python is", int(time.time() - startTime), "seconds")
| 23.4
| 72
| 0.705128
| 37
| 234
| 4.459459
| 0.648649
| 0.09697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136752
| 234
| 10
| 72
| 23.4
| 0.816832
| 0
| 0
| 0
| 0
| 0
| 0.285106
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1f5f177dec08c59abe32983e95271dfac01dbdf
| 1,239
|
py
|
Python
|
tests/conftest.py
|
andrewsayre/pysmartapp
|
5c3be867584d7e82d00b5998295b20bd12eccf94
|
[
"MIT"
] | 10
|
2019-02-07T20:07:10.000Z
|
2020-12-30T20:29:32.000Z
|
tests/conftest.py
|
andrewsayre/pysmartapp
|
5c3be867584d7e82d00b5998295b20bd12eccf94
|
[
"MIT"
] | 1
|
2021-12-05T15:00:13.000Z
|
2021-12-05T15:00:13.000Z
|
tests/conftest.py
|
andrewsayre/pysmartapp
|
5c3be867584d7e82d00b5998295b20bd12eccf94
|
[
"MIT"
] | 2
|
2020-10-17T20:20:45.000Z
|
2021-09-28T12:58:50.000Z
|
"""Define common test configuraiton."""
import pytest
from pysmartapp.dispatch import Dispatcher
from pysmartapp.smartapp import SmartApp, SmartAppManager
@pytest.fixture
def smartapp(event_loop) -> SmartApp:
"""Fixture for testing against the SmartApp class."""
app = SmartApp(dispatcher=Dispatcher(loop=event_loop))
app.name = 'SmartApp'
app.description = 'SmartApp Description'
app.permissions.append('l:devices')
app.config_app_id = 'myapp'
return app
@pytest.fixture
def manager(event_loop) -> SmartAppManager:
"""Fixture for testing against the SmartAppManager class."""
return SmartAppManager('/path/to/app',
dispatcher=Dispatcher(loop=event_loop))
@pytest.fixture
def handler():
"""Fixture handler to mock in the dispatcher."""
def target(*args, **kwargs):
target.fired = True
target.args = args
target.kwargs = kwargs
target.fired = False
return target
@pytest.fixture
def async_handler():
"""Fixture async handler to mock in the dispatcher."""
async def target(*args, **kwargs):
target.fired = True
target.args = args
target.kwargs = kwargs
target.fired = False
return target
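# A minimal standalone sketch (not one of the fixtures above) showing how the
# handler trick works: function attributes record whether and how the target
# fired, which the tests can assert on after dispatch.
def _demo_handler_trick():
    def target(*args, **kwargs):
        target.fired = True
        target.args = args
        target.kwargs = kwargs
    target.fired = False
    target('device', update=True)
    assert target.fired
    assert target.args == ('device',)
    assert target.kwargs == {'update': True}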
| 26.361702
| 66
| 0.67958
| 142
| 1,239
| 5.880282
| 0.316901
| 0.062275
| 0.076647
| 0.057485
| 0.435928
| 0.292216
| 0.22515
| 0.22515
| 0.22515
| 0.22515
| 0
| 0
| 0.215496
| 1,239
| 46
| 67
| 26.934783
| 0.859054
| 0.184019
| 0
| 0.451613
| 0
| 0
| 0.054822
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.096774
| 0
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1f8c5ac672b61358853182ee48a06e86cda8b9c
| 294
|
py
|
Python
|
to_do_list.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
to_do_list.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
to_do_list.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
todo_list = ["" for i in range(11)]
command = input()
while command != 'End':
task = command.split('-')
importance = int(task[0])
thing_to_do = task[1]
todo_list[importance] = thing_to_do
command = input()
final_list = [x for x in todo_list if x != ""]
print(final_list)
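# Example session (hypothetical input): entering "3-Wash the dishes", then
# "1-Do homework", then "End" prints ['Do homework', 'Wash the dishes'];
# the number before the dash (1-10) fixes each task's slot, and the empty
# slots are filtered out at the end.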
| 21
| 46
| 0.629252
| 45
| 294
| 3.911111
| 0.533333
| 0.136364
| 0.102273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017391
| 0.217687
| 294
| 14
| 47
| 21
| 0.747826
| 0
| 0
| 0.2
| 0
| 0
| 0.013559
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1fa447d2310139f7a8d64aba2e5e1395276502b
| 6,035
|
py
|
Python
|
run.py
|
Tracymbone/password_locker
|
346a3c770174d20fe24720fd4875f5f4e222d582
|
[
"MIT"
] | null | null | null |
run.py
|
Tracymbone/password_locker
|
346a3c770174d20fe24720fd4875f5f4e222d582
|
[
"MIT"
] | null | null | null |
run.py
|
Tracymbone/password_locker
|
346a3c770174d20fe24720fd4875f5f4e222d582
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.8
from users import Users
from credentials import Credentials
def create_credentials(first_name, last_name, user_name, credential):
users = Users(first_name, last_name, user_name, credential)
return users
def save_user(users):
users.save_user()
def delete_users(users):
users.delete_users()
def find_users(user_name):
return Users.find_by_user_name(user_name)
def isexist_users(user_name):
return Users.users_exists(user_name)
def display_users():
return Users.display_users()
def create_page(page, credential):
credentials = Credentials(page, credential)
return credentials
def save_page(credentials):
credentials.save_page()
def find_page(pager):
return Credentials.find_by_page(pager)
def isexist_page(pager):
return Credentials.page_exists(pager)
def delete_page(credential):
Credentials.delete_page()
def display_pages():
return Credentials.display_page()
def main():
print('WELCOME TO PASSWORD_LOCKER')
print('Use the following information to pick their corresponding values')
while True:
print(" 1) SIGN IN \n 2) REGESTER \n 3) ABOUT PASSWORD_LOCKER \n 4) DISPLAY USERS \n 5) SIGN OUT")
choice = int(input())
if choice == 1:
print('Enter username')
username = input()
print('Enter credential')
Credentials = input()
user = find_users(username)
if user.user_name == username and user.credentials == Credentials:
print('logged in ')
while True:
print(
f'Welcome {username}, Use the following numbers to select their corresponding values')
print(
' 1) Save new credential \n 2) Delete credential \n 3) Display saved credentials \n 4) Log out ')
log_choice = int(input())
if log_choice == 1:
print('New page')
print('*'*100)
print('Page name')
page = input()
print('credentials')
Credentials = input()
# created and saved page
save_page(create_page(page, Credentials))
elif log_choice == 2:
print("Enter the name of the page you want to delete")
page = input()
if isexist_page(page):
remove_page = (page)
delete_page(remove_page)
else:
print(f"I can't find {page}")
elif log_choice == 3:
if display_pages():
for pag in display_pages():
print(
f'{pag.page}:{pag.credential}'
)
else:
print('NO CREDENTIAL SAVED YET')
print('\n')
elif log_choice == 4:
print('adios')
break
else:
print('wrong credentials')
if choice == 2:
print('NEW USERS')
print('*'*100)
print('FIRSTNAME')
first_name = input()
print('LASTNAME')
last_name = input()
print('USERNAME')
user_name = input()
print('CREDENTIALS')
Credentials = input()
save_user(create_credentials(
first_name, last_name, user_name, Credentials))
# save and create a new user
print('USER FORMED')
while True:
print(
f'Welcome {user_name}, Use the following numbers to select their corresponding values')
print(
' 1) Save new credential \n 2) Delete credential \n 3) Display saved credential \n 4) Log out ')
log_choice = int(input())
if log_choice == 1:
print('New page')
print('*'*100)
print('Page name')
page = input()
print('credential')
Credentials = input()
# created and saved page
save_page(create_page(page, Credentials))
elif log_choice == 2:
print("Enter the name of the page you want to delete")
page = input()
if isexist_page(page):
remove_page = (page)
delete_page(remove_page)
else:
print(f"I can't find {page}")
elif log_choice == 3:
if display_pages():
for pag in display_pages():
print(
f'{pag.page}:{pag.credential}'
)
else:
print('NO CREDENTIAL SAVED YET')
elif log_choice == 4:
break
elif choice == 3:
print('ABOUT PASSWORD_LOCKER')
print(
'''
This is a terminal based project where users can input their credentials according to the different accounts that they have.
''')
elif choice == 4:
if display_users():
for account in display_users():
print(
f'{account.user_name}'
)
else:
print('NO USERS')
elif choice == 5:
print('Bye! Welcome back again')
break
if __name__ == '__main__':
main()
| 28.875598
| 136
| 0.471914
| 581
| 6,035
| 4.75043
| 0.185886
| 0.04058
| 0.028261
| 0.018478
| 0.45471
| 0.419928
| 0.41087
| 0.385507
| 0.385507
| 0.385507
| 0
| 0.011098
| 0.447556
| 6,035
| 209
| 137
| 28.875598
| 0.816737
| 0.015907
| 0
| 0.471831
| 0
| 0.021127
| 0.17738
| 0.009382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091549
| false
| 0.021127
| 0.021127
| 0.042254
| 0.169014
| 0.288732
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1faa38cc22b54eb622228d21323a509bcdbceb8
| 2,346
|
py
|
Python
|
menu_info/menu_details.py
|
averytorres/WazHack-Clone
|
e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda
|
[
"Unlicense"
] | 1
|
2019-06-21T17:13:35.000Z
|
2019-06-21T17:13:35.000Z
|
menu_info/menu_details.py
|
averytorres/WazHack-Clone
|
e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda
|
[
"Unlicense"
] | 18
|
2019-06-25T00:48:11.000Z
|
2019-07-11T17:52:24.000Z
|
menu_info/menu_details.py
|
averytorres/WazHack-Clone
|
e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda
|
[
"Unlicense"
] | 1
|
2019-06-21T17:08:23.000Z
|
2019-06-21T17:08:23.000Z
|
from game_states import GameStates
from action_consumer.available_actions_enum import Action
def get_menu_title(menu_name):
menu_titles = {
GameStates.SHOW_INVENTORY: 'Press the key next to an item to use it, or Esc to cancel.\n',
GameStates.DROP_INVENTORY: 'Press the key next to an item to drop it, or Esc to cancel.\n',
GameStates.SHOW_WEAPON_INVENTORY: 'Press the key next to an item to equip/unequip it, or Esc to cancel.\n',
GameStates.SHOW_ARMOR_INVENTORY: 'Press the key next to an item to equip/unequip it, or Esc to cancel.\n',
GameStates.SHOW_SCROLL_INVENTORY: 'Press the key next to an item to read it, or Esc to cancel.\n',
GameStates.SHOW_QUAFF_INVENTORY: 'Press the key next to an item to quaff it, or Esc to cancel.\n',
GameStates.LEVEL_UP: 'Level up! Choose a stat to raise:',
GameStates.CHARACTER_SCREEN: 'Character Information',
GameStates.PLAYERS_TURN: '',
GameStates.PLAYER_DEAD: '',
None: '',
}
return menu_titles[menu_name]
def get_menu_width(menu_name):
menu_width = {
GameStates.SHOW_INVENTORY: 50,
GameStates.DROP_INVENTORY: 50,
GameStates.SHOW_WEAPON_INVENTORY: 50,
GameStates.SHOW_ARMOR_INVENTORY: 50,
GameStates.SHOW_SCROLL_INVENTORY: 50,
GameStates.SHOW_QUAFF_INVENTORY: 50,
GameStates.LEVEL_UP: 40,
GameStates.CHARACTER_SCREEN: 10,
GameStates.PLAYERS_TURN: 24,
GameStates.PLAYER_DEAD: 50,
None: 24,
}
return menu_width[menu_name]
def get_menu_height(screen_height):
return int(screen_height * 1.8)
def get_main_menu_options():
return ['Play a new game', 'Continue last game', 'Quit']
def get_main_menu_key(index):
index = int(index)
if index == 0:
return {Action.NEW_GAME: True}
elif index == 1:
return {Action.LOAD_GAME: True}
elif index == 2:
return {Action.EXIT: True}
else:
return {}
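# A minimal usage sketch (hypothetical call sites) for the lookups above:
# >>> get_menu_title(GameStates.LEVEL_UP)
# 'Level up! Choose a stat to raise:'
# >>> get_menu_width(GameStates.LEVEL_UP)
# 40
# >>> get_main_menu_key(1)
# {Action.LOAD_GAME: True}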
| 39.1
| 132
| 0.728048
| 347
| 2,346
| 4.697406
| 0.230548
| 0.196319
| 0.107975
| 0.159509
| 0.531902
| 0.446626
| 0.402454
| 0.304294
| 0.304294
| 0.220859
| 0
| 0.013699
| 0.159847
| 2,346
| 59
| 133
| 39.762712
| 0.813293
| 0
| 0
| 0
| 0
| 0
| 0.202559
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.045455
| 0.045455
| 0.340909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1fc50952b7cf799deab08fe85f0849c2cbaf2f0
| 1,154
|
py
|
Python
|
tests/unit/fileserver/test_hgfs.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
tests/unit/fileserver/test_hgfs.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/unit/fileserver/test_hgfs.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
# Import Salt libs
import salt.fileserver.hgfs as hgfs
class HgfsFileTest(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
hgfs: {}
}
def test_env_is_exposed(self):
'''
test _env_is_exposed method when
base is in whitelist
'''
with patch.dict(hgfs.__opts__,
{'hgfs_saltenv_whitelist': 'base',
'hgfs_saltenv_blacklist': ''}):
assert hgfs._env_is_exposed('base')
def test_env_is_exposed_blacklist(self):
'''
test _env_is_exposed method when
base is in blacklist
'''
with patch.dict(hgfs.__opts__,
{'hgfs_saltenv_whitelist': '',
'hgfs_saltenv_blacklist': 'base'}):
assert not hgfs._env_is_exposed('base')
| 28.85
| 72
| 0.618718
| 127
| 1,154
| 5.275591
| 0.393701
| 0.044776
| 0.107463
| 0.095522
| 0.352239
| 0.235821
| 0.235821
| 0.235821
| 0.113433
| 0.113433
| 0
| 0.001229
| 0.294627
| 1,154
| 39
| 73
| 29.589744
| 0.821867
| 0.165511
| 0
| 0.1
| 0
| 0
| 0.115942
| 0.098105
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.15
| false
| 0
| 0.25
| 0.05
| 0.5
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1fd1af131dc102c96ef990fe42c7c22c4e492de
| 1,273
|
py
|
Python
|
networks/model_factory.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | 5
|
2021-09-07T13:33:45.000Z
|
2022-02-12T18:56:45.000Z
|
networks/model_factory.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | null | null | null |
networks/model_factory.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | 4
|
2021-09-25T06:56:38.000Z
|
2022-03-24T18:06:08.000Z
|
import torch.nn as nn
from networks.resnet import resnet18
from networks.shufflenet import shufflenet_v2_x1_0
from networks.cifar_net import Net
from networks.mlp import MLP
class ModelFactory():
def __init__(self):
pass
@staticmethod
def get_model(target_model, num_classes, img_size, pretrained=False):
if target_model == 'mlp':
return MLP(feature_size=img_size, hidden_dim=40, num_class=num_classes)
elif target_model == 'resnet':
if pretrained:
model = resnet18(pretrained=True)
model.fc = nn.Linear(in_features=512, out_features=num_classes, bias=True)
else:
model = resnet18(pretrained=False, num_classes=num_classes)
return model
elif target_model == 'cifar_net':
return Net(num_classes=num_classes)
elif target_model == 'shufflenet':
if pretrained:
model = shufflenet_v2_x1_0(pretrained=True)
model.fc = nn.Linear(in_features=1024, out_features=num_classes, bias=True)
else:
model = shufflenet_v2_x1_0(pretrained=False, num_classes=num_classes)
return model
else:
raise NotImplementedError
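# A minimal usage sketch (hypothetical arguments; img_size is only consumed by
# the MLP branch, where it becomes the input feature size):
# >>> model = ModelFactory.get_model('resnet', num_classes=10, img_size=224, pretrained=True)
# >>> mlp = ModelFactory.get_model('mlp', num_classes=2, img_size=64)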
| 31.04878
| 91
| 0.639434
| 152
| 1,273
| 5.092105
| 0.328947
| 0.129199
| 0.054264
| 0.05814
| 0.427649
| 0.363049
| 0.317829
| 0.317829
| 0
| 0
| 0
| 0.026432
| 0.286724
| 1,273
| 40
| 92
| 31.825
| 0.825991
| 0
| 0
| 0.233333
| 0
| 0
| 0.022013
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0.033333
| 0.166667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b1ff61ec8eb947ca5da56f846d344d35e22df2db
| 5,536
|
py
|
Python
|
main.py
|
MarySueTeam/Video_Maker
|
a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671
|
[
"MIT"
] | 1
|
2022-03-04T09:25:11.000Z
|
2022-03-04T09:25:11.000Z
|
main.py
|
MarySueTeam/Video_Maker
|
a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671
|
[
"MIT"
] | null | null | null |
main.py
|
MarySueTeam/Video_Maker
|
a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671
|
[
"MIT"
] | 1
|
2022-01-25T16:19:25.000Z
|
2022-01-25T16:19:25.000Z
|
from manim import *
from TTS.TTS import get_mp3_file
from utils import cut, get_duration, deal_text
import time
class Video(Scene):
def construct(self):
# INFO Video opening
LOGO = ImageMobject("./media/images/logo.png").scale(0.3).to_edge(UP, buff=2)
Slogan_text = "为你收集日落时的云朵,为你收藏下雨后的天空"
get_mp3_file(text=f"{Slogan_text}", output_path="./media/sounds/video_start", rate="-10%")
Slogan = Text(Slogan_text, font="Muyao-Softbrush", weight=MEDIUM, color="#FCA113").scale(0.7).next_to(LOGO, DOWN, buff=1)
self.play(FadeIn(LOGO, run_time=0.1))
self.wait(0.5)
self.play(FadeIn(Slogan), run_time=1)
self.add_sound("./media/sounds/video_start.mp3")
self.wait(5)
self.play(FadeOut(Slogan, LOGO))
# INFO Main video content
LOGO = ImageMobject("./media/images/logo.png").scale(0.1).to_edge(UL)
username = Text("@仙女玛丽苏吖",font="Muyao-Softbrush").scale(0.5).next_to(LOGO, RIGHT)
self.add(LOGO, username)
title = "在本子上写上他的名字"
title = "《" + title + "》"
title = Text(title, font="Muyao-Softbrush", color=ORANGE).scale(0.5).to_edge(UP, buff=0.75)
self.add(title)
with open("./media/words/words.txt", "rt", encoding="utf-8") as f:
content = f.readline()
while content:
audio_path = "./media/sounds/video_content_"+str(round(time.time()*1000))
# content = deal_text(content)
get_mp3_file(text=content,output_path=audio_path,rate="-10%")
audio_path = audio_path + ".mp3"
audio_time = get_duration(audio_path)
content = MarkupText(content, font="Muyao-Softbrush", font_size=60, justify=True).scale(0.5)
run_time = max(1, len(content) // 50)
self.play(Write(content), run_time=run_time)
self.add_sound(audio_path, time_offset = 1)
self.wait(audio_time)
self.play(FadeOut(content))
content = f.readline()
self.play(FadeOut(title,username,LOGO))
# INFO Video ending
LOGO = ImageMobject("./media/images/logo.png").scale(0.2).to_edge(UP, buff=2)
messages_text = "你可以在下面的平台找到我,这一期就先到这里,我们下期再见。"
messages = Text("-你可以在下面的平台找到我-", font="Muyao-Softbrush").scale(0.4).next_to(LOGO, DOWN)
# INFO Fetch the audio file
get_mp3_file(text=f"{messages_text}",output_path="./media/sounds/video_end",rate="-10%")
gonzhonghao = ImageMobject("./media/images/icon/weixin.png").scale(0.2)
username1 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(gonzhonghao)
zhihu = ImageMobject("./media/images/icon/zhihu.png").next_to(gonzhonghao, RIGHT, buff=1).scale(0.2)
username2 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(zhihu)
xiaohongshu = ImageMobject("./media/images/icon/xiaohongshu.png").next_to(zhihu, RIGHT, buff=1).scale(0.2)
username3 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(xiaohongshu)
bilibili = ImageMobject("./media/images/icon/bilibili.png").next_to(gonzhonghao).scale(0.2)
username4 = Text("@仙女玛丽苏吖", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(bilibili)
douyin = ImageMobject("./media/images/icon/douyin.png").next_to(bilibili, RIGHT, buff=1).scale(0.2)
username5 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(douyin)
toutiao = ImageMobject("./media/images/icon/toutiao1.png").next_to(douyin, RIGHT, buff=1).scale(0.2)
username6 =Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(toutiao)
jianshu = ImageMobject("./media/images/icon/jianshu.png").next_to(bilibili).scale(0.2)
username7 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(jianshu)
kuaishou = ImageMobject("./media/images/icon/kuaishou.png").next_to(jianshu, RIGHT, buff=1).scale(0.2)
username8 = Text("@仙女玛丽苏吖", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(kuaishou)
xiguashipin = ImageMobject("./media/images/icon/xiguashipin.png").next_to(kuaishou, RIGHT, buff=1).scale(0.2)
username9 = Text("@仙女玛丽苏", font="Smartisan Compact CNS", weight=MEDIUM).scale(0.25).next_to(xiguashipin)
Recommend_group1 = Group(
gonzhonghao,
username1,
zhihu,
username2,
xiaohongshu,
username3,
).next_to(LOGO, DOWN, buff=1)
Recommend_group2 = Group(
bilibili,
username4,
douyin,
username5,
toutiao,
username6,
).next_to(Recommend_group1, DOWN, buff=0.2)
Recommend_group3 = Group(
jianshu,
username7,
kuaishou,
username8,
xiguashipin,
username9,
).next_to(Recommend_group2, DOWN, buff=0.2)
Recommend_group = Group(
Recommend_group1,
Recommend_group2,
Recommend_group3
)
self.play(FadeIn(LOGO))
duration = get_duration("./media/sounds/video_end.mp3")
self.add_sound("./media/sounds/video_end.mp3", time_offset=0.5)
self.play(Write(messages), run_time=0.5)
self.play(FadeIn(Recommend_group))
self.wait(duration)
self.play(FadeOut(Recommend_group,messages,LOGO))
| 48.99115
| 129
| 0.62211
| 700
| 5,536
| 4.797143
| 0.195714
| 0.046456
| 0.082192
| 0.072365
| 0.328172
| 0.268017
| 0.191781
| 0.191781
| 0.156045
| 0.156045
| 0
| 0.033247
| 0.228504
| 5,536
| 113
| 130
| 48.99115
| 0.752985
| 0.012825
| 0
| 0.020833
| 0
| 0
| 0.183916
| 0.108628
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010417
| false
| 0
| 0.041667
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5901159e3f1532199cb8c881801333e8fca64f93
| 1,518
|
py
|
Python
|
sevenbridges/models/compound/tasks/batch_by.py
|
sbg/sevenbridges-python
|
b3e14016066563470d978c9b13e1a236a41abea8
|
[
"Apache-2.0"
] | 46
|
2016-04-27T12:51:17.000Z
|
2021-11-24T23:43:12.000Z
|
sevenbridges/models/compound/tasks/batch_by.py
|
sbg/sevenbridges-python
|
b3e14016066563470d978c9b13e1a236a41abea8
|
[
"Apache-2.0"
] | 111
|
2016-05-25T15:44:31.000Z
|
2022-02-05T20:45:37.000Z
|
sevenbridges/models/compound/tasks/batch_by.py
|
sbg/sevenbridges-python
|
b3e14016066563470d978c9b13e1a236a41abea8
|
[
"Apache-2.0"
] | 37
|
2016-04-27T12:10:43.000Z
|
2021-03-18T11:22:28.000Z
|
from sevenbridges.meta.resource import Resource
# noinspection PyUnresolvedReferences,PyProtectedMember
class BatchBy(Resource, dict):
"""
Task batch by resource.
"""
_name = 'batch_by'
# noinspection PyMissingConstructor
def __init__(self, **kwargs):
self.parent = kwargs.pop('_parent')
self.api = kwargs.pop('api')
for k, v in kwargs.items():
super().__setitem__(k, v)
def __setitem__(self, key, value):
super().__setitem__(key, value)
self.parent._data[self._name][key] = value
if self._name not in self.parent._dirty:
self.parent._dirty.update({self._name: {}})
self.parent._dirty[self._name][key] = value
def __getitem__(self, item):
try:
return self.parent._data[self._name][item]
except KeyError:
return None
def __repr__(self):
values = {}
for k, _ in self.items():
values[k] = self[k]
return str(values)
__str__ = __repr__
def update(self, e=None, **f):
other = {}
if e:
other.update(e, **f)
else:
other.update(**f)
for k, v in other.items():
if other[k] != self[k]:
self[k] = other[k]
def equals(self, other):
if type(other) is not type(self):
return False
return (
self is other or
self.parent._data[self._name] == other.parent._data[self._name]
)
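# A minimal sketch (hypothetical stub parent) of the dirty-tracking behaviour
# above: writes go through to parent._data and are mirrored into parent._dirty
# so the enclosing task knows which fields to send back to the API.
if __name__ == '__main__':
    class _StubParent:
        def __init__(self):
            self._data = {'batch_by': {}}
            self._dirty = {}

    stub = _StubParent()
    batch = BatchBy(_parent=stub, api=None)
    batch['type'] = 'ITEM'
    assert stub._data['batch_by']['type'] == 'ITEM'
    assert stub._dirty == {'batch_by': {'type': 'ITEM'}}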
| 27.107143
| 77
| 0.554018
| 176
| 1,518
| 4.482955
| 0.318182
| 0.08872
| 0.070976
| 0.091255
| 0.08365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.320817
| 1,518
| 55
| 78
| 27.6
| 0.765276
| 0.073781
| 0
| 0
| 0
| 0
| 0.012959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.02439
| 0
| 0.365854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
590721cca2145e8661012d52208da3bcc5dbe108
| 230
|
py
|
Python
|
Semester-1/Lab8/src/lab_A.py
|
Vipul-Cariappa/Collage-CS-Lab
|
0a0193df9575a4e69b60759d974423202ddf544b
|
[
"MIT"
] | null | null | null |
Semester-1/Lab8/src/lab_A.py
|
Vipul-Cariappa/Collage-CS-Lab
|
0a0193df9575a4e69b60759d974423202ddf544b
|
[
"MIT"
] | null | null | null |
Semester-1/Lab8/src/lab_A.py
|
Vipul-Cariappa/Collage-CS-Lab
|
0a0193df9575a4e69b60759d974423202ddf544b
|
[
"MIT"
] | 2
|
2022-03-04T14:06:15.000Z
|
2022-03-16T17:32:10.000Z
|
# program to display first n lines in a text file
n = int(input("Enter number of lines: "))
with open("note.txt") as file:
while n > 0:
print(
file.readline(),
end=""
)
n -= 1
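# Alternative sketch (added): itertools.islice stops cleanly at end-of-file,
# whereas readline() keeps returning "" once the file is exhausted.
# from itertools import islice
# with open("note.txt") as file:
#     for line in islice(file, n):
#         print(line, end="")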
| 19.166667
| 49
| 0.5
| 32
| 230
| 3.59375
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013986
| 0.378261
| 230
| 11
| 50
| 20.909091
| 0.79021
| 0.204348
| 0
| 0
| 0
| 0
| 0.171271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59079f538bc9e256df53c65451be92c382f11c5c
| 23,420
|
py
|
Python
|
eplusplus/view/mainWindow.py
|
labeee/EPlusPlus
|
da6cbd60575146a8f165fb72e165919cd83ddc24
|
[
"MIT"
] | 1
|
2018-02-06T17:41:12.000Z
|
2018-02-06T17:41:12.000Z
|
eplusplus/view/mainWindow.py
|
labeee/EPlusPlus
|
da6cbd60575146a8f165fb72e165919cd83ddc24
|
[
"MIT"
] | null | null | null |
eplusplus/view/mainWindow.py
|
labeee/EPlusPlus
|
da6cbd60575146a8f165fb72e165919cd83ddc24
|
[
"MIT"
] | 1
|
2021-06-29T02:49:59.000Z
|
2021-06-29T02:49:59.000Z
|
import os
import sys
import ctypes
import webbrowser
from .lineEdit import LineEdit
from .dialogWithCheckBox import DialogWithCheckBox
from eplusplus.controller import ActorUser
from eplusplus.exception import ColumnException, NoIdfException, InstallException, NoCsvException
from PyQt5.QtCore import QSize, Qt, QRect
from PyQt5.QtGui import QPixmap, QIcon, QIntValidator
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QRadioButton
from PyQt5.QtWidgets import QGridLayout, QFileDialog, QMessageBox, QApplication
from PyQt5.QtWidgets import QButtonGroup, QLineEdit, QAction, QMenuBar
##
## @brief This class implements the main window of the eplusplus
## application. The UI uses PyQt to create and configure
## all the components. Besides components such as
## labels, radio buttons, buttons and line edits, the main
## window has an actorUser, representing the controller, used to call
## all the methods implemented in the program's logic.
##
class MainWindow(QWidget):
def __init__(self, args):
super(MainWindow, self).__init__()
msgBox = DialogWithCheckBox(self)
self.firstTime = True
self.pathToIcon = "./media/icon.png"
self.actorUser = ActorUser()
if not self.actorUser.existsFileConfirmCheckBox():
checkedBox = msgBox.exec_()[1]
if checkedBox:
self.actorUser.createFileConfirmCheckBox()
self.logo = QLabel()
self.casesButton = QPushButton("Gerar casos")
self.simulationButton = QPushButton("Executar simulação")
self.confirmButtonCases = QPushButton("Confirmar")
self.cancelButton = QPushButton("Cancelar")
self.chooseIdfButton = QPushButton("Escolher IDF...")
self.chooseCSVButton = QPushButton("Escolher CSV...")
self.chooseFolderButton = QPushButton("Escolher pasta...")
self.chooseEpwButton = QPushButton("Escolher EPW...")
self.confirmButtonSimulation = QPushButton("Confirmar")
self.setWindowIcon(QIcon(self.pathToIcon))
self.lineIdf = LineEdit(self)
self.lineCsv = LineEdit(self)
self.lineFolder = LineEdit(self)
self.lineEpw = LineEdit(self)
self.lineCases = QLineEdit()
self.validatorCases = QIntValidator(1, 9999999, self)
self.lineCases.setValidator(self.validatorCases)
self.group = QButtonGroup()
self.lhsRB = QRadioButton("Latin Hypercube Sampling")
self.randomRB = QRadioButton("Random")
self.group.addButton(self.randomRB)
self.group.addButton(self.lhsRB)
self.gridLayout = QGridLayout()
self.menuBar = QMenuBar()
self.help = self.menuBar.addMenu("Ajuda")
self.helpAction = QAction("Documentação", self)
self.help.addAction(self.helpAction)
self.helpAction.triggered.connect(self.documentationClicked)
self.processingMessage = QLabel()
self.gridLayout.setMenuBar(self.menuBar)
self.initComponents()
##
## @brief This method is called by the constructor, or when the
## cancel button is clicked, to go back to the first screen.
## This method configures the layout. If this is the first
## time the method is called, all buttons are also
## connected to their corresponding methods.
##
## @param self Non static method.
##
## @return This is a void method.
##
def initComponents(self):
pixmap = QPixmap("./media/title.png")
self.logo.setPixmap(pixmap)
self.gridLayout.addWidget(self.logo, 0, 0)
self.gridLayout.addWidget(self.casesButton, 1, 0)
self.gridLayout.addWidget(self.simulationButton, 2, 0)
if self.firstTime:
self.firstTime = False
self.casesButton.clicked.connect(self.casesButtonClicked)
self.simulationButton.clicked.connect(self.simulationButtonClicked)
self.cancelButton.clicked.connect(self.cancelButtonClicked)
self.confirmButtonCases.clicked.connect(self.confirmButtonCasesClicked)
self.chooseIdfButton.clicked.connect(self.chooseIdfClicked)
self.chooseCSVButton.clicked.connect(self.chooseCsvClicked)
self.chooseFolderButton.clicked.connect(self.chooseFolderClicked)
self.chooseEpwButton.clicked.connect(self.chooseEpwButtonClicked)
self.confirmButtonSimulation.clicked.connect(self.confirmButtonSimulationClicked)
self.checkAndInstall()
self.setLayout(self.gridLayout)
self.setFixedSize(650, 250)
self.setWindowTitle("EPlusPlus")
self.show()
##
## @brief This method is activated whenever the "casesButton" is
## pressed. First of all, it removes all components from
## the window. After that it just configures the labels,
## lineTexts and buttons in the grid layout.
##
## @param self Non static method.
##
## @return This is a void method.
##
def casesButtonClicked(self):
self.clearAll()
idfLabel = QLabel()
csvLabel = QLabel()
folderStoreLabel = QLabel()
methodSamplingLabel = QLabel()
sampleSize = QLabel()
idfLabel.setText("Arquivo base IDF:")
csvLabel.setText("Arquivo de configuração CSV:")
folderStoreLabel.setText("Pasta para salvar os arquivos IDF's:")
methodSamplingLabel.setText("Método de amostragem")
sampleSize.setText("Número da amostragem")
self.gridLayout.addWidget(idfLabel, 1, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseIdfButton, 1, 1)
self.gridLayout.addWidget(self.lineIdf, 1, 2)
self.gridLayout.addWidget(csvLabel, 2, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseCSVButton, 2, 1)
self.gridLayout.addWidget(self.lineCsv, 2, 2)
self.gridLayout.addWidget(folderStoreLabel, 3, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseFolderButton, 3, 1)
self.gridLayout.addWidget(self.lineFolder, 3, 2)
self.gridLayout.addWidget(methodSamplingLabel, 4, 1, Qt.AlignBottom)
self.gridLayout.addWidget(self.randomRB, 5, 0, Qt.AlignTop)
self.gridLayout.addWidget(self.lhsRB, 5, 2, Qt.AlignRight)
self.gridLayout.addWidget(sampleSize, 6, 0, 1, 2)
self.gridLayout.addWidget(self.lineCases, 6, 2)
self.gridLayout.addWidget(self.confirmButtonCases, 7, 0, 1, 3, Qt.AlignTop)
self.gridLayout.addWidget(self.cancelButton, 8, 0, 1, 3, Qt.AlignTop)
##
## @brief This method is activated whenever the "simulationButton" is
## pressed. First of all, it removes all components from
## the window. After that it just configures the labels,
## lineTexts and buttons in the grid layout.
##
## @param self Non static method
##
## @return This is a void method
##
def simulationButtonClicked(self):
self.clearAll()
folderStoreLabel = QLabel()
epwLabel = QLabel()
folderStoreLabel.setText("Pasta com os arquivos idf's")
epwLabel.setText("Arquivo EPW")
self.gridLayout.addWidget(folderStoreLabel, 1, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseFolderButton, 1, 1)
self.gridLayout.addWidget(self.lineFolder, 1, 2)
self.gridLayout.addWidget(epwLabel, 2, 0, Qt.AlignRight)
self.gridLayout.addWidget(self.chooseEpwButton, 2, 1)
self.gridLayout.addWidget(self.lineEpw, 2, 2)
# Doing this just to make the UI look a little nicer
self.gridLayout.addWidget(QLabel(), 3, 0)
self.gridLayout.addWidget(self.processingMessage, 4, 0, 1, 3, Qt.AlignCenter)
self.gridLayout.addWidget(self.confirmButtonSimulation, 7, 0, 1, 3, Qt.AlignBottom)
self.gridLayout.addWidget(self.cancelButton, 8, 0, 1, 3, Qt.AlignBottom)
##
## @brief This method is activated whenever the "chooseIdf" button is
## pressed. When this method is activated, a QFileDialog will
## be shown to the user, making it possible to choose an
## IDF file. The QFileDialog will show only IDF files and
## folders. After the IDF file is chosen, the "lineIdf" attribute
## will have its text set to the absolute path of the chosen
## IDF file.
##
## @param self Non static method.
##
## @return This is a void method.
##
def chooseIdfClicked(self):
msg = "Escolha o arquivo idf"
filename = QFileDialog.getOpenFileName(self, msg, os.getenv("HOME"), filter="*.idf")
self.setLineIdfText(filename[0])
##
## @brief This method is activated whenever the "chooseCsv" button is
## pressed. When this method is activated, a QFileDialog will
## be shown to the user, making it possible to choose a
## CSV file. After the CSV file is chosen, the "lineCsv"
## attribute will have its text set to the absolute path
## of the chosen CSV file.
##
## @param self Non static method.
##
## @return This is a void method.
##
def chooseCsvClicked(self):
msg = "Escolha o arquivo base csv"
filename = QFileDialog.getOpenFileName(self, msg, os.getenv("HOME"), filter="*.csv")
self.setLineCsvText(filename[0])
##
## @brief This method is activated whenever the "chooseFolder" button is
## clicked. When this method is activated, a QFileDialog will
## be shown to the user, making it possible to choose a
## folder in which the newly generated IDF files will be saved.
## After the folder is chosen, the "lineFolder" attribute
## will have its text changed to the chosen folder's absolute path.
##
## @param self Non static method.
##
## @return This is a void method.
##
def chooseFolderClicked(self):
msg = "Escolha a pasta para salvar os arquivos IDF's"
folder = QFileDialog.getExistingDirectory(self, msg, os.getenv("HOME"))
self.setLineFolderText(folder)
##
## @brief This method is activated when the cancel button is
## pressed. This method removes all components from the
## screen and goes back to the initial screen.
##
## @param self Non static method.
##
## @return This is a void method.
##
def cancelButtonClicked(self):
self.clearAll()
self.initComponents()
##
## @brief This method is activated whenever the confirm button
## is pressed. This method checks that all the lineText
## fields were filled and a radio button selected. If not, the
## user will be informed through a QMessageBox. Otherwise,
## if all fields were covered, the cases will be generated.
## See the "generateCases" method for more info.
##
## @param self Non static method.
##
## @return This is a void method.
##
def confirmButtonCasesClicked(self):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msgBox.setWindowTitle("EPlusPlus-WAR")
msgBox.setText("Todos os campos devem estar preenchidos para prosseguir!")
if self.lineIdf.text() == "":
msgBox.exec_()
elif self.lineCsv.text() == "":
msgBox.exec_()
elif self.lineFolder.text() == "":
msgBox.exec_()
elif self.lineCases.text() == "":
msgBox.exec_()
elif not self.lhsRB.isChecked() and not self.randomRB.isChecked():
msgBox.exec_()
else:
self.generateCases()
##
## @brief This method is activated whenever the "chooseEpwButton" is
## clicked. When this method is activated, a QFileDialog will
## be shown to the user, making it possible to choose an
## EPW file. After the EPW is chosen, the "lineEpw" attribute
## will have its text changed to the absolute path of the chosen
## EPW file.
##
## @param self Non static method
##
## @return This is a void method
##
def chooseEpwButtonClicked(self):
msg = "Escolha o arquivo EPW"
epwFile = QFileDialog.getOpenFileName(self, msg, os.getenv("HOME"), filter="*.epw")
self.setLineEpwText(epwFile[0])
##
## @brief This method is called whenever the confirm button of the
## simulation screen is clicked. This method checks whether all
## fields are filled. If not, a warning message will appear
## to the user through a QMessageBox informing them that all fields
## need to be completed. Otherwise, if all fields were filled,
## the simulation will be executed.
##
## @param self Non static method
##
## @return This is a void method
##
def confirmButtonSimulationClicked(self):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msgBox.setWindowTitle("EPlusPlus-WAR")
msgBox.setText("Todos os campos devem estar preenchidos para prosseguir!")
if self.lineFolder.text() == "":
msgBox.exec_()
elif self.lineEpw.text() == "":
msgBox.exec_()
else:
self.runSimulation()
##
## @brief This method is used every time the "Documentation" button
## is clicked in the menu bar. This method opens the program's
## manual, in PDF format, in the current user's default
## browser.
##
## @param self Non static method
##
## @return This is a void method.
##
def documentationClicked(self):
doc = "./docs/documentacaoEPlusPlus.pdf"
webbrowser.open("file://"+os.path.abspath(doc))
##
## @brief This method takes all values entered by the user through
## the lineEdit fields. After determining the chosen sampling
## method, the UI will call the actorUser to generate
## the cases. If all happens as it should, a QMessageBox
## will inform the user. Otherwise, if a "ColumnException"
## is raised by the "actorUser", the user will be informed
## that the CSV and the IDF do not match.
##
## @param self Non static method.
##
## @return This is a void method.
##
def generateCases(self):
pathToIdf = self.lineIdf.text()
pathToCsv = self.lineCsv.text()
pathToFolder = self.lineFolder.text()
sampleSize = int(self.lineCases.text())
msgBox = QMessageBox()
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msg = ""
if self.lhsRB.isChecked():
method = "LHS"
else:
method = "RANDOM"
try:
self.actorUser.generateCases(pathToIdf, pathToCsv, pathToFolder, sampleSize, method)
msgBox.setIcon(QMessageBox.Information)
msgBox.setWindowTitle("EPlusPlus-INF")
msg = "Processo finalizado! Verifique a pasta informada para acessar os arquivos."
msgBox.setText(msg)
msgBox.exec_()
self.cancelButtonClicked()
except ColumnException as e:
msgBox.setIcon(QMessageBox.Critical)
msgBox.setWindowTitle("EPlusPlus-ERR")
msg = str(e)
msgBox.setText(msg)
msgBox.exec_()
##
## @brief In the first lines, we turn the content entered by the
## user on the current screen into strings. After that, we
## create a QMessageBox to show important information. Then
## it will try to run the simulation through the "actorUser" (
## see its documentation for more info). If no IDF file is
## found in the given folder, an exception will be raised.
## Otherwise, if at least one IDF is found, the simulation
## will run normally. After that, the 'actorUser' will try to
## insert the data from the result CSV into the database.
## If no CSV is found, an exception will be raised.
##
## @param self Non static method
##
## @return This is a void method.
##
def runSimulation(self):
pathToFolder = self.lineFolder.text()
pathToEpw = self.lineEpw.text()
msgBox = QMessageBox()
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msg = ""
try:
self.actorUser.findIdfFiles(pathToFolder)
msg = "Processando arquivos..."
self.processingMessage.setText(msg)
QApplication.processEvents()
self.actorUser.runSimulation(pathToFolder, pathToEpw)
except NoIdfException as e:
msgBox.setIcon(QMessageBox.Critical)
msgBox.setWindowTitle("EPlusPlus-ERR")
msg = str(e)
msgBox.setText(msg)
msgBox.exec_()
try:
self.actorUser.insertIntoDatabase(pathToFolder)
msgBox.setIcon(QMessageBox.Information)
msgBox.setWindowTitle("EPlusPlus-INF")
msg = "Processo finalizado com sucesso!"
msgBox.setText(msg)
msgBox.exec_()
ask = 'Você gostaria de apagar os arquivos e manter somente a base de dados?'
reply = QMessageBox.question(self, "EPlusPlus-INF", ask, QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
self.actorUser.removeDirectories(pathToFolder)
msg = "Arquivos removidos com sucesso!"
msgBox.setText(msg)
msgBox.exec_()
self.cancelButtonClicked()
except NoCsvException as e:
msgBox.setIcon(QMessageBox.Critical)
msgBox.setWindowTitle("EPlusPlus-ERR")
msg = str(e)
msgBox.setText(msg)
msgBox.exec_()
##
## @brief This method is responsible for checking whether all tools are
## installed on the current machine. If not, a message will
## be shown to the user and the installation will start. If
## a problem occurs for any reason, an error message will appear
## on the screen. If all goes well, a success message will
## be shown.
##
## @param self Non static method
##
## @return This is a void method
##
def checkAndInstall(self):
msgBox = QMessageBox()
msgBox.setWindowTitle("EPlusPlus-INF")
msgBox.setWindowIcon(QIcon(self.pathToIcon))
msg = "O EPlusPlus irá agora instalar as ferramentas necessárias para"
msg += " o seu correto funcionamento!"
if not self.actorUser.checkTools():
try:
msgBox.setText(msg)
msgBox.setIcon(QMessageBox.Information)
msgBox.exec_()
self.actorUser.checkAndInstall()
msg = "Instalações feitas com sucesso!"
msgBox.setText(msg)
msgBox.exec_()
except InstallException as e:
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Critical)
msgBox.setWindowTitle("EPlusPlus-ERR")
msg = str(e)
msgBox.setText(msg)
msgBox.exec_()
sys.exit()
##
## @brief This method sets the first lineText of the 2nd screen
## to the path where the IDF file is saved, as informed
## by the user through the QFileDialog.
##
## @param self The object
## @param string String that will be shown in the lineText.
##
## @return This is a void method.
##
def setLineIdfText(self, string):
self.lineIdf.setText(string)
##
## @brief This method sets the second lineText of the 2nd
## screen to the path where the CSV file is saved,
## as chosen by the user.
##
## @param self Non static method.
## @param string String that will be shown in the lineText.
##
## @return This is a void method.
##
def setLineCsvText(self, string):
self.lineCsv.setText(string)
##
## @brief This method sets the third lineText of the 2nd
## screen to the path where the new IDF files
## will be saved, as chosen by the user.
##
## @param self Non static method.
## @param string String that will be shown in the lineText.
##
## @return This is a void method.
##
def setLineFolderText(self, string):
self.lineFolder.setText(string)
##
## @brief This method sets the fourth lineText of the 2nd screen
## to the value of the string passed as an argument.
##
## @param self Non static method
## @param string String that will be shown in the lineCases
##
## @return This is a void method
##
def setLineCasesText(self, string):
self.lineCases.setText(string)
##
## @brief This method sets the second lineText of the 3rd screen
## to the value of the string passed as an argument.
##
## @param self Non static method
## @param string String that will be shown in the lineEpw
##
## @return This is a void method.
##
def setLineEpwText(self, string):
self.lineEpw.setText(string)
##
## @brief This method removes every component in the current window,
## except for the layout. Also, this method clears all lineText
## attributes and clears the values of the radio buttons. The
## "setExclusive" False and "setExclusive" True pair is needed to
## clear the values of the radio button components.
##
## @param self Non static method.
##
## @return This is a void method.
##
def clearAll(self):
for component in reversed(range(self.gridLayout.count())):
self.gridLayout.itemAt(component).widget().setParent(None)
self.setLineIdfText("")
self.setLineCsvText("")
self.setLineFolderText("")
self.setLineCasesText("")
self.setLineEpwText("")
self.processingMessage.setText("")
self.group.setExclusive(False)
self.randomRB.setChecked(False)
self.lhsRB.setChecked(False)
self.group.setExclusive(True)
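# Added entry-point sketch (not in the original file): a typical way to
# launch this window with the standard PyQt5 event loop; QApplication and
# sys are already imported above.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow(sys.argv)
    sys.exit(app.exec_())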
| 40.589255
| 101
| 0.608839
| 2,525
| 23,420
| 5.637228
| 0.184158
| 0.033441
| 0.04686
| 0.039834
| 0.405929
| 0.360967
| 0.328369
| 0.291766
| 0.263594
| 0.234228
| 0
| 0.006056
| 0.302007
| 23,420
| 577
| 102
| 40.589255
| 0.864685
| 0.363877
| 0
| 0.27972
| 0
| 0
| 0.079667
| 0.002217
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073427
| false
| 0
| 0.048951
| 0
| 0.125874
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5907d7fbfcc198ea821785faf5ae482c8f858484
| 4,555
|
py
|
Python
|
CHAPTER 11 (search trees)/red_black_trees_class.py
|
ahammadshawki8/Data-Structures-Algorithms-in-Python-
|
fc18b54128cd5bc7639a14999d8f990190b524eb
|
[
"MIT"
] | null | null | null |
CHAPTER 11 (search trees)/red_black_trees_class.py
|
ahammadshawki8/Data-Structures-Algorithms-in-Python-
|
fc18b54128cd5bc7639a14999d8f990190b524eb
|
[
"MIT"
] | null | null | null |
CHAPTER 11 (search trees)/red_black_trees_class.py
|
ahammadshawki8/Data-Structures-Algorithms-in-Python-
|
fc18b54128cd5bc7639a14999d8f990190b524eb
|
[
"MIT"
] | null | null | null |
from tree_map_class import *
class RedBlackTreeMap(TreeMap):
"""Sorted map implementation using red-black tree."""
class _Node(TreeMap._Node):
"""Node class for red-black tree maintains bit that denotes color."""
__slots__ = "_red" # add additional data member to the Node class
def __init__(self,element,parent=None,left=None,right=None):
super().__init__(element,parent,left,right)
self._red = True # new node is red by default
#-----------------positional based utility methods-----------------------------
# we consider a nonexisting child to be trivially black
def _set_red(self,p):
p._node._red = True
def _set_black(self,p):
p._node._red = False
def _set_color(self,p,make_red):
p._node._red = make_red
def _is_red(self,p):
return (p is not None) and p._node._red
def _is_red_leaf(self,p):
return self._is_red(p) and self.is_leaf(p)
def _get_red_child(self,p):
"""Return a red child of p (or None if no such child)."""
for child in (self.left(p),self.right(p)):
if self._is_red(child):
return child
return None
#-----------------------support for insertions------------------------------
def _rebalance_insert(self,p):
self._resolve_red(p) # new node is always red
def _resolve_red(self,p):
if self.is_root(p):
self._set_black(p) # make root black
else:
parent = self.parent(p)
if self._is_red(parent): # double red problem
uncle = self.sibling(parent)
if not self._is_red(uncle): # Case 1: misshapen 4-node
middle = self._restructure(p) # do trinode restructuring
self._set_black(middle) # and fix the colors
self._set_red(self.left(middle))
self._set_red(self.right(middle))
else: # Case 2: overfull 5-node
grand = self.parent(parent)
self._set_red(grand) # grandparent becomes red
self._set_black(self.left(grand)) # its children become black
self._set_black(self.right(grand))
self._resolve_red(grand) # continue recur at grandparent
# the double restructure was handled previously in the restructure method
#-------------------------support for deletions--------------------------------
def _rebalance_delete(self,p):
if len(self) == 1:
self._set_black(self.root()) # special case ensure that the root is black
elif p is not None:
n = self.num_children(p)
if n == 1: # deficit exists unless child is a red leaf
c = next(self.children(p))
if not self._is_red_leaf(c):
self._fix_deficit(p,c)
elif n == 2: # removed black node with red child
if self._is_red_leaf(self.left(p)):
self._set_black(self.left(p))
else:
self._set_black(self.right(p))
def _fix_deficit(self,z,y):
"""Resolve black deficit at z, where y is the root of z's heavier subtree."""
if not self._is_red(y): # y is black; will apply case 1 or 2
x = self._get_red_child(y)
if x is not None: # Case 1: y is black and has red child x; do transfer
old_color = self._is_red(z)
middle = self._restructure(x)
self._set_color(middle,old_color) # middle gets old color of z
self._set_black(self.left(middle)) # children become black
self._set_black(self.right(middle))
else: # case 2: y is black, but no red children; recolor as fusion
self._set_red(y)
if self._is_red(z):
self._set_black(z) # this resolves the problem
elif not self.is_root(z):
self._fix_deficit(self.parent(z), self.sibling(z)) # recur upward
else: # Case 3: y is red; rotate misaligned 3-node and repeat
self._rotate(y)
self._set_black(y)
self._set_red(z)
if z == self.right(y):
self._fix_deficit(z,self.left(z))
else:
self._fix_deficit(z,self.right(z))
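# Added usage sketch (assumes tree_map_class provides the sorted-map TreeMap
# from Goodrich et al.): insertions go through _rebalance_insert, and
# iteration yields keys in sorted order.
# rb = RedBlackTreeMap()
# for k in (44, 17, 78, 32, 50):
#     rb[k] = str(k)
# print(list(rb))   # [17, 32, 44, 50, 78]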
| 46.958763
| 88
| 0.528211
| 585
| 4,555
| 3.907692
| 0.247863
| 0.048994
| 0.052493
| 0.041995
| 0.118548
| 0.020997
| 0
| 0
| 0
| 0
| 0
| 0.004407
| 0.35236
| 4,555
| 96
| 89
| 47.447917
| 0.770508
| 0.286718
| 0
| 0.076923
| 0
| 0
| 0.00125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.141026
| false
| 0
| 0.012821
| 0.025641
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5909f08bda2ad877f9982af2cd854a38d7dd516a
| 13,029
|
py
|
Python
|
intake_sdmx.py
|
dr-leo/intake_sdmx
|
dccd51e6ce4aa352fba0a0c25dfac82148acd1e3
|
[
"Apache-2.0"
] | null | null | null |
intake_sdmx.py
|
dr-leo/intake_sdmx
|
dccd51e6ce4aa352fba0a0c25dfac82148acd1e3
|
[
"Apache-2.0"
] | 3
|
2021-05-29T19:46:36.000Z
|
2022-01-15T14:15:22.000Z
|
intake_sdmx.py
|
dr-leo/intake_sdmx
|
dccd51e6ce4aa352fba0a0c25dfac82148acd1e3
|
[
"Apache-2.0"
] | 1
|
2021-05-28T13:14:53.000Z
|
2021-05-28T13:14:53.000Z
|
"""intake plugin for SDMX data sources"""
import intake
from intake.catalog import Catalog
from intake.catalog.utils import reload_on_change
from intake.catalog.local import LocalCatalogEntry, UserParameter
import pandasdmx as sdmx
from collections.abc import MutableMapping
from datetime import date
from itertools import chain
__version__ = "0.1.0"
NOT_SPECIFIED = "n/a"
class LazyDict(MutableMapping):
def __init__(self, func, *args, **kwargs):
super().__init__()
self._dict = dict(*args, **kwargs)
self._func = func
def update(self, *args, **kwargs):
return self._dict.update(*args, **kwargs)
def __getitem__(self, key):
if self._dict[key] is None:
self._dict[key] = self._func(key)
return self._dict[key]
def __setitem__(self, key, value):
return self._dict.__setitem__(key, value)
def __contains__(self, key):
return self._dict.__contains__(key)
def __len__(self):
return self._dict.__len__()
def __delitem__(self, key):
return self._dict.__delitem__(key)
def __iter__(self):
return self._dict.__iter__()
def __str__(self):
return "".join((self.__class__.__name__, "(", str(self._dict), ")"))
class SDMXSources(Catalog):
"""
catalog of SDMX data sources, a.k.a. agencies
supported by pandaSDMX
"""
name = "sdmx"
description = "SDMX sources supported by pandaSDMX"
version = __version__
container = "catalog"
def _load(self):
# exclude sources which do not support dataflows
# and datasets (eg json-based ABS and OECD)
excluded = ["ABS", "OECD", "IMF", "SGR", "STAT_EE"]
for source_id, source in sdmx.source.sources.items():
if source_id not in excluded:
descr = source.name
metadata = {"source_id": source_id}
e = LocalCatalogEntry(
source_id + "_SDMX_dataflows",
descr,
SDMXDataflows,
direct_access=True,
# set storage_options to {} if not set. This avoids TypeError
# when passing it to sdmx.Request() later
args={"storage_options": self.storage_options or {}},
cache=[],
parameters=[],
metadata=metadata,
catalog_dir="",
getenv=False,
getshell=False,
catalog=self,
)
self._entries[source_id] = e
class SDMXCodeParam(UserParameter):
def __init__(self, allowed=None, **kwargs):
super(SDMXCodeParam, self).__init__(**kwargs)
self.allowed = allowed
def validate(self, value):
# Convert short-form multiple selections to list, e.g. 'DE+FR'
if isinstance(value, str) and "+" in value:
value = value.split("+")
# Single code as str
if isinstance(value, str):
if not value in self.allowed:
raise ValueError(
"%s=%s is not one of the allowed values: %s"
% (self.name, value, ",".join(map(str, self.allowed)))
)
# So value must be an iterable of str, e.g. multiple selection
elif not all(c in self.allowed for c in value):
not_allowed = [c for c in value if not c in self.allowed]
raise ValueError(
"%s=%s is not one of the allowed values: %s"
% (self.name, not_allowed, ",".join(map(str, self.allowed)))
)
return value
class SDMXDataflows(Catalog):
"""
catalog of dataflows for a given SDMX source
"""
version = __version__
container = "catalog"
partition_access = False
def _make_entries_container(self):
return LazyDict(self._make_dataflow_entry)
def _load(self):
# read metadata on dataflows
self.name = self.metadata["source_id"] + "_SDMX_dataflows"
# Request dataflows from remote SDMX service
self.req = sdmx.Request(self.metadata["source_id"], **self.storage_options)
# get full list of dataflows
self._flows_msg = self.req.dataflow()
# build a mapping from names to IDs for later back-translation
# We use this catalog to store 2 entries per dataflow: its ID and its human-readable name
self.name2id = {}
for dataflow in self._flows_msg.dataflow.values():
flow_id, flow_name = dataflow.id, str(dataflow.name)
# make 2 entries per dataflow using its ID and name
self._entries[flow_id] = None
self._entries[flow_name] = None
self.name2id[flow_name] = flow_id
def _make_dataflow_entry(self, flow_id):
# if flow_id is actually its name, get the real id
if flow_id in self.name2id:
flow_id = self.name2id[flow_id]
# Download metadata on specified dataflow
flow_msg = self.req.dataflow(flow_id)
flow = flow_msg.dataflow[flow_id]
dsd = flow.structure
descr = str(flow.name)
metadata = self.metadata.copy()
metadata["dataflow_id"] = flow_id
metadata["structure_id"] = dsd.id
# Make user params for coded dimensions
# Check for any content constraints to codelists
if hasattr(flow_msg, "constraint") and flow_msg.constraint:
constraint = (
next(iter(flow_msg.constraint.values())).data_content_region[0].member
)
else:
constraint = None
params = []
# params for coded dimensions
for dim in dsd.dimensions:
lr = dim.local_representation
# only dimensions with enumeration, i.e. where values are codes
if lr.enumerated:
ci = dim.concept_identity
# Get code ID and name as its description
if constraint and dim.id in constraint:
codes_iter = (
c
for c in lr.enumerated.items.values()
if c in constraint[dim.id]
)
else:
codes_iter = lr.enumerated.items.values()
codes = {*chain(*((c.id, str(c.name)) for c in codes_iter))}
# allow "" to indicate wild-carded dimension
codes.add(NOT_SPECIFIED)
p = UserParameter(
name=dim.id,
description=str(ci.name),
type="str",
allowed=codes,
default=NOT_SPECIFIED,
)
params.append(p)
# Try to retrieve ID of time and freq dimensions for DataFrame index
dim_candidates = [d.id for d in dsd.dimensions if "TIME" in d.id]
try:
time_dim_id = dim_candidates[0]
except IndexError:
time_dim_id = NOT_SPECIFIED
# Frequency for period index generation
dim_candidates = [p.name for p in params if "FREQ" in p.name]
try:
freq_dim_id = dim_candidates[0]
except IndexError:
freq_dim_id = NOT_SPECIFIED
# params for startPeriod and endPeriod
year = date.today().year
params.extend(
[
UserParameter(
name="startPeriod",
description="startPeriod",
type="datetime",
default=str(year - 1),
),
UserParameter(
name="endPeriod", description="endPeriod", type="datetime"
),
UserParameter(
name="dtype",
description="""data type for pandas.DataFrame. See pandas docs
for allowed values.
Default is '' which translates to 'float64'.""",
type="str",
),
UserParameter(
name="attributes",
description="""Include any attributes alongside observations
in the DataFrame. See pandasdmx docx for details.
Examples: 'osgd' for all attributes, or
'os': only attributes at observation and series level.""",
type="str",
),
UserParameter(
name="index_type",
description="""Type of pandas Series/DataFrame index""",
type="str",
allowed=["object", "datetime", "period"],
default="object",
),
UserParameter(
name="freq_dim",
description="""To generate PeriodIndex (index_type='period')
Default is set based on heuristics.""",
type="str",
default=freq_dim_id,
),
UserParameter(
name="time_dim",
description="""To generate datetime or period index.
Ignored if index_type='object'.""",
type="str",
default=time_dim_id,
),
]
)
args = {p.name: f"{{{{{p.name}}}}}" for p in params}
args["storage_options"] = self.storage_options
return LocalCatalogEntry(
name=flow_id,
description=descr,
driver=SDMXData,
direct_access=True,
cache=[],
parameters=params,
args=args,
metadata=metadata,
catalog_dir="",
getenv=False,
getshell=False,
catalog=self,
)
@reload_on_change
def search(self, text):
words = text.lower().split()
cat = SDMXDataflows(
name=self.name + "_search",
description=self.description,
ttl=self.ttl,
getenv=self.getenv,
getshell=self.getshell,
metadata=(self.metadata or {}).copy(),
storage_options=self.storage_options,
)
cat.metadata["search"] = {"text": text, "upstream": self.name}
cat.cat = self
cat._entries._dict.clear()
keys = [
*chain.from_iterable(
(self.name2id[k], k)
for k in self
if any(word in k.lower() for word in words)
)
]
cat._entries.update({k: None for k in keys})
return cat
def filter(self, func):
raise NotImplementedError
class SDMXData(intake.source.base.DataSource):
"""
Driver for SDMX data sets of a given SDMX dataflow
"""
version = __version__
name = "sdmx_dataset"
container = "dataframe"
partition_access = True
def __init__(self, metadata=None, **kwargs):
super(SDMXData, self).__init__(metadata=metadata)
self.name = self.metadata["dataflow_id"]
self.req = sdmx.Request(self.metadata["source_id"], **self.storage_options)
self.kwargs = kwargs
def read(self):
# construct key
key_ids = (
p.name for p in self.entry._user_parameters if isinstance(p, SDMXCodeParam)
)
key = {i: self.kwargs[i] for i in key_ids if self.kwargs[i]}
# params for request. Currently, only start- and endPeriod are supported
params = {k: str(self.kwargs[k].year) for k in ["startPeriod", "endPeriod"]}
# remove endPeriod if it is prior to startPeriod (which is the default)
if params["endPeriod"] < params["startPeriod"]:
del params["endPeriod"]
# Now request the data via HTTP
# TODO: handle Request.get kwargs eg. fromfile, timeout.
data_msg = self.req.data(self.metadata["dataflow_id"], key=key, params=params)
# get writer config.
# Capture only non-empty values as these will be filled by the writer
writer_config = {
k: self.kwargs[k] for k in ["dtype", "attributes"] if self.kwargs[k]
}
# construct args to conform to writer API
index_type = self.kwargs["index_type"]
freq_dim = self.kwargs["freq_dim"]
time_dim = self.kwargs["time_dim"]
if index_type == "datetime":
writer_config["datetime"] = True if freq_dim == NOT_SPECIFIED else freq_dim
elif index_type == "period":
datetime = {}
datetime["freq"] = True if freq_dim == NOT_SPECIFIED else freq_dim
datetime["dim"] = True if time_dim == NOT_SPECIFIED else time_dim
writer_config["datetime"] = datetime
# generate the Series or dataframe
self._dataframe = data_msg.to_pandas(**writer_config)
return self._dataframe
def _close(self):
self._dataframe = None
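# Added usage sketch (hypothetical session; assumes network access to an
# SDMX service supported by pandaSDMX):
#   cat = SDMXSources()
#   ecb_flows = cat["ECB"]()          # lazily built catalog of ECB dataflows
#   hits = ecb_flows.search("exchange rates")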
| 36.805085
| 90
| 0.54939
| 1,420
| 13,029
| 4.859859
| 0.215493
| 0.010433
| 0.014201
| 0.00739
| 0.107086
| 0.088683
| 0.073323
| 0.063179
| 0.063179
| 0.052746
| 0
| 0.001908
| 0.356359
| 13,029
| 353
| 91
| 36.909348
| 0.821011
| 0.131629
| 0
| 0.194853
| 0
| 0
| 0.116882
| 0.001871
| 0
| 0
| 0
| 0.002833
| 0
| 1
| 0.073529
| false
| 0
| 0.029412
| 0.029412
| 0.209559
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5910779f16295dd8d8929f180e23470f2321f629
| 1,388
|
py
|
Python
|
apps/exp/afe/afe_bfcc.py
|
yt7589/mgs
|
2faae1b69e6d4cde63afb9b2432b1bf49ebdd770
|
[
"Apache-2.0"
] | null | null | null |
apps/exp/afe/afe_bfcc.py
|
yt7589/mgs
|
2faae1b69e6d4cde63afb9b2432b1bf49ebdd770
|
[
"Apache-2.0"
] | null | null | null |
apps/exp/afe/afe_bfcc.py
|
yt7589/mgs
|
2faae1b69e6d4cde63afb9b2432b1bf49ebdd770
|
[
"Apache-2.0"
] | null | null | null |
#
#import scipy
#from scipy import io as sio
import scipy.io.wavfile
from ext.spafe.utils import vis
from ext.spafe.features.bfcc import bfcc
class AfeBfcc:
@staticmethod
def extract_bfcc(wav_file):
print('获取BFCC特征') # "Extracting BFCC features"
num_ceps = 13
low_freq = 0
high_freq = 2000
nfilts = 24
nfft = 512
dct_type = 2
use_energy = False
lifter = 5
normalize = False
# read wav
fs, sig_raw = scipy.io.wavfile.read(wav_file)
sig = sig_raw #[:, :1] #.reshape((sig_raw.shape[0],))
print('fs: {0}\r\n{1}\r\n***********'.format(type(fs), fs))
print('sig: {0}\r\n{1}\r\n******************'.format(sig.shape, sig))
# compute features
bfccs = bfcc(sig=sig,
fs=fs,
num_ceps=num_ceps,
nfilts=nfilts,
nfft=nfft,
low_freq=low_freq,
high_freq=high_freq,
dct_type=dct_type,
use_energy=use_energy,
lifter=lifter,
normalize=normalize)
print('step 1')
# visualize spectrogram
vis.spectogram(sig, fs)
print('step 2')
# visualize features
vis.visualize_features(bfccs, 'BFCC Index', 'Frame Index')
print('step 3')
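# Added usage sketch ('sample.wav' is a hypothetical PCM WAV file):
# AfeBfcc.extract_bfcc('sample.wav')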
| 30.844444
| 77
| 0.50072
| 163
| 1,388
| 4.128834
| 0.374233
| 0.011887
| 0.041605
| 0.011887
| 0.035661
| 0.035661
| 0.035661
| 0
| 0
| 0
| 0
| 0.026498
| 0.37464
| 1,388
| 45
| 78
| 30.844444
| 0.748848
| 0.103026
| 0
| 0
| 0
| 0
| 0.09135
| 0.046079
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.083333
| 0
| 0.138889
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
591491ff550ba32d4e2ae2cbc52705d6ad0c7c72
| 4,673
|
py
|
Python
|
notifier_bot.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
notifier_bot.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
notifier_bot.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
import logging
from datetime import date
from telegram import Update
from telegram.ext import ApplicationBuilder, CommandHandler
from config.notif_config import NotifConfig
from src.emojis import Emojis
from src.team_fixtures_manager import TeamFixturesManager
from src.telegram_bot.bot_commands_handler import NextAndLastMatchCommandHandler, NotifierBotCommandsHandler
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
async def start(update: Update, context):
await context.bot.send_photo(
chat_id=update.effective_chat.id,
photo="https://media.api-sports.io/football/players/154.png",
caption=f"{Emojis.WAVING_HAND.value} Hola {update.effective_user.first_name}, soy FootballNotifier bot!\n\n"
f"{Emojis.JOYSTICK.value} /help - Chequeá mis comandos disponibles ;) \n\n"
f"{Emojis.GOAT.value} {Emojis.ARGENTINA.value} Vamos Messi!",
parse_mode="HTML",
)
async def help(update: Update, context):
text = (
f"{Emojis.WAVING_HAND.value}Hola {update.effective_user.first_name}!\n\n"
f" {Emojis.JOYSTICK.value} Estos son mis comandos disponibles (por ahora):\n\n"
f"• /next_match <team>: próximo partido del equipo.\n"
f"• /last_match <team>: último partido jugado del equipo.\n"
f"• /available_teams: equipos disponibles."
)
await context.bot.send_message(chat_id=update.effective_chat.id, text=text)
async def available_teams(update: Update, context):
notifier_commands_handler = NotifierBotCommandsHandler()
text = (
f"{Emojis.WAVING_HAND.value}Hola {update.effective_user.first_name}!\n\n"
f" {Emojis.TELEVISION.value} Estos son los equipos disponibles:\n\n"
f"{notifier_commands_handler.available_teams_text()}"
)
await context.bot.send_message(chat_id=update.effective_chat.id, text=text)
async def next_match(update: Update, context):
command_handler = NextAndLastMatchCommandHandler(context.args)
validated_input = command_handler.validate_command_input()
if validated_input:
await context.bot.send_message(
chat_id=update.effective_chat.id, text=validated_input
)
else:
team = command_handler.get_managed_team(context.args[0])
current_season = date.today().year
team_fixtures_manager = TeamFixturesManager(current_season, team.id)
text, photo = team_fixtures_manager.get_next_team_fixture_text(
update.effective_user.first_name
)
if photo:
await context.bot.send_photo(
chat_id=update.effective_chat.id,
photo=photo,
caption=text,
parse_mode="HTML",
)
else:
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=text,
parse_mode="HTML",
)
async def last_match(update: Update, context):
command_handler = NextAndLastMatchCommandHandler(context.args)
validated_input = command_handler.validate_command_input()
if validated_input:
await context.bot.send_message(
chat_id=update.effective_chat.id, text=validated_input
)
else:
team = command_handler.get_managed_team(context.args[0])
current_season = date.today().year
team_fixtures_manager = TeamFixturesManager(current_season, team.id)
text, photo = team_fixtures_manager.get_last_team_fixture_text(
update.effective_user.first_name
)
if photo:
await context.bot.send_photo(
chat_id=update.effective_chat.id,
photo=photo,
caption=text,
parse_mode="HTML",
)
else:
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=text,
parse_mode="HTML",
)
if __name__ == "__main__":
application = ApplicationBuilder().token(NotifConfig.TELEGRAM_TOKEN).build()
start_handler = CommandHandler("start", start)
next_match_handler = CommandHandler("next_match", next_match)
last_match_handler = CommandHandler("last_match", last_match)
available_teams_handler = CommandHandler("available_teams", available_teams)
help_handler = CommandHandler("help", help)
application.add_handler(start_handler)
application.add_handler(next_match_handler)
application.add_handler(last_match_handler)
application.add_handler(help_handler)
application.add_handler(available_teams_handler)
application.run_polling()
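# Optional sketch (added, not in the original): python-telegram-bot also
# supports a global error handler; registering one before run_polling()
# logs exceptions raised inside command handlers.
# async def on_error(update, context):
#     logging.error("handler raised", exc_info=context.error)
# application.add_error_handler(on_error)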
| 37.685484
| 116
| 0.684571
| 549
| 4,673
| 5.577413
| 0.216758
| 0.035271
| 0.04115
| 0.061724
| 0.556499
| 0.517636
| 0.506205
| 0.506205
| 0.506205
| 0.506205
| 0
| 0.001374
| 0.221485
| 4,673
| 123
| 117
| 37.99187
| 0.839472
| 0
| 0
| 0.490385
| 0
| 0.009615
| 0.18853
| 0.073614
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59177fedfb201ef7cf401094e43b1d49ac1b2c09
| 8,576
|
py
|
Python
|
events/models.py
|
Strategy-Tap/Novizi-BackEnd
|
536edde68dc79ad5467f2dbb0931a56930a4edea
|
[
"MIT"
] | null | null | null |
events/models.py
|
Strategy-Tap/Novizi-BackEnd
|
536edde68dc79ad5467f2dbb0931a56930a4edea
|
[
"MIT"
] | 4
|
2021-04-08T21:23:49.000Z
|
2022-03-12T00:44:54.000Z
|
events/models.py
|
Strategy-Tap/Novizi-BackEnd
|
536edde68dc79ad5467f2dbb0931a56930a4edea
|
[
"MIT"
] | 1
|
2020-06-12T16:08:46.000Z
|
2020-06-12T16:08:46.000Z
|
"""Collection of model."""
from typing import Any
from django.conf import settings
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from djgeojson.fields import PointField
from .utils import get_read_time, unique_slug
def event_upload_to(instance: "Event", filename: str) -> str:
"""A help Function to change the image upload path.
Args:
instance: django model
filename: the uploaded file name
Returns:
path in string format
"""
return f"images/events/cover/{instance.title}/{filename}"
class Tag(models.Model):
"""Reference tag model."""
name = models.CharField(verbose_name=_("name"), max_length=200, unique=True)
class Meta:
"""Meta data."""
verbose_name = _("tag")
verbose_name_plural = _("tags")
def __str__(self: "Tag") -> str:
"""It return readable name for the model."""
return f"{self.name}"
def total_events(self: "Tag") -> int:
"""Getting total of events for the tag."""
return self.events.count()
total_events.short_description = _("Events")
total_events.int = 0
class Event(models.Model):
"""Reference event model."""
title = models.CharField(verbose_name=_("title"), max_length=400)
description = models.TextField(verbose_name=_("description"))
read_time = models.IntegerField(default=0, verbose_name=_("read time"))
slug = models.SlugField(verbose_name=_("slug"), unique=True, blank=True)
event_date = models.DateTimeField(verbose_name=_("event date"))
total_guest = models.PositiveIntegerField(
verbose_name=_("total of guest"), default=1
)
hosted_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_("hosted by"),
on_delete=models.CASCADE,
related_name="events",
db_index=True,
)
cover = models.ImageField(
verbose_name=_("cover"), blank=True, null=True, upload_to=event_upload_to
)
tags = models.ManyToManyField(
to=Tag, verbose_name=_("tags"), related_name="events", blank=True
)
organizers = models.ManyToManyField(
to=settings.AUTH_USER_MODEL,
verbose_name=_("organizers"),
related_name="events_organizers",
blank=True,
)
created_at = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated at"), auto_now=True)
geom = PointField(verbose_name=_("geo location"))
class Meta:
"""Meta data."""
verbose_name = _("event")
verbose_name_plural = _("events")
def __str__(self: "Event") -> str:
"""It return readable name for the model."""
return f"{self.title}"
def total_attendees(self: "Event") -> int:
"""Getting total of attendees for the event."""
return self.attendees.count()
def available_place(self: "Event") -> int:
"""Getting total of available place for the event."""
return self.total_guest - self.attendees.count()
def total_attended(self: "Event") -> int:
"""Getting total of people who actual attended for the event."""
return self.attendees.filter(has_attended=True).count()
def total_not_attended(self: "Event") -> int:
"""Getting total of people who didn't attended for the event."""
return self.attendees.filter(has_attended=False).count()
def total_sessions(self: "Event") -> int:
"""Getting total of sessions in event."""
return self.sessions.count()
def total_draft_sessions(self: "Event") -> int:
"""Getting total of draft sessions in event."""
return self.sessions.filter(status="Draft").count()
def total_accepted_sessions(self: "Event") -> int:
"""Getting total of accepted sessions in event."""
return self.sessions.filter(status="Accepted").count()
def total_denied_sessions(self: "Event") -> int:
"""Getting total of denied sessions in event."""
return self.sessions.filter(status="Denied").count()
def total_talk(self: "Event") -> int:
"""Getting total of talk in event."""
return self.sessions.filter(session_type="Talk", status="Accepted").count()
def total_lighting_talk(self: "Event") -> int:
"""Getting total of lighting talk in event."""
return self.sessions.filter(
session_type="Lighting Talk", status="Accepted"
).count()
def total_workshop(self: "Event") -> int:
"""Getting total of workshop in event."""
return self.sessions.filter(session_type="WorkShop", status="Accepted").count()
total_sessions.short_description = _("Sessions")
total_draft_sessions.short_description = _("Draft Sessions")
total_accepted_sessions.short_description = _("Accepted Sessions")
total_denied_sessions.short_description = _("Denied Sessions")
total_talk.short_description = _("Talk")
total_lighting_talk.short_description = _("Lighting Talk")
total_workshop.short_description = _("Workshop")
total_attendees.short_description = _("Attendees")
total_attended.short_description = _("Has Attended")
total_not_attended.short_description = _("Has Not Attended")
available_place.short_description = _("Available Place")
class Attendee(models.Model):
"""Reference attendee model."""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_("user"),
on_delete=models.CASCADE,
related_name="attendees",
db_index=True,
)
events = models.ForeignKey(
Event,
verbose_name=_("events"),
on_delete=models.CASCADE,
related_name="attendees",
db_index=True,
)
has_attended = models.BooleanField(
verbose_name=_("has attended"), blank=True, null=True
)
created_at = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated at"), auto_now=True)
class Meta:
"""Meta data."""
verbose_name = _("attendee")
verbose_name_plural = _("attendees")
def __str__(self: "Attendee") -> str:
"""It return readable name for the model."""
return f"{self.user}"
class Session(models.Model):
"""Reference session model."""
choose_category = (
("Talk", _("Talk")),
("Lighting Talk", _("Lighting Talk")),
("WorkShop", _("WorkShop")),
)
choose_status = (
("Draft", _("Draft")),
("Accepted", _("Accepted")),
("Denied", _("Denied")),
)
title = models.CharField(verbose_name=_("title"), max_length=400)
description = models.TextField(verbose_name=_("description"))
session_type = models.CharField(
max_length=100, choices=choose_category, verbose_name=_("session type")
)
slug = models.SlugField(verbose_name=_("slug"), unique=True, blank=True)
events = models.ForeignKey(
Event,
verbose_name=_("events"),
on_delete=models.CASCADE,
related_name="sessions",
db_index=True,
)
status = models.CharField(
verbose_name=_("status"), max_length=10, choices=choose_status, default="Draft"
)
proposed_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_("proposed by"),
on_delete=models.CASCADE,
related_name="sessions",
db_index=True,
)
created_at = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated at"), auto_now=True)
class Meta:
"""Meta data."""
verbose_name = _("session")
verbose_name_plural = _("sessions")
def __str__(self: "Session") -> str:
"""It return readable name for the model."""
return f"{self.title}"
@receiver(pre_save, sender=Session)
def session_slug_creator(sender: Session, instance: Session, **kwargs: Any) -> None:
"""Single for Session."""
if not instance.slug:
instance.slug = unique_slug(title=instance.title)
@receiver(pre_save, sender=Event)
def event_creator(sender: Event, instance: Event, **kwargs: Any) -> None:
"""Single for Event."""
if not instance.slug:
instance.slug = unique_slug(title=instance.title)
if instance.description:
instance.read_time = get_read_time(words=instance.description)
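# Added usage sketch (assumes a configured Django project with migrations
# applied; all field values below are hypothetical):
#   event = Event.objects.create(
#       title="PyCon", description="...", event_date=timezone.now(),
#       total_guest=100, hosted_by=some_user,
#       geom={"type": "Point", "coordinates": [0.0, 0.0]},
#   )
#   event.slug        # filled by the pre_save signal via unique_slug()
#   event.read_time   # derived from the description by get_read_time()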
| 30.519573
| 87
| 0.653451
| 994
| 8,576
| 5.395372
| 0.15493
| 0.073839
| 0.033563
| 0.038038
| 0.483125
| 0.459258
| 0.410218
| 0.360992
| 0.319038
| 0.267201
| 0
| 0.002526
| 0.215252
| 8,576
| 280
| 88
| 30.628571
| 0.794354
| 0.119986
| 0
| 0.267857
| 0
| 0
| 0.111638
| 0.006368
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113095
| false
| 0
| 0.047619
| 0
| 0.488095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5918b94351e68baf0dc788cb62fb44c5a012741d
| 2,276
|
py
|
Python
|
raster_compare/base/raster_data_difference.py
|
jomey/raster_compare
|
5199005d01f569e187e944d62af0ea70c383d16a
|
[
"MIT"
] | 1
|
2021-11-13T12:59:53.000Z
|
2021-11-13T12:59:53.000Z
|
raster_compare/base/raster_data_difference.py
|
jomey/raster_compare
|
5199005d01f569e187e944d62af0ea70c383d16a
|
[
"MIT"
] | null | null | null |
raster_compare/base/raster_data_difference.py
|
jomey/raster_compare
|
5199005d01f569e187e944d62af0ea70c383d16a
|
[
"MIT"
] | null | null | null |
import numpy as np
from osgeo import gdal
from .median_absolute_deviation import MedianAbsoluteDeviation
from .raster_file import RasterFile
class RasterDataDifference(object):
GDAL_DRIVER = gdal.GetDriverByName('GTiff')
def __init__(self, lidar, sfm, band_number):
self.lidar = RasterFile(lidar, band_number)
self.sfm = RasterFile(sfm, band_number)
self._aspect = None
self.band_values = self.sfm.band_values() - self.lidar.band_values()
self.band_mask = self.band_values.mask
self.mad = MedianAbsoluteDeviation(self.band_values.compressed())
self._slope = None
@property
def band_values(self):
return self._band_values
@band_values.setter
def band_values(self, value):
self._band_values = value
@property
def band_mask(self):
return self._band_mask
@band_mask.setter
def band_mask(self, value):
self._band_mask = np.copy(value)
def band_outlier_max(self):
return self.mad.data_median + self.mad.standard_deviation(2)
def band_outlier_min(self):
return self.mad.data_median - self.mad.standard_deviation(2)
@property
def band_filtered(self):
self.band_values.mask = np.ma.mask_or(
self.band_mask,
np.ma.masked_outside(
self.band_unfiltered,
self.band_outlier_min(),
self.band_outlier_max()
).mask
)
return self.band_values
@property
def band_unfiltered(self):
self.band_values.mask = self.band_mask
return self.band_values
@property
def band_outliers(self):
self.band_values.mask = np.ma.mask_or(
self.band_mask,
np.ma.masked_inside(
self.band_unfiltered,
self.band_outlier_min(),
self.band_outlier_max()
).mask
)
return self.band_values
@property
def aspect(self):
if self._aspect is None:
self._aspect = self.sfm.aspect - self.lidar.aspect
return self._aspect
@property
def slope(self):
if self._slope is None:
self._slope = self.sfm.slope - self.lidar.slope
return self._slope
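# Added usage sketch ('lidar.tif' and 'sfm.tif' are hypothetical rasters):
#   diff = RasterDataDifference('lidar.tif', 'sfm.tif', band_number=1)
#   diff.band_filtered   # differences within +/- 2 robust std deviations
#   diff.band_outliers   # the complementary set of masked values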
| 27.756098
| 76
| 0.629174
| 279
| 2,276
| 4.870968
| 0.197133
| 0.135394
| 0.113319
| 0.05298
| 0.353937
| 0.318617
| 0.318617
| 0.318617
| 0.286976
| 0.286976
| 0
| 0.001229
| 0.28471
| 2,276
| 81
| 77
| 28.098765
| 0.833538
| 0
| 0
| 0.333333
| 0
| 0
| 0.002197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.060606
| 0.060606
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
591f579f62bec7c986797fa9d6cc59de7656817e
| 527
|
py
|
Python
|
util/logger.py
|
code4hk/NewsdiffHK-Backend
|
76ffd933fe9900a0bd2191597a210ddf86d2a8cd
|
[
"MIT"
] | 5
|
2015-03-29T19:19:16.000Z
|
2015-06-20T09:37:39.000Z
|
util/logger.py
|
code4hk/NewsdiffHK-Backend
|
76ffd933fe9900a0bd2191597a210ddf86d2a8cd
|
[
"MIT"
] | 28
|
2015-04-07T13:34:57.000Z
|
2015-05-25T13:30:36.000Z
|
util/logger.py
|
code4hk/NewsdiffHK-Backend
|
76ffd933fe9900a0bd2191597a210ddf86d2a8cd
|
[
"MIT"
] | null | null | null |
from util.env import log_dir
import logging
class MyFormatter(logging.Formatter):
def formatTime(self, record, datefmt=None):
return logging.Formatter.formatTime(self, record, datefmt).replace(',', '.')
def get(name):
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
formatter = MyFormatter('%(asctime)s:%(levelname)s:%(message)s')
ch = logging.FileHandler(log_dir() + '/news_diff.log')
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
log.addHandler(ch)
return log
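# Added usage sketch ('news_diff.crawler' is a made-up logger name):
#   log = get('news_diff.crawler')
#   log.debug('fetch started')   # appended to <log_dir()>/news_diff.log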
| 29.277778
| 84
| 0.70019
| 65
| 527
| 5.630769
| 0.507692
| 0.032787
| 0.10929
| 0.147541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157495
| 527
| 18
| 85
| 29.277778
| 0.824324
| 0
| 0
| 0
| 0
| 0
| 0.100379
| 0.070076
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0.071429
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
592176ee7d34af8c375b741cef8c2df674d9c4b5
| 2,243
|
py
|
Python
|
piservicebusclient.py
|
nikkh/pi
|
237c0c0effcf69c15c6fb2791c7fd49eb1e254aa
|
[
"Unlicense"
] | null | null | null |
piservicebusclient.py
|
nikkh/pi
|
237c0c0effcf69c15c6fb2791c7fd49eb1e254aa
|
[
"Unlicense"
] | null | null | null |
piservicebusclient.py
|
nikkh/pi
|
237c0c0effcf69c15c6fb2791c7fd49eb1e254aa
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
import colorsys
from azure.servicebus import ServiceBusService
from azure.servicebus import Message
from blinkt import set_pixel, set_brightness, show, clear
import time
import json
class Payload(object):
def __init__(self, j):
self.__dict__ = json.loads(j)
def snake(r, g, b):
"This creates a snake effect on the blinkt using the specified colour"
clear()
for count in range(1,20):
print(count)
for i in range(8):
clear()
set_pixel(i, r, g, b)
show()
time.sleep(0.05)
clear()
return
def rainbow():
clear()
spacing = 360.0 / 16.0
hue = 0
set_brightness(0.1)
for count in range(1,160):
print(count)
hue = int(time.time() * 100) % 360
for x in range(8):
offset = x * spacing
h = ((hue + offset) % 360) / 360.0
r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)]
set_pixel(x, r, g, b)
show()
time.sleep(0.001)
return
set_brightness(0.1)
print('Nicks Raspberry Pi Python Service Bus Client version 0.1')
service_namespace='nixpitest'
key_name = 'RootManageSharedAccessKey' # SharedAccessKeyName from Azure portal
with open('private/keys.txt', 'r') as myfile:
keyval=myfile.read().replace('\n', '')
key_value = keyval # SharedAccessKey from Azure portal
sbs = ServiceBusService(service_namespace,
shared_access_key_name=key_name,
shared_access_key_value=key_value)
sbs.create_queue('testpythonqueue1')
while True:
    newmsg = sbs.receive_queue_message('testpythonqueue1', peek_lock=False)
if newmsg.body is not None:
print ("message: ", newmsg.body, "\n")
p = Payload(newmsg.body)
if p.device: print(p.device)
if p.effect: print(p.effect)
if p.led: print(p.led)
if p.colour: print(p.colour)
if p.state: print(p.state)
if p.effect == 'snake':
if p.colour == 'red':
snake(255,0,0)
elif p.colour == 'green':
snake(0,255,0)
elif p.colour == 'blue':
snake(0,0,255)
if p.effect == 'rainbow':
rainbow()
clear()
time.sleep(1)
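For end-to-end testing, a companion sender can be sketched with the same legacy azure-servicebus 0.21 API the receiver above uses; the queue name mirrors the receiver, reading the key from private/keys.txt is the same assumption the script makes, and the payload field values are illustrative only:
import json
from azure.servicebus import ServiceBusService, Message

with open('private/keys.txt', 'r') as f:
    key = f.read().replace('\n', '')
sender = ServiceBusService('nixpitest',
                           shared_access_key_name='RootManageSharedAccessKey',
                           shared_access_key_value=key)
payload = {'device': 'pi1', 'effect': 'snake', 'led': 0, 'colour': 'red', 'state': 'on'}
# the receiver parses the body with json.loads via Payload
sender.send_queue_message('testpythonqueue1', Message(json.dumps(payload)))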
| 28.392405
| 78
| 0.602764
| 317
| 2,243
| 4.164038
| 0.388013
| 0.018182
| 0.009091
| 0.037879
| 0.05
| 0.025758
| 0.025758
| 0
| 0
| 0
| 0
| 0.041743
| 0.273741
| 2,243
| 78
| 79
| 28.75641
| 0.76857
| 0.069996
| 0
| 0.185714
| 0
| 0
| 0.113436
| 0.011623
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042857
| false
| 0
| 0.085714
| 0
| 0.142857
| 0.128571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59269ff1d7149784a5bf3e067f0e6975db562830
| 14,031
|
py
|
Python
|
apps/part_interpolation&replacement/part_replacement.py
|
GuillaumeDufau/3D-point-capsule-networks
|
369206df643edb263d43cf2d05923cf0a26841e5
|
[
"MIT"
] | 283
|
2019-04-14T12:58:54.000Z
|
2022-03-30T11:49:38.000Z
|
apps/part_interpolation&replacement/part_replacement.py
|
LONG-9621/3D-Point-Capsule-Networks
|
161ac9042ca9c048f4b531ae26fe94a29b13e777
|
[
"MIT"
] | 20
|
2019-05-01T05:40:02.000Z
|
2021-11-20T11:15:17.000Z
|
apps/part_interpolation&replacement/part_replacement.py
|
LONG-9621/3D-Point-Capsule-Networks
|
161ac9042ca9c048f4b531ae26fe94a29b13e777
|
[
"MIT"
] | 55
|
2019-04-22T12:14:42.000Z
|
2022-03-25T06:26:36.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 17:45:51 2018
@author: zhao
"""
import argparse
import torch
import torch.nn.parallel
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../models')))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../dataloaders')))
import shapenet_part_loader
import matplotlib.pyplot as plt
from pointcapsnet_ae import PointCapsNet,PointCapsNetDecoder
from capsule_seg_net import CapsSegNet
import json
from open3d import *
def main():
blue = lambda x:'\033[94m' + x + '\033[0m'
cat_no={'Airplane': 0, 'Bag': 1, 'Cap': 2, 'Car': 3, 'Chair': 4, 'Earphone': 5,
'Guitar': 6, 'Knife': 7, 'Lamp': 8, 'Laptop': 9, 'Motorbike': 10,
'Mug': 11, 'Pistol': 12, 'Rocket': 13, 'Skateboard': 14, 'Table': 15}
    # generate part-label one-hot correspondence from the category:
dataset_main_path=os.path.abspath(os.path.join(BASE_DIR, '../../dataset'))
oid2cpid_file_name=os.path.join(dataset_main_path, opt.dataset,'shapenetcore_partanno_segmentation_benchmark_v0/shapenet_part_overallid_to_catid_partid.json')
oid2cpid = json.load(open(oid2cpid_file_name, 'r'))
object2setofoid = {}
for idx in range(len(oid2cpid)):
objid, pid = oid2cpid[idx]
if not objid in object2setofoid.keys():
object2setofoid[objid] = []
object2setofoid[objid].append(idx)
all_obj_cat_file = os.path.join(dataset_main_path, opt.dataset, 'shapenetcore_partanno_segmentation_benchmark_v0/synsetoffset2category.txt')
fin = open(all_obj_cat_file, 'r')
lines = [line.rstrip() for line in fin.readlines()]
objcats = [line.split()[1] for line in lines]
# objnames = [line.split()[0] for line in lines]
# on2oid = {objcats[i]:i for i in range(len(objcats))}
fin.close()
colors = plt.cm.tab10((np.arange(10)).astype(int))
blue = lambda x:'\033[94m' + x + '\033[0m'
    # load the model for the point capsule auto-encoder
capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
if opt.model != '':
capsule_net.load_state_dict(torch.load(opt.model))
if USE_CUDA:
capsule_net = torch.nn.DataParallel(capsule_net).cuda()
capsule_net=capsule_net.eval()
# load the model for only decoding
capsule_net_decoder = PointCapsNetDecoder(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
if opt.model != '':
capsule_net_decoder.load_state_dict(torch.load(opt.model),strict=False)
if USE_CUDA:
capsule_net_decoder = capsule_net_decoder.cuda()
capsule_net_decoder=capsule_net_decoder.eval()
    # load the model for capsule-wise part segmentation
caps_seg_net = CapsSegNet(latent_caps_size=opt.latent_caps_size, latent_vec_size=opt.latent_vec_size , num_classes=opt.n_classes)
if opt.part_model != '':
caps_seg_net.load_state_dict(torch.load(opt.part_model))
if USE_CUDA:
caps_seg_net = caps_seg_net.cuda()
caps_seg_net = caps_seg_net.eval()
train_dataset = shapenet_part_loader.PartDataset(classification=False, class_choice=opt.class_choice, npoints=opt.num_points, split='test')
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
# container for ground truth
pcd_gt_source=[]
for i in range(2):
pcd = PointCloud()
pcd_gt_source.append(pcd)
pcd_gt_target=[]
for i in range(2):
pcd = PointCloud()
pcd_gt_target.append(pcd)
# container for ground truth cut and paste
pcd_gt_replace_source=[]
for i in range(2):
pcd = PointCloud()
pcd_gt_replace_source.append(pcd)
pcd_gt_replace_target=[]
for i in range(2):
pcd = PointCloud()
pcd_gt_replace_target.append(pcd)
# container for capsule based part replacement
pcd_caps_replace_source=[]
for i in range(opt.latent_caps_size):
pcd = PointCloud()
pcd_caps_replace_source.append(pcd)
pcd_caps_replace_target=[]
for i in range(opt.latent_caps_size):
pcd = PointCloud()
pcd_caps_replace_target.append(pcd)
# apply a transformation in order to get a better view point
##airplane
rotation_angle=np.pi/2
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
flip_transforms = [[1, 0, 0,-2],[0,cosval, -sinval,1.5],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transforms_r = [[1, 0, 0,2],[0, 1, 0,-1.5],[0, 0, 1,0],[0, 0, 0, 1]]
flip_transform_gt_s = [[1, 0, 0,-3],[0,cosval, -sinval,-1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_gt_t = [[1, 0, 0,-3],[0,cosval, -sinval,1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_gt_re_s = [[1, 0, 0,0],[0,cosval, -sinval,-1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_gt_re_t = [[1, 0, 0,0],[0,cosval, -sinval,1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_caps_re_s = [[1, 0, 0,3],[0,cosval, -sinval,-1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
flip_transform_caps_re_t = [[1, 0, 0,3],[0,cosval, -sinval,1],[0,sinval, cosval, 0],[0, 0, 0, 1]]
colors = plt.cm.tab20((np.arange(20)).astype(int))
part_replace_no=1 # the part that is replaced
for batch_id, data in enumerate(train_dataloader):
points, part_label, cls_label= data
        if opt.class_choice is not None:
            cls_label[:] = cat_no[opt.class_choice]
        if points.size(0) < opt.batch_size:
            break
all_model_pcd=PointCloud()
gt_source_list0=[]
gt_source_list1=[]
gt_target_list0=[]
gt_target_list1=[]
for point_id in range(opt.num_points):
if(part_label[0,point_id]==part_replace_no ):
gt_source_list0.append(points[0,point_id,:])
else:
gt_source_list1.append(points[0,point_id,:])
if( part_label[1,point_id]==part_replace_no):
gt_target_list0.append(points[1,point_id,:])
else:
gt_target_list1.append(points[1,point_id,:])
# viz GT with colored part
pcd_gt_source[0].points=Vector3dVector(gt_source_list0)
pcd_gt_source[0].paint_uniform_color([colors[5,0], colors[5,1], colors[5,2]])
pcd_gt_source[0].transform(flip_transform_gt_s)
all_model_pcd+=pcd_gt_source[0]
pcd_gt_source[1].points=Vector3dVector(gt_source_list1)
pcd_gt_source[1].paint_uniform_color([0.8,0.8,0.8])
pcd_gt_source[1].transform(flip_transform_gt_s)
all_model_pcd+=pcd_gt_source[1]
pcd_gt_target[0].points=Vector3dVector(gt_target_list0)
pcd_gt_target[0].paint_uniform_color([colors[6,0], colors[6,1], colors[6,2]])
pcd_gt_target[0].transform(flip_transform_gt_t)
all_model_pcd+=pcd_gt_target[0]
pcd_gt_target[1].points=Vector3dVector(gt_target_list1)
pcd_gt_target[1].paint_uniform_color([0.8,0.8,0.8])
pcd_gt_target[1].transform(flip_transform_gt_t)
all_model_pcd+=pcd_gt_target[1]
# viz replaced GT colored parts
pcd_gt_replace_source[0].points=Vector3dVector(gt_target_list0)
pcd_gt_replace_source[0].paint_uniform_color([colors[6,0], colors[6,1], colors[6,2]])
pcd_gt_replace_source[0].transform(flip_transform_gt_re_s)
all_model_pcd+=pcd_gt_replace_source[0]
pcd_gt_replace_source[1].points=Vector3dVector(gt_source_list1)
pcd_gt_replace_source[1].paint_uniform_color([0.8,0.8,0.8])
pcd_gt_replace_source[1].transform(flip_transform_gt_re_s)
all_model_pcd+=pcd_gt_replace_source[1]
pcd_gt_replace_target[0].points=Vector3dVector(gt_source_list0)
pcd_gt_replace_target[0].paint_uniform_color([colors[5,0], colors[5,1], colors[5,2]])
pcd_gt_replace_target[0].transform(flip_transform_gt_re_t)
all_model_pcd+=pcd_gt_replace_target[0]
pcd_gt_replace_target[1].points=Vector3dVector(gt_target_list1)
pcd_gt_replace_target[1].paint_uniform_color([0.8,0.8,0.8])
pcd_gt_replace_target[1].transform(flip_transform_gt_re_t)
all_model_pcd+=pcd_gt_replace_target[1]
#capsule based replacement
points_ = Variable(points)
points_ = points_.transpose(2, 1)
if USE_CUDA:
points_ = points_.cuda()
latent_caps, reconstructions= capsule_net(points_)
reconstructions=reconstructions.transpose(1,2).data.cpu()
cur_label_one_hot = np.zeros((2, 16), dtype=np.float32)
for i in range(2):
cur_label_one_hot[i, cls_label[i]] = 1
cur_label_one_hot=torch.from_numpy(cur_label_one_hot).float()
expand =cur_label_one_hot.unsqueeze(2).expand(2,16,opt.latent_caps_size).transpose(1,2)
latent_caps, expand = Variable(latent_caps), Variable(expand)
latent_caps,expand = latent_caps.cuda(), expand.cuda()
        # predict the part label of each capsule
latent_caps_with_one_hot=torch.cat((latent_caps,expand),2)
latent_caps_with_one_hot,expand=Variable(latent_caps_with_one_hot),Variable(expand)
latent_caps_with_one_hot,expand=latent_caps_with_one_hot.cuda(),expand.cuda()
latent_caps_with_one_hot=latent_caps_with_one_hot.transpose(2, 1)
output_digit=caps_seg_net(latent_caps_with_one_hot)
for i in range (2):
iou_oids = object2setofoid[objcats[cls_label[i]]]
non_cat_labels = list(set(np.arange(50)).difference(set(iou_oids)))
mini = torch.min(output_digit[i,:,:])
output_digit[i,:, non_cat_labels] = mini - 1000
pred_choice = output_digit.data.cpu().max(2)[1]
        # save the indices of the capsules which are assigned to the current part
part_no=iou_oids[part_replace_no]
part_viz=[]
for caps_no in range (opt.latent_caps_size):
if(pred_choice[0,caps_no]==part_no and pred_choice[1,caps_no]==part_no):
part_viz.append(caps_no)
#replace the capsules
latent_caps_replace=latent_caps.clone()
latent_caps_replace= Variable(latent_caps_replace)
latent_caps_replace = latent_caps_replace.cuda()
for j in range (len(part_viz)):
latent_caps_replace[0,part_viz[j],]=latent_caps[1,part_viz[j],]
latent_caps_replace[1,part_viz[j],]=latent_caps[0,part_viz[j],]
reconstructions_replace = capsule_net_decoder(latent_caps_replace)
reconstructions_replace=reconstructions_replace.transpose(1,2).data.cpu()
for j in range(opt.latent_caps_size):
current_patch_s=torch.zeros(int(opt.num_points/opt.latent_caps_size),3)
current_patch_t=torch.zeros(int(opt.num_points/opt.latent_caps_size),3)
for m in range(int(opt.num_points/opt.latent_caps_size)):
current_patch_s[m,]=reconstructions_replace[0][opt.latent_caps_size*m+j,]
current_patch_t[m,]=reconstructions_replace[1][opt.latent_caps_size*m+j,]
pcd_caps_replace_source[j].points = Vector3dVector(current_patch_s)
pcd_caps_replace_target[j].points = Vector3dVector(current_patch_t)
part_no=iou_oids[part_replace_no]
if(j in part_viz):
pcd_caps_replace_source[j].paint_uniform_color([colors[6,0], colors[6,1], colors[6,2]])
pcd_caps_replace_target[j].paint_uniform_color([colors[5,0], colors[5,1], colors[5,2]])
else:
pcd_caps_replace_source[j].paint_uniform_color([0.8,0.8,0.8])
pcd_caps_replace_target[j].paint_uniform_color([0.8,0.8,0.8])
pcd_caps_replace_source[j].transform(flip_transform_caps_re_s)
pcd_caps_replace_target[j].transform(flip_transform_caps_re_t)
all_model_pcd+=pcd_caps_replace_source[j]
all_model_pcd+=pcd_caps_replace_target[j]
draw_geometries([all_model_pcd])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=2, help='input batch size')
parser.add_argument('--prim_caps_size', type=int, default=1024, help='number of primary point caps')
parser.add_argument('--prim_vec_size', type=int, default=16, help='scale of primary point caps')
parser.add_argument('--latent_caps_size', type=int, default=64, help='number of latent caps')
parser.add_argument('--latent_vec_size', type=int, default=64, help='scale of latent caps')
parser.add_argument('--num_points', type=int, default=2048, help='input point set size')
parser.add_argument('--part_model', type=str, default='../../checkpoints/part_seg_100percent.pth', help='model path for the pre-trained part segmentation network')
parser.add_argument('--model', type=str, default='../../checkpoints/shapenet_part_dataset_ae_200.pth', help='model path')
    parser.add_argument('--dataset', type=str, default='shapenet_part', help='dataset: shapenet_part, shapenet_core13, shapenet_core55, modelnet40')
parser.add_argument('--n_classes', type=int, default=50, help='part classes in all the catagories')
    parser.add_argument('--class_choice', type=str, default='Airplane', help='choose the class to evaluate')
opt = parser.parse_args()
print(opt)
USE_CUDA = True
main()
| 44.26183
| 170
| 0.662818
| 2,057
| 14,031
| 4.209528
| 0.156539
| 0.048504
| 0.027717
| 0.025523
| 0.515533
| 0.414829
| 0.338838
| 0.2953
| 0.254995
| 0.231666
| 0
| 0.035776
| 0.2131
| 14,031
| 317
| 171
| 44.26183
| 0.748483
| 0.058798
| 0
| 0.121622
| 0
| 0
| 0.069737
| 0.019426
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004505
| false
| 0
| 0.067568
| 0
| 0.072072
| 0.004505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
592749e0c27abaef8d986702717878c311749a54
| 6,839
|
py
|
Python
|
src/Grid.py
|
RavinSG/aaivu-ride-hailing-simulation
|
eb7bc7cc6a5830d40509ce22fe4fa2eb013e6767
|
[
"Apache-2.0"
] | 8
|
2021-02-18T19:02:59.000Z
|
2022-02-19T13:38:48.000Z
|
src/Grid.py
|
Programmer-RD-AI/aaivu-ride-hailing-simulation
|
f315661c94c9e3f26bab1d8bb9c35d21b1a60479
|
[
"Apache-2.0"
] | null | null | null |
src/Grid.py
|
Programmer-RD-AI/aaivu-ride-hailing-simulation
|
f315661c94c9e3f26bab1d8bb9c35d21b1a60479
|
[
"Apache-2.0"
] | 2
|
2021-02-14T03:28:51.000Z
|
2022-02-19T13:38:51.000Z
|
import simpy
import itertools
import numpy as np
from RideSimulator.Driver import Driver
from RideSimulator.HexGrid import HexGrid
def get_spot_locations(width: int, height: int, interval: int) -> np.ndarray:
"""
:param width: width of the grid
:param height: height of the grid
:param interval: distance between two spots
:return: an array of all the spot locations
"""
x_points = np.arange(0, width, interval)
y_points = np.arange(0, height, interval)
    # If the distance from the corner to the nearest taxi spot is greater than the minimum search radius, additional
    # spots are added along the edges of the map.
if (width - x_points[-1]) > (interval / np.sqrt(2)):
x_points = np.append(x_points, width)
if (height - y_points[-1]) > (interval / np.sqrt(2)):
y_points = np.append(y_points, height)
spots = np.array([list(i) for i in itertools.product(x_points, y_points)])
return np.array([spots, len(y_points), len(x_points)], dtype=object)
class Grid(object):
"""
Handles all the information and processes related to the grid. The distances between grid units can be translated to
real world distances using the units_per_km attribute.
Taxi spots are used to anchor drivers into locations in the map to make it easier to find the closest driver for a
given trip.
    A hexagon overlay is used to cluster grid locations into regions, for which hotspots, traffic and other
    features are calculated.
"""
def __init__(self, env: simpy.Environment, width: int, height: int, interval: int, num_drivers: int,
hex_area: float, units_per_km: int = 1, seed: int = None):
"""
:param env: simpy environment
:param width: width of the grid
:param height: height of the grid
:param interval: distance between two spots
:param num_drivers: number of drivers in the grid
:param hex_area: area size of a single hex tile
:param units_per_km: number of grid units per km
"""
if seed is not None:
np.random.seed(seed)
self.width = width
self.height = height
self.interval = interval
self.hex_overlay = HexGrid(hex_area=hex_area, width=width, height=height, units_per_km=units_per_km)
self.taxi_spots, self.spot_height, self.spot_width = get_spot_locations(width=width, height=height,
interval=interval)
self.driver_pools = simpy.FilterStore(env, capacity=num_drivers)
def get_random_location(self) -> np.ndarray:
x = np.random.randint(0, self.width)
y = np.random.randint(0, self.height)
return np.array([x, y])
# Temp function to get location id until hexagons are implemented
def get_location_id(self, location):
grid_width = 10 # no. of cells in one axis (create 10x10 grid)
x = np.floor((location[0] - 0) * grid_width / self.width)
y = np.floor((location[1] - 0) * grid_width / self.height)
return x * grid_width + y
@staticmethod
def get_distance(loc1: np.ndarray, loc2: np.ndarray) -> float:
distance = np.linalg.norm(loc1 - loc2)
return np.round(distance, 1)
def get_spot_id(self, location):
return int(np.round(location[0]) * self.spot_height + np.round(location[1]))
def get_nearest_spot(self, location: np.ndarray, search_radius=1) -> list:
"""
Find the nearest driver spot for a given location.
Initially it'll only return the nearest spot to the driver. When search_radius = 2, the 4 taxi spots surrounding
the rider are returned. Afterwards, with each increment to the search_radius, all taxi spots inside a square
centered on the driver location with a side length of search_radius are returned.
:param location: x,y coords of the location
        :param search_radius: number of breadths (expansion rings) the search is carried out over
:return: a list of the closest taxi spots
"""
x_spot = location[0] / self.interval
y_spot = location[1] / self.interval
closet_spot = [np.round(x_spot), np.round(y_spot)]
if search_radius == 1:
spot_no = [self.get_spot_id(closet_spot)]
elif search_radius == 2:
spot_no = []
x_points = {np.floor(x_spot), np.ceil(x_spot)}
y_points = {np.floor(y_spot), np.ceil(y_spot)}
spots = np.array([list(i) for i in itertools.product(x_points, y_points)])
for spot in spots:
spot_no.append(self.get_spot_id(spot))
else:
spot_no = []
x_points = [closet_spot[0]]
y_points = [closet_spot[1]]
for i in range(1, search_radius - 1):
x_points.append(max(0, closet_spot[0] - i))
x_points.append(min(self.spot_width - 1, closet_spot[0] + i))
y_points.append(max(0, closet_spot[1] - i))
y_points.append(min(self.spot_height - 1, closet_spot[1] + i))
x_points = set(x_points)
y_points = set(y_points)
spots = np.array([list(i) for i in itertools.product(x_points, y_points)])
for spot in spots:
spot_no.append(self.get_spot_id(spot))
return spot_no
def get_closest_drivers(self, location: np.ndarray, search_radius: int) -> list:
"""
        A more accurate closest-driver search using the distances of all the drivers in the closest taxi spots.
        Since this is more computationally expensive and the gain in accuracy does not outweigh the cost, it is
        not used at the moment.
:param location: location the distances should be calculated from
        :param search_radius: number of breadths (expansion rings) the search is carried out over
:return: a list of driver ids sorted in the ascending order according to their distances to the location
"""
spots = self.get_nearest_spot(location, search_radius=search_radius)
driver_ids = []
distances = []
for driver in self.driver_pools.items:
if driver.spot_id in spots:
driver_ids.append(driver.id)
distances.append(self.get_distance(location, driver.location))
if len(driver_ids) > 0:
_, driver_ids = zip(*sorted(zip(distances, driver_ids)))
return driver_ids
def assign_spot(self, driver: Driver):
"""
Assign the driver to his nearest driver pool.
:param driver: driver object
"""
driver_loc = driver.location
spot_id = self.get_nearest_spot(driver_loc)[0]
driver.spot_id = spot_id
self.driver_pools.put(driver)
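A minimal construction sketch for the class above, assuming the RideSimulator package from the imports is importable; all numeric values are illustrative:
import simpy

env = simpy.Environment()
# 100x100 grid, taxi spots every 10 units, 50 drivers, 5 km^2 hex tiles
grid = Grid(env, width=100, height=100, interval=10,
            num_drivers=50, hex_area=5.0, units_per_km=1, seed=42)
rider_loc = grid.get_random_location()
# search_radius=2 returns the four taxi spots surrounding the location
spots = grid.get_nearest_spot(rider_loc, search_radius=2)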
| 41.70122
| 120
| 0.640445
| 972
| 6,839
| 4.365226
| 0.209877
| 0.023097
| 0.014141
| 0.013198
| 0.20198
| 0.181711
| 0.130332
| 0.130332
| 0.130332
| 0.130332
| 0
| 0.009455
| 0.273139
| 6,839
| 163
| 121
| 41.957055
| 0.844096
| 0.324901
| 0
| 0.105882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105882
| false
| 0
| 0.058824
| 0.011765
| 0.258824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
592b099ed5239bc2e197e2c20d2d55bdd277f278
| 881
|
py
|
Python
|
src/block_constants.py
|
cemulate/minecraft-hdl
|
a46da8d2a29aad9c2fc84037d677190c6db80dcd
|
[
"MIT"
] | 5
|
2015-09-11T04:13:01.000Z
|
2021-11-17T14:35:28.000Z
|
src/block_constants.py
|
cemulate/minecraft-hdl
|
a46da8d2a29aad9c2fc84037d677190c6db80dcd
|
[
"MIT"
] | null | null | null |
src/block_constants.py
|
cemulate/minecraft-hdl
|
a46da8d2a29aad9c2fc84037d677190c6db80dcd
|
[
"MIT"
] | 1
|
2021-03-15T17:31:27.000Z
|
2021-03-15T17:31:27.000Z
|
REDSTONE = 55
REPEATER = 93
TORCH = 75
AIR = 0
GLASS = 20
SLAB = 44
DOUBLE_SLAB = 43
WOOL = 35
DIR_WEST_POS_Z = 0
DIR_NORTH_NEG_X = 1
DIR_EAST_NEG_Z = 2
DIR_SOUTH_POS_X = 3
TORCH_ON_GROUND = 5
TORCH_POINTING_POS_X = 1
TORCH_POINTING_NEG_X = 2
TORCH_POINTING_POS_Z = 3
TORCH_POINTING_NEG_Z = 4
STONE_SLAB_TOP = 8
DOUBLE_SLAB_STONE = 0
WOOL_BLACK = 15
REPEATER_TOWARD_POS_X = 1
REPEATER_TOWARD_POS_Z = 2
REPEATER_TOWARD_NEG_X = 3
CLOSE_SIDE = 0
FAR_SIDE = 1
WOOL_NAMES = {0: "White",
1: "Orange",
2: "Magenta",
3: "Light blue",
4: "Yellow",
5: "Lime",
6: "Pink",
7: "Grey",
8: "Light grey",
9: "Cyan",
10: "Purple",
11: "Blue",
12: "Brown",
13: "Green",
14: "Red",
15: "Black"}
| 17.62
| 29
| 0.538025
| 129
| 881
| 3.333333
| 0.503876
| 0.12093
| 0.074419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097345
| 0.358683
| 881
| 50
| 30
| 17.62
| 0.663717
| 0
| 0
| 0
| 0
| 0
| 0.099773
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
592b8f8cacb2754ab7e4528631c3f40cfdc1b7e7
| 4,973
|
py
|
Python
|
qfc/dirhandler.py
|
akhilkedia/qfc
|
101861bd2fb818564245249fc93f278752684b51
|
[
"MIT"
] | null | null | null |
qfc/dirhandler.py
|
akhilkedia/qfc
|
101861bd2fb818564245249fc93f278752684b51
|
[
"MIT"
] | null | null | null |
qfc/dirhandler.py
|
akhilkedia/qfc
|
101861bd2fb818564245249fc93f278752684b51
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import sys
class CVSHandler():
""" Handler of CVS (fir, mercurial...) directories,
The main purpose of this class is to cache external cvs command output, and determine the appropriate files to yield when navigating to a subdirectory of a project.
This basically means that the external command is run once (ie git ls-files), cached, and when calling get_source_files on a subdirectory of the project root (ie project-root/subdir),
filtering from all project files of is done here.
"""
def __init__(self, cvs):
self._roots_cache = {}
self._not_tracked_cache = set()
self.cvs = cvs
def _get_root_from_cache(self, directory):
""" a directory is considered cached if it's the project root or a subdirectory of that project root.
returns the project root dir, or None if the directory is not cached.
"""
if directory in self._roots_cache:
return directory
if os.path.dirname(directory) == directory:
return None
return self._get_root_from_cache(os.path.dirname(directory))
def get_source_files(self, directory):
if directory in self._not_tracked_cache:
return None
root_dir = self._get_root_from_cache(directory)
if not root_dir:
try:
# check if it's a tracked cvs dir, if yes, get the project root and the source files
root_dir = self.cvs._get_root(directory)
self._roots_cache[root_dir] = self.cvs._get_tracked_files(root_dir)
except Exception as e:
# not a cvs tracked dir, save it to not issue that command again
self._not_tracked_cache.add(directory)
return None
files = self._roots_cache[root_dir]
# the passed directory argument is a subdirectory of the project root
if directory != root_dir:
rel_dir = os.path.relpath(directory, root_dir)
files = [f[len(rel_dir)+1:] for f in files if f.startswith(rel_dir)]
return files
class Git():
@staticmethod
def _get_root(directory):
return run_command("cd %s && git rev-parse --show-toplevel" % directory).strip()
@staticmethod
def _get_tracked_files(directory):
return run_command("cd %s && git ls-files && git ls-files --others --exclude-standard" % directory).strip().split('\n')
class Mercurial():
@staticmethod
def _get_root(directory):
return run_command("cd %s && hg root" % directory).strip()
@staticmethod
def _get_tracked_files(directory):
return run_command("cd %s && (hg status -marcu | cut -d' ' -f2)" % directory).strip().split('\n')
class DefaultDirHandler():
""" The default directory handler uses the 'find' external program to return all the files inside a given directory up to MAX_depth depth (ie, if maxdepth=2, returns all files inside that dir, and all files in a subdir of that directory)"""
def __init__(self):
self._cache = {}
self.MAX_DEPTH = 2
def _walk_down(self, start_dir):
try:
out = run_command(
"{ find %s -maxdepth %s -not -path '*/\\.*' -type d -print | sed 's!$!/!'; find %s -maxdepth %s -not -path '*/\\.*' -type f -or -type l ; } | sed -n 's|^%s/||p'" % (start_dir, self.MAX_DEPTH, start_dir, self.MAX_DEPTH, start_dir))
except subprocess.CalledProcessError as e:
# Find returns a non 0 exit status if listing a directory fails (for example, permission denied), but still output all files in other dirs
# ignore those failed directories.
out = e.output
if sys.version_info >= (3, 0):
out = out.decode('utf-8')
if not out:
return []
files = out.split('\n')
return [f for f in files if f]
def get_source_files(self, start_dir):
        if start_dir not in self._cache:
self._cache[start_dir] = self._walk_down(start_dir)
return self._cache[start_dir]
def run_command(string):
''' fork a process to execute the command string given as argument, returning the string written to STDOUT '''
DEVNULL = open(os.devnull, 'wb')
out = subprocess.check_output(string, stderr=DEVNULL, shell=True)
if sys.version_info >= (3, 0):
return out.decode('utf-8')
return out
git = CVSHandler(Git)
hg = CVSHandler(Mercurial)
default = DefaultDirHandler()
def get_source_files(directory):
""" check first if the given directory is inside a git tracked project, if no, check with mercurial, if no, fallback to the default handler """
files = git.get_source_files(directory)
# if the returned files list is empty, it's considered not a tracked directory
if files:
return files
files = hg.get_source_files(directory)
if files:
return files
return default.get_source_files(directory)
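Usage is a single call; the module tries git first, then mercurial, then the 'find' fallback. The path below is a hypothetical example:
import os

# tracked files if the path is a git or hg checkout, otherwise
# everything `find` sees up to MAX_DEPTH levels deep
for f in get_source_files(os.path.expanduser('~/projects/demo')):
    print(f)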
| 42.87069
| 246
| 0.648904
| 696
| 4,973
| 4.482759
| 0.248563
| 0.020192
| 0.03141
| 0.032051
| 0.246154
| 0.151923
| 0.112821
| 0.078205
| 0.078205
| 0.078205
| 0
| 0.002983
| 0.258395
| 4,973
| 115
| 247
| 43.243478
| 0.843004
| 0.311884
| 0
| 0.246914
| 0
| 0.012346
| 0.101649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.037037
| 0.049383
| 0.45679
| 0.012346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
592c8f23fd0453baefac3223ac8d226123072b8f
| 436
|
py
|
Python
|
demo1/jsons.py
|
dollarkillerx/Python-Data-Analysis
|
f208d5ce9951e9fca2d084a89290100b7e543154
|
[
"MIT"
] | null | null | null |
demo1/jsons.py
|
dollarkillerx/Python-Data-Analysis
|
f208d5ce9951e9fca2d084a89290100b7e543154
|
[
"MIT"
] | null | null | null |
demo1/jsons.py
|
dollarkillerx/Python-Data-Analysis
|
f208d5ce9951e9fca2d084a89290100b7e543154
|
[
"MIT"
] | null | null | null |
import json
filename = "data.json"
mydata = {
"title":"我的测试数据",
"lesson":{
"python":"学习中",
'vue':"学习完毕",
"golang":"基本精通"
},
"games":{
"GAT":"一年没有玩了"
},
}
# write the data to a file
with open(filename,'w',encoding="utf-8") as data:
    # data, file handle, number of spaces for JSON indentation
json.dump(mydata,data,indent=4)
# read the file back
with open(filename,'r',encoding='utf-8') as data:
    # file handle
rdata = json.load(data)
print(rdata)
| 16.769231
| 49
| 0.538991
| 54
| 436
| 4.351852
| 0.685185
| 0.068085
| 0.13617
| 0.119149
| 0.153191
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009317
| 0.261468
| 436
| 25
| 50
| 17.44
| 0.720497
| 0.066514
| 0
| 0
| 0
| 0
| 0.19403
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
592ca011fcc9c84fa4da0a8bde9dd4daf4629fd5
| 280
|
py
|
Python
|
Scripts/malware_scan/classess/progress.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
Scripts/malware_scan/classess/progress.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
Scripts/malware_scan/classess/progress.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
import progressbar, time
from .colors import *
# progress bar
def animated_marker():
widgets = ['In Process: ', progressbar.AnimatedMarker()]
bar = progressbar.ProgressBar(widgets=widgets).start()
for i in range(18):
time.sleep(0.1)
bar.update(i)
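Running the helper directly spins the marker for about 1.8 seconds (18 ticks at 0.1 s each):
if __name__ == '__main__':
    animated_marker()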
| 28
| 61
| 0.660714
| 34
| 280
| 5.411765
| 0.676471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 0.214286
| 280
| 10
| 62
| 28
| 0.818182
| 0.042857
| 0
| 0
| 0
| 0
| 0.044944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
593150e1f3c9a373acbf0b4f5ce7f05a49bde1de
| 4,406
|
py
|
Python
|
single_subject_workflow.py
|
tknapen/reward_np_analysis
|
29bcc02d5acd23689dee7059ecb1607d2814cdf0
|
[
"MIT"
] | null | null | null |
single_subject_workflow.py
|
tknapen/reward_np_analysis
|
29bcc02d5acd23689dee7059ecb1607d2814cdf0
|
[
"MIT"
] | null | null | null |
single_subject_workflow.py
|
tknapen/reward_np_analysis
|
29bcc02d5acd23689dee7059ecb1607d2814cdf0
|
[
"MIT"
] | null | null | null |
# from nipype import config
# config.enable_debug_mode()
# Importing necessary packages
import os
import os.path as op
import glob
import json
import nipype
from nipype import config, logging
import matplotlib.pyplot as plt
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.utils.filemanip import copyfile
import nibabel as nib
from IPython.display import Image
from nipype.interfaces.utility import Function, Merge, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from IPython import embed as shell
from workflows.pupil_workflow import create_pupil_workflow
from workflows.bold_wholebrain_fir_workflow import create_bold_wholebrain_fir_workflow
# we will create a workflow from a BIDS formatted input, at first for the specific use case
# of a 7T PRF experiment's preprocessing.
# a project directory that we assume has already been created.
raw_data_dir = '/home/raw_data/-2014/reward/human_reward/data/'
preprocessed_data_dir = '/home/shared/-2014/reward/new/'
FS_subject_dir = os.path.join(raw_data_dir, 'FS_SJID')
# booleans that determine whether given stages of the
# analysis are run
pupil = True
wb_fir = True
for si in range(1, 7):
sub_id, FS_ID = 'sub-00%i'%si, 'sub-00%i'%si
sess_id = 'ses-*'
# now we set up the folders and logging there.
opd = op.join(preprocessed_data_dir, sub_id)
try:
os.makedirs(op.join(opd, 'log'))
except OSError:
pass
config.update_config({ 'logging': {
'log_directory': op.join(opd, 'log'),
'log_to_file': True,
'workflow_level': 'INFO',
'interface_level': 'INFO'
},
'execution': {
'stop_on_first_crash': False
}
})
logging.update_logging(config)
# load the sequence parameters from json file
with open(os.path.join(raw_data_dir, 'acquisition_parameters.json')) as f:
json_s = f.read()
acquisition_parameters = json.loads(json_s)
# load the analysis parameters from json file
with open(os.path.join(raw_data_dir, 'analysis_parameters.json')) as f:
json_s = f.read()
analysis_info = json.loads(json_s)
# load the analysis/experimental parameters for this subject from json file
with open(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json')) as f:
json_s = f.read()
experimental_parameters = json.loads(json_s)
analysis_info.update(experimental_parameters)
if not op.isdir(os.path.join(preprocessed_data_dir, sub_id)):
try:
os.makedirs(os.path.join(preprocessed_data_dir, sub_id))
except OSError:
pass
# copy json files to preprocessed data folder
# this allows these parameters to be updated and synced across subjects by changing only the raw data files.
copyfile(os.path.join(raw_data_dir, 'acquisition_parameters.json'), os.path.join(preprocessed_data_dir, 'acquisition_parameters.json'), copy = True)
copyfile(os.path.join(raw_data_dir, 'analysis_parameters.json'), os.path.join(preprocessed_data_dir, 'analysis_parameters.json'), copy = True)
copyfile(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json'), os.path.join(preprocessed_data_dir, sub_id ,'experimental_parameters.json'), copy = True)
if pupil:
pwf = create_pupil_workflow(analysis_info,'pupil')
pwf.inputs.inputspec.sub_id = sub_id
pwf.inputs.inputspec.preprocessed_directory = preprocessed_data_dir
pwf.write_graph(opd + '_pupil.svg', format='svg', graph2use='colored', simple_form=False)
pwf.run('MultiProc', plugin_args={'n_procs': 6})
if wb_fir:
wbfwf = create_bold_wholebrain_fir_workflow(analysis_info,'wb_fir')
wbfwf.inputs.inputspec.sub_id = sub_id
wbfwf.inputs.inputspec.preprocessed_directory = preprocessed_data_dir
wbfwf.write_graph(opd + '_wb_fir.svg', format='svg', graph2use='colored', simple_form=False)
wbfwf.run('MultiProc', plugin_args={'n_procs': 6})
| 40.422018
| 170
| 0.682705
| 591
| 4,406
| 4.890017
| 0.307953
| 0.041176
| 0.041522
| 0.031488
| 0.438062
| 0.39827
| 0.351211
| 0.261246
| 0.172318
| 0.104498
| 0
| 0.005578
| 0.226963
| 4,406
| 108
| 171
| 40.796296
| 0.842924
| 0.159555
| 0
| 0.148649
| 0
| 0
| 0.142935
| 0.084893
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.027027
| 0.27027
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
593db3c128dcad16c4059d93406558fd51b30469
| 5,617
|
py
|
Python
|
wark.py
|
rcorre/wark
|
fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa
|
[
"MIT"
] | 1
|
2017-05-24T00:25:39.000Z
|
2017-05-24T00:25:39.000Z
|
wark.py
|
rcorre/wark
|
fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa
|
[
"MIT"
] | null | null | null |
wark.py
|
rcorre/wark
|
fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa
|
[
"MIT"
] | null | null | null |
import os
import json
import time   # used by unixtime() below
import uuid
import shlex
import weechat
import requests
from ciscosparkapi import CiscoSparkAPI
from ws4py.client.threadedclient import WebSocketClient
SCRIPT_NAME = "spark"
FULL_NAME = "plugins.var.python.{}".format(SCRIPT_NAME)
SPARK_SOCKET_URL = 'https://wdm-a.wbx2.com/wdm/api/v1/devices'
api = None
listener = None
rooms = None
buffers = []
def unixtime(msg):
"""Get the unix timestamp from a spark message object"""
t = time.strptime(msg.created, '%Y-%m-%dT%H:%M:%S.%fZ')
return int(time.mktime(t))
class Buffer():
"""Represents a weechat buffer connected to a spark room."""
def __init__(self, buf, room, api):
self.buf = buf
self.room = room
self.api = api
def show(self, msg):
"""Display a message in the buffer."""
weechat.prnt_date_tags(self.buf, unixtime(msg), "", msg.text)
def send(self, txt):
"""Send a message to the room."""
self.api.messages.create(roomId=self.room.id, markdown=txt)
# Cisco Spark has a websocket interface to listen for message events
# It isn't documented, I found it here:
# https://github.com/marchfederico/ciscospark-websocket-events
class EventListener(WebSocketClient):
"""Listens to the cisco spark web socket."""
def __init__(self, buffers):
self.buffers = buffers
spec = {
"deviceName": "weechat",
"deviceType": "DESKTOP",
"localizedModel": "python2",
"model": "python2",
"name": "weechat",
"systemName": "weechat",
"systemVersion": "0.1"
}
self.bearer = 'Bearer ' + os.getenv("SPARK_ACCESS_TOKEN")
self.headers = {'Authorization': self.bearer}
resp = requests.post(SPARK_SOCKET_URL, headers=self.headers, json=spec,
timeout=10.0)
if resp.status_code != 200:
print("Failed to register device {}: {}".format(name, resp.json()))
info = resp.json()
self.dev_url = info['url']
super(EventListener, self).__init__(
info['webSocketUrl'], protocols=['http-only', 'chat'])
def opened(self):
# authentication handshake
self.send(json.dumps({
'id': str(uuid.uuid4()),
'type': 'authorization',
'data': { 'token': self.bearer }
}))
def closed(self, code, reason=None):
resp = requests.delete(self.dev_url, headers=self.headers,
timeout=10.0)
if resp.status_code != 200:
print("Failed to unregister websocket device from Spark")
def received_message(self, m):
try:
j = json.loads(str(m))
        except ValueError:
print("Failed to parse message {}".format(m))
return
timestamp = j['timestamp']
data = j['data']
name = data.get('actor', {}).get('displayName')
ev = data['eventType']
if ev == 'status.start_typing':
weechat.prnt('', '{} started typing'.format(name))
elif ev == 'status.stop_typing':
weechat.prnt('', '{} stopped typing'.format(name))
elif ev == 'conversation.activity':
act = data['activity']
verb = act['verb']
if verb == 'post':
msg = api.messages.get(act['id'])
for buf in self.buffers:
if buf.room.id == msg.roomId:
buf.show(msg)
else:
print('Unknown event {}'.format(ev))
class CommandException(Exception):
pass
def buffer_input_cb(data, buf, input_data):
weechat.prnt(buf, input_data)
return weechat.WEECHAT_RC_OK
def buffer_close_cb(data, buf):
"""Called on closing a buffer."""
return weechat.WEECHAT_RC_OK
def room_list(buf):
"""Print a list of visible rooms."""
weechat.prnt(buf, '--Rooms--')
weechat.prnt(buf, '\n'.join(rooms.keys()))
weechat.prnt(buf, '---------')
def room_open(buf, name):
"""Open a new buffer connected to a spark room."""
room = rooms[name]
newbuf = weechat.buffer_new("spark." + room.title, "buffer_input_cb", "",
"buffer_close_cb", "")
    buffers.append(Buffer(newbuf, room, api))
def rehistory(_buf):
#messages = api.messages.list(roomId=room.id)
#for msg in sorted(messages, key=unixtime):
# text = msg.text.encode('ascii', 'replace') if msg.text else ''
# weechat.prnt_date_tags(newbuf, unixtime(msg), "", text)
pass
COMMANDS = {
'rooms': room_list,
'open': room_open,
}
def spark_command_cb(data, buf, command):
parts = shlex.split(command)
cmd = parts[0]
args = parts[1:]
if not cmd in COMMANDS:
weechat.prnt(buf, "Unknown command " + cmd)
return weechat.WEECHAT_RC_ERROR
try:
COMMANDS[cmd](buf, *args)
return weechat.WEECHAT_RC_OK
except CommandException as ex:
weechat.prnt(buf, 'Error: {}'.format(ex))
return weechat.WEECHAT_RC_ERROR
weechat.register(SCRIPT_NAME, "rcorre", "0.1", "MIT", "Spark Client", "", "")
api = CiscoSparkAPI()
rooms = {room.title: room for room in api.rooms.list()}
listener = EventListener(buffers)
listener.connect()
weechat.hook_command(
# Command name and description
'spark', '',
# Usage
'[command] [command options]',
# Description of arguments
'Commands:\n' +
'\n'.join(['history']) +
'\nUse /spark help [command] to find out more\n',
# Completions
'|'.join(COMMANDS.keys()),
# Function name
'spark_command_cb', '')
| 28.226131
| 79
| 0.590707
| 675
| 5,617
| 4.820741
| 0.333333
| 0.033805
| 0.025814
| 0.033805
| 0.096497
| 0.059004
| 0.025814
| 0.025814
| 0.025814
| 0.025814
| 0
| 0.00582
| 0.2658
| 5,617
| 198
| 80
| 28.368687
| 0.78322
| 0.142069
| 0
| 0.098485
| 0
| 0
| 0.166177
| 0.013219
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106061
| false
| 0.015152
| 0.060606
| 0
| 0.242424
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
593ea1a84d21ae7ff3a90ce0dfc4e0f0d6b66ac7
| 4,728
|
py
|
Python
|
Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py
|
leander-dsouza/MRM-Tenure
|
3f372ffeeb12b04f4c5c636235db61725d47c3c6
|
[
"MIT"
] | 2
|
2020-08-26T04:01:03.000Z
|
2020-09-11T05:21:32.000Z
|
Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py
|
leander-dsouza/MRM-Tenure
|
3f372ffeeb12b04f4c5c636235db61725d47c3c6
|
[
"MIT"
] | null | null | null |
Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py
|
leander-dsouza/MRM-Tenure
|
3f372ffeeb12b04f4c5c636235db61725d47c3c6
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
import time
import pygame
from pygame import locals
import pygame.display
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
speedA = 0.000
speedB = 0.000
x = 512.00
y = 512.00
# frequency=100Hz
t_on = 0.00
t_off = 0.00
ledpin1 =35 # left_fwd
ledpin2 =36 # right_fwd
ledpin3 =37 # left_bck
ledpin4 =38 # right_bck
GPIO.setup(ledpin1, GPIO.OUT)
GPIO.setup(ledpin2, GPIO.OUT)
GPIO.setup(ledpin3, GPIO.OUT)
GPIO.setup(ledpin4, GPIO.OUT)
GPIO.output(ledpin1, False)
GPIO.output(ledpin2, False)
GPIO.output(ledpin3, False)
GPIO.output(ledpin4, False)
p=GPIO.PWM(ledpin1,100)
q=GPIO.PWM(ledpin2,100)
r=GPIO.PWM(ledpin3,100)
s=GPIO.PWM(ledpin4,100)
p.start(0.00)
q.start(0.00)
r.start(0.00)
s.start(0.00)
def arduino_map(x, in_min, in_max, out_min, out_max):
return ((x - in_min) * (out_max - out_min) / (in_max - in_min)) + out_min
def oct1(x, y):
speedA = arduino_map(y, 1023, 512, 255, 0)
speedB = arduino_map(x + y, 1535, 1023, 255, 0)
p.ChangeDutyCycle(speedA*(100.000000/255.000000))
q.ChangeDutyCycle(speedB*(100.000000/255.000000))
r.ChangeDutyCycle(0)
s.ChangeDutyCycle(0)
def oct2(x, y):
speedA = arduino_map(x, 512, 0, 0, 255)
speedB = arduino_map(x + y, 1023, 512, 0, 255)
p.ChangeDutyCycle(speedA*(100.000000/255.000000))
s.ChangeDutyCycle(speedB*(100.000000/255.000000))
q.ChangeDutyCycle(0)
r.ChangeDutyCycle(0)
def oct3(x, y):
speedA = arduino_map(y - x, 512, 0, 255, 0)
speedB = arduino_map(x, 512, 0, 0, 255)
p.ChangeDutyCycle(speedA*(100.000000/255.000000))
s.ChangeDutyCycle(speedB*(100.000000/255.000000))
r.ChangeDutyCycle(0)
q.ChangeDutyCycle(0)
def oct4(x, y):
speedA = arduino_map(x - y, 512, 0, 255, 0)
speedB = arduino_map(y, 512, 0, 0, 255)
r.ChangeDutyCycle(speedA*(100.000000/255.000000))
s.ChangeDutyCycle(speedB*(100.000000/255.000000))
p.ChangeDutyCycle(0)
q.ChangeDutyCycle(0)
def oct5(x, y):
speedA = arduino_map(y, 512, 0, 0, 255)
speedB = arduino_map(x + y, 1023, 512, 0, 255)
r.ChangeDutyCycle(speedA*(100.000000/255.000000))
s.ChangeDutyCycle(speedB*(100.000000/255.000000))
p.ChangeDutyCycle(0)
q.ChangeDutyCycle(0)
def oct6(x, y):
speedA = arduino_map(x, 1023, 512, 255, 0)
speedB = arduino_map(x + y, 1535, 1023, 255, 0)
r.ChangeDutyCycle(speedA*(100.000000/255.000000))
q.ChangeDutyCycle(speedB*(100.000000/255.000000))
p.ChangeDutyCycle(0)
s.ChangeDutyCycle(0)
def oct7(x, y):
speedA = arduino_map(x - y, 0, 512, 0, 255)
speedB = arduino_map(x, 1023, 512, 255, 0)
r.ChangeDutyCycle(speedA*(100.000000/255.000000))
q.ChangeDutyCycle(speedB*(100.000000/255.000000))
p.ChangeDutyCycle(0)
s.ChangeDutyCycle(0)
def oct8(x, y):
speedA = arduino_map(y - x, 0, 512, 0, 255)
speedB = arduino_map(y, 1023, 512, 255, 0)
p.ChangeDutyCycle(speedA*(100.000000/255.000000))
q.ChangeDutyCycle(speedB*(100.000000/255.000000))
r.ChangeDutyCycle(0)
s.ChangeDutyCycle(0)
pygame.init()
pygame.display.init()
pygame.joystick.init() # main joystick device system
try:
j = pygame.joystick.Joystick(0) # create a joystick instance
j.init() # init instance
print("Enabled joystick:")
except pygame.error:
print("no joystick found.")
while True:
for e in pygame.event.get(): # iterate over event stack
if e.type == pygame.locals.JOYAXISMOTION:
x, y = j.get_axis(0), j.get_axis(1)
x = round(arduino_map(x, -1, 1, 1023, 0))
y = round(arduino_map(y, 1, -1, 0, 1023))
print("X=", x)
print("Y=", y)
# QUAD 1
if (x <= 512) & ((y >= 512) & (y <= 1023)):
if (x + y) >= 1023: # OCT1
oct1(x, y)
if (x + y) < 1023: # OCT2
oct2(x, y)
# QUAD 2
if (x <= 512) & (y <= 512):
if (x - y) <= 0: # OCT3
oct3(x, y)
if (x - y) > 0: # OCT4
oct4(x, y)
# QUAD 3
if ((x >= 512) & (x <= 1023)) & (y <= 512):
if (x + y) <= 1023: # OCT5
oct5(x, y)
if (x + y) > 1023: # OCT6
oct6(x, y)
# QUAD 4
if ((x >= 512) & (x <= 1023)) & ((y >= 512) & (y <= 1023)):
if (y - x) <= 0: # OCT7
oct7(x, y)
if (y - x) > 0: # OCT8
oct8(x, y)
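The octant dispatch above hinges on arduino_map rescaling the pygame axes from [-1, 1] to [0, 1023]; a quick sanity check, with values following directly from the formula:
assert arduino_map(-1, -1, 1, 1023, 0) == 1023   # stick full left  -> x = 1023
assert arduino_map(1, -1, 1, 1023, 0) == 0       # stick full right -> x = 0
assert arduino_map(0, -1, 1, 1023, 0) == 511.5   # centre, before round()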
| 27.172414
| 78
| 0.556684
| 675
| 4,728
| 3.844444
| 0.151111
| 0.022351
| 0.073988
| 0.110983
| 0.56262
| 0.544123
| 0.509056
| 0.402698
| 0.402698
| 0.396146
| 0
| 0.191489
| 0.294205
| 4,728
| 173
| 79
| 27.32948
| 0.586155
| 0.045474
| 0
| 0.28125
| 0
| 0
| 0.009032
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070313
| false
| 0
| 0.039063
| 0.007813
| 0.117188
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
593f2fd2545bc28f967b04b9e6d7e99629ac3a94
| 8,548
|
py
|
Python
|
rest_helpers/type_serializers.py
|
WillFr/restlax
|
ec47617d915094137077f641427976f04acd8d47
|
[
"Apache-2.0"
] | 1
|
2019-07-03T16:29:05.000Z
|
2019-07-03T16:29:05.000Z
|
rest_helpers/type_serializers.py
|
WillFr/restlax
|
ec47617d915094137077f641427976f04acd8d47
|
[
"Apache-2.0"
] | null | null | null |
rest_helpers/type_serializers.py
|
WillFr/restlax
|
ec47617d915094137077f641427976f04acd8d47
|
[
"Apache-2.0"
] | null | null | null |
"""
This module contains functions that are geared toward serializing objects,
in particular JSON API objects.
"""
import decimal
from rest_helpers.jsonapi_objects import Resource, Response, Link, JsonApiObject, Relationship
def to_jsonable(obj, no_empty_field=False, is_private=None):
"""
This is a low level function to transform any object into a json
serializable (jsonable) object based on its __dict__.
Arguments:
obj {any type} -- the object to be transformed.
Keyword Arguments:
no_empty_field {bool} -- if set to true, the empty field (empty
string or None) will be removed from the resulting jsonable object
(default: {False})
is_private -- callback/function can be passed through to define what
does or does not surface in json payload.
Returns:
dict -- A dictionary that can be used by json.dumps
"""
if is_private is None:
        is_private = lambda k: not str(k).startswith("_")
if isinstance(obj, list):
return [to_jsonable(r, no_empty_field, is_private) for r in obj]
dic = obj if isinstance(obj, dict) else \
obj.__dict__ if hasattr(obj, "__dict__") else \
None
if dic is None:
if isinstance(obj, decimal.Decimal):
str_rep = str(obj)
return int(obj) if '.' not in str_rep else str_rep
return obj
    return {str(k): to_jsonable(v, no_empty_field, is_private) for k, v in dic.items() if is_private(k) and (not no_empty_field or (v is not None and v != ""))}
def response_to_jsonable(response, generate_self_links=True, id_only=False,is_private=None):
"""
Transform a response object into a json serializable (jsonable) object that
matches the jsonapi requirements.
Arguments:
resource {Response} -- The response to be serialized
Keyword Arguments:
generate_self_links {bool} -- If set to true "self" links will be added appropriately
where they do not exist. (default: {True})
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Response object.
"""
assert isinstance(response, Response)
# Data is a resource object (or a list of resource object,
# hence it needs some special serialization logic)
dic = response.__dict__.copy()
dic.pop("data")
return_value = to_jsonable(dic, no_empty_field=True,is_private=is_private)
if response.data is not None:
jsonable_data = resource_to_jsonable(response.data, generate_self_links,is_private=is_private)
if id_only:
jsonable_data = jsonable_data["id"] if not isinstance(jsonable_data, Iterable) else [x["id"] for x in jsonable_data]
return_value["data"] = jsonable_data
return return_value
def resource_to_jsonable(resource, generate_self_links=True,is_private=None):
"""
Transform a resource object or a resource object list into
a json serializable (jsonable) object that matches the jsonapi
requirements.
Arguments:
resource {Resource|list<Resource>} -- The resource or list of resources
to be serialized
Keyword Arguments:
generate_self_links {bool} -- If set to true "self" links will be added appropriately
where they do not exist. (default: {True})
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Resource object.
"""
if isinstance(resource, list):
        return [resource_to_jsonable(x, generate_self_links, is_private=is_private) for x in resource]
assert isinstance(resource, Resource)
    json_resource = resource.to_primitive() if (hasattr(resource, "to_primitive") and callable(resource.to_primitive)) else to_jsonable(resource, is_private=is_private)
special = ["id", "type", "relationships", "links", "meta"]
for key in special:
json_resource.pop(key, None)
relationships = relationships_to_jsonable(
resource.relationships, "{0}?json_path=/{1}".format(resource.id, "relationships"),
generate_self_links)
resource_links = resource.links
if generate_self_links and "self" not in resource_links:
resource_links = resource.links.copy()
resource_links["self"] = Link(resource.id)
links = links_to_jsonable(resource_links)
return_value = {
"id" : resource.id,
"type" : resource.type,
"relationships" : relationships,
"links" : links,
"meta" : resource.meta,
"attributes" :json_resource
}
_remove_empty_fields(return_value)
return return_value
def link_to_jsonable(link):
"""
Transforms a json api link object into a dictionary that can be used by json.dumps.
Arguments:
link {Link} -- the link to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Link object.
"""
assert isinstance(link, Link)
if link.meta is None:
return link.url
else:
return {
"href": link.url,
"meta": to_jsonable(link.meta)
}
def links_to_jsonable(links):
"""
    Transform a dictionary of json api Link objects into a dictionary that can be used
    by json.dumps.
Arguments:
links {dict<Link>} -- the dictionary of Link objects to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the dictionary of link
objects.
"""
if links is None:
return None
assert isinstance(links, dict)
return {k: link_to_jsonable(v) for k, v in links.items()}
def jsonapiobject_to_jsonable(jsonapiobject):
"""
    Transforms a JsonApiObject into a dictionary that can be used by json.dumps
Arguments:
jsonapiobject {JsonApiObject} -- The jsonapiobject to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the JsonApiObject object.
"""
assert isinstance(jsonapiobject, JsonApiObject)
return to_jsonable(jsonapiobject, no_empty_field=True)
def relationship_to_jsonable(relationship, self_link=None):
"""
    Transform a json api relationship object into a json serializable object that matches
    the json api specification.
Arguments:
relationship {Relationship} -- a relationship object to be serialized.
Keyword Arguments:
self_link {string} -- link to the relationship to be serialized. If not None, a link
json api object will be created based on this value and added to the links of the
relationship object to be serialized (default: {None}).
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the relationship object.
"""
assert isinstance(relationship, Relationship)
return_value = dict()
links = relationship.links.copy() if relationship.links is not None else dict()
if self_link is not None:
links["self"] = Link(self_link)
if any(links):
return_value["links"] = links_to_jsonable(links)
if relationship.data is not None:
return_value["data"] = {"type": relationship.data.type, "id": relationship.data.id}
return return_value
def relationships_to_jsonable(relationships, self_link_prefix=None, generate_self_link=False):
"""
    Transform a dictionary of json api relationship objects into a json
    serializable object that matches the json api specification.
Arguments:
relationships {dict<Relationships>} -- a dict of
relationship objects to be serialized.
Keyword Arguments:
self_link_prefix {string} -- prefix to be used as the link prefix when generate_self_link
is set to true. (default: {None})
generate_self_link {bool} -- when set to true, a self link will be autogenerated when
serializing the relationship object (default: {False}).
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the relationship
dictionary.
"""
if relationships is None:
return None
assert isinstance(relationships, dict)
if generate_self_link:
return {k: relationship_to_jsonable(v, "{0}/{1}".format(self_link_prefix, k))
for k, v in relationships.items()}
else:
return {k: relationship_to_jsonable(v) for k, v in relationships.items()}
#region private
def _remove_empty_fields(dic):
for key in [k for k, v in dic.items() if v is None or v == ""]:
dic.pop(key)
#endregion
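A small illustration of to_jsonable with the default privacy rule (underscore-prefixed attributes are dropped) and no_empty_field; the Point class is hypothetical:
class Point:
    def __init__(self):
        self.x = 1
        self.y = None            # dropped when no_empty_field=True
        self._secret = "hidden"  # dropped by the default is_private rule

print(to_jsonable(Point(), no_empty_field=True))   # -> {'x': 1}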
| 33.786561
| 168
| 0.681797
| 1,148
| 8,548
| 4.939895
| 0.134146
| 0.038794
| 0.017457
| 0.025216
| 0.344207
| 0.283372
| 0.247928
| 0.213895
| 0.208076
| 0.208076
| 0
| 0.000768
| 0.238184
| 8,548
| 252
| 169
| 33.920635
| 0.870086
| 0.437997
| 0
| 0.074468
| 0
| 0
| 0.039167
| 0
| 0
| 0
| 0
| 0
| 0.074468
| 1
| 0.095745
| false
| 0
| 0.031915
| 0
| 0.297872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5942ff8661f94ed3c33e9cd05d6389cd70d923f4
| 1,753
|
py
|
Python
|
Wizard Battle App/wizardbattle.py
|
rayjustinhuang/PythonApps
|
ba5572fbff38de71f806558c5d0be5827962aebb
|
[
"MIT"
] | null | null | null |
Wizard Battle App/wizardbattle.py
|
rayjustinhuang/PythonApps
|
ba5572fbff38de71f806558c5d0be5827962aebb
|
[
"MIT"
] | null | null | null |
Wizard Battle App/wizardbattle.py
|
rayjustinhuang/PythonApps
|
ba5572fbff38de71f806558c5d0be5827962aebb
|
[
"MIT"
] | null | null | null |
import random
import time
from characters import Wizard, Creature
def main():
game_header()
game_loop()
def game_header():
print('------------------------------')
print(' WIZARD TEXT GAME APP')
print('------------------------------')
def game_loop():
creatures = [
Creature('Toad', 1),
Creature('Tiger', 12),
Creature('Bat', 3),
Creature('Dragon', 50),
Creature('Evil Wizard', 1000),
]
# print(creatures)
hero = Wizard('Gandalf', 75)
while True:
active_creature = random.choice(creatures)
print('A {} of level {} has appeared from a dark and foggy forest...'
.format(active_creature.name, active_creature.level))
print()
cmd = input('Do you [a]ttack, [r]un away, or [l]ook around? ')
if cmd == 'a':
# print('attack')
if hero.attack(active_creature):
creatures.remove(active_creature)
else:
print('The wizard retreats to recover...')
time.sleep(5)
print('The wizard returns revitalized')
elif cmd == 'r':
# print('run away')
print('The wizard has become unsure of himself and flees...')
elif cmd == 'l':
# print('look around')
print('The wizard {} takes a look around and sees...'.format(hero.name))
for c in creatures:
print(' * A {} of level {}'.format(c.name, c.level))
else:
print('exiting game...')
break
if not creatures:
print("You've defeated all the creatures!!! You win!")
break
print()
if __name__ == '__main__':
main()
| 25.405797
| 84
| 0.50599
| 190
| 1,753
| 4.578947
| 0.457895
| 0.08046
| 0.064368
| 0.03908
| 0.050575
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01113
| 0.333714
| 1,753
| 68
| 85
| 25.779412
| 0.733733
| 0.040502
| 0
| 0.170213
| 0
| 0
| 0.285629
| 0.035778
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.06383
| 0
| 0.12766
| 0.276596
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5943869d3d4d2e30ae0802900ea733c4c32ec043
| 2,581
|
py
|
Python
|
xmastreegame/ThreadedTree.py
|
martinohanlon/GPIOXmasTreeGame
|
0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a
|
[
"MIT"
] | 2
|
2015-01-21T22:13:53.000Z
|
2017-12-13T17:57:37.000Z
|
xmastreegame/ThreadedTree.py
|
martinohanlon/GPIOXmasTreeGame
|
0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a
|
[
"MIT"
] | null | null | null |
xmastreegame/ThreadedTree.py
|
martinohanlon/GPIOXmasTreeGame
|
0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a
|
[
"MIT"
] | null | null | null |
import threading
from time import sleep
import RPi.GPIO as GPIO
illumination_time_default = 0.001
class XmasTree(threading.Thread):
#Pins
#Model B+ or A+
#A, B, C, D = 21, 19, 26, 20
#Other model, probably Model A or Model B
#A, B, C, D = 7, 9, 11, 8
def __init__(self, A = 21,B = 19, C = 26, D = 20):
#setup threading
threading.Thread.__init__(self)
#setup properties
self.running = False
self.stopped = False
self.leds = 0
self.A, self.B, self.C, self.D = A, B, C, D
def run(self):
self.running = True
#loop until it's stopped
while not self.stopped:
for i in range(8):
self._single_led_on(self.leds & (1<<i))
sleep(illumination_time_default)
#once stopped turn the leds off
self.leds_on(0)
self.running = False
def stop(self):
self.stopped = True
#wait for it to stop running
while self.running:
sleep(0.01)
def leds_on(self, leds):
self.leds = leds
def _single_led_on(self, n):
A, B, C, D = self.A, self.B, self.C, self.D
# First, set all the nodes to be input (effectively
# 'disconnecting' them from the Raspberry Pi)
GPIO.setup(A, GPIO.IN)
GPIO.setup(B, GPIO.IN)
GPIO.setup(C, GPIO.IN)
GPIO.setup(D, GPIO.IN)
# Now determine which nodes are connected to the anode
# and cathode for this LED
if (n==1): anode, cathode = C, A
elif (n==2): anode, cathode = C, D
elif (n==4): anode, cathode = D, C
elif (n==8): anode, cathode = D, B
elif (n==16): anode, cathode = B, D
elif (n==32): anode, cathode = A, B
elif (n==64): anode, cathode = B, A
elif (n==128): anode, cathode = A, C
else: return # invalid LED number
# Configure the anode and cathode nodes to be outputs
GPIO.setup(anode, GPIO.OUT)
GPIO.setup(cathode, GPIO.OUT)
# Make the anode high (+3.3v) and the cathode low (0v)
GPIO.output(anode, GPIO.HIGH)
GPIO.output(cathode, GPIO.LOW)
#test
if __name__ == "__main__":
L0 = 1
L1 = 2
L2 = 4
L3 = 8
L4 = 16
L5 = 32
L6 = 64
ALL = 1+2+4+8+16+32+64
GPIO.setmode(GPIO.BCM)
try:
tree = XmasTree()
tree.start()
tree.leds_on(ALL)
while True:
sleep(0.1)
finally:
tree.stop()
GPIO.cleanup()
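# --- Illustrative usage sketch (addition; assumes the test harness above) ---
# Because leds_on() takes a bitmask, display patterns are plain integers.
# A simple chase over the seven LEDs, driven from the main thread while the
# XmasTree thread keeps multiplexing:
#
# def chase(tree, rounds=10, delay=0.2):
#     for _ in range(rounds):
#         for i in range(7):
#             tree.leds_on(1 << i)  # light LED i only
#             sleep(delay)
#     tree.leds_on(0)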
| 25.058252
| 62
| 0.533514
| 376
| 2,581
| 3.585106
| 0.332447
| 0.071217
| 0.008902
| 0.011869
| 0.029674
| 0.029674
| 0.029674
| 0.029674
| 0
| 0
| 0
| 0.046108
| 0.352964
| 2,581
| 102
| 63
| 25.303922
| 0.761078
| 0.20341
| 0
| 0.031746
| 0
| 0
| 0.003927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59445fc42f57f15739274fff9371a3ae622d87a7
| 1,962
|
py
|
Python
|
cap7/ex5.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap7/ex5.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap7/ex5.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
jogo = [[], [], []], [[], [], []], [[], [], []]
cont = 0
contx = conto = contxc = contoc = 0
while True:
l = int(input('Enter the row: '))
c = int(input('Enter the column: '))
if 1 <= l <= 3 and 1 <= c <= 3:
if cont % 2 == 0:
jogo[l-1][c-1] = 'X'
else:
jogo[l-1][c-1] = 'O'
cont += 1
winner = None
for x in range(0, 3):
for j in jogo[x]:
if j == 'X':
contx += 1
if j == 'O':
conto += 1
for k in range(0, 3):
if jogo[k][x] == 'X':
contxc += 1
if jogo[k][x] == 'O':
contoc += 1
print(jogo[x])
if jogo[0][0] == 'X' and jogo[1][1] == 'X' and jogo[2][2] == 'X':
winner = 'X'
if jogo[0][0] == 'O' and jogo[1][1] == 'O' and jogo[2][2] == 'O':
winner = 'O'
if jogo[0][2] == 'X' and jogo[1][1] == 'X' and jogo[2][0] == 'X':
winner = 'X'
if jogo[0][2] == 'O' and jogo[1][1] == 'O' and jogo[2][0] == 'O':
winner = 'O'
if contx == 3 or contxc == 3:
winner = 'X'
if conto == 3 or contoc == 3:
winner = 'O'
if winner:
for r in range(x + 1, 3):
print(jogo[r])
break
contx = conto = contxc = contoc = 0
if winner:
print(f'Congratulations, {winner} won!')
break
else:
print('Invalid position: row and column must be 1 to 3')
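# --- Hedged refactor sketch (an addition, not part of the original exercise) ---
# One caveat remains above: contx/conto count marks across every row scanned
# so far and contxc/contoc across every column scanned so far, so reaching 3
# does not guarantee the marks are actually aligned. A direct line check
# avoids that; `board` stands for the 3x3 grid (`jogo`) used above.

def check_winner(board):
    """Return 'X' or 'O' if a player has three in a line, else None."""
    lines = [list(row) for row in board]                           # rows
    lines += [[board[r][c] for r in range(3)] for c in range(3)]   # columns
    lines.append([board[i][i] for i in range(3)])                  # main diagonal
    lines.append([board[i][2 - i] for i in range(3)])              # anti-diagonal
    for line in lines:
        if line[0] in ('X', 'O') and line.count(line[0]) == 3:
            return line[0]
    return None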
| 35.035714
| 77
| 0.35474
| 249
| 1,962
| 2.795181
| 0.160643
| 0.086207
| 0.158046
| 0.094828
| 0.610632
| 0.521552
| 0.521552
| 0.501437
| 0.426724
| 0.317529
| 0
| 0.056039
| 0.472477
| 1,962
| 55
| 78
| 35.672727
| 0.616425
| 0
| 0
| 0.454545
| 0
| 0
| 0.095821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.327273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5944d36b482e6230d5854a8d2998c95179d5d03e
| 23,625
|
py
|
Python
|
lib/intercom_test/framework.py
|
rtweeks/intercom_test
|
a682088af93d280297764b639f4727ec4716673f
|
[
"Apache-2.0"
] | null | null | null |
lib/intercom_test/framework.py
|
rtweeks/intercom_test
|
a682088af93d280297764b639f4727ec4716673f
|
[
"Apache-2.0"
] | null | null | null |
lib/intercom_test/framework.py
|
rtweeks/intercom_test
|
a682088af93d280297764b639f4727ec4716673f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 PayTrace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import functools
from io import StringIO
import json
import logging
import os.path
import shutil
import yaml
from .cases import (
IdentificationListReader as CaseIdListReader,
hash_from_fields as _hash_from_fields,
)
from .exceptions import MultipleAugmentationEntriesError, NoAugmentationError
from .augmentation.compact_file import (
augment_dict_from,
case_keys as case_keys_in_compact_file,
TestCaseAugmenter as CompactFileAugmenter,
Updater as CompactAugmentationUpdater,
)
from .augmentation import update_file
from .utils import (
FilteredDictView as _FilteredDictView,
open_temp_copy,
)
from .yaml_tools import (
YAML_EXT,
content_events as _yaml_content_events,
get_load_all_fn as _get_yaml_load_all,
)
logger = logging.getLogger(__name__)
class InterfaceCaseProvider:
"""Test case data manager
Use an instance of this class to:
* Generate test case data :class:`dict`\ s
* Decorate the case runner function (if auto-updating of compact
augmentation data files is desired)
* Merge extension test case files to the main test case file
* Other case augmentation management tasks
Setting :attr:`use_body_type_magic` to ``True`` automatically parses the
``"request body"`` value as JSON if ``"request type"`` in the same test
case is ``"json"``, and similarly for ``"response body"`` and
``"response type"``.
.. automethod:: __init__
"""
use_body_type_magic = False
safe_yaml_loading = True
class _UpdateState(Enum):
not_requested = '-'
requested = '?'
aborted = '!'
def __repr__(self, ):
return "<{}.{}>".format(type(self).__name__, self.name)
_case_augmenter = None
def __init__(self, spec_dir, group_name, *, case_augmenter=None):
"""Constructing an instance
:param spec_dir: File system directory for test case specifications
:param group_name: Name of the group of tests to load
:keyword case_augmenter:
*optional* An object providing the interface of a
:class:`.CaseAugmenter`
The main test case file of the group is located in *spec_dir* and is
named for *group_name* with the '.yml' extension added. Extension
test case files are found in the *group_name* subdirectory of
*spec_dir* and all have '.yml' extensions.
"""
super().__init__()
self._spec_dir = spec_dir
self._group_name = group_name
self._compact_files_update = self._UpdateState.not_requested
if case_augmenter:
self._case_augmenter = case_augmenter
self._augmented_case = case_augmenter.augmented_test_case
@property
def spec_dir(self):
"""The directory containing the test specification files for this instance"""
return self._spec_dir
@property
def group_name(self):
"""Name of group of test cases to load for this instance"""
return self._group_name
@property
def case_augmenter(self):
"""The :class:`.CaseAugmenter` instance used by this object, if any"""
return self._case_augmenter
@property
def main_group_test_file(self):
"""Path to the main test file of the group for this instance"""
return os.path.join(self.spec_dir, self.group_name + YAML_EXT)
def extension_files(self, ):
"""Get an iterable of the extension files of this instance"""
return extension_files(self.spec_dir, self.group_name)
def cases(self, ):
"""Generates :class:`dict`\ s of test case data
This method reads test cases from the group's main test case file
and auxiliary files, possibly extending them with augmented data (if
*case_augmentations* was given in the constructor).
"""
yield from self._cases_from_file(self.main_group_test_file)
for ext_file in sorted(self.extension_files()):
yield from self._cases_from_file(ext_file)
if self._compact_files_update is self._UpdateState.requested:
self.update_compact_files()
def update_compact_augmentation_on_success(self, fn):
"""Decorator for activating compact data file updates
Using this decorator around the test functions tidies up the logic
around whether to propagate test case augmentation data from update
files to compact files. The compact files will be updated if all
interface tests succeed and not if any of them fail.
The test runner function can be automatically wrapped with this
functionality through :meth:`case_runners`.
"""
CFUpdate = self._UpdateState
@functools.wraps(fn)
def wrapper(*args, **kwargs):
if self._compact_files_update is not CFUpdate.aborted:
self._compact_files_update = CFUpdate.requested
try:
return fn(*args, **kwargs)
except:
self._compact_files_update = CFUpdate.aborted
raise
return wrapper
def case_runners(self, fn, *, do_compact_updates=True):
"""Generates runner callables from a callable
The callables in the returned iterable each call *fn* with all the
positional arguments they are given, the test case :class:`dict` as an
additional positional argument, and all keyword arguments passed to
the case runner.
Using this method rather than :meth:`cases` directly for running tests
has two advantages:
* The default of *do_compact_updates* automatically applies
:meth:`update_compact_augmentation_on_success` to *fn*
* Each returned runner callable will log the test case as YAML prior
to invoking *fn*, which is helpful when it becomes necessary to update
the augmenting data for the case
Each callable generated will also have the case data available via
a :attr:`case` attribute on the callable.
"""
if do_compact_updates and self._case_augmenter is not None:
fn = self.update_compact_augmentation_on_success(fn)
for case in self.cases():
@functools.wraps(fn)
def wrapper(*args, case=case, **kwargs):
# binding case as a keyword default avoids the late-binding closure
# pitfall when all runners are collected before any of them is called
logger.info("{}\n{}".format(
" CASE TESTED ".center(40, '*'),
yaml.dump([case]),
))
return fn(*args, case, **kwargs)
wrapper.case = case
yield wrapper
def update_compact_files(self, ):
"""Calls the :class:`CaseAugmenter` to apply compact data file updates
:raises NoAugmentationError:
when no case augmentation data was specified during construction
of this object
"""
if self._case_augmenter is None:
raise NoAugmentationError("No augmentation data specified")
return self._case_augmenter.update_compact_files()
def merge_test_extensions(self, ):
"""Merge the extension files of the target group into the group's main file"""
ext_files = sorted(self.extension_files())
with open(self.main_group_test_file, 'ab') as fixed_version_specs:
for ext_file in ext_files:
ext_file_ref = os.path.relpath(ext_file, os.path.join(self.spec_dir, self.group_name))
print("---\n# From {}\n".format(ext_file_ref).encode('utf8'), file=fixed_version_specs)
with open(ext_file, 'rb') as ext_specs:
shutil.copyfileobj(ext_specs, fixed_version_specs)
for ext_file in ext_files:
os.remove(ext_file)
def _augmented_case(self, x):
"""This method is defined to be overwritten on the instance level when augmented data is used"""
return x
def _cases_from_file(self, filepath):
with open(filepath, 'rb') as file:
load_all_yaml = _get_yaml_load_all(safe=self.safe_yaml_loading)
for test_case in (
tc
for case_set in load_all_yaml(file)
for tc in case_set
):
if self.use_body_type_magic:
_parse_json_bodies(test_case)
yield self._augmented_case(test_case)
def extension_files(spec_dir, group_name):
"""Iterator of file paths for extensions of a test case group
:param spec_dir: Directory in which specifications live
:param group_name: Name of the group to iterate
"""
yield from data_files(os.path.join(spec_dir, group_name))
def data_files(dir_path):
"""Generate data file paths from the given directory"""
try:
dir_listing = os.listdir(dir_path)
except FileNotFoundError:
return
for entry in dir_listing:
entry = os.path.join(dir_path, entry)
if not os.path.isfile(entry):
continue
if not entry.endswith(YAML_EXT):
continue
yield entry
def _parse_json_bodies(test_case):
if test_case.get('request type') == 'json':
test_case['request body'] = json.loads(test_case['request body'])
if test_case.get('response type') == 'json':
test_case['response body'] = json.loads(test_case['response body'])
class CaseAugmenter:
"""Base class of case augmentation data managers
This class uses and manages files in a case augmentation directory. The
data files are intended to either end in '.yml' or '.update.yml'.
The version control system should, typically, be set up to ignore files
with the '.update.yml' extension. These two kinds of files have a different
"data shape".
Update files (ending in '.update.yml') are convenient for manual editing
because they look like the test case file from which the case came, but
with additional entries in the case data :class:`dict`. The problems with
long term use of this file format are A) it is inefficient for correlation
to test cases, and B) it duplicates data from the test case, possibly
leading to confusion when modifying the .update.yml file does not change
the test case.
Compact data files (other files ending in '.yml') typically are generated
through this package. The format is difficult to manually correlate with
the test file, but does not duplicate all of the test case data as does the
update file data format. Instead, the relevant keys of the test case are
hashed and the hash value is used to index the additional augmentation
value entries.
It is an error for a test case to have multiple augmentations defined
within .yml files (excluding .update.yml files), whether in the same or
different files. It is also an error for multiple files with the
.update.yml extension to specify augmentation for the same case, though
within the same file the last specification is taken. When augmentations
for a case exist within both one .update.yml and one .yml file, the
.update.yml is used (with the goal of updating the .yml file with the
new augmentation values).
Methods of this class depend on the class-level presence of
:const:`CASE_PRIMARY_KEYS`, which is not provided in this class. To use
this class's functionality, derive from it and define this constant in
the subclass. Two basic subclasses are defined in this module:
:class:`HTTPCaseAugmenter` and :class:`RPCCaseAugmenter`.
.. automethod:: __init__
"""
UPDATE_FILE_EXT = ".update" + YAML_EXT
# Set this to False to allow arbitrary object instantiation and code
# execution from loaded YAML
safe_loading = True
def __init__(self, augmentation_data_dir):
"""Constructing an instance
:param augmentation_data_dir:
path to directory holding the augmentation data
"""
super().__init__()
# Initialize info on extension data location
self._case_augmenters = {}
self._updates = {} # compact_file_path -> dict of update readers
working_files = []
self._augmentation_data_dir = augmentation_data_dir
for file_path in data_files(augmentation_data_dir):
if file_path.endswith(self.UPDATE_FILE_EXT):
working_files.append(file_path)
else:
self._load_compact_refs(file_path)
self._index_working_files(working_files)
@property
def augmentation_data_dir(self):
return self._augmentation_data_dir
def _load_compact_refs(self, file_path):
for case_key, start_byte in case_keys_in_compact_file(file_path):
if case_key in self._case_augmenters:
self._excessive_augmentation_data(case_key, self._case_augmenters[case_key].file_path, file_path)
self._case_augmenters[case_key] = CompactFileAugmenter(file_path, start_byte, case_key, safe_loading=self.safe_loading)
self._case_augmenters[case_key].safe_loading = self.safe_loading
def _excessive_augmentation_data(self, case_key, file1, file2):
if file1 == file2:
error_msg = "Test case key \"{}\" has multiple augmentation entries in {}".format(
case_key,
file1,
)
else:
error_msg = "Test case key \"{}\" has augmentation entries in {} and {}".format(
case_key,
file1,
file2,
)
raise MultipleAugmentationEntriesError(error_msg)
def _index_working_files(self, working_files):
for case_key, augmenter in update_file.index(working_files, self.CASE_PRIMARY_KEYS, safe_loading=self.safe_loading).items():
existing_augmenter = self._case_augmenters.get(case_key)
if isinstance(existing_augmenter, CompactFileAugmenter):
if augmenter.deposit_file_path != existing_augmenter.file_path:
raise MultipleAugmentationEntriesError(
"case {} conflicts with case \"{}\" in {}; if present, this case must be in {}".format(
augmenter.case_reference,
case_key,
existing_augmenter.file_path,
os.path.basename(existing_augmenter.file_path).replace(
YAML_EXT,
self.UPDATE_FILE_EXT
),
)
)
elif existing_augmenter is not None:
raise MultipleAugmentationEntriesError(
"case {} conflicts with case {}".format(
augmenter.case_reference,
existing_augmenter.case_reference,
)
)
self._updates.setdefault(augmenter.deposit_file_path, {})[case_key] = augmenter
self._case_augmenters[case_key] = augmenter
@classmethod
def key_of_case(cls, test_case):
"""Compute the key (hash) value of the given test case"""
if hasattr(test_case, 'items'):
test_case = test_case.items()
return _hash_from_fields(
(k, v) for k, v in test_case
if k in cls.CASE_PRIMARY_KEYS
)
def augmented_test_case(self, test_case):
"""Add key/value pairs to *test_case* per the stored augmentation data
:param dict test_case: The test case to augment
:returns: Test case with additional key/value pairs
:rtype: dict
"""
case_key = self.key_of_case(test_case)
augment_case = self._case_augmenters.get(case_key)
if not augment_case:
return test_case
aug_test_case = dict(test_case)
augment_case(aug_test_case)
return aug_test_case
def augmented_test_case_events(self, case_key, case_id_events):
"""Generate YAML events for a test case
:param str case_key:
The case key for augmentation
:param case_id_events:
An iterable of YAML events representing the key/value pairs of the
test case identity
This is used internally when extending an updates file with the existing
data from a case, given the ID of the case as YAML.
"""
case_augmenter = self._case_augmenters.get(case_key)
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
yield from case_id_events
if case_augmenter is not None:
yield from case_augmenter.case_data_events()
yield yaml.MappingEndEvent()
def update_compact_files(self, ):
"""Update compact data files from update data files"""
for file_path, updates in self._updates.items():
if os.path.exists(file_path):
with open_temp_copy(file_path, binary=True) as instream, open(file_path, 'wb') as outstream:
updated_events = self._updated_compact_events(
yaml.parse(instream),
updates
)
yaml.emit(updated_events, outstream)
else:
with open(file_path, 'wb') as outstream:
yaml.emit(self._fresh_content_events(updates.items()), outstream)
def extend_updates(self, file_name_base):
"""Create an object for extending a particular update file
The idea is::
case_augmenter.extend_updates('foo').with_current_augmentation(sys.stdin)
"""
return UpdateExtender(file_name_base, self, safe_loading=self.safe_loading)
def _updated_compact_events(self, events, updates):
mutator = CompactAugmentationUpdater(
_FilteredDictView(
updates,
value_transform=self._full_yaml_mapping_events_from_update_augmentation
),
self.CASE_PRIMARY_KEYS
)
yield from (
output_event
for input_event in events
for output_event in mutator.filter(input_event)
)
@classmethod
def _full_yaml_mapping_events_from_update_augmentation(cls, augmenter):
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
yield from augmenter.case_data_events()
yield yaml.MappingEndEvent()
def _fresh_content_events(self, content_iterable):
# Header events
yield yaml.StreamStartEvent()
yield yaml.DocumentStartEvent()
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
# Content events
for key, value in content_iterable:
yield yaml.ScalarEvent(None, None, (True, False), key)
if isinstance(value, dict):
yield from _yaml_content_events(dict(
(k, v)
for k, v in value.items()
if k not in self.CASE_PRIMARY_KEYS
))
elif callable(getattr(value, 'case_data_events', None)):
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
yield from value.case_data_events()
yield yaml.MappingEndEvent()
else:
yield yaml.MappingStartEvent(None, None, True, flow_style=False)
yield from value
yield yaml.MappingEndEvent()
# Tail events
yield yaml.MappingEndEvent()
yield yaml.DocumentEndEvent()
yield yaml.StreamEndEvent()
class HTTPCaseAugmenter(CaseAugmenter):
"""A :class:`.CaseAugmenter` subclass for augmenting HTTP test cases"""
CASE_PRIMARY_KEYS = frozenset((
'url', 'method', 'request body',
))
class RPCCaseAugmenter(CaseAugmenter):
"""A :class:`.CaseAugmenter` subclass for augmenting RPC test cases"""
CASE_PRIMARY_KEYS = frozenset((
'endpoint', 'request parameters',
))
class UpdateExtender:
safe_loading = True
def __init__(self, file_name_base, case_augmenter, *, safe_loading=None):
super().__init__()
if safe_loading is not None and safe_loading is not self.safe_loading:
self.safe_loading = safe_loading
self._file_name = os.path.join(
case_augmenter.augmentation_data_dir,
file_name_base + case_augmenter.UPDATE_FILE_EXT
)
self._case_augmenter = case_augmenter
@property
def file_name(self):
return self._file_name
def with_current_augmentation(self, stream):
"""Append the full test case with its current augmentation data to the target file
:param stream:
A file-like object (which could be passed to :func:`yaml.parse`)
The *stream* contains YAML identifying the test case in question. The
identifying YAML from the test case _plus_ the augmentative key/value
pairs as currently defined in the augmenting data files will be written
to the file :attr:`file_name`.
"""
if stream.isatty():
print("Input test cases from interface, ending with a line containing only '...':")
buffered_input = StringIO()
for line in stream:
if line.rstrip() == "...":
break
buffered_input.write(line)
buffered_input.seek(0)
stream = buffered_input
id_list_reader = CaseIdListReader(self._case_augmenter.CASE_PRIMARY_KEYS, safe_loading=self.safe_loading)
for event in yaml.parse(stream):
test_case = id_list_reader.read(event)
if test_case is None:
continue
# Look up augmentation for case_id
case_as_currently_augmented_events = (
self._case_augmenter.augmented_test_case_events(*test_case)
)
# Append augmentation case to self.file_name
with open(self.file_name, 'ab') as outstream:
yaml.emit(
self._case_yaml_events(case_as_currently_augmented_events),
outstream,
)
def _case_yaml_events(self, content_events):
yield yaml.StreamStartEvent()
yield yaml.DocumentStartEvent(explicit=True)
yield yaml.SequenceStartEvent(None, None, implicit=True, flow_style=False)
yield from content_events
yield yaml.SequenceEndEvent()
yield yaml.DocumentEndEvent()
yield yaml.StreamEndEvent()
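# --- Hedged usage sketch (illustrative addition, not part of the module) ---
# One plausible pytest-style driver; the paths, group name, and my_client
# below are hypothetical:
#
# provider = InterfaceCaseProvider(
#     "specs", "orders",
#     case_augmenter=HTTPCaseAugmenter("specs/augmentation"),
# )
#
# def run_case(case):
#     response = my_client.request(case["method"], case["url"],
#                                  data=case.get("request body"))
#     assert response.body == case["response body"]
#
# for runner in provider.case_runners(run_case):
#     runner()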
| 40.247019
| 132
| 0.634201
| 2,860
| 23,625
| 5.025874
| 0.177972
| 0.03395
| 0.008418
| 0.007931
| 0.202379
| 0.140392
| 0.086128
| 0.043829
| 0.031167
| 0.017671
| 0
| 0.001144
| 0.296847
| 23,625
| 586
| 133
| 40.3157
| 0.864134
| 0.324233
| 0
| 0.201807
| 0
| 0
| 0.036031
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108434
| false
| 0
| 0.042169
| 0.009036
| 0.246988
| 0.006024
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5945f3b8e933ce01f957d7f582aa80cb9b902687
| 1,283
|
py
|
Python
|
2020/03/day3.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2020/03/day3.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2020/03/day3.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | 1
|
2021-12-04T10:37:09.000Z
|
2021-12-04T10:37:09.000Z
|
#!/usr/bin/env python3
# Day 3, with some speed optimizations
# Not really necessary for day 3, but probably later
import sys
import typing
import array
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
width = 0
height = 0
# Use 1-d array of bytes to keep pixels
# (plain array.array in the annotation: subscripted array.array[int] needs Python 3.9+)
def read_input(fname: str) -> array.array:
global width
global height
a = array.array('b')
width = len(open(fname).readline().rstrip())
for line in open(fname).read().splitlines():
height += 1
for c in line:
# Each pixel is True or False
a.append(c == '#')
return a
a = read_input(sys.argv[1])
# for faster x,y lookup in a
ytab = array.array('I')
for y in range(height):
ytab.append(y * width)
def get_pixel(x: int, y: int) -> int:
return a[(x % width) + ytab[y]]
def slope(dx: int, dy: int) -> int:
x = 0
y = 0
trees = 0
while True:
x += dx
y += dy
if y >= height:
break
if get_pixel(x, y):
trees += 1
return trees
# part 1
print(slope(3, 1))
# part 2
slopes = [
(1,1),
(3,1),
(5,1),
(7,1),
(1,2)
]
f = 1
for s in slopes:
f *= slope(s[0], s[1])
print(f)
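# --- Equivalent fold for part 2 (illustrative addition) ---
# math.prod (Python 3.8+) expresses the running product more directly and
# recomputes the same answer as the loop above:
import math
print(math.prod(slope(dx, dy) for dx, dy in slopes))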
| 18.070423
| 52
| 0.533125
| 205
| 1,283
| 3.317073
| 0.409756
| 0.030882
| 0.026471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036613
| 0.318784
| 1,283
| 70
| 53
| 18.328571
| 0.741419
| 0.168355
| 0
| 0
| 0
| 0
| 0.018886
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0.020833
| 0.1875
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59470f4e50387be73fea566efd45c232849a6813
| 226
|
py
|
Python
|
Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py
|
Dittz/Learning_Python
|
4c0c97075ef5e1717f82e2cf24b0587f0c8504f5
|
[
"MIT"
] | null | null | null |
Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py
|
Dittz/Learning_Python
|
4c0c97075ef5e1717f82e2cf24b0587f0c8504f5
|
[
"MIT"
] | null | null | null |
Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py
|
Dittz/Learning_Python
|
4c0c97075ef5e1717f82e2cf24b0587f0c8504f5
|
[
"MIT"
] | null | null | null |
x = 23
epsilon = 0.001
guess = x/2
tries = 0
while abs(guess**2 - x) >= epsilon:
if guess**2 > x:
guess /= 2
else:
guess *= 1.5
tries += 1
print(f'Number of tries: {tries}')
print(f'Guess = {guess}')
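# --- True bisection sketch (illustrative addition) ---
# The loop above multiplies or divides the guess by fixed factors, which is
# not bisection and can take many steps to land inside the tolerance. The
# classic version keeps an explicit [low, high] bracket and halves it:

def bisect_sqrt(x, epsilon=0.001):
    low, high = 0, max(x, 1)  # sqrt(x) always lies in [0, max(x, 1)]
    guess = (low + high) / 2
    steps = 0
    while abs(guess**2 - x) >= epsilon:
        if guess**2 > x:
            high = guess
        else:
            low = guess
        guess = (low + high) / 2
        steps += 1
    return guess, steps

print(bisect_sqrt(23))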
| 15.066667
| 34
| 0.535398
| 38
| 226
| 3.184211
| 0.473684
| 0.14876
| 0.115702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0.287611
| 226
| 14
| 35
| 16.142857
| 0.664596
| 0
| 0
| 0
| 0
| 0
| 0.172566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
594b9e391b71aa4e58f65f8b436f15f1fdaebd0a
| 2,440
|
py
|
Python
|
tests/unit/test_refresh_utils.py
|
anukaal/cloud-sql-python-connector
|
e8799c7de46dbe11a91a9a29173a5cfd279a561d
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_refresh_utils.py
|
anukaal/cloud-sql-python-connector
|
e8799c7de46dbe11a91a9a29173a5cfd279a561d
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_refresh_utils.py
|
anukaal/cloud-sql-python-connector
|
e8799c7de46dbe11a91a9a29173a5cfd279a561d
|
[
"Apache-2.0"
] | null | null | null |
""""
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any
import aiohttp
import google.auth
import pytest # noqa F401 Needed to run the tests
from google.cloud.sql.connector.refresh_utils import _get_ephemeral, _get_metadata
from google.cloud.sql.connector.utils import generate_keys
@pytest.mark.asyncio
async def test_get_ephemeral(connect_string: str) -> None:
"""
Test to check whether _get_ephemeral runs without problems given a valid
connection string.
"""
project = connect_string.split(":")[0]
instance = connect_string.split(":")[2]
credentials, project = google.auth.default(
scopes=[
"https://www.googleapis.com/auth/sqlservice.admin",
"https://www.googleapis.com/auth/cloud-platform",
]
)
_, pub_key = await generate_keys()
async with aiohttp.ClientSession() as client_session:
result: Any = await _get_ephemeral(
client_session, credentials, project, instance, pub_key
)
result = result.split("\n")
assert (
result[0] == "-----BEGIN CERTIFICATE-----"
and result[len(result) - 1] == "-----END CERTIFICATE-----"
)
@pytest.mark.asyncio
async def test_get_metadata(connect_string: str) -> None:
"""
Test to check whether _get_metadata runs without problems given a valid
connection string.
"""
project = connect_string.split(":")[0]
instance = connect_string.split(":")[2]
credentials, project = google.auth.default(
scopes=[
"https://www.googleapis.com/auth/sqlservice.admin",
"https://www.googleapis.com/auth/cloud-platform",
]
)
async with aiohttp.ClientSession() as client_session:
result = await _get_metadata(client_session, credentials, project, instance)
assert result["ip_addresses"] is not None and isinstance(
result["server_ca_cert"], str
)
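# --- Hedged note on the `connect_string` fixture (illustrative addition) ---
# Both tests above assume a pytest fixture named `connect_string` defined
# elsewhere (typically conftest.py). Given the split(":") indexing above, it
# must be a full instance connection name, "project:region:instance".
# A minimal sketch, assuming the value comes from an environment variable
# (the variable name here is hypothetical):
#
# import os
# import pytest
#
# @pytest.fixture
# def connect_string() -> str:
#     return os.environ["MYSQL_CONNECTION_NAME"]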
| 30.123457
| 84
| 0.690164
| 311
| 2,440
| 5.302251
| 0.421222
| 0.036386
| 0.043663
| 0.05094
| 0.493633
| 0.413584
| 0.413584
| 0.374773
| 0.31413
| 0.264403
| 0
| 0.008776
| 0.206148
| 2,440
| 80
| 85
| 30.5
| 0.84254
| 0.240164
| 0
| 0.390244
| 0
| 0
| 0.167282
| 0
| 0
| 0
| 0
| 0
| 0.04878
| 1
| 0
| false
| 0
| 0.146341
| 0
| 0.146341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ca23892448af2cabbc53d9df0bfd9fc4244b346
| 1,416
|
py
|
Python
|
crack-data-structures-and-algorithms/leetcode/sort_list_q148.py
|
Watch-Later/Eureka
|
3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35
|
[
"MIT"
] | 20
|
2016-05-16T11:09:04.000Z
|
2021-12-08T09:30:33.000Z
|
crack-data-structures-and-algorithms/leetcode/sort_list_q148.py
|
Watch-Later/Eureka
|
3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35
|
[
"MIT"
] | 1
|
2018-12-30T09:55:31.000Z
|
2018-12-30T14:08:30.000Z
|
crack-data-structures-and-algorithms/leetcode/sort_list_q148.py
|
Watch-Later/Eureka
|
3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35
|
[
"MIT"
] | 11
|
2016-05-02T09:17:12.000Z
|
2021-12-08T09:30:35.000Z
|
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
return merge_sort_list(head)
def merge_sort_list(head):
if not head or not head.next:
return head
slow = fast = head
while fast.next and fast.next.next:
fast = fast.next.next
slow = slow.next
# Split into two lists.
# Why does head2 start from the node after mid (slow)?
# Assume we have only two nodes, A -> B -> ^
# The strategy we use here essentially is like floor((l + r) / 2), which
# would get stuck on A forever if we made mid the head of the second half.
# Logically, the floor strategy makes mid the **last element** of the first part.
head2 = slow.next
slow.next = None
l1 = merge_sort_list(head)
l2 = merge_sort_list(head2)
return merge_lists(l1, l2)
def merge_lists(l1, l2):
# Introduce dummy node to simplify merge.
# No need to check l1 & l2 up front
dummy = ListNode(0)
p = dummy
while l1 and l2:
if l1.val < l2.val:
p.next = l1
l1 = l1.next
else:
p.next = l2
l2 = l2.next
p = p.next
if l1:
p.next = l1
if l2:
p.next = l2
return dummy.next
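# --- Usage sketch (illustrative addition) ---
# Builds 4 -> 2 -> 1 -> 3 and prints the sorted values 1, 2, 3, 4.
if __name__ == "__main__":
    head = ListNode(4)
    head.next = ListNode(2)
    head.next.next = ListNode(1)
    head.next.next.next = ListNode(3)
    node = Solution().sortList(head)
    while node:
        print(node.val)
        node = node.next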
| 22.47619
| 89
| 0.57274
| 208
| 1,416
| 3.831731
| 0.413462
| 0.031368
| 0.065245
| 0.06399
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028785
| 0.337571
| 1,416
| 62
| 90
| 22.83871
| 0.820896
| 0.325565
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ca2e7b053503c5f1274ef05c3605bdeeddc592f
| 71,712
|
py
|
Python
|
Source Codes/CDBC_Source_Code.py
|
CDBCTool/CDBC
|
70e64241e4fb7687832e3771f316cb036f6fc3c7
|
[
"MIT"
] | 13
|
2019-05-13T22:45:32.000Z
|
2022-02-27T07:19:16.000Z
|
Source Codes/CDBC_Source_Code.py
|
CDBCTool/CDBC
|
70e64241e4fb7687832e3771f316cb036f6fc3c7
|
[
"MIT"
] | 2
|
2019-09-03T03:57:06.000Z
|
2021-11-21T14:01:31.000Z
|
Source Codes/CDBC_Source_Code.py
|
CDBCTool/CDBC
|
70e64241e4fb7687832e3771f316cb036f6fc3c7
|
[
"MIT"
] | 3
|
2019-11-04T17:05:02.000Z
|
2021-12-29T18:14:51.000Z
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys,os,time
from scipy.stats import gamma, norm, beta
import matplotlib.pyplot as plt
from datetime import date, timedelta
import numpy as np
import tkinter
from os import listdir
from os.path import isfile, join
def sorted_values(Obs,Sim):
count = 0
for i in range(len(Obs)):
if Obs[i] == 0:
count += 1
Rank = [i+1 for i in range(len(Obs))]
Dict = dict(zip(Rank,Sim))
SortedSim = sorted(Dict.values())
SortedRank = sorted(Dict, key=Dict.get)
for i in range(count):
SortedSim[i] = 0
ArrangedDict = dict(zip(SortedRank,SortedSim))
SortedDict_by_Rank = sorted(ArrangedDict.items())
ArrangedSim = [v for k,v in SortedDict_by_Rank]
return ArrangedSim
def sorted_values_thresh(Sim, Fut):
try:
Min_Positive_Value_Sim = min(i for i in Sim if i > 0)
except ValueError:  # no positive values in Sim
Min_Positive_Value_Sim = 0
for i in range(len(Fut)):
if Fut[i] < Min_Positive_Value_Sim:
Fut[i] = 0
return Fut
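# --- Hedged note on sorted_values (illustrative addition) ---
# Net effect: for every zero in Obs, the smallest remaining value in Sim is
# replaced by 0, while every other Sim value keeps its original position:
#
# >>> sorted_values([0, 5, 0, 2, 8], [3.0, 1.0, 4.0, 2.0, 0.5])
# [3.0, 0, 4.0, 2.0, 0]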
class TitleBar(QDialog):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.setWindowFlags(Qt.FramelessWindowHint)
StyleTitleBar='''QDialog{
background-color: rgb(2,36,88);
}
QLabel{
color: rgb(0, 255, 255);
font: 11pt "MS Shell Dlg 2";
}'''
self.setStyleSheet(StyleTitleBar)
self.setAutoFillBackground(True)
self.setFixedSize(750,30)
Style_minimize='''QToolButton{
background-color: transparent;
color: rgb(255, 255, 255);
border: none;
}
QToolButton:hover{
background-color: rgb(66, 131, 221,230);
border: none;
}'''
Style_close='''QToolButton{
background-color: rgb(217, 0, 0);
color: rgb(255, 255, 255);
border: none;
}
QToolButton:hover{
background-color: rgb(255, 0, 0);
border: none;
}'''
Font=QFont('MS Shell Dlg 2',11)
Font.setBold(True)
self.minimize = QToolButton(self)
self.minimize.setText('–')
self.minimize.setFixedHeight(20)
self.minimize.setFixedWidth(25)
self.minimize.setStyleSheet(Style_minimize)
self.minimize.setFont(Font)
self.close = QToolButton(self)
self.close.setText(u"\u00D7")
self.close.setFixedHeight(20)
self.close.setFixedWidth(45)
self.close.setStyleSheet(Style_close)
self.close.setFont(Font)
image = QPixmap(r"Interpolation-2.png")
labelImg =QLabel(self)
labelImg.setFixedSize(QSize(20,20))
labelImg.setScaledContents(True)
labelImg.setPixmap(image)
labelImg.setStyleSheet('border: none;')
label = QLabel(self)
label.setText(" Climate Data Bias Corrector (RAIN, TEMP, SRAD)")
label.setFont(Font)
label.setStyleSheet('border: none;')
hbox=QHBoxLayout(self)
hbox.addWidget(labelImg)
hbox.addWidget(label)
hbox.addWidget(self.minimize)
hbox.addWidget(self.close)
hbox.insertStretch(2,600)
hbox.setSpacing(1)
hbox.setContentsMargins(5,0,5,0)
self.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Fixed)
self.maxNormal=False
self.close.clicked.connect(self.closeApp)
self.minimize.clicked.connect(self.showSmall)
def showSmall(self):
widget.showMinimized();
def closeApp(self):
widget.close()
def mousePressEvent(self,event):
if event.button() == Qt.LeftButton:
widget.moving = True
widget.offset = event.pos()
def mouseMoveEvent(self,event):
if widget.moving:
widget.move(event.globalPos()-widget.offset)
class HFTab(QTabWidget):
def __init__(self, parent = None):
super(HFTab, self).__init__(parent)
self.HTab = QWidget()
self.FTab = QWidget()
self.setStyleSheet('QTabBar { font: bold }')
self.addTab(self.HTab,"For Historical Data")
self.addTab(self.FTab,"For Future Data")
self.HTabUI()
self.FTabUI()
self.started = False
def HTabUI(self):
grid = QGridLayout()
grid.addWidget(self.input(), 0, 0)
grid.addWidget(self.output(), 1, 0)
grid.addWidget(self.method(), 2, 0)
grid.addWidget(self.progress(), 3, 0)
grid.setContentsMargins(0,0,0,0)
## self.setTabText(0,"Historical")
self.HTab.setLayout(grid)
def input(self):
##########Layout for taking input climate data to be bias corrected ##########
gBox = QGroupBox("Inputs:")
layout1 = QGridLayout()
self.Obsfile = QLineEdit()
self.browse2 = QPushButton("...")
self.browse2.setMaximumWidth(25)
self.browse2.clicked.connect(self.browse2_file)
self.q1 = QPushButton("?")
self.q1.setMaximumWidth(15)
self.q1.clicked.connect(self.Info1)
self.Obsfile.setPlaceholderText("File with observed climate data (*.csv or *.txt)")
layout1.addWidget(self.Obsfile,1,0,1,3)
layout1.addWidget(self.q1,1,3,1,1)
layout1.addWidget(self.browse2,1,4,1,1)
self.ModHfile = QLineEdit()
self.ModHfile.setPlaceholderText("File with GCM outputs (*.csv or *.txt)")
self.q2 = QPushButton("?")
self.q2.setMaximumWidth(15)
self.q2.clicked.connect(self.Info2)
self.browse3 = QPushButton("...")
self.browse3.setMaximumWidth(25)
self.browse3.clicked.connect(self.browse3_file)
layout1.addWidget(self.ModHfile,2,0,1,3)
layout1.addWidget(self.q2,2,3,1,1)
layout1.addWidget(self.browse3,2,4,1,1)
## ##########Layout for taking comma delimited vs tab delimited################################
## sublayout1 = QGridLayout()
##
## self.label1 = QLabel("Input Format:\t")
## self.b1 = QRadioButton("Comma Delimited (*.csv)")
## #self.b1.setChecked(True)
## self.b2 = QRadioButton("Tab Delimited (*.txt)")
##
## self.b1.toggled.connect(lambda:self.btnstate(self.b1))
## self.b2.toggled.connect(lambda:self.btnstate(self.b2))
##
## sublayout1.addWidget(self.label1,1,0)
## sublayout1.addWidget(self.b1,1,1)
## sublayout1.addWidget(self.b2,1,2)
## layout1.addLayout(sublayout1,3,0)
gBox.setLayout(layout1)
return gBox
def output(self):
##########Layout for output file location and interpolation##########
gBox = QGroupBox("Outputs:")
layout4 = QGridLayout()
self.outputfile_location = QLineEdit()
self.outputfile_location.setPlaceholderText("Folder to save bias corrected GCM outputs")
self.browse4 = QPushButton("...")
self.browse4.setMaximumWidth(25)
self.browse4.clicked.connect(self.browse4_file)
layout4.addWidget(self.outputfile_location,1,0,1,3)
layout4.addWidget(self.browse4,1,3,1,1)
########################Layout for taking comma delimited vs tab delimited################################
sublayout2 = QGridLayout()
output_label = QLabel("Output Format:\t")
self.b3 = QRadioButton("Comma Delimited (*.csv)")
#self.b3.setChecked(True)
self.b4 = QRadioButton("Tab Delimited (*.txt)")
self.b3.toggled.connect(lambda:self.btn2state(self.b3))
self.b4.toggled.connect(lambda:self.btn2state(self.b4))
sublayout2.addWidget(output_label,1,0)
sublayout2.addWidget(self.b3,1,1)
sublayout2.addWidget(self.b4,1,2)
layout4.addLayout(sublayout2,2,0)
gBox.setLayout(layout4)
return gBox
def method(self):
########################Layout for taking methods of Bias Correction ################################
gBox = QGroupBox("Variable/Distribution")
layout5 = QGridLayout()
self.b5 = QRadioButton("Rainfall/Gamma")
#self.b3.setChecked(True)
self.b6 = QRadioButton("Temperature/Normal")
self.b7 = QRadioButton("Solar Radiation/Beta")
self.b5.toggled.connect(lambda:self.btn3state(self.b5))
self.b6.toggled.connect(lambda:self.btn3state(self.b6))
self.b7.toggled.connect(lambda:self.btn3state(self.b7))
self.show_hide = QPushButton("Show Details")
Font=QFont()
Font.setBold(True)
#self.show_hide.setFont(Font)
self.show_hide.setCheckable(True)
#self.show_hide.toggle()
self.show_hide.clicked.connect(self.ShowHide)
self.show_hide.setFixedWidth(90)
self.show_hide.setFixedHeight(25)
Style_show_hide_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(66, 131, 221);
border: none;
}
QPushButton:Checked{
background-color: rgb(66, 131, 221);
border: none;
}
QPushButton:hover{
background-color: rgb(66, 131, 221,230);
border: none;
}
"""
self.show_hide.setStyleSheet(Style_show_hide_Button)
self.show_plots = QPushButton("Show Plots")
self.show_plots.clicked.connect(self.ShowPlots)
self.show_plots.setFixedWidth(75)
self.show_plots.setFixedHeight(25)
self.show_plots.setStyleSheet(Style_show_hide_Button)
self.start = QPushButton("Run")
self.start.setFixedWidth(50)
self.start.setFixedHeight(25)
Style_Run_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(0,121,0);
border-color: none;
border: none;
}
QPushButton:hover{
background-color: rgb(0,121,0,230);
}
"""
self.start.clicked.connect(self.start_correctionH)
#self.start.setFont(Font)
self.start.setStyleSheet(Style_Run_Button)
self.stop = QPushButton("Cancel")
self.stop.setMaximumWidth(60)
self.stop.setFixedHeight(25)
Style_Cancel_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(180,0,0,240);
border-color: none;
border: none;
}
QPushButton:hover{
background-color: rgb(180,0,0,220);
}
"""
self.stop.clicked.connect(self.stop_correctionH)
#self.stop.setFont(Font)
self.stop.setStyleSheet(Style_Cancel_Button)
layout5.addWidget(self.b5,1,1)
layout5.addWidget(self.b6,1,2)
layout5.addWidget(self.b7,1,3)
layout5.addWidget(self.show_hide,1,7)
layout5.addWidget(self.start,1,4)
layout5.addWidget(self.stop,1,6)
layout5.addWidget(self.show_plots,1,5)
## layout5.addWidget(self.b5,1,1)
## layout5.addWidget(self.b6,1,2)
## layout5.addWidget(self.b7,1,3)
## layout5.addWidget(self.show_hide,2,5)
## layout5.addWidget(self.start,1,4)
## layout5.addWidget(self.stop,2,4)
## layout5.addWidget(self.show_plots,1,5)
gBox.setLayout(layout5)
return gBox
########## Layout for progress of Bias Correction ##########
def progress(self):
gBox = QGroupBox()
layout6 = QVBoxLayout()
STYLE2 = """
QProgressBar{
text-align: center;
}
QProgressBar::chunk {
background-color: rgb(0,121,0);
}
"""
self.status = QLabel('')
self.progressbar = QProgressBar()
## self.progressbarfinal = QProgressBar()
## self.progressbar.setMinimum(1)
self.progressbar.setFixedHeight(13)
## self.progressbarfinal.setFixedHeight(13)
self.progressbar.setStyleSheet(STYLE2)
## self.progressbarfinal.setStyleSheet(STYLE2)
self.textbox = QTextEdit()
self.textbox.setReadOnly(True)
self.textbox.moveCursor(QTextCursor.End)
self.textbox.hide()
self.scrollbar = self.textbox.verticalScrollBar()
layout6.addWidget(self.status)
layout6.addWidget(self.progressbar)
## layout6.addWidget(self.progressbarfinal)
layout6.addWidget(self.textbox)
gBox.setLayout(layout6)
return gBox
########################### Control Buttons ####################################################
def browse2_file(self):
Obs_file = QFileDialog.getOpenFileName(self,caption = "Open File",directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimated (*.csv);;Tab Delimated (*.txt)")
self.Obsfile.setText(QDir.toNativeSeparators(Obs_file))
def browse3_file(self):
ModH_file = QFileDialog.getOpenFileName(self,caption = "Open File", directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimated (*.csv);;Tab Delimated (*.txt)")
self.ModHfile.setText(QDir.toNativeSeparators(ModH_file))
def browse4_file(self):
output_file = QFileDialog.getExistingDirectory(self, "Save File in Folder", r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
QFileDialog.ShowDirsOnly)
self.outputfile_location.setText(QDir.toNativeSeparators(output_file))
def Info1(self):
QMessageBox.information(self, "Information About Input Files (Observed)",
'''Sample input (.csv or .txt) must follow the format shown in the sample file:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (Obs).csv
''')
def Info2(self):
QMessageBox.information(self, "Information About Input File (Model)",
'''Sample input (.csv or .txt) must follow the format shown in the sample file:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (Mod).csv
''')
## def btnstate(self,b):
## if b.text() == "Comma Delimited (*.csv)" and b.isChecked() == True:
## self.seperator = ','
## self.seperatorname = '.csv'
## if b.text() == "Tab Delimited (*.txt)" and b.isChecked() == True:
## self.seperator = '\t'
## self.seperatorname = '.txt'
def btn2state(self,b):
if b.text() == "Comma Delimated (*.csv)" and b.isChecked() == True:
self.seperator2 = ','
self.seperatorname2 = '.csv'
if b.text() == "Tab Delimited (*.txt)" and b.isChecked() == True:
self.seperator2 = '\t'
self.seperatorname2 = '.txt'
def btn3state(self,b):
if b.text() == "Rainfall/Gamma" and b.isChecked() == True:
self.methodname = b.text()
if b.text() == "Temperature/Normal" and b.isChecked() == True:
self.methodname = b.text()
if b.text() == "Solar Radiation/Beta" and b.isChecked() == True:
self.methodname = b.text()
def start_correctionH(self):
self.started = True
self.BiasCorrectH()
def stop_correctionH(self):
if self.started:
self.started = False
QMessageBox.information(self, "Information", "Bias correction is aborted.")
def ShowHide(self):
if self.show_hide.text() == "Hide Details" and self.show_hide.isChecked() == False:
self.textboxF.hide()
self.textbox.hide()
## self.setFixedSize(700,372)
ShowHide(self.show_hideF.text())
ShowHide(self.show_hide.text())
self.show_hideF.setText('Show Details')
self.show_hide.setText('Show Details')
if self.show_hide.text() == "Show Details" and self.show_hide.isChecked() == True:
self.textboxF.show()
self.textbox.show()
## self.setFixedSize(700,620)
ShowHide(self.show_hideF.text())
ShowHide(self.show_hide.text())
self.show_hideF.setText('Hide Details')
self.show_hide.setText('Hide Details')
def BiasCorrectH(self):
if self.Obsfile.text() == "":
QMessageBox.critical(self, "Message", "File containing observed climate data (*.csv or *.txt) is not given.")
self.started = False
if self.ModHfile.text() == "":
QMessageBox.critical(self, "Message", "File containing GCM outputs (*.csv or *.txt) is not given.")
self.started = False
if self.outputfile_location.text() == "":
QMessageBox.critical(self, "Message", "Folder to save bias corrected GCM outputs is not given")
self.started = False
try:
## sep = self.seperator
## sepname = self.seperatorname
sep2 = self.seperator2
sepname2 = self.seperatorname2
except:
QMessageBox.critical(self, "Message", "Format is not defined.")
self.started = False
try:
method = self.methodname
except:
QMessageBox.critical(self, "Message", "Variable/Distribution is not defined.")
self.started = False
self.textbox.setText("")
start = time.time()
self.status.setText('Status: Correcting')
## self.progressbarfinal.setMinimum(0)
## self.progressbarfinal.setValue(0)
self.progressbar.setMinimum(0)
self.progressbar.setValue(0)
Fobs = self.Obsfile.text()
Fmod = self.ModHfile.text()
ObsData, ModData, CorrectedData = [], [], []
with open(Fobs) as f:
line = [line for line in f]
for i in range(len(line)):
if Fobs.endswith('.csv'):
ObsData.append([word for word in line[i].split(",") if word])
if Fobs.endswith('.txt'):
ObsData.append([word for word in line[i].split("\t") if word])
lat = [float(ObsData[0][c]) for c in range(1,len(ObsData[0]))]
lon = [float(ObsData[1][c]) for c in range(1,len(ObsData[0]))]
Latitude = []
Longitude = []
with open(Fmod) as f:
line = [line for line in f]
for i in range(len(line)):
if Fmod.endswith('.csv'):
ModData.append([word for word in line[i].split(",") if word])
if Fmod.endswith('.txt'):
ModData.append([word for word in line[i].split("\t") if word])
DateObs = [ObsData[r][0] for r in range(len(ObsData))]
DateMod = [ModData[r][0] for r in range(len(ModData))]
OutPath = self.outputfile_location.text()
CorrectedData.append(DateMod)
YMod = int(DateMod[2][-4:])
YObs = int(DateObs[2][-4:])
app.processEvents()
if len(lat)>1:
random_count = np.random.randint(len(lat),size=(1))
else:
random_count = 0
fig = plt.figure(figsize=(15,7))
plt.style.use('ggplot')
## plt.style.use('fivethirtyeight')
for j in range(len(lat)):
obs = [float(ObsData[r][j+1]) for r in range(2,len(ObsData))]
MOD = [float(ModData[r][j+1]) for r in range(2,len(ModData))]
Date = [date(YMod,1,1)+timedelta(i) for i in range(len(MOD))]
DateObs = [date(YObs,1,1)+timedelta(i) for i in range(len(obs))]
if method == 'Rainfall/Gamma' and self.started == True:
MOD_Month=[]
Obs_Monthwise = [[] for m in range(12)]
Obs_MonthFreq = [[] for m in range(12)]
MOD_Monthwise = [[] for m in range(12)]
MOD_MonthFreq = [[] for m in range(12)]
Cor_Monthwise = []
Date_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(obs)):
if Date[i].month == m+1:
Date_Monthwise[m].append(Date[i])
Obs_Monthwise[m].append(obs[i])
MOD_Monthwise[m].append(MOD[i])
for m in range(12):
MOD_Month.append(sorted_values(Obs_Monthwise[m],MOD_Monthwise[m]))
MOD_Monthwise = MOD_Month
for m in range(12):
for i in range(len(MOD_Monthwise[m])):
if MOD_Monthwise[m][i]>0:
MOD_MonthFreq[m].append(MOD_Monthwise[m][i])
if Obs_Monthwise[m][i]>0:
Obs_MonthFreq[m].append(Obs_Monthwise[m][i])
nplot=1
for m in range(12):
Cor = []
if len(MOD_MonthFreq[m])>0 and len(Obs_MonthFreq[m])>0:
Mo, Mg, Vo, Vg = np.mean(Obs_MonthFreq[m]), np.mean(MOD_MonthFreq[m]), np.std(Obs_MonthFreq[m])**2, np.std(MOD_MonthFreq[m])**2
if not any(param<0.000001 for param in [Mo, Mg, Vo, Vg]):
O_alpha, O_beta, G_alpha, G_beta = Mo**2/Vo, Vo/Mo, Mg**2/Vg, Vg/Mg
O_loc, G_loc = 0, 0
## print('G',O_alpha, O_beta, G_alpha, G_beta)
else:
O_alpha, O_loc, O_beta = gamma.fit(Obs_MonthFreq[m], loc=0)
G_alpha, G_loc, G_beta = gamma.fit(MOD_MonthFreq[m], loc=0)
## print('fit',O_alpha, O_beta, G_alpha, G_beta)
## print(O_alpha, O_beta, G_alpha, G_beta)
prob = gamma.cdf(MOD_Monthwise[m],G_alpha, scale=G_beta)
Corr = gamma.ppf(prob, O_alpha, scale=O_beta)
for i in range(len(Obs_Monthwise[m])):
if len(MOD_MonthFreq[m])>0:
if MOD_Monthwise[m][i] >= min(MOD_MonthFreq[m]):
Cor.append(Corr[i])
else:
Cor.append(0)
else:
Cor.append(0)
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obs_cdf = gamma.cdf(Obs_Monthwise[m], O_alpha, O_loc, O_beta)
mod_cdf = gamma.cdf(MOD_Monthwise[m], G_alpha, G_loc, G_beta)
Mc, Vc = np.mean(Cor), np.std(Cor)**2
if not any(param<0.000001 for param in [Mc, Vc]):
CF_alpha, CF_beta = Mc**2/Vc, Vc/Mc
CF_loc, G_loc = 0, 0
else:
CF_alpha, CF_loc, CF_beta=gamma.fit(Cor)
cor_cdf = gamma.cdf(Cor, CF_alpha, CF_loc, CF_beta)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(Obs_Monthwise[m], obs_cdf, '.b')
m, = ax.plot(MOD_Monthwise[m], mod_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
if method =='Temperature/Normal' and self.started == True:
MOD_Month=[]
Obs_Monthwise = [[] for m in range(12)]
MOD_Monthwise = [[] for m in range(12)]
Cor_Monthwise = []
Date_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(MOD)):
if Date[i].month == m+1:
Date_Monthwise[m].append(Date[i])
MOD_Monthwise[m].append(MOD[i])
for m in range(12):
for i in range(len(obs)):
if DateObs[i].month == m+1:
Obs_Monthwise[m].append(obs[i])
nplot=1
for m in range(12):
Cor = []
Mo, So = norm.fit(Obs_Monthwise[m])
Mg, Sg = norm.fit(MOD_Monthwise[m])
prob = norm.cdf(MOD_Monthwise[m],Mg, Sg)
Cor = norm.ppf(prob, Mo, So)
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obs_cdf = norm.cdf(Obs_Monthwise[m], Mo, So)
mod_cdf = norm.cdf(MOD_Monthwise[m], Mg, Sg)
Mc, Sc = norm.fit(Cor)
cor_cdf = norm.cdf(Cor, Mc, Sc)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(Obs_Monthwise[m], obs_cdf, '.b')
m, = ax.plot(MOD_Monthwise[m], mod_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
if method =='Solar Radiation/Beta' and self.started == True:
MOD_Month=[]
Obs_Monthwise = [[] for m in range(12)]
MOD_Monthwise = [[] for m in range(12)]
Cor_Monthwise = []
Date_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(MOD)):
if Date[i].month == m+1:
Date_Monthwise[m].append(Date[i])
MOD_Monthwise[m].append(MOD[i])
for m in range(12):
for i in range(len(obs)):
if DateObs[i].month == m+1:
Obs_Monthwise[m].append(obs[i])
nplot=1
for m in range(12):
Cor = []
oMin, oMax = min(Obs_Monthwise[m]), max(Obs_Monthwise[m])
gMin, gMax = min(MOD_Monthwise[m]), max(MOD_Monthwise[m])
Mo = (np.mean(Obs_Monthwise[m])-oMin)/(oMax - oMin)
Mg = (np.mean(MOD_Monthwise[m])-gMin)/(gMax - gMin)
Vo = np.std(Obs_Monthwise[m])**2/(oMax - oMin)**2
Vg = np.std(MOD_Monthwise[m])**2/(gMax - gMin)**2
ao, ag = -Mo*(Vo + Mo**2 - Mo)/Vo, -Mg*(Vg + Mg**2 - Mg)/Vg
bo, bg = ao*(1 - Mo)/Mo, ag*(1 - Mg)/Mg
TransO = [(Obs_Monthwise[m][i]-oMin)/(oMax-oMin) for i in range(len(Obs_Monthwise[m]))]
TransG = [(MOD_Monthwise[m][i]-gMin)/(gMax-gMin) for i in range(len(MOD_Monthwise[m]))]
prob = beta.cdf(TransG, ag, bg)
TransC = beta.ppf(prob, ao, bo)
Cor = [TransC[i]*(oMax-oMin)+oMin for i in range(len(TransC))]
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obs_cdf = beta.cdf(TransO, ao, bo)
mod_cdf = beta.cdf(TransG, ag, bg)
Mc = (np.mean(Cor)-min(Cor))/(max(Cor)-min(Cor))
Vc = np.std(Cor)**2/(max(Cor)-min(Cor))**2
ac = -Mc*(Vc + Mc**2 - Mc)/Vc
bc = ac*(1 - Mc)/Mc
cor_cdf = beta.cdf(TransC, ac, bc)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(Obs_Monthwise[m], obs_cdf, '.b')
m, = ax.plot(MOD_Monthwise[m], mod_cdf, '.r')
c, = ax.plot(Cor, cor_cdf, '.g')
nplot=nplot+1
fig.legend([o,m,c,(o,m,c,)],['Observed','Before Correction','After Correction'],ncol=3,loc=8,frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
Date_Month=[]
for m in range(12):
for i in range(len(Date_Monthwise[m])):
Date_Month.append(Date_Monthwise[m][i])
DateCorr_Dict = dict(zip(Date_Month,Cor_Monthwise))
SortedCorr = sorted(DateCorr_Dict.items())
CorrectedData.append([lat[j],lon[j]]+[v for k,v in SortedCorr])
app.processEvents()
self.scrollbar.setValue(self.scrollbar.maximum())
self.progressbar.setValue(j)
## self.progressbarfinal.setValue(j)
self.progressbar.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinal.setMaximum(len(lat)+len(CorrectedData[0])-2)
self.textbox.append('Corrected '+ str(j+1)+' out of '+str(len(lat))+':\tLat: %.1f'%lat[j]+'\tLon: %.1f'%lon[j])
self.status.setText('Status: Writing Bias Corrected Data to File.')
self.textbox.append('\nWriting Bias Corrected Data to File.')
app.processEvents()
if sep2 == ',':
f = open(OutPath + '\\Bias Corrected ' + method.split('/')[0] + ' ' + str(YMod) + '.csv', 'w')
for c in range(len(CorrectedData[0])):
app.processEvents()
if self.started==True:
f.write(','.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
f.write('\n')
if (c+1) % 10 == 1 and (c+1) % 100 != 11:
    self.textbox.append("Writing %dst day data" % (c+1))
elif (c+1) % 10 == 2 and (c+1) % 100 != 12:
    self.textbox.append("Writing %dnd day data" % (c+1))
elif (c+1) % 10 == 3 and (c+1) % 100 != 13:
    self.textbox.append("Writing %drd day data" % (c+1))
else:
    self.textbox.append("Writing %dth day data" % (c+1))
app.processEvents()
self.scrollbar.setValue(self.scrollbar.maximum())
self.progressbar.setValue(len(lat)+c+1)
## self.progressbarfinal.setValue(len(lat)+c+1)
self.progressbar.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinal.setMaximum(len(lat)+len(CorrectedData[0])-2)
if c == len(CorrectedData[0])-1:
end = time.time()
t = end-start
self.status.setText('Status: Completed.')
self.textbox.append("\nTotal Time Taken: %.2d:%.2d:%.2d" % (t/3600,(t%3600)/60,t%60))
QMessageBox.information(self, "Information", "Bias Correction is completed.")
f.close()
if sep2 == '\t':
f = open(OutPath + '\\Bias Corrected ' + method.split('/')[0] + ' ' + str(YMod) + '.txt', 'w')
for c in range(len(CorrectedData[0])):
app.processEvents()
if self.started==True:
f.write('\t'.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
f.write('\n')
if (c+1) % 10 == 1 and (c+1) % 100 != 11:
    self.textbox.append("Writing %dst day data" % (c+1))
elif (c+1) % 10 == 2 and (c+1) % 100 != 12:
    self.textbox.append("Writing %dnd day data" % (c+1))
elif (c+1) % 10 == 3 and (c+1) % 100 != 13:
    self.textbox.append("Writing %drd day data" % (c+1))
else:
    self.textbox.append("Writing %dth day data" % (c+1))
app.processEvents()
self.scrollbar.setValue(self.scrollbar.maximum())
self.progressbar.setValue(len(lat)+c+1)
self.progressbar.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinal.setValue(len(lat)+c+1)
## self.progressbarfinal.setMaximum(len(lat)+len(CorrectedData[0])-2)
if c == len(CorrectedData[0])-1:
end = time.time()
t = end-start
self.status.setText('Status: Completed.')
self.textbox.append("\nTotal Time Taken: %.2d:%.2d:%.2d" % (t/3600,(t%3600)/60,t%60))
QMessageBox.information(self, "Information", "Bias Correction is completed.")
f.close()
def ShowPlots(self):
plt.show()
def FTabUI(self):
gridF = QGridLayout()
gridF.addWidget(self.inputF(), 0, 0)
gridF.addWidget(self.outputF(), 1, 0)
gridF.addWidget(self.methodF(), 2, 0)
gridF.addWidget(self.progressF(), 3, 0)
gridF.setContentsMargins(0,0,0,0)
## self.setTabText(0,"Historical")
self.FTab.setLayout(gridF)
def inputF(self):
##########Layout for taking input climate data to be bias corrected ##########
gBoxF = QGroupBox("Inputs:")
layout1F = QGridLayout()
self.ObsfileF = QLineEdit()
self.browse2F = QPushButton("...")
self.browse2F.setMaximumWidth(25)
self.browse2F.clicked.connect(self.browse2_fileF)
self.q1F = QPushButton("?")
self.q1F.setMaximumWidth(15)
self.q1F.clicked.connect(self.Info1F)
self.ObsfileF.setPlaceholderText("File with observed historical climate data (*.csv or *.txt)")
self.ModHfileF = QLineEdit()
self.browse1F = QPushButton("...")
self.browse1F.setMaximumWidth(25)
self.browse1F.clicked.connect(self.browse1_fileF)
self.q0F = QPushButton("?")
self.q0F.setMaximumWidth(15)
self.q0F.clicked.connect(self.Info0F)
self.ModHfileF.setPlaceholderText("File with GCM historical climate projections (*.csv or *.txt)")
layout1F.addWidget(self.ObsfileF,1,0,1,3)
layout1F.addWidget(self.q1F,1,3,1,1)
layout1F.addWidget(self.browse2F,1,4,1,1)
layout1F.addWidget(self.ModHfileF,1,5,1,3)
layout1F.addWidget(self.q0F,1,8,1,1)
layout1F.addWidget(self.browse1F,1,9,1,1)
self.ModFfileF = QLineEdit()
self.ModFfileF.setPlaceholderText("File with GCM future climate projections (*.csv or *.txt)")
self.q2F = QPushButton("?")
self.q2F.setMaximumWidth(15)
self.q2F.clicked.connect(self.Info2F)
self.browse3F = QPushButton("...")
self.browse3F.setMaximumWidth(25)
self.browse3F.clicked.connect(self.browse3_fileF)
layout1F.addWidget(self.ModFfileF,3,0,1,8)
layout1F.addWidget(self.q2F,3,8,1,1)
layout1F.addWidget(self.browse3F,3,9,1,1)
## ##########Layout for taking comma delimited vs tab delimited################################
## sublayout1 = QGridLayout()
##
## self.label1 = QLabel("Input Format:\t")
## self.b1 = QRadioButton("Comma Delimited (*.csv)")
## #self.b1.setChecked(True)
## self.b2 = QRadioButton("Tab Delimited (*.txt)")
##
## self.b1.toggled.connect(lambda:self.btnstate(self.b1))
## self.b2.toggled.connect(lambda:self.btnstate(self.b2))
##
## sublayout1.addWidget(self.label1,1,0)
## sublayout1.addWidget(self.b1,1,1)
## sublayout1.addWidget(self.b2,1,2)
## layout1.addLayout(sublayout1,3,0)
gBoxF.setLayout(layout1F)
return gBoxF
def outputF(self):
##########Layout for output file location and interpolation##########
gBoxF = QGroupBox("Outputs:")
layout4F = QGridLayout()
self.outputfile_locationF = QLineEdit()
self.outputfile_locationF.setPlaceholderText("Folder to save bias corrected GCM outputs")
self.browse4F = QPushButton("...")
self.browse4F.setMaximumWidth(25)
self.browse4F.clicked.connect(self.browse4_fileF)
layout4F.addWidget(self.outputfile_locationF,1,0,1,3)
layout4F.addWidget(self.browse4F,1,3,1,1)
########################Layout for taking comma delimited vs tab delimited################################
sublayout2F = QGridLayout()
output_labelF = QLabel("Output Format:\t")
self.b3F = QRadioButton("Comma Delimited (*.csv)")
#self.b3.setChecked(True)
self.b4F = QRadioButton("Tab Delimited (*.txt)")
self.b3F.toggled.connect(lambda:self.btn2stateF(self.b3F))
self.b4F.toggled.connect(lambda:self.btn2stateF(self.b4F))
sublayout2F.addWidget(output_labelF,1,0)
sublayout2F.addWidget(self.b3F,1,1)
sublayout2F.addWidget(self.b4F,1,2)
layout4F.addLayout(sublayout2F,2,0)
gBoxF.setLayout(layout4F)
return gBoxF
def methodF(self):
########################Layout for taking methods of Bias Correction ################################
gBoxF = QGroupBox("Variable/Distribution")
layout5F = QGridLayout()
self.b5F = QRadioButton("Rainfall/Gamma")
#self.b3F.setChecked(True)
self.b6F = QRadioButton("Temperature/Normal")
self.b7F = QRadioButton("Solar Radiation/Beta")
self.b5F.toggled.connect(lambda:self.btn3stateF(self.b5F))
self.b6F.toggled.connect(lambda:self.btn3stateF(self.b6F))
self.b7F.toggled.connect(lambda:self.btn3stateF(self.b7F))
self.show_hideF = QPushButton("Show Details")
Font=QFont()
Font.setBold(True)
#self.show_hideF.setFont(Font)
self.show_hideF.setCheckable(True)
#self.show_hideF.toggle()
self.show_hideF.clicked.connect(self.ShowHideF)
self.show_hideF.setFixedWidth(90)
self.show_hideF.setFixedHeight(25)
Style_show_hideF_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(66, 131, 221);
border: none;
}
QPushButton:Checked{
background-color: rgb(66, 131, 221);
border: none;
}
QPushButton:hover{
background-color: rgb(66, 131, 221,230);
border: none;
}
"""
self.show_hideF.setStyleSheet(Style_show_hideF_Button)
self.show_plotsF = QPushButton("Show Plots")
self.show_plotsF.clicked.connect(self.ShowPlotsF)
self.show_plotsF.setFixedWidth(75)
self.show_plotsF.setFixedHeight(25)
self.show_plotsF.setStyleSheet(Style_show_hideF_Button)
self.startF = QPushButton("Run")
self.startF.setFixedWidth(50)
self.startF.setFixedHeight(25)
Style_RunF_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(0,121,0);
border-color: none;
border: none;
}
QPushButton:hover{
background-color: rgb(0,121,0,230);
}
"""
self.startF.clicked.connect(self.start_correctionF)
#self.startF.setFont(Font)
self.startF.setStyleSheet(Style_RunF_Button)
self.stopF = QPushButton("Cancel")
self.stopF.setMaximumWidth(60)
self.stopF.setFixedHeight(25)
Style_CancelF_Button = """
QPushButton{
color: rgb(255, 255, 255);
background-color: rgb(180,0,0,240);
border-color: none;
border: none;
}
QPushButton:hover{
background-color: rgb(180,0,0,220);
}
"""
self.stopF.clicked.connect(self.stop_correctionF)
#self.stopF.setFont(Font)
self.stopF.setStyleSheet(Style_CancelF_Button)
layout5F.addWidget(self.b5F,1,1)
layout5F.addWidget(self.b6F,1,2)
layout5F.addWidget(self.b7F,1,3)
layout5F.addWidget(self.show_hideF,1,7)
layout5F.addWidget(self.startF,1,4)
layout5F.addWidget(self.stopF,1,6)
layout5F.addWidget(self.show_plotsF,1,5)
## layout5F.addWidget(self.b5F,1,1)
## layout5F.addWidget(self.b6F,1,2)
## layout5F.addWidget(self.b7F,1,3)
## layout5F.addWidget(self.show_hideF,2,5)
## layout5F.addWidget(self.startF,1,4)
## layout5F.addWidget(self.stopF,2,4)
## layout5F.addWidget(self.show_plotsF,1,5)
gBoxF.setLayout(layout5F)
return gBoxF
########## Layout for progress of Bias Correction ##########
def progressF(self):
gBoxF = QGroupBox()
layout6F = QVBoxLayout()
STYLE2 = """
QProgressBar{
text-align: center;
}
QProgressBar::chunk {
background-color: rgb(0,121,0);
}
"""
self.statusF = QLabel('')
self.progressbarF = QProgressBar()
## self.progressbarfinalF = QProgressBar()
#self.progressbarF.setMinimum(1)
self.progressbarF.setFixedHeight(13)
## self.progressbarfinalF.setFixedHeight(13)
self.progressbarF.setStyleSheet(STYLE2)
## self.progressbarfinalF.setStyleSheet(STYLE2)
self.textboxF = QTextEdit()
self.textboxF.setReadOnly(True)
self.textboxF.moveCursor(QTextCursor.End)
self.textboxF.hide()
self.scrollbarF = self.textboxF.verticalScrollBar()
layout6F.addWidget(self.statusF)
layout6F.addWidget(self.progressbarF)
## layout6F.addWidget(self.progressbarfinalF)
layout6F.addWidget(self.textboxF)
gBoxF.setLayout(layout6F)
return gBoxF
########################### Control Buttons ####################################################
def browse1_fileF(self):
ModH_fileF = QFileDialog.getOpenFileName(self,caption = "Open File",directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimited (*.csv);;Tab Delimited (*.txt)")
self.ModHfileF.setText(QDir.toNativeSeparators(ModH_fileF))
def browse2_fileF(self):
Obs_fileF = QFileDialog.getOpenFileName(self,caption = "Open File",directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimited (*.csv);;Tab Delimited (*.txt)")
self.ObsfileF.setText(QDir.toNativeSeparators(Obs_fileF))
def browse3_fileF(self):
ModF_fileF = QFileDialog.getOpenFileName(self,caption = "Open File", directory=r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
filter="Comma Delimited (*.csv);;Tab Delimited (*.txt)")
self.ModFfileF.setText(QDir.toNativeSeparators(ModF_fileF))
def browse4_fileF(self):
output_fileF = QFileDialog.getExistingDirectory(self, "Save File in Folder", r"C:\Users\gupta\OneDrive\0. M.Tech. Research Work\Codes\GUIs\Bias Correction\\",
QFileDialog.ShowDirsOnly)
self.outputfile_locationF.setText(QDir.toNativeSeparators(output_fileF))
def Info0F(self):
QMessageBox.information(self, "Information About Input Files (Model Historical)",
'''The input file (.csv or .txt) should follow the same format as the sample file:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (ModH).csv
''')
def Info1F(self):
QMessageBox.information(self, "Information About Input Files (Observed Historical)",
'''The input file (.csv or .txt) should follow the same format as the sample file:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (ObsH).csv
''')
def Info2F(self):
QMessageBox.information(self, "Information About Input File (Model Future)",
'''The input file (.csv or .txt) should follow the same format as the sample file:\nC:\Program Files (x86)\Climate Data Bias Corrector\Sample Input (ModF).csv
''')
## def btnstateF(self,b):
## if b.text() == "Comma Delimited (*.csv)" and b.isChecked() == True:
## self.seperatorF = ','
## self.seperatornameF = '.csv'
## if b.text() == "Tab Delimited (*.txt)" and b.isChecked() == True:
## self.seperatorF = '\t'
## self.seperatornameF = '.txt'
def btn2stateF(self, b):
    if b.text() == "Comma Delimited (*.csv)" and b.isChecked():
        self.seperator2F = ','
        self.seperatorname2F = '.csv'
    if b.text() == "Tab Delimited (*.txt)" and b.isChecked():
        self.seperator2F = '\t'
        self.seperatorname2F = '.txt'
def btn3stateF(self, b):
    if b.isChecked() and b.text() in ("Rainfall/Gamma", "Temperature/Normal", "Solar Radiation/Beta"):
        self.methodnameF = b.text()
def start_correctionF(self):
self.started = True
self.BiasCorrectF()
def stop_correctionF(self):
if self.started:
self.started = False
QMessageBox.information(self, "Information", "Bias correction is aborted.")
def ShowHideF(self):
if self.show_hideF.text() == "Hide Details" and self.show_hideF.isChecked() == False:
self.textboxF.hide()
self.textbox.hide()
## self.setFixedSize(700,372)
ShowHide(self.show_hideF.text())
ShowHide(self.show_hide.text())
self.show_hideF.setText('Show Details')
self.show_hide.setText('Show Details')
if self.show_hideF.text() == "Show Details" and self.show_hideF.isChecked() == True:
self.textboxF.show()
self.textbox.show()
## self.setFixedSize(700,620)
ShowHide(self.show_hideF.text())
ShowHide(self.show_hide.text())
self.show_hideF.setText('Hide Details')
self.show_hide.setText('Hide Details')
def BiasCorrectF(self):
if self.ObsfileF.text() == "":
    QMessageBox.critical(self, "Message", "File with observed historical climate data (*.csv or *.txt) is not given.")
    self.started = False
    return
if self.ModHfileF.text() == "":
    QMessageBox.critical(self, "Message", "File with GCM historical climate projections (*.csv or *.txt) is not given.")
    self.started = False
    return
if self.ModFfileF.text() == "":
    QMessageBox.critical(self, "Message", "File with GCM future climate projections (*.csv or *.txt) is not given.")
    self.started = False
    return
if self.outputfile_locationF.text() == "":
    QMessageBox.critical(self, "Message", "Folder to save bias corrected GCM outputs is not given.")
    self.started = False
    return
try:
##    sepF = self.seperator
##    sepnameF = self.seperatorname
    sep2F = self.seperator2F
    sepname2F = self.seperatorname2F
except AttributeError:
    QMessageBox.critical(self, "Message", "Output format is not defined.")
    self.started = False
    return
try:
    method = self.methodnameF
except AttributeError:
    QMessageBox.critical(self, "Message", "Variable/Distribution is not defined.")
    self.started = False
    return
self.textboxF.setText("")
start = time.time()
self.statusF.setText('Status: Correcting.')
## self.progressbarfinalF.setMinimum(0)
## self.progressbarfinalF.setValue(0)
self.progressbarF.setMinimum(0)
self.progressbarF.setValue(0)
FobsH = self.ObsfileF.text()
FmodH = self.ModHfileF.text()
FmodF = self.ModFfileF.text()
ObsHData, ModHData, ModFData, CorrectedData = [], [], [], []
with open(FobsH) as f:
line = [line for line in f]
for i in range(len(line)):
if FobsH.endswith('.csv'):
ObsHData.append([word for word in line[i].split(",") if word])
if FobsH.endswith('.txt'):
ObsHData.append([word for word in line[i].split("\t") if word])
lat = [float(ObsHData[0][c]) for c in range(1,len(ObsHData[0]))]
lon = [float(ObsHData[1][c]) for c in range(1,len(ObsHData[0]))]
Latitude = []
Longitude = []
with open(FmodH) as f:
line = [line for line in f]
for i in range(len(line)):
if FmodH.endswith('.csv'):
ModHData.append([word for word in line[i].split(",") if word])
if FmodH.endswith('.txt'):
ModHData.append([word for word in line[i].split("\t") if word])
with open(FmodF) as f:
line = [line for line in f]
for i in range(len(line)):
if FmodF.endswith('.csv'):
ModFData.append([word for word in line[i].split(",") if word])
if FmodF.endswith('.txt'):
ModFData.append([word for word in line[i].split("\t") if word])
DateObsH = [ObsHData[r][0] for r in range(len(ObsHData))]
DateModH = [ModHData[r][0] for r in range(len(ModHData))]
DateModF = [ModFData[r][0] for r in range(len(ModFData))]
OutPath = self.outputfile_locationF.text()
CorrectedData.append(DateModF)
YObsH = int(DateObsH[2][-4:])
YModH = int(DateModH[2][-4:])
YModF = int(DateModF[2][-4:])
app.processEvents()
if len(lat)>1:
random_count = np.random.randint(len(lat))
else:
random_count = 0
fig = plt.figure(figsize=(15,7))
plt.style.use('ggplot')
## plt.style.use('fivethirtyeight')
for j in range(len(lat)):
ObsH = [float(ObsHData[r][j+1]) for r in range(2,len(ObsHData))]
ModH = [float(ModHData[r][j+1]) for r in range(2,len(ModHData))]
ModF = [float(ModFData[r][j+1]) for r in range(2,len(ModFData))]
DateObsH = [date(YObsH,1,1)+timedelta(i) for i in range(len(ObsH))]
DateModH = [date(YModH,1,1)+timedelta(i) for i in range(len(ModH))]
DateModF = [date(YModF,1,1)+timedelta(i) for i in range(len(ModF))]
if method == 'Rainfall/Gamma' and self.started == True:
DateH=DateModH
DateF=DateModF
ModH_Month=[]
ModF_Month=[]
Cor_Monthwise = []
ObsH_Monthwise = [[] for m in range(12)]
ObsH_MonthFreq = [[] for m in range(12)]
ModH_Monthwise = [[] for m in range(12)]
ModH_MonthFreq = [[] for m in range(12)]
ModF_Monthwise = [[] for m in range(12)]
ModF_MonthFreq = [[] for m in range(12)]
DateH_Monthwise= [[] for m in range(12)]
DateF_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(ObsH)):
if DateH[i].month == m+1:
DateH_Monthwise[m].append(DateH[i])
ObsH_Monthwise[m].append(ObsH[i])
ModH_Monthwise[m].append(ModH[i])
for m in range(12):
for i in range(len(ModF)):
if DateF[i].month == m+1:
DateF_Monthwise[m].append(DateF[i])
ModF_Monthwise[m].append(ModF[i])
for m in range(12):
ModH_Month.append(sorted_values(ObsH_Monthwise[m],ModH_Monthwise[m]))
ModF_Month.append(sorted_values_thresh(ModH_Month[m], ModF_Monthwise[m]))
ModH_Monthwise = ModH_Month
ModF_Monthwise = ModF_Month
for m in range(12):
for i in range(len(ModH_Monthwise[m])):
if ModH_Monthwise[m][i]>0:
ModH_MonthFreq[m].append(ModH_Monthwise[m][i])
if ObsH_Monthwise[m][i]>0:
ObsH_MonthFreq[m].append(ObsH_Monthwise[m][i])
for i in range(len(ModF_Monthwise[m])):
if ModF_Monthwise[m][i]>0:
ModF_MonthFreq[m].append(ModF_Monthwise[m][i])
nplot=1
for m in range(12):
Cor = []
if len(ModH_MonthFreq[m])>0 and len(ObsH_MonthFreq[m])>0 and len(ModF_MonthFreq[m])>0:
Moh, Mgh, Mgf, Voh, Vgh, Vgf = np.mean(ObsH_MonthFreq[m]), np.mean(ModH_MonthFreq[m]), np.mean(ModF_MonthFreq[m]), np.std(ObsH_MonthFreq[m])**2, np.std(ModH_MonthFreq[m])**2, np.std(ModF_MonthFreq[m])**2
if not any(param<0.000001 for param in [Moh, Mgh, Mgf, Voh, Vgh, Vgf]):
aoh, boh, agh, bgh, agf, bgf = Moh**2/Voh, Voh/Moh, Mgh**2/Vgh, Vgh/Mgh, Mgf**2/Vgf, Vgf/Mgf
loh, lgh, lgf = 0, 0, 0
else:
aoh, loh, boh = gamma.fit(ObsH_MonthFreq[m], loc=0)
agh, lgh, bgh = gamma.fit(ModH_MonthFreq[m], loc=0)
agf, lgf, bgf = gamma.fit(ModF_MonthFreq[m], loc=0)
# CDF of ModF with ModH parameters
Prob_ModF_ParaModH = gamma.cdf(ModF_Monthwise[m],agh, scale=bgh)
# Invert Prob_ModF_ParaModH with the ObsH parameters to get corrected future model values
Cor = gamma.ppf(Prob_ModF_ParaModH, aoh, scale=boh)
else:
for i in range(len(ModF_Monthwise[m])):
Cor.append(0)
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obsH_cdf = gamma.cdf(ObsH_Monthwise[m], aoh, loh, boh)
modF_cdf = gamma.cdf(ModF_Monthwise[m], agf, lgf, bgf)
Mc, Vc = np.mean(Cor), np.std(Cor)**2
if not any(param<0.000001 for param in [Mc, Vc]):
acf, bcf = Mc**2/Vc, Vc/Mc
lcf = 0
else:
acf, lcf, bcf = gamma.fit(Cor)
cor_cdf = gamma.cdf(Cor, acf, lcf, bcf)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(ObsH_Monthwise[m], obsH_cdf, '.b')
mod_line, = ax.plot(ModF_Monthwise[m], modF_cdf, '.r')  # avoid shadowing the month index m
cor_line, = ax.plot(Cor, cor_cdf, '.g')
nplot = nplot + 1
fig.legend([o, mod_line, cor_line, (o, mod_line, cor_line)], ['Observed', 'Before Correction', 'After Correction'], ncol=3, loc=8, frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
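# A compact, self-contained sketch of the Rainfall/Gamma correction above:
# gamma parameters come from the method of moments when the moments are well
# away from zero, otherwise from gamma.fit(); the mapping is CDF-under-model
# followed by PPF-under-observations. Names and thresholds are illustrative.
def quantile_map_gamma(mod_values, obs_wet, mod_wet):
    import numpy as np
    from scipy.stats import gamma
    Mo, Vo = np.mean(obs_wet), np.std(obs_wet) ** 2
    Mg, Vg = np.mean(mod_wet), np.std(mod_wet) ** 2
    if min(Mo, Vo, Mg, Vg) > 1e-6:
        ao, bo = Mo ** 2 / Vo, Vo / Mo   # shape, scale by moments
        ag, bg = Mg ** 2 / Vg, Vg / Mg
    else:
        ao, _, bo = gamma.fit(obs_wet, loc=0)
        ag, _, bg = gamma.fit(mod_wet, loc=0)
    prob = gamma.cdf(mod_values, ag, scale=bg)   # probability under the model fit
    return gamma.ppf(prob, ao, scale=bo)         # corrected values under the observed fit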
if method =='Temperature/Normal' and self.started == True:
DateH=DateModH
DateF=DateModF
Cor_Monthwise = []
ObsH_Monthwise = [[] for m in range(12)]
ModH_Monthwise = [[] for m in range(12)]
ModF_Monthwise = [[] for m in range(12)]
DateH_Monthwise= [[] for m in range(12)]
DateF_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(ObsH)):
if DateH[i].month == m+1:
DateH_Monthwise[m].append(DateH[i])
ObsH_Monthwise[m].append(ObsH[i])
ModH_Monthwise[m].append(ModH[i])
for m in range(12):
for i in range(len(ModF)):
if DateF[i].month == m+1:
DateF_Monthwise[m].append(DateF[i])
ModF_Monthwise[m].append(ModF[i])
nplot=1
for m in range(12):
Cor = []
Moh, Mgh, Mgf, Soh, Sgh, Sgf = np.mean(ObsH_Monthwise[m]), np.mean(ModH_Monthwise[m]), np.mean(ModF_Monthwise[m]), np.std(ObsH_Monthwise[m]), np.std(ModH_Monthwise[m]), np.std(ModF_Monthwise[m])
Prob_ModF = norm.cdf(ModF_Monthwise[m], Mgf, Sgf)
Inv_of_Prob_ModF_ParaObsH = norm.ppf(Prob_ModF, Moh, Soh)
Inv_of_Prob_ModF_ParaModH = norm.ppf(Prob_ModF, Mgh, Sgh)
for i in range(len(ModF_Monthwise[m])):
Cor.append(ModF_Monthwise[m][i]+Inv_of_Prob_ModF_ParaObsH[i]-Inv_of_Prob_ModF_ParaModH[i])
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obsH_cdf = norm.cdf(ObsH_Monthwise[m], Moh, Soh)
modF_cdf = norm.cdf(ModF_Monthwise[m], Mgf, Sgf)
Mcf, Scf = norm.fit(Cor)
cor_cdf = norm.cdf(Cor, Mcf, Scf)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(ObsH_Monthwise[m], obsH_cdf, '.b')
mod_line, = ax.plot(ModF_Monthwise[m], modF_cdf, '.r')  # avoid shadowing the month index m
cor_line, = ax.plot(Cor, cor_cdf, '.g')
nplot = nplot + 1
fig.legend([o, mod_line, cor_line, (o, mod_line, cor_line)], ['Observed', 'Before Correction', 'After Correction'], ncol=3, loc=8, frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
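# A compact sketch of the Temperature/Normal future correction above: each
# future value is shifted by the gap between the observed and the historical
# model quantile at its own (future-fitted) probability. Names are illustrative.
def correct_normal_additive(mod_future, obs_hist, mod_hist):
    import numpy as np
    from scipy.stats import norm
    prob = norm.cdf(mod_future, np.mean(mod_future), np.std(mod_future))
    shift = (norm.ppf(prob, np.mean(obs_hist), np.std(obs_hist))
             - norm.ppf(prob, np.mean(mod_hist), np.std(mod_hist)))
    return np.asarray(mod_future) + shift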
if method =='Solar Radiation/Beta' and self.started == True:
ModH_Month=[]
Cor_Monthwise = []
ObsH_Monthwise = [[] for m in range(12)]
ModH_Monthwise = [[] for m in range(12)]
ModF_Monthwise = [[] for m in range(12)]
DateObsH_Monthwise= [[] for m in range(12)]
DateModH_Monthwise= [[] for m in range(12)]
DateModF_Monthwise= [[] for m in range(12)]
for m in range(12):
for i in range(len(ObsH)):
if DateObsH[i].month == m+1:
DateObsH_Monthwise[m].append(DateObsH[i])
ObsH_Monthwise[m].append(ObsH[i])
for m in range(12):
for i in range(len(ModH)):
if DateModH[i].month == m+1:
DateModH_Monthwise[m].append(DateModH[i])
ModH_Monthwise[m].append(ModH[i])
for m in range(12):
for i in range(len(ModF)):
if DateModF[i].month == m+1:
DateModF_Monthwise[m].append(DateModF[i])
ModF_Monthwise[m].append(ModF[i])
nplot=1
for m in range(12):
Cor = []
# Month-wise minimum and maximum of each series (ObsH, ModH, ModF)
ohMin, ohMax = min(ObsH_Monthwise[m]), max(ObsH_Monthwise[m])
ghMin, ghMax = min(ModH_Monthwise[m]), max(ModH_Monthwise[m])
gfMin, gfMax = min(ModF_Monthwise[m]), max(ModF_Monthwise[m])
# Month-wise mean and variance of each rescaled series (ObsH, ModH, ModF)
Moh = (np.mean(ObsH_Monthwise[m])-ohMin)/(ohMax - ohMin)
Mgh = (np.mean(ModH_Monthwise[m])-ghMin)/(ghMax - ghMin)
Mgf = (np.mean(ModF_Monthwise[m])-gfMin)/(gfMax - gfMin)
Voh = np.std(ObsH_Monthwise[m])**2/(ohMax - ohMin)**2
Vgh = np.std(ModH_Monthwise[m])**2/(ghMax - ghMin)**2
Vgf = np.std(ModF_Monthwise[m])**2/(gfMax - gfMin)**2
# Method-of-moments a, b parameters of the beta distribution for each series (ObsH, ModH, ModF)
aoh, agh, agf = -Moh*(Voh + Moh**2 - Moh)/Voh, -Mgh*(Vgh + Mgh**2 - Mgh)/Vgh, -Mgf*(Vgf + Mgf**2 - Mgf)/Vgf
boh, bgh, bgf = aoh*(1 - Moh)/Moh, agh*(1 - Mgh)/Mgh, agf*(1 - Mgf)/Mgf
# Rescale all series to the (0, 1) interval
TransOH = [(ObsH_Monthwise[m][i]-ohMin)/(ohMax-ohMin) for i in range(len(ObsH_Monthwise[m]))]
TransGH = [(ModH_Monthwise[m][i]-ghMin)/(ghMax-ghMin) for i in range(len(ModH_Monthwise[m]))]
TransGF = [(ModF_Monthwise[m][i]-gfMin)/(gfMax-gfMin) for i in range(len(ModF_Monthwise[m]))]
# CDF of ModF with ModH parameters
Prob_ModF_ParaModH = beta.cdf(TransGF, agh, bgh)
# Invert Prob_ModF_ParaModH with the ObsH parameters to get corrected transformed values of the future model series
TransC = beta.ppf(Prob_ModF_ParaModH, aoh, boh)
Cor = [TransC[i]*(ohMax-ohMin)+ohMin for i in range(len(TransC))]
for c in Cor:
Cor_Monthwise.append('%.1f'%c)
DateF_Monthwise = DateModF_Monthwise
if j == random_count:
ax = fig.add_subplot(3,4,nplot)
obsH_cdf = beta.cdf(TransOH, aoh, boh)
modF_cdf = beta.cdf(TransGF, agf, bgf)
Mcf = (np.mean(Cor)-min(Cor))/(max(Cor)-min(Cor))
Vcf = np.std(Cor)**2/(max(Cor)-min(Cor))**2
acf = -Mcf*(Vcf + Mcf**2 - Mcf)/Vcf
bcf = acf*(1 - Mcf)/Mcf
cor_cdf = beta.cdf(TransC, acf, bcf)
ax.set_title('Month: '+str(m+1), fontsize=12)
o, = ax.plot(ObsH_Monthwise[m], obsH_cdf, '.b')
mod_line, = ax.plot(ModF_Monthwise[m], modF_cdf, '.r')  # avoid shadowing the month index m
cor_line, = ax.plot(Cor, cor_cdf, '.g')
nplot = nplot + 1
fig.legend([o, mod_line, cor_line, (o, mod_line, cor_line)], ['Observed', 'Before Correction', 'After Correction'], ncol=3, loc=8, frameon=False, fontsize=14)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.suptitle('CDF Plots of ' + method.split('/')[0] + ' for Randomly Selected Lat: '+str(lat[j])+' Lon: '+str(lon[j]),fontsize=16)
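# An end-to-end sketch of the Solar Radiation/Beta future correction above:
# rescale to (0, 1), fit beta parameters by moments, map through the
# historical-model CDF, invert with the observed PPF, then rescale back to
# observed units. Names are illustrative, not part of the original program.
def quantile_map_beta(mod_future, obs_hist, mod_hist):
    import numpy as np
    from scipy.stats import beta

    def moments(values):
        vmin, vmax = min(values), max(values)
        t = (np.asarray(values) - vmin) / (vmax - vmin)
        M, V = np.mean(t), np.std(t) ** 2
        a = -M * (V + M ** 2 - M) / V
        return a, a * (1 - M) / M, vmin, vmax

    ao, bo, oMin, oMax = moments(obs_hist)
    ag, bg, gMin, gMax = moments(mod_hist)
    fMin, fMax = min(mod_future), max(mod_future)
    trans_f = (np.asarray(mod_future) - fMin) / (fMax - fMin)
    prob = beta.cdf(trans_f, ag, bg)                  # probability under the historical-model fit
    return beta.ppf(prob, ao, bo) * (oMax - oMin) + oMin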
Date_Month=[]
for m in range(12):
for i in range(len(DateF_Monthwise[m])):
Date_Month.append(DateF_Monthwise[m][i])
DateCorr_Dict = dict(zip(Date_Month,Cor_Monthwise))
SortedCorr = sorted(DateCorr_Dict.items())
CorrectedData.append([lat[j],lon[j]]+[v for k,v in SortedCorr])
app.processEvents()
self.scrollbarF.setValue(self.scrollbarF.maximum())
self.progressbarF.setValue(j)
## self.progressbarfinalF.setValue(j)
self.progressbarF.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinalF.setMaximum(len(lat)+len(CorrectedData[0])-2)
self.textboxF.append('Corrected '+ str(j+1)+' out of '+str(len(lat))+':\tLat: %.1f'%lat[j]+'\tLon: %.1f'%lon[j])
self.statusF.setText('Status: Writing Bias Corrected Data to File.')
self.textboxF.append('\nWriting Bias Corrected Data to File.')
app.processEvents()
if sep2F == ',':
f = open(OutPath + '\\Bias Corrected ' + method.split('/')[0] + ' ' + str(YModF) + '.csv', 'w')
for c in range(len(CorrectedData[0])):
app.processEvents()
if self.started==True:
f.write(','.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
f.write('\n')
if (c+1) % 10 == 1 and (c+1) % 100 != 11:
    self.textboxF.append("Writing %dst day data" % (c+1))
elif (c+1) % 10 == 2 and (c+1) % 100 != 12:
    self.textboxF.append("Writing %dnd day data" % (c+1))
elif (c+1) % 10 == 3 and (c+1) % 100 != 13:
    self.textboxF.append("Writing %drd day data" % (c+1))
else:
    self.textboxF.append("Writing %dth day data" % (c+1))
app.processEvents()
self.scrollbarF.setValue(self.scrollbarF.maximum())
self.progressbarF.setValue(len(lat)+c+1)
## self.progressbarfinalF.setValue(len(lat)+c+1)
self.progressbarF.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinalF.setMaximum(len(lat)+len(CorrectedData[0])-2)
if c == len(CorrectedData[0])-1:
end = time.time()
t = end-start
self.statusF.setText('Status: Completed.')
self.textboxF.append("\nTotal Time Taken: %.2d:%.2d:%.2d" % (t/3600,(t%3600)/60,t%60))
QMessageBox.information(self, "Information", "Bias Correction is completed.")
f.close()
if sep2F == '\t':
f = open(OutPath + '\\Bias Corrected ' + method.split('/')[0] + ' ' + str(YModF) + '.txt', 'w')
for c in range(len(CorrectedData[0])):
app.processEvents()
if self.started==True:
f.write('\t'.join(str(CorrectedData[r][c]) for r in range(len(CorrectedData))))
f.write('\n')
if (c+1) % 10 == 1 and (c+1) % 100 != 11:
    self.textboxF.append("Writing %dst day data" % (c+1))
elif (c+1) % 10 == 2 and (c+1) % 100 != 12:
    self.textboxF.append("Writing %dnd day data" % (c+1))
elif (c+1) % 10 == 3 and (c+1) % 100 != 13:
    self.textboxF.append("Writing %drd day data" % (c+1))
else:
    self.textboxF.append("Writing %dth day data" % (c+1))
app.processEvents()
self.scrollbarF.setValue(self.scrollbarF.maximum())
self.progressbarF.setValue(len(lat)+c+1)
self.progressbarF.setMaximum(len(lat)+len(CorrectedData[0])-2)
## self.progressbarfinalF.setValue(len(lat)+c+1)
## self.progressbarfinalF.setMaximum(len(lat)+len(CorrectedData[0])-2)
if c == len(CorrectedData[0])-1:
end = time.time()
t = end-start
self.statusF.setText('Status: Completed.')
self.textboxF.append("\nTotal Time Taken: %.2d:%.2d:%.2d" % (t/3600,(t%3600)/60,t%60))
QMessageBox.information(self, "Information", "Bias Correction is completed.")
f.close()
def ShowPlotsF(self):
plt.show()
class BiasCorrection(QWidget):
def __init__(self, parent=None):
super(BiasCorrection,self).__init__(parent)
grid = QGridLayout()
self.m_titlebar=TitleBar(self)
grid.addWidget(self.m_titlebar, 0, 0)
self.tabs = HFTab(self)
grid.addWidget(self.tabs, 1, 0)
self.setLayout(grid)
grid.setContentsMargins(0,0,0,0)
## self.setWindowTitle("Weather Data Interpolator")
self.setFocus()
self.adjustSize()
self.Widget_Width = self.frameGeometry().width()
self.Widget_Height = self.frameGeometry().height()
## self.setFixedSize(750,354)
self.setFixedSize(750,self.Widget_Height)
## self.move(350,100)
self.setWindowFlags(Qt.FramelessWindowHint)
## self.setWindowFlags(Qt.WindowMaximizeButtonHint)
started = False
app = QApplication(sys.argv)
widget = BiasCorrection()
app_icon = QIcon()
app_icon.addFile('Interpolation-2.ico', QSize(40,40))
app.setWindowIcon(app_icon)
pixmap = QPixmap("Splash_CDBC.png")
splash = QSplashScreen(pixmap)
splash.show()
screen_resolution = app.desktop().screenGeometry()
width, height = screen_resolution.width(), screen_resolution.height()
widget.move(width // 2 - widget.width() // 2, height // 2 - widget.height() // 2)
time.sleep(2)
def ShowHide(text):
    if text == 'Show Details':
        # reuse the existing widget instead of constructing a new BiasCorrection each call
        widget.setFixedSize(750, widget.Widget_Height + widget.Widget_Height * 2 // 3)
        print(widget.height())
##        widget.setFixedSize(750,620)
    if text == 'Hide Details':
        widget.setFixedSize(750, widget.Widget_Height + 1)
        print(widget.height())
##        widget.setFixedSize(750,354)
##widget.setFixedWidth(500)
##widget.setFixedHeight(400)
widget.show()
splash.finish(widget)
app.exec_()
| 45.76388
| 228
| 0.526146
| 8,150
| 71,712
| 4.560982
| 0.081104
| 0.022409
| 0.014796
| 0.01598
| 0.618826
| 0.583934
| 0.546379
| 0.525638
| 0.501345
| 0.465942
| 0
| 0.031493
| 0.336262
| 71,712
| 1,566
| 229
| 45.793103
| 0.749443
| 0.077755
| 0
| 0.443099
| 0
| 0.006457
| 0.118288
| 0.004236
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037127
| false
| 0
| 0.008071
| 0
| 0.05569
| 0.001614
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ca4fb77d1058786e6c3813cfbd46b9161c2b28a
| 3,473
|
py
|
Python
|
lagom/core/es/base_es_master.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
lagom/core/es/base_es_master.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
lagom/core/es/base_es_master.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
from lagom.core.multiprocessing import BaseIterativeMaster
class BaseESMaster(BaseIterativeMaster):
"""
Base class for the master of parallelized evolution strategies (ES).
It internally defines an ES algorithm.
In each generation, it distributes all sampled solution candidates, one per worker,
to compute a list of objective function values and then updates the ES.
For more details about how the master class works, please refer
to the documentation of BaseIterativeMaster.
All subclasses should implement the following functions:
1. make_es(self)
2. _process_es_result(self, result)
"""
def __init__(self,
num_iteration,
worker_class,
num_worker,
init_seed=0,
daemonic_worker=None):
super().__init__(num_iteration=num_iteration,
worker_class=worker_class,
num_worker=num_worker,
init_seed=init_seed,
daemonic_worker=daemonic_worker)
# Create ES solver
self.es = self.make_es()
# It is better to force popsize to be number of workers
assert self.es.popsize == self.num_worker
def make_es(self):
"""
User-defined function to create an ES algorithm.
Returns:
es (BaseES): An instantiated object of an ES class.
Examples:
cmaes = CMAES(mu0=[3]*100,
std0=0.5,
popsize=12)
return cmaes
"""
raise NotImplementedError
def make_tasks(self, iteration):
# ES samples new candidate solutions
solutions = self.es.ask()
# Record iteration number, for logging in _process_workers_result()
# And it also keeps API untouched for assign_tasks() in non-iterative Master class
self.generation = iteration
return solutions
def _process_workers_result(self, tasks, workers_result):
# Rename: in the ES context, each task is a solution candidate to evaluate
solutions = tasks
# Unpack function values from workers results, [solution_id, function_value]
# Note that the workers' results are already sorted in ascending order of task ID
function_values = [result[1] for result in workers_result]
# Update ES
self.es.tell(solutions, function_values)
# Obtain results from ES
result = self.es.result
# Process the ES result
self._process_es_result(result)
def _process_es_result(self, result):
"""
User-defined function to process the result from ES.
Note that the user can use the class member `self.generation`, which indicates the index of
the current generation; it is automatically incremented each time a set of
solution candidates is sampled.
Args:
result (dict): A dictionary of result returned from es.result.
Examples:
best_f_val = result['best_f_val']
if self.generation == 0 or (self.generation+1) % 100 == 0:
print(f'Best function value at generation {self.generation+1}: {best_f_val}')
"""
raise NotImplementedError
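# A minimal subclass sketch (not part of the original module) showing the two
# hooks left abstract above. CMAES mirrors the docstring example; its import
# path and constructor arguments here are assumptions, and num_worker is
# assumed to be exposed by BaseIterativeMaster.
class PrintingESMaster(BaseESMaster):
    def make_es(self):
        from lagom.core.es import CMAES  # assumed location of CMAES
        # popsize must equal num_worker to satisfy the assertion in __init__
        return CMAES(mu0=[3]*100, std0=0.5, popsize=self.num_worker)

    def _process_es_result(self, result):
        if self.generation == 0 or (self.generation + 1) % 100 == 0:
            print(f"Best function value at generation {self.generation + 1}: {result['best_f_val']}")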
| 37.344086
| 98
| 0.600921
| 397
| 3,473
| 5.115869
| 0.382872
| 0.027573
| 0.023634
| 0.01871
| 0.024618
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009182
| 0.341492
| 3,473
| 93
| 99
| 37.344086
| 0.878881
| 0.520299
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.172414
| false
| 0
| 0.034483
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ca513ca1cc8091c31b7381ae44ccedd1283fc01
| 1,096
|
py
|
Python
|
Roman_Morozov_dz_3/task_5.py
|
Wern-rm/2074_GB_Python
|
f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646
|
[
"MIT"
] | null | null | null |
Roman_Morozov_dz_3/task_5.py
|
Wern-rm/2074_GB_Python
|
f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646
|
[
"MIT"
] | null | null | null |
Roman_Morozov_dz_3/task_5.py
|
Wern-rm/2074_GB_Python
|
f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646
|
[
"MIT"
] | null | null | null |
"""
Implement a function get_jokes() that returns n jokes, each built from three random words taken from three lists (one word from each list):
"""
import random
nouns = ["car", "forest", "fire", "city", "house"]
adverbs = ["today", "yesterday", "tomorrow", "the day before yesterday", "at night"]
adjectives = ["cheerful", "bright", "green", "utopian", "soft"]
def get_jokes(count, repeat=True, **kwargs) -> list[str]:
result: list[str] = []
if repeat:
for i in range(count):
result.append(' '.join(random.choice(kwargs[j]) for j in kwargs.keys()))
else:
for i in range(count):
noun, adverb, adjective = [random.choice(kwargs[j]) for j in kwargs.keys()]
result.append(' '.join([noun, adverb, adjective]))
return result
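# A small variant sketch (not part of the original task): each word list
# contributes any given word at most once across the generated jokes.
# Assumes count does not exceed the length of the shortest list.
def get_jokes_unique(count, **kwargs):
    import random
    picks = [random.sample(words, count) for words in kwargs.values()]
    return [' '.join(combo) for combo in zip(*picks)]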
if __name__ == '__main__':
print(get_jokes(count=1, repeat=True, nouns=nouns, adverbs=adverbs, adjectives=adjectives))
print(get_jokes(count=3, repeat=False, nouns=nouns, adverbs=adverbs, adjectives=adjectives))
print(get_jokes(count=5, repeat=True, nouns=nouns, adverbs=adverbs, adjectives=adjectives))
| 40.592593
| 140
| 0.666058
| 139
| 1,096
| 5.158273
| 0.489209
| 0.055788
| 0.072524
| 0.075314
| 0.404463
| 0.359833
| 0.359833
| 0.359833
| 0.270572
| 0.172943
| 0
| 0.003311
| 0.173358
| 1,096
| 27
| 141
| 40.592593
| 0.788079
| 0.127737
| 0
| 0.111111
| 0
| 0
| 0.107482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ca67e9442436a3a4c05f92ccc99c1b4150df427
| 11,217
|
py
|
Python
|
tools.py
|
akerestely/nonlinearBestFit
|
e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4
|
[
"MIT"
] | 1
|
2019-10-09T07:39:55.000Z
|
2019-10-09T07:39:55.000Z
|
tools.py
|
akerestely/nonlinearBestFit
|
e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4
|
[
"MIT"
] | null | null | null |
tools.py
|
akerestely/nonlinearBestFit
|
e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
np.random.seed(421)
def hCG(x: np.ndarray, A: float, B: float, alpha: float):
return A * np.exp(-alpha * x) + B
def gen_rand_points(n: int, A: float = 1000, B: float = 3, alpha: float = 0.01, noise: float = 2, consecutive: bool = False):
"""
:param n: number of points to generate
:param A, B, alpha: parameters to hCG function
:param noise: randomly add this much to the result of the hCG function
"""
from numpy.random import random
sparsity = 1
if consecutive is False:
x = random(n) * n * sparsity
x.sort() # just for plot visual effect; does not change results
else :
x = np.linspace(0, n-1, n) * sparsity
y = hCG(x, A, B, alpha)
ynoise = random(n) * noise - noise / 2
y += ynoise
return x, y
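# Illustrative usage (not in the original module): sample 20 noisy points from
# the default decay curve and check their shape. All argument values are examples.
def _demo_gen_rand_points():
    x, y = gen_rand_points(20, A=1000, B=3, alpha=0.01, noise=2)
    assert len(x) == len(y) == 20
    return x, y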
def gen_rand_points_and_plot(n: int, A: float, B: float, alpha: float, noise: float, consecutive: bool):
x, y = gen_rand_points(n, A=A, B=B, alpha=alpha, noise=noise, consecutive=consecutive)
import matplotlib.pyplot as plt
plt.scatter(x, y)
plt.xlabel("$time$")
plt.ylabel("$hCG(time)$")
plt.show()
return x, y
def load_data(required_data_points: int = 3) -> pd.DataFrame:
url = "data/measurements.csv"
data = pd.read_csv(url)
# remove unused columns
data = data.loc[:, data.columns.str.startswith('MTH')]
def name_to_weekcount(s:str) -> int:
tokens = s.split('-')
import re
mth = int(re.search(r'\d+', tokens[0]).group(0)) - 1
wk = 0
if len(tokens) != 1:
wk = int(re.search(r'\d+', tokens[1]).group(0)) - 1
return mth * 4 + wk
# rename columns
data.columns = pd.Series(data.columns).apply(name_to_weekcount)
# discard entries which have less than required_data_points measurements
data = data[data.count(axis=1) > required_data_points]
return data
def get_x_y(data: pd.DataFrame, row: int) -> (np.ndarray, np.ndarray) :
my_data = data.loc[row:row, :].dropna(axis=1)
x = np.array(my_data.columns[:]) # time
y = my_data.iloc[0,:].values # measurement
return x, y
def plot_real_data(data, from_row = None, to_row = None):
figsize = None
if from_row is not None and to_row is not None:
count = to_row - from_row
if count > 1:
figsize = (10, 5 * count)
data.T.iloc[:, from_row:to_row].dropna(axis=0).plot(kind="line", marker='o', subplots=True, figsize=figsize)
def plot_function(func, x: np.ndarray, y: np.ndarray):
import matplotlib.pyplot as plt
range_param = np.linspace(0, 1)
pt = [func(t, x, y) for t in range_param]
plt.plot(range_param, pt)
plt.show()
def print_rmse_methods(x: np.ndarray, y: np.ndarray, paramsList: list):
"""
param paramsList: array of tuples, where tuple contains A, B and alpha
"""
from sklearn.metrics import mean_squared_error
from math import sqrt
for i, params in enumerate(paramsList):
rmse = sqrt(mean_squared_error(y, hCG(x, *params)))
print(f"Method {i} RMSE: {rmse}")
def plot_methods(x: np.ndarray, y: np.ndarray, paramsList:list , paramsNames: list = [], data_id: str="", showPlot: bool = True):
"""
param paramsList: array of tuples, where tuple contains A, B and alpha
param paramsNames: array of strings, where each string represents the name of the corresponding param tuple.
The names will appear on the plot. Optional, in which case the name will be the index in the array.
"""
from sklearn.metrics import mean_squared_error
from math import sqrt
import matplotlib.pyplot as plt
plt.xlabel(r"$time$")
plt.ylabel(r"$hCG(time)$")
plt.plot(x, y, 'bo', label=f"data {data_id}")
#print(paramsNames)
for i, params in enumerate(paramsList):
rmse = sqrt(mean_squared_error(y, hCG(x, *params)))
name = paramsNames[i] if i < len(paramsNames) else ("Method " + str(i))
plt.plot(x, hCG(x, *params),
label=f'{name}: A=%5.2f, B=%5.2f, alpha=%5.2f, rmse=%5.2f' % (*params, rmse))
plt.legend()
if showPlot:
plt.show()
# print_rmse_methods(x, y, params, paramsCalc)
def plot_results(x: np.ndarray, y: np.ndarray, ptsStart: int = 0, ptsEnd: int = None, ptsTrain: int = None, data_id: str="", showPlot:bool = True, allAlgorithms:bool = True):
"""
:param ptsStart: use x, y values starting from this point
:param ptsEnd: use x, y values ending at this point
:param ptsTrain: use this much x, y values for training starting from ptsStart
"""
ptsEnd = ptsEnd or len(x)
ptsTrain = ptsTrain or (ptsEnd - ptsStart)
if ptsStart + ptsTrain > ptsEnd:
raise ValueError("Invalid interval for points")
x_train = x[ptsStart : ptsStart + ptsTrain]
y_train = y[ptsStart : ptsStart + ptsTrain]
paramsList = []
paramsNames = []
if allAlgorithms:
try:
from scipy.optimize import curve_fit
popt, _ = curve_fit(hCG, x_train, y_train) # uses Levenberg-Marquardt iterative method
paramsList.append(tuple(popt))
paramsNames.append("Iterative")
except:
pass
try:
from bestfitte import best_fit
paramsList.append(best_fit(x_train, y_train))
paramsNames.append("BestFit")
except:
pass
if allAlgorithms:
try:
from pseloglin import fit
paramsList.append(fit(x_train, y_train))
paramsNames.append("PseLogLin")
except:
pass
plot_methods(x[ptsStart:ptsEnd], y[ptsStart:ptsEnd], paramsList, paramsNames, data_id, showPlot)
def plot_and_get_real_data(row: int) -> (np.ndarray, np.ndarray):
data = load_data()
plot_real_data(data, row, row+1)
return get_x_y(data, row)
def get_real_data(row: int) -> (np.ndarray, np.ndarray):
data = load_data()
return get_x_y(data, row)
def plot_with_inner_plot(x: np.ndarray, y: np.ndarray, limX1: float, limX2: float, limY1: float, limY2: float, zoom: float = 2.5, loc='upper right'):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter(x, y)
plt.xlabel("$time$")
plt.ylabel("$hCG(time)$")
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
axins = zoomed_inset_axes(ax, zoom, loc=loc)
axins.scatter(x, y)
axins.set_xlim(limX1, limX2)
axins.set_ylim(limY1, limY2)
#plt.yticks(visible=False)
#plt.xticks(visible=False)
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
def find_and_plot_best_fit(x: np.ndarray, y: np.ndarray):
import bestfitte
A, B, alpha = bestfitte.best_fit(x, y)
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(y, hCG(x, A, B, alpha)))
import matplotlib.pyplot as plt
plt.scatter(x, y, label='data')
plt.plot(x, hCG(x, A, B, alpha),
label=f'A=%5.2f, B=%5.2f, alpha=%5.2f, rmse=%5.2f' % (A, B, alpha, rmse))
plt.legend()
plt.show()
def find_and_plot_best_fit_param_noise_grid(paramsList, noises):
import matplotlib.pyplot as plt
plt.figure(figsize = (20, 10))
for i, params in enumerate(paramsList):
for j, noise in enumerate(noises):
n:int = 20
x, y = gen_rand_points(n, *params, noise)
plt.subplot(len(paramsList), len(noises), i * len(noises) + j + 1)
plt.scatter(x, y)
import bestfitte
A, B, alpha = bestfitte.best_fit(x, y)
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(y, hCG(x, A, B, alpha)))
import matplotlib.pyplot as plt
plt.scatter(x, y)
plt.plot(np.arange(n), hCG(np.arange(n), A, B, alpha),
label=f'A=%5.2f, B=%5.2f, alpha=%5.2f, noise=%5.2f, \nA=%5.2f, B=%5.2f, alpha=%5.2f, rmse=%5.2f' % (*params, noise, A, B, alpha, rmse))
plt.legend()
def compare_results_on_datasets(datasets: list):
'''
datasets parameter is a list of datasets which contain (x_data, y_data, dataset_name) tuples
'''
import matplotlib.pyplot as plt
plt.figure(figsize = (9*len(datasets), 5))
for i, dataset in enumerate(datasets):
x, y, name = dataset
plt.subplot(1, len(datasets), i + 1)
plot_results(x, y, data_id = name, showPlot=False)
def compare_time_on_datasets(datasets: list = None):
'''
datasets parameter is a list of datasets which contain (x_data, y_data, dataset_name) tuples
if omitted, 10 random dataset will be generated
'''
if datasets is None:
# generate 10 random datasets
paramsList = []
for _ in range(10):
paramsList.append((
np.random.randint(3, 21),  # n (random_integers is deprecated; upper bound is exclusive here)
np.random.random() * 1e3, # A
np.random.random() * 1e1, # B
np.random.random() * 1e1, # alpha
np.random.random() * 1 # noise
))
datasets = []
for params in paramsList:
datasets.append(gen_rand_points(*params) +
(f'n=%d, A=%5.2f, B=%5.2f, alpha=%5.2f, noise=%5.2f' % params,))
from scipy.optimize import curve_fit
from bestfitte import best_fit
from pseloglin import fit
from time import perf_counter
rows = []
for dataset in datasets:
x, y, name = dataset
measurements = {'Dataset' : name}
start = perf_counter()
try:
curve_fit(hCG, x, y)
end = perf_counter()
measurements["Iterative"] = end - start
except:
measurements["Iterative"] = np.nan
start = perf_counter()
try:
best_fit(x, y)
end = perf_counter()
measurements["BestFit"] = end - start
except:
measurements["BestFit"] = np.nan
start = perf_counter()
try:
fit(x, y)
end = perf_counter()
measurements["PseLogLin"] = end - start
except:
measurements["PseLogLin"] = np.nan
rows.append(measurements)
import pandas as pd
df = pd.DataFrame(rows, columns=["Dataset", "Iterative", "BestFit", "PseLogLin"])
df.loc['mean'] = df.mean()
df["Dataset"].values[-1] = "Mean"
#print(df.to_latex(index=False))
return df
def compare_with_less_trained(x: np.ndarray, y: np.ndarray, trainPoints):
'''
trainPoints, array with the number of points to use for train on each subplot
'''
import matplotlib.pyplot as plt
plt.figure(figsize = (9 * len(trainPoints), 10))
plt.subplot(2, len(trainPoints), len(trainPoints) // 2 + 1)
plot_results(x, y, showPlot=False, allAlgorithms=False, data_id="All")
for i, ptsTrain in enumerate(trainPoints):
plt.subplot(2, len(trainPoints), len(trainPoints) + i + 1)
plot_results(x, y, ptsTrain = ptsTrain, showPlot=False, allAlgorithms=False, data_id=str(ptsTrain) + " points")
plt.plot(x[ptsTrain:], y[ptsTrain:], "o", color="orange")
| 36.537459
| 174
| 0.61594
| 1,595
| 11,217
| 4.230094
| 0.172414
| 0.008893
| 0.010375
| 0.032014
| 0.384615
| 0.35349
| 0.262635
| 0.219357
| 0.200534
| 0.18838
| 0
| 0.016943
| 0.25809
| 11,217
| 307
| 175
| 36.537459
| 0.7938
| 0.127128
| 0
| 0.373333
| 0
| 0.017778
| 0.057431
| 0.002181
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084444
| false
| 0.013333
| 0.137778
| 0.004444
| 0.262222
| 0.008889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ca9eb97e4365037a9faa4fd695283f51ac6d5a4
| 3,870
|
py
|
Python
|
sciflo/utils/mail.py
|
hysds/sciflo
|
f706288405c8eee59a2f883bab3dcb5229615367
|
[
"Apache-2.0"
] | null | null | null |
sciflo/utils/mail.py
|
hysds/sciflo
|
f706288405c8eee59a2f883bab3dcb5229615367
|
[
"Apache-2.0"
] | null | null | null |
sciflo/utils/mail.py
|
hysds/sciflo
|
f706288405c8eee59a2f883bab3dcb5229615367
|
[
"Apache-2.0"
] | 1
|
2019-02-07T01:08:34.000Z
|
2019-02-07T01:08:34.000Z
|
from smtplib import SMTP
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.header import Header
from email.utils import parseaddr, formataddr, COMMASPACE, formatdate
from email.encoders import encode_base64
def send_email(sender, cc_recipients, bcc_recipients, subject, body, attachments=[]):
"""Send an email.
All arguments should be Unicode strings (plain ASCII works as well).
Only the real name part of sender and recipient addresses may contain
non-ASCII characters.
The email will be properly MIME encoded and delivered through SMTP to
localhost port 25. This is easy to change if you want something different.
The charset of the email will be the first one out of US-ASCII, ISO-8859-1
and UTF-8 that can represent all the characters occurring in the email.
"""
# combined recipients
recipients = cc_recipients + bcc_recipients
# Header class is smart enough to try US-ASCII, then the charset we
# provide, then fall back to UTF-8.
header_charset = 'ISO-8859-1'
# We must choose the body charset manually
for body_charset in 'US-ASCII', 'ISO-8859-1', 'UTF-8':
try:
body.encode(body_charset)
except UnicodeError:
pass
else:
break
# Split real name (which is optional) and email address parts
sender_name, sender_addr = parseaddr(sender)
parsed_cc_recipients = [parseaddr(rec) for rec in cc_recipients]
parsed_bcc_recipients = [parseaddr(rec) for rec in bcc_recipients]
#recipient_name, recipient_addr = parseaddr(recipient)
# We must always pass Unicode strings to Header, otherwise it will
# use RFC 2047 encoding even on plain ASCII strings.
sender_name = str(Header(str(sender_name), header_charset))
unicode_parsed_cc_recipients = []
for recipient_name, recipient_addr in parsed_cc_recipients:
recipient_name = str(Header(str(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
recipient_addr = recipient_addr.encode('ascii').decode('ascii')  # validate, keep a str for formataddr
unicode_parsed_cc_recipients.append((recipient_name, recipient_addr))
unicode_parsed_bcc_recipients = []
for recipient_name, recipient_addr in parsed_bcc_recipients:
recipient_name = str(Header(str(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
recipient_addr = recipient_addr.encode('ascii').decode('ascii')  # validate, keep a str for formataddr
unicode_parsed_bcc_recipients.append((recipient_name, recipient_addr))
# Make sure email addresses do not contain non-ASCII characters
sender_addr = sender_addr.encode('ascii').decode('ascii')  # validate, keep a str
# Create the message ('plain' stands for Content-Type: text/plain)
msg = MIMEMultipart()
msg['CC'] = COMMASPACE.join([formataddr((recipient_name, recipient_addr))
for recipient_name, recipient_addr in unicode_parsed_cc_recipients])
msg['BCC'] = COMMASPACE.join([formataddr((recipient_name, recipient_addr))
for recipient_name, recipient_addr in unicode_parsed_bcc_recipients])
msg['Subject'] = Header(str(subject), header_charset)
msg.attach(MIMEText(body.encode(body_charset), 'plain', body_charset))
# Add attachments
for attachment in attachments:
part = MIMEBase('application', "octet-stream")
part.set_payload(attachment.file.read())
encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % attachment.filename)
msg.attach(part)
# print "#" * 80
# print msg.as_string()
# Send the message via SMTP to localhost:25
smtp = SMTP("localhost")
smtp.sendmail(sender, recipients, msg.as_string())
smtp.quit()
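# Illustrative call (not in the original module); the addresses are
# placeholders, and delivery still goes to the hard-coded localhost:25.
def _example_send():
    send_email(sender='Alice Example <alice@example.com>',
               cc_recipients=['Bob Example <bob@example.com>'],
               bcc_recipients=[],
               subject='Test message',
               body='Hello from send_email().')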
| 42.527473
| 103
| 0.708527
| 503
| 3,870
| 5.292247
| 0.310139
| 0.063486
| 0.07438
| 0.087904
| 0.313298
| 0.302029
| 0.247934
| 0.247934
| 0.212622
| 0.212622
| 0
| 0.010502
| 0.212662
| 3,870
| 90
| 104
| 43
| 0.863144
| 0.311886
| 0
| 0.081633
| 0
| 0
| 0.054064
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0.020408
| 0.142857
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3caab00869605f81530d9a70561508995ff52b3b
| 2,467
|
py
|
Python
|
apps/extention/views/tool.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 349
|
2020-08-04T10:21:01.000Z
|
2022-03-23T08:31:29.000Z
|
apps/extention/views/tool.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 2
|
2021-01-07T06:17:05.000Z
|
2021-04-01T06:01:30.000Z
|
apps/extention/views/tool.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 70
|
2020-08-24T06:46:14.000Z
|
2022-03-25T13:23:27.000Z
|
from flask import Blueprint
from apps.extention.business.tool import ToolBusiness
from apps.extention.extentions import validation, parse_json_form
from library.api.render import json_detail_render
tool = Blueprint('tool', __name__)
@tool.route('/ip', methods=['GET'])
def tool_ip():
"""
@api {get} /v1/tool/ip Query IP address information
@apiName GetIpAddress
@apiGroup Extension
@apiDescription Query information about an IP address
@apiParam {string} ip A valid IP address
@apiParamExample {json} Request-Example:
{
"ip": "110.110.110.12"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": {
"address": "\u4e0a\u6d77\u5e02",
"address_detail": {
"city": "\u4e0a\u6d77\u5e02",
"city_code": 289,
"district": "",
"province": "\u4e0a\u6d77\u5e02",
"street": "",
"street_number": ""
},
"point": {
"x": "13524118.26",
"y":"3642780.37"
}
},
"message":"ok"
}
"""
code, data, address, message = ToolBusiness.get_tool_ip()
return json_detail_render(code, data, message)
@tool.route('/apk/analysis', methods=['POST'])
@validation('POST:tool_apk_analysis_upload')
def apk_analysis_handler():
"""
@api {post} /v1/tool/apk/analysis Analyze APK package information
@apiName AnalysisApkInformation
@apiGroup Extension
@apiDescription Analyze the information of an APK package
@apiParam {string} apk_download_url Download URL of the APK package
@apiParamExample {json} Request-Example:
{
"apk_download_url": "http://tcloud-static.ywopt.com/static/3787c7f2-5caa-434a-9a47-3e6122807ada.apk"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": {
"default_activity": "com.earn.freemoney.cashapp.activity.SplashActivity",
"icon": "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAVr0lEQVR42u2debAdVZ3HP6f79N3ekuQlJOQlARICBCGs",
"label": "Dosh Winner",
"package_name": "com.earn.freemoney.cashapp",
"size": "13.97",
"version_code": "86",
"version_name": "2.0.36"
},
"message": "ok"
}
"""
apk_download_url, apk_type = parse_json_form('tool_apk_analysis_upload')
if apk_download_url:
    data = ToolBusiness.apk_analysis(apk_download_url, apk_type)
    return json_detail_render(0, data)
else:
return json_detail_render(101, 'apk_download_url is required!')
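# Illustrative client call for the endpoint above; the base URL and the
# payload values are assumptions, not part of the original service.
def _example_apk_analysis(base_url='http://localhost:5000'):
    import requests
    payload = {'apk_download_url': 'http://example.com/app.apk', 'type': 1}
    resp = requests.post(base_url + '/v1/tool/apk/analysis', json=payload)
    return resp.json()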
| 29.369048
| 115
| 0.614512
| 270
| 2,467
| 5.440741
| 0.425926
| 0.044929
| 0.057182
| 0.044929
| 0.076242
| 0.076242
| 0.076242
| 0.076242
| 0.076242
| 0.076242
| 0
| 0.063209
| 0.249696
| 2,467
| 83
| 116
| 29.722892
| 0.730416
| 0.551682
| 0
| 0
| 0
| 0
| 0.132603
| 0.064477
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.5
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cab08629b30111114e01484ab49b594bbdb9dd0
| 3,948
|
py
|
Python
|
apt_repoman/connection.py
|
memory/repoman
|
4c5cdfba85afcab5a1219fa5629abc457de27ed5
|
[
"Apache-2.0"
] | 1
|
2017-07-01T21:46:40.000Z
|
2017-07-01T21:46:40.000Z
|
apt_repoman/connection.py
|
memory/repoman
|
4c5cdfba85afcab5a1219fa5629abc457de27ed5
|
[
"Apache-2.0"
] | null | null | null |
apt_repoman/connection.py
|
memory/repoman
|
4c5cdfba85afcab5a1219fa5629abc457de27ed5
|
[
"Apache-2.0"
] | 6
|
2017-07-13T21:41:14.000Z
|
2020-08-07T19:40:25.000Z
|
# stdlib imports
import logging
import time

# pypi imports
from boto3 import Session

LOG = logging.getLogger(__name__)


class Connection(object):

    def __init__(self, role_arn='', profile_name='', region=None):
        self._log = LOG or logging.getLogger(__name__)
        self.role_arn = role_arn
        self.profile_name = profile_name
        self.region = region
        self._s3 = None
        self._sdb = None
        self._sts = None
        self._iam = None
        self._sns = None
        self._session = None
        self._caller_id = None

    @property
    def session(self):
        '''Set our object's self._session attribute to a boto3
        session object. If profile_name is set, use it to pull a
        specific credentials profile from ~/.aws/credentials,
        otherwise use the default credentials path.
        If role_arn is set, use the first session object to
        assume the role, and then overwrite self._session with
        a new session object created using the role credentials.'''
        if self._session is None:
            self._session = self.get_session()
        return self._session

    @property
    def s3(self):
        if self._s3 is None:
            self._s3 = self.get_resource('s3')
        return self._s3

    @property
    def sdb(self):
        if self._sdb is None:
            self._sdb = self.get_client('sdb')
        return self._sdb

    @property
    def sts(self):
        if self._sts is None:
            self._sts = self.get_client('sts')
        return self._sts

    @property
    def iam(self):
        if self._iam is None:
            self._iam = self.get_client('iam')
        return self._iam

    @property
    def sns(self):
        if self._sns is None:
            self._sns = self.get_client('sns')
        return self._sns

    @property
    def caller_id(self):
        if self._caller_id is None:
            self._caller_id = self.sts.get_caller_identity()['Arn']
        return self._caller_id

    def get_session(self):
        if self.profile_name:
            self._log.info(
                'using AWS credential profile %s', self.profile_name)
            try:
                kwargs = {'profile_name': self.profile_name}
                if self.region:
                    kwargs['region_name'] = self.region
                session = Session(**kwargs)
            except Exception as ex:
                self._log.fatal(
                    'Could not connect to AWS using profile %s: %s',
                    self.profile_name, ex)
                raise
        else:
            self._log.debug(
                'getting an AWS session with the default provider')
            kwargs = {}
            if self.region:
                kwargs['region_name'] = self.region
            session = Session(**kwargs)
        if self.role_arn:
            self._log.info(
                'attempting to assume STS self.role %s', self.role_arn)
            try:
                self.role_creds = session.client('sts').assume_role(
                    RoleArn=self.role_arn,
                    RoleSessionName='repoman-%s' % time.time(),
                    DurationSeconds=3600)['Credentials']
            except Exception as ex:
                self._log.fatal(
                    'Could not assume self.role %s: %s',
                    self.role_arn, ex)
                raise
            kwargs = {
                'aws_access_key_id': self.role_creds['AccessKeyId'],
                'aws_secret_access_key': self.role_creds['SecretAccessKey'],
                'aws_session_token': self.role_creds['SessionToken']}
            if self.region:
                kwargs['region_name'] = self.region
            session = Session(**kwargs)
        return session

    def get_client(self, service_name):
        return self.session.client(service_name)

    def get_resource(self, service_name):
        return self.session.resource(service_name)
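A minimal usage sketch, assuming AWS credentials are available in the environment (the role ARN and region below are placeholders):

from apt_repoman.connection import Connection

# Default credential chain, explicit region
conn = Connection(region='us-east-1')
print(conn.caller_id)  # lazily builds the session and an STS client

# Cross-account access via an assumed role (ARN is a placeholder)
admin = Connection(role_arn='arn:aws:iam::123456789012:role/repoman-admin')
domains = admin.sdb.list_domains()  # SimpleDB client built from the role session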
| 31.584
| 76
| 0.563323
| 464
| 3,948
| 4.575431
| 0.209052
| 0.052756
| 0.032972
| 0.025436
| 0.148846
| 0.148846
| 0.1187
| 0.1187
| 0.1187
| 0.081959
| 0
| 0.004662
| 0.348024
| 3,948
| 124
| 77
| 31.83871
| 0.820124
| 0.101317
| 0
| 0.265306
| 0
| 0
| 0.106816
| 0.006014
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112245
| false
| 0
| 0.030612
| 0.020408
| 0.255102
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cac0aa35252a097de5d59a421a354021c1ccdfa
| 21,267
|
py
|
Python
|
paul_analysis/Python/labird/fieldize.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
paul_analysis/Python/labird/fieldize.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
paul_analysis/Python/labird/fieldize.py
|
lzkelley/arepo-mbh-sims_analysis
|
f14519552cedd39a040b53e6d7cc538b5b8f38a3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Methods for interpolating particle lists onto a grid. There are three classic methods:
    ngp - Nearest grid point (point interpolation)
    cic - Cloud in Cell (linear interpolation)
    tsc - Triangular Shaped Cloud (quadratic interpolation)
Each function takes inputs:
    Values - list of field values to interpolate, centered on the grid center.
    Points - coordinates of the field values
    Field - grid to add interpolated points onto
There are also helper functions (convert and convert_centered) to rescale arrays to grid units.
"""
import math
import numpy as np
#Try to import scipy.weave. If we can't, don't worry, we just use the unaccelerated versions
try:
    import scipy.weave
except ImportError:
    scipy = None


def convert(pos, ngrid, box):
    """Rescales coordinates to grid units.
    (0,0) is the lower corner of the grid.
    Inputs:
        pos - coord array to rescale
        ngrid - dimension of grid
        box - Size of the grid in units of pos
    """
    return pos*(ngrid-1)/float(box)


def convert_centered(pos, ngrid, box):
    """Rescales coordinates to grid units.
    (0,0) is the center of the grid
    Inputs:
        pos - coord array to rescale
        ngrid - dimension of grid
        box - Size of the grid in units of pos
    """
    return pos*(ngrid-1.)/float(box)+(ngrid-1.)/2.


def check_input(pos, field):
    """Checks the position and field values for consistency.
    Avoids segfaults in the C code."""
    if np.size(pos) == 0:
        return 0
    dims = np.size(np.shape(field))
    if np.max(pos) > np.shape(field)[0] or np.min(pos) < 0:
        raise ValueError("Positions outside grid")
    if np.shape(pos)[1] < dims:
        raise ValueError("Position array not wide enough for field")
    return 1


def ngp(pos, values, field):
    """Does nearest grid point for a 2D array.
    Inputs:
        Values - list of field values to interpolate
        Points - coordinates of the field values
        Field - grid to add interpolated points onto
    Points need to be in grid units
    Note: This is implemented in scipy.weave and pure python (in case the weave breaks).
    For O(1e5) points both versions are basically instantaneous.
    For O(1e7) points the scipy.weave version is about 100 times faster.
    """
    if not check_input(pos, field):
        return field
    nx = np.shape(values)[0]
    dims = np.size(np.shape(field))
    # Coordinates of nearest grid point (ngp).
    ind = np.array(np.rint(pos), dtype=np.int)
    #Sum over the 3rd axis here.
    expr = """for(int j=0;j<nx;j++){
        int ind1=ind(j,0);
        int ind2=ind(j,1);
        field(ind1,ind2)+=values(j);
    }
    """
    expr3d = """for(int j=0;j<nx;j++){
        int ind1=ind(j,0);
        int ind2=ind(j,1);
        int ind3=ind(j,2);
        field(ind1,ind2,ind3)+=values(j);
    }
    """
    try:
        if dims == 2:
            scipy.weave.inline(expr, ['nx', 'ind', 'values', 'field'], type_converters=scipy.weave.converters.blitz)
        elif dims == 3:
            scipy.weave.inline(expr3d, ['nx', 'ind', 'values', 'field'], type_converters=scipy.weave.converters.blitz)
        else:
            raise ValueError
    except Exception:
        #Fall back on slow python version.
        for j in xrange(0, nx):
            field[tuple(ind[j, 0:dims])] += values[j]
    return field


def cic(pos, value, field, totweight=None, periodic=False):
    """Does Cloud-in-Cell for a 2D array.
    Inputs:
        Values - list of field values to interpolate
        Points - coordinates of the field values
        Field - grid to add interpolated points onto
    Points need to be in coordinates where np.max(points) = np.shape(field)
    """
    # Some error handling.
    if not check_input(pos, field):
        return field
    nval = np.size(value)
    dim = np.shape(field)
    nx = dim[0]
    dim = np.size(dim)
    #-----------------------
    # Calculate CIC weights.
    #-----------------------
    # Coordinates of nearest grid point (ngp).
    ng = np.array(np.rint(pos[:, 0:dim]), dtype=np.int)
    # Distance from sample to ngp.
    dng = ng - pos[:, 0:dim]
    #Setup two arrays for later:
    # kk is for the indices, and ww is for the weights.
    kk = np.empty([2, nval, dim])
    ww = np.empty([2, nval, dim])
    # Index of ngp.
    kk[1] = ng
    # Weight of ngp.
    ww[1] = 0.5 + np.abs(dng)
    # Point before ngp.
    kk[0] = kk[1] - 1  # Index.
    ww[0] = 0.5 - np.abs(dng)
    #Take care of the points at the boundaries
    tscedge(kk, ww, nx, periodic)
    #-----------------------------
    # Interpolate samples to grid.
    #-----------------------------
    # tscweight adds up all tsc weights allocated to a grid point, we need
    # to keep track of this in order to compute the temperature.
    # Note that total(tscweight) is equal to nrsamples and that
    # total(ifield)=n0**3 if sph.plot NE 'sph,temp' (not 1 because we use
    # xpos=posx*n0 --> cube length different from EDFW paper).
    #index[j] -> kk[0][j,0],kk[0][j,2],kk[0][j,3] -> kk[0][j,:]
    extraind = np.zeros(dim-1, dtype=int)
    #Perform y=0, z=0 addition
    tsc_xind(field, value, totweight, kk, ww, extraind)
    if dim > 1:
        #Perform z=0 addition
        extraind[0] = 1
        tsc_xind(field, value, totweight, kk, ww, extraind)
        if dim > 2:
            extraind[1] = 1
            #Perform the rest of the addition
            for yy in xrange(0, 2):
                extraind[0] = yy
                tsc_xind(field, value, totweight, kk, ww, extraind)
    if totweight is None:
        return field
    else:
        return (field, totweight)


def tsc(pos, value, field, totweight=None, periodic=False):
    """ NAME: TSC
    PURPOSE:
        Interpolate an irregularly sampled field using a Triangular Shaped Cloud
    EXPLANATION:
        This function interpolates an irregularly sampled field to a
        regular grid using Triangular Shaped Cloud (nearest grid point
        gets weight 0.75-dx**2, points before and after nearest grid
        points get weight 0.5*(1.5-dx)**2, where dx is the distance
        from the sample to the grid point in units of the cell size).
    INPUTS:
        pos: Array of coordinates of field samples, in grid units from 0 to nx
        value: Array of sample weights (field values). For e.g. a
            temperature field this would be the temperature and the
            keyword AVERAGE should be set. For e.g. a density field
            this could be either the particle mass (AVERAGE should
            not be set) or the density (AVERAGE should be set).
        field: Array to interpolate onto of size nx,nx,nx
        totweight: If this is not None, the routine will add to it the weights at each
            grid point. You can then calculate the average later.
        periodic: Set this keyword if you want a periodic grid.
            ie, the first grid point contains samples of both sides of the volume
            If this is not true, weight is not conserved (some falls off the edges)
    Note: Points need to be in grid units: pos = [0,ngrid-1]
    Note 2: If field has fewer dimensions than pos, we sum over the extra dimensions,
        and the final indices are ignored.
    Example of default allocation of nearest grid points: n0=4, *=gridpoint.
        0   1   2   3     Index of gridpoints
        *   *   *   *     Grid points
      |---|---|---|---|   Range allocated to gridpoints ([0.0,1.0> --> 0, etc.)
      0   1   2   3   4   posx
    OUTPUTS:
        Returns particles interpolated to field, and modifies input variable of the same name.
    PROCEDURE:
        Nearest grid point is determined for each sample.
        TSC weights are computed for each sample.
        Samples are interpolated to the grid.
        Grid point values are computed (sum or average of samples).
    EXAMPLE:
        nx=20
        ny=10
        posx=randomu(s,1000)
        posy=randomu(s,1000)
        value=posx**2+posy**2
        field=tsc(value,pos,field,/average)
        surface,field,/lego
    NOTES:
        A standard reference for these interpolation methods is: R.W. Hockney
        and J.W. Eastwood, Computer Simulations Using Particles (New York:
        McGraw-Hill, 1981).
    MODIFICATION HISTORY:
        Written by Joop Schaye, Feb 1999.
        Check for overflow for large dimensions P. Riley/W. Landsman Dec. 1999
        Ported to python, cleaned up and drastically shortened using
        these new-fangled "function" thingies by Simeon Bird, Feb. 2012
    """
    # Some error handling.
    if not check_input(pos, field):
        return field
    nval = np.size(value)
    dim = np.shape(field)
    nx = dim[0]
    dim = np.size(dim)
    #-----------------------
    # Calculate TSC weights.
    #-----------------------
    # Coordinates of nearest grid point (ngp).
    ng = np.array(np.rint(pos[:, 0:dim]), dtype=np.int)
    # Distance from sample to ngp.
    dng = ng - pos[:, 0:dim]
    #Setup two arrays for later:
    # kk is for the indices, and ww is for the weights.
    kk = np.empty([3, nval, dim])
    ww = np.empty([3, nval, dim])
    # Index of ngp.
    kk[1, :, :] = ng
    # Weight of ngp.
    ww[1, :, :] = 0.75 - dng**2
    # Point before ngp.
    kk[0, :, :] = kk[1, :, :] - 1  # Index.
    dd = 1.0 - dng  # Distance to sample.
    ww[0] = 0.5*(1.5 - dd)**2  # TSC-weight.
    # Point after ngp.
    kk[2, :, :] = kk[1, :, :] + 1  # Index.
    dd = 1.0 + dng  # Distance to sample.
    ww[2] = 0.5*(1.5 - dd)**2  # TSC-weight.
    #Take care of the points at the boundaries
    tscedge(kk, ww, nx, periodic)
    #-----------------------------
    # Interpolate samples to grid.
    #-----------------------------
    # tscweight adds up all tsc weights allocated to a grid point, we need
    # to keep track of this in order to compute the temperature.
    # Note that total(tscweight) is equal to nrsamples and that
    # total(ifield)=n0**3 if sph.plot NE 'sph,temp' (not 1 because we use
    # xpos=posx*n0 --> cube length different from EDFW paper).
    #index[j] -> kk[0][j,0],kk[0][j,2],kk[0][j,3] -> kk[0][j,:]
    extraind = np.zeros(dim-1, dtype=int)
    #Perform y=0, z=0 addition
    tsc_xind(field, value, totweight, kk, ww, extraind)
    if dim > 1:
        #Perform z=0 addition
        for yy in xrange(1, 3):
            extraind[0] = yy
            tsc_xind(field, value, totweight, kk, ww, extraind)
        if dim > 2:
            #Perform the rest of the addition
            for zz in xrange(1, 3):
                for yy in xrange(0, 3):
                    extraind[0] = yy
                    extraind[1] = zz
                    tsc_xind(field, value, totweight, kk, ww, extraind)
    if totweight is None:
        return field
    else:
        return (field, totweight)


def cic_str(pos, value, field, in_radii, periodic=False):
    """This is exactly the same as the cic() routine, above, except
    that instead of each particle being stretched over one grid point,
    it is stretched over a cubic region with some radius.
    Field must be 2d
    Extra arguments:
        radii - Array of particle radii in grid units.
    """
    # Some error handling.
    if not check_input(pos, field):
        return field
    nval = np.size(value)
    dim = np.shape(field)
    nx = dim[0]
    dim = np.size(dim)
    if dim != 2:
        raise ValueError("Non 2D grid not supported!")
    #Use a grid cell radius of 2/3 (4 \pi /3 )**(1/3) s
    #This means that l^3 = cell volume for AREPO (so it should be more or less exact)
    #and is close to the l = 0.5 (4\pi/3)**(1/3) s
    #cic interpolation that Nagamine, Springel & Hernquist used
    #to approximate their SPH smoothing
    corr = 2./3.*(4*math.pi/3.)**0.3333333333
    radii = np.array(corr*in_radii)
    #If the smoothing length is below a single grid cell,
    #stretch it.
    ind = np.where(radii < 0.5)
    radii[ind] = 0.5
    #Weight of each cell
    weight = value/(2*radii)**dim
    #Upper and lower bounds
    up = pos[:, 1:dim+1] + np.repeat(np.transpose([radii, ]), dim, axis=1)
    low = pos[:, 1:dim+1] - np.repeat(np.transpose([radii, ]), dim, axis=1)
    #Upper and lower grid cells to add to
    upg = np.array(np.floor(up), dtype=int)
    lowg = np.array(np.floor(low), dtype=int)
    #Deal with the edges
    if periodic:
        raise ValueError("Periodic grid not supported")
    else:
        ind = np.where(up > nx-1)
        up[ind] = nx
        upg[ind] = nx-1
        ind = np.where(low < 0)
        low[ind] = 0
        lowg[ind] = 0
    expr = """for(int p=0;p<nval;p++){
        //Temp variables
        double wght = weight(p);
        int ilx=lowg(p,0);
        int ily=lowg(p,1);
        int iux=upg(p,0);
        int iuy=upg(p,1);
        double lx=low(p,0);
        double ly=low(p,1);
        double ux=up(p,0);
        double uy=up(p,1);
        //Deal with corner values
        field(ilx,ily)+=(ilx+1-lx)*(ily+1-ly)*wght;
        field(iux,ily)+=(ux-iux)*(ily+1-ly)*wght;
        field(ilx,iuy)+=(ilx+1-lx)*(uy-iuy)*wght;
        field(iux,iuy)+=(ux-iux)*(uy-iuy)*wght;
        //Edges in y
        for(int gx=ilx+1;gx<iux;gx++){
            field(gx,ily)+=(ily+1-ly)*wght;
            field(gx,iuy)+=(uy-iuy)*wght;
        }
        //Central region
        for(int gy=ily+1;gy<iuy;gy++){
            //Edges.
            field(ilx,gy)+=(ilx+1-lx)*wght;
            field(iux,gy)+=(ux-iux)*wght;
            //x-values
            for(int gx=ilx+1;gx<iux;gx++){
                field(gx,gy)+=wght;
            }
        }
    }
    """
    try:
        scipy.weave.inline(expr, ['nval', 'upg', 'lowg', 'field', 'up', 'low', 'weight'], type_converters=scipy.weave.converters.blitz)
    except Exception:
        for p in xrange(0, nval):
            #Deal with corner values
            field[lowg[p, 0], lowg[p, 1]] += (lowg[p, 0]+1-low[p, 0])*(lowg[p, 1]+1-low[p, 1])*weight[p]
            field[upg[p, 0], lowg[p, 1]] += (up[p, 0]-upg[p, 0])*(lowg[p, 1]+1-low[p, 1])*weight[p]
            field[lowg[p, 0], upg[p, 1]] += (lowg[p, 0]+1-low[p, 0])*(up[p, 1]-upg[p, 1])*weight[p]
            field[upg[p, 0], upg[p, 1]] += (up[p, 0]-upg[p, 0])*(up[p, 1]-upg[p, 1])*weight[p]
            #Edges in y
            for gx in xrange(lowg[p, 0]+1, upg[p, 0]):
                field[gx, lowg[p, 1]] += (lowg[p, 1]+1-low[p, 1])*weight[p]
                field[gx, upg[p, 1]] += (up[p, 1]-upg[p, 1])*weight[p]
            #Central region
            for gy in xrange(lowg[p, 1]+1, upg[p, 1]):
                #Edges in x
                field[lowg[p, 0], gy] += (lowg[p, 0]+1-low[p, 0])*weight[p]
                field[upg[p, 0], gy] += (up[p, 0]-upg[p, 0])*weight[p]
                #x-values
                for gx in xrange(lowg[p, 0]+1, upg[p, 0]):
                    field[gx, gy] += weight[p]
    return field


from _fieldize_priv import _SPH_Fieldize
# this takes forever!!!!
# Typical call: fieldize.sph_str(coords,mHI,sub_nHI_grid[ii],ismooth,weights=weights, periodic=True)
def sph_str(pos, value, field, radii, weights=None, periodic=False):
    """Interpolate a particle onto a grid using an SPH kernel.
    This is similar to the cic_str() routine, but spherical.
    Field must be 2d
    Extra arguments:
        radii - Array of particle radii in grid units.
        weights - Weights to divide each contribution by.
    """
    # Some error handling.
    if np.size(pos) == 0:
        return field
    dim = np.shape(field)
    if np.size(dim) != 2:
        raise ValueError("Non 2D grid not supported!")
    if weights is None:
        weights = np.array([0.])
    #Cast some array types
    if pos.dtype != np.float32:
        pos = np.array(pos, dtype=np.float32)
    if radii.dtype != np.float32:
        radii = np.array(radii, dtype=np.float32)
    if value.dtype != np.float32:
        value = np.array(value, dtype=np.float32)
    field += _SPH_Fieldize(pos, radii, value, weights, periodic, dim[0])
    return


import scipy.integrate as integ

def integrate_sph_kernel(h, gx, gy):
    """Compute the integrated sph kernel for a particle with
    smoothing length h, at position pos, for a grid-cell at gg"""
    #Fast method; use the value at the grid cell.
    #Bad if h < grid cell radius
    r0 = np.sqrt((gx+0.5)**2+(gy+0.5)**2)
    if r0 > h:
        return 0
    h2 = h*h
    #Do the z integration with the trapezium rule.
    #Evaluate this at some fixed (well-chosen) abscissae
    zc = 0
    if h/2 > r0:
        zc = np.sqrt(h2/4-r0**2)
    zm = np.sqrt(h2-r0**2)
    # Quarter-point abscissae between zc and zm; the third point was mistyped
    # as (zc+3*zm)/2 (which lies outside [zc, zm]) and has been corrected to /4.
    zz = np.array([zc, (3*zc+zm)/4., (zc+zm)/2., (zc+3*zm)/4., zm])
    kern = sph_kern2(np.sqrt(zz**2+r0**2), h)
    total = 2*integ.simps(kern, zz)
    if h/2 > r0:
        zz = np.array([0, zc/8., zc/4., 3*zc/8, zc/2., 5/8.*zc, 3*zc/4., zc])
        kern = sph_kern1(np.sqrt(zz**2+r0**2), h)
        total += 2*integ.simps(kern, zz)
    return total

def do_slow_sph_integral(h, gx, gy):
    """Evaluate the very slow triple integral to find kernel contribution. Only do it when we must."""
    #z limits are -h - > h, for simplicity.
    #x and y limits are grid cells
    (weight, err) = integ.tplquad(sph_cart_wrap, -h, h, lambda x: gx, lambda x: gx+1, lambda x, y: gy, lambda x, y: gy+1, args=(h,), epsabs=5e-3)
    return weight

def sph_cart_wrap(z, y, x, h):
    """Cartesian wrapper around sph_kernel"""
    r = np.sqrt(x**2+y**2+z**2)
    return sph_kernel(r, h)

def sph_kern1(r, h):
    """SPH kernel for 0 < r < h/2"""
    return 8/math.pi/h**3*(1-6*(r/h)**2+6*(r/h)**3)

def sph_kern2(r, h):
    """SPH kernel for h/2 < r < h"""
    return 2*(1-r/h)**3*8/math.pi/h**3

def sph_kernel(r, h):
    """Evaluates the sph kernel used in gadget."""
    if r > h:
        return 0
    elif r > h/2:
        return 2*(1-r/h)**3*8/math.pi/h**3
    else:
        return 8/math.pi/h**3*(1-6*(r/h)**2+6*(r/h)**3)

def tscedge(kk, ww, ngrid, periodic):
    """This function takes care of the points at the grid boundaries,
    either by wrapping them around the grid (the Julie Andrews sense)
    or by throwing them over the side (the Al Pacino sense).
    Arguments are:
        kk - the grid indices
        ww - the grid weights
        nx - the number of grid points
        periodic - Julie or Al?
    """
    if periodic:
        #If periodic, the nearest grid indices need to wrap around
        #Note python has a sensible remainder operator
        #which always returns > 0 , unlike C
        #In-place modulo, so the caller's array actually sees the wrap
        #(a plain kk = kk % ngrid would rebind the local name only).
        kk %= ngrid
    else:
        #Find points outside the grid
        ind = np.where(np.logical_or((kk < 0), (kk > ngrid-1)))
        #Set the weights of these points to zero
        ww[ind] = 0
        #Indices of these points now do not matter, so set to zero also
        kk[ind] = 0

def tscadd(field, index, weight, value, totweight):
    """This function is a helper for the tsc and cic routines. It adds
    the weighted value to the field and optionally calculates the total weight.
    Returns nothing, but alters field
    """
    nx = np.size(value)
    dims = np.size(np.shape(field))
    total = totweight is not None
    #Faster C version of this function: this is getting a little out of hand.
    expr = """for(int j=0;j<nx;j++){
        int ind1=index(j,0);
        int ind2=index(j,1);
    """
    if dims == 3:
        expr += """int ind3=index(j,2);
        field(ind1,ind2,ind3)+=weight(j)*value(j);
        """
        if total:
            expr += " totweight(ind1,ind2,ind3) +=weight(j);"
    if dims == 2:
        expr += """field(ind1,ind2)+=weight(j)*value(j);
        """
        if total:
            expr += " totweight(ind1,ind2) +=weight(j);"
    expr += "}"
    try:
        if dims == 2 or dims == 3:
            if total:
                scipy.weave.inline(expr, ['nx', 'index', 'value', 'field', 'weight', 'totweight'], type_converters=scipy.weave.converters.blitz)
            else:
                scipy.weave.inline(expr, ['nx', 'index', 'value', 'field', 'weight'], type_converters=scipy.weave.converters.blitz)
        else:
            raise ValueError
    except Exception:
        wwval = weight*value
        for j in xrange(0, nx):
            ind = tuple(index[j, :])
            field[ind] += wwval[j]
            if totweight is not None:
                totweight[ind] += weight[j]
    return

def get_tscweight(ww, ii):
    """Calculates the TSC weight for a particular set of axes.
    ii should be a vector of length dims having values 0,1,2.
    (for CIC a similar thing but ii has values 0,1)
    eg, call as:
        get_tscweight(ww,[0,0,0])
    """
    tscweight = 1.
    #tscweight = \Pi ww[1]*ww[2]*ww[3]
    for j in xrange(0, np.size(ii)):
        tscweight *= ww[ii[j], :, j]
    return tscweight

def tsc_xind(field, value, totweight, kk, ww, extraind):
    """Perform the interpolation along the x-axis.
    extraind argument contains the y and z indices, if needed.
    So for a 1d interpolation, extraind=[], for 2d,
    extraind=[y,], for 3d, extraind=[y,z]
    Returns nothing, but alters field
    """
    dims = np.size(extraind)+1
    dim_list = np.zeros(dims, dtype=int)
    dim_list[1:dims] = extraind
    index = kk[0]
    #Set up the index to have the right kk values depending on the y,z axes
    for i in xrange(1, dims):
        index[:, i] = kk[extraind[i-1], :, i]
    #Do the addition for each value of x
    for i in xrange(0, np.shape(kk)[0]):
        dim_list[0] = i
        tscweight = get_tscweight(ww, dim_list)
        index[:, 0] = kk[i, :, 0]
        tscadd(field, index, tscweight, value, totweight)
    return
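A minimal deposition sketch under the same Python 2 conventions as the file (xrange, print statement, scipy.weave); the module name fieldize is an assumption:

import numpy as np
import fieldize  # assumed module name for the file above

# 100 unit-mass particles in a box of side 25., deposited onto a 32x32 grid
pos = np.random.uniform(0, 25., size=(100, 2))
values = np.ones(100)
grid = np.zeros((32, 32))

gridpos = fieldize.convert(pos, 32, 25.)  # rescale to grid units [0, ngrid-1)
fieldize.tsc(gridpos, values, grid)       # quadratic (TSC) deposition, in place
print grid.sum()  # close to 100.; some weight falls off the non-periodic edges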
| 34.246377
| 137
| 0.590022
| 3,324
| 21,267
| 3.757822
| 0.158845
| 0.004323
| 0.008646
| 0.009527
| 0.399888
| 0.369706
| 0.351933
| 0.326475
| 0.310864
| 0.29221
| 0
| 0.030906
| 0.269714
| 21,267
| 620
| 138
| 34.301613
| 0.773357
| 0.452344
| 0
| 0.367213
| 0
| 0
| 0.181399
| 0.044603
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059016
| false
| 0
| 0.019672
| 0
| 0.170492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cafbcdeecba4bc828647c5d5e2a12435c74df80
| 776
|
py
|
Python
|
spotify_search/search.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | 1
|
2021-07-29T16:09:30.000Z
|
2021-07-29T16:09:30.000Z
|
spotify_search/search.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | null | null | null |
spotify_search/search.py
|
MiltonLn/spotify-tracks-pyconco2020
|
4a75b15852344f7dac066bea3c3e3abb1157d198
|
[
"MIT"
] | null | null | null |
from importlib import import_module

from flask import Flask, request, jsonify

from .spotify_api import get_spotify_response

app = Flask(__name__)
app.config.from_object("spotify_search.settings")


@app.route("/search", methods=["GET"])
def search():
    search_term = request.args.get("search_term", "")
    limit = request.args.get("limit")
    search_type = request.args.get("type")
    assert search_type in ["artist", "track", "album"]
    json_response = get_spotify_response(
        search_term,
        limit=limit,
        search_type=search_type
    )
    utils_module = import_module("spotify_search.utils")
    parse_method = getattr(utils_module, f"parse_{search_type}s")
    search_results = parse_method(json_response)
    return jsonify(search_results)
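A minimal sketch of exercising the endpoint with Flask's test client (the query values are placeholders; a valid Spotify credential setup in spotify_search.settings is assumed):

from spotify_search.search import app

client = app.test_client()
resp = client.get("/search?search_term=radiohead&type=artist&limit=5")
print(resp.get_json())  # parsed results from the parse_artists() helper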
| 26.758621
| 65
| 0.719072
| 100
| 776
| 5.27
| 0.38
| 0.094877
| 0.079696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166237
| 776
| 28
| 66
| 27.714286
| 0.814529
| 0
| 0
| 0
| 0
| 0
| 0.140464
| 0.029639
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.05
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cb1615543f6a7b7ba1580acd4a1477cfa004ce2
| 3,940
|
py
|
Python
|
Python/src/controllers/MainController.py
|
Jictyvoo/EXA868--PathFinder
|
1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a
|
[
"MIT"
] | null | null | null |
Python/src/controllers/MainController.py
|
Jictyvoo/EXA868--PathFinder
|
1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a
|
[
"MIT"
] | null | null | null |
Python/src/controllers/MainController.py
|
Jictyvoo/EXA868--PathFinder
|
1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a
|
[
"MIT"
] | null | null | null |
import math

from models.business.OrganismController import OrganismController
from models.value.Finder import Finder
from models.value.Labyrinth import Labyrinth


class MainController:
    def __init__(self):
        self.__labyrinth = Labyrinth("../config.json")
        self.__labyrinth.loadLabyrinth("../labyrinth.la")
        self.__controllerOrganism = OrganismController(Finder, self.__labyrinth.getBeginPosition())
        self.__genomeDecoder = ("UP", "RIGHT", "DOWN", "LEFT")
        self.__stateDecoder = {'alive': 0, 'dead': -1, 'finished': 1}
        self.__ending = self.__labyrinth.getEndingPosition()
        self.__have_finished = False
        self.__generations_finished = 0
        self.__generations_fitness_average = []
        self.__best_fitness = []
        self.__best_organisms = []

    def finished_generations(self):
        return self.__generations_finished

    def get_generations_fitness_average(self):
        return self.__generations_fitness_average

    def get_best_fitness(self):
        return self.__best_fitness

    def get_genome_decoder(self):
        return self.__genomeDecoder

    def get_labyrinth(self):
        return self.__labyrinth

    def get_best_one(self):
        return self.__controllerOrganism.getSmallerPath(list_to_order=self.__best_organisms)[0]

    def __calculate_fitness(self, organism):
        x_diference = organism.getPosition()['x']
        x_diference = x_diference - self.__ending['x']
        y_diference = organism.getPosition()['y']
        y_diference = y_diference - self.__ending['y']
        # return math.sqrt(math.pow(x_diference, 2) + math.pow(y_diference, 2))
        return math.fabs(x_diference) + math.fabs(y_diference)

    def move(self, organisms):
        for organism in organisms:
            count = 0
            for genome in organism.getGenome():
                if organism.getState() == self.__stateDecoder['alive']:
                    position = organism.getPosition()
                    has_moved = self.__labyrinth.move(self.__genomeDecoder[genome], position)
                    if has_moved:
                        organism.updateFitness(1)
                        organism.setPosition(has_moved)
                        if self.__labyrinth.isAtFinal(has_moved):
                            organism.updateFitness(100)
                            organism.setState(self.__stateDecoder['finished'])
                            organism.setLast(count)
                            print("Generation: " + str(organism.getGeneration()), organism.getGenome())
                            self.__have_finished = True
                    else:
                        organism.updateFitness(-5)
                        # organism.setState(self.stateDecoder['dead'])
                count = count + 1
            if organism.getState() == self.__stateDecoder['dead']:
                organism.updateFitness(-10)
            organism.updateFitness(-10 * self.__calculate_fitness(organism))
            # print(organism.getPosition())
            begin_position = self.__labyrinth.getBeginPosition()
            organism.setPosition({'x': begin_position['x'], 'y': begin_position['y']})

    def execute(self):
        organisms = self.__controllerOrganism.getOrganisms()
        if not organisms:
            return None
        self.move(organisms)
        if self.__have_finished:
            self.__generations_finished = self.__generations_finished + 1
            self.__have_finished = False
        self.__generations_fitness_average.append(self.__controllerOrganism.average_fitness())
        mom, dad = self.__controllerOrganism.selectBestOnes()
        self.__best_fitness.append(mom.getFitness())
        self.__best_organisms.append(mom)
        self.__controllerOrganism.crossover(mom, dad, 0.05)
        if mom.getGeneration() % 11 == 0:
            self.__controllerOrganism.saveGenomes("../LastsGenomes.json")
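A minimal driver sketch, assuming the script is launched from Python/src so the relative ../config.json and ../labyrinth.la paths in the constructor resolve:

from controllers.MainController import MainController

controller = MainController()
for _ in range(100):  # evolve 100 generations of path-finders
    controller.execute()
print(controller.finished_generations())      # how many generations reached the exit
print(controller.get_best_one().getGenome())  # best genome found so far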
| 39.4
| 103
| 0.628173
| 378
| 3,940
| 6.164021
| 0.259259
| 0.044635
| 0.036052
| 0.037339
| 0.060086
| 0.030901
| 0
| 0
| 0
| 0
| 0
| 0.008717
| 0.272081
| 3,940
| 99
| 104
| 39.79798
| 0.803696
| 0.036548
| 0
| 0.027027
| 0
| 0
| 0.031118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.054054
| 0.081081
| 0.310811
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cb181b4a78692a5068ea6ba57d0e24bbe0db8c2
| 3,386
|
py
|
Python
|
accounts/views.py
|
callmewind/billdev
|
fcd53cb98284677fb619abeafb17a88035aabfd6
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
callmewind/billdev
|
fcd53cb98284677fb619abeafb17a88035aabfd6
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
callmewind/billdev
|
fcd53cb98284677fb619abeafb17a88035aabfd6
|
[
"MIT"
] | null | null | null |
from django.views.generic.edit import CreateView
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import RedirectView
from django.conf import settings

from .forms import *


class ActivateAccountTokenGenerator(PasswordResetTokenGenerator):
    def _make_hash_value(self, user, timestamp):
        return (
            str(user.pk) + str(timestamp) + str(user.is_active)
        )


class SignUpView(CreateView):
    template_name = 'accounts/sign-up.html'
    form_class = SignUpForm

    def form_valid(self, form):
        from django.template.response import TemplateResponse
        from django.utils.http import urlsafe_base64_encode
        from django.utils.encoding import force_bytes
        from django.core.mail import send_mail
        from django.urls import reverse
        import urllib
        user = form.save()
        token_generator = ActivateAccountTokenGenerator()
        activation_link = self.request.build_absolute_uri(
            reverse('accounts:activate', kwargs={
                'uidb64': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
                'token': token_generator.make_token(user)
            })
        )
        context = {
            'user': user,
            'activation_link': activation_link
        }
        send_mail(
            _('Activate your account'),
            activation_link,
            'test@example.com',
            [user.email],
            html_message=activation_link)
        #send_mail(user.site, 'guides/email/promo-confirm-email.html', user.email, _('Just one click to access to your Guide %(mobile_emoji)s' % {'mobile_emoji': u"\U0001F4F2" }), context, user.web_language)
        return TemplateResponse(self.request, 'accounts/sign-up-confirm.html', {'email': user.email})

    def dispatch(self, request, *args, **kwargs):
        if self.request.user.is_authenticated:
            from django.shortcuts import redirect
            return redirect(settings.LOGIN_REDIRECT_URL)
        return super().dispatch(request, *args, **kwargs)


class ActivateView(RedirectView):
    url = settings.LOGIN_REDIRECT_URL

    def dispatch(self, request, *args, **kwargs):
        from django.utils.encoding import force_text
        from django.utils.http import urlsafe_base64_decode
        from django.http import Http404
        from .models import User
        try:
            user = User.objects.get(pk=force_text(urlsafe_base64_decode(self.kwargs['uidb64'])))
        except (TypeError, ValueError, OverflowError, User.DoesNotExist):
            raise Http404
        token_generator = ActivateAccountTokenGenerator()
        if request.user.is_authenticated:
            if user.pk != request.user.pk:
                raise Http404
            # Note: an already-authenticated owner falls through here without
            # an explicit response, so this branch relies on never being hit
            # with a valid session in practice.
        elif token_generator.check_token(user, self.kwargs['token']):
            from django.contrib.auth import login
            from django.contrib import messages
            user.is_active = True
            user.save()
            login(request, user, 'django.contrib.auth.backends.ModelBackend')
            messages.success(request, _('Your account has been activated. Welcome!'))
            return super().dispatch(request, *args, **kwargs)
        else:
            raise Http404
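A minimal urls.py sketch for wiring these views, assuming the accounts namespace used by reverse('accounts:activate') above (the sign-up route name is a placeholder):

from django.urls import path

from . import views

app_name = 'accounts'
urlpatterns = [
    path('sign-up/', views.SignUpView.as_view(), name='sign_up'),
    # uidb64 and token feed ActivateView.dispatch via self.kwargs
    path('activate/<uidb64>/<token>/', views.ActivateView.as_view(), name='activate'),
]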
| 36.804348
| 208
| 0.646486
| 364
| 3,386
| 5.873626
| 0.362637
| 0.074836
| 0.03508
| 0.02058
| 0.130964
| 0.130964
| 0.035547
| 0
| 0
| 0
| 0
| 0.012024
| 0.263142
| 3,386
| 91
| 209
| 37.208791
| 0.84489
| 0.058771
| 0
| 0.128571
| 0
| 0
| 0.072864
| 0.02858
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0.028571
| 0.271429
| 0.014286
| 0.485714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cb70deff93c19ea3ca28c0dcdec1ef4bed01acf
| 3,532
|
py
|
Python
|
Custom/text.py
|
SemLaan/Hotel-review-sentiment-analysis
|
b7fd22dcea63bab1c7fe666a7f4912931de1f4dc
|
[
"Apache-2.0"
] | null | null | null |
Custom/text.py
|
SemLaan/Hotel-review-sentiment-analysis
|
b7fd22dcea63bab1c7fe666a7f4912931de1f4dc
|
[
"Apache-2.0"
] | null | null | null |
Custom/text.py
|
SemLaan/Hotel-review-sentiment-analysis
|
b7fd22dcea63bab1c7fe666a7f4912931de1f4dc
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from nltk import tokenize as tokenizers
from nltk.stem import PorterStemmer, WordNetLemmatizer


class TextCleaning:
    def __init__(self):
        return

    def remove_hyperlinks(self, corpus):
        corpus = corpus.str.replace(r"https?://t.co/[A-Za-z0-9]+", "https")
        return corpus

    def remove_numbers(self, corpus):
        corpus = corpus.str.replace(r"\w*\d\w*", "")
        return corpus

    def tokenize(self, corpus):
        tokenizer = tokenizers.RegexpTokenizer(r'\w+')
        corpus = corpus.apply(lambda x: tokenizer.tokenize(x))
        return corpus

    def untokenize(self, corpus):
        corpus = corpus.apply(
            lambda tokenized_review: ' '.join(tokenized_review)
        )
        return corpus

    def lemmatize(self, corpus):
        corpus = self.tokenize(corpus)
        lemmatizer = WordNetLemmatizer()
        corpus = corpus.apply(
            lambda tokens: [lemmatizer.lemmatize(token) for token in tokens]
        )
        return self.untokenize(corpus)

    def stem(self, corpus):
        corpus = self.tokenize(corpus)
        stemmer = PorterStemmer()
        corpus = corpus.apply(
            lambda tokens: [stemmer.stem(token) for token in tokens]
        )
        return self.untokenize(corpus)

    def to_lower(self, corpus):
        return corpus.apply(str.lower)

    def negate_corpus(self, corpus):
        corpus = corpus.apply(self.negate_sentence)
        return corpus

    def negate_sentence(self, sentence):
        sentence = sentence.lower()
        for word in appos:
            if word in sentence:
                sentence = sentence.replace(word, appos[word])
        return sentence.lower()

    def count_negations(self, corpus):
        negations = 0
        for sentence in corpus:
            sentence = sentence.lower()
            for word in appos:
                if word in sentence:
                    negations += 1
        print(negations)
        return


appos = {
    "aren t": "are not",
    "can t": "cannot",
    "couldn t": "could not",
    "didn t": "did not",
    "doesn t": "does not",
    "don t": "do not",
    "hadn t": "had not",
    "hasn t": "has not",
    "haven t": "have not",
    "he d": "he would",
    "he ll": "he will",
    "he s": "he is",
    "i d": "I would",
    "i ll": "I will",
    "i m": "I am",
    "isn t": "is not",
    "it s": "it is",
    "it ll": "it will",
    "i ve": "I have",
    "let s": "let us",
    "mightn t": "might not",
    "mustn t": "must not",
    "shan t": "shall not",
    "she d": "she would",
    "she ll": "she will",
    "she s": "she is",
    "shouldn t": "should not",
    "that s": "that is",
    "there s": "there is",
    "they d": "they would",
    "they ll": "they will",
    "they re": "they are",
    "they ve": "they have",
    "we d": "we would",
    "we re": "we are",
    "weren t": "were not",
    "we ve": "we have",
    "what ll": "what will",
    "what re": "what are",
    "what s": "what is",
    "what ve": "what have",
    "where s": "where is",
    "who d": "who would",
    "who ll": "who will",
    "who re": "who are",
    "who s": "who is",
    "who ve": "who have",
    "won t": "will not",
    "wouldn t": "would not",
    "you d": "you would",
    "you ll": "you will",
    "you re": "you are",
    "you ve": "you have",
    " re": " are",
    "wasn t": "was not",
    "we ll": " will",
}
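A minimal usage sketch on a pandas Series of reviews (the sample text is made up, and lemmatize() assumes the NLTK wordnet data has been downloaded):

import pandas as pd
from Custom.text import TextCleaning

reviews = pd.Series(["I don t like room service!", "Staff couldn t be nicer :)"])
cleaner = TextCleaning()
cleaned = cleaner.negate_corpus(reviews)  # expand "don t" -> "do not", etc.
cleaned = cleaner.remove_numbers(cleaned)
cleaned = cleaner.lemmatize(cleaned)      # tokenize, lemmatize, re-join
print(cleaned.tolist())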
| 22.213836
| 76
| 0.51812
| 439
| 3,532
| 4.138952
| 0.268793
| 0.085856
| 0.052834
| 0.048431
| 0.24656
| 0.18492
| 0.147496
| 0.111172
| 0.111172
| 0.111172
| 0
| 0.001699
| 0.333239
| 3,532
| 158
| 77
| 22.35443
| 0.769851
| 0
| 0
| 0.176991
| 0
| 0
| 0.220617
| 0.007363
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097345
| false
| 0
| 0.026549
| 0.017699
| 0.230089
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cb8b156ffda90f3a147616840973c64a0b81e50
| 546
|
py
|
Python
|
kolibri/plugins/user_auth/root_urls.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 545
|
2016-01-19T19:26:55.000Z
|
2022-03-20T00:13:04.000Z
|
kolibri/plugins/user_auth/root_urls.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 8,329
|
2016-01-19T19:32:02.000Z
|
2022-03-31T21:23:12.000Z
|
kolibri/plugins/user_auth/root_urls.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 493
|
2016-01-19T19:26:48.000Z
|
2022-03-28T14:35:05.000Z
|
"""
This is here to enable redirects from the old /user endpoint to /auth
"""
from django.conf.urls import include
from django.conf.urls import url
from django.views.generic.base import RedirectView
from kolibri.core.device.translation import i18n_patterns
redirect_patterns = [
url(
r"^user/$",
RedirectView.as_view(
pattern_name="kolibri:kolibri.plugins.user_auth:user_auth", permanent=True
),
name="redirect_user",
),
]
urlpatterns = [url(r"", include(i18n_patterns(redirect_patterns)))]
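A quick sketch of what the redirect does, assuming a running Kolibri URL conf that includes these patterns (the response may first bounce through the i18n language prefix added by i18n_patterns):

from django.test import Client

client = Client()
response = client.get('/user/')
print(response.status_code)  # 301: permanent redirect toward the user_auth plugin URL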
| 26
| 86
| 0.705128
| 70
| 546
| 5.371429
| 0.542857
| 0.079787
| 0.074468
| 0.095745
| 0.12766
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009009
| 0.186813
| 546
| 20
| 87
| 27.3
| 0.837838
| 0.126374
| 0
| 0.142857
| 0
| 0
| 0.134328
| 0.091684
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cb8ec1381ca6215654d8b8a9da92a3ab2726159
| 4,685
|
py
|
Python
|
Script.py
|
harisqazi1/Automated_Script
|
6680e0604db55297fad2ab2f99ea61324ca88048
|
[
"MIT"
] | null | null | null |
Script.py
|
harisqazi1/Automated_Script
|
6680e0604db55297fad2ab2f99ea61324ca88048
|
[
"MIT"
] | null | null | null |
Script.py
|
harisqazi1/Automated_Script
|
6680e0604db55297fad2ab2f99ea61324ca88048
|
[
"MIT"
] | null | null | null |
"""
Title: Automated Script for Data Scraping
Creator: Haris "5w464l1c10u5"
Purpose: This was made in order to make it easier to get data from online, all through one python script
Usage:
python3 Automated_Script.py
Resources:
https://www.digitalocean.com/community/tutorials/how-to-scrape-web-pages-with-beautiful-soup-and-python-3
https://www.guru99.com/reading-and-writing-files-in-python.html
https://www.dataquest.io/blog/web-scraping-tutorial-python/
https://forecast.weather.gov/MapClick.php?lat=42.00900000000007&lon=-87.69495999999998
https://pythonspot.com/http-download-file-with-python/
"""
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import urllib.request, urllib.error, urllib.parse
from datetime import date, datetime
import io
import codecs
Code_Version = 3
#Time in H:M:S format
now = datetime.now()
Time = now.strftime("%I:%M:%S:%p")
#Date
Today_Date = date.today()
Date = Today_Date.strftime("(%A) %B %d, %Y")
try:
#Weather
page = requests.get('https://forecast.weather.gov/MapClick.php?lat=42.00900000000007&lon=-87.69495999999998')
soup = BeautifulSoup(page.text, 'html.parser')
except:
print("Weather.gov is not available")
try:
#Weather Type
weathertype = soup.find(class_='myforecast-current')
type = weathertype.contents[0]
type = type.encode('utf-8')
except:
type = "N/A"
try:
#Fahrenheit
weather = soup.find(class_='myforecast-current-lrg')
w = weather.contents[0]
w = w.encode('utf-8')
except:
w = "N/A"
try:
#Humidity
Humidity = soup.find_all('td')[0].get_text()
Hum_percent = soup.find_all('td')[1].get_text()
except:
Humidity = "N/A"
Hum_percent = "N/A"
try:
#Wind_Speed
W_Speed = soup.find_all('td')[2].get_text()
W_S = soup.find_all('td')[3].get_text()
except:
W_Speed = "N/A"
W_S = "N/A"
try:
#Wind_Chill
Wind_Chill = soup.find_all('td')[10].get_text()
Wind_Chill_num = soup.find_all('td')[11].get_text()
Wind_Chill = Wind_Chill.encode('utf-8')
Wind_Chill_num = Wind_Chill_num.encode('utf-8')
except:
Wind_Chill = "N/A"
Wind_Chill_num = "N/A"
try:
#Last_Update
Last_Update = soup.find_all('td')[12].get_text()
Last_Update_num = soup.find_all('td')[13].get_text()
except:
Last_Update = "N/A"
Last_Update_num = "N/A"
html_file = """
<h1 style="text-align: center;"><span style="text-decoration: underline;">Good Morning, Haris!</span></h1>
<h4 style="text-align: left;">Time:</h4>
<h4 style="text-align: left;">Date:</h4>
<h4>Code Version:</h4>
<hr />
<h3 style="font-size: 1.5em; text-align: center;"><span style="text-decoration: underline;"><span style="background-color: #00ccff;">Weather</span></span></h3>
<table style="margin-left: auto; margin-right: auto; height: 195px;" width="238">
<tbody>
<tr style="height: 7px;">
<td style="width: 228px; height: 7px;">Current Weather:</td>
</tr>
<tr style="height: 1px;">
<td style="width: 228px; height: 1px;">Weather Type:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Humidity:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Wind Speed:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Wind Chill:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Last Update:</td>
</tr>
</tbody>
</table>
<p style="font-size: 1.5em;"> </p>
<hr />
<h3 style="font-size: 1.5em; text-align: center;"><span style="text-decoration: underline; background-color: #cc99ff;">News</span></h3>
"""
html_file = html_file.replace('Time:','Current Time: ' + Time)
html_file = html_file.replace('Date:','Today\'s Date: ' + Date)
html_file = html_file.replace('Code Version:', 'Code Version: #' + str(Code_Version))
html_file = html_file.replace('Current Weather:','Current Weather: ' + w.decode('utf8'))
html_file = html_file.replace('Weather Type:','Weather Type: ' + type.decode('utf8'))
html_file = html_file.replace('Humidity:','Humidity: ' + Hum_percent)
html_file = html_file.replace('Wind Speed:','Wind Speed: ' + W_S)
html_file = html_file.replace('Wind Chill:','Wind Chill: ' + Wind_Chill_num.decode('utf-8'))
html_file = html_file.replace('Last Update:','Last Update: ' + Last_Update_num)
try:
response = urllib.request.urlopen('https://allinfosecnews.com/')
html = response.read()
except:
print("https://allinfosecnews.com/ is not available")
with io.open("website.html", 'w', encoding='utf8') as f:
f.write(html_file)
f.write(html.decode('utf-8'))
f.close()
print(w)
print(type)
print(Hum_percent)
print(W_Speed)
print(W_S)
print(Wind_Chill_num)
print(Last_Update_num)
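A small refactoring sketch (not in the original script) that collapses the repeated try/except blocks around soup.find_all('td') into one helper; the function name and default are assumptions:

def td_text(soup, index, default="N/A"):
    """Return the text of the index-th <td> cell, or a default if it is missing."""
    try:
        return soup.find_all('td')[index].get_text()
    except Exception:
        return default

# e.g. Hum_percent = td_text(soup, 1); W_S = td_text(soup, 3)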
| 28.919753
| 159
| 0.683458
| 713
| 4,685
| 4.374474
| 0.26648
| 0.051298
| 0.034626
| 0.046169
| 0.309715
| 0.204873
| 0.18756
| 0.166399
| 0.151331
| 0.151331
| 0
| 0.039158
| 0.127855
| 4,685
| 161
| 160
| 29.099379
| 0.72418
| 0.155603
| 0
| 0.25
| 0
| 0.044643
| 0.450851
| 0.044958
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.053571
| 0
| 0.053571
| 0.080357
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cbd5fce78146aae7cbddda0c039ec527c342db9
| 5,752
|
py
|
Python
|
apis.py
|
teemuja/ndp_app3
|
8a9517b2e2385640dc1a2c1baf0ae07cf630c89c
|
[
"MIT"
] | null | null | null |
apis.py
|
teemuja/ndp_app3
|
8a9517b2e2385640dc1a2c1baf0ae07cf630c89c
|
[
"MIT"
] | null | null | null |
apis.py
|
teemuja/ndp_app3
|
8a9517b2e2385640dc1a2c1baf0ae07cf630c89c
|
[
"MIT"
] | null | null | null |
# apis for ndp_d3
from owslib.wfs import WebFeatureService
import pandas as pd
import geopandas as gpd
import momepy
import streamlit as st


@st.cache(allow_output_mutation=True)
def pno_data(kunta, vuosi=2021):
    url = 'http://geo.stat.fi/geoserver/postialue/wfs'  # vaestoruutu tai postialue
    wfs = WebFeatureService(url=url, version="2.0.0")
    layer = f'postialue:pno_tilasto_{vuosi}'
    data_ = wfs.getfeature(typename=layer, outputFormat='json')  # propertyname=['kunta'],
    gdf_all = gpd.read_file(data_)
    noneed = ['id', 'euref_x', 'euref_y', 'pinta_ala']
    paavodata = gdf_all.drop(columns=noneed)
    kuntakoodit = pd.read_csv('config/kunta_dict.csv', index_col=False, header=0).astype(str)
    kuntakoodit['koodi'] = kuntakoodit['koodi'].str.zfill(3)
    kunta_dict = pd.Series(kuntakoodit.kunta.values, index=kuntakoodit.koodi).to_dict()
    paavodata = paavodata.replace({'kunta': kunta_dict})
    dict_feat = pd.read_csv('config/paavo2021_dict.csv', skipinitialspace=True, header=None, index_col=0, squeeze=True).to_dict()
    selkopaavo = paavodata.rename(columns=dict_feat).sort_values('Kunta')
    pno_valinta = selkopaavo[selkopaavo['Kunta'] == kunta].sort_values('Asukkaat yhteensä', ascending=False)
    return pno_valinta


@st.cache(allow_output_mutation=True)
def hri_data(pno):
    def make_bbox(pno, point_crs='4326', projected_crs='3857'):  # 3879
        poly = gpd.GeoSeries(pno.geometry)
        b = poly.to_crs(epsg=projected_crs)
        b = b.buffer(100)
        bbox = b.to_crs(epsg=point_crs).bounds
        bbox = bbox.reset_index(drop=True)
        bbox_tuple = bbox['minx'][0], bbox['miny'][0], bbox['maxx'][0], bbox['maxy'][0]
        return bbox_tuple
    bbox = make_bbox(pno) + tuple(['urn:ogc:def:crs:EPSG::4326'])
    url = 'https://kartta.hsy.fi/geoserver/wfs'
    wfs = WebFeatureService(url=url, version="2.0.0")
    layer = 'ilmasto_ja_energia:rakennukset'
    data = wfs.getfeature(typename=layer, bbox=bbox, outputFormat='json')
    gdf = gpd.read_file(data)
    # columns to keep
    columns = ['kuntanimi', 'valm_v', 'kerrosala', 'kerrosluku', 'kayt_luok', 'kayttark', 'geometry']
    # overlay with pno area & use only columns
    gdf_pno = pno.to_crs(3067).overlay(gdf.to_crs(3067), how='intersection')[columns]  # .to_crs(4326)
    gdf_pno.rename(columns={'valm_v': 'rakennusvuosi',
                            'kayt_luok': 'rakennustyyppi',
                            'kayttark': 'tarkenne',
                            }, inplace=True)
    gdf_out = gdf_pno.to_crs(epsg=4326)
    return gdf_out


@st.cache(allow_output_mutation=True)
def densities(buildings):
    # projected crs for momepy calculations & prepare for housing
    gdf_ = buildings.to_crs(3857)
    # check kerrosala data and use footprint if nan/zero
    gdf_['kerrosala'] = pd.to_numeric(gdf_['kerrosala'], errors='coerce', downcast='float')
    gdf_['kerrosala'].fillna(gdf_.area, inplace=True)
    gdf_.loc[gdf_['kerrosala'] == 0, 'kerrosala'] = gdf_.area
    # add footprint area
    gdf_['rakennusala'] = gdf_.area
    #gdf_.loc[:,gdf_['rakennusala']] = gdf_.area
    # exclude some utility building types
    no_list = ['Muut rakennukset', 'Palo- ja pelastustoimen rakennukset', 'Varastorakennukset']
    yes_serie = ~gdf_.rakennustyyppi.isin(no_list)
    gdf = gdf_[yes_serie]
    # prepare momepy..
    gdf['uID'] = momepy.unique_id(gdf)
    limit = momepy.buffered_limit(gdf)
    tessellation = momepy.Tessellation(gdf, unique_id='uID', limit=limit).tessellation
    # calculate GSI = ground space index = coverage = CAR = coverage area ratio
    tess_GSI = momepy.AreaRatio(tessellation, gdf,
                                momepy.Area(tessellation).series,
                                momepy.Area(gdf).series, 'uID')
    gdf['GSI'] = round(tess_GSI.series, 3)
    # calculate FSI = floor space index = FAR = floor area ratio
    gdf['FSI'] = round(gdf['kerrosala'] / momepy.Area(tessellation).series, 3)
    # calculate OSR = open space ratio = spaciousness
    gdf['OSR'] = round((1 - gdf['GSI']) / gdf['FSI'], 3)
    # ND calculations
    # queen contiguity for 2 degree neighbours = "perceived neighborhood"
    tessellation = tessellation.merge(gdf[['uID', 'rakennusala', 'kerrosala', 'OSR']])  # add selected values from buildings to tess-areas
    sw = momepy.sw_high(k=2, gdf=tessellation, ids='uID')  # degree of nd
    gdf['GSI_ND'] = round(momepy.Density(tessellation, values='rakennusala', spatial_weights=sw, unique_id='uID').series, 2)
    gdf['FSI_ND'] = round(momepy.Density(tessellation, values='kerrosala', spatial_weights=sw, unique_id='uID').series, 2)
    gdf['OSR_ND'] = round((1 - gdf['GSI_ND']) / gdf['FSI_ND'], 2)
    gdf['OSR_ND_mean'] = round(momepy.AverageCharacter(tessellation, values='OSR', spatial_weights=sw, unique_id='uID').mean, 2)
    # remove infinite values of osr if needed..
    gdf['OSR_ND'].clip(upper=gdf['OSR'].quantile(0.99), inplace=True)
    gdf['OSR_ND_mean'].clip(upper=gdf['OSR'].quantile(0.99), inplace=True)
    gdf_out = gdf.to_crs(4326)
    return gdf_out


@st.cache(allow_output_mutation=True)
def tess_boundaries(buildings):
    # projected crs for momepy calculations & prepare for housing
    gdf_ = buildings.to_crs(3857)
    gdf_['kerrosala'] = pd.to_numeric(gdf_['kerrosala'], errors='coerce', downcast='float')
    gdf_['kerrosala'].fillna(gdf_.area, inplace=True)
    no_list = ['Muut rakennukset', 'Palo- ja pelastustoimen rakennukset', 'Varastorakennukset']
    yes_serie = ~gdf_.rakennustyyppi.isin(no_list)  # exclude some types
    gdf = gdf_[yes_serie]
    gdf['uID'] = momepy.unique_id(gdf)
    limit = momepy.buffered_limit(gdf)
    tessellation = momepy.Tessellation(gdf, unique_id='uID', limit=limit).tessellation
    return tessellation.to_crs(4326)
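A minimal pipeline sketch chaining the three cached loaders; the municipality name is a placeholder, the HSY building layer only covers the Helsinki region, and running outside `streamlit run` merely triggers a cache warning:

import apis

pno = apis.pno_data('Helsinki')  # Paavo postal-code polygons for one municipality
area = pno.iloc[[0]]             # pick one postal-code area (most populous first)
buildings = apis.hri_data(area)  # HSY building footprints clipped to the area
dens = apis.densities(buildings) # momepy GSI/FSI/OSR per building tessellation
print(dens[['GSI', 'FSI', 'OSR']].describe())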
| 52.770642
| 134
| 0.685327
| 766
| 5,752
| 4.979112
| 0.302872
| 0.01311
| 0.014421
| 0.018878
| 0.362611
| 0.340063
| 0.313057
| 0.295752
| 0.295752
| 0.254851
| 0
| 0.019026
| 0.168463
| 5,752
| 108
| 135
| 53.259259
| 0.778382
| 0.141516
| 0
| 0.298851
| 0
| 0
| 0.173789
| 0.026659
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057471
| false
| 0
| 0.057471
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cbec5b44846435b33e0ef20ab76a5f6a4ef6c68
| 6,471
|
py
|
Python
|
test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# vim:expandtab:ts=3:sw=3

# @file testIncrStatus.py
# @brief Test SOAP calls to incrAddSentence using a deployed PortageLive web server.
#
# @author Samuel Larkin
#
# Traitement multilingue de textes / Multilingual Text Processing
# Tech. de l'information et des communications / Information and Communications Tech.
# Conseil national de recherches Canada / National Research Council Canada
# Copyright 2016, Sa Majeste la Reine du Chef du Canada /
# Copyright 2016, Her Majesty in Right of Canada

from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import

#import zeep
#client = zeep.Client(wsdl=url)
from suds.cache import DocumentCache
from suds.client import Client
from suds import WebFault

import unittest
import logging
import requests
import time
import random
import os
import sys
import shutil

logging.basicConfig(level=logging.CRITICAL)
# If you need to debug what is happening, uncomment the following line
#logging.basicConfig(level=logging.DEBUG)

url = 'http://127.0.0.1'


class TestIncrAddSentence(unittest.TestCase):
    """
    Using PortageLiveAPI's WSDL deployed on a web server, we test SOAP calls to
    incrAddSentence().
    """

    def __init__(self, *args, **kwargs):
        super(TestIncrAddSentence, self).__init__(*args, **kwargs)
        DocumentCache().clear()
        # The default port must be a string, or the concatenation below fails.
        self.url = url + ':' + os.getenv('PHP_PORT', '8756')
        self.WSDL = self.url + '/PortageLiveAPI.wsdl'
        self.client = Client(self.WSDL)
        self.context = 'unittest.rev.en-fr'
        self.document_model_id = 'PORTAGE_UNITTEST_4da35'
        self.source_sentence = "'home'"
        self.target_sentence = '"maison"'
        self.document_model_dir = os.path.join("doc_root", "plive",
                "DOCUMENT_MODEL_" + self.context + '_' + self.document_model_id)
        if os.path.isdir(self.document_model_dir):
            shutil.rmtree(self.document_model_dir)

    def test_01_no_argument(self):
        """
        incrAddSentence() should warn the user that it needs some parameters.
        """
        with self.assertRaises(WebFault) as cm:
            self.client.service.incrAddSentence()
        self.assertEqual(cm.exception.message, "Server raised fault: 'Missing parameter'")

    def test_02_all_arguments_null(self):
        """
        incrAddSentence() expects 3 arguments that cannot be None/NULL.
        """
        with self.assertRaises(WebFault) as cm:
            self.client.service.incrAddSentence(None, None, None, None, None)
        self.assertEqual(cm.exception.message, "Server raised fault: 'Missing parameter'")

    def test_03_no_document_model_id(self):
        """
        It is invalid to use the empty string as document level model ID.
        """
        with self.assertRaises(WebFault) as cm:
            self.client.service.incrAddSentence(self.context, '', '', '')
        self.assertEqual(cm.exception.message,
                "Server raised fault: 'You must provide a valid document_model_id.'")

    def test_04_no_source_sentence(self):
        """
        The source sentence cannot be empty.
        """
        with self.assertRaises(WebFault) as cm:
            self.client.service.incrAddSentence(self.context,
                    self.document_model_id, '', '')
        self.assertEqual(cm.exception.message,
                "Server raised fault: 'You must provide a source sentence.'")

    def test_05_no_target_sentence(self):
        """
        The target sentence cannot be empty.
        """
        with self.assertRaises(WebFault) as cm:
            self.client.service.incrAddSentence(self.context,
                    self.document_model_id,
                    self.source_sentence, '')
        self.assertEqual(cm.exception.message,
                "Server raised fault: 'You must provide a target sentence.'")

    @unittest.skip("Should we check for too many parameters?")
    def test_06_too_many_parameters(self):
        """
        TODO: Should we get some sort of message if we provide an invalid number
        of arguments
        """
        with self.assertRaises(WebFault) as cm:
            self.client.service.incrAddSentence(self.context,
                    self.document_model_id,
                    self.source_sentence,
                    self.target_sentence,
                    'extra_dummy_argument')
        self.assertEqual(cm.exception.message,
                "Server raised fault: 'You must provide a target sentence.'")

    def test_07_basic_valid_usage(self):
        """
        This tests a valid call to incrAddSentence() where
        document_model_id is valid, source sentence is valid and target
        sentence is also valid.
        - The SOAP call should return true since it's supposed to be able to add
          this sentence pair to the queue.
        - The training phase should have inserted the sentence pair in the
          corpora.
        """
        UID = str(random.randint(0, 100000))
        source = self.source_sentence + str(time.time()) + UID
        target = self.target_sentence + str(time.time()) + UID
        result = self.client.service.incrAddSentence(self.context,
                self.document_model_id,
                source, target)
        self.assertEqual(result, True, 'SOAP call failed to add a sentence pair')
        r = requests.get(self.url + '/plive/DOCUMENT_MODEL_' + self.context + '_' + self.document_model_id + '/corpora')
        self.assertEqual(r.status_code, 200,
                "Failed to fetch the corpora file for: " + self.document_model_id)
        ref_sentence_pair = '\t'.join((source, target))
        sentence_pairs = tuple(l.split('\t', 1)[-1] for l in r.text.split('\n'))
        self.assertEqual(sentence_pairs.count(ref_sentence_pair), 1,
                "Expected exactly one occurrence of our sentence pair in corpora.")
        # Let incremental training finish.
        time.sleep(3)
        with open(os.path.join(self.document_model_dir, "incr-update.status"), "r") as sf:
            status = sf.read().strip()
        self.assertEqual(status, '0',
                "0 exit status for incr-update.sh not found in incr-update.status.")


if __name__ == '__main__':
    unittest.main()
| 36.767045
| 118
| 0.637923
| 778
| 6,471
| 5.155527
| 0.330334
| 0.055098
| 0.05086
| 0.037896
| 0.314884
| 0.288955
| 0.288955
| 0.288955
| 0.288955
| 0.265021
| 0
| 0.011402
| 0.268119
| 6,471
| 175
| 119
| 36.977143
| 0.835515
| 0.225622
| 0
| 0.222222
| 0
| 0
| 0.162224
| 0.009163
| 0
| 0
| 0
| 0.005714
| 0.177778
| 1
| 0.088889
| false
| 0
| 0.166667
| 0
| 0.266667
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cbf25669395a89790375a19545ba5be63026880
| 1,919
|
py
|
Python
|
Cryptography/Caesar_Cipher.py
|
hari40009/learnpython
|
b75e700f62f49ab9d8fef607ebd87a34c5cb6530
|
[
"MIT"
] | 1
|
2018-11-07T04:13:52.000Z
|
2018-11-07T04:13:52.000Z
|
Cryptography/Caesar_Cipher.py
|
engineerprogrammer/learnpython
|
140acfd8fc6345745a9b274baaa1e58ea3217f9f
|
[
"MIT"
] | null | null | null |
Cryptography/Caesar_Cipher.py
|
engineerprogrammer/learnpython
|
140acfd8fc6345745a9b274baaa1e58ea3217f9f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" A program to use a Caesar cipher based on user input for the shift value """

MAX_SHIFT = 26


def whatMode():
    """ Finds out what the user wants to do """
    while True:
        print("Do you wish to encrypt, decrypt or brute force a message: ")
        mode = input().lower()
        if mode in "encrypt e decrypt d brute b".split():
            return mode[0]
        else:
            print("Enter '[E]ncrypt', '[D]ecrypt' or [B]rute")


def plainMessage():
    """ Gets a string from the user """
    print("Message: ")
    return input()


def getKey():
    """ Gets a shift value from the user """
    shiftKey = 0
    while True:
        print("Enter shift key (1-%s) " % (MAX_SHIFT))
        shiftKey = int(input())
        if (shiftKey >= 1 and shiftKey <= MAX_SHIFT):
            return shiftKey


def cryptMessage(mode, message, shiftKey):
    """ The encryption / decryption action is here """
    if mode[0] == 'd':
        shiftKey = -shiftKey
    translated = ''
    for symbol in message:  # The encryption stuff
        if symbol.isalpha():
            num = ord(symbol)
            num += shiftKey
            if symbol.isupper():
                if num > ord('Z'):
                    num -= 26
                elif num < ord('A'):
                    num += 26
            elif symbol.islower():
                if num > ord('z'):
                    num -= 26
                elif num < ord('a'):
                    num += 26
            translated += chr(num)
        else:
            translated += symbol
    return translated


mode = whatMode()
message = plainMessage()
if mode[0] != 'b':
    shiftKey = getKey()
print('Your translated text is:')
if mode[0] != 'b':  # Brute force settings
    print(cryptMessage(mode, message, shiftKey))
else:
    for shiftKey in range(1, MAX_SHIFT + 1):
        print(shiftKey, cryptMessage('decrypt', message, shiftKey))
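A quick round-trip sketch using cryptMessage() directly; since the bottom of the script runs interactively on import, this assumes the function has been pasted into a session or the interactive part has been guarded:

cipher = cryptMessage('e', 'Attack at dawn', 3)
print(cipher)                        # 'Dwwdfn dw gdzq'
print(cryptMessage('d', cipher, 3))  # 'Attack at dawn'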
| 27.028169
| 80
| 0.532569
| 230
| 1,919
| 4.426087
| 0.369565
| 0.02947
| 0.020629
| 0.060904
| 0.058939
| 0.058939
| 0.058939
| 0.058939
| 0.058939
| 0.058939
| 0
| 0.015848
| 0.342366
| 1,919
| 70
| 81
| 27.414286
| 0.790808
| 0.145909
| 0
| 0.215686
| 0
| 0
| 0.123288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0
| 0
| 0.156863
| 0.137255
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cc3cc243655d3b808c34d010f7d4b9e190e610a
| 494
|
py
|
Python
|
leetcode/python/medium/p046_permute.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
leetcode/python/medium/p046_permute.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
leetcode/python/medium/p046_permute.py
|
kefirzhang/algorithms
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
    def __init__(self):
        self.res = []

    def permute(self, nums):
        self.backTrack(nums, [])
        return self.res

    def backTrack(self, nums, track):
        if len(nums) == len(track):
            self.res.append(track[:])
            return
        for i in nums:
            if i in track:
                continue
            track.append(i)
            self.backTrack(nums, track)
            track.remove(i)


slu = Solution()
print(slu.permute([1]))
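A fuller check; note that self.res accumulates across calls, so a fresh Solution is needed per input (output order follows the depth-first backtracking):

slu = Solution()
print(slu.permute([1, 2, 3]))
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]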
| 22.454545
| 39
| 0.506073
| 57
| 494
| 4.315789
| 0.385965
| 0.085366
| 0.081301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003226
| 0.37247
| 494
| 21
| 40
| 23.52381
| 0.790323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cc9578bf937313ea3ce810099e43cb50d90651a
| 634
|
py
|
Python
|
ribosome/compute/ribosome.py
|
tek/ribosome-py
|
8bd22e549ddff1ee893d6e3a0bfba123a09e96c6
|
[
"MIT"
] | null | null | null |
ribosome/compute/ribosome.py
|
tek/ribosome-py
|
8bd22e549ddff1ee893d6e3a0bfba123a09e96c6
|
[
"MIT"
] | null | null | null |
ribosome/compute/ribosome.py
|
tek/ribosome-py
|
8bd22e549ddff1ee893d6e3a0bfba123a09e96c6
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Generic, TypeVar, Type

from lenses import UnboundLens
from amino import Dat

from ribosome.data.plugin_state import PluginState

D = TypeVar('D')
CC = TypeVar('CC')
C = TypeVar('C')


class Ribosome(Generic[D, CC, C], Dat['Ribosome[D, CC, C]']):

    def __init__(
            self,
            state: PluginState[D, CC],
            comp_type: Type[C],
            comp_lens: UnboundLens['Ribosome[D, CC, C]', 'Ribosome[D, CC, C]', C, C],
    ) -> None:
        self.state = state
        self.comp_type = comp_type
        self.comp_lens = comp_lens


__all__ = ('Ribosome',)
| 21.862069
| 85
| 0.621451
| 85
| 634
| 4.411765
| 0.329412
| 0.048
| 0.042667
| 0.096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.250789
| 634
| 28
| 86
| 22.642857
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.104101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.263158
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cc96d6bfddb10586b88d9ad0d7b86bd5ca4e9aa
| 1,431
|
py
|
Python
|
pythonstartup.py
|
avisilver/util_scripts
|
ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508
|
[
"MIT"
] | null | null | null |
pythonstartup.py
|
avisilver/util_scripts
|
ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508
|
[
"MIT"
] | null | null | null |
pythonstartup.py
|
avisilver/util_scripts
|
ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508
|
[
"MIT"
] | null | null | null |
# Add auto-completion and a stored history file of commands to your Python
# interactive interpreter. Requires Python 2.0+, readline. Autocomplete is
# bound to the Esc key by default (you can change it - see readline docs).
#
# Store the file in ~/.pystartup, and set an environment variable to point
# to it: "export PYTHONSTARTUP=/home/user/.pystartup" in bash.
#
# Note that PYTHONSTARTUP does *not* expand "~", so you have to put in the
# full path to your home directory.
import atexit
import os
import readline
import rlcompleter

historyPath = os.path.expanduser("~/.pyhistory")


def save_history(historyPath=historyPath):
    import readline
    readline.write_history_file(historyPath)


if os.path.exists(historyPath):
    readline.read_history_file(historyPath)

atexit.register(save_history)
readline.parse_and_bind('tab: complete')
del os, atexit, readline, rlcompleter, save_history, historyPath


def dirp(object_or_module):
    """dirp(object_or_module) -> string

    Print the object's or currently imported module's attributes as shown
    in dir() on separate lines with docstrings"""
    for attr in dir(object_or_module):
        doc = object_or_module.__getattribute__(attr).__doc__
        doc = doc if doc else ""
        # indent each docstring line so it reads as a block under the name
        # (the flattened source joined the split lines back unchanged)
        indented_doc = "\n".join("    " + line for line in doc.split("\n"))
        print("\n{line}\n{attr}\n{doc}".format(
            line="-" * 80,
            attr=attr,
            doc=indented_doc
        ))
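# --- Hedged usage sketch (added; not part of the original file) ---
# From an interactive session started with this file as PYTHONSTARTUP:
#
#     >>> import os
#     >>> dirp(os.path)   # prints each attribute with its indented docstring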
| 31.108696
| 74
| 0.709294
| 199
| 1,431
| 4.964824
| 0.532663
| 0.032389
| 0.05668
| 0.036437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003451
| 0.190077
| 1,431
| 45
| 75
| 31.8
| 0.849008
| 0.425577
| 0
| 0.086957
| 0
| 0
| 0.06625
| 0.02875
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.217391
| 0
| 0.304348
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ccda61294b042b9301d3115e54f9eaad129e0a8
| 2,200
|
py
|
Python
|
core/cliqueIntersectionGraph.py
|
ongmingyang/some-max-cut
|
7ebabd06d3e46789a3672bd516adc48953ba135e
|
[
"MIT"
] | 3
|
2018-03-16T17:25:23.000Z
|
2021-04-27T21:42:31.000Z
|
core/cliqueIntersectionGraph.py
|
ongmingyang/some-max-cut
|
7ebabd06d3e46789a3672bd516adc48953ba135e
|
[
"MIT"
] | null | null | null |
core/cliqueIntersectionGraph.py
|
ongmingyang/some-max-cut
|
7ebabd06d3e46789a3672bd516adc48953ba135e
|
[
"MIT"
] | null | null | null |
import sys
from clique import Clique
from cvxopt import spmatrix, amd
from collections import defaultdict as dd
import chompack as cp
from util.graph import Graph

LARGEST_CLIQUE_SIZE = 24

#
# A CliqueIntersectionGraph is a graph (V,E), where V is a set of cliques, each
# bag containing a clique, and (i,j) in E if clique i and clique j have a non
# empty sepset
#
# @param I,J,W  (I[i],J[i]) is an edge in the original graph with weight
#               W[i]. We require I > J
#
class CliqueIntersectionGraph(Graph):
    def __init__(self, I, J, W):
        Graph.__init__(self)
        self.cliques = self.nodes  # We use a different alias to prevent confusion
        n = max(max(I), max(J)) + 1
        eye = spmatrix(1, range(n), range(n))
        A = spmatrix(W, I, J, (n, n)) + eye
        self.n = n

        # Compute symbolic factorization using AMD ordering
        # This automatically does a chordal completion on the graph
        symb = cp.symbolic(A, p=amd.order)

        # The factorization permutes the node indices, we need to unpermute these
        cliques = symb.cliques()
        perm = symb.p
        cliques = [[perm[i] for i in clique] for clique in cliques]

        # If the largest clique is above threshold, we terminate the algorithm
        self.max_clique_size = max(len(x) for x in cliques)
        if self.max_clique_size > LARGEST_CLIQUE_SIZE:
            sys.exit('''
              Chordal completion has clique of size %d,
              Max allowed size is %d,
              Program terminating...
            ''' % (self.max_clique_size, LARGEST_CLIQUE_SIZE))

        node_to_clique = dd(list)

        # Instantiate cliques and fill node_to_clique entries
        for index, nodes in enumerate(cliques):
            clique = Clique(index, nodes, A)
            for node in nodes:
                node_to_clique[node].append(clique)
            self.cliques.append(clique)

        # Update list of neighbours after node_to_clique entries are filled
        for clique in self.cliques:
            for node in clique.nodes:
                neighbours = list(node_to_clique[node])
                neighbours.remove(clique)

                # Add edge to edgeset
                for neighbour in neighbours:
                    edge = tuple(sorted([neighbour.index, clique.index]))
                    self.edges[edge] = clique.determine_sepset_size(neighbour)
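# --- Hedged usage sketch (added; not part of the original file) ---
# Building the clique intersection graph of a weighted triangle
# (requires cvxopt and chompack; edge lists follow the I > J convention
# noted in the class comment):
#
#     >>> g = CliqueIntersectionGraph([1, 2, 2], [0, 0, 1], [1.0, 1.0, 1.0])
#     >>> g.max_clique_size   # a triangle is already chordal: one clique of 3
#     3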
| 33.333333
| 79
| 0.678636
| 331
| 2,200
| 4.413897
| 0.359517
| 0.041068
| 0.041068
| 0.034908
| 0.046543
| 0.046543
| 0.046543
| 0
| 0
| 0
| 0
| 0.002375
| 0.234545
| 2,200
| 65
| 80
| 33.846154
| 0.865202
| 0.325
| 0
| 0
| 0
| 0
| 0.07771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.153846
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ccdd8c975b584a486aac3e7fbb9b1d2ae39487f
| 4,586
|
py
|
Python
|
backend/src/baserow/contrib/database/airtable/tasks.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/airtable/tasks.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/airtable/tasks.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
import logging

from django.conf import settings

from baserow.config.celery import app

logger = logging.getLogger(__name__)


@app.task(
    bind=True,
    queue="export",
    soft_time_limit=settings.BASEROW_AIRTABLE_IMPORT_SOFT_TIME_LIMIT,
)
def run_import_from_airtable(self, job_id: int):
    """
    Starts the Airtable import job. This task must run after the job has been created.

    :param job_id: The id related to the job that must be started.
    """

    from celery.exceptions import SoftTimeLimitExceeded
    from pytz import timezone as pytz_timezone
    from requests.exceptions import RequestException

    from django.db import transaction
    from django.core.cache import cache

    from baserow.core.signals import application_created
    from baserow.core.utils import Progress
    from baserow.contrib.database.airtable.models import AirtableImportJob
    from baserow.contrib.database.airtable.handler import AirtableHandler
    from baserow.contrib.database.airtable.exceptions import AirtableBaseNotPublic
    from baserow.contrib.database.airtable.constants import (
        AIRTABLE_EXPORT_JOB_DOWNLOADING_FAILED,
        AIRTABLE_EXPORT_JOB_DOWNLOADING_FINISHED,
    )

    from .cache import airtable_import_job_progress_key

    job = AirtableImportJob.objects.select_related("group").get(id=job_id)

    def progress_updated(percentage, state):
        """
        Every time the progress of the import changes, this callback function is
        called. If the percentage or the state has changed, the job will be updated.
        """

        nonlocal job

        # `changed` was referenced before assignment in the flattened source
        changed = False
        if job.progress_percentage != percentage:
            job.progress_percentage = percentage
            changed = True
        if state is not None and job.state != state:
            job.state = state
            changed = True
        if changed:
            # The progress must also be stored in the Redis cache. Because we're
            # currently in a transaction, other database connections don't know about
            # the progress and this way, we can still communicate it to the user.
            cache.set(
                airtable_import_job_progress_key(job.id),
                {"progress_percentage": job.progress_percentage, "state": job.state},
                timeout=None,
            )
            job.save()

    progress = Progress(100)
    progress.register_updated_event(progress_updated)

    kwargs = {}
    if job.timezone is not None:
        kwargs["timezone"] = pytz_timezone(job.timezone)

    try:
        with transaction.atomic():
            databases, id_mapping = AirtableHandler.import_from_airtable_to_group(
                job.group,
                job.airtable_share_id,
                progress_builder=progress.create_child_builder(
                    represents_progress=progress.total
                ),
                **kwargs
            )

            # The web-frontend needs to know about the newly created database, so we
            # call the application_created signal.
            for database in databases:
                application_created.send(self, application=database, user=None)

            job.state = AIRTABLE_EXPORT_JOB_DOWNLOADING_FINISHED
            job.database = databases[0]
            # Don't override the other properties that have been set during the
            # progress update.
            job.save(update_fields=("state", "database"))
    except Exception as e:
        exception_mapping = {
            SoftTimeLimitExceeded: "The import job took too long and was timed out.",
            RequestException: "The Airtable server could not be reached.",
            AirtableBaseNotPublic: "The Airtable base is not publicly shared.",
        }
        error = "Something went wrong while importing the Airtable base."

        for exception, error_message in exception_mapping.items():
            if isinstance(e, exception):
                error = error_message
                break

        logger.error(e)

        job.state = AIRTABLE_EXPORT_JOB_DOWNLOADING_FAILED
        job.error = str(e)
        job.human_readable_error = error
        # Don't override the other properties that have been set during the
        # progress update.
        job.save(
            update_fields=(
                "state",
                "error",
                "human_readable_error",
            )
        )

    # Delete the import job cached entry because the transaction has been committed
    # and the AirtableImportJob entry now contains the latest data.
    cache.delete(airtable_import_job_progress_key(job.id))
| 35.276923
| 86
| 0.657872
| 531
| 4,586
| 5.531073
| 0.34275
| 0.026217
| 0.023153
| 0.03541
| 0.195097
| 0.120191
| 0.085121
| 0.062649
| 0.062649
| 0.062649
| 0
| 0.001211
| 0.279546
| 4,586
| 129
| 87
| 35.550388
| 0.887712
| 0.199738
| 0
| 0.02381
| 0
| 0
| 0.074813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.27381
| 0
| 0.297619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd0a4bbec748d6e33fb26e96ae01249982c0522
| 7,439
|
py
|
Python
|
d2lbook/notebook.py
|
naoufelito/d2l-book
|
bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a
|
[
"Apache-2.0"
] | null | null | null |
d2lbook/notebook.py
|
naoufelito/d2l-book
|
bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a
|
[
"Apache-2.0"
] | 1
|
2020-06-06T06:34:03.000Z
|
2020-06-06T07:01:56.000Z
|
d2lbook/notebook.py
|
naoufelito/d2l-book
|
bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a
|
[
"Apache-2.0"
] | null | null | null |
"""utilities to handle notebooks"""
from typing import Union, List, Optional
import copy
import notedown
import nbformat
import nbconvert
from nbformat import notebooknode
from d2lbook import markdown
from d2lbook import common
def create_new_notebook(nb: notebooknode.NotebookNode,
cells: List[notebooknode.NotebookNode]
) -> notebooknode.NotebookNode:
"""create an empty notebook by copying metadata from nb"""
new_nb = copy.deepcopy(nb)
new_nb.cells = cells
return new_nb
def read_markdown(source: Union[str, List[str]]) -> notebooknode.NotebookNode:
"""Returns a notebook from markdown source"""
if not isinstance(source, str):
source = '\n'.join(source)
reader = notedown.MarkdownReader(match='strict')
return reader.reads(source)
def split_markdown_cell(nb: notebooknode.NotebookNode) -> notebooknode.NotebookNode:
"""split a markdown cell if it contains tab block.
a new property `class` is added to the metadata for a tab cell.
"""
# merge continous markdown cells
grouped_cells = common.group_list(nb.cells,
lambda cell, _: cell.cell_type=='markdown')
new_cells = []
for is_md, group in grouped_cells:
if not is_md:
new_cells.extend(group)
else:
src = '\n\n'.join(cell.source for cell in group)
md_cells = markdown.split_markdown(src)
is_tab_cell = lambda cell, _: cell['type']=='markdown' and 'class' in cell
grouped_md_cells = common.group_list(md_cells, is_tab_cell)
for is_tab, md_group in grouped_md_cells:
new_cell = nbformat.v4.new_markdown_cell(
markdown.join_markdown_cells(md_group))
if is_tab:
tab = md_group[0]['class']
assert tab.startswith('`') and tab.endswith('`'), tab
new_cell.metadata['tab'] = tab[1:-1]
new_cells.append(new_cell)
new_cells = [cell for cell in new_cells if cell.source]
return create_new_notebook(nb, new_cells)
def _get_cell_tab(cell: notebooknode.NotebookNode, default_tab: str='') -> Optional[str]:
"""Get the cell tab"""
if 'tab' in cell.metadata:
return cell.metadata['tab']
if cell.cell_type != 'code':
return None
match = common.source_tab_pattern.search(cell.source)
if match:
return match[1]
return default_tab
def get_tab_notebook(nb: notebooknode.NotebookNode, tab: str, default_tab: str
) -> notebooknode.NotebookNode:
"""Returns a notebook with code/markdown cells that doesn't match tab
removed.
Return None if no cell matched the tab and nb contains code blocks.
A `origin_pos` property is added to the metadata for each cell, which
records its position in the original notebook `nb`.
"""
matched_tab = False
new_cells = []
for i, cell in enumerate(nb.cells):
new_cell = copy.deepcopy(cell)
new_cell.metadata['origin_pos'] = i
cell_tab = _get_cell_tab(new_cell, default_tab)
if not cell_tab:
new_cells.append(new_cell)
else:
if cell_tab == tab:
new_cell.metadata['tab'] = cell_tab
matched_tab = True
# remove the tab from source
lines = new_cell.source.split('\n')
for j, line in enumerate(lines):
src_tab = common.source_tab_pattern.search(line)
text_tab = common.md_mark_pattern.search(line)
if src_tab or (text_tab and (
text_tab[1]=='begin_tab' or text_tab[1]=='end_tab')):
del lines[j]
new_cell.source = '\n'.join(lines)
new_cells.append(new_cell)
if not matched_tab and any([cell.cell_type=='code' for cell in nb.cells]):
return None
return create_new_notebook(nb, new_cells)
def merge_tab_notebooks(src_notebooks: List[notebooknode.NotebookNode]
) -> notebooknode.NotebookNode:
"""Merge the tab notebooks into a single one.
The reserved function of get_tab_notebook.
"""
n = max([max([cell.metadata['origin_pos'] for cell in nb.cells])
for nb in src_notebooks])
new_cells = [None] * (n+1)
for nb in src_notebooks:
for cell in nb.cells:
new_cells[cell.metadata['origin_pos']] = copy.deepcopy(cell)
return create_new_notebook(src_notebooks[0], new_cells)
def _get_tab_bar(tabs, tab_id, default_tab, div_class=''):
code = f"```eval_rst\n\n.. raw:: html\n\n <div class=\"mdl-tabs mdl-js-tabs mdl-js-ripple-effect\"><div class=\"mdl-tabs__tab-bar {div_class}\">"
for i, tab in enumerate(tabs):
active = 'is-active' if tab == default_tab else ''
code +=f'<a href="#{tab}-{tab_id}-{i}" class="mdl-tabs__tab {active}">{tab}</a>'
code += "</div>\n```"
return nbformat.v4.new_markdown_cell(code)
def _get_tab_panel(cells, tab, tab_id, default_tab):
active = 'is-active' if tab == default_tab else ''
tab_panel_begin = nbformat.v4.new_markdown_cell(
f"```eval_rst\n.. raw:: html\n\n <div class=\"mdl-tabs__panel {active}\" id=\"{tab}-{tab_id}\">\n```")
tab_panel_end = nbformat.v4.new_markdown_cell(
"```eval_rst\n.. raw:: html\n\n </div>\n```")
return [tab_panel_begin, *cells, tab_panel_end]
def _merge_tabs(nb: notebooknode.NotebookNode):
"""merge side-by-side tabs into a single one"""
def _tab_status(cell, status):
if _get_cell_tab(cell):
return 1 if cell.cell_type == 'markdown' else 2
return 0
cell_groups = common.group_list(nb.cells, _tab_status)
meta = [(in_tab, [cell.metadata['tab'] for cell in group] if in_tab else None
) for in_tab, group in cell_groups]
new_cells = []
i = 0
while i < len(meta):
in_tab, tabs = meta[i]
if not in_tab:
new_cells.append((False, cell_groups[i][1]))
i += 1
else:
j = i + 1
while j < len(meta):
if meta[j][1] != tabs:
break
j += 1
groups = [group for _, group in cell_groups[i:j]]
new_cells.append((True, [x for x in zip(*groups)]))
i = j
return new_cells
def add_html_tab(nb: notebooknode.NotebookNode, default_tab: str) -> notebooknode.NotebookNode:
"""Add html codes for the tabs"""
cell_groups = _merge_tabs(nb)
tabs = [len(group) for in_tab, group in cell_groups if in_tab]
if not tabs or max(tabs) <= 1:
return nb
new_cells = []
for i, (in_tab, group) in enumerate(cell_groups):
if not in_tab:
new_cells.extend(group)
else:
tabs = [cells[0].metadata['tab'] for cells in group]
div_class = "code" if group[0][0].cell_type == 'code' == 2 else "text"
new_cells.append(_get_tab_bar(tabs, i, default_tab, div_class))
for j, (tab, cells) in enumerate(zip(tabs, group)):
new_cells.extend(_get_tab_panel(cells, tab, f'{i}-{j}', default_tab))
new_cells.append(nbformat.v4.new_markdown_cell(
"```eval_rst\n.. raw:: html\n\n </div>\n```"))
return create_new_notebook(nb, new_cells)
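# --- Hedged usage sketch (added; not part of the original file) ---
# Splitting markdown into notebook cells (assumes notedown and nbformat
# are installed; tab markers follow the :begin_tab:/:end_tab: convention
# these helpers parse):
#
#     >>> nb = read_markdown('# Title\n\nShared text for every tab.')
#     >>> nb = split_markdown_cell(nb)
#     >>> [cell.cell_type for cell in nb.cells]
#     ['markdown']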
| 41.099448
| 152
| 0.609894
| 1,027
| 7,439
| 4.211295
| 0.145083
| 0.042543
| 0.022659
| 0.024277
| 0.276994
| 0.12578
| 0.086936
| 0.069133
| 0.035607
| 0.024046
| 0
| 0.005376
| 0.274903
| 7,439
| 180
| 153
| 41.327778
| 0.79644
| 0.098938
| 0
| 0.176056
| 0
| 0.007042
| 0.068762
| 0.003929
| 0
| 0
| 0
| 0
| 0.007042
| 1
| 0.077465
| false
| 0
| 0.056338
| 0
| 0.253521
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd1756adb8c57eb1928457d00bc92c25a43ba4c
| 1,204
|
py
|
Python
|
myamiweb/imcache/imcacheconfig.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
myamiweb/imcache/imcacheconfig.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
myamiweb/imcache/imcacheconfig.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | 1
|
2019-09-05T20:58:37.000Z
|
2019-09-05T20:58:37.000Z
|
# config file for imcached

# camera name pattern to cache. For example 'GatanK2' will restrict it
# only to camera name containing the string
camera_name_pattern = ''

# time in seconds to wait between consecutive queries
query_interval = 5

# limit query to later than this timestamp (mysql style: yyyymmddhhmmss)
min_timestamp = '20130126000000'

# limit query to start at this image id
start_id = 0

# root dir of cache. session subdirs will be added automatically
cache_path = '/srv/cache/dbem'

# maximum image dimension after conversion
redux_maxsize1 = 4096
redux_maxsize2 = 1024

# initial redux read and resize before calculating power and final
redux_args1 = {
    'pipes': 'read:Read,shape:Shape',
    'cache': False,
}

# redux to create final image for cache
redux_args_jpg = {
    'cache': False,
    'pipes': 'shape:Shape,scale:Scale,format:Format',
    'scaletype': 'stdev',
    'scalemin': -5,
    'scalemax': 5,
    'oformat': 'JPEG',
}

# redux to create final power image for cache
redux_args_pow = {
    'cache': False,
    'pipes': 'power:Power,shape:Shape,mask:Mask,scale:Scale,format:Format',
    'power': True,
    'maskradius': 10,
    'scaletype': 'stdev',
    'scalemin': -5,
    'scalemax': 5,
    'oformat': 'JPEG',
}
| 23.607843
| 72
| 0.724252
| 168
| 1,204
| 5.113095
| 0.541667
| 0.034924
| 0.039581
| 0.041909
| 0.151339
| 0.100116
| 0.100116
| 0.100116
| 0
| 0
| 0
| 0.033597
| 0.159468
| 1,204
| 50
| 73
| 24.08
| 0.815217
| 0.45598
| 0
| 0.37931
| 0
| 0
| 0.425234
| 0.182243
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd1a6c109376dfdc24ad44b61222972d5c24dd2
| 3,737
|
py
|
Python
|
graphs/graphgenerator.py
|
andrew-lockwood/lab-project
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | 1
|
2017-08-30T15:21:31.000Z
|
2017-08-30T15:21:31.000Z
|
graphs/graphgenerator.py
|
andrew-lockwood/lab-project-summer2016
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | null | null | null |
graphs/graphgenerator.py
|
andrew-lockwood/lab-project-summer2016
|
e39a0f21966cdee519942cf2f94b7bab6ed2196e
|
[
"MIT"
] | 1
|
2017-06-15T20:44:59.000Z
|
2017-06-15T20:44:59.000Z
|
import sqlite3
import matplotlib.pyplot as plt
import re
from collections import Counter

db = "C:\\Users\\Andrew\\lab-project\\data\\frontiers_corpus.db"


def wordvsline():
    q = "SELECT wordcount, linecount FROM ArticleTXT"
    curr.execute(q)
    x, y = zip(*curr.fetchall())
    mpl_fig = plt.figure()
    ax = mpl_fig.add_subplot(111)
    plt.scatter(x, y)
    plt.xlim(0, 25000)
    plt.ylim(0, 450)
    ax.set_xlabel('Word Count')
    ax.set_ylabel('Line Count')
    ax.set_title('Words vs Lines')
    plt.show()


# NOTE: the flattened source returned `di.execute_query(q)` in the four
# helpers below, but no `di` is defined anywhere in this file; the
# module-level cursor is the closest available equivalent, so they use it.
def titles_between(start, end):
    q = """ SELECT DISTINCT articleID
            FROM ArticleInformation
            WHERE date BETWEEN
            '{s}' AND '{e}'""".format(s=start, e=end)
    return curr.execute(q).fetchall()


def by_year():
    q = """ SELECT strftime('%Y', date), count(articleID)
            FROM ArticleInformation
            GROUP BY strftime('%Y', date)"""
    return curr.execute(q).fetchall()


def by_month():
    q = """ SELECT strftime('%Y-%m', date), count(articleID)
            FROM ArticleInformation
            GROUP BY strftime('%Y-%m', date)"""
    return curr.execute(q).fetchall()


def by_quarter():
    q = """ SELECT strftime('%Y', date),
            CASE
                WHEN cast(strftime('%m', date) as integer) BETWEEN 1 AND 3 THEN 1
                WHEN cast(strftime('%m', date) as integer) BETWEEN 4 AND 6 THEN 2
                WHEN cast(strftime('%m', date) as integer) BETWEEN 7 AND 9 THEN 3
                ELSE 4
            END AS Quarter, count(articleID)
            FROM ArticleInformation
            GROUP BY strftime('%Y', date),
            CASE
                WHEN cast(strftime('%m', date) as integer) BETWEEN 1 AND 3 THEN 1
                WHEN cast(strftime('%m', date) as integer) BETWEEN 4 AND 6 THEN 2
                WHEN cast(strftime('%m', date) as integer) BETWEEN 7 AND 9 THEN 3
                ELSE 4
            END"""
    return curr.execute(q).fetchall()


def graph(value):
    data = []
    if value == 'year':
        for year, count in by_year():
            data.append((year, count))
    if value == 'month':
        for year, count in by_month():
            data.append((year, count))
    if value == 'quarter':
        for year, quarter, count in by_quarter():
            d = "%s%s" % (year, 'q' + str(quarter))
            data.append((d, count))
    x = [i for i in range(len(data))]
    labels, y = zip(*data)
    mpl_fig = plt.figure()
    ax = mpl_fig.add_subplot(111)
    plt.margins(0.025, 0)
    plt.bar(x, y, align='center')
    ax.set_ylabel('Articles Received')
    plt.xticks(x, labels, rotation=45)
    plt.show()


def kwd_frequency():
    c1 = Counter()
    c2 = Counter()
    q = """ SELECT keyword, count(articleID)
            FROM OriginalKeywords
            GROUP BY keyword"""
    data = curr.execute(q)
    n = 10
    for kwd, count in data.fetchall():
        if count < 20:
            c2[int(count)] += 1
        else:
            c1[int(count / n)] += 1
    x = [i for i in range(len(c1))]
    labels, y = zip(*c1.items())
    labels = ["%s-%s" % (l * n, l * n + n) for l in labels]
    mpl_fig = plt.figure()
    ax = mpl_fig.add_subplot(111)
    plt.margins(0.025, 0)
    plt.bar(x, y, align='center')
    plt.xticks(x, labels, rotation=90)
    plt.show()
    x = [i for i in range(len(c2))]
    labels, y = zip(*c2.items())
    mpl_fig = plt.figure()
    ax = mpl_fig.add_subplot(111)
    plt.margins(0.025, 0)
    plt.bar(x, y, align='center')
    plt.xticks(x, labels, rotation=90)
    plt.show()


if __name__ == "__main__":
    conn = sqlite3.connect(db)
    curr = conn.cursor()
    kwd_frequency()
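# --- Hedged usage sketch (added; not part of the original file) ---
# With the connection and cursor set up as in the __main__ block above,
# the other plots are one call each:
#
#     >>> graph('quarter')   # also accepts 'year' or 'month'
#     >>> wordvsline()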
| 24.585526
| 85
| 0.54616
| 509
| 3,737
| 3.935167
| 0.259332
| 0.023964
| 0.047928
| 0.050924
| 0.546181
| 0.50674
| 0.480779
| 0.430354
| 0.402396
| 0.323015
| 0
| 0.030884
| 0.315494
| 3,737
| 151
| 86
| 24.748344
| 0.75215
| 0
| 0
| 0.377358
| 0
| 0
| 0.407388
| 0.015257
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066038
| false
| 0
| 0.037736
| 0
| 0.141509
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd1de8fe3c2b6efa630c25b86bb05e41fab354a
| 5,612
|
py
|
Python
|
peering_manager/constants.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 127
|
2017-10-12T00:27:45.000Z
|
2020-08-07T11:13:55.000Z
|
peering_manager/constants.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 247
|
2017-12-26T12:55:34.000Z
|
2020-08-08T11:57:35.000Z
|
peering_manager/constants.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 63
|
2017-10-13T06:46:05.000Z
|
2020-08-08T00:41:57.000Z
|
from collections import OrderedDict

from devices.filters import ConfigurationFilterSet
from devices.models import Configuration
from devices.tables import ConfigurationTable
from messaging.filters import ContactFilterSet, EmailFilterSet
from messaging.models import Contact, ContactAssignment, Email
from messaging.tables import ContactTable, EmailTable
from net.filters import ConnectionFilterSet
from net.models import Connection
from net.tables import ConnectionTable
from peering.filters import (
    AutonomousSystemFilterSet,
    BGPGroupFilterSet,
    CommunityFilterSet,
    DirectPeeringSessionFilterSet,
    InternetExchangeFilterSet,
    InternetExchangePeeringSessionFilterSet,
    RouterFilterSet,
    RoutingPolicyFilterSet,
)
from peering.models import (
    AutonomousSystem,
    BGPGroup,
    Community,
    DirectPeeringSession,
    InternetExchange,
    InternetExchangePeeringSession,
    Router,
    RoutingPolicy,
)
from peering.tables import (
    AutonomousSystemTable,
    BGPGroupTable,
    CommunityTable,
    DirectPeeringSessionTable,
    InternetExchangePeeringSessionTable,
    InternetExchangeTable,
    RouterTable,
    RoutingPolicyTable,
)
from utils.functions import count_related

__all__ = ("SEARCH_MAX_RESULTS", "SEARCH_TYPES")

SEARCH_MAX_RESULTS = 15
SEARCH_TYPES = OrderedDict(
    (
        # devices
        (
            "configuration",
            {
                "queryset": Configuration.objects.all(),
                "filterset": ConfigurationFilterSet,
                "table": ConfigurationTable,
                "url": "devices:configuration_list",
            },
        ),
        # messaging
        (
            "contact",
            {
                "queryset": Contact.objects.prefetch_related("assignments").annotate(
                    assignment_count=count_related(ContactAssignment, "contact")
                ),
                "filterset": ContactFilterSet,
                "table": ContactTable,
                "url": "messaging:contact_list",
            },
        ),
        (
            "email",
            {
                "queryset": Email.objects.all(),
                "filterset": EmailFilterSet,
                "table": EmailTable,
                "url": "messaging:email_list",
            },
        ),
        # net
        (
            "connection",
            {
                "queryset": Connection.objects.prefetch_related(
                    "internet_exchange_point", "router"
                ),
                "filterset": ConnectionFilterSet,
                "table": ConnectionTable,
                "url": "net:connection_list",
            },
        ),
        # peering
        (
            "autonomousystem",
            {
                "queryset": AutonomousSystem.objects.defer("prefixes"),
                "filterset": AutonomousSystemFilterSet,
                "table": AutonomousSystemTable,
                "url": "peering:autonomoussystem_list",
            },
        ),
        (
            "bgpgroup",
            {
                "queryset": BGPGroup.objects.all(),
                "filterset": BGPGroupFilterSet,
                "table": BGPGroupTable,
                "url": "peering:bgpgroup_list",
            },
        ),
        (
            "community",
            {
                "queryset": Community.objects.all(),
                "filterset": CommunityFilterSet,
                "table": CommunityTable,
                "url": "peering:community_list",
            },
        ),
        (
            "directpeeringsession",
            {
                "queryset": DirectPeeringSession.objects.prefetch_related(
                    "autonomous_system", "bgp_group", "router"
                ),
                "filterset": DirectPeeringSessionFilterSet,
                "table": DirectPeeringSessionTable,
                "url": "peering:directpeeringsession_list",
            },
        ),
        (
            "internetexchange",
            {
                "queryset": InternetExchange.objects.prefetch_related(
                    "local_autonomous_system"
                ).annotate(
                    connection_count=count_related(
                        Connection, "internet_exchange_point"
                    )
                ),
                "filterset": InternetExchangeFilterSet,
                "table": InternetExchangeTable,
                "url": "peering:internetexchange_list",
            },
        ),
        (
            "internetexchangepeeringsession",
            {
                "queryset": InternetExchangePeeringSession.objects.prefetch_related(
                    "autonomous_system", "ixp_connection"
                ),
                "filterset": InternetExchangePeeringSessionFilterSet,
                "table": InternetExchangePeeringSessionTable,
                "url": "peering:internetexchangepeeringsession_list",
            },
        ),
        (
            "router",
            {
                "queryset": Router.objects.prefetch_related("platform").annotate(
                    connection_count=count_related(Connection, "router")
                ),
                "filterset": RouterFilterSet,
                "table": RouterTable,
                "url": "peering:router_list",
            },
        ),
        (
            "routingpolicy",
            {
                "queryset": RoutingPolicy.objects.all(),
                "filterset": RoutingPolicyFilterSet,
                "table": RoutingPolicyTable,
                "url": "peering:routingpolicy_list",
            },
        ),
    ),
)
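# --- Hedged usage sketch (added; not part of the original file) ---
# A search view would typically look up the requested type and run its
# filterset over the queryset, capped at SEARCH_MAX_RESULTS:
#
#     entry = SEARCH_TYPES["router"]
#     qs = entry["filterset"]({"q": "edge"}, entry["queryset"]).qs
#     table = entry["table"](qs[:SEARCH_MAX_RESULTS])
#
# The exact filterset/table call signatures are assumptions based on
# common django-filter / django-tables2 usage, not taken from this file.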
| 31.351955
| 85
| 0.533678
| 316
| 5,612
| 9.335443
| 0.256329
| 0.027119
| 0.044746
| 0.021695
| 0.056271
| 0.030508
| 0
| 0
| 0
| 0
| 0
| 0.000572
| 0.376693
| 5,612
| 178
| 86
| 31.52809
| 0.842767
| 0.005168
| 0
| 0.111111
| 0
| 0
| 0.173718
| 0.062747
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.081871
| 0
| 0.081871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd2638aee801c7efa156f6936b153c75c517e46
| 465
|
py
|
Python
|
e2e_graphsage/utils/logging.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
e2e_graphsage/utils/logging.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
e2e_graphsage/utils/logging.py
|
mingruimingrui/E2EGraphSage
|
90de7befd3a8ced514697c073b4c64e96b63bdb0
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import

import logging


def setup_logging(log_path, mode='w'):
    fmt = '%(asctime)s %(levelname)-4.4s %(filename)s:%(lineno)d: %(message)s'

    logging.root.handlers = []
    logging.basicConfig(
        filename=log_path,
        filemode=mode,
        format=fmt,
        datefmt='%m-%d %H:%M',
        level=logging.INFO
    )
    logging.getLogger().addHandler(logging.StreamHandler())

    return logging.getLogger(__name__)
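# --- Hedged usage sketch (added; not part of the original file) ---
# Messages go to both the log file and stderr (via the extra StreamHandler):
#
#     >>> logger = setup_logging('/tmp/train.log')   # path is illustrative
#     >>> logger.info('starting run')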
| 24.473684
| 78
| 0.647312
| 55
| 465
| 5.254545
| 0.654545
| 0.048443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005435
| 0.208602
| 465
| 18
| 79
| 25.833333
| 0.779891
| 0
| 0
| 0
| 0
| 0.071429
| 0.167742
| 0.051613
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd2949cb17d74dce66873599c286cade86072c8
| 3,486
|
py
|
Python
|
dmipy/distributions/tests/test_bingham.py
|
AthenaEPI/mipy
|
dbbca4066a6c162dcb05865df5ff666af0e4020a
|
[
"MIT"
] | 59
|
2018-02-22T19:14:19.000Z
|
2022-02-22T05:40:27.000Z
|
dmipy/distributions/tests/test_bingham.py
|
AthenaEPI/mipy
|
dbbca4066a6c162dcb05865df5ff666af0e4020a
|
[
"MIT"
] | 95
|
2018-02-03T11:55:30.000Z
|
2022-03-31T15:10:39.000Z
|
dmipy/distributions/tests/test_bingham.py
|
AthenaEPI/mipy
|
dbbca4066a6c162dcb05865df5ff666af0e4020a
|
[
"MIT"
] | 23
|
2018-02-13T07:21:01.000Z
|
2022-02-22T20:12:08.000Z
|
from numpy.testing import assert_almost_equal, assert_equal
from dmipy.utils import utils
import numpy as np
from dmipy.utils.utils import (
    rotation_matrix_100_to_theta_phi, rotation_matrix_around_100,
    rotation_matrix_100_to_theta_phi_psi
)
from dmipy.distributions import distributions


def test_rotation_100_to_theta_phi():
    # test 1: does R100_to_theta_phi rotate a vector theta_phi?
    theta_ = np.random.rand() * np.pi
    phi_ = (np.random.rand() - .5) * np.pi
    R100_to_theta_pi = rotation_matrix_100_to_theta_phi(theta_, phi_)
    xyz = np.dot(R100_to_theta_pi, np.r_[1, 0, 0])
    _, theta_rec, phi_rec = utils.cart2sphere(xyz)
    assert_almost_equal(theta_, theta_rec)
    assert_almost_equal(phi_, phi_rec)


def test_axis_rotation_does_not_affect_axis():
    # test 2: does R_around_100 not affect 100?
    psi_ = np.random.rand() * np.pi
    R_around_100 = rotation_matrix_around_100(psi_)
    v100 = np.r_[1, 0, 0]
    assert_equal(v100, np.dot(R_around_100, v100))


def test_psi_insensitivity_when_doing_psi_theta_phi_rotation():
    # test 3: does psi still have no influence on main eigenvector when doing
    # both rotations?
    theta_ = np.random.rand() * np.pi
    phi_ = (np.random.rand() - .5) * np.pi
    psi_ = np.random.rand() * np.pi
    R_ = rotation_matrix_100_to_theta_phi_psi(theta_, phi_, psi_)
    xyz = np.dot(R_, np.r_[1, 0, 0])
    _, theta_rec, phi_rec = utils.cart2sphere(xyz)
    assert_almost_equal(theta_, theta_rec)
    assert_almost_equal(phi_, phi_rec)


def test_rotation_around_axis():
    # test 4: does psi really rotate the second vector?
    psi_ = np.pi  # half circle
    R_around_100 = rotation_matrix_around_100(psi_)
    v2 = np.r_[0, 1, 0]
    v2_expected = np.r_[0, -1, 0]
    v2_rot = np.dot(R_around_100, v2)
    assert_equal(np.round(v2_rot), v2_expected)


def test_rotation_on_bingham_tensor():
    # test 5: does combined rotation rotate Bingham well?
    kappa_ = np.random.rand()
    beta_ = kappa_ / 2.  # beta<kappa
    Bdiag_ = np.diag(np.r_[kappa_, beta_, 0])
    theta_ = np.random.rand() * np.pi
    phi_ = (np.random.rand() - .5) * np.pi
    psi_ = np.random.rand() * np.pi * 0
    R_ = rotation_matrix_100_to_theta_phi_psi(theta_, phi_, psi_)
    B_ = R_.dot(Bdiag_).dot(R_.T)
    eigvals, eigvecs = np.linalg.eigh(B_)
    main_evec = eigvecs[:, np.argmax(eigvals)]
    _, theta_rec0, phi_rec0 = utils.cart2sphere(main_evec)
    # checking if the angles are antipodal to each other
    if abs(theta_ - theta_rec0) > 1e-5:
        theta_rec = np.pi - theta_rec0
        if phi_rec0 > 0:
            phi_rec = phi_rec0 - np.pi
        elif phi_rec0 < 0:
            phi_rec = phi_rec0 + np.pi
    else:
        theta_rec = theta_rec0
        phi_rec = phi_rec0
    assert_almost_equal(theta_, theta_rec)
    assert_almost_equal(phi_, phi_rec)
    assert_almost_equal(np.diag(Bdiag_), np.sort(eigvals)[::-1])


def test_bingham_equal_to_watson(beta_fraction=0):
    # test if bingham with beta=0 equals watson distribution
    mu_ = np.random.rand(2)
    n_cart = utils.sphere2cart(np.r_[1., mu_])
    psi_ = np.random.rand() * np.pi
    odi_ = np.max([0.1, np.random.rand()])
    bingham = distributions.SD2Bingham(mu=mu_, psi=psi_,
                                       odi=odi_,
                                       beta_fraction=beta_fraction)
    watson = distributions.SD1Watson(mu=mu_, odi=odi_)
    Bn = bingham(n=n_cart)
    Wn = watson(n=n_cart)
    assert_almost_equal(Bn, Wn, 3)
| 35.938144
| 77
| 0.676133
| 548
| 3,486
| 3.925182
| 0.20073
| 0.026034
| 0.072524
| 0.04556
| 0.372385
| 0.355649
| 0.314272
| 0.291492
| 0.25802
| 0.234775
| 0
| 0.04494
| 0.214859
| 3,486
| 96
| 78
| 36.3125
| 0.740957
| 0.119908
| 0
| 0.287671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150685
| 1
| 0.082192
| false
| 0
| 0.068493
| 0
| 0.150685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd3066a814fddcf19dac7173c44fed139f2e632
| 669
|
py
|
Python
|
head_first_design_patterns/hofs/duck_dispenser.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
head_first_design_patterns/hofs/duck_dispenser.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
head_first_design_patterns/hofs/duck_dispenser.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
__author__ = '@britodfbr'
from head_first_design_patterns.hofs import duck
from head_first_design_patterns.hofs import fly_behaviors
from head_first_design_patterns.hofs import quack_behaviors


def run():
    # Instantiate ducks
    print("==== Model duck ====")
    model = duck.DuckHOF()
    model.perform_quack()
    model.perform_fly()
    model.display()

    print("==== True duck ====")
    model.perform_fly = fly_behaviors.fly_wings
    model.perform_quack = quack_behaviors.quack
    model.display()

    print("==== Toy duck ====")
    model.perform_fly = fly_behaviors.fly_rocket_powered
    model.perform_quack = quack_behaviors.squeak
    model.display()
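# --- Hedged entry point (added; not part of the original file) ---
# The module defines run() but never calls it; a conventional guard makes
# it directly executable while keeping imports side-effect free:
if __name__ == '__main__':
    run()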
| 27.875
| 59
| 0.715994
| 83
| 669
| 5.433735
| 0.337349
| 0.159645
| 0.086475
| 0.126386
| 0.534368
| 0.396896
| 0.396896
| 0
| 0
| 0
| 0
| 0
| 0.167414
| 669
| 24
| 60
| 27.875
| 0.809695
| 0.023916
| 0
| 0.166667
| 0
| 0
| 0.102761
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.222222
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd5abf591689acf3071f0da912c722b5ef681bb
| 1,279
|
py
|
Python
|
tests/test_zones_json.py
|
electricitymap/electricitymap-contrib
|
6572b12d1cef72c734b80273598e156ebe3c22ea
|
[
"MIT"
] | 143
|
2022-01-01T10:56:58.000Z
|
2022-03-31T11:25:47.000Z
|
tests/test_zones_json.py
|
electricitymap/electricitymap-contrib
|
6572b12d1cef72c734b80273598e156ebe3c22ea
|
[
"MIT"
] | 276
|
2021-12-30T15:57:15.000Z
|
2022-03-31T14:57:16.000Z
|
tests/test_zones_json.py
|
electricitymap/electricitymap-contrib
|
6572b12d1cef72c734b80273598e156ebe3c22ea
|
[
"MIT"
] | 44
|
2021-12-30T19:48:42.000Z
|
2022-03-29T22:46:16.000Z
|
import json
import unittest

from electricitymap.contrib.config import ZONES_CONFIG

ZONE_KEYS = ZONES_CONFIG.keys()


class ZonesJsonTestcase(unittest.TestCase):
    def test_bounding_boxes(self):
        for zone, values in ZONES_CONFIG.items():
            bbox = values.get("bounding_box")
            if bbox:
                self.assertLess(bbox[0][0], bbox[1][0])
                self.assertLess(bbox[0][1], bbox[1][1])

    def test_sub_zones(self):
        for zone, values in ZONES_CONFIG.items():
            sub_zones = values.get("subZoneNames", [])
            for sub_zone in sub_zones:
                self.assertIn(sub_zone, ZONE_KEYS)

    def test_zones_from_geometries_exist(self):
        world_geometries = json.load(open("web/geo/world.geojson"))
        world_geometries_zone_keys = set()
        for ft in world_geometries["features"]:
            world_geometries_zone_keys.add(ft["properties"]["zoneName"])
        all_zone_keys = set(ZONES_CONFIG.keys())
        non_existing_zone_keys = sorted(world_geometries_zone_keys - all_zone_keys)
        assert (
            len(non_existing_zone_keys) == 0
        ), f"{non_existing_zone_keys} are defined in world.geojson but not in zones.json"


if __name__ == "__main__":
    unittest.main(buffer=True)
| 34.567568
| 89
| 0.656763
| 167
| 1,279
| 4.718563
| 0.365269
| 0.101523
| 0.072335
| 0.087563
| 0.088832
| 0.088832
| 0.088832
| 0.088832
| 0
| 0
| 0
| 0.009221
| 0.236904
| 1,279
| 36
| 90
| 35.527778
| 0.798156
| 0
| 0
| 0.071429
| 0
| 0
| 0.120407
| 0.035184
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.107143
| false
| 0
| 0.107143
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd609e71dc0ee42d0acf42ff022c5f15ae9992d
| 3,483
|
py
|
Python
|
app/bda_core/entities/training/word2vec_trainer.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | 1
|
2019-05-25T12:12:39.000Z
|
2019-05-25T12:12:39.000Z
|
app/bda_core/entities/training/word2vec_trainer.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | null | null | null |
app/bda_core/entities/training/word2vec_trainer.py
|
bda-19fs/bda-chatbot
|
4fcbda813ff5d3854a4c2e12413775676bcba9e2
|
[
"MIT"
] | null | null | null |
import gensim
import numpy as np


class Config:
    '''
    This class represents the configuration for the Word2Vec model.
    '''
    def __init__(self, dimension=150, hierarchical_softmax=0, negative_sampling=0, ns_exponent=0,
                 sample=0, window_size=5, workers=3, use_skip_gram=1, min_count=2, epochs=10):
        self.dimension = dimension
        self.hierarchical_softmax = hierarchical_softmax
        self.negative_sampling = negative_sampling
        self.ns_exponent = ns_exponent
        self.sample = sample
        self.window_size = window_size
        self.workers = workers
        self.use_skip_gram = use_skip_gram
        self.min_count = min_count
        self.epochs = epochs


def fit_model(sentences, config):
    '''
    Fits the Word2Vec model with the given sentences. The vectors are normalized after the training.
    A further training of the model is not possible.

    :param sentences: A python list of sentences
    :param config: The config for the model
    :return: The trained Word2Vec model
    '''
    # min_count was hard-coded to 2 in the flattened source, silently
    # ignoring config.min_count
    model = gensim.models.Word2Vec(size=config.dimension, hs=config.hierarchical_softmax, window=config.window_size,
                                   workers=config.workers, sg=config.use_skip_gram, min_count=config.min_count)
    model.build_vocab(sentences)
    model.train(sentences, total_examples=len(sentences), epochs=config.epochs)
    model.init_sims(replace=True)
    return model


def avg_word_vector(model, word_list):
    '''
    Calculates the average vector of a list of words. The average vector is the mean
    of all word vectors. Only words of the Word2Vec vocabulary can be considered.

    :param model: The trained Word2Vec model
    :param word_list: A python list of words
    :return: The average vector
    '''
    words = [word for word in word_list if word in model.wv.vocab]
    return np.mean(model.wv.__getitem__(words), axis=0)


def transpose_vector(vec):
    '''
    Returns a new vector that is the transposition of the given vector.

    :param vec: The vector to transpose
    :return: The transposition vector
    '''
    return vec[np.newaxis]


def create_sentence_vectors(model, questions):
    '''
    Calculates the average vectors for all questions. The order of the sentences list
    will remain in the returned list of vectors.

    :param model: The trained Word2Vec model
    :param questions: A python list of word lists
    :return: A list of average vectors
    '''
    vectors = []
    for i in range(len(questions)):
        word_list = [word for word in questions[i] if word in model.wv.vocab]
        avg_vector = None
        if len(word_list) > 0:
            avg_vector = avg_word_vector(model, word_list)
        vectors.append(avg_vector)
    vectors = np.array(vectors)
    return vectors


def create_matrix_from_vectors(vectors):
    '''
    Creates a matrix that contains all vectors of the given vector list as row vectors.

    :param vectors: A list of vectors with the same dimension
    :return: The concatenation matrix of the given vectors
    '''
    vectors_len = len(vectors)
    if vectors_len > 0:
        matrix = transpose_vector(vectors[0])
        for i in range(1, vectors_len):
            vec = vectors[i]
            if vec is not None:
                transposed = transpose_vector(vectors[i])
                matrix = np.concatenate((matrix, transposed), axis=0)
        return matrix
    else:
        raise Exception('the given list of vectors is empty')
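# --- Hedged usage sketch (added; not part of the original file) ---
# Training on a toy corpus and stacking sentence vectors into a matrix
# (assumes the gensim 3.x API that the size=/wv.vocab usage above targets):
#
#     >>> sentences = [['hello', 'world'], ['hello', 'there'], ['world', 'peace']]
#     >>> model = fit_model(sentences, Config(dimension=10, min_count=1, epochs=5))
#     >>> vectors = create_sentence_vectors(model, sentences)
#     >>> create_matrix_from_vectors(vectors).shape
#     (3, 10)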
| 35.907216
| 116
| 0.681022
| 476
| 3,483
| 4.852941
| 0.264706
| 0.020779
| 0.019048
| 0.016883
| 0.072727
| 0.072727
| 0.0329
| 0
| 0
| 0
| 0
| 0.010266
| 0.244904
| 3,483
| 96
| 117
| 36.28125
| 0.868061
| 0.339937
| 0
| 0
| 0
| 0
| 0.015858
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0
| 0.040816
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd825fe40c8c6d189d67799fba8e31f6ba53c8a
| 642
|
py
|
Python
|
polls/migrations/0008_auto_20150918_1715.py
|
santeyio/phantastesproject
|
5ce1e2cb59e8283fe280e01d0e185be62cd4001a
|
[
"MIT"
] | null | null | null |
polls/migrations/0008_auto_20150918_1715.py
|
santeyio/phantastesproject
|
5ce1e2cb59e8283fe280e01d0e185be62cd4001a
|
[
"MIT"
] | null | null | null |
polls/migrations/0008_auto_20150918_1715.py
|
santeyio/phantastesproject
|
5ce1e2cb59e8283fe280e01d0e185be62cd4001a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('polls', '0007_vote'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='book',
            name='votes',
        ),
        migrations.AddField(
            model_name='book',
            name='user',
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| 23.777778
| 76
| 0.605919
| 62
| 642
| 6.048387
| 0.612903
| 0.053333
| 0.085333
| 0.112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013072
| 0.285047
| 642
| 26
| 77
| 24.692308
| 0.803922
| 0.03271
| 0
| 0.2
| 0
| 0
| 0.050081
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd8a7fa6829673461545374eeacd667661ea155
| 4,863
|
py
|
Python
|
DemoFinal.py
|
sohinim006/Heroku-App-demo
|
875b894b48e8544f6dbe629635f195ccd97ba201
|
[
"MIT"
] | null | null | null |
DemoFinal.py
|
sohinim006/Heroku-App-demo
|
875b894b48e8544f6dbe629635f195ccd97ba201
|
[
"MIT"
] | 1
|
2020-06-02T02:53:57.000Z
|
2020-06-02T02:53:57.000Z
|
DemoFinal.py
|
sohinim006/Heroku-App-demo
|
875b894b48e8544f6dbe629635f195ccd97ba201
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import pickle
# In[2]:
data=pd.read_csv("wd.csv",encoding="ISO-8859-1")
# In[3]:
data
# In[4]:
data.fillna(0, inplace=True)  # fills NaN values with 0's
# In[5]:
data
# In[6]:
data.dtypes
# In[7]:
#conversion
data['Temp']=pd.to_numeric(data['Temp'],errors='coerce')
data['D.O. (mg/l)']=pd.to_numeric(data['D.O. (mg/l)'],errors='coerce')
data['PH']=pd.to_numeric(data['PH'],errors='coerce')
data['B.O.D. (mg/l)']=pd.to_numeric(data['B.O.D. (mg/l)'],errors='coerce')
data['CONDUCTIVITY (µmhos/cm)']=pd.to_numeric(data['CONDUCTIVITY (µmhos/cm)'],errors='coerce')
data['NITRATENAN N+ NITRITENANN (mg/l)']=pd.to_numeric(data['NITRATENAN N+ NITRITENANN (mg/l)'],errors='coerce')
data['TOTAL COLIFORM (MPN/100ml)Mean']=pd.to_numeric(data['TOTAL COLIFORM (MPN/100ml)Mean'],errors='coerce')
data.dtypes
# In[8]:
# initialization
start = 2
end = 1779
station = data.iloc[start:end, 0]
location = data.iloc[start:end, 1]
state = data.iloc[start:end, 2]
do = data.iloc[start:end, 4].astype(np.float64)
value = 0
ph = data.iloc[start:end, 5]
co = data.iloc[start:end, 6].astype(np.float64)
year = data.iloc[start:end, 11]
tc = data.iloc[2:end, 10].astype(np.float64)
bod = data.iloc[start:end, 7].astype(np.float64)
na = data.iloc[start:end, 8].astype(np.float64)
na.dtype
# In[9]:
data = pd.concat([station, location, state, do, ph, co, bod, na, tc, year], axis=1)
data.columns = ['station', 'location', 'state', 'do', 'ph', 'co', 'bod', 'na', 'tc', 'year']
# In[10]:
data
# In[11]:
# calculation of pH
data['npH']=data.ph.apply(lambda x: (100 if (8.5>=x>=7)
else(80 if (8.6>=x>=8.5) or (6.9>=x>=6.8)
else(60 if (8.8>=x>=8.6) or (6.8>=x>=6.7)
else(40 if (9>=x>=8.8) or (6.7>=x>=6.5)
else 0)))))
# In[12]:
#calculation of dissolved oxygen
data['ndo']=data.do.apply(lambda x:(100 if (x>=6)
else(80 if (6>=x>=5.1)
else(60 if (5>=x>=4.1)
else(40 if (4>=x>=3)
else 0)))))
# In[13]:
#calculation of total coliform
data['nco']=data.tc.apply(lambda x:(100 if (5>=x>=0)
else(80 if (50>=x>=5)
else(60 if (500>=x>=50)
else(40 if (10000>=x>=500)
else 0)))))
#calculation of electrical conductivity
data['nec']=data.co.apply(lambda x:(100 if (75>=x>=0)
else(80 if (150>=x>=75)
else(60 if (225>=x>=150)
else(40 if (300>=x>=225)
else 0)))))
# In[14]:
# calculation of B.O.D.
data['nbdo']=data.bod.apply(lambda x:(100 if (3>=x>=0)
else(80 if (6>=x>=3)
else(60 if (80>=x>=6)
else(40 if (125>=x>=80)
else 0)))))
# In[15]:
data
# In[16]:
# Calculation of nitrate
data['nna']=data.na.apply(lambda x:(100 if (20>=x>=0)
else(80 if (50>=x>=20)
else(60 if (100>=x>=50)
else(40 if (200>=x>=100)
else 0)))))
data.head()
data.dtypes
# In[17]:
data
# In[18]:
from sklearn.model_selection import train_test_split
# In[19]:
data=data.drop(['station','location'],axis=1)
# In[20]:
data
# In[21]:
data=data.drop(['do','ph','co','bod','na','tc'],axis=1)
# In[22]:
data
# In[24]:
yt=data['nco']
# In[25]:
yt
# In[26]:
data=data.drop(['nco'],axis=1)
# In[27]:
data
# In[28]:
x_t,x_tt,y_t,y_tt=train_test_split(data,yt,test_size=0.2,random_state=4)
# In[29]:
#reg2.fit(x_t,y_t)
# In[30]:
#a2=reg2.predict(x_tt)
#a2
#randomforest
# In[39]:
from sklearn.ensemble import RandomForestRegressor
# In[40]:
rfr=RandomForestRegressor(n_estimators=1000,random_state=42)
# In[41]:
rfr.fit(x_t,y_t)
pickle.dump(rfr,open('model.pkl','wb'))
# In[42]:
model = pickle.load(open('model.pkl','rb'))
yrfr=rfr.predict(x_tt)
# In[43]:
from sklearn.metrics import mean_squared_error
print('mse: %.2f' % mean_squared_error(y_tt, yrfr))
# In[44]:
y_tt
# In[45]:
yrfr
# In[47]:
dtrfr = pd.DataFrame({'Actual': y_tt, 'Predicted': yrfr})
dtrfr.head(20)
# In[48]:
from sklearn.metrics import r2_score
# In[49]:
print(r2_score(y_tt,yrfr))
# In[ ]:
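# --- Hedged usage sketch (added; not part of the original file) ---
# The fitted RandomForestRegressor was pickled to model.pkl above; a
# consumer (e.g. the Heroku app this repo serves) can reload it and score
# held-out rows from the split:
#
#     model = pickle.load(open('model.pkl', 'rb'))
#     print(model.predict(x_tt[:1]))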
| 15.438095
| 112
| 0.499897
| 731
| 4,863
| 3.274966
| 0.259918
| 0.033417
| 0.048872
| 0.06015
| 0.214703
| 0.120718
| 0.041771
| 0.030911
| 0.030911
| 0.030911
| 0
| 0.0882
| 0.309891
| 4,863
| 314
| 113
| 15.487261
| 0.625149
| 0.121119
| 0
| 0.186813
| 0
| 0
| 0.100356
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.021978
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cd8ed3786032ec99ff11bc34e84132d3b428b08
| 1,926
|
py
|
Python
|
Classes/gaussian.py
|
sankarebarri/Python
|
0c39da1df74d74b7b0a3724e57b5205a7d88537f
|
[
"MIT"
] | null | null | null |
Classes/gaussian.py
|
sankarebarri/Python
|
0c39da1df74d74b7b0a3724e57b5205a7d88537f
|
[
"MIT"
] | null | null | null |
Classes/gaussian.py
|
sankarebarri/Python
|
0c39da1df74d74b7b0a3724e57b5205a7d88537f
|
[
"MIT"
] | null | null | null |
import numpy as np
import math


class Gaussian:
    def __init__(self, mu=0, sigma=1):
        self.mean = mu
        self.stdev = sigma
        self.data = []

    def calculate_mean(self):
        self.mean = np.mean(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        x_mean = self.calculate_mean()
        mean_item_squared = []
        for i in range(len(self.data)):
            mean_item = (self.data[i] - x_mean) ** 2
            mean_item_squared.append(mean_item)
        self.stdev = math.sqrt(np.sum(mean_item_squared) / len(self.data))
        sample_length = len(self.data)
        if sample:
            self.stdev = math.sqrt(np.sum(mean_item_squared) / (sample_length - 1))
            return self.stdev
        return self.stdev

    def read_data_file(self, file_name, sample=True):
        with open(file_name) as file:
            data_list = []
            line = file.readline()
            while line:
                # convert each line to a number; the flattened source
                # appended raw strings, which would break the statistics
                data_list.append(float(line))
                line = file.readline()
        self.data = data_list
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev(sample=True)

    def __add__(self, other):
        results = Gaussian()
        results.mean = self.mean + other.mean
        results.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return results

    def __repr__(self):
        return f'mean is {self.mean}, stdev is {self.stdev}'


data = [9, 2, 5, 4, 12, 7]
gaussian = Gaussian()
gaussian.data = data
print(gaussian.calculate_mean())
print(gaussian.calculate_stdev(sample=True))

gaussian_one = Gaussian(5, 2)
gaussian_two = Gaussian(7, 3)
gaussian_sum = gaussian_one + gaussian_two
print(gaussian_sum)
print(gaussian_sum.stdev)
print(gaussian_sum.mean)
| 27.126761
| 81
| 0.574247
| 241
| 1,926
| 4.390041
| 0.248963
| 0.068053
| 0.056711
| 0.039698
| 0.069943
| 0.069943
| 0.069943
| 0.069943
| 0.069943
| 0
| 0
| 0.012967
| 0.319315
| 1,926
| 71
| 82
| 27.126761
| 0.79405
| 0
| 0
| 0.078431
| 0
| 0
| 0.021796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.039216
| 0.019608
| 0.27451
| 0.098039
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3cda167a85c43c6395a461abd5b9210a39f3e5bb
| 987
|
py
|
Python
|
setup.py
|
datagovau/ckanext-datagovau
|
902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9
|
[
"Apache-2.0"
] | 1
|
2019-07-22T08:02:11.000Z
|
2019-07-22T08:02:11.000Z
|
setup.py
|
datagovau/ckanext-datagovau
|
902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
datagovau/ckanext-datagovau
|
902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9
|
[
"Apache-2.0"
] | 6
|
2015-01-23T16:32:18.000Z
|
2021-06-27T03:42:18.000Z
|
from setuptools import find_packages, setup

version = "1.0.0a1"

# Keep in case we still need pylons...Just use the line below in place
# of the install_requires argument in the call to setup().
# install_requires=['requests', 'feedparser', 'pylons', 'python-dateutil'],
setup(
    name="ckanext-datagovau",
    version=version,
    description="Extension for customising CKAN for data.gov.au",
    long_description="",
    classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    keywords="",
    author="Greg von Nessi",
    author_email="greg.vonnessi@linkdigital.com.au",
    url="",
    license="",
    packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
    namespace_packages=["ckanext", "ckanext.datagovau"],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        "typing_extensions",
    ],
    entry_points="""
    [ckan.plugins]
    datagovau = ckanext.datagovau.plugin:DataGovAuPlugin
    """,
)
| 32.9
| 94
| 0.690983
| 117
| 987
| 5.700855
| 0.700855
| 0.067466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00609
| 0.168186
| 987
| 29
| 95
| 34.034483
| 0.806334
| 0.274569
| 0
| 0
| 0
| 0
| 0.362869
| 0.101266
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|