blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
76368efaedccd95a1deec68041b92826b988d67b | 5205aaa9e0b54fc285c73f47ca266676c800a86d | /amr2seq/data_prep/filter_stop_words.py | 3fd88b49c49871ebf9702d7f97d68d403d92cacf | [] | no_license | GonewithGt/GAMRParsing | 4d83275651d6c23f6796dcdf32d33d99d057cf49 | 8b81789d23e64feada409f439235d615d4e048bf | refs/heads/master | 2020-03-14T21:10:58.401701 | 2018-05-07T16:02:42 | 2018-05-07T16:02:42 | 131,790,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,559 | py | #!/usr/bin/python
import sys
def filter_vars(line):
    """Strip trailing digits from every '/'-separated token in the third
    '|||'-delimited field of *line* and return the reassembled line.

    Example: 'a|||b|||c1/d23' -> 'a|||b|||c/d'.

    The original char-by-char while loop raised IndexError (caught by a
    bare except that dumped debug output) whenever a token consisted only
    of digits; rstrip handles that case cleanly, leaving the token empty.
    """
    fields = line.split('|||')
    parts = fields[2].split('/')
    for i in range(len(parts)):
        # One C-level call removes all trailing digits at once.
        parts[i] = parts[i].rstrip('0123456789')
    fields[2] = '/'.join(parts)
    return '|||'.join(fields)
def main(argv):
    """Drop alignment lines whose lexical side is a stop word.

    argv: [prog, input_file, stop_words_file, result_file].
    Stop words are matched in three casings (as-is, Capitalized, UPPER).
    Surviving lines are written through filter_vars(); blank lines are
    copied unchanged.
    """
    in_file = argv[1]
    stop_words_file = argv[2]
    result_file = argv[3]
    # Store each stop word together with its Capitalized and UPPER-cased
    # variants so the later membership test is a single set lookup.
    stop_words = set()
    with open(stop_words_file, 'r') as sf:
        for raw in sf:
            word = raw.strip()
            if word != '':
                stop_words.add(word)
                stop_words.add(word[0].upper() + word[1:])
                stop_words.add(word.upper())
    # Concept-level '/word' patterns; only used by the disabled graph scan
    # below, kept so it can be re-enabled without changes elsewhere.
    stop_set = set()
    for s in stop_words:
        stop_set.add('/%s ' % s)
        stop_set.add('/%s)' % s)
        stop_set.add('/%s )' % s)
    with open(result_file, 'w') as wf, open(in_file, 'r') as rf:
        for line in rf:
            if line.strip() == '':
                wf.write(line)
                continue
            try:
                lex_part = line.strip().split('|||')[1].strip()
                graph_part = line.strip().split('|||')[2].strip()
            except IndexError:
                # Malformed line: report it and skip.  The original caught
                # everything with a bare except and then fell through,
                # reusing a stale lex_part from the previous iteration.
                print(line)
                continue
            if lex_part not in stop_words:
                no_stop = True
                #for w in stop_set:
                #    if w in graph_part:
                #        no_stop = False
                #        break
                if no_stop:
                    wf.write(filter_vars(line))
if __name__ == '__main__':
    # argv: [prog, input_file, stop_words_file, result_file]
    main(sys.argv)
| [
"1724532024@qq.com"
] | 1724532024@qq.com |
43059fbcd356ec0f13831956367ede3368efeeff | 3eba176a5f88124e290d357ac94a9a86f90e3c0e | /euler/37/37.py | 2111226d01654623c81bfa0c92c515ad2d340189 | [] | no_license | marcus255/coding | 32cc028ed89fbdf3368eed3afca81536ec5b4d1f | 109aec1e77891cec94d8635e3399440ab4b74339 | refs/heads/master | 2021-04-26T23:24:31.414731 | 2018-03-05T23:28:51 | 2018-03-05T23:28:51 | 123,988,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | def isPrime2(Number):
return 2 in [Number,2**Number%Number]
def isPrime(n):
    """Trial-division primality test.

    Returns True iff n is prime.  The original returned True for n < 2
    (0, 1 and negatives); the guard fixes that.  The driver script below is
    unaffected: the only sub-2 values it ever tests (truncations reaching 1)
    were already rejected by its final single-digit-prime membership check.
    """
    if n < 2:
        return False
    for i in range(2, int(n**0.5) + 1):
        if n % i == 0:
            return False
    return True
# Project Euler 37: sum the eleven primes that stay prime while being
# repeatedly truncated from the right AND from the left.
num = 11
primes_sum = 0
primes_count = 0;
while(primes_count < 11):
    if isPrime(num):
        temp_num = num
        temp_num_2 = num
        # Truncate temp_num from the right (integer-divide by 10) and
        # temp_num_2 from the left (subtract the leading digit's place
        # value) in lockstep, bailing out if either truncation is composite.
        while temp_num > 9 and temp_num_2 > 9:
            temp_num = temp_num // 10
            temp_num_2 = temp_num_2 - int(str(temp_num_2)[0] + '0' * (len(str(temp_num_2)) - 1))
            if not (isPrime(temp_num) and isPrime(temp_num_2)):
                break
        # Both fully-truncated remainders must be single-digit primes; a
        # candidate that left the loop via break fails this test as well.
        if temp_num in [2, 3, 5, 7] and temp_num_2 in [2, 3, 5, 7]:
            primes_count += 1
            primes_sum += num
            print('{}: {}'.format(primes_count, num))
    num += 1
print('The sum of these primes is ' + str(primes_sum))
| [
"marek255trojan@gmail.com"
] | marek255trojan@gmail.com |
fa378984371587ff99f14a0eaca8909a68fd96c1 | 1809e9722b460e265f2fe8b510b7bfe3fa1571f7 | /webradio/base.py | 220240fca3c215b0824f195e80a0320e1d82059e | [] | no_license | 3ngthrust/webradio | ae4c891a875d599c80db4cb1a07710e4545432db | 14c80207d616137574000688f0618ac6c1b717ba | refs/heads/master | 2021-06-14T15:18:18.709890 | 2017-01-15T18:45:59 | 2017-01-15T18:45:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,136 | py | import abc
from contextlib import contextmanager
@contextmanager
def ignore(exception):
    """Context manager that silently swallows exceptions of the given
    type (or tuple of types).

    Kept as a thin wrapper for backwards compatibility, but now delegates
    to the standard library's contextlib.suppress instead of hand-rolling
    the try/except.
    """
    with suppress(exception):
        yield
class base_client(metaclass=abc.ABCMeta):
    """Abstract interface for a web-radio music client.

    Implementations connect to a playback server, expose a list of station
    urls, and manage volume/mute state.  Volume writes while muted are
    cached and applied on unmute (see `volume`).  Instances are usable as
    context managers.

    NOTE(review): `abc.abstractproperty` is deprecated; the modern spelling
    is `@property` stacked with `@abc.abstractmethod`.
    """
    @abc.abstractmethod
    def __init__(self, server, *, muted=False):
        """ constructor of the music client

        Parameters
        ----------
        server : str or object with socket attribute
            the server to connect to

        Other Parameters
        ----------------
        muted : bool, default False
            the initial muted state of the server
        """
    @abc.abstractmethod
    def disconnect(self):
        """ destructor of the music client

        Disconnect from the server and clean up.
        """
    @abc.abstractmethod
    def __enter__(self):
        """ context manager entry function """
    @abc.abstractmethod
    def __exit__(self, *args):
        """ context manager exit function """
    @abc.abstractproperty
    def volume(self):
        """ the volume of the server

        Returns
        -------
        volume : int
            the current volume in percent. If the server was muted,
            this is the volume it had before being muted.
        """
    @volume.setter
    @abc.abstractmethod
    def volume(self, new_volume):
        """ change the volume of the server

        Assigning to it will change the volume of the server. The value is
        assumed to be the volume in percent and will always be casted to
        integer. Just as when reading the volume, writing it will only change
        the volume if not muted, otherwise it will be cached until unmuted.
        """
    @abc.abstractproperty
    def urls(self):
        """ urls of the radio stations

        Returns
        -------
        urls : list of str
            the list of the assigned radio stations
        """
    @urls.setter
    @abc.abstractmethod
    def urls(self, urls):
        """ change the radio stations

        Assigning will replace the current list of radio stations with the
        given ones.

        Parameters
        ----------
        urls : list of str
            a list of valid radio urls
        """
    @abc.abstractmethod
    def play(self, index):
        """ select the radio station to play

        Parameters
        ----------
        index : int
            the index of the radio station

        Raises
        ------
        RuntimeError
            if index is greater or equal to the number of available urls
        """
    @abc.abstractproperty
    def station(self):
        """ the currently active station

        Returns
        -------
        station : int
            the current station index

        Raises
        ------
        IndexError
            if no station is active
        """
    @station.setter
    @abc.abstractmethod
    def station(self, index):
        """ select the radio station to play

        this is a direct wrapper of `play()`

        Raises
        ------
        RuntimeError
            if index is greater or equal to the number of available urls
        """
    @abc.abstractproperty
    def muted(self):
        """ the current mute state

        Returns
        -------
        muted : bool
            whether or not the server is currently muted
        """
    @muted.setter
    @abc.abstractmethod
    def muted(self, value):
        """ change the current mute state

        Parameters
        ----------
        muted : bool
            Whether or not the server should be muted. If it already is muted
            and is requested to get muted, it does nothing.
        """
    @abc.abstractmethod
    def mute(self):
        """ mute the server

        This is just a convenience function that sets `muted` to `True`
        """
    @abc.abstractmethod
    def unmute(self):
        """ unmute the server

        This is just a convenience function that sets `muted` to `False`
        """
    @abc.abstractmethod
    def toggle_mute(self):
        """ toggle the current mute state

        This is just a convenience function that inverts the muted state
        """
| [
"keewis@posteo.de"
] | keewis@posteo.de |
132b517b5d247685cafd0157d9a809df8e9c0920 | 352239db2379353be34c1b5f1a6eda504393ea16 | /app/api/v1/__init__.py | 9760c8304b4415627d91a1997b08890a1536864d | [] | no_license | ns3098/FastAPI_Template | 346f5aea367bb8909b146ec7f467b8d99dcf889b | ff245126af65b68e4ec6050864a9d3ae95a2601a | refs/heads/master | 2023-05-13T07:56:26.907460 | 2021-06-01T14:27:55 | 2021-06-01T14:27:55 | 372,858,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from fastapi import APIRouter
from app.api.v1.home import router as home_router
router = APIRouter(prefix="/v1")
router.include_router(home_router)
| [
"nitin.singh@stylumia.com"
] | nitin.singh@stylumia.com |
bb45d7d331ef9e8791fc9163cb3489a86d1e83da | e970a7a17850473a3edaa9c4fbc208c1cef95462 | /Assignment2/Code/task23_full_architecture_pretraining.py | 60769b7f52320c230197efd5df3ccb018a662d7f | [] | no_license | reetamdutta1/CS7015-Deep-Learning-Assignments | 4d39289ad647e96ff96429049442d58f349e21a2 | 7a720ec0dd871691e0360ab27229f11ee4b791cf | refs/heads/master | 2022-04-10T14:13:49.404734 | 2020-02-22T16:15:47 | 2020-02-22T16:15:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,258 | py | import os
import torch.utils.data as data_utils
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from torchvision.utils import save_image
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
### HYPERPARAMETERS ###
dataset_no = 1 # 1 for color, 2 for BnW
if dataset_no==1:
convergence = 1e-7
batch_size = 4
learning_rate = 1e-3
nodes_l1 = 200 # 64,128,256,512 ,best 512
nodes_l2 = 100 # 64,128,256,384 ,best 384
nodes_l3 = 30 # 32,64,128,256, best 256
if dataset_no==2:
convergence = 1e-7
batch_size = 128
learning_rate = 1e-2
nodes_l1 = 512 # 64,128,256,512 ,best 512
nodes_l2 = 384 # 64,128,256,384 ,best 384
nodes_l3 = 256 # 32,64,128,256, best 256
### data loading ###
torch.manual_seed(1)
np.random.seed(0)
# Dataset 1: one 828-feature vector per image file, parsed from the text
# files under Features/<label>/, with the label index appended as column 829.
if dataset_no ==1:
    data1=np.zeros(shape=(1596,829))
    ji=0
    labels = ['coast','forest','highway','street','tallbuilding']
    for idx, label in enumerate(labels):
        path = 'Features/'+label
        for file in os.listdir(path):
            current = os.path.join(path, file)
            in1=open(current)
            l1 = in1.read().strip().split("\n")
            l2=[]
            for i in l1:
                l2=l2+i.split(" ")
            l2.append(idx)
            l2=np.array(l2)
            l2=np.float_(l2)
            data1[ji]=l2
            ji=ji+1
# Dataset 2: ready-made CSV, last column is the label.
if dataset_no ==2:
    datafile = 'BnW/7/data.csv'
    data1 = pd.read_csv(datafile, encoding = "UTF-8")
    data1=data1.values
np.random.shuffle(data1)
train_test_split = int(0.8*data1.shape[0]) # 80% traindata
data_train = data1[:train_test_split]
data_test = data1[train_test_split:]
unlabeled_labeled_split = int(0.7*data1.shape[0]) # 70% of traindata as unlabeled data
data_labeled = data1[unlabeled_labeled_split:]
# NOTE(review): data_train is overwritten with the last 30% of data1, which
# overlaps data_test (the last 20%) — the supervised training set appears to
# contain the test rows.  Confirm whether this leakage is intended.
data_train = data_labeled
targets=torch.Tensor(np.ravel(data_train[:,-1]))
features=torch.Tensor(data_train[:,:-1])
targets_t=torch.Tensor(np.ravel(data_test[:,-1]))
features_t=torch.Tensor(data_test[:,:-1])
dataset = data_utils.TensorDataset(features, targets)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
input_dim = data1[:,:-1].shape[1]
### Model definition ###
class autoencoder1(nn.Module):
    """First stacked-autoencoder stage: input_dim -> nodes_l1 -> input_dim.

    The submodule names (encoder1/decoder1) and the Sequential layouts are
    kept exactly as before so previously saved state_dicts stay loadable.
    """
    def __init__(self):
        super(autoencoder1, self).__init__()
        self.encoder1 = nn.Sequential(
            nn.Linear(input_dim, nodes_l1),
            nn.ReLU(True))
        self.decoder1 = nn.Sequential(
            nn.Linear(nodes_l1, input_dim),
            nn.Sigmoid())
    def bottle(self, x):
        """Encoder-only pass: return the hidden representation."""
        return self.encoder1(x)
    def forward(self, x):
        """Full reconstruction pass: encode, then decode."""
        return self.decoder1(self.encoder1(x))
class autoencoder2(nn.Module):
    """Second stacked-autoencoder stage: nodes_l1 -> nodes_l2 -> nodes_l1.

    Submodule names and Sequential layouts are unchanged for state_dict
    compatibility.
    """
    def __init__(self):
        super(autoencoder2, self).__init__()
        self.encoder2 = nn.Sequential(
            nn.Linear(nodes_l1,nodes_l2),
            nn.ReLU(True))
        self.decoder2 = nn.Sequential(
            nn.Linear(nodes_l2,nodes_l1),
            nn.ReLU(True))
    def bottle(self, x):
        """Encoder-only pass: return the hidden representation."""
        return self.encoder2(x)
    def forward(self, x):
        """Full reconstruction pass: encode, then decode."""
        return self.decoder2(self.encoder2(x))
class autoencoder3(nn.Module):
    """Third stacked-autoencoder stage: nodes_l2 -> nodes_l3 -> nodes_l2.

    Submodule names and Sequential layouts are unchanged for state_dict
    compatibility.
    """
    def __init__(self):
        super(autoencoder3, self).__init__()
        self.encoder3 = nn.Sequential(
            nn.Linear(nodes_l2,nodes_l3),
            nn.ReLU(True))
        self.decoder3 = nn.Sequential(
            nn.Linear(nodes_l3,nodes_l2),
            nn.ReLU(True))
    def bottle(self, x):
        """Encoder-only pass: return the hidden representation."""
        return self.encoder3(x)
    def forward(self, x):
        """Full reconstruction pass: encode, then decode."""
        return self.decoder3(self.encoder3(x))
# Instantiate the three stages and restore their layer-wise pretrained
# weights from disk (saved by the earlier pretraining runs).
model1 = autoencoder1()
model2 = autoencoder2()
model3 = autoencoder3()
model1.load_state_dict(torch.load('./autoencoder1.pth'))
model2.load_state_dict(torch.load('./autoencoder2.pth'))
model3.load_state_dict(torch.load('./autoencoder3.pth'))
class SAE(nn.Module):
    """Stacked-autoencoder classifier: the three pretrained encoder stages
    followed by a 5-way softmax output layer.

    Attribute names mirror the individual autoencoders (encoder1..3) so
    their pretrained weights can be copied in with load_state_dict.
    """
    def __init__(self):
        super(SAE, self).__init__()
        self.encoder1 = nn.Sequential(
            nn.Linear(input_dim, nodes_l1),
            nn.ReLU(True))
        self.encoder2 = nn.Sequential(
            nn.Linear(nodes_l1,nodes_l2),
            nn.ReLU(True))
        self.encoder3 = nn.Sequential(
            nn.Linear(nodes_l2,nodes_l3),
            nn.ReLU(True))
        self.final_layer = nn.Sequential(
            nn.Linear(nodes_l3, 5),
            nn.Softmax(1))
    def forward(self, x):
        """Classify x: run the encoder stack, then the softmax head."""
        out = x
        for stage in (self.encoder1, self.encoder2, self.encoder3, self.final_layer):
            out = stage(out)
        return out
sae_model = SAE()
# Copy the pretrained encoder weights from the three stage models into the
# corresponding encoder submodules of the classifier.
sae_model.encoder1.load_state_dict(model1.encoder1.state_dict(), strict=True)
sae_model.encoder2.load_state_dict(model2.encoder2.state_dict(), strict=True)
sae_model.encoder3.load_state_dict(model3.encoder3.state_dict(), strict=True)
### Classification ###
# Fine-tune the whole (pretrained) stack with cross-entropy on the labeled set.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
    sae_model.parameters(), lr=learning_rate,momentum=0.9, weight_decay=1e-5)
# NOTE(review): best_model_cls = sae_model only aliases the module object —
# both names point at the same (continuously updated) weights, so the save
# below stores the final epoch's weights, not a snapshot of the "best" one.
best_model_cls=sae_model
prev_loss=10
train_cost_history = []
epoch=0
while(True):
    epoch+=1
    for data in dataloader:
        img, clas = data
        img = img.view(img.size(0), -1)
        img = Variable(img)
        clas=clas.type(torch.LongTensor)
        # ===================forward=====================
        output = sae_model(img)
        loss = criterion(output, clas)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Stopping rule: fixed epoch budget for dataset 1, loss-delta
    # convergence for dataset 2 (loss here is the last batch's loss).
    delta_loss = prev_loss - loss.data
    if dataset_no==1:
        if epoch==200: # convergence criteria gives bad results for dataset 1
            break
    else:
        if delta_loss < convergence: # convergence gives good results for dataset 2
            break
    best_model_cls=sae_model
    prev_loss=loss.data
    train_cost_history.append(loss.data)
    # ===================log========================
    ## train acc ##
    # Manual argmax over the softmax outputs; starting ma at 0 is safe
    # because Softmax produces strictly positive values.
    feat=Variable(features)
    output=sae_model(feat)
    targ=targets.type(torch.LongTensor)
    tot=0
    corr=0
    for i in range(targ.shape[0]):
        in1=0
        ma=0
        for j in range(output.shape[1]):
            if (output[i][j].item() > ma):
                ma=output[i][j].item()
                in1=j
        if (targ[i].item() == in1):
            corr=corr+1
        tot=tot+1
    train_acc=corr/tot
    ## test acc ##
    feat=Variable(features_t)
    output=sae_model(feat)
    targ=targets_t.type(torch.LongTensor)
    tot=0
    corr=0
    for i in range(targ.shape[0]):
        in1=0
        ma=0
        for j in range(output.shape[1]):
            if (output[i][j].item() > ma):
                ma=output[i][j].item()
                in1=j
        if (targ[i].item() == in1):
            corr=corr+1
        tot=tot+1
    test_acc=corr/tot
    # CSV-style progress line: epoch, last-batch loss, train acc, test acc.
    print('{}, {:.4f}, {:.4f}, {:.4f}'
        .format(epoch, loss.data, train_acc, test_acc))
torch.save(best_model_cls.state_dict(), './sae_model_final_pretrained.pth')
sae_model.load_state_dict(torch.load('./sae_model_final_pretrained.pth'))
feat=Variable(features)
output=sae_model(feat)
pred = torch.argmax(output, 1)
targ=targets.type(torch.LongTensor)
# NOTE(review): sklearn's confusion_matrix signature is (y_true, y_pred);
# here the predictions are passed first, so rows/columns are swapped
# relative to the usual convention — confirm this is intended.
cm = confusion_matrix(pred.view(-1), targ.view(-1))
labels = ['coast','forest','highway','street','tallbuilding']
# Print the confusion matrix as a comma-separated table with labels.
print("_ ,",end="")
for i in range(cm.shape[0]):
    print(labels[i],',',end="")
print("")
for i in range(cm.shape[0]):
    print(labels[i],',',end="")
    for j in range(cm.shape[1]):
        print('{:.4f} , '.format(cm[i][j]),end = "")
    print("")
| [
"noreply@github.com"
] | noreply@github.com |
8dd9f514f148cbbca22dfa823d1e213be9fb79c5 | 9d85bf08c43d627ab542786a5044c9995b5da83a | /rango/forms.py | 5b2397a9d77c75a6a939a4ba39c6b742a247bcef | [] | no_license | AmjadGlasgow/Movie | 9f35cf1532dccadee5b278ce2a468b8812268bb4 | cae2aa44ee9d3f3de0ba50462dda41caa1c5891d | refs/heads/master | 2023-06-29T14:55:08.868553 | 2021-08-03T23:52:38 | 2021-08-03T23:52:38 | 392,487,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | from django import forms
from rango.models import Page, Category
from django.contrib.auth.models import User
from rango.models import UserProfile
class CategoryForm(forms.ModelForm):
    """ModelForm for creating a Category.

    Only `name` is user-editable; views/likes start hidden at 0 and the
    slug is hidden and filled in elsewhere.
    """
    name = forms.CharField(max_length=Category.NAME_MAX_LENGTH,
                           help_text="Please enter the category name.")
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    slug = forms.CharField(widget=forms.HiddenInput(), required=False)
    # An inline class to provide additional information on the form.
    class Meta:
        # Provide an association between the ModelForm and a model
        model = Category
        fields = ('name',)
class PageForm(forms.ModelForm):
    """ModelForm for creating a Page; the category foreign key is excluded
    and attached by the view, and `views` starts hidden at 0."""
    title = forms.CharField(max_length = Page.TITLE_MAX_LENGTH,
                            help_text="Please enter the title of the page.")
    url = forms.URLField(max_length = Page.URL_MAX_LENGTH,
                         help_text="Please enter the URL of the page.")
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    class Meta:
        # Provide an association between the ModelForm and a model
        model = Page
        # What fields do we want to include in our form?
        # This way we don't need every field in the model present.
        # Some fields may allow NULL values; we may not want to include them.
        # Here, we are hiding the foreign key.
        # we can either exclude the category field from the form,
        exclude = ('category',)
        # or specify the fields to include (don't include the category field).
        #fields = ('title', 'url', 'views')
    # Disabled draft of a clean() that would prepend a scheme to bare URLs.
    # def clean(self):
    #     cleaned_data = self.cleaned_data
    #     url = cleaned_data.get('url')
    #     If url is not empty and doesn't start with 'http://',
    #     then prepend 'http://'.
    #     if url and not url.startswith('http://'):
    #         url = f'http://{url}'
    #         cleaned_data['url'] = url
    #     if url and not url.startswith('https://'):
    #         url = f'https://{url}'
    #         cleaned_data['url'] = url
    #     return cleaned_data
class UserForm(forms.ModelForm):
    """Registration form for Django's built-in User; the password field is
    rendered with a password widget so input is masked."""
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ('username', 'email', 'password',)
class UserProfileForm(forms.ModelForm):
    """Form for the extra profile data (website, picture) attached to a user."""
    class Meta:
        model = UserProfile
        fields = ('website', 'picture',)
"2603127s@student.gla.ac.uk"
] | 2603127s@student.gla.ac.uk |
a867d8474cf647f0d513ef130e9b8eed8e0b9c3a | d566cea0b4357a33735d7ba4cdaed8204c6aef91 | /encryption/crypt.py | da3e4fbd32024a2262f1db1029661697c4fccfca | [] | no_license | GentelmanBastard/PythonCourse | c0fc4fe8cf491b4344e811113762f933df8e7eb7 | 4e5e7d999b5b0fe91784e8b93e8dd1bfc00b8386 | refs/heads/master | 2021-01-25T09:53:01.113469 | 2020-02-11T15:09:05 | 2020-02-11T15:09:05 | 123,327,526 | 0 | 1 | null | 2018-08-08T09:04:28 | 2018-02-28T18:49:17 | Python | UTF-8 | Python | false | false | 32 | py | from Crypto.Cipher import DES3
| [
"mkucman@hotmail.com"
] | mkucman@hotmail.com |
6ef2add7e26831c01edce50c144d966d2a50a68f | e386c3b58815fc01d2ec3ba938e6fe6250638f53 | /ch05/modulo.py | de6fed5804f3920da5af8ac7337d34da2446f78a | [
"MIT"
] | permissive | ricscar2570/fondamentibook | 824014abd97d75e9cc2a38ff00e9299557f3da3f | 14317326f193d422d0fe3bb4a240177eb9fde0d7 | refs/heads/master | 2021-05-04T06:00:48.020313 | 2016-09-14T07:41:05 | 2016-09-14T07:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | #! /usr/bin/env python3 -B
#
# Codice eseguibile per i capitoli del libro
# "Fondamenti di Programmazione in Python" di
# Fabio Pellacini
#
#
# Released under the MIT license
#
# Copyright (c) 2016 Fabio Pellacini
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
def stampa_vuoto(x):
    """Print whether *x* is truthy ('valore non nullo') or falsy ('valore nullo')."""
    print('valore non nullo' if x else 'valore nullo')
if __name__ == '__main__':
    # Demo: exercise both the falsy (0, '') and truthy (1, 'ciao') branches.
    stampa_vuoto(0)
    stampa_vuoto(1)
    stampa_vuoto('')
    stampa_vuoto('ciao')
| [
"fabio.pellacini@gmail.com"
] | fabio.pellacini@gmail.com |
ac126a334e5c16ab0f0e7c96bd9e37e9401d058a | d0081f81996635e913b1f267a4586eb0bfd3dcd5 | /dataactcore/migrations/versions/001758a1ab82_remove_legal_entity_address_line3_from_.py | a17f33249deb510d2d5a9c4c694595932bedba00 | [
"CC0-1.0"
] | permissive | fedspendingtransparency/data-act-broker-backend | 71c10a6c7c284c8fa6556ccc0efce798870b059b | b12c73976fd7eb5728eda90e56e053759c733c35 | refs/heads/master | 2023-09-01T07:41:35.449877 | 2023-08-29T20:14:45 | 2023-08-29T20:14:45 | 57,313,310 | 55 | 36 | CC0-1.0 | 2023-09-13T16:40:58 | 2016-04-28T15:39:36 | Python | UTF-8 | Python | false | false | 994 | py | """Remove legal_entity_address_line3 from DetachedAwardFinancialAssistance
Revision ID: 001758a1ab82
Revises: 60830f0881a5
Create Date: 2018-03-09 10:50:38.640532
"""
# revision identifiers, used by Alembic.
# `revision` is this migration's id; `down_revision` is its parent.
revision = '001758a1ab82'
down_revision = '60830f0881a5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (upgrade_<engine_name>)."""
    globals()["upgrade_{}".format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (downgrade_<engine_name>)."""
    globals()["downgrade_{}".format(engine_name)]()
def upgrade_data_broker():
    """Drop the unused legal_entity_address_line3 column (see module docstring)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('detached_award_financial_assistance', 'legal_entity_address_line3')
    ### end Alembic commands ###
def downgrade_data_broker():
    """Re-add the legal_entity_address_line3 column (nullable TEXT), reversing the upgrade."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('detached_award_financial_assistance', sa.Column('legal_entity_address_line3', sa.TEXT(), autoincrement=False, nullable=True))
    ### end Alembic commands ###
| [
"Burdeyny_Alisa@bah.com"
] | Burdeyny_Alisa@bah.com |
0a948167ad5e63969b44e9ec0fcfe9edeee40ffd | 94488d4f1fbb05b33217cebbd2a6bdaf986d39a6 | /core_backend/views.py | 42574c671887f58fb4f4aa5bc5e7decd9c362ca5 | [
"MIT"
] | permissive | PoieoDev/User-Accounts-Template | fc3be67d35f8969ebf6a04916f5948f76c8a2e38 | 1e6586d9a79d3df61d6ae7589415f23e8aeeea07 | refs/heads/master | 2023-08-17T05:31:34.003340 | 2021-09-20T21:43:22 | 2021-09-20T21:43:22 | 298,093,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | from rest_framework_jwt.settings import api_settings
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from .serializers import UserSerializer
class UserAccountView(APIView):
    """Account management endpoint: POST registers a new user."""
    # Registration Class. Data is sent to this function which
    # is then validated and saved in a user object via a serializer
    def post(self, request, format=None):
        """Validate the posted payload and create a User; 201 with the
        serialized user on success, 400 with field errors otherwise."""
        serializer = UserSerializer(data=request.data)
        if serializer.is_valid(raise_exception=False):
            serializer.save()
            return Response({"user":serializer.data}, status=201)
        return Response({"user": serializer.errors}, status=400)
    # This function will be added soon. Use this function to update
    # a user object via serializer:
    # UserSerializer(USER_OBJECT, data=request.data)
    def put(self, request, pk, format=None):
        """Placeholder for user update; currently always returns 404."""
        return Response({}, status=404)
# This class is currently used for Logging in via Username/password
# and via Token
class UserLoginView(APIView):
    """Login endpoint: POST exchanges credentials for a JWT; GET returns
    the user identified by the request's token."""
    # This class verifies username/password and returns token/user data
    def post(self, request, format=None):
        """Look the user up by email or username, authenticate with the
        supplied password, and return a JWT plus first name on success
        (200); otherwise a generic failure message (400)."""
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        # NOTE(review): the two handlers below are never used in this method.
        jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
        jwt_get_username_from_payload = api_settings.JWT_PAYLOAD_GET_USERNAME_HANDLER
        # The 'username' field may carry either an email address or a username.
        user_obj = User.objects.filter(email=request.data['username']).first() or User.objects.filter(username=request.data['username']).first()
        if user_obj is not None:
            credentials = {
                'username':user_obj.username,
                'password': request.data['password']
            }
            user = authenticate(**credentials)
            if user and user.is_active:
                payload = jwt_payload_handler(user)
                return Response({
                    'token': jwt_encode_handler(payload),
                    'first_name': user.first_name,
                }, status=200)
        return Response({"msg": 'Unable to log in with provided credentials.'}, status=400)
    # This function uses the same serializer as above but instead of
    # creating a new user object, it verifies the user via JWT token
    def get(self, request, format=None):
        """Return the serialized user attached to the (token-authenticated) request."""
        user_data = UserSerializer(request.user)
        return Response({
            'user': user_data.data,
        }, status=200)
| [
"timvolner@gmail.com"
] | timvolner@gmail.com |
5d6ded4faf7566b8fb858f56738f9b733236abda | a3776dfa7a4bfd76ff7cb63ddb3f6d70483b89d2 | /python/Sort/BubbleSort.py | fe4c0e4f183df93e94e89a9a26fea609cdd7d9a2 | [] | no_license | x-jeff/Algorithm_Code | 9e3038d9504391e2bd52ddde1230f69953339ab8 | b0411bcc7a7ab674ceca73aeb1348d3241370817 | refs/heads/master | 2023-07-11T19:55:52.401814 | 2021-08-14T03:46:12 | 2021-08-14T03:46:12 | 293,771,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | def bubbleSort(arr):
for i in range(1, len(arr)):
for j in range(0, len(arr)-i):
if arr[j] > arr[j+1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
return arr
if __name__ == '__main__':
    # Quick demo run on a list with duplicates.
    testlist = [17, 23, 20, 14, 12, 25, 1, 20, 81, 14, 11, 12]
    print(bubbleSort(testlist))
"jeff.xinsc@gmail.com"
] | jeff.xinsc@gmail.com |
99a2301cf9bd95c46e5d74f8ae21f723422bbdf3 | bfc88535fa1495c64672f048a5559e8bb6de1ae1 | /TOKI/Dasar/05/f.py | b303902ab8a8f25f189362c66329d929ab17aafb | [] | no_license | famus2310/CP | 59839ffe23cf74019e2f655f49af224390846776 | d8a77572830fb3927de92f1e913ee729d04865e1 | refs/heads/master | 2021-07-05T00:23:31.113026 | 2020-08-07T22:28:24 | 2020-08-07T22:28:24 | 144,426,214 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | import math
N = float(input())
print("%d %d" % (math.floor(N), math.ceil(N))) | [
"fadhilmusaad@gmail.com"
] | fadhilmusaad@gmail.com |
9ff96c8e78bb0eba5f838cde0c006bda3e6972e3 | 0580db19e1dae670160791091da72316911a0cba | /DhavalTest/PatientRecord.py | 29138be2e169b5c24c48f80b86d57d397dfc91b4 | [] | no_license | swapnil0/Python | 06968fef1e1c7ead9074be4f20ff67d4bc17d7d3 | 3938c8dcd21f0ed9c280ac17dcf680049cc520b6 | refs/heads/master | 2022-11-15T14:41:58.538411 | 2020-07-11T06:14:54 | 2020-07-11T06:14:54 | 278,431,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | class PatientRecord:
def __init__(self, age, name, pid):
self.name=name
self.age=age
self.pid=str(pid)+str(age)
def __str__():
return self.name+","+self.age+","+self.pid
def __gt__(self,obj):
return self.age > obj.age
| [
"swapnil.ingale05@gmail.com"
] | swapnil.ingale05@gmail.com |
aa1565f1cb49c38aa7e508edbd54b899398a57ae | 87e0aee0b7561e3585f2bf7170c7fef51c886087 | /modules/money.py | 85adcaccc8cadc895930d0e4d2ead865843a1cee | [] | no_license | rob-kistner/udemy-python-masterclass | 8757e6a58b24768f098951679ad927d86a0dcf27 | 1555f393f6b972976e7f6619f3397aca32a0ffff | refs/heads/master | 2021-05-09T04:20:28.412594 | 2020-04-25T22:21:57 | 2020-04-25T22:21:57 | 119,271,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | def money(amt, spaced=False):
bucks = '$ ' if spaced else '$'
return '{}{:,.2f}'.format(bucks, amt)
| [
"robkistner@mac.com"
] | robkistner@mac.com |
627d416b543f78c9d91e73aec88ac490ac95735c | ec182e7bbf726555032e924491fd4b324b3e2485 | /notification/migrations/0001_initial.py | 223239d2db3ed57a6104763b6061b8e4895b5e93 | [] | no_license | Arsenho/erecrutment | 8e5687a5867186ca995bc338279c33713c336c6e | 6d7d2eccc70f751a64931b1931e72a8df95832a7 | refs/heads/develop | 2023-08-10T21:14:07.560164 | 2020-08-09T18:38:13 | 2020-08-09T18:38:13 | 268,737,681 | 1 | 0 | null | 2021-09-22T19:08:26 | 2020-06-02T07:58:19 | Python | UTF-8 | Python | false | false | 636 | py | # Generated by Django 2.2.6 on 2020-08-09 18:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Notification table (auto id, optional
    text body, auto-managed created/modified timestamps)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(blank=True, null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"anvadele@yahoo.com"
] | anvadele@yahoo.com |
d1de71b1bcb7c3ab33eda16acd10e72b3bda4c31 | daca5cf1750496799a07f436644efac8bf1c68cc | /src/UserApp/routing.py | 5b7f932de3733892e7a7769590ee077b73994b91 | [] | no_license | TasifTarikul/Fiducia | 88ed3c00e734156fb715a68ed0b22e88f3687338 | eca356ba247f94dc02e6a6b0d5527cc8e74dd774 | refs/heads/master | 2022-05-19T22:39:02.088312 | 2020-12-11T12:31:23 | 2020-12-11T12:31:23 | 241,008,454 | 0 | 0 | null | 2022-04-22T23:06:49 | 2020-02-17T03:05:36 | JavaScript | UTF-8 | Python | false | false | 184 | py | from .consumers import NotificationConsumer
from django.urls import re_path
websocket_urlpatterns = [
re_path(r'ws/user_app/notify/(?P<userid>[0-9]+)/$', NotificationConsumer)
]
| [
"tasiftarikul@gmail.com"
] | tasiftarikul@gmail.com |
99a6a8e91b7a79fa5c52649c4f7b783205df4956 | a6b4e4556036514a8ed07847a4db1d1d857d0594 | /tree_construct_string_from_binary_tree.py | 56d91af7c8c4fbfac6a4532e889f05bd2bd38916 | [] | no_license | arpitpattewar/leetcode_puzzles | a4871633000812bf7991b8eb6fc586aa37f11ace | ddefceb7f1e3d09e811044aba5d6be1545ae6894 | refs/heads/master | 2023-02-14T01:03:22.331461 | 2023-02-06T02:24:17 | 2023-02-06T02:24:17 | 154,002,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,412 | py | '''
Created on Nov 1, 2018
@author: aavinash
link: https://leetcode.com/problems/construct-string-from-binary-tree/
'''
from LeetCodeTree import TreeNode
class Solution(object):
    """Four equivalent serialisations of a binary tree into the
    parenthesised preorder string of LeetCode 606.

    An absent left child is rendered as "()" only when a right child
    exists; an absent right child is simply omitted.  Note that every
    variant recurses through tree2str (not through itself), mirroring the
    original implementation.
    """
    def tree2str_1(self, root):
        """String-concatenation variant."""
        if not root:
            return ""
        out = str(root.val)
        if root.left:
            out += "(" + self.tree2str(root.left) + ")"
        if root.right:
            if not root.left:
                out += "()"
            out += "(" + self.tree2str(root.right) + ")"
        return out
    def tree2str_2(self, root):
        """List-accumulator variant."""
        if not root:
            return ""
        parts = [str(root.val)]
        if root.left:
            parts.append("(" + self.tree2str(root.left) + ")")
        if root.right:
            if not root.left:
                parts.append("()")
            parts.append("(" + self.tree2str(root.right) + ")")
        return "".join(parts)
    def tree2str(self, root):
        """Primary entry point (the variant the others recurse into)."""
        if not root:
            return ""
        parts = [str(root.val)]
        if root.left:
            parts.append("(" + self.tree2str(root.left) + ")")
        if root.right:
            placeholder = "()" if not root.left else ""
            parts.append(placeholder + "(" + self.tree2str(root.right) + ")")
        return "".join(parts)
    def tree2str_leetcode(self, t):
        """Compact expression-style variant."""
        if not t:
            return ''
        out = str(t.val)
        if t.left or t.right:
            out += '(' + self.tree2str(t.left) + ')'
        if t.right:
            out += '(' + self.tree2str(t.right) + ')'
        return out
if __name__ == '__main__':
    # Build a small test tree:  1
    #                          / \
    #                         2   3
    #                          \  / \
    #                          7 4   5
    a1 = TreeNode(1)
    a1_lt = TreeNode(2)
    a1_rt = TreeNode(3)
    a1.right = a1_rt
    a1.left =a1_lt
    c = TreeNode(4)
    d = TreeNode(5)
    e = TreeNode(6)
    f= TreeNode(7)
    a1.right.left = c
    a1.right.right = d
    #
    # a1.left.left.left = e
    a1.left.right = f
    #
    # g=TreeNode(8)
    # h=TreeNode(9)
    # i=TreeNode(8)
    # j=TreeNode(9)
    #
    # a1.left.left.left.left=g
    # a1.left.left.left.right=h
    obj = Solution()
    # NOTE(review): Python 2 print statement — this line is a syntax error
    # under Python 3 (should be print(obj.tree2str(a1))).
    print obj.tree2str(a1)
| [
"aavinash@akamai.com"
] | aavinash@akamai.com |
2ad5195cb2531f382db1acaca896c6c212992811 | e63c1e59b2d1bfb5c03d7bf9178cf3b8302ce551 | /uri/uri_python/ad_hoc/p1089.py | 5016209f1dd88333f5f3c73bdab477d7dc2336d9 | [] | no_license | GabrielEstevam/icpc_contest_training | b8d97184ace8a0e13e1c0bf442baa36c853a6837 | 012796c2ceb901cf7aa25d44a93614696a7d9c58 | refs/heads/master | 2020-04-24T06:15:16.826669 | 2019-10-08T23:13:15 | 2019-10-08T23:13:15 | 171,758,893 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | N = int(input())
while N != 0:
entry = input().split(" ")
picos = 0
aux_a = int(entry[N-2])
aux_b = int(entry[N-1])
for i in range(N):
if (aux_b < aux_a and aux_b < int(entry[i])) or (aux_b > aux_a and aux_b > int(entry[i])):
picos += 1
aux_a = aux_b
aux_b = int(entry[i])
print(picos)
N = int(input()) | [
"gabrielestevam@hotmail.com"
] | gabrielestevam@hotmail.com |
1df8e317fea69f008dc5d5e32315bd51aa0fb43c | 5896da906bdcb1315881712a0baa52a706bbeb06 | /cursoemvideo/Atividades/exercicios/ex106.py | 3ebfa0d823d84edaa4ae159d58714aa44738c3d8 | [] | no_license | frederico-prog/python | 313b4c11347fb33f67d73dee89f3106f483a2333 | 6c3d3757944fcbf569e1114838f236a9329358bd | refs/heads/master | 2022-12-13T23:26:55.112797 | 2020-08-21T22:03:26 | 2020-08-21T22:03:26 | 272,381,728 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | '''
FAÇA UM MINI-SISTEMA QUE UTILIZE O INTERECTIVE HELP DO PYTHON. O USUÁRIO VAI DIGITAR O COMANDO E O MANUAL VAI APARECER.
QUANDO O USUÁRIO DIGITAR A PALAVRA 'FIM', O PROGRAMA SE ENCERRARÁ.
OBS.: USE CORES.
'''
from time import sleep
c = (
'\033[m', # 0- sem cor
'\033[0;30;41m', # 1- cor vermelha
'\033[0;30;42m', # 2- cor verde
'\033[0;30;43m', # 3- cor amarela
'\033[0;30;44m', # 4- cor azul
'\033[0;30;45m', # 5- cor roxa
'\033[7;30m' # 6- branca
);
def ajuda(com):
titulo(f'Acessando o manual do comando \'{com}\'', 4)
print(c[6], end='')
help(comando)
print(c[0], end='')
sleep(2)
def titulo(msg, cor=0):
tam = len(msg) + 4
print(c[cor], end='')
print('~' * tam)
print(f' {msg}')
print('~' * tam)
print(c[0], end='')
sleep(1)
# PROGRAMA PRINCIPAL
comando = ''
while True:
titulo('SISTEMA DE AJUDA PyHELP', 2)
comando = str(input('Função ou Biblioteca > '))
if comando.upper() == 'FIM':
break
else:
ajuda(comando)
print('ATÉ LOGO!', 1)
| [
"fredlgprime@gmail.com"
] | fredlgprime@gmail.com |
b32e0d64918cffb217578b9ab4616ae7c3e8ea4e | 707b22b554664ea09532cefdeea3d5f9d8b0b586 | /bin/raksha-api | f005e1088b536dc7f3df73b1eb96190085195e85 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | DPaaS-Raksha/python-rakshaclient | 62ee0f48e7c6aea3069744670ea6d4cd0a43e2ec | 85bfefd935553357ad14b8273035270d0e6946b8 | refs/heads/master | 2020-05-18T19:47:59.710923 | 2013-09-17T02:25:29 | 2013-09-17T02:25:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Raksha OS API."""
# NOTE(jdg): If we port over multi worker code from Nova
# we'll need to set monkey_patch(os=False), unless
# eventlet is updated/released to fix the root issue
import eventlet
eventlet.monkey_patch()
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "raksha", "__init__.py")):
sys.path.insert(0, possible_topdir)
from raksha.openstack.common import gettextutils
gettextutils.install('raksha')
from raksha import flags
from raksha.openstack.common import log as logging
from raksha import service
from raksha import utils
if __name__ == '__main__':
flags.parse_args(sys.argv)
logging.setup("raksha")
utils.monkey_patch()
server = service.WSGIService('osapi_backupjobs')
service.serve(server)
service.wait()
| [
"giri.basava@gmail.com"
] | giri.basava@gmail.com | |
58f59fdce17bede4e24e11eb61d5c95b415ec5ef | 85cd24dcc089aa05c55a2148b66994a65b619dc8 | /garage/exceptions.py | ac4951013ee7026200cacd67eabbe63692501eda | [
"BSD-3-Clause"
] | permissive | shuiziliu/django-garage | 6058bd1a8eb54896e6fbbcf65dda3cc847124164 | e46593bfd6f772216b85f3022d122a8b7064a9b4 | refs/heads/master | 2020-12-24T15:14:44.258944 | 2015-03-02T17:03:50 | 2015-03-02T17:03:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # -*- coding: utf-8 -*-
"""
garage.exceptions
* created: 2014-08-23 Kevin Chan <kefin@makedostudio.com>
* updated: 2014-11-21 kchan
"""
from django.core.exceptions import ImproperlyConfigured
| [
"kefin@makedostudio.com"
] | kefin@makedostudio.com |
2fd4937da743fc000cbedc14f31385020e365cac | c264153f9188d3af187905d846fa20296a0af85d | /Python/Python3网络爬虫开发实战/《Python3网络爬虫开发实战》随书源代码/proxy/selenium_chrome_auth.py | f9b9e55510c5325125459414bee6a67c7eb3fbed | [] | no_license | IS-OSCAR-YU/ebooks | 5cd3c1089a221759793524df647e231a582b19ba | b125204c4fe69b9ca9ff774c7bc166d3cb2a875b | refs/heads/master | 2023-05-23T02:46:58.718636 | 2021-06-16T12:15:13 | 2021-06-16T12:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,653 | py | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import zipfile
ip = '127.0.0.1'
port = 9743
username = 'foo'
password = 'bar'
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
}
}
"""
background_js = """
var config = {
mode: "fixed_servers",
rules: {
singleProxy: {
scheme: "http",
host: "%(ip)s",
port: %(port)s
}
}
}
chrome.proxy.settings.set({value: config, scope: "regular"}, function() {});
function callbackFn(details) {
return {
authCredentials: {
username: "%(username)s",
password: "%(password)s"
}
}
}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking']
)
""" % {'ip': ip, 'port': port, 'username': username, 'password': password}
plugin_file = 'proxy_auth_plugin.zip'
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
chrome_options = Options()
chrome_options.add_argument("--start-maximized")
chrome_options.add_extension(plugin_file)
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.get('http://httpbin.org/get')
| [
"jiangzhangha@163.com"
] | jiangzhangha@163.com |
367694bf22eedbb89985c70d2368890832e317f2 | 23d5370d1b4d889aba0c2bfccfe3fcc8bced7bf4 | /examples/RLC_example/test/RLC_IO_I_eval_sim.py | 7106cd0859cc1a4f13867be28def0f2e4708d138 | [
"MIT"
] | permissive | marcosfelt/sysid-neural-structures-fitting | 0cd21b4197b52ffe5ef78ac4045a431e202fdb05 | 80eda427251e8cce1d2a565b5cbca533252315e4 | refs/heads/master | 2022-12-06T18:45:21.365282 | 2020-09-03T18:32:16 | 2020-09-03T18:32:16 | 292,630,318 | 0 | 0 | MIT | 2020-09-03T17:01:34 | 2020-09-03T17:01:33 | null | UTF-8 | Python | false | false | 4,273 | py | import pandas as pd
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import sys
sys.path.append(os.path.join("..", ".."))
from torchid.iofitter import NeuralIOSimulator
from torchid.iomodels import NeuralIOModel
from common import metrics
if __name__ == '__main__':
dataset_type = 'id'
#dataset_type = 'val'
#model_type = '32step_noise'
model_type = '64step_noise'
# model_type = '1step_nonoise'
# model_type = '1step_noise'
plot_input = False
COL_T = ['time']
COL_X = ['V_C', 'I_L']
COL_U = ['V_IN']
COL_Y = ['I_L']
dataset_filename = f"RLC_data_{dataset_type}.csv"
df_X = pd.read_csv(os.path.join("data", dataset_filename))
time_data = np.array(df_X[COL_T], dtype=np.float32)
# y = np.array(df_X[COL_Y], dtype=np.float32)
x = np.array(df_X[COL_X], dtype=np.float32)
u = np.array(df_X[COL_U], dtype=np.float32)
y_var_idx = 1 # 0: voltage 1: current
y = np.copy(x[:, [y_var_idx]])
N = np.shape(y)[0]
Ts = time_data[1] - time_data[0]
n_a = 2 # autoregressive coefficients for y
n_b = 2 # autoregressive coefficients for u
n_max = np.max((n_a, n_b)) # delay
std_noise_V = 1.0 * 10.0
std_noise_I = 1.0 * 1.0
std_noise = np.array([std_noise_V, std_noise_I])
x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
x_noise = x_noise.astype(np.float32)
y_noise = x_noise[:, [y_var_idx]]
# Initialize optimization
io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
io_solution = NeuralIOSimulator(io_model)
model_filename = f"model_IO_I_{model_type}.pkl"
io_solution.io_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
# In[Validate model]
t_val_start = 0
t_val_end = time_data[-1]
idx_val_start = int(t_val_start//Ts)#x.shape[0]
idx_val_end = int(t_val_end//Ts)#x.shape[0]
n_val = idx_val_end - idx_val_start
u_val = np.copy(u[idx_val_start:idx_val_end])
y_val = np.copy(y[idx_val_start:idx_val_end])
y_meas_val = np.copy(y_noise[idx_val_start:idx_val_end])
time_val = time_data[idx_val_start:idx_val_end]
y_seq = np.zeros(n_a, dtype=np.float32) #np.array(np.flip(y_val[0:n_a].ravel()))
u_seq = np.zeros(n_b, dtype=np.float32 ) #np.array(np.flip(u_val[0:n_b].ravel()))
# Neglect initial values
# y_val = y_val[n_max:, :]
# y_meas_val = y_meas_val[n_max:, :]
# u_val = u_val[n_max:, :]
# time_val = time_val[n_max:, :]
y_meas_val_torch = torch.tensor(y_meas_val)
with torch.no_grad():
y_seq_torch = torch.tensor(y_seq)
u_seq_torch = torch.tensor(u_seq)
u_torch = torch.tensor(u_val)
y_val_sim_torch = io_solution.f_sim(y_seq_torch, u_seq_torch, u_torch)
err_val = y_val_sim_torch - y_meas_val_torch
loss_val = torch.mean((err_val)**2)
if dataset_type == 'id':
t_plot_start = 0.2e-3
else:
t_plot_start = 1.0e-3
t_plot_end = t_plot_start + 0.3e-3
idx_plot_start = int(t_plot_start//Ts)#x.shape[0]
idx_plot_end = int(t_plot_end//Ts)#x.shape[0]
# In[Plot]
y_val_sim = np.array(y_val_sim_torch)
time_val_us = time_val *1e6
if plot_input:
fig, ax = plt.subplots(2,1, sharex=True)
else:
fig, ax = plt.subplots(1, 1, sharex=True)
ax = [ax]
ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val[idx_plot_start:idx_plot_end], 'k', label='True')
ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val_sim[idx_plot_start:idx_plot_end], 'r--', label='Model simulation')
ax[0].legend(loc='upper right')
ax[0].grid(True)
ax[0].set_xlabel("Time ($\mu$s)")
ax[0].set_ylabel("Capacitor voltage $v_C$ (V)")
ax[0].set_ylim([-20, 20])
if plot_input:
ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end], 'k', label='Input')
#ax[1].legend()
ax[1].grid(True)
ax[1].set_xlabel("Time ($\mu$s)")
ax[1].set_ylabel("Input voltage $v_{in}$ (V)")
fig_name = f"RLC_IO_{dataset_type}_{model_type}.pdf"
fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
R_sq = metrics.r_square(y_val, y_val_sim)
print(f"R-squared metrics: {R_sq}")
| [
"marco.forgione1986@gmail.com"
] | marco.forgione1986@gmail.com |
ecf74664f5363c52e4790b600cfe87442802733c | 76efd7bde15c764d81b847c2f1d27776e90ec2ed | /imgauth/urls.py | 9e3bcaf4c6a05bdd033ed6d3d6fdce1b5c3a4914 | [] | no_license | ccsreenidhin/Image-Metadata-Analysis-ELA | e7e961f5d5724397081c9437c78e727577f449fe | 4bb24c3047dc59a81867c7c9cdb58bc0fc222358 | refs/heads/master | 2022-11-07T21:18:51.340625 | 2018-03-23T17:16:27 | 2020-06-13T08:02:49 | 271,966,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | """imgauth URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
#url(r'^', include('imgaut.urls')),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"ccsreenidhin@gmail.com"
] | ccsreenidhin@gmail.com |
884dfc01602e5dbcf3bf40b472a5598f3cb7038f | 71de8d79f91f9ce64bd4df43d9b23afce8cab507 | /shopnani/urls.py | 8f8e65ca13ecd20a4f4c0844145b13a385b3f476 | [] | no_license | ritika-0111/shopna | d7484c4fbd92b45c161932e5cbd338ab0e884392 | 411a3282ad01f65180b5f7c29366c75dafb78bde | refs/heads/master | 2020-11-30T20:28:47.168180 | 2019-12-27T16:50:06 | 2019-12-27T16:50:06 | 230,461,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | """shopnani URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('sample.urls', namespace='sample')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('accounts.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"noreply@github.com"
] | noreply@github.com |
7a18d7edc350a9159863008804955748ffbeec6f | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/Test002/数据类型_20201205162718.py | 6bec763ca9a2bf6df3696d9f6db0124f17054d85 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
# print(fruits.count("apple"))
# a = fruits.index("banana",4)
# print(a)
# fruits.reverse()
# print(fruits)
fruits.append("daka")
| [
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
a6059eb70b584c3b6cf710feda6849035e3de5a6 | caa6ac5a8e16a51e8e375f15550ae14950673ce0 | /py05.py | e2d45abe35999bfc9b8ce15f3fae3a0579f9278d | [] | no_license | kennysongkui/python01 | 76361f8172488d1994634a76b379ce72ad04005b | aa9234dab017a4a077dabdcb507d7d39bf3e23b3 | refs/heads/master | 2020-05-23T23:42:40.827868 | 2017-03-14T09:38:30 | 2017-03-14T09:38:30 | 84,802,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,973 | py | #!/usr/bin/env python
#coding=utf-8
import urllib
import urllib2
import re
import tool
import os
class Spider:
def __init__(self):
self.siteURL = 'https://mm.taobao.com/json/request_to_list.htm'
self.tool = tool.Tool()
def getPage(self,pageIndex):
url = self.siteURL + "?page=" + str(pageIndex)
print url
request = urllib2.Request(url)
print request.data
response = urllib2.urlopen(request)
print response.read()
return response.read().decode('gbk')
def getContents(self,pageIndex):
page = self.getPage(pageIndex)
print page
pattern = re.compile('<div class="list-item".*?pic-word.*?<a href="(.*?)".*?<img src="(.*?)".*?<a class="lady-name.*?>(.*?)</a>.*?<strong>(.*?)</strong>.*?<span>(.*?)</span>',re.S)
items = re.findall(pattern,page)
contents = []
for item in items:
contents.append([item[0],item[1],item[2],item[3],item[4]])
return contents
def getDetailPage(self,infoURL):
response = urllib2.urlopen(infoURL)
return response.read().decode('gbk')
def getBrief(self,page):
pattern = re.compile('<div class="mm-aixiu-content".*?(.*?)<!--',re.S)
result = re.search(pattern,page)
return self.tool.replace(result.group(1))
def getAllImg(self,page):
pattern = re.compile('<div class="mm-aixiu-content".*?>(.*?)<!--',re.S)
content = re.search(pattern,page)
patternImg = re.compile('<img.*?src="(.*?)"'.re.S)
images = re.findall(patternImg,content.group(1))
return images
def saveImgs(self,images,name):
number = 1
print u"发现",name,u"共有",len(images),u"张照片"
for imageURL in images:
splitPath = imageURL.split('.')
fTail = splitPath.pop()
if len(fTail) > 3:
fTail = "jpg"
fileName = name + "/" + str(number) + "." + fTail
self.saveImg(imageURL,fileName)
namber += 1
def saveIcon(self,iconURL,name):
splitPath = iconURL.split('.')
fTail = splitPath.pop()
fileName = name + "/icon." + fTail
self.saveImgs(iconURL,fileName)
def saveBrief(self,content,name):
fileName = name + "/" + name + ".txt"
f = open(fileName,"w+")
print u"正在偷偷保存她的个人信息为",fileName
f.write(content.encode('utf-8'))
def saveImg(self,imageURL,fileName):
u = urllib.urlopen(imageURL)
data = u.read()
f = open(fileName, 'wb')
f.write(data)
print u"正在悄悄保存她的一张图片为",fileName
f.close()
def mkdir(self,path):
path = path.strip()
isExists = os.path.exists(path)
if not isExists:
print u"偷偷新建了名字叫做",path,u'的文件夹'
os.makedirs(path)
return True
else:
print u"名为",path,'的文件夹已经创建成功'
return False
def savePageInfo(self,pageIndex):
contents = self.getContents(pageIndex)
for item in contents:
print u"发现一位模特,名字叫",item[2],u"芳龄",item[3],u",她在",item[4]
print u"正在偷偷地保存",item[2],"的信息"
print u"又意外的发现她的个人地址是",item[0]
detailURL = item[0]
detailPage = self.getDetailPage(detailURL)
brief = self.getBrief(detailPage)
images = self.getAllImg(detailPage)
self.mkdir(item[2])
self.saveBrief(brief,item[2])
self.saveIcon(item[1],item[2])
self.saveImgs(images,item[2])
def savePagesInfo(self,start,end):
for i in range(start,end+1):
print u"正在偷偷寻找第",i,u"个地方,看看MM们在不在"
self.savePageInfo(i)
spider = Spider()
spider.savePagesInfo(2,10) | [
"songkui@chetong.net"
] | songkui@chetong.net |
02ac599efffb60c456691b509ea0cf29184a095d | dd5f20d092493ec8b2ce8ea12ea66ddc52dcdaf6 | /poreseq/Util.py | 33bd8c0b74976c1390b9622081521a527865c3cc | [] | no_license | 0820LL/poreseq | 1421978b07250f43b2d4c967d02e3495534caf6c | c14d77369a431ee2c1f8abb7bbbd28ad3e58d6ff | refs/heads/master | 2020-12-02T19:20:29.021286 | 2016-04-23T20:39:08 | 2016-04-23T20:39:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,258 | py |
class RegionInfo:
'''Helper class for region data.'''
def __init__(self, region=None):
'''Parses a region string.
Accepts four formats:
None
sequence_name
10000:20000
sequence_name:10000:20000
and it is up to later processing to fill in unknown fields appropriately.
'''
self.start = None
self.end = None
self.name = None
if region is None:
return
rs = region.split(':')
if len(rs) != 2:
self.name = rs[0]
if len(rs) > 1:
self.start = int(rs[-2])
self.end = int(rs[-1])
class MutationInfo:
'''Contains information for a single mutation.
Note that orig and mut should be '' and not '.' for insertions/deletions.
Attributes:
start(int): 0-based starting base of mutation
orig(string): sequence of original bases ('ACG')
mut(string): sequence of mutated bases ('GT')
'''
def __init__(self, info=None):
'''Initialize the class, either with blank values, or parsing a string.
If parsing info (eg. reading from file), it takes the format
<start> <orig> <mut>
using whitespace as a delimiter and ignoring comments. It also coverts
input '.' strings to ''.
'''
self.start = 0
self.orig = ""
self.mut = ""
if info is not None:
if len(info) == 0 or info[0] == '#':
self.start = -1
return
vals = info.split()
if len(vals) != 3:
self.start = -1
return
self.start = int(vals[0])
self.orig = vals[1]
self.mut = vals[2]
if self.orig == '.':
self.orig = ''
if self.mut == '.':
self.mut = ''
def __str__(self):
'''Outputs in a nice format.'''
original = self.orig
if len(original) == 0:
original = '.'
mutation = self.mut
if len(mutation) == 0:
mutation = '.'
return '{}\t{}\t{}'.format(self.start,original,mutation)
class MutationScore:
'''Contains information for a scored mutation.
Note that orig and mut should be '' and not '.' for insertions/deletions.
Attributes:
start(int): 0-based starting base of mutation
orig(string): sequence of original bases ('ACG')
mut(string): sequence of mutated bases ('GT')
score(float): scored change in likelihood if this mutation were made
'''
def __init__(self):
'''Default initializer.'''
self.start = 0
self.orig = ""
self.mut = ""
self.score = 0
def __str__(self):
'''Outputs in a nice format.'''
original = self.orig
if len(original) == 0:
original = '.'
mutation = self.mut
if len(mutation) == 0:
mutation = '.'
return '{}\t{}\t{}\t{}'.format(self.start,original,mutation,self.score)
| [
"tamas@seas.harvard.edu"
] | tamas@seas.harvard.edu |
101ccd2aec21b66c706af7a581d6bb1035636092 | abb614790bdf41c7db9d09dfdea4385f78c2be52 | /rtk-RQA/rtk/hardware/__gui/gtk/Capacitor.py | 936eb677804a46719f2a7e3d331f370599b11797 | [
"BSD-3-Clause"
] | permissive | codacy-badger/rtk | f981bb75aadef6aaeb5a6fa427d0a3a158626a2a | bdb9392164b0b32b0da53f8632cbe6e3be808b12 | refs/heads/master | 2020-03-19T02:46:10.320241 | 2017-10-26T20:08:12 | 2017-10-26T20:08:12 | 135,659,105 | 0 | 0 | null | 2018-06-01T02:43:23 | 2018-06-01T02:43:23 | null | UTF-8 | Python | false | false | 39,030 | py | #!/usr/bin/env python
"""
###################################################
Capacitor Package Component Specific Work Book View
###################################################
"""
# -*- coding: utf-8 -*-
#
# rtk.hardware.__gui.gtk.Capacitor.py is part of The RTK Project
#
# All rights reserved.
import sys
# Import modules for localization support.
import gettext
import locale
# Modules required for the GUI.
try:
import pygtk
pygtk.require('2.0')
except ImportError:
sys.exit(1)
try:
import gtk
except ImportError:
sys.exit(1)
try:
import gtk.glade
except ImportError:
sys.exit(1)
# Modules required for plotting.
import matplotlib # pylint: disable=E0401
from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas # pylint: disable=E0401
from matplotlib.figure import Figure # pylint: disable=E0401
# Import other RTK modules.
try:
import Configuration
import gui.gtk.Widgets as Widgets
except ImportError:
import rtk.Configuration as Configuration
import rtk.gui.gtk.Widgets as Widgets
__author__ = 'Andrew Rowland'
__email__ = 'andrew.rowland@reliaqual.com'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
matplotlib.use('GTK')
class Inputs(gtk.Frame):
"""
The Work Book view for displaying all the attributes for a capacitor. The
attributes of a capacitor Work Book view are:
"""
dicQuality = {40: ["", "MIL-SPEC", _(u"Lower")],
41: ["", "M", _(u"Non-Established Reliability"),
_(u"Lower")],
42: ["", "S", "R", "P", "M", "L",
_(u"MIL-C-19978, Non-Established Reliability"),
_(u"Lower")],
43: ["", "S", "R", "P", "M", "L",
_(u"MIL-C-18312, Non-Established Reliability"),
_(u"Lower")],
44: ["", "S", "R", "P", "M", _(u"Lower")],
45: ["", "S", "R", "P", "M", _(u"Lower")],
46: ["", "T", "S", "R", "P", "M", "L",
_(u"MIL-C-5, Non-Established Reliability, Dipped"),
_(u"MIL-C-5, Non-Established Reliability, Molded"),
_(u"Lower")],
47: ["", "MIL-C-10950", _(u"Lower")],
48: ["", "S", "R", "P", "M", "L",
_(u"MIL-C-11272, Non-Established Reliability"),
_(u"Lower")],
49: ["", "S", "R", "P", "M", "L",
_(u"MIL-C-11015, Non-Established Reliability"),
_(u"Lower")],
50: ["", "S", "R", "P", "M",
_(u"Non-Established Reliability"), _(u"Lower")],
51: ["", "D", "C", "S", "B", "R", "P", "M", "L",
_(u"Lower")],
52: ["", "S", "R", "P", "M", "L",
_(u"MIL-C-3965, Non-Established Reliability"),
_(u"Lower")],
53: ["", "S", "R", "P", "M",
_(u"Non-Established Reliability"), _(u"Lower")],
54: ["", "MIL-SPEC", _(u"Lower")],
55: ["", "MIL-SPEC", _(u"Lower")],
56: ["", "MIL-SPEC", _(u"Lower")],
57: ["", "MIL-SPEC", _(u"Lower")],
58: ["", "MIL-SPEC", _(u"Lower")]}
dicSpecification = {40: ["", "MIL-C-25 (CP)", "MIL-C-12889 (CA)"],
41: ["", "MIL-C-11693 (CZ/CZR)"],
42: ["", "MIL-C-14157 (CPV)", "MIL-C-19978 (CQ/CQR)"],
43: ["", "MIL-C-18312 (CH)", "MIL-C-39022 (CHR)"],
44: ["", "MIL-C-55514 (CFR)"],
45: ["", "MIL-C-83421 (CRH)"],
46: ["", "MIL-C-5 (CM)", "MIL-C-39001 (CMR)"],
47: ["", "MIL-C-10950 (CB)"],
48: ["", "MIL-C-11272 (CY)", "MIL-C-23269 (CYR)"],
49: ["", "MIL-C-11015 (CK)", "MIL-C-39014 (CKR)"],
50: ["", "MIL-C-20 (CC/CCR)", "MIL-C-55681 (CDR)"],
51: ["", "MIL-C-39003 (CSR)"],
52: ["", "MIL-C-3965 (CL)", "MIL-C-39003 (CLR)"],
53: ["", "MIL-C-39016 (CU and CUR)"],
54: ["", "MIL-C-62 (CE)"],
55: ["", "MIL-C-81 (CV)"],
56: ["", "MIL-C-14409 (PC)"],
57: ["", "MIL-C-92 (CT)"],
58: ["", "MIL-C-23183 (CG)"]}
dicSpecSheet = {40: [["", u"85\u00B0C", u"125\u00B0C"],
["", u"85\u00B0C"]],
41: [["", u"85\u00B0C", u"125\u00B0C", u"150\u00B0C"]],
42: [["", u"65\u00B0C", u"85\u00B0C", u"125\u00B0C"],
["", u"65\u00B0C", u"85\u00B0C", u"125\u00B0C",
u"170\u00B0C"]],
43: [["", u"85\u00B0C", u"125\u00B0C"],
["", u"85\u00B0C", u"125\u00B0C"]],
44: [["", u"85\u00B0C", u"125\u00B0C"]],
45: [["", u"125\u00B0C"]],
46: [["", u"70\u00B0C", u"85\u00B0C", u"125\u00B0C",
u"150\u00B0C"], ["", u"125\u00B0C", u"150\u00B0C"]],
47: [["", u"85\u00B0C", u"150\u00B0C"]],
48: [["", u"125\u00B0C", u"200\u00B0C"],
["", u"125\u00B0C"]],
49: [["", u"85\u00B0C", u"125\u00B0C", u"150\u00B0C"],
["", u"85\u00B0C", u"125\u00B0C"]],
50: [["", u"85\u00B0C", u"125\u00B0C"],
["", u"85\u00B0C"]],
51: [["", _(u"All")]],
52: [["", u"85\u00B0C", u"125\u00B0C", u"175\u00B0C"],
["", u"125\u00B0C"]],
53: [["", u"85\u00B0C", u"105\u00B0C", u"125\u00B0C"]],
54: [["", u"85\u00B0C"]],
55: [["", u"85\u00B0C", u"125\u00B0C"]],
56: [["", u"125\u00B0C", u"150\u00B0C"]],
57: [["", u"85\u00B0C"]],
58: [["", u"85\u00B0C", u"100\u00B0C", u"125\u00B0C"]]}
def __init__(self, model):
"""
Method to create an input frame for the Capacitor data model.
:param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model`
whose attributes will be displayed.
"""
gtk.Frame.__init__(self)
self.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
# Define private dictionary attributes.
# Define private list attributes.
# Derating points for the derating curve. The list at position 0 is
# for severe environments. The list at position 1 is for benign
# environments.
self._lst_derate_criteria = [[0.6, 0.6, 0.0], [0.9, 0.9, 0.0]]
self._lst_count_labels = [_(u"Quality:"), _(u"Specification:")]
self._lst_stress_labels = [_(u"Quality:"),
_(u"\u03C0<sub>Q</sub> Override:"),
_(u"Rated Voltage:"),
_(u"Applied DC Voltage:"),
_(u"Applied AC Voltage:"),
_(u"Capacitance (F):"),
_(u"Specification:"),
_(u"Temperature Rating:")]
self._lst_quality = self.dicQuality[model.subcategory]
self._lst_specification = self.dicSpecification[model.subcategory]
self._lst_specsheet = self.dicSpecSheet[model.subcategory]
self._lst_construction = []
self._lst_handler_id = []
# Define private scalar attributes.
self._hardware_model = model
self._subcategory = model.subcategory
# Define public dictionary attributes.
# Define public list attributes.
# Define public scalar attributes.
self.cmbConfiguration = Widgets.make_combo(simple=True)
self.cmbConstruction = Widgets.make_combo(simple=True)
self.cmbQuality = Widgets.make_combo(simple=True)
self.cmbSpecification = Widgets.make_combo(simple=True)
self.cmbSpecSheet = Widgets.make_combo(simple=True)
self.txtACVoltApplied = Widgets.make_entry(width=100)
self.txtCapacitance = Widgets.make_entry(width=100)
self.txtCommercialPiQ = Widgets.make_entry(width=100)
self.txtEffResistance = Widgets.make_entry(width=100)
self.txtVoltRated = Widgets.make_entry(width=100)
self.txtVoltApplied = Widgets.make_entry(width=100)
# Subcategory specific attributes.
if self._subcategory == 51: # Solid tantalum
self._lst_stress_labels.append(_(u"Eff. Series Resistance:"))
elif self._subcategory == 52: # Non-solid tantalum
self._lst_construction = ["", _(u"Slug, All Tantalum"),
_(u"Foil, Hermetic"),
_(u"Slug, Hermetic"),
_(u"Foil, Non-Hermetic"),
_(u"Slug, Non-Hermetic")]
self._lst_stress_labels.append(_(u"Construction:"))
elif self._subcategory == 58: # Variable vacuum
self._lst_configuration = ["", _(u"Fixed"), _(u"Variable")]
self._lst_stress_labels.append(_(u"Configuration:"))
# Create the tooltips for all the input widgets.
self.cmbConfiguration.set_tooltip_text(_(u"Displays whether the "
u"selected capacitor is "
u"fixed or variable."))
self.cmbConstruction.set_tooltip_text(_(u"Displays the method of "
u"construction for the "
u"selected capacitor."))
self.cmbQuality.set_tooltip_text(_(u"Select and display the quality "
u"level for the selected "
u"capacitor."))
self.cmbSpecification.set_tooltip_text(_(u"Selects the governing "
u"specification for the "
u"selected capacitor."))
self.cmbSpecSheet.set_tooltip_text(_(u"Selects the maximum "
u"temperature rating for the "
u"selected capacitor."))
self.txtACVoltApplied.set_tooltip_text(_(u"Displays the peak "
u"operating AC voltage for "
u"the selected capacitor."))
self.txtCapacitance.set_tooltip_text(_(u"Display the capacitance in "
u"farads for the selected "
u"capacitor."))
self.txtCommercialPiQ.set_tooltip_text(_(u"Displays the user-defined "
u"quality factor for the "
u"selected capacitor. This "
u"value over rides the "
u"quality factor selected "
u"above."))
self.txtEffResistance.set_tooltip_text(_(u"Displays the effective "
u"series resistance between "
u"the power supply and the "
u"capacitor."))
self.txtVoltRated.set_tooltip_text(_(u"Displays the rated voltage for "
u"the selected capacitor."))
self.txtVoltApplied.set_tooltip_text(_(u"Display the operating DC "
u"voltage for the selected "
u"capacitor."))
# Connect signals to callback functions.
self._lst_handler_id.append(
self.cmbQuality.connect('changed', self._on_combo_changed, 0))
self._lst_handler_id.append(
self.txtCommercialPiQ.connect('focus-out-event',
self._on_focus_out, 1))
self._lst_handler_id.append(
self.txtVoltRated.connect('focus-out-event',
self._on_focus_out, 2))
self._lst_handler_id.append(
self.txtVoltApplied.connect('focus-out-event',
self._on_focus_out, 3))
self._lst_handler_id.append(
self.txtACVoltApplied.connect('focus-out-event',
self._on_focus_out, 4))
self._lst_handler_id.append(
self.txtCapacitance.connect('focus-out-event',
self._on_focus_out, 5))
self._lst_handler_id.append(
self.cmbSpecification.connect('changed',
self._on_combo_changed, 6))
self._lst_handler_id.append(
self.cmbSpecSheet.connect('changed', self._on_combo_changed, 7))
self._lst_handler_id.append(
self.txtEffResistance.connect('focus-out-event',
self._on_focus_out, 8))
self._lst_handler_id.append(
self.cmbConstruction.connect('changed', self._on_combo_changed, 9))
self._lst_handler_id.append(
self.cmbConfiguration.connect('changed',
self._on_combo_changed, 10))
def create_217_count_inputs(self, x_pos=5):
"""
Method to create the MIL-HDBK-217FN2 parts count input widgets for
Capacitors.
:keyword int x_pos: the x position of the display widgets.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"MIL-HDBK-217FN2 Parts Count Inputs") +
"</span>")
_label.set_justify(gtk.JUSTIFY_LEFT)
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.show_all()
self.set_label_widget(_label)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
self.add(_scrollwindow)
# Populate all the gtk.ComboBox().
for i in range(len(self._lst_quality)):
self.cmbQuality.insert_text(i, self._lst_quality[i])
for i in range(len(self._lst_specification)):
self.cmbSpecification.insert_text(i, self._lst_specification[i])
# Create and place all the labels for the inputs.
(_x_pos,
_y_pos) = Widgets.make_labels(self._lst_count_labels, _fixed, 5, 5)
_x_pos = max(x_pos, _x_pos) + 50
# Place all the input widgets.
if self.cmbQuality.get_parent() is not None:
self.cmbQuality.reparent(_fixed)
if self.cmbSpecification.get_parent() is not None:
self.cmbSpecification.reparent(_fixed)
_fixed.put(self.cmbQuality, _x_pos, _y_pos[0])
_fixed.put(self.cmbSpecification, _x_pos, _y_pos[1])
_fixed.show_all()
return _x_pos
def create_217_stress_inputs(self, x_pos=5):
"""
Method to create the MIL-HDBK-217FN2 part stress input widgets for
Capacitors.
:keyword int x_pos: the x position of the display widgets.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"MIL-HDBK-217FN2 Part Stress Inputs") +
"</span>")
_label.set_justify(gtk.JUSTIFY_LEFT)
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.show_all()
self.set_label_widget(_label)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
self.add(_scrollwindow)
# Populate all the gtk.ComboBox().
for i in range(len(self._lst_quality)):
self.cmbQuality.insert_text(i, self._lst_quality[i])
for i in range(len(self._lst_specification)):
self.cmbSpecification.insert_text(i, self._lst_specification[i])
# Create and place all the labels for the inputs.
(_x_pos,
_y_pos) = Widgets.make_labels(self._lst_stress_labels, _fixed, 5, 5)
_x_pos = max(x_pos, _x_pos) + 50
# Place all the input widgets.
if self.cmbQuality.get_parent is not None:
self.cmbQuality.reparent(_fixed)
if self.cmbSpecification.get_parent is not None:
self.cmbSpecification.reparent(_fixed)
_fixed.put(self.cmbQuality, _x_pos, _y_pos[0])
_fixed.put(self.txtCommercialPiQ, _x_pos, _y_pos[1])
_fixed.put(self.txtVoltRated, _x_pos, _y_pos[2])
_fixed.put(self.txtVoltApplied, _x_pos, _y_pos[3])
_fixed.put(self.txtACVoltApplied, _x_pos, _y_pos[4])
_fixed.put(self.txtCapacitance, _x_pos, _y_pos[5])
_fixed.put(self.cmbSpecification, _x_pos, _y_pos[6])
_fixed.put(self.cmbSpecSheet, _x_pos, _y_pos[7])
if self._subcategory == 51: # Solid tantalum
_fixed.put(self.txtEffResistance, _x_pos, _y_pos[8])
elif self._subcategory == 52: # Non-solid tantalum
for i in range(len(self._lst_construction)):
self.cmbConstruction.insert_text(i, self._lst_construction[i])
_fixed.put(self.cmbConstruction, _x_pos, _y_pos[8])
elif self._subcategory == 58: # Gas or vacuum
for i in range(len(self._lst_configuration)):
self.cmbConfiguration.insert_text(i,
self._lst_configuration[i])
_fixed.put(self.cmbConfiguration, _x_pos, _y_pos[8])
_fixed.show_all()
return _x_pos
def load_217_count_inputs(self, model):
"""
Method to load the Capacitor class gtk.Widgets() with MIL-HDBK-217FN2
parts count calculation inputs.
:param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model`
to load the attributes from.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
self.cmbQuality.set_active(int(model.quality))
self.cmbSpecification.set_active(int(model.specification))
return False
def load_217_stress_inputs(self, model):
"""
Method to load the Capacitor class gtk.Widgets() with MIL-HDBK-217FN2
part stress calculation inputs.
:param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model`
to load the attributes from.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
fmt = '{0:0.' + str(Configuration.PLACES) + 'G}'
self.cmbQuality.set_active(int(model.quality))
self.txtCommercialPiQ.set_text(str(fmt.format(model.q_override)))
self.txtVoltRated.set_text(str(fmt.format(model.rated_voltage)))
self.txtVoltApplied.set_text(str(fmt.format(model.operating_voltage)))
self.txtACVoltApplied.set_text(str(fmt.format(model.acvapplied)))
self.txtCapacitance.set_text(str('{0:0.8G}'.format(model.capacitance)))
# Load subcategory specific widgets.
if self._subcategory in [40, 41, 42, 43, 46, 47, 48, 49, 50, 52, 53,
54, 55, 56, 57, 58]:
self.cmbSpecification.set_active(int(model.specification))
if self._subcategory in [40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 52,
53, 54, 55, 56, 57, 58]:
self.cmbSpecSheet.set_active(int(model.spec_sheet))
if self._subcategory == 51:
self.txtEffResistance.set_text(
str(fmt.format(model.effective_resistance)))
if self._subcategory == 52:
self.cmbConstruction.set_active(int(model.construction))
if self._subcategory == 58:
self.cmbConfiguration.set_active(int(model.configuration))
return False
def _on_combo_changed(self, combo, index):
"""
Method to respond to gtk.ComboBox() changed signals and calls the
correct function or method, passing any parameters as needed.
:param gtk.ComboBox combo: the gtk.ComboBox() that called this method.
:param int index: the index in the handler ID list oc the callback
signal associated with the gtk.ComboBox() that
called this method.
:return: False if successful or True is an error is encountered.
:rtype: bool
"""
combo.handler_block(self._lst_handler_id[index])
if index == 0:
self._hardware_model.quality = combo.get_active()
elif index == 6:
self._hardware_model.specification = combo.get_active()
self._load_spec_sheet(self._hardware_model.specification - 1)
elif index == 7:
self._hardware_model.spec_sheet = combo.get_active()
try:
self._hardware_model.reference_temperature = \
self._hardware_model.lst_ref_temp[combo.get_active() - 1]
except IndexError:
print self._hardware_model.name, self._hardware_model.lst_ref_temp
elif index == 9:
self._hardware_model.construction = combo.get_active()
elif index == 10:
self._hardware_model.configuration = combo.get_active()
combo.handler_unblock(self._lst_handler_id[index])
return False
def _on_focus_out(self, entry, __event, index):
"""
Method to respond to gtk.Entry() focus_out signals and calls the
correct function or method, passing any parameters as needed.
:param gtk.Entry entry: the gtk.Entry() that called this method.
:param gtk.gdk.Event __event: the gtk.gdk.Event() that called this
method.
:param int index: the index in the handler ID list of the callback
signal associated with the gtk.Entry() that
called this method.
:return: False if successful or True is an error is encountered.
:rtype: bool
"""
entry.handler_block(self._lst_handler_id[index])
if index == 1:
self._hardware_model.q_override = float(entry.get_text())
elif index == 2:
self._hardware_model.rated_voltage = float(entry.get_text())
elif index == 3:
self._hardware_model.operating_voltage = float(entry.get_text())
elif index == 4:
self._hardware_model.acvapplied = float(entry.get_text())
elif index == 5:
self._hardware_model.capacitance = float(entry.get_text())
elif index == 8:
self._hardware_model.effective_resistance = float(entry.get_text())
entry.handler_unblock(self._lst_handler_id[index])
return False
def _load_spec_sheet(self, specification):
"""
Method to load the specification sheet gtk.ComboBox() whenever a new
specification is selected.
:param int specification: the selected specification index.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# Remove existing entries.
_model = self.cmbSpecSheet.get_model()
_model.clear()
# Load the new entries.
_n_spec_sheets = len(self._lst_specsheet[specification])
for i in range(_n_spec_sheets):
self.cmbSpecSheet.insert_text(
i, self._lst_specsheet[specification][i])
return False
class Results(gtk.Frame):
"""
The Work Book view for displaying all the output attributes for a
capacitor. The output attributes of a capacitor Work Book view are:
"""
def __init__(self, model):
"""
Method to initialize an instance of the Capacitor assessment results
view.
:param int subcategory: the Capacitor subcategory ID of the component
to create the view for.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
gtk.Frame.__init__(self)
# Define private dictionary attributes.
# Define private list attributes.
self._lst_count_labels = [u"<span foreground=\"blue\">\u03BB<sub>EQUIP</sub> = \u03BB<sub>g</sub>\u03C0<sub>Q</sub></span>", u"\u03BB<sub>g</sub>:",
u"\u03C0<sub>Q</sub>:"]
self._lst_stress_labels = ['', u"\u03BB<sub>b</sub>:",
u"\u03C0<sub>Q</sub>:",
u"\u03C0<sub>E</sub>:",
u"\u03C0<sub>CV</sub>:"]
# Define private scalar attributes.
self._hardware_model = model
self._subcategory = model.subcategory
# Define public dictionary attributes.
# Define public list attributes.
# Define public scalar attributes.
self.txtLambdaB = Widgets.make_entry(width=100, editable=False,
bold=True)
self.txtPiQ = Widgets.make_entry(width=100, editable=False, bold=True)
self.txtPiE = Widgets.make_entry(width=100, editable=False, bold=True)
self.txtPiCV = Widgets.make_entry(width=100, editable=False, bold=True)
self.txtPiSR = Widgets.make_entry(width=100, editable=False, bold=True)
self.txtPiC = Widgets.make_entry(width=100, editable=False, bold=True)
self.txtPiCF = Widgets.make_entry(width=100, editable=False, bold=True)
self.figDerate = Figure(figsize=(6, 4))
self.axsDerate = self.figDerate.add_subplot(111)
self.pltDerate = FigureCanvas(self.figDerate)
# Subcategory specific attributes.
if self._subcategory in [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
53, 54]:
self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub>\u03C0<sub>CV</sub></span>"
elif self._subcategory == 51: # Solid tantalum
self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub>\u03C0<sub>CV</sub>\u03C0<sub>SR</sub></span>"
self._lst_stress_labels.append(u"\u03C0<sub>SR</sub>:")
elif self._subcategory == 52: # Non-solid tantalum
self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub>\u03C0<sub>CV</sub>\u03C0<sub>C</sub></span>"
self._lst_stress_labels.append(u"\u03C0<sub>C</sub>:")
elif self._subcategory in [55, 56, 57]:
self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub></span>"
self._lst_stress_labels.pop(4)
elif self._subcategory == 58:
self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub>\u03C0<sub>CF</sub></span>"
self._lst_stress_labels[4] = u"\u03C0<sub>CF</sub>:"
# Create the tooltips for all the results widgets.
self.txtPiQ.set_tooltip_text(_(u"Displays the quality factor for the "
u"selected capacitor."))
self.txtPiQ.set_tooltip_text(_(u"Displays the quality factor for the "
u"selected capacitor."))
self.txtPiE.set_tooltip_text(_(u"Displays the environement factor for "
u"the selected capacitor."))
self.txtPiCV.set_tooltip_text(_(u"Displays the capacitance correction "
u"factor for the selected capacitor."))
self.txtPiSR.set_tooltip_text(_(u"Displays the effective series "
u"resistance factor for the selected "
u"capacitor."))
self.txtPiC.set_tooltip_text(_(u"Displays the construction factor "
u"for the selected capacitor."))
self.txtPiCF.set_tooltip_text(_(u"Displays the configuration factor "
u"for the selected capacitor."))
def create_217_count_results(self, x_pos=5):
"""
Method to create the MIL-HDBK-217FN2 parts count result widgets for
Capacitors.
:keyword int x_pos: the x position of the display widgets.
:return: _x_pos: the x-coordinate of the widgets.
:rtype: int
"""
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"MIL-HDBK-217FN2 Parts Count Results") +
"</span>")
_label.set_justify(gtk.JUSTIFY_LEFT)
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.show_all()
self.set_label_widget(_label)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
self.add(_scrollwindow)
# Create and place all the labels for the inputs.
(_x_pos,
_y_pos) = Widgets.make_labels(self._lst_count_labels, _fixed, 5, 25)
_x_pos = max(x_pos, _x_pos) + 25
# Create the tooltips for all the results display widgets.
self.txtLambdaB.set_tooltip_text(_(u"Displays the generic hazard rate "
u"for the selected capacitor."))
# Place the reliability result display widgets.
if self.txtLambdaB.get_parent() is not None:
self.txtLambdaB.reparent(_fixed)
if self.txtPiQ.get_parent() is not None:
self.txtPiQ.reparent(_fixed)
_fixed.put(self.txtLambdaB, _x_pos, _y_pos[1])
_fixed.put(self.txtPiQ, _x_pos, _y_pos[2])
_fixed.show_all()
return _x_pos
def create_217_stress_results(self, x_pos=5):
"""
Method to create the MIL-HDBK-217FN2 part stress result widgets for
Capacitors.
:keyword int x_pos: the x position of the display widgets.
:return: _x_pos: the x-coordinate of the widgets.
:rtype: int
"""
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"MIL-HDBK-217FN2 Part Stress Results") +
"</span>")
_label.set_justify(gtk.JUSTIFY_LEFT)
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.show_all()
self.set_label_widget(_label)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
self.add(_scrollwindow)
# Create and place all the labels for the inputs.
(_x_pos,
_y_pos) = Widgets.make_labels(self._lst_stress_labels, _fixed, 5, 25)
_x_pos = max(x_pos, _x_pos) + 25
# Create the tooltips for all the results display widgets.
self.txtLambdaB.set_tooltip_text(_(u"Displays the base hazard rate "
u"for the selected capacitor."))
# Place the reliability result display widgets.
if self.txtLambdaB.get_parent() is not None:
self.txtLambdaB.reparent(_fixed)
if self.txtPiQ.get_parent() is not None:
self.txtPiQ.reparent(_fixed)
_fixed.put(self.txtLambdaB, _x_pos, _y_pos[1])
_fixed.put(self.txtPiQ, _x_pos, _y_pos[2])
_fixed.put(self.txtPiE, _x_pos, _y_pos[3])
# Subcategory specific widgets.
if self._subcategory == 51:
_fixed.put(self.txtPiSR, _x_pos, _y_pos[5])
elif self._subcategory == 52:
_fixed.put(self.txtPiC, _x_pos, _y_pos[5])
elif self._subcategory not in [55, 56, 57, 58]: # Not variable
_fixed.put(self.txtPiCV, _x_pos, _y_pos[4])
if self._subcategory == 58:
_fixed.put(self.txtPiCF, _x_pos, _y_pos[4])
_fixed.show_all()
return _x_pos
def load_217_count_results(self, model):
"""
Method to load the Capacitor class MIL-HDBK-217 parts count result
gtk.Widgets().
:param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model`
to load the attributes from.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
fmt = '{0:0.' + str(Configuration.PLACES) + 'G}'
self.txtLambdaB.set_text(str(fmt.format(model.base_hr)))
self.txtPiQ.set_text(str(fmt.format(model.piQ)))
return False
def load_217_stress_results(self, model):
"""
Method to load the Capacitor class MIL-HDBK-217 part stress result
gtk.Widgets().
:param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model`
to load the attributes from.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
fmt = '{0:0.' + str(Configuration.PLACES) + 'G}'
self.txtLambdaB.set_text(str(fmt.format(model.base_hr)))
self.txtPiQ.set_text(str(fmt.format(model.piQ)))
self.txtPiE.set_text(str(fmt.format(model.piE)))
self.txtPiCV.set_text(str(fmt.format(model.piCV)))
if self._subcategory == 51:
self.txtPiSR.set_text(str(fmt.format(model.piSR)))
elif self._subcategory == 52:
self.txtPiC.set_text(str(fmt.format(model.piC)))
elif self._subcategory == 58:
self.txtPiCF.set_text(str(fmt.format(model.piCF)))
return False
def load_derate_plot(self, model, frame):
"""
Method to load the stress derate plot for the Capacitor class.
:param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model`
to load the plot for.
:param gtk.Frame frame: the gtk.Frame() to embed the derate plot into.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# Clear the operating point and derating curve for the component. We
# do this here so the component-specific GUI will set the proper x and
# y-axis labels.
self.axsDerate.cla()
# Plot the derating curve and operating point.
_x = [float(model.min_rated_temperature),
float(model.knee_temperature),
float(model.max_rated_temperature)]
self.axsDerate.plot(_x, model.lst_derate_criteria[0], 'r.-',
linewidth=2)
self.axsDerate.plot(_x, model.lst_derate_criteria[1], 'b.-',
linewidth=2)
self.axsDerate.plot(model.temperature_active,
model.voltage_ratio, 'go')
if(_x[0] != _x[2] and
model.lst_derate_criteria[1][0] != model.lst_derate_criteria[1][2]):
self.axsDerate.axis([0.95 * _x[0], 1.05 * _x[2],
model.lst_derate_criteria[1][2],
1.05 * model.lst_derate_criteria[1][0]])
else:
self.axsDerate.axis([0.95, 1.05, 0.0, 1.05])
self.axsDerate.set_title(_(u"Voltage Derating Curve for %s at %s") %
(model.part_number, model.ref_des),
fontdict={'fontsize': 12,
'fontweight': 'bold',
'verticalalignment': 'baseline'})
_legend = tuple([_(u"Harsh Environment"), _(u"Mild Environment"),
_(u"Voltage Operating Point")])
_leg = self.axsDerate.legend(_legend, loc='upper right', shadow=True)
for _text in _leg.get_texts():
_text.set_fontsize('small')
# Set the proper labels on the derating curve.
self.axsDerate.set_xlabel(_(u"Temperature (\u2070C)"),
fontdict={'fontsize': 12,
'fontweight': 'bold'})
self.axsDerate.set_ylabel(r'$\mathbf{V_{op} / V_{rated}}$',
fontdict={'fontsize': 12,
'fontweight': 'bold',
'rotation': 'vertical',
'verticalalignment': 'baseline'})
self.figDerate.tight_layout()
frame.add(self.pltDerate)
frame.show_all()
return False
| [
"arowland@localhost.localdomain"
] | arowland@localhost.localdomain |
221f6766e94a926edbc76bf1e3da59c333ccd8f6 | 42631b33be63821744ec85caf6ef49a6b1d189b0 | /VSRTorch/Models/video/__init__.py | f1c5cfea0869dbccaa6f876c2c5d088f6f37712f | [
"MIT"
] | permissive | AliceMegatron/VideoSuperResolution | c70e822764b29a01f3a7c035cfc10e3b31b9f6f4 | bfcf237ee7e412b688c7f5e094585bbaecffc1d0 | refs/heads/master | 2020-05-29T15:25:13.840222 | 2019-05-16T13:00:43 | 2019-05-16T13:00:43 | 189,219,950 | 1 | 0 | MIT | 2019-05-29T12:21:53 | 2019-05-29T12:21:52 | null | UTF-8 | Python | false | false | 240 | py | # Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019/4/3 下午5:10
import logging
_logger = logging.getLogger("VSR.VIDEO")
_logger.info("@LoSealL. Video related ops, nets...")
| [
"twytwy12345@live.com"
] | twytwy12345@live.com |
399748c8245334ba0393c96da43d21a13b68ad95 | 5d4267bf377bea5beb50ee70e7a853b1d5be88d0 | /Project Euler/34.py | 4085ba2c972d435b453fc62ce40d31adfbafd462 | [] | no_license | Penguin-71630/Python-3 | 043b4d7b7525478f87c2404ff0d585d030d50d11 | fc3acf1a2b7a204282503d581cc61275b39911a4 | refs/heads/master | 2022-01-20T04:14:51.005757 | 2019-08-11T08:08:58 | 2019-08-11T08:08:58 | 198,004,811 | 0 | 0 | null | 2019-07-21T04:27:52 | 2019-07-21T02:52:04 | Python | UTF-8 | Python | false | false | 470 | py | # Q34-Digit factorials
# unsolved
def factorial(a):
fac = 1
for i in range(a, 0, -1):
fac *= i
return fac
def take_number_apart(a):
fac_sum = 0
for j in str(a):
fac_sum += factorial(int(j))
return fac_sum
# find the limit
k = 9
summation = 0
while k < take_number_apart(k):
k = k * 10 + 9
for n in range(3, k+1, 1):
summation += n if n == take_number_apart(n) else False
print(summation)
| [
"noreply@github.com"
] | noreply@github.com |
ebe1b3152447a485f5e92575742111b181eaa38a | a8487d84ead85c451a13144ffe0ed1b3bc1ce83f | /VPES_Auto/C23.py | 7ad71f3b15ccdd100ca6a9aed23b35a83d904f16 | [] | no_license | jinokhong/VPES_Auto | a4ba97b58a3caf4a599ff7fda64168a3233cca46 | dc0df673415d90cb9d225ef83353aae0d8188f8a | refs/heads/master | 2020-06-27T01:28:36.828916 | 2020-01-02T03:11:10 | 2020-01-02T03:11:10 | 199,366,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | import Default_Setting
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest, time
# TestRail module.run_id, Testcase_id, Message 정보
case_id = 23
class C23(unittest.TestCase):
def test_C23(self):
try:
module = Default_Setting
p = Default_Setting.default()
p.setUp()
p.test_project_init()
p.driver.find_element_by_id("projectCreate").click()
time.sleep(2)
p.driver.find_element_by_id("scmType").click()
Select(p.driver.find_element_by_id("scmType")).select_by_visible_text("SVN") # 드롭타운 선택
p.driver.find_element_by_id("scmType").click()
p.driver.find_element_by_id("scmUrl").click()
p.driver.find_element_by_id("scmUrl").clear()
p.driver.find_element_by_id("scmUrl").send_keys(module.scm_svn)
p.driver.find_element_by_id("BusinessName").click()
p.driver.find_element_by_id("BusinessName").clear()
p.driver.find_element_by_id("BusinessName").send_keys("Selenium")
p.driver.find_element_by_id("CSCIName").click()
p.driver.find_element_by_id("CSCIName").clear()
p.driver.find_element_by_id("CSCIName").send_keys("SVN")
p.driver.find_element_by_id("projectCheck").click()
time.sleep(3)
p.driver.find_element_by_id("btnState").click()
time.sleep(2)
p.driver.find_element_by_id("successBtn").click()
time.sleep(3)
p.driver.find_element_by_id("projectCreate").click()
time.sleep(2)
p.driver.find_element_by_id("scmType").click()
Select(p.driver.find_element_by_id("scmType")).select_by_visible_text("SVN") # 드롭타운 선택
p.driver.find_element_by_id("scmType").click()
p.driver.find_element_by_id("scmUrl").click()
p.driver.find_element_by_id("scmUrl").clear()
p.driver.find_element_by_id("scmUrl").send_keys(module.scm_svn)
p.driver.find_element_by_id("BusinessName").click()
p.driver.find_element_by_id("BusinessName").clear()
p.driver.find_element_by_id("BusinessName").send_keys("Selenium")
p.driver.find_element_by_id("CSCIName").click()
p.driver.find_element_by_id("CSCIName").clear()
p.driver.find_element_by_id("CSCIName").send_keys("SVN2")
p.driver.find_element_by_id("projectCheck").click()
time.sleep(1)
assert "만들 수 있는 프로젝트 입니다." in p.driver.find_element_by_id("modal-content").text
status_id = 1
except NoSuchElementException:
status_id = 5
# Test Rail 결과 메세지 입력
if status_id == 1:
print('\nRun ID : %s\nTest Case ID: %s\nMessage : %s\n' % (module.run_id, case_id, module.passMsg))
module.client.send_post(
'add_result_for_case/%s/%s' % (module.run_id, case_id),
{'status_id': status_id, 'comment': module.passMsg })
elif status_id == 5:
print('\nRun ID : %s\nTest Case ID: %s\nMessage : %s\n' % (module.run_id, case_id, module.failMsg))
module.client.send_post(
'add_result_for_case/%s/%s' % (module.run_id, case_id),
{'status_id': status_id, 'comment': module.failMsg })
if __name__ == "__main__":
unittest.main()
| [
"johong@suresofttech.com"
] | johong@suresofttech.com |
20c27998a016f048864a6587a7e0f5563eb3eb70 | 4ba6e58255643acf5c4bd3a7b1bf2765705f84b2 | /OldProjects/saferidelogger/mapper.py | 0d63121720de4daf064ae5b590c92740771c35b2 | [] | no_license | CliffordC/OldPythonProjects | db4358a4cc53d98248052f1aed821ca9a28c0e62 | 502ffc0998040e6d9060169906c03fa73f62b070 | refs/heads/master | 2022-07-24T00:30:07.351813 | 2020-05-18T05:01:57 | 2020-05-18T05:01:57 | 264,800,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | import gmaps
gmaps.configure() | [
"siestaftw10@gmail.com"
] | siestaftw10@gmail.com |
bf25cf83e5bb846200fe4a50cc9eb2aebef369e0 | f1620520e7f5a553f2a25d5e1cc5d3451fa612dc | /hw3.py | 46c5b5bf86b4b25c0ece14ad784dcfd95f0e6d08 | [] | no_license | milesfertel/NandSquare256 | c750a11a2945c4d860216282a1543deb46a37528 | 4acd8861153e7fa7e2483efb821c431cf989b47b | refs/heads/master | 2020-03-29T18:03:02.579707 | 2018-10-02T18:05:19 | 2018-10-02T18:05:19 | 150,192,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,238 | py | """ Basic template file that you should fill in for Problem Set 3. Some util
functions are provided from the NAND notebooks online that implement some
of the NAND essentials. """
from util import EVAL
from util import TRUTH
from util import NANDProgram
from math import floor
# TODO: Implement this function and return a string representation of its NAND
# implementation. You don't have to use the class we supplied - you could use
# other methods of building up your NAND program from scratch.
# This was the first attempt, not particularly optimized or pretty
def nandsquare(n):
    """Build a NAND program that squares an n-bit number mod 2^n.

    The program reads inputs x_0..x_{n-1} and writes outputs
    y_0..y_{n-1}, both little-endian (index 0 is the least
    significant bit, e.g. 110001 --> 35).  Returns the program
    as its string representation.
    """
    prog = NANDProgram(n, n, debug=False)
    prog.ONE("ONE")
    prog.ZERO("ZERO")

    # Build one shifted partial-product row per input bit: row i
    # carries x_i AND x_j at position i + j, behind i leading ZEROs.
    rows = []
    for i in range(n):
        row = ["ZERO"] * i
        while len(row) < n:
            j = len(row) - i
            wire = prog.allocate()
            prog.AND(wire, prog.input_var(i), prog.input_var(j))
            row.append(wire)
        rows.append(row)

    # Ripple-carry each remaining row into a running accumulator.
    # Only the final addition writes directly into the output wires;
    # every earlier sum lands in freshly allocated scratch wires.
    acc = rows[0]
    rest = rows[1:]
    for k, row in enumerate(rest):
        final = (k == len(rest) - 1)
        nxt = acc[:]
        carry = prog.allocate()
        nxt[0] = prog.allocate()
        prog.ADD_3(prog.output_var(0) if final else nxt[0], carry,
                   row[0], acc[0], "ZERO")
        for bit in range(1, n - 1):
            prev = carry
            carry = prog.allocate()
            nxt[bit] = prog.allocate()
            prog.ADD_3(prog.output_var(bit) if final else nxt[bit], carry,
                       row[bit], acc[bit], prev)
        # Top bit: the carry out is discarded, giving the mod-2^n result.
        nxt[n - 1] = prog.allocate()
        prog.ADD_3(prog.output_var(n - 1) if final else nxt[n - 1], "TRASH",
                   row[n - 1], acc[n - 1], carry)
        acc = nxt

    # "Compiles" the completed program as a NAND program string.
    return str(prog)
## TODO: Do this for bonus points and the leaderboard.
def nandsquare256():
    """Return the leaderboard NAND program squaring a 256-bit input mod 2^256.

    Simply delegates to fasternand256(), the current best implementation.
    """
    return fasternand256()
# fastestnand256 was supposed to be the fastest, but the Dadda multiplier did not pan out.
# This function implements one step of Karatsuba's algorithm, relying on the fact that
# the result is reduced mod 2^256, so only two of the partial products need computing.
def fasternand256():
    '''Build the n=256 squaring NAND program using one Karatsuba-style step.

    Write x = L + H * 2^128 (L = low 128 input bits, H = high 128 bits).
    Then x^2 = L^2 + 2*L*H*2^128 + H^2*2^256, and the last term vanishes
    mod 2^256 -- so only two products are formed: L^2 (the "BD" sums
    below) and the doubled cross term (the "AD" sums, doubled by an
    extra one-bit shift).  Returns the program's string representation.

    NOTE: relies on Python 2 integer division -- n/2 feeds range() and
    slice indices, which would fail as a float under Python 3.
    '''
    n = 256
    prog = NANDProgram(n, n)
    prog.ONE("ONE")
    prog.ZERO("ZERO")
    half = n/2
    inputs = [prog.input_var(i) for i in range(n)]
    # Low/high halves of the input; b and c are assigned but unused.
    a = inputs[:half]
    b = inputs[half:]
    c = inputs[:half]
    d = inputs[half:]
    partials = []
    # table caches each AND wire so the symmetric pair (i,j)/(j,i)
    # shares a single gate instead of computing x_i AND x_j twice.
    table = {}
    # create symmetric partial sums for the low product L^2:
    # row i holds x_i AND x_j at position i + j, padded to n with ZEROs.
    for i in range(n/2):
        partial = ["ZERO" for _ in range(i)]
        for j in range(n/2):
            if str(i) + ' ' + str(j) in table:
                partial.append(table[str(i) + ' ' + str(j)])
            elif str(j) + ' ' + str(i) in table:
                partial.append(table[str(j) + ' ' + str(i)])
            else:
                partial.append(prog.allocate())
                prog.AND(partial[-1], prog.input_var(i), prog.input_var(j))
                table[str(i) + ' ' + str(j)] = partial[-1]
        for j in range(n-len(partial)):
            partial.append("ZERO")
        partials.append(partial)
    # calculate L^2 by ripple-carrying each row into the accumulator.
    # Output bit k is fixed after row k is added (lower bits no longer
    # change), so it is written directly into output_var(k).
    seenzero = False
    total = partials.pop(0)
    # AND with the constant ONE acts as a copy into the output wire.
    prog.AND(prog.output_var(0), total[0], "ONE")
    for index, partial in enumerate(partials):
        newtotal = total[:]
        carry = prog.allocate()
        last_carry = ""
        prog.ADD_2(prog.output_var(index + 1), carry,
                   partial[index + 1], total[index + 1], debug=True)
        seenzero = False
        # Once a ZERO appears in a row, all higher positions in that row
        # are ZERO too, so the remaining adders can be skipped entirely.
        for i in range(index + 2, n - 1):
            last_carry = carry
            if seenzero:
                assert partial[i] == "ZERO"
                newtotal[i] = "ZERO"
            else:
                if partial[i] == "ZERO":
                    seenzero = True

                carry = prog.allocate()
                newtotal[i] = prog.allocate()
                prog.ADD_3(newtotal[i], carry,
                           partial[i], total[i], last_carry)
        # This can also be optimized for ZEROs
        if seenzero:
            assert partial[n - 1] == "ZERO"
            newtotal[n - 1] = "ZERO"
        else:
            newtotal[n - 1] = prog.allocate()
            # ADD_3_1: three-input add whose carry out is dropped (mod 2^n).
            prog.ADD_3_1(newtotal[n - 1],
                         partial[n - 1], total[n - 1], carry)
        total = newtotal
    # Copy the settled bit n/2 of L^2 into the output.
    prog.AND(prog.output_var(n/2), total[n/2], 'ONE')

    # create partial sums for the doubled cross term: a[i] AND d[j] is
    # placed at position i + j + n/2 + 1 (the +1 doubles the product).
    partials = []
    for i in range(n/2 - 1):
        partial = ["ZERO" for _ in range(i + (n / 2) + 1)]
        for j in range(n/2 - 1 - i):
            partial.append(prog.allocate())
            prog.AND(partial[-1], a[i], d[j])
        partials.append(partial)

    # calculate the cross term mod 2^128 and add it onto L^2's high half
    for uindex, partial in enumerate(partials):
        newtotal = total[:]
        index = uindex + n/2
        carry = prog.allocate()
        last_carry = ""
        prog.ADD_2(prog.output_var(index + 1), carry,
                   partial[index + 1], total[index + 1])
        # After writing the top output bit there is nothing left to sum.
        if index == n - 2:
            break;
        for i in range(index + 2, n - 1):
            last_carry = carry
            carry = prog.allocate()
            newtotal[i] = prog.allocate()
            prog.ADD_3(newtotal[i], carry,
                       partial[i], total[i], last_carry)
        newtotal[n - 1] = prog.allocate()
        prog.ADD_3_1(newtotal[n - 1],
                     partial[n - 1], total[n - 1], carry)
        total = newtotal

    # "compiles" your completed program as a NAND program string.
    return str(prog)
# ==================BELOW HERE LIES THE GRAVEYARD OF THE DAMNED================
# Implements dadda multiplier
#def fastestnand256():
# n = 256
# prog = NANDProgram(n, n)
# prog.ONE("ONE")
# prog.ZERO("ZERO")
#
# partials = []
# table = {}
# # create symmetric partial sums
# for i in range(n):
# partial = ["ZERO" for z in range(i)]
# for j in range(n - len(partial)):
# if str(i) + ' ' + str(j) in table:
# partial.append(table[str(i) + ' ' + str(j)])
# elif str(j) + ' ' + str(i) in table:
# partial.append(table[str(j) + ' ' + str(i)])
# else:
# partial.append(prog.allocate())
# prog.AND(partial[-1], prog.input_var(i), prog.input_var(j))
# table[str(i) + ' ' + str(j)] = partial[-1]
# partials.append(partial)
# partials.append(["ZERO" for z in range(n)])
# partials.append(["ZERO" for z in range(n)])
#
# def getHeight(partials, i):
# counter = 0
# for partial in partials:
# if partial[i] == "ZERO":
# return counter
# counter += 1
#
# def assignVars(partials, out, carry, col, num, height):
# if num == 2:
# partials[0][col] = "REPLACEME"
# partials[1][col] = "REPLACEME"
# for i in range(height - 2):
# partials[i][col] = partials[i + 2][col]
# partials[height - 2][col] = out
# partials[height - 1][col] = "ZERO"
# else:
# partials[0][col] = "REPLACEME"
# partials[1][col] = "REPLACEME"
# partials[2][col] = "REPLACEME"
# for i in range(height - 3):
# partials[i][col] = partials[i + 3][col]
# partials[height - 3][col] = out
# partials[height - 2][col] = "ZERO"
# partials[height - 1][col] = "ZERO"
#
# if carry:
# partials[getHeight(partials, col + 1)][col + 1] = carry
#
# return partials
#
# d = [2.0]
# while d[-1] < n:
# d.append(floor(d[-1] * 1.5))
# d.pop()
#
# while len(d) >= 1:
# cutoff = d.pop()
# colHeight = n
# while colHeight > cutoff:
# for col in range(n):
# colHeight = getHeight(partials, col)
# if colHeight == cutoff + 1:
# out = prog.allocate()
# carry = prog.allocate()
# if col == n - 1:
# prog.ADD_2_1(out,
# partials[0][col], partials[1][col])
# partials = assignVars(partials, out, None, col, 2, colHeight)
# else:
# prog.ADD_2(out, carry,
# partials[0][col], partials[1][col])
# partials = assignVars(partials, out, carry, col, 2, colHeight)
# elif colHeight > cutoff + 1:
# out = prog.allocate()
# carry = prog.allocate()
# if col == n - 1:
# prog.ADD_3_1(out,
# partials[0][col], partials[1][col], partials[2][col])
# partials = assignVars(partials, out, None, col, 3, colHeight)
# else:
# prog.ADD_3(out, carry,
# partials[0][col], partials[1][col], partials[2][col])
# partials = assignVars(partials, out, carry, col, 3, colHeight)
#
# # 256 bit adder
# prog.AND(prog.output_var(0), partials[0][0], "ONE")
# carry = prog.allocate()
# prog.ADD_2(prog.output_var(1), carry,
# partials[0][1], partials[1][1])
# lastcarry = carry
# for i in range(2, n - 1):
# carry = prog.allocate()
# prog.ADD_3(prog.output_var(i), carry,
# partials[0][i], partials[1][i], lastcarry)
# lastcarry = carry
#
# prog.ADD_3_1(prog.output_var(n - 1),
# partials[0][n - 1], partials[1][n - 1], lastcarry)
#
# return str(prog)
#def fastnand256():
# '''Implement nandsquare for a specific input size, n=256. This result gets
# placed on the leaderboard for extra credit. If you get close to the top
# score on the leaderboard, you'll still recieve BONUS POINTS!!!'''
# n = 256
# prog = NANDProgram(n, n)
# prog.ONE("ONE")
# prog.ZERO("ZERO")
#
# table = {}
# half = n/2
# inputs = [prog.input_var(i) for i in range(n)]
# a = inputs[:half]
# b = inputs[half:]
# c = inputs[:half]
# d = inputs[half:]
# partials = []
# table = {}
# # create symmetric partial sums for BD
# for i in range(n/2):
# partial = ["ZERO" for _ in range(i)]
# for j in range(n/2):
# if j + i >= 2 * i:
# partial.append(prog.allocate())
# prog.AND(partial[-1], prog.input_var(i), prog.input_var(j))
# table[str(i) + ' ' + str(j)] = partial[-1]
# else:
# if str(i) + ' ' + str(j) in table:
# partial.append(table[str(i) + ' ' + str(j)])
# elif str(j) + ' ' + str(i) in table:
# partial.append(table[str(j) + ' ' + str(i)])
# else:
# print "PANIC"
# return
# for j in range(n-len(partial)):
# partial.append("ZERO")
# partials.append(partial)
#
# # calculate BD
# seenzero = False
# total = partials.pop(0)
# prog.AND(prog.output_var(0), total[0], "ONE")
# for index, partial in enumerate(partials):
# newtotal = total[:]
# carry = prog.allocate()
# last_carry = ""
# prog.ADD_3(prog.output_var(index + 1), carry,
# partial[index + 1], total[index + 1], "ZERO")
# if index == n/2 - 1:
# break;
#
# seenzero = False
# for i in range(index + 2, n - 1):
# last_carry = carry
# if seenzero:
# assert partial[i] == "ZERO"
# newtotal[i] = "ZERO"
# else:
# if partial[i] == "ZERO":
# seenzero = True
#
# carry = prog.allocate()
# newtotal[i] = prog.allocate()
# prog.ADD_3(newtotal[i], carry,
# partial[i], total[i], last_carry)
# if seenzero:
# assert partial[n - 1] == "ZERO"
# newtotal[n - 1] = "ZERO"
# else:
# newtotal[n - 1] = prog.allocate()
# prog.ADD_3(newtotal[n - 1], "TRASH",
# partial[n - 1], total[n - 1], carry)
# total = newtotal
#
# # create partial sums for AD
# partials = []
# for i in range(n/2 - 1):
# partial = ["ZERO" for _ in range(i + (n / 2) + 1)]
# for j in range(n/2 - 1 - i):
# partial.append(prog.allocate())
# prog.AND(partial[-1], a[i], d[j])
# partials.append(partial)
# # calcualte AD
# for uindex, partial in enumerate(partials):
# newtotal = total[:]
# index = uindex + n/2
# carry = prog.allocate()
# last_carry = ""
# prog.ADD_3(prog.output_var(index + 1), carry,
# partial[index + 1], total[index + 1], "ZERO")
# if index == n - 2:
# break;
#
# for i in range(index + 2, n - 1):
# last_carry = carry
# carry = prog.allocate()
# newtotal[i] = prog.allocate()
# prog.ADD_3(newtotal[i], carry,
# partial[i], total[i], last_carry)
# newtotal[n - 1] = prog.allocate()
# prog.ADD_3(newtotal[n - 1], "TRASH",
# partial[n - 1], total[n - 1], carry)
# total = newtotal
#
# # "compiles" your completed program as a NAND program string.
# return str(prog)
#
### TODO: Do this for bonus points and the leaderboard.
#def onandsquare(n):
# '''Implement nandsquare for a specific input size, n=256. This result gets
# placed on the leaderboard for extra credit. If you get close to the top
# score on the leaderboard, you'll still recieve BONUS POINTS!!!'''
# prog = NANDProgram(n, n)
# prog.ONE("ONE")
# prog.ZERO("ZERO")
# partials = []
# table = {}
# # create symmetric partial sums
# for i in range(n):
# partial = ["ZERO" for z in range(i)]
# for j in range(n - len(partial)):
# if j + i >= 2 * i:
# partial.append(prog.allocate())
# prog.AND(partial[-1], prog.input_var(i), prog.input_var(j))
# table[str(i) + ' ' + str(j)] = partial[-1]
# else:
# if str(i) + ' ' + str(j) in table:
# partial.append(table[str(i) + ' ' + str(j)])
# elif str(j) + ' ' + str(i) in table:
# partial.append(table[str(j) + ' ' + str(i)])
# else:
# print "PANIC"
# return
# partials.append(partial)
#
# # sum partial sums
# total = partials.pop(0)
# prog.AND(prog.output_var(0), total[0], "ONE")
# for index, partial in enumerate(partials):
# newtotal = total[:]
# carry = prog.allocate()
# last_carry = ""
# prog.ADD_3(prog.output_var(index + 1), carry,
# partial[index + 1], total[index + 1], "ZERO")
# if index == 254:
# break;
#
# for i in range(index + 2, n - 1):
# last_carry = carry
# carry = prog.allocate()
# newtotal[i] = prog.allocate()
# prog.ADD_3(newtotal[i], carry,
# partial[i], total[i], last_carry)
# newtotal[n - 1] = prog.allocate()
# prog.ADD_3(newtotal[n - 1], "TRASH",
# partial[n - 1], total[n - 1], carry)
# total = newtotal
#
# # "compiles" your completed program as a NAND program string.
# return str(prog)
#
## TODO: Do this for bonus points and the leaderboard.
#def onandsquare256():
# '''Implement nandsquare for a specific input size, n=256. This result gets
# placed on the leaderboard for extra credit. If you get close to the top
# score on the leaderboard, you'll still recieve BONUS POINTS!!!'''
# n = 256
# prog = NANDProgram(n, n)
# prog.ONE("ONE")
# prog.ZERO("ZERO")
# partials = []
# table = {}
# # create symmetric partial sums
# for i in range(n):
# partial = ["ZERO" for z in range(i)]
# for j in range(n - len(partial)):
# if j + i >= 2 * i:
# partial.append(prog.allocate())
# prog.AND(partial[-1], prog.input_var(i), prog.input_var(j))
# table[str(i) + ' ' + str(j)] = partial[-1]
# else:
# if str(i) + ' ' + str(j) in table:
# partial.append(table[str(i) + ' ' + str(j)])
# elif str(j) + ' ' + str(i) in table:
# partial.append(table[str(j) + ' ' + str(i)])
# else:
# print "PANIC"
# return
# partials.append(partial)
#
# # sum partial sums
# total = partials.pop(0)
# prog.AND(prog.output_var(0), total[0], "ONE")
# for index, partial in enumerate(partials):
# newtotal = total[:]
# carry = prog.allocate()
# last_carry = ""
# prog.ADD_3(prog.output_var(index + 1), carry,
# partial[index + 1], total[index + 1], "ZERO")
# if index == 254:
# break;
#
# for i in range(index + 2, n - 1):
# last_carry = carry
# carry = prog.allocate()
# newtotal[i] = prog.allocate()
# prog.ADD_3(newtotal[i], carry,
# partial[i], total[i], last_carry)
# newtotal[n - 1] = prog.allocate()
# prog.ADD_3(newtotal[n - 1], "TRASH",
# partial[n - 1], total[n - 1], carry)
# total = newtotal
#
# # "compiles" your completed program as a NAND program string.
# return str(prog)
##
### TODO: Do this for bonus points and the leaderboard.
#def oldnandsquare256():
# '''Implement nandsquare for a specific input size, n=256. This result gets
# placed on the leaderboard for extra credit. If you get close to the top
# score on the leaderboard, you'll still recieve BONUS POINTS!!!'''
# n = 256
# prog = NANDProgram(n, n)
# prog.ONE("ONE")
# prog.ZERO("ZERO")
# partials = []
# # create partial sums
# for i in range(n):
# partial = ["ZERO" for z in range(i)]
# for j in range(n - len(partial)):
# partial.append(prog.allocate())
# prog.AND(partial[-1], prog.input_var(i), prog.input_var(j))
# partials.append(partial)
#
# # sum partial sums
# total = partials.pop(0)
# for index, partial in enumerate(partials):
# newtotal = total[:]
# carry = prog.allocate()
#
# newtotal[0] = prog.allocate()
# prog.ADD_3(newtotal[0] if index != len(partials) - 1 else prog.output_var(0), carry,
# partial[0], total[0], "ZERO")
# last_carry = ""
# for i in range(1, n - 1):
# last_carry = carry
# carry = prog.allocate()
# newtotal[i] = prog.allocate()
# prog.ADD_3(newtotal[i] if index != len(partials) - 1 else prog.output_var(i), carry,
# partial[i], total[i], last_carry)
# newtotal[n - 1] = prog.allocate()
# prog.ADD_3(newtotal[n - 1] if index != len(partials) - 1 else prog.output_var(n - 1), "TRASH",
# partial[n - 1], total[n - 1], carry)
# total = newtotal
#
# # "compiles" your completed program as a NAND program string.
# return str(prog)
#
#
#def tupToProg(t):
# nand = NANDProgram(t[0], t[1], debug=False)
# vs = [nand.input_var(v) for v in range(t[0])]
# i = 0
# for line in t[2]:
# if len(vs) >= t[0] + len(t[2]) - t[1]:
# vs.append(nand.output_var(i))
# i += 1
# else:
# vs.append(nand.allocate())
# nand.NAND(vs[line[0]], vs[line[1]], vs[line[2]])
# return str(nand)
if __name__ == '__main__':
    # Live entry point: build the 256-bit squaring NAND program and report
    # its size as a line count.  Python 2 syntax throughout (print
    # statement, raw_input in the commented-out code below).
    # Generate the string representation of a NAND prog. that adds numbers
    #print(tupToProg((3,1,((3,2,2),(4,1,1),(5,3,4),(6,2,1),(7,6,6),(8,0,0),(9,7,8),(10,5,0),(11,9,10)))))
    #addfive = str(nandadder(2))
    # Input Number 1: 11110 --> 15
    # Input Number 2: 10110 --> 13 1111010110
    # Expected Output: 28 --> 001110
    #816 0000110011
    #877 1011011011
    # 10111001011
    # print(EVAL(addfive,'1111'))
    #def squaren(N):
    #    return str(nandsquare(N))
    #for bitcount in range(1, 6):
    #    print "bitcount: " + str(bitcount)
    #    square = squaren(bitcount)
    #    for value in range(2 ** bitcount):
    #        print "value: " + str(value)
    #        #valid = (('{0:0'+ str(bitcount) + 'b}').format(value ** 2))[::-1][:bitcount]
    #        res = EVAL(square, bin(2**10 + value)[2:][::-1][:bitcount])
    #        print(res)
    #square = squaren(int(raw_input("number of bits:")))
    #print len(square.split('\n'))
    # (Removed: dead debug comments that compared nandsquare256() on one
    # hard-coded 256-bit input string against an expected output string,
    # and a TRUTH-table comparison of the n=256 and n=6 builders.)
    #TRUTH(square)
    # NOTE(review): nandsquare256() is defined earlier in this file (not
    # visible here); this prints the number of gates/lines it emits.
    print len(nandsquare256().split('\n'))
    #print len(onandsquare(256).split('\n'))
    #print TRUTH(nandsquare256()) == TRUTH(nandsquare(6))
| [
"milesfertel@college.harvard.edu"
] | milesfertel@college.harvard.edu |
50a415cc4f827edf71be0d052f41c61bd0eb5c69 | c01ffbf9e223bec136b1f527ffe378a70183eb6c | /DownloadFiles.py | aef7229f90f99201397a432ddeb2f18e03f6c320 | [] | no_license | frakopy/Download-files-using-Selenium | d1e4b3974efddd4fb41877b5c9a3703156971568 | 8c6faf372c2a5bcfc0702f29427682f7a94fb500 | refs/heads/main | 2023-05-06T17:48:31.978808 | 2021-05-27T19:55:44 | 2021-05-27T19:55:44 | 371,488,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,458 | py | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as exc_condts
from selenium.webdriver.common.by import By
#from webdriver_manager.chrome import ChromeDriverManager
import time, os, csv, math
from zipfile import ZipFile as Zip
from openpyxl import load_workbook
class reporte():
    """Automate downloading the weekly CPU reports from the Iportal site.

    Drives a Selenium-controlled Chrome session: log in, export one
    weekly-history file per monitored URL, log out, then unzip the
    downloads into the staging folder used by the rest of this script.

    NOTE(review): uses Selenium 3 APIs (``find_element_by_id``,
    ``executable_path``) that were removed in Selenium 4 — confirm the
    installed selenium version before upgrading.
    """
    def __init__(self):
        # Chrome options that suppress the "your connection is not private /
        # not secure" interstitial page.
        self.options = webdriver.ChromeOptions()
        self.options.add_argument('--ignore-ssl-errors=yes')#ignore SSL errors
        self.options.add_argument('--ignore-certificate-errors')#ignore certificate errors
        #self.options.headless = True#to run the browser in the background
        # Instantiate the webdriver with the options above so the insecure
        # connection warning page is skipped.
        self.driver = webdriver.Chrome(executable_path='C:/driver_Chrome/chromedriver.exe',options=self.options)
        #self.driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=self.options)
    def login_I2KV5(self):
        """Open the portal login page and sign in with the fixed credentials."""
        self.url_cpu = 'https://www.xxxxxxx.com'
        self.driver.minimize_window()#start the browser minimized
        self.driver.get(self.url_cpu)
        # Wait until the login button is locatable on the page.
        WebDriverWait(self.driver, 10).until(exc_condts.presence_of_element_located((By.ID, 'btn_submit')))
        # Then enter the user name and password.
        self.login = self.driver.find_element_by_id('txf_username')
        self.login.send_keys('xxxxx')
        self.password = self.driver.find_element_by_id('txf_imtinfo')
        self.password.send_keys('xxxxx')
        self.btn_login = self.driver.find_element_by_id('btn_submit')
        self.btn_login.click()
        # Wait until the indicated element is locatable to know the page
        # loaded successfully; the element (by ID) is the page's
        # "settings" menu entry.
        WebDriverWait(self.driver, 10).until(exc_condts.presence_of_element_located((By.ID, 'menu.com.iemp.system')))
    def descarga_reporte(self):
        """Visit each monitored URL and export its weekly history file."""
        self.lista_urls = [
            'https://xxxxxxxx/pm/themes/default/pm/app/history_pm_alone.html?curMenuId=com.iemp.app.pm.historypm&monitorPortletID=test&mapKey=xxxxxxx&from=topnMonitorView',
            'https://xxxxxxxxxxx/pm/themes/default/pm/app/history_pm_alone.html?curMenuId=com.iemp.app.pm.historypm&monitorPortletID=test&mapKey=xxxxxx&from=topnMonitorView',
            'https://xxxxxxxxxxxxxx/pm/themes/default/pm/app/history_pm_alone.html?curMenuId=com.iemp.app.pm.historypm&monitorPortletID=test&mapKey=xxxxx&from=topnMonitorView',
            'https://xxxxxxxxxxxx/pm/themes/default/pm/app/history_pm_alone.html?curMenuId=com.iemp.app.pm.historypm&monitorPortletID=test&mapKey=xxxxx&from=topnMonitorView',
            'https://xxxxxxxxxxxxx/pm/themes/default/pm/app/history_pm_alone.html?curMenuId=com.iemp.app.pm.historypm&monitorPortletID=test&mapKey=xxxxx&from=topnMonitorView',
            'https://xxxxxxxxxxxxxxxxx/pm/themes/default/pm/app/history_pm_alone.html?curMenuId=com.iemp.app.pm.historypm&monitorPortletID=test&mapKey=xxxxx&from=topnMonitorView',
            'https://xxxxxxxxxxxxx/pm/themes/default/pm/app/history_pm_alone.html?curMenuId=com.iemp.app.pm.historypm&monitorPortletID=test&mapKey=xxxxxxxx&from=topnMonitorView',
            'https://xxxxxxxxxxxxxxx/pm/themes/default/pm/app/history_pm_alone.html?curMenuId=com.iemp.app.pm.historypm&monitorPortletID=test&mapKey=xxxxx&from=topnMonitorView',
        ]
        for url in self.lista_urls:
            self.driver.get(url)
            # Wait until the weekly-history option is locatable, then click it.
            WebDriverWait(self.driver, 10).until(exc_condts.presence_of_element_located((By.ID, 'timerange_6'))).click()
            # Give the page a moment to enable the export button.
            time.sleep(1)
            # Locate the export button and click it to download the file.
            self.btn_export = self.driver.find_element_by_id('btnExportData')
            self.btn_export.click()
    def logout(self):
        """Log out of the portal and close the browser window."""
        # NOTE(review): this assignment shadows the logout() method on the
        # instance, so calling logout() a second time on the same object
        # would fail.
        self.logout = self.driver.find_element_by_id('login_logoutIcon')
        self.logout.click()
        time.sleep(0.5)
        self.boton_ok_salir = self.driver.find_element_by_id('fw_btn_ok')
        self.boton_ok_salir.click()
        self.driver.close()
    # The following method unzips the downloaded archives and places them in
    # the directory where they will be processed into the report to send.
    def descomprime_data(self):
        """Extract each downloaded .zip into the staging folder, renaming
        every extracted file with a sequential ``Archivo_<n>_`` prefix and
        deleting the source .zip afterwards."""
        self.ruta_archivo = 'C:/Users/FRANK BOJORQUEZ/Downloads/'
        self.ruta_destino = 'D:/A_PYTHON/ProgramasPython/Control_NodosCA/Reporte_CPU_VPN-PPS/Archivos_vpn/'
        self.lista_archivos_zip = os.listdir(self.ruta_archivo)
        self.archivos_zip = [self.archivo_zip for self.archivo_zip in self.lista_archivos_zip if '.zip' in self.archivo_zip]
        self.n = 0
        for self.archivo_zip in self.archivos_zip:
            self.archivo = Zip(self.ruta_archivo+self.archivo_zip)
            self.archivo.extractall(self.ruta_destino)
            self.archivo.close()
            self.archivo_csv = os.listdir(self.ruta_destino)[self.n]
            os.rename(self.ruta_destino+self.archivo_csv,self.ruta_destino+f'Archivo_{self.n}_'+self.archivo_csv)#rename the file in the destination folder
            os.remove(self.ruta_archivo+self.archivo_zip)
            self.n +=1
#---- Create the report object and run the download workflow step by step --------------------------
generador_reporte = reporte() #instantiate the report downloader (opens Chrome)
generador_reporte.login_I2KV5()#-----log in to the Iportal web page
generador_reporte.descarga_reporte()#---download the zip files holding the CPU data
generador_reporte.logout()#----------log out of Iportal and close the browser
generador_reporte.descomprime_data()#unzip the downloads into: D:/A_PYTHON/ProgramasPython/Control_NodosCA/Reporte_CPU_VPN-PPS/Archivos_vpn/
#--------------------------------------------------------------------------------------------------------------------
#_______________________________________________________________________________________________________________________
#Las siguientes funciones se utilizan para obtener el promedio del CPU y agregar el valor en
#el archivo a enviar
#_______________________________________________________________________________________________________________________
# ----------------------- Funcion que obtiene el promedio del CPU de cada Tarjeta -----------------------------------------------
def cpu(archivo, ruta_archivos_excell='D:/A_PYTHON/ProgramasPython/Control_NodosCA/Reporte_CPU_VPN-PPS/Archivos_vpn/'):
    """Return the average CPU load recorded in one per-card CSV export.

    Args:
        archivo: CSV file name inside ``ruta_archivos_excell``.
        ruta_archivos_excell: directory (with trailing slash) holding the
            exported CSVs; defaults to the staging folder filled by the
            download step, so existing ``cpu(archivo)`` callers keep working.

    Returns:
        The arithmetic mean of the integer values found in column 1.

    Raises:
        ZeroDivisionError: if the file contains no numeric rows.
    """
    valores_cpu = []
    with open(ruta_archivos_excell + archivo) as f:
        lector = csv.reader(f)
        for fila in lector:
            # Skip blank/short rows (csv.reader yields [] for empty lines,
            # so fila[1] would raise IndexError) and the header row, whose
            # column fails the isnumeric() test.
            if len(fila) > 1 and fila[1].isnumeric():
                valores_cpu.append(int(fila[1]))
    promedio_tarjeta = sum(valores_cpu) / len(valores_cpu)
    return promedio_tarjeta
#-------------------------------------------------------------------------------------------------------------------
def elimina_archivos_csv():
    """Delete every file left in the per-card CSV staging directory."""
    directorio = 'D:/A_PYTHON/ProgramasPython/Control_NodosCA/Reporte_CPU_VPN-PPS/Archivos_vpn/'
    # Remove each entry directly; the directory only ever holds the
    # extracted CSV exports at this point in the workflow.
    for nombre in os.listdir(directorio):
        os.remove(directorio + nombre)
#--------------funcion que Modifica el nombre del archivo a enviar con la fecha actual------------------------------------------
def cambiar_nombre_archivo():
    """Rename the outgoing report with today's date.

    Returns:
        tuple[str, str]: the new file name and the directory it lives in.
    """
    directorio = 'D:/A_PYTHON/ProgramasPython/Control_NodosCA/Reporte_CPU_VPN-PPS/ArchivoFinal_a_enviar/'
    hoy = time.strftime("%d-%m-%y")
    # The folder is expected to contain exactly one file: the template
    # report produced by the previous run.
    actual = os.listdir(directorio)[0]
    nuevo = f'Plantilla_ Disponibilidad CORE_IT-{hoy}.xlsx'
    # Stamp the file with today's date.
    os.rename(directorio + actual, directorio + nuevo)
    return nuevo, directorio
#--------------------------------------------------------------------------------------------------------------------
#-------------- funcion que Inserta el valor del CPU en el reporte a enviar -------------------------------------------------
def inserta_dato_cpu(nombre_archivo, path, promedio_cpu_vpn):
    """Write the averaged CPU value into cell D5 of the report workbook.

    Args:
        nombre_archivo: workbook file name.
        path: directory (with trailing slash) containing the workbook.
        promedio_cpu_vpn: formatted CPU average string (e.g. ``'42%'``).
    """
    # Local renamed from ``reporte`` so it no longer shadows the reporte
    # class defined above in this module.
    libro = load_workbook(path + nombre_archivo)
    # Workbook indexing replaces get_sheet_by_name(), which is deprecated
    # and removed in current openpyxl releases.
    hoja_plataformas = libro['PLATAFORMAS']
    hoja_plataformas['D5'] = promedio_cpu_vpn
    libro.save(path + nombre_archivo)
#---------------------------------------------------------------------------------------------------------------------
#----- Call cpu(), which returns the per-card average, and accumulate the result for every exported CSV -------------------------------
lista_archivos_csv = os.listdir('D:/A_PYTHON/ProgramasPython/Control_NodosCA/Reporte_CPU_VPN-PPS/Archivos_vpn/')
suma_valores_cpu = 0
for archivo in lista_archivos_csv:
    suma_valores_cpu += cpu(archivo)
# Round the value up and divide by the number of cards.
# NOTE(review): the 8 assumes exactly eight card CSVs were downloaded
# (one per URL in reporte.descarga_reporte) — confirm if URLs change.
promedio_cpu_vpn = str(math.ceil(suma_valores_cpu/8))+'%'
#---------------------------------------------------------------------------------------------------------------------
# Delete the per-card CSV files now that their values have been aggregated.
elimina_archivos_csv()
#------------------------------------------------------------------------------------------------------------------------
# cambiar_nombre_archivo() returns the date-stamped name of the report file
# to be e-mailed, together with the directory it lives in.
nuevo_nombre, path_file = cambiar_nombre_archivo()
#--------------------------------------------------------------------------------------------------------------------
#------ Insert the averaged CPU value into the workbook that will be sent by e-mail ------------------------------------------------------------------------
inserta_dato_cpu(nuevo_nombre, path_file, promedio_cpu_vpn)
#--------------------------------------------------------------------------------------------------------------------
#El acrhivo que contiene el subject del correo con la fecha correspondiente
def modificar_subject_txt():
    """Rewrite the e-mail subject/body text file with today's date."""
    fecha = time.strftime("%d/%b/%Y")
    ruta = r'D:\A_PYTHON\ProgramasPython\Control_NodosCA\Reporte_CPU_VPN-PPS\ArchivoFinal_a_enviar\Subjet_Y_Cuerpo_Del_Correo.txt'
    asunto = f'Carga de Cpu ::: {fecha}'
    cuerpo = '\n\nBuenos días,\n\nSe adjunta reporte semanal de CPU para el OSS/VPN/PPS.\n\nSaludos.'
    # Subject line first, then the fixed body text.
    with open(ruta, 'w') as archivo:
        archivo.write(asunto)
        archivo.write(cuerpo)
# Refresh the e-mail subject/body file with today's date, then finish.
modificar_subject_txt()
print('Fin del programa...')
#--------------------------------------------------------------------------------------------------------------------
| [
"noreply@github.com"
] | noreply@github.com |
e1b8b16676ad28146055b62b838fbfe5aad1dead | 739a84f15a1242ec715c79f4f1fd7657c607f2e6 | /804-unique-morse-code-words/804-unique-morse-code-words.py | c867b4c5cf6c30dbd95251439810089c2a8b060b | [] | no_license | ElshadaiK/Competitive-Programming | 365683a7af61f881ee9cd56d2124f09cfb88e789 | 9757395120757a81fb5df0bd4719771e60410b47 | refs/heads/master | 2023-03-09T04:58:33.262626 | 2023-02-04T12:24:39 | 2023-02-04T12:24:39 | 225,391,846 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | class Solution:
def uniqueMorseRepresentations(self, words: List[str]) -> int:
# create a dictionary for morse code (You can just copy & paste it! ^.^)
ENG_to_MORSE = {
'a':".-", 'b':"-...", 'c':"-.-.", 'd':"-..", 'e':".",
'f':"..-.", 'g':"--.", 'h':"....", 'i':"..", 'j':".---",
'k':"-.-", 'l':".-..", 'm':"--", 'n':"-.", 'o':"---",
'p':".--.", 'q':"--.-", 'r':".-.", 's':"...", 't':"-",
'u':"..-", 'v':"...-", 'w':".--", 'x':"-..-", 'y':"-.--", 'z':"--..",
}
cnt = {} # dictionary for different transformations
for word in words: # loop through every word
tmp = ""
for c in word: # loop through every character
tmp += ENG_to_MORSE[c] # convert the word to morse code
if tmp not in cnt:
cnt[tmp] = 0
else:
cnt[tmp] += 1
return len(cnt) | [
"54620095+ElshadaiK@users.noreply.github.com"
] | 54620095+ElshadaiK@users.noreply.github.com |
98abb2095d0747b698415bc438c553124bfed6c4 | 5faade7c8d091edb5e96d721ec33a9b240ee6e55 | /mmdetection/mmdet/.mim/tools/train.py | 27d56b1cb2b54cdb7e29127aec267fae482cf038 | [
"Apache-2.0",
"CC-BY-2.0"
] | permissive | seogi98/object-detection-level2-cv-18 | 34bba5c6a9d49d506277d2ba10d01f3b59af3117 | e87c741c4918314e28c461b937c7094d264fce56 | refs/heads/main | 2023-08-21T03:13:14.702929 | 2021-10-16T10:13:20 | 2021-10-16T10:13:20 | 425,447,040 | 3 | 0 | null | 2021-11-07T08:09:36 | 2021-11-07T09:09:36 | null | UTF-8 | Python | false | false | 6,914 | py | import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
    """Parse the command-line arguments for a detector training run.

    Returns:
        argparse.Namespace: parsed arguments.  When only the deprecated
        ``--options`` flag is given, its value is copied into
        ``cfg_options`` so downstream code reads a single attribute.

    Raises:
        ValueError: if both ``--options`` and ``--cfg-options`` are passed.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus (a count) and --gpu-ids (explicit ids) are mutually exclusive
    # ways of selecting devices for non-distributed training.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=2021, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Expose the local rank to libraries that read it from the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is the deprecated spelling of --cfg-options; reject using
    # both, and fold the old flag into the new attribute otherwise.
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def main():
    """Build the config, logger, model and datasets, then launch training.

    Order matters: CLI overrides are merged into the config first, the
    distributed environment is initialized before the logger (the logger
    reads the dist info), and the work dir must exist before the config
    dump and log file are written into it.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # Merge --cfg-options key=value overrides into the loaded config.
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    # CLI resume/GPU selections override whatever the config file says.
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    datasets = [build_dataset(cfg.data.train)]
    # A 2-stage workflow means a validation pass shares the train pipeline.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
if __name__ == '__main__':
main()
| [
"tmdgns1139@naver.com"
] | tmdgns1139@naver.com |
9d48aa9c700b4a07e4a8f8bcbda6c8fb2120b598 | bad08ce4b707f8d479a6f9d6562f90d397042df7 | /Python/python-socket-网络协议.py | eb95bcb6957946195c1044ca5c82f8d396114488 | [] | no_license | lengyue1024/notes | 93bf4ec614cbde69341bc7e4decad169a608ff39 | 549358063da05057654811a352ae408e48498f25 | refs/heads/master | 2020-04-29T07:14:45.482919 | 2019-03-16T07:51:26 | 2019-03-16T07:51:26 | 175,945,339 | 2 | 0 | null | 2019-03-16T08:19:53 | 2019-03-16T08:19:52 | null | GB18030 | Python | false | false | 2,273 | py | ----------------------------
网络协议入门 |
----------------------------
----------------------------
网络-物理层和链路层 |
----------------------------
* 以太网协议(ethernet)
* 一组电信号组成一个数据包,叫做 - 帧
* 每一帧分为:报头(head)和数据(data)两部分
———————————————————————————————
|head| data |
———————————————————————————————
	* head(固定18个字节)
		* 发送者/源地址		:6个字节
		* 接收者/目标地址	:6个字节
		* 数据类型			:2个字节(再加上4字节的CRC校验,标头部分合计18字节)
	* data(最短46字节,最长1500字节)
* 数据包的具体内容
* head + data 最大长度就是 1518字节 (1500 +18),超过长度,就分片发送
* mac地址
* head 中包含的源地址和目标地址的由来.
* ethernet 规定,接入internet的设备,都必须要具备网卡,发送端和接收端的地址,就是指网卡地址,也就是mac地址
* 每块网卡出厂时,都被烧录了世界上唯一的mac地址,长度为 48 位 2进制,通常由 12 位 16进制 表示
00:16:3e:16:0b:5e
* 前面6位是厂商编号
* 后面6位是流水号
* 广播
* 有了mac地址,同一个网络内的两台主机就可以通信了(一台主机通过arp协议获取另一台主机的mac地址)
* ethernet 采用最原始的方式 - 广播,方式进行通信,通俗点.计算机通信基本靠吼
IEEE802.1Q
———————————————————————————————————————————————————————————————————————————
		|目标mac地址	|发送源mac地址	|TPID	|TCI	|类型	|数据部分	|CRC	|
		———————————————————————————————————————————————————————————————————————————
		目标mac地址		:6字节
		发送源mac地址	:6字节
		TPID			:0x8100
TCI :内含12个bit的vlan标识
类型 :2字节
数据部分 :46 - 1500 字节
CRC :4字节,经过重新计算
| [
"747692844@qq.com"
] | 747692844@qq.com |
41e5276cf18e8b181b6e5ce7318913cf6fd6d093 | 1a0d6aec200ea266e2726dd50dde40595662191d | /server/app.py | e491a5c2d86e7a04d1ac93495a93c9c88ef66985 | [] | no_license | sanggusti/fastapi-news-app | 77a8608377b4884ba85560eef66b04c9fe05578f | 13aa1e854347beb93ffa4decec71c828bd66c753 | refs/heads/master | 2023-07-12T01:52:55.723089 | 2021-08-14T05:51:55 | 2021-08-14T05:51:55 | 395,857,085 | 0 | 0 | null | 2021-08-14T03:08:20 | 2021-08-14T01:58:32 | Python | UTF-8 | Python | false | false | 801 | py | import os
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from pydantic import BaseModel
from .api.news_api import get_news_articles
DIR = os.path.dirname(__file__)
class SearchRequest(BaseModel):
    """Request body accepted by the /api/search endpoint."""
    # Free-text search query forwarded to the news API.
    q: str
    # Earliest article date to include — presumably an ISO-format date
    # string; TODO confirm against get_news_articles and the client code.
    from_date: str
app = FastAPI()
# Serve the client's compiled static assets (JS/CSS) from the build folder.
app.mount(
    "/static", StaticFiles(directory=f"{DIR}/../client/build/static"), name="static"
)
@app.get("/")
async def index():
"""Return application index."""
return FileResponse(f"{DIR}/../client/build/index.html")
@app.post("/api/search")
async def search(req: SearchRequest):
"""Return news articles for a search query and from_date
Args:
req (SearchRequest): a request string
"""
return get_news_articles(q=req.q, from_date=req.from_date)
| [
"gustiowinata0@gmail.com"
] | gustiowinata0@gmail.com |
9f810de0b2d578ee42ac0ac94c65f3b5a53b2789 | 72bf2b70af6648664de97f9573b5df10c707936a | /rgstrn/registration/reg/urls.py | 9ecdee8838232f35e1454fee1442b38e737166be | [] | no_license | sarathrajkottamthodiyil/django-projects | 44f77d65e6f247c89cd5e0f63c81bf6dbe9ab671 | bf14e7e4db2f0d2290b5efeef4dda82d0d026382 | refs/heads/master | 2020-09-06T12:47:17.486425 | 2019-11-08T09:08:41 | 2019-11-08T09:08:41 | 220,428,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.contrib import admin
from django.urls import path
from.import views
from django.conf.urls import url
urlpatterns = [
url('/', views.register, name='register')
] | [
"noreply@github.com"
] | noreply@github.com |
c3016ff7a972f62e2906adc7b0164ee77a5a2a1c | ebfac951b49ba380d4b88e0c6308aea326597381 | /chatrender/views/chat_types.py | 7b37509617634b9ce6f0f47cc6e770b11a026be2 | [
"MIT"
] | permissive | The-Politico/django-politico-slackchat-renderer | 2e4175359a4df004526722a190040cef767837fd | adb3ed2ba5039a97ee7b021d39aa40cab11e5661 | refs/heads/master | 2022-12-10T10:57:51.796473 | 2018-05-22T15:37:57 | 2018-05-22T15:37:57 | 120,328,521 | 2 | 0 | MIT | 2022-12-08T02:09:33 | 2018-02-05T16:10:25 | JavaScript | UTF-8 | Python | false | false | 431 | py | import requests
from chatrender.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
@staff_member_required
def chat_types(request):
response = requests.get(settings.SLACKCHAT_CHATTYPE_ENDPOINT)
context = response.json()
return render(
request,
'chatrender/chattype_list.html',
context={"chat_types": context}
)
| [
"jmcclure@politico.com"
] | jmcclure@politico.com |
791f64fe3027c9ce492c7203ae8e8ce5cb945748 | ebf12b3f8ab1017ef8f04f6fe89425febe5fae3a | /Desafios Modulo IV/Desafio - Ventas/agrupados.py | 5ed26293e449617cefdaa24dce065271db9e1932 | [] | no_license | MaxiVargas78/DesafioLatamPython | a77afe64c3718a57da8ffd4cec10654377fc67f9 | 50dadabb5ea4a56a455edcc8a250e43705424205 | refs/heads/main | 2023-07-09T18:50:04.336128 | 2021-08-17T03:55:58 | 2021-08-17T03:55:58 | 387,273,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | ventas = {
"Enero": 15000,
"Febrero": 22000,
"Marzo": 12000,
"Abril": 17000,
"Mayo": 81000,
"Junio": 13000,
"Julio": 21000,
"Agosto": 41200,
"Septiembre": 25000,
"Octubre": 21500,
"Noviembre": 91000,
"Diciembre": 21000,
}
dicci = []
dicci1 = {}
for i in ventas:
dicci.append(ventas[i])
for j in dicci:
if j in dicci1:
dicci1[j] += 1
else:
dicci1[j] = 1
print(dicci1) | [
"maxx7689@gmail.com"
] | maxx7689@gmail.com |
91003a13c3cf7e9bec691541eb1dfdeb36497c67 | 6921e4e6955f5d35f9413f007e7be0d050550040 | /db/dbUtil.py | 9a62812340c4414bc94e1269cedd618e0dfd4283 | [] | no_license | yiyisf/get_link_python | 84f9fe94a3f21513e8c0b66c3a8705377ffafd51 | a7cc45846e91c9f26c8b91e9cc44f2910a02d04a | refs/heads/master | 2020-04-06T03:52:31.913232 | 2016-07-16T12:06:04 | 2016-07-16T12:06:04 | 59,097,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # -*- coding: utf-8 -*-
"""
数据库定义及操作
"""
def connectDb():
def createDB():
def insert():
def update():
def query():
def delete():
| [
"yiyi119120@gmail.com"
] | yiyi119120@gmail.com |
0450e9cf73bed75b7feb3469e06a9af363893758 | 45b4fb25f4cd0e74715a2a811354ff9c45888e73 | /Old_Py_Files/http_request_response_parser_tun.py | ad32a6b32adc227c374f3782c442d988bd2d22c4 | [] | no_license | Excloudx6/Measuring_Mobile_Advertisement | 062577dd97f88b5e51b266289e613312d9c78b18 | 5790eaa04ee12a0cdc7b7752bbf29bd0c03efea6 | refs/heads/master | 2023-03-18T15:43:35.364235 | 2015-04-22T04:22:11 | 2015-04-22T04:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | #!/usr/bin/env python
import dpkt
import zlib
f = open('test.pcap','r')
pcap = dpkt.pcap.Reader(f)
Response=0
# load HTTP pcap file n' link to dpkt attributes
frame_counter = 0
for ts,buf in pcap:
#print "Loop Request"
frame_counter+=1
ip = dpkt.ip.IP(buf)
tcp = ip.data
try:
if tcp.dport == 80 and len(tcp.data) > 0:
http_req = dpkt.http.Request(tcp.data)
Response=0
print "\r\n"
# HTTP request parser
print "Frame Number:", frame_counter
print tcp.data
frame_counter1=0
for ts1,buf1 in pcap:
frame_counter1+=1
ip1 = dpkt.ip.IP(buf1)
tcp1 = ip1.data
if tcp1.sport == 80 and len(tcp1.data) > 0 and tcp1.ack==tcp.seq+len(tcp.data):
http_res = dpkt.http.Response(tcp1.data)
Response=1
print "Frame Number:", frame_counter1
if 'content-encoding' in http_res.headers.keys():
print "Encoded using ", http_res.headers['content-encoding']
if http_res.headers['content-encoding']=="gzip":
print zlib.decompress(http_res.body, 16+zlib.MAX_WBITS)
else:
print http_res.body.decode(http_res.headers['content-encoding'],'strict')
break
if Response==0:
print "No Response Packet Found"
except dpkt.NeedData,e:
#print "dpkt raised an Need error %s" % (e)
print "Frame Number:", frame_counter
pass
except dpkt.UnpackError,e:
#print "dpkt raised an unpack error %s" % (e)
print "Frame Number:", frame_counter
pass
f.close()
| [
"hchawla@localhost.localdomain"
] | hchawla@localhost.localdomain |
c045592da596b19d55f84306bd5d9a70eeebe23d | 66e029a85dcce281fcba00b73c12ced74296cab9 | /build_corpus.py | 2960adfd761113d79a479146e789af3aee1e2d29 | [] | no_license | maiyaporn/user-profiling | 1bd08f56b48ac75443078ab8d09342e79c7a57b8 | 731421fc16f67988d32daa42cea59b931a37edc8 | refs/heads/master | 2021-01-13T01:46:16.233090 | 2014-12-17T05:54:23 | 2014-12-17T05:54:23 | 26,605,491 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,273 | py | from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from gensim import corpora, models, similarities
import numpy as np
import glob
import os
import codecs
import string
import pickle
class BuildCorpus(object):
"""docstring for BuildCorpus"""
def __init__(self):
self.customwords = [i.encode('utf-8') for i in ["n't", "'ve", "'m", "'ll", "'re"]]
self.stoplists = stopwords.words('english') + self.customwords
self.lmtzr = WordNetLemmatizer()
def isPunctuation(self, text):
if type(text) is not str:
text = text.encode('utf-8')
text_ = text.translate(string.maketrans("",""), string.punctuation)
return bool(len(text_)==0)
def tokenizeDoc(self, doc):
tokens = []
for text in codecs.open(doc, "r", "utf-8"):
tokens += [self.lmtzr.lemmatize(word) for word in word_tokenize(text.lower()) if len(word) > 3 and word not in self.stoplists and not self.isPunctuation(word)]
return tokens
def buildDictionary(self, directory, dictName):
dictionary = corpora.Dictionary()
for doc in glob.glob(directory + "/*"):
dictionary.add_documents([self.tokenizeDoc(doc)])
dictionary.filter_extremes(no_above=0.7)
dictionary.compactify()
dictionary.save(dictName)
print (dictionary)
return dictionary
def buildCorpus(self, directory, dictName, corpusName):
if os.path.isfile(dictName):
dictionary = corpora.Dictionary.load(dictName)
else:
dictionary = self.buildDictionary(directory, dictName)
corpus = []
doc_idx_map = dict()
n = 0
for doc in glob.glob(directory + "/*"):
doc_idx_map[n] = doc.split("/")[1]
corpus.append(dictionary.doc2bow(self.tokenizeDoc(doc)))
n += 1
corpora.MmCorpus.serialize(corpusName, corpus)
with open('tmp/doc-idx-map', 'wb') as f:
pickle.dump(doc_idx_map, f)
print len(corpus)
def main():
corpus = BuildCorpus()
#corpus.buildCorpus('train', 'tmp/train-corpus.dict', 'tmp/train-corpus.mm')
#corpus.buildCorpus('val', 'tmp/train-corpus.dict', 'tmp/val-corpus.mm')
# Build corpus for testing documents - combine train and val
# Save the order of file read to corpus to make a map of index - user
corpus.buildCorpus('documents', 'tmp/test-corpus.dict', 'tmp/test-corpus.mm')
if __name__ == '__main__':
main() | [
"p.maiyaporn@gmail.com"
] | p.maiyaporn@gmail.com |
f902010d4f96a60c2cb82fe060c4864c2f26e9d2 | 4b79bce1718767aedafe9a585e9fd126820ccd67 | /venv/Scripts/pip-script.py | e07292d8d29c675fe8f8aa41e3ae07edb5239a67 | [] | no_license | ajay15malusare/PYTHON | fc1208c7c3f1e89fdbf2d49c0976104d82e53bde | 59e7ad4ef8f86566fbee26ae67d7aeec3093ffcd | refs/heads/master | 2020-05-05T14:00:40.036947 | 2019-04-08T08:30:58 | 2019-04-08T08:30:58 | 180,103,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | #!D:\Desktop\PYTHON\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"ajay15malusare@gmail.com"
] | ajay15malusare@gmail.com |
1f459741a34f6b06e0c9856c6a59f86fee6acd63 | a3cdfaf2d4d72f4d1c8bd2a9d3e8ce1f6d0316ca | /Research Files/10x10x10_moving/10x10x10movinglammpsscriptgenerator.py | e24983c5d7ba1cc8fa3588b9ef5309dd69d9177a | [] | no_license | webclinic017/Personal-Projects | d61e3f5ad1e1c12c611ae088fa64050dc2f4693b | 4e730e350e5698bb40bbdb1526596c6a8a3c5596 | refs/heads/master | 2023-06-10T23:00:50.948934 | 2021-07-03T00:46:19 | 2021-07-03T00:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,014 | py | #!/usr/bin/env python
if __name__ == '__main__':
temperature = 50
for i in range(1,21):
temp = int(temperature) * i
if temp == 1000:
temp_string = "99_1000"
else:
temp_string = str(temp)
f = open("10x10x10_{}k_moving_py.lmp".format(temp_string), "w+")
f.write("# bcc iron in a 3d periodic box\n\n")
f.write("clear\n")
f.write("units metal\n")
f.write("atom_style spin\n\n")
f.write("dimension 3\n")
f.write("boundary p p p\n\n")
f.write("# necessary for the serial algorithm (sametag)\n")
f.write("atom_modify map array \n\n")
f.write("lattice bcc 2.8665\n")
f.write("region box block 0.0 10.0 0.0 10.0 0.0 10.0\n")
f.write("create_box 1 box\n")
f.write("create_atoms 1 box\n\n")
f.write("# setting mass, mag. moments, and interactions for bcc iron\n\n")
f.write("mass 1 55.845\n\n")
f.write("# set group all spin/random 31 2.2\n")
f.write("set group all spin 2.2 0.0 0.0 1.0\n")
f.write("pair_style hybrid/overlay eam/alloy spin/exchange 3.5\n")
f.write("pair_coeff * * eam/alloy Fe_Mishin2006.eam.alloy Fe\n")
f.write("pair_coeff * * spin/exchange exchange 3.4 0.02726 0.2171 1.841\n\n")
f.write("neighbor 0.1 bin\n")
f.write("neigh_modify every 10 check yes delay 20\n\n")
f.write("fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0\n")
f.write("fix_modify 1 energy yes\n")
f.write("fix 2 all langevin/spin {}.0 0.01 21\n\n".format(int(temp)))
f.write("fix 3 all nve/spin lattice moving\n")
f.write("timestep 0.0001\n\n")
f.write("# compute and output options\n\n")
f.write("compute out_mag all spin\n")
f.write("compute out_pe all pe\n")
f.write("compute out_ke all ke\n")
f.write("compute out_temp all temp\n\n")
f.write("variable magz equal c_out_mag[3]\n")
f.write("variable magnorm equal c_out_mag[4]\n")
f.write("variable emag equal c_out_mag[5]\n")
f.write("variable tmag equal c_out_mag[6]\n\n")
f.write("thermo_style custom step time v_magnorm v_tmag temp v_emag ke pe press etotal\n")
f.write("thermo 5000\n\n")
f.write("compute outsp all property/atom spx spy spz sp fmx fmy fmz\n")
f.write("dump 1 all custom 100 dump_iron.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]\n\n")
f.write("run 100000\n")
f.write("# run 2\n\n")
f.write("unfix 3\n")
f.write("fix 3 all nve/spin lattice moving\n")
f.write("velocity all create {} 4928459 rot yes dist gaussian\n\n".format(int(temp)))
f.write("run 100000")
f.close()
| [
"noreply@github.com"
] | noreply@github.com |
6f97e11be404d475c96c2f5c4625ac4c0a5f12cb | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /lengthOfLIS.py | 0416711c4a259c5b75a686e99c23b0c224139c4f | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | # one solution
# class Solution:
# def lengthOfLIS(self, nums):
# log=[0 for i in range(len(nums))]
# for i in range(len(nums)):
# m=0
# for j in range(i):
# if nums[j]<nums[i]:
# m=max(m,log[j])
# log[i]=m+1
# return max(log)
#
# another solution
class Solution:
def lengthOfLIS(self, nums):
if len(nums) == 0:
return 0
log = []
for num in nums:
if len(log) == 0 or num > log[-1]:
log.append(num)
continue
start = 0
end = len(log) - 1
while start <= end:
mid = (start + end) // 2
if log[mid] >= num:
end = mid - 1
else:
start = mid + 1
log[start] = num
return len(log)
sl = Solution()
nums = [10, 9, 2, 5, 3, 7, 101, 18]
print(sl.lengthOfLIS(nums))
| [
"zzz136454872@163.com"
] | zzz136454872@163.com |
c3b32a9a5281c3c2b56d9125376718f943199953 | 0749bf5bd0d07cabe0acac93635f1e4e3c97dda6 | /Quick_Basic/20.functions.py | dd757d30a3c297d3cb9e6c7df47513afc6c2ca13 | [
"MIT"
] | permissive | ananddasani/Python_Practice_Course | b47565aedbc4cc166d54c6dee7c4587edabc2e82 | 5bc0993ccff7df2631a6f1177bf898626c095d8b | refs/heads/main | 2023-06-14T19:05:18.003900 | 2021-07-06T06:03:13 | 2021-07-06T06:03:13 | 377,720,391 | 3 | 2 | MIT | 2021-06-17T08:21:59 | 2021-06-17T05:54:28 | null | UTF-8 | Python | false | false | 698 | py | # program to demonstrate the functions in python
# function without args
def say_hello():
print("Hello python :)")
say_hello()
# function with args
def say_hello_args(arg1, arg2):
print("Hello " + arg1 + " how are you doing ? " +
arg2 + " is waiting for you :)")
say_hello_args("anand", "om")
# different way of passing args
def say_hello_args1(arg1, arg2="jay"):
print("Hello " + arg1 + " how are you doing ? " +
arg2 + " is waiting for you :)")
say_hello_args1("anand")
def fah2celsius(fah):
celsius = (5 * (fah - 32)) / 9
return celsius
print("Celsius : ", round(fah2celsius(100), 2))
print("Kelvin : ", round(fah2celsius(100) + 273.5, 2))
| [
"ananddasani74@gmail.com"
] | ananddasani74@gmail.com |
faa80815a53cfae073f37f568ed4e971ea29506f | 8c1551f974e4c2d89bc9f22905734e4421761189 | /model_backupJan12.py | 393715fa2bd41f31f501120dc337fd92c12c0b0f | [] | no_license | ofraam/GTT_MCTS | c7f8210d60398640fd4035b8d93fed99929bbe43 | 44647bf54a6e4622df21824c413e92ee33fbe719 | refs/heads/master | 2020-03-29T11:43:57.347444 | 2018-09-24T14:58:35 | 2018-09-24T14:58:35 | 149,867,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,356 | py | import copy
import numpy as np
import matplotlib.pyplot as plt
import math
import replay as rp
from emd import emd
# from pyemd import emd
from scipy import stats
# from cv2 import *
# Per-board result logs; index g here lines up with START_POSITION[g] below.
# Naming scheme: <boardsize>_<difficulty>_<full|pruned>_dec19.csv
LOGFILE = ['logs/6_hard_full_dec19.csv','logs/6_hard_pruned_dec19.csv','logs/10_hard_full_dec19.csv','logs/10_hard_pruned_dec19.csv', 'logs/6_easy_full_dec19.csv','logs/6_easy_pruned_dec19.csv','logs/10_easy_full_dec19.csv','logs/10_easy_pruned_dec19.csv','logs/10_medium_full_dec19.csv','logs/10_medium_pruned_dec19.csv']
# Default board side length; the individual boards below are 6x6 or 10x10.
DIMENSION = 6
# Initial board states, one per LOGFILE entry (same order).
# Cell encoding: 0 = empty, 1 = 'X' piece, 2 = 'O' piece.
START_POSITION = [[[0,2,0,0,1,0],[0,2,1,2,0,0],[0,1,0,0,0,0],[0,1,0,2,0,0],[0,1,0,0,0,0],[0,2,0,0,2,0]],
                  [[0,2,0,1,1,0],[0,2,1,2,0,0],[0,1,0,0,0,0],[2,1,0,2,0,0],[0,1,0,0,0,0],[0,2,0,0,2,0]],
                  [[0,0,0,2,0,0,0,0,0,0],[0,0,0,1,0,2,0,0,0,0],[0,2,2,0,0,1,1,0,2,0],[0,0,2,1,2,0,0,0,0,0],[0,1,1,0,0,0,0,0,0,0],[0,1,1,0,2,0,0,0,0,0],[0,0,1,0,2,0,0,0,0,0],[0,0,1,0,0,0,0,0,0,0],[0,0,2,0,0,2,2,0,0,0],[0,0,0,0,1,0,0,0,0,0]],
                  [[0,0,0,2,0,0,0,0,0,0],[0,0,0,1,0,2,0,0,0,0],[0,2,2,0,1,1,1,0,2,0],[0,0,2,1,2,0,0,0,0,0],[0,1,1,0,0,0,0,0,0,0],[0,1,1,0,2,0,0,0,0,0],[2,0,1,0,2,0,0,0,0,0],[0,0,1,0,0,0,0,0,0,0],[0,0,2,0,0,2,2,0,0,0],[0,0,0,0,1,0,0,0,0,0]],
                  [[0,1,0,2,0,0],[0,2,1,1,0,0],[1,2,2,2,1,0],[2,0,1,1,2,0],[1,0,2,2,0,0],[0,0,0,0,0,0]],
                  [[0,1,2,2,0,0],[0,2,1,1,0,0],[1,2,2,2,1,0],[2,0,1,1,2,1],[1,0,2,2,0,0],[0,0,0,0,0,0]],
                  [[0,0,0,0,1,0,2,0,0,0],[0,0,0,0,2,1,1,1,0,0],[0,0,0,1,2,2,2,1,0,0],[0,0,0,2,2,1,1,2,1,1],[2,0,0,1,0,2,2,0,0,0],[1,0,0,0,0,0,0,0,0,0],[1,1,0,0,0,0,0,0,0,0],[2,2,0,0,0,0,1,0,0,0],[0,0,0,0,0,0,1,0,0,0],[0,0,0,0,0,2,2,2,0,0]],
                  [[0,0,0,0,1,2,2,0,0,0],[0,0,0,0,2,1,1,1,0,0],[0,0,0,1,2,2,2,1,0,0],[0,0,0,2,2,1,1,2,1,1],[2,0,0,1,0,2,2,0,0,1],[1,0,0,0,0,0,0,0,0,0],[1,1,0,0,0,0,0,0,0,0],[2,2,0,0,0,0,1,0,0,0],[0,0,0,0,0,0,1,0,0,0],[0,0,0,0,0,2,2,2,0,0]],
                  [[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,1,0,0,2,0,0,0,0],[0,0,0,1,1,0,0,0,0,0],[0,0,0,0,2,2,2,1,2,0],[0,0,0,0,0,1,2,2,0,0],[0,0,0,1,0,2,0,0,0,0],[0,0,0,0,1,1,0,0,0,0],[0,0,0,0,0,1,0,0,0,0],[0,0,0,0,0,0,2,0,0,0]],
                  [[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,1,0,0,2,0,0,0,0],[0,0,1,1,1,2,0,0,0,0],[0,0,0,0,2,2,2,1,2,0],[0,0,0,0,0,1,2,2,0,0],[0,0,0,1,0,2,0,0,0,0],[0,0,0,0,1,1,0,0,0,0],[0,0,0,0,0,1,0,0,0,0],[0,0,0,0,0,0,2,0,0,0]]
                  ]
def compute_density(row, col, board, neighborhood_size):
    """Return a distance-weighted count of 'X' pieces around (row, col).

    Each 'X' inside the square neighborhood of radius `neighborhood_size`
    contributes 1 / (8 * d), where d is its Chebyshev distance from the
    candidate square (8*d is the size of the ring at that distance), so
    nearer pieces weigh more. The candidate square itself is skipped.
    Assumes a square board of side len(board).
    """
    # Fix vs. original: dropped the unused `x_count` accumulator and the
    # bitwise `&`/`|` operators that were applied to booleans.
    size = len(board)
    density_score = 0.0
    for di in range(-neighborhood_size, neighborhood_size + 1):
        for dj in range(-neighborhood_size, neighborhood_size + 1):
            if di == 0 and dj == 0:
                continue  # skip the candidate square itself
            r = row + di
            c = col + dj
            if 0 <= r < size and 0 <= c < size:
                if board[r][c] == 'X':
                    # weight by inverse Chebyshev distance
                    density_score += 1.0 / (8 * max(abs(di), abs(dj)))
    return density_score
def compute_density_guassian(row, col, board, guassian_kernel):
    """Sum the Gaussian layers (one per 'X' piece) at square (row, col).

    `guassian_kernel` is a list of 2-D grids indexed [row][col]; the
    score is the total of all layers at this square. `board` is unused
    here and kept only for interface symmetry with compute_density.
    """
    return sum((layer[row][col] for layer in guassian_kernel), 0.0)
def compute_scores_density_guassian(normalized=False, sig = 3, lamb = 1):
    """Score every free square of each START_POSITION board by summed
    Gaussian density around its 'X' pieces, save one heatmap PNG per
    board (named after the matching LOGFILE entry), and return a dict
    mapping board key -> score matrix.

    normalized -- if True, divide each free square's score by the sum of
        all free-square scores on that board (the exponential/softmax
        variant driven by `lamb` is left commented out).
    sig -- fwhm passed to makeGaussian for each 'X' piece.
    lamb -- only feeds sum_scores_exp, which never reaches the output,
        so it currently has no effect on the result.

    In the returned/plotted matrices 'X' squares are replaced by the
    sentinel -0.00001 and 'O' squares by -0.00002 (real scores are
    non-negative, so the sentinels cannot collide with them).
    """
    data_matrices = {}
    for g in range(len(LOGFILE)):
        # deep copy so the module-level START_POSITION is never mutated
        board_matrix = copy.deepcopy(START_POSITION[g])
        # decode cells: 1 -> 'X', 2 -> 'O', anything else -> int (0 = free)
        for i in range(len(board_matrix)):
            for j in range(len(board_matrix[i])):
                if ((board_matrix[i][j]!=1) & (board_matrix[i][j]!=2)):
                    board_matrix[i][j] = int(board_matrix[i][j])
                elif (board_matrix[i][j]==1):
                    board_matrix[i][j]='X'
                elif (board_matrix[i][j]==2):
                    board_matrix[i][j]='O'
        # print board_matrix
        sum_scores = 0.0
        sum_scores_exp = 0.0
        score_matrix = copy.deepcopy(board_matrix)
        # create guassians for each X square
        # NOTE(review): makeGaussian is defined elsewhere in this file;
        # presumably it returns a 2-D grid with a Gaussian bump at
        # `center` of width `fwhm` -- confirm against its definition.
        guassian_kernel = []
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if board_matrix[r][c] == 'X':
                    guassian_kernel.append(makeGaussian(len(board_matrix),fwhm=sig,center=[r,c]))
        # compute density scores
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if board_matrix[r][c] == 0: # only check if free
                    square_score = compute_density_guassian(r,c,board_matrix,guassian_kernel)
                    score_matrix[r][c] = square_score
                    sum_scores += square_score
                    # exponential accumulator: computed but never used below
                    sum_scores_exp += math.pow(math.e,lamb*square_score)
        # score_matrix_normalized = copy.deepcopy(score_matrix)
        # for r in range(len(score_matrix_normalized)):
        #     for c in range(len(score_matrix_normalized[r])):
        #         score_matrix_normalized[r][c] = score_matrix_normalized[r][c]/sum_scores
        # print 'score matrix:'
        # print score_matrix
        # print 'score matrix normalized'
        # print score_matrix_normalized
        # heatmaps
        # replace piece markers with numeric sentinels so np.array below
        # produces a numeric matrix the colormap can render
        for r in range(0,len(score_matrix)):
            for j in range(0,len(score_matrix[r])):
                if (score_matrix[r][j]=='X'):
                    score_matrix[r][j] = -0.00001
                elif (score_matrix[r][j]=='O'):
                    score_matrix[r][j] = -0.00002
        if normalized:
            for r in range(len(score_matrix)):
                for c in range(len(score_matrix[r])):
                    # score_matrix[r][c] = score_matrix[r][c]/sum_scores
                    # skip the X/O sentinel squares when normalizing
                    if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                        score_matrix[r][c] = score_matrix[r][c]/sum_scores
                    # score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
        # flip vertically so row 0 of the board is drawn at the top
        a = np.array(score_matrix)
        a = np.flip(a,0)
        # print a
        heatmap = plt.pcolor(a)
        # annotate each cell: 'X'/'O' for pieces, the score for free squares
        # (the -1/-2 comparisons are legacy sentinel values kept for safety)
        for y in range(a.shape[0]):
            for x in range(a.shape[1]):
                if(a[y,x]==-1) | (a[y,x]==-0.00001):
                    plt.text(x + 0.5, y + 0.5, 'X',
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
                elif((a[y,x]==-2) | (a[y,x]==-0.00002)):
                    plt.text(x + 0.5, y + 0.5, 'O',
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
                elif(a[y,x]!=0):
                    plt.text(x + 0.5, y + 0.5, '%.2f' % a[y, x],
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
        fig = plt.colorbar(heatmap)
        # derive the output name/key from the log path:
        # strip '.csv', key strips the 'logs/' prefix and '_dec19' suffix
        fig_file_name = LOGFILE[g]
        fig_file_name = fig_file_name[:-4]
        data_matrices[fig_file_name[5:-6]] = score_matrix
        if normalized:
            fig_file_name = fig_file_name + '_normalized_density_scores.png'
        else:
            fig_file_name = fig_file_name + '_density_scores.png'
        plt.savefig(fig_file_name)
        plt.clf()
    return data_matrices
def compute_scores_density(normalized=False, neighborhood_size=1, lamb=1):
    """Score every free square of each START_POSITION board with the
    discrete neighborhood density (compute_density), save one heatmap
    PNG per board, and return a dict mapping board key -> score matrix.

    normalized -- if True, divide each free square's score by the sum of
        all free-square scores on that board.
    neighborhood_size -- Chebyshev radius handed to compute_density.
    lamb -- only feeds sum_scores_exp, which never reaches the output,
        so it currently has no effect on the result.

    'X' squares become the sentinel -0.00001 and 'O' squares -0.00002 in
    the returned/plotted matrices.
    """
    data_matrices = {}
    for g in range(len(LOGFILE)):
        # deep copy so the module-level START_POSITION is never mutated
        board_matrix = copy.deepcopy(START_POSITION[g])
        # decode cells: 1 -> 'X', 2 -> 'O', anything else -> int (0 = free)
        for i in range(len(board_matrix)):
            for j in range(len(board_matrix[i])):
                if ((board_matrix[i][j]!=1) & (board_matrix[i][j]!=2)):
                    board_matrix[i][j] = int(board_matrix[i][j])
                elif (board_matrix[i][j]==1):
                    board_matrix[i][j]='X'
                elif (board_matrix[i][j]==2):
                    board_matrix[i][j]='O'
        # print board_matrix
        sum_scores = 0.0
        sum_scores_exp = 0.0
        score_matrix = copy.deepcopy(board_matrix)
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if board_matrix[r][c] == 0: # only check if free
                    square_score = compute_density(r, c, board_matrix, neighborhood_size) # check neighborhood
                    score_matrix[r][c] = square_score
                    sum_scores += square_score
                    # exponential accumulator: computed but never used below
                    sum_scores_exp += math.pow(math.e,lamb*square_score)
        # heatmaps
        # replace piece markers with numeric sentinels for plotting
        for r in range(0,len(score_matrix)):
            for j in range(0,len(score_matrix[r])):
                if (score_matrix[r][j]=='X'):
                    score_matrix[r][j] = -0.00001
                elif (score_matrix[r][j]=='O'):
                    score_matrix[r][j] = -0.00002
        if normalized:
            for r in range(len(score_matrix)):
                for c in range(len(score_matrix[r])):
                    # score_matrix[r][c] = score_matrix[r][c]/sum_scores
                    # skip the X/O sentinel squares when normalizing
                    if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                        score_matrix[r][c] = score_matrix[r][c]/sum_scores
                    # score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
        # flip vertically so row 0 of the board is drawn at the top
        a = np.array(score_matrix)
        a = np.flip(a,0)
        # print a
        heatmap = plt.pcolor(a)
        # annotate each cell: 'X'/'O' for pieces, the score for free squares
        for y in range(a.shape[0]):
            for x in range(a.shape[1]):
                if(a[y,x]==-1) | (a[y,x]==-0.00001):
                    plt.text(x + 0.5, y + 0.5, 'X',
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
                elif((a[y,x]==-2) | (a[y,x]==-0.00002)):
                    plt.text(x + 0.5, y + 0.5, 'O',
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
                elif(a[y,x]!=0):
                    plt.text(x + 0.5, y + 0.5, '%.2f' % a[y, x],
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
        fig = plt.colorbar(heatmap)
        # derive the output name/key from the log path:
        # strip '.csv', key strips the 'logs/' prefix and '_dec19' suffix
        fig_file_name = LOGFILE[g]
        fig_file_name = fig_file_name[:-4]
        data_matrices[fig_file_name[5:-6]] = score_matrix
        if normalized:
            fig_file_name = fig_file_name + '_normalized_density_scores.png'
        else:
            fig_file_name = fig_file_name + '_density_scores.png'
        plt.savefig(fig_file_name)
        plt.clf()
    return data_matrices
def check_path_overlap(empty1, empty2):
    """Return True if the two lists of empty squares share any square."""
    return any(square in empty2 for square in empty1)
def compute_open_paths(row, col, board, exp=1, player='X'):
    """Score square (row, col) by the open winning paths it lies on.

    A path is a streak_size-long line (4, or 5 on 10x10 boards) through
    (row, col) that fits on the board and contains no opponent piece.
    Each open path holding at least one of `player`'s pieces contributes
    1 / (missing_pieces ** exp); every pair of non-overlapping open
    paths adds an interaction bonus. When scoring for 'X' the candidate
    square itself counts as a piece; when scoring for 'O' it is treated
    as empty (this asymmetry is inherited from the original code).

    Returns 0.0 if the square participates in no open path.
    """
    other_player = 'O'
    if player == 'O':
        other_player = 'X'
    streak_size = 4
    if len(board) == 10:
        streak_size = 5

    # The original code repeated the same 30-line scan once per direction;
    # here the four orientations share one parameterized helper. Order of
    # directions matches the original (so the float summation order and
    # the pair bonuses are identical): diag down-right, diag down-left,
    # vertical, horizontal.
    open_paths_data = []
    for dr, dc in [(1, 1), (1, -1), (1, 0), (0, 1)]:
        open_paths_data += _scan_open_paths(row, col, board, dr, dc,
                                            streak_size, player, other_player)

    if len(open_paths_data) == 0:
        return 0.0
    score = 0.0
    for i in range(len(open_paths_data)):
        p1 = open_paths_data[i]
        # paths closer to completion score higher
        # NOTE(review): a path needing 0 more pieces divides by zero here;
        # preserved as-is from the original implementation.
        score += 1.0 / math.pow((streak_size - p1[0]), exp)
        for j in range(i + 1, len(open_paths_data)):
            p2 = open_paths_data[j]
            if not (check_path_overlap(p1[1], p2[1])):
                # bonus for pairs of paths sharing no empty square
                score += 1.0 / (math.pow(((streak_size - 1) * (streak_size - 1)) - (p1[0] * p2[0]), exp))
    return score


def _scan_open_paths(row, col, board, dr, dc, streak_size, player, other_player):
    """Collect open paths through (row, col) along direction (dr, dc).

    Slides a streak_size window along the (dr, dc) line so that every
    window position containing (row, col) is tried. Returns a list of
    (piece_count, empty_squares) tuples for windows that fit on the
    board, are not blocked by `other_player`, and contain at least one
    `player` piece. For player 'X' the candidate square is counted as a
    piece and excluded from empty_squares; for player 'O' it is listed
    among the empty squares instead (mirrors the original behavior).
    """
    n = len(board)
    paths = []
    for i in range(streak_size):
        r = row - i * dr
        c = col - i * dc
        if (r > n - 1) or (r < 0) or (c > n - 1) or (c < 0):
            continue  # this window start is off the board
        blocked = False
        path_length = 0
        piece_count = 0
        empty_squares = []
        square_row = r
        square_col = c
        while ((not blocked) and (path_length < streak_size)
               and (0 <= square_row < n) and (0 <= square_col < n)):
            if board[square_row][square_col] == other_player:
                blocked = True
            elif board[square_row][square_col] == player:
                piece_count += 1
            elif ((square_col != col) or (square_row != row)) or (other_player == 'X'):
                # record empty squares; the candidate square is included
                # only when scoring for 'O' (other_player == 'X')
                empty_squares.append([square_row, square_col])
            square_row += dr
            square_col += dc
            path_length += 1
        if (path_length == streak_size) and (not blocked) and (piece_count > 0):
            if other_player == 'O':
                # candidate square itself counts as an 'X' piece
                paths.append((piece_count + 1, empty_squares))
            else:
                paths.append((piece_count, empty_squares))
    return paths
def compute_scores_open_paths(normalized=False, exp=1, lamb = 1):
    """Score every free square of each START_POSITION board with the
    open-paths heuristic (compute_open_paths, X's perspective), save one
    heatmap PNG per board, and return a dict mapping board key -> score
    matrix.

    normalized -- if True, divide each free square's score by the sum of
        all free-square scores on that board.
    exp -- exponent forwarded to compute_open_paths.
    lamb -- only feeds sum_scores_exp, which never reaches the output,
        so it currently has no effect on the result.

    'X' squares become the sentinel -0.00001 and 'O' squares -0.00002 in
    the returned/plotted matrices.
    """
    data_matrices = {}
    for g in range(len(LOGFILE)):
        print LOGFILE[g]
        # deep copy so the module-level START_POSITION is never mutated
        board_matrix = copy.deepcopy(START_POSITION[g])
        # decode cells: 1 -> 'X', 2 -> 'O', anything else -> int (0 = free)
        for i in range(len(board_matrix)):
            for j in range(len(board_matrix[i])):
                if ((board_matrix[i][j]!=1) & (board_matrix[i][j]!=2)):
                    board_matrix[i][j] = int(board_matrix[i][j])
                elif (board_matrix[i][j]==1):
                    board_matrix[i][j]='X'
                elif (board_matrix[i][j]==2):
                    board_matrix[i][j]='O'
        # print board_matrix
        sum_scores = 0.0
        sum_scores_exp = 0.0
        score_matrix = copy.deepcopy(board_matrix)
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if board_matrix[r][c] == 0: # only check if free
                    square_score = compute_open_paths(r, c, board_matrix,exp=exp) # check open paths for win
                    score_matrix[r][c] = square_score
                    sum_scores += square_score
                    # exponential accumulator: computed but never used below
                    sum_scores_exp += math.pow(math.e,lamb*square_score)
        # score_matrix_normalized = copy.deepcopy(score_matrix)
        # for r in range(len(score_matrix_normalized)):
        #     for c in range(len(score_matrix_normalized[r])):
        #         score_matrix_normalized[r][c] = score_matrix_normalized[r][c]/sum_scores
        # print 'score matrix:'
        # print score_matrix
        # print 'score matrix normalized'
        # print score_matrix_normalized
        # heatmaps
        # replace piece markers with numeric sentinels for plotting
        for r in range(0,len(score_matrix)):
            for j in range(0,len(score_matrix[r])):
                if (score_matrix[r][j]=='X'):
                    score_matrix[r][j] = -0.00001
                elif (score_matrix[r][j]=='O'):
                    score_matrix[r][j] = -0.00002
        if normalized:
            for r in range(len(score_matrix)):
                for c in range(len(score_matrix[r])):
                    # skip the X/O sentinel squares when normalizing
                    if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                        score_matrix[r][c] = score_matrix[r][c]/sum_scores
                    # if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                    #     score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
        # flip vertically so row 0 of the board is drawn at the top
        a = np.array(score_matrix)
        a = np.flip(a,0)
        # print a
        heatmap = plt.pcolor(a)
        # annotate each cell: 'X'/'O' for pieces, the score for free squares
        for y in range(a.shape[0]):
            for x in range(a.shape[1]):
                if(a[y,x]==-1) | (a[y,x]==-0.00001):
                    plt.text(x + 0.5, y + 0.5, 'X',
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
                elif((a[y,x]==-2) | (a[y,x]==-0.00002)):
                    plt.text(x + 0.5, y + 0.5, 'O',
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
                elif(a[y,x]!=0):
                    plt.text(x + 0.5, y + 0.5, '%.2f' % a[y, x],
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
        fig = plt.colorbar(heatmap)
        # derive the output name/key from the log path:
        # strip '.csv', key strips the 'logs/' prefix and '_dec19' suffix
        fig_file_name = LOGFILE[g]
        fig_file_name = fig_file_name[:-4]
        data_matrices[fig_file_name[5:-6]] = score_matrix
        if normalized:
            fig_file_name = fig_file_name + '_test_normalized_path_scores' + 'exp=' + str(exp) + '.png'
        else:
            fig_file_name = fig_file_name + '_test_path_scores' + 'exp=' + str(exp) + '.png'
        plt.savefig(fig_file_name)
        plt.clf()
    return data_matrices
def compute_scores_open_paths_opponent(normalized=False, exp=1, lamb = 1, o_weight = 0.5):
    """Like compute_scores_open_paths, but each free square's score is a
    weighted blend of its offensive value for X and its blocking value
    against O:

        score = (1 - o_weight) * x_potential + o_weight * o_potential

    One heatmap PNG is saved per board; returns a dict mapping board
    key -> score matrix.

    normalized -- if True, divide each free square's score by the sum of
        all free-square scores on that board.
    exp -- exponent forwarded to compute_open_paths.
    lamb -- only feeds sum_scores_exp, which never reaches the output,
        so it currently has no effect on the result.
    o_weight -- weight of the opponent-blocking term in [0, 1].

    'X' squares become the sentinel -0.00001 and 'O' squares -0.00002 in
    the returned/plotted matrices.
    """
    data_matrices = {}
    for g in range(len(LOGFILE)):
        print LOGFILE[g]
        # deep copy so the module-level START_POSITION is never mutated
        board_matrix = copy.deepcopy(START_POSITION[g])
        # decode cells: 1 -> 'X', 2 -> 'O', anything else -> int (0 = free)
        for i in range(len(board_matrix)):
            for j in range(len(board_matrix[i])):
                if ((board_matrix[i][j]!=1) & (board_matrix[i][j]!=2)):
                    board_matrix[i][j] = int(board_matrix[i][j])
                elif (board_matrix[i][j]==1):
                    board_matrix[i][j]='X'
                elif (board_matrix[i][j]==2):
                    board_matrix[i][j]='O'
        # print board_matrix
        sum_scores = 0.0
        sum_scores_exp = 0.0
        score_matrix = copy.deepcopy(board_matrix)
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if board_matrix[r][c] == 0: # only check if free
                    x_potential = compute_open_paths(r, c, board_matrix,exp=exp) # check open paths for win
                    o_potential = compute_open_paths(r, c, board_matrix,exp=exp, player='O') # check preventive paths
                    square_score = (1-o_weight)*x_potential + o_weight*o_potential # check open paths for win
                    score_matrix[r][c] = square_score
                    sum_scores += square_score
                    # exponential accumulator: computed but never used below
                    sum_scores_exp += math.pow(math.e,lamb*square_score)
        # score_matrix_normalized = copy.deepcopy(score_matrix)
        # for r in range(len(score_matrix_normalized)):
        #     for c in range(len(score_matrix_normalized[r])):
        #         score_matrix_normalized[r][c] = score_matrix_normalized[r][c]/sum_scores
        # print 'score matrix:'
        # print score_matrix
        # print 'score matrix normalized'
        # print score_matrix_normalized
        # heatmaps
        # replace piece markers with numeric sentinels for plotting
        for r in range(0,len(score_matrix)):
            for j in range(0,len(score_matrix[r])):
                if (score_matrix[r][j]=='X'):
                    score_matrix[r][j] = -0.00001
                elif (score_matrix[r][j]=='O'):
                    score_matrix[r][j] = -0.00002
        if normalized:
            for r in range(len(score_matrix)):
                for c in range(len(score_matrix[r])):
                    # skip the X/O sentinel squares when normalizing
                    if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                        score_matrix[r][c] = score_matrix[r][c]/sum_scores
                    # if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                    #     score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
        # flip vertically so row 0 of the board is drawn at the top
        a = np.array(score_matrix)
        a = np.flip(a,0)
        # print a
        heatmap = plt.pcolor(a)
        # annotate each cell: 'X'/'O' for pieces, the score for free squares
        for y in range(a.shape[0]):
            for x in range(a.shape[1]):
                if(a[y,x]==-1) | (a[y,x]==-0.00001):
                    plt.text(x + 0.5, y + 0.5, 'X',
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
                elif((a[y,x]==-2) | (a[y,x]==-0.00002)):
                    plt.text(x + 0.5, y + 0.5, 'O',
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
                elif(a[y,x]!=0):
                    plt.text(x + 0.5, y + 0.5, '%.2f' % a[y, x],
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white'
                    )
        fig = plt.colorbar(heatmap)
        # derive the output name/key from the log path:
        # strip '.csv', key strips the 'logs/' prefix and '_dec19' suffix
        fig_file_name = LOGFILE[g]
        fig_file_name = fig_file_name[:-4]
        data_matrices[fig_file_name[5:-6]] = score_matrix
        if normalized:
            fig_file_name = fig_file_name + '_opponent_normalized_path_scores' + 'exp=' + str(exp) + 'o_weight=' + str(o_weight) + '.png'
        else:
            fig_file_name = fig_file_name + '_opponent_path_scores' + 'exp=' + str(exp) + 'o_weight=' + str(o_weight)+ '.png'
        plt.savefig(fig_file_name)
        plt.clf()
    return data_matrices
def compute_scores_composite(normalized=False, exp=1, neighborhood_size=1, density = 'guassian', lamb=None, sig=3):
    # NOTE(review): this definition is dead code -- it is immediately shadowed
    # by the second `compute_scores_composite` below, which adds the
    # `opponent`/`o_weight` parameters plus heatmap rendering.
    """Score each free square as density_score * open_path_score per board.

    Uses the precomputed per-board matrices from
    compute_scores_density_guassian / compute_scores_density and
    compute_scores_open_paths.  Occupied squares are re-encoded as the
    sentinels -0.00001 ('X') and -0.00002 ('O').

    normalized -- divide scores by their sum; with `lamb` set, use a
                  softmax-style exp(lamb*score)/sum_exp instead.
    Returns {board_key: score_matrix}.
    """
    data_matrices = {}
    if (density=='guassian'):
        density_scores = compute_scores_density_guassian(True,sig=sig)
    else:
        density_scores = compute_scores_density(True, neighborhood_size=neighborhood_size)
    path_scores = compute_scores_open_paths(True,exp)
    for g in range(len(LOGFILE)):
        # board key: strip the extension then the 'logs/'-style prefix/suffix
        board_key = LOGFILE[g]
        board_key = board_key[:-4]
        board_key = board_key[5:-6]
        density_scores_board = density_scores[board_key]
        path_scores_board = path_scores[board_key]
        board_matrix = copy.deepcopy(START_POSITION[g])
        # Re-encode the raw board: 1 -> 'X', 2 -> 'O', everything else to int.
        for i in range(len(board_matrix)):
            for j in range(len(board_matrix[i])):
                if ((board_matrix[i][j]!=1) & (board_matrix[i][j]!=2)):
                    board_matrix[i][j] = int(board_matrix[i][j])
                elif (board_matrix[i][j]==1):
                    board_matrix[i][j]='X'
                elif (board_matrix[i][j]==2):
                    board_matrix[i][j]='O'
        # print board_matrix
        sum_scores = 0.0
        sum_scores_exp = 0.0
        score_matrix = copy.deepcopy(board_matrix)
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if board_matrix[r][c] == 0: # only check if free
                    # if (density=='guassian'):
                    #     square_score_density = compute_density_guassian(r, c, board_matrix, guassian_kernel) # check density
                    # else:
                    #     square_score_density = compute_density(r, c, board_matrix, neighborhood_size) # check density
                    # square_score_path = compute_open_paths(r, c, board_matrix,exp=exp) # check open paths for win
                    # square_score = square_score_density * square_score_path
                    square_score = density_scores_board[r][c] * path_scores_board[r][c]
                    score_matrix[r][c] = square_score
                    sum_scores += square_score
                    if lamb != None:
                        sum_scores_exp += math.pow(math.e,lamb*square_score)
        # heatmaps
        for r in range(0,len(score_matrix)):
            for j in range(0,len(score_matrix[r])):
                if (score_matrix[r][j]=='X'):
                    score_matrix[r][j] = -0.00001
                elif (score_matrix[r][j]=='O'):
                    score_matrix[r][j] = -0.00002
        if normalized:
            for r in range(len(score_matrix)):
                for c in range(len(score_matrix[r])):
                    if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                        if lamb is None:
                            score_matrix[r][c] = score_matrix[r][c]/sum_scores
                        else:
                            score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
                    # if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                    #     score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
        fig_file_name = LOGFILE[g]
        fig_file_name = fig_file_name[:-4]
        data_matrices[fig_file_name[5:-6]] = score_matrix
        # if normalized:
        #     fig_file_name = fig_file_name + '_normalized_composite_scores_' + 'exp=' + str(exp) + '_neighborhood=' + str(neighborhood_size) +'_lamb=' + str(lamb) + '.png'
        # else:
        #     fig_file_name = fig_file_name + '_composite_scores_' + 'exp=' + str(exp) + '_neighborhood=' + str(neighborhood_size) +'_lamb=' + str(lamb) + '.png'
        #
        # plt.savefig(fig_file_name)
        # plt.clf()
    return data_matrices
def compute_scores_composite(normalized=False, exp=1, neighborhood_size=1, density = 'guassian', lamb=None, sig=3, opponent = False, o_weight=0.5):
    """Composite model: score each free square as density * open-path score.

    Like the definition above (which this one shadows), but with an
    `opponent` switch: when set, path scores come from
    compute_scores_open_paths_opponent weighted by `o_weight`.
    Additionally renders a per-board heatmap (pcolor + per-cell text) and
    saves it to a .png whose name encodes the parameters.

    normalized -- divide scores by their sum, or exp(lamb*score)/sum_exp
                  when `lamb` is given.
    Returns {board_key: score_matrix} with -0.00001/-0.00002 sentinels for
    'X'/'O' squares.
    """
    data_matrices = {}
    if (density=='guassian'):
        density_scores = compute_scores_density_guassian(True,sig=sig)
    else:
        density_scores = compute_scores_density(True, neighborhood_size=neighborhood_size)
    if opponent:
        path_scores = compute_scores_open_paths_opponent(True,exp, o_weight=o_weight)
    else:
        path_scores = compute_scores_open_paths(True,exp)
    for g in range(len(LOGFILE)):
        board_key = LOGFILE[g]
        board_key = board_key[:-4]
        board_key = board_key[5:-6]
        density_scores_board = density_scores[board_key]
        path_scores_board = path_scores[board_key]
        board_matrix = copy.deepcopy(START_POSITION[g])
        # Re-encode the raw board: 1 -> 'X', 2 -> 'O', everything else to int.
        for i in range(len(board_matrix)):
            for j in range(len(board_matrix[i])):
                if ((board_matrix[i][j]!=1) & (board_matrix[i][j]!=2)):
                    board_matrix[i][j] = int(board_matrix[i][j])
                elif (board_matrix[i][j]==1):
                    board_matrix[i][j]='X'
                elif (board_matrix[i][j]==2):
                    board_matrix[i][j]='O'
        # print board_matrix
        sum_scores = 0.0
        sum_scores_exp = 0.0
        score_matrix = copy.deepcopy(board_matrix)
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if board_matrix[r][c] == 0: # only check if free
                    # if (density=='guassian'):
                    #     square_score_density = compute_density_guassian(r, c, board_matrix, guassian_kernel) # check density
                    # else:
                    #     square_score_density = compute_density(r, c, board_matrix, neighborhood_size) # check density
                    # square_score_path = compute_open_paths(r, c, board_matrix,exp=exp) # check open paths for win
                    # square_score = square_score_density * square_score_path
                    square_score = density_scores_board[r][c] * path_scores_board[r][c]
                    score_matrix[r][c] = square_score
                    sum_scores += square_score
                    if lamb != None:
                        sum_scores_exp += math.pow(math.e,lamb*square_score)
        # score_matrix_normalized = copy.deepcopy(score_matrix)
        # for r in range(len(score_matrix_normalized)):
        #     for c in range(len(score_matrix_normalized[r])):
        #         score_matrix_normalized[r][c] = score_matrix_normalized[r][c]/sum_scores
        # print 'score matrix:'
        # print score_matrix
        # print 'score matrix normalized'
        # print score_matrix_normalized
        # heatmaps
        for r in range(0,len(score_matrix)):
            for j in range(0,len(score_matrix[r])):
                if (score_matrix[r][j]=='X'):
                    score_matrix[r][j] = -0.00001
                elif (score_matrix[r][j]=='O'):
                    score_matrix[r][j] = -0.00002
        if normalized:
            for r in range(len(score_matrix)):
                for c in range(len(score_matrix[r])):
                    if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                        if lamb is None:
                            score_matrix[r][c] = score_matrix[r][c]/sum_scores
                        else:
                            score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
                    # if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                    #     score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
        # Render: flip vertically so row 0 is drawn at the top of the plot.
        a = np.array(score_matrix)
        a = np.flip(a,0)
        # print a
        heatmap = plt.pcolor(a)
        for y in range(a.shape[0]):
            for x in range(a.shape[1]):
                if(a[y,x]==-1) | (a[y,x]==-0.00001):
                    plt.text(x + 0.5, y + 0.5, 'X',
                             horizontalalignment='center',
                             verticalalignment='center',
                             color='white'
                             )
                elif((a[y,x]==-2) | (a[y,x]==-0.00002)):
                    plt.text(x + 0.5, y + 0.5, 'O',
                             horizontalalignment='center',
                             verticalalignment='center',
                             color='white'
                             )
                elif(a[y,x]!=0):
                    plt.text(x + 0.5, y + 0.5, '%.2f' % a[y, x],
                             horizontalalignment='center',
                             verticalalignment='center',
                             color='white'
                             )
        fig = plt.colorbar(heatmap)
        fig_file_name = LOGFILE[g]
        fig_file_name = fig_file_name[:-4]
        data_matrices[fig_file_name[5:-6]] = score_matrix
        if normalized:
            fig_file_name = fig_file_name + '_normalized_composite_scores_' + 'exp=' + str(exp) + '_neighborhood=' + str(neighborhood_size) +'_lamb=' + str(lamb) + '.png'
        else:
            fig_file_name = fig_file_name + '_composite_scores_' + 'exp=' + str(exp) + '_neighborhood=' + str(neighborhood_size) +'_lamb=' + str(lamb) + '.png'
        if opponent:
            fig_file_name = fig_file_name[:-4] + 'o_weight=' + str(o_weight) + '_opponent.png'
        plt.savefig(fig_file_name)
        plt.clf()
    return data_matrices
def prune_matrix(matrix, threshold = 0.02):
    """Zero out every entry of *matrix* below *threshold*.

    Mutates *matrix* in place and returns it for convenience.  Note this
    also zeroes the negative 'X'/'O' sentinel values, since they are below
    any positive threshold.
    """
    for row in matrix:
        for col, value in enumerate(row):
            if value < threshold:
                row[col] = 0
    return matrix
def compute_scores_layers(normalized=False, exp=1, neighborhood_size=1, density = 'guassian', lamb=None, sig=3,
                          threshold=0.2, o_weight=0.5, integrate = False):
    """Two-stage ("layered") scoring of every board in LOGFILE.

    Stage 1: compute a density score for every free square -- either a sum
    of Gaussian bumps centred on each 'X' square (density='guassian',
    width `sig`) or a neighbourhood count -- and normalise it by the total.
    Stage 2: compute open-path scores ONLY for squares whose density
    clears `threshold * max_density`; `o_weight` mixes own ('X') and
    opponent ('O') path scores, `integrate` multiplies the path score by
    the density score, and `lamb` applies exp(lamb*score).

    Renders and saves a heatmap per board (filename encodes the
    parameters) and returns {board_key: score_matrix} with the usual
    -0.00001/-0.00002 sentinels for 'X'/'O'.
    """
    data_matrices = {}
    for g in range(len(LOGFILE)):
        board_matrix = copy.deepcopy(START_POSITION[g])
        # Re-encode the raw board: 1 -> 'X', 2 -> 'O', everything else to int.
        for i in range(len(board_matrix)):
            for j in range(len(board_matrix[i])):
                if ((board_matrix[i][j]!=1) & (board_matrix[i][j]!=2)):
                    board_matrix[i][j] = int(board_matrix[i][j])
                elif (board_matrix[i][j]==1):
                    board_matrix[i][j]='X'
                elif (board_matrix[i][j]==2):
                    board_matrix[i][j]='O'
        # print board_matrix
        sum_scores = 0.0
        sum_scores_exp = 0.0
        density_score_matrix = copy.deepcopy(board_matrix)
        if density=='guassian':
            # create guassians for each X square
            guassian_kernel = []
            for r in range(len(board_matrix)):
                for c in range(len(board_matrix[r])):
                    if board_matrix[r][c] == 'X':
                        guassian_kernel.append(makeGaussian(len(board_matrix),fwhm=sig,center=[r,c]))
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if board_matrix[r][c] == 0: # only check if free
                    if density == 'guassian':
                        square_score = compute_density_guassian(r, c, board_matrix, guassian_kernel) # check neighborhood
                    else:
                        square_score = compute_density(r, c, board_matrix, neighborhood_size) # check neighborhood
                    density_score_matrix[r][c] = square_score
                    sum_scores += square_score
                    # if lamb!=None:
                    #     sum_scores_exp += math.pow(math.e,lamb*square_score)
        # normalize the density layer and track its maximum for thresholding
        max_density_score = -1000000
        for r in range(len(density_score_matrix)):
            for c in range(len(density_score_matrix[r])):
                # score_matrix[r][c] = score_matrix[r][c]/sum_scores
                if (density_score_matrix[r][c]!='X') & (density_score_matrix[r][c]!='O'):
                    density_score_matrix[r][c] = density_score_matrix[r][c]/sum_scores
                    if density_score_matrix[r][c] > max_density_score:
                        max_density_score = density_score_matrix[r][c]
                # score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
        # run path score on remaining squares
        sum_scores = 0.0
        sum_scores_exp = 0.0
        score_matrix = copy.deepcopy(board_matrix)
        for r in range(len(board_matrix)):
            for c in range(len(board_matrix[r])):
                if (board_matrix[r][c] == 0) & (density_score_matrix[r][c]>threshold*max_density_score): # only check if free
                    square_score_x = compute_open_paths(r, c, board_matrix,exp=exp) # check open paths for win
                    square_score_o = compute_open_paths(r, c, board_matrix, exp=exp, player = 'O')
                    square_score = (1-o_weight)*square_score_x + o_weight*square_score_o
                    if integrate:
                        square_score = square_score*density_score_matrix[r][c]
                    score_matrix[r][c] = square_score
                    sum_scores += square_score
                    if lamb!=None:
                        score_matrix[r][c] = math.pow(math.e,lamb*square_score)
                        sum_scores_exp += math.pow(math.e,lamb*square_score)
        # heatmaps
        for r in range(0,len(score_matrix)):
            for j in range(0,len(score_matrix[r])):
                if (score_matrix[r][j]=='X'):
                    score_matrix[r][j] = -0.00001
                elif (score_matrix[r][j]=='O'):
                    score_matrix[r][j] = -0.00002
        if normalized:
            for r in range(len(score_matrix)):
                for c in range(len(score_matrix[r])):
                    # if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                    if (score_matrix[r][c]>0):
                        if lamb is None:
                            score_matrix[r][c] = score_matrix[r][c]/sum_scores
                        else:
                            score_matrix[r][c] = score_matrix[r][c]/sum_scores_exp
                    # if (score_matrix[r][c]!=-0.00001) & (score_matrix[r][c]!=-0.00002):
                    #     score_matrix[r][c] = (math.pow(math.e, lamb*score_matrix[r][c]))/sum_scores_exp
        # Render: flip vertically so row 0 is drawn at the top of the plot.
        a = np.array(score_matrix)
        a = np.flip(a,0)
        # print a
        heatmap = plt.pcolor(a)
        for y in range(a.shape[0]):
            for x in range(a.shape[1]):
                if(a[y,x]==-1) | (a[y,x]==-0.00001):
                    plt.text(x + 0.5, y + 0.5, 'X',
                             horizontalalignment='center',
                             verticalalignment='center',
                             color='white'
                             )
                elif((a[y,x]==-2) | (a[y,x]==-0.00002)):
                    plt.text(x + 0.5, y + 0.5, 'O',
                             horizontalalignment='center',
                             verticalalignment='center',
                             color='white'
                             )
                elif(a[y,x]!=0):
                    plt.text(x + 0.5, y + 0.5, '%.2f' % a[y, x],
                             horizontalalignment='center',
                             verticalalignment='center',
                             color='white'
                             )
        fig = plt.colorbar(heatmap)
        fig_file_name = LOGFILE[g]
        fig_file_name = fig_file_name[:-4]
        data_matrices[fig_file_name[5:-6]] = score_matrix
        fig_file_name += '_layers'
        if normalized:
            fig_file_name += '_normalized'
        if density=='guassian':
            fig_file_name += '_guassian_sig=' + str(sig)
        else:
            fig_file_name += '_neighborhood=' + str(neighborhood_size)
        if lamb!=None:
            fig_file_name += '_lamb=' + str(lamb)
        if integrate:
            fig_file_name += '_combined'
        fig_file_name += '_oweight='+str(o_weight) + '.png'
        # fig_file_name = fig_file_name + '_normalized_layers_scores_' + 'exp=' + str(exp) + '_neighborhood=' + str(neighborhood_size) +'_lamb=' + str(lamb) + '.png'
        # else:
        #     fig_file_name = fig_file_name + '_layers_scores_' + 'exp=' + str(exp) + '_neighborhood=' + str(neighborhood_size) +'_lamb=' + str(lamb) + '.png'
        plt.savefig(fig_file_name)
        plt.clf()
    return data_matrices
def transform_matrix_to_list(mat):
    """Flatten *mat* row-major into a list normalised to sum to 1.

    Entries below 1e-5 are clamped to 1e-5 -- in the matrix itself too, in
    place -- so every cell keeps a tiny non-zero probability mass.
    """
    flat = []
    for row in mat:
        for col in range(len(row)):
            if row[col] < 0.00001:
                row[col] = 0.00001
            flat.append(row[col])
    total = sum(flat)
    return [value / total for value in flat]
def run_models():
    """Run the layered scoring models on every board and plot comparisons.

    Computes several model variants (with/without opponent weight and
    density integration), pulls the observed first-move distribution from
    rp.entropy_paths(), then for each board saves a 2x3 subplot figure:
    top row = full data (layers, layers-with-O, first moves), bottom row =
    pruned data.  Model-vs-data distance is earth-mover distance (emd),
    shown in each subplot title.  Commented lines are kept as experiment
    history.
    """
    # # data_composite_guassian = compute_scores_composite(True, exp=2, sig=4)
    # # data_composite_reg = compute_scores_composite(True, exp=2, neighborhood_size=1, density='reg')
    # data_composite_reg_2 = compute_scores_composite(True, exp=2, neighborhood_size=2, density='reg')
    # data_layers_reg_2_integrated_lamb2 = compute_scores_layers(normalized=True,exp=2,neighborhood_size=2,density='reg',o_weight=0.5, integrate=True, lamb=200)
    data_layers_reg_2_integrated = compute_scores_layers(normalized=True,exp=3,neighborhood_size=2,density='reg',o_weight=0.5, integrate=True)
    data_layers_reg_2_integrated_guass = compute_scores_layers(normalized=True,exp=3,neighborhood_size=2,density='guassian',o_weight=0.5, integrate=True,sig=4)
    data_layers_reg_2_integrated_noO = compute_scores_layers(normalized=True,exp=3,neighborhood_size=2,density='reg',o_weight=0.0, integrate=True)
    data_layers_reg_2 = compute_scores_layers(normalized=True,exp=3,neighborhood_size=2,density='reg',o_weight=0.5, integrate=False)
    data_layers_reg_2_noO = compute_scores_layers(normalized=True,exp=3,neighborhood_size=2,density='reg',o_weight=0.0, integrate=False)
    # # data_density = compute_scores_density(True,neighborhood_size=1)
    data_density_2 = compute_scores_density(True,neighborhood_size=2)
    # # data_density_guassian = compute_scores_density_guassian(True)
    data_paths_o = compute_scores_open_paths_opponent(True, exp=2,o_weight=0.5)
    data_paths = compute_scores_open_paths_opponent(True, exp=2,o_weight=0.0)
    data_first_moves = rp.entropy_paths()
    models = []
    # models.append(['dataCompositeGuassianSig3',compute_scores_composite(True, exp=2)])
    # models.append(['dataCompositeGuassianSig10',compute_scores_composite(True, exp=2, sig=10)])
    # models.append(['dataCompositeGuassianReg1',compute_scores_composite(True, exp=2, neighborhood_size=1, density='reg')])
    models.append(['data_layers_reg_2',data_layers_reg_2])
    # models.append(['dataDensity',compute_scores_density(True,neighborhood_size=1)])
    models.append(['data_layers_reg_2_integrated',data_layers_reg_2_integrated])
    # models.append(['dataDensityGuassian',compute_scores_density_guassian(True)])
    # models.append(['dataPaths',data_paths])
    # data_first_moves = rp.entropy_paths()
    # for board in ['6_easy','6_hard','10_easy','10_hard','10_medium']:
    #     full = board + '_full'
    #     pruned = board + '_pruned'
    #     for model in models:
    #         qk = transform_matrix_to_list(model[1][full])
    #         pk = transform_matrix_to_list(data_first_moves[full])
    #         ent_full = stats.entropy(pk,qk=qk)
    #         print full + ',' + model[0] + ',' + str(ent_full)
    #         pk = transform_matrix_to_list(model[1][pruned])
    #         qk = transform_matrix_to_list(data_first_moves[pruned])
    #         ent_pruned = stats.entropy(pk,qk=qk)
    #         print pruned + ',' + model[0] + ',' + str(ent_pruned)
    # print data_density.keys()
    for board in ['6_easy','6_hard','10_easy','10_hard','10_medium']:
        fig_file_name = 'heatmaps/layers/Jan12/' + board+ '_neighborhood=2_exp=3_notIntegrated.png'
        heatmaps = []
        full = board + '_full'
        pruned = board + '_pruned'
        # smaller figure for the 6x6 boards
        if board.startswith('6'):
            fig, axes = plt.subplots(2, 3, figsize=(12,8))
            # fig, axes = plt.subplots(2, 4, figsize=(10,6))
        else:
            fig, axes = plt.subplots(2, 3, figsize=(18,12))
            # fig, axes = plt.subplots(2, 4, figsize=(18,12))
        fig.suptitle(board)
        i = 0
        print board
        print '-----'
        # heatmaps.append((data_density_2[full], 'density2 full'))
        # heatmaps.append((data_density_guassian[full], 'density guassian full'))
        dist = emd(data_layers_reg_2_noO[full],data_first_moves[full]) # earth mover distance
        heatmaps.append((data_layers_reg_2_noO[full], 'layers' + '\n' +str(round(dist,3))))
        dist = emd(data_layers_reg_2[full],data_first_moves[full])
        heatmaps.append((data_layers_reg_2[full], 'layers with O '+'\n' +str(round(dist,3))))
        # heatmaps.append((data_composite_guassian[full], 'composite guassian full'))
        heatmaps.append((data_first_moves[full], 'first moves'))
        dist = emd(data_layers_reg_2_noO[pruned],data_first_moves[pruned])
        heatmaps.append((data_layers_reg_2_noO[pruned], 'layers '+'\n' +str(round(dist,3))))
        dist = emd(data_layers_reg_2[pruned],data_first_moves[pruned])
        heatmaps.append((data_layers_reg_2[pruned], 'layers with O'+'\n' +str(round(dist,3))))
        # heatmaps.append((data_composite_guassian[full], 'composite guassian full'))
        heatmaps.append((data_first_moves[pruned], 'first moves'))
        # heatmaps.append((data_density[pruned],'density pruned'))
        # heatmaps.append((data_density_guassian[pruned], 'density guassian pruned'))
        # heatmaps.append((data_paths[pruned],'paths pruned'))
        # heatmaps.append((data_composite_reg[pruned], 'composite pruned'))
        # heatmaps.append((data_composite_guassian[pruned], 'composite guassian pruned'))
        # heatmaps.append((data_first_moves[pruned], 'first moves pruned'))
        for ax in axes.flatten(): # flatten in case you have a second row at some point
            a = np.array(heatmaps[i][0])
            a = np.flip(a,0)
            img = ax.pcolormesh(a)
            for y in range(a.shape[0]):
                for x in range(a.shape[1]):
                    if(a[y,x]==-1) | (a[y,x]==-0.00001):
                        ax.text(x + 0.5, y + 0.5, 'X',
                                horizontalalignment='center',
                                verticalalignment='center',
                                color='white'
                                )
                    elif((a[y,x]==-2) | (a[y,x]==-0.00002)):
                        ax.text(x + 0.5, y + 0.5, 'O',
                                horizontalalignment='center',
                                verticalalignment='center',
                                color='white'
                                )
                    elif(a[y,x]!=0):
                        ax.text(x + 0.5, y + 0.5, '%.2f' % a[y, x],
                                horizontalalignment='center',
                                verticalalignment='center',
                                color='white'
                                )
            fig.colorbar(img, ax=ax)
            # plt.colorbar(img)
            ax.set_aspect('equal')
            ax.set_title(heatmaps[i][1])
            i+=1
        # a = np.random.rand(10,4)
        # img = axes[0,0].imshow(a,interpolation='nearest')
        # axes[0,0].set_aspect('auto')
        # plt.colorbar(img)
        # plt.title(board)
        # fig.tight_layout()
        # fig.subplots_adjust(top=0.88)
        plt.savefig(fig_file_name)
        plt.clf()
def makeGaussian(size, fwhm = 3, center=None):
    """ Make a square gaussian kernel.

    size is the length of a side of the square
    fwhm is full-width-half-maximum, which
    can be thought of as an effective radius.
    center is (x0, y0); defaults to just below the midpoint.
    """
    col_idx = np.arange(0, size, 1, float)
    row_idx = col_idx[:, np.newaxis]
    if center is None:
        cx = cy = size // 2 - 1
    else:
        cx = center[0]
        cy = center[1]
    # return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
    squared_dist = (col_idx - cx) ** 2 + (row_idx - cy) ** 2
    return np.exp(-1 * squared_dist / fwhm ** 2)
if __name__ == "__main__":
    # One-off experiments kept below as commented-out history; only the
    # full model comparison is currently run.
    # print emd(np.array([[0.0, 1.0],[0.0, 1.0]]),np.array([[5.0, 3.0],[5.0, 3.0]]))
    # first_histogram = np.array([0.0, 1.0])
    # second_histogram = np.array([5.0, 3.0])
    # distance_matrix = np.array([[0.0, 0.5],[0.5, 0.0]])
    # print emd(first_histogram, second_histogram, distance_matrix)
    # compute_scores_layers(normalized=True,exp=2,neighborhood_size=2,density='guassian',o_weight=0.2, integrate=True)
    # compute_scores_layers(normalized=True,exp=2,neighborhood_size=2,density='reg',o_weight=0.2, integrate=True)
    # compute_scores_open_paths(True, exp=2)
    # compute_scores_composite(True, exp=2, opponent=True, o_weight=0.2)
    # compute_scores_open_paths_opponent(True,exp=2)
    run_models()
# gaus = makeGaussian(6,center=[0, 0])
# print gaus
# heatmap = plt.pcolor(gaus)
# plt.colorbar(heatmap)
# # plt.plot(gaus)
# plt.show()
# # plt.colorbar(img)
# data_composite = compute_scores_composite(True, exp=2, neighborhood_size=1)
# data_density = compute_scores_density(True,neighborhood_size=1)
# data_paths = compute_scores_open_paths(True, exp=2)
# data_first_moves = rp.entropy_paths()
#
# print data_density.keys()
# for board in ['6_easy','6_hard','10_easy','10_hard','10_medium']:
# fig_file_name = 'heatmaps/noDensity/' + board+ '_neighborhood=1.png'
# heatmaps = []
# full = board + '_full'
# pruned = board + '_pruned'
# if board.startswith('6'):
# fig, axes = plt.subplots(2, 3, figsize=(12,8))
# else:
# fig, axes = plt.subplots(2, 3, figsize=(18,12))
# fig.suptitle(board)
#
#
# i = 0
#
# # heatmaps.append((data_density[full], 'density full'))
# heatmaps.append((data_paths[full], 'paths full'))
# heatmaps.append((data_composite[full], 'composite full'))
# heatmaps.append((data_first_moves[full], 'first moves full'))
#
# # heatmaps.append((data_density[pruned],'density pruned'))
# heatmaps.append((data_paths[pruned],'paths pruned'))
# heatmaps.append((data_composite[pruned], 'composite pruned'))
# heatmaps.append((data_first_moves[pruned], 'first moves pruned'))
#
# for ax in axes.flatten(): # flatten in case you have a second row at some point
# a = np.array(heatmaps[i][0])
# a = np.flip(a,0)
# img = ax.pcolormesh(a)
# for y in range(a.shape[0]):
# for x in range(a.shape[1]):
# if(a[y,x]==-1) | (a[y,x]==-0.00001):
# ax.text(x + 0.5, y + 0.5, 'X',
# horizontalalignment='center',
# verticalalignment='center',
# color='white'
# )
# elif((a[y,x]==-2) | (a[y,x]==-0.00002)):
# ax.text(x + 0.5, y + 0.5, 'O',
# horizontalalignment='center',
# verticalalignment='center',
# color='white'
# )
# elif(a[y,x]!=0):
# ax.text(x + 0.5, y + 0.5, '%.2f' % a[y, x],
# horizontalalignment='center',
# verticalalignment='center',
# color='white'
# )
#
# fig.colorbar(img, ax=ax)
# # plt.colorbar(img)
# ax.set_aspect('equal')
# ax.set_title(heatmaps[i][1])
# i+=1
#
# # a = np.random.rand(10,4)
# # img = axes[0,0].imshow(a,interpolation='nearest')
# # axes[0,0].set_aspect('auto')
# # plt.colorbar(img)
# # plt.title(board)
# # fig.tight_layout()
# # fig.subplots_adjust(top=0.88)
# plt.savefig(fig_file_name)
# plt.clf()
# # plt.show()
# compute_scores_density(True,neighborhood_size=2)
# compute_scores_density(True)
# compute_scores_composite(True, exp=2, neighborhood_size=1)
# compute_scores_open_paths(True, 2)
# compute_scores_open_paths(True) | [
"oamir@seas.harvard.edu"
] | oamir@seas.harvard.edu |
7de38a9ebf121bd2358964fca2221e14ee60c24a | b93446177b6ac10bd27582b1e9647f0adab7d3d4 | /pyVoodoo/ir.py | af3c8d637b9a9ac7f6a12bde7d1fe86473914bc8 | [
"BSD-3-Clause"
class Node(tuple):
    """Base class for AST nodes: an immutable tuple with no per-instance dict."""
    __slots__ = []
| [
"bossi.ernestog@gmail.com"
] | bossi.ernestog@gmail.com |
82d5072c95d430143fba75124b748cf8add70456 | d342898f0a632b28d5c6f594208300c546cb51e3 | /Helper.py | ee73a7910b6b3f420a71ca6c2bdb1f2d9ec9298c | [] | no_license | DragonKiller952/ST-Groep-8 | 91ce869b1905504e65d84acf104fc68156d0ef91 | 00c19288b2fb5a6110fba6a2eea7b03650d0e534 | refs/heads/main | 2023-01-31T22:08:12.134684 | 2020-12-17T09:05:02 | 2020-12-17T09:05:02 | 318,191,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # Chosing blue
def standard_color(*args):
    """Always pick blue, ignoring whatever arguments are passed in."""
    chosen = 'blue'
    return chosen
# Chosing random without duplicates
def unique_random(self, choices, used):
    """Draw from *choices* until the draw is not in *used*; record and return it.

    Loops forever if every element of *choices* is already in *used*.
    """
    pick = self.random.choice(choices)
    while pick in used:
        pick = self.random.choice(choices)
    used.append(pick)
    return pick
# Chosing color based on agent id
def id_color(self, choices, used):
    """Deterministically colour this agent: index *choices* by its agent id."""
    agent_index = self.agentId
    return choices[agent_index]
# Chosing position based on agent id
def id_coord(self, choices, used):
    """Deterministically place this agent at a fixed coordinate keyed by its id."""
    fixed_positions = [(12, 75), (30, 60), (40, 80), (40, 90), (60, 80),
                       (50, 35), (60, 35), (65, 15), (75, 40), (90, 45)]
    return fixed_positions[self.agentId]
"abou.w@hotmail.com"
] | abou.w@hotmail.com |
09fb11f511d0b05365e34eecb467462c7c0d96a0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/97/usersdata/228/56191/submittedfiles/lecker.py | 8478e84811202758aba6f53520c3def648a83ece | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # -*- coding: utf-8 -*-
from __future__ import division
# Read the shared length n, then fill two integer lists of that length
# from stdin (prompts are Portuguese: "enter the number of elements" /
# "enter the term").
n=int(input('digite o número de elementos:'))
lista1=[]
lista2=[]
for i in range (0,n,1):
    termo1=int(input('digite o termo:'))
    lista1.append(termo1)
for i in range (0,n,1):
    termo2=int(input('digite o termo:'))
    lista2.append(termo2)
def leker(a):
    """Return True when exactly one "rising" feature is counted in *a*.

    Counts one feature if the first element exceeds the second, otherwise
    one if the last exceeds the second-to-last, otherwise one per interior
    strictly-increasing triple a[i-1] < a[i] < a[i+1].

    Fixes over the original draft, which crashed at runtime:
    - uses the parameter ``a`` instead of the undefined global ``lista``
      (NameError);
    - compares the last element via index len(a)-1 instead of a[n]
      (IndexError: n elements -> valid indices 0..n-1);
    - iterates interior *indices* 1..len(a)-2 instead of starting the
      range at the *value* a[1] and overrunning at a[i+1].
    NOTE(review): the intended contest semantics could not be verified
    from this file; the fix preserves the original structure literally.
    """
    cont = 0
    if a[0] > a[1]:
        cont = cont + 1
    elif a[len(a) - 1] > a[len(a) - 2]:
        cont = cont + 1
    else:
        for i in range(1, len(a) - 1, 1):
            if a[i - 1] < a[i] < a[i + 1]:
                cont = cont + 1
    if cont == 1:
        return True
    else:
        return False
# Print 'S'/'N' (yes/no) for each list.
# NOTE(review): each branch evaluates leker() twice per list; a plain
# `else:` would avoid the duplicate call.
if leker(lista1):
    print('S')
elif leker(lista1)==False:
    print('N')
if leker(lista2):
    print('S')
elif leker(lista2)==False:
    print('N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8168fad0cd7f389b78bbc3f5a443a0caf364b763 | c39ad4916eb59f7824f2577eaff109fa3a820d91 | /app/controllers/BaseController.py | e965e579538ddc60729b2605f1f13ade78ec151a | [] | no_license | JhonathaNunes/della-sollution-back | e794b23f81136c729a35324ae35ce370411b23ef | f1001954ff349d99eb272a18b917cac7e0182123 | refs/heads/main | 2023-05-29T08:01:01.914292 | 2021-06-08T21:42:40 | 2021-06-08T21:42:40 | 341,367,385 | 0 | 0 | null | 2021-06-08T21:42:41 | 2021-02-22T23:29:41 | Python | UTF-8 | Python | false | false | 2,775 | py | import database
from flask import request, jsonify
from sqlalchemy import exc
from cerberus import Validator
from authenticator import auth
import exceptions
class BaseController(object):
    """Generic CRUD controller for a SQLAlchemy model behind Flask routes.

    Subclasses override the manipulate_* hooks to customise serialisation
    and request handling; all endpoints require auth.login_required.
    NOTE(review): the error responses build `self.model + "..."`; if
    `self.model` is a model *class* this string concatenation raises
    TypeError -- presumably `self.model.__name__` was intended. Verify.
    """
    def __init__(self, model, post_schema = {}, put_schema = {}):
        # NOTE(review): mutable default arguments ({}) are shared across
        # calls; safer would be `post_schema=None` with a fallback.
        self.model = model
        self.post_schema = post_schema
        # Fall back to the POST schema when no PUT schema is supplied.
        if (len(put_schema) == 0): self.put_schema = post_schema
        else: self.put_schema = put_schema
    @auth.login_required
    def get(self):
        # List endpoint: fetch all rows and let the subclass shape them.
        entities = database.get_all(self.model)
        dict_data = self.manipulate_get(entities)
        return jsonify(dict_data), 200
    def manipulate_get(self, entities):
        # Default serialisation hook: one empty dict per entity;
        # subclasses are expected to fill in real fields.
        entities_response = []
        for entity in entities:
            entities_dict = {}
            entities_response.append(entities_dict)
        return entities_response
    @auth.login_required
    def post(self):
        # Create endpoint: validate the JSON body against post_schema
        # (all fields required), then insert a new model instance.
        request_data = request.get_json()
        v = Validator(require_all=True)
        if (not v.validate(request_data, self.post_schema)):
            return jsonify(v.errors), 422
        try:
            # Hook may veto the request by returning a response tuple.
            error = self.manipulate_post(request_data)
            if error is not None:
                return error
            entity = self.model(**request_data)
            database.add_instance(entity)
            return jsonify("success"), 200
        except exc.IntegrityError:
            # NOTE(review): typo "registred" -> "registered".
            return jsonify({"error": self.model + " already registred"}), 409
    def manipulate_post(self, request_data):
        # Pre-insert hook; return a response tuple to abort, None to proceed.
        pass
    @auth.login_required
    def put(self, id: int):
        # Update endpoint: partial validation (fields optional).
        request_data = request.get_json()
        v = Validator()
        if (not v.validate(request_data, self.put_schema)):
            return jsonify(v.errors), 422
        try:
            entity = self.model.query.get(id)
            self.manipulate_put(entity, request_data)
            # NOTE(review): query.get returns None (it does not raise), so
            # a missing id passes the model *class* to update_instance via
            # `entity or self.model`; the NotFoundException branch below
            # is likely unreachable from here. Verify against database.py.
            database.update_instance(entity or self.model,
                                     **request_data)
            return jsonify("success"), 200
        except exc.IntegrityError:
            return jsonify({"error": self.model + " already registred"}), 409
        except exceptions.NotFoundException:
            return jsonify({"error": self.model + " not found"}), 404
    def manipulate_put(self, entity, request_data):
        # Pre-update hook; mutate request_data/entity as needed.
        pass
    @auth.login_required
    def delete(self, id: int):
        # Soft delete: flag the row inactive instead of removing it.
        try:
            entity = self.model.query.get(id)
            if (entity is not None):
                entity.active = False
                database.update_instance(entity)
            # NOTE(review): when the id does not exist this still returns
            # "success"; query.get never raises NotFoundException itself.
            return jsonify("success"), 200
        except exceptions.NotFoundException:
            return jsonify({"error": self.model + " not found"}), 404
    def custom_routes(self, app, model_string):
        # Hook for subclasses to register extra routes on the Flask app.
        pass
| [
"lucas.pacheco@aluno.faculdadeimpacta.com.br"
] | lucas.pacheco@aluno.faculdadeimpacta.com.br |
fee5ef2e8800801f15cd35535d529dcf5561d05a | 9afcf8bd88a7c36331905f77c31f3b264a6e14e7 | /test_53_1.py | 46c72baa677ef6d7604e4aa0fc2c185978ca8a99 | [] | no_license | wasabbi/testcase | 72800c5f5a23c369c93f385c07040f0186912d9c | 774465d4ab2c88d184efddfae54fc2cfb760bb36 | refs/heads/master | 2021-01-08T23:37:11.717291 | 2020-02-29T17:25:58 | 2020-02-29T17:25:58 | 233,437,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | import os
# Patch libhook.c with the configured thread start routines and hardware
# breakpoints, build it as an LD_PRELOAD shim, and run the target under it.
#
# Tunables: the target binary, the three thread start routines, and the
# three hardware breakpoints as [kernel_addr, sched_cpu, start_routine].
target = "./CVE-2017-11176"
thread1 = 0x4008d1
thread2 = 0x40087d
thread3 = 0x0  # CPU_index is 2
#           hw_bp_addr       sched  __start_routine
hw_bp1 = [0xffffffff810f7f2e, 1, 0x4008d1]
hw_bp2 = [0xffffffff810f8471, 2, 0x40087d]
hw_bp3 = [0x0, 0, 0x0]
# ======== The following need not be modified ========
# PHASE 1: bind thread to CPU
# PHASE 1.1: modify the thread_addr in libhook.c
thread_addr = [hex(thread1), hex(thread2), hex(thread3)]
addr = [hex(hw_bp1[0]), hex(hw_bp2[0]), hex(hw_bp3[0])]
sched = [str(hw_bp1[1]), str(hw_bp2[1]), str(hw_bp3[1])]
__start_routine = [hex(hw_bp1[2]), hex(hw_bp2[2]), hex(hw_bp3[2])]
# Map each breakpoint's start routine back to the thread index that owns
# it (defaults to thread 0 when no routine matches).
CPU_index = [0, 0, 0]
for i in range(3):
    for k in range(3):
        if thread_addr[i] == __start_routine[k]:
            CPU_index[k] = i
# Known assignment lines in libhook.c, keyed by their indentation-free
# prefix.  The original matched with `line.find(pattern) == 0` and baked
# the C file's indentation into each pattern; the
# `hw_bps[0]->__start_routine` pattern carried no indentation at all and
# therefore could never match an indented line.  Matching on the stripped
# line (and re-attaching the line's own indentation) fixes that and is
# robust to tab-vs-space differences in libhook.c.
replacements = {
    'void* thread1': 'void* thread1 = %s;' % (thread_addr[0],),
    'void* thread2': 'void* thread2 = %s;' % (thread_addr[1],),
    'void* thread3': 'void* thread3 = %s;' % (thread_addr[2],),
}
for slot in range(3):
    replacements['hw_bps[%d]->addr =' % slot] = \
        'hw_bps[%d]->addr = %s;' % (slot, addr[slot])
    replacements['hw_bps[%d]->sched =' % slot] = \
        'hw_bps[%d]->sched = %s;' % (slot, sched[slot])
    replacements['hw_bps[%d]->CPU_index =' % slot] = \
        'hw_bps[%d]->CPU_index = %s;' % (slot, CPU_index[slot])
    replacements['hw_bps[%d]->__start_routine =' % slot] = \
        'hw_bps[%d]->__start_routine = %s;' % (slot, __start_routine[slot])
data = ''
with open('libhook.c', 'r') as f:
    for line in f.readlines():
        stripped = line.lstrip()
        indent = line[:len(line) - len(stripped)]
        for prefix, new_body in replacements.items():
            if stripped.startswith(prefix):
                line = indent + new_body + '\n'
                break
        data += line
with open('libhook.c', 'w') as f:
    f.write(data)
# PHASE 1.2: compile libhook.c into the preloadable shim
os.system('gcc -shared -fPIC -o libhook.so libhook.c -ldl')
# PHASE 2: the shim inserts the hw breakpoints; run the target under it
os.system('LD_PRELOAD="./libhook.so" %s' % (target,))
| [
"lee.yeb33@gmail.com"
] | lee.yeb33@gmail.com |
c2861967833cf5b0d8dd7b6e9e33c7aa2c74df1f | 2839ece36a5186ae0e7f90af1efa0913f701f066 | /backend/rocket/ids.py | adbfbb1daa6148e4f5d330d9eae0ae4d59b2b8b7 | [
"Apache-2.0"
] | permissive | Longi94/rl-loadout | 5d32badffb72d1e16561d31422e470d6e51f735e | 07a248e16c8e5fb615ec4f8018631f3c622ac2a4 | refs/heads/master | 2023-01-08T07:40:42.445982 | 2020-03-14T00:00:37 | 2020-03-14T00:00:37 | 197,087,185 | 18 | 2 | Apache-2.0 | 2023-01-07T08:47:39 | 2019-07-15T23:49:19 | TypeScript | UTF-8 | Python | false | false | 1,573 | py | BODY_GREY_CAR_ID = 597 # DeLorean Time Machine
BODY_DARK_CAR_ID = 803 # '16 Batmobile
BODY_BERRY_ID = 2665 # The Dark Knight Rises Tumbler
BODY_EGGPLANT_ID = 2666 # '89 Batmobile
BODY_MAPLE_ID = 2919 # Jurassic Jeep® Wrangler
BODY_RYE_TIER1_ID = 3155 # Maverick
BODY_RYE_TIER2_ID = 3156 # Maverick G1
BODY_RYE_TIER3_ID = 3157 # Maverick GXT
BODY_ENSPIER_TIER3_ID = 3594 # Artemis GXT
BODY_ENSPIER_TIER1_ID = 3614 # Artemis
BODY_ENSPIER_TIER2_ID = 3622 # Artemis G1
BODY_MANGO_TIER3_ID = 3875 # Guardian GXT
BODY_MANGO_TIER1_ID = 3879 # Guardian
BODY_MANGO_TIER2_ID = 3880 # Guardian G1
BODY_FELINE_ID = 4014 # K.I.T.T.
BODY_SLIME_ID = 4155 # Ecto-1
BODY_MELON_TIER1_ID = 4318 # Mudcat
BODY_MELON_TIER2_ID = 4319 # Mudcat G1
BODY_MELON_TIER3_ID = 4320 # Mudcat GXT
BODY_DURIAN_TIER3_ID = 4367 # Chikara GXT
BODY_DURIAN_TIER1_ID = 4472 # Chikara
BODY_DURIAN_TIER2_ID = 4473 # Chikara G1
def tier_floor(body_id: int):
    """Map a tiered body id (G1 / GXT variant) to its base-tier id.

    Ids that are not higher-tier variants are returned unchanged.
    """
    # Higher-tier variant -> base body of the same family.
    tier_base = {
        BODY_RYE_TIER2_ID: BODY_RYE_TIER1_ID,          # Maverick
        BODY_RYE_TIER3_ID: BODY_RYE_TIER1_ID,
        BODY_ENSPIER_TIER2_ID: BODY_ENSPIER_TIER1_ID,  # Artemis
        BODY_ENSPIER_TIER3_ID: BODY_ENSPIER_TIER1_ID,
        BODY_MANGO_TIER2_ID: BODY_MANGO_TIER1_ID,      # Guardian
        BODY_MANGO_TIER3_ID: BODY_MANGO_TIER1_ID,
        BODY_MELON_TIER2_ID: BODY_MELON_TIER1_ID,      # Mudcat
        BODY_MELON_TIER3_ID: BODY_MELON_TIER1_ID,
        BODY_DURIAN_TIER2_ID: BODY_DURIAN_TIER1_ID,    # Chikara
        BODY_DURIAN_TIER3_ID: BODY_DURIAN_TIER1_ID,
    }
    return tier_base.get(body_id, body_id)
| [
"lngtrn94@gmail.com"
] | lngtrn94@gmail.com |
605f934856fa73abaca59a8d4b985a30749fa454 | f47ac8d59fe1c0f807d699fe5b5991ed3662bfdb | /binary24.py | 9cad221c86da71526bc3fda5faefd88b49ae47c7 | [] | no_license | YanglanWang/jianzhi_offer | 5561d8a29881d8504b23446353e9f969c01ed0c5 | 1c568f399ed6ac1017671c40c765e609c1b6d178 | refs/heads/master | 2020-06-16T10:41:44.979558 | 2019-08-03T09:07:37 | 2019-08-03T09:07:37 | 195,543,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | import create_tree
class TreeNode:
    """Plain binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        # Children start detached; callers wire them up explicitly.
        self.left = self.right = None
class Solution:
    """Find all root-to-leaf paths in a binary tree that sum to a target."""

    def FindPath(self, root, expectNumber):
        """Return every root-to-leaf path whose node values sum to expectNumber.

        Each path is a list of node values ordered from root to leaf; paths
        through the left subtree are listed before those through the right.
        Returns [] for an empty tree.
        """
        if root is None:
            return []
        # Leaf: the path is valid only if the remaining target equals its value.
        if root.left is None and root.right is None:
            return [[root.val]] if root.val == expectNumber else []
        remaining = expectNumber - root.val
        # Fix: the original left `a`/`b` unbound when a child was missing,
        # raising UnboundLocalError for nodes with exactly one child.
        left_paths = self.FindPath(root.left, remaining) if root.left else []
        right_paths = self.FindPath(root.right, remaining) if root.right else []
        paths = left_paths + right_paths
        for path in paths:
            path.insert(0, root.val)
        return paths
# Ad-hoc smoke test: build the sample tree [10,5,12,4,7] and print all
# root-to-leaf paths summing to 22 (expected: [[10, 5, 7], [10, 12]]).
a=Solution()
root=create_tree.fromList([10,5,12,4,7])
b=a.FindPath(root,22)
print(b)
"yanglan-17@mails.tsinghua.edu.cn"
] | yanglan-17@mails.tsinghua.edu.cn |
a755ce708909d92c7070ea2cbb38df5e1e9621a9 | c1e2dfac83ca2fab928127e7ab3b898623f943ab | /tests/test_repr_role.py | b0904d569f5c21e294aed1da039383fd148880a5 | [
"BSD-3-Clause"
] | permissive | sixty-north/added-value | 1cd7845ff63a97f6282f4ed8ebc0e6b9158be779 | debd9e8dcf1475ecc7d2836a6fecdfd95a2de8ec | refs/heads/master | 2023-04-28T11:16:59.244319 | 2023-04-14T19:37:35 | 2023-04-14T19:37:35 | 132,577,509 | 0 | 1 | BSD-3-Clause | 2023-04-14T19:37:36 | 2018-05-08T08:21:33 | Python | UTF-8 | Python | false | false | 464 | py | from pathlib import Path
# Sphinx test fixture names: the page under test and its built HTML output.
basename = "test_repr_role"
html_filename = basename + ".html"
def test_base_name_in_html(app, status, warning):
    """The built page should mention the test document's base name."""
    app.build()
    page = Path(app.outdir / html_filename).read_text()
    assert basename in page
def test_repr_literal_integer_html_contains_value(app, status, warning):
    """The rendered HTML should contain the repr'd literal-integer sentence."""
    app.build()
    page = Path(app.outdir / html_filename).read_text()
    assert "The answer to life, the Universe and everything is 42." in page
| [
"rob@sixty-north.com"
] | rob@sixty-north.com |
5c116971384f95f12ce90c1b8582949a23afaf51 | 378e3768a5afec1fe4892121a95d64e7a09c8a6d | /file/urls.py | f16eb900c8acc36255f5b094018e3dfcfd709aa5 | [] | no_license | mattwilliamson/dawstore | f7c5f8593cf64885cc80dad2c04abbe1c747c095 | d17871c732be88383f92acccaeca2d33df22e6b5 | refs/heads/master | 2020-05-18T15:49:46.815129 | 2010-03-24T03:32:25 | 2010-03-24T03:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
# File-store routes: keys/secrets are captured from the URL path.
# NOTE(review): `patterns()` with string view names and the
# `django.conf.urls.defaults` import are pre-Django-1.6 style - this file
# targets an old Django release; confirm before upgrading.
urlpatterns = patterns('file.views',
    (r'^get/(?P<key>.+)/$', 'get'),
    (r'^put/(?P<public_key>.+)/(?P<secret_key>.+)/$', 'put'),
    (r'^delete/(?P<public_key>.+)/(?P<secret_key>.+)/(?P<key>.+)/$', 'delete'),
)
| [
"dawsdesign@gmail.com"
] | dawsdesign@gmail.com |
4ad984ec5a966cb62eaeb618dfbc4aafb9fcd4f7 | 7100c3c8012dcf2bc6427bf33c55662bc61924f2 | /api/v1/views/cities.py | ecabd72acf87d8cdd29c4b5dfb6bb78c183ae1ca | [
"LicenseRef-scancode-public-domain"
] | permissive | OscarDRT/AirBnB_clone_v3 | c3ffa7b7ffb5182143b0f37c8ef7d1342cdffa0a | 9f015b7f1aa1b9c7f7f0d85fd7f5dc97a6679e9c | refs/heads/master | 2022-05-27T07:35:53.627606 | 2020-04-29T21:55:33 | 2020-04-29T21:55:33 | 259,408,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,956 | py | #!/usr/bin/python3
"""Documentation"""
from flask import Flask, jsonify, abort, make_response, request
from api.v1.views import app_views
from models.state import *
from models.city import *
from models import storage
@app_views.route('/states/<state_id>/cities', methods=['GET', 'POST'],
                 strict_slashes=False)
def cities_li(state_id):
    """List the cities of a state (GET) or create a new one (POST)."""
    # 404 when the parent state does not exist.
    if storage.get(State, state_id) is None:
        abort(404)
    if request.method == 'GET':
        cities = [
            city.to_dict()
            for city in storage.all('City').values()
            if city.state_id == str(state_id)
        ]
        return jsonify(cities)
    if request.method == 'POST':
        payload = request.get_json()
        if payload is None:
            return (jsonify({"error": "Not a JSON"}), 400)
        if 'name' not in payload:
            return (jsonify({"error": "Missing name"}), 400)
        # Bind the new city to the state from the URL, then persist it.
        payload['state_id'] = state_id
        new_city = City(**payload)
        new_city.save()
        fresh = storage.get(City, new_city.id).to_dict()
        return make_response(jsonify(fresh), 201)
@app_views.route('/cities/<city_id>', methods=['GET', 'DELETE', 'PUT'],
                 strict_slashes=False)
def my_city(city_id):
    """Retrieve, delete or update a single City object."""
    city = storage.get(City, city_id)
    if city is None:
        abort(404)
    if request.method == 'GET':
        return jsonify(city.to_dict())
    if request.method == 'DELETE':
        storage.delete(city)
        storage.save()
        return jsonify({}), 200
    if request.method == 'PUT':
        payload = request.get_json()
        if payload is None:
            return (jsonify({"error": "Not a JSON"}), 400)
        # Immutable bookkeeping fields are never overwritten by clients.
        protected = ('id', 'created_at', 'updated_at')
        for attr, val in payload.items():
            if attr not in protected:
                setattr(city, attr, val)
        city.save()
        return jsonify(city.to_dict()), 200
| [
"oscarnetworkingpro@gmail.com"
] | oscarnetworkingpro@gmail.com |
440db3f7231af9543565979f36d3760abc278062 | 5f1afd8240ce286b0a78f61b7faa3a53e4d170e1 | /examples/contrib/mnist/mnist_with_neptune_logger.py | 2f7c7d2bc0784994e1fff9e02cd16acff0e25d91 | [
"BSD-3-Clause"
] | permissive | dnola/ignite | b71e5fe7c57fe157c09044d534321b070ec4c844 | da86f6d83268cba0275a18be506a69f142157e97 | refs/heads/master | 2020-12-29T08:47:24.519519 | 2020-02-07T14:30:29 | 2020-02-07T14:30:29 | 238,542,050 | 0 | 0 | BSD-3-Clause | 2020-02-05T20:29:07 | 2020-02-05T20:29:06 | null | UTF-8 | Python | false | false | 6,778 | py | """
MNIST example with training and validation monitoring using Neptune.
Requirements:
Neptune: `pip install neptune-client`
Usage:
Run the example:
```bash
python mnist_with_neptune_logger.py
```
Go to https://neptune.ai and explore your experiment.
Note:
You can see an example experiment here:
https://ui.neptune.ai/o/neptune-ai/org/pytorch-ignite-integration/e/PYTOR-26/charts
"""
import sys
from argparse import ArgumentParser
import logging
import torch
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as F
from torch.optim import SGD
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.contrib.handlers.neptune_logger import *
# Logging cadence in iterations; appears unused in the code below - verify.
LOG_INTERVAL = 10
class Net(nn.Module):
    """Small LeNet-style CNN for 1x28x28 MNIST digits; outputs log-probabilities."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv blocks followed by a two-layer classifier head.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        out = F.max_pool2d(self.conv1(x), 2)
        out = F.relu(out)
        out = F.max_pool2d(self.conv2_drop(self.conv2(out)), 2)
        out = F.relu(out)
        # Flatten to (batch, 20 * 4 * 4) for the fully-connected head.
        out = out.view(-1, 320)
        out = F.dropout(F.relu(self.fc1(out)), training=self.training)
        return F.log_softmax(self.fc2(out), dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
    """Build a shuffled train loader and a sequential validation loader for MNIST."""
    # Standard MNIST normalization constants (dataset mean / std).
    transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
    train_loader = DataLoader(
        MNIST(download=True, root=".", transform=transform, train=True),
        batch_size=train_batch_size,
        shuffle=True)
    val_loader = DataLoader(
        MNIST(download=False, root=".", transform=transform, train=False),
        batch_size=val_batch_size,
        shuffle=False)
    return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, neptune_project):
    """Train the CNN on MNIST and stream metrics/weights to Neptune.

    Builds trainer + two evaluators, attaches Neptune log handlers for batch
    loss, epoch metrics, optimizer params, weight and gradient norms, then
    runs training for `epochs` epochs and closes the logger.
    """
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = Net()
    device = 'cpu'
    if torch.cuda.is_available():
        device = 'cuda'
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    criterion = nn.CrossEntropyLoss()
    trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
    # GPU usage metrics are optional: they need the pynvml package.
    if sys.version_info > (3,):
        from ignite.contrib.metrics.gpu_info import GpuInfo
        try:
            GpuInfo().attach(trainer)
        except RuntimeError:
            print("INFO: By default, in this example it is possible to log GPU information (used memory, utilization). "
                  "As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please "
                  "install it : `pip install pynvml`")
    metrics = {
        'accuracy': Accuracy(),
        'loss': Loss(criterion)
    }
    # Separate evaluators so train and validation metrics are logged apart.
    train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
    validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
    @trainer.on(Events.EPOCH_COMPLETED)
    def compute_metrics(engine):
        # Re-evaluate both splits at the end of every training epoch.
        train_evaluator.run(train_loader)
        validation_evaluator.run(val_loader)
    # api_token=None means the token is taken from the environment by Neptune.
    npt_logger = NeptuneLogger(api_token=None,
                               project_name=neptune_project,
                               name='ignite-mnist-example',
                               params={'train_batch_size': train_batch_size,
                                       'val_batch_size': val_batch_size,
                                       'epochs': epochs,
                                       'lr': lr,
                                       'momentum': momentum})
    # Batch loss (and all attached metrics) every 100 training iterations.
    npt_logger.attach(trainer,
                      log_handler=OutputHandler(tag="training",
                                                output_transform=lambda loss: {'batchloss': loss},
                                                metric_names='all'),
                      event_name=Events.ITERATION_COMPLETED(every=100))
    # Epoch-level metrics, x-axis aligned to the trainer via another_engine.
    npt_logger.attach(train_evaluator,
                      log_handler=OutputHandler(tag="training",
                                                metric_names=["loss", "accuracy"],
                                                another_engine=trainer),
                      event_name=Events.EPOCH_COMPLETED)
    npt_logger.attach(validation_evaluator,
                      log_handler=OutputHandler(tag="validation",
                                                metric_names=["loss", "accuracy"],
                                                another_engine=trainer),
                      event_name=Events.EPOCH_COMPLETED)
    # Optimizer hyper-parameters plus weight/gradient scalar summaries.
    npt_logger.attach(trainer,
                      log_handler=OptimizerParamsHandler(optimizer),
                      event_name=Events.ITERATION_COMPLETED(every=100))
    npt_logger.attach(trainer,
                      log_handler=WeightsScalarHandler(model),
                      event_name=Events.ITERATION_COMPLETED(every=100))
    npt_logger.attach(trainer,
                      log_handler=GradsScalarHandler(model),
                      event_name=Events.ITERATION_COMPLETED(every=100))
    # kick everything off
    trainer.run(train_loader, max_epochs=epochs)
    npt_logger.close()
if __name__ == "__main__":
    # CLI flags mirror the hyper-parameters logged to Neptune in run().
    parser = ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64,
                        help='input batch size for training (default: 64)')
    parser.add_argument('--val_batch_size', type=int, default=1000,
                        help='input batch size for validation (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10,
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5,
                        help='SGD momentum (default: 0.5)')
    parser.add_argument("--neptune_project", type=str,
                        help="your project in neptune.ai")
    args = parser.parse_args()
    # Setup engine logger
    # Route ignite's engine logs to stderr with timestamps at INFO level.
    logger = logging.getLogger("ignite.engine.engine.Engine")
    handler = logging.StreamHandler()
    formatter = logging.Formatter("%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.neptune_project)
| [
"vfdev.5@gmail.com"
] | vfdev.5@gmail.com |
b300563d66440de35f991e60c8ede1545e98a66c | 26550c1ce549a5de9bfefc87b8f8e625aa9d466d | /topics-info/asst-3/Tutorial/Ex5.py | ec61e1a47df7a34086bb5566f6cb4b39e7792864 | [] | no_license | havish99/Coding-theory-in-Distributed-systems | 9823e126a43d709b18d7b566034b05b8ebd73f7f | 0bc1094ee60d85706934cb5f2092a6c34848e780 | refs/heads/master | 2020-09-12T18:45:00.421631 | 2019-11-18T18:22:36 | 2019-11-18T18:22:36 | 222,514,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from mpi4py import MPI
import numpy as np
# Point-to-point exchange: ranks 0 and 1 swap one N-element random vector.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
N = 10000
if rank==0:
    # Rank 0 sends first, then waits for the partner's vector.
    x = np.random.rand(N)
    comm.Send(x,dest=1)
    y = np.empty(N)
    comm.Recv(y,source=1)
if rank==1:
    # Rank 1 mirrors: receive first, then send its own vector.
    x = np.empty(N)
    comm.Recv(x,source=0)
    y = np.random.rand(N)
    comm.Send(y,dest=0)
| [
"noreply@github.com"
] | noreply@github.com |
4f901326d3793958d466861ca03a3659358b08ac | 4d39e90081a0e0f23143d0ce50228b98f6e13d2c | /venv/Scripts/rst2xetex.py | 7568ea13c7366ccf250eb5aabdcdad661afae3b8 | [] | no_license | tmtrinesh/robotselenium | ca6175ce0de97c184a16a80d96c1948efe32b66f | b621c1425615c01c4b927ceb129dca0403c8f0a6 | refs/heads/master | 2023-04-14T07:37:23.728935 | 2021-04-29T07:33:54 | 2021-04-29T07:33:54 | 362,730,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | #!C:\Users\THILAK\PycharmProjects\robotselenium\venv\Scripts\python.exe
# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
"""
# Switch to the user's default locale so number/date formatting matches the
# environment; fall back silently to the "C" locale when it is unavailable.
import locale

try:
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    # Fix: the original bare `except:` also hid KeyboardInterrupt and real
    # import errors; only an unsupported locale should be ignored here.
    pass
from docutils.core import publish_cmdline
# --help text for the docutils command-line front end.
description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources for compilation with the Unicode-aware TeX variants '
               'XeLaTeX or LuaLaTeX. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')
# Parse argv, read the source and emit XeTeX output (docutils does the rest).
publish_cmdline(writer_name='xetex', description=description)
| [
"tmtrinesh@gmail.com"
] | tmtrinesh@gmail.com |
eefd9846908ec762b94bb28fc18af2a9af308b31 | 0cbf107de8b6ae51308765d321d8c134ea8108f1 | /LastDigit.py | e56b6a6035b0212dcd02522dd03543e1d96a0a33 | [] | no_license | dcormar/pyScripts | eee3555ddad83de0a66d4122f3e3809589166a84 | b75fb32f0469b35a5e88ed8ba006854909ec1eec | refs/heads/master | 2020-04-08T19:18:09.082347 | 2019-05-05T18:10:32 | 2019-05-05T18:10:32 | 159,649,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | '''def last_digit(n1, n2):
if n2 == 0:
return 1
cycle = [n1 % 10]
while True:
nxt = (cycle[-1] * n1) % 10
if nxt == cycle[0]:
break
cycle.append(nxt)
return cycle[(n2 - 1) % len(cycle)]
'''
def last_digit_matrix(matrix):
    """Reduce a power tower given as a list to its last digit (experimental).

    NOTE(review): this calls last_digit(first, second) with TWO arguments,
    but the last_digit defined below takes a single list - this helper looks
    stale relative to the rest of the module; confirm before using it.
    Mutates (reverses and pops) its argument and prints debug output.
    """
    matrix.reverse()
    print (matrix)
    first = matrix.pop()
    second=matrix.pop()
    partial_last_digit = last_digit(first, second)
    print (partial_last_digit)
    for i in reversed(matrix):
        partial_last_digit = last_digit(partial_last_digit,i)
    return partial_last_digit
def last_digit_pow(n1, n2):
    """Last decimal digit of n1 ** n2 via modular exponentiation (pow(0, 0, 10) == 1)."""
    return pow(n1, n2, 10)
# rules[d]: repeating cycle of last digits of d**1, d**2, d**3, d**4
# (every decimal digit's powers repeat with period dividing 4).
rules = {
    0: [0,0,0,0],
    1: [1,1,1,1],
    2: [2,4,8,6],
    3: [3,9,7,1],
    4: [4,6,4,6],
    5: [5,5,5,5],
    6: [6,6,6,6],
    7: [7,9,3,1],
    8: [8,4,2,6],
    9: [9,1,9,1],
}
# remainders[d]: per-parity effective-exponent selector used by
# last_digit_partial; presumably derived from each digit's cycle length
# mod 4 - verify against that function before changing.
remainders = {
    0: [4,4],
    1: [1,3],
    2: [4,4],
    3: [3,1],
    4: [4,4],
    5: [1,1],
    6: [4,4],
    7: [1,3],
    8: [4,4],
    9: [1,1]
}
def last_digit_simple(n1, n2):
    """Last digit of n1 ** n2, using the length-4 cycle table `rules`."""
    if n2 == 0:
        return 1
    # Cycle index: n2 % 4 == 1..3 map to positions 0..2, 0 maps to the last.
    cycle = rules[int(str(n1)[-1])]
    return cycle[(n2 % 4) - 1]
def last_digit_partial(lst):
    """Fold a power tower of 3+ levels (list form) down to its last digit.

    Consumes the two topmost entries with the 4-cycle rule, then walks the
    remaining bases right-to-left using the `remainders` parity table.
    NOTE(review): mutates `lst` (pop) - callers must not reuse the list.
    """
    exponent = lst.pop()
    base = lst.pop()
    result = last_digit_simple(base, exponent)
    for x in reversed(lst):
        exponent = base
        base = result
        ruler = rules[int(str(x)[-1])]
        # Effective exponent chosen by the parity of the tower above;
        # coupled to the `remainders` table semantics - see note there.
        remainder = remainders[base]
        result = ruler[remainder[(exponent%2)] - 1]
    return result
def sliceZerosAndOnes(lst):
    """Collapse the tail of a power tower after zeros/ones (mutates lst).

    A zero anywhere makes the base below it behave like 1 (x**0 == 1); a
    zero in first position wins outright. Everything above the first 1 is
    irrelevant (1**anything == 1), so the list is truncated there.
    """
    # Propagate zeros from the right down into the level below.
    for i in range(len(lst) - 1, -1, -1):
        if lst[i] == 0:
            if i == 0:
                return [0]
            lst[i - 1] = 1
    # Cut the tower at the first 1.
    for i, value in enumerate(lst):
        if value == 1:
            return lst[:i]
    return lst
def last_digit(lst):
    """Last decimal digit of the power tower lst[0] ** lst[1] ** ... (mutates lst)."""
    reduced = sliceZerosAndOnes(lst)
    if not reduced:
        # A fully collapsed tower is a stack of ones.
        return 1
    if len(reduced) == 1:
        return int(str(reduced[0])[-1])
    if len(reduced) == 2:
        return last_digit_simple(reduced[0], reduced[1])
    return last_digit_partial(reduced)
'''
PARA:
0: 2-4-4-4-4... (remainder 2 y 0)
1: 3-1-3-1 (remainder 3 y 1)
2: 2-4-4-4-4-4... POS (REMAINDER 2 Y 0, 0, 0...)
3: 3-1 POS (REMAINDER 3 Y 1)
4: 4 POS (REMAINDER 0)
5: 1 POS (REMAINDER 1)
6: 2-4-4-4-4-4... POS (REMAINDER 2, 0, 0, 0...)
7: 1-3 pos (remainder 1 y 3
8: 4 pos (remainder 0)
9: 1 pos (remainder 1)
'''
if __name__ == '__main__':
    # Smoke test: last digit of the tower 699568 ** (527915 ** 208553).
    print(last_digit([699568, 527915, 208553]))
    #print(last_digit_partial(635868,12003))
#print (pow(635963,3))#2,0,0,0,0
#print (pow(635963,13))#3,1,3,1,3
#0,0,0,0,0
#print (pow(635963,23))#1,1,1,1,1
#print (pow(635963,33))#2,0,0,0,0
#print (pow(635963,43))#3,1,3,1,3
#print (pow(5671,64))
'''
print (pow(13,11))#2,0,0,0,0
print("*********************************")
print (pow(13,121))
print("*********************************")
#3,1,3,1,3
#0,0,0,0,0
print (pow(13,11**3))#1,1,1,1,1
print("*********************************")
print (pow(13,11**4))#2,0,0,0,0
print("*********************************")
print (pow(13,11**5))#3,1,3,1,3
print("*********************************")
print (pow(13,11**6))
#0,0,0,0,0
#1,1,1,1,1
#2,0,0,0,0
#3,1,3,1,3
'''
#print(last_digit([7, 6, 21]))
#import timeit
#print(timeit.timeit("last_digit(10, 10 ** 10)", setup="from __main__ import last_digit"))
#print(timeit.timeit("last_digit_pow(10, 10 ** 10)", setup="from __main__ import last_digit_pow"))
| [
"dcormar@gmail.com"
] | dcormar@gmail.com |
00c71e7081835fffe792e1d7b3793be9edcc3ed9 | 6f1d9d756d38ca0c69ae1a0ff6db2c15d97e370f | /Practice2.py | b0b4c36114c89452fb5967a93d9332ad1f51f18e | [] | no_license | MrOlafo/Python_class_code | 7edb96dac3e4ad44ce49610d759fe916ec0cac57 | 9b2d4e63d308e58aac04653e589bf4ee328e8012 | refs/heads/master | 2020-04-14T18:25:38.338366 | 2019-01-03T20:28:15 | 2019-01-03T20:28:15 | 164,018,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | try:
start=input('Welcome to the Studen Grade System. Press 1 to continue or 0 to leave: ')
start=(int(start))
while start!=1 and start!=0:
start=input('Please press 1 to continue or 0 to leave: ')
start=(int(start))
if start==1:
while start==1:
grade=input('Type the grade of the student in a range of 1 to 100. Been 1 the lowest and 100 the highest: ')
grade=(int(grade))
while grade<=0 or grade>100:
grade=input('The grade must be in a range of 1 to 100. Been 1 the lowest and 100 the highest. Plese type the grade again: ')
grade=(int(grade))
if grade==100 or grade>=91:
print('The grade is A. This student pass the course.')
else:
if grade==90 or grade>=71:
print('The grade is B. This student pass the course.')
else:
if grade==70 or grade>=61:
print('The grade is C. This student did not pass the course.')
else:
if grade==60 or grade>=50:
print('The grade is D. This student did not pass the course.')
else:
print('The grade is E. This student did not pass the course.')
start=input('Do you want to grade another student? Yes=1 No=0: ')
start=(int(start))
while start!=1 and start!=0:
start=input('Please press 1 to continue or 0 to leave: ')
start=(int(start))
print('Thanks for using the Student Grade System. Bye')
except:
print('Use numbers, not letters') | [
"U91593@ust-global.com"
] | U91593@ust-global.com |
15632584457de864ad6c921b7228c6996d3390a5 | ebdeaa70f6e30abab03a1589bcdd56d1339151ef | /day18Python多线程/day18-多线程/code1/耗时操作.py | 4fe94df37f17e3955e313560c7b922708e178a96 | [] | no_license | gilgameshzzz/learn | 490d8eb408d064473fdbfa3f1f854c2f163a7ef6 | d476af77a6163ef4f273087582cbecd7f2ec15e6 | refs/heads/master | 2020-03-31T11:32:42.909453 | 2018-11-22T03:34:45 | 2018-11-22T03:34:45 | 152,181,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | """__author__ = 余婷"""
import pygame
from random import randint
import time
"""
1.耗时操作放到主线程中的问题:
耗时操作放到主线程中,会阻塞线程
多个耗时操作都放到一个线程中执行,最终执行的时间是两个耗时操作的时间和
2.怎么解决问题?
使用多线程(创建多个线程)
"""
def rand_color():
    """Return a random RGB color as an (r, g, b) tuple of ints in [0, 255]."""
    return tuple(randint(0, 255) for _ in range(3))
def long_time():
    """Simulate a blocking ~10 s task, printing start/end markers (in Chinese)."""
    print('耗时操作开始')
    time.sleep(10)
    print('耗时操作结束')
def download(file):
    """Simulate downloading `file`: prints start, blocks 10 s, prints done.

    NOTE(review): the parameter name shadows nothing harmful here but `file`
    was a builtin in Python 2 - consider renaming.
    """
    print('开始下载',file)
    time.sleep(10)
    print(file, '下载结束')
if __name__ == '__main__':
    # Demo for the threading lesson: two sequential 10 s "downloads" block the
    # main thread, so the timestamps below differ by ~20 s in total.
    print('====')
    print(time.time())
    download('狄仁杰')
    download('爱情公寓')
    print(time.time())
    print('!!!')
"619959856@qq.com"
] | 619959856@qq.com |
707533be29f322011c761603977cdb06d18f4ac2 | 972aca82afd04ec6cbb4bf7225e3dcd56fe6f3f0 | /face_recog/recognition/views.py | 044b04aa9c2b8708a1c1e95018615f2a28c6cf5a | [] | no_license | sbhusal123/On-web-face-recognition | a41b05e53e691648f5c0296f6ad919e353e07221 | 5ff56aacce759656af407ac2cba03f72b2ce3de4 | refs/heads/master | 2022-02-25T16:12:58.746395 | 2019-09-07T06:06:37 | 2019-09-07T06:06:37 | 166,095,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,841 | py | from django.shortcuts import render,HttpResponse
from django.core.files.storage import FileSystemStorage
import os
import shutil
from django.conf import settings
from .models import User
# Create your views here.
def index(request):
    """Landing page; on POST, stage the uploaded image as the test file."""
    if request.method == 'POST' and request.FILES['myfile']:
        # Best effort: clear any previously staged test image.
        try:
            os.remove(os.path.join(settings.BASE_DIR, 'media/test_file/test_image.jpg'))
        except:
            pass
        upload = request.FILES['myfile']
        upload.name = "test_image.jpg"
        fs = FileSystemStorage(location="media/test_file")
        fs.save(upload.name, upload)
        uploaded_file_url = "/media/test_file/test_image.jpg"
        print(uploaded_file_url)
        return render(request, 'index.html', {'uploaded_file_url': uploaded_file_url})
    return render(request, 'index.html')
def registerUser(request):
    """Create a User whose profile picture is named after the username."""
    if request.method == 'POST' and request.FILES['profile_image']:
        username = request.POST["username"]
        picture = request.FILES['profile_image']
        # Rename the upload so the recognition CLI can map file -> user.
        picture.name = username + ".jpeg"
        User.objects.create(username=username, profile_pic=picture)
        return render(request, 'index.html')
    return render(request, 'index.html')
def Scan(request):
    """Run the face_recognition CLI on the staged test image and report
    which registered users were matched."""
    if request.method =="POST":
        name_list = []
        # NOTE(review): os.path.join discards BASE_DIR when the second part
        # is absolute ('/media/...'); the '.' prepended below makes both
        # paths relative to the CWD instead - confirm this is intentional.
        unknown_pictures = os.path.join(settings.BASE_DIR,'/media/test_file')
        known_pictures = os.path.join(settings.BASE_DIR, '/media/profile_image')
        command = "face_recognition ."+known_pictures+" ."+unknown_pictures+""
        # Blocking shell call; output lines look like "<file>,<name>".
        out = os.popen(command).read()
        each_line = out.split("\n")
        each_line.remove("")
        for l in each_line:
            name = l.split(",")[1]
            name_list.append(name)
        return render(request, 'index.html',{'found':name_list})
    return render(request, 'index.html')
| [
"="
] | = |
a30e53410b1a70c2fc70a0b13427cb7c59495f57 | aa8fd4f9f75234a6f7afe68b372e115106ab7404 | /models.py | d3e8e161f3ab5cd95bbb98c6eabe09867da5792f | [] | no_license | philipbrowne/Flask-Feedback | de30a4d1f46b87d5ad52cb471c604576f07e5525 | a3252f0c874ba65f26aa290bb64b1820e44bf52d | refs/heads/main | 2023-07-09T16:55:50.205098 | 2021-08-22T13:22:54 | 2021-08-22T13:22:54 | 398,805,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
# Module-level extension singletons; bound to the Flask app in connect_db().
db = SQLAlchemy()
bcrypt = Bcrypt()
def connect_db(app):
    """Bind the shared SQLAlchemy instance to the given Flask app."""
    db.app = app
    db.init_app(app)
class User(db.Model):
    """Site user: credentials (bcrypt-hashed password) plus profile fields."""
    __tablename__ = 'users'
    username = db.Column(db.String(20), primary_key=True, unique=True)
    # Stores the bcrypt hash, never the plaintext password.
    password = db.Column(db.Text, nullable=False)
    email = db.Column(db.String(50), nullable=False, unique=True)
    first_name = db.Column(db.String(30), nullable=False)
    last_name = db.Column(db.String(30), nullable=False)
    is_admin = db.Column(db.Boolean, default=False)
    # Deleting a user also deletes their feedback (ORM-side cascade).
    feedback = db.relationship('Feedback', cascade = 'all,delete', backref='user')
    @classmethod
    def register(cls, username, password):
        """Return a new (unsaved) User with the password bcrypt-hashed."""
        hashed = bcrypt.generate_password_hash(password)
        # Turn bytestring into normal (unicode utf8) string
        hashed_utf8 = hashed.decode('utf8')
        # Return instance of user with username and hashed password
        return cls(username=username, password=hashed_utf8)
    @classmethod
    def authenticate(cls, username, password):
        """Validate that user exists and password is correct.

        Return the User if valid; otherwise return False.
        """
        # Queries for unique username from database
        user = User.query.filter_by(username=username).first()
        # If valid user and if password check lines up with database hash
        if user and bcrypt.check_password_hash(user.password, password):
            # Return User instance
            return user
        else:
            return False
    @property
    def full_name(self):
        # Convenience display name used by templates.
        return f'{self.first_name} {self.last_name}'
class Feedback(db.Model):
    """A titled piece of feedback text written by a user."""
    __tablename__ = 'feedback'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    title=db.Column(db.String(100), nullable=False)
    content=db.Column(db.Text, nullable=False)
    # FK follows username renames and disappears with the user (DB-side cascade).
    username = db.Column(db.String(20), db.ForeignKey('users.username', onupdate='CASCADE', ondelete='CASCADE'))
| [
"pbrowne@gmail.com"
] | pbrowne@gmail.com |
4ea17ff0ed36837eba853d24ac27432296dd4858 | 424fce3e0927fcb1715655d722d0ee087f3d249e | /src/machine_learning/simple_collaborative_filtering.py | 2b66c9a717fa12a7cf7b7ae067701d67681d9dee | [] | no_license | qazwsxedc121/myCodingPractise | a6c1e641f5f41bdc4746491ae1c3ab5504fbce50 | 0c3ea6c17ff9c59e8bf7e783e653095637d1bdde | refs/heads/master | 2020-05-20T13:21:53.446049 | 2015-12-24T09:39:23 | 2015-12-24T09:39:23 | 5,888,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | '''
Created on 2012-5-24
@author: guoxc
'''
import cPickle
import math
# Load the ratings fixture at import time (Python 2: cPickle / print stmt).
# SECURITY: unpickling executes arbitrary code - only load trusted files.
# NOTE(review): the file handle is never closed; consider a with-block.
DATA_FILE = open("../../data/recommendation.dat","r")
CRITICS = cPickle.load(DATA_FILE)
print CRITICS
def sim_distance(prefs, person1, person2):
    """Euclidean-distance similarity between two people (0 when no overlap)."""
    ratings1, ratings2 = prefs[person1], prefs[person2]
    shared = [item for item in ratings1 if item in ratings2]
    if not shared:
        return 0
    squared_diffs = sum((ratings1[item] - ratings2[item]) ** 2
                        for item in shared)
    # Map distance into (0, 1]: identical ratings give exactly 1.
    return 1 / (1 + math.sqrt(squared_diffs))
def sim_pearson(prefs, person1, person2):
    """Pearson correlation of two people's shared ratings (1 when no overlap)."""
    ratings1, ratings2 = prefs[person1], prefs[person2]
    shared = [item for item in ratings1 if item in ratings2]
    count = len(shared)
    if count == 0:
        return 1
    sum1 = sum(ratings1[it] for it in shared)
    sum2 = sum(ratings2[it] for it in shared)
    sum1_sq = sum(ratings1[it] ** 2 for it in shared)
    sum2_sq = sum(ratings2[it] ** 2 for it in shared)
    product_sum = sum(ratings1[it] * ratings2[it] for it in shared)
    numerator = product_sum - (sum1 * sum2 / count)
    denominator = math.sqrt((sum1_sq - sum1 ** 2 / count)
                            * (sum2_sq - sum2 ** 2 / count))
    # Zero variance on either side makes correlation undefined; report 0.
    if denominator == 0:
        return 0
    return numerator / denominator
def sim_tanimoto(prefs, person1, person2):
    """Tanimoto (Jaccard) similarity of the two people's rated-item sets."""
    overlap = [item for item in prefs[person1] if item in prefs[person2]]
    union_size = len(prefs[person1]) + len(prefs[person2]) - len(overlap)
    return len(overlap) / union_size
def top_matches(prefs, person, length=5, similarity=sim_pearson):
    """Return the `length` people most similar to `person`, best first."""
    scores = sorted(
        ((similarity(prefs, person, other), other)
         for other in prefs if other != person),
        reverse=True)
    return scores[:length]
def get_recommendations(prefs, person, similarity=sim_pearson):
    """Rank items `person` hasn't rated by similarity-weighted average score."""
    totals = {}
    sim_sums = {}
    for other in prefs:
        if other == person:
            continue
        sim = similarity(prefs, person, other)
        # Negative / zero similarity contributes nothing.
        if sim <= 0:
            continue
        for item, rating in prefs[other].items():
            # Only score items the target person hasn't (meaningfully) rated.
            if item not in prefs[person] or prefs[person][item] == 0:
                totals[item] = totals.get(item, 0) + rating * sim
                sim_sums[item] = sim_sums.get(item, 0) + sim
    # Normalize each weighted total by the similarity mass behind it.
    rankings = sorted(
        ((total / sim_sums[item], item) for item, total in totals.items()),
        reverse=True)
    return rankings
def transform_prefs(prefs):
    """Flip a person -> {item: score} mapping into item -> {person: score}."""
    flipped = {}
    for person, ratings in prefs.items():
        for item, score in ratings.items():
            flipped.setdefault(item, {})[person] = score
    return flipped
"qazwsxedc121@gmail.com"
] | qazwsxedc121@gmail.com |
baa00b77f98b43f0e30cfeaaba2b2e7bb4543308 | 21429a1f4df1540845003f078e518ac04f198872 | /05/test2.py | 2fde9988eba334193f3144d30ccbbcf1e8e4d7f8 | [] | no_license | MonikaMudr/pyladies | c905bad6295e5b8c690547bb7f31c145f850b03f | c49c43bb622d83d61db80087d5d41b92f4906da5 | refs/heads/master | 2020-05-04T12:42:17.724104 | 2019-08-21T21:24:08 | 2019-08-21T21:24:08 | 179,130,869 | 0 | 0 | null | 2019-06-13T19:32:06 | 2019-04-02T17:55:43 | Python | UTF-8 | Python | false | false | 576 | py | from turtle import forward, right, left, exitonclick
from math import sqrt
def domecek(delka_strany):
    "Draw a 'house' (square plus pointed roof) with the given side length."
    # Roof diagonal and half-diagonal from the square's geometry.
    delka_sikme_strany = sqrt(2)*delka_strany
    delka_strecha = delka_sikme_strany/2
    # Square with an inscribed diagonal.
    left(90)
    forward(delka_strany)
    right(90)
    forward(delka_strany)
    right(135)
    forward(delka_sikme_strany)
    left(135)
    forward(delka_strany)
    left(90)
    forward(delka_strany)
    # Roof: two half-diagonals meeting at the apex, then the base diagonal.
    left(45)
    forward(delka_strecha)
    left(90)
    forward(delka_strecha)
    left(90)
    forward(delka_sikme_strany)
    left(45)
    forward(10)
    # NOTE(review): exitonclick() blocks inside the drawing function, so a
    # second domecek() call would never run - confirm it belongs here.
    exitonclick()
# Draw one house with a 200 px side.
domecek(200)
"monika.mudrochova@gmail.com"
] | monika.mudrochova@gmail.com |
7df9a005cbca7cca42428102b7a21ed5d229a0fa | 0966ff71ff056f5bb026bd30bc6e647d87816e01 | /TEST/python_basic/shake_number.py | ca9d2d4dde9e47ecd8b6312ee509b886fce9b89a | [] | no_license | LynTara/PYTHON | 9b7dfa1fc58964d92b33bba204b600a938f5df72 | 3036f5c57fcfefd9528ee24e998dd3f303b25861 | refs/heads/master | 2023-02-20T03:20:19.801779 | 2021-01-26T08:45:21 | 2021-01-26T08:45:21 | 285,955,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | #允许用户选3次
# Each round shows 20 candidate plates for the user to choose from.
# Plate format: 京<A-Z>.<5 chars drawn from letters and digits>.
# random.choice()
# random.sample()
# random.randint()
# helpers used: random module, string module, str.join()
import random
import string
count = 0
# The user gets at most 3 rounds to pick a valid plate.
while count < 3:
    car_num = []
    # Generate this round's 20 candidate plate numbers.
    for i in range(20):
        n1 = random.choice(string.ascii_uppercase)
        n2 = "".join(random.sample(string.ascii_letters+string.digits,5))
        c_num = (f"京{n1}.{n2}")
        car_num.append(c_num)
        print(c_num)
    choice = input("输入你喜欢的号").strip()#strip() drops surrounding spaces/newlines
    if choice in car_num:
        print("恭喜你选择新车牌")
        exit("good lucks")
    else:
        print("不合法的选择....")
        count += 1
| [
"Lyn_123456"
] | Lyn_123456 |
0e8291c913d42d6d47aa129d4403133d044afa4f | ac216a2cc36f91625e440247986ead2cd8cce350 | /go/src/infra/tools/vpython/testdata/test_requests_get.py | f36999bde734744de8ae72fa434816c8ed2cfa45 | [
"BSD-3-Clause"
] | permissive | xinghun61/infra | b77cdc566d9a63c5d97f9e30e8d589982b1678ab | b5d4783f99461438ca9e6a477535617fadab6ba3 | refs/heads/master | 2023-01-12T21:36:49.360274 | 2019-10-01T18:09:22 | 2019-10-01T18:09:22 | 212,168,656 | 2 | 1 | BSD-3-Clause | 2023-01-07T10:18:03 | 2019-10-01T18:22:44 | Python | UTF-8 | Python | false | false | 544 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cryptography
import requests
# Python 2 smoke test: report installed versions, then perform one real
# HTTPS request to verify requests/cryptography work in this environment.
SITE = 'https://www.google.com'
print 'Using requests version:', requests.__version__
print 'Using cryptography version:', cryptography.__version__
print 'Testing requests from:', SITE
r = requests.get(SITE)
print 'Status Code:', r.status_code
if len(r.text) == 0:
  print 'Content length is zero!'
else:
  print 'Content length is non-zero.'
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
002d9a0920e58597151b16f08009518dd6aedb01 | c37609e30866ab7c1bfc0fb7f4ca865df3c26cc9 | /Anderson_Darlington_Test.py | 128ecb0484e8369fab730eccf49f73c002e68fa8 | [] | no_license | theanandankit/Cooperative-Spectrum-Sensing | 8ab8d74c50be2639d163f4510c7d056207007253 | 31968ecffbfd8648cd14fc4dd2bc7b96b82dbbdf | refs/heads/master | 2023-08-19T17:10:05.714499 | 2021-10-23T19:16:41 | 2021-10-23T19:16:41 | 414,876,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | import math
from math import exp , sqrt , log
from sys import exit , stderr
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from scipy.stats import norm
M = 10000
m_array=[7,11,14]
N=list(range(3))
for i in range(3):
N[i]=2*m_array[i]
n=N[i]
Pf_ad = [0.001,0.0029,0.005,0.0062,0.0078,0.01,0.0154,0.025,0.0346,0.05,0.0632,0.081,0.1,0.1186,0.15,0.2027,0.2676,0.3573,0.4142,0.4463,0.481,0.5185,0.5588,0.5801,0.607,0.6247,0.648,0.7468,0.8487,0.9382,0.9904,0.9997];
snr_avgdB = -2;
snr_avg = pow(10,snr_avgdB/10);
Pd_sim_ad=list(range(len(Pf_ad)))
AD_test=list(range(len(Pf_ad)))
for j in range(len(Pf_ad)):
des=0
c_value_ad = [6.000,5,4.500,4.3,4.1,3.857,3.5,3.070,2.8,2.4920,2.3,2.1,1.933,1.8,1.610,1.4,1.2,1,0.9,0.850,0.8,0.75,0.7,0.675,0.650,0.625,0.6,0.5,0.4,0.3,0.2,0.125];
for kk in list(range(M)):
m=1
noise=np.random.randn(1,N[i])
q=math.sqrt(snr_avg)
x = np.add(q,noise)
x=x[:]
x=np.sort(x)
fx=norm(0,1).cdf(x)
print(fx)
e=[w for w in range(1,n+1)]
temp_fx=fx[0:n]
S=np.sum(np.multiply(np.subtract(np.multiply(2,e),1),np.add(np.log(temp_fx),np.log(np.subtract(1,temp_fx[:,::-1])))))
#print(S)
AD_test[j] = -n-(S/n)
print(AD_test[j])
if AD_test[j] >= c_value_ad[j]:
des = des + 1
Pd_sim_ad[j] = des/M
#print(S)
#print(AD_test)
#print(Pd_sim_ad)
plt.plot(Pf_ad,Pd_sim_ad)
plt.xlabel('False Alarm')
plt.ylabel('Probability of Detection')
plt.savefig('AD_ROC.PNG') | [
"ankitanand2909@gmail.com"
] | ankitanand2909@gmail.com |
80c9278d853e0dae42d1405cbe2fdc3c938b0df3 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/dcemulticastmacrange_cc6bbd6d9c9c0e88f6e630afd3a84823.py | 09fd8e288e4cbb7a1e6c8a322287c74d41fefd37 | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,198 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class DceMulticastMacRange(Base):
"""Sets the Multicast MAC Range for the DCE ISIS router.
The DceMulticastMacRange class encapsulates a list of dceMulticastMacRange resources that are managed by the user.
A list of resources can be retrieved from the server using the DceMulticastMacRange.find() method.
The list can be managed by using the DceMulticastMacRange.add() and DceMulticastMacRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'dceMulticastMacRange'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'InterGroupUnicastMacIncrement': 'interGroupUnicastMacIncrement',
'IntraGroupUnicastMacIncrement': 'intraGroupUnicastMacIncrement',
'MulticastMacCount': 'multicastMacCount',
'MulticastMacStep': 'multicastMacStep',
'SourceGroupMapping': 'sourceGroupMapping',
'StartMulticastMac': 'startMulticastMac',
'StartUnicastSourceMac': 'startUnicastSourceMac',
'Topology': 'topology',
'UnicastSourcesPerMulticastMac': 'unicastSourcesPerMulticastMac',
'VlanId': 'vlanId',
}
_SDM_ENUM_MAP = {
'sourceGroupMapping': ['fullyMeshed', 'oneToOne', 'manualMapping'],
}
def __init__(self, parent, list_op=False):
super(DceMulticastMacRange, self).__init__(parent, list_op)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, enables the Multicast MAC Range for a particular DCE ISIS route range. (default = false)
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def InterGroupUnicastMacIncrement(self):
# type: () -> str
"""
Returns
-------
- str: The MAC address format of the Unicast MAC between one or more node groups. (Default = 00 00 00 00 00)
"""
return self._get_attribute(self._SDM_ATT_MAP['InterGroupUnicastMacIncrement'])
@InterGroupUnicastMacIncrement.setter
def InterGroupUnicastMacIncrement(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InterGroupUnicastMacIncrement'], value)
@property
def IntraGroupUnicastMacIncrement(self):
# type: () -> str
"""
Returns
-------
- str: The MAC address format of the Unicast MAC within a node group. (Default = 00 00 00 00 01)
"""
return self._get_attribute(self._SDM_ATT_MAP['IntraGroupUnicastMacIncrement'])
@IntraGroupUnicastMacIncrement.setter
def IntraGroupUnicastMacIncrement(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['IntraGroupUnicastMacIncrement'], value)
@property
def MulticastMacCount(self):
# type: () -> int
"""
Returns
-------
- number: The number of Multicast MAC addresses. This option takes unsigned integer value ranging from 1 to UINT_MAX.
"""
return self._get_attribute(self._SDM_ATT_MAP['MulticastMacCount'])
@MulticastMacCount.setter
def MulticastMacCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['MulticastMacCount'], value)
@property
def MulticastMacStep(self):
# type: () -> str
"""
Returns
-------
- str: The incremental value of Multicast MAC address. (Default = 00 00 00 00 01)
"""
return self._get_attribute(self._SDM_ATT_MAP['MulticastMacStep'])
@MulticastMacStep.setter
def MulticastMacStep(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['MulticastMacStep'], value)
@property
def SourceGroupMapping(self):
# type: () -> str
"""
Returns
-------
- str(fullyMeshed | oneToOne | manualMapping): The Source Group mapping type.
"""
return self._get_attribute(self._SDM_ATT_MAP['SourceGroupMapping'])
@SourceGroupMapping.setter
def SourceGroupMapping(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['SourceGroupMapping'], value)
@property
def StartMulticastMac(self):
# type: () -> str
"""
Returns
-------
- str: The MAC address format of the starting Multicast MAC. (Default = 0x01000000)
"""
return self._get_attribute(self._SDM_ATT_MAP['StartMulticastMac'])
@StartMulticastMac.setter
def StartMulticastMac(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['StartMulticastMac'], value)
@property
def StartUnicastSourceMac(self):
# type: () -> str
"""
Returns
-------
- str: The MAC address format of the starting Unicast Source MAC. (Default = 00 00 00 00 00 00)
"""
return self._get_attribute(self._SDM_ATT_MAP['StartUnicastSourceMac'])
@StartUnicastSourceMac.setter
def StartUnicastSourceMac(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['StartUnicastSourceMac'], value)
@property
def Topology(self):
# type: () -> int
"""
Returns
-------
- number: The topology identifier to which the corresponding MAC belongs.
"""
return self._get_attribute(self._SDM_ATT_MAP['Topology'])
@Topology.setter
def Topology(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Topology'], value)
@property
def UnicastSourcesPerMulticastMac(self):
# type: () -> int
"""
Returns
-------
- number: The number of Unicast Source for each Multicast MAC address. This option takes unsigned integer value ranging from 0 to UINT_MAX.
"""
return self._get_attribute(self._SDM_ATT_MAP['UnicastSourcesPerMulticastMac'])
@UnicastSourcesPerMulticastMac.setter
def UnicastSourcesPerMulticastMac(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['UnicastSourcesPerMulticastMac'], value)
@property
def VlanId(self):
# type: () -> int
"""
Returns
-------
- number: The VLAN ID of the enabled Multicast MAC Range. (default = 1)
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanId'])
@VlanId.setter
def VlanId(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['VlanId'], value)
def update(self, Enabled=None, InterGroupUnicastMacIncrement=None, IntraGroupUnicastMacIncrement=None, MulticastMacCount=None, MulticastMacStep=None, SourceGroupMapping=None, StartMulticastMac=None, StartUnicastSourceMac=None, Topology=None, UnicastSourcesPerMulticastMac=None, VlanId=None):
# type: (bool, str, str, int, str, str, str, str, int, int, int) -> DceMulticastMacRange
"""Updates dceMulticastMacRange resource on the server.
Args
----
- Enabled (bool): If true, enables the Multicast MAC Range for a particular DCE ISIS route range. (default = false)
- InterGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC between one or more node groups. (Default = 00 00 00 00 00)
- IntraGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC within a node group. (Default = 00 00 00 00 01)
- MulticastMacCount (number): The number of Multicast MAC addresses. This option takes unsigned integer value ranging from 1 to UINT_MAX.
- MulticastMacStep (str): The incremental value of Multicast MAC address. (Default = 00 00 00 00 01)
- SourceGroupMapping (str(fullyMeshed | oneToOne | manualMapping)): The Source Group mapping type.
- StartMulticastMac (str): The MAC address format of the starting Multicast MAC. (Default = 0x01000000)
- StartUnicastSourceMac (str): The MAC address format of the starting Unicast Source MAC. (Default = 00 00 00 00 00 00)
- Topology (number): The topology identifier to which the corresponding MAC belongs.
- UnicastSourcesPerMulticastMac (number): The number of Unicast Source for each Multicast MAC address. This option takes unsigned integer value ranging from 0 to UINT_MAX.
- VlanId (number): The VLAN ID of the enabled Multicast MAC Range. (default = 1)
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Enabled=None, InterGroupUnicastMacIncrement=None, IntraGroupUnicastMacIncrement=None, MulticastMacCount=None, MulticastMacStep=None, SourceGroupMapping=None, StartMulticastMac=None, StartUnicastSourceMac=None, Topology=None, UnicastSourcesPerMulticastMac=None, VlanId=None):
# type: (bool, str, str, int, str, str, str, str, int, int, int) -> DceMulticastMacRange
"""Adds a new dceMulticastMacRange resource on the server and adds it to the container.
Args
----
- Enabled (bool): If true, enables the Multicast MAC Range for a particular DCE ISIS route range. (default = false)
- InterGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC between one or more node groups. (Default = 00 00 00 00 00)
- IntraGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC within a node group. (Default = 00 00 00 00 01)
- MulticastMacCount (number): The number of Multicast MAC addresses. This option takes unsigned integer value ranging from 1 to UINT_MAX.
- MulticastMacStep (str): The incremental value of Multicast MAC address. (Default = 00 00 00 00 01)
- SourceGroupMapping (str(fullyMeshed | oneToOne | manualMapping)): The Source Group mapping type.
- StartMulticastMac (str): The MAC address format of the starting Multicast MAC. (Default = 0x01000000)
- StartUnicastSourceMac (str): The MAC address format of the starting Unicast Source MAC. (Default = 00 00 00 00 00 00)
- Topology (number): The topology identifier to which the corresponding MAC belongs.
- UnicastSourcesPerMulticastMac (number): The number of Unicast Source for each Multicast MAC address. This option takes unsigned integer value ranging from 0 to UINT_MAX.
- VlanId (number): The VLAN ID of the enabled Multicast MAC Range. (default = 1)
Returns
-------
- self: This instance with all currently retrieved dceMulticastMacRange resources using find and the newly added dceMulticastMacRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained dceMulticastMacRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Enabled=None, InterGroupUnicastMacIncrement=None, IntraGroupUnicastMacIncrement=None, MulticastMacCount=None, MulticastMacStep=None, SourceGroupMapping=None, StartMulticastMac=None, StartUnicastSourceMac=None, Topology=None, UnicastSourcesPerMulticastMac=None, VlanId=None):
# type: (bool, str, str, int, str, str, str, str, int, int, int) -> DceMulticastMacRange
"""Finds and retrieves dceMulticastMacRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dceMulticastMacRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dceMulticastMacRange resources from the server.
Args
----
- Enabled (bool): If true, enables the Multicast MAC Range for a particular DCE ISIS route range. (default = false)
- InterGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC between one or more node groups. (Default = 00 00 00 00 00)
- IntraGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC within a node group. (Default = 00 00 00 00 01)
- MulticastMacCount (number): The number of Multicast MAC addresses. This option takes unsigned integer value ranging from 1 to UINT_MAX.
- MulticastMacStep (str): The incremental value of Multicast MAC address. (Default = 00 00 00 00 01)
- SourceGroupMapping (str(fullyMeshed | oneToOne | manualMapping)): The Source Group mapping type.
- StartMulticastMac (str): The MAC address format of the starting Multicast MAC. (Default = 0x01000000)
- StartUnicastSourceMac (str): The MAC address format of the starting Unicast Source MAC. (Default = 00 00 00 00 00 00)
- Topology (number): The topology identifier to which the corresponding MAC belongs.
- UnicastSourcesPerMulticastMac (number): The number of Unicast Source for each Multicast MAC address. This option takes unsigned integer value ranging from 0 to UINT_MAX.
- VlanId (number): The VLAN ID of the enabled Multicast MAC Range. (default = 1)
Returns
-------
- self: This instance with matching dceMulticastMacRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dceMulticastMacRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dceMulticastMacRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
5638da78645f931f0008807512fd5ddc9b58d72f | 4a67830dd5bf60007a86ad659e766632d0c6563e | /src/day6/day6.py | 89aaf988ecadf59540dc9fcfbd9db7521ecb1a82 | [] | no_license | oliphantee/adventOfCode2020 | 3a76e3d920dfcfe9b09c9fd5890dcf313293f426 | 4590caa7e94d265a23851a1a1bb6431392c8c952 | refs/heads/master | 2023-02-19T05:13:14.803037 | 2021-01-14T17:32:36 | 2021-01-14T17:32:36 | 323,707,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | import copy as cp
inputFile=open("input.txt")
allGroups=[]
oneGroup=set()
for line in inputFile:
if line=="\n":
allGroups+=[oneGroup]
oneGroup=set()
else:
for letter in line.strip("\n"):
oneGroup.add(letter)
allGroups+=[oneGroup]
totCount=0
for group in allGroups:
totCount+=len(group)
print(totCount)
inputFile=open("input.txt")
allGroupsPartB=[]
oneGroupPartB=set(("a","b","c","d","e","f","g","h","i","j","k","l","m",'n','o','p','q','r','s','t','u','v','w','x','y','z'))
for line in inputFile:
#print([line])
if line=="\n":
allGroupsPartB+=[oneGroupPartB]
oneGroupPartB=set(("a","b","c","d","e","f","g","h","i","j","k","l","m",'n','o','p','q','r','s','t','u','v','w','x','y','z'))
else:
newGroup=cp.deepcopy(oneGroupPartB)
for i in oneGroupPartB:
if i not in line and i in newGroup:
#print(i)
newGroup.remove(i)
oneGroupPartB=newGroup
allGroupsPartB+=[oneGroupPartB]
totCountB=0
for group in allGroupsPartB:
#print(len(group))
totCountB+=len(group)
print(totCountB)
#print(allGroupsPartB) | [
"eeo003@bucknell.edu"
] | eeo003@bucknell.edu |
38c8a5339b916969a3a3037a5f2472ad4598d5cc | 93c9f4da0e821bf7607a4c20c12100296849cad2 | /Q86_partition.py | 149344cf8fc14f107751ff202f7a5e0cafd5ca24 | [] | no_license | iamtonyxu/91alg_2_iamtony | fc5604c5d2142c981934c3889388b99a1fb21a2a | 41fa4d6a868d4f95f169eebf96d34f9cc5f0b744 | refs/heads/main | 2023-06-19T11:31:29.688505 | 2021-07-16T08:25:41 | 2021-07-16T08:25:41 | 308,189,238 | 0 | 0 | null | 2021-07-16T08:25:42 | 2020-10-29T01:53:04 | Python | UTF-8 | Python | false | false | 1,429 | py | '''
给你一个链表的头节点 head 和一个特定值 x ,请你对链表进行分隔,使得所有 小于 x 的节点都出现在 大于或等于 x 的节点之前。
你应当 保留 两个分区中每个节点的初始相对位置。
Example-1.
输入:head = [1,4,3,2,5,2], x = 3
输出:[1,2,2,4,3,5]
Example-2.
输入:head = [2,1], x = 2
输出:[1,2]
'''
from ListNodePkg import ListNode, genListNode
class Solution:
def partition(self, head: ListNode, x: int) -> ListNode:
# 遍历所有节点:
# (1)所有小于x的节点放在新链表list1
# (2)所有大于或等于x的节点都放在新链表list2
# (3)list1的尾节点指向list2的头节点
dummy1 = head1 = ListNode(float('-inf'))
dummy2 = head2 = ListNode(float('-inf'))
while head:
if head.val < x:
head1.next = ListNode(head.val)
head1 = head1.next
else:
head2.next = ListNode(head.val)
head2 = head2.next
head = head.next
head1.next = dummy2.next
return dummy1.next
if __name__ == "__main__":
numList = [[1, 4, 3, 2, 5, 2], [2, 1]]
for nums in numList:
list1 = genListNode(nums)
obj = Solution()
list2 = obj.partition(list1, 3)
head = list2
while head:
print(head.val)
head = head.next
| [
"tony.xu@analog.com"
] | tony.xu@analog.com |
64d3ee1b2b63bf47956a16eb239dd218474a9fad | 1a55572a16c3e34c72630043af7ab3d05bafae8c | /Celery/celery_with_django/mysite/core/tasks.py | 877830411a479b583d31addcbbe10ff041244a19 | [] | no_license | Wald-K/Technologies | 1e2720a05083ba24da3d1761088d0cab4a6e9658 | 8cc2b27fbe5e6c860342fd46b251680899d111bb | refs/heads/master | 2023-01-12T14:11:29.204133 | 2021-01-25T21:54:45 | 2021-01-25T21:54:45 | 196,719,173 | 0 | 0 | null | 2023-01-07T20:03:47 | 2019-07-13T12:28:18 | Python | UTF-8 | Python | false | false | 581 | py | import string
import time
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
from celery import shared_task
@shared_task
def create_random_user_accounts(total):
for i in range(total):
username = 'user_{}'.format(get_random_string(10, string.ascii_letters))
email = '{}@example.com'.format(username)
password = get_random_string(50)
User.objects.create_user(username=username, email=email, password=password)
time.sleep(10)
return '{} random users created with success'.format(total)
| [
"1000druid@gmail.com"
] | 1000druid@gmail.com |
c3dc17eebb3ba445f2e63f01b977d93a793a462c | b2649b1b0c632201cfa8131a25762faf0b1e653d | /excelplay_dalalbull/excelplay_dalalbull/__init__.py | 8d49d3636be3e61ff0e404c1fa5da5896df08f4a | [] | no_license | Excel-MEC/excelplay-dalalbull | c75a54e380bdb22946e0cde3fea3ca43c9a7fdfa | 5e76858f831bb8d9f408c6f6e940bbd164ece47b | refs/heads/master | 2022-05-13T00:36:07.239378 | 2021-01-12T13:29:00 | 2021-01-12T13:29:00 | 127,179,351 | 4 | 6 | null | 2022-04-22T21:03:28 | 2018-03-28T18:06:20 | Python | UTF-8 | Python | false | false | 263 | py | from __future__ import absolute_import, unicode_literals
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
print("Initializing celery_app")
__all__ = ['celery_app'] | [
"augustinetharakan12@gmail.com"
] | augustinetharakan12@gmail.com |
22681a74a3ab2ea419097925d4179db9200336c0 | f064eb964e2cf77bcaf60aee411ed1a32e2aa0dd | /list_stack.py | 1b6739fd9a777e6bb52a06e930195e55ba3c3887 | [] | no_license | ZxwZero/list_stack | 8efa9857fcc39c7aa847d5ef8e0345355f64d883 | 9eafd25bdf6eec5e0fd3f3ff8b0c242adf256004 | refs/heads/master | 2020-06-13T12:17:16.480322 | 2019-07-01T10:44:14 | 2019-07-01T10:44:14 | 194,651,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | # coding=utf-8
# class ListStack:
# def __init__(self):
# self._data = [1,2]
# def __len__(self):
# return len(self._data)
# def is_empty(self):
# return len(self._data) == 0
# def l_pop(self): # 从栈顶取数据
# if self.is_empty():
# return None
# return self._data.pop(0)
# def r_pop(self): # 从栈底取数据
# if self.is_empty():
# return None
# return self._data.pop(-1)
# def l_push(self, e): #压入栈底
# self._data.append(e)
# return self._data
# def r_push(self, e): # 压入栈顶
# self._data.insert(0,e)
# return self._data
class RightStack():
def __init__(st,size):
st.stack=[]
st.size=size
st.top=-1
def empty(st):
if st.top==-1:
return True
else:
return False
def full(st):
if st.top==st.size:
return True
else:
return False
def r_push(st,data):
if st.full():
print "RightStack is full"
else:
if isinstance(data, int):
st.stack.append(data)
st.top=st.top+1
def r_pop(st):
if st.empty():
print "RightStack is empty"
else:
st.top=st.top-1
class LeftStack():
def __init__(st,size):
st.stack=[]
st.size=size
st.top=-1
def empty(st):
if st.top==-1:
return True
else:
return False
def full(st):
if st.top==st.size:
return True
else:
return False
def l_push(st,data):
if st.full():
print "LeftStack is full"
else:
if isinstance(data, int):
st.stack.insert(0, data)
st.top=st.top+1
def l_pop(st):
if st.empty():
print "LeftStack is empty"
else:
for i in xrange(1, len(st.stack)):
st.stack[i-1] = st.stack[i]
st.top=st.top-1
| [
"zxwzero@foxmail.com"
] | zxwzero@foxmail.com |
7e8b76a8c25b3c730f1070160fc8743f76f2462e | 1eb0fcebe9fe5446256966ab79e3e56bda08edb1 | /imgfind/utils/color.py | 02fb80ae112c90c9ec486d0f33525cc426254457 | [
"WTFPL"
] | permissive | h5vx/imgfind | 14c1c14abf73280f34b9465863f2868bcb534bcb | 66a033a411424c62c08767dfce156b7a71d14e17 | refs/heads/master | 2022-12-26T18:12:08.721139 | 2020-10-02T21:48:04 | 2020-10-02T21:48:04 | 299,069,288 | 1 | 1 | WTFPL | 2020-10-02T21:48:05 | 2020-09-27T16:11:33 | Python | UTF-8 | Python | false | false | 626 | py | # coding=utf-8
from typing import Tuple, Union
import numpy as np
from skimage.color import rgb_colors
# TODO: documentation
def color_from_str(color_str: str) -> Tuple[Union[int, float]]:
result = rgb_colors.__dict__.get(color_str)
if result:
return result
if len(color_str) == 3:
color_str = "".join(c * 2 for c in color_str) # "123" -> "112233"
if len(color_str) == 6:
try:
rgb = tuple(bytes.fromhex(color_str))
return tuple(np.float32(rgb) / 255)
except ValueError:
pass
raise ValueError("Unknown color specification format")
| [
"h5v@protonmail.com"
] | h5v@protonmail.com |
ed141fb02a1f77848fde5e95b506ff74000abcfa | b1e50e5898ca61af7d9f2032298cf4323717d120 | /models.py | 2bafec24e473d798352d7efe4a4b6224472b4a37 | [] | no_license | CassandraGoose/crudpython | 87f2c6daab29b3387a135c3ebee62a113bf07492 | d74f11d0faf8379187da1c952cfb26d7d4f60f95 | refs/heads/master | 2020-12-31T05:41:06.392139 | 2017-02-01T22:47:19 | 2017-02-01T22:47:19 | 80,652,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from app import db
from sqlalchemy.dialects.postgresql import JSON
class Movies(db.Model):
__tablename__ = 'movies'
id = db.Column(db.Integer, primary_key=True)
title = db.Column('title', db.String())
genre = db.Column('genre', db.String())
def __init__(self, title, genre):
self.title = title
self.genre = genre
def __repr__(self):
return '<id {}>'.format(self.id)
| [
"cassandra.torske@gmail.com"
] | cassandra.torske@gmail.com |
7281dcc2e0fc2e7ec804619aff65c7fa8014fb67 | e12ba2481e3cc003e132b6fb78687af618233680 | /src/netview/wsgi.py | 50af558d7f908e7b33377b57bb73fbe94f386c35 | [] | no_license | danielgiampaolo/CIS4930_Python | f23edee76aed6a0e08de365e935a69cd40d0c313 | 036b3ad831ddcd3171fc2fe59f169c30564a1b0f | refs/heads/master | 2022-11-23T05:27:23.419842 | 2020-08-02T02:44:12 | 2020-08-02T02:44:12 | 272,823,751 | 0 | 0 | null | 2020-08-01T19:10:56 | 2020-06-16T22:21:42 | Python | UTF-8 | Python | false | false | 391 | py | """
WSGI config for netview project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'netview.settings')
application = get_wsgi_application()
| [
"enzoegues@yahoo.com"
] | enzoegues@yahoo.com |
64f4bcee8ef7c6bbb13c5da72126ef1407138a4c | 495f252e6e1c7fcc7e24b28693df403e0116de92 | /tx.py | 26a405dbfad7d3817c4526c48c2e1ad684f48145 | [] | no_license | twshelton/did-hackathon-2018 | c7bff5bb8c8014e30bef6a566d1962cf8adadf8f | f0e550c2e0a2736d785096ed297ddcecc29d445d | refs/heads/master | 2020-03-23T11:32:58.392311 | 2018-07-19T01:42:12 | 2018-07-19T01:42:12 | 141,459,685 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,210 | py | from io import BytesIO
from unittest import TestCase
import random
import requests
from ecc import PrivateKey, S256Point, Signature
from helper import (
decode_base58,
double_sha256,
encode_varint,
int_to_little_endian,
little_endian_to_int,
p2pkh_script,
read_varint,
SIGHASH_ALL,
)
from script import Script
class Tx:
def __init__(self, version, tx_ins, tx_outs, locktime, testnet=False):
self.version = version
self.tx_ins = tx_ins
self.tx_outs = tx_outs
self.locktime = locktime
self.testnet = testnet
def __repr__(self):
tx_ins = ''
for tx_in in self.tx_ins:
tx_ins += tx_in.__repr__() + '\n'
tx_outs = ''
for tx_out in self.tx_outs:
tx_outs += tx_out.__repr__() + '\n'
return 'version: {}\ntx_ins:\n{}\ntx_outs:\n{}\nlocktime: {}\n'.format(
self.version,
tx_ins,
tx_outs,
self.locktime,
)
@classmethod
def parse(cls, s):
'''Takes a byte stream and parses the transaction at the start
return a Tx object
'''
# s.read(n) will return n bytes
# version has 4 bytes, little-endian, interpret as int
version = little_endian_to_int(s.read(4))
# num_inputs is a varint, use read_varint(s)
num_inputs = read_varint(s)
# each input needs parsing
inputs = []
for _ in range(num_inputs):
inputs.append(TxIn.parse(s))
# num_outputs is a varint, use read_varint(s)
num_outputs = read_varint(s)
# each output needs parsing
outputs = []
for _ in range(num_outputs):
outputs.append(TxOut.parse(s))
# locktime is 4 bytes, little-endian
locktime = little_endian_to_int(s.read(4))
# return an instance of the class (cls(...))
return cls(version, inputs, outputs, locktime)
def serialize(self):
'''Returns the byte serialization of the transaction'''
# serialize version (4 bytes, little endian)
result = int_to_little_endian(self.version, 4)
# encode_varint on the number of inputs
result += encode_varint(len(self.tx_ins))
# iterate inputs
for tx_in in self.tx_ins:
# serialize each input
result += tx_in.serialize()
# encode_varint on the number of inputs
result += encode_varint(len(self.tx_outs))
# iterate outputs
for tx_out in self.tx_outs:
# serialize each output
result += tx_out.serialize()
# serialize locktime (4 bytes, little endian)
result += int_to_little_endian(self.locktime, 4)
return result
def fee(self):
'''Returns the fee of this transaction in satoshi'''
# initialize input sum and output sum
input_sum, output_sum = 0, 0
# iterate through inputs
for tx_in in self.tx_ins:
# for each input get the value and add to input sum
input_sum += tx_in.value()
# iterate through outputs
for tx_out in self.tx_outs:
# for each output get the amount and add to output sum
output_sum += tx_out.amount
# return input sum - output sum
return input_sum - output_sum
def sig_hash(self, input_index, hash_type):
'''Returns the integer representation of the hash that needs to get
signed for index input_index'''
# create a new set of tx_ins (alt_tx_ins)
alt_tx_ins = []
# iterate over self.tx_ins
for tx_in in self.tx_ins:
# create a new TxIn that has a blank script_sig (b'') and add to alt_tx_ins
alt_tx_ins.append(TxIn(
prev_tx=tx_in.prev_tx,
prev_index=tx_in.prev_index,
script_sig=b'',
sequence=tx_in.sequence,
))
# grab the input at the input_index
signing_input = alt_tx_ins[input_index]
# grab the script_pubkey of the input
script_pubkey = signing_input.script_pubkey(self.testnet)
# the script_sig of the signing_input should be script_pubkey
signing_input.script_sig = script_pubkey
# create an alternate transaction with the modified tx_ins
alt_tx = self.__class__(
version=self.version,
tx_ins=alt_tx_ins,
tx_outs=self.tx_outs,
locktime=self.locktime)
# add the hash_type int 4 bytes, little endian
result = alt_tx.serialize() + int_to_little_endian(hash_type, 4)
# get the double_sha256 of the tx serialization
s256 = double_sha256(result)
# convert this to a big-endian integer using int.from_bytes(x, 'big')
return int.from_bytes(s256, 'big')
def verify_input(self, input_index):
'''Returns whether the input has a valid signature'''
# get the relevant input
tx_in = self.tx_ins[input_index]
# parse the point from the sec format (tx_in.sec_pubkey())
point = S256Point.parse(tx_in.sec_pubkey())
# parse the signature from the der format (tx_in.der_signature())
signature = Signature.parse(tx_in.der_signature())
# get the hash type from the input (tx_in.hash_type())
hash_type = tx_in.hash_type()
# get the sig_hash (z)
z = self.sig_hash(input_index, hash_type)
# use point.verify on the z and signature
return point.verify(z, signature)
def sign_input(self, input_index, private_key, hash_type):
'''Signs the input using the private key'''
# get the hash to sign
z = self.sig_hash(input_index, hash_type)
# get der signature of z from private key
der = private_key.sign(z).der()
# append the hash_type to der (use hash_type.to_bytes(1, 'big'))
sig = der + hash_type.to_bytes(1, 'big')
# calculate the sec
sec = private_key.point.sec()
# initialize a new script with [sig, sec] as the elements
script_sig = Script([sig, sec])
# change input's script_sig to new script
self.tx_ins[input_index].script_sig = script_sig
# return whether sig is valid using self.verify_input
return self.verify_input(input_index)
def is_coinbase(self):
'''Returns whether this transaction is a coinbase transaction or not'''
# check that there is exactly 1 input
if len(self.tx_ins) != 1:
return False
# grab the first input
first_input = self.tx_ins[0]
# check that first input prev_tx is b'\x00' * 32 bytes
if first_input.prev_tx != b'\x00' * 32:
return False
# check that first input prev_index is 0xffffffff
if first_input.prev_index != 0xffffffff:
return False
return True
def coinbase_height(self):
    '''Returns the height of the block this coinbase transaction is in
    Returns None if this transaction is not a coinbase transaction
    '''
    # only a coinbase transaction encodes a height
    if not self.is_coinbase():
        return None
    # BIP34: the block height is the first element of the coinbase input's
    # scriptSig, serialized as a little-endian integer
    height_bytes = self.tx_ins[0].script_sig.elements[0]
    return little_endian_to_int(height_bytes)
class TxIn:
    '''A single transaction input: a previous outpoint plus an unlocking script.'''

    # class-level cache of previously fetched transactions, keyed by prev_tx hash.
    # NOTE(review): the key ignores `testnet`, so a mainnet and a testnet tx with
    # the same hash would collide -- confirm this cannot happen in practice.
    cache = {}

    def __init__(self, prev_tx, prev_index, script_sig, sequence):
        '''
        prev_tx: 32-byte hash of the transaction being spent
        prev_index: output index within that transaction
        script_sig: raw scriptSig bytes (parsed into a Script object here)
        sequence: sequence number
        '''
        self.prev_tx = prev_tx
        self.prev_index = prev_index
        self.script_sig = Script.parse(script_sig)
        self.sequence = sequence

    def __repr__(self):
        return '{}:{}'.format(
            self.prev_tx.hex(),
            self.prev_index,
        )

    @classmethod
    def parse(cls, s):
        '''Takes a byte stream and parses the tx_input at the start
        return a TxIn object
        '''
        # prev_tx is 32 bytes, little endian (reverse to get the usual big-endian hash)
        prev_tx = s.read(32)[::-1]
        # prev_index is 4 bytes, little endian, interpret as int
        prev_index = little_endian_to_int(s.read(4))
        # script_sig is a variable field: varint length followed by the data
        script_sig_length = read_varint(s)
        script_sig = s.read(script_sig_length)
        # sequence is 4 bytes, little-endian, interpret as int
        sequence = little_endian_to_int(s.read(4))
        return cls(prev_tx, prev_index, script_sig, sequence)

    def serialize(self):
        '''Returns the byte serialization of the transaction input'''
        # prev_tx back to little endian
        result = self.prev_tx[::-1]
        # prev_index, 4 bytes, little endian
        result += int_to_little_endian(self.prev_index, 4)
        # scriptSig is serialized as varint(length) + bytes
        raw_script_sig = self.script_sig.serialize()
        result += encode_varint(len(raw_script_sig))
        result += raw_script_sig
        # sequence, 4 bytes, little endian
        result += int_to_little_endian(self.sequence, 4)
        return result

    @classmethod
    def get_url(cls, testnet=False):
        '''Block-explorer API base URL for the chosen network.'''
        if testnet:
            return 'https://testnet.blockexplorer.com/api'
        else:
            return 'https://blockexplorer.com/api'

    def fetch_tx(self, testnet=False):
        '''Fetch (and cache) the transaction referenced by prev_tx from a block explorer.

        Raises RuntimeError when the server response is not valid JSON or does
        not contain the expected 'rawtx' field.
        '''
        if self.prev_tx not in self.cache:
            url = self.get_url(testnet) + '/rawtx/{}'.format(self.prev_tx.hex())
            response = requests.get(url)
            # Previously a bare `except:` here caught the RuntimeError raised for a
            # missing 'rawtx' key and replaced its message; catch only the JSON
            # decoding failure (requests raises a ValueError subclass) instead.
            try:
                js_response = response.json()
            except ValueError:
                raise RuntimeError('got from server: {}'.format(response.text))
            if 'rawtx' not in js_response:
                raise RuntimeError('got from server: {}'.format(js_response))
            raw = bytes.fromhex(js_response['rawtx'])
            stream = BytesIO(raw)
            tx = Tx.parse(stream)
            self.cache[self.prev_tx] = tx
        return self.cache[self.prev_tx]

    def value(self, testnet=False):
        '''Get the outpoint value by looking up the tx hash on a block explorer
        Returns the amount in satoshi
        '''
        tx = self.fetch_tx(testnet=testnet)
        # amount of the output this input spends
        return tx.tx_outs[self.prev_index].amount

    def script_pubkey(self, testnet=False):
        '''Get the scriptPubKey by looking up the tx hash on a block explorer
        Returns the Script object of the output being spent
        '''
        tx = self.fetch_tx(testnet=testnet)
        return tx.tx_outs[self.prev_index].script_pubkey

    def der_signature(self, index=0):
        '''Returns the DER-format signature (without the trailing hash type byte)
        taken from the script_sig'''
        signature = self.script_sig.signature(index=index)
        # last byte is the hash_type, rest is the signature
        return signature[:-1]

    def hash_type(self, index=0):
        '''Returns the hash type byte (e.g. SIGHASH_ALL) stored at the end of the
        signature in the script_sig'''
        signature = self.script_sig.signature(index=index)
        # last byte is the hash_type
        return signature[-1]

    def sec_pubkey(self, index=0):
        '''returns the SEC format public key if the script_sig has one'''
        return self.script_sig.sec_pubkey(index=index)

    def redeem_script(self):
        '''return the Redeem Script if there is one'''
        return self.script_sig.redeem_script()
class TxOut:
    '''A single transaction output: an amount in satoshi plus a locking script.'''

    def __init__(self, amount, script_pubkey):
        self.amount = amount
        self.script_pubkey = Script.parse(script_pubkey)

    def __repr__(self):
        return '{}:{}'.format(self.amount, self.script_pubkey.address())

    @classmethod
    def parse(cls, s):
        '''Takes a byte stream and parses the tx_output at the start
        return a TxOut object
        '''
        # amount is 8 bytes, little endian, interpreted as int
        amount = little_endian_to_int(s.read(8))
        # scriptPubKey is a varint length followed by that many bytes
        script_pubkey = s.read(read_varint(s))
        return cls(amount, script_pubkey)

    def serialize(self):
        '''Returns the byte serialization of the transaction output'''
        raw_script_pubkey = self.script_pubkey.serialize()
        pieces = [
            int_to_little_endian(self.amount, 8),   # amount, 8 bytes LE
            encode_varint(len(raw_script_pubkey)),  # script length varint
            raw_script_pubkey,                      # the script itself
        ]
        return b''.join(pieces)
class TxTest(TestCase):
    '''Unit tests for Tx/TxIn/TxOut parsing, serialization, signing and
    coinbase handling. The network-dependent tests (test_input_value,
    test_input_pubkey) hit a live block explorer via TxIn.fetch_tx.'''

    def test_parse_version(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(tx.version, 1)

    def test_parse_inputs(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(len(tx.tx_ins), 1)
        # prev_tx is stored big-endian (reversed from the wire format)
        want = bytes.fromhex('d1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81')
        self.assertEqual(tx.tx_ins[0].prev_tx, want)
        self.assertEqual(tx.tx_ins[0].prev_index, 0)
        want = bytes.fromhex('483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278a')
        self.assertEqual(tx.tx_ins[0].script_sig.serialize(), want)
        self.assertEqual(tx.tx_ins[0].sequence, 0xfffffffe)

    def test_parse_outputs(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(len(tx.tx_outs), 2)
        # amounts are in satoshi
        want = 32454049
        self.assertEqual(tx.tx_outs[0].amount, want)
        want = bytes.fromhex('76a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac')
        self.assertEqual(tx.tx_outs[0].script_pubkey.serialize(), want)
        want = 10011545
        self.assertEqual(tx.tx_outs[1].amount, want)
        want = bytes.fromhex('76a9141c4bc762dd5423e332166702cb75f40df79fea1288ac')
        self.assertEqual(tx.tx_outs[1].script_pubkey.serialize(), want)

    def test_parse_locktime(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(tx.locktime, 410393)

    def test_der_signature(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        # der_signature() strips the trailing hash type byte; hash_type() returns it
        want = '3045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed'
        der = tx.tx_ins[0].der_signature()
        hash_type = tx.tx_ins[0].hash_type()
        self.assertEqual(der.hex(), want)
        self.assertEqual(hash_type, SIGHASH_ALL)

    def test_sec_pubkey(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        want = '0349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278a'
        self.assertEqual(tx.tx_ins[0].sec_pubkey().hex(), want)

    def test_serialize(self):
        # parse followed by serialize must round-trip to the original bytes
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(tx.serialize(), raw_tx)

    def test_input_value(self):
        # network test: fetches the previous tx from the block explorer
        tx_hash = 'd1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81'
        index = 0
        want = 42505594
        tx_in = TxIn(
            prev_tx=bytes.fromhex(tx_hash),
            prev_index=index,
            script_sig=b'',
            sequence=0,
        )
        self.assertEqual(tx_in.value(), want)

    def test_input_pubkey(self):
        # network test: fetches the previous tx from the block explorer
        tx_hash = 'd1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81'
        index = 0
        tx_in = TxIn(
            prev_tx=bytes.fromhex(tx_hash),
            prev_index=index,
            script_sig=b'',
            sequence=0,
        )
        want = bytes.fromhex('76a914a802fc56c704ce87c42d7c92eb75e7896bdc41ae88ac')
        self.assertEqual(tx_in.script_pubkey().serialize(), want)

    def test_fee(self):
        # single-input tx
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(tx.fee(), 40000)
        # four-input tx
        raw_tx = bytes.fromhex('010000000456919960ac691763688d3d3bcea9ad6ecaf875df5339e148a1fc61c6ed7a069e010000006a47304402204585bcdef85e6b1c6af5c2669d4830ff86e42dd205c0e089bc2a821657e951c002201024a10366077f87d6bce1f7100ad8cfa8a064b39d4e8fe4ea13a7b71aa8180f012102f0da57e85eec2934a82a585ea337ce2f4998b50ae699dd79f5880e253dafafb7feffffffeb8f51f4038dc17e6313cf831d4f02281c2a468bde0fafd37f1bf882729e7fd3000000006a47304402207899531a52d59a6de200179928ca900254a36b8dff8bb75f5f5d71b1cdc26125022008b422690b8461cb52c3cc30330b23d574351872b7c361e9aae3649071c1a7160121035d5c93d9ac96881f19ba1f686f15f009ded7c62efe85a872e6a19b43c15a2937feffffff567bf40595119d1bb8a3037c356efd56170b64cbcc160fb028fa10704b45d775000000006a47304402204c7c7818424c7f7911da6cddc59655a70af1cb5eaf17c69dadbfc74ffa0b662f02207599e08bc8023693ad4e9527dc42c34210f7a7d1d1ddfc8492b654a11e7620a0012102158b46fbdff65d0172b7989aec8850aa0dae49abfb84c81ae6e5b251a58ace5cfeffffffd63a5e6c16e620f86f375925b21cabaf736c779f88fd04dcad51d26690f7f345010000006a47304402200633ea0d3314bea0d95b3cd8dadb2ef79ea8331ffe1e61f762c0f6daea0fabde022029f23b3e9c30f080446150b23852028751635dcee2be669c2a1686a4b5edf304012103ffd6f4a67e94aba353a00882e563ff2722eb4cff0ad6006e86ee20dfe7520d55feffffff0251430f00000000001976a914ab0c0b2e98b1ab6dbf67d4750b0a56244948a87988ac005a6202000000001976a9143c82d7df364eb6c75be8c80df2b3eda8db57397088ac46430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(tx.fee(), 140500)

    def test_sig_hash(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        hash_type = SIGHASH_ALL
        want = int('27e0c5994dec7824e56dec6b2fcb342eb7cdb0d0957c2fce9882f715e85d81a6', 16)
        self.assertEqual(tx.sig_hash(0, hash_type), want)

    def test_verify_input(self):
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertTrue(tx.verify_input(0))

    def test_sign_input(self):
        # build a 1-input / 2-output testnet tx and sign its only input
        private_key = PrivateKey(secret=8675309)
        tx_ins = []
        prev_tx = bytes.fromhex('0025bc3c0fa8b7eb55b9437fdbd016870d18e0df0ace7bc9864efc38414147c8')
        tx_ins.append(TxIn(
            prev_tx=prev_tx,
            prev_index=0,
            script_sig = b'',
            sequence = 0xffffffff,
        ))
        tx_outs = []
        h160 = decode_base58('mzx5YhAH9kNHtcN481u6WkjeHjYtVeKVh2')
        tx_outs.append(TxOut(amount=int(0.99*100000000), script_pubkey=p2pkh_script(h160)))
        h160 = decode_base58('mnrVtF8DWjMu839VW3rBfgYaAfKk8983Xf')
        tx_outs.append(TxOut(amount=int(0.1*100000000), script_pubkey=p2pkh_script(h160)))
        tx = Tx(
            version=1,
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            locktime=0,
            testnet=True,
        )
        self.assertTrue(tx.sign_input(0, private_key, SIGHASH_ALL))

    def test_is_coinbase(self):
        raw_tx = bytes.fromhex('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff5e03d71b07254d696e656420627920416e74506f6f6c20626a31312f4542312f4144362f43205914293101fabe6d6d678e2c8c34afc36896e7d9402824ed38e856676ee94bfdb0c6c4bcd8b2e5666a0400000000000000c7270000a5e00e00ffffffff01faf20b58000000001976a914338c84849423992471bffb1a54a8d9b1d69dc28a88ac00000000')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertTrue(tx.is_coinbase())

    def test_coinbase_height(self):
        # coinbase tx: height is recoverable from the scriptSig (BIP34)
        raw_tx = bytes.fromhex('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff5e03d71b07254d696e656420627920416e74506f6f6c20626a31312f4542312f4144362f43205914293101fabe6d6d678e2c8c34afc36896e7d9402824ed38e856676ee94bfdb0c6c4bcd8b2e5666a0400000000000000c7270000a5e00e00ffffffff01faf20b58000000001976a914338c84849423992471bffb1a54a8d9b1d69dc28a88ac00000000')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertEqual(tx.coinbase_height(), 465879)
        # non-coinbase tx: coinbase_height() must be None
        raw_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
        stream = BytesIO(raw_tx)
        tx = Tx.parse(stream)
        self.assertIsNone(tx.coinbase_height())
| [
"twshelton@hrtoolbox.com"
] | twshelton@hrtoolbox.com |
23c681e160a0cbf136433a9f28f3f3e0d81d19e7 | 73e6c7d678312cbd6544d49324167cba25bf0fd3 | /utiles/eval_utils.py | d03c767a980a2877f5dac6f19a9b82891af0f87a | [
"MIT",
"WTFPL"
] | permissive | qiuweibin2005/yolov3_fire_detection | d3be84d6da5d1ea26abb2d921590b36b92abac2b | abbe22a3dce87adf96d7cd195c5c25b97db9b4c1 | refs/heads/master | 2022-01-22T06:44:24.006340 | 2019-08-21T07:39:55 | 2019-08-21T07:39:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,402 | py | # coding: utf-8
from __future__ import division, print_function
import numpy as np
import cv2
from collections import Counter
from utiles.nms_utils import cpu_nms, gpu_nms
# from utiles.data_utils import parse_line
def parse_line(line):
    '''
    Given a line from the training/test txt file, return parsed info.

    Line layout: "line_idx pic_path label xmin ymin xmax ymax [label xmin ...]"
    return:
        line_idx: int64
        pic_path: string.
        boxes: shape [N, 4], N is the ground truth count, elements in the second
            dimension are [x_min, y_min, x_max, y_max]
        labels: shape [N]. class index.
    '''
    # lines read from TF datasets arrive as bytes; decode those to str
    if 'str' not in str(type(line)):
        line = line.decode()
    tokens = line.strip().split(' ')
    line_idx = int(tokens[0])
    pic_path = tokens[1]
    fields = tokens[2:]
    boxes, labels = [], []
    # each ground-truth object occupies 5 consecutive fields
    for start in range(0, len(fields) // 5 * 5, 5):
        labels.append(int(fields[start]))
        boxes.append([float(v) for v in fields[start + 1:start + 5]])
    return line_idx, pic_path, np.asarray(boxes, np.float32), np.asarray(labels, np.int64)
def calc_iou(pred_boxes, true_boxes):
    '''
    Maintain an efficient way to calculate the iou matrix using numpy broadcasting.
    shape_info: pred_boxes: [N, 4]  (xmin, ymin, xmax, ymax)
                true_boxes: [V, 4]
    return: IoU matrix: shape: [N, V]
    '''
    # reshape for broadcasting: preds -> [N, 1, 4], truths -> [1, V, 4]
    preds = np.expand_dims(pred_boxes, -2)
    truths = np.expand_dims(true_boxes, 0)
    # intersection rectangle per (pred, truth) pair; width/height clipped at 0
    inter_mins = np.maximum(preds[..., :2], truths[..., :2])
    inter_maxs = np.minimum(preds[..., 2:], truths[..., 2:])
    inter_wh = np.maximum(inter_maxs - inter_mins, 0.)
    inter_area = inter_wh[..., 0] * inter_wh[..., 1]   # [N, V]
    # individual box areas
    pred_wh = preds[..., 2:] - preds[..., :2]
    pred_area = pred_wh[..., 0] * pred_wh[..., 1]      # [N, 1]
    true_wh = truths[..., 2:] - truths[..., :2]
    true_area = true_wh[..., 0] * true_wh[..., 1]      # [1, V]
    # IoU = intersection / union, epsilon guards against division by zero
    return inter_area / (pred_area + true_area - inter_area + 1e-10)
def evaluate_on_cpu(y_pred, y_true, num_classes, calc_now=True, max_boxes=50, score_thresh=0.5, iou_thresh=0.5):
    '''
    Given y_pred and y_true of a batch of data, get the recall and precision of the current batch.

    y_pred: [boxes, confs, probs] network outputs for the whole batch.
    y_true: list of 3 per-feature-map label tensors; per-anchor layout is
        [x, y, w, h, conf, one-hot probs..., extra] -- probs are sliced as
        5:-1, so the last channel is excluded (presumably a mix-up weight;
        TODO confirm against the data pipeline).
    calc_now: if True return (recall, precision); otherwise return the raw
        per-class counting dicts so the caller can accumulate across batches.
    '''
    num_images = y_true[0].shape[0]
    true_labels_dict = {i: 0 for i in range(num_classes)}  # {class: gt count}
    pred_labels_dict = {i: 0 for i in range(num_classes)}  # {class: detection count}
    true_positive_dict = {i: 0 for i in range(num_classes)}  # {class: TP count}
    for i in range(num_images):
        true_labels_list, true_boxes_list = [], []
        for j in range(3):  # three feature maps
            # shape: [13, 13, 3, num_classes] (class probabilities; see docstring)
            true_probs_temp = y_true[j][i][..., 5:-1]
            # shape: [13, 13, 3, 4] (x_center, y_center, w, h)
            true_boxes_temp = y_true[j][i][..., 0:4]
            # [13, 13, 3] -- anchors that carry a ground-truth object
            object_mask = true_probs_temp.sum(axis=-1) > 0
            # [V, num_classes] V: Ground truth count of the current image
            true_probs_temp = true_probs_temp[object_mask]
            # [V, 4]
            true_boxes_temp = true_boxes_temp[object_mask]
            # [V], labels
            true_labels_list += np.argmax(true_probs_temp, axis=-1).tolist()
            # [V, 4] (x_center, y_center, w, h)
            true_boxes_list += true_boxes_temp.tolist()
        if len(true_labels_list) != 0:
            for cls, count in Counter(true_labels_list).items():
                true_labels_dict[cls] += count
        # convert (x_center, y_center, w, h) -> (xmin, ymin, xmax, ymax)
        # NOTE(review): if an image has no gt boxes, np.array([]) is 1-D and the
        # slicing below raises -- confirm every image has at least one object.
        true_boxes = np.array(true_boxes_list)
        box_centers, box_sizes = true_boxes[:, 0:2], true_boxes[:, 2:4]
        true_boxes[:, 0:2] = box_centers - box_sizes / 2.
        true_boxes[:, 2:4] = true_boxes[:, 0:2] + box_sizes
        # keep batch dim of size 1 for the NMS helper: [1, xxx, 4]
        pred_boxes = y_pred[0][i:i + 1]
        pred_confs = y_pred[1][i:i + 1]
        pred_probs = y_pred[2][i:i + 1]
        # pred_boxes: [N, 4]
        # pred_confs: [N]
        # pred_labels: [N]
        # N: Detected box number of the current image
        pred_boxes, pred_confs, pred_labels = cpu_nms(pred_boxes, pred_confs * pred_probs, num_classes,
                                                      max_boxes=max_boxes, score_thresh=score_thresh, iou_thresh=iou_thresh)
        # len: N
        pred_labels_list = [] if pred_labels is None else pred_labels.tolist()
        if pred_labels_list == []:
            continue
        # pairwise IoU between detections and ground truths: [N, V]
        iou_matrix = calc_iou(pred_boxes, true_boxes)
        # [N] -- best-matching gt index for each detection
        max_iou_idx = np.argmax(iou_matrix, axis=-1)
        correct_idx = []   # gt indices already matched by some detection
        correct_conf = []  # confidence of the detection currently matched to each gt
        for k in range(max_iou_idx.shape[0]):
            pred_labels_dict[pred_labels_list[k]] += 1
            match_idx = max_iou_idx[k]  # V level
            if iou_matrix[k, match_idx] > iou_thresh and true_labels_list[match_idx] == pred_labels_list[k]:
                if match_idx not in correct_idx:
                    correct_idx.append(match_idx)
                    correct_conf.append(pred_confs[k])
                else:
                    # gt already matched: keep only the higher-confidence detection
                    same_idx = correct_idx.index(match_idx)
                    if pred_confs[k] > correct_conf[same_idx]:
                        correct_idx.pop(same_idx)
                        correct_conf.pop(same_idx)
                        correct_idx.append(match_idx)
                        correct_conf.append(pred_confs[k])
        for t in correct_idx:
            true_positive_dict[true_labels_list[t]] += 1
    if calc_now:
        # avoid divided by 0
        recall = sum(true_positive_dict.values()) / (sum(true_labels_dict.values()) + 1e-6)
        precision = sum(true_positive_dict.values()) / (sum(pred_labels_dict.values()) + 1e-6)
        return recall, precision
    else:
        return true_positive_dict, true_labels_dict, pred_labels_dict
def evaluate_on_gpu(sess, gpu_nms_op, pred_boxes_flag, pred_scores_flag, y_pred, y_true, num_classes, iou_thresh=0.5, calc_now=True):
    '''
    Given y_pred and y_true of a batch of data, get the recall and precision of the current batch.
    This function will perform the NMS operation on the GPU (via sess.run on gpu_nms_op,
    fed through the pred_boxes_flag / pred_scores_flag placeholders).

    Differences from evaluate_on_cpu: the object mask here is taken from the
    conf channel (index 4) == 1 instead of summing the prob channels, and the
    calc_now branch additionally reports per-group metrics (class 0 vs rest).
    '''
    num_images = y_true[0].shape[0]
    true_labels_dict = {i: 0 for i in range(num_classes)}  # {class: gt count}
    pred_labels_dict = {i: 0 for i in range(num_classes)}  # {class: detection count}
    true_positive_dict = {i: 0 for i in range(num_classes)}  # {class: TP count}
    for i in range(num_images):
        true_labels_list, true_boxes_list = [], []
        for j in range(3):  # three feature maps
            # shape: [13, 13, 3, num_classes]; the last channel is excluded by the
            # 5:-1 slice (presumably a mix-up weight -- TODO confirm)
            true_probs_temp = y_true[j][i][..., 5:-1]
            # shape: [13, 13, 3, 4] (x_center, y_center, w, h)
            true_boxes_temp = y_true[j][i][..., 0:4]
            # [13, 13, 3] -- anchors whose conf channel marks a gt object
            object_mask = y_true[j][i][..., 4] == 1.
            # [V, num_classes] V: Ground truth count of the current image
            true_probs_temp = true_probs_temp[object_mask]
            # [V, 4]
            true_boxes_temp = true_boxes_temp[object_mask]
            # [V], labels, each from 0 to num_classes - 1
            true_labels_list += np.argmax(true_probs_temp, axis=-1).tolist()
            # [V, 4] (x_center, y_center, w, h)
            true_boxes_list += true_boxes_temp.tolist()
        if len(true_labels_list) != 0:
            for cls, count in Counter(true_labels_list).items():
                true_labels_dict[cls] += count
        # convert (x_center, y_center, w, h) -> (xmin, ymin, xmax, ymax)
        # NOTE(review): an image with zero gt boxes makes the slicing below raise
        true_boxes = np.array(true_boxes_list)
        box_centers, box_sizes = true_boxes[:, 0:2], true_boxes[:, 2:4]
        true_boxes[:, 0:2] = box_centers - box_sizes / 2.
        true_boxes[:, 2:4] = true_boxes[:, 0:2] + box_sizes
        # keep batch dim of size 1: [1, xxx, 4]
        pred_boxes = y_pred[0][i:i + 1]
        pred_confs = y_pred[1][i:i + 1]
        pred_probs = y_pred[2][i:i + 1]
        # pred_boxes: [N, 4]
        # pred_confs: [N]
        # pred_labels: [N]
        # N: Detected box number of the current image
        pred_boxes, pred_confs, pred_labels = sess.run(gpu_nms_op,
                                                       feed_dict={pred_boxes_flag: pred_boxes,
                                                                  pred_scores_flag: pred_confs * pred_probs})
        # len: N
        pred_labels_list = [] if pred_labels is None else pred_labels.tolist()
        if pred_labels_list == []:
            continue
        # pairwise IoU between detections and ground truths: [N, V]
        iou_matrix = calc_iou(pred_boxes, true_boxes)
        # [N] -- best-matching gt index for each detection
        max_iou_idx = np.argmax(iou_matrix, axis=-1)
        correct_idx = []   # gt indices already matched by some detection
        correct_conf = []  # confidence of the detection currently matched to each gt
        for k in range(max_iou_idx.shape[0]):
            pred_labels_dict[pred_labels_list[k]] += 1
            match_idx = max_iou_idx[k]  # V level
            if iou_matrix[k, match_idx] > iou_thresh and true_labels_list[match_idx] == pred_labels_list[k]:
                if match_idx not in correct_idx:
                    correct_idx.append(match_idx)
                    correct_conf.append(pred_confs[k])
                else:
                    # gt already matched: keep only the higher-confidence detection
                    same_idx = correct_idx.index(match_idx)
                    if pred_confs[k] > correct_conf[same_idx]:
                        correct_idx.pop(same_idx)
                        correct_conf.pop(same_idx)
                        correct_idx.append(match_idx)
                        correct_conf.append(pred_confs[k])
        for t in correct_idx:
            true_positive_dict[true_labels_list[t]] += 1
    if calc_now:
        # avoid divided by 0
        # NOTE(review): the "person"/"bag" split below assumes class 0 is the
        # first group and classes 1.. the second, relying on the insertion order
        # of the dicts built above -- confirm against the class name file.
        recall = sum(list(true_positive_dict.values())) / (sum(list(true_labels_dict.values())) + 1e-6)
        person_recall = sum(list(true_positive_dict.values())[0:1]) / (sum(list(true_labels_dict.values())[0:1]) + 1e-6)
        bag_recall = sum(list(true_positive_dict.values())[1:]) / (sum(list(true_labels_dict.values())[1:]) + 1e-6)
        precision = sum(list(true_positive_dict.values())) / (sum(list(pred_labels_dict.values())) + 1e-6)
        person_precision = sum(list(true_positive_dict.values())[0:1]) / (sum(list(pred_labels_dict.values())[0:1]) + 1e-6)
        bag_precision = sum(list(true_positive_dict.values())[1:]) / (sum(list(pred_labels_dict.values())[1:]) + 1e-6)
        return recall, precision, person_recall, person_precision, bag_recall, bag_precision
    else:
        return true_positive_dict, true_labels_dict, pred_labels_dict
def get_preds_gpu(sess, gpu_nms_op, pred_boxes_flag, pred_scores_flag, image_ids, y_pred):
    '''
    Given the y_pred of an input image, get the predicted bbox and label info.
    return:
        pred_content: 2d list, one row per detection:
        [image_id, xmin, ymin, xmax, ymax, score, label]
    '''
    image_id = image_ids[0]
    # keep the batch dimension of size 1 for the NMS op
    boxes_in = y_pred[0][0:1]
    confs_in = y_pred[1][0:1]
    probs_in = y_pred[2][0:1]
    # final score per class = objectness * class probability
    boxes, scores, labels = sess.run(
        gpu_nms_op,
        feed_dict={pred_boxes_flag: boxes_in,
                   pred_scores_flag: confs_in * probs_in})
    return [[image_id, box[0], box[1], box[2], box[3], score, label]
            for box, score, label in zip(boxes, scores, labels)]
def get_2cls_preds_gpu(sess, gpu_nms_op, pred_boxes_flag, pred_scores_flag, image_ids, y_pred):
    '''
    Given the y_pred of an input image, get the predicted bbox and label info,
    after collapsing the per-class scores down to two classes: class 0 is kept
    as-is and classes 1.. are merged by taking their maximum score.
    return:
        pred_content: 2d list, one row per detection:
        [image_id, xmin, ymin, xmax, ymax, score, label]
    '''
    image_id = image_ids[0]
    # keep the batch dimension of size 1 for the NMS op
    boxes_in = y_pred[0][0:1]
    confs_in = y_pred[1][0:1]
    probs_in = y_pred[2][0:1]
    # per-class scores = objectness * class probability
    full_scores = confs_in * probs_in
    # build a two-class score tensor of the same leading shape
    collapsed_shape = list(full_scores.shape)
    collapsed_shape[-1] = 2
    collapsed_scores = np.zeros(collapsed_shape)
    collapsed_scores[..., 0] = full_scores[..., 0]
    collapsed_scores[..., 1:] = np.max(full_scores[..., 1:], axis=2, keepdims=True)
    boxes, scores, labels = sess.run(
        gpu_nms_op,
        feed_dict={pred_boxes_flag: boxes_in,
                   pred_scores_flag: collapsed_scores})
    return [[image_id, box[0], box[1], box[2], box[3], score, label]
            for box, score, label in zip(boxes, scores, labels)]
gt_dict = {} # key: img_id, value: gt object list
def parse_gt_rec(gt_filename, resize_img_size):
    '''
    parse and re-organize the gt info.

    gt_filename: txt file in parse_line format
        ("img_id pic_path label xmin ymin xmax ymax ...")
    resize_img_size: (width, height) that the boxes should be rescaled to.
    return:
        gt_dict: dict. Each key is a img_id, the value is the list of gt boxes
        [xmin, ymin, xmax, ymax, label] in the corresponding img, rescaled to
        resize_img_size.
    '''
    global gt_dict
    # parse only on the first call; later calls return the module-level cache.
    # NOTE(review): the cache ignores the arguments, so calling again with a
    # different gt_filename or resize size silently returns stale data.
    if not gt_dict:
        resize_w, resize_h = resize_img_size
        with open(gt_filename, 'r') as f:
            for line in f:
                img_id, pic_path, boxes, labels = parse_line(line)
                # the image is read only to recover its original width/height
                # (cv2.imread returns None on a bad path, which would raise here)
                ori_img_size = cv2.imread(pic_path).shape
                ori_w, ori_h = ori_img_size[1], ori_img_size[0]
                objects = []
                for i in range(len(labels)):
                    x_min, y_min, x_max, y_max = boxes[i]
                    label = labels[i]
                    # rescale the box from the original image size to resize_img_size
                    objects.append([x_min * resize_w / ori_w,
                                    y_min * resize_h / ori_h,
                                    x_max * resize_w / ori_w,
                                    y_max * resize_h / ori_h,
                                    label])
                gt_dict[img_id] = objects
    return gt_dict
# The following two function are modified from FAIR's Detectron repo to calculate mAP:
# https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/voc_eval.py
def voc_ap(rec, prec, use_07_metric=False):
    """Compute VOC AP given precision and recall arrays.

    If use_07_metric is true, uses the VOC 07 11-point method
    (default: False, which integrates the precision envelope exactly).
    """
    if use_07_metric:
        # 11-point interpolation: average the max precision at
        # recall thresholds 0.0, 0.1, ..., 1.0
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            above = rec >= threshold
            p = np.max(prec[above]) if np.sum(above) > 0 else 0
            ap = ap + p / 11.
        return ap
    # exact integration: pad with sentinels so the envelope and the
    # recall deltas are well defined at both ends
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # make precision monotonically non-increasing (right-to-left envelope)
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # indices where recall changes value: only there does the PR curve add area
    change_pts = np.where(mrec[1:] != mrec[:-1])[0]
    # sum of (delta recall) * precision over the envelope
    return np.sum((mrec[change_pts + 1] - mrec[change_pts]) * mpre[change_pts + 1])
def voc_eval(gt_dict, val_preds, classidx, iou_thres=0.5, use_07_metric=False):
    '''
    Top level function that does the PASCAL VOC evaluation for one class.

    gt_dict: {img_id: [[xmin, ymin, xmax, ymax, label], ...]}
    val_preds: iterable of [img_id, xmin, ymin, xmax, ymax, score, label]
    classidx: the class id being evaluated
    iou_thres: IoU threshold for a detection to count as a true positive
    use_07_metric: use the VOC07 11-point AP if True

    return: (npos, nd, recall, precision, AP) where npos/nd are the gt and
    detection counts for this class. Returns (1e-6, 1e-6, 0, 0, 0) when
    there are no detections for the class.
    '''
    # 1. obtain gt: extract all gt objects for this class, with a
    #    per-box "already matched" flag
    class_recs = {}
    npos = 0
    for img_id in gt_dict:
        R = [obj for obj in gt_dict[img_id] if obj[-1] == classidx]
        bbox = np.array([x[:4] for x in R])
        det = [False] * len(R)
        npos += len(R)
        class_recs[img_id] = {'bbox': bbox, 'det': det}
    # 2. obtain pred results for this class
    # (previously an empty prediction list was detected via a bare `except:`
    # around the fancy indexing below; check explicitly instead)
    pred = [x for x in val_preds if x[-1] == classidx]
    if not pred:
        print('no box, ignore')
        return 1e-6, 1e-6, 0, 0, 0
    img_ids = [x[0] for x in pred]
    confidence = np.array([x[-2] for x in pred])
    BB = np.array([[x[1], x[2], x[3], x[4]] for x in pred])
    # 3. sort detections by descending confidence
    sorted_ind = np.argsort(-confidence)
    BB = BB[sorted_ind, :]
    img_ids = [img_ids[x] for x in sorted_ind]
    # 4. mark TPs and FPs (each gt box may be matched at most once)
    nd = len(img_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        # all the gt info in the detection's image
        R = class_recs[img_ids[d]]
        bb = BB[d, :]
        ovmax = -np.inf
        BBGT = R['bbox']
        if BBGT.size > 0:
            # IoU between this detection and every gt box
            # (+1 terms: inclusive integer pixel coordinates)
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + (BBGT[:, 2] - BBGT[:, 0] + 1.) * (
                    BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)
        if ovmax > iou_thres:
            if not R['det'][jmax]:
                # best-overlapping gt not matched yet: true positive
                tp[d] = 1.
                R['det'][jmax] = 1
            else:
                # gt already claimed by a higher-confidence detection
                fp[d] = 1.
        else:
            fp[d] = 1.
    # compute precision/recall curves
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    # NOTE(review): npos == 0 with detections present divides by zero here
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)
    return npos, nd, tp[-1] / float(npos), tp[-1] / float(nd), ap
def rpc_eval(gt_dict, val_preds, classidx, iou_thres=0.5, use_07_metric=False):
    '''
    PASCAL-VOC-style evaluation for a single class, additionally returning a
    per-detection true/false-positive flag list.

    Args:
        gt_dict: mapping img_id -> list of ground-truth annotations, each
            indexable as [xmin, ymin, xmax, ymax, ..., class_idx].
        val_preds: predictions [img_id, xmin, ymin, xmax, ymax, ..., score, class_idx].
        classidx: class index to evaluate.
        iou_thres: IoU threshold above which a detection may match a gt box.
        use_07_metric: whether voc_ap uses the 11-point VOC2007 interpolation.

    Returns:
        (npos, nd, recall, precision, ap, tf_flag). tf_flag is indexed in the
        ORIGINAL (pre-sort) prediction order: 1 for TP, 0 for FP.
    '''
    # 1. obtain gt: extract all gt objects of this class, keyed by image
    class_recs = {}
    npos = 0
    for img_id in gt_dict:
        R = [obj for obj in gt_dict[img_id] if obj[-1] == classidx]
        bbox = np.array([x[:4] for x in R])
        det = [False] * len(R)  # per-gt "already matched" markers
        npos += len(R)
        class_recs[img_id] = {'bbox': bbox, 'det': det}
    # 2. obtain pred results for this class
    pred = [x for x in val_preds if x[-1] == classidx]
    img_ids = [x[0] for x in pred]
    confidence = np.array([x[-2] for x in pred])
    BB = np.array([[x[1], x[2], x[3], x[4]] for x in pred])
    # 3. sort detections by descending confidence
    sorted_ind = np.argsort(-confidence)
    try:
        BB = BB[sorted_ind, :]
    except IndexError:
        # No predictions for this class: BB is an empty 1-D array and the 2-D
        # index above fails.  Bug fix: the original early return produced only
        # 5 of the 6 values returned by the normal path, breaking callers
        # that unpack tf_flag.
        print('no box, ignore')
        return 1e-6, 1e-6, 0, 0, 0, []
    img_ids = [img_ids[x] for x in sorted_ind]
    # 4. mark TPs and FPs
    nd = len(img_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    tf_flag = [0] * nd
    for d in range(nd):
        # all the gt info in this detection's image
        R = class_recs[img_ids[d]]
        bb = BB[d, :]
        ovmax = -np.inf
        BBGT = R['bbox']
        if BBGT.size > 0:
            # IoU against every gt box (+1 pixel-inclusive convention)
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + (BBGT[:, 2] - BBGT[:, 0] + 1.) * (
                BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)
        # tf_flag is written at the detection's position in the original
        # (pre-sort) prediction list, while tp/fp stay in sorted order.
        # Bug fix: this assignment used to live inside the `BBGT.size > 0`
        # branch, so images without gt boxes reused a stale index (or raised
        # NameError on the first iteration).
        sort_d = sorted_ind[d]
        if ovmax > iou_thres:
            if not R['det'][jmax]:
                # best-overlapping gt not claimed yet -> true positive
                tp[d] = 1.
                tf_flag[sort_d] = 1
                R['det'][jmax] = 1
            else:
                # gt already claimed by a higher-confidence detection
                fp[d] = 1.
                tf_flag[sort_d] = 0
        else:
            fp[d] = 1.
            tf_flag[sort_d] = 0
    # compute cumulative precision / recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)
    return npos, nd, tp[-1] / float(npos), tp[-1] / float(nd), ap, tf_flag
def eval_mixarray(gt_dict, val_preds, cls_num, iou_thres=0.5, use_07_metric=False):
    '''
    PASCAL-VOC-style evaluation, apparently intended to fill a class
    confusion ("mix") array.

    NOTE(review): `mix_arr` is allocated but never filled or returned, and
    `classidx` is hard-coded to 1, so in its current state this evaluates
    class 1 only — presumably unfinished; confirm before relying on it.
    '''
    # gt, pred
    mix_arr = np.zeros([cls_num, cls_num+1])
    classidx = 1
    # 1.obtain gt: extract all gt objects for this class
    class_recs = {}
    npos = 0
    for img_id in gt_dict:
        R = [obj for obj in gt_dict[img_id] if obj[-1] == classidx]
        bbox = np.array([x[:4] for x in R])
        det = [False] * len(R)
        npos += len(R)
        class_recs[img_id] = {'bbox': bbox, 'det': det}
    # 2. obtain pred results
    pred = [x for x in val_preds if x[-1] == classidx]
    img_ids = [x[0] for x in pred]
    confidence = np.array([x[-2] for x in pred])
    BB = np.array([[x[1], x[2], x[3], x[4]] for x in pred])
    # 3. sort by confidence
    sorted_ind = np.argsort(-confidence)
    try:
        BB = BB[sorted_ind, :]
    except:
        # Empty prediction list: the 2-D index on a 1-D empty array fails.
        print('no box, ignore')
        return 1e-6, 1e-6, 0, 0, 0
    img_ids = [img_ids[x] for x in sorted_ind]
    # 4. mark TPs and FPs
    nd = len(img_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        # all the gt info in some image
        R = class_recs[img_ids[d]]
        bb = BB[d, :]
        ovmax = -np.Inf
        BBGT = R['bbox']
        if BBGT.size > 0:
            # calc iou
            # intersection
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + (BBGT[:, 2] - BBGT[:, 0] + 1.) * (
                BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)
        if ovmax > iou_thres:
            # gt not matched yet
            if not R['det'][jmax]:
                tp[d] = 1.
                R['det'][jmax] = 1
            else:
                fp[d] = 1.
        else:
            fp[d] = 1.
    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)
    # return rec, prec, ap
    return npos, nd, tp[-1] / float(npos), tp[-1] / float(nd), ap
| [
"935535775@qq.com"
] | 935535775@qq.com |
9265005a0452ad623d02fb337d2ac88baa665b6e | 99d82523bfcda19bcb6a3b436d5107f9b05ff5b5 | /UI-Script/led_on.py | 8bf27e6fbd526f80113f5b38af32838eff1160ed | [] | no_license | svineyar/RCTSpring2017 | 3d2fd45438d1dd589d6fec799763ed43f36ebbf5 | d07b0d947ea248a6e8ba60e85f407f8a561e101d | refs/heads/master | 2021-01-20T04:08:02.653114 | 2017-06-21T04:51:39 | 2017-06-21T04:51:39 | 89,638,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | import mraa
import time
import sys
def main():
    """Drive high the LED wired to the GPIO pin given as the first CLI argument."""
    # Bug fix: argv entries are strings; mraa.Gpio expects a numeric pin
    # number — assumed from the mraa Python binding, confirm on target board.
    led_num = int(sys.argv[1])
    # Use LEDs GPIO 12, 14, 16, 18, 20
    led = mraa.Gpio(led_num)
    # Set LED to be output
    led.dir(mraa.DIR_OUT)
    # Write 1 to the LED (turn it on)
    led.write(1)
if __name__=="__main__":
main()
| [
"thomasan1995@gmail.com"
] | thomasan1995@gmail.com |
cc74c4b48d01311de9524512faa2e7208b92e29d | 0d697ce08c9ffc91940e7556e1788b10e069d0b1 | /cli/cli.py | 3f278854fc0dfe4d987ed44d30a39c7a06212bc4 | [] | no_license | AlexandreProenca/load-testing | 9a96b3ad1b111d654893c4f29f0f5ae5668e1ea3 | 0fe5e3d7af5613c14ced5d0e2409f816190d1b5e | refs/heads/master | 2021-01-13T00:49:27.643694 | 2016-02-02T00:45:08 | 2016-02-02T00:45:08 | 50,864,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ws import PyWebSocket
import os
class MyApp():
    # Class-level registry: every instance appends its socket here, so the
    # connections outlive the (discarded) MyApp instances created below.
    ws = []
    def __init__(self):
        # Open one websocket whose incoming messages go to self.callback.
        MyApp.ws.append(PyWebSocket(self.callback))
    def callback(self, info):
        # Python 2 print statement: echo whatever the socket delivers.
        print 'info:', info
if __name__ == '__main__':
    # Load test: open 200 websocket connections.  The MyApp instances are
    # not kept (each assignment overwrites `app`); the sockets themselves
    # are retained in MyApp.ws.
    for i in range(200):
        app = MyApp()
    # Block on stdin until the user types an exit keyword (any case).
    while 1:
        a = raw_input('>')
        if a.upper() in ['EXIT', 'QUIT', 'SAIR', 'END']:
            print 'bye'
            # Hard exit: skips cleanup and kills reader threads immediately.
            os._exit(0)
| [
"alexandre.proenca@socialbase.com.br"
] | alexandre.proenca@socialbase.com.br |
e618d228ef200153331639dc5141bf389bbdb341 | ea04e29123cdbe5f49be236ee1df8397da25b330 | /students/migrations/0019_student.py | fee26a235fed55750b1bf338835c5ab5f8bebced | [
"MIT"
] | permissive | Davy-71993/MySchool | bf187673e4796010dd8fd4bcbf27b0b87199794d | fa02c8ec19d71873fc0d714cf652d8ad05f2f0e7 | refs/heads/master | 2022-05-28T23:24:06.865580 | 2020-05-04T19:03:04 | 2020-05-04T19:03:04 | 261,251,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | # Generated by Django 2.2.6 on 2020-04-06 20:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Student model with
    # personal details, class/stream/house choice fields and an M2M link
    # to Subject.  Do not hand-edit field definitions.
    dependencies = [
        ('students', '0018_paper'),
    ]
    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sir_name', models.CharField(max_length=50)),
                ('given_name', models.CharField(max_length=50)),
                ('other_names', models.CharField(blank=True, max_length=50, null=True)),
                ('sex', models.CharField(choices=[('Female', 'Female'), ('Male', 'Male')], max_length=8)),
                ('passport_photo', models.ImageField(blank=True, null=True, upload_to='passports')),
                ('klass', models.CharField(choices=[('S.1', 'S.1'), ('S.2', 'S.2'), ('S.3', 'S.3'), ('S.4', 'S.4'), ('S.5', 'S.5'), ('S.6', 'S.6')], max_length=5)),
                ('stream', models.CharField(choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('S', 'S')], max_length=10)),
                ('house', models.CharField(choices=[('Australia', 'Australia'), ('Brazil', 'Brazil'), ('Canada', 'Canada'), ('France', 'France'), ('Japan', 'Japan'), ('Nigeria', 'Nigeria')], default='Australia', max_length=10)),
                ('subjects', models.ManyToManyField(default=(), to='students.Subject')),
            ],
        ),
    ]
| [
"59330475+Davy-71993@users.noreply.github.com"
] | 59330475+Davy-71993@users.noreply.github.com |
56972268082116f26894acdd2d6158648fb6456c | b6c08c60c6e1c79ef14387e94dc0413159177c05 | /grp_expense/report/report_carta_aceptacion.py | 07c9a020fff6ce1c83d830a02ac740a5c6f2202d | [] | no_license | juradojose/capacitcion | 8c4484aaf538d3e5ff45f0c2ca8e6d2a97baa4ac | 04f1f085ed0e753bb6d2c589ce490f9dbcab0446 | refs/heads/master | 2022-12-25T09:01:16.116701 | 2020-10-09T15:46:36 | 2020-10-09T15:46:36 | 296,679,168 | 0 | 0 | null | 2020-09-21T23:10:37 | 2020-09-18T16:50:06 | Python | UTF-8 | Python | false | false | 1,809 | py | import time
import locale
from datetime import datetime, timedelta
from odoo import fields, api, models
from dateutil.parser import parse
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class cls_report_carta_aceptacion(models.AbstractModel):
    # QWeb report model for the "carta de aceptación" (acceptance letter).
    _name = 'report.grp_expense.report_carta_aceptacion'
    @api.model
    def render_html(self, docids, data=None):
        """Render the acceptance-letter report for the given expense payments.

        The displayed date comes from the record's `date` field, shifted by
        one day, converted to Mexico City time and formatted in Spanish
        (e.g. '05 de mayo de 2020').
        """
        self.model = self.env.context.get('active_model')
        docs = self.env['expense.payment'].browse(docids)
        # Spanish month names for strftime below; requires the es_MX locale
        # to be installed on the server.
        locale.setlocale(locale.LC_TIME, "es_MX.UTF-8")
        # NOTE(review): the +1 day offset looks like a compensation for the
        # date->datetime timezone conversion — confirm before changing.
        date = fields.Datetime.context_timestamp(
            self.with_context(tz='America/Mexico_City'), datetime.strptime(str(docs.date), '%Y-%m-%d')+timedelta(days=1)).strftime('%d de %B de %Y')
        docargs = {
            'doc_ids': self.ids,
            'doc_model': self.model,
            'docs': docs,
            'time': time,
            'date_today': date,
        }
        return self.env['report'].render('grp_expense.report_carta_aceptacion', docargs)
class cls_report_recibo(models.AbstractModel):
    # QWeb report model for the "recibo" (receipt).
    _name = 'report.grp_expense.report_recibo'
    @api.model
    def render_html(self, docids, data=None):
        """Render the receipt report for the given expense payments.

        Unlike the acceptance letter above, the displayed date is the
        current date (datetime.now()), not the record's date, converted to
        Mexico City time and formatted in Spanish.
        """
        self.model = self.env.context.get('active_model')
        docs = self.env['expense.payment'].browse(docids)
        # Spanish month names for strftime; requires the es_MX server locale.
        locale.setlocale(locale.LC_TIME, "es_MX.UTF-8")
        date = fields.Datetime.context_timestamp(
            self.with_context(tz='America/Mexico_City'), datetime.now()).strftime('%d de %B de %Y')
        docargs = {
            'doc_ids': self.ids,
            'doc_model': self.model,
            'docs': docs,
            'time': time,
            'date_today': date,
        }
        return self.env['report'].render('grp_expense.report_recibo', docargs)
| [
"noreply@github.com"
] | noreply@github.com |
8563b4bb9db8df61494d434689fd85b797cdb33a | 7020af20eed65ec6619ca91967a7ed4299304357 | /neuro-fuzzy-master/PS_1/problem7.py | 3635a82a287a775956de2321ccc00de160fa013b | [] | no_license | kostastsiv/neuro-fuzzy | ae27193477da3670017e0c5c4d8648e5fcde4d64 | ad482d3e2185533201f31cb5515953292f2515cd | refs/heads/main | 2023-03-07T04:34:59.926535 | 2021-02-19T15:27:13 | 2021-02-19T15:27:13 | 340,409,019 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,442 | py | import matplotlib.pyplot as plt
import numpy as np
import scipy.special as sp
#pylint: disable=no-member
def main():
    """Plot the responses of a fixed 1-2-1 network: a two-neuron logsig
    (sigmoid) hidden layer feeding a single purelin (identity) output, swept
    over inputs in [-3, 3)."""
    input_vector_p = np.arange(-3, 3, 0.2)
    # Hand-picked weights/biases (one per hidden neuron, then output layer).
    weight_vector_sigmoid_layer = (-0.27, -0.41)
    weight_vector_lin_layer = (0.09, -0.17)
    bias_sigmoid_layer = (-0.48, -0.13)
    bias_lin_layer = 0.48
    # Pre-allocate per-input net values and activations for both layers.
    net_val_sigmoid_layer = np.zeros((2, len(input_vector_p)))
    net_val_lin_layer = np.zeros(len(input_vector_p))
    output_sigmoid_layer = np.zeros((2, len(input_vector_p)))
    output_lin_layer = np.zeros(len(input_vector_p))
    ############## --INIT-- ##############
    for i in range(len(input_vector_p)):
        # Calculate net output (sigmoid)
        net_val_sigmoid_layer[0][i] = input_vector_p[i]*weight_vector_sigmoid_layer[0] + bias_sigmoid_layer[0]
        net_val_sigmoid_layer[1][i] = input_vector_p[i]*weight_vector_sigmoid_layer[1] + bias_sigmoid_layer[1]
        # Calculate sigmoid layer output (expit == logistic sigmoid)
        output_sigmoid_layer[0][i] = sp.expit(net_val_sigmoid_layer[0][i])
        output_sigmoid_layer[1][i] = sp.expit(net_val_sigmoid_layer[1][i])
        #Calculate net output (linear)
        net_val_lin_layer[i] = output_sigmoid_layer[0][i]*weight_vector_lin_layer[0] + output_sigmoid_layer[1][i]*weight_vector_lin_layer[1] + bias_lin_layer
        #Calculate linear layer output (purelin: identity)
        output_lin_layer[i] = net_val_lin_layer[i]
    # Figure 1: hidden neuron 0 — net input (red) vs activation (blue).
    plt.figure('LOGSIG[0]')
    plt.plot(input_vector_p, net_val_sigmoid_layer[0], 'r', input_vector_p, output_sigmoid_layer[0], 'b')
    plt.xlabel('Input values')
    plt.ylabel('Logsig layer net output, value 1')
    plt.grid()
    # NOTE(review): linewidth is given as the string '0.5', not a float —
    # verify matplotlib accepts it on the targeted version.
    plt.axhline(linewidth='0.5', color='r')
    plt.axvline(linewidth='0.5', color='r')
    # Figure 2: hidden neuron 1 (dashed).
    plt.figure('LOGSIG[1]')
    plt.plot(input_vector_p, net_val_sigmoid_layer[1], 'r--', input_vector_p, output_sigmoid_layer[1], 'b--')
    plt.xlabel('Input values')
    plt.ylabel('Logsig layer net output, value 2')
    plt.grid()
    plt.axhline(linewidth='0.5', color='r')
    plt.axvline(linewidth='0.5', color='r')
    # Figure 3: output layer — net input (green) and identical output (black).
    plt.figure('PURELIN')
    plt.plot(input_vector_p, net_val_lin_layer, 'g', input_vector_p, output_lin_layer, 'k')
    plt.xlabel('Input values')
    plt.ylabel('Purelin layer net output')
    plt.axhline(linewidth='0.5', color='r')
    plt.axvline(linewidth='0.5', color='r')
    plt.grid()
    plt.show()
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | noreply@github.com |
b18d215da6eb6379225c33e52983791c0f1ab897 | a3d0af5a106504622904a0db3bb51ea59fea0034 | /main.py | 70cd89d91d88bca0902ae5a81d3c2517b1addaf6 | [] | no_license | rayyanshaik2022/Car-Racing-NN | 3e4bdd4d738749f946b6964d040fcc6e23be6908 | f699cda324c4d38dd83a41ac68f3747ba205c28f | refs/heads/main | 2023-01-01T06:17:45.215481 | 2020-10-24T20:46:19 | 2020-10-24T20:46:19 | 306,015,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | import math
import pickle
import random
from gui import Gui
from network import *
from settings import *
if not LOAD_DATA:
    # Fresh run: evolve a brand-new population of genetic networks.
    pop = Population(POP_SIZE, GENERATIONS, LIFESPAN, MUTATION_CHANCE, MUTATION_RATE, network_type=Genetic)
else:
    # Resume: unpickle previously saved networks and seed the population.
    # NOTE(review): pickle only loads data this program saved itself; never
    # point LOAD_FILE at untrusted input.
    with open("Networks/"+LOAD_FILE, 'rb') as f:
        nets = pickle.load(f)
    pop = Population(POP_SIZE, GENERATIONS, LIFESPAN, MUTATION_CHANCE, MUTATION_RATE, network_type=Genetic)
    pop.population = nets
# create the game object
g = Gui(pop)
g.new()
g.run()
| [
"56854894+rayyanshaik2022@users.noreply.github.com"
] | 56854894+rayyanshaik2022@users.noreply.github.com |
e0190ff61f2aabbb5624403696f355f6c20c9987 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_unifies.py | 6a571fbebcd0b1f1afa793ee61d08959b4764e46 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._unify import _UNIFY
#calss header
class _UNIFIES(_UNIFY, ):
    # Auto-generated word-form subclass: behaves exactly like _UNIFY but is
    # tagged with the inflected surface form "UNIFIES".
    def __init__(self,):
        _UNIFY.__init__(self)
        self.name = "UNIFIES"
        self.specie = 'verbs'
        self.basic = "unify"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4249804af9bf5efd18650847da5379335d1544ac | 211ce0bb59a2a9d10ff0854f4ad66674acb9da2a | /core/apps/articul/migrations/0003_auto_20210309_1242.py | 4747b437fbb41c625b38ab42974500fd89f85d29 | [] | no_license | azhalkov/selo3 | 5498491a20ae564ddb35486a39238fc510e3ad12 | b8509dd561f2eaa5ff22ed8fe32a8c20553e98c2 | refs/heads/main | 2023-03-15T14:12:30.075119 | 2021-03-19T08:42:40 | 2021-03-19T08:42:40 | 342,849,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # Generated by Django 3.1 on 2021-03-09 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: reorders the articul model by
    # -artikuls with Russian verbose names, and adds a paid flag plus a
    # free-text notes field.  Do not hand-edit operation contents.
    dependencies = [
        ('articul', '0002_auto_20210308_0804'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='articul',
            options={'ordering': ['-artikuls'], 'verbose_name': 'Артикул', 'verbose_name_plural': 'Артикулы'},
        ),
        migrations.AddField(
            model_name='articul',
            name='is_oplata',
            field=models.BooleanField(default=False, verbose_name='Оплачено'),
        ),
        migrations.AddField(
            model_name='articul',
            name='zametki',
            field=models.TextField(blank=True, max_length=2000, verbose_name='Для заметок'),
        ),
    ]
| [
"azhalkov@yandex.ru"
] | azhalkov@yandex.ru |
296672059945b97bbd869f0d3b96a94d7cf409de | c5d00c27e86e7a9b35bcc94a0dd3e222c20e8017 | /fishy/libs/__init__.py | d4a018d2705e3ee47e24753a8dc05a09ae134e85 | [
"MIT"
] | permissive | realPrimoh/fishyboteso | 2a4abbd2a93ef6110ee87688695a8ee4b38f16aa | f2b2873b207cf8daaef30dfd39d21599719abb9f | refs/heads/master | 2022-11-11T08:41:56.987394 | 2020-06-23T03:06:29 | 2020-06-23T03:06:29 | 274,293,446 | 0 | 0 | MIT | 2020-06-23T02:51:30 | 2020-06-23T02:51:29 | null | UTF-8 | Python | false | false | 25 | py | from . import tkhtmlview
| [
"adsau59@gmail.com"
] | adsau59@gmail.com |
35ba72ae9ef6093c9a88df2e881fb109352be381 | 30455451f8f4dbdaf653e7150201e89ed6c6e609 | /pushgateway/gateway.py | 55ccaaccea79bd2edb099ed772312bf9a9e15e7d | [
"Apache-2.0"
] | permissive | grro/OpenhabWebThingPushGateway | d5a1dbceeae8cc50a752ed81a78d3ea600fd7a6e | e96abd556d96c3d5e2752843b301d930c5aabc7b | refs/heads/master | 2023-01-02T07:00:29.262853 | 2020-10-21T18:58:14 | 2020-10-21T18:58:14 | 300,156,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,627 | py | from pushgateway.config import load_config
from pushgateway.webthing import WebthingProperty
from pushgateway.openhab import OpenhabItem
import time
import threading
import logging
class WebThingPropertyToOpenhabItemLink:
    """Forwards value changes of a WebThing property to an openHAB item."""
    def __init__(self, webthing_property: WebthingProperty, openhab_item: OpenhabItem):
        self.webthing_property = webthing_property
        self.openhab_item = openhab_item
        # Last value pushed to openHAB; suppresses duplicate updates.
        self.cached_value = None
    def start(self):
        # Run the listen loop on a background thread; start() returns at once.
        threading.Thread(target=self.__listen).start()
    def __listen(self):
        while True:
            try:
                # listen for values to forward
                stream = self.webthing_property.new_change_listener(self.__on_changed_callback)
                stream.start()
                # Recycle the listener every 10 minutes to recover from
                # silently broken connections.
                time.sleep(10 * 60)
                stream.stop()
            except Exception as e:
                logging.error("error occurred for webthing " + self.webthing_property.metadata.name + ": "+ str(e))
                time.sleep(10)  # back off before reconnecting
    def __on_changed_callback(self, new_value):
        item_value = self.__convert(new_value)
        # Only push when the converted value actually changed.
        if self.cached_value != item_value:
            self.cached_value = item_value
            self.openhab_item.state = item_value
    def __convert(self, property_value):
        # Map WebThing booleans onto openHAB switch states; everything else
        # passes through unchanged.
        if self.webthing_property.metadata.type == 'boolean':
            if property_value:
                return "ON"
            else:
                return "OFF"
        else:
            return property_value
class OpenhabItemToWebThingPropertyLink:
    """Forwards state changes of an openHAB item to a WebThing property."""
    def __init__(self, webthing_property: WebthingProperty, openhab_item: OpenhabItem):
        self.webthing_property = webthing_property
        self.openhab_item = openhab_item
        # Last value written to the WebThing; suppresses duplicate writes.
        self.cached_value = None
    def start(self):
        # Run the listen loop on a background thread; start() returns at once.
        threading.Thread(target=self.__listen).start()
    def __listen(self):
        while True:
            try:
                # listen for values to forward
                stream = self.openhab_item.new_change_listener(self.__on_changed_callback)
                stream.start()
                # Recycle the listener every 10 minutes to recover from
                # silently broken connections.
                time.sleep(10 * 60)
                stream.stop()
            except Exception as e:
                logging.error("error occurred for openhab " + self.webthing_property.metadata.name + ": "+ str(e))
                time.sleep(10)  # back off before reconnecting
    def __on_changed_callback(self, new_value):
        property_value = self.__convert(new_value)
        if self.cached_value != property_value:
            self.cached_value = property_value
            self.webthing_property.property = property_value
    def __convert(self, value):
        source_type = self.openhab_item.metadata.type
        target_type = self.webthing_property.metadata.type
        # NOTE(review): openHAB item types are conventionally capitalised
        # ('Switch') — confirm metadata.type is lower-cased upstream,
        # otherwise this first branch never matches.
        if source_type == 'switch':
            return value == 'ON'
        elif target_type == 'number':
            return float(value)
        elif target_type == 'integer':
            return int(value)
        else:
            return value
class Link:
    """Binds one WebThing property to one openHAB item, choosing direction
    from the property's readonly flag: read-only properties are mirrored
    into openHAB, writable ones are driven from openHAB."""
    def __init__(self, webthing_uri: str, webthing_property: str, openhab_uri: str, itemname: str):
        self.webthing_property = WebthingProperty(webthing_uri, webthing_property)
        self.openhab_item = OpenhabItem(openhab_uri, itemname)
        if self.webthing_property.metadata.readonly:
            logging.info("Backward link openhab item " + self.openhab_item.metadata.name + " <- webthing property " + self.webthing_property.metadata.name + " created")
        else:
            logging.info("Forward Link openhab item " + self.openhab_item.metadata.name + " -> webthing property " + self.webthing_property.metadata.name + " created")
    def start(self):
        # Run the direction dispatch on a background thread.
        threading.Thread(target=self.__listen).start()
    def __listen(self):
        # Instantiate the correctly-directed inner link (which spawns its own
        # listener thread) and then keep this thread parked.
        if self.webthing_property.metadata.readonly:
            webthing_to_openhab_link = WebThingPropertyToOpenhabItemLink(self.webthing_property, self.openhab_item)
            webthing_to_openhab_link.start()
        else:
            openhab_to_webthing_link = OpenhabItemToWebThingPropertyLink(self.webthing_property, self.openhab_item)
            openhab_to_webthing_link.start()
        while True:
            time.sleep(10)
def run(filename: str):
    """Load the link configuration from `filename`, start one Link per entry,
    then block forever so the worker threads keep running.

    A broken entry is logged and skipped so the remaining links still start.
    """
    configs = load_config(filename)
    logging.info("config file " + filename + " loaded. " + str(len(configs)) + " entries found")
    for config in configs:
        try:
            Link(config.webthing_root_uri, config.webthing_property_name, config.openhab_root_uri, config.openhab_item_name).start()
        except Exception:
            # Bug fix: logging.error("...", e) passed `e` as a %-format arg
            # with no placeholder in the (garbled) message, producing a
            # logging format error at emit time. exception() logs the
            # message plus the full traceback.
            logging.exception("error occurred setting up link")
    while True:
        time.sleep(60)
| [
"gregor.roth@1und1.de"
] | gregor.roth@1und1.de |
07e0c92817221caedd1c94d8b47f082328fdf89a | d0dc4116f93913522a862b51c2427c4ab6209389 | /python/class_student.py | 88b2e4ce0cf8a6d64e6a665641d05da0dfa1d530 | [] | no_license | HeZhang01/Room | e1f8ee4b6b130aac34120c29a5345f9483b4f75c | 0638453cbfe63e2559a8f2285d58eb3852de4a83 | refs/heads/master | 2022-01-12T20:14:12.178307 | 2019-05-07T13:27:34 | 2019-05-07T13:27:34 | 66,245,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import types
"""
一个student类
"""
class Student(object):
    """A pupil with a public name and numeric score."""

    def __init__(self, name, score):
        """Remember the student's name and score."""
        self.name = name
        self.score = score

    def print_score(self):
        """Print the record in the form 'name: score'."""
        record = '%s: %s' % (self.name, self.score)
        print(record)
# bart = Student('Bart Simpson', 59)
# lisa = Student('Lisa Simpson', 87)
# bart.print_score()
# lisa.print_score()
"""
访问限制
"""
class Student2(object):
    """Student whose name and score are kept as private (name-mangled)
    attributes, mutated only through set_score."""

    def __init__(self, name, score):
        self.__name = name
        self.__score = score

    def print_score(self):
        """Print the record in the form 'name: score'."""
        line = '%s: %s' % (self.__name, self.__score)
        print(line)

    def set_score(self, score):
        """Replace the stored score."""
        # TODO: validate that `score` is a legal value
        self.__score = score
# Demo: exercise the encapsulated Student2 class.
bart2 = Student2('Bart Simpson', 98)
bart2.print_score()
bart2.set_score(95)
bart2.print_score()
#bart2.__name #illegal access: name mangling makes this raise AttributeError
# print(bart2._Student2__name) #force access via the mangled private name
| [
"hezhang1001@foxmail.com"
] | hezhang1001@foxmail.com |
04c0313f3d285053ebea5dbc384c5c1413960fe8 | dfdc5ac9b49e9fdf1732ccf9e7072b817ddfa3c1 | /Ej1b_copia.py | e3c27549b2f50f00fda71bb658d5c8781daec681 | [] | no_license | ccerrudo/HO-python | 58933a35352e06cbccd9ea4c8affcb434c197576 | 085b40a90cae515735da206dff67f00b3ab025a1 | refs/heads/master | 2021-05-07T05:31:04.121542 | 2017-11-21T20:20:56 | 2017-11-21T20:20:56 | 111,551,611 | 0 | 0 | null | 2017-11-21T13:20:26 | 2017-11-21T13:20:26 | null | UTF-8 | Python | false | false | 387 | py | def fib(n):
    # Base cases: fib(0)=0, fib(1)=1, fib(2)=2 — note fib(2) deviates from
    # the standard Fibonacci value of 1.
    if n <= 2:
        return n
    else:
        # NOTE(review): naive double recursion is exponential in n; the
        # range(100) comprehension below will effectively never terminate —
        # memoize or rewrite iteratively before running.
        return (fib(n - 2) + fib(n - 1))
# NOTE(review): with the naive recursive fib above, this line is intractable
# (exponential time for n up to 99).
print ([fib(i) for i in range(100)])
#a= [fib(i) for i in range(100)]
#print ("imprimo a: ",a)
'''
def imp(n):
if n / 3 == 0:
return n
else:
return 0
print ([imp(i) for i in a])
b= [imp(i) for i in a]
print (b)
sum (b)
print (sum(b))
'''
| [
"noreply@github.com"
] | noreply@github.com |
c01984844afa50913893ef063078a3dea5271a17 | 72ab43b29e9fbc453dc07503d98beca5c213ef38 | /mp/task.py | 1bfe275c7d18ada1ccc87217afdcb620aa06565f | [] | no_license | spinaBelaya/python-labs_ITMO | 3cef6def2ceff1e10b4b204af55ac66957bf1c08 | 69577c0bade134208149c3f3f1f3234dc25234f0 | refs/heads/master | 2022-12-20T01:07:43.437362 | 2019-06-01T20:55:07 | 2019-06-01T20:55:07 | 153,694,668 | 0 | 1 | null | 2022-08-23T17:28:44 | 2018-10-18T22:23:02 | Python | UTF-8 | Python | false | false | 377 | py | import random
from queue import Queue
def sorter(*args, **kwargs):
    """CPU-bound busy-work task: generate and sort one million random ints
    in [0, 100].

    All arguments are accepted but ignored; the sorted result is discarded
    and the function returns None.
    """
    _ = sorted(random.randint(0, 100) for _ in range(1000000))
def generate_data(len_q = 300, len_list = 700):
    """Build a Queue holding `len_q` lists, each containing `len_list`
    random 0/1 integers.

    Args:
        len_q: number of lists placed on the queue.
        len_list: length of each random bit list.

    Returns:
        queue.Queue: the populated queue.
    """
    filled = Queue()
    for _ in range(len_q):
        bits = [random.randint(0, 1) for _ in range(len_list)]
        filled.put(bits)
    return filled
| [
"noreply@github.com"
] | noreply@github.com |
ce83711024455db4eb8de12534096bbaf80499c2 | c54ca055363bd7fd097e0b28a76fd16f487fa8b2 | /diyblog/clock.py | 99dae68be5f15d962afeda5d8e3869339679b04c | [
"MIT"
] | permissive | lair60/diyblog | af1025dd3af4789e6c3211cd5267087bdd1712dc | 18a35a120a3546fba12b42c61b25f6f545d9d191 | refs/heads/main | 2023-05-25T17:45:45.435036 | 2023-05-18T10:32:07 | 2023-05-18T10:32:07 | 335,326,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | import os
import django
from rq import Queue
import redis
# Guard so the background job is only registered once per import.
initialized = False
"""
@sched.scheduled_job('cron', day_of_week='mon-fri', hour=17)
def scheduled_job():
    print('This job is run every weekday at 5pm.')
"""
if __name__ == '__main__':
    # Run directly: blocking scheduler that enqueues the link-cleanup job
    # every minute.
    from utils import removeLinks
    from apscheduler.schedulers.blocking import BlockingScheduler
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'diyblog.settings')
    django.setup()
    # NOTE(review): `conn` is only assigned in the else-branch below, so
    # executing this module directly raises NameError here — a Redis
    # connection needs to be created on this branch too.
    q = Queue(connection=conn)
    sched = BlockingScheduler()
    @sched.scheduled_job('interval', minutes=1, max_instances=1)
    def timed_job():
        result = q.enqueue(removeLinks)
        print('This job1 is run every 1 minute.')
    sched.start()
else:
    # Imported (e.g. by the web process): background scheduler; the
    # module-level `initialized` flag keeps the job from being registered
    # twice.
    from diyblog.utils import removeLinks
    from apscheduler.schedulers.background import BackgroundScheduler
    redis_url = os.environ.get('REDISTOGO_URL', 'redis://red-chd69c2k728tp9f132cg:6379')
    conn = redis.from_url(redis_url)
    q = Queue(connection=conn)
    sched = BackgroundScheduler()
    if initialized == False:
        initialized = True
        @sched.scheduled_job('interval', minutes=1, max_instances=1)
        def timed_job():
            #print('id clock: '+str(os.getpid()))
            result = q.enqueue(removeLinks)
            #print('This job2 is run every 1 minute.')
def start_jobs():
sched.start() | [
"lair60@yahoo.es"
] | lair60@yahoo.es |
6a75e330e912ea0b671c833906e68358aec70daf | 1cc54e191b9d6e4ea2a469b92da0f3ac8ccd84b0 | /tasks/post_session_check_sync_pulses.py | 435b760d75571b766963aac6ffca53126fef9006 | [
"MIT"
] | permissive | alejandropan/iblrig | 027c090dbe54b6ef2cbbf22c16ad60eb040ee949 | d8e746ccc52c2ad325404077ad2403e165e94d0c | refs/heads/master | 2020-04-28T11:45:36.182150 | 2019-06-12T01:38:06 | 2019-06-12T01:38:06 | 175,253,494 | 0 | 0 | MIT | 2019-05-28T01:34:28 | 2019-03-12T16:25:04 | Python | UTF-8 | Python | false | false | 2,085 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Thursday, February 21st 2019, 7:13:37 pm
from pathlib import Path
import ibllib.io.raw_data_loaders as raw
import matplotlib.pyplot as plt
import numpy as np
import sys
def get_port_events(events: dict, name: str = '') -> list:
    """Gather every timestamp from event channels whose key contains `name`,
    merged into one ascending list."""
    merged: list = []
    for key, timestamps in events.items():
        if name in key:
            merged.extend(timestamps)
    return sorted(merged)
if __name__ == '__main__':
    # Sanity-check the session data: count trials whose Bpod sync events are
    # incomplete, then plot each sync channel's pulses against trial ends.
    if len(sys.argv) == 1:
        # Bug fix: the original printed this message but then still indexed
        # sys.argv[1] below, crashing with IndexError.
        print("I need a file name...")
        sys.exit(1)
    session_data_file = Path(sys.argv[1])
    if not session_data_file.exists():
        raise FileNotFoundError(session_data_file)
    # Accept either the raw .jsonable file or the session folder itself.
    if session_data_file.name.endswith('.jsonable'):
        data = raw.load_data(session_data_file.parent.parent)
    else:
        try:
            data = raw.load_data(session_data_file)
        except Exception:
            # Bug fix: the original only printed and fell through, leaving
            # `data` undefined and raising NameError below.
            print('Not a file or a valid session folder')
            sys.exit(1)
    unsynced_trial_count = 0
    frame2ttl = []
    sound = []
    camera = []
    trial_end = []
    for trial_data in data:
        tevents = trial_data['behavior_data']['Events timestamps']
        ev_bnc1 = get_port_events(tevents, name='BNC1')
        ev_bnc2 = get_port_events(tevents, name='BNC2')
        ev_port1 = get_port_events(tevents, name='Port1')
        # A trial with any sync channel missing counts as unsynced.
        if not ev_bnc1 or not ev_bnc2 or not ev_port1:
            unsynced_trial_count += 1
        frame2ttl.extend(ev_bnc1)
        sound.extend(ev_bnc2)
        camera.extend(ev_port1)
        trial_end.append(trial_data['behavior_data']['Trial end timestamp'])
    print(f'Found {unsynced_trial_count} trials with bad sync data')
    # One row of tick marks per channel, with a vertical line per trial end.
    f = plt.figure()  # figsize=(19.2, 10.8), dpi=100)
    ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
    ax.plot(camera, np.ones(len(camera)) * 1, '|')
    ax.plot(sound, np.ones(len(sound)) * 2, '|')
    ax.plot(frame2ttl, np.ones(len(frame2ttl)) * 3, '|')
    # Idiom fix: plain loop instead of a side-effect list comprehension.
    for t in trial_end:
        ax.axvline(t, alpha=0.5)
    ax.set_ylim([0, 4])
    ax.set_yticks(range(4))
    ax.set_yticklabels(['', 'camera', 'sound', 'frame2ttl'])
    plt.show()
| [
"nbonacchi@gmail.com"
] | nbonacchi@gmail.com |
379a52ae1a2e6f3eee47c55a2628a991c82ac961 | b318f75d8b3c5a569728152f7fb4aa9d9ea74008 | /ass12.py | eb10344a2ef6e90739acd45a4886e03265f829ce | [] | no_license | anishshetty17/pthon- | eca59570bac551307975e3508d40997ed99abb0d | ec1358dc9560d4ea6f00796d267789b1d521c052 | refs/heads/master | 2020-12-31T07:41:26.078672 | 2017-03-29T04:37:47 | 2017-03-29T04:37:47 | 86,538,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | #CENTROID OF A TRIANGLE
# Read the three vertices A(p,q), B(r,s), C(t,u) from stdin and compute the
# triangle's centroid ((p+r+t)/3, (q+s+u)/3).
print('Assume the three sides(coordinates) of a triangle to be A(p,q), B(r,s) & C(t,u)')
p = int(input('p : '))
q = int(input('q : '))
print('The coordinates of side A is ' , (p,q))
r = int(input('r : '))
s = int(input('s : '))
print('The coordinates of side B is ' , (r,s))
t = int(input('t : '))
u = int(input('u : '))
print('The coordinates of side C is' , (t,u))
print('Let the centroid be having vertices and coordinates as X(v,w)')
# Centroid = arithmetic mean of the three vertex coordinates.
v = (p + r + t) / 3
w = (q + s + u) / 3
print('The centroid of triangle is ' , (v,w)) | [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.