seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
25062222468 | # word doc binary search
def read_words(document):
    """Read *document* and return a list of its lines with newlines stripped.

    Only '\n' characters are stripped, so any other surrounding whitespace
    (including '\r' from Windows line endings) is kept as-is.
    """
    with open(document) as doc:
        return [line.strip('\n') for line in doc]
def binary_search_word(value):
    """Binary-search *value* among the words read from "wordlist.txt".

    Prints the word and its index in the original list when found.
    Returns True when found, False otherwise.

    NOTE(review): binary search is only correct on sorted data -- this
    assumes wordlist.txt is already sorted; confirm the file's ordering.
    """
    my_list = read_words("wordlist.txt")
    print(my_list)
    # Map each word to its index in the original list so the position can
    # be reported after the search has narrowed the list down to a slice.
    my_dict = dict((word, idx) for idx, word in enumerate(my_list))
    print(my_dict)
    new_list = my_list
    # Fixed: the original looped on `len(new_list) >= 0` (always true) and
    # re-tested the length inside the loop; loop directly until empty.
    while len(new_list) > 0:
        middle = len(new_list) // 2
        if value > new_list[middle]:
            # cut in half to the right
            new_list = new_list[middle + 1:]
        elif value < new_list[middle]:
            # cut in half to the left
            new_list = new_list[:middle]
        else:
            print("You found the value", new_list[middle], "in your array")
            # now you have the value just need to look up its key in the dict
            value_index = my_dict[new_list[middle]]
            print("Your index position of that value is ", value_index)
            return True
    print("The value is not in the array")
    return False
##sample dataset
# Smoke test: search "hello" in wordlist.txt and print the True/False result.
print (binary_search_word("hello"))
print (" ")
# regular binary search
def binary_search(my_list, value):
    """Binary-search *value* in *my_list*.

    Prints the value and its index when found.  Returns True when found,
    False when absent.  Also returns False when *my_list* is not sorted,
    since binary search is only valid on sorted input (the original fell
    through and implicitly returned None in that case).
    """
    if not check_sort(my_list):
        return False
    # Map each value to its index in the original list so the position can
    # be reported after the search has narrowed the list down to a slice.
    my_dict = dict((item, idx) for idx, item in enumerate(my_list))
    new_list = my_list
    # Fixed: the original looped on `len(new_list) >= 0` (always true) and
    # re-tested the length inside the loop; loop directly until empty.
    while len(new_list) > 0:
        middle = len(new_list) // 2
        if value > new_list[middle]:
            # cut in half to the right
            new_list = new_list[middle + 1:]
        elif value < new_list[middle]:
            # cut in half to the left
            new_list = new_list[:middle]
        else:
            print("You found the value", new_list[middle], "in your array")
            # now you have the value just need to look up its key in the dict
            value_index = my_dict[new_list[middle]]
            print("Your index position of that value is ", value_index)
            return True
    print("The value is not in the array")
    return False
# no point in binary search bc this check is linear
def check_sort(my_list):
    """Return True when *my_list* is in non-decreasing order.

    Single linear scan.  The original wrapped the scan in a `while changed`
    loop that always returned after the first pass, so the loop was dead
    weight; behavior (True for sorted/empty/single-element input, False
    otherwise) is unchanged.
    """
    return all(my_list[i] <= my_list[i + 1] for i in range(len(my_list) - 1))
##sample dataset
# Smoke test: sorted array, 88 is present (index 7).
arr = [1,3,9,11,23,44,66,88,102,142,188,192,239,382,492,1120,1900,2500,4392,5854,6543,8292,9999,29122]
print (binary_search(arr, 88))
'''
The whole point of a binary search is that, since the data is already sorted, you can quickly locate the information you want.
Take the phone book, which is sorted by last name.
How do you find someone in the phone book? You open it up to a page which you assume will be close to what you want, and then start flipping pages.
But you don't flip one page each time, if you missed by a lot, you flip a bunch of pages, and then finally start flipping one at a time, until finally you start looking at a single page.
This is what binary search does. Since the data is sorted, it knows it can skip a lot and do another look, and it'll focus in on the information you want.
A binary search does 1 comparison for every doubled number of items. So a 1024 element collection would require around 10 comparisons, at the most, to find your information, or at least figure out that it's not there.
If you, before running the actual binary search, do a full run-through to check if the data is sorted, you might as well just do a scan for the information. A full run-through + the binary search will require N + log2 N operations, so for 1024 elements, it would require around 1034 comparisons, whereas a simple scan for the information will on average require half, which is 512.
So if you can't guarantee that the data is sorted, you should not use binary search, as it will be outperformed by a simple scan.
''' | Janteby1/binary_search-copy | binary-search.py | binary-search.py | py | 4,067 | python | en | code | 0 | github-code | 36 |
73895256743 | import numpy as np
import torch
from torch.utils.data import Dataset
import pickle as pkl
def valence_map(elements: list, valences: list):
    """
    ## Given a list of elements and their corresponding valences, create a dictionary mapping each element to its valence.
    ### Args:
    - elements: a list of elements, such as [Element F, Element Pb].
    - valences: a list of valences corresponding to the elements, such as ['Pb2+', 'F-'].
    ### Return:
    A dictionary mapping each element to its valence, such as {'F': -1, 'Pb': 2}.
    """
    mapping = {}  # renamed from `map`, which shadowed the builtin
    for ele in elements:
        ele = str(ele)
        for val in valences:
            if ele not in val:
                continue
            # Remainder after removing the element symbol, e.g. '2+' or '-'.
            num = val.replace(ele, "")
            # Guard against an empty remainder (valence string equal to the
            # symbol -- the original raised IndexError on num[0]) and against
            # partial symbol matches such as 'F' in 'Fe2+' leaving 'e2+'.
            if not num or num[0].isalpha():
                continue
            # A bare sign means a charge of one.
            if num == "-":
                num = "1-"
            if num == "+":
                num = "1+"
            # Move the trailing sign to the front: '2+' -> '+2', '2-' -> '-2'.
            mapping[ele] = float(num[-1] + num[:-1])
    return mapping
class ValenceDataset(Dataset):
    """
    The ValenceDataset class is a PyTorch Dataset that loads and preprocesses X-ray absorption near edge structure (XANES) spectra data for machine learning tasks.
    It takes an annotation file as input, which contains the paths to the data files to be loaded. The class unpacks the data files,
    extracts the XANES spectra and corresponding valences of the elements in the spectra, and returns them as a tuple of data and label for each sample.
    ## Args:
    - annotation: the path of the annotation text file which contains the paths of data samples to be used to train/test the model.
    - xy_label: when True the full spectrum array is kept; when False only
      row [1] is kept (presumably absorption values without the energy
      axis -- TODO confirm against the pickled data layout).
    """
    def __init__(self, annotation="",xy_label=False):
        super().__init__()
        # One pickled-sample path per line of the annotation file.
        with open(annotation, "r") as f:
            self.mp_list = f.readlines()
        self.dataset = []
        self.xy_label=xy_label
        self.unpack_data()
    def unpack_data(self):
        """Load every pickled sample and keep only Fe K-edge spectra."""
        for i in range(len(self.mp_list)):
            # Strip the trailing newline from the annotation line.
            self.mp_list[i] = self.mp_list[i].split("\n")[0]
            with open(self.mp_list[i], "rb") as f:
                info = pkl.load(f)
            valences = valence_map(info["elements"], info["valences"])
            spectrum = info["xanes"]
            for sub_spec in spectrum.keys():
                # Spectrum keys look like '...-<element>-<edge>'; second-to-last
                # token is the element symbol.
                element = sub_spec.split("-")[-2]
                # if element == "Fe" and valences[element].is_integer() and sub_spec.endswith('K'):
                if element == "Fe" and sub_spec.endswith('K'):
                    if self.xy_label:
                        spec=np.array(spectrum[sub_spec])
                    else:
                        spec=np.array(spectrum[sub_spec][1])
                    # `- 0` is a no-op; likely a leftover label offset.
                    self.dataset.append(
                        [spec, int(valences[element])-0]
                    )
    def __getitem__(self, index):
        """Return (spectrum FloatTensor, valence class LongTensor) for *index*."""
        data, label = self.dataset[index]
        data = torch.from_numpy(data).type(torch.FloatTensor)
        label = torch.LongTensor([label])
        # label=torch.Tensor([float(label)]).type(torch.FloatTensor)
        return data, label
    def __len__(self):
        return len(self.dataset)
class ValenceDatasetV2(Dataset):
    """
    The ValenceDatasetV2 class is a PyTorch Dataset that loads and preprocesses X-ray absorption near edge structure (XANES) spectra data for machine learning tasks.
    It takes an PKL file which contains all data samples as input, extracts the XANES spectra and corresponding valences of the elements in the spectra, and returns them as a tuple of data and label for each sample.
    ## Args:
    - annotation: the path of the annotation text file which contains the paths of data samples to be used to train/test the model.
    - xy_label: when True the sample keeps both rows and row [0]
      (presumably the energy axis -- TODO confirm) is scaled by 1/10000;
      when False only row [1] is used.
    """
    def __init__(self, annotation="",xy_label=True):
        super().__init__()
        self.xy_label=xy_label
        # The whole dataset is a single pickled list of (label, data) pairs.
        with open(annotation, "rb") as f:
            self.dataset=pkl.load(f)
    def __getitem__(self, index):
        """Return (spectrum FloatTensor, valence FloatTensor) for *index*."""
        label, data = self.dataset[index]
        if not self.xy_label:
            # NOTE(review): assumes the stored sample is already a numpy
            # array here (torch.from_numpy below) -- confirm.
            data=data[1]
        else:
            # Copy before scaling so the cached dataset entry is not mutated
            # on repeated __getitem__ calls.
            data=np.array(data)
            data[0]=data[0]/10000.0
        data = torch.from_numpy(data).type(torch.FloatTensor)
        # Label is returned as a float tensor (regression-style target).
        label = torch.Tensor([float(label)]).type(torch.FloatTensor)
        return data, label
    def __len__(self):
        return len(self.dataset)
| Airscker/DeepMuon | DeepMuon/dataset/XASData.py | XASData.py | py | 4,337 | python | en | code | 1 | github-code | 36 |
21115457088 | """Django FilterSet classes for Nautobot."""
import django_filters
from nautobot.apps.filters import BaseFilterSet, NautobotFilterSet, SearchFilter
from nautobot_chatops.choices import PlatformChoices
from nautobot_chatops.models import CommandLog, AccessGrant, ChatOpsAccountLink, CommandToken
class CommandLogFilterSet(BaseFilterSet):
    """FilterSet for filtering a set of CommandLog objects."""
    class Meta:
        """Metaclass attributes of CommandLogFilterSet."""
        model = CommandLog
        # Model fields exposed as filter query parameters.
        fields = [
            "start_time",
            "runtime",
            "user_name",
            "user_id",
            "platform",
            "command",
            "subcommand",
            "status",
            "details",
        ]
class AccessGrantFilterSet(BaseFilterSet):
    """FilterSet for filtering a set of AccessGrant objects."""
    class Meta:
        """Metaclass attributes of AccessGrantFilterSet."""
        model = AccessGrant
        # Model fields exposed as filter query parameters.
        fields = ["command", "subcommand", "grant_type", "value"]
class ChatOpsAccountLinkFilterSet(NautobotFilterSet):
    """FilterSet for filtering the ChatOps Account Links."""
    # Free-text search (?q=) matching on user_id or platform substrings.
    q = SearchFilter(
        filter_predicates={
            "user_id": "icontains",
            "platform": "icontains",
        }
    )
    # Restrict to the platforms declared in PlatformChoices; accepts
    # multiple values.
    platform = django_filters.MultipleChoiceFilter(choices=PlatformChoices)
    class Meta:
        """Metaclass attributes of ChatOpsAccountLinkFilterSet."""
        model = ChatOpsAccountLink
        fields = "__all__"
class CommandTokenFilterSet(BaseFilterSet):
    """FilterSet for filtering a set of CommandToken objects."""
    class Meta:
        """Metaclass attributes of CommandTokenFilterSet."""
        model = CommandToken
        # Model fields exposed as filter query parameters.
        fields = ["comment", "platform"]
| nautobot/nautobot-plugin-chatops | nautobot_chatops/filters.py | filters.py | py | 1,773 | python | en | code | 47 | github-code | 36 |
20145639624 | """
Cobweb plot function
"""
import numpy as np
import matplotlib.pyplot as plt
__all__ = [
'cobweb'
]
def cobweb(func, initial_conditon, nsteps, limits, args=(), ax=None):
    """
    Plot cobweb diagram for onedimensional iterated functions
    ``x[n+1] = func(x[n])``.
    Parameters
    ----------
    func : callable
        Function that compute the next system state from the current one.
    initial_conditon : float
        Simulation initial condition.
    nsteps : int
        Number of steps displayed be the cobweb diagram.
    limits : 2 elements array_like
        Upper and lower limits for the cobweb diagram.
    args : tuple, optional
        Extra arguments to pass to function ``func``.
    ax : matplotlib axis object, optional
        Axis in which the phase plane will be plot. If none is provided
        create a new one.
    """
    if ax is None:
        _, ax = plt.subplots()
    # Identity line (black) and the iterated map itself (blue).
    grid = np.linspace(limits[0], limits[1], 1000)
    curve = [func(point, *args) for point in grid]
    ax.plot(grid, grid, linewidth=1.5, color='black')
    ax.plot(grid, curve, linewidth=1.5, color='blue')
    # Cobweb segments: vertical step onto the curve, then horizontal step
    # back to the diagonal, repeated nsteps times.
    current = initial_conditon
    for _ in range(nsteps):
        following = func(current, *args)
        ax.plot([current, current, following],
                [current, following, following],
                color='red',
                marker='o',
                markersize=3,
                markerfacecolor='black')
        current = following
| antonior92/dynamic-system-plot | dynplt/cobweb.py | cobweb.py | py | 1,586 | python | en | code | 1 | github-code | 36 |
35896409907 | from typing import NamedTuple, Optional, List, Dict, Any, Union
from enum import Enum, auto
import pysam
def _build_filter(rec: pysam.VariantRecord) -> List[Union[str, int]]:
return [f for f in rec.filter]
def _build_info(rec: pysam.VariantRecord) -> Dict[str, Any]:
info = dict()
for key, value in rec.info.items():
info[key] = value
return info
def _build_format(rec: pysam.VariantRecord) -> List[str]:
return [f for f in rec.format]
def _build_samples(rec: pysam.VariantRecord) -> Dict[str, Dict[str, Any]]:
samples = dict()
for sample_name in rec.samples:
sample_dict = dict()
for key, value in rec.samples[sample_name].items():
sample_dict[key] = value
samples[sample_name] = sample_dict
return samples
class VariantType(Enum):
    """Enumeration with the different types of variations
    """
    SNV = auto()  # single-nucleotide variant
    DEL = auto()  # deletion
    INS = auto()  # insertion
    DUP = auto()  # duplication
    INV = auto()  # inversion
    CNV = auto()  # copy-number variant
    TRA = auto()  # translocation
    SGL = auto()  # single breakend (presumably; confirm against caller usage)
class BreakendSVRecord(NamedTuple):
    """NamedTuple with the information of a breakend notated SV record.

    Breakend (BND) ALT notation is defined by the VCF specification, e.g.
    :code:`G]17:198982]` = prefix ``G``, bracket ``]``, mate at 17:198982.
    """
    prefix: Optional[str]
    """Prefix of the SV record with breakend notation. For example, for :code:`G]17:198982]` the prefix will be :code:`G`"""
    bracket: str
    """Bracket of the SV record with breakend notation. For example, for :code:`G]17:198982]` the bracket will be :code:`]`"""
    contig: str
    """Contig of the SV record with breakend notation. For example, for :code:`G]17:198982]` the contig will be :code:`17`"""
    pos: int
    """Position of the SV record with breakend notation. For example, for :code:`G]17:198982]` the position will be :code:`198982`"""
    suffix: Optional[str]
    """Suffix of the SV record with breakend notation. For example, for :code:`G]17:198982]` the suffix will be :code:`None`"""
class ShorthandSVRecord(NamedTuple):
    """NamedTuple with the information of a shorthand SV record.

    Shorthand (symbolic) ALT notation is the angle-bracket form defined by
    the VCF specification, e.g. :code:`<DUP:TANDEM>`.
    """
    type: str
    """One of the following, :code:`'DEL'`, :code:`'INS'`, :code:`'DUP'`, :code:`'INV'` or :code:`'CNV'`"""
    extra: Optional[List[str]]
    """Extra information of the SV. For example, for :code:`<DUP:TANDEM:AA>` the extra will be :code:`['TANDEM', 'AA']`"""
def _str_value(value):
if isinstance(value, str):
return value
elif isinstance(value, float):
return f'{value:.2f}'
elif hasattr(value, '__iter__'):
return ','.join([_str_value(v) for v in value])
elif value is None:
return '.'
else:
return str(value)
def _convert_info_key_value(key, value):
    """Render one INFO entry as VCF text.

    None values render as a bare key; booleans act as flags (present when
    True, dropped -- returned as None -- when False); everything else is
    rendered as ``key=value``.
    """
    if value is None:
        return key
    if isinstance(value, bool):
        return key if value else None
    return f'{key}={_str_value(value)}'
def _convert_sample_value(key, value):
    """Render one per-sample FORMAT value; GT alleles are joined with '/'."""
    if key != 'GT':
        return _str_value(value)
    return '/'.join(_str_value(allele) for allele in value)
class VariantRecord():
    """Variant record wrapping a :class:`pysam.VariantRecord`.

    INFO, FORMAT and per-sample data are materialized lazily from the
    underlying pysam record on first access; until then, string rendering
    reuses the original record's text verbatim.
    """
    contig: str
    """Contig name"""
    pos: int
    """Position of the variant in the contig"""
    end: int
    """End position of the variant in the contig (same as `pos` for TRA and SNV)"""
    length: int
    """Length of the variant"""
    id: Optional[str]
    """Record identifier"""
    ref: str
    """Reference sequence"""
    alt: str
    """Alternative sequence"""
    qual: Optional[float]
    """Quality score for the assertion made in ALT"""
    filter: List[Union[str, int]]
    """Filter status. PASS if this position has passed all filters. Otherwise, it contains the filters that failed"""
    variant_type: VariantType
    """Variant type"""
    alt_sv_breakend: Optional[BreakendSVRecord]
    """Breakend SV info, present only for SVs with breakend notation. For example, :code:`G]17:198982]`"""
    alt_sv_shorthand: Optional[ShorthandSVRecord]
    """Shorthand SV info, present only for SVs with shorthand notation. For example, :code:`<DUP:TANDEM>`"""
    def __init__(self, rec: pysam.VariantRecord, contig: str, pos: int, end: int,
                 length: int, id: Optional[str], ref: str,
                 alt: str, variant_type: VariantType,
                 alt_sv_breakend: Optional[BreakendSVRecord] = None,
                 alt_sv_shorthand: Optional[ShorthandSVRecord] = None):
        """Store the coordinates/alleles and keep *rec* for lazy loading."""
        self._rec = rec
        self.contig = contig
        self.pos = pos
        self.end = end
        self.length = length
        self.id = id
        self.ref = ref
        self.alt = alt
        self.qual = rec.qual
        self.filter = _build_filter(rec)
        self.variant_type = variant_type
        self.alt_sv_breakend = alt_sv_breakend
        self.alt_sv_shorthand = alt_sv_shorthand
        # Lazily-built caches; None means "not materialized yet" and lets
        # the *_str helpers fall back to the original record text.
        self._info = None
        self._format = None
        self._samples = None
    @property
    def info(self):
        """Additional information"""
        if self._info is None:
            self._info = _build_info(self._rec)
        return self._info
    @info.setter
    def info(self, value):
        self._info = value
    @property
    def format(self):
        """Specifies data types and order of the genotype information"""
        if self._format is None:
            self._format = _build_format(self._rec)
        return self._format
    @format.setter
    def format(self, value):
        self._format = value
    @property
    def samples(self):
        """Genotype information for each sample"""
        if self._samples is None:
            self._samples = _build_samples(self._rec)
        return self._samples
    @samples.setter
    def samples(self, value):
        self._samples = value
    def _replace(self, **kwargs):
        """Return a copy with the given attributes overridden (namedtuple-style)."""
        new_record = VariantRecord(self._rec, self.contig, self.pos, self.end,
                                   self.length, self.id, self.ref, self.alt,
                                   self.variant_type, self.alt_sv_breakend,
                                   self.alt_sv_shorthand)
        for key, value in kwargs.items():
            setattr(new_record, key, value)
        return new_record
    def _info_str(self, rec_str: List[str]) -> str:
        """Render the INFO column; reuses the original text when INFO was never materialized."""
        # If info has not been loaded, return the original info string
        if self._info is None and len(rec_str) > 7:
            return rec_str[7]
        info_list = []
        for key, value in self.info.items():
            info_str = _convert_info_key_value(key, value)
            if info_str is None:
                continue
            info_list.append(info_str)
        if self.alt_sv_shorthand:
            # Shorthand SVs carry an explicit END.  NOTE(review): if INFO
            # already contains an END key this prepends a duplicate -- confirm.
            info_list.insert(0, 'END='+str(self.end))
        info = ";".join(info_list)
        return info
    def _format_str(self, rec_str: List[str]) -> str:
        """Render the FORMAT column; reuses the original text when never materialized."""
        # If format has not been loaded, return the original format string
        if self._format is None and len(rec_str) > 8:
            return rec_str[8]
        return ":".join(self.format)
    def _samples_str(self, rec_str: List[str]) -> str:
        """Render the sample columns; reuses the original text when untouched."""
        # If samples and format have not been loaded, return the original samples string
        if self._samples is None and self._format is None and len(rec_str) > 9:
            return '\t'.join(rec_str[9:])
        samples_list = [":".join([_convert_sample_value(k, self.samples[sample_name][k])
                                  for k in self.format]) for sample_name in self.samples]
        samples = "\t".join(samples_list)
        return samples
    def __str__(self):
        """Render the record as a tab-separated VCF data line."""
        rec_str_split = str(self._rec).split('\t')
        contig = self.contig
        pos = self.pos
        id_ = self.id if self.id else '.'
        ref = self.ref
        alt = self.alt
        qual = _str_value(self.qual)
        filter_ = ";".join(map(str, self.filter)) if self.filter else '.'
        info = self._info_str(rec_str_split)
        format_ = self._format_str(rec_str_split)
        samples = self._samples_str(rec_str_split)
        return f'{contig}\t{pos}\t{id_}\t{ref}\t{alt}\t{qual}\t{filter_}\t{info}\t{format_}\t{samples}'.strip()
| EUCANCan/variant-extractor | src/variant_extractor/variants.py | variants.py | py | 8,031 | python | en | code | 3 | github-code | 36 |
35584359150 | from zipfile import ZipFile
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
import seaborn as sns
# Load the bike-sharing hourly dataset, indexed by the date column, and
# derive an integer hour-of-day feature from the datetime index.
bikes = pd.read_csv('bsd/hour.csv', index_col='dteday', parse_dates=True)
bikes['hour'] = bikes.index.hour
# NOTE(review): head()/tail() return values are discarded -- this reads like
# a converted notebook; harmless outside one.
bikes.head()
bikes.tail()
# - **hour** ranges from 0 (midnight) through 23 (11pm)
# - **workingday** is either 0 (weekend or holiday) or 1 (non-holiday weekday)
# ## Task 1
#
# Run these two `groupby` statements and figure out what they tell you about the data.
# mean rentals for each value of "workingday"
# sns.set(style='whitegrid', context='notebook')
# cols = ['hr', 'temp', 'atemp', 'hum', 'windspeed', 'cnt']
# sns.pairplot(bikes[cols], size=2.5)
# plt.show()
#WARNING: dont run code below(mem overflow)
# cm = np.corrcoef(bikes[cols])
#
# hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size':15}, yticklabels=cols, xticklabels=cols)
# plt.show()
# bikes.groupby('workingday').cnt.mean()
# mean rentals for each value of "hour"
# bikes.groupby('hour').cnt.mean()
# bikes.groupby(['holiday', 'season']).cnt.mean().unstack().plot()
# NOTE(review): 'casual' is presumably a component of the 'cnt' target in
# this dataset -- if so, regressing cnt on casual leaks the target and
# inflates R^2; confirm against the dataset's column descriptions.
feature_cols = ['casual']
X = bikes[feature_cols].values
y = bikes.cnt.values
# X = StandardScaler().fit(X.reshape(-1, 1)).transform(X.reshape(-1, 1))
# y_scaler = StandardScaler().fit(y.reshape(-1, 1))
# y = y_scaler.transform(y.reshape(-1, 1))
# Chronological hold-out: the last 5% of rows form the test set (no
# shuffling -- appropriate for time-ordered data).
X_len = len(X)
test_value = round(X_len * 0.05)
X_train, X_test = X[:-test_value], X[-test_value:]
y_train, y_test = y[:-test_value], y[-test_value:]
# Fit a simple one-feature linear regression and plot predictions over the
# held-out points.
linreg = linear_model.LinearRegression()
linreg.fit(X_train.reshape(-1,1), y_train.reshape(-1,1))
y_pred = linreg.predict(X_test.reshape(test_value, 1))
plt.scatter(X_test.reshape(-1,1), y_test, color='b')
plt.plot(X_test.reshape(-1,1), y_pred, color='red',linewidth=1)
plt.show()
# pred = linreg.predict(X_test)
#
# # scores = cross_val_score(linreg, X, y, cv=10, scoring='mean_squared_error')
#
# # The coefficients
print('Coefficients: \n', linreg.coef_)
# # The mean squared error
print("Mean squared error: %.2f"
      % mean_squared_error(y_test, y_pred))
# # Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y_test, y_pred))
#
# pass | Kamkas/Bike-Sharing-Data-Analysis | lr.py | lr.py | py | 2,416 | python | en | code | 0 | github-code | 36 |
70503976105 | from __future__ import division
class EstadisticaIII:
    """Empirical-distribution utilities: ECDF evaluation and a
    Kolmogorov-Smirnov statistic with an ECDF-vs-model plot.

    NOTE(review): KS() uses ``np`` and ``plt``, which are not imported in
    this file as shown -- confirm numpy/matplotlib imports exist.
    """
    def __init__(self,X,Y=[]):
        # NOTE(review): mutable default [] is shared across instances, and
        # Y is never used by Fn/KS -- consider Y=None.
        self.X = X
        self.Y = Y
        self.n = len(X)  # sample size
    def Fn(self,y):
        """Empirical CDF of sample X evaluated at y: fraction of X <= y."""
        return len([x for x in self.X if x<=y])/self.n
    def KS(self,F):
        """Return the Kolmogorov-Smirnov statistic of X against CDF callable F.

        Also plots the empirical CDF, the model CDF F, and marks the point
        of maximum discrepancy.
        """
        Xi = sorted(self.X)
        Dn = []
        for i in range(self.n):
            # Max of the one-sided gaps just before/after each order statistic.
            Dn.append(max((i+1)/self.n-F(Xi[i]),F(Xi[i])-i/self.n))
        D = max(Dn)
        K = np.argmax(Dn)  # index of the order statistic with the largest gap
        Femp = [self.Fn(x) for x in np.arange(min(self.X),max(self.X)+0.02,0.01)]
        F0 = [F(x) for x in np.arange(min(self.X),max(self.X)+0.02,0.01)]
        plt.plot(np.arange(min(self.X),max(self.X)+0.02,0.01),Femp)
        plt.plot(np.arange(min(self.X),max(self.X)+0.02,0.01),F0)
        plt.plot(Xi[K],F(Xi[K]),'g^')
        return D
32892688817 | import pickle
import pandas as pd
import mixmod
from mixmod import gm # gm contains global constants (enum items etc.)
def persist_strategy():
    """Build a mixmod strategy (5 tries, SEM then EM) and pickle it to /tmp."""
    # --------------------------------------
    # Strategy example :
    # - nbTry = 5
    # - algo 1 : SEM (nb_iteration = 80)
    # - algo 2 : EM (epsilon=0.0001)
    # Note : default algorithm is EM.
    # ------------------------------------
    sem_algo = mixmod.algo(name=gm.SEM, nb_iteration=80)
    em_algo = mixmod.algo(name=gm.EM, epsilon=0.0001)
    chained_strategy = mixmod.strategy(nb_try=5, algo=[sem_algo, em_algo])
    with open("/tmp/pymixmod_persist_strategy.pkl", "wb") as fd:
        pickle.dump(chained_strategy, fd)
def clustering_with_persisted_strategy():
    """Load the pickled strategy and run mixmod clustering on the iris data."""
    folder_data = "data/"
    frame = pd.read_csv(folder_data + "iris.train")
    # Exclude the labels found on the 5th column.
    frame = frame.iloc[:, :4]
    with open("/tmp/pymixmod_persist_strategy.pkl", "rb") as fd:
        saved_strategy = pickle.load(fd)
    # nb_cluster lists the cluster counts to test: assume 2, 3 or 4 clusters.
    return mixmod.cluster(frame, nb_cluster=[2, 3, 4], strategy=saved_strategy)
if __name__ == "__main__":
print(
"-----------------------------------------------------------------------\n"
"Clustering example : \n"
" - iris data (quantitative data)\n"
" - strategy :\n"
" - nbTry = 5 \n"
" - 2 chained algorithms\n"
" - SEM (80 iterations)\n"
" - EM (epsilon=0.0001)\n"
"-----------------------------------------------------------------------\n"
)
persist_strategy()
print(clustering_with_persisted_strategy().summary())
| mixmod/mixmod | Pymixmod/examples/persistence/persist_strategy.py | persist_strategy.py | py | 1,692 | python | en | code | 3 | github-code | 36 |
31412134180 | import sys
# Fast input: rebind input() to sys.stdin.readline (note: shadows the builtin).
input = sys.stdin.readline
# Global counter accumulated by merge_sort (bubble-sort swap count).
result = 0
def merge_sort(s, e):  # merge sort
    """Merge-sort A[s..e] in place while accumulating in the global `result`
    the number of swaps a bubble sort would have performed (inversion count).
    """
    global result
    if e-s < 1: return
    m = int(s+(e-s) / 2)
    merge_sort(s, m)
    merge_sort(m+1, e)
    # Copy the current range into the scratch buffer before merging back.
    for i in range(s, e+1):
        tmp[i] = A[i]
    k = s  # next write position in A during the merge
    index1 = s
    index2 = m+1
    while index1 <= m and index2 <= e:
        if tmp[index1] > tmp[index2]:
            A[k] = tmp[index2]  # take from the right half
            # index2 - k: how many left-half elements this value jumped over
            # (i.e. the swaps a bubble sort would have made).
            result = result + index2 - k
            k += 1
            index2 += 1
        else:
            A[k] = tmp[index1]  # take from the left half
            k += 1
            index1 += 1
    # Drain whichever half still has elements left.
    while index1 <= m:
        A[k] = tmp[index1]
        k += 1
        index1 += 1
    while index2 <= e:
        A[k] = tmp[index2]
        k += 1
        index2 += 1
# Read N and the array, then print the bubble-sort swap count.
N = int(input())
A = list(map(int, input().split()))
# 1-index the array: A[0] is a dummy so positions run 1..N.
A.insert(0,0)
tmp = [0] * int(N+1)
merge_sort(1, N)
print(result)
| ZzinB/Algorithm_Study | ๋ฐฑ์ค/Platinum/1517.โ
๋ฒ๋ธโ
์ํธ/๋ฒ๋ธโ
์ํธ.py | ๋ฒ๋ธโ
์ํธ.py | py | 1,121 | python | ko | code | 0 | github-code | 36 |
71910544423 | from collections import OrderedDict
from Models.Utils.FRRN_utils import *
class FRRNet(nn.Module):
    """
    implementation table A of Full-Resolution Residual Networks

    Two-stream design: `y` is the pooled/residual stream and `z` is the
    full-resolution 32-channel stream produced by `divide`; FRRLayer takes
    and returns both streams (see FRRN_utils for its exact semantics).
    """
    def __init__(self, in_channels=3, out_channels=21, layer_blocks=(3, 4, 2, 2)):
        super(FRRNet, self).__init__()
        # 5x5 stem convolution -> 48 channels
        self.first = nn.Sequential(
            OrderedDict([
                ('conv', nn.Conv2d(in_channels=in_channels, out_channels=48, kernel_size=5, padding=2)),
                ('bn', nn.BatchNorm2d(48)),
                ('relu', nn.ReLU()),
            ]))
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        self.relu = nn.ReLU()
        # 3 x 48-channel Residual Units
        self.reslayers_in = nn.Sequential(*[BasicBlock(48, 48, efficient=False) for _ in range(3)])
        # divide: split off the 32-channel full-resolution stream (z)
        self.divide = nn.Conv2d(in_channels=48, out_channels=32, kernel_size=1)
        # Encoder FRR layers; `factor` tracks the downsampling of the y stream.
        # frrlayer 1
        self.frrnlayer1 = FRRLayer(48, 96, factor=2, num_blocks=layer_blocks[0])
        # frrlayer2
        self.frrnlayer2 = FRRLayer(96, 192, factor=4, num_blocks=layer_blocks[1])
        # frrnlayer3
        self.frrnlayer3 = FRRLayer(192, 384, factor=8, num_blocks=layer_blocks[2])
        # frrnlayer4
        self.frrnlayer4 = FRRLayer(384, 384, factor=16, num_blocks=layer_blocks[3])
        # Decoder FRR layers (mirrored, fixed at 2 blocks each).
        # defrrnlayer1
        self.defrrnlayer1 = FRRLayer(384, 192, factor=8, num_blocks=2)
        # defrrnlayer2
        self.defrrnlayer2 = FRRLayer(192, 192, factor=4, num_blocks=2)
        # defrrnlayer3
        self.defrrnlayer3 = FRRLayer(192, 96, factor=2, num_blocks=2)
        # join: fuse the upsampled y stream (96 ch) with the z stream (32 ch)
        self.compress = nn.Conv2d(96 + 32, 48, kernel_size=1)
        # 3 x 48-channel residual units on the fused full-resolution map
        self.reslayers_out = nn.Sequential(*[BasicBlock(48, 48, efficient=True) for _ in range(3)])
        self.out_conv = nn.Conv2d(48, out_channels, 1)
    def forward(self, x):
        # Stem + input residual units at full resolution.
        x = self.first(x)
        y = self.reslayers_in(x)
        # z keeps full resolution; y is pooled down through the encoder.
        z = self.divide(y)
        y = self.pool(y)
        y, z = self.frrnlayer1(y, z)
        y = self.pool(y)
        y, z = self.frrnlayer2(y, z)
        y = self.pool(y)
        y, z = self.frrnlayer3(y, z)
        y = self.pool(y)
        y, z = self.frrnlayer4(y, z)
        # Decoder: upsample y back up while refining z at each scale.
        y = self.up(y)
        y, z = self.defrrnlayer1(y, z)
        y = self.up(y)
        y, z = self.defrrnlayer2(y, z)
        y = self.up(y)
        y, z = self.defrrnlayer3(y, z)
        y = self.up(y)
        # Fuse both streams and produce per-pixel class logits.
        refine = self.compress(torch.cat((y, z), 1))
        out = self.reslayers_out(refine)
        out = self.out_conv(out)
        return out
| akshatgarg99/FRR-Net | Models/FRRN.py | FRRN.py | py | 2,658 | python | en | code | 0 | github-code | 36 |
13320431649 | import os
import re
# Interactive regex playground: repeatedly read a pattern from the user and
# show search()/findall() results against the fixed sample string.
# NOTE: `clear` shells out to the Unix `clear` command (not portable to Windows).
clear = lambda: os.system('clear')
data = "AAAX---aaax---111"
exp = "\w+(?=X), \w+, \w+(?=\w)"
# Korean explanation of lookahead assertions, printed as a hint to the user.
txt='''\
\w+ : ๋ฌธ์(\w),1์ด์(+)
(?=X) : ๋ค์ ๋ฐ๋ผ์ค๋(?=) X๋ผ๋ ๊ฒ์ ๋ฌธ์์ด ๊ฒ์์ ์ฐ์ง๋ง ๋งค์น๋ ๋์ง ์์
\w+(?=X) : AAAX์์ ๋ฌธ์๋ค์ด ๋งค์น๋์ง๋ง AAA๋ง ๋งค์น๋จ X๋ ์ ์ธ ์ํด
X๋ X ์์ ๋จ์ด AAA๋ฅผ ์ฐพ๋๋ฐ๋ ๊ธฐ์ฌํ์ง๋ง ์์ ์ ๋งค์น๋์ง ์์
\w+ : (?=X)์์ด ํ๊ฒ๋๋ฉด AAAX,aaax,111๊ณผ ๋งค์น๋จ
\w+(?=\w) : AAA,aaa,11๊ณผ ๋งค์น๋จ
(?=\w) : ์กฐ๊ฑด์ ๋ฌธ์๋ฅผ ๋ง์กฑ์ํค๋ฉด ์์ ์ ์ฐพ๋๋ฐ๋ง ๊ธฐ์ฌํ๊ณ ๋งค์น๋ ์๋จ
'''
print(data)
print(txt)
while True:
    # Compile whatever the user typed and show both match styles.
    output = input('{} >>> '.format(exp))
    regex = re.compile(output)
    a = regex.search(data)
    b = regex.findall(data)
    print('search :',a)
    print('findall :',b)
    # Empty input clears the screen and exits the loop.
    if output == '':
        clear()
        break
31981774021 | #!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
    """Divide my_list_1 by my_list_2 element by element over list_length slots.

    Failed divisions (zero divisor, bad types, missing index) print a short
    message and contribute 0 to the result list instead of raising.
    """
    results = []
    for idx in range(list_length):
        quotient = 0
        try:
            quotient = my_list_1[idx] / my_list_2[idx]
        except ZeroDivisionError:
            print("division by 0")
            quotient = 0
        except (TypeError, ValueError):
            print("wrong type")
            quotient = 0
        except IndexError:
            print("out of range")
            quotient = 0
        finally:
            results.append(quotient)
    return results
| osamuflair/alx-higher_level_programming | 0x05-python-exceptions/4-list_division.py | 4-list_division.py | py | 506 | python | en | code | 0 | github-code | 36 |
37664896770 | from page.E_confirm_order_page import ConfirmOrderPage
class MyOrderPage(ConfirmOrderPage):
    """Page object for the "My Orders" screen of the TPShop app."""
    # Locators: (strategy, value) tuples consumed by the base-page helpers.
    order_sn_loc = ('id', 'com.tpshop.malls:id/order_sn_tv')  # order number text
    to_be_received_loc = ('id','com.tpshop.malls:id/status_receive_tv')  # "to be received" tab
    back_loc = ('id','com.tpshop.malls:id/title_back_img')  # back icon
    confirm_received_loc = ('id','com.tpshop.malls:id/id_index_gallery_item_button')  # confirm-receipt button
    positive_button_loc = ('id','com.tpshop.malls:id/positiveButton')  # "OK" button of the confirm dialog
    cart_loc = ('id','com.tpshop.malls:id/bottom_cart_img')  # shopping-cart icon
    def get_order_sn(self):
        """Return the displayed order number text."""
        return self.get_ele_text(self.order_sn_loc)
    def click_to_be_received(self):
        """Tap the "to be received" tab."""
        self.click(self.to_be_received_loc)
    def click_confirm_received(self):
        """Tap the "confirm receipt" button."""
        self.click(self.confirm_received_loc)
    def click_positive_Button(self):
        """Tap the "OK" button of the confirmation dialog."""
        self.click(self.positive_button_loc)
    def click_back(self):
        """Tap the back icon."""
        self.click(self.back_loc)
    def click_cart(self):
        """Tap the shopping-cart icon."""
        self.click(self.cart_loc)
if __name__ == '__main__':
    # Manual end-to-end walkthrough: log in, search for a product, buy it
    # with balance payment, then read back the new order's number.
    from common.base_app import open_app
    from time import sleep
    driver = open_app()  # launch TPShop; lands on the login page
    confirm = ConfirmOrderPage(driver)
    confirm.input_account_num('13730626896')  # enter account number
    confirm.input_password('123456')  # enter password
    confirm.click_confirm_login()  # tap login
    confirm.wait_page()  # wait for the page to load
    confirm.click_search()  # tap the search box
    confirm.input_search_content('ๅฎนๅฃฐๅฐ็ฎฑ')  # enter the product search term
    confirm.click_search_button()  # tap the search button
    confirm.wait_page()  # wait for the page to load
    sleep(2)
    confirm.click_RSfridge()  # open the product from the results
    confirm.wait_page()  # wait for the page to load
    sleep(3)
    confirm.click_buy()  # tap "buy now"
    confirm.click_confrim_buy()  # confirm the purchase
    confirm.click_address_RS()  # choose the delivery address
    confirm.choose_consignee_RS()  # select the consignee
    confirm.click_order_balance_RS()  # pay with account balance
    confirm.wait_page()  # wait for the page to load
    confirm.click_sub_order_RS()  # submit the order
    confirm.input_pay_pwd_RS('123456')  # enter the payment password
    confirm.click_confirm_pay_pwd_RS()  # confirm the payment password
    order = MyOrderPage(driver)
    sn = order.get_order_sn()  # read the order number from the orders page
    print(sn)
    sleep(3)
    confirm.quit()
| 15008477526/- | APP_aaaaaaaa/page/F_my_order.py | F_my_order.py | py | 2,633 | python | en | code | 0 | github-code | 36 |
25947138248 | # Definition for a Node.
class Node:
    """Graph node holding a value and a list of adjacent nodes."""
    def __init__(self, val = 0, neighbors = None):
        self.val = val
        # Default to a fresh list so instances never share neighbor storage.
        if neighbors is None:
            self.neighbors = []
        else:
            self.neighbors = neighbors
class Solution:
    def cloneGraph(self, node: 'Node') -> 'Node':
        """Deep-copy the undirected graph reachable from *node* (iterative DFS).

        Returns the clone of *node*, or *node* itself when it is falsy.
        """
        if not node:
            return node
        # Map original -> clone; doubles as the visited set.
        clones = {node: Node(node.val, [])}
        pending = [node]
        while pending:
            current = pending.pop()
            for neighbor in current.neighbors:
                if neighbor not in clones:
                    clones[neighbor] = Node(neighbor.val, [])
                    pending.append(neighbor)
                clones[current].neighbors.append(clones[neighbor])
        return clones[node]
| dzaytsev91/leetcode-algorithms | medium/133_clone_graph.py | 133_clone_graph.py | py | 694 | python | en | code | 2 | github-code | 36 |
4940941702 | import pandas as pd
# Root folder containing per-season FPL data dumps.
dataPath = 'data/'
# maps for converting BPS points
# Outer key = player's original position, inner key = hypothetical new
# position; the value is the per-event delta applied when re-scoring.
scoringBpsMap = {'FWD': {'GKP': -12, 'DEF': -12, 'MID': -6, 'FWD': 0},
                 'MID': {'GKP': -6, 'DEF': -6, 'MID': 0, 'FWD': 6},
                 'DEF': {'GKP': 0, 'DEF': 0, 'MID': 6, 'FWD': 12},
                 'GKP': {'GKP':0, 'DEF': 0, 'MID': 6, 'FWD': 12}}
cleanSheetBpsMap = {'FWD': {'GKP': 12, 'DEF': 12, 'MID': 0, 'FWD': 0},
                    'MID': {'GKP': 12, 'DEF': 12, 'MID': 0, 'FWD': 0},
                    'DEF': {'GKP': 0, 'DEF': 0, 'MID': -12, 'FWD': -12},
                    'GKP': {'GKP': 0, 'DEF': 0, 'MID': -12, 'FWD': -12}}
# maps for converting absolute points
scoringMap = {'FWD': {'GKP': 2, 'DEF': 2, 'MID': 1, 'FWD': 0},
              'MID': {'GKP': 1, 'DEF': 1, 'MID': 0, 'FWD': -1},
              'DEF': {'GKP': 0, 'DEF': 0, 'MID': -1, 'FWD': -2},
              'GKP': {'GKP': 0, 'DEF': 0, 'MID': -1, 'FWD': -2}}
cleanSheetMap = {'FWD': {'GKP': 4, 'DEF': 4, 'MID': 1, 'FWD': 0},
                 'MID': {'GKP': 3, 'DEF': 3, 'MID': 0, 'FWD': -1},
                 'DEF': {'GKP': 0, 'DEF': 0, 'MID': -3, 'FWD': -4},
                 'GKP': {'GKP': 0, 'DEF': 0, 'MID': -3, 'FWD': -4}}
goalsConcededMap = {'FWD': {'GKP': -1, 'DEF': -1, 'MID': 0, 'FWD': 0},
                    'MID': {'GKP': -1, 'DEF': -1, 'MID': 0, 'FWD': 0},
                    'DEF': {'GKP': 0, 'DEF': 0, 'MID': 1, 'FWD': 1},
                    'GKP': {'GKP': 0, 'DEF': 0, 'MID': 1, 'FWD': 1}}
def getGw(seasonString, gwInt):
    """Load the per-player gameweek CSV for season *seasonString*, week *gwInt*."""
    path = f'{dataPath}{seasonString}/gws/gw{gwInt}.csv'
    return pd.read_csv(path)
def getGwFixtures(playerID, df):
    """Return the fixture ids *playerID* appears in within gameweek frame *df*."""
    player_rows = df['element'] == playerID
    return df.loc[player_rows, 'fixture'].tolist()
def getGwFixtureInfo(df, fixture):
    """Return the rows of *df* for *fixture*, indexed by player id ('element')."""
    fixture_rows = df[df['fixture'] == fixture]
    return fixture_rows.set_index('element')
def recalculateFixtureBonus(df, playerID, newPos):
oldPos = df.loc[playerID].position
BPS = df.loc[playerID].bps
if oldPos == newPos:
return 0
BPS += df.loc[playerID].clean_sheets * cleanSheetBpsMap[oldPos][newPos]
BPS += df.loc[playerID].goals_scored * scoringBpsMap[oldPos][newPos]
df.loc[playerID, 'bps'] = BPS
try:
newBonus = df.nlargest(3, 'bps', keep='all')['bps'].rank(method='max').loc[playerID]
return int(newBonus - df.loc[playerID].bonus)
except KeyError:
return 0
def recalculateFixturePoints(df, playerID, newPos):
oldPos = df.loc[playerID].position
points = df.loc[playerID].total_points
if oldPos == newPos:
return 0
points += (df.loc[playerID].clean_sheets * cleanSheetMap[oldPos][newPos])
points += (df.loc[playerID].goals_scored * scoringMap[oldPos][newPos])
points += ((df.loc[playerID].goals_conceded // 2) * goalsConcededMap[oldPos][newPos])
points += recalculateFixtureBonus(df, playerID, newPos)
return points
def recalculateTotalPoints(seasonString, playerID, newPos):
newPoints = 0
oldPoints = 0
for i in range(1, 39):
gw = getGw(seasonString, i)
fixtureList = getGwFixtures(playerID, gw)
for fixture in fixtureList:
fx = getGwFixtureInfo(gw, fixture)
newPoints += recalculateFixturePoints(fx, playerID, newPos)
oldPoints += fx.loc[playerID].total_points
return {'old': oldPoints, 'new': newPoints, }
if __name__ == "__main__":
print(f"Salah (MID to FWD): {recalculateTotalPoints(seasonString='2021-22', playerID=233, newPos='FWD')}")
print(f"Jota (MID to FWD): {recalculateTotalPoints(seasonString='2021-22', playerID=240, newPos='FWD')}")
print(f"Havertz (MID to FWD): {recalculateTotalPoints(seasonString='2021-22', playerID=141, newPos='FWD')}")
print(f"Dallas (MID to DEF): {recalculateTotalPoints(seasonString='2021-22', playerID=188, newPos='DEF')}")
print(f"Joelinton (FWD to MID): {recalculateTotalPoints(seasonString='2021-22', playerID=310, newPos='MID')}")
print(f"Saint-Maximan (FWD to MID): {recalculateTotalPoints(seasonString='2021-22', playerID=307, newPos='MID')}")
print(f"Kouyate (DEF to MID): {recalculateTotalPoints(seasonString='2021-22', playerID=150, newPos='MID')}")
| vaastav/Fantasy-Premier-League | new_position_checker.py | new_position_checker.py | py | 4,137 | python | en | code | 1,277 | github-code | 36 |
7803255026 | import pprint
def log(file_name="default.log", msg="Default text"):
"""
Logs the specified message to specified file.
Overwrites the whole file each time
"""
f = open(file_name, "w+")
pretty = pprint.pformat(msg)
f.write(pretty)
f.close()
def perm_log(file_name="default.log", title="Default title", msg="Default text"):
"""
Logs the specified message to specified file.
Appends to the file each time
"""
f = open(file_name, "a")
f.write(3*"\n"+str(title)+"\n"+80*"-"+"\n")
f.write(msg)
f.close()
| errnox/ncurses-shenanigans | logger.py | logger.py | py | 564 | python | en | code | 0 | github-code | 36 |
34998353566 | """
Created on Thu Mar 17 16:34:46 2022
โ
@author: svein
"""
import speech_recognition as sr
import sounddevice as sd
from scipy.io.wavfile import write
import os
import ffmpeg
from scipy.io import wavfile
import numpy as np
def Speech_to_text():
myfile="output.wav"
## If file exists, delete it ##
if os.path.isfile(myfile):
os.remove(myfile)
else: ## Show an error ##
print("Error: %s file not found" % myfile)
##
print("recording start")
fs = 44100 # Sample rate
seconds = 4 # Duration of recording
sd.default.dtype='int32', 'int32'
myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)
sd.wait() # Wait until recording is finished
print("recording ended")
wavfile.write("output.wav", fs, myrecording) # Save as WAV file
def SpeechToText():
r = sr.Recognizer() #Speech recognition
audio = sr.AudioFile("output.wav")
with audio as source:
print("Wait. Program Starting")
audio = r.record(source)
message = r.recognize_google(audio)
print("Check: "+message)
return message
Ord=SpeechToText()
return Ord
if __name__ == "__main__":
print(Speech_to_text()) | klarahi/Fuzzy_project | voice_recognition.py | voice_recognition.py | py | 1,245 | python | en | code | 0 | github-code | 36 |
30629658909 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import json, urllib
import plotly.graph_objects as go
import pandas as pd
import numpy as np
# In[2]:
asi_measures = pd.read_csv('final-data.csv')
asi_measures.head()
# In[4]:
all_nodes = asi_measures.Category.values.tolist() + asi_measures.ASI.values.tolist()
source_indices = [all_nodes.index(Category) for Category in asi_measures.Category]
target_indices = [all_nodes.index(ASI) for ASI in asi_measures.ASI]
fig = go.Figure(data=[go.Sankey(
node = dict(
pad = 20,
thickness = 20,
line = dict(color = "black", width = 1.0),
label = all_nodes,
),
link = dict(
source = source_indices,
target = target_indices,
value = asi_measures.Count,
))])
fig.update_layout(title_text="Transport mitigation actions in the context of Avoid, Shift and Improve",
font_size=10)
fig.show()
| nikolamedi/sankey-diagram | Sankey diagram with plotly.py | Sankey diagram with plotly.py | py | 912 | python | en | code | 0 | github-code | 36 |
38807417619 | import csv
''' def leer_parque(nombre_archivo, parque):
f = open(nombre_archivo)
filas = csv.reader(f)
data = []
encabezado = next(filas)
for fila in filas:
registro = dict(zip(encabezado, fila))
if registro['espacio_ve'] == parque:
data.append(registro)
return data
print(len(leer_parque('arbolado-en-espacios-verdes.csv', 'GENERAL PAZ'))) '''
def leer_parque(nombre_archivo, parque):
f = open(nombre_archivo)
filas = csv.reader(f)
data = []
encabezado = next(filas)
for fila in filas:
registro = dict(zip(encabezado, fila))
if registro['espacio_ve'] == parque:
registro['altura_tot'] = float(registro['altura_tot'])
data.append(registro)
return data
#print(leer_parque('arbolado-en-espacios-verdes.csv', 'GENERAL PAZ'))
def obtener_alturas(lista_arboles, especie):
lista_de_alturas = []
for arbol in lista_arboles:
if arbol['nombre_com'] == especie:
altura = arbol['altura_tot']
lista_de_alturas.append(altura)
return lista_de_alturas
gral_paz = [leer_parque('arbolado-en-espacios-verdes.csv', 'GENERAL PAZ'), 'General Paz']
los_andes = [leer_parque('arbolado-en-espacios-verdes.csv', 'EJERCITO DE LOS ANDES'),'Los Andes']
centenario = [leer_parque('arbolado-en-espacios-verdes.csv', 'CENTENARIO'),'Centenario']
parques = [gral_paz, los_andes, centenario]
max_arbol = []
proms = []
nombres = []
for parque in parques:
data = obtener_alturas(parque[0], 'Jacarandรก')
max_arbol.append(max(data))
proms.append(sum(data)/len(data))
nombres.append(parque[1])
# print('Medida', nombres)
# print("max", max_arbol)
# print("prom", proms)
| AxelBelbrun/TP_LaboDeDatos | ejercicio_1&4.py | ejercicio_1&4.py | py | 1,766 | python | es | code | 0 | github-code | 36 |
73550814184 | #-*- coding: utf-8 -*-
'''
1. ๆทปๅ ็ณป๏ผadddepartment
2. ๆทปๅ ็ญ็บง๏ผaddclass
3. ๅ ้ค็ณป๏ผdeldepartment
4. ๅ ้ค็ญ็บง๏ผdelclass
5. ็ผ่พ็ณป๏ผeditdepartment
6. ็ผ่พ็ญ็บง๏ผeditclass
'''
from django.shortcuts import render_to_response
from django.template import RequestContext, Template, Context
from classes.models import Class, Department
from teachers.models import Teacher
from students.models import Student
from django.http import HttpResponse, Http404, HttpResponseRedirect
import datetime
import json
import logging
logger = logging.getLogger('mysite.log')
def adddepartment(request):
if not 'username' in request.session:
return HttpResponseRedirect('/Login/')
deptls = Department.objects.order_by('id')
classls = Class.objects.order_by('id')
judgeadd=0
errors = []
if request.method == 'POST':
if not request.POST.get('deptname',''):
errors.append('deptname')
for dept in deptls:
if(dept.deptname==request.POST['deptname']):
judgeadd=1
break;
if(judgeadd==0):
dept = Department(deptname=request.POST['deptname'],teacherid_id= request.session['userid'],createtime=datetime.datetime.now(),edittime=datetime.datetime.now())
dept.save()
deptls = Department.objects.order_by('id')
departments = Department.objects.order_by('id')
return HttpResponseRedirect('/students/')
def addclass(request):
if not 'username' in request.session:
return HttpResponseRedirect('/Login/')
deptls = Department.objects.order_by('id')
classls = Class.objects.order_by('id')
judgeadd=0
errors = []
if request.method == 'POST':
if not request.POST.get('claname',''):
errors.append('claname')
for clas in classls:
if(clas.claname == request.POST['claname']):
if(clas.grade == int(request.POST['grade'])):
if(clas.departmentid_id ==int(request.POST['deptid'])):
judgeadd=1
break;
if(judgeadd==0):
cla = Class(claname=request.POST['claname'],departmentid_id=int(request.POST['deptid']),grade=int(request.POST['grade']),teacherid_id= request.session['userid'],createtime=datetime.datetime.now(),edittime=datetime.datetime.now())
cla.save()
deptls = Department.objects.order_by('id')
classes = Class.objects.order_by('id')
return HttpResponseRedirect('/students/')
def delclass(request, did):
global logger
if not 'username' in request.session:
return HttpResponseRedirect('/Login/')
error = ''
try:
did = int(did)
cla = Class.objects.get(id=did)
except ValueError:
logger.error("classes")
raise Http404()
cla.delete()
return HttpResponseRedirect('/students/')
def deldepartment(request, did):
global logger
if not 'username' in request.session:
return HttpResponseRedirect('/Login/')
error = ''
try:
did = int(did)
dept = Department.objects.get(id=did)
except ValueError:
logger.error("classes")
raise Http404()
dept.delete()
return HttpResponseRedirect('/students/')
def editdepartment(request, did):
global logger
if not 'username' in request.session:
return HttpResponseRedirect('/Login/')
errors = []
deptls = Department.objects.order_by('id')
classls = Class.objects.order_by('id')
judgeadd=0
try:
did = int(did)
dept= Department.objects.get(id=did)
except ValueError:
logger.error("classes")
raise Http404()
if request.method == 'POST':
if not request.POST.get('deptname',''):
errors.append('deptname')
for deptl in deptls:
if(deptl.deptname==request.POST['deptname']):
if(dept.deptname!=request.POST['deptname']):
judgeadd=1
break;
if(judgeadd==0):
dept.deptname = request.POST['deptname']
dept.edittime=datetime.datetime.now()
dept.save()
deptls = Department.objects.order_by('id')
departments = Department.objects.order_by('id')
return HttpResponseRedirect('/students/')
def editclass(request, did):
global logger
if not 'username' in request.session:
return HttpResponseRedirect('/Login/')
errors = []
deptls = Department.objects.order_by('id')
classls = Class.objects.order_by('id')
judgeadd=0
try:
did = int(did)
clas=Class.objects.get(id=did)
except ValueError:
logger.error("classes")
raise Http404()
if request.method == 'POST':
if not request.POST.get('claname',''):
errors.append('claname')
for clasl in classls:
if(clasl.claname == request.POST['claname']):
if(clasl.grade == int(request.POST['grade'])):
if(clasl.departmentid_id ==int(request.POST['deptid'])):
judgeadd=1
break;
if(judgeadd==0):
clas.claname = request.POST['claname']
clas.grade = request.POST['grade']
clas.departmentid_id = request.POST['deptid']
clas.edittime=datetime.datetime.now()
clas.save()
deptls = Department.objects.order_by('id')
classes = Class.objects.order_by('id')
return HttpResponseRedirect('/students/')
def deptnamecheck(request):
departments = Department.objects.order_by('-id')
judgedeptname= 0
if request.method == 'POST':
deptdeptname=request.POST['deptname']
# print deptdeptname
for dept in departments:
if (dept.deptname == deptdeptname):
judgedeptname=1
break
data={}
data["judgedeptname"]=judgedeptname
return HttpResponse(json.dumps(data))
def clanamecheck(request):
classes = Class.objects.order_by('-id')
judgeclaname= 0
if request.method == 'POST':
cladeptid= int(request.POST['deptid'])
clagrade = int(request.POST['grade'])
claclaname = request.POST['claname']
for cla in classes:
if (cla.claname==claclaname):
if(cla.grade==clagrade):
#print cla.departmentid_id
#print cladeptid
if(cla.departmentid_id==cladeptid):
judgeclaname=1
break
data={}
data["judgeclaname"]=judgeclaname
return HttpResponse(json.dumps(data))
| Luokun2016/QuickSort | classes/views.py | views.py | py | 7,941 | python | en | code | 0 | github-code | 36 |
7822031293 | import sys
import heapq
input = sys.stdin.readline
N = int(input())
arr = []
#์
๋ ฅ
for _ in range(N):
arr.append(list(map(int, input().split())))
#ํ์์๊ฐ ๊ธฐ์ค ์ ๋ ฌ
arr.sort(key=lambda x: x[0])
rooms = [0]
answer = 1
for s, e in arr:
# ๊ฐ์ฅ ๋น ๋ฅธ ํ์ ์ข
๋ฃ ์๊ฐ๋ณด๋ค array์ ํ์ ์์ ์๊ฐ์ด ๋ฆ๊ฑฐ๋ ๊ฐ๋ค๋ฉด
if s >= rooms[0]:
# ๊ทธ ํ์ ์๊ฐ์ popํด์ค ๋ค์ ์๋ก์ด ์ข
๋ฃ ์๊ฐ ์ฝ์
heapq.heappop(rooms)
else:
# ๊ฐ์ฅ ๋น ๋ฅธ ํ์ ์ข
๋ฃ ์๊ฐ๋ณด๋ค ๋น ๋ฅด๋ค๋ฉด ํ์์ค ์ถ๊ฐ ํ ์ข
๋ฃ ์๊ฐ ์ฝ์
answer+=1
# ์ฝ์
๋ถ : ์ด๋ ๊ฒ ํ๋ฉด ์ฌ๋ฌ ๊ฐ๋ฅผ ๊ด๋ฆฌํ ์ ์์
heapq.heappush(rooms, e)
print(answer) | Drizzle03/baekjoon_coding | 20230123/19598_heapq.py | 19598_heapq.py | py | 736 | python | ko | code | 0 | github-code | 36 |
12685785417 | import itertools
from itertools import izip, cycle
import os
import string
import glob
from moduleBaseClass import ModuleBaseClass
class XorStuff:
def __init__(self, filepath=None):
"""Constructor : set xored file (optional)
"""
self.file_type = None
self.list_types = self.load_files_types('modules/')
if filepath is not None:
self.file_content = self.set_file_content(filepath)
def load_files_types(self, path):
"""Load all modules from modules/ and make them available
"""
list_types = {}
files = glob.glob(path + "*")
for file in files:
file_name, file_extension = os.path.splitext(file)
if not file_name.endswith("__init__") and file_extension == ".py":
module_name = file_name.replace("/", ".")
mod = __import__(module_name)
modules = module_name.split('.')
for module in modules[1:]:
mod = getattr(mod, module)
if issubclass(mod.Module, ModuleBaseClass):
instance = mod.Module()
list_types[instance.name] = instance
return list_types
def xor(self, data, key, file_type=None):
"""Perform a simple xor with data and key
file_type is an instance of modules and provide file checking
"""
result = []
for data, char_key in izip(data, cycle(key)):
byte = chr(ord(data) ^ ord(char_key))
if file_type is not None:
if not file_type.live_check(byte):
return None
result.append(byte)
return ''.join(result)
def set_file_content(self, filepath, length=None):
"""Open xored file and store content
Optional : can store n bytes only
"""
bin_file = ''
with open(filepath, "rb") as f:
byte = f.read(1)
index = 0
while byte != "":
bin_file = bin_file + byte
byte = f.read(1)
if length is not None:
if index == length:
break
index = index + 1
self.file_content = bin_file
def get_pass(self, key_length, grep=None):
"""Try to recover key(s) for a given length and yield them
Optional : can grep bytes in result
"""
# Padding of header with %s if key length > header length
if int(key_length) > len(self.file_type.header):
header_no_formatters = self.file_type.header.replace('%s', '?')
formatters = '%s' * (int(key_length) - len(header_no_formatters))
self.file_type.header = "%s%s" % (self.file_type.header,
formatters)
bf_length = self.file_type.header.count('%s')
header_length = len(self.file_type.header.replace('%s', '?'))
bin_header = self.file_content[:header_length]
charset = ''.join([chr(i) for i in range(128)])
key_charset = string.ascii_letters + string.digits + string.punctuation
# generate keys
for char in itertools.product(charset, repeat=bf_length):
generated_header = self.file_type.header % char
output = self.xor(bin_header, generated_header)
key = output[0: key_length]
if not [c for c in key if c not in key_charset]:
raw = self.xor(self.file_content, key, self.file_type)
if raw is not None:
if self.file_type.final_check(raw):
if grep is not None:
if grep in raw:
yield key
else:
yield key
def set_file_type(self, file_type):
"""Load correct file type module according to file extension name
"""
self.file_type = self.list_types[file_type]
| tengwar/xorstuff | xorstuff.py | xorstuff.py | py | 4,032 | python | en | code | 0 | github-code | 36 |
10663941067 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 21 16:38:00 2016
@author: Neo
Oct 25, 2016: updated by Niu.
"""
import numpy as np
import matplotlib.pyplot as plt
res_dir = '../results/'
dat_fil = ['OVrot.dat', 'GRrot.dat']
#glide for the three special catalog
w3 = np.loadtxt(res_dir + 'SpecialSets.rot', usecols=(1,3,5,7))
Num = np.loadtxt(res_dir + dat_fil[0], usecols=(0,), dtype=int)
W, W_E, WX, WX_E, WY, WY_E, WZ, WZ_E = \
np.loadtxt(res_dir + dat_fil[0], usecols=list(range(1,9)), unpack=True)
Wg, W_Eg, WXg, WX_Eg, WYg, WY_Eg, WZg, WZ_Eg = \
np.loadtxt(res_dir + dat_fil[1], usecols=list(range(1,9)), unpack=True)
i = Num
y = np.ones(Num.size)
#############################################################################
#plot, writted a year ago.
#i = range(100,len(Sou)+1)
#y = np.ones(len(i))
#
#fig, ax = plt.subplots(2, 2)
#((ax1, ax2), (ax3, ax4)) = ax
#
#ax1.plot(i, WX, 'b')
##ax1.plot(i, WXg, 'r')
#ax1.set_ylabel('$\omega_x$',fontsize = 25)
#ax1.plot(i, w3[0][1]*y, ':', label = '212 ICRF' )
#ax1.plot(i, w3[1][1]*y, '-.', label = '295 ICRF' )
#ax1.plot(i, w3[2][1]*y, '--', label = '247 MFV' )
#
#ax2.plot(i, WY, 'b')
##ax2.plot(i, WYg, 'r')
#ax2.set_ylabel('$\omega_y$',fontsize = 25)
#ax2.plot(i, w3[0][2]*y, ':', label = '212 ICRF' )
#ax2.plot(i, w3[1][2]*y, '-.', label = '295 ICRF' )
#ax2.plot(i, w3[2][2]*y, '--', label = '247 MFV' )
#
#ax3.plot(i, WZ, 'b')
##ax3.plot(i, WZg, 'r')
#ax3.set_ylabel('$\omega_z$',fontsize = 25)
#ax3.plot(i, w3[0][3]*y, ':', label = '212 ICRF' )
#ax3.plot(i, w3[1][3]*y, '-.', label = '295 ICRF' )
#ax3.plot(i, w3[2][3]*y, '--', label = '247 MFV' )
#
#ax4.plot(i, W, 'b')
##ax4.plot(i, Wg, 'r')
#ax4.set_ylabel('$\omega$',fontsize = 25)
#ax4.plot(i, w3[0][0]*y, ':' , label = '212 ICRF' )
#ax4.plot(i, w3[1][0]*y, '-.', label = '295 ICRF' )
#ax4.plot(i, w3[2][0]*y, '--', label = '247 MFV' )
#
#ax1.legend()
#ax2.legend()
#ax3.legend()
#ax4.legend()
#
#ax1.set_xlabel('No. Sources',fontsize = 15)
#ax2.set_xlabel('No. Sources',fontsize = 15)
#ax3.set_xlabel('No. Sources',fontsize = 15)
#ax4.set_xlabel('No. Sources',fontsize = 15)
#
#plt.show()
##plt.savefig('../plot/OARank_rot.eps')
#plt.savefig('../plot/GRank_rot.eps')
##############################################################################
plt.figure()
#set the size of subplots
left,width = 0.10,0.85
bottom,height = 0.1, 0.17
bottom_3 = bottom + height*2 + 0.01
bottom_2 = bottom_3 + height + 0.01
bottom_1 = bottom_2 + height + 0.01
scale4 = [left, bottom, width, height*2]
scale3 = [left, bottom_3, width, height]
scale2 = [left, bottom_2, width, height]
scale1 = [left, bottom_1, width, height]
ax1 = plt.axes(scale1)
ax2 = plt.axes(scale2, sharex = ax1)
ax3 = plt.axes(scale3, sharex = ax1)
ax4 = plt.axes(scale4)
ax1.plot(i, np.abs(WX) , 'b', linewidth=3)
ax1.plot(i, np.abs(WXg), 'r', linewidth=3)
ax1.set_ylabel('$r_1$',fontsize = 25)
ax1.set_xlim([100, max(i)])
ax1.set_xticks([100,150,200,250,300,350,400,450,500,550])
ax1.set_xticklabels(['','','','','','','','','',''])
ax1.set_ylim([0,15])
ax1.set_yticks(np.arange(0, 15, 5))
ax1.set_yticklabels(['0','5','10'],fontsize = 12)
ax1.plot(i, np.abs(w3[0][1])*y, 'b--', label = '212 ICRF' )
ax1.plot(i, np.abs(w3[1][1])*y, 'g--', label = '295 ICRF')
ax1.plot(i, np.abs(w3[2][1])*y, 'y--', label = '247 MFV' )
ax1.plot(i, np.abs(w3[3][1])*y, 'k--', label = '260 AMS' )
ax2.plot(i, np.abs(WY) , 'b', linewidth=3)
ax2.plot(i, np.abs(WYg), 'r', linewidth=3)
ax2.set_ylabel('$r_2$',fontsize = 25)
ax2.set_ylim([0,20])
ax2.set_yticks([0,5,10,15])
ax2.set_yticklabels(['0','5','10','15'],fontsize = 12)
ax2.plot(i, np.abs(w3[0][2])*y, 'b--', label = '212 ICRF' )
ax2.plot(i, np.abs(w3[1][2])*y, 'g--', label = '295 ICRF')
ax2.plot(i, np.abs(w3[2][2])*y, 'y--', label = '247 MFV' )
ax2.plot(i, np.abs(w3[3][2])*y, 'k--', label = '260 AMS' )
ax3.plot(i, np.abs(WZ) , 'b', linewidth=3)
ax3.plot(i, np.abs(WZg), 'r', linewidth=3)
ax3.set_ylabel('$r_3$',fontsize = 25)
ax3.set_ylim([0, 15])
ax3.set_yticks(np.arange(0, 15, 5))
ax3.set_yticklabels(['0','5','10'],fontsize = 12)
ax3.plot(i, w3[0][3]*y, 'b--', label = '212 ICRF' )
ax3.plot(i, w3[1][3]*y, 'g--', label = '295 ICRF')
ax3.plot(i, w3[2][3]*y, 'y--', label = '247 MFV' )
ax3.plot(i, w3[3][3]*y, 'k--', label = '260 AMS' )
ax4.plot(i, W, 'b', linewidth=3)
ax4.plot(i, Wg, 'r', linewidth=3)
ax4.set_ylabel('$r$', fontsize=25)
ax4.set_ylim([0,20])
ax4.set_yticks(np.arange(0, 20, 5))
ax4.set_yticklabels(['0','5','10','15'],fontsize = 12)
ax4.plot(i, w3[0][0]*y, 'b--' , label = '212 ICRF' )
ax4.plot(i, w3[1][0]*y, 'g--', label = '295 ICRF' )
ax4.plot(i, w3[2][0]*y, 'y--', label = '247 MFV' )
ax4.plot(i, w3[3][0]*y, 'k--', label = '260 AMS' )
ax4.set_xlim([100,max(i)])
ax4.set_xticks([100,150,200,250,300,350,400,450,500,550, max(i)])
ax4.set_xticklabels(['100','','200','','300','','400','','500', ''],fontsize = 15)
ax4.legend(loc=0, fontsize=10)
ax4.set_xlabel('No. Sources', fontsize=15)
plt.show()
plt.savefig('../plot/Rotation_No.eps', dpi=100)
plt.close()
print('Done!') | Niu-Liu/thesis-materials | sou-selection/progs/RotationPlot.py | RotationPlot.py | py | 5,084 | python | en | code | 0 | github-code | 36 |
14432743893 | print("genera lo numero del 1 al 10: ")
input()
lista = [0,1,2,3,4,5,6,7,8,9]
numero = 1
range = (10)
for numero in lista:
print(numero)
| toshio3024/programacion_visual | modulo1/ciclo_for/lista1_10.py | lista1_10.py | py | 149 | python | it | code | 0 | github-code | 36 |
9896960203 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
import os
import sys
import rospy
import numpy as np
import cv2
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
from darknet_ros_msgs.msg import BoundingBoxes, ObjectCount
class Bridge(object):
"""์์ถ๋ ์ด๋ฏธ์ง๋ฅผ ์ผ์ ๋ฉ์ธ์ง ํํ๋ก ๋ณํํ๋ค.."""
def __init__(self):
# ๊ธ๋ก๋ฒ ๋ณ์ ์ค์
self.bridge = CvBridge()
self.bounding_boxes = BoundingBoxes()
self.image = None
# ๋ฐํ ์ค์
self.compressed_detection_image_pub = rospy.Publisher("/detection_image/compressed", CompressedImage, queue_size=1)
# ๊ตฌ๋
์ค์
compressed_color_image_sub = rospy.Subscriber("camera/color/image_raw/compressed", CompressedImage, self.bridge_color_image)
bounding_boxes_sub = rospy.Subscriber('darknet_ros/bounding_boxes', BoundingBoxes, self.update_bounding_boxes)
def bridge_color_image(self, data):
"""
"""
# ์์ถ ๋ฐ์ดํฐ๋ฅผ CV ๋ฐฐ์ด๋ก ๋ณํ
np_arr = np.fromstring(data.data, np.uint8)
self.image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
# try:
# self.color_image_pub.publish(self.bridge.cv2_to_imgmsg(color_image, "bgr8"))
# except CvBridgeError as e:
# print(e)
def update_bounding_boxes(self, data):
bounding_boxes = data
for i in range(len(self.bounding_boxes.bounding_boxes)):
try:
if self.bounding_boxes.bounding_boxes[i].Class == 'person':
probability = self.bounding_boxes.bounding_boxes[i].probability
xmin = self.bounding_boxes.bounding_boxes[i].xmin
ymin = self.bounding_boxes.bounding_boxes[i].ymin
xmax = self.bounding_boxes.bounding_boxes[i].xmax
ymax = self.bounding_boxes.bounding_boxes[i].ymax
_id = i + 1
_class = self.bounding_boxes.bounding_boxes[i].Class
except:
pass
# def bridge_detection_image(self, data):
# """
# """
# # try:
# detection_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
# # except CvBridgeError as e:
# # print(e)
# compressed_detection_image = CompressedImage()
# compressed_detection_image.header.stamp = rospy.Time.now()
# compressed_detection_image.format = "jpeg"
# compressed_detection_image.data = cv2.imencode('.jpg', detection_image)[1].tostring()
# try:
# self.compressed_detection_image_pub.publish(compressed_detection_image)
# except CvBridgeError as e:
# print(e)
if __name__ == '__main__':
rospy.init_node('bridge', anonymous=False)
bridge = Bridge()
rospy.spin()
| Taemin0707/minibot_control | pedestrian_tracking/src/visualizing.py | visualizing.py | py | 2,884 | python | en | code | 0 | github-code | 36 |
39642904721 | """ CONTROLLER.PY
The site controller controls the production of the response
for requests to the website. The controller creates and interacts
with both the SiteModel and the SiteViews. A call to the controller
calls start_response and returns the contents of the response.
Upgrade to multi-site:
./ set up with config in apache config
./ changes to main
./ read and open config
./ set paths from config
./ check if import weberror works else set flag
./ changes to controller
./ look for script_name split into sitename and responsearr
(keep older code around but commented out)
./ update config if section with name exists
./ clean up baseurlpath or replace with siteurl where appropriate
./ test on current site
./ make 2 sites with 2 different folders and names (also check if basename works)
./ also check if old system with scriptalias still works
./ changes to main with extra error
./ make changes to have both error functions, call of correct application at bottom
./ test both errors
- look at main <-> controller: does split make sense?
- update log in controller
Add search tab
"""
### Preparation
# Import
import sys # System Library
import os # OS Library
import time # Time Library
import logging.config # Logging Config Object
#import urlparse # URLParse Library (should be used instead of CGI)
import cgi # CGI library (needed because urlparse.parse_qs broken in py2.5)
import hashlib # Hashlib Library
import shelve # Shelve Module
from configobj import ConfigObj # Configuration Object
from views import SiteViews # Site Views Object
from model import SiteModel # Site Model Object
class SiteController(object):
""" Controller object that formulates the response to the http
request.
"""
def __init__(self):
""" Constructor: declares the variables
"""
# Declare Variables
self.env = {'No Env':0} # Environment
self.conf = None # Configuration Object
self.output = 'No Output' # Output
self.log = None # Logging Object
self.sid = '' # Session ID
self.session = {} # Session Variables
self.request = {} # Request Variables
self.views = None # The views object
self.model = None # The model object
def __call__(self,environ,start_response):
""" Object Call: creates the response
"""
### Setup / Preparation
errormsg = ''
# Set Environment
self.env = environ
# Split URI into sitename and RESPONSEARR with path info -> fill / update session vars
# response = environ.get('PATH_INFO','list') # Old code: PATH_INFO not available using SciptAliasMatch
#responsearr = environ.get('SCRIPT_NAME').strip('/').split('/') # Does not work with older browsers
responsearr = environ.get('REQUEST_URI').strip('/').split('/')
if len(responsearr) > 0:
siteurl = responsearr[0]
responsearr = responsearr[1:]
else: siteurl = ''
# Load Configuration
self.conf = ConfigObj(environ.get('WEBVIEW_CONFIG'))
# Add configuration from site_sitename
if 'site_'+siteurl in self.conf:
self.conf.merge(self.conf['site_'+siteurl])
# Edit siteurl in config
if len(siteurl):
self.conf['path']['siteurl'] = '/'+siteurl
# Set up logging & print message
logfile = os.path.join(self.conf['path']['basepath'],
self.conf['ctrl']['logfile'])
logging.basicConfig(level='DEBUG',filename = logfile,
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.log = logging.getLogger('webview.control')
self.log.info('********* Started Controller')
self.log.info(' Request from %s to %s'
% (environ.get('REMOTE_ADDR'),
environ.get('REQUEST_URI')))
# Get Post request parameters (decode if needed)
try:
request_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
request_size = 0
request_body = environ['wsgi.input'].read(request_size)
try:
request_body = request_body.decode()
except(UnicodeDecodeError,AttributeError):
pass
request_params = cgi.parse_qs(request_body)
# Attach GET request parameters
query_string = environ['QUERY_STRING']
request_params.update(cgi.parse_qs(query_string))
self.request = request_params
# Get session id from Post request
self.log.debug('Request Params = ' + repr(request_params))
self.sid = request_params.get('sid',[''])[0]
if len(self.sid):
self.log.info('Existing Session SID = %s' % self.sid)
else:
self.log.info('New Session')
self.sid = hashlib.sha1((repr(time.time())+environ['REMOTE_ADDR']).encode('utf-8')).hexdigest()
# Get session information / make new session file
sessionfile = os.path.join(self.conf['path']['basepath'],
self.conf['path']['session'],
'sess_%s' % self.sid)
self.session = shelve.open(sessionfile, writeback = True)
self.session['sid'] = self.sid
# Make other objects
self.views = SiteViews(self.env, self.session, self.conf)
self.model = SiteModel(self.env, self.session, self.conf)
self.views.model = self.model
###### Compute response (format is page/request)
# Select Response and Query from URI -> fill / update session vars
if len(responsearr) > 0: self.session['page'] = responsearr[0].lower()
else: self.session['page'] = 'list'
if not (self.session['page'] in ['data','error','log', 'list', 'search', 'test']):
self.session['page'] = 'list'
self.log.info('Page Type is = %s' % self.session['page'])
#-- DATA Response: update session variables and validate request
self.session['request'] = '' # Clear request
if self.session['page'] == 'data':
responsefolder = '' # variable to allow check if folder is valid
# Get and Validate request
if responsearr[-1].lower() in ['raw']:
self.session['request'] = responsearr[-1].lower()
responsearr = responsearr[0:-1]
# FOLDER selection from request parameters or response path array
if 'folder_selection' in request_params:
self.session['folder'] = request_params.get('folder_selection')[0]
elif len(responsearr) > 1:
responsefolder = os.path.join(*responsearr[1:])
self.session['folder'] = responsefolder
# FILE selection from request parameters
if 'file_selection' in request_params:
self.session['file'] = request_params.get('file_selection')[0]
# STEP selection from request parameters
if 'step_selection' in request_params:
self.session['step'] = request_params.get('step_selection')[0]
# DATA selection from request parameters
if 'data_selection' in request_params:
self.session['data'] = request_params.get('data_selection')[0]
# PLANE selection from request parameters
if 'plane_selection' in request_params:
self.session['plane'] = request_params.get('plane_selection')[0]
# Validate / Set session variables
self.model.set_selection()
# if no data available -> error
if len(self.session['data']) == 0:
self.session['page'] = 'error'
errormsg = 'No FITS data avaialable:<br> '
errormsg += ' folder = <%s>' % (self.session['folder'])
errormsg += ' file = <%s>' % (self.session['file'])
errormsg += ' step = <%s>' % (self.session['step'])
errormsg += ' data = <%s>' % (self.session['data'])
errormsg += ' plane = <%s>' % (self.session['plane'])
# if responsefolder was invalid raise error
if ( len(responsefolder) and not responsefolder in self.session['folder'] and
int(self.conf['ctrl']['erronbadurl']) ):
self.session['page'] = 'error'
errormsg = 'Nonexistent or empty folder requested: %s is not available or contains no data' % responsefolder
#-- LOG Response: update session variables and validate request
if self.session['page'] == 'log':
# Get and Validate request
if responsearr[-1].lower() in ['update']:
self.session['request'] = responsearr[-1].lower()
# LOG_LEVEL selection from request parameters
if 'log_level' in request_params:
level = request_params.get('log_level')[0]
if level in 'DEBUG INFO WARNING ERROR CRITICAL':
self.session['loglevel'] = level
elif not 'loglevel' in self.session:
self.session['loglevel'] = 'INFO'
#-- LIST Response: update session variables and validate request
if self.session['page'] == 'list':
# LIST_FOLDER selection from response path array
responsefolder = '' # variable to allow check if folder is valid
if len(responsearr) > 1:
responsefolder = responsearr[1]
self.session['listfolder'] = responsefolder
else:
self.session['listfolder'] = ''
# Get Folder list and make sure there's something there
folderlist = self.model.folderlist(0)
if len(folderlist) == 0:
self.session['page'] = 'error'
errormsg = '<b>NO Data Folders Available</b><p> No folders were found under '
errormsg += os.path.join(self.conf['path']['basepath'], self.conf['path']['datapath'])
errormsg += '. Check the server settings or contact the administrator.'
elif ( len(responsefolder) and not responsefolder in folderlist and
int(self.conf['ctrl']['erronbadurl'])):
self.session['page'] = 'error'
errormsg = 'Nonexistent folder requested: %s is not available or contains no data' % responsefolder
else:
# Set list_folder
if not self.session['listfolder'] in folderlist:
self.session['listfolder'] = folderlist[-1]
#-- TEST Response: log the messages
if self.session['page'] == 'test':
if 'messagetext' in request_params:
self.log.debug('Test - Message from %s: %s' %
(environ.get('REMOTE_ADDR'), request_params['messagetext'][0]) )
# Print request if it came up
if len(self.session['request']) > 0:
self.log.info('Request Type is = %s' % self.session['request'])
###### Make response page
# Initialize Response
status = '200 OK'
response_headers = [('Content-Type','text/html')]
self.output = ''
# If there is no request -> return regular page
if len(self.session['request']) == 0 or self.session['page'] == 'error':
# Create Response Header
self.output += self.views.header()
# Create Text
if self.session['page'] == 'data':
# Request is to see data
self.output += self.views.data()
elif self.session['page'] == 'error':
# Request is an Error
self.output += self.views.error(errormsg)
elif self.session['page'] == 'log':
# Request is log
self.output += self.views.pipelog()
elif self.session['page'] == 'list':
# Request is list
self.output += self.views.folderlist()
elif self.session['page'] == 'search':
# Request is search
self.output += self.views.search()
elif self.session['page'] == 'test':
# Request is test
self.output += self.views.test()
# Close Response
self.log.debug('debuginfo = %d' % int(self.conf['ctrl']['debuginfo']) )
if( int(self.conf['ctrl']['debuginfo']) > 0 or
self.session['page'] == 'test' ):
self.list_env()
self.output += '</body></html>'
# If there is a querry -> return request text instead
else:
# Data, Raw request
if self.session['page']+'-'+self.session['request'] == 'data-raw':
self.output += self.views.dataraw()
# Logging, Update request
if self.session['page']+'-'+self.session['request'] == 'log-update':
self.output += self.views.logupdate()
# Return
start_response(status, response_headers)
self.log.info('********* Finished Controller')
return self.output
def list_env(self):
    """Append an HTML report of the request, session, environment and path.

    Builds a sequence of titled <ul> sections (request parameters, current
    working directory, session variables, environment variables, sys.path)
    and appends the rendered HTML to ``self.output``.
    """
    def bullet_list(title, items):
        # Render a titled HTML unordered list from pre-formatted <li> items.
        return '<b>%s:</b><ul>\n' % title + '\n'.join(items) + '</ul>\n'

    parts = ["<hr>\n <h2>Environment Setup</h2>\n"]
    # Request parameters received by the controller.
    parts.append(bullet_list(
        'Request',
        ['<li>%s: %s' % (key, self.request[key]) for key in self.request]))
    # Current working directory of the server process.
    parts.append('<b>Current Path:</b> %s<p>\n' % os.getcwd())
    # Per-session state.
    parts.append(bullet_list(
        'Session Variables',
        ['<li>%s: %s' % (key, self.session[key]) for key in self.session]))
    # Process environment, sorted for stable output.
    parts.append(bullet_list(
        'Environment Variables',
        ['<li>%s: %s' % (key, self.env[key]) for key in sorted(self.env.keys())]))
    # Python module search path.
    parts.append(bullet_list(
        'Path Settings',
        ['<li>%s' % p for p in sorted(sys.path)]))
    self.output += ''.join(parts)
""" === History ===
2021-4 Marc Berthoud, remove use of logconfig
2020 Marc Berthoud, Upgrade to multi-site
2020-1-10 Marc Berthoud,
* removed [path][baseurlpath] from config: Either use absolute paths
or use siteurl (which is set automatically), also in logscripts.js
* Config now comes from environment variable WEBVIEW_CONFIG
* Load site_siteurl preferences from config section into config
to allow multiple sites on a server.
* Main.py now loads pythonpaths from config file
* Main.py checks if weberror.errormiddleware exists else uses
simpler error reporting function
2015-2-20 Marc Berthoud, Various improvements
* Update code for using astropy.io.fits
2014-4-3 Marc Berthoud, Added self.infohead to model object to specify
which header contains main information.
2012-11-13 Marc Berthoud, Ability to specify instrument name and icons
* Ability to have information come from specific headers
* Configuration file name is now in main.py
2012-9-13 Marc Berthoud, Added file name and format flexibility
* Added flexible detection of pipe step in file name (model.filelist)
* Added ability to have no image in primary FITS header
2012-6-15 Marc Berthoud, Added use of jQuery for JavaScript elements
* New ['scripts'] section in the configuration, scripts are now
loaded in the page header
* Updated logscripts.js for use of jQuery
2012-4-12 Marc Berthoud, Various improvements during system testing
* Validate flights and aors to make sure data is present
* Add INSTMODE to the end of AOR entries in data
2011-11-23 Marc Berthoud, Ver0.2: Added imageanalysis javascript object
to the viewer to manage client side user interface.
2011-1-31 Marc Berthoud, Ver0.1: Wrote and Tested
"""
| berthoud/webfitsviewer | webfitsviewer/src/controller.py | controller.py | py | 16,483 | python | en | code | 1 | github-code | 36 |
16539248084 | import markovify
import sys
import argparse
import configparser
import twitter
model_depth_default = 2
model_depth = model_depth_default
def main():
    """Command-line entry point.

    Sub-commands:
      * ``train``: build a Markov model from a text corpus and save it as JSON.
      * ``tweet``: generate a short sentence from a saved model, print it,
        and (unless ``--no-post`` is given) post it to Twitter.

    Twitter credentials and an optional model depth are read from
    ``poorlytrained.ini`` in the working directory.
    """
    arg_parser = argparse.ArgumentParser(description="Generate text with Markov chains based on a source corpus.")
    subparser = arg_parser.add_subparsers(dest="subparser_name")

    subparser_train = subparser.add_parser("train")
    subparser_train.add_argument("corpus", help="Path to a corpus to train with.")
    subparser_train.add_argument("savepath", help="Path to where to save the model, in JSON format.")

    subparser_tweet = subparser.add_parser("tweet")
    subparser_tweet.add_argument("corpus", help="Path to a corpus.")
    subparser_tweet.add_argument("modelpath", help="Path to a model built with \"train\"")
    subparser_tweet.add_argument("--no-post", help="Do not post to Twitter, write to stdout and exit.", action="store_true")

    args = arg_parser.parse_args()

    config = configparser.ConfigParser()
    config.read("poorlytrained.ini")
    twitter_consumer_key = config["keys"]["consumerkey"]
    twitter_consumer_secret = config["keys"]["consumersecret"]
    twitter_access_token = config["keys"]["accesstoken"]
    twitter_access_token_secret = config["keys"]["accesstokensecret"]
    try:
        # configparser values are strings, so convert explicitly; the Markov
        # model expects an integer state size.
        model_depth = int(config["markov"]["modeldepth"])
    except (KeyError, ValueError):
        # BUG FIX: the fallback previously only printed a warning without
        # assigning model_depth, which raised UnboundLocalError when the
        # config key was missing; the bare ``except:`` is also narrowed to
        # the expected failure modes (missing key, non-numeric value).
        model_depth = model_depth_default
        sys.stderr.write("WARNING: Could not read model depth from configuration file. Defaulting to {}.\n".format(model_depth_default))

    if args.subparser_name == "train":
        # Train a model from the corpus and persist the chain as JSON.
        with open(args.corpus) as f:
            text = f.read()
        text_model = markovify.Text(text)
        with open(args.savepath, "w") as f:
            f.write(text_model.chain.to_json())
    elif args.subparser_name == "tweet":
        # Rebuild the model from the saved chain plus the original corpus.
        with open(args.corpus) as corpus:
            with open(args.modelpath) as model:
                model_chain = markovify.Chain.from_json(model.read())
                text_model = markovify.Text(corpus.read(), model_depth, model_chain)
                tweet_message = text_model.make_short_sentence(140)
                print(tweet_message)
                if not args.no_post:  # post unless --no-post was specified
                    tapi = twitter.Api(twitter_consumer_key, twitter_consumer_secret, twitter_access_token, twitter_access_token_secret)
                    tapi.PostUpdate(tweet_message)
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
| nanovad/poorlytrained | poorlytrained.py | poorlytrained.py | py | 2,216 | python | en | code | 0 | github-code | 36 |
42184120980 | # encoding = utf-8
class FirstClass():
    """Teaching example contrasting instance, class and static methods.

    NOTE: the original file contained mojibake (mis-encoded Chinese
    comments and print strings, some broken across lines); the text has
    been repaired with English equivalents. The returned class-attribute
    value is preserved byte-for-byte, including its original spelling.
    """

    # Class attribute shared by all instances (original spelling kept).
    name = 'javaAndBidata'

    def javaAndBigdata_method(self):
        """Instance method: must declare a first parameter (``self``),
        which is bound to the instance the method is called on.

        :return: the instance's ``name`` attribute
        """
        print("this is javaAndBigdata's method!!!")
        return self.name  # self refers to the instance itself

    @classmethod
    def class_method(cls):
        # Class method: receives the class (cls) via the classmethod decorator.
        print("class method (decorated with classmethod)!")

    @staticmethod
    def static_method():
        # Static method: no implicit first argument; typically a utility
        # helper that is only loosely related to the class.
        print("static method (usually a class-independent utility)!")
# Demo: exercise each kind of method defined on FirstClass.
javaAndBigdataInstance = FirstClass()  # create an instance
print(javaAndBigdataInstance.name)  # class attribute read through the instance
print(javaAndBigdataInstance.javaAndBigdata_method())  # instance method: prints, then returns name
FirstClass.class_method()  # class methods are called on the class itself
FirstClass.static_method()  # static methods likewise need no instance
| helleboyboy/2022_pythonProj | com/study/MyFirstClass.py | MyFirstClass.py | py | 793 | python | en | code | 0 | github-code | 36 |
39675510369 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.keras.applications.resnet_v2 import ResNet152V2, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
import numpy as np
def classifyImg(img_path):
    """Classify an image with a pretrained ResNet152V2 and print the result.

    Loads the image, resizes it to the 224x224 input expected by the
    network, applies ResNetV2 preprocessing, and prints/returns the top-3
    ImageNet predictions.

    :param img_path: path to the image file to classify
    :return: list of (class_id, class_name, score) tuples for the top-3 classes
    """
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add a batch dimension: (1, 224, 224, 3)
    x = preprocess_input(x)
    # PERF FIX: cache the model on the function object so repeated calls do
    # not rebuild (and potentially re-download) the large network each time.
    if not hasattr(classifyImg, "_model"):
        classifyImg._model = ResNet152V2(weights='imagenet')
    # output prediction
    prediction = classifyImg._model.predict(x)
    # decode and report the top-3 classes
    decoded = decode_predictions(prediction, top=3)[0]
    print('prediction: ', decoded)
    return decoded
# Demo: classify a few sample images (paths are relative to this script).
img_path1 = 'fig/monkey.jpg'
classifyImg(img_path1)

img_path2 = 'fig/bird.jpg'
classifyImg(img_path2)

img_path3 = 'fig/taj.jpg'
classifyImg(img_path3)

img_path4 = 'fig/car.jpeg'
classifyImg(img_path4)
22353260658 | import torch
from .logger_utils import get_logger
import matplotlib.pyplot as plt
import numpy as np
import itertools
logger = get_logger()
def prediction(*, test_data, model, device):
    """Predict on test data and save a confusion-matrix plot.

    Runs the model over ``test_data`` one sample at a time (batch size 1),
    accumulates a 10x10 confusion matrix for the Fashion-MNIST classes,
    and writes the annotated plot to ``confusion_matrix.png``.

    Args:
        test_data (torch.utils.data.Dataset): Test dataset yielding (image, label).
        model (torch.nn.Module): Trained model.
        device (str): Device (cpu or gpu or mps)
    """
    num_classes = 10
    class_names = [
        "T-shirt/top",
        "Trouser",
        "Pullover",
        "Dress",
        "Coat",
        "Sandal",
        "Shirt",
        "Sneaker",
        "Bag",
        "Ankle boot",
    ]
    confusion_matrix = torch.zeros(num_classes, num_classes)
    # BUG FIX: switch to evaluation mode so dropout/batch-norm layers (if
    # present) behave deterministically during inference.
    model.eval()
    with torch.no_grad():
        for X, y in test_data:
            X = X.unsqueeze(0).to(device)  # add a batch dimension
            y = torch.tensor([y]).to(device)
            outputs = model(X)
            _, predicted = torch.max(outputs.data, 1)
            # Accumulate counts: rows are true labels, columns predictions.
            for t, p in zip(y.view(-1), predicted.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
    plt.figure(figsize=(10,10))
    plt.imshow(confusion_matrix, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('Confusion matrix')
    plt.colorbar()
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45)
    plt.yticks(tick_marks, class_names)
    # Annotate each cell; use white text on dark cells for readability.
    thresh = confusion_matrix.max() / 2.
    for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):
        plt.text(j, i, format(confusion_matrix[i, j], '.1f'),
                 horizontalalignment="center",
                 color="white" if confusion_matrix[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig('confusion_matrix.png')
    # BUG FIX: close the figure so repeated calls do not accumulate open
    # matplotlib figures (memory leak).
    plt.close()
    logger.info('Confusion Matrix saved as confusion_matrix.png')
| abhijitramesh/hpc-demo | utils/prediction_utils.py | prediction_utils.py | py | 1,910 | python | en | code | 0 | github-code | 36 |
39154979229 | from typing import Callable
def format_number(number: int) -> str:
    """Format *number* as a short human-readable string with an SI suffix.

    The value is rounded to three significant figures and scaled down by
    factors of 1000, e.g. ``1234 -> "1.23K"``, ``10**18 -> "1E"``.

    :param number: the integer to format (may be negative)
    :return: compact string such as ``"999"``, ``"1.23K"`` or ``"-2.5K"``
    """
    suffixes = ["", "K", "M", "G", "T", "P", "E", "Z", "Y"]
    num = float(f"{number:.3g}")  # round to 3 significant figures
    magnitude = 0
    # BUG FIX: clamp at the largest suffix so huge values no longer raise
    # IndexError (previously anything >= 1000**6 crashed); suffix table is
    # also extended to the full SI range (E, Z, Y).
    while abs(num) >= 1000 and magnitude < len(suffixes) - 1:
        magnitude += 1
        num /= 1000.0
    # Drop trailing zeros and a dangling decimal point ("1.230000" -> "1.23").
    human = f"{num:f}".rstrip("0").rstrip(".")
    return f"{human}{suffixes[magnitude]}"
| SkyLissh/skylet-discord | app/utils/format_number.py | format_number.py | py | 358 | python | en | code | 0 | github-code | 36 |
from django.db.models import F
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.views import generic
#from django.template import loader # no longer needed b/c of render shortcut

from .models import Question, Choice
# Create your views here.
class IndexView(generic.ListView):
    """Display the five most recent poll questions.

    Generic ListView; the template receives the queryset under the
    name ``latest_question_list``.
    """
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions."""
        return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Display a single poll question and its choices.

    This is a generic view. Each generic view needs to know
    what model it will be acting upon; this is provided using
    the ``model`` attribute. The detail view expects the primary-key
    value captured from the URL to be called ``pk``, which is why the
    URLconf uses ``pk`` instead of ``question_id`` for the generic views.
    """
    model = Question
    template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
    """Display the vote totals for a single poll question.

    Generic DetailView over ``Question``; only the template differs
    from ``DetailView``.
    """
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for one choice of the given question.

    Looks up the question (404 if missing) and the choice selected in the
    POST data. On success the choice's vote count is incremented and the
    client is redirected to the results page; on a missing/invalid choice
    the detail page is re-rendered with an error message.

    Always returns a redirect after successfully handling POST data, which
    prevents double submission if the user hits the back button.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        # Obtain the selected choice from the submitted form data.
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Re-display the form with an error message if no choice was given.
        return render(request, 'polls/detail.html', {'question': question, 'error_message': "You didn't select a choice."})
    else:
        # BUG FIX: use an F() expression so the increment is performed by the
        # database, avoiding the read-modify-write race condition when two
        # requests vote for the same choice concurrently.
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        return HttpResponseRedirect(reverse('polls:results', args=(question_id,)))
#################################################################################################################3
### Previous versions of functions below!
#################################################################################################################3
#def index(request):
#"""
#index will display the latest 5 poll questions in the system, separated by commans,
#according to the publication date.
#"""
#latest_question_list = Question.objects.order_by('-pub_date')[:5]
#template = loader.get_template('polls/index.html')
#context = {
#'latest_question_list': latest_question_list
#}
# httpresponse is common, but render is a shortcut!
#return HttpResponse(template.render(context, request))
#def index(request):
#"""
#The simplest view possible in Django
#"""
#return HttpResponse("Hello world! You're at the polls index!")
# one way of writing a 404 error
#def detail(request, question_id):
#"""
#Returns a simple template response.
#"""
#try:
#question = Question.objects.get(pk=question_id)
#except Question.DoesNotExist:
#raise Http404("Question does not exist!")
#return render(request, 'polls/detail.html', {'question': question})
| Alex-Bishka/Languages | Django/mysite/polls/views.py | views.py | py | 3,562 | python | en | code | 0 | github-code | 36 |
15193997907 | from enum import Enum
from dynsimf.models.helpers.ConfigValidator import ConfigValidator
from dynsimf.models.components.conditions.Condition import Condition
__author__ = "Mathijs Maijer"
__email__ = "m.f.maijer@gmail.com"
class UpdateType(Enum):
    '''
    An Enum to specify the type of the update
    '''
    STATE = 0        # update targets node states (by name; see UpdateConfiguration default)
    NETWORK = 1      # update presumably targets the network structure -- confirm in model code
    EDGE_VALUES = 2  # update presumably targets values stored on edges -- confirm in model code
class UpdateConfiguration(object):
    '''
    Configuration for Updates

    :var config: The dictionary containing the key/value pairs of the members of this class,
        if no key/value pair is provided, a default value is used instead
    :vartype config: dict
    :var arguments: A dictionary with arguments for the update function, defaults to empty dict
    :vartype arguments: dict
    :var condition: A condition for nodes that must be met before the update is executed on them
    :vartype condition: Condition or None
    :var get_nodes: A boolean indicating whether the update function should receive a list of
        sampled nodes as argument, defaults to None
    :vartype get_nodes: bool or None
    :var update_type: A value from the `UpdateType` enum, indicating what kind of update is
        being performed, defaults to `UpdateType.STATE`
    :vartype update_type: UpdateType
    '''
    def __init__(self, config=None):
        self.set_config(config)
        self.validate()

    def set_config(self, config):
        '''
        Set the values for the members of the class by reading them from the config
        or falling back to defaults.

        :param dict config: The configuration dictionary with the key/value pairs for
            the class members (may be None)
        '''
        self.config = config if config else {}
        # CONSISTENCY FIX: read every value from self.config (the normalized
        # dict) instead of mixing `config` and `self.config`, which relied on
        # short-circuit evaluation to avoid indexing a None config; dict.get
        # expresses the default handling directly.
        self.arguments = self.config.get('arguments', {})
        self.condition = self.config.get('condition', None)
        self.get_nodes = self.config.get('get_nodes', None)
        self.update_type = self.config.get('update_type', UpdateType.STATE)

    def validate(self):
        '''
        Validate the update configuration

        :raises ValueError: if the `update_type` member is not of type `UpdateType`
        '''
        ConfigValidator.validate('arguments', self.arguments, dict)
        ConfigValidator.validate('condition', self.condition, Condition, optional=True)
        ConfigValidator.validate('get_nodes', self.get_nodes, bool, optional=True)
        ConfigValidator.validate('update_type', self.update_type, UpdateType)
        # Defensive double-check (ConfigValidator semantics live elsewhere);
        # message grammar fixed ("should of" -> "should be of").
        if not isinstance(self.update_type, UpdateType):
            raise ValueError('update_type should be of enum UpdateType')
class Update(object):
    """A single model update: a function plus how/when to run it.

    :var function: the update function to execute
    :vartype function: function
    :var config: the configuration describing how to call the function,
        defaults to a fresh UpdateConfiguration
    :vartype config: UpdateConfiguration
    :var arguments: keyword arguments passed to the update function
    :vartype arguments: dict
    :var condition: condition nodes must satisfy before the update applies
    :vartype condition: Condition or None
    :var get_nodes: whether the function receives the sampled nodes as its
        first positional argument
    :vartype get_nodes: bool or None
    :var update_type: the kind of update being performed
    :vartype update_type: UpdateType
    """

    def __init__(self, fun, config=None):
        """Store the function and mirror the configuration's members locally.

        :param function fun: the update function to execute
        :param config: configuration for this update; a default
            UpdateConfiguration is created when none is given
        :type config: UpdateConfiguration, optional
        """
        self.function = fun
        self.config = config or UpdateConfiguration()
        # Copy the configuration members onto the update for direct access.
        self.arguments = self.config.arguments
        self.condition = self.config.condition
        self.get_nodes = self.config.get_nodes
        self.update_type = self.config.update_type

    def execute(self, nodes=None):
        """Run the update function and return its result.

        :param nodes: optional list of nodes (pre-filtered by schemes and
            conditions) handed to the function when ``get_nodes`` is set
        :type nodes: list, optional
        """
        if self.get_nodes:
            return self.function(nodes, **self.arguments)
        return self.function(**self.arguments)
| Tensaiz/DyNSimF | dynsimf/models/components/Update.py | Update.py | py | 4,795 | python | en | code | 4 | github-code | 36 |
19199461708 | from torch.utils.data import Dataset
import numpy as np
from pathlib import Path
import pandas as pd
import torch
from dpipe.io import load_numpy
class BraTSDataset(Dataset):
    """PyTorch dataset for 2-D BraTS slices stored as compressed numpy files.

    ``meta`` is expected to hold one row per stored array with at least the
    columns ``sample_id``, ``relative_path``, ``is_mask`` and
    ``is_nonzero_mask`` (schema inferred from the usage below -- confirm
    against the preprocessing code that produces it).
    """

    def __init__(self, meta: pd.DataFrame, source_folder: [str, Path], nonzero_mask=False, transform=None):
        # Accept both str and Path for convenience.
        if isinstance(source_folder, str):
            source_folder = Path(source_folder)
        # Optionally keep only samples whose mask has foreground content.
        if nonzero_mask:
            meta = meta[meta.sample_id.isin(meta.query('is_nonzero_mask == True').sample_id)]
        self.source_folder = source_folder
        # Split metadata into image rows and mask rows; sorting both by
        # sample_id keeps them aligned so row i of each frame refers to the
        # same sample.
        self.meta_images = meta.query('is_mask == False').sort_values(by='sample_id').reset_index(drop=True)
        self.meta_masks = meta.query('is_mask == True').sort_values(by='sample_id').reset_index(drop=True)
        self.transform = transform

    def __len__(self):
        # One dataset item per image row.
        return self.meta_images.shape[0]

    def __getitem__(self, i):
        # load_numpy comes from dpipe; it decompresses the stored arrays.
        image = load_numpy(self.source_folder / self.meta_images.iloc[i]['relative_path'], allow_pickle=True, decompress=True)
        mask = load_numpy(self.source_folder / self.meta_masks.iloc[i]['relative_path'], allow_pickle=True, decompress=True)
        sample = image, mask
        if self.transform:
            image, mask = self.transform(sample)
        # NOTE(review): the reshape assumes each array has exactly 240*240
        # elements (the standard BraTS in-plane size) -- confirm upstream
        # shapes. Masks are cast to double; images keep their stored dtype.
        return torch.from_numpy(image).reshape(1, 240, 240), torch.from_numpy(mask).reshape(1, 240, 240).double()
| kurmukovai/hse_projects | 2020/Anvar/data_loader.py | data_loader.py | py | 1,392 | python | en | code | 1 | github-code | 36 |
11473243619 | import os
import zipfile
from abc import ABCMeta
from pathlib import Path
from typing import Optional, Union
from urllib.request import urlretrieve
class BaseDownloader(metaclass=ABCMeta):
    """Base downloader for all Movielens datasets."""

    DOWNLOAD_URL: str
    DEFAULT_PATH: str

    def __init__(self, zip_path: Optional[Union[Path, str]] = None):
        # Fall back to the subclass default when no explicit path is given.
        chosen = self.DEFAULT_PATH if zip_path is None else zip_path
        self.zip_path = Path(chosen)
        # Fetch and unpack the archive only when it is not already present.
        if not self.zip_path.exists():
            self._retrieve()

    def _retrieve(self) -> None:
        """Download the dataset archive, extract it, and remove the zip."""
        archive: str = str(self.zip_path) + ".zip"
        urlretrieve(self.DOWNLOAD_URL, filename=archive)
        with zipfile.ZipFile(archive) as zf:
            zf.extractall(self.zip_path.parent)
        os.remove(archive)
| smartnews/rsdiv | src/rsdiv/dataset/base.py | base.py | py | 876 | python | en | code | 7 | github-code | 36 |
22889942490 | #!/usr/bin/env python
import os
import re
import glob
def printPackages(base='/usr/share'):
    """Print the names of all CMake find-modules installed under *base*.

    Scans ``<base>/cmake*/Modules`` for files named ``Find<Pkg>.cmake`` and
    prints each ``<Pkg>`` on its own line.

    Improvements over the original: the search root is a parameter (default
    unchanged), the side-effect list comprehension is a plain loop, the
    regex prefix test is ``str.startswith``, and a ``.cmake`` suffix check
    keeps stray ``Find*`` files out of the listing.

    :param base: root directory to search (default ``/usr/share``)
    """
    paths = glob.glob(os.path.join(base, 'cmake*/Modules'))
    modules = [package for path in paths for package in os.listdir(path)]
    for module in modules:
        name = module[4:-6]  # strip the "Find" prefix and ".cmake" suffix
        if module.startswith("Find") and module.endswith(".cmake") and name:
            print(name)
def run():
    """Entry point wrapper: list all discoverable CMake find-modules."""
    printPackages()

# Run only when executed as a script, not on import.
if __name__ == '__main__':
    run()
| FredeEB/cmake-list-packages | cmake-list-packages.py | cmake-list-packages.py | py | 392 | python | en | code | 0 | github-code | 36 |
41366597699 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
def MyPlotSO(CompMethod,trials,MaxFuncEvals,method,problem):
##% Plot
figcount=0
Convergence = np.zeros((int(CompMethod.shape[0]/trials), MaxFuncEvals))
count=-1
for i in range(0,int(CompMethod.shape[0]),trials):
count+=1
for j in range(trials):
a=np.array(CompMethod[i+j,4])
b=np.array(Convergence[count,:])
Convergence[count,:] = a + b
Convergence=problem['Max/Min']*Convergence
Convergence= Convergence / trials
##% Convergence curves
PlotConvergence(Convergence,method)
figcount+=1
savefigures(figcount,plt)
#savefigures(plots)
##% Violin Plots
for i in range(2,4):
#print(i)
vec = CompMethod[:, i]
matrix = vec.reshape((int(CompMethod.shape[0]/trials)),trials)
matrix=matrix.T
ViolinPlotting(matrix,trials,method)
figcount+=1
savefigures(figcount,plt)
return
def PlotConvergence(Convergence,method):
# Define colors, styles, and symbols for each line
colors = ['red', 'blue', 'green', 'orange', 'purple']
linestyles = ['-', '--', '-.', ':', '-']
#markers = ['o', 's', 'D', '*', '^']
# Create a figure and axes
fig, ax = plt.subplots()
# Loop through each row of the matrix and plot it as a separate line
for i in range(Convergence.shape[0]):
ax.plot(Convergence[i], color=colors[i], linestyle=linestyles[i], label=method[i])
# Add a legend to the plot
ax.legend()
ax.grid()
# Add title and axis labels
ax.set_title('Convergence characteristic curves')
ax.set_xlabel('Function evaluations')
ax.set_ylabel('Objective function value')
# Display the plot
#ax.show()
return plt
def ViolinPlotting(matrix,trials,method):
df = pd.DataFrame(matrix, columns=method)
sns.set_context("talk", font_scale=1)
plt.figure(figsize=(7,2*matrix.shape[1]))
plt.grid()
sns.violinplot(data=df, palette='pastel', bw=.5,orient="h")
sns.stripplot(data=df,color="black", edgecolor="gray", orient="h")
return plt
def plot_3d(func, points_by_dim = 50, title = '', bounds = None, cmap = 'twilight', plot_surface = True, plot_heatmap = True):
from matplotlib.ticker import MaxNLocator, LinearLocator
"""
Plots function surface and/or heatmap
Parameters
----------
func : class callable object
Object which can be called as function.
points_by_dim : int, optional
points for each dimension of plotting (50x50, 100x100...). The default is 50.
title : str, optional
title of plot with LaTeX notation. The default is ''.
bounds : tuple, optional
space bounds with structure (xmin, xmax, ymin, ymax). The default is None.
save_as : str/None, optional
file path to save image (None if not needed). The default is None.
cmap : str, optional
color map of plot. The default is 'twilight'.
plot_surface : boolean, optional
plot 3D surface. The default is True.
plot_heatmap : boolean, optional
plot 2D heatmap. The default is True.
"""
assert (plot_surface or plot_heatmap), "should be plotted at least surface or heatmap!"
xmin, xmax, ymin, ymax = bounds
x = np.linspace(xmin, xmax, points_by_dim)
y = np.linspace(ymin, ymax, points_by_dim)
a, b = np.meshgrid(x, y)
data = np.empty((points_by_dim, points_by_dim))
for i in range(points_by_dim):
for j in range(points_by_dim):
data[i,j] = func(np.array([x[i], y[j]]))
a = a.T
b = b.T
l_a, r_a, l_b, r_b = xmin, xmax, ymin, ymax
l_c, r_c = data.min(), data.max()
levels = MaxNLocator(nbins=15).tick_values(l_c,r_c)
if plot_heatmap and plot_surface:
fig = plt.figure(figsize=(16, 6))
ax1 = fig.add_subplot(1,2,1)
ax2 = fig.add_subplot(1,2,2, projection='3d')
else:
fig = plt.figure()
if plot_heatmap:
ax1 = fig.gca()
else:
ax2 = fig.gca(projection='3d')
#title = r"$\bf{" + title+ r"}$"
#min_title = title[::]
def base_plot():
c = ax1.contourf(a, b, data , cmap=cmap, levels = levels, vmin=l_c, vmax=r_c)
name = title
ax1.set_title( name, fontsize = 15)
ax1.axis([l_a, r_a, l_b, r_b])
fig.colorbar(c)
if plot_surface:
# Plot the surface.
surf = ax2.plot_surface(a, b, data, cmap = cmap, linewidth=0, antialiased=False)
ax2.contour(a, b, data, zdir='z', levels=30, offset=np.min(data), cmap=cmap)
# Customize the z axis.
ax2.set_xlabel('1st dim', fontsize=15)
ax2.set_ylabel('2nd dim', fontsize=15)
#ax2.set_zlabel('second dim', fontsize=10)
ax2.set_zlim(l_c, r_c)
ax2.zaxis.set_major_locator(LinearLocator(5))
#ax2.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax2.tick_params(axis='z', pad=10)
# Add a color bar which maps values to colors.
if not plot_heatmap: fig.colorbar(surf)#, shrink=0.5, aspect=5)
ax2.contour(a, b, data, zdir='z', offset=0, cmap = cmap)
ax2.view_init(30, 50)
#ax2.set_title( min_title , fontsize = 15, loc = 'right')
if plot_heatmap: base_plot()
fig.tight_layout()
#if save_as != None:
# plt.savefig(save_as, dpi = 900)
plt.show()
return fig
def savefigures(figcount,plt):
# create a directory to store the figures
if not os.path.exists('results'):
os.makedirs('results')
plt.savefig('results/figure_{}.png'.format(figcount), dpi=900, bbox_inches="tight")
return | KZervoudakis/Mayfly-Optimization-Algorithm-Python | plotting.py | plotting.py | py | 5,858 | python | en | code | 4 | github-code | 36 |
22968808073 | import pickle
import pandas as pd
from rs import recommend_system
def main():
data = pd.read_csv('data/ratings_train.csv')
users = data.userId.tolist()
items = data.movieId.tolist()
ratings = data.rating.tolist()
rec_sys = recommend_system(100)
rec_sys.fit(users, items, ratings)
with open('data/rec-sys', 'wb') as f:
pickle.dump(rec_sys, f)
if __name__ == '__main__':
main()
| kotsmile/test_task_repo | main1.py | main1.py | py | 424 | python | en | code | 0 | github-code | 36 |
25374663372 | def soma_vetores(vetor1: list, vetor2:list) -> list:
vetor_soma = []
maior = max(len(vetor1), len(vetor2))
for i in range(maior):
valor1 = vetor1[i] if i < len(vetor1) else 0
valor2 = vetor2[i] if i < len(vetor2) else 0
vetor_soma.append(valor1 + valor2)
return vetor_soma
l1 = [1, 2, 3]
l2 = [7, 8, 9, 6]
l3 = soma_vetores(l1, l2)
print(l3) | ProfessorDudarts/Unasp-HT-2023.2 | algoritmos_II/especial/ex02.py | ex02.py | py | 384 | python | pt | code | 0 | github-code | 36 |
8366866522 | import numpy as np
import sofacontrol.utils as scutils
from sofacontrol.mor import pod
class LinearROM():
"""
Linear ROM, written for compatibility with TPWL class
"""
def __init__(self, data, dt, Cf=None, Hf=None):
if not isinstance(data, dict):
data = scutils.load_data(data)
# Discretize dynamics via zero-order hold
self.A_d, self.B_d, self.d_d = scutils.zoh_affine(data['A_c'], data['B_c'], data['d_c'], dt)
# Build ROM object in case it is needed
if data['rom_info']['type'] == 'POD':
self.rom = pod.POD(data['rom_info'])
else:
raise NotImplementedError("Unknown ROM type")
self.state_dim = self.A_d.shape[0]
self.N = self.state_dim
self.input_dim = self.B_d.shape[1]
# Optionally set output and measurement models
if Cf is not None:
self.set_measurement_model(Cf)
else:
self.C = None
self.y_ref = None
self.meas_dim = None
if Hf is not None:
self.set_output_model(Hf)
else:
self.H = None
self.z_ref = None
self.output_dim = None
def update_state(self, x, u):
return self.A_d @ x + np.squeeze(self.B_d @ u) + self.d_d
def set_measurement_model(self, Cf):
self.C = Cf @ self.rom.V
self.y_ref = Cf @ self.rom.x_ref
self.meas_dim = self.C.shape[0]
def set_output_model(self, Hf):
self.H = Hf @ self.rom.V
self.z_ref = Hf @ self.rom.x_ref
self.output_dim = self.H.shape[0]
def zfyf_to_zy(self, zf=None, yf=None):
"""
:zf: (N, n_z) or (n_z,) array
:yf: (N, n_y) or (n_y,) array
"""
if zf is not None and self.z_ref is not None:
return zf - self.z_ref
elif yf is not None and self.y_ref is not None:
return yf - self.y_ref
else:
raise RuntimeError('Need to set output or meas. model')
def zy_to_zfyf(self, z=None, y=None):
"""
:z: (N, n_z) or (n_z,) array
:y: (N, n_y) or (n_y,) array
"""
if z is not None and self.z_ref is not None:
return z + self.z_ref
elif y is not None and self.y_ref is not None:
return y + self.y_ref
else:
raise RuntimeError('Need to set output or meas. model')
def x_to_zfyf(self, x, zf=False, yf=False):
"""
:x: (N, n_x) or (n_x,) array
:zf: boolean
:yf: boolean
"""
if zf and self.H is not None:
return np.transpose(self.H @ x.T) + self.z_ref
elif yf and self.C is not None:
return np.transpose(self.C @ x.T) + self.y_ref
else:
raise RuntimeError('Need to set output or meas. model')
def x_to_zy(self, x, z=False, y=False):
"""
:x: (N, n_x) or (n_x,) array
:z: boolean
:y: boolean
"""
if z and self.H is not None:
return np.transpose(self.H @ x.T)
elif y and self.C is not None:
return np.transpose(self.C @ y.T)
else:
raise RuntimeError('Need to set output or meas. model')
def get_state_dim(self):
return self.state_dim
def get_input_dim(self):
return self.input_dim
def get_output_dim(self):
return self.output_dim
def get_meas_dim(self):
return self.meas_dim
def get_rom_info(self):
return self.tpwl_dict['rom_info']
def TPWL2LinearROM(tpwl_loc, save_loc):
"""
Generates linearized ROM from existing TPWL ROM by taking the first linearization point.
:param tpwl_loc: absolute path to location of tpwl model pickle file
:param save_loc: absolutel path to location to save linear rom pickle file
"""
tpwl_data = scutils.load_data(tpwl_loc)
linrom_data = dict()
linrom_data['A_c'] = tpwl_data['A_c'][0]
linrom_data['B_c'] = tpwl_data['B_c'][0]
linrom_data['d_c'] = tpwl_data['d_c'][0]
linrom_data['rom_info'] = tpwl_data['rom_info']
scutils.save_data(save_loc, linrom_data)
| mdubied/soft-robot-control | sofacontrol/baselines/rompc/rompc_utils.py | rompc_utils.py | py | 4,166 | python | en | code | 1 | github-code | 36 |
75162395624 | import os
class SwitchGenerator:
_path = "src"
_default_spec = {
#letters_range : max_depth
(2, 2) : 20,
(3, 3) : 10,
(4, 4) : 6,
(5, 6) : 6,
(7, 9) : 5,
(10, 16) : 4,
(17, 25) : 2,
#(41, 128) : 2
}
indent_str = " "
endln = "\n"
template_fn_macro = "TemplatedFn"
@property
def default_spec(self):
return dict(
(i, d) for (mina, maxa), d in self._default_spec.items()
for i in range(mina, maxa+1)
)
def __init__(self, spec=None, types=None, path=None):
self.spec = spec or self.default_spec
self.types = types or ["DPReal", "SPReal"]
self.indentation = 0
self._file = None
self.path = path or self._path
def _write_file(self):
self.enter_switch("width")
for k, v in self.spec.items():
self.write_case(k)
self.write_depth_switch(k, v)
self.write_width_default()
self.exit_switch()
def write_depth_switch(self, w, max_depth):
self.enter_switch("depth")
for d in range(2, max_depth+1):
self.write_case(d)
self.write_templatefn(w, d, None)
#self.write_type_switch(w, d)
self.write_depth_default(w, max_depth)
self.exit_switch()
self.write_break()
def write_type_switch(self, w, d):
self.enter_switch("coeff")
for typ in self.types:
self.write_case(typ)
self.write_templatefn(w, d, typ)
self.write_type_default()
self.exit_switch()
def write_file(self):
path = os.path.join(self.path, "switch.h")
with open(path, "wt", encoding="UTF-8") as f:
self._file = f
self._write_file()
self._file = None
# self.write_config_bounds_header()
def enter_switch(self, var):
self.writeln("switch ({}) {{".format(var))
self.indentation += 1
def exit_switch(self):
self.indentation -= 1
self.writeln("}")
def write_case(self, n):
self.writeln("case {} :".format(n))
def write_templatefn(self, w, d, dtype):
text = "return {}({}, {});".format(
self.template_fn_macro, w, d,# dtype
)
self.writeln(text)
self.write_break()
def write_break(self):
self.writeln("break;" + self.endln)
def write_depth_default(self, w, d):
self.writeln("default :")
text = "Legitimate depth of 2<->{} for records with width {} exceeds limit"
self.writeln(
"throw std::runtime_error ( \"{}\" );".format(
text.format(d, w)
)
)
def write_type_default(self):
self.writeln("default :")
text = "This type is not supported."
self.writeln("throw std::runtime_error ( \"{}\" );".format(text))
def write_width_default(self):
self.writeln("default :")
text = "Legitimate width 2 <-> 256 exceeded"
self.writeln(
"throw std::runtime_error ( \"{}\" );".format(text)
)
def writeln(self, val):
assert self._file
self._file.write(
self.indent_str*self.indentation + val + self.endln
)
def write_config_bounds_header(self):
path = os.path.join(self.path, "config_bounds.h")
with open(path, "wt", encoding="UTF-8") as f:
self._file = f
self._write_config_bounds_header()
self.write_checker_switch_function()
self.write_get_bounds_switch_function()
self._file = None
def _write_config_bounds_header(self):
self.start_internal_namespace()
self.write_struct()
for depth in self.spec:
self.write_struct(depth)
self.end_internal_namespace()
def start_internal_namespace(self):
self.writeln("namespace {")
self.indentation += 1
def end_internal_namespace(self):
self.indentation -= 1
self.writeln("}")
self.writeln(self.endln)
def write_struct(self, width=None):
self.write_struct_head(width)
self.write_struct_values(width)
self.write_struct_end()
def write_struct_head(self, width):
template_str = "" if width else "DEG W"
template_args = "<{width}>".format(width=width) if width else ""
self.writeln("template <{template_str}>".format(template_str=template_str))
self.writeln("struct config_check{template_args}".format(template_args=template_args))
self.writeln("{")
self.indentation += 1
def write_struct_values(self, width):
if not width:
self.writeln("")
return None
min_depth = 2
max_depth = self.spec[width]
self.writeln("static const DEG min_depth = {min_depth};".format(min_depth=min_depth))
self.writeln("static const DEG max_depth = {max_depth};".format(max_depth=max_depth))
def write_struct_end(self):
self.indentation -= 1
self.writeln("};")
self.writeln(self.endln)
def write_checker_switch_function(self):
self.writeln("inline bool check_depth_config(DEG width, DEG depth)")
self.writeln("{")
self.indentation += 1
self.writeln("DEG min_depth, max_depth;")
self.enter_switch("width")
for width in self.spec:
typename = "config_check<{width}>".format(width=width)
self.write_case(width)
self.writeln("min_depth = {typename}::min_depth;".format(typename=typename))
self.writeln("max_depth = {typename}::max_depth;".format(typename=typename))
self.write_break()
self.writeln("default:")
self.writeln("return false;")
self.exit_switch()
self.writeln("return (depth <= max_depth && depth >= min_depth);")
self.indentation -= 1
self.writeln("}")
self.writeln(self.endln)
def write_get_bounds_switch_function(self):
self.writeln("inline std::pair<DEG, DEG> get_depth_bounds_config(DEG width, DEG depth)")
self.writeln("{")
self.indentation += 1
self.writeln("DEG min_depth, max_depth;")
self.enter_switch("width")
for width in self.spec:
typename = "config_check<{width}>".format(width=width)
self.write_case(width)
self.writeln("min_depth = {typename}::min_depth;".format(typename=typename))
self.writeln("max_depth = {typename}::max_depth;".format(typename=typename))
self.write_break()
self.writeln("default:")
self.writeln("return std::make_pair(0, 0);")
self.exit_switch()
self.writeln("return std::make_pair(min_depth, max_depth);")
self.indentation -= 1
self.writeln("}")
self.writeln(self.endln)
if __name__ == "__main__":
import sys
g = SwitchGenerator(path=sys.argv[1])
g.write_file()
| datasig-ac-uk/esig | tools/switch_generator.py | switch_generator.py | py | 7,018 | python | en | code | 37 | github-code | 36 |
21662366530 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time: 2020/4/5 21:14
# @Author: qyh
import matplotlib.pyplot as plt
import numpy.random as rdm
import networkx as nx
node_num = 100
probability = 0.01
er_graph = nx.erdos_renyi_graph(node_num, probability)
susceptible = 'S'
infected = 'I'
recovered = 'R'
# Init
def onset(graph):
for i in graph.nodes.keys():
graph.nodes[i]['state'] = susceptible
# Set infection rate
def infect_prop(graph, proportion):
for i in graph.nodes.keys():
if rdm.random() <= proportion:
graph.nodes[i]['state'] = infected
# Model building
def build_model(p_infect, p_recover):
def model(graph, i):
if graph.nodes[i]['state'] == infected:
for m in graph.neighbors(i):
if graph.nodes[m]['state'] == susceptible:
if rdm.random() <= p_infect:
graph.nodes[m]['state'] = infected
if rdm.random() <= p_recover:
graph.nodes[i]['state'] = recovered
return model
# Single model run
def model_run(graph, model):
for i in graph.nodes.keys():
model(graph, i)
# Multiple model cycles
def model_iter(graph, model, iter_num):
for i in range(iter_num):
model_run(graph, model)
def draw(graph):
fig, ax = plt.subplots(figsize=(12, 10))
ax.set_xticks([])
ax.set_xticks([])
pos = nx.spring_layout(graph, k=0.2)
nx.draw_networkx_edges(graph, pos, alpha=0.5, width=1)
nx.draw_networkx_nodes(graph, pos, node_size=80)
plt.show()
def calc_infection_rate(graph):
onset(graph)
infect_prop(graph, 0.05)
model = build_model(0.2, 0.8)
model_iter(graph, model, 10)
infect = [v for (v, attr) in graph.nodes(data=True) if attr['state'] == recovered]
infection_rate = len(infect) / node_num
print(infection_rate)
if __name__ == '__main__':
draw(er_graph)
calc_infection_rate(er_graph)
| QCloudHao/COVID19 | development.py | development.py | py | 2,014 | python | en | code | 0 | github-code | 36 |
4063170648 | from tkinter import *
window = Tk()
window.title("DOW")
Label(window, text="", width=1).grid(row=0, column=0)
Label(window, text=" Company:").grid(row=0, column=3, sticky=W)
Label(window, text=" Industry:").grid(row=3, column=3, sticky=W)
Label(window, text="Exchange:").grid(row=6, column=4, sticky=E)
Label(window, text="Growth in 2013:").grid(row=7, column=4, sticky=E)
Label(window, text="Price/Earnings ratio:").grid(row=8, column=4, sticky=E)
yscroll = Scrollbar(window, orient=VERTICAL)
yscroll.grid(row=0, column=2, rowspan=9,pady=5, sticky=NS)
lstSymbols = Listbox(window, width=5, yscrollcommand=yscroll.set)
lstSymbols.grid(row=0, column=1, rowspan=9, pady=5, sticky=E)
lstSymbols.bind("<<ListboxSelect>>")
entCompany = Entry(window, state="readonly", width=30)
entCompany.grid(row=1, column=3, columnspan=2, padx=5, sticky=W)
entIndustry = Entry(window, state="readonly", width=30)
entIndustry.grid(row=4, column=3, columnspan=2, padx=5, sticky=W)
entExchange = Entry(window, width=8, state="readonly")
entExchange.grid(row=6, column=5, padx=5, sticky=W)
entGrowth = Entry(window, width=8, state="readonly")
entGrowth.grid(row=7, column=5, padx=5, sticky=W)
entPE = Entry(window, width=8, state="readonly")
entPE.grid(row=8, column=5, padx=5, sticky=W)
yscroll["command"] = lstSymbols.yview
window.mainloop()
| guoweifeng216/python | python_design/pythonprogram_design/Ch8/8-2-E22.py | 8-2-E22.py | py | 1,340 | python | en | code | 0 | github-code | 36 |
22450428766 | """
Team 46
Haoyue Xie 1003068 @Melbourne
Jiayu Li 713551 @Melbourne
Ruqi Li 1008342 @Melbourne
Yi Zhang 1032768 @Melbourne
Zimeng Jia 978322 @Hebei, China
"""
import json
path = "E:/Unimelb/2020semester1/COPM90024 Cluster and Cloud Computing/assignment2/code/"
filename = path + 'SA4_2016_AUST.json'
with open(filename, 'r') as f:
jsonfile = json.load(f) #jsonfile is a dict
print("---------------reading json file done--------------------------------------")
print(jsonfile.keys()) #dict_keys(['type', 'features'])
features = jsonfile['features']
'''
print("type of features:",type(features)) #features is a list
print(len(features))
print("type of features[0]:",type(features[0])) #each feature is a dict
feature = features[0]
print("keys of feature:",feature.keys()) #dict_keys(['type', 'geometry', 'properties'])
properities = feature['properties']
geometry = feature['geometry']
print("type of properities:",type(properities))
print("keys of properities:",properities.keys()) #dict_keys(['SA4_CODE', 'SA4_CODE16', 'SA4_NAME', 'STATE_CODE', 'STATE_NAME', 'AREA_SQKM'])
print("type of geometry:",type(geometry)) #<class 'dict'>
print("keys of geometry:",geometry.keys()) #dict_keys(['type', 'coordinates'])
coordinates = geometry['coordinates']
print("type of coordinates",type(coordinates)) #<class 'list'>
print(len(coordinates)) # len=1
'''
coordinates_Melbourne = []
coordinates_Sydney = []
coordinates_Brisbane = []
coordinates_GoldCoast = []
coordinates_Adelaide = []
coordinates_Perth = []
coordinates_Canberra = []
coordinates_ACT = []
coordinates_NSW = []
coordinates_NT = []
coordinates_QLD = []
coordinates_SA = []
coordinates_TAS = []
coordinates_VIC = []
coordinates_WA = []
area_sqkm_Sydney = 0
area_sqkm_Melbourne = 0
area_sqkm_Brisbane = 0
area_sqkm_GoldCoast = 0
area_sqkm_Adelaide = 0
area_sqkm_Perth = 0
area_sqkm_Canberra = 0
area_sqkm_NT = 0
area_sqkm_WA = 0
area_sqkm_TAS = 0
area_sqkm_SA = 0
area_sqkm_QLD = 0
area_sqkm_VIC = 0
area_sqkm_NSW = 0
area_sqkm_ACT = 0
for feature in features:
properities = feature['properties']
sa4_code16 = properities['SA4_CODE16']
print("+++++++++++++++"+sa4_code16+"++++++++++++")
geometry = feature['geometry'] #geometry has two keys: "tpye" and "coordinates"
print(properities)
if(int(sa4_code16)>=115 and int(sa4_code16)<=128): #-------------------------------merge for Sydney---------------------
if(geometry !=None): #some coordinates are None
coordinates = geometry['coordinates']
#print(properities['SA4_NAME'],geometry['type'],len(coordinates))
if (geometry["type"]=="Polygon"):
coordinates_Sydney.append(coordinates)
else:
for coordinate in coordinates:
coordinates_Sydney.append(coordinate)
area_sqkm_Sydney += properities['AREA_SQKM']
elif(int(sa4_code16)>=206 and int(sa4_code16)<=213): #-----------------------------merge for Melbourne-----------------------------------
if(geometry !=None): #some coordinates are None
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_Melbourne.append(coordinates)
else:
for coordinate in coordinates:
coordinates_Melbourne.append(coordinate)
area_sqkm_Melbourne += properities['AREA_SQKM']
elif(int(sa4_code16)>=401 and int(sa4_code16)<=404): #-----------------------------merge for Adelaide-------------------------------------
if(geometry !=None): #some coordinates are None
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_Adelaide.append(coordinates)
else:
for coordinate in coordinates:
coordinates_Adelaide.append(coordinate)
area_sqkm_Adelaide += properities['AREA_SQKM']
elif(int(sa4_code16)>=301 and int(sa4_code16)<=305): #-----------------------------merge for Brisbane-------------------------------------
if(geometry !=None): #some coordinates are None
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_Brisbane.append(coordinates)
else:
for coordinate in coordinates:
coordinates_Brisbane.append(coordinate)
area_sqkm_Brisbane += properities['AREA_SQKM']
elif(int(sa4_code16)==801): #------------------------------------------------------merge for Canberra-------------------------------------
if(geometry !=None): #some coordinates are None
geometry_Canberra = geometry #Canberraonly have one region
#coordinates = geometry['coordinates']
#if (geometry["type"]=="Polygon"):
# coordinates_Canberra.append(coordinates)
#else:
# for coordinate in coordinates:
# coordinates_Canberra.append(coordinate)
area_sqkm_Canberra += properities['AREA_SQKM']
elif(int(sa4_code16)==309): #------------------------------------------------------merge for Gold Coast-------------------------------------
if(geometry !=None): #some coordinates are None
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_GoldCoast.append(coordinates)
else:
for coordinate in coordinates:
coordinates_GoldCoast.append(coordinate)
area_sqkm_GoldCoast += properities['AREA_SQKM']
elif(int(sa4_code16)>=503 and int(sa4_code16)<=507): #-----------------------------merge for Perth-------------------------------------
if(geometry !=None): #some coordinates are None
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_Perth.append(coordinates)
else:
for coordinate in coordinates:
coordinates_Perth.append(coordinate)
area_sqkm_Perth += properities['AREA_SQKM']
else: #other regions for each state
if(geometry !=None):
if(properities['STATE_CODE']=="8"): #---------------------------------------merge for ACT
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_ACT.append(coordinates)
else:
for coordinate in coordinates:
coordinates_ACT.append(coordinate)
area_sqkm_ACT += properities['AREA_SQKM']
elif(properities['STATE_CODE']=="1"): #---------------------------------------merge for NSW
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_NSW.append(coordinates)
else:
for coordinate in coordinates:
coordinates_NSW.append(coordinate)
area_sqkm_NSW += properities['AREA_SQKM']
elif(properities['STATE_CODE']=="7"): #---------------------------------------merge for NT
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_NT.append(coordinates)
else:
for coordinate in coordinates:
coordinates_NT.append(coordinate)
area_sqkm_NT += properities['AREA_SQKM']
elif(properities['STATE_CODE']=="3"): #---------------------------------------merge for QLD
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_QLD.append(coordinates)
else:
for coordinate in coordinates:
coordinates_QLD.append(coordinate)
area_sqkm_QLD += properities['AREA_SQKM']
elif(properities['STATE_CODE']=="4"): #---------------------------------------merge for SA
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_SA.append(coordinates)
else:
for coordinate in coordinates:
coordinates_SA.append(coordinate)
area_sqkm_SA += properities['AREA_SQKM']
elif(properities['STATE_CODE']=="6"): #---------------------------------------merge for TAS
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_TAS.append(coordinates)
else:
for coordinate in coordinates:
coordinates_TAS.append(coordinate)
area_sqkm_TAS += properities['AREA_SQKM']
elif(properities['STATE_CODE']=="2"): #---------------------------------------merge for VIC
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_VIC.append(coordinates)
else:
for coordinate in coordinates:
coordinates_VIC.append(coordinate)
area_sqkm_VIC += properities['AREA_SQKM']
elif(properities['STATE_CODE']=="5"): #---------------------------------------merge for WA
coordinates = geometry['coordinates']
if (geometry["type"]=="Polygon"):
coordinates_WA.append(coordinates)
else:
for coordinate in coordinates:
coordinates_WA.append(coordinate)
area_sqkm_WA += properities['AREA_SQKM']
#create a new ragion code
#===============================properties,geometry, feature of each region=========================
#----------------------Melbourne--------------------------
properties_Melbbourne = {}
properties_Melbbourne["REGION_CODE"]="01"
properties_Melbbourne["AREA_SQKM"] = area_sqkm_Melbourne
properties_Melbbourne["STATE_CODE"]="2"
properties_Melbbourne["STATE_NAME"]="VIC"
properties_Melbbourne["CITY_NAME"]="Melbourne"
geometry_Melbourne = {}
geometry_Melbourne["type"] = "MultiPolygon"
geometry_Melbourne["coordinates"] = coordinates_Melbourne
feature_Melbbourne = {}
feature_Melbbourne["type"]="Feature"
feature_Melbbourne["geometry"] = geometry_Melbourne
feature_Melbbourne["properties"] = properties_Melbbourne
#----------------------Sydney-------------------------------
properties_Sydney = {}
properties_Sydney["REGION_CODE"]="02"
properties_Sydney["AREA_SQKM"] = area_sqkm_Sydney
properties_Sydney["STATE_CODE"]="1"
properties_Sydney["STATE_NAME"]="NSW"
properties_Sydney["CITY_NAME"]="Sydney"
geometry_Sydney = {}
geometry_Sydney["type"] = "MultiPolygon"
geometry_Sydney["coordinates"] = coordinates_Sydney
feature_Sydney = {}
feature_Sydney["type"]="Feature"
feature_Sydney["geometry"] = geometry_Sydney
feature_Sydney["properties"] = properties_Sydney
#----------------------Brisbane-------------------------------
properties_Brisbane = {}
#properties_Brisbane["type"]="Feature"
properties_Brisbane["REGION_CODE"]="03"
properties_Brisbane["AREA_SQKM"] = area_sqkm_Brisbane
properties_Brisbane["STATE_CODE"]="3"
properties_Brisbane["STATE_NAME"]="QLD"
properties_Brisbane["CITY_NAME"]="Brisbane"
geometry_Brisbane = {}
geometry_Brisbane["type"] = "MultiPolygon"
geometry_Brisbane["coordinates"] = coordinates_Brisbane
feature_Brisbane = {}
feature_Brisbane["type"]="Feature"
feature_Brisbane["geometry"] = geometry_Brisbane
feature_Brisbane["properties"] = properties_Brisbane
#----------------------GoldCoast-------------------------------
properties_GoldCoast = {}
#properties_GoldCoast["type"]="Feature"
properties_GoldCoast["REGION_CODE"]="04"
properties_GoldCoast["AREA_SQKM"] = area_sqkm_GoldCoast
properties_GoldCoast["STATE_CODE"]="3"
properties_GoldCoast["STATE_NAME"]="QLD"
properties_GoldCoast["CITY_NAME"]="Gold Coast"
geometry_GoldCoast = {}
geometry_GoldCoast["type"] = "MultiPolygon"
geometry_GoldCoast["coordinates"] = coordinates_GoldCoast
feature_GoldCoast = {}
feature_GoldCoast["type"]="Feature"
feature_GoldCoast["geometry"] = geometry_GoldCoast
feature_GoldCoast["properties"] = properties_GoldCoast
#----------------------Adelaide-------------------------------
properties_Adelaide = {}
#properties_Adelaide["type"]="Feature"
properties_Adelaide["REGION_CODE"]="05"
properties_Adelaide["AREA_SQKM"] = area_sqkm_Adelaide
properties_Adelaide["STATE_CODE"]="4"
properties_Adelaide["STATE_NAME"]="SA"
properties_Adelaide["CITY_NAME"]="Adelaide"
geometry_Adelaide = {}
geometry_Adelaide["type"] = "MultiPolygon"
geometry_Adelaide["coordinates"] = coordinates_Adelaide
feature_Adelaide = {}
feature_Adelaide["type"]="Feature"
feature_Adelaide["geometry"] = geometry_Adelaide
feature_Adelaide["properties"] = properties_Adelaide
#----------------------Perth-------------------------------
properties_Perth = {}
#properties_Perth["type"]="Feature"
properties_Perth["REGION_CODE"]="06"
properties_Perth["AREA_SQKM"] = area_sqkm_Perth
properties_Perth["STATE_CODE"]="5"
properties_Perth["STATE_NAME"]="WA"
properties_Perth["CITY_NAME"]="Perth"
geometry_Perth = {}
geometry_Perth["type"] = "MultiPolygon"
geometry_Perth["coordinates"] = coordinates_Perth
feature_Perth = {}
feature_Perth["type"]="Feature"
feature_Perth["geometry"] = geometry_Perth
feature_Perth["properties"] = properties_Perth
#----------------------Canberra-------------------------------
properties_Canberra = {}
#properties_Canberra["type"]="Feature"
properties_Canberra["REGION_CODE"]="07"
properties_Canberra["AREA_SQKM"] = area_sqkm_Canberra
properties_Canberra["STATE_CODE"]="8"
properties_Canberra["CITY_NAME"]="Canberra"
#geometry_Canberra = {}
#geometry_Canberra["type"] = "Polygon"
#geometry_Canberra["coordinates"] = coordinates_Canberra
feature_Canberra = {}
feature_Canberra["type"]="Feature"
feature_Canberra["geometry"] = geometry_Canberra
feature_Canberra["properties"] = properties_Canberra
#----------------------ACT-------------------------------
properties_ACT = {}
#properties_ACT["type"]="Feature"
properties_ACT["REGION_CODE"]="08"
properties_ACT["AREA_SQKM"] = area_sqkm_ACT
properties_ACT["STATE_CODE"]="8"
properties_ACT["STATE_NAME"]="ACT"
properties_ACT["CITY_NAME"]="Australian Capital Territory Other Regions"
geometry_ACT = {}
#geometry_ACT["type"] = "Polygon"
#geometry_ACT["coordinates"] = coordinates_ACT
feature_ACT = {}
feature_ACT["type"]="Feature"
feature_ACT["geometry"] = geometry_ACT
feature_ACT["properties"] = properties_ACT
#----------------------NSW-------------------------------
properties_NSW = {}
#properties_NSW["type"]="Feature"
properties_NSW["REGION_CODE"]="09"
properties_NSW["AREA_SQKM"] = area_sqkm_NSW
properties_NSW["STATE_CODE"]="1"
properties_NSW["STATE_NAME"]="NSW"
properties_NSW["CITY_NAME"]="New South Wales Other Regions"
geometry_NSW = {}
geometry_NSW["type"] = "MultiPolygon"
geometry_NSW["coordinates"] = coordinates_NSW
feature_NSW = {}
feature_NSW["type"]="Feature"
feature_NSW["geometry"] = geometry_NSW
feature_NSW["properties"] = properties_NSW
#----------------------VIC-------------------------------
properties_VIC = {}
#properties_VIC["type"]="Feature"
properties_VIC["REGION_CODE"]="10"
properties_VIC["AREA_SQKM"] = area_sqkm_VIC
properties_VIC["STATE_CODE"]="2"
properties_VIC["STATE_NAME"]="VIC"
properties_VIC["CITY_NAME"]="Victoria Other Regions"
geometry_VIC = {}
geometry_VIC["type"] = "MultiPolygon"
geometry_VIC["coordinates"] = coordinates_VIC
feature_VIC = {}
feature_VIC["type"]="Feature"
feature_VIC["geometry"] = geometry_VIC
feature_VIC["properties"] = properties_VIC
#----------------------QLD-------------------------------
properties_QLD = {}
#properties_QLD["type"]="Feature"
properties_QLD["REGION_CODE"]="11"
properties_QLD["AREA_SQKM"] = area_sqkm_QLD
properties_QLD["STATE_CODE"]="3"
properties_QLD["STATE_NAME"]="QLD"
properties_QLD["CITY_NAME"]="Queensland Other Regions"
geometry_QLD = {}
geometry_QLD["type"] = "MultiPolygon"
geometry_QLD["coordinates"] = coordinates_QLD
feature_QLD = {}
feature_QLD["type"]="Feature"
feature_QLD["geometry"] = geometry_QLD
feature_QLD["properties"] = properties_QLD
#----------------------SA-------------------------------
properties_SA = {}
#properties_SA["type"]="Feature"
properties_SA["REGION_CODE"]="12"
properties_SA["AREA_SQKM"] = area_sqkm_SA
properties_SA["STATE_CODE"]="4"
properties_SA["STATE_NAME"]="SA"
properties_SA["CITY_NAME"]="South Australia Other Regions"
geometry_SA = {}
geometry_SA["type"] = "MultiPolygon"
geometry_SA["coordinates"] = coordinates_SA
feature_SA = {}
feature_SA["type"]="Feature"
feature_SA["geometry"] = geometry_SA
feature_SA["properties"] = properties_SA
#----------------------TAS-------------------------------
properties_TAS = {}
#properties_TAS["type"]="Feature"
properties_TAS["REGION_CODE"]="13"
properties_TAS["AREA_SQKM"] = area_sqkm_TAS
properties_TAS["STATE_CODE"]="6"
properties_TAS["STATE_NAME"]="TAS"
properties_TAS["CITY_NAME"]="Tasmania Other Regions"
geometry_TAS = {}
geometry_TAS["type"] = "MultiPolygon"
geometry_TAS["coordinates"] = coordinates_TAS
feature_TAS = {}
feature_TAS["type"]="Feature"
feature_TAS["geometry"] = geometry_TAS
feature_TAS["properties"] = properties_TAS
#----------------------WA-------------------------------
properties_WA = {}
#properties_WA["type"]="Feature"
properties_WA["REGION_CODE"]="14"
properties_WA["AREA_SQKM"] = area_sqkm_WA
properties_WA["STATE_CODE"]="5"
properties_WA["STATE_NAME"]="WA"
properties_WA["CITY_NAME"]="Western Australia Other Regions"
geometry_WA = {}
geometry_WA["type"] = "MultiPolygon"
geometry_WA["coordinates"] = coordinates_WA
feature_WA = {}
feature_WA["type"]="Feature"
feature_WA["geometry"] = geometry_WA
feature_WA["properties"] = properties_WA
#----------------------NT-------------------------------
properties_NT = {}
#properties_NT["type"]="Feature"
properties_NT["REGION_CODE"]="15"
properties_NT["AREA_SQKM"] = area_sqkm_NT
properties_NT["STATE_CODE"]="7"
properties_NT["STATE_NAME"]="NT"
properties_NT["CITY_NAME"]="Northern Territory Other Regions"
geometry_NT = {}
geometry_NT["type"] = "MultiPolygon"
geometry_NT["coordinates"] = coordinates_NT
feature_NT = {}
feature_NT["type"]="Feature"
feature_NT["geometry"] = geometry_NT
feature_NT["properties"] = properties_NT
#=============================Add feature into features and output====================================
#new_features = [feature_Adelaide,feature_Brisbane,feature_Canberra,feature_GoldCoast,feature_Melbbourne,feature_Perth,feature_Sydney,\
#feature_NSW,feature_NT,feature_QLD,feature_SA,feature_TAS,feature_VIC,feature_WA]
#feature_ACT is empty
new_features = [feature_WA,feature_Perth,feature_Adelaide,feature_Brisbane,feature_Canberra,feature_GoldCoast,feature_Melbbourne,feature_Sydney,\
feature_NSW,feature_NT,feature_QLD,feature_SA,feature_TAS,feature_VIC,feature_WA]
#print("************************")
#print(feature_ACT["properties"])
#print(feature_ACT["geometry"])
newjsonfile = {}
newjsonfile["type"] = "FeatureCollection"
newjsonfile["features"] = new_features
#print(newjsonfile.keys())
#print(newjsonfile["features"][0].keys())
#print(newjsonfile["features"][0]['properties'])
#print(newjsonfile["features"][0]['geometry'])
#print("newjsonfile done!")
#json_str = json.dumps(newjsonfile)
#print("create json done!")
outputfilename = path + "City_geojson.json"
with open(outputfilename, 'w') as json_file:
#json_file.write(json_str)
for chunk in json.JSONEncoder().iterencode(newjsonfile):
json_file.write(chunk)
print("All Done!") | yzzhan4/COMP90024-AuzLife | Create City_GeoJSON file/coordinates for cities.py | coordinates for cities.py | py | 19,866 | python | en | code | 0 | github-code | 36 |
10586396443 | class Option:
def __init__(self, symbols, value):
self.beats = None
self.symbols = symbols
self.value = value
def score(self, other):
if other == self:
return self.value + 3
if self.beats == other:
return self.value + 6
if other.beats == self:
return self.value
def find(self, symbol):
return symbol in self.symbols
with open('day-2/part1-input.txt', 'r') as source:
rock = Option({'A', 'X'}, 1)
paper = Option({'B', 'Y'}, 2)
scissors = Option({'C', 'Z'}, 3)
rock.beats = scissors
paper.beats = rock
scissors.beats = paper
score = 0
for line in source.read().splitlines():
[theirs, mine] = line.split()
their_symbol = next(x for x in [rock, paper, scissors] if x.find(theirs))
my_symbol = next(x for x in [rock, paper, scissors] if x.find(mine))
score = score + my_symbol.score(their_symbol)
print(score)
| chiptopher/advent-of-code-2022 | day-2/part1.py | part1.py | py | 986 | python | en | code | 0 | github-code | 36 |
19112292097 |
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
class DeepQNetwork(nn.Module):
def __init__(self, lr, input_dims, fc1_dims, fc2_dims, n_actions):
super(DeepQNetwork, self).__init__()
self.input_dims = input_dims
self.fc1_dims = fc1_dims
self.fc2_dims = fc2_dims
self.n_actions = n_actions
self.fc1 = nn.Linear(*self.input_dims, self.fc1_dims)
self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
self.fc3 = nn.Linear(self.fc2_dims, self.n_actions)
self.optimizer = optim.Adam(self.parameters(), lr=lr)
self.loss = nn.MSELoss()
self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
actions = self.fc3(x) #this does return estimates of PV(Q-value)
return actions
class Agent():
def __init__(self, gamma, epsilon, lr, input_dims, batch_size, n_actions, epsilon_min):
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_decay = 0.99
self.lr = lr
self.input_dims = input_dims
self.n_actions = n_actions
self.batch_size = batch_size
self.mem_size = 10000
self.epsilon_min = epsilon_min
self.action_space = [i for i in range(self.n_actions)]
self.mem_cntr = 0
self.Q_eval = DeepQNetwork(self.lr, n_actions=self.n_actions, input_dims=self.input_dims, fc1_dims=256, fc2_dims=256)
self.state_memory = np.zeros((self.mem_size, *input_dims), dtype=np.float32)
self.new_state_memory = np.zeros((self.mem_size, *input_dims), dtype=np.float32)
self.action_memory = np.zeros(self.mem_size, dtype=np.int32)
self.reward_memory = np.zeros(self.mem_size, dtype=np.float32)
self.terminal_memory = np.zeros(self.mem_size, dtype=np.bool)
def store_transitions(self, state, action, reward, new_state, done):
# This is the memory function to create samples to learn from
index = self.mem_cntr % self.mem_size #starts at 0 when reaching mem_size
self.state_memory[index] = state
self.new_state_memory[index] = new_state
self.reward_memory[index] = reward
self.action_memory[index] = action
self.terminal_memory[index] = done
self.mem_cntr += 1
def act(self, observation):
print(observation)
print(observation.shape)
print(type(observation[0]))
if np.random.random() > self.epsilon:
state = T.tensor([observation]).to(self.Q_eval.device)
actions = self.Q_eval.forward(state)
action = T.argmax(actions).item()
else:
action = np.random.choice(self.action_space)
return action
def learn(self):
if self.mem_cntr < self.batch_size: # skip learning till enough samples
return
self.Q_eval.optimizer.zero_grad()
max_mem = min(self.mem_cntr, self.mem_size)
batch = np.random.choice(max_mem, self.batch_size, replace=False)
batch_index = np.arange(self.batch_size, dtype=np.int32)
state_batch = T.tensor(self.state_memory[batch]).to(self.Q_eval.device)
new_state_batch = T.tensor(self.new_state_memory[batch]).to(self.Q_eval.device)
reward_batch = T.tensor(self.reward_memory[batch]).to(self.Q_eval.device)
terminal_batch = T.tensor(self.terminal_memory[batch]).to(self.Q_eval.device)
action_batch = self.action_memory[batch]
q_eval = self.Q_eval.forward(state_batch)[batch_index, action_batch] # values for actions that were took
#print(self.Q_eval.forward(state_batch))
#print(batch_index)
#print(action_batch)
#print(q_eval)
q_next = self.Q_eval.forward(new_state_batch) # value of new state (TODO TARGET NETWORK HERE)
#print(q_next)
q_next[terminal_batch] = 0.0
#print(q_next)
#print(reward_batch)
q_target = reward_batch +self.gamma * T.max(q_next, dim=1)[0]
#print(q_target)
loss = self.Q_eval.loss(q_target, q_eval).to(self.Q_eval.device)
loss.backward()
self.Q_eval.optimizer.step()
self.epsilon = max(self.epsilon*self.epsilon_decay, self.epsilon_min)
import gym
if __name__ == '__main__':
env = gym.make('LunarLander-v2')
agent = Agent(gamma=0.99, epsilon=1.0, batch_size=3, n_actions=4, epsilon_min=0.01, input_dims=[8], lr=0.003)
scores = []
eps_history = []
episodes = 500
for episode in range(episodes):
score = 0
done = False
cur_state = env.reset()
while not done:
print(cur_state)
action = agent.act(cur_state)
new_state, reward, done, info = env.step(action)
score += reward
agent.store_transitions(cur_state, action, reward, new_state, done)
agent.learn()
cur_state = new_state
scores.append(score)
eps_history.append(agent.epsilon)
avg_score = np.mean(scores[-100:])
print("Epsiode", episode, "Score %.2f" % score, "Average Score %.2f" % avg_score, "Epsilon %.2f" %agent.epsilon)
| miczed/learny-mc-learnface | DQN/Examples/DQN_Lunar_MLwithPhil.py | DQN_Lunar_MLwithPhil.py | py | 5,392 | python | en | code | 0 | github-code | 36 |
6671044786 | from sense_hat import SenseHat
from PIL import Image
from random import randint
import time
sense = SenseHat()
amount_of_pics = 3
while True:
pic_nr = str(randint(1, amount_of_pics))
img = Image.open('pic'+pic_nr+'.png')
byteList = list(img.getdata())
pixels = []
for index, byte in enumerate(byteList):
pixels.append([byte[0], byte[1], byte[2]])
sense.set_pixels(pixels)
time.sleep(1)
| gdmgent-IoT-1920/labo-2-sensehat-hansvertriest | avatar_animated.py | avatar_animated.py | py | 425 | python | en | code | 0 | github-code | 36 |
73313388265 | # %%
import csv
import sys
import numpy as np
import pandas as pd
import os
from matplotlib import pyplot as plt
# plt.rcParams['font.sans-serif'] = ['Times New Roman']
# ๆพๅฐๅ
จๅฑๆไผ 10 20 41
# ไธๆญๆพๅฐๆไผ 15 34
filename1 = "results/Cifar10-center/LeNet/record_sgd weight.csv"
filename2 = "results/Cifar10-center/LeNet/record_spf weight.csv"
filename3 = "painting/csv/freezing_weight.csv"
colors = ['#FD6D5A', '#FEB40B', '#6DC354', '#994487', '#518CD8', '#443295']
df1 = pd.read_csv(filename1)
df2 = pd.read_csv(filename3)
# 33
# %%
# s = list(range(40, 45))
s = [41]
df1_v = df1.iloc[:35000, s].values.T
df2_v = df2.iloc[:, s].values.T
for i in range(len(s)):
plt.clf()
plt.title(f"weight changed of {s[i]}")
# plt.plot(df1_v[i], linewidth=0.5, color=colors[0], label="sgd")
plt.plot(df2_v[i], linewidth=0.5, color=colors[3], label="spf")
plt.legend()
plt.show()
# %% ไฟๅญๅพ็
plt.clf()
plt.figure(figsize=(6, 4))
# plt.title(f"Weight Changed of Parameter in SPF")
plt.xticks(size=20)
plt.yticks(size=20)
plt.xlabel("Time", fontsize=25)
plt.ylabel("Value", fontsize=25)
plt.plot(df2_v[1][:2150], linewidth=0.5, color='r', label="spf")
# plt.savefig(f"./painting/pic/weight_change_spf_41.svg", bbox_inches='tight')
plt.show()
| zhengLabs/FedLSC | painting/painting_weight_change.py | painting_weight_change.py | py | 1,279 | python | en | code | 1 | github-code | 36 |
18482251122 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Residual refinement block built from two 1x1 convolutions.

    When the input and output channel counts differ, the skip connection is
    projected through a 1x1 conv + batch norm before being added back.
    NOTE: the projection branch is always constructed (even when unused) so
    the parameter layout matches existing checkpoints.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # Keep the module creation order fixed: it determines both the random
        # initialization sequence and the state_dict key layout.
        self.conv1 = nn.Conv2d(inplanes, inplanes, 1, stride, bias=False)
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(inplanes, planes, 1, stride, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes))
        # Project the skip path only when the channel count changes.
        self.isdownsample = inplanes != planes

    def forward(self, x):
        """Run conv-bn-relu-conv-bn, add the (possibly projected) skip, relu."""
        identity = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.isdownsample:
            identity = self.downsample(x)
        return self.relu(y + identity)
class Decoder(nn.Module):
    """DSSD-style top-down feature decoder.

    Projects the three shallower backbone stages to 256 channels, merges them
    top-down into the deepest feature map (bilinear-upsampling when spatial
    sizes differ), and refines every fusion level with a BasicBlock head.
    """

    def __init__(self, backbone):
        super(Decoder, self).__init__()
        # Per-stage channel widths of the supported backbone; index 0 unused.
        if backbone == 'resnet':
            channel = [0, 256, 512, 1024, 256]
        else:
            raise NotImplementedError

        def lateral(in_channels):
            # 1x1 projection of one backbone stage down to 256 channels.
            return nn.Sequential(nn.Conv2d(in_channels, 256, 1, 1, bias=False),
                                 nn.BatchNorm2d(256),
                                 nn.ReLU())

        # Keep this creation order: it fixes both the RNG sequence consumed by
        # _init_weight (which iterates self.modules()) and state_dict keys.
        self.layer1 = lateral(channel[1])
        self.layer2 = lateral(channel[2])
        self.layer3 = lateral(channel[3])
        plances = [256, 256, 256, 256]
        self.last_conv1 = BasicBlock(256, plances[0])
        self.last_conv2 = BasicBlock(256, plances[1])
        self.last_conv3 = BasicBlock(256, plances[2])
        self.last_conv4 = BasicBlock(256, plances[3])
        self.plances = plances
        self._init_weight()

    def forward(self, layer1_feat, layer2_feat, layer3_feat, layer4_feat):
        """Fuse features top-down; return one refined map per level (1..4)."""
        fused = layer4_feat
        out4 = self.last_conv4(fused)
        # Walk from the deepest lateral to the shallowest, accumulating.
        stages = ((self.layer3, layer3_feat, self.last_conv3),
                  (self.layer2, layer2_feat, self.last_conv2),
                  (self.layer1, layer1_feat, self.last_conv1))
        outputs = []
        for project, feat, refine in stages:
            side = project(feat)
            if fused.size()[2:] != side.size()[2:]:
                fused = F.interpolate(fused, size=side.size()[2:],
                                      mode='bilinear', align_corners=True)
            fused = fused + side
            outputs.append(refine(fused))
        out3, out2, out1 = outputs
        return out1, out2, out3, out4

    def _init_weight(self):
        # Kaiming-normal for convolutions; unit-weight/zero-bias batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                torch.nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
def build_decoder(backbone):
    """Factory for the decoder; `backbone` must be 'resnet' (anything else raises NotImplementedError)."""
    return Decoder(backbone)
if __name__ == "__main__":
    # Smoke test: feed randomly initialized feature maps with the channel
    # counts the 'resnet' decoder expects (256/512/1024/256) through one pass.
    model = build_decoder('resnet')
    layer4_feat = torch.rand((1, 256, 16, 16))
    layer3_feat = torch.rand((1, 1024, 16, 16))
    layer2_feat = torch.rand((1, 512, 32, 32))
    layer1_feat = torch.rand((1, 256, 64, 64))
    output = model(layer1_feat, layer2_feat, layer3_feat, layer4_feat)
pass | TWSFar/DSSD | model/decoder.py | decoder.py | py | 3,824 | python | en | code | 8 | github-code | 36 |
27912151130 | import boto3
import pg8000
import datetime
import json
import time
# Create an S3 client
s3 = boto3.client('s3')
# Create a CloudWatch client
cloudwatch = boto3.client('cloudwatch')
def ingest_database_to_s3(bucket_name):
    """Export every public table of the source Postgres database to S3 as CSV.

    Credentials come from the AWS Secrets Manager secret ``db-creds-source``.
    The very first ingestion (detected by scanning the newest CloudWatch log
    stream for a previous "[INGESTION] Ingestion completed" marker) exports
    every table; later runs only re-export tables whose ``last_updated``
    column changed within the last 5 minutes.  Each table is written to
    ``/tmp/<table>.csv`` and uploaded as ``<table>.csv`` into ``bucket_name``.

    Parameters
    ----------
    bucket_name: str
        Name of the destination S3 bucket.
    """
    # Retrieve the database connection details from AWS Secrets Manager
    secretsmanager = boto3.client("secretsmanager")
    secret_value_response = secretsmanager.get_secret_value(SecretId="db-creds-source") #Change this to match
    secret_dict = json.loads(secret_value_response["SecretString"])
    host = secret_dict["host"]
    port = secret_dict["port"]
    user = secret_dict["username"]
    password = secret_dict["password"]
    database = secret_dict["database"]
    # Connect to the PostgreSQL database
    conn = pg8000.connect(
        host=host,
        port=port,
        user=user,
        password=password,
        database=database
    )
    cursor = conn.cursor()
    # Decide whether this is the first ever ingestion by scanning the most
    # recent CloudWatch log stream for the completion marker.
    logs = boto3.client('logs')
    log_groups = logs.describe_log_groups()
    log_group = log_groups['logGroups'][-1]['logGroupName']
    log_streams = logs.describe_log_streams(logGroupName=log_group)
    log_stream = log_streams['logStreams'][0]['logStreamName']
    log_events = logs.get_log_events(logGroupName=log_group, logStreamName=log_stream)
    first_ingestion = True
    for event in log_events['events']:
        if "[INGESTION] Ingestion completed" in event['message']:
            first_ingestion = False
            break
    try:
        # Retrieve all table names from the totesys database
        cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'")
        table_names = [row[0] for row in cursor.fetchall()]
        # Save the data from each table in a separate file in the "ingestion" S3 bucket
        for table_name in table_names:
            # Check if the table has been modified since the last time it was ingested
            cursor.execute(f"SELECT max(last_updated) FROM {table_name}")
            last_update = cursor.fetchone()[0]
            # NOTE(review): if a table is empty, last_update is None and the
            # comparison below raises TypeError on non-first ingestions.
            # If the table has been modified, retrieve and save the updated data
            if first_ingestion or last_update > datetime.datetime.utcnow() - datetime.timedelta(minutes=5): #Change this to what you decide on
                # Retrieve column names from the current table.
                # NOTE(review): the N'...' string prefix is SQL Server syntax;
                # confirm pg8000/Postgres accepts it for this query.
                cursor.execute(f"SELECT column_name FROM (SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'{table_name}') AS column_schema")
                column_names = cursor.fetchall()
                # Retrieve the data from the current table
                cursor.execute(f"SELECT * FROM {table_name}")
                rows = cursor.fetchall()
                # Save the data to a CSV file in the "ingestion" S3 bucket;
                # cells containing commas are wrapped in double quotes.
                with open(f"/tmp/{table_name}.csv", "w") as file:
                    file.write(",".join([column_name[0] for column_name in column_names]))
                    file.write("\n")
                    for row in rows:
                        file.write(",".join(["\"" + str(cell) + "\"" if "," in str(cell) else str(cell) for cell in row]))
                        file.write("\n")
                s3.upload_file(f"/tmp/{table_name}.csv", bucket_name, f"{table_name}.csv")
                print(f'[INGESTION] MODIFIED: {table_name} was last modified at {last_update}')
            else:
                # Log a message to CloudWatch indicating that the table has not been modified
                print(f'[INGESTION] {table_name} was last modified at {last_update}')
    except Exception as e:
        # Log the error message to CloudWatch
        print(f'[INGESTION] ERROR: {e}')
    finally:
        # Always release the DB resources; previously the cursor/connection
        # were only closed on the success path and leaked on any exception.
        cursor.close()
        conn.close()
def lambda_handler(event, context):
    """AWS Lambda entry point: run one ingestion pass into event['ingested_bucket'].

    The start/completed log lines bracket the run so a later invocation can
    detect (by scanning CloudWatch) that a previous ingestion finished.
    """
    # Log the start time of the function execution
    print(f'[INGESTION] Ingestion started')
    # Allow time for cloudwatch log to be created
    time.sleep(15)
    # Ingest the database to S3
    ingest_database_to_s3(event['ingested_bucket'])
    # Log the end time of the function execution
print(f'[INGESTION] Ingestion completed') | vasilecondrea/lake-cabin-project | extract/src/extract.py | extract.py | py | 4,151 | python | en | code | 1 | github-code | 36 |
15065246438 | import asyncio
import base64
import collections
import json
import struct
import sys
import aiohttp
import pytest
import six
from pytest_httpserver import RequestHandler
import consul
import consul.aio
Check = consul.Check
@pytest.fixture
def local_server(httpserver):
    """Yield a local HTTP server whose /v1/agent/services endpoint answers
    with status 599, for exercising the client's timeout/error path."""
    # RequestHandler is already imported at module scope; the previous local
    # re-import shadowed it redundantly and has been removed.
    handler = httpserver.expect_request('/v1/agent/services')
    assert isinstance(handler, RequestHandler)
    handler.respond_with_data(json.dumps({"foo": "bar"}), status=599)
    port = httpserver.port
    LocalServer = collections.namedtuple('LocalServer', ['port'])
    yield LocalServer(port)
    # Teardown: stop the server once the test is done with it.
    httpserver.stop()
@pytest.fixture
async def local_timeout_server(httpserver):
    """Configure `httpserver` to answer /v1/agent/services with a canned
    payload (status 200) and return a (port, server) named tuple.

    NOTE(review): this is an async function under a plain @pytest.fixture;
    as used below, the test receives the coroutine and awaits it itself.
    """
    async def func():
        return json.dumps({"foo": "bar"})
    handler = httpserver.expect_request('/v1/agent/services')
    assert isinstance(handler, RequestHandler)
    handler.respond_with_data(await func(), status=200)
    LocalServer = collections.namedtuple('LocalServer', ['port', 'server'])
    return LocalServer(httpserver.port, httpserver)
@pytest.fixture
def loop(request):
    """Provide a fresh event loop per test; the loop is closed at teardown."""
    # Detach any pre-existing loop so tests must use the one handed out here.
    asyncio.set_event_loop(None)
    loop = asyncio.new_event_loop()
    def fin():
        # Teardown: release the loop's resources.
        loop.close()
    request.addfinalizer(fin)
    return loop
class TestAsyncioConsul(object):
    """Integration tests for the asyncio Consul client (consul.aio).

    Each test drives its coroutines on the per-test ``loop`` fixture against
    a live Consul agent listening on ``consul_port``. Several tests use a
    background task plus a blocking (index-based) query to exercise Consul's
    watch semantics.
    """

    # Basic KV round trip: missing key -> put -> get.
    def test_kv(self, loop, consul_port):
        async def main():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            index, data = await c.kv.get('foo')
            assert data is None
            response = await c.kv.put('foo', 'bar')
            assert response is True
            response = await c.kv.put('foo-2', 'bar')
            assert response is True
            index, data = await c.kv.get('foo')
            assert data['Value'] == six.b('bar')
        loop.run_until_complete(main())

    # Construct without an explicit loop; client must pick up the global loop.
    def test_consul_ctor(self, loop, consul_port):
        # same as previous but with global event loop
        async def main():
            c = consul.aio.Consul(port=consul_port)
            assert c._loop is loop
            await c.kv.put('foo', struct.pack('i', 1000))
            index, data = await c.kv.get('foo')
            assert struct.unpack('i', data['Value']) == (1000,)
        asyncio.set_event_loop(loop)
        loop.run_until_complete(main())

    # Binary values must round-trip unchanged through the KV store.
    def test_kv_binary(self, loop, consul_port):
        async def main():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            await c.kv.put('foo', struct.pack('i', 1000))
            index, data = await c.kv.get('foo')
            assert struct.unpack('i', data['Value']) == (1000,)
        loop.run_until_complete(main())

    # A blocking get on a missing key returns once a concurrent put lands.
    def test_kv_missing(self, loop, consul_port):
        async def main():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            fut = asyncio.ensure_future(put(), loop=loop)
            await c.kv.put('index', 'bump')
            index, data = await c.kv.get('foo')
            assert data is None
            index, data = await c.kv.get('foo', index=index)
            assert data['Value'] == six.b('bar')
            await fut
        async def put():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            await asyncio.sleep(2.0 / 100, loop=loop)
            await c.kv.put('foo', 'bar')
        loop.run_until_complete(main())

    # The `flags` metadata set on a put must be visible on subsequent gets.
    def test_kv_put_flags(self, loop, consul_port):
        async def main():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            await c.kv.put('foo', 'bar')
            index, data = await c.kv.get('foo')
            assert data['Flags'] == 0
            response = await c.kv.put('foo', 'bar', flags=50)
            assert response is True
            index, data = await c.kv.get('foo')
            assert data['Flags'] == 50
        loop.run_until_complete(main())

    # Single-key delete and recursive prefix delete.
    def test_kv_delete(self, loop, consul_port):
        async def main():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            await c.kv.put('foo1', '1')
            await c.kv.put('foo2', '2')
            await c.kv.put('foo3', '3')
            index, data = await c.kv.get('foo', recurse=True)
            assert [x['Key'] for x in data] == ['foo1', 'foo2', 'foo3']
            response = await c.kv.delete('foo2')
            assert response is True
            index, data = await c.kv.get('foo', recurse=True)
            assert [x['Key'] for x in data] == ['foo1', 'foo3']
            response = await c.kv.delete('foo', recurse=True)
            assert response is True
            index, data = await c.kv.get('foo', recurse=True)
            assert data is None
        loop.run_until_complete(main())

    # Watch (blocking get) wakes up when a background task writes the key.
    def test_kv_subscribe(self, loop, consul_port):
        async def get():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            fut = asyncio.ensure_future(put(), loop=loop)
            index, data = await c.kv.get('foo')
            assert data is None
            index, data = await c.kv.get('foo', index=index)
            assert data['Value'] == six.b('bar')
            await fut
        async def put():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            await asyncio.sleep(1.0 / 100, loop=loop)
            response = await c.kv.put('foo', 'bar')
            assert response is True
        loop.run_until_complete(get())

    # KV transactions: set then get via the txn endpoint (base64 payloads).
    def test_transaction(self, loop, consul_port):
        async def main():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            value = base64.b64encode(b"1").decode("utf8")
            d = {"KV": {"Verb": "set", "Key": "asdf", "Value": value}}
            r = await c.txn.put([d])
            assert r["Errors"] is None
            d = {"KV": {"Verb": "get", "Key": "asdf"}}
            r = await c.txn.put([d])
            assert r["Results"][0]["KV"]["Value"] == value
        loop.run_until_complete(main())

    # Agent service register/deregister round trip.
    def test_agent_services(self, loop, consul_port):
        async def main():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            services = await c.agent.services()
            assert services == {}
            response = await c.agent.service.register('foo')
            assert response is True
            services = await c.agent.services()
            assert services == {
                'foo': {'ID': 'foo',
                        'Service': 'foo',
                        'Tags': [],
                        'Meta': {},
                        'Port': 0,
                        'Address': '',
                        'Weights': {'Passing': 1, 'Warning': 1},
                        'EnableTagOverride': False}, }
            response = await c.agent.service.deregister('foo')
            assert response is True
            services = await c.agent.services()
            assert services == {}
        loop.run_until_complete(main())

    # Catalog node register/deregister observed via blocking node queries.
    def test_catalog(self, loop, consul_port):
        async def nodes():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            fut = asyncio.ensure_future(register(), loop=loop)
            index, nodes = await c.catalog.nodes()
            assert len(nodes) == 1
            current = nodes[0]
            index, nodes = await c.catalog.nodes(index=index)
            nodes.remove(current)
            assert [x['Node'] for x in nodes] == ['n1']
            index, nodes = await c.catalog.nodes(index=index)
            nodes.remove(current)
            assert [x['Node'] for x in nodes] == []
            await fut
        async def register():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            await asyncio.sleep(1.0 / 100, loop=loop)
            response = await c.catalog.register('n1', '10.1.10.11')
            assert response is True
            await asyncio.sleep(50 / 1000.0, loop=loop)
            response = await c.catalog.deregister('n1')
            assert response is True
        loop.run_until_complete(nodes())

    # Session create/destroy observed via blocking session listings.
    def test_session(self, loop, consul_port):
        async def monitor():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            fut = asyncio.ensure_future(register(), loop=loop)
            index, services = await c.session.list()
            assert services == []
            await asyncio.sleep(20 / 1000.0, loop=loop)
            index, services = await c.session.list(index=index)
            assert len(services)
            index, services = await c.session.list(index=index)
            assert services == []
            await fut
        async def register():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            await asyncio.sleep(1.0 / 100, loop=loop)
            session_id = await c.session.create()
            await asyncio.sleep(50 / 1000.0, loop=loop)
            response = await c.session.destroy(session_id)
            assert response is True
        loop.run_until_complete(monitor())

    # Garbage-collecting an un-closed client must emit a ResourceWarning.
    @pytest.mark.skipif(sys.version_info < (3, 4, 1),
                        reason="Python <3.4.1 doesnt support __del__ calls "
                               "from GC")
    def test_httpclient__del__method(self, loop, consul_port, recwarn):
        async def main():
            c = consul.aio.Consul(port=consul_port, loop=loop)
            _, _ = await c.kv.get('foo')
            del c
            import gc
            # run gc to ensure c is collected
            gc.collect()
            w = recwarn.pop(ResourceWarning)
            assert issubclass(w.category, ResourceWarning)
        loop.run_until_complete(main())

    # A server that answers with status 599 must surface as consul.Timeout.
    def test_root(self, loop, local_server):
        async def test_timeout():
            time_out = False
            c = consul.aio.Consul(port=local_server.port, loop=loop)
            try:
                await c.agent.services()
            except consul.Timeout:
                time_out = True
            assert time_out
        loop.run_until_complete(test_timeout())

    # The client's aiohttp session must be closed by HTTPClient.close().
    def test_http_session(self, loop, local_timeout_server, consul_port):
        async def test_session_close():
            http_server = await local_timeout_server
            c = consul.aio.Consul(port=http_server.port, loop=loop)
            c.agent.services()
            c.http._session = aiohttp.ClientSession()
            assert not c.http._session.closed
            c.http.__del__()
            await c.http.close()
            assert c.http._session.closed
            http_server.server.stop()
            ...
        loop.run_until_complete(test_session_close())
| poppyred/python-consul2 | tests/test_aio.py | test_aio.py | py | 10,482 | python | en | code | 125 | github-code | 36 |
15083780449 | import ray
import numpy as np
import gym
import tensorflow as tf
import tensorflow.contrib.slim as slim
import time
import sys
sys.path.insert(0, "/home/ubuntu/pong_py")
from pongjsenv import PongJSEnv
# Start Ray on the driver.  NOTE(review): `num_workers` is a legacy ray.init
# argument; confirm against the Ray version this script targets.
ray.init(num_workers=0)
# Policy-gradient hyperparameters shared by the driver and the Env actors.
n_obs = 8  # dimensionality of observations
n_h = 256  # number of hidden layer neurons
# n_actions = 2 # number of available actions
n_actions = 3  # number of available actions
learning_rate = 5e-4  # how rapidly to update parameters
gamma = .99  # reward discount factor
def make_policy(observation_placeholder):
    """Build the softmax policy head: one hidden layer of n_h units followed
    by an n_actions-wide linear layer (small truncated-normal init)."""
    hidden_layer = slim.fully_connected(observation_placeholder, n_h)
    logits = slim.fully_connected(
        hidden_layer, n_actions, activation_fn=None,
        weights_initializer=tf.truncated_normal_initializer(0.001))
    return tf.nn.softmax(logits)
def discounted_normalized_rewards(r, gamma=0.99):
    """Take a 1D float array of rewards and compute the normalized discounted return.

    Args:
        r: numpy array of per-step rewards (iterated along axis 0).
        gamma: discount factor; defaults to 0.99, matching the module-level
            setting. Exposed as a parameter so callers can override it.

    Returns:
        Array of the same shape as `r` with discounted returns, standardized
        to zero mean and unit (population) standard deviation.
        NOTE: if every return is identical, np.std is 0 and the result is NaN.
    """
    result = np.zeros_like(r)
    running_sum = 0.0
    # Accumulate the discounted return backwards through time.
    for t in reversed(range(r.size)):
        running_sum = running_sum * gamma + r[t]
        result[t] = running_sum
    return (result - np.mean(result)) / np.std(result)
@ray.remote
class Env(object):
    """Ray actor owning one Pong environment and a local copy of the policy
    network; produces policy-gradient rollouts for the driver."""

    def __init__(self):
        # One environment instance per actor.
        self.env = PongJSEnv()
        self.input_observation = tf.placeholder(dtype=tf.float32, shape=[None, n_obs])
        input_probability = tf.placeholder(dtype=tf.float32, shape=[None, n_actions])
        input_reward = tf.placeholder(dtype=tf.float32, shape=[None,1])
        # The policy network.
        self.action_probability = make_policy(self.input_observation)
        loss = tf.nn.l2_loss(input_probability - self.action_probability)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        # train_op is built even though only the driver trains, so this
        # actor's graph (and variable set) mirrors the driver's.
        self.train_op = optimizer.minimize(loss, grad_loss=input_reward)
        # Create TensorFlow session and initialize variables.
        self.sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        self.variables = ray.experimental.TensorFlowVariables(self.action_probability, self.sess)

    def rollout(self):
        """Play one episode with the current weights.

        Returns:
            Tuple of stacked numpy arrays (one row per timestep):
            observations, discounted+normalized rewards, one-hot action labels.
        """
        observation = self.env.reset()
        observations, rewards, labels = [], [], []
        done = False
        while not done:
            # Stochastically sample an action from the policy network.
            probability = self.sess.run(self.action_probability, {self.input_observation: observation[np.newaxis, :]})[0, :]
            action = np.random.choice(n_actions, p=probability)
            label = np.zeros_like(probability)
            label[action] = 1
            observations.append(observation)
            labels.append(label)
            observation, reward, done, _ = self.env.step(action)
            rewards.append(reward)
        return np.vstack(observations), discounted_normalized_rewards(np.vstack(rewards)), np.vstack(labels)

    def load_weights(self, weights):
        """Overwrite the local policy variables with `weights` from the driver."""
        self.variables.set_weights(weights)
# Driver-side training: four rollout actors plus a mirrored copy of the
# policy graph whose weights are broadcast every iteration.
agents = [Env.remote() for _ in range(4)]
input_observation = tf.placeholder(dtype=tf.float32, shape=[None, n_obs])
input_probability = tf.placeholder(dtype=tf.float32, shape=[None, n_actions])
input_reward = tf.placeholder(dtype=tf.float32, shape=[None, 1])
action_probability = make_policy(input_observation)
loss = tf.nn.l2_loss(input_probability - action_probability)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, grad_loss=input_reward)
sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
variables = ray.experimental.TensorFlowVariables(loss, sess)
num_timesteps = 0
reward_sums = []
# Barrier for the timing (TODO(pcm): clean this up)
weights = ray.put(variables.get_weights())
ray.get([agent.load_weights.remote(weights) for agent in agents])
start_time = time.time()
for _ in range(100):
    # Broadcast the latest weights to every actor before collecting rollouts.
    weights = ray.put(variables.get_weights())
    # EXERCISE: Set weights on the remote agents
    [agent.load_weights.remote(weights) for agent in agents]
    # EXERCISE: Call agent.rollout on all the agents, get results, store them in variable "trajectories"
    trajectories = ray.get([agent.rollout.remote() for agent in agents])
    # Episode lengths (rows per trajectory) double as the logged "reward".
    reward_sums.extend([trajectory[0].shape[0] for trajectory in trajectories])
    timesteps = np.sum([trajectory[0].shape[0] for trajectory in trajectories])
    # Print progress each time another 5000 timesteps have been collected.
    if (num_timesteps + timesteps) // 5000 > num_timesteps // 5000:
        print('time: {:4.1f}, timesteps: {:7.0f}, reward: {:7.3f}'.format(
            time.time() - start_time, num_timesteps + timesteps, np.mean(reward_sums)))
    num_timesteps += timesteps
    # Concatenate per-actor trajectories into one batch and take a gradient step.
    results = [np.concatenate(x) for x in zip(*trajectories)]
    sess.run(train_op, {input_observation: results[0], input_reward: results[1], input_probability: results[2]})
| robertnishihara/ray-tutorial-docker | rl_exercises/pong_py_no_git/pong_py/parallel_train.py | parallel_train.py | py | 4,946 | python | en | code | 8 | github-code | 36 |
23751441892 | import os
import argparse
import matplotlib.pyplot as plt
from matplotlib import cm
import torch
import numpy as np
import statistics as st
import csv
import seaborn as sns
from timeseries import EchoCard
from quality_classification import predict_single as predict_acq
from quality_classification import CardioNet
from heart_segmentation import QuickNat
from heart_segmentation import predict_single as predict_vol
def lighten_color(color, amount=0.5):
    """
    Lightens the given color by multiplying (1-luminosity) by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.

    Examples:
    >> lighten_color('g', 0.3)
    >> lighten_color('#F034A3', 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        # Resolve a named color (e.g. 'g') to its hex value.
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # Not a named color (hex string, RGB tuple, or an unhashable input);
        # the bare `except:` this replaces also swallowed these two cases.
        c = color
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    # Move the lightness channel toward 1 by `amount`, keep hue/saturation.
    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
class Graph(object):
    '''
    This class collects all information needed to plot graphs and has functions which create and save graphs

    Parameters
    ----------
    time_per_pixel: float
        pixel resolution in x axis (time in seconds)
    labels: list of ints with values 0 or 1
        A list containing the model's quality assessment classification results
    sigmoids: list of floats between [0,1]
        A list containing the model's quality assessment sigmoid outputs
    BEtimes: A list of tuples of floats
        A list containing the Begin and End times (in pixels) of the windowing performed on the original timeseries image in
        order to perform classification

    Attributes
    ----------
    time_per_pixel: float
        pixel resolution in x axis (time in seconds)
    labels: list of ints with values 0 or 1
        A list containing the model's quality assessment classification results
    sigmoids: list of floats between [0,1]
        A list containing the model's quality assessment sigmoid outputs
    BEtimes: A list of tuples of floats
        A list containing the Begin and End times (in pixels) of the windowing performed on the original timeseries image in
        order to perform classification
    tot_time: float
        The total time of acquisition
    heatmap: numpy array with the same width as the original timeseries image and 1/3 of its height
        An "image" showing the sigmoid outputs of the network in the regions of the original image
        (only set after make_custom_heatmap has been called)
    '''
    def __init__(self, time_per_pixel, labels, sigmoids, BEtimes):
        self.time_per_pixel = time_per_pixel
        self.BEtimes = BEtimes
        self.labels = labels
        self.sigmoids = sigmoids
        # Total acquisition time spans from the first window's begin to the
        # last window's end, converted from pixels to seconds.
        self.tot_time = (self.BEtimes[-1][1]-self.BEtimes[0][0])*self.time_per_pixel
        print('Total time of acquisition:', self.tot_time)

    def add_axvspan(self):
        """
        This function is called to add the classification results of the quality assessment as colors in regions of plots
        """
        # i/j build labels '_Good', '__Good', ... — matplotlib hides legend
        # entries whose label starts with '_', so only the first span of each
        # class appears in the legend.
        i=0
        j=0
        for BEtime, label in zip(self.BEtimes, self.labels):
            timeB = BEtime[0]*self.time_per_pixel #int(BEtime[0]*time_per_pixel*len(volumes)/tot_time)
            timeE = BEtime[1]*self.time_per_pixel #int(BEtime[1]*time_per_pixel*len(volumes)/tot_time)
            if label == 1:
                plt.axvspan(timeB, timeE, facecolor='gold', alpha=1, label='_'*i +'Good')
                i+=1
            else:
                plt.axvspan(timeB, timeE, facecolor='midnightblue', alpha=1, label='_'*j +'Bad')
                j+=1

    def make_graph(self, points, volumes, lvids, title, output_path):
        """
        This function creates and saves a graph with two subplots. The first shows the LV Volume over time and
        the second shows the LVID over time. The quality of the image is represented with colors on the graph
        according to the classification results by calling the add_axvspan function.

        Parameters
        ----------
        points: numpy array
            contains the corresponding points (in pixels) of the occurrences in volumes and lvids
        volumes: list of floats
            A list containing the LV Volume either for systole, diastole, or all points
        lvids: list of floats
            A list containing the LV Inner Diameters either for systole, diastole, or all points
        title: string
            The title of the figure to be saved
        output_path: string
            The name of the file to be saved
        """
        #f = plt.figure(figsize[12.8, 9.6])
        volume = np.array(volumes)
        lvid = np.array(lvids)
        plt.figure(figsize=[12.8, 9.6])
        # plot LV Vol
        plt.subplot(121) # plt
        #plt.plot(points*self.time_per_pixel, volume) #*10**(9) # [::3] to take every third
        # NOTE(review): positional (x, y) args to sns.lineplot are removed in
        # seaborn >= 0.12; this code targets an older seaborn.
        sns.lineplot(points*self.time_per_pixel, volume)
        self.add_axvspan()
        plt.legend()
        plt.grid(True)
        plt.ylabel('LV Vol [mm^3]')
        plt.xlabel('Time [sec]')
        plt.xticks(np.arange(0, self.tot_time, 0.5))
        plt.title('LV Volume')
        # and LVID
        plt.subplot(122)
        #plt.plot(points*self.time_per_pixel, lvid)
        sns.lineplot(points*self.time_per_pixel, lvid)
        self.add_axvspan()
        plt.legend()
        plt.grid(True)
        plt.ylabel('LVID [mm]')
        plt.xlabel('Time [sec]')
        plt.xticks(np.arange(0, self.tot_time, 0.5))
        plt.title('LV Inner Diameters')
        plt.suptitle(title)
        plt.savefig(output_path)
        plt.close() #'all'

    def make_custom_heatmap(self, img):
        """
        This function creates a heatmap with the same width as the original timeseries image and 1/3 of its height.
        It is an "image" showing the sigmoid outputs of the network in the regions of the original image.

        Parameters
        ----------
        img: numpy array
            The original timeseries image
        """
        self.heatmap = np.zeros((img.shape[0]//3, img.shape[1]))
        # Paint each classified window's columns with its sigmoid value.
        for (Btime, Etime), label in zip(self.BEtimes, self.sigmoids):
            self.heatmap[:,Btime:Etime] = label #255*

    def map_sigs_to_colors(self, peaks):
        """
        This function calculates the sigmoid value (continuous value of acquisition quality) at each time point in the list peaks

        Parameters
        ----------
        peaks: numpy array
            A list containing points in time (represented in pixels) during which a diastole occurs

        Returns
        -------
        new_labels: list of floats
            A list containing the corresponding sigmoid value (quality of acquisition) for each point in peaks.
            NOTE(review): peaks falling outside every window are silently
            skipped, so new_labels can be shorter than peaks.
        """
        new_labels = []
        for peak in peaks:
            for (Btime, Etime), label in zip(self.BEtimes, self.sigmoids):
                if peak >= Btime and peak < Etime:
                    new_labels.append(label)
                    break
        return new_labels

    def make_hr_graph(self, heartrate, peaks, vols, output_path):
        """
        This function creates a graph with two subplots. The first subplot shows the heart rate over time and the second
        shows the LV Vol;d over the heart rates. The quality of the image is represented as an image with continuous colors under the first
        subplot according to the classification results. In the second subplot the quality of the image is represented as a heatmap
        where each point in the plot is drawn in a color representing the quality of acquisition during the time the measurement
        was made.

        Parameters
        ----------
        heartrate: list of ints
            Contains a list of the heartrate calculated for each heart beat in [bpm]
        peaks: numpy array
            Contains the points (pixels) corresponding to when the heart is in diastole which were used for the heartrate calculation
        vols: list of floats
            Contains the LV Vol in diastole
        output_path: string
            The name of the file to be saved
        """
        plt.figure(figsize=[12.8, 9.6])
        grid = plt.GridSpec(6, 2, hspace=0.0, wspace=0.2)
        ax_hrt = plt.subplot(grid[:-1, 0]) # grid for graph heartrate-time
        ax_h = plt.subplot(grid[-1, 0]) # grid for classification regions
        ax_vhr = plt.subplot(grid[:, 1]) # grid for graph volume-heartrate
        sig_colors = self.map_sigs_to_colors(peaks)
        ax_hrt.set_xlabel('Time [sec]')
        ax_hrt.set_ylabel('Heart rate [bpm]]')
        ax_hrt.set_xticks(np.arange(0, peaks[-1]*self.time_per_pixel, 0.5))
        ax_hrt.grid()
        ax_hrt.plot(peaks*self.time_per_pixel, np.array(heartrate), '-o')
        ax_h.axis('off')
        # Requires make_custom_heatmap to have been called beforehand.
        h = ax_h.imshow(self.heatmap)
        plt.colorbar(h, ax=ax_h, orientation='horizontal')
        v = ax_vhr.scatter(heartrate, vols, c=sig_colors, cmap='viridis')
        ax_vhr.set_xlabel('Heart rate [bpm]]')
        ax_vhr.set_ylabel('LV Vol;d [mm^3]]')
        ax_vhr.grid()
        plt.colorbar(v, ax=ax_vhr, orientation='horizontal')
        plt.suptitle("Heart rate plots")
        plt.savefig(output_path)
        plt.close()

    def plot_img_mask(self, img, mask, output_path):
        """
        This function plots and saves the original timeseries image and the superimposed segmentation mask of the heart

        Parameters
        ----------
        img: numpy array
            The original timeseries image
        mask: numpy array
            The segmentation mask of the heart inner diameter
        output_path: string
            The name of the file to be saved
        """
        fig, ax = plt.subplots()
        ax.imshow(img, cmap='gray')
        ax.imshow(mask, cmap='winter', alpha=0.3)
        # Re-label the x axis from pixels to seconds (ticks every 0.5 s).
        xt = np.arange(0, img.shape[1], step=int(0.5/self.time_per_pixel))
        ax.set_xticks(xt)
        xl = np.round_(xt*self.time_per_pixel, 1)
        ax.set_xticklabels(xl)
        ax.set_yticks([])
        plt.xlabel('Time [sec]')
        plt.savefig(output_path, bbox_inches = 'tight', dpi=1200)
        plt.close()

    def plot_img(self, img, output_path):
        """
        This function plots and saves the original timeseries image and above that the heatmap created by the function make_custom_heatmap

        Parameters
        ----------
        img: numpy array
            The original timeseries image
        output_path: string
            The name of the file to be saved
        """
        # Size the figure so the heatmap strip and the image keep their
        # relative heights while sharing the same width.
        heights = [a.shape[0] for a in [self.heatmap, img]]
        widths = [self.heatmap.shape[1]]
        fig_width = 8
        fig_height = fig_width*sum(heights)/sum(widths)
        f, axarr = plt.subplots(2,1, figsize=(fig_width, fig_height+0.4), gridspec_kw={'height_ratios': heights})
        ax = axarr[0].imshow(self.heatmap, cmap='viridis')
        axarr[0].axis('off')
        axarr[1].imshow(img, cmap='gray')
        xt = np.arange(0, img.shape[1], step=int(0.5/self.time_per_pixel))
        axarr[1].set_xticks(xt)
        xl = np.round_(xt*self.time_per_pixel, 1)
        axarr[1].set_xticklabels(xl)
        axarr[1].set_yticks([])
        axarr[1].set_xlabel('Time [sec]')
        plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
        plt.colorbar(ax, ax=axarr[:]) #, orientation='horizontal'
        plt.savefig(output_path, bbox_inches = 'tight', dpi=1200)
        plt.close()
''' ----------- DONE WITH GRAPH CLASS ----------- '''
''' ----------- NEXT COME HELPER FUNCTIONS FOR GETTING STATISTICS AND LOADING MODELS ----------- '''
def get_stats_good(labels, times, peaks, ds, time_res):
    """Compute statistics over measurements that fall in good-quality windows.

    Parameters
    ----------
    labels: list of ints, 0 or 1
        Acquisition-quality classification result for each time window.
    times: list of tuples
        (begin, end) pixel times of each classified window, parallel to labels.
    peaks: numpy array
        Times (in pixels) of the events in ds.
    ds: list of floats
        LVID;d, LVID;s or heart rate values, parallel to peaks.
    time_res: float
        Seconds per pixel on the x axis.

    Returns
    -------
    (median, average, maximum, minimum, good_values, good_times)
        Statistics over the values captured during windows classified as good,
        plus the values themselves and their times in seconds. All four
        statistics are 0 (and both lists empty) when no good region was found.
    """
    good_ds = []
    good_times = []
    # Renamed loop variable: the original shadowed the `ds` parameter here.
    for peak, value in zip(peaks, ds):
        for label, (b_time, e_time) in zip(labels, times):
            if b_time <= peak < e_time:
                # Keep the measurement only if its window was classified good;
                # either way stop searching further windows for this peak.
                if label == 1:
                    good_ds.append(value)
                    good_times.append(peak * time_res)
                break
    # Explicit empty check replaces the original ZeroDivisionError /
    # StatisticsError exception handling; the returned values are identical.
    if not good_ds:
        return 0, 0, 0, 0, good_ds, good_times
    med_ds = st.median(good_ds)
    avg_ds = sum(good_ds) / len(good_ds)
    return med_ds, avg_ds, max(good_ds), min(good_ds), good_ds, good_times
def load_model_device(network):
    """
    This function loads the appropriate model and gets the current device

    Parameters
    ----------
    network: string
        Should be either 'echo' or 'quicknat' defining which model is to be loaded.
        NOTE(review): any value other than 'quicknat' (including typos) silently
        falls through to the quality-classification branch.

    Returns
    -------
    net: model.QuickNat or model.CardioNet
        The network model instance, moved to `device` with its checkpoint loaded
    device: torch.device
        The currently running device, e.g. cpu, gpu0
    """
    # get device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if network=='quicknat':
        model_path = './heart_segmentation/checkpoints/heart-seg-net.pt'
        # load model
        params = {'num_channels':1,
                    'num_class':1,
                    'num_filters':64,
                    'kernel_h':5,
                    'kernel_w':5,
                    'kernel_c':1,
                    'stride_conv':1,
                    'pool':2,
                    'stride_pool':2,
                    'se_block': "NONE",
                    'drop_out':0.2}
        net = QuickNat(params)
        net.to(device=device)
        # Segmentation checkpoint stores a dict with a 'net_state_dict' entry.
        state_dict = torch.load(os.path.join(model_path), map_location=device)
        net.load_state_dict(state_dict['net_state_dict'])
    else:
        model_path = './quality_classification/checkpoints/quality-clas-net.pth'
        net = CardioNet(n_channels=1, n_classes=1)
        net.to(device=device)
        # Classification checkpoint stores the raw state dict directly.
        net.load_state_dict(torch.load(model_path, map_location=device))
    return net, device
''' ----------- DONE WITH HELPER FUNCTIONS ----------- '''
''' ----------- NEXT COMES THE FUNCTION WHERE EVERYTHING IS RUN ----------- '''
def run(input_path, output_path, weight, graphs=True, write=None, write_file=None):
    """
    This function is where the end2end framework is run.
    Parameters
    ----------
    input_path: string
        Path of file to be loaded
    output_path: string
        Directory to save results
    weight: int
        The weight of the mouse we are evaluating
    graphs: bool
        If true graphs will be created and saved in the output_path directory
    write: string
        If 'stats' then values such as max, min, median etc of the LVIDs etc are written to a csv file
        If 'all' then LVIDs etc. are written for all good classified regions
    write_file: string
        The csv file to write results to according to what has been given to write
    """
    labels = []   # rounded (0/1) quality classification per window
    sigs = []     # raw classifier outputs per window
    masks = []    # segmentation masks, one per window
    # create an echo card instance
    ec = EchoCard(input_path)
    # fill in timeseries attribute of class - a numpy array of entire time of acquisition
    ec.make_timeseries()
    # split timeseries to get images for segmentation network
    vol_windows = ec.make_seg_windows()
    # load models for testing
    echo_net, device = load_model_device('echo')
    quicknat_net, _ = load_model_device('quicknat')
    print("Using device ", device)
    print("Loaded models")
    print('Image shape:', ec.image.shape)
    '''-----------SEGMENTATION PART-----------'''
    # get one mask per window
    for img in vol_windows:
        mask, _ = predict_vol(quicknat_net, device, img, 256)
        masks.append(mask)
    # connect back to one timeseries
    ec.connect_masks(masks)
    # compute volumes and lvids for all points in timeseries
    ec.get_vols()
    # get diastole and systole lvid, lv vol and time of occurence (in pixel values)
    dpeaks, dlvids, dvols = ec.get_diastoles()
    speaks, slvids, svols = ec.get_systoles()
    # get heartrate in [bpm]
    heartrate = ec.get_heartrate(dpeaks)
    '''-----------QUALITY ACQUISITION PART-----------'''
    # split timeseries to get images for quality classification
    # two lists are returned - one with numpy arrays (image) one with a tuple (startTime, endTime)
    ec.weight_to_size(weight)
    qual_windows, BEtimes = ec.make_quality_windows_man()
    # classify each window as good or bad
    for img in qual_windows:
        label, _ = predict_acq(echo_net, device, img, 256)
        sigs.append(label)
        labels.append(np.round(label))
    '''-----------GRAPHS PART-----------'''
    if graphs:
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        # local renamed from `graphs` to avoid shadowing the boolean parameter
        plotter = Graph(ec.time_res, labels, sigs, BEtimes)
        plotter.make_custom_heatmap(ec.image)
        plotter.make_graph(dpeaks, dvols, dlvids, 'Diastole', os.path.join(output_path, 'output_diastole.png'))
        plotter.make_graph(speaks, svols, slvids, 'Systole', os.path.join(output_path, 'output_systole.png'))
        plotter.plot_img_mask(ec.image, ec.mask, os.path.join(output_path, 'output_img_mask.png'))
        plotter.plot_img(ec.image, os.path.join(output_path, 'output_img.png'))
        # heartrate has one entry less than dpeaks (a rate is defined between peaks)
        plotter.make_hr_graph(heartrate, dpeaks[:-1], dvols[:-1], os.path.join(output_path, 'output_heartrate.png'))
    '''-----------WRITING TO FILES PART-----------'''
    # keep only measurements that fall inside good-quality regions
    med_diastole, avg_diastole, max_diastole, min_diastole, good_lvid_d, times_lvid_d = get_stats_good(labels, BEtimes, dpeaks, dlvids, ec.time_res)
    med_systole, avg_systole, max_systole, min_systole, good_lvid_s, times_lvid_s = get_stats_good(labels, BEtimes, speaks, slvids, ec.time_res)
    med_heartrate, avg_heartrate, max_heartrate, min_heartrate, good_heartrates, times_hr = get_stats_good(labels, BEtimes, dpeaks[:-1], heartrate, ec.time_res)
    print('Average lvid;d is: ', avg_diastole, ' mm and average lvid;s is: ', avg_systole, ' mm')
    print('Median lvid;d is: ', med_diastole, ' mm and median lvid;s is: ', med_systole, ' mm')
    print('The average heart rate is: ', avg_heartrate, ' bpm and the median heart rate is: ', med_heartrate, ' bpm')
    # append results to file if a csv file has been given in write
    # either stats such as mean, median etc. (1 value for each file)
    if write=='stats':
        filename = input_path.split('/')[-1]
        # if the file doesn't already exist add first row with column names first
        if not os.path.isfile(write_file):
            with open(write_file, 'w', newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                writer.writerow(['file','median_diastole', 'median_systole', 'median_heartrate', 'avg_diastole', 'avg_systole', 'avg_heartrate', 'max_diastole', 'max_systole', 'max_heartrate', 'min_diastole', 'min_systole', 'min_heartrate'])
        # append new line to file
        with open(write_file, 'a', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            writer.writerow([filename, med_diastole, med_systole, med_heartrate, avg_diastole, avg_systole, avg_heartrate, max_diastole, max_systole, max_heartrate, min_diastole, min_systole, min_heartrate])
    # or heartrate, lvid;d etc. during all good acquisition regions
    elif write=='all':
        filename = input_path.split('/')[-1]
        # if the file doesn't already exist add first row with column names first
        if not os.path.isfile(write_file):
            with open(write_file, 'w', newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                writer.writerow(['file','lvid;d', 'lvid;d time', 'lvid;s', 'lvid;s time', 'heart rate', 'heart rate time'])
        # append new lines to file
        with open(write_file, 'a', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            # depending on windowing and classification these will not necessarily
            # have the exact same length - take the smallest.
            # BUGFIX: the original took min over good_lvid_s twice and never
            # considered good_lvid_d, risking an IndexError below.
            min_len = min(len(good_lvid_d), len(good_lvid_s), len(good_heartrates))
            for i in range(min_len):
                writer.writerow([filename, good_lvid_d[i], times_lvid_d[i], good_lvid_s[i], times_lvid_s[i], good_heartrates[i], times_hr[i]])
def get_args():
    '''
    Parse the command line arguments for the end2end framework.
    Required arguments
    ------------------
    -i: The path to the dicom file you wish to extract features from
    -m: The body mass in grams of the current mouse you wish to extract features from
    Optional arguments
    ------------------
    -o: The name of the directory to save graphs, images and csv to. Default is the current working directory.
    -g: if True output graphs and images will be created and saved, if False they will not. Default is True
    -w: If 'all' all features are extracted and saved to a csv, if 'stats' only statistics from echocardiogram are extracted and saved
        (one row per image). Default value is 'all'
    -f: The name of the csv file to write features to. The default value is 'output_all.csv'. If the file already exists then the new
        features will be appended as new rows to the file, but if the file doesn't already exist then it is automatically created.
    '''
    parser = argparse.ArgumentParser(description='Run the end 2 end framework to extract useful heart features from an echocardiogram',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input', '-i', metavar='INPUT', required=True,
                        help='Specify path of input image - must be in DICOM format')
    parser.add_argument('--mass', '-m', type=int, required=True,
                        help='Specify the body mass of the mouse')
    parser.add_argument('--output', '-o', default='.',
                        help='Specify output path to save graphs')
    # NOTE(review): any non-empty string passed on the command line (including
    # "False") is truthy once forwarded to run(); consider a bool converter.
    parser.add_argument('--graphs', '-g', default=True,
                        help='Specify True or False depending on whether you want to save figures')
    # BUGFIX: help text said "wheter"/"stars" instead of "whether"/"stats"
    parser.add_argument('--write', '-w', default='all',
                        help='Specify whether to save all good features or statistics of features. Give all or stats as input.')
    parser.add_argument('--writefile', '-f', default='output_all.csv',
                        help='Specify in which file to save features')
    return parser.parse_args()
if __name__ == '__main__':
args = get_args()
run(args.input, args.output, args.mass, graphs=args.graphs, write=args.write, write_file=args.writefile) | HelmholtzAI-Consultants-Munich/Echo2Pheno | Module I/run4single.py | run4single.py | py | 25,479 | python | en | code | 1 | github-code | 36 |
9135106371 | #!/usr/bin/env python3
import sqlite3
def nice_print(sql_table):
    """Print each result row on its own line, columns separated by two tabs."""
    for row in sql_table:
        print('\t\t'.join(str(column) for column in row))
# connect to the Northwind sample database (file must sit next to this script)
conn = sqlite3.connect('northwind.sqlite3')
curs = conn.cursor()
# Basic Questions
# 10 priciest products by unit price
ten_expensive = """
SELECT ProductName, UnitPrice
from Product
ORDER BY UnitPrice DESC
LIMIT 10;
"""
# mean of HireDate - BirthDate; presumably SQLite's numeric coercion of the
# date text yields a difference in years -- TODO confirm against the schema
avg_age = """
SELECT ROUND(AVG(HireDate - BirthDate)) as "average hire age"
from Employee;
"""
# same hire-age metric, grouped by employee city
avg_age_city = """
SELECT City, ROUND(AVG(HireDate - BirthDate)) as "average hire age by City"
from Employee
GROUP BY City;
"""
print("\n\ntop 10 most expensive items")
nice_print(curs.execute(ten_expensive).fetchall())
print("\n\navg. age at hire")
nice_print(curs.execute(avg_age).fetchall())
print("\n\navg. age by city")
nice_print(curs.execute(avg_age_city).fetchall())
# Advanced Questions
# 10 priciest products joined with the company that supplies them
ten_expensive_supplier = """
SELECT ProductName, UnitPrice, CompanyName
from Product JOIN Supplier
ON Product.SupplierID = Supplier.Id
ORDER BY UnitPrice DESC
LIMIT 10;
"""
# category holding the most distinct products
largest_category = """
SELECT CategoryName, COUNT(ProductName) as "count"
FROM Product JOIN Category
ON Product.CategoryId = Category.Id
GROUP BY CategoryName
ORDER BY "count" DESC
LIMIT 1;
"""
# employee covering the largest number of distinct territories
give_raise_to_this_employee = """
SELECT FirstName || ' ' || LastName as fullname
FROM Employee JOIN EmployeeTerritory
ON Employee.Id = EmployeeTerritory.EmployeeId
GROUP BY fullname
ORDER BY COUNT( DISTINCT TerritoryId) DESC
LIMIT 1;
"""
print("\n\nWhat are the 10 most expensive items AND their Supplier?")
nice_print(curs.execute(ten_expensive_supplier).fetchall())
print("\n\nWhat is the largest category (by unique products)")
nice_print(curs.execute(largest_category).fetchall())
print("\n\nWho's the employee with the most territories?")
nice_print(curs.execute(give_raise_to_this_employee).fetchall())
| Tclack88/Lambda | DS-3-2-SQL-and-Databases/sc/northwind.py | northwind.py | py | 2,024 | python | en | code | 0 | github-code | 36 |
39842034728 | class Node(object):
    def __init__(self, connections, status, distance):
        """Create a search node.

        connections -- set of neighbouring node ids
        status      -- 1 marks a frontier node, 2 an expanded one (see
                       generate()); 0 presumably means unvisited -- TODO confirm
        distance    -- hop count from the search origin
        """
        self.connections = connections
        self.status = status
        self.distance = distance
def merge(self, other):
self.connections.union(other.connections)
self.distance = min(self.distance, other.distance)
self.status = max(self.status, other.status)
return self
def generate(self):
newNodes = list()
if self.status == 1:
self.status = 2
for connection in self.connections:
newNodes.append(
(connection,
Node(set(), 1,
self.distance + 1)))
return newNodes
| mingruilyu/spark-examples | social-networks/node.py | node.py | py | 716 | python | en | code | 0 | github-code | 36 |
40066321475 | import pandas as pd
from decouple import config
import numpy as np
import os
import nilearn.image as img
from nilearn.glm.second_level import non_parametric_inference
import nibabel
import argparse
def options() -> dict:
    '''
    Parse the command line flags.
    Needs -t for the task name and -p for the number of permutations.
    Parameters
    ---------
    None
    Returns
    -------
    dict: dictionary object
        Dictionary with 'task' and 'perms' entries
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--task',
                        dest='task',
                        help='Task name. Either happy, eft or fear')
    parser.add_argument('-p', '--perms',
                        dest='perms',
                        help='number of permutations to run')
    return vars(parser.parse_args())
def paths(task: str) -> dict:
    '''
    Build the directory paths used to load and save images.
    Parameters
    ----------
    task: str
        One of happy, fear, eft; looked up in the environment config.
    Returns
    -------
    dict with 'base_path' (task root) and 'mixed_model'
    (<root>/2ndlevel/mixed_model) entries
    '''
    task_root = config(task)
    return {
        'base_path': task_root,
        'mixed_model': os.path.join(task_root, '2ndlevel', 'mixed_model'),
    }
def subject_scans(base_path: str) -> pd.DataFrame:
    '''
    Load the csv listing every subject's scan locations.
    The one subject without a T1 scan (t1 == 75) is removed.
    Parameters
    ----------
    base_path: str
        absolute path to task directory
    Returns
    -------
    subject_scans_df: pd.DataFrame
        csv of subjects scans locations
    '''
    scans = pd.read_csv(f"{base_path}/1stlevel_location.csv")
    # keep every row except the subject whose t1 entry is 75
    return scans[scans['t1'] != 75]
def create_desgin_matrix(subjects_scans: dict) -> pd.DataFrame:
    '''
    Build the single-column group design matrix.
    Parameters
    ----------
    subjects_scans: dict,
        dictionary of subject images keyed by group ('HC' and 'AN').
    Returns
    -------
    design matrix: pd.DataFrame,
        one 'Group' column: -1 for every HC subject followed by 1 for
        every AN subject.
    '''
    group_column = np.hstack((-np.ones(len(subjects_scans['HC'])),
                              np.ones(len(subjects_scans['AN']))))
    return pd.DataFrame(data={'Group': group_column})
def mean_img(subject_scans: pd.DataFrame) -> dict:
    '''
    Function to get the mean image from the two time points.
    Subjects whose scans fail to load are skipped (the error is printed),
    so the output lists may be shorter than the input frame.
    Parameters
    ----------
    subject_scans: pd.DataFrame.
        Dataframe of location of subjects scans of T1, T2
    Returns
    -------
    subjects_mean_images: dict
        dictionary of mean images keyed by group ('HC'/'AN'); a subject is
        HC when its t1 path contains 'G1', AN otherwise
    '''
    subjects_mean_images = {
        'HC' : [],
        'AN' : []
    }
    for subject in range(0, subject_scans.shape[0]):
        # best effort: any load failure skips this subject rather than aborting
        try:
            t1_image = img.load_img(subject_scans['t1'].iloc[subject])
            t2_image = img.load_img(subject_scans['t2'].iloc[subject])
            # NOTE(review): this local shadows the enclosing function's name;
            # harmless here but confusing
            mean_img = img.mean_img([t1_image, t2_image])
        except Exception as e:
            print(e)
            continue
        # group membership is encoded in the t1 file path ('G1' = control)
        if 'G1' in subject_scans['t1'].iloc[subject]:
            subjects_mean_images['HC'].append(mean_img)
        else:
            subjects_mean_images['AN'].append(mean_img)
    return subjects_mean_images
def ols(subjects_to_analyse: list,
        design_matrix: pd.DataFrame,
        masks_2ndlevel: nibabel.nifti1.Nifti1Image,
        perm: int) -> dict:
    '''
    Function to run nilearn permutated ols.
    Parameters
    ----------
    subjects_to_analyse: list
        list of nibabel.nifti1.Nifti1Image scans
    design_matrix: pd.DataFrame
        (92 x 1) design matrix of group
    mask_2ndlevel: nibabel.nifti1.Nifti1Image
        mask of 1st level inputs
    perm: int
        Number of permutations
    Returns
    -------
    dictionary of nibabel.nifti1.Nifti1Image
    '''
    # permutation test on the "Group" contrast; TFCE maps are requested
    # alongside the voxel-wise statistics.
    # NOTE(review): n_jobs=6 is hard coded -- consider exposing it.
    return non_parametric_inference(
        second_level_input=subjects_to_analyse,
        design_matrix=design_matrix,
        second_level_contrast="Group",
        mask=masks_2ndlevel,
        model_intercept=True,
        n_perm=int(perm),
        n_jobs=6,
        tfce=True,
        verbose=3
    )
if __name__ == "__main__":
print('Starting up permutated ols for group differences')
flags = options()
path = paths(flags['task'])
scans_location = subject_scans(path['base_path'])
mean_images = mean_img(scans_location)
design_matrix = create_desgin_matrix(mean_images)
mask = img.load_img(os.path.join(path['mixed_model'], 'mask_img.nii.gz' ))
print(f'Running OLS with {flags["perms"]} permutations for {flags["task"]} task')
subjects_to_analyse = mean_images['HC'] + mean_images['AN']
group_diff = ols(subjects_to_analyse, design_matrix, mask, flags["perms"])
print(f'Saving scans to {path["mixed_model"]}')
group_diff['logp_max_tfce'].to_filename(f'{path["mixed_model"]}/tfce_fwep_group.nii.gz')
group_diff['tfce'].to_filename(f'{path["mixed_model"]}/tfce_tstat_group.nii.gz')
group_diff['t'].to_filename(f'{path["mixed_model"]}/vox_tstat_group.nii.gz')
group_diff['logp_max_t'].to_filename(f'{path["mixed_model"]}/vox_fwep_group.nii.gz') | WMDA/socio-emotion-cognition | task_fmri/modelling/nilearn_notebooks/second_level_group_differences_nilearn.py | second_level_group_differences_nilearn.py | py | 5,119 | python | en | code | 0 | github-code | 36 |
7413102812 | """Sorts duplicate photos/files and places all copies in their own folder."""
import click
import halo
from hashlib import sha256
from pathlib import Path
from shutil import copyfile, move
""" Return the SHA256 sum of the provided file name.
:param file_name - Name of the file to check
:return Hexdigst of SHA sum
"""
def sha256sum(file_name):
with open(file_name, 'rb') as file_bytes:
return sha256(file_bytes.read()).hexdigest()
return ''
@click.command()
@click.argument('directory', type=click.Path(file_okay=False, exists=True))
@click.option('--move/--copy', '-m/-c', default=False)
def main(directory, move):
    """Collect duplicate files (same SHA-256 digest) from *directory*
    into a 'duplicate_files' subdirectory, moving or copying according
    to the --move/--copy flag."""
    # BUGFIX: the boolean flag `move` shadows shutil.move imported at module
    # level, so the original called the bool as a function (TypeError).
    # Bind the file-moving function under a distinct local name.
    from shutil import move as relocate
    dup_dir = Path('{}/duplicate_files'.format(directory))
    if not dup_dir.exists():
        dup_dir.mkdir()
    click.echo('This script will iterate through the directory \'{0}\' and move all duplicate files to a subdirectory named \'{1}\'.'.format(directory, dup_dir))
    path = Path(directory)
    # group every regular file by its content hash
    hash_dict = dict()
    for file_path in path.iterdir():
        print(file_path)
        if Path(file_path).is_file():
            sha_sum = sha256sum(file_path)
            if sha_sum in hash_dict:
                hash_dict[sha_sum].append(file_path)
            else:
                hash_dict[sha_sum] = [file_path]
    print(hash_dict)
    # any digest shared by more than one path marks a set of duplicates
    for sha_sum in list(filter(lambda x: len(hash_dict[x]) > 1, hash_dict.keys())):
        for filepath in hash_dict[sha_sum]:
            if move:
                relocate(filepath, dup_dir / filepath.parts[-1])
            else:
                copyfile(filepath, dup_dir / filepath.parts[-1])
if __name__ == '__main__':
main()
| poiriermike/sort_dup_photos | sort_dupes.py | sort_dupes.py | py | 1,698 | python | en | code | 0 | github-code | 36 |
33531980673 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair and Paul Rougieux
JRC biomass Project.
Unit D1 Bioeconomy.
"""
# Built-in modules #
# Third party modules #
# First party modules #
import autopaths
from autopaths import Path
from autopaths.auto_paths import AutoPaths
from autopaths.tmp_path import new_temp_dir
from plumbing.cache import property_cached
from tqdm import tqdm
# Internal modules #
from cbmcfs3_runner.reports.scenario import ScenarioReport
from cbmcfs3_runner.pump.dataframes import concat_as_df
###############################################################################
class Scenario(object):
    """
    This object represents a modification of the input data for the purpose.
    A scenario can be harvest and economic scenario.
    Actual scenarios should inherit from this class: subclasses must set
    `short_name` and override the `runners` property (mapping of country
    code -> list of runner steps).
    """
    # file layout created under this scenario's base directory (AutoPaths spec)
    all_paths = """
    /logs_summary.md
    """
    # iterate over / count the per-country runner lists
    def __iter__(self): return iter(self.runners.values())
    def __len__(self): return len(self.runners.values())
    def __getitem__(self, key):
        """Return a runner based on a country code."""
        return self.runners[key]
    def __init__(self, continent):
        # Save parent #
        self.continent = continent
        # This scenario dir # -- `short_name` is supplied by the subclass
        self.base_dir = Path(self.scenarios_dir + self.short_name + '/')
        # Automatically access paths based on a string of many subpaths #
        self.paths = AutoPaths(self.base_dir, self.all_paths)
    def __repr__(self):
        return '%s object with %i runners' % (self.__class__, len(self))
    def __call__(self, verbose=False):
        # run every step of every country (errors don't interrupt the sweep),
        # then collect the log tails into the summary file
        for code, steps in tqdm(self.runners.items()):
            for runner in steps:
                runner(interrupt_on_error=False, verbose=verbose)
        self.compile_log_tails()
    @property
    def runners(self):
        # abstract: concrete scenarios provide the actual runner mapping
        msg = "You should inherit from this class and implement this property."
        raise NotImplementedError(msg)
    @property
    def scenarios_dir(self):
        """Shortcut to the scenarios directory."""
        return self.continent.scenarios_dir
    @property_cached
    def report(self):
        # built lazily once per instance (property_cached memoizes)
        return ScenarioReport(self)
    def compile_log_tails(self, step=-1):
        """Write logs_summary.md containing the log tail of the given step
        (default: last step) for every country that has one."""
        summary = self.paths.summary
        summary.open(mode='w')
        summary.handle.write("# Summary of all log file tails\n\n")
        summary.handle.writelines(r[step].tail for r in self.runners.values() if r[step])
        summary.close()
    # ------------------------------ Others ----------------------------------#
    def make_csv_zip(self, csv_name, dest_dir):
        """
        Will make a zip file will the specified CSV file from every country
        together and place it in the given destination directory.
        For instance you can do:
        >>> f = scenario.make_csv_zip('ipcc_pools', '~/exports/for_sarah/')
        >>> print(f)
        """
        # Files to put in the zip #
        files = {iso: rl[-1].post_processor.csv_maker.paths(csv_name)
                 for iso, rl in self.runners.items()}
        # Actual name of CSV file #
        csv_full_name = next(iter(files.items()))[1].name
        # Destination directory #
        dest_dir = Path(dest_dir)
        # If it's not a directory #
        assert isinstance(dest_dir, autopaths.dir_path.DirectoryPath)
        # Destination zip file #
        dest_zip = dest_dir + csv_full_name + '.zip'
        # Temporary directory #
        tmp_dir = new_temp_dir()
        zip_dir = tmp_dir + csv_full_name + '/'
        zip_dir.create()
        # Copy # -- best effort: countries whose CSV is missing are reported,
        # not fatal, so one bad country doesn't abort the export
        for iso, f in files.items():
            try:
                f.copy(zip_dir + iso + '.csv')
            except Exception as e:
                print("no data in ", iso)
                print('Error loading data: '+ str(e))
        # Compress #
        zip_dir.zip_to(dest_zip)
        # Remove #
        tmp_dir.remove()
        # Return #
        return dest_zip
    def concat_as_df(self, *args, **kwargs):
        """A data frame with many countries together, crucial for analysis"""
        return concat_as_df(self, *args, **kwargs)
| xapple/cbmcfs3_runner | cbmcfs3_runner/scenarios/base_scen.py | base_scen.py | py | 4,131 | python | en | code | 2 | github-code | 36 |
14975602029 | import os
import codecs
try:
from setuptools import (setup, find_packages)
except ImportError:
from distutils.core import (setup, find_packages)
# version tuple: (major, minor, patch[, suffix...]); the string joins the
# first three numbers and appends any extra suffix parts verbatim
VERSION = (0, 2, 0)
__version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
__package_name__ = 'pelican-readtime'
__description__ = 'Plugin for Pelican that computes average read time.'
__contact_names__ = 'David Jenkins, Deepak Bhalla, Jonathan Dektiar'
__contact_emails__ = 'djenkinsdev@gmail.com, contact@deepakrb.com, contact@jonathandekhtiar.eu'
__homepage__ = 'https://github.com/JenkinsDev/pelican-readtime'
__repository_url__ = 'https://github.com/JenkinsDev/pelican-readtime'
__download_url__ = 'https://github.com/JenkinsDev/pelican-readtime'
__docformat__ = 'markdown'
__license__ = 'MIT'
__keywords__ = 'pelican blogging blog static webdevelopment plugin pelican-plugin readtime python python3 python2'
here = os.path.abspath(os.path.dirname(__file__))
# prefer the README as long description; fall back to the homepage link
if os.path.exists('README.rst'):
    # codec is used for consistent encoding
    long_description = codecs.open(
        os.path.join(here, 'README.rst'), 'r', 'utf-8').read()
else:
    long_description = 'See ' + __homepage__
setup(
    name=__package_name__,
    version=__version__,
    description=__description__,
    long_description=long_description,
    url=__repository_url__,
    download_url=__download_url__,
    license='MIT',
    author=__contact_names__,
    author_email=__contact_emails__,
    maintainer=__contact_names__,
    maintainer_email=__contact_emails__,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    keywords=__keywords__,
    # NOTE(review): packages=[''] ships the top-level module dir as-is --
    # verify this is intentional rather than find_packages()
    packages=[''],
    install_requires=['pelican>=3.6'],
    zip_safe=True,
    include_package_data=True
)
| JenkinsDev/pelican-readtime | setup.py | setup.py | py | 2,230 | python | en | code | 11 | github-code | 36 |
21811338637 | from elasticsearch import Elasticsearch
from services.caption_processor import split_captions
es = Elasticsearch()
def index_captions(captions, video_id):
    """Split *captions* into timed fragments and index each one for
    *video_id* under the 'simple-captions' Elasticsearch index."""
    for caption_time, caption_text in split_captions(captions):
        document = {
            'time': caption_time,
            'text': caption_text,
            'video': video_id
        }
        es.index(index="simple-captions", doc_type='caption', body=document)
def index_caption_pause_splitted(captions, video_id, index_name = "pause-splitted-captions"):
    """Index pre-split (time, text) caption pairs for *video_id* under
    *index_name* in Elasticsearch."""
    for caption_time, caption_text in captions:
        document = {
            'time': caption_time,
            'text': caption_text,
            'video': video_id,
        }
        es.index(index=index_name, doc_type='caption', body=document)
| veotani/youtube-caption-search | server/services/caption_indexator.py | caption_indexator.py | py | 696 | python | en | code | 0 | github-code | 36 |
17122612080 | # import the packages
import matplotlib.pyplot as plt
import networkx as nx
# Define the data structures of vertices and edges
vertices = range(1, 10)
edges = [(7, 2), (2, 3), (7, 4), (4, 5), (7, 3), (7, 5),
(1, 6), (1, 7), (2, 8), (2, 9)]
# Let's first instantiate the graph
G = nx.Graph()
# let's draw the graph
G.add_nodes_from(vertices)
G.add_edges_from(edges)
pos = nx.spring_layout(G)
# Let's define the NF nodes
nx.draw_networkx_nodes(G, pos,
nodelist=[1, 4, 3, 8, 9],
label=True,
node_color='g',
node_size=1300)
# let's create the nodes that are known to be involved in fraud
nx.draw_networkx_nodes(G, pos,
nodelist=[2, 5, 6, 7],
label=True,
node_color='r',
node_size=1300)
# Let's create labels for the nodes
nx.draw_networkx_edges(G, pos, edges, width=3, alpha=0.5, edge_color='b')
# Creating labels names
labels = {1: r'1 NF', 2: r'2 F', 3: r'3 NF', 4: r'4 NF', 5: r'5 F', 6: r'6 F', 7: r'7F', 8: r'8 NF', 9: r'9 NF'}
nx.draw_networkx_labels(G, pos, labels={n: lab for n, lab in labels.items() if n in pos})
plt.show()
nx.draw_networkx_labels(G, pos)
| amrmabdelazeem/40-Algorithms-to-Know | Fraud Analytics.py | Fraud Analytics.py | py | 1,258 | python | en | code | 0 | github-code | 36 |
3984159879 | from django.http import HttpResponse
from django.shortcuts import render
from .models import *
def home_view(request):
    """Render the home page with each name paired to its address."""
    names = ['Jitendra', 'Rimaljit', 'Mohit', 'Deepak']
    address = ['Chandigarh', 'Ludhiana', 'Ludhiana', 'Ludhian']
    context = {'info': zip(names, address)}
    return render(request, 'home.html', context=context)
def about_view(request):
    """Render the about page listing every admission record."""
    queryset = AdmissionDetails.objects.all()
    return render(request, 'about.html', context={'queryset': queryset})
def login_view(request):
    """Show the login form; on POST, persist a new admission record."""
    data = {}
    if request.method == 'POST':
        # pull the submitted form fields
        ad_no = request.POST.get('adno_text')
        name = request.POST.get('name_text')
        section = request.POST.get('class_text')
        # single ORM call creates and saves the row in one step
        AdmissionDetails.objects.create(
            application_no=ad_no,
            name=name,
            section=section
        )
        data['result'] = "your record has been saved"
    return render(request, 'login.html', context=data)
| jitendra5581/cms-training | studentapp/views.py | views.py | py | 1,601 | python | en | code | 0 | github-code | 36 |
31061471225 |
from ..utils import Object
class GetLanguagePackStrings(Object):
    """
    Returns strings from a language pack in the current localization
    target by their keys. Can be called before authorization.

    Attributes:
        ID (:obj:`str`): ``GetLanguagePackStrings``

    Args:
        language_pack_id (:obj:`str`):
            Language pack identifier of the strings to be returned
        keys (List of :obj:`str`):
            Language pack keys of the strings to be returned; leave empty
            to request all available strings

    Returns:
        LanguagePackStrings

    Raises:
        :class:`telegram.Error`
    """
    ID = "getLanguagePackStrings"

    def __init__(self, language_pack_id, keys, extra=None, **kwargs):
        self.extra = extra
        self.language_pack_id = language_pack_id  # str
        self.keys = keys  # list of str

    @staticmethod
    def read(q: dict, *args) -> "GetLanguagePackStrings":
        # rebuild the request object straight from the query dict
        return GetLanguagePackStrings(q.get('language_pack_id'),
                                      q.get('keys'))
| iTeam-co/pytglib | pytglib/api/functions/get_language_pack_strings.py | get_language_pack_strings.py | py | 1,077 | python | en | code | 20 | github-code | 36 |
29265995758 | """Tools for looking for instances of godaddy cookie stuffing in individual
and collections of BroRecords."""
import re
from .affiliate import AffiliateHistory, FULL_DOMAIN
class ClickCashAffiliateHistory(AffiliateHistory):
    """Affiliate-history rules specific to the ClickCash marketer."""

    @classmethod
    def checkout_urls(cls):
        """Returns a list of strings, each of which, if found in a url
        on the current marketer, would count as a checkout attempt.

        ClickCash has no known checkout URL markers, so this is empty.

        Return:
            A tuple or list of zero or more strings
        """
        return ()

    @classmethod
    def referrer_tag(cls, record):
        # the affiliate id query parameter appears as lowercase 'wid' or
        # uppercase 'WID' depending on the request
        return "wid" if "wid=" in record.uri else "WID"

    @classmethod
    def cookie_set_pattern(cls):
        # compile the cookie-setting regex once and memoize it on the class
        if not hasattr(cls, '_cookie_set_pattern'):
            cls._cookie_set_pattern = re.compile(r'(?:&|\?|^|;)wid=', re.I)
        return cls._cookie_set_pattern

    @classmethod
    def domains(cls):
        # sites operated by this marketer, all matched on the full domain
        return [
            ("www.model-perfect.com", FULL_DOMAIN),
            ("www.ifriends.net", FULL_DOMAIN),
            ("www.incrediblecams.com", FULL_DOMAIN),
            ("www.sizzlingcams.com", FULL_DOMAIN),
            ("www.webcamdating.us", FULL_DOMAIN),
            ("www.babefever.net", FULL_DOMAIN),
        ]

    @classmethod
    def name(cls):
        """Human readable marketer name."""
        return "ClickCash Affiliate"
| pes10k/bro-tools | stuffing/clickcash.py | clickcash.py | py | 1,492 | python | en | code | 0 | github-code | 36 |
14489788093 | # Lab 24 - Rain Data
import datetime
import matplotlib.pyplot as plt
def open_file():
    """Read the rain data file and return its contents as a list of lines."""
    with open('lab24_ankeny_rain.txt', 'r') as handle:
        contents = handle.read()
    return contents.split("\n")
def get_dates(data):
    """Return every space-separated token of length 11 (the date fields)."""
    return [token
            for row in data
            for token in row.split(" ")
            if len(token) == 11]
def get_daily_totals(data):
    """Return the first integer found in each row (a "-" counts as 0).

    Rows with no parseable integer contribute nothing, so the result may
    be shorter than *data*.
    """
    daily_totals = []
    for row in data:
        for value in row.split(" "):
            # the source file uses "-" to mean "no rain recorded"
            if value == "-":
                value = 0
            try:
                daily_totals.append(int(value))
                break
            except ValueError:
                # BUGFIX: was a bare except, which would also swallow
                # unrelated programming errors; int() raises ValueError
                # for non-numeric tokens
                pass
    return daily_totals
def get_merged_data(dates, daily_totals):
    """Pair each date with its daily total, returning a list of tuples."""
    return [pair for pair in zip(dates, daily_totals)]
def calculate_mean(daily_totals):
    """Arithmetic mean: total rainfall divided by the number of days."""
    return sum(daily_totals) / len(daily_totals)
def calculate_variance(mean, daily_totals):
    """Population variance: average squared deviation from *mean*."""
    squared_total = sum((value - mean) ** 2 for value in daily_totals)
    return squared_total / len(daily_totals)
def get_max_rain_date(merged_data):
    """Return the date whose daily total is highest (first one on ties).

    Single pass with max(key=...) instead of the original's three list
    traversals; raises ValueError on empty input, as before.
    """
    return max(merged_data, key=lambda pair: pair[1])[0]
def print_results(mean, variance, max_rain_date):
    """Report the computed statistics back to the user on stdout."""
    print(f"\nThe mean of the data is {mean}."
          f"\nThe variance of the data is {variance}."
          f"\nThe date which had the most rain was {max_rain_date}.\n")
def plot_results(dates, daily_totals):
    """creates an x, y graph with the dates along the x-axis and the daily totals along the y-axis"""
    plt.plot(dates, daily_totals)
    plt.ylabel("Daily Totals")
    plt.xlabel("Dates")
    # blocks until the plot window is closed on interactive backends
    plt.show()
def main():
    """Load the rain data file, compute the summary statistics,
    print them and show the time-series plot."""
    data = open_file()
    dates = get_dates(data)
    daily_totals = get_daily_totals(data)
    # pair dates with totals so the max-rain date can be looked up
    merged_data = get_merged_data(dates, daily_totals)
    mean = calculate_mean(daily_totals)
    variance = calculate_variance(mean, daily_totals)
    max_rain_date = get_max_rain_date(merged_data)
    print_results(mean, variance, max_rain_date)
    plot_results(dates, daily_totals)
main()
| mjhcodes/pdxcodeguild | python/lab24.py | lab24.py | py | 2,881 | python | en | code | 0 | github-code | 36 |
69864473703 | from deep_introspection import network, synthesis, utils
import numpy as np
import matplotlib.pyplot as plt
def test_synthesise_loss_is_low():
    """Synthesising from the last layer's activations should reproduce a
    224x224 RGB image whose representation loss stays under 0.1.
    Requires the VGG prototxt/caffemodel fixtures and the cat image on disk.
    """
    net = network.CaffeNet('deep_introspection/test/VGG.prototxt', 'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')
    img, offset, resFac, newSize = utils.imgPreprocess(img_path='deep_introspection/test/cat.jpg')
    net.set_new_size(newSize)
    print(np.argmax(np.mean(net.predict(img), axis=0)))
    # target representation is taken from the network's last layer
    layer = net.get_layer_names()[-1]
    print("Testing layer " + layer)
    target_rep = net.get_activations(layer)
    solution, loss = synthesis.synthesise(net, target_rep,layer)
    # re-run the net on the synthesised image before reading activations
    net.set_new_size(solution.shape[:2])
    net.predict(solution)
    assert(solution.shape == (224,224,3))
    assert(synthesis.loss(net.get_activations(layer), target_rep) < 0.1)
def test_boundary():
    """Boundary-constrained synthesis on the next-to-last layer yields a
    224x224x3 image (the loss value itself is not asserted here)."""
    net = network.CaffeNet('deep_introspection/test/VGG.prototxt', 'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')
    img, offset, resFac, newSize = utils.imgPreprocess(img_path='deep_introspection/test/cat.jpg')
    net.set_new_size(newSize)
    layer = net.get_layer_names()[-2]
    # (185, 120, 140, 95) -- presumably region coordinates for the boundary;
    # confirm against synthesise_boundary's signature.
    solution, loss = synthesis.synthesise_boundary(net, img, layer, 185, 120, 140, 95)
    assert(solution.shape == (224,224,3))
def test_boundary_synthesis():
    """Synthesis driven by a two-class probability target on the 'prob'
    layer returns an image of the expected shape."""
    net = network.CaffeNet('deep_introspection/test/VGG.prototxt', 'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')
    target = np.zeros(1000)  # one entry per class; 285 and 7 look like ImageNet ids -- confirm
    target[285] = 0.5
    target[7] = 0.5
    solution, loss = synthesis.synthesise(net, target, 'prob')
    assert(solution.shape == (224,224,3))
| onorton/deep_introspection | deep_introspection/test/test_synthesis.py | test_synthesis.py | py | 1,610 | python | en | code | 0 | github-code | 36 |
29432851011 | from asyncore import read
#JMP:xkalou03
__author__ = 'xkalou03'
import sys
import argparse
import re
import reader
import table
def checkParameters():
    """Parse the command-line options and return the argparse namespace.

    A lone --help prints usage and exits 0; --help combined with anything
    else (or a parse failure) prints an error to stderr and exits 1.
    """
    parser = argparse.ArgumentParser(description = 'Projekt do IPP.', add_help = False)
    parser.add_argument('--help', action = "count", default = 0, help = 'Prints help')
    parser.add_argument('--input=', action = "store", default = [], dest = "input", nargs = 1, help = 'Input file')
    parser.add_argument('--output=', action = "store", default = [], dest = "output", nargs = 1, help = 'Output file')
    parser.add_argument('--cmd=', action = "store", default = "", nargs = '+', dest = "text", help = 'Input text')
    parser.add_argument('-r', action = "store_true", dest = "redef", default = False, help = 'Redefination macros')
    try:
        args = parser.parse_args()
    except SystemExit:  # argparse signals a parse error via SystemExit(2)
        print("Parameters Error", file = sys.stderr)
        exit(1)
    if(args.help == 1):
        if len(sys.argv) == 2:
            # FIX: was print(parser.print_help()) which printed the help
            # followed by a stray "None" (print_help returns None).
            parser.print_help()
            exit(0)
        else:
            print("Zadany help + jine parametry", file = sys.stderr)
            exit(1)
    return args
def readInput(input, reader, table, params):
    """Consume the whole input through `reader`, expanding macros.

    '@' starts a macro reference (or an escape for one of @, {, }, $);
    a bare '$' or '}' is a syntax error (exit 55); '{' starts a block
    that is copied verbatim.  Returns the fully expanded output string.

    FIX: removed the unused locals stringIn, rest and count.
    """
    outputString = ""
    c = reader.getc()
    while c:
        if c == '@':
            x = macro(reader, table, params, c)
            if x in {'@', '{', '}', '$'}:
                # escape sequence: emit the literal character
                outputString += x
        elif c in {'$', '}'}:
            exit(55)  # stray '$' or '}' outside a macro/block
        elif c == '{':
            block = reader.readBlock(False)
            if block != None:
                outputString += block
            else:
                exit(55)  # unterminated block
        else:
            outputString += c
        c = reader.getc()
    return outputString
def macro(reader, table, params, x):
    """Read a macro name following '@' (or detect an escape) and dispatch it.

    Returns the escaped character for @@, @{, @} and @$; otherwise looks the
    name up in `table`, expands it via argumentsMacro, and returns '%' as a
    dummy continuation character.  Exits 55 on '@' with no valid name start,
    and 56 when the macro does not exist.
    """
    macroString = ""
    reg = '^[a-zA-Z_][0-9a-zA-Z_]*$'  # valid macro-name pattern
    i = 0
    c = reader.getc()
    if c in {'@', '{', '}', '$'} and x == '@': # check whether this is an escape sequence
        return c
    while c:
        i += 1
        if re.match(reg, macroString + c):
            macroString += c
            c = reader.getc()
            if not c:
                # input ended right after the name: expand what we collected
                find = table.readMacro(macroString)
                if find:
                    argumentsMacro(reader, find, c, table, params)
                    c = '%'
                    break
                else:
                    print("Toto makro neexistuje", file = sys.stderr)
                    exit(56)
            continue
        else:
            # name complete - look it up in the macro table before continuing
            # print("Budu koukat do tabulky", macroString)
            find = table.readMacro(macroString)
            if find:
                argumentsMacro(reader, find, c, table, params)
                c = '%'
                break
            else:
                if i == 1:
                    exit(55)  # '@' not followed by a valid name character
                print("Toto makro neexistuje", file = sys.stderr)
                exit(56)
        # NOTE(review): the two lines below are unreachable -- every branch
        # above either continues, breaks or exits.
        macroString += c
        c = reader.getc()
    return c
def argumentsMacro(reader, find, x, table, params):
    """Dispatch a looked-up macro: the built-ins def/undef/set delegate to
    the macro table; any other macro is expanded and its expansion pushed
    back onto the reader's input stream."""
    allow_redef = bool(params.redef)  # -r flag permits macro redefinition
    if find.name in ('def', '__def__'):
        table.insertMacro(reader, x, allow_redef)
    elif find.name in ('undef', '__undef__'):
        table.deleteMacro(reader, x)
    elif find.name in ('set', '__set__'):
        table.setMacro(reader, x)
    else:
        expansion = table.expandMacro(reader, x, find.name)
        reader.attachString(expansion)
    return
def main():
    """Entry point: open the I/O streams, build the reader and macro table,
    expand the whole input and write the result."""
    params = checkParameters()
    if params.input: # open the input file
        try:
            inputFile = open(params.input[0], 'r')
        except IOError:
            print("Vstupni soubor nejde otevrit", file = sys.stderr)
            exit(2)
    else:
        inputFile = sys.stdin
    if params.output: # open the output file
        try:
            outputFile = open(params.output[0], 'w')
        except IOError:
            print("Soubor nejde otevrit", file = sys.stderr)
            exit(2)
    else:
        outputFile = sys.stdout
    r = reader.Reader(inputFile) # create the reader
    if params.text:
        r.attachString(str(params.text[0]))  # prepend --cmd text to the input
    macroTable = table.Table() # create the macro table
    stringInput = readInput(inputFile, r, macroTable, params) # expand the whole input
    if params.output:
        print(stringInput, file = outputFile, end="") # write the expanded output
    else:
        # NOTE(review): this branch appends a trailing newline (no end="")
        # while the file branch does not; neither handle is closed explicitly.
        print(stringInput, file = sys.stdout)
if __name__ == "__main__":
main() | Strihtrs/IPP_Projects | JMP/jmp.py | jmp.py | py | 4,768 | python | en | code | 0 | github-code | 36 |
184316917 | # coding: utf-8
# device: Mi8SE
# screen: 2244x1080
# adb support
# open USB debugging and allow simulating input via USB debugging
import os
import time
fir_in_button_x, fir_in_button_y = 1485, 870 # first button, click to start
check_in_button_x, check_in_button_y = 1400, 750 # dialog๏ผ่ฑ้ไธ่ถณไธไธชๆฏๅฆ็ปง็ปญ
time_waiting = 13 # loading time (second)
skip_x, skip_y = 2060, 65 # skip useless story
direc_x1, direc_y1, direc_x2, direc_y2 = 900, 364, 995, 485 # two mark points, calculate the direction
direc_tan = (direc_y2 - direc_y1) / (direc_x2 - direc_x1)
magic_num1 = 35 # magic number to fix the direction so that you can walking straight
move_x, move_y = 450, 850 # the center of the left joystick
time_move1 = 32000 # time from the very begining to the boss, fixing according to your own moving speed (millisecond)
ult_ability_x, ult_ability_y = 1860, 620 # the position of the ultimate ability joystick(ๅฐฑๆฏๅคงๆ๏ผๅๆญฃไนๅๆdotaๅคงๆๅซultimate ability)
sec_ability_x, sec_ability_y = 1650, 750 # second ability
A_x, A_y = 1850, 900 # basic attack
again_x, again_y = 1800, 980 # button(ๅๆฌกๆๆ)
while(True):
os.system('adb shell input swipe {} {} {} {} {}'.format(fir_in_button_x, fir_in_button_y, fir_in_button_x, fir_in_button_y, 10)) # start the game (see: 1.png)
os.system('adb shell input swipe {} {} {} {} {}'.format(check_in_button_x, check_in_button_y, check_in_button_x, check_in_button_y, 10)) # check dialog (see: 2.png)
time.sleep(time_waiting) # wait for loading
os.system('adb shell input swipe {} {} {} {} {}'.format(skip_x, skip_y, skip_x, skip_y, 10)) # skip story
print('move')
os.system('adb shell input swipe {} {} {} {} {}'.format(move_x, move_y, move_x-50, move_y-50*direc_tan-magic_num1, time_move1)) # move to the boss
print('attack')
os.system('adb shell input swipe {} {} {} {} {}'.format(ult_ability_x, ult_ability_y, ult_ability_x, ult_ability_y, 3)) # ultimate ability
os.system('adb shell input swipe {} {} {} {} {}'.format(sec_ability_x, sec_ability_y, sec_ability_x, sec_ability_y, 3)) # second ability
print('A')
for i in range(40):
os.system('adb shell input swipe {} {} {} {} {}'.format(A_x, A_y, A_x, A_y, 1)) # basic attack and skip following story
print('done')
os.system('adb shell input swipe {} {} {} {} {}'.format(again_x, again_y, again_x, again_y, 10)) # play again?
time.sleep(3) | qlindazm/coinCollector | coinCollector.py | coinCollector.py | py | 2,423 | python | en | code | 1 | github-code | 36 |
6543350552 | #Avoid Duplicates
# Collect unique words from stdin until a blank line is entered.
print("Enter a list of words; press the enter button twice to finish>>")
words = []  # unique words, in first-seen order
while True:  # read words until the user submits an empty line
    word = input()
    if word not in words:  # keep only the first occurrence of each word
        words.append(word)
    if word == "":
        break
words.remove(words[-1])  # drop the terminating empty string (always appended last)
print(words) | law35/bitwisetechtips-final | media/python/files/duplicates.py | duplicates.py | py | 437 | python | en | code | 0 | github-code | 36 |
12718727003 | a=""
# infix to postfix conversion
def infix_to_postfix(infix):
    """Convert a whitespace-tokenised infix expression to postfix (RPN).

    Operands are alphanumeric tokens (upper/lower case letters or digits,
    multi-character allowed); operators are ^ * / + - plus parentheses.
    Tokens must be separated by spaces, e.g. "A + B * C".

    Generalised from the original: operands were limited to single
    uppercase letters/digits, and '^' had no precedence entry (so the
    sample quadratic-formula input could not be handled).
    """
    # Standard shunting-yard precedence table; '(' lowest so it never pops.
    prec = {"^": 4, "*": 3, "/": 3, "+": 2, "-": 2, "(": 1}
    op_stack = []
    postfix_list = []
    for token in infix.split():
        if token.isalnum():
            # operand: goes straight to the output
            postfix_list.append(token)
        elif token == '(':
            op_stack.append(token)
        elif token == ')':
            # pop operators back to the matching '(' (which is discarded)
            top_token = op_stack.pop()
            while top_token != '(':
                postfix_list.append(top_token)
                top_token = op_stack.pop()
        else:
            # operator: first flush anything of higher-or-equal precedence
            while op_stack and prec[op_stack[-1]] >= prec[token]:
                postfix_list.append(op_stack.pop())
            op_stack.append(token)
    while op_stack:
        postfix_list.append(op_stack.pop())
    return " ".join(postfix_list)
a=infix_to_postfix("(-b+((b*b)-4*a*c)^(0.5)/(2*a))")
print(a) | beetrandahiya/python-upes | stackappln.py | stackappln.py | py | 988 | python | en | code | 2 | github-code | 36 |
2404877114 |
'''
This library is compouse of the units that we are going to adopt for the different magnitudes
'''
# Magnitudes
# Characteristic unit scales used to non-dimensionalise the stellar model.
r = 10**10 #cm (radius)
P = 10**15 #din cm−2 (pressure)
T = 10**7 #K (temperature)
M = 10**33 #g (mass)
L = 10**33 #erg s−1 (luminosity)
rho = 1 #g cm−3 (density)
epsilon = 1 #erg g−1 s−1 (energy rate production)
kappa = 1 #cm2 g−1 (opacity)
# Parameters
X = 0.80 # Hydrogen fraction
Y = 0.16 # Helium fraction
Z = 1 - X - Y # Heavy elements fraction
mu = 1 / (2*X + 0.75*Y + 0.5*Z) # mean molecular weight (fully ionised gas)
MTot = 5 # Total mass (presumably in units of M above -- the original comment said "radius")
RTot_initial = 12 # Initial radius (presumably in units of r above)
LTot_initial = 40 # Initial luminosity (presumably in units of L above)
layers = 100 # Number of layers in the star
| javiisebas/TFG | python-scripts/Initial_Parameters.py | Initial_Parameters.py | py | 773 | python | en | code | 0 | github-code | 36 |
30684906195 | """
Python data structures and algo practice
"""
class Node:
    """A value holder usable both as a linked-list node (next_node) and as
    a binary-tree node (left_node / right_node).

    data       -- payload value (int in the demos below)
    left_node  -- left-child pointer for the binary-tree example
    right_node -- right-child pointer for the binary-tree example
    next_node  -- successor pointer for the queue / stack examples
    """
    def __init__(self, data: int = None, next_node=None):
        self.data = data
        self.left_node = None
        self.right_node = None
        self.next_node = next_node
##########################################
# example of a stack implementation using a stack class
'''
class EmptyStackException(Exception):
pass
class Stack(object):
def __init__(self):
self.head = None
def push(self, value: int) -> None:
self.head = Node(value, self.head)
def pop(self) -> int:
if self.head is None:
raise EmptyStackException("Pop from empty stack.")
value = self.head.value
self.head = self.head.next
return value
def peek(self) -> int:
if self.head is None:
raise EmptyStackException("Peek from empty stack.")
return self.head.value
def is_empty(self) -> bool:
return self.head is None
'''
##########################################
"""
Queue
FIFO Linear data structure with flexible sizing
"""
# asign H/T to null objects to make logic easier
head_node = None # remove nodes here
tail_node = None # add nodes here
def queue_is_empty():
    """Return True when the queue holds no nodes."""
    # `is None` is the idiomatic identity test (was `== None` in an
    # if/else returning True/False explicitly).
    return head_node is None
def queue_peek():
    """Return the value at the front of the queue without removing it.

    Returns an error-message string when the queue is empty (original
    API choice kept for backward compatibility).
    """
    if head_node is None:  # idiomatic identity test (was `== None`)
        return "error msg: queue is empty"
    return head_node.data
def queue_add(data):
    """Append a new node holding `data` at the tail of the queue."""
    global tail_node, head_node # pull global vars into scope to update values
    new_node = Node(data)
    if (tail_node != None):
        tail_node.next_node = new_node # point current tail's next at new_node
    tail_node = new_node # set tail to point at new_node
    if (head_node == None):
        head_node = new_node  # first element: head and tail are the same node
def queue_remove():
    """Pop and return the value at the head of the queue.

    NOTE(review): raises AttributeError when called on an empty queue
    (head_node is None) -- callers must check queue_is_empty() first.
    """
    global head_node, tail_node # pull global vars into scope to update values
    data = head_node.data
    head_node = head_node.next_node
    if (head_node == None):
        tail_node = None  # queue drained: reset the tail as well
    return data
"""
Stack
LIFO data structure with flexible sizing
"""
top_node = None # global var for stack
def stack_is_empty():
    """Return True when the stack holds no nodes."""
    return top_node is None  # identity test instead of `== None`
def stack_peek():
    """Return the top value without popping it.

    Returns an error-message string when the stack is empty (original
    API choice kept for backward compatibility).
    """
    if top_node is None:  # idiomatic identity test (was `== None`)
        return "error msg: stack is empty"
    return top_node.data
def stack_push(data):
    """Push `data` onto the top of the stack."""
    global top_node # grab global top_node to update
    new_node = Node(data, top_node) # new node whose next_node is the old top
    top_node = new_node
def stack_pop():
    """Pop and return the value on top of the stack.

    NOTE(review): raises AttributeError when called on an empty stack
    (top_node is None) -- callers must check stack_is_empty() first.
    """
    global top_node # get global top node to update
    data = top_node.data # top_node data to return
    top_node = top_node.next_node # remove top node from stack
    return data
##################################
def queue_run():
    """Interactive demo: exercise the queue helpers, printing each step."""
    test_node = Node(24)
    print("Test node is a ", test_node.__class__)
    print("\nLet's use these nodes to make a queue...\n")
    print("Is queue empty? ", queue_is_empty())
    print(queue_peek())
    print("\nOk. Now lets add a few nodes... 2, 34, 53, 6")
    queue_add(2)
    queue_add(34)
    queue_add(53)
    queue_add(6)
    print("Now if we run remove on our queue we get ==> " +
          str(queue_remove()))
    print("\nThen running it again we get...")
    print("==> " + str(queue_remove()))
    print("==> " + str(queue_remove()))
    print("==> " + str(queue_remove()))
    print("\nNow is the queue empty? ", queue_is_empty())
def stack_run():
    """Interactive demo: exercise the stack helpers, printing each step."""
    print("\n\n Let's make a stack! \n")
    print("Lets peek our stack to make sure its empty... \n", stack_peek())
    print("\nNow let's push a few nodes onto the stack... 32, 80, 443, 8080")
    stack_push(32)
    stack_push(80)
    stack_push(443)
    stack_push(8080)
    print("\nNow when we run is_empty() it returns...\n==> ", stack_is_empty())
    print("\nNow lets pop the nodes off the stack...")
    print(stack_pop())
    print(stack_pop())
    print(stack_pop())
    print(stack_pop())
    print("\nNow when we check if the stack is empty it returns...\n", stack_is_empty())
def binary_search_tree_run():
    """
    Binary search tree
    create tree using ordered insert and lookup to get log N memory/time space
    value on left child is less than parent and right child is more
    binary search trees must be balanced to avoid losing log n time

    FIX: the inner helpers referenced root.l_child / root.r_child, but Node
    defines left_node / right_node -- they would raise AttributeError if
    ever invoked.  (As in the original, the helpers are only defined here,
    not called.)
    """
    # no need for Tree object as the Tree itself is a concept; its made of connected nodes
    # nodes are the object; connections are self contained
    def binary_insert(root, node):
        """Insert `node` under `root`, keeping BST order (smaller left)."""
        if root is None:
            root = node
        else:
            if root.data > node.data:
                if root.left_node is None:
                    root.left_node = node
                else:
                    binary_insert(root.left_node, node)
            else:
                if root.right_node is None:
                    root.right_node = node
                else:
                    binary_insert(root.right_node, node)

    def in_order_print(root):
        """Print node values in ascending (in-order) sequence."""
        if not root:
            return
        in_order_print(root.left_node)
        print(root.data)
        in_order_print(root.right_node)
#################################
def main():
    """Run the queue, stack and binary-search-tree demos in sequence."""
    queue_run()
    stack_run()
    binary_search_tree_run()
if (__name__ == "__main__"):
main()
| keeganridgley107/CompSci-Py | main.py | main.py | py | 5,520 | python | en | code | 0 | github-code | 36 |
15783422482 | # -*- coding: utf-8 -*-
"""
Created on Sat May 8 12:16:46 2021
@author: tamon
"""
import csv
import numpy as np
from scipy.interpolate import griddata
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
# Accumulators for the CSV columns of interest.
radius = []
angles = []
points = []  # (angle, radius) sample coordinates for interpolation
result = []
# Columns 12/13/20 hold radius, angle and max equivalent stress
# (per the variable names and the plot labels below) -- TODO confirm.
with open('angrad.csv', newline='') as csvfile:
    readfile = csv.reader(csvfile, quotechar='|')
    for row in readfile:
        radius.append(row[12])
        angles.append(row[13])
        result.append(row[20])
# Drop the header-row entry from each column.
radius.pop(0)
angles.pop(0)
result.pop(0)
radius = [int(i) for i in radius]
angles = [int(i) for i in angles]
for i in range(len(radius)):
    points.append([angles[i], radius[i]])
result = [np.float64(i) for i in result]
# Interpolate the scattered samples onto a dense 1000x1000 angle/radius grid.
xgrid, ygrid = np.mgrid[10:90:1000j, 30:240:1000j]
grid = griddata(points, result, (xgrid, ygrid), method='cubic')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(angles, radius, 'k.', ms=1)
sp = ax.imshow(grid.T, cmap='jet', extent=(10, 90, 30, 240), origin='lower')
ax.set_aspect(80/(210))
ax.set_xlabel('Angle [deg]')
ax.set_ylabel('Radius [mm]')
clb = fig.colorbar(sp)
clb.set_label('Equivelant Maximum Stress [Pa]')
fig.savefig('angrad.pdf', format='pdf', bbox_inches='tight')
plt.show()
angslice1 = []
angslice2 = []
angslice3 = []
angs = np.linspace(10, 90, 9)
j = 1
for j in range(9):
angslice1.append(result[8*j + 0])
angslice2.append(result[8*j + 1])
angslice3.append(result[8*j + 2])
xnew = np.linspace(10, 90, 200)
f1 = interp1d(angs, angslice1, kind='cubic')
f2 = interp1d(angs, angslice2, kind='cubic')
f3 = interp1d(angs, angslice3, kind='cubic')
plt.plot(xnew, f1(xnew), 'r', label='Radius=30 [mm]')
plt.plot(xnew, f2(xnew), 'b', label='Radius=60 [mm]')
plt.plot(xnew, f3(xnew), 'g', label='Radius=90 [mm]')
plt.grid('major')
plt.legend(loc='lower right')
plt.xlabel('Angle [deg]')
plt.ylabel('Equivelant Maximum Stress [Pa]')
plt.savefig('angslice.pdf', format='pdf', bbox_inches='tight')
# angslice1 = []
# angslice2 = []
# angslice3 = []
# angs = np.linspace(10, 90, 9)
# j = 1
# for j in range(9):
# angslice1.append(result[8*j + 0])
# angslice2.append(result[8*j + 1])
# angslice3.append(result[8*j + 2])
# xnew = np.linspace(10, 90, 200)
# f1 = interp1d(angs, angslice1, kind='cubic')
# f2 = interp1d(angs, angslice2, kind='cubic')
# f3 = interp1d(angs, angslice3, kind='cubic')
# plt.plot(xnew, np.gradient(f1(xnew)), 'r', label='Radius=30 [mm]')
# plt.plot(xnew, np.gradient(f2(xnew)), 'b', label='Radius=60 [mm]')
# plt.plot(xnew, np.gradient(f3(xnew)), 'g', label='Radius=90 [mm]')
# plt.grid('major')
# plt.legend(loc='lower right')
# plt.xlabel('Angle [deg]')
# plt.ylabel('Equivelant Maximum Stress [Pa]')
# plt.savefig('angslice.pdf', format='pdf', bbox_inches='tight')
radslice1 = result[:8]
radslice2 = result[8:16]
radslice3 = result[16:24]
radslice4 = result[24:32]
radslice5 = result[32:40]
radslice6 = result[40:48]
radslice7 = result[48:56]
rads = np.linspace(30, 240, 8)
xnew = np.linspace(30, 240, 200)
f1 = interp1d(rads, radslice1, kind='cubic')
f2 = interp1d(rads, radslice2, kind='cubic')
f3 = interp1d(rads, radslice3, kind='cubic')
f4 = interp1d(rads, radslice4, kind='cubic')
f5 = interp1d(rads, radslice5, kind='cubic')
f6 = interp1d(rads, radslice6, kind='cubic')
f7 = interp1d(rads, radslice7, kind='cubic')
fig2 = plt.figure()
ax2 = plt.subplot(111)
ax2.plot(xnew, f1(xnew), 'r', label='Radius=10 [mm]')
ax2.plot(xnew, f2(xnew), 'b', label='Radius=20 [mm]')
ax2.plot(xnew, f3(xnew), 'g', label='Radius=30 [mm]')
ax2.plot(xnew, f4(xnew), 'y', label='Radius=40 [mm]')
ax2.plot(xnew, f5(xnew), 'orange', label='Radius=50 [mm]')
ax2.plot(xnew, f6(xnew), 'cyan', label='Radius=60 [mm]')
ax2.plot(xnew, f7(xnew), 'purple', label='Radius=70 [mm]')
ax2.grid('major')
chartBox = ax2.get_position()
ax2.set_position([chartBox.x0, chartBox.y0, chartBox.width*0.6, chartBox.height])
ax2.legend(loc='upper center', bbox_to_anchor=(1.4, 0.8), shadow=True, ncol=1)
ax2.set_xlabel('Radius [mm]')
ax2.set_ylabel('Equivelant Maximum Stress [Pa]')
fig2.savefig('radslice.pdf', format='pdf', bbox_inches='tight')
| Maselko/individual-project | Angrad.py | Angrad.py | py | 4,228 | python | en | code | 0 | github-code | 36 |
21195656811 | import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    FIX: uses a context manager so the file handle is closed
    deterministically (the original leaked it until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
setup(
name='snmpdesk',
version='0.0.91',
description='Scripts for easy get snmp data',
author='Svintsov Dmitry',
author_email='spam@19216801.ru',
url='http://github.com/uralbash/snmpdesk/',
keywords = "snmp",
install_requires=['pysnmp'],
license='GPL',
packages=['snmpdesk'],
long_description=read('README'),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Natural Language :: Russian',
'Operating System :: Unix',
'Programming Language :: Python',
'Topic :: System :: Networking :: Monitoring',
],
)
| uralbash/snmpdesk | setup.py | setup.py | py | 927 | python | en | code | 5 | github-code | 36 |
31026501229 |
board=["-","-","-","-","-","-","-","-","-"]
current_player='X'
game_is_on=True
winner=None
print('WELCOME')
print("Let's play Tic Tac Toe")
print('Plz give the reqiured detail')
player1=input('Player 1 name:')
player2=input('Player 2 name:')
current_name=player1
print(player1 + ' has been assigned with X')
print('AND')
print(player2 + ' has been assigned with 0')
b=True
def display_board():
    """Print the 3x3 board, one row per line, cells separated by '|'."""
    for start in (0, 3, 6):
        print('|'.join(board[start:start + 3]))
def position_input(player,name):
    """Prompt `name` until a free cell (1-9) is chosen, mark it with
    `player`'s symbol, then redraw the board."""
    print(name+"'s turn:")
    position=input('Plz Choose a Position from 1-9:')
    valid = True  # True means "still looking for a free cell"
    while valid:
        # re-prompt until the token is a digit 1-9
        while position not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
            position = input("Choose a position from 1-9: ")
        position = int(position) - 1  # convert to a 0-based board index
        if board[position] == "-":
            valid = False  # free cell found: stop searching
        else:
            # occupied: the int no longer matches the digit list above,
            # so the inner loop re-prompts for a new position
            print("You can't go there. Go again.")
    board[position] = player
    display_board()
def give_turn():
    """Hand control to the other player: swap the marker and the name."""
    global current_player
    global current_name
    marker_swap = {'X': '0', '0': 'X'}
    current_player = marker_swap.get(current_player, current_player)
    name_swap = {player1: player2, player2: player1}
    current_name = name_swap.get(current_name, current_name)
def check_if_game_over():
    """Refresh the win state, then the tie state (each may end the game)."""
    check_win()
    check_if_tie()
def check_win():
    """Set the global `winner` to the winning marker, or None when no line
    is complete.  All three line checks run (each may end the game)."""
    global winner
    outcomes = (check_row(), check_vertical(), check_diagonal())
    winner = next((mark for mark in outcomes if mark), None)
def check_row():
    """Return the marker owning a completed row (and stop the game), else None."""
    global game_is_on
    for first in (0, 3, 6):
        a, b, c = board[first:first + 3]
        if a == b == c != "-":
            game_is_on = False
            return a
    return None
def check_vertical():
    """Return the marker owning a completed column (and stop the game), else None."""
    global game_is_on
    for top in (0, 1, 2):
        a, b, c = board[top], board[top + 3], board[top + 6]
        if a == b == c != "-":
            game_is_on = False
            return a
    return None
def check_diagonal():
    """Return the marker owning a completed diagonal (and stop the game), else None."""
    global game_is_on
    for cells in ((0, 4, 8), (2, 4, 6)):
        a, b, c = (board[i] for i in cells)
        if a == b == c != "-":
            game_is_on = False
            return a
    return None
def check_if_tie():
    """Return True (and end the game) when no empty '-' cells remain."""
    global game_is_on
    board_full = "-" not in board
    if board_full:
        game_is_on = False
    return board_full
def play():
    """Main loop: alternate turns until a win or a tie, redrawing the board
    after every move and announcing the result afterwards (top level)."""
    global b
    global current_player
    #global winner
    print('Are You Ready')
    print("Let's start the game")
    display_board()
    while game_is_on:
        position_input(current_player,current_name)
        check_if_game_over()
        give_turn()  # swap active player even after the last move
print('\n')
if winner=='X' :
print('CONGRATULATIONS!')
print(player1 +' won \n' )
print(player2 +' Better luck next time.')
elif winner=='0' :
print('CONGRATULATIONS!')
print(player2 +' won \n')
print(player1 +' Better luck next time.')
elif winner==None:
print("It's a tie")
#start thw game
play()
| meetshah129/py | Project.py | Project.py | py | 3,835 | python | en | code | 0 | github-code | 36 |
73479278505 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2016, Daisuke Cato <daisuke.cato@gmail.com>'
__docformat__ = 'restructuredtext en'
try:
from PyQt5 import QtWidgets as QtGui
from PyQt5.Qt import QWidget, QGridLayout, QLabel, QLineEdit, QPushButton
except ImportError as e:
from PyQt4 import QtGui
from PyQt4.Qt import QWidget, QGridLayout, QLabel, QLineEdit, QPushButton
from calibre.utils.config import JSONConfig
from calibre_plugins.extract_isbn.common_utils import KeyValueComboBox, KeyboardConfigDialog
STORE_NAME = 'Options'
KEY_PATH_BROWSER = 'pathBrowser'
DEFAULT_STORE_VALUES = {
KEY_PATH_BROWSER: '',
}
# This is where all preferences for this plugin will be stored
plugin_prefs = JSONConfig('plugins/Bibi Calibre')
# Set defaults
plugin_prefs.defaults[STORE_NAME] = DEFAULT_STORE_VALUES
class ConfigWidget(QWidget):
    """Plugin configuration pane: a single line-edit holding the external
    browser command, persisted via calibre's JSONConfig store."""
    def __init__(self, plugin_action):
        QWidget.__init__(self)
        self.plugin_action = plugin_action
        layout = QGridLayout(self)
        self.setLayout(layout)
        c = plugin_prefs[STORE_NAME]  # previously saved values for this plugin
        layout.addWidget(QLabel('Browser command:', self), 0, 0)
        # fall back to the shipped default when no value was saved yet
        path_browser = c.get(KEY_PATH_BROWSER, DEFAULT_STORE_VALUES[KEY_PATH_BROWSER])
        self.path_browser_ledit = QLineEdit(path_browser, self)
        layout.addWidget(self.path_browser_ledit, 1, 0)
    def save_settings(self):
        """Persist the edited browser command back into the prefs store."""
        new_prefs = {}
        new_prefs[KEY_PATH_BROWSER] = unicode(self.path_browser_ledit.text())  # NOTE(review): `unicode` is Python 2-only
        plugin_prefs[STORE_NAME] = new_prefs
| dcato/bibi_calibre | config.py | config.py | py | 1,698 | python | en | code | 0 | github-code | 36 |
16128248265 | import functools
from importlib import import_module
from pyws.public import InvalidPath
def route(path):
    """Decorator factory: attach a URL `path` to a handler via the
    __route__ attribute, so Route.add_routes can discover it."""
    def decorate(handler):
        @functools.wraps(handler)
        def proxy(*args, **kwargs):
            return handler(*args, **kwargs)
        proxy.__route__ = path
        return proxy
    return decorate
class Route:
    """Registry mapping URL paths to handler callables decorated with @route."""
    routes = {}  # class-level: one shared registry for the process
    @classmethod
    def add_routes(cls, module_name):
        """Import `module_name` and register every public callable that
        carries a __route__ attribute (set by the @route decorator)."""
        point = module_name.rfind('.')
        if point == (-1):
            mod = import_module(module_name)
        else:
            # dotted name: import the parent, then fetch the child attribute
            mod = getattr(import_module(module_name[:point]), module_name[point + 1:])
        for attr in dir(mod):
            if attr.startswith('_'):
                continue
            func = getattr(mod, attr)
            path = getattr(func, '__route__', None)
            if path and callable(func):
                # setdefault: the first handler registered for a path wins;
                # later duplicates are silently ignored
                cls.routes.setdefault(path, func)
    @classmethod
    def get(cls, path):
        """Return the handler registered for `path`; raise InvalidPath if none."""
        func = cls.routes.get(path)
        if not func:
            raise InvalidPath
        return func
| czasg/ScrapyLearning | czaSpider/dump/bootstrap_test/blogs_v1/pyws/route.py | route.py | py | 1,042 | python | en | code | 1 | github-code | 36 |
6410410688 | import random
import os
import argparse
from typing import DefaultDict
# from paper "Learning Unknown from Correlations:
# Graph Neural Network for Inter-novel-protein Interaction Prediction"
standard_acids = [
('A', 1), ('C', 6), ('D', 5), ('E', 7), ('F', 2),
('G', 1), ('H', 4), ('I', 2), ('K', 5), ('L', 2),
('M', 3), ('N', 4), ('P', 2), ('Q', 7), ('R', 4),
('S', 3), ('T', 3), ('V', 1), ('W', 4), ('Y', 3)]
class PPI:
    """One protein-protein interaction sample: an unordered protein pair
    plus a binary label (1 = interacting, 0 = non-interacting)."""

    def __init__(self, fst_pro, sec_pro, label=1):
        self.fst_pro = fst_pro
        self.sec_pro = sec_pro
        self.label = label
        assert label == 1 or label == 0

    def __eq__(self, __o: object) -> bool:
        """Pairs compare equal regardless of protein order."""
        own = (self.fst_pro, self.sec_pro)
        return own == (__o.fst_pro, __o.sec_pro) or own == (__o.sec_pro, __o.fst_pro)

    def __str__(self) -> str:
        """Tab-separated line ready for the processed pairs file."""
        return "%s\t%s\t%s\n" % (self.fst_pro, self.sec_pro, str(self.label))
def handle(pairfp, fastafp):
    """Load PPI pairs from `pairfp` and matching FASTA sequences from `fastafp`.

    Negative pairs that conflict with an already-seen positive pair are
    dropped.  Returns (ppis, acid_seqs) where acid_seqs maps protein id ->
    amino-acid sequence for every protein referenced by the pairs file.
    """
    with open(pairfp, "r") as f:
        ppi_lines = f.readlines()
    with open(fastafp, "r") as f:
        seq_lines = f.readlines()
    # Neighbour sets of positive samples; in the tsv file the negative
    # samples always come last, so every positive is seen first.
    pneighbors = DefaultDict(set)  # NOTE(review): typing.DefaultDict used as collections.defaultdict
    ppis = []
    proteins = set()
    for ppiline in ppi_lines:
        fpro, spro, label = ppiline.split()
        proteins.update((fpro, spro))
        label = int(float(label))
        if label == 0:
            if spro in pneighbors[fpro] or fpro in pneighbors[spro]:
                # conflicting label: the pair is already recorded as a
                # positive sample, so drop this negative duplicate
                continue
        elif label == 1:
            pneighbors[fpro].add(spro)
            pneighbors[spro].add(fpro)
        else:
            raise ValueError
        ppis.append(PPI(fpro, spro, label))
    acid_seqs = {}
    for idx in range(0, len(seq_lines), 2):
        # FASTA layout: ">id" header line followed by the sequence line
        key = seq_lines[idx].strip()[1:].strip()
        value = seq_lines[idx+1].strip()
        if key in proteins:
            acid_seqs[key] = value
    assert len(acid_seqs) == len(proteins)
    return ppis, acid_seqs
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--pair_dir", type=str, default="./data/dscript/data/pairs")
    parser.add_argument("--seq_dir", type=str, default="./data/dscript/data/seqs")
    parser.add_argument('--processed_dir', type=str, default="./data/dscript/processed")
    # parameters for filtering samples (sequence-length bounds)
    parser.add_argument('--max_length', type=int, default=800)
    parser.add_argument('--min_length', type=int, default=50)
    args = parser.parse_args()

    if not os.path.exists(args.processed_dir):
        os.mkdir(args.processed_dir)
        os.mkdir(os.path.join(args.processed_dir, "pairs"))
        os.mkdir(os.path.join(args.processed_dir, "seqs"))

    pair_fns = os.listdir(args.pair_dir)
    for pairfn in pair_fns:
        # file naming convention: "<organism>_....tsv" with a matching
        # "<organism>.fasta" sequence file -- TODO confirm
        organism = pairfn.split("_")[0].strip()
        pairfp = os.path.join(args.pair_dir, pairfn)
        fastafp = os.path.join(args.seq_dir, organism + ".fasta")
        ppis, acid_seqs = handle(pairfp, fastafp)
        # proteins discarded by the length filter, keyed by reason
        dropout_proteins = {"Too short": [], "Too long": [],} # "With non-standard acids": []}
        # (protein id, sequence length) for every protein that passes
        protein_lengths = []
        for pro, acid_seq in acid_seqs.items():
            # filter proteins by sequence length
            qualified = False
            if len(acid_seq) < args.min_length:
                dropout_proteins['Too short'].append(pro)
            elif len(acid_seq) > args.max_length:
                dropout_proteins['Too long'].append(pro)
            # elif len(set(list(acid_seq)) - set([acid[0] for acid in standard_acids])) > 0:
            #     dropout_proteins['With non-standard acids'].append(pro)
            else:
                qualified = True
            if qualified:
                protein_lengths.append((pro, len(acid_seq)))
        # report how many proteins were dropped and why
        if sum([len(value) for value in dropout_proteins.values()]) > 0:
            print(f"============{pairfn.split('.')[0]} Dataset Filter============")
            print(f"Total {len(acid_seqs)} proteins.")
            print(f"\tFilter {len(dropout_proteins['Too short'])} because they are too short.")
            print(f"\tFilter {len(dropout_proteins['Too long'])} because they are too long.")
            # print(f"\tFilter {len(dropout_proteins['With non-standard acids'])} because they have non-standard acids.")
        # collect the filtered-out proteins into one lookup set
        dropout_proteins = set(dropout_proteins['Too long'] + dropout_proteins['Too short'])
        # + dropout_proteins['With non-standard acids'])
        # remove unqualified proteins and every PPI that references one
        ppis = [ppi for ppi in ppis if ppi.fst_pro not in dropout_proteins and ppi.sec_pro not in dropout_proteins]
        acid_seqs = {key: value for key, value in acid_seqs.items() if key not in dropout_proteins}

        with open(os.path.join(args.processed_dir, "pairs", pairfn), "w") as f:
            f.writelines([str(ppi) for ppi in ppis])
        with open(os.path.join(args.processed_dir, "seqs", pairfn.split(".")[0] + ".fasta"), "w") as f:
            f.writelines([f"{pro}\t{sequence}\n" for pro, sequence in acid_seqs.items()])

        # print statistics
        print(f"============{pairfn.split('.')[0]} Dataset Statistics============")
        print(f'Total {len(ppis)} positive samples:')
        print(f'\t Positive: {len([ppi for ppi in ppis if ppi.label == 1])}')
        print(f'\t Negative: {len([ppi for ppi in ppis if ppi.label == 0])}')
        print(f"Total {len(acid_seqs)} Proteins:" )
        print(f"\tMax length of protein: {max([pro[1] for pro in protein_lengths])}")
        print(f"\tMin length of protein: {min([pro[1] for pro in protein_lengths])}")
| LtECoD/PPITrans | data/dscript/builddataset.py | builddataset.py | py | 5,962 | python | en | code | 4 | github-code | 36 |
25046698571 | #!/usr/bin/python
import json
import re
import os
import sys
# Get data.json from Twitter for
# Followers: https://oauth-playground.glitch.me/?id=usersIdFollowers¶ms=%28%27user.fields%21%27description%27%29_
# Followings: https://oauth-playground.glitch.me/?id=usersIdFollowing¶ms=%28%27user.fields%21%27description%27%29_
with open(os.path.join(sys.path[0], "data.json"), "r") as fileData:
jsonData = json.load(fileData)
mastodonUrlRegex = re.compile(r'@\w*@\w*\.\w*')
webUrlRegex = re.compile(r'http(s?)://.*/@\w*')
for follower in jsonData['data']:
name = follower['name']
username = follower['username']
description = follower['description']
match1 = mastodonUrlRegex.search(description)
if match1:
print("%s (@%s) - %s" % (name, username, match1.group()))
match2 = webUrlRegex.search(description)
if match2:
print("%s (@%s) - %s" % (name, username, match2.group()))
| tjosten/twitter-mastodon-finder | finder.py | finder.py | py | 983 | python | en | code | 0 | github-code | 36 |
36947645839 | __revision__ = "src/engine/SCons/Tool/JavaCommon.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import os
import os.path
import re
import glob
java_parsing = 1
default_java_version = '1.4'
# a switch for which jdk versions to use the Scope state for smarter
# anonymous inner class parsing.
scopeStateVersions = ('1.8')
# Glob patterns for use in finding where the JDK is.
# These are pairs, *dir_glob used in the general case,
# *version_dir_glob if matching only a specific version.
# For now only used for Windows.
java_win32_dir_glob = 'C:/Program Files*/Java/jdk*/bin'
# On windows, since Java 9, there is a dash between 'jdk' and the version
# string that wasn't there before. this glob should catch either way.
java_win32_version_dir_glob = 'C:/Program Files*/Java/jdk*%s*/bin'
# Glob patterns for use in finding where the JDK headers are.
# These are pairs, *dir_glob used in the general case,
# *version_dir_glob if matching only a specific version.
java_macos_include_dir_glob = '/System/Library/Frameworks/JavaVM.framework/Headers/'
java_macos_version_include_dir_glob = '/System/Library/Frameworks/JavaVM.framework/Versions/%s*/Headers/'
java_linux_include_dirs_glob = [
'/usr/lib/jvm/default-java/include',
'/usr/lib/jvm/java-*/include'
]
# Need to match path like below (from Centos 7)
# /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.191.b12-0.el7_5.x86_64/include/
java_linux_version_include_dirs_glob = [
'/usr/lib/jvm/java-*-sun-%s*/include',
'/usr/lib/jvm/java-%s*-openjdk*/include',
'/usr/java/jdk%s*/include'
]
if java_parsing:
# Parse Java files for class names.
#
# This is a really cool parser from Charles Crain
# that finds appropriate class names in Java source.
# A regular expression that will find, in a java file:
# newlines;
# double-backslashes;
# a single-line comment "//";
# single or double quotes preceeded by a backslash;
# single quotes, double quotes, open or close braces, semi-colons,
# periods, open or close parentheses;
# floating-point numbers;
# any alphanumeric token (keyword, class name, specifier);
# any alphanumeric token surrounded by angle brackets (generics);
# the multi-line comment begin and end tokens /* and */;
# array declarations "[]".
_reToken = re.compile(r'(\n|\\\\|//|\\[\'"]|[\'"\{\}\;\.\(\)]|' +
r'\d*\.\d*|[A-Za-z_][\w\$\.]*|<[A-Za-z_]\w+>|' +
r'/\*|\*/|\[\])')
    class OuterState(object):
        """The initial state for parsing a Java file for classes,
        interfaces, and anonymous inner classes.

        Implements a token-driven state machine: each parseToken() call
        returns the state object that should handle the next token.
        listOutputs accumulates the .class file base names (e.g.
        'Outer$Inner', 'Outer$1') that compiling the file will produce.
        """

        def __init__(self, version=default_java_version):
            if version not in ('1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '1.7',
                               '1.8', '5', '6', '9.0', '10.0', '11.0', '12.0'):
                msg = "Java version %s not supported" % version
                raise NotImplementedError(msg)

            self.version = version
            self.listClasses = []          # stack of currently-open named classes
            self.listOutputs = []          # completed class-file base names
            self.stackBrackets = []        # brace depth at which each class opened
            self.brackets = 0              # current brace nesting depth
            self.nextAnon = 1              # next anonymous-class number (pre-1.5 scheme)
            self.localClasses = []         # per-class name counters for method-local classes
            self.stackAnonClassBrackets = []  # brace depths of open anonymous classes
            self.anonStacksStack = [[0]]   # anonymous-class counters, one stack per named class
            self.package = None            # package name, once the 'package' keyword is seen

        def trace(self):
            # Debug hook; intentionally a no-op.
            pass

        def __getClassState(self):
            # Lazily create and cache the companion ClassState.
            try:
                return self.classState
            except AttributeError:
                ret = ClassState(self)
                self.classState = ret
                return ret

        def __getPackageState(self):
            # Lazily create and cache the companion PackageState.
            try:
                return self.packageState
            except AttributeError:
                ret = PackageState(self)
                self.packageState = ret
                return ret

        def __getAnonClassState(self):
            # Wrap AnonClassState in a SkipState so the token right after
            # 'new' (the type name) is discarded before anon detection.
            try:
                return self.anonState
            except AttributeError:
                self.outer_state = self
                ret = SkipState(1, AnonClassState(self))
                self.anonState = ret
                return ret

        def __getSkipState(self):
            # Lazily create a one-token skipper that returns here.
            try:
                return self.skipState
            except AttributeError:
                ret = SkipState(1, self)
                self.skipState = ret
                return ret

        def _getAnonStack(self):
            # Anonymous-class counter stack of the innermost named class.
            return self.anonStacksStack[-1]

        def openBracket(self):
            self.brackets = self.brackets + 1

        def closeBracket(self):
            self.brackets = self.brackets - 1
            # Closing brace may end the innermost named class...
            if len(self.stackBrackets) and \
               self.brackets == self.stackBrackets[-1]:
                self.listOutputs.append('$'.join(self.listClasses))
                self.localClasses.pop()
                self.listClasses.pop()
                self.anonStacksStack.pop()
                self.stackBrackets.pop()
            # ...or the innermost anonymous class (pre-1.8 handling only;
            # 1.8 uses ScopeState instead).
            if len(self.stackAnonClassBrackets) and \
               self.brackets == self.stackAnonClassBrackets[-1] and \
               self.version not in scopeStateVersions:
                self._getAnonStack().pop()
                self.stackAnonClassBrackets.pop()

        def parseToken(self, token):
            """Dispatch one token; return the state handling the next one."""
            if token[:2] == '//':
                return IgnoreState('\n', self)
            elif token == '/*':
                return IgnoreState('*/', self)
            elif token == '{':
                self.openBracket()
            elif token == '}':
                self.closeBracket()
            elif token in ['"', "'"]:
                return IgnoreState(token, self)
            elif token == "new":
                # anonymous inner class
                if len(self.listClasses) > 0:
                    return self.__getAnonClassState()
                return self.__getSkipState() # Skip the class name
            elif token in ['class', 'interface', 'enum']:
                if len(self.listClasses) == 0:
                    self.nextAnon = 1
                self.stackBrackets.append(self.brackets)
                return self.__getClassState()
            elif token == 'package':
                return self.__getPackageState()
            elif token == '.':
                # Skip the attribute, it might be named "class", in which
                # case we don't want to treat the following token as
                # an inner class name...
                return self.__getSkipState()
            return self

        def addAnonClass(self):
            """Add an anonymous inner class"""
            if self.version in ('1.1', '1.2', '1.3', '1.4'):
                clazz = self.listClasses[0]
                self.listOutputs.append('%s$%d' % (clazz, self.nextAnon))
            elif self.version in ('1.5', '1.6', '1.7', '1.8', '5', '6', '9.0', '10.0', '11.0', '12.0'):
                self.stackAnonClassBrackets.append(self.brackets)
                className = []
                className.extend(self.listClasses)
                self._getAnonStack()[-1] = self._getAnonStack()[-1] + 1
                for anon in self._getAnonStack():
                    className.append(str(anon))
                self.listOutputs.append('$'.join(className))

            self.nextAnon = self.nextAnon + 1
            self._getAnonStack().append(0)

        def setPackage(self, package):
            self.package = package
    class ScopeState(object):
        """
        A state that parses code within a scope normally,
        within the confines of a scope.

        Used for Java 1.8 anonymous-class bodies: tokens are handled much
        like in OuterState, but brace depth is tracked locally so the state
        can return to its predecessor when the scope closes.
        """

        def __init__(self, old_state):
            self.outer_state = old_state.outer_state
            self.old_state = old_state
            self.brackets = 0

        def __getClassState(self):
            # Lazily create and cache the companion ClassState.
            try:
                return self.classState
            except AttributeError:
                ret = ClassState(self)
                self.classState = ret
                return ret

        def __getAnonClassState(self):
            # Skip the type name after 'new', then look for an anon class.
            try:
                return self.anonState
            except AttributeError:
                ret = SkipState(1, AnonClassState(self))
                self.anonState = ret
                return ret

        def __getSkipState(self):
            # Lazily create a one-token skipper that returns here.
            try:
                return self.skipState
            except AttributeError:
                ret = SkipState(1, self)
                self.skipState = ret
                return ret

        def openBracket(self):
            self.brackets = self.brackets + 1

        def closeBracket(self):
            self.brackets = self.brackets - 1

        def parseToken(self, token):
            """Dispatch one token; pop back to old_state when the scope ends."""
            # if self.brackets == 0:
            #     return self.old_state.parseToken(token)
            if token[:2] == '//':
                return IgnoreState('\n', self)
            elif token == '/*':
                return IgnoreState('*/', self)
            elif token == '{':
                self.openBracket()
            elif token == '}':
                self.closeBracket()
                if self.brackets == 0:
                    # Scope closed: retire this anon level and resume.
                    self.outer_state._getAnonStack().pop()
                    return self.old_state
            elif token in ['"', "'"]:
                return IgnoreState(token, self)
            elif token == "new":
                # anonymous inner class
                return self.__getAnonClassState()
            elif token == '.':
                # Skip the attribute, it might be named "class", in which
                # case we don't want to treat the following token as
                # an inner class name...
                return self.__getSkipState()
            return self
    class AnonClassState(object):
        """A state that looks for anonymous inner classes.

        Entered (via a SkipState) after a 'new' token; decides whether the
        constructor call is followed by a class body '{', and if so records
        the anonymous class on the outer state.
        """

        def __init__(self, old_state):
            # outer_state is always an instance of OuterState
            self.outer_state = old_state.outer_state
            self.old_state = old_state
            self.brace_level = 0   # parenthesis depth of the constructor args

        def parseToken(self, token):
            """Inspect one token; detect '{' right after the argument list."""
            # This is an anonymous class if and only if the next
            # non-whitespace token is a bracket. Everything between
            # braces should be parsed as normal java code.
            if token[:2] == '//':
                return IgnoreState('\n', self)
            elif token == '/*':
                return IgnoreState('*/', self)
            elif token == '\n':
                return self
            elif token[0] == '<' and token[-1] == '>':
                # Generic type parameter(s) of the constructed type.
                return self
            elif token == '(':
                self.brace_level = self.brace_level + 1
                return self
            # Inside the constructor argument list: recurse for nested
            # 'new', skip strings, and track closing parentheses.
            if self.brace_level > 0:
                if token == 'new':
                    # look further for anonymous inner class
                    return SkipState(1, AnonClassState(self))
                elif token in ['"', "'"]:
                    return IgnoreState(token, self)
                elif token == ')':
                    self.brace_level = self.brace_level - 1
                return self
            if token == '{':
                # Argument list done and a body follows: it is anonymous.
                self.outer_state.addAnonClass()
                if self.outer_state.version in scopeStateVersions:
                    return ScopeState(old_state=self.old_state).parseToken(token)
            return self.old_state.parseToken(token)
class SkipState(object):
"""A state that will skip a specified number of tokens before
reverting to the previous state."""
def __init__(self, tokens_to_skip, old_state):
self.tokens_to_skip = tokens_to_skip
self.old_state = old_state
def parseToken(self, token):
self.tokens_to_skip = self.tokens_to_skip - 1
if self.tokens_to_skip < 1:
return self.old_state
return self
    class ClassState(object):
        """A state we go into when we hit a class or interface keyword.

        The next meaningful token is taken as the class name, pushed on the
        outer state's class stack, and control returns to the outer state.
        """

        def __init__(self, outer_state):
            # outer_state is always an instance of OuterState
            self.outer_state = outer_state

        def parseToken(self, token):
            """Record *token* as the new class's name."""
            # the next non-whitespace token should be the name of the class
            if token == '\n':
                return self
            # If that's an inner class which is declared in a method, it
            # requires an index prepended to the class-name, e.g.
            # 'Foo$1Inner'
            # https://github.com/SCons/scons/issues/2087
            if self.outer_state.localClasses and \
                self.outer_state.stackBrackets[-1] > \
                self.outer_state.stackBrackets[-2] + 1:
                # Count repeated local-class names within the enclosing
                # class so each gets javac's numeric prefix (1Inner, 2Inner...).
                locals = self.outer_state.localClasses[-1]
                try:
                    idx = locals[token]
                    locals[token] = locals[token] + 1
                except KeyError:
                    locals[token] = 1
                token = str(locals[token]) + token
            self.outer_state.localClasses.append({})
            self.outer_state.listClasses.append(token)
            self.outer_state.anonStacksStack.append([0])
            return self.outer_state
class IgnoreState(object):
"""A state that will ignore all tokens until it gets to a
specified token."""
def __init__(self, ignore_until, old_state):
self.ignore_until = ignore_until
self.old_state = old_state
def parseToken(self, token):
if self.ignore_until == token:
return self.old_state
return self
class PackageState(object):
"""The state we enter when we encounter the package keyword.
We assume the next token will be the package name."""
def __init__(self, outer_state):
# outer_state is always an instance of OuterState
self.outer_state = outer_state
def parseToken(self, token):
self.outer_state.setPackage(token)
return self.outer_state
    def parse_java_file(fn, version=default_java_version):
        """Read the Java source file *fn* and parse it.

        Thin wrapper around parse_java(); *version* selects the parsing
        rules for the corresponding JDK language level.
        """
        with open(fn, 'r') as f:
            data = f.read()
        return parse_java(data, version)
    def parse_java(contents, version=default_java_version, trace=None):
        """Parse Java source text and derive its compilation outputs.

        :param contents: full text of a .java file.
        :param version: JDK language level controlling parser behavior.
        :param trace: optional callable(token, state) invoked per token.
        :return: tuple (package_directory_or_None, list of .class base
            names that compiling the file will produce).
        """
        package = None

        initial = OuterState(version)
        currstate = initial
        for token in _reToken.findall(contents):
            # The regex produces a bunch of groups, but only one will
            # have anything in it.
            currstate = currstate.parseToken(token)
            if trace: trace(token, currstate)

        if initial.package:
            package = initial.package.replace('.', os.sep)
        return (package, initial.listOutputs)
else:
# Don't actually parse Java files for class names.
#
# We might make this a configurable option in the future if
# Java-file parsing takes too long (although it shouldn't relative
# to how long the Java compiler itself seems to take...).
def parse_java_file(fn):
""" "Parse" a .java file.
This actually just splits the file name, so the assumption here
is that the file name matches the public class name, and that
the path to the file is the same as the package name.
"""
return os.path.split(fn)
def get_java_install_dirs(platform, version=None):
    """
    Find the java jdk installation directories.

    This list is intended to supply as "default paths" for use when looking
    up actual java binaries.

    :param platform: selector for search algorithm.
    :param version: If specified, only look for java sdk's of this version
    :return: list of default paths for java.
    """
    if platform != 'win32':
        # Non-Windows platforms: no default search locations (yet).
        return []

    # On Windows, glob the standard "Program Files*/Java/jdk*" layout,
    # narrowed to a specific JDK version when one was requested.
    if version:
        pattern = java_win32_version_dir_glob % version
    else:
        pattern = java_win32_dir_glob
    return sorted(glob.glob(pattern))
def get_java_include_paths(env, javac, version):
    """
    Find java include paths for JNI building.

    :param env: construction environment, used to extract platform.
    :param javac: path to detected javac.
    :param version: if set, restrict the search to this JDK version.
    :return: list of paths.
    """
    if not javac:
        # there are no paths if we've not detected javac.
        return []

    platform = env['PLATFORM']
    if platform == 'win32':
        # Derive the include dir from javac's location: <jdk>/bin/javac
        # implies <jdk>/include (plus its win32 subdirectory).
        inc_dir = os.path.normpath(
            os.path.join(os.path.dirname(javac), '..', 'include'))
        return [inc_dir, os.path.join(inc_dir, 'win32')]

    if platform == 'darwin':
        if version:
            return sorted(glob.glob(java_macos_version_include_dir_glob % version))
        return [java_macos_include_dir_glob]

    # Everything else is treated as linux-like: expand the known JVM
    # install globs and add the 'linux' subdirectory of each hit.
    if version:
        patterns = [g % version for g in java_linux_version_include_dirs_glob]
    else:
        patterns = list(java_linux_include_dirs_glob)
    paths = []
    for pattern in patterns:
        for base in glob.glob(pattern):
            paths.extend([base, os.path.join(base, 'linux')])
    return paths
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mongodb/mongo | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/JavaCommon.py | JavaCommon.py | py | 17,551 | python | en | code | 24,670 | github-code | 36 |
27194927025 | import logging
import numpy as np
from sklearn.model_selection import train_test_split, StratifiedKFold
from utils import logging as lg
from heatmap_tutorial import utils as ht_utils
lg.set_logging()
def get_mnist(dataset, dir_path='./data/mnist'):
    """Load an MNIST-format split from raw IDX files.

    :param dataset: 'train' or 'test' (selects the IDX file prefix).
    :param dir_path: directory holding the *-images-idx3-ubyte and
        *-labels-idx1-ubyte files.
    :return: (x, y) where x is an (N, 784) float array scaled to [-1, 1]
        and y is an (N, 10) one-hot float matrix.
    :raises ValueError: if *dataset* is neither 'train' nor 'test'.
    """
    if dataset == 'train':
        prefix = 'train'
    elif dataset == 'test':
        prefix = 't10k'
    else:
        raise ValueError('No dataset MNIST - %s' % dataset)

    logging.debug('Load %s : %s' % (dir_path, dataset))

    x_path = '%s/%s-images-idx3-ubyte' % (dir_path, prefix)
    y_path = '%s/%s-labels-idx1-ubyte' % (dir_path, prefix)

    # The IDX files are raw binary: open them in 'rb' so np.fromfile reads
    # the bytes untranslated (text mode can corrupt or reject the data).
    with open(x_path, 'rb') as xf:
        with open(y_path, 'rb') as yf:
            # Skip the 16-byte image header / 8-byte label header, then
            # rescale pixels from [0, 255] to [-1, 1].
            x = 2.0*np.fromfile(xf, dtype='ubyte', count=-1)[16:].reshape((-1, 784)) / 255 - 1
            y = np.fromfile(yf, dtype='ubyte', count=-1)[8:]
            # One-hot encode the digit labels.
            y = (y[:, np.newaxis] == np.arange(10)) * 1.0

    return x, y
def get_empty_data():
    """Return a blank 28x28 image filled with the background value (-1.0)."""
    return np.full((28, 28), -1.0)
def fill_left_right_digit(x, y, seed=71):
    """Pad each 28x28 digit into a 28x84 strip with decoys on both sides.

    The labeled digit keeps the centre slot; the left and right slots are
    filled with randomly chosen samples from OTHER classes, so only the
    middle digit matches the label.

    :param x: (N, 28, 28) images.
    :param y: (N, 10) one-hot labels (unchanged, returned as-is).
    :param seed: RNG seed for reproducible decoy selection.
    :return: (new_x, y) with new_x of shape (N, 28, 84).
    """
    new_x = np.zeros((x.shape[0], 28, 28*3))
    # Place the true digit in the centre third.
    new_x[:, :, 28:(28*2)] = x
    def plot_sample_propotions(indices, label):
        # Debug helper: log how the selected indices spread over classes.
        y_not_in_class_i = classes[indices]
        counts = dict()
        for jj in range(10):
            counts[jj] = np.sum(y_not_in_class_i == jj)
        logging.info('%s | sample propotions' % (label))
        logging.info(counts)
    np.random.seed(seed)
    classes = np.argmax(y, axis=1)
    # plot_sample_propotions(range(classes.shape[0]), 'total')
    for i in range(10):
        samples_in_class_i = np.squeeze(np.argwhere(classes == i))
        total = samples_in_class_i.shape[0]
        samples_not_in_class_i = np.squeeze(np.argwhere(classes != i))
        # Draw decoys (with replacement) from samples of other classes.
        left_indices = np.random.choice(samples_not_in_class_i, total)
        # plot_sample_propotions(left_indices, 'left-%d' % i)
        right_indices = np.random.choice(samples_not_in_class_i, total)
        # plot_sample_propotions(right_indices, 'right-%d' % i)
        new_x[samples_in_class_i, :, :28] = x[left_indices, :, :]
        new_x[samples_in_class_i, :, -28:] = x[right_indices, :, :]
    return new_x, y
def expand_samples(x, y, n=7, seed=71):
    """Widen each sample to *n* slots, true sample in the middle slot.

    Generalization of fill_left_right_digit: the labeled image occupies
    slot floor(n/2); all other slots are random samples from other classes.

    :param x: (N, H, W) images.
    :param y: (N, C) one-hot labels (unchanged, returned as-is).
    :param n: number of horizontal slots.
    :param seed: RNG seed for reproducible decoy selection.
    :return: (new_x, y) with new_x of shape (N, H, W*n).
    """
    new_x = np.zeros((x.shape[0], x.shape[1], x.shape[2]*n))
    np.random.seed(seed)
    classes = np.argmax(y, axis=1)
    original_sample_idx = np.floor(n / 2).astype(int)
    # Place the true sample in the centre slot.
    new_x[:, :, x.shape[2]*original_sample_idx:x.shape[2]*(original_sample_idx+1)] = x
    for i in range(y.shape[1]):
        samples_in_class_i = np.squeeze(np.argwhere(classes == i))
        total = samples_in_class_i.shape[0]
        samples_not_in_class_i = np.squeeze(np.argwhere(classes != i))
        for j in range(n):
            if j == original_sample_idx:
                continue
            # Fill slot j with decoys drawn from other classes.
            indices = np.random.choice(samples_not_in_class_i, total)
            new_x[samples_in_class_i, :, j*x.shape[2]:(j+1)*x.shape[2]] = x[indices, :, :]
    return new_x, y
def create_majority_data(x, y, seed=71):
    """Build 3-slot strips where TWO slots share the labeled class.

    For each sample, a second same-class sample and one decoy from another
    class are combined and the three images are placed in a random slot
    order, so the label is the majority class of the strip.

    :param x: (N, H, W) images.
    :param y: (N, 10) one-hot labels (unchanged, returned as-is).
    :param seed: RNG seed for decoy choice and slot permutation.
    :return: (new_x, y, mask) where new_x is (N, H, W*3) and mask is an
        (N, 3) boolean array, True at the two slots holding the true class
        (permuted positions 0 and 1).
    """
    np.random.seed(seed)
    classes = np.argmax(y, axis=1)
    new_x = np.tile(x, (1, 3))
    digit_positions = np.zeros((new_x.shape[0], 3))
    for i in range(10):
        samples_in_class_i = np.squeeze(np.argwhere(classes == i))
        total = samples_in_class_i.shape[0]
        samples_not_in_class_i = np.squeeze(np.argwhere(classes != i))
        # One decoy from another class, one extra sample of the same class.
        fake_digit_idx = np.random.choice(samples_not_in_class_i, total)
        same_class_digit_idx = np.random.choice(samples_in_class_i, total)
        for j, idx in zip(range(total), samples_in_class_i):
            dd = [x[idx, :, :], x[same_class_digit_idx[j], :, :], x[fake_digit_idx[j], :, :]]
            # Shuffle which slot gets which image; remember the order.
            permuted_pos = np.random.permutation(range(3))
            digit_positions[idx] = permuted_pos
            dd_permuted = [dd[jj] for jj in permuted_pos]
            new_x[idx, :, :] = np.concatenate(dd_permuted, axis=1)
    # Positions 0 and 1 of the permutation are the two true-class images.
    return new_x, y, digit_positions <= 1
def create_middle_mark(no_x, no_digit=3):
    """Build a (no_x, no_digit) 0/1 matrix whose centre column is all ones,
    marking the middle slot as the one holding each sample's true class."""
    mark = np.zeros((no_x, no_digit))
    mark[:, no_digit // 2] = 1
    return mark
def build_cvdataset(data, k=10):
    """Merge a dataset's train/test/val splits and re-split into k folds.

    :param data: object exposing train2d/test2d/val2d DataSet attributes.
    :param k: number of StratifiedKFold splits.
    :return: list of (train, test, val) DataSet triples; note each fold's
        val entry is the SAME object as its test entry.
    """
    xar = []
    yar = []
    mar = []
    total_data = 0
    for d in [data.train2d, data.test2d, data.val2d]:
        xar.append(d.x)
        yar.append(d.y)
        mar.append(d.correct_digit_mark)
        total_data += d.y.shape[0]
    datasets = []
    x = np.vstack(xar)
    logging.info('total x shape')
    logging.info(x.shape)
    y = np.vstack(yar)
    logging.info('total y shape')
    logging.info(y.shape)
    mark = np.vstack(mar)
    logging.info('total mark shape')
    logging.info(mark.shape)
    # Stratify on the argmax class so each fold keeps the class balance.
    skf = StratifiedKFold(n_splits=k, random_state=71, shuffle=True)
    for train_indices, test_indices in skf.split(x, np.argmax(y, axis=1)):
        dtrain = DataSet(x=x[train_indices, ...], y=y[train_indices, ...], correct_digit_mark=mark[train_indices, ...])
        dtest = DataSet(x=x[test_indices, ...], y=y[test_indices, ...], correct_digit_mark=mark[test_indices, ...])
        datasets.append((dtrain, dtest, dtest))
    return datasets
class DatasetLoader():
    """Factory/cache for the named dataset variants used in experiments.

    Dataset objects are expensive to build (file I/O), so each name is
    constructed at most once and memoized.
    """

    def __init__(self, data_dir):
        # Helper to resolve dataset subdirectories under *data_dir*.
        self.prepend_dir = lambda p: '%s/%s' % (data_dir, p)
        # name -> constructed dataset object
        self.cache = dict()

    def load(self, dataset_name):
        """Return the dataset for *dataset_name*, building it on first use.

        :raises SystemError: for an unknown dataset name.
        """
        if self.cache.get(dataset_name):
            return self.cache[dataset_name]
        if dataset_name == 'mnist':
            data = MNISTData(dir_path=self.prepend_dir('mnist'))
        elif dataset_name == 'fashion-mnist':
            data = FashionMNISTData(dir_path=self.prepend_dir('fashion-mnist'))
        elif dataset_name == 'ufi-cropped':
            data = UFICroppedData(dir_path=self.prepend_dir('ufi-cropped'))
        elif dataset_name == 'mnist-3-digits':
            data = MNIST3DigitsData(dir_path=self.prepend_dir('mnist'))
        elif dataset_name == 'mnist-3-digits-maj':
            data = MNIST3DigitsWithMajorityData(dir_path=self.prepend_dir('mnist'))
        elif dataset_name == 'fashion-mnist-3-items':
            data = FashionMNIST3ItemsData(dir_path=self.prepend_dir('fashion-mnist'))
        elif dataset_name == 'fashion-mnist-3-items-maj':
            data = FashionMNIST3DigitsWithMajorityData(dir_path=self.prepend_dir('fashion-mnist'))
        elif dataset_name == 'mnist-7-digits':
            data = MNISTMiddleSampleProblem(n=7, seed=5, dir_path=self.prepend_dir('mnist'))
        elif dataset_name == 'fashion-mnist-7-items':
            data = MNISTMiddleSampleProblem(n=7, seed=15, dir_path=self.prepend_dir('fashion-mnist'))
        else:
            raise SystemError('No dataset name `%s`' % dataset_name)
        self.cache[dataset_name] = data
        return self.cache[dataset_name]
class DataSet:
    """A simple (x, y) sample container with optional per-slot marks.

    correct_digit_mark, when present, flags which slot(s) of a composite
    image contain the sample's true class.
    """

    def __init__(self, x, y, correct_digit_mark=None):
        self.x = x
        self.y = y
        self.correct_digit_mark = correct_digit_mark

    def get_batch(self, no_batch, seed=71):
        """Yield (x, y) mini-batches of size *no_batch* over a seeded shuffle.

        The last batch may be smaller when the sample count is not a
        multiple of *no_batch*.
        """
        total = len(self.x)
        np.random.seed(seed)
        order = np.random.permutation(total)
        shuffled_x = self.x[order, :, :]
        shuffled_y = self.y[order, :]
        for start in range(0, total, no_batch):
            stop = min(start + no_batch, total)
            yield (shuffled_x[start:stop], shuffled_y[start:stop])
class MNISTData:
    """MNIST digits wrapped into train/val/test DataSet splits.

    Loads the raw IDX files from *dir_path*, carves 20% of the training
    split off as validation, and exposes both flat (N, 784) and 2-D
    (N, 28, 28) views of every split.
    """

    def __init__(self, dir_path='./data/mnist'):
        self.dir_path = dir_path
        x_train, y_train = get_mnist('train', dir_path=dir_path)
        x_test, y_test = get_mnist('test', dir_path=dir_path)
        # Hold out 20% of the training data as a validation split.
        x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=71)
        self.no_classes = 10
        self.dims = (28, 28)
        self.train = DataSet(x_train, y_train)
        self.val = DataSet(x_val, y_val)
        self.test = DataSet(x_test, y_test)
        self.train2d = DataSet(x_train.reshape(-1, 28, 28), y_train)
        self.val2d = DataSet(x_val.reshape(-1, 28, 28), y_val)
        self.test2d = DataSet(x_test.reshape(-1, 28, 28), y_test)
        self.labels = {
            0: 'Digit 0',
            1: 'Digit 1',
            2: 'Digit 2',
            3: 'Digit 3',
            4: 'Digit 4',
            5: 'Digit 5',
            6: 'Digit 6',
            7: 'Digit 7',
            8: 'Digit 8',
            9: 'Digit 9'
        }

    def get_text_label(self, label_index):
        """Return the human-readable name for class *label_index*."""
        return 'Digit %d' % label_index

    def get_samples_for_vis(self, n=12):
        """Return *n* fixed-seed random test samples for visualization."""
        np.random.seed(1234)
        r = np.random.randint(0, self.test2d.y.shape[0], n)
        return self.test2d.x[r, :, :], self.test2d.y[r]
class MNIST3DigitsData(MNISTData):
    """MNIST variant with 3-slot strips: true digit centred, decoys beside.

    Every split becomes (N, 28, 84) via fill_left_right_digit, with
    correct_digit_mark flagging the centre slot. The flat train/val/test
    attributes are aliased to the 2-D versions.
    """

    def __init__(self, **kwargs):
        super(MNIST3DigitsData, self).__init__(**kwargs)
        self.dims = (28, 28*3)
        # Different seeds per split so decoys are chosen independently.
        self.train2d = DataSet(*fill_left_right_digit(self.train2d.x, self.train2d.y, seed=0),
                               correct_digit_mark=create_middle_mark(self.train2d.x.shape[0]))
        self.val2d = DataSet(*fill_left_right_digit(self.val2d.x, self.val2d.y, seed=1),
                             correct_digit_mark=create_middle_mark(self.val2d.x.shape[0])
                             )
        self.test2d = DataSet(*fill_left_right_digit(self.test2d.x, self.test2d.y, seed=3),
                              correct_digit_mark=create_middle_mark(self.test2d.x.shape[0])
                              )
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
class MNIST3DigitsWithMajorityData(MNISTData):
    """MNIST variant where each strip holds the true class twice (majority).

    Every split becomes (N, 28, 84) via create_majority_data; the mask
    marks the two slots containing the labeled class.
    """

    def __init__(self, **kwargs):
        super(MNIST3DigitsWithMajorityData, self).__init__(**kwargs)
        self.dims = (28, 28*3)
        x, y, train2d_correct_digit_mark = create_majority_data(self.train2d.x, self.train2d.y, seed=0)
        self.train2d = DataSet(x, y, correct_digit_mark=train2d_correct_digit_mark)
        assert self.train2d.correct_digit_mark.shape[0] == self.train2d.y.shape[0]
        x, y, val2d_correct_digit_mark = create_majority_data(self.val2d.x, self.val2d.y, seed=1)
        self.val2d = DataSet(x, y, correct_digit_mark=val2d_correct_digit_mark)
        assert self.val2d.correct_digit_mark.shape[0] == self.val2d.y.shape[0]
        x, y, test2d_correct_digit_mark = create_majority_data(self.test2d.x, self.test2d.y, seed=3)
        self.test2d = DataSet(x, y, correct_digit_mark=test2d_correct_digit_mark)
        assert self.test2d.correct_digit_mark.shape[0] == self.test2d.y.shape[0]
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
class MNISTMiddleSampleProblem(MNISTData):
    """n-slot variant: true sample in the middle slot, decoys elsewhere.

    Works for both MNIST and Fashion-MNIST raw files (the label names are
    switched based on the directory path). Each split becomes
    (N, 28, 28*n) via expand_samples.
    """

    def __init__(self, n=7, seed=1, **kwargs):
        super(MNISTMiddleSampleProblem, self).__init__(**kwargs)
        self.dims = (28, 28*n)
        # Consecutive seeds keep the three splits' decoys independent.
        self.train2d = DataSet(*expand_samples(self.train2d.x, self.train2d.y, n, seed=seed))
        self.train2d_correct_digit_mark = create_middle_mark(self.train2d.x.shape[0], no_digit=n)
        self.val2d = DataSet(*expand_samples(self.val2d.x, self.val2d.y, n, seed=seed+1))
        self.val2d_correct_digit_mark = create_middle_mark(self.val2d.x.shape[0], no_digit=n)
        self.test2d = DataSet(*expand_samples(self.test2d.x, self.test2d.y, n, seed=seed+2))
        self.test2d_correct_digit_mark = create_middle_mark(self.test2d.x.shape[0], no_digit=n)
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
        labels = {
            0: 'Digit 0',
            1: 'Digit 1',
            2: 'Digit 2',
            3: 'Digit 3',
            4: 'Digit 4',
            5: 'Digit 5',
            6: 'Digit 6',
            7: 'Digit 7',
            8: 'Digit 8',
            9: 'Digit 9'
        }
        # Heuristic: a 'fashion' directory means Fashion-MNIST label names.
        if 'fashion' in kwargs['dir_path']:
            labels = {
                0: 'T-shirt/top',
                1: 'Trouser',
                2: 'Pullover',
                3: 'Dress',
                4: 'Coat',
                5: 'Sandal',
                6: 'Shirt',
                7: 'Sneaker',
                8: 'Bag',
                9: 'Ankle boot'
            }
        self.labels = labels
class FashionMNISTData:
    """Fashion-MNIST wrapped into train/val/test DataSet splits.

    Same IDX layout as MNIST; 20% of the training data becomes the
    validation split, and both flat and 2-D views are exposed.
    """

    def __init__(self, dir_path='./data/fashion-mnist'):
        x_train, y_train = get_mnist('train', dir_path=dir_path)
        x_test, y_test = get_mnist('test', dir_path=dir_path)
        # Hold out 20% of the training data as a validation split.
        x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=71)
        self.dims = (28, 28)
        self.no_classes = 10
        self.train = DataSet(x_train, y_train)
        self.val = DataSet(x_val, y_val)
        self.test = DataSet(x_test, y_test)
        self.train2d = DataSet(x_train.reshape(-1, 28, 28), y_train)
        self.val2d = DataSet(x_val.reshape(-1, 28, 28), y_val)
        self.test2d = DataSet(x_test.reshape(-1, 28, 28), y_test)
        self.labels = {
            0: 'T-shirt/top',
            1: 'Trouser',
            2: 'Pullover',
            3: 'Dress',
            4: 'Coat',
            5: 'Sandal',
            6: 'Shirt',
            7: 'Sneaker',
            8: 'Bag',
            9: 'Ankle boot'
        }

    def get_samples_for_vis(self, n=12):
        """Return *n* hand-picked test samples for visualization."""
        indices = [588, 314, 47, 145, 258, 641, 561, 3410, 1094, 4059, 518, 9304][:n]
        return self.test2d.x[indices, :], self.test2d.y[indices]

    def get_text_label(self, label_index):
        """Return the garment name for class *label_index*."""
        return self.labels[label_index]
class FashionMNIST3ItemsData(FashionMNISTData):
    """Fashion-MNIST 3-slot strips: true item centred, decoys beside.

    Every split becomes (N, 28, 84) via fill_left_right_digit, with
    correct_digit_mark flagging the centre slot.
    """

    def __init__(self, **kwargs):
        super(FashionMNIST3ItemsData, self).__init__(**kwargs)
        self.dims = (28, 28*3)
        # Different seeds per split so decoys are chosen independently.
        self.train2d = DataSet(*fill_left_right_digit(self.train2d.x, self.train2d.y, seed=20),
                               correct_digit_mark=create_middle_mark(self.train2d.x.shape[0]))
        self.val2d = DataSet(*fill_left_right_digit(self.val2d.x, self.val2d.y, seed=21),
                             correct_digit_mark=create_middle_mark(self.val2d.x.shape[0])
                             )
        self.test2d = DataSet(*fill_left_right_digit(self.test2d.x, self.test2d.y, seed=23),
                              correct_digit_mark=create_middle_mark(self.test2d.x.shape[0])
                              )
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
class FashionMNIST3DigitsWithMajorityData(FashionMNISTData):
    """Fashion-MNIST strips where the true class appears twice (majority).

    Every split becomes (N, 28, 84) via create_majority_data; the mask
    marks the two slots containing the labeled class.
    """

    def __init__(self, **kwargs):
        super(FashionMNIST3DigitsWithMajorityData, self).__init__(**kwargs)
        self.dims = (28, 28*3)
        x, y, train2d_correct_digit_mark = create_majority_data(self.train2d.x, self.train2d.y, seed=0)
        self.train2d = DataSet(x, y, correct_digit_mark=train2d_correct_digit_mark)
        assert self.train2d.correct_digit_mark.shape[0] == self.train2d.y.shape[0]
        x, y, val2d_correct_digit_mark = create_majority_data(self.val2d.x, self.val2d.y, seed=1)
        self.val2d = DataSet(x, y, correct_digit_mark=val2d_correct_digit_mark)
        assert self.val2d.correct_digit_mark.shape[0] == self.val2d.y.shape[0]
        x, y, test2d_correct_digit_mark = create_majority_data(self.test2d.x, self.test2d.y, seed=3)
        self.test2d = DataSet(x, y, correct_digit_mark=test2d_correct_digit_mark)
        assert self.test2d.correct_digit_mark.shape[0] == self.test2d.y.shape[0]
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
class UFICroppedData:
    """UFI cropped face dataset: 128x128 .npy images pooled down to 64x64.

    Each split is horizontally-flip augmented (doubling its size) and
    shuffled with a fixed seed. NOTE(review): val is aliased to test
    because too little data is available for a separate split.
    """

    def __init__(self, dir_path='./data/ufi-cropped'):
        # subsampling_indices = list(np.arange(0, 128, 2))
        def avg_pooling(x):
            # 2x2 average pooling: (N, 128, 128) -> (N, 64, 64).
            new_x = np.zeros((x.shape[0], 64, 64))
            for i in range(0, x.shape[1], 2):
                for j in range(0, x.shape[2], 2):
                    new_x[:, int(i/2), int(j/2)] = np.mean(x[:, i:(i+2), j:(j+2)].reshape(-1, 4), axis=1)
            return new_x
        def flip_data(x, y):
            # Append a left-right mirrored copy of every image, then
            # shuffle originals and mirrors together (fixed seed).
            total = x.shape[0]
            new_x = np.tile(x, (2, 1, 1))
            new_y = np.tile(y, (2, 1))
            new_x[total:, :, :] = x[:, :, ::-1]
            np.random.seed(0)
            shuffled_indices = np.arange(total*2)
            np.random.shuffle(shuffled_indices)
            return new_x[shuffled_indices, :, :], new_y[shuffled_indices, :]
        x_train = avg_pooling(np.load('%s/train-x.npy' % dir_path).reshape(-1, 128, 128))
        y_train = np.load('%s/train-y.npy' % dir_path)
        # print(x_train[0])
        # print(np.argmax(y_train[0]))
        x_train, y_train = flip_data(x_train, y_train)
        # print('Train data', x_train.shape)
        x_test = avg_pooling(np.load('%s/test-x.npy' % dir_path).reshape(-1, 128, 128))
        y_test = np.load('%s/test-y.npy' % dir_path)
        x_test, y_test = flip_data(x_test, y_test)
        # print('Test data', x_test.shape)
        self.dims = (64, 64)
        # This is a bad idea but we have limited amount of data
        x_val, y_val = x_test, y_test
        self.no_classes = y_test.shape[1]
        self.train = DataSet(x_train, y_train)
        self.val = DataSet(x_val, y_val)
        self.test = DataSet(x_test, y_test)
        self.train2d = DataSet(x_train, y_train)
        self.val2d = DataSet(x_val, y_val)
        self.test2d = DataSet(x_test, y_test)

    def get_samples_for_vis(self, n=12):
        """Return hand-picked TRAINING samples for visualization."""
        print("WARNING! this is data sampled from training set not testing one")
        indices = [2785, 2973, 57, 906, 393, 3666, 3502, 1222, 731, 2659, 3400, 656]
        return self.train2d.x[indices, :], self.train2d.y[indices]

    def get_text_label(self, label_index):
        """Return the human-readable name for class *label_index*."""
        return 'Person %d' % label_index
| p16i/thesis-designing-recurrent-neural-networks-for-explainability | src/utils/data_provider.py | data_provider.py | py | 17,506 | python | en | code | 14 | github-code | 36 |
28555182089 | # This is a sample Python script.
# Press โR to execute it or replace it with your code.
# Press Double โง to search everywhere for classes, files, tool windows, actions, and settings.
import sys
import asyncio
import datetime
import time
import collections
import json
# function to execute aws cli 'iam' command
async def getAwsIamData(require_data:str) -> tuple:
    """Run `aws iam <require_data>` in a subprocess and capture its output.

    :param require_data: sub-command and arguments appended to `aws iam `.
    :return: (stdout_bytes, stderr_bytes) as returned by communicate().
    """
    command = "aws iam "
    command += require_data
    process = await asyncio.create_subprocess_shell(
        command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    return await process.communicate()
# function to get user list
async def getAwsUserList() -> str:
output, errs = await getAwsIamData("list-users")
if errs:
print("Error occurred to get user list")
return "error"
return output.decode()
# function to get access key list
async def getAwsAccessKeys(userName) -> str:
output, errs = await getAwsIamData("list-access-keys --user-name " + userName)
if errs:
print("Error occurred to get access key list:", errs)
return "error"
return output.decode()
# get access key list per user and arranging them in the dictionary
async def arrangeUserAccessKeyData(user_map, userName:str, current_time:float, valid_time:int):
    """Collect *userName*'s expired access keys into *user_map*.

    :param user_map: mapping of user name -> key details (mutated in place).
    :param userName: IAM user whose access keys are inspected.
    :param current_time: reference epoch timestamp in seconds.
    :param valid_time: validity window in seconds.
    """
    # request access key list assigned to {userName}
    raw_keys = await getAwsAccessKeys(userName)
    # Check the error sentinel BEFORE parsing: json.loads("error") would
    # raise json.JSONDecodeError instead of failing gracefully.
    if raw_keys == "error":
        return
    keys = json.loads(raw_keys)
    # iterate access keys and record the expired ones
    for key in keys["AccessKeyMetadata"]:
        if await checkIsValid(key["CreateDate"], current_time, valid_time):
            user_map[key["UserName"]]["UserName"] = key["UserName"]
            user_map[key["UserName"]]["AccessKeyId"] = key["AccessKeyId"]
            user_map[key["UserName"]]["CreateDate"] = key["CreateDate"]
# check if create_time + valid_time is older than current_time
async def checkIsValid(created_time:str, current_time:float, valid_time:int) -> bool:
    """Return True when a key created at *created_time* has expired.

    :param created_time: ISO-8601 UTC timestamp from AWS, e.g.
        '2021-01-01T00:00:00+00:00'.
    :param current_time: reference epoch timestamp in seconds.
    :param valid_time: validity window in seconds.
    :return: True if created_time + valid_time is older than current_time
        (the key should be rotated), False while the key is still valid.
    """
    # Parse the offset-aware timestamp and convert to a true UTC epoch.
    # The previous time.mktime(date.timetuple()) approach interpreted the
    # UTC timestamp in the machine's LOCAL zone, skewing every expiry
    # comparison by the local UTC offset.
    created_at = datetime.datetime.fromisoformat(created_time).timestamp()
    return created_at + valid_time < current_time
async def detectInvalidUserkey(valid_time:int):
    """Find access keys older than *valid_time* hours and write a report.

    Fetches all IAM users, gathers each user's access keys concurrently,
    keeps those created more than *valid_time* hours before now, and
    writes the result to 'invalid_access_keys.txt'.

    :param valid_time: validity window in hours.
    """
    raw_users = await getAwsUserList()
    # Check the error sentinel BEFORE json.loads: parsing the string
    # "error" would raise JSONDecodeError and skip the error message below.
    if raw_users == "error":
        print("Error occurred to get User data")
        return
    users = json.loads(raw_users)

    # dictionary to save user/key data
    user_data = collections.defaultdict(dict)
    # current time to be base
    current_time = time.time()
    # convert the window from hours to seconds
    valid_time *= 3600

    # get access keys per user and arrange them in 'user_data' dictionary
    tasks = [
        arrangeUserAccessKeyData(user_data, user["UserName"], current_time, valid_time)
        for user in users["Users"]
    ]
    # running the tasks asynchronously
    await asyncio.gather(*tasks)

    # write result(user_data) on the file
    with open("invalid_access_keys.txt", "w") as fd:
        fd.write(" - Get Invalid User List - At: " + datetime.datetime.fromtimestamp(current_time).strftime("%m/%d/%Y, %H:%M:%S") + "\n\n")
        for key, val in user_data.items():
            fd.write(" " + "{\n")
            for nkey, nval in val.items():
                fd.write(" \t" + nkey + ": " + nval + "\n")
            fd.write(" }\n")
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Guard against a missing argument: reading sys.argv[1] unconditionally
    # raised IndexError before the usage message could be printed.
    if len(sys.argv) < 2 or not int(sys.argv[1]):
        print("please put validation time to filter access keys.")
    else:
        asyncio.run(detectInvalidUserkey(int(sys.argv[1])))
| SeungwookE/InvalidAccessKeyDetector | main.py | main.py | py | 3,645 | python | en | code | 0 | github-code | 36 |
11130997604 | import csv
import os
from flask import Blueprint, redirect, render_template, request
import sqlalchemy
from forms.addTeacherForm import AddTeacherForm
from models import GroupOfTeacher, Teacher, TeacherInGroup, db
from forms.editForm import EditTeacherForm
ROWS_PER_PAGE = 5
teachers_blueprint = Blueprint("teachers_blueprint", __name__)
@teachers_blueprint.route("/teachers", methods=["get", "post"])
def teachers():
    """Render the paginated teacher list plus an empty add-teacher form.

    Reads the page number from the `page` query parameter (default 1).
    """
    page = request.args.get('page', 1, type=int)
    teachers = Teacher.query.order_by(Teacher.last_name).paginate(page=page, per_page=ROWS_PER_PAGE)
    addTeacherFormData = AddTeacherForm()
    return render_template("teachers/teachers.html",
                           teachers=teachers,
                           form=addTeacherFormData)
add_teachers_blueprint = Blueprint("add_teachers_blueprint", __name__)
@add_teachers_blueprint.route("/teachers/add", methods=["get", "post"])
def add_teachers():
    """Create a new Teacher from the submitted form, or re-render the form.

    NOTE(review): the `if True:` guard replaces a commented-out POST check;
    validate_on_submit() already implies a valid POST, so behavior is
    unchanged, but the dead condition should probably be removed.
    """
    addTeacherFormData = AddTeacherForm()
    # teachers = db.session.query(Teacher).all()
    if True: # request.method == "POST":
        if addTeacherFormData.validate_on_submit():
            teacherData = Teacher()
            teacherData.first_name = addTeacherFormData.first_name.data
            teacherData.last_name = addTeacherFormData.last_name.data
            teacherData.birthdate = addTeacherFormData.birthdate.data
            # teacherData.teacher_in_group_Id = addTeacherFormData.teacher_in_group_Id.data
            db.session.add(teacherData)
            db.session.commit()
            return redirect("/teachers")
        else:
            return render_template("teachers/addTeacherForm.html",
                                   form=addTeacherFormData)
show_edit_teachers_blueprint = Blueprint(
    "show_edit_teachers_blueprint", __name__)


@show_edit_teachers_blueprint.route("/teachers/edit")
def show_edit_teachers():
    """Render the edit form pre-filled for the teacher given by ?teacher_Id=...

    Side effect: stores the pre-edit form data in the module-level global
    ``current_data`` so that ``create_report_file`` can later diff it against
    the submitted values.
    """
    editTeacherFormData = EditTeacherForm()
    global current_data
    current_data = editTeacherFormData
    # read the id of the teacher to edit from the query string
    teacher_Id = request.args["teacher_Id"]
    # load the teacher row
    teacher_to_edit = db.session.query(Teacher).filter(
        Teacher.teacher_Id == teacher_Id).first()
    # pre-fill the form fields from the loaded row
    editTeacherFormData.teacher_Id.data = teacher_Id
    editTeacherFormData.first_name.data = teacher_to_edit.first_name
    editTeacherFormData.last_name.data = teacher_to_edit.last_name
    editTeacherFormData.birthdate.data = teacher_to_edit.birthdate
    # editTeacherFormData.teacher_in_group_Id.data = teacher_to_edit.teacher_in_group_Id
    # groups this teacher belongs to (join TeacherInGroup -> GroupOfTeacher)
    group_of_teachers = db.session.query(GroupOfTeacher).filter(
        sqlalchemy.and_(TeacherInGroup.group_of_teachers_Id == GroupOfTeacher.group_of_teachers_Id,
                        TeacherInGroup.teacher_Id == teacher_Id))
    return render_template("/teachers/editTeacherForm.html", form=editTeacherFormData,
                           group_of_teachers=group_of_teachers)
submit_edit_teachers_blueprint = Blueprint(
    "submit_edit_teachers_blueprint", __name__)


@submit_edit_teachers_blueprint.route("/teachers/edit", methods=["post"])
def submit_edit_teachers():
    """Persist the submitted edit form for a teacher and write an audit CSV.

    Raises:
        ValueError: if the submitted form fails validation.
    """
    editTeacherFormData = EditTeacherForm()
    create_report_file(editTeacherFormData)
    if editTeacherFormData.validate_on_submit():
        # read the submitted values
        teacher_Id = editTeacherFormData.teacher_Id.data
        teacher_to_edit = db.session.query(Teacher).filter(
            Teacher.teacher_Id == teacher_Id).first()
        # update the row in the DB
        teacher_to_edit.first_name = editTeacherFormData.first_name.data
        teacher_to_edit.last_name = editTeacherFormData.last_name.data
        teacher_to_edit.birthdate = editTeacherFormData.birthdate.data
        db.session.commit()
        return redirect("/teachers")
    else:
        # BUG FIX: the original did `raise("Fatal Error")`, which raises a
        # TypeError in Python 3 (strings are not exceptions); raise a real one.
        raise ValueError("Fatal Error")
def create_report_file(teacher_data):
    """Write an audit CSV diffing the pre-edit form (module global
    ``current_data``) against the submitted form ``teacher_data``.

    Files are named TeacherDataEdit<i>.csv using the first unused index i.
    """
    header = ["Data", "Previous Data", "New Data"]
    rows = [
        ["Teacher ID", current_data.teacher_Id.data, teacher_data.teacher_Id.data],
        ["First Name", current_data.first_name.data, teacher_data.first_name.data],
        ["Last Name", current_data.last_name.data, teacher_data.last_name.data],
        ["Birthdate", current_data.birthdate.data, teacher_data.birthdate.data],
    ]
    # find the first free file index
    i = 0
    while os.path.exists("TeacherDataEdit%s.csv" % i):
        i += 1
    # BUG FIX: the original leaked the file handle (no close); the context
    # manager closes it even on error.  newline="" is required by the csv
    # module docs to avoid blank lines on Windows.
    with open(f"TeacherDataEdit{i}.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(rows)
| IngNoN/School_App | controllers/teachers.py | teachers.py | py | 4,762 | python | en | code | 0 | github-code | 36 |
22706853086 | import code
import gym
import torch
from tqdm import trange
import numpy as np
import components.prioritized_memory
import components.memory
from components.filesys_manager import ExperimentPath
class BaseTrainer:
    """Base class for an off-policy RL training loop.

    Owns the exploration/evaluation gym environments, the (optionally
    prioritized) replay buffer, logging paths and checkpointing.  Subclasses
    override ``init_model``, ``expl_action``, ``eval_action``, the
    ``*learn_on_batch`` hooks and ``state_value_pred``; the base versions
    only print warnings and act uniformly at random.
    """
    def __init__(self, args):
        # init experiment hyper-parameters
        self.args = args
        self.debug = self.args.debug
        self.allow_printing = True
        self.exp_path = ExperimentPath(f"{self.args.exp_root}/{self.args.name}/{self.args.env}/{args.trainer}/{self.args.seed}")
        self.print("log directory initialized")
        # env init
        self.expl_env = gym.make(self.args.env)
        self.eval_env = gym.make(self.args.env)
        # replay buffer (prioritized variant keyed off the config flag)
        if args.prioritized:
            self.replay_buffer = components.prioritized_memory.ReplayMemory(self.args)
        else:
            self.replay_buffer = components.memory.ReplayMemory(self.args)
        # global time steps
        self.t = 0
        # exploration environment
        self.s = torch.from_numpy(self.expl_env.reset()).to(dtype=torch.float32, device=self.args.device)
        self.done = False
        self.episode_num = 0
        self.episode_len = 0
        self.episode_return = 0
        self.episode_discounted_return = 0
        # init model
        self.init_model()
        # log
        self.exp_path['timestamp[timenow,t,msg]'].csv_writerow([self.exp_path.now(), self.t, "model initialized"])
        self.exp_path['config'].json_write(vars(self.args))
        self.exp_path['model_info'].txt_write(str(self.q_learner))
        self.save('initial')
    def print(self, *args, **kwargs):
        """Timestamped print that can be muted via ``allow_printing``."""
        if self.allow_printing:
            print(",", self.exp_path.now(), self.exp_path.str(), ",", *args, **kwargs)
    def init_model(self):
        """Subclass hook: build networks/optimizers; must set all five attrs below."""
        self.print("warning: init model not implemented")
        self.q_learner = None
        self.q_target = None
        self.policy = None
        self.q_optimizer = None
        self.policy_optimizer = None
    def sample_batch(self, batch_size):
        """Draw a training batch from the replay buffer."""
        return self.replay_buffer.sample(batch_size)
    def expl_action(self, obs) -> torch.Tensor:
        """Subclass hook: exploration action; base samples uniformly in [-1, 1]."""
        self.print("warning: expl action not implemented")
        a = np.random.uniform(low=-1, high=1, size=self.expl_env.action_space.shape[0])
        return torch.from_numpy(a)
    def eval_action(self, obs) -> torch.Tensor:
        """Subclass hook: evaluation action; base samples uniformly in [-1, 1]."""
        self.print("warning: eval action not implemented")
        a = np.random.uniform(low=-1, high=1, size=self.expl_env.action_space.shape[0])
        return torch.from_numpy(a)
    def before_learn_on_batch(self):
        """Subclass hook run before each learning step."""
        self.print("warning: before learn on batch not implemented")
    def learn_on_batch(self, treeidx, s, a, r, s_, d, w):
        """Subclass hook: one gradient update from a sampled batch.

        treeidx/w are presumably the prioritized-replay tree indices and
        importance-sampling weights -- confirm against the subclass.
        """
        self.print("warning: learn on batch not implemented")
    def after_learn_on_batch(self):
        """Subclass hook run after each learning step."""
        self.print("warning: after learn on batch not implemented")
    def state_value_pred(self, s):
        """Subclass hook: predicted state value used for evaluation logging."""
        self.print("warning: state value pred not implemented")
        return -1
    def reset_expl_env(self):
        """ reset exploration env """
        self.s = self.expl_env.reset()
        self.s = torch.from_numpy(self.s).to(dtype=torch.float32, device=self.args.device)
        self.done = False
        self.episode_num += 1
        self.episode_len = 0
        self.episode_return = 0
        self.episode_discounted_return = 0
    def advance_expl_env(self, next_obs, reward, done):
        """ advance exploration env for next step """
        self.s = next_obs
        self.done = done
        self.episode_return += reward
        if self.args.reward_clip:
            reward = min(max(reward, -1), 1)
        # discounted return accumulates the (possibly clipped) reward
        self.episode_discounted_return += reward * self.args.discount ** self.episode_len
        self.episode_len += 1
    def epsilon(self):
        """Linearly anneal epsilon 1.0 -> 0.1 over args.epsilon_steps, then hold."""
        return 1 - self.t * ((1 - 0.1) / self.args.epsilon_steps) if self.t < self.args.epsilon_steps else 0.1
    def collect_init_steps(self, render=False):
        """ fill the replay buffer before learning """
        for _ in trange(self.args.min_num_steps_before_training):
            if self.done:
                self.reset_expl_env()
            # sample transition (uniform random policy during warm-up)
            a = torch.from_numpy(np.random.uniform(low=-1, high=1, size=self.expl_env.action_space.shape[0]))
            s_, r, d, _ = self.expl_env.step(a)
            s_ = torch.from_numpy(s_).to(dtype=torch.float32, device=self.args.device)
            r = r.item()
            # store into replay
            if self.args.reward_clip:
                r = min(max(r, -1), 1)
            if 'reward_std' in self.args:
                r += np.random.normal(0, self.args.reward_std) # clip, add noise
            # NOTE(review): the prioritized buffer is appended without s_ --
            # presumably it reconstructs next states internally; confirm.
            if self.args.prioritized:
                self.replay_buffer.append(self.s, a, r, d)
            else:
                self.replay_buffer.append(self.s, a, r, s_, d)
            # render
            if render:
                self.expl_env.render()
            # sample next state
            self.advance_expl_env(s_, r, d)
            self.t += 1
        self.reset_expl_env()
        self.print("finished collect init steps")
    def sample_transition(self, render=False):
        """ train one step """
        # NOTE(review): largely duplicates collect_init_steps except the policy
        # action and the logging tail -- a candidate for later refactoring.
        if self.done:
            self.reset_expl_env()
        # sample transition
        a = self.expl_action(self.s)
        s_, r, d, _ = self.expl_env.step(a)
        s_ = torch.from_numpy(s_).to(dtype=torch.float32, device=self.args.device)
        r = r.item()
        # store into replay
        if self.args.reward_clip:
            r = min(max(r, -1), 1)
        if 'reward_std' in self.args:
            r += np.random.normal(0, self.args.reward_std) # clip, add noise
        if self.args.prioritized:
            self.replay_buffer.append(self.s, a, r, d)
        else:
            self.replay_buffer.append(self.s, a, r, s_, d)
        # render
        if render:
            self.expl_env.render()
        # sample next state
        self.advance_expl_env(s_, r, d)
        # log status
        if self.done:
            self.exp_path['expl_episode_stats[t,return,discounted_return]'].csv_writerow(
                [self.t, self.episode_return, self.episode_discounted_return])
        if self.args.prioritized:
            self.exp_path['debug[t,buffer_size,epsilon]'].csv_writerow(
                [self.t, self.replay_buffer.data_buffer.index, self.epsilon()]
            )
        else:
            self.exp_path['debug[t,buffer_size]'].csv_writerow(
                [self.t, len(self.replay_buffer)]
            )
        self.t += 1
    def evaluate(self, render=False, random=False):
        """ evaluate current policy """
        self.exp_path['timestamp[timenow,t,msg]'].csv_writerow([self.exp_path.now(), self.t, "evaluation started"])
        self.print(f"evaluation started t={self.t}")
        # init eval
        s = self.eval_env.reset()
        s = torch.from_numpy(s).to(dtype=torch.float32, device=self.args.device)
        done = False
        episode_len = 0
        episode_return = 0
        episode_discounted_return = 0
        v_initials = []
        v_initials.append(self.state_value_pred(s))
        returns = []
        discounted_returns = []
        episode_lens = []
        # start eval
        for _ in trange(self.args.num_eval_steps_per_epoch):
            if done:
                # record data
                returns.append(episode_return)
                discounted_returns.append(episode_discounted_return)
                episode_lens.append(episode_len)
                # reset env
                s = self.eval_env.reset()
                s = torch.from_numpy(s).to(dtype=torch.float32, device=self.args.device)
                v_initials.append(self.state_value_pred(s))
                episode_len = 0
                episode_return = 0
                episode_discounted_return = 0
                if len(returns) >= self.args.eval_max_episode:
                    # check if enough episodes simulated
                    break
            # sample transition
            a = self.eval_action(s)
            if random:
                # NOTE(review): action_space is called like a function and
                # randint is discrete -- looks wrong for a continuous env; confirm.
                a = np.random.randint(self.eval_env.action_space())
            s_, r, d, _ = self.eval_env.step(a)
            s_ = torch.from_numpy(s_).to(dtype=torch.float32, device=self.args.device)
            r = r.item()
            # advance env
            s = s_
            done = d
            episode_return += r
            if self.args.reward_clip:
                r = min(max(r, -1), 1)
            episode_discounted_return += r * self.args.discount ** episode_len
            episode_len += 1
            # render
            if render:
                # NOTE(review): r is reassigned above, so both r and r_clip
                # print the post-clip value here.
                print(f"s={s.shape} a={a} r={r} r_clip={r}")
                self.eval_env.render()
        # finished eval
        self.print(f"returns={returns}\nmean={np.mean(returns) if len(returns) > 0 else 0}")
        if len(returns) == 0:
            # no episode terminated: log the single partial episode
            self.exp_path['eval_episode_stats[t,v_init,return,discounted_return,length]'].csv_writerow(
                [self.t, v_initials[0], episode_return, episode_discounted_return, episode_len])
            self.exp_path['eval_mean_stats[t,num_episode,v_init,return,discounted_return,length]'].csv_writerow(
                [self.t, 1, v_initials[0], episode_return, episode_discounted_return, episode_len])
        else:
            for V_init, R, discounted_R, length in zip(v_initials, returns, discounted_returns, episode_lens):
                self.exp_path['eval_episode_stats[t,v_init,return,discounted_return,length]'].csv_writerow(
                    [self.t, V_init, R, discounted_R, length])
            self.exp_path['eval_mean_stats[t,num_episode,v_init,return,discounted_return,length]'].csv_writerow(
                [self.t, len(returns), np.mean(v_initials), np.mean(returns), np.mean(discounted_returns), np.mean(episode_lens)])
        self.exp_path['timestamp[timenow,t,msg]'].csv_writerow([self.exp_path.now(), self.t, "evaluation finished"])
    def save(self, name='trainer'):
        """ save this trainer """
        # temporarily detach the heavy/unpicklable members before pickling
        buffer = self.replay_buffer
        expl_env = self.expl_env
        eval_env = self.eval_env
        # skip the things not saving
        self.replay_buffer = None
        self.expl_env = None
        self.eval_env = None
        self.exp_path["checkpoint"][f"{name}_{self.t}.pth"].save_model(self)
        self.exp_path['timestamp[timenow,t,msg]'].csv_writerow([self.exp_path.now(), self.t, "model saved"])
        self.replay_buffer = buffer
        self.expl_env = expl_env
        self.eval_env = eval_env
| APM150/Continuous_Envs_Experiments | mujoco/trainers/base_trainer.py | base_trainer.py | py | 10,727 | python | en | code | 0 | github-code | 36 |
39055642818 | #!/usr/bin/env python
from __future__ import print_function
import os, sys, json
import numpy as np
from ase.build import bulk, surface
from ase.units import Rydberg, Bohr
from ase.io import read
from ase.visualize import view
from ase.spacegroup import crystal
from ase.calculators.espresso import Espresso
# Load the run configuration (face, layers, vacuum, k-points, cutoff, mode)
# from the JSON file given on the command line.
infile = sys.argv[1]
print(infile)
with open(infile) as handle:
    system = json.loads(handle.read())
face = system['face']
layers = system['layers']
vacuum = system['vacuum']
kpts = system['kpts']
ecut = system['ecut']
mode = system['mode']
# working directory, used to locate the pseudopotential folder below
cwd = os.getcwd()
def cassiterite(show=False):
    """Build the rutile-structure SnO2 (cassiterite) bulk crystal.

    The ``show`` flag is accepted for API compatibility but is unused here.
    """
    lattice_a, lattice_c = 4.7382, 3.1871
    return crystal(
        ['Sn', 'O'],
        basis=[(0, 0, 0), (0.3, 0.3, 0.0)],
        spacegroup='P 4 2/mnm',
        cellpar=[lattice_a, lattice_a, lattice_c, 90, 90, 90],
        pbc=True,
    )
def create_surface(atoms, face=(1, 1, 0), layers=3, vacuum=10.813, kpts=([6, 3, 1])):
    """Cut a slab with the given Miller face and layer count, centered with vacuum along z.

    ``kpts`` is kept for backward compatibility but is unused: the original
    only converted it to a numpy array and discarded the result (dead code,
    now removed); the k-point mesh is passed to the calculator by the caller.
    """
    mySurface = surface(atoms, face, layers)
    mySurface.center(vacuum=vacuum, axis=2)
    return mySurface
# Build the bulk crystal and the requested slab.
sno2 = cassiterite()
sno2_surface = create_surface(sno2,
                              face=face,
                              layers=layers,
                              vacuum=vacuum,
                              kpts=kpts)
# Put together QE input dict
input_dict = {
    'control': {
        'calculation': 'scf',
        'etot_conv_thr': 1e-6,
        'nstep': 100,
        'outdir': 'sno2_test_face_{0}{1}{2}'.format(face[0], face[1], face[2]),
    },
    'system': {
        'ecutwfc': ecut,
    },
    'electrons': {
        'diagonalization': 'cg',
    },
}
# Put together pseudopotential dict
psp_dict = {'Sn': 'Sn.UPF',
            'O': 'O.UPF',
            }
# Quantum ESPRESSO calculator; pseudopotentials live next to the run dir.
calc = Espresso(input_data=input_dict,
                kpts=kpts,
                pseudo_dir=cwd + "/../pseudo",
                pseudopotentials=psp_dict,
                )
sno2_surface.set_calculator(calc)
# 'view' opens the ASE GUI; 'calc' runs the SCF calculation.
if mode == 'view':
    view(sno2_surface)
elif mode == 'calc':
    calc.calculate(sno2_surface)
    print('SnO2 PE:', sno2_surface.get_potential_energy())
| marshallmcdonnell/sno2_ase_espresso | surfaces/surfaces_sno2.py | surfaces_sno2.py | py | 2,092 | python | en | code | 0 | github-code | 36 |
71038521063 | import numpy as np
try:
import mc
except Exception:
pass
import cv2
import os
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import utils
from . import reader
class PartialCompDataset(Dataset):
    """Amodal-completion training dataset.

    Each sample pairs an instance's modal mask -- partially erased by another
    instance's mask (the "eraser") -- with the full modal mask as the target,
    optionally together with the matching RGB crop.
    """

    def __init__(self, config, phase):
        self.dataset = config['dataset']
        # pick the annotation reader for the configured dataset flavor
        if self.dataset == 'COCOA':
            self.data_reader = reader.COCOADataset(config['{}_annot_file'.format(phase)])
        elif self.dataset == 'Mapillary':
            self.data_reader = reader.MapillaryDataset(
                config['{}_root'.format(phase)], config['{}_annot_file'.format(phase)])
        else:
            self.data_reader = reader.KINSLVISDataset(
                self.dataset, config['{}_annot_file'.format(phase)])
        if config['load_rgb']:
            self.img_transform = transforms.Compose([
                transforms.Normalize(config['data_mean'], config['data_std'])
            ])
        self.eraser_setter = utils.EraserSetter(config['eraser_setter'])
        self.sz = config['input_size']
        self.eraser_front_prob = config['eraser_front_prob']
        self.phase = phase
        self.config = config
        self.memcached = config.get('memcached', False)
        self.initialized = False
        self.memcached_client = config.get('memcached_client', None)

    def __len__(self):
        return self.data_reader.get_instance_length()

    def _init_memcached(self):
        """Lazily connect to memcached (once per worker process)."""
        if not self.initialized:
            assert self.memcached_client is not None, "Please specify the path of your memcached_client"
            server_list_config_file = "{}/server_list.conf".format(self.memcached_client)
            client_config_file = "{}/client.conf".format(self.memcached_client)
            self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file, client_config_file)
            self.initialized = True

    def _load_image(self, fn):
        """Load an RGB PIL image from memcached when enabled, else from disk."""
        if self.memcached:
            # FIX: was a bare `except:` -- keep failures visible but typed so
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            try:
                img_value = mc.pyvector()
                self.mclient.Get(fn, img_value)
                img_value_str = mc.ConvertBuffer(img_value)
                img = utils.pil_loader(img_value_str)
            except Exception:
                print('Read image failed ({})'.format(fn))
                raise Exception("Exit")
            else:
                return img
        else:
            return Image.open(fn).convert('RGB')

    def _get_inst(self, idx, load_rgb=False, randshift=False):
        """Crop one instance's modal mask (and optionally RGB) to the input size.

        Degenerate instances (tiny box or empty mask) are resampled randomly.
        """
        modal, bbox, category, imgfn, _ = self.data_reader.get_instance(idx)
        centerx = bbox[0] + bbox[2] / 2.
        centery = bbox[1] + bbox[3] / 2.
        size = max([np.sqrt(bbox[2] * bbox[3] * self.config['enlarge_box']), bbox[2] * 1.1, bbox[3] * 1.1])
        if size < 5 or np.all(modal == 0):
            return self._get_inst(
                np.random.choice(len(self)), load_rgb=load_rgb, randshift=randshift)
        # shift & scale aug
        if self.phase == 'train':
            if randshift:
                centerx += np.random.uniform(*self.config['base_aug']['shift']) * size
                centery += np.random.uniform(*self.config['base_aug']['shift']) * size
            size /= np.random.uniform(*self.config['base_aug']['scale'])
        # crop to a square box around the (jittered) center
        new_bbox = [int(centerx - size / 2.), int(centery - size / 2.), int(size), int(size)]
        modal = cv2.resize(utils.crop_padding(modal, new_bbox, pad_value=(0,)),
                           (self.sz, self.sz), interpolation=cv2.INTER_NEAREST)
        # flip
        if self.config['base_aug']['flip'] and np.random.rand() > 0.5:
            flip = True
            modal = modal[:, ::-1]
        else:
            flip = False
        if load_rgb:
            rgb = np.array(self._load_image(os.path.join(
                self.config['{}_image_root'.format(self.phase)], imgfn)))  # uint8
            rgb = cv2.resize(utils.crop_padding(rgb, new_bbox, pad_value=(0, 0, 0)),
                             (self.sz, self.sz), interpolation=cv2.INTER_CUBIC)
            if flip:
                rgb = rgb[:, ::-1, :]
            rgb = torch.from_numpy(rgb.astype(np.float32).transpose((2, 0, 1)) / 255.)
            rgb = self.img_transform(rgb)  # CHW
        if load_rgb:
            return modal, category, rgb
        else:
            return modal, category, None

    def __getitem__(self, idx):
        if self.memcached:
            self._init_memcached()
        randidx = np.random.choice(len(self))
        modal, category, rgb = self._get_inst(
            idx, load_rgb=self.config['load_rgb'], randshift=True)  # modal, uint8 {0, 1}
        if not self.config.get('use_category', True):
            category = 1
        eraser, _, _ = self._get_inst(randidx, load_rgb=False, randshift=False)
        eraser = self.eraser_setter(modal, eraser)  # uint8 {0, 1}
        # erase: with eraser_front_prob the eraser occludes the instance,
        # otherwise the instance stays on top.
        erased_modal = modal.copy().astype(np.float32)
        if np.random.rand() < self.eraser_front_prob:
            erased_modal[eraser == 1] = 0  # eraser above modal
        else:
            eraser[modal == 1] = 0  # eraser below modal
        erased_modal = erased_modal * category
        # shrink eraser
        max_shrink_pix = self.config.get('max_eraser_shrink', 0)
        if max_shrink_pix > 0:
            shrink_pix = np.random.choice(np.arange(max_shrink_pix + 1))
            if shrink_pix > 0:
                shrink_kernel = shrink_pix * 2 + 1
                eraser = 1 - cv2.dilate(
                    1 - eraser, np.ones((shrink_kernel, shrink_kernel), dtype=np.uint8),
                    iterations=1)
        eraser_tensor = torch.from_numpy(eraser.astype(np.float32)).unsqueeze(0)  # 1HW
        # erase rgb
        if rgb is not None:
            rgb = rgb * (1 - eraser_tensor)
        else:
            rgb = torch.zeros((3, self.sz, self.sz), dtype=torch.float32)  # 3HW
        erased_modal_tensor = torch.from_numpy(
            erased_modal.astype(np.float32)).unsqueeze(0)  # 1HW
        # BUG FIX: np.int was removed in NumPy 1.24; np.int64 keeps the target
        # a LongTensor-compatible integer array.
        target = torch.from_numpy(modal.astype(np.int64))  # HW
        return rgb, erased_modal_tensor, eraser_tensor, target
| XiaohangZhan/deocclusion | datasets/partial_comp_dataset.py | partial_comp_dataset.py | py | 6,088 | python | en | code | 764 | github-code | 36 |
from pyspark.sql import SparkSession
from pyspark.sql import functions as fs

spark = SparkSession.builder.appName("word_count").getOrCreate()
data = spark.read.text("book.txt")
# Split each line on non-word characters and explode into one row per token.
pro_data = data.select(fs.explode(fs.split(data.value, "\\W+")).alias("words"))
# BUG FIX: DataFrame transformations are immutable -- the original discarded
# the filter result, so empty tokens from the regex split were counted too.
pro_data = pro_data.filter(pro_data.words != "")
# show() prints to stdout and returns None, so there is nothing to bind.
pro_data.select("words").groupBy("words").count().show()
| AmanSolanki007/Pyspark_problems | word_count_dataframe.py | word_count_dataframe.py | py | 353 | python | en | code | 0 | github-code | 36 |
74120455144 | import pygame
from constantes import *
from auxiliar import Auxiliar
class Background:
    """Static background image for a Pygame game.

    Attributes:
        image: the loaded Surface, scaled to (width, height).
        rect: the surface rect positioned at (x, y).
    """

    def __init__(self, x, y, width, height, path):
        """Load the image at ``path``, scale it to (width, height) and place it at (x, y)."""
        self.image = pygame.image.load(path).convert()
        self.image = pygame.transform.scale(self.image, (width, height))
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y

    def draw(self, screen):
        """Blit the background onto ``screen``; outline its rect in red when DEBUG is on."""
        screen.blit(self.image, self.rect)
        if DEBUG:
            # BUG FIX: the original referenced self.collition_rect, which is
            # never defined on this class and raised AttributeError whenever
            # DEBUG was enabled; the background's own rect is the intended box.
            pygame.draw.rect(screen, color=ROJO, rect=self.rect)
pygame.draw.rect(screen,color=ROJO,rect=self.collition_rect) | valverdecristian/cristian_valverde_tp_pygame | background.py | background.py | py | 1,080 | python | es | code | 0 | github-code | 36 |
33066511083 | """
enums.py
Contains the different types of objects for the application.
"""
# Import modules and libraries
from enum import Enum
class UserType(Enum):
    """Role a user account can hold within the application."""

    admin = "admin"                            # full administrative access
    manager = "manager"                        # manages users and work
    inspector = "inspector"                    # performs inspections
    maintenance_worker = "maintenance_worker"  # routine maintenance duties
    repair_tech = "repair_tech"                # handles repair work
    user = "user"                              # default role for new accounts
    archived = "archived"                      # deactivated/retired account
class FireClass(Enum):
    """US fire classes an extinguisher can be rated to put out."""

    A = "A"          # ordinary solid combustibles
    B = "B"          # flammable liquids and gases
    C = "C"          # energized electrical equipment
    D = "D"          # combustible metals
    K = "K"          # cooking oils and fats
    ABC = "ABC"      # multi-purpose dry chemical
    other = "other"  # default when the rating is unknown
class TicketType(Enum):
    """Kind of work a ticket requests."""

    inspect = "inspect"          # routine inspection
    damaged_ext = "damaged_ext"  # report a damaged extinguisher
    repair_ext = "repair_ext"    # repair an extinguisher
    damaged_box = "damaged_box"  # report a damaged box/cabinet
    repair_box = "repair_box"    # repair a box/cabinet
    other = "other"              # default/catch-all
class TicketState(Enum):
    """Lifecycle state of a ticket."""

    open = "open"                # newly created, not yet worked
    in_progress = "in_progress"  # actively being worked
    closed = "closed"            # work completed
    archived = "archived"        # retained for history only
archived = "archived" | Xata/cis4050-spring2023-prototipo | backend/app/enums.py | enums.py | py | 1,280 | python | en | code | 0 | github-code | 36 |
71534616424 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from icalendar import Calendar, Event
from datetime import *
from dateutil.parser import parse
from pytz import UTC # timezone
cal_file = '/Users/gyyoon/Desktop/pentaa.ics'
MEETING_STR = '[ํ์]'
TEATIME_STR = '[Tea-Time]'
WORK_STR = '[์
๋ฌด]'
NONWORK_STR = '[๋น์
๋ฌด]'
LUNCH_STR = '[์ ์ฌ]'
CLUB_STR = '[๋ํธํ]'
WORKCLEAN_STR = '[์
๋ฌด์ ๋ฆฌ]'
TALK_STR = '[๋ฉด๋ด]'
OUTWORK_STR = '[์ธ๊ทผ]'
BREAK_STR = '[ํด์]'
TOGETHER_CTR = '[ํ์]'
SEATCLEAN_STR = '[์๋ฆฌ์ ๋ฆฌ]'
EVENT_STR = '[ํ์ฌ]'
LATE_SRT = '[์ง์ฐ์ถ๊ทผ]'
# Count global var
total_cnt = 0
work_cnt = 0
nonwork_cnt = 0
meeting_cnt = 0
lunch_cnt = 0
club_cnt = 0
workclean_cnt = 0
outwork_cnt = 0
unknown_cnt = 0
break_cnt = 0
together_cnt = 0
seatclean_cnt = 0
event_cnt = 0
# Duration global var
work_dur = timedelta(hours=0, minutes=0)
nonwork_dur = timedelta(hours=0, minutes=0)
meeting_dur = timedelta(hours=0, minutes=0)
lunch_dur = timedelta(hours=0, minutes=0)
club_dur = timedelta(hours=0, minutes=0)
workclean_dur = timedelta(hours=0, minutes=0)
outwork_dur = timedelta(hours=0, minutes=0)
break_dur = timedelta(hours=0, minutes=0)
together_dur = timedelta(hours=0, minutes=0)
seatclean_dur = timedelta(hours=0, minutes=0)
event_dur = timedelta(hours=0, minutes=0)
late_dur = timedelta(hours=0, minutes=0)
def process_work_event(comp):
# Duration global var
global work_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
work_dur += duration
def process_nonwork_event(comp):
# Duration global var
global nonwork_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
nonwork_dur += duration
def process_meeting_event(comp):
# Duration global var
global meeting_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
meeting_dur += duration
def process_lunch_event(comp):
# Duration global var
global lunch_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
lunch_dur += duration
def process_club_event(comp):
# Duration global var
global club_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
club_dur += duration
def process_workclean_event(comp):
# Duration global var
global workclean_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
workclean_dur += duration
def process_outwork_event(comp):
# Duration global var
global outwork_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
outwork_dur += duration
def process_break_event(comp):
# Duration global var
global break_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
break_dur += duration
def process_together_event(comp):
# Duration global var
global together_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
together_dur += duration
def process_seatclean_event(comp):
# Duration global var
global seatclean_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
seatclean_dur += duration
def process_event_event(comp):
# Duration global var
global event_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
event_dur += duration
def process_late_event(comp):
global late_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
late_dur += duration
def print_results():
    """Print the accumulated per-category counters and durations to stdout."""
    divider = '----------------------------------'
    print(divider)
    print(f'total count : {total_cnt}')
    print(f'meeting : {meeting_cnt}')
    print(f'work : {work_cnt}')
    print(f'nonwork : {nonwork_cnt}')
    print(f'lunch : {lunch_cnt}')
    print(f'unknown {unknown_cnt}')
    print(divider)
    print(f'workdur : {work_dur}')
    print(f'nonwork dur : {nonwork_dur}')
    print(f'meeting dur : {meeting_dur}')
    print(f'lunch dur : {lunch_dur}')
    print(f'club dur : {club_dur}')
    print(f'workclean dur : {workclean_dur}')
    print(f'outwork dur : {outwork_dur}')
    print(f'break dur : {break_dur}')
    print(f'together dur : {together_dur}')
    print(f'seatclean dur : {seatclean_dur}')
    print(f'event dur : {event_dur}')
    print(f'late dur : {late_dur}')
    print(divider)
class EventProcessor:
    """Small helper for working with calendar VEVENT components."""

    def __init__(self):
        print('Initialize')

    def calc_event_duration(self, comp):
        """Return the duration (dtend - dtstart) of the given component."""
        return comp.get('dtend').dt - comp.get('dtstart').dt
def main():
    """Parse the exported .ics file and tally 2018 events by summary prefix."""
    # Count global var
    global total_cnt
    global work_cnt
    global nonwork_cnt
    global meeting_cnt
    global lunch_cnt
    global club_cnt
    global workclean_cnt
    global outwork_cnt
    global break_cnt
    global together_cnt
    global seatclean_cnt
    global event_cnt
    global unknown_cnt
    global late_dur
    # NOTE(review): consider a with-block so the handle closes on error.
    g = open(cal_file, 'rb')
    gcal = Calendar.from_ical(g.read())
    for component in gcal.walk():
        if component.name == "VEVENT":
            start_date = component.get('dtstart').dt
            end_date = component.get('dtend').dt
            # I don't know why, but same formatted strs have differecne types,
            # date or datetime, so I unified them to date type(datetime requires
            # timezone set...)
            if type(start_date) != type(date(2018, 4, 4)):
                start_date = start_date.date()
            if type(end_date) != type(date(2018, 4, 4)):
                end_date = end_date.date()
            # Maybe someday I might get inputs from user that specifies
            # the date range...
            if start_date >= date(2018, 1, 1) and end_date <= date(2018, 12, 31):
                total_cnt += 1
                event_summary = component.get('summary')
                # Dispatch on the bracketed category prefix in the summary.
                if WORK_STR in event_summary:
                    work_cnt += 1
                    process_work_event(component)
                    print(event_summary)
                elif MEETING_STR in event_summary or TEATIME_STR in \
                        event_summary or TALK_STR in event_summary:
                    # tea-time and 1:1 talks are counted as meetings
                    meeting_cnt += 1
                    process_meeting_event(component)
                elif NONWORK_STR in event_summary:
                    nonwork_cnt += 1
                    process_nonwork_event(component)
                elif LUNCH_STR in event_summary:
                    lunch_cnt += 1
                    process_lunch_event(component)
                elif CLUB_STR in event_summary:
                    club_cnt += 1
                    process_club_event(component)
                elif WORKCLEAN_STR in event_summary:
                    workclean_cnt += 1
                    process_workclean_event(component)
                elif OUTWORK_STR in event_summary:
                    outwork_cnt += 1
                    process_outwork_event(component)
                elif BREAK_STR in event_summary:
                    break_cnt += 1
                    process_break_event(component)
                elif TOGETHER_CTR in event_summary:
                    together_cnt += 1
                    process_together_event(component)
                elif SEATCLEAN_STR in event_summary:
                    seatclean_cnt += 1
                    process_seatclean_event(component)
                elif EVENT_STR in event_summary:
                    event_cnt += 1
                    process_event_event(component)
                elif LATE_SRT in event_summary:
                    # NOTE(review): copy-paste artifact -- this bumps event_cnt,
                    # not a dedicated late counter (none exists at module level);
                    # only late_dur is tracked separately.
                    event_cnt += 1
                    process_late_event(component)
                else:
                    unknown_cnt += 1
                    # print(event_summary)
            # print(component.get('dtstart').dt)
            # print(component.get('dtend').dt)
            # print(component.get('dtstamp').dt)
    g.close()
    print_results()


if __name__ == "__main__":
    main()
| Dry8r3aD/ics_parser | run.py | run.py | py | 8,555 | python | en | code | 0 | github-code | 36 |
class Solution:
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        """Return the maximum of every length-k window over nums.

        Uses a monotonically decreasing deque of indices, so each element is
        pushed and popped at most once: O(n) time, O(k) extra space.
        """
        result: List[int] = []
        window = collections.deque()  # indices; values strictly decreasing
        left = 0
        for right, value in enumerate(nums):
            # drop indices whose values can never be a window maximum
            while window and nums[window[-1]] < value:
                window.pop()
            window.append(right)
            # evict the index that just slid out of the window
            if window[0] < left:
                window.popleft()
            # once the first full window is formed, record its maximum
            if right + 1 >= k:
                result.append(nums[window[0]])
                left += 1
        return result
| neetcode-gh/leetcode | python/0239-sliding-window-maximum.py | 0239-sliding-window-maximum.py | py | 582 | python | en | code | 4,208 | github-code | 36 |
2760395481 | import os
from datetime import datetime, timedelta
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import googleapiclient.discovery
import googleapiclient.errors
import httplib2.error
import youtube_dl
import logging
class LiveBroadcast():
    """Value object describing a single YouTube live broadcast."""

    def __init__(self, broadcast_id, title, channel_id, channel_name="", m3u8_url=None, protocol="m3u8", mine=False):
        # identity and presentation
        self.id = broadcast_id
        self.title = title
        self.url = f"https://www.youtube.com/watch?v={broadcast_id}"
        # owning channel
        self.channel_id = channel_id
        self.channel_name = channel_name
        self.channel_url = f"https://www.youtube.com/channel/{channel_id}"
        # stream metadata
        self.m3u8_url = m3u8_url
        self.protocol = protocol
        self.mine = mine
class GoogleApis:
    """Thin wrapper around googleapiclient service construction and OAuth."""

    class NetworkException(Exception):
        """Raised when the Google endpoint cannot be reached."""

    class HttpException(Exception):
        """Raised when the API answers with an HTTP error."""

    class AuthException(Exception):
        """Raised when an operation needs credentials that are missing."""

    def __init__(self, api_name, api_version, scopes):
        self.api_name = api_name
        self.api_version = api_version
        self.scopes = scopes
        self.service = None  # set by auth_key() / auth_oauth()

    def is_authorized(self):
        """True once a service object has been built."""
        return self.service is not None

    def get_credentials(self, token_file, client_secrets_file, force_new=False):
        """Load cached OAuth credentials, refreshing or re-running the flow as needed."""
        creds = None
        # reuse the cached token unless the caller forces a fresh one
        if not force_new and os.path.exists(token_file):
            creds = Credentials.from_authorized_user_file(token_file, self.scopes)
        if creds and creds.valid:
            return creds
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file, self.scopes)
            creds = flow.run_console()
        # persist the new or refreshed token for the next run
        with open(token_file, "w") as token:
            token.write(creds.to_json())
        return creds

    def auth_key(self, api_key):
        """Build the service authenticated with a simple API key."""
        self.service = googleapiclient.discovery.build(
            self.api_name, self.api_version, developerKey=api_key)

    def auth_oauth(self, token_file, client_secrets_file, force_new=False):
        """Build the service authenticated with OAuth user credentials."""
        credentials = self.get_credentials(token_file, client_secrets_file, force_new)
        self.service = googleapiclient.discovery.build(
            self.api_name, self.api_version, credentials=credentials)
class YoutubeApis(GoogleApis):
    def __init__(self):
        """Configure the base client for the YouTube Data API v3 with the full-access scope."""
        super().__init__("youtube", "v3", ["https://www.googleapis.com/auth/youtube.force-ssl"])
    # Not recommended to use: costs 100 quota units and takes ~5 minutes to detect newly started broadcasts
    def search_livebroadcasts_ytapi(self, channel_id):
        """Find currently-live broadcasts on a channel via the Data API search endpoint.

        Requires prior OAuth (raises AuthException otherwise).  HTTP and
        network failures are re-raised as the wrapper's HttpException /
        NetworkException.  Returns a list of LiveBroadcast; the API does not
        expose an m3u8 URL, so that field stays None.
        """
        if not self.is_authorized():
            raise GoogleApis.AuthException("Requires OAuth")
        request = self.service.search().list(part="snippet", eventType="live", type="video", channelId=channel_id)
        livestreams = []
        try:
            res = request.execute()
            items = res.get("items", [])
            for item in items:
                single_stream = LiveBroadcast(
                    item.get("id").get("videoId"),
                    item.get("snippet").get("title"),
                    channel_id
                )
                livestreams.append(single_stream)
        except googleapiclient.errors.HttpError as e:
            raise GoogleApis.HttpException(str(e))
        except httplib2.error.ServerNotFoundError as e:
            raise GoogleApis.NetworkException(str(e))
        return livestreams
def search_livebroadcasts(self, channel_id):
channel_url = f"https://www.youtube.com/channel/{channel_id}"
options = {
"playlistend": 1, # only the first item
"quiet": True
}
livestreams = []
with youtube_dl.YoutubeDL(options) as yt_dl:
try:
res = yt_dl.extract_info(channel_url, download=False)
res_item = res["entries"][0]["entries"][0]
if res_item["protocol"] == "m3u8":
single_stream = LiveBroadcast(
res_item["id"],
res_item["title"],
channel_id,
channel_name=res_item["channel"],
m3u8_url=res_item["url"]
)
livestreams.append(single_stream)
except youtube_dl.utils.DownloadError as e:
raise GoogleApis.NetworkException(f"youtube-dl failed to search live broadcasts: {str(e)}")
except (IndexError, KeyError):
pass # no livestreams found
return livestreams
def parse_livestream_res(self, res):
ingestion_info = res.get("cdn").get("ingestionInfo")
res_data = {
"id": res.get("id", ""), # ex 'AniW-ozy_koWoLjDw3F2Rg1618885401806773'
"rtmp_url": ingestion_info.get("ingestionAddress", ""),
"rtmp_key": ingestion_info.get("streamName", "")
}
return res_data
def list_videos(self, video_id):
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
request = self.service.videos().list(
part="contentDetails,id,snippet,status",
id=video_id
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return res.get("items")[0]
# Creates the RTMP ingestion point that can be reused for every stream
def insert_livestream(self, title, fps="variable", resolution="variable"):
# fps can be "30fps", "60fps"
# resolution "1080p", "720p", "480p", etc
# both can be set to "variable" for automatic detection
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
request = self.service.liveStreams().insert(
# part="snippet,cdn,id,status",
part = "id,cdn",
body={
"cdn": {
"ingestionType": "rtmp",
"resolution": resolution,
"frameRate": fps
},
"snippet": {
"title": title
}
}
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return self.parse_livestream_res(res)
def create_variable_livestream(self, title):
livestreams = self.list_livestream()
variable_stream = None
for livestream in livestreams:
if livestream.get("cdn").get("resolution") == "variable":
variable_stream = livestream
break
# Seems like YT will always create a default variable stream if deleted
variable_stream_data = None
if variable_stream is None:
logging.info("Variable livestream not found, creating new one")
variable_stream_data = self.insert_livestream(title)
else:
variable_stream_data = self.parse_livestream_res(variable_stream)
return variable_stream_data
def list_livestream(self):
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
request = self.service.liveStreams().list(
part="id,cdn,snippet,status",
mine=True
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return res.get("items", [])
# Creates the actual stream video instance that viewers see
def insert_broadcast(self, title, description=None, archive=True, privacy="public"):
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
# Privacy may be: "public", "private", "unlisted"
broadcast_date = datetime.utcnow()
#broadcast_date += timedelta(minutes=1)
request = self.service.liveBroadcasts().insert(
part="id,snippet,contentDetails,status",
body={
"contentDetails": {
"enableDvr": archive,
"enableAutoStart": True,
"enableAutoStop": False
},
"snippet": {
"scheduledStartTime": broadcast_date.isoformat(),
"title": title,
"description": description
},
"status": {
"privacyStatus": privacy
}
}
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
res_data = {
"id": res.get("id", "") # ex '1b9GoutrU7k'
}
return res_data
def list_broadcast(self):
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
# acceptable status values: complete, live, testing
request = self.service.liveBroadcasts().list(
part="id,snippet,contentDetails,status",
mine=True
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return res.get("items", [])
def transition_broadcast(self, broadcast_id, status):
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
# acceptable status values: complete, live, testing
request = self.service.liveBroadcasts().transition(
broadcastStatus=status,
id=broadcast_id,
part="id"
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return res
def bind_broadcast(self, broadcast_id, stream_id):
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
request = self.service.liveBroadcasts().bind(
id=broadcast_id,
part="id,snippet,contentDetails,status",
streamId=stream_id
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return res
def create_rtmp_broadcast(self, title, description, privacy):
# First, check if a stream exists
stream_data = self.create_variable_livestream("Variable stream")
broadcast_data = self.insert_broadcast(title, description, privacy=privacy)
data = {
"video_id": broadcast_data["id"],
"rtmp_url": stream_data["rtmp_url"],
"rtmp_key": stream_data["rtmp_key"]
}
self.bind_broadcast(data["video_id"], stream_data["id"])
return data
# TODO support other quality levels?
# TODO distinguish between net and param exceptions
def get_stream_m3u8_url(self, video_url):
options = {
"noplaylist": True,
}
playlist_url = None
with youtube_dl.YoutubeDL(options) as yt_dl:
try:
res = yt_dl.extract_info(video_url, download=False)
playlist_url = res["url"]
except youtube_dl.utils.DownloadError as e:
raise GoogleApis.NetworkException(f"youtube-dl failed to download m3u8: {str(e)}")
return playlist_url
| vachau/youtube-restreamer | utils/apis.py | apis.py | py | 12,777 | python | en | code | 6 | github-code | 36 |
from collections import defaultdict
import os
import pandas as pd

# Count how many check-ins each POI receives in the Foursquare NYC data set,
# then dump the sorted per-POI counts, excluding the 10 most popular POIs.
path = '/home/djf/djf/POI/CDRF/data/Foursquare_NYC.txt'

dic = defaultdict(int)
# BUG FIX: the input file was opened and never closed; use a context manager.
with open(path, 'r') as f:
    for line in f:
        # each line: user, timestamp, latitude, longitude, POI id (tab-separated)
        user, t, lat, lon, POI = line.strip().split('\t')
        dic[int(POI)] += 1

counts = sorted(dic.values())
with open('counts.txt', 'w') as f:
    for item in counts[:-10]:
        f.write('{}\n'.format(item))
| Mediocre250/CTMR | long_tail.py | long_tail.py | py | 462 | python | en | code | 0 | github-code | 36 |
16154091028 | from django.contrib import admin
from .models import InstrumentItem
# Register your models here.
class InstrumentItemAdmin(admin.ModelAdmin):
    """Admin configuration for instrument items.

    Allows free-text search on the item definition, filtering by instrument,
    and shows the per-item parameters (discrimination/difficulty/guessing/
    upper_asymptote -- presumably IRT model parameters, judging by the names)
    in the change list.
    """
    search_fields = ['definition']
    list_filter = ['instrument']
    list_display = ['definition','instrument','discrimination','difficulty','guessing','upper_asymptote']


admin.site.register(InstrumentItem, InstrumentItemAdmin)
| langcog/web-cdi | webcdi/cdi_forms/cat_forms/admin.py | admin.py | py | 375 | python | en | code | 7 | github-code | 36 |
37213151621 | #!/usr/bin/env python3
import argparse
from pprint import pprint
import json
import hlib
def check_temp(cl, device_id):
    """Look up the current temperature reported by a Hue device.

    `device_id` may be a device id (its temperature service is located first)
    or directly a temperature-resource id.

    Returns:
        (temperature, device_name) on success; (None, None) when the device
        has no temperature sensor or the request fails. device_name is None
        when the id was already a temperature resource.
    """
    device_name = None
    # first, try and find the device as a device
    url = f"/clip/v2/resource/device/{device_id}"
    resp = cl.get(url)
    if resp.status_code == 200:
        data = resp.json()
        device = data['data'][0]
        # see if the device has a temperature service
        temp_id = None
        services = device.get('services', [])
        for service in services:
            if service['rtype'] == 'temperature':
                temp_id = service['rid']
                break
        if temp_id is None:
            print("Device has no temperature sensor")
            for service in services:
                print(f"- {service['rtype']}")
            return None, None
        device_name = device['metadata']['name']
        device_id = temp_id

    # now get the temperature
    url = f"/clip/v2/resource/temperature/{device_id}"
    resp = cl.get(url)
    if resp.status_code != 200:
        print(f"Request failed with {resp.status_code} {resp.reason}")
        # BUG FIX: previously returned a bare None, which broke the
        # `temp, device_name = check_temp(...)` unpacking in main()
        return None, None
    data = resp.json()
    device = data['data'][0]
    temp = device['temperature']['temperature']
    return float(temp), device_name
def main():
    """CLI entry point: print the temperature reported by each given Hue device id."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('device_id', help='id of the device to query', type=str, nargs='+')
    args = parser.parse_args()

    bridge = hlib.find_bridge()
    if bridge is None:
        print("Error: failed to locate a bridge")
        return

    cfg = hlib.load_config()
    cl = hlib.new_client(bridge, cfg['user_name'])

    print("Temperatures:")
    for idx, device_id in enumerate(args.device_id):
        temp, device_name = check_temp(cl, device_id)
        if temp is None:
            # lookup failed; check_temp already printed the reason
            continue
        if device_name is None:
            print(f"{idx:02d} {temp}")
        else:
            print(f"{idx:02d} {temp} at {device_name}")
    # (removed a large block of commented-out pushover-notification code;
    # see check-config scripts in this repo for the working pattern)


if __name__ == "__main__":
    main()
| parlaynu/hue-utilities | bin/check-temp.py | check-temp.py | py | 2,946 | python | en | code | 0 | github-code | 36 |
10423506179 | # coding: utf-8
import xlrd
def getProgramList(filepath):
    """Read the programme spreadsheet and return a per-category programme list.

    Returns {category: [{u'่็ฎๅ็งฐ': name, u'ๆญๅบ็ฝ็ซ': site}, ...]} for the
    three sheets ็ฝ็ปๅง / ็ฝ็ป็ตๅฝฑ / ็ฝ็ป็ปผ่บ.  The programme name comes
    from column 1 and the hosting site from column 3; the first two rows of
    every sheet are treated as headers and skipped.
    """
    data = xlrd.open_workbook(filepath)
    program_list = dict()
    # The three sheets share the same layout, so process them uniformly
    # (replaces three copy-pasted identical loops).
    for sheet_name in (u'็ฝ็ปๅง', u'็ฝ็ป็ตๅฝฑ', u'็ฝ็ป็ปผ่บ'):
        table = data.sheet_by_name(sheet_name)
        programs = []
        for i in range(2, table.nrows):
            one_program = dict()
            one_program[u'่็ฎๅ็งฐ'] = table.cell(i, 1).value
            one_program[u'ๆญๅบ็ฝ็ซ'] = table.cell(i, 3).value
            programs.append(one_program)
        program_list[sheet_name] = programs
    return program_list
def get_next_keyword(filepath, idx):
    """Return the keyword stored in row `idx`, column 0 of Sheet1 in the workbook."""
    data = xlrd.open_workbook(filepath)
    table = data.sheet_by_name('Sheet1')
    # (removed unused nrows/ncols locals)
    return table.cell(idx, 0).value
if __name__ == "__main__":
program_list = getProgramList(u"ๅพ็ๆ
ๅตๆฑๆป0213.xlsx")
print(program_list)
| LayneIns/CrawlerProject | crawl2/dataHelper/fetchProgram.py | fetchProgram.py | py | 1,465 | python | en | code | 1 | github-code | 36 |
import cv2
import numpy as np
import csv
import math

# Compute the Color Coherence Vector (CCV) of a query image and rank the
# dataset images listed in ccv_feat.csv by Euclidean distance to it.

img = cv2.imread(".//new_dataset//250.jpg")
img = cv2.GaussianBlur(img, (5, 5), 0)

n = 64          # number of quantization bins per channel
div = 256 // n  # quantization step

# Quantize each channel down to n levels.
# (The original performed this exact quantization twice in a row; the second
# pass was a no-op on already-quantized data and has been removed.)
quantize = np.vectorize(lambda x, d: int(x // d) * d)
img = cv2.merge([quantize(ch, div).astype(np.uint8) for ch in cv2.split(img)])

rows, cols, _ = img.shape
connectivity = 8
tau = rows * cols * 0.1  # coherence threshold: 10% of the image area

# Build the CCV: for every channel and every quantized level, count coherent
# (alpha) vs incoherent (beta) pixels using connected-component sizes.
total = []
for ch in cv2.split(img):
    for k in range(0, 256, div):
        mask = (ch == k).astype(np.uint8)
        output = cv2.connectedComponentsWithStats(mask, connectivity, cv2.CV_32S)
        stats = output[2]
        alpha = 0
        beta = 0
        for r in stats[1:]:  # skip the background component (label 0)
            if r[4] >= tau:   # r[4] is the component's pixel count (CC_STAT_AREA)
                alpha += r[4]
            else:
                beta += r[4]
        total.append(alpha)
        total.append(beta)

# Rank the stored feature vectors by Euclidean distance to the query's CCV.
dist = []
name = []
with open('ccv_feat.csv', 'r') as csvFile:
    reader = csv.reader(csvFile)
    for i, feat_row in enumerate(reader):
        distance = math.sqrt(sum((float(a) - float(b)) ** 2 for a, b in zip(feat_row, total)))
        dist.append(distance)
        name.append(str(i) + ".jpg")

di = dict(zip(name, dist))
sorted_di = sorted(di.items(), key=lambda kv: kv[1])

# Show the 10 closest matches.
for t, (key, val) in enumerate(sorted_di[:10]):
    res = cv2.imread(".//new_dataset//" + key)
    cv2.imshow(str(t), res)
cv2.waitKey(0)  # BUG FIX: without a waitKey the result windows close immediately
| kumar6rishabh/cbir-search-engine | ccv_searcher.py | ccv_searcher.py | py | 1,968 | python | en | code | 0 | github-code | 36 |
__docformat__ = "reStructuredText"
__author__ = "davidh"

import logging
import os
import re
import sys
from collections import namedtuple
from datetime import datetime, timedelta
from glob import glob

LOG = logging.getLogger(__name__)

# Pattern for Himawari-8 AHI filenames, capturing date/time, band, resolution,
# and extension, e.g. "HS_H08_20150718_0100_B01_FLDK_R10.<ext>"
FILENAME_RE = r"HS_H08_(?P<date>\d{8})_(?P<time>\d{4})_(?P<band>B\d{2})_FLDK_(?P<res>R\d+)\.(?P<ext>.+)"
fn_re = re.compile(FILENAME_RE)
# Timestamp format used inside case-directory names
DT_FORMAT = "%Y%m%d_%H%M"
# Case directory name: <start>_<end>_<delta-minutes, zero padded>
CASE_NAME_FORMAT = "{start}_{end}_{delta:02d}"
# A case: topic title, inclusive start/end datetimes, step between frames,
# and which bands to include ("all" throughout this file).
DataCase = namedtuple("DataCase", ["topic_title", "start", "end", "delta", "bands"])

### Guam Cases ###
guam_cases: dict = {}

# Kathy's Cases
guam_cases["Introduction"] = []
guam_cases["Introduction"].append(
    DataCase(
        "Introduction", datetime(2015, 7, 17, 21, 0, 0), datetime(2015, 7, 18, 20, 0, 0), timedelta(minutes=60), "all"
    )
)
guam_cases["Introduction"].append(
    DataCase(
        "Introduction", datetime(2015, 7, 18, 1, 0, 0), datetime(2015, 7, 18, 3, 20, 0), timedelta(minutes=10), "all"
    )
)
guam_cases["Introduction"].append(
    DataCase(
        "Introduction", datetime(2015, 7, 18, 14, 0, 0), datetime(2015, 7, 18, 16, 0, 0), timedelta(minutes=10), "all"
    )
)
guam_cases["Introduction"].append(
    DataCase("Introduction", datetime(2016, 3, 9, 0, 0, 0), datetime(2016, 3, 9, 4, 0, 0), timedelta(minutes=60), "all")
)
guam_cases["Introduction"].append(
    DataCase(
        "Introduction", datetime(2016, 3, 9, 1, 30, 0), datetime(2016, 3, 9, 4, 0, 0), timedelta(minutes=10), "all"
    )
)

# Scott's Cases
guam_cases["Water Vapor"] = []
guam_cases["Water Vapor"].append(
    DataCase(
        "Water Vapor", datetime(2015, 10, 7, 0, 0, 0), datetime(2015, 10, 8, 0, 0, 0), timedelta(minutes=30), "all"
    )
)
guam_cases["Water Vapor"].append(
    DataCase(
        "Water Vapor", datetime(2016, 2, 19, 19, 0, 0), datetime(2016, 2, 20, 5, 0, 0), timedelta(minutes=60), "all"
    )
)

# Tim's Cases
guam_cases["Weighting Functions"] = []
# start == end with delta 0 means a single time step (main() breaks on delta 0)
guam_cases["Weighting Functions"].append(
    DataCase(
        "Weighting Functions",
        datetime(2015, 9, 20, 2, 30, 0),
        datetime(2015, 9, 20, 2, 30, 0),
        timedelta(minutes=0),
        "all",
    )
)
guam_cases["Weighting Functions"].append(
    DataCase(
        "Weighting Functions",
        datetime(2015, 9, 20, 0, 0, 0),
        datetime(2015, 9, 20, 6, 0, 0),
        timedelta(minutes=60),
        "all",
    )
)
guam_cases["Weighting Functions"].append(
    DataCase(
        "Weighting Functions",
        datetime(2015, 9, 20, 1, 30, 0),
        datetime(2015, 9, 20, 2, 30, 0),
        timedelta(minutes=10),
        "all",
    )
)
guam_cases["Weighting Functions"].append(
    DataCase(
        "Weighting Functions",
        datetime(2015, 9, 20, 1, 0, 0),
        datetime(2015, 9, 20, 3, 0, 0),
        timedelta(minutes=10),
        "all",
    )
)

# Jordan's Cases
guam_cases["Extra"] = []
guam_cases["Extra"].append(
    DataCase("Extra", datetime(2015, 8, 17, 12, 0, 0), datetime(2015, 8, 18, 12, 0, 0), timedelta(minutes=60), "all")
)
guam_cases["Extra"].append(
    DataCase("Extra", datetime(2015, 8, 17, 22, 0, 0), datetime(2015, 8, 18, 1, 0, 0), timedelta(minutes=10), "all")
)
# NOTE(review): end (Aug 15) precedes start (Aug 24) below, so main()'s loop
# will process no files for this case -- probably a typo for Aug 24 21:00;
# confirm before changing the data.
guam_cases["Extra"].append(
    DataCase("Extra", datetime(2015, 8, 24, 15, 0, 0), datetime(2015, 8, 15, 21, 0, 0), timedelta(minutes=60), "all")
)
guam_cases["Extra"].append(
    DataCase("Extra", datetime(2015, 8, 25, 2, 0, 0), datetime(2015, 8, 25, 5, 0, 0), timedelta(minutes=10), "all")
)
def main():
    """Mirror AHI geotiffs into per-topic case directories via hardlinks.

    For every case in guam_cases, creates <base>/<topic>/<start_end_delta>/
    and hardlinks the matching geotiffs for each time step into it.
    Returns None (used as the process exit status via sys.exit(main())).
    """
    import argparse

    parser = argparse.ArgumentParser(description="Regenerate or generate mirrored AHI data structure")
    parser.add_argument(
        "base_ahi_dir",
        default="/odyssey/isis/tmp/davidh/sift_data/ahi",
        help="Base AHI directory for the geotiff data files " "(next child directory is the full dated directory)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbosity",
        action="count",
        default=int(os.environ.get("VERBOSITY", 2)),
        help="each occurrence increases verbosity 1 level through " "ERROR-WARNING-Info-DEBUG (default Info)",
    )
    parser.add_argument("--overwrite", action="store_true", help="Overwrite existing hardlinks")
    args = parser.parse_args()

    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    level = levels[min(3, args.verbosity)]
    logging.basicConfig(level=level)

    if not os.path.isdir(args.base_ahi_dir):
        raise NotADirectoryError("Directory does not exist: %s" % (args.base_ahi_dir,))
    # Globs below are relative to the base directory
    os.chdir(args.base_ahi_dir)

    for section_name, cases in guam_cases.items():
        for case in cases:
            start_str = case.start.strftime(DT_FORMAT)
            end_str = case.end.strftime(DT_FORMAT)
            # Note this only uses the minutes!
            case_name = CASE_NAME_FORMAT.format(
                start=start_str, end=end_str, delta=int(case.delta.total_seconds() / 60.0)
            )
            case_dir = os.path.join(args.base_ahi_dir, section_name, case_name)
            if not os.path.isdir(case_dir):
                LOG.debug("Creating case directory: %s", case_dir)
                os.makedirs(case_dir)
            else:
                # An existing case directory is treated as already mirrored
                LOG.error("Case directory already exists: %s", case_dir)
                continue
            # Walk the case's time range, hardlinking every matching band file
            t = case.start
            while t <= case.end:
                glob_pattern = t.strftime("%Y_%m_%d_%j/%H%M/*_%Y%m%d_%H%M_B??_*.merc.tif")
                t = t + case.delta
                matches = glob(glob_pattern)
                if len(matches) == 0:
                    LOG.error("Zero files found matching pattern: %s", glob_pattern)
                    continue
                for input_pathname in matches:
                    fn = os.path.basename(input_pathname)
                    link_path = os.path.join(case_dir, fn)
                    if os.path.exists(link_path) and not args.overwrite:
                        LOG.debug("Link '%s' already exists, skipping...", link_path)
                        continue
                    LOG.debug("Creating hardlink '%s' -> '%s'", link_path, input_pathname)
                    os.link(input_pathname, link_path)
                # A zero delta would never advance t; break after one step
                if int(case.delta.total_seconds()) == 0:
                    LOG.debug("Only one file needed to meet delta of 0")
                    break
    LOG.info("done mirroring files")


if __name__ == "__main__":
    sys.exit(main())
| ssec/sift | uwsift/project/organize_data_topics.py | organize_data_topics.py | py | 6,413 | python | en | code | 45 | github-code | 36 |
import os

import pandas as pd
from sklearn.model_selection import KFold, train_test_split

# Split the Titanic training data into train/test sets, shard each set into
# several CSV "data samples", and write them under the assets directory.

root_path = os.path.dirname(__file__)
asset_path = os.path.join(root_path, '../assets')

# load the full titanic example data
data = pd.read_csv(os.path.join(root_path, '../data/train.csv'))

# train / test split
train_data, test_data = train_test_split(data, test_size=0.2)

# number of data samples for the train and test sets
N_TRAIN_DATA_SAMPLES = 10
N_TEST_DATA_SAMPLES = 2

train_test_configs = [
    {
        'data': train_data,
        'n_samples': N_TRAIN_DATA_SAMPLES,
        'data_samples_root': os.path.join(asset_path, 'train_data_samples'),
        'data_samples_content': [],
    },
    {
        'data': test_data,
        'n_samples': N_TEST_DATA_SAMPLES,
        'data_samples_root': os.path.join(asset_path, 'test_data_samples'),
        'data_samples_content': [],
    },
]

# generate data samples: KFold is used purely as a convenient way to produce
# n_samples disjoint, shuffled index subsets covering the whole set
for conf in train_test_configs:
    kf = KFold(n_splits=conf['n_samples'], shuffle=True)
    for _, index in kf.split(conf['data']):
        conf['data_samples_content'].append(conf['data'].iloc[index])

# save data samples
for conf in train_test_configs:
    for i, data_sample in enumerate(conf['data_samples_content']):
        filename = os.path.join(conf['data_samples_root'], f'data_sample_{i}/data_sample_{i}.csv')
        # exist_ok makes the script re-runnable (it used to crash with
        # FileExistsError if the directories were left from a previous run)
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w') as f:
            data_sample.to_csv(f)
| Esadruhn/owkin_elixir_hackathon | substra_materials/titanic_example/titanic/scripts/generate_data_samples.py | generate_data_samples.py | py | 1,475 | python | en | code | 1 | github-code | 36 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys

try:
    import petsclinter as pl
except ModuleNotFoundError as mnfe:
    # petsclinter ships inside the PETSc source tree; when it is not installed
    # as a package, import it from $PETSC_DIR/lib/petsc/bin/maint/petsclinter
    try:
        petsc_dir = os.environ['PETSC_DIR']
    except KeyError as ke:
        raise RuntimeError('Must set PETSC_DIR environment variable') from ke
    sys.path.insert(0, os.path.join(petsc_dir, 'lib', 'petsc', 'bin', 'maint', 'petsclinter'))
    import petsclinter as pl
def __prepare_ns_args(ns_args, parser):
    """Fill in SLEPc-specific defaults and compiler/include flags on the parsed args.

    Requires ns_args.slepc_dir to be set (via --SLEPC_DIR or the environment),
    and ns_args.petsc_arch for locating the build directory.  Mutates and
    returns the namespace.

    Raises:
        RuntimeError: when slepc_dir could not be determined.
    """
    slepc_mansecs = ['eps', 'lme', 'mfn', 'nep', 'pep', 'svd', 'sys']
    slepc_aux_mansecs = ['bv', 'ds', 'fn', 'rg', 'st']
    if ns_args.slepc_dir is None:
        raise RuntimeError('Could not determine SLEPC_DIR from environment, please set via options')

    extra_compiler_flags = [
        '-I' + os.path.join(ns_args.slepc_dir, 'include'),
        '-I' + os.path.join(ns_args.slepc_dir, ns_args.petsc_arch, 'include')
    ]
    # Pick up any extra include directives recorded by SLEPc's configure
    slepcvariables = os.path.join(ns_args.slepc_dir, ns_args.petsc_arch, 'lib', 'slepc', 'conf', 'slepcvariables')
    with open(slepcvariables, 'r') as sv:
        for line in sv:
            if 'INCLUDE' in line:
                extra_compiler_flags.extend(line.split('=', 1)[1].split())

    # Only the private SLEPc impl headers that actually exist on disk
    mansecimpls = [m + 'impl.h' for m in slepc_mansecs + slepc_aux_mansecs] + [
        'slepcimpl.h', 'vecimplslepc.h'
    ]
    extra_header_includes = [
        f'#include <slepc/private/{header_file}>'
        for header_file in os.listdir(os.path.join(ns_args.slepc_dir, 'include', 'slepc', 'private'))
        if header_file in mansecimpls
    ]

    # Override src_path/patch_dir only when the user left them at their defaults
    if ns_args.src_path == parser.get_default('src_path'):
        ns_args.src_path = os.path.join(ns_args.slepc_dir, 'src')
    if ns_args.patch_dir == parser.get_default('patch_dir'):
        ns_args.patch_dir = os.path.join(ns_args.slepc_dir, 'slepcLintPatches')
    # prepend these
    ns_args.extra_compiler_flags = extra_compiler_flags + ns_args.extra_compiler_flags
    # replace these
    if not ns_args.extra_header_includes:
        ns_args.extra_header_includes = extra_header_includes
    return ns_args
def command_line_main():
    """Entry point for the slepclinter command line tool.

    Registers the SLEPc classids with petsclinter's checks, adds the
    --SLEPC_DIR option on top of petsclinter's own argument parser, prepares
    the SLEPc-specific namespace defaults, then delegates to petsclinter's
    main driver and returns its exit status.
    """
    import argparse

    import petsclinter.main

    # Map SLEPc object struct names to their classid macros so the linter can
    # validate header checks for SLEPc types as well as PETSc ones
    slepc_classid_map = {
        '_p_BV *' : 'BV_CLASSID',
        '_p_DS *' : 'DS_CLASSID',
        '_p_FN *' : 'FN_CLASSID',
        '_p_RG *' : 'RG_CLASSID',
        '_p_ST *' : 'ST_CLASSID',
        '_p_EPS *' : 'EPS_CLASSID',
        '_p_PEP *' : 'PEP_CLASSID',
        '_p_NEP *' : 'NEP_CLASSID',
        '_p_SVD *' : 'SVD_CLASSID',
        '_p_MFN *' : 'MFN_CLASSID',
        '_p_LME *' : 'LME_CLASSID',
    }
    for struct_name, classid_name in slepc_classid_map.items():
        pl.checks.register_classid(struct_name, classid_name)

    # add_help=False so petsclinter's own parser can supply -h/--help
    parser = argparse.ArgumentParser(prog='slepclinter', add_help=False)
    group_slepc = parser.add_argument_group(title='SLEPc location settings')
    group_slepc.add_argument('--SLEPC_DIR', required=False, default=os.environ.get('SLEPC_DIR', None), help='if this option is unused defaults to environment variable $SLEPC_DIR', dest='slepc_dir')
    args, parser = pl.main.parse_command_line_args(parent_parsers=[parser])
    args = __prepare_ns_args(args, parser)
    return pl.main.namespace_main(args)


if __name__ == '__main__':
    sys.exit(command_line_main())
| firedrakeproject/slepc | lib/slepc/bin/maint/slepcClangLinter.py | slepcClangLinter.py | py | 3,156 | python | en | code | 2 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.