seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
29752329020 | # days to seconds, hours to minutes
# To represent an interval of time, create a timedelta instance.
from datetime import timedelta
a = timedelta(days=2, hours=6)
b = timedelta(hours=4.5)
c = a + b
print(c.days)
print(c.seconds)        # seconds *within* the last partial day, not the total
print(c.seconds / 3600)
print(c.total_seconds() / 3600)

# datetime +/- timedelta gives calendar-correct date arithmetic.
from datetime import datetime
a = datetime(2012, 9, 23)
print(a + timedelta(days=10))
b = datetime(2012, 12, 2)
d = b - a
print('interval days',d.days)
now = datetime.today()
print('Time and Date: ',now)
print(now + timedelta(minutes=10))

# datetime is aware of leap years
a = datetime(2012, 3, 1)
b = datetime(2012, 2, 28)
print(a - b)  # 2012 is a leap year -> 2 days
c = datetime(2013, 3, 1)
d = datetime(2013, 2, 28)
print(c-d)    # 2013 is not -> 1 day

a1 = datetime(2012, 9, 23)
# timedelta has no 'months' keyword; calendar-aware month arithmetic
# needs dateutil's relativedelta instead.
from dateutil.relativedelta import relativedelta
print(a1 + relativedelta(months=+1))
print(a1 + relativedelta(months=+4))

# Time between 2 dates: plain subtraction gives a timedelta,
# relativedelta(b, a) gives a calendar-split (months/days) difference.
b = datetime(2012, 12, 21)
d = b - a
print(d)
d = relativedelta(b, a)
print(d)
# d.months / d.days hold the calendar-split components
# Determining Last Friday's Date: find the most recent *past* occurrence
# of a given day of the week (e.g. last Friday).
from datetime import datetime, timedelta

# Names ordered to match datetime.weekday() (Monday == 0 ... Sunday == 6).
weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
            'Friday', 'Saturday', 'Sunday']


def get_previous_byday(dayname, start_date=None):
    """Return the latest date strictly before *start_date* (default: now)
    that falls on the weekday named *dayname*."""
    if start_date is None:
        start_date = datetime.today()
    offset = (start_date.weekday() - weekdays.index(dayname)) % 7
    # Zero offset means start_date itself is that weekday; go back a week.
    return start_date - timedelta(days=offset or 7)


print(get_previous_byday('Saturday'))
# Same "last Friday" calculation using dateutil's relativedelta together
# with the weekday constants (MO..SU) from dateutil.rrule.
from dateutil.rrule import *
d = datetime.now()
# next Friday (today counts if today is Friday)
print(d + relativedelta(weekday=FR))
# last Friday (FR(-1) == most recent Friday, today included)
print(d + relativedelta(weekday=FR(-1)))
| pranavchandran/redtheme_v13b | chapter_2_strings_and_text/days_to_seconds/days_to_seconds_other.py | days_to_seconds_other.py | py | 1,950 | python | en | code | 0 | github-code | 36 |
7773371889 | import random
import time
from pathlib import Path
from typing import Any
import numpy as np
from midi.decode import get_array_of_notes
from midi.encode import get_file_from_standard_features
from models.music_model import MusicModel, ProgressCallback, ProgressMetadata
class MarkovChain(MusicModel):
    """Markov-chain music model: learns note-transition statistics from
    MIDI tracks as n-gram -> next-note frequencies, then samples new
    sequences from those tables.
    """

    # Context length (number of timesteps) of each chain state.
    n_gram_size: int

    def __init__(self, n_gram_size: int = 1) -> None:
        self.data: list = []                # per-track note sequences
        self.tokens: set = set()            # unique note tuples seen
        self.n_grams: set = set()           # unique n-grams seen
        self.tokens_list: list[tuple] = []  # tokens frozen into indexable order
        self.n_grams_list: list[tuple] = [] # n-grams frozen into indexable order
        self.probabilities: np.ndarray      # per-n-gram dict: next note -> probability
        self.n_gram_size = n_gram_size
def train(self, epochs: int | None, x_train: Any, y_train: Any, progress_callback: ProgressCallback,
checkpoint_path: Path | None = None) -> None:
# count probabilities
n = len(self.n_grams_list[0])
n_gram_next: np.ndarray = np.ndarray(
(len(self.n_grams_list, )), dtype=object)
for i in range(n_gram_next.shape[0]):
n_gram_next[i] = []
start = time.time()
time.perf_counter()
for i in range(len(self.data)):
elapsed = time.time() - start
progress_callback([(elapsed, 100 * i / len(self.data))])
for j in range(len(self.data[i]) - 1 - self.n_gram_size):
curr_n_gram = tuple(self.data[i][j:j + n])
next_note = self.data[i][j + n]
n_gram_next[self.n_grams_list.index(
curr_n_gram)].append(next_note)
elapsed = time.time() - start
progress_callback([(elapsed, 100)])
self.probabilities = np.ndarray(
(len(self.n_grams_list, )), dtype=object)
for i in range(n_gram_next.shape[0]):
self.probabilities[i] = {}
len_n_gram_next = len(n_gram_next)
for i in range(len_n_gram_next):
for j in range(len(n_gram_next[i])):
if len(n_gram_next[i]) <= 1:
self.probabilities[n_gram_next[i]][j] = 1
else:
if self.probabilities[i].get(n_gram_next[i][j]) is None:
self.probabilities[i][n_gram_next[i][j]] = float(
n_gram_next[i].count(n_gram_next[i][j]) / len(n_gram_next[i]))
    def create_dataset(self, dataset: list[tuple[Any, Any]]) -> tuple[Any, Any]:
        """Tokenise the loaded tracks and build the n-gram state set.

        The *dataset* argument is ignored; the model works on ``self.data``
        filled by :meth:`prepare_data`.  Returns a dummy (0, 0) pair to
        satisfy the MusicModel interface.
        """
        self.generate_tokens()
        self.generate_n_grams(self.n_gram_size)
        return 0, 0
    def generate_tokens(self) -> None:
        """Convert each 128-wide on/off timestep in ``self.data`` into a
        tuple of active MIDI note numbers, registering every tuple in
        ``self.tokens`` (mutates ``self.data`` in place).
        """
        for i in range(len(self.data)):
            for j in range(len(self.data[i])):
                notes = []
                for k in range(128):  # one flag per MIDI pitch
                    if self.data[i][j][k]:
                        notes.append(k)
                self.data[i][j] = tuple(notes)
                self.tokens.add(tuple(notes))
    def prepare_data(self, midi_file: Path) -> tuple[Any, Any]:
        """Load *midi_file* and append each of its tracks (as plain lists)
        to ``self.data``.  Returns a dummy (0, 0) pair to satisfy the
        MusicModel interface.
        """
        data_lines = get_array_of_notes(midi_file, False, False)
        for i in range(len(data_lines)):  # serialize tracks
            self.data.append(data_lines[i].tolist())
        return 0, 0
    def save(self, path: Path) -> None:
        """Persist the trained chain (probability tables + token list) as a
        NumPy ``.npz`` archive at *path*."""
        np.savez(path, probabilities=np.asarray(self.probabilities, dtype=object),
                 tokens=np.asarray(self.tokens_list, dtype=object))
    def load(self, path: Path) -> None:
        """Restore a chain saved by :meth:`save`; appends the ``.npz``
        suffix if *path* lacks it."""
        path = path if path.name.endswith('.npz') else path.with_suffix('.npz')
        data = np.load(path, allow_pickle=True)  # object arrays require pickle
        self.probabilities = data['probabilities']
        self.tokens_list = data['tokens']
    def generate_n_grams(self, n: int) -> None:
        """Collect every length-*n* window of tokens into ``self.n_grams``
        and freeze the token / n-gram sets into indexable lists."""
        print("Generating " + str(n) + "-grams")
        for i in range(len(self.data)):
            for j in range(len(self.data[i]) - n + 1):
                self.n_grams.add(tuple(self.data[i][j:j + n]))
        self.tokens_list = list(self.tokens)
        self.n_grams_list = list(self.n_grams)
        print(str(len(self.n_grams_list)) + " " + str(n) + "-grams generated!")
def model_summary(self) -> str:
return ("Markov chain basing on " +
str(self.n_gram_size) + "-grams:\n" +
str(len(self.tokens_list)) + " tokens\n" +
str(len(self.n_grams_list)) + " n_grams\n" +
str(len(self.data)) + " files")
    def generate(self, path: Path, seed: int | None = None) -> None:
        """Sample a 512-step sequence (seeded from a random token) and
        write it to *path* as a MIDI file.

        *seed* makes the run reproducible by seeding ``random``.
        """
        assert len(self.tokens_list) > 0, "Model was not initiated with data"
        if seed is not None:
            random.seed(seed)
        result = self.predict(random.choice(self.tokens_list), 512, False, 0)
        get_file_from_standard_features(
            result, 1000000, path, False, True, False, [8 for _ in result])
    def predict(self, initial_notes: tuple, length: int, deterministic: bool, rand: int) -> np.ndarray:
        """Generate a note sequence starting from *initial_notes*.

        deterministic - if True, the next note is always the one with the
                        maximum probability
                      - if False, the next note is sampled according to the
                        successor probabilities
        rand - % chance of selecting a completely random token next
               (int in [0;100])

        Returns a (steps, 128) boolean piano-roll array.
        """
        prediction = []
        previous_n_gram = initial_notes
        for i in range(len(initial_notes)):
            prediction.append(initial_notes[i])
        # NOTE(review): the loop bound subtracts len(self.tokens_list[0])
        # (the width of one token) rather than len(initial_notes) — confirm
        # this is intended.
        for i in range(length - len(self.tokens_list[0])):
            idx = None
            if tuple(previous_n_gram) in self.n_grams:
                idx = self.n_grams_list.index(previous_n_gram)
            else:
                # Unseen context: fall back to a random state.
                idx = random.randrange(len(self.probabilities))
            probs = self.probabilities[idx]
            # Re-roll until we land on a state that has successors.
            while len(probs) == 0:
                idx = random.randrange(len(self.probabilities))
                probs = self.probabilities[idx]
            next_note = None
            if random.randrange(100) < rand:
                next_note = random.choice(self.tokens_list)
            elif deterministic:
                next_note = max(probs, key=probs.get)
            else:
                next_note = random.choices(
                    list(probs.keys()), weights=probs.values(), k=1)[0]
            prediction.append(next_note)
            # NOTE(review): the context is *replaced* by the single new
            # note; for n_gram_size > 1 a sliding window (drop oldest, add
            # newest) would be expected — confirm.
            if next_note is not None:
                previous_n_gram = next_note
        # Expand the predicted tokens into a boolean piano roll.
        result = np.full((len(prediction), 128), False)
        for i in range(len(prediction)):
            if isinstance(prediction[i], int):
                result[i][prediction[i]] = True
            else:
                for j in range(len(prediction[i])):
                    note = prediction[i][j]
                    result[i][note] = True
        return result
    @staticmethod
    def get_progress_metadata() -> ProgressMetadata:
        """Describe the axes/legend used when plotting training progress."""
        return ProgressMetadata(x_label='Time [s]', y_label='Progress [%]', legends=['Markov Chain'])
| piotrowskv/music_generation | models/models/markov_chain/markov_chain.py | markov_chain.py | py | 6,790 | python | en | code | 0 | github-code | 36 |
473540501 | #!/usr/bin/env python
import musescore_parser as mp
import sys
from fractions import Fraction
from dataclasses import dataclass, field
from typing import Optional
import re
#https://github.com/OpenLilyPondFonts/lilyjazz/blob/master/JazzSampler.pdf
@dataclass
class Base:
    """Dataclass mixin that echoes every newly built instance, prefixed
    with "%%" (a LilyPond comment), for debug tracing in the output."""

    def __post_init__(self):
        # Dump the freshly initialised instance so the generated output
        # carries a comment trail of the parsed objects.
        print("%%", self)
@dataclass
class LyricHandler(Base):
    # One lyric event: a syllable (or silent note/rest) plus the note that
    # carries it.  Durations are Fractions in whole-note units.
    note_duration: Optional[Fraction] = None   # length of the carrying note/rest
    text: Optional[str] = None                 # syllable text; None for melisma/rest
    note_pitch: Optional[str] = None           # placeholder pitch ("c") or "r" for rest
    extender_line: Optional[str] = None        # "--" (syllable hyphen) or "__" (melisma)
    extender_duration: Optional[Fraction] = None  # total length covered by a "__" extender
    slur: Optional[str] = None                 # "(" / ")" opening/closing the melisma slur
parser_key_signature = {
'-7' : 'ces',
'-6' : 'ges',
'-5' : 'des',
'-4' : 'as',
'-3' : 'es',
'-2' : 'b',
'-1' : 'f',
'0' : 'c',
'1' : 'g',
'2' : 'd',
'3' : 'a',
'4' : 'e',
'5' : 'h',
'6' : 'fis',
'7' : 'cis',
}
parser_key_signature_duration = {
'4/4': "1",
'3/4': "2.",
'2/4': "2",
}
parser_duration_fractions = {
'whole' : "4/4",
'half' : "2/4",
'quarter' : "1/4",
'eighth' : "1/8",
'16th' : "1/16",
'32nd' : "1/32",
'64th' : "1/64"
}
parser_tpc = {
'' : 's',
'-1' : 'feses',
'0' : 'ceses',
'1' : 'geses',
'2' : 'deses',
'3' : 'ases',
'4' : 'eses',
'5' : 'bes',
'6' : 'fes',
'7' : 'ces',
'8' : 'ges',
'9' : 'des',
'10' : 'as',
'11' : 'es',
'12' : 'b',
'13' : 'f',
'14' : 'c',
'15' : 'g',
'16' : 'd',
'17' : 'a',
'18' : 'e',
'19' : 'h',
'20' : 'fis',
'21' : 'cis',
'22' : 'gis',
'23' : 'dis',
'24' : 'ais',
'25' : 'eis',
'26' : 'his',
'27' : 'fisis',
'28' : 'cisis',
'29' : 'gisis',
'30' : 'disis',
'31' : 'aisis',
'32' : 'eisis',
'33' : 'hisis'
}
parser_barline = {
"startRepeat" : ".|:",
"endRepeat" : ":|.",
"double" : "||",
"end" : "|."
}
parser_clefs = {
"G8vb" : "tenorG",
"F" : "bass",
'' : "treble",
'G' : "treble"
}
parser_name = {
"": "Zero",
"0": "Zero",
"1": "One",
"2": "Two",
"3": "Three",
"4": "Four",
"5": "Five",
"6": "Six",
}
parser_dots_fractions = {
"": 1,
"1": 1 + 1/2,
"2": 1 + 1/2 + 1/2/2,
"3": 1 + 1/2 + 1/2/2 + 1/2/2/2,
"4": 1 + 1/2 + 1/2/2 + 1/2/2/2 + 1/2/2/2/2,
}
parser_fraction_to_duration = {
"1": "1",
"1/1": "1",
"1/2": "2",
"1/4": "4",
"2/4": "2",
"3/4": "2.",
"1/8": "8",
"3/8": "4.",
"7/8": "2..",
"1/16": "16",
"3/16": "8.",
"7/16": "4..",
"15/16": "2...",
"1/32": "32",
"3/32": "16.",
"7/32": "8..",
"15/32": "4...",
"1/64": "64",
"3/64": "32.",
}
parse_measure_end_repeat = {
"2": ":|."
}
#https://github.com/OpenLilyPondFonts/lilyjazz/blob/master/JazzSampler.pdf
parse_chord_names = {
"m7": "m7",
"(add9)": "9^7",
"7": "7",
"m6": "m6",
"dim6": "dim6",
"dim7": "dim7",
"dim": "dim",
"m7(11)": "m7.11",
"6": "6",
"Maj9": "maj9",
"7(b9)": "9-",
"m": "m",
"0": "m7.5-",
"7(#9)": "9+",
"o7": "dim7",
"7(#5)": "7.5+",
"(b5)": "dim",
"sus4": "sus4",
"7sus4": "sus4.7"
}
# Relative-pitch state: the previously emitted note, seeded at middle C
# (MIDI 60, tpc 14 == "c").
last_pitch = 60
last_tpc = 14


def get_pitch(pitch, tpc):
    """Translate a MuseScore note (*pitch* = MIDI number, *tpc* = tonal
    pitch class) into a LilyPond relative-mode pitch token.

    The note name comes from ``parser_tpc``; octave marks (' / ,) are
    derived from the semitone distance to the previous note.  The
    ``tcp_diff == +/-6`` special cases appear to compensate for enharmonic
    respellings (which shift the tpc by 6) — confirm against more scores.
    """
    global last_pitch, last_tpc
    line = parser_tpc[tpc]
    pitch_diff = int(pitch) - int(last_pitch)
    tcp_diff = int(tpc) - int(last_tpc)
    last_pitch = pitch
    last_tpc = tpc
    # TODO: clean up this mess
    if (pitch_diff >= 6 and pitch_diff < 18):
        if (pitch_diff == 6 and tcp_diff == 6):
            line += ""          # enharmonic step: no octave mark
        else:
            line += "'"         # up one octave
    elif (pitch_diff >= 18 and pitch_diff < 30):
        if (pitch_diff == 18 and tcp_diff == 6):
            line += "'"
        else:
            line += "''"        # up two octaves
    elif (pitch_diff >= 30):
        if (pitch_diff == 30 and tcp_diff == 6):
            line += "''"
        else:
            line += "'''"       # up three octaves
    elif (pitch_diff <= -6 and pitch_diff > -18):
        if (pitch_diff == -6 and tcp_diff == -6):
            line += ""          # enharmonic step: no octave mark
        else:
            line += ","         # down one octave
    elif (pitch_diff <= -18 and pitch_diff > -30):
        if (pitch_diff == -18 and tcp_diff == -6):
            line += ","
        else:
            line += ",,"        # down two octaves
    elif (pitch_diff <= -30):
        if (pitch_diff == -30 and tcp_diff == -6):
            line += ",,"
        else:
            line += ",,,"       # down three octaves
    return line
class LilypondGenerator(mp.MuseScoreParser):
def get_head(self):
string = []
string.append("\\version \"2.24.1\"")
string.append("\\include \"deutsch.ly\"")
string.append("jazzChords = { \\semiGermanChords }")
string.append("aFourL = {}")
string.append("%\\include \"../config/include.ily\"")
string.append("markMoj = #(define-music-function (letter) (string?) #{ \\mark \\markup { \\box \\bold #letter } #})")
string.append("")
string.append("\layout {")
string.append(" indent = 0")
string.append("}")
return string
def get_header(self):
string = []
string.append("\header {")
string.append(" titlex = \"Pjevajte Jahvi\"")
poet_found = False
part_found = False
for e in self.staffs[0].children:
if isinstance(e, mp.VBox):
if e.style == "Title":
string.append(f" title = \"%s\"" % e.text.upper())
elif e.style == "Composer":
string.append(" composer = \"%s\"" % e.text)
elif e.style == "Lyricist":
string.append(" %%poet = \"%s\"" % e.text)
string.append(" style = \"%s\"" % e.text)
poet_found = True
elif e.style == "Instrument Name (Part)":
string.append(" %%meter = \"%s\"" % e.text)
string.append(" broj = \"%s\"" % e.text)
part_found = True
if not poet_found:
string.append(" style = \"\"")
if not part_found:
string.append(" broj = \"1\"")
string.append(" %tagline = \\markup { \\override #'(font-name . \"JohnSans White Pro\") \\override #'(font-size . -3) { Izvorno: Name, Album } }")
string.append("}")
return string
def get_paper(self):
string = []
string.append("\\paper {")
string.append(" \\aFourL")
string.append(" %min-systems-per-page = #7")
string.append(" %annotate-spacing = ##t")
string.append(" %system-system-spacing.padding = #3.2")
string.append(" %page-breaking = #ly:one-page-breaking")
string.append(" %last-bottom-spacing.minimum-distance = #8")
string.append("}")
return string
def get_staff_start(self, staff):
string = []
string.append("staff%s = \\relative c' {" % parser_name[staff.id])
return string
def get_staff_end(self):
string = []
string.append("}")
return string
def fractions_add_missing(self, bar, time_signature):
fraction_sum = Fraction(0)
for e in bar:
if isinstance(e, Fraction):
fraction_sum += e
if fraction_sum != time_signature:
bar.append(time_signature - fraction_sum)
return bar
    def fractions_sum_neighbor(self, bar):
        """Merge runs of adjacent Fraction durations in *bar* into one
        summed Fraction, leaving the non-Fraction elements in order.

        Returns a new list; a pending run is flushed before each
        non-Fraction element and once more at the end.
        """
        summed_bar = []
        fraction = None  # running sum of the current Fraction run
        for e in bar:
            if isinstance(e, Fraction):
                if fraction is not None:
                    fraction += e
                else:
                    fraction = e
            else:
                if fraction is not None:
                    summed_bar.append(fraction)
                    fraction = None
                summed_bar.append(e)
        if fraction is not None:
            summed_bar.append(fraction)
            fraction = None
        return summed_bar
def fractions_add_skip_if_bar_starts_with_fraction(self, bar):
if len(bar) > 0 and isinstance(bar[0], Fraction):
bar.insert(0, "s")
return bar
def fractions_convert_bar_with_fractions_to_ly(self, bar, lyrics=False):
line = ""
for e in bar:
if isinstance(e, Fraction):
if not lyrics:
line += parser_fraction_to_duration[str(e)]
line += " "
else:
line += e
if lyrics:
line += " "
if "bar" in e or "mark" in e or "clef" in e or "repeat" in e:
line += " "
if "{" in e or "}" in e:
line += " "
return line
def fractions_convert_harmony_bar_with_fractions_to_ly(self, bar):
line = ""
harmony = None
for e in bar:
if isinstance(e, Fraction):
if harmony is not None:
line += parser_tpc[harmony.root]
line += parser_fraction_to_duration[str(e)]
if harmony is not None:
if harmony.name:
line += ":" + parse_chord_names[harmony.name]
if harmony.base:
line += "/" + parser_tpc[harmony.base]
line += " "
harmony = None
elif isinstance(e, mp.Harmony):
harmony = e
else:
line += e
return line
def get_staff_data(self, staff):
string = []
for sc in staff.children:
if isinstance(sc, mp.Measure):
bar = []
line = " "
has_break = False
for e in sc.children:
if isinstance(e, mp.TimeSig):
string.append(" \\time %s/%s" % (e.sig_n, e.sig_d))
elif isinstance(e, mp.Tempo):
string.append(" \\tempo 4 = %s" % int((60 * float(e.tempo))))
elif isinstance(e, mp.Rest):
if e.duration_type == "measure":
bar.append("r")
predicted_duration = Fraction(e.duration)
bar.append(predicted_duration)
else:
bar.append("r")
predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
predicted_duration *= Fraction(parser_dots_fractions[e.dots])
bar.append(predicted_duration)
elif isinstance(e, mp.Chord):
bar.append(get_pitch(e.note_pitch, e.note_tpc))
predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
predicted_duration *= Fraction(parser_dots_fractions[e.dots])
bar.append(predicted_duration)
elif isinstance(e, mp.KeySig):
tpc_value = str(14 + int(e.accidental))
string.append(" \\key %s \\major" % parser_tpc[tpc_value])
elif isinstance(e, mp.ChordNoteSpanner):
if e.type == "Tie":
if e.next_location_fractions or e.next_location_measures:
bar.append("~")
elif isinstance(e, mp.ChordSpanner):
if e.type == "Slur":
if e.next_location_fractions or e.next_location_measures:
bar.append("(")
elif e.prev_location_fractions or e.prev_location_measures:
bar.append(")")
elif isinstance(e, mp.BarLine):
bar.append("\\bar \"%s\"" % parser_barline[e.subtype])
elif isinstance(e, mp.RehearsalMark):
#text = "\\mark \\markup { \\box \\bold %s }" % e.text
#bar.append(text)
text = "\\markMoj \"%s\"" % e.text
#text = "\\markMoj"
bar.append(text)
#text = "%\\markMojPonn"
#bar.append(text)
elif isinstance(e, mp.Clef):
if e.concert_clef_type:
text = "\\clef %s" % parser_clefs[e.concert_clef_type]
bar.append(text)
elif e.transposing_clef_type:
text = "\\clef %s" % parser_clefs[e.transposing_clef_type]
bar.append(text)
elif isinstance(e, mp.LayoutBreak):
if e.subtype == "line":
has_break = True
elif isinstance(e, mp.VoltaSpanner):
if e.next_location_measures:
text = "\\set Score.repeatCommands = #\'((volta \"%s\"))" % e.begin_text
bar.append(text)
elif e.prev_location_measures:
text = "\\set Score.repeatCommands = #\'((volta #f))"
bar.append(text)
elif isinstance(e, mp.Tuplet):
text = "\\tuplet %s/%s {" % (e.actual_notes, e.normal_notes)
bar.append(text)
elif isinstance(e, mp.EndTuplet):
text = "}"
bar.append(text)
#line += str(bar) + "\n "
if sc.len:
line += "\\partial %s" % parser_fraction_to_duration[sc.len]
line += "\n "
line += self.fractions_convert_bar_with_fractions_to_ly(bar)
if sc.end_repeat:
line += "\\bar \"%s\"" % parse_measure_end_repeat[sc.end_repeat]
line += " "
line += "|"
#if has_break:
# line += " \\break"
string.append(line)
return string
def get_harmony(self, staff):
string = []
#harmony_found = False
#for sc in staff.children:
# if isinstance(sc, mp.Measure):
# for e in sc.children:
# if isinstance(e, mp.Harmony):
# harmony_found = True
#if not harmony_found:
# return string
string.append("harmony%s = \chordmode {" % parser_name[staff.id])
time_signature = None
for sc in staff.children:
if isinstance(sc, mp.Measure):
bar = []
line = " "
for e in sc.children:
if isinstance(e, mp.TimeSig):
time_signature = Fraction(f"{e.sig_n}/{e.sig_d}")
elif isinstance(e, mp.Harmony):
bar.append(e)
elif isinstance(e, mp.Chord):
predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
predicted_duration *= Fraction(parser_dots_fractions[e.dots])
bar.append(predicted_duration)
elif isinstance(e, mp.Rest):
if e.duration_type == "measure":
predicted_duration = Fraction(e.duration)
bar.append(predicted_duration)
else:
predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
predicted_duration *= Fraction(parser_dots_fractions[e.dots])
bar.append(predicted_duration)
elif isinstance(e, mp.Location):
predicted_duration = Fraction(e.fractions)
bar.append(predicted_duration)
if sc.len:
bar = self.fractions_add_missing(bar, Fraction(sc.len))
else:
bar = self.fractions_add_missing(bar, time_signature)
bar = self.fractions_sum_neighbor(bar)
bar = self.fractions_add_skip_if_bar_starts_with_fraction(bar)
line += self.fractions_convert_harmony_bar_with_fractions_to_ly(bar)
#line += str(bar)
line += "|"
string.append(line)
# force end bar
string.append(" \\bar \"|.\"")
string.append("}")
return(string)
def get_lyric_nos(self, staff):
nos = []
for sc in staff.children:
if isinstance(sc, mp.Measure):
for e in sc.children:
if isinstance(e, mp.Lyrics):
if e.no not in nos:
nos.append(e.no)
return sorted(nos)
def fractions_swap_with_elements(self, bar):
swaped_bar = []
fraction = None
for e in bar:
if isinstance(e, Fraction):
if fraction is None:
fraction = e
else:
swaped_bar.append(fraction)
fraction = e
else:
swaped_bar.append(e)
if fraction is not None:
swaped_bar.append(fraction)
fraction = None
if fraction is not None:
swaped_bar.append(fraction)
fraction = None
return swaped_bar
def get_lyric(self, staff, no):
bars = []
for sc in staff.children:
if isinstance(sc, mp.Measure):
bar = []
lyric_handler = LyricHandler()
for e in sc.children:
if isinstance(e, mp.Lyrics):
if e.no == no:
#print(repr(e.text))
if "\xa0" in e.text:
lyric_handler.text = "\"%s\"" % e.text
else:
lyric_handler.text = e.text
if e.syllabic in ["begin", "middle"]:
lyric_handler.extender_line = "--"
if e.ticks_f and e.ticks:
predicted_duration = - Fraction(e.ticks_f)
lyric_handler.extender_line = "__"
lyric_handler.extender_duration = abs(predicted_duration)
elif isinstance(e, mp.Chord):
if lyric_handler.note_duration is not None:
bar.append(lyric_handler)
lyric_handler = LyricHandler()
predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
predicted_duration *= Fraction(parser_dots_fractions[e.dots])
lyric_handler.note_pitch = "c"
lyric_handler.note_duration = predicted_duration
elif isinstance(e, mp.Rest):
if e.duration_type == "measure":
if lyric_handler.note_duration is not None:
bar.append(lyric_handler)
lyric_handler = LyricHandler()
predicted_duration = Fraction(e.duration)
lyric_handler.note_pitch = "r"
lyric_handler.note_duration = predicted_duration
else:
if lyric_handler.note_duration is not None:
bar.append(lyric_handler)
lyric_handler = LyricHandler()
predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
predicted_duration *= Fraction(parser_dots_fractions[e.dots])
lyric_handler.note_pitch = "r"
lyric_handler.note_duration = predicted_duration
if lyric_handler.note_duration is not None and lyric_handler.text is not None:
bar.append(lyric_handler)
lyric_handler = LyricHandler()
if lyric_handler.note_duration is not None:
bar.append(lyric_handler)
lyric_handler = LyricHandler()
bars.append(bar)
# add slurs for extender line and replace non text notes to rests
extender_duration = None
for bar in bars:
#print("|")
for b in bar:
#print(" ", b)
if b.text is not None:
if b.extender_duration:
extender_duration = b.extender_duration - b.note_duration
#print(extender_duration, "adding (")
b.slur = "("
else:
if extender_duration is None:
b.note_pitch = "r"
else:
extender_duration -= b.note_duration
#print(extender_duration, "calculating")
if extender_duration < 0:
extender_duration = None
#print("adding )")
b.slur = ")"
string = []
#string.append("%%test%s%s = {" % (parser_name[staff.id], parser_name[no]))
#for bar in bars:
# for b in bar:
# line = "% "
# line += str(b)
# string.append(line)
# string.append("% |")
#string.append("%}")
#string.append("")
string.append("aligner%s%s = \\relative {" % (parser_name[staff.id], parser_name[no]))
for bar in bars:
line = " "
for b in bar:
line += b.note_pitch + parser_fraction_to_duration[str(b.note_duration)]
if b.slur:
line += b.slur
line += " "
line += "|"
if len(line.strip()):
string.append(line)
string.append("}")
string.append("")
string.append("lyric%s%s = \\lyricmode {" % (parser_name[staff.id], parser_name[no]))
for bar in bars:
line = " "
for b in bar:
if b.text is not None:
line += b.text
line += " "
if b.extender_line is not None:
line += b.extender_line
line += " "
line += "%|"
if len(line.strip()):
string.append(line)
string.append("}")
return string
def get_tbox(self):
string = []
#tbox_found = False
#for e in self.staffs[0].children:
# if isinstance(e, mp.TBox):
# tbox_found = True
# break
#if not tbox_found:
# return string
stanzas = []
lyrics = []
for e in self.staffs[0].children:
if isinstance(e, mp.TBox):
if e.style == "Frame":
line_count = 0
for line in e.text.split("\n"):
line = line.strip()
if len(line) > 0:
if re.match("\\d\\.", line):
stanzas.append(" \\line { \\bold %s }" % line)
else:
line_count += 1
lyrics.append(" \\line { %s }" % line)
else:
stanzas.append(" \\vspace #%s" % (line_count))
line_count = 0
lyrics.append(" \\vspace #1")
string.append("\\markup {")
string.append(" \\column {")
string += stanzas
string.append(" }")
string.append(" \\hspace #1")
string.append(" \\column {")
string += lyrics
string.append(" }")
string.append("}")
return string
def get_score(self):
string = []
string.append("\\score {")
string.append(" <<")
for staff in self.staffs:
string.append(" \\new ChordNames { \\jazzChords \\harmony%s }" % parser_name[staff.id])
string.append(" \\new Staff {")
string.append(" <<")
string.append(" \\new Voice { \\staff%s }" % parser_name[staff.id])
for no in self.get_lyric_nos(staff):
string.append(" \\new NullVoice = \"aligner%s%s\" { \\aligner%s%s }" % (parser_name[staff.id], parser_name[no], parser_name[staff.id], parser_name[no]))
string.append(" \\new Lyrics \\lyricsto \"aligner%s%s\" { \\lyric%s%s }" % (parser_name[staff.id], parser_name[no], parser_name[staff.id], parser_name[no]))
string.append(" >>")
string.append(" }")
#string.append(" \\new Staff {")
#for no in self.get_lyric_nos(staff):
# string.append(" \\new Voice = \"aligner%s%s\" { \\transpose c c'' \\aligner%s%s }" % (parser_name[staff.id], parser_name[no], parser_name[staff.id], parser_name[no]))
#string.append(" }")
string.append(" >>")
string.append("}")
return(string)
def get_file(self):
string = []
string += self.get_head()
string.append("")
string += self.get_header()
string.append("")
string += self.get_paper()
string.append("")
for s in self.staffs:
string += self.get_staff_start(s)
string += self.get_staff_data(s)
string += self.get_staff_end()
string.append("")
string += self.get_harmony(s)
string.append("")
for no in self.get_lyric_nos(s):
string += self.get_lyric(s, no)
string.append("")
string += self.get_score()
string.append("")
string += self.get_tbox()
return(string)
if __name__ == "__main__":
lg = LilypondGenerator(sys.argv[1])
print("\n".join(lg.get_file()))
| duhovniprojekt/duhovne_pjesme_novi_sad_1966 | scripts/new/lilypond_generator.py | lilypond_generator.py | py | 27,495 | python | en | code | 0 | github-code | 36 |
3843330309 | import numpy
import numpy as np
import pandas as pd
import pygad
import tlsh
import json
from tools import featurer
import sys
import csv
import tensorflow as tf
from tensorflow import keras
from keras import layers
import filenames_modified as filenames
# Index (into the poison set) of the targeted malware sample, from argv.
MALWAREIDX = int(sys.argv[1])
BATCH_SIZE = 10

# Feature CSVs: the last two columns are (file name, label); everything
# before them is the model input.
arm_training = pd.read_csv(filenames.arm_training, header=None, index_col=False)
arm_validation = pd.read_csv(filenames.arm_validation, header=None, index_col=False)
arm_test = pd.read_csv(filenames.arm_test, header=None, index_col=False)

dataset_arm_training = np.asarray(arm_training.drop(columns=arm_training.columns[-2:]))
dataset_arm_validation = np.asarray(arm_validation.drop(columns=arm_validation.columns[-2:]))
dataset_arm_test = np.asarray(arm_test.drop(columns=arm_test.columns[-2:]))

labels_arm_training = np.asarray(arm_training[arm_training.columns[-1]])
labels_arm_validation = np.asarray(arm_validation[arm_validation.columns[-1]])
labels_arm_test = np.asarray(arm_test[arm_test.columns[-1]])

names_arm_training = arm_training[arm_training.columns[-2]]
names_arm_validation = arm_validation[arm_validation.columns[-2]]
names_arm_test = arm_test[arm_test.columns[-2]]

# Feature rows of the malware samples reserved for poisoning; the targeted
# sample's row becomes the single prediction input.
df_arm_malware_forpoison = pd.read_csv(filenames.forpoison_arm_malware, header=None, index_col=False)
df_arm_malware_forpoison = df_arm_malware_forpoison.drop(columns=df_arm_malware_forpoison.columns[-2:])
topredict = np.asarray([df_arm_malware_forpoison.iloc[MALWAREIDX],])

# Module-level globals read by myfunc/fitness_func; assigned below
# (malwareTLSH once, mybytes per benign file in the sweep).
malwareTLSH = ""
mybytes = ""
def myfunc(solution):
    """Return the TLSH digest of the current benign file's bytes (module
    global ``mybytes``) with the GA genome appended as raw uint8 bytes.

    NOTE(review): assumes ``mybytes`` has already been set to this file's
    bytes by the sweep loop (its initial value is the empty *str*, which
    would fail on concatenation) — confirm call order.
    """
    additional = np.array(solution).tobytes()
    return str(tlsh.hash(mybytes + additional))
def fitness_func(solution, solution_idx):
    """GA fitness: inverse TLSH distance between the targeted malware hash
    and the poisoned (benign + genome) hash — larger means closer hashes.

    Fix: guards the distance-zero case (identical digests, i.e. a perfect
    match), which previously raised ZeroDivisionError exactly when the
    attack fully succeeded; it now returns +inf, the best possible fitness.
    """
    poisonedTLSH = myfunc(solution)
    distance = tlsh.diff(malwareTLSH, poisonedTLSH)
    return float('inf') if distance == 0 else 1 / distance
# --- GA hyper-parameters (see pygad.GA) ---
num_generations = 500
num_parents_mating = 8
sol_per_pop = 20
gene_type = numpy.uint8         # genes are raw byte values 0..255
init_range_low = 0
init_range_high = 255
stop_criteria = "saturate_200"  # stop early if fitness stalls for 200 generations

# Appended-payload sizes, as a percentage of each benign file's size.
percents = [5, 10, 20]
# How many benign files to poison per experiment.
benignnumbers = [30, 40]
BENIGNNUMBER = 50  # overwritten by the sweep loop below

with open(filenames.poisonJSON) as poison_json:
    poison = json.load(poison_json)

# TLSH digest of the targeted malware sample; the GA fitness rewards
# payloads whose hash approaches this.
with open(filenames.dir_malware_arm + str(poison["arm"]["malware"][MALWAREIDX]), "rb") as malware:
    malwareread = malware.read()
malwareTLSH = str(tlsh.hash(malwareread))
# Sweep: for each benign-file count and each payload percentage, evolve a
# payload per benign file, retrain the detector on the poisoned features,
# and record accuracy plus the prediction for the targeted malware sample.
for BENIGNNUMBER in benignnumbers:
    with open("{}genetic_idx-{}_bening-{}_percent-5-10-20.csv".format(filenames.dir_results, MALWAREIDX, BENIGNNUMBER), "w") as results_file:
        csv_writer_r = csv.writer(results_file, lineterminator="\n")
        for percent in percents:
            with open(filenames.dir_poison_data_genetic + "percent_" + str(percent) + "_" + str(MALWAREIDX) + ".csv", "w") as f:
                csv_writer = csv.writer(f, lineterminator="\n")
                for i in range(BENIGNNUMBER):
                    print("*{}: {}% - {}*".format(str(MALWAREIDX), percent, i))
                    filename = str(poison["arm"]["benign"][i])
                    with open(filenames.dir_bening_arm + filename, "rb") as benign:
                        mybytes = benign.read()
                    lenbytes = len(mybytes)
                    # Payload length: `percent` % of the benign file size.
                    num_genes = int(lenbytes * percent / 100)
                    ga = pygad.GA(num_generations=num_generations,
                                  num_parents_mating=num_parents_mating,
                                  fitness_func=fitness_func,
                                  sol_per_pop=sol_per_pop,
                                  num_genes=num_genes,
                                  gene_type=gene_type,
                                  init_range_low=init_range_low,
                                  init_range_high=init_range_high,
                                  stop_criteria=stop_criteria
                                  )
                    ga.run()
                    best_solution, best_fitness, best_idx = ga.best_solution()
                    # Persist the TLSH feature vector of the poisoned file.
                    csv_writer.writerow(featurer(myfunc(best_solution)))
            # Retrain the pre-trained base model on the poisoned features
            # written above, then evaluate on the clean test set.
            # NOTE(review): the poison CSV rows come from featurer() and may
            # not carry the trailing (name, label) columns this drop
            # assumes — confirm column layout.
            file_poison_arm_BM = filenames.dir_poison_data_genetic + "percent_" + str(percent) + "_" + str(MALWAREIDX) + ".csv"
            poisoned_arm_training = pd.read_csv(file_poison_arm_BM, index_col=False, header=None)
            poisoned_arm_training_base = poisoned_arm_training.sample(frac=1)
            poisoned_arm_training_new = arm_training.append(poisoned_arm_training, ignore_index=True).sample(frac=1)
            dataset_poisoned_arm_training_base = np.asarray(
                poisoned_arm_training_base.drop(columns=poisoned_arm_training_base.columns[-2:]))
            dataset_poisoned_arm_training_new = np.asarray(
                poisoned_arm_training_new.drop(columns=poisoned_arm_training_new.columns[-2:]))
            labels_poisoned_arm_training_base = np.asarray(poisoned_arm_training_base[poisoned_arm_training_base.columns[-1]])
            labels_poisoned_arm_training_new = np.asarray(poisoned_arm_training_new[poisoned_arm_training_new.columns[-1]])
            # MODIFIED: fine-tune the saved base model on the poison set only.
            base_model = keras.models.load_model(filenames.base_model)
            base_model.fit(dataset_poisoned_arm_training_base, labels_poisoned_arm_training_base, epochs=10, batch_size=BATCH_SIZE,
                           validation_data=(dataset_arm_validation, labels_arm_validation), verbose=0)
            [_, binary_accuracy_appended] = base_model.evaluate(dataset_arm_test, labels_arm_test, verbose=0)
            [[predict_appended]] = base_model.predict(topredict, verbose=0)
            # (A from-scratch "NEW" model variant existed here but was
            # disabled; see version history.)
            results = [percent,
                       binary_accuracy_appended,
                       predict_appended]
            print(results)
            csv_writer_r.writerow(results)
print("{} DONE".format(MALWAREIDX)) | ZsZs88/Poisoning | genetic_modified.py | genetic_modified.py | py | 7,404 | python | en | code | 0 | github-code | 36 |
35865754669 | """
no longer needed since pointnet2_ssg_cls can provide this form
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys, os

# Make the repository root and the compiled pointnet2 ops importable.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'ops', 'pointnet2_ops_lib', 'pointnet2_ops'))

# PointnetSAModuleMSG is required by PointNet2MSGCls._build_model below;
# previously only the single-scale modules were imported (the MSG import was
# commented out), which made building the model raise NameError.
from pointnet2_ops.pointnet2_modules import (
    PointnetFPModule,
    PointnetSAModule,
    PointnetSAModuleMSG,
)
from .pointnet2_ssg_cls import PointNet2SSGCls
class PointNet2MSGCls(PointNet2SSGCls):
    """PointNet2 MSG for classification

    Multi-scale-grouping variant: reuses the SSG base class and replaces
    only the set-abstraction (SA) tower.
    """
    def _build_model(self):
        # call the base method and then override SA_modules
        super()._build_model()
        self.SA_modules = nn.ModuleList()
        # One MSG set-abstraction level per entry in self.radii.
        # NOTE(review): self.npoints/self.radii/self.nsamples/self.mlps and
        # self.use_xyz are presumably set by the base class -- confirm.
        for i in range(len(self.radii)):
            self.SA_modules.append(
                # NOTE(review): PointnetSAModuleMSG is NOT imported at the top
                # of this file (its import is commented out) -- this raises
                # NameError when the model is built.
                PointnetSAModuleMSG(
                    npoint=self.npoints[i],
                    radii=self.radii[i],
                    nsamples=self.nsamples[i],
                    mlps=self.mlps[i],
                    use_xyz=self.use_xyz
                )
            )
        # Final global SA level (no grouping radius): aggregates all points.
        self.SA_modules.append(
            PointnetSAModule(
                mlp=self.mlps[-1],
                use_xyz=self.use_xyz
            )
        )
| PointCloudYC/PointNet-modern.pytorch | models/pointnet2/pointnet2_msg_cls.py | pointnet2_msg_cls.py | py | 1,339 | python | en | code | 3 | github-code | 36 |
15871926331 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
def read_image(image):
    """Read an image file from disk into an array via matplotlib's loader."""
    return mpimg.imread(image)
def format_image(image):
    """Add a batch axis, resize to the 224x224 model input, scale to [0, 1]."""
    return tf.image.resize(image[tf.newaxis, ...], [224, 224]) / 255.0
def get_category(img):
    """Run the bundled TFLite classifier on an image file.

    Args:
        img: path to the image file to classify.

    Returns:
        str: the predicted class name ('rock', 'paper' or 'scissors').
    """
    model_dir = 'static/model/'
    model_name = 'converted_model.tflite'

    # Read the serialized TFLite model from disk.
    with open(model_dir + model_name, 'rb') as model_file:
        model_bytes = model_file.read()

    # Build the interpreter and reserve its tensors.
    interpreter = tf.lite.Interpreter(model_content=model_bytes)
    interpreter.allocate_tensors()

    in_index = interpreter.get_input_details()[0]["index"]
    out_index = interpreter.get_output_details()[0]["index"]

    # Load and preprocess the image, then feed it through the model.
    tensor = format_image(read_image(img))
    interpreter.set_tensor(in_index, tensor)
    interpreter.invoke()

    # Pick the class with the highest score.
    scores = interpreter.get_tensor(out_index)
    best = np.argmax(scores)

    class_names = ['rock', 'paper', 'scissors']
    return class_names[best]
def plot_category(img, current_time):
    """Save a copy of the input image into the static images folder.

    Args:
        img: path to the image file to copy.
        current_time: timestamp used to build a unique output file name.
    """
    read_img = mpimg.imread(img)
    ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
    # Build the destination path with os.path.join components instead of
    # string concatenation, so separators are handled portably.
    file_path = os.path.join(ROOT_DIR, 'static', 'images',
                             f'output_{current_time}.png')
    print(file_path)
    # Remove a stale file from a previous request before writing the new one.
    if os.path.exists(file_path):
        os.remove(file_path)
    plt.imsave(file_path, read_img)
| FourthBrain/Intro-to-Flask | inference.py | inference.py | py | 1,815 | python | en | code | 1 | github-code | 36 |
31628499109 | import traceback
import sys
from discord.ext import commands
import discord
class ErrorHandler(commands.Cog):
    """Cog that centralises handling of errors raised by bot commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Reply to or log a command error depending on its type.

        Commands that define their own ``on_error`` handler are left alone.
        """
        # Let per-command error handlers take precedence.
        if hasattr(ctx.command, 'on_error'):
            return

        # Error types we only log, never surface to the channel.
        ignored = (commands.CommandNotFound,)

        # Unwrap CommandInvokeError to get at the original cause, if any.
        error = getattr(error, 'original', error)

        if isinstance(error, ignored):
            print("Command not found: ", error)
            return
        elif isinstance(error, commands.DisabledCommand):
            return await ctx.send(f'{ctx.command} has been disabled.')
        elif isinstance(error, commands.NoPrivateMessage):
            try:
                return await ctx.author.send(f'{ctx.command} can not be used in Private Messages.')
            except discord.HTTPException:
                # The user may have DMs disabled; nothing more we can do.
                pass
        elif isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):
            return await ctx.send(error)
        else:
            print(error)
            return
def setup(bot):
    """Entry point used by discord.py's extension loader to register the cog."""
    bot.add_cog(ErrorHandler(bot))
| docgonzo2015/Botler-discord-bot | cogs/errors.py | errors.py | py | 1,104 | python | en | code | 0 | github-code | 36 |
3204081333 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 21:36:27 2019
@author: Rodrigo
"""
import csv
import sqlite3

# Export every row of the ``enel`` table to output.csv.
# ``newline=''`` is required by the csv module (otherwise blank rows appear
# on Windows), and the with/finally blocks guarantee the file and the
# database connection are closed even if the query fails.
conn = sqlite3.connect('enel.db')
try:
    with open('output.csv', 'w', newline='') as csv_file:
        e = csv.writer(csv_file)
        # header row: CPF and consumer unit (UC)
        e.writerow(['cpf', 'UC'])

        cursor = conn.cursor()
        # lendo os dados (reading the data)
        cursor.execute("""
        SELECT * FROM enel;
        """)

        for linha in cursor.fetchall():
            e.writerow(linha)
            print(linha)
finally:
    conn.close()
| rasiqueira/enel | bd.py | bd.py | py | 409 | python | en | code | 0 | github-code | 36 |
36568920733 | from django.urls import path, re_path
from . import views
app_name = 'adminapp'

# URL routes for the admin app.  Grouped by feature: auth, categories,
# articles, events, pages, menus, banners, media uploads, and account.
urlpatterns = [
    path('', views.login, name='login'),
    # categories
    path('category/add/', views.add_category, name='add_category'),
    path('article/add/', views.add_post),
    path('article/list/', views.post_list),
    path('category/list/', views.category_list),
    path('event/list/', views.event_list),
    path('event/add/', views.add_event),
    path('page/list/', views.page_list),
    path('menu/list/', views.menu_list),
    path('banner/add/', views.add_banner),
    path('banner/list/', views.banner_list),
    # edit/delete routes take the object's pk in the URL
    re_path('banner/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_banner),
    re_path('banner/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_banner),
    re_path('category/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_category),
    re_path('article/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_post),
    re_path('category/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_category),
    re_path('event/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_event),
    re_path('article/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_post),
    re_path('article/change_state/(?P<pk>[a-zA-Z0-9_-]+)/', views.post_state),
    re_path('event/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_event),
    re_path('event/change_state/(?P<pk>[a-zA-Z0-9_-]+)/', views.event_state),
    path('admin_logout/', views.admin_logout),
    # menu management, including ordering (lvl-up / lvl-down)
    path('menu/add/', views.add_menu),
    re_path('menu/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_menu),
    re_path('menu/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_menu),
    re_path('menu/change_state/(?P<pk>[a-zA-Z0-9_-]+)/', views.menu_state),
    re_path('menu/lvl-up/(?P<pk>[a-zA-Z0-9_-]+)/', views.menu_lvl_up),
    re_path('menu/lvl-down/(?P<pk>[a-zA-Z0-9_-]+)/', views.menu_lvl_down),
    # gallery/page image deletion takes both the image pk and the parent pid
    re_path('delete_gal_img/(?P<pk>[a-zA-Z0-9_-]+)/(?P<pid>[a-zA-Z0-9_-]+)',
            views.delete_gal_image),
    re_path('delete_page_imgs/(?P<pk>[a-zA-Z0-9_-]+)/(?P<pid>[a-zA-Z0-9_-]+)',
            views.delete_page_images),
    path('page/add/', views.add_page),
    re_path('page/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_page),
    re_path('page/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_page),
    # AJAX endpoints for the media picker
    path("ajax/photos/upload/", views.upload_photos, name="upload_photos"),
    path("ajax/photos/recent/", views.recent_photos, name="recent_photos"),
    path('change_password/', views.change_password),
    # path("tags/list/", views.tags_list),
    # re_path('tag/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_tag),
]
| MicroPyramid/ngo-cms | admin/urls.py | urls.py | py | 2,514 | python | en | code | 8 | github-code | 36 |
9993622660 | import json
from django.core.management import call_command
from django.core.management.base import BaseCommand
from people.models import Person, Address
class Command(BaseCommand):
    """Management command that flushes the DB and loads sample people data."""

    help = 'Loads sample data into the database'

    def handle(self, *args, **options):
        # Clear the database (destructive: removes all existing rows).
        call_command('flush', '--noinput')

        # Explicit encoding avoids platform-dependent defaults when the
        # sample data contains non-ASCII characters.
        with open('sample_data.json', encoding='utf-8') as f:
            people_data = json.load(f)

        # Each record embeds its address; create the Address row first and
        # link the Person to it.
        for person_data in people_data:
            address_data = person_data.pop('address')
            address = Address.objects.create(**address_data)
            Person.objects.create(address=address, **person_data)

        self.stdout.write(self.style.SUCCESS('Successfully loaded sample data'))
| finlay422/challenge_project | people/management/commands/load_sample_data.py | load_sample_data.py | py | 734 | python | en | code | 0 | github-code | 36 |
37635088720 | # Given an integer n, count the total number of digit 1 appearing in all non-negative integers less than or equal to n.
# Example 1:
# Input: n = 13
# Output: 6
# Example 2:
# Input: n = 0
# Output: 0
# Constraints:
# 0 <= n <= 2 * 109
class Solution:
    def countDigitOne(self, n: int) -> int:
        """Count the digit 1 in all non-negative integers <= n.

        For each decimal position (ones, tens, hundreds, ...) the number of
        1s that position contributes over 0..n depends on the digits above,
        at, and below it.  Runs in O(log10 n) time, O(1) space.

        The previous implementation's inline comments had been corrupted
        into multi-line garbage (a syntax error); this is a clean rewrite.
        """
        if n <= 0:
            return 0
        total = 0
        place = 1  # weight of the current digit position: 1, 10, 100, ...
        while place <= n:
            higher = n // (place * 10)   # digits above the position
            current = (n // place) % 10  # digit at the position
            lower = n % place            # value of digits below the position
            if current == 0:
                # The position runs through 0..9 exactly `higher` full times.
                total += higher * place
            elif current == 1:
                # `higher` full cycles plus the partial run 0..lower.
                total += higher * place + lower + 1
            else:
                # current > 1: one extra complete block of `place` ones.
                total += (higher + 1) * place
            place *= 10
        return total
# 0 -9 1
# 0 -99 20
# 0 -999 300
# 0 -9999 4000 | sunnyyeti/Leetcode-solutions | 233 Number of Digit One.py | 233 Number of Digit One.py | py | 1,146 | python | zh | code | 0 | github-code | 36 |
35648526405 | from src.knowledge_graph import KGEntity, KGProperty
from .kgqa_dataset import KGQADataSet
from .kgqa_data import KGQAData
from typing import List
import logging
import json
class Mintaka(KGQADataSet):
    """Loader for the Mintaka KGQA dataset (JSON export)."""

    def load(self, path: str) -> List[KGQAData]:
        """Parse the Mintaka JSON file at ``path`` into KGQAData records.

        Questions without answer data are skipped.  Boolean answers are
        expanded to both surface forms (True/Yes or False/No) so either
        matches during evaluation; dict answers use the English label.
        """
        datasets: List[KGQAData] = []
        with open(path, encoding='utf-8') as f:
            json_dict = json.load(f)
        for mintaka_data in json_dict:
            question_id = mintaka_data["id"]
            raw_question = mintaka_data["question"]
            answer_data = mintaka_data["answer"]["answer"]
            # Skip questions with no answers at all.
            if not answer_data:
                continue
            answers = []
            for answer in answer_data:
                if isinstance(answer, dict):
                    # Entity answer: use its English label.
                    answers.append(KGEntity(answer["label"]["en"]))
                elif isinstance(answer, bool):
                    # Expand booleans to both textual surface forms.
                    if answer:
                        answers.append(KGEntity("True"))
                        answers.append(KGEntity("Yes"))
                    else:
                        answers.append(KGEntity("False"))
                        answers.append(KGEntity("No"))
                else:
                    # Literal answer (number, string, ...).
                    answers.append(KGEntity(str(answer)))
            datasets.append(KGQAData(question_id, raw_question, answers))
        logging.info(f"number of parsed questions: {len(datasets)}")
        return datasets
22565135811 | #coding:utf-8
from weixin import WXAPPAPI

# Demo: exchange a WeChat mini-program login code for a session key and
# decrypt the user's profile.  (The original Chinese comments had been
# mangled into non-comment lines, which was a syntax error; restored here
# in English.  APP_ID/APP_SECRET/code/WXBizDataCrypt/encrypted_data/iv are
# expected to be defined by the caller.)
api = WXAPPAPI(appid=APP_ID,
               app_secret=APP_SECRET)
session_info = api.exchange_code_for_session_key(code=code)
# After obtaining session_info, extract the session key.
session_key = session_info.get('session_key')
crypt = WXBizDataCrypt(WXAPP_APPID, session_key)
# encrypted_data is the encrypted payload containing the full user profile;
# iv is the cipher's initialisation vector.
# Both values must be obtained from the JS (mini-program) side.
user_info = crypt.decrypt(encrypted_data, iv)
| sun5411/myPython | python-weixin-master/my_test.py | my_test.py | py | 485 | python | zh | code | 0 | github-code | 36 |
7182820045 | #!/usr/bin/env python3
"""A denselayer dense block in tensorflow keras"""
import tensorflow.keras as K
def dense_block(X, nb_filters, growth_rate, layers):
    """Build a DenseNet dense block.

    X is the output of the previous layer, nb_filters the number of filters
    entering the block, growth_rate the number of filters each layer adds,
    and layers the number of bottleneck layers to stack.  Returns the
    concatenated output tensor and the updated filter count.
    """
    he_init = K.initializers.he_normal()
    concat = X
    for _ in range(layers):
        # Bottleneck: BN -> ReLU -> 1x1 conv producing 4 * growth_rate maps.
        norm1 = K.layers.BatchNormalization()(concat)
        act1 = K.layers.Activation("relu")(norm1)
        bottleneck = K.layers.Conv2D(filters=growth_rate * 4,
                                     kernel_size=(1, 1),
                                     padding="same",
                                     kernel_initializer=he_init)(act1)
        # Composite: BN -> ReLU -> 3x3 conv producing growth_rate maps.
        norm2 = K.layers.BatchNormalization()(bottleneck)
        act2 = K.layers.Activation("relu")(norm2)
        new_maps = K.layers.Conv2D(filters=growth_rate,
                                   kernel_size=(3, 3),
                                   padding="same",
                                   kernel_initializer=he_init)(act2)
        # Dense connectivity: every layer sees all previous feature maps.
        concat = K.layers.concatenate([concat, new_maps])
    # Each of the `layers` iterations adds growth_rate feature maps.
    return concat, nb_filters + layers * growth_rate
| JohnCook17/holbertonschool-machine_learning | supervised_learning/0x08-deep_cnns/5-dense_block.py | 5-dense_block.py | py | 1,357 | python | en | code | 3 | github-code | 36 |
27040895007 | import argparse
import auxil.mydata as mydata
import auxil.mymetrics as mymetrics
import gc
import tensorflow as tf
import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras.losses import categorical_crossentropy
from keras.layers import *
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import regularizers
from keras.models import Model
from keras.utils import to_categorical as keras_to_categorical
import numpy as np
import sys
class AttentionBlock(Layer):
    """Custom Keras layer: squeeze a 3-D feature volume down to a channel
    attention vector (3D->2D->1D conv + GAP + dense softmax), expand it back
    with transposed convs, and multiply it element-wise with the input.

    NOTE(review): relies on the private ``_keras_shape`` attribute of
    multi-backend Keras tensors -- this does not exist under tf.keras;
    confirm the Keras version in use.
    """
    def __init__(self, filters):
        super(AttentionBlock, self).__init__()
        # number of filters for every conv in the block
        self.filters = filters
        #self.init = RandomNormal()

    def call(self, x):
        # Squeeze path: 3-D conv, then collapse the last two dims so the
        # volume can be processed by 2-D and 1-D convs.
        conv_3d = Conv3D(filters = self.filters, kernel_size=3, strides = 1, padding = 'same')(x)
        conv_3d_shape = conv_3d._keras_shape
        print(conv_3d_shape)  # debug: shape trace left in by the authors
        conv_3d = Reshape((conv_3d_shape[1], conv_3d_shape[2], conv_3d_shape[3]*conv_3d_shape[4]))(conv_3d)
        conv_2d = Conv2D(filters = self.filters, kernel_size=3, strides = 1, padding = 'same')(conv_3d)
        conv_2d_shape = conv_2d._keras_shape
        print(conv_2d_shape)
        conv_2d = Reshape((conv_2d_shape[1],conv_2d_shape[2]*conv_2d_shape[3]))(conv_2d)
        conv_1d = Conv1D(filters = self.filters, kernel_size=3, strides = 1, padding = 'same')(conv_2d)
        conv_1d_shape = conv_1d._keras_shape
        print(conv_1d_shape)
        # Channel attention: global average pool -> dense -> softmax.
        gap = GlobalAveragePooling1D()(conv_1d)
        fc = Dense(self.filters, use_bias = True)(gap)
        softmax = Activation('softmax')(fc)
        # Expand path: mirror the squeeze with 1-D/2-D/3-D transposed convs.
        reshape_1d = Reshape((1, self.filters))(softmax)
        deconv_1d = Conv1D(filters = self.filters, kernel_size = 3, strides = 1, padding = 'same')(reshape_1d)
        reshape_2d = Reshape((1,1, self.filters))(deconv_1d)
        deconv_2d = Conv2DTranspose(filters = self.filters, kernel_size=3, strides = 1, padding = 'same')(reshape_2d)
        reshape_3d = Reshape((1,1,1, self.filters))(deconv_2d)
        deconv_3d = Conv3DTranspose(filters = self.filters, kernel_size = 3, strides = 1, padding = 'same')(reshape_3d)
        # Apply the attention weights to the input (broadcast multiply).
        x = tf.multiply(deconv_3d, x)
        return x
def set_params(args):
    """Force the fixed training hyper-parameters onto the parsed args."""
    args.batch_size, args.epochs = 64, 200
    return args
def get_model_compiled(shapeinput, num_class, w_decay=0):
    """Build and compile the 3-D CNN with dual attention branches.

    shapeinput is the (H, W, bands) shape of one input cube; num_class the
    number of output classes.  NOTE(review): w_decay is accepted but never
    used in the body -- confirm whether regularization was intended.
    """
    inputs = Input((shapeinput[0],shapeinput[1],shapeinput[2],1))
    filters = [4,4,4,8]
    # Stem: one valid 3x3x5 conv + BN + LeakyReLU.
    x = Conv3D(filters=4,use_bias=False,kernel_size=(3,3,5), padding = 'valid',strides = 1)(inputs)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # Four stages: conv -> two parallel AttentionBlocks summed -> dropout/BN.
    for i in range(4):
        x = Conv3D(filters=filters[i],use_bias=False, kernel_size=(3,3,5),padding = 'valid',strides = 1)(x)
        a1 = AttentionBlock(filters[i])(x)
        #a1 = LeakyReLU()(a1)
        b1 = AttentionBlock(filters[i])(x)
        #b1 = LeakyReLU()(b1)
        x = Add()([a1,b1])
        x = Dropout(0.4)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU()(x)
    # Classifier head: heavy dropout around Flatten, two dense layers.
    x = Dropout(0.85)(x)
    x = Flatten()(x)
    x = Dropout(0.85)(x)
    x = Dense(units=128, use_bias=True)(x)
    x = LeakyReLU()(x)
    x = Dense(units=64, use_bias=True)(x)
    x = LeakyReLU()(x)
    output_layer = Dense(units=num_class, activation='softmax')(x)
    clf = Model(inputs=inputs, outputs=output_layer)
    clf.compile(loss='categorical_crossentropy',
                optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
    return clf
def main():
    """CLI entry point: parse options, load/split the HSI dataset, train the
    model ``args.repeat`` times, and report per-run accuracy statistics."""
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument('--dataset', type=str, required=True,
                        choices=["IP", "UP", "SV", "UH",
                                 "DIP", "DUP", "DIPr", "DUPr"],
                        help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')
    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components', default=None,
                        type=int, help='dimensionality reduction')
    parser.add_argument('--spatialsize', default=9,
                        type=int, help='windows size')
    parser.add_argument('--wdecay', default=0.02, type=float,
                        help='apply penalties on layer parameters')
    parser.add_argument('--preprocess', default="standard",
                        type=str, help='Preprocessing')
    parser.add_argument('--splitmethod', default="sklearn",
                        type=str, help='Method for split datasets')
    parser.add_argument('--random_state', default=42, type=int,
                        help='The seed of the pseudo random number generator to use when shuffling the data')
    parser.add_argument('--tr_percent', default=0.1,
                        type=float, help='samples of train set')
    parser.add_argument('--use_val', action='store_true',
                        help='Use validation set')
    parser.add_argument('--val_percent', default=0.1,
                        type=float, help='samples of val set')
    parser.add_argument(
        '--verbosetrain', action='store_true', help='Verbose train')
    #########################################
    parser.add_argument('--set_parameters', action='store_false',
                        help='Set some optimal parameters')
    ############## CHANGE PARAMS ############
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Number of training examples in one forward/backward pass.')
    parser.add_argument('--epochs', default=100, type=int,
                        help='Number of full training cycle on the training set')
    #########################################
    args = parser.parse_args()
    # NOTE(review): `state` is built but never used below.
    state = {k: v for k, v in args._get_kwargs()}
    if args.set_parameters:
        args = set_params(args)
    # Load the cube and window it into per-pixel patches (zero labels kept).
    pixels, labels, num_class = \
        mydata.loadData(args.dataset, num_components=args.components,
                        preprocessing=args.preprocess)
    pixels, labels = mydata.createImageCubes(
        pixels, labels, windowSize=args.spatialsize, removeZeroLabels=False)
    # One stats row per run; -1000 marks slots that were never filled.
    stats = np.ones((args.repeat, num_class+3)) * -1000.0  # OA, AA, K, Aclass
    for pos in range(args.repeat):
        # Vary the seed per run so repeats use different splits.
        rstate = args.random_state+pos if args.random_state != None else None
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            # These datasets ship with a fixed train/test split.
            x_train, x_test, y_train, y_test = \
                mydata.load_split_data_fix(
                    args.dataset, pixels)  # , rand_state=args.random_state+pos)
        else:
            # Drop unlabeled pixels, shift labels to be zero-based, and split.
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            x_train, x_test, y_train, y_test = \
                mydata.split_data(
                    pixels, labels, args.tr_percent, rand_state=rstate)
        if args.use_val:
            x_val, x_test, y_val, y_test = \
                mydata.split_data(
                    x_test, y_test, args.val_percent, rand_state=rstate)
        inputshape = x_train.shape[1:]
        clf = get_model_compiled(inputshape, num_class, w_decay=args.wdecay)
        # Validate against the val split if requested, else the test split.
        valdata = (x_val, keras_to_categorical(y_val, num_class)) if args.use_val else (
            x_test, keras_to_categorical(y_test, num_class))
        clf.fit(x_train, keras_to_categorical(y_train, num_class),
                batch_size=args.batch_size,
                epochs=args.epochs,
                verbose=args.verbosetrain,
                validation_data=valdata,
                callbacks=[ModelCheckpoint("/tmp/best_model.h5", monitor='val_accuracy', verbose=0, save_best_only=True)])
        # Restore the best checkpoint and re-compile before evaluation.
        clf.load_weights("/tmp/best_model.h5")
        clf.compile(loss='categorical_crossentropy',
                    optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
        print("PARAMETERS", clf.count_params())
        stats[pos, :] = mymetrics.reports(
            np.argmax(clf.predict(x_test), axis=1), y_test)[2]
    print(args.dataset, list(stats[-1]))


if __name__ == '__main__':
    main()
| deeplearning2020/comparison | algorithms/proposed.py | proposed.py | py | 7,971 | python | en | code | 0 | github-code | 36 |
8445188718 |
import operator
import cupy
from cupy._core import internal
from cupy._core._scalar import get_typename
from cupyx.scipy.sparse import csr_matrix
import numpy as np
TYPES = ['double', 'thrust::complex<double>']
INT_TYPES = ['int', 'long long']
INTERVAL_KERNEL = r'''
#include <cupy/complex.cuh>
extern "C" {
__global__ void find_interval(
const double* t, const double* x, long long* out,
int k, int n, bool extrapolate, int total_x) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= total_x) {
return;
}
double xp = *&x[idx];
double tb = *&t[k];
double te = *&t[n];
if(isnan(xp)) {
out[idx] = -1;
return;
}
if((xp < tb || xp > te) && !extrapolate) {
out[idx] = -1;
return;
}
int left = k;
int right = n;
int mid;
bool found = false;
while(left < right && !found) {
mid = ((right + left) / 2);
if(xp > *&t[mid]) {
left = mid + 1;
} else if (xp < *&t[mid]) {
right = mid - 1;
} else {
found = true;
}
}
int default_value = left - 1 < k ? k : left - 1;
int result = found ? mid + 1 : default_value + 1;
while(xp >= *&t[result] && result != n) {
result++;
}
out[idx] = result - 1;
}
}
'''
INTERVAL_MODULE = cupy.RawModule(
code=INTERVAL_KERNEL, options=('-std=c++11',),)
# name_expressions=[f'find_interval<{type_name}>' for type_name in TYPES])
D_BOOR_KERNEL = r'''
#include <cupy/complex.cuh>
#include <cupy/math_constants.h>
#define COMPUTE_LINEAR 0x1
template<typename T>
__global__ void d_boor(
const double* t, const T* c, const int k, const int mu,
const double* x, const long long* intervals, T* out,
double* temp, int num_c, int mode, int num_x) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= num_x) {
return;
}
double xp = *&x[idx];
long long interval = *&intervals[idx];
double* h = temp + idx * (2 * k + 1);
double* hh = h + k + 1;
int ind, j, n;
double xa, xb, w;
if(mode == COMPUTE_LINEAR && interval < 0) {
for(j = 0; j < num_c; j++) {
out[num_c * idx + j] = CUDART_NAN;
}
return;
}
/*
* Perform k-m "standard" deBoor iterations
* so that h contains the k+1 non-zero values of beta_{ell,k-m}(x)
* needed to calculate the remaining derivatives.
*/
h[0] = 1.0;
for (j = 1; j <= k - mu; j++) {
for(int p = 0; p < j; p++) {
hh[p] = h[p];
}
h[0] = 0.0;
for (n = 1; n <= j; n++) {
ind = interval + n;
xb = t[ind];
xa = t[ind - j];
if (xb == xa) {
h[n] = 0.0;
continue;
}
w = hh[n - 1]/(xb - xa);
h[n - 1] += w*(xb - xp);
h[n] = w*(xp - xa);
}
}
/*
* Now do m "derivative" recursions
* to convert the values of beta into the mth derivative
*/
for (j = k - mu + 1; j <= k; j++) {
for(int p = 0; p < j; p++) {
hh[p] = h[p];
}
h[0] = 0.0;
for (n = 1; n <= j; n++) {
ind = interval + n;
xb = t[ind];
xa = t[ind - j];
if (xb == xa) {
h[mu] = 0.0;
continue;
}
w = ((double) j) * hh[n - 1]/(xb - xa);
h[n - 1] -= w;
h[n] = w;
}
}
if(mode != COMPUTE_LINEAR) {
return;
}
// Compute linear combinations
for(j = 0; j < num_c; j++) {
out[num_c * idx + j] = 0;
for(n = 0; n < k + 1; n++) {
out[num_c * idx + j] = (
out[num_c * idx + j] +
c[(interval + n - k) * num_c + j] * ((T) h[n]));
}
}
}
'''
D_BOOR_MODULE = cupy.RawModule(
code=D_BOOR_KERNEL, options=('-std=c++11',),
name_expressions=[f'd_boor<{type_name}>' for type_name in TYPES])
DESIGN_MAT_KERNEL = r'''
#include <cupy/complex.cuh>
template<typename U>
__global__ void compute_design_matrix(
const int k, const long long* intervals, double* bspline_basis,
double* data, U* indices, int num_intervals) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= num_intervals) {
return;
}
long long interval = *&intervals[idx];
double* work = bspline_basis + idx * (2 * k + 1);
for(int j = 0; j <= k; j++) {
int m = (k + 1) * idx + j;
data[m] = work[j];
indices[m] = (U) (interval - k + j);
}
}
'''
DESIGN_MAT_MODULE = cupy.RawModule(
code=DESIGN_MAT_KERNEL, options=('-std=c++11',),
name_expressions=[f'compute_design_matrix<{itype}>'
for itype in INT_TYPES])
def _get_module_func(module, func_name, *template_args):
    """Fetch a kernel from a RawModule, expanding the C++ template name
    from the dtypes of ``template_args`` (complex dtypes map to
    ``thrust::complex``)."""
    def _cxx_typename(dtype):
        name = get_typename(dtype)
        return 'thrust::' + name if dtype.kind == 'c' else name

    if template_args:
        specialization = ', '.join(
            _cxx_typename(arg.dtype) for arg in template_args)
        full_name = f'{func_name}<{specialization}>'
    else:
        full_name = func_name
    return module.get_function(full_name)
def _get_dtype(dtype):
    """Return np.complex128 for complex dtypes, np.float64 otherwise."""
    is_complex = cupy.issubdtype(dtype, cupy.complexfloating)
    return cupy.complex_ if is_complex else cupy.float_
def _as_float_array(x, check_finite=False):
    """Convert the input into a C contiguous float array.

    NB: Upcasts half- and single-precision floats to double precision.
    """
    arr = cupy.ascontiguousarray(x)
    arr = arr.astype(_get_dtype(arr.dtype), copy=False)
    if check_finite and not cupy.isfinite(arr).all():
        raise ValueError("Array must not contain infs or nans.")
    return arr
def _evaluate_spline(t, c, k, xp, nu, extrapolate, out):
    """
    Evaluate a spline in the B-spline basis.

    Parameters
    ----------
    t : ndarray, shape (n+k+1)
        knots
    c : ndarray, shape (n, m)
        B-spline coefficients
    xp : ndarray, shape (s,)
        Points to evaluate the spline at.
    nu : int
        Order of derivative to evaluate.
    extrapolate : int, optional
        Whether to extrapolate to ouf-of-bounds points, or to return NaNs.
    out : ndarray, shape (s, m)
        Computed values of the spline at each of the input points.
        This argument is modified in-place.
    """
    # Number of basis functions / coefficients.
    n = t.shape[0] - k - 1

    intervals = cupy.empty_like(xp, dtype=cupy.int64)

    # Compute intervals for each value
    # (one thread per evaluation point, 128-thread blocks)
    interval_kernel = _get_module_func(INTERVAL_MODULE, 'find_interval')
    interval_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,),
                    (t, xp, intervals, k, n, extrapolate, xp.shape[0]))

    # Compute interpolation
    # num_c: number of trailing coefficient components per basis function;
    # temp: per-point de Boor scratch space of 2k+1 doubles (see kernel).
    num_c = int(np.prod(c.shape[1:]))
    temp = cupy.empty(xp.shape[0] * (2 * k + 1))
    d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', c)
    # mode flag 1 == COMPUTE_LINEAR: also form the linear combinations into `out`.
    d_boor_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,),
                  (t, c, k, nu, xp, intervals, out, temp, num_c, 1,
                   xp.shape[0]))
def _make_design_matrix(x, t, k, extrapolate, indices):
    """
    Returns a design matrix in CSR format.

    Note that only indices is passed, but not indptr because indptr is already
    precomputed in the calling Python function design_matrix.

    Parameters
    ----------
    x : array_like, shape (n,)
        Points to evaluate the spline at.
    t : array_like, shape (nt,)
        Sorted 1D array of knots.
    k : int
        B-spline degree.
    extrapolate : bool, optional
        Whether to extrapolate to ouf-of-bounds points.
    indices : ndarray, shape (n * (k + 1),)
        Preallocated indices of the final CSR array.

    Returns
    -------
    data
        The data array of a CSR array of the b-spline design matrix.
        In each row all the basis elements are evaluated at the certain point
        (first row - x[0], ..., last row - x[-1]).
    indices
        The indices array of a CSR array of the b-spline design matrix.
    """
    # Number of basis functions for this knot vector/degree.
    n = t.shape[0] - k - 1

    intervals = cupy.empty_like(x, dtype=cupy.int64)

    # Compute intervals for each value
    interval_kernel = _get_module_func(INTERVAL_MODULE, 'find_interval')
    interval_kernel(((x.shape[0] + 128 - 1) // 128,), (128,),
                    (t, x, intervals, k, n, extrapolate, x.shape[0]))

    # Compute interpolation
    # mode 0: fill only the per-point basis values (2k+1 doubles each),
    # without forming linear combinations (c/out passed as None).
    bspline_basis = cupy.empty(x.shape[0] * (2 * k + 1))
    d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', x)
    d_boor_kernel(((x.shape[0] + 128 - 1) // 128,), (128,),
                  (t, None, k, 0, x, intervals, None, bspline_basis, 0, 0,
                   x.shape[0]))

    # Scatter the k+1 nonzero basis values per point into the CSR data array
    # and fill the matching column indices.
    data = cupy.zeros(x.shape[0] * (k + 1), dtype=cupy.float_)
    design_mat_kernel = _get_module_func(
        DESIGN_MAT_MODULE, 'compute_design_matrix', indices)
    design_mat_kernel(((x.shape[0] + 128 - 1) // 128,), (128,),
                      (k, intervals, bspline_basis, data, indices,
                       x.shape[0]))
    return data, indices
def splder(tck, n=1):
    """
    Compute the spline representation of the derivative of a given spline

    Parameters
    ----------
    tck : tuple of (t, c, k)
        Spline whose derivative to compute
    n : int, optional
        Order of derivative to evaluate. Default: 1

    Returns
    -------
    tck_der : tuple of (t2, c2, k2)
        Spline of order k2=k-n representing the derivative
        of the input spline.

    Notes
    -----
    .. seealso:: :class:`scipy.interpolate.splder`

    See Also
    --------
    splantider, splev, spalde
    """
    # A negative derivative order is an antiderivative.
    if n < 0:
        return splantider(tck, -n)

    t, c, k = tck

    if n > k:
        raise ValueError(("Order of derivative (n = %r) must be <= "
                          "order of spline (k = %r)") % (n, tck[2]))

    # Extra axes for the trailing dims of the `c` array:
    sh = (slice(None),) + ((None,)*len(c.shape[1:]))

    try:
        for j in range(n):
            # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5

            # Compute the denominator in the differentiation formula.
            # (and append traling dims, if necessary)
            dt = t[k+1:-1] - t[1:-k-1]
            dt = dt[sh]
            # Compute the new coefficients
            c = (c[1:-1-k] - c[:-2-k]) * k / dt
            # Pad coefficient array to same size as knots (FITPACK
            # convention)
            # NOTE(review): np.zeros (host array) is concatenated via
            # cupy.r_ here -- confirm this is intended rather than
            # cupy.zeros (implies a host->device transfer at best).
            c = cupy.r_[c, np.zeros((k,) + c.shape[1:])]
            # Adjust knots
            t = t[1:-1]
            k -= 1
    except FloatingPointError as e:
        # Repeated internal knots make `dt` zero, which surfaces here.
        raise ValueError(("The spline has internal repeated knots "
                          "and is not differentiable %d times") % n) from e

    return t, c, k
def splantider(tck, n=1):
    """
    Compute the spline for the antiderivative (integral) of a given spline.

    Parameters
    ----------
    tck : tuple of (t, c, k)
        Spline whose antiderivative to compute
    n : int, optional
        Order of antiderivative to evaluate. Default: 1

    Returns
    -------
    tck_ader : tuple of (t2, c2, k2)
        Spline of order k2=k+n representing the antiderivative of the input
        spline.

    See Also
    --------
    splder, splev, spalde

    Notes
    -----
    The `splder` function is the inverse operation of this function.
    Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
    rounding error.

    .. seealso:: :class:`scipy.interpolate.splantider`
    """
    # A negative antiderivative order is a derivative.
    if n < 0:
        return splder(tck, -n)

    t, c, k = tck

    # Extra axes for the trailing dims of the `c` array:
    sh = (slice(None),) + (None,)*len(c.shape[1:])

    for j in range(n):
        # This is the inverse set of operations to splder.

        # Compute the multiplier in the antiderivative formula.
        dt = t[k+1:] - t[:-k-1]
        dt = dt[sh]
        # Compute the new coefficients
        c = cupy.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
        # Pad coefficients to match the extended knot vector (leading zero,
        # repeated last value per FITPACK convention).
        c = cupy.r_[cupy.zeros((1,) + c.shape[1:]),
                    c, [c[-1]] * (k+2)]
        # New knots
        t = cupy.r_[t[0], t, t[-1]]
        k += 1

    return t, c, k
class BSpline:
r"""Univariate spline in the B-spline basis.
.. math::
S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x)
where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
and knots `t`.
Parameters
----------
t : ndarray, shape (n+k+1,)
knots
c : ndarray, shape (>=n, ...)
spline coefficients
k : int
B-spline degree
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
or to return nans.
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
If 'periodic', periodic extrapolation is used.
Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
t : ndarray
knot vector
c : ndarray
spline coefficients
k : int
spline degree
extrapolate : bool
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
axis : int
Interpolation axis.
tck : tuple
A read-only equivalent of ``(self.t, self.c, self.k)``
Notes
-----
B-spline basis elements are defined via
.. math::
B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
+ \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
**Implementation details**
- At least ``k+1`` coefficients are required for a spline of degree `k`,
so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
``j > n``, are ignored.
- B-spline basis elements of degree `k` form a partition of unity on the
*base interval*, ``t[k] <= x <= t[n]``.
- Based on [1]_ and [2]_
.. seealso:: :class:`scipy.interpolate.BSpline`
References
----------
.. [1] Tom Lyche and Knut Morken, Spline methods,
http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
.. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
"""
    def __init__(self, t, c, k, extrapolate=True, axis=0):
        """Validate inputs and store knots, coefficients and degree.

        Raises ValueError for inconsistent knots/coefficients/degree.
        """
        self.k = operator.index(k)          # degree must be an integer
        self.c = cupy.asarray(c)
        # Knots are always stored as contiguous float64.
        self.t = cupy.ascontiguousarray(t, dtype=cupy.float64)

        if extrapolate == 'periodic':
            self.extrapolate = extrapolate
        else:
            self.extrapolate = bool(extrapolate)

        # Number of basis functions implied by the knot vector.
        n = self.t.shape[0] - self.k - 1

        axis = internal._normalize_axis_index(axis, self.c.ndim)

        # Note that the normalized axis is stored in the object.
        self.axis = axis

        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (n, ...),
            # and axis !=0 means that we have c.shape (..., n, ...)
            #                                               ^
            #                                              axis
            self.c = cupy.moveaxis(self.c, axis, 0)

        # --- validation -------------------------------------------------
        if k < 0:
            raise ValueError("Spline order cannot be negative.")
        if self.t.ndim != 1:
            raise ValueError("Knot vector must be one-dimensional.")
        if n < self.k + 1:
            raise ValueError("Need at least %d knots for degree %d" %
                             (2*k + 2, k))
        if (cupy.diff(self.t) < 0).any():
            raise ValueError("Knots must be in a non-decreasing order.")
        if len(cupy.unique(self.t[k:n+1])) < 2:
            raise ValueError("Need at least two internal knots.")
        if not cupy.isfinite(self.t).all():
            raise ValueError("Knots should not have nans or infs.")
        if self.c.ndim < 1:
            raise ValueError("Coefficients must be at least 1-dimensional.")
        if self.c.shape[0] < n:
            raise ValueError(
                "Knots, coefficients and degree are inconsistent.")

        # Coefficients are stored contiguously as float64 or complex128.
        dt = _get_dtype(self.c.dtype)
        self.c = cupy.ascontiguousarray(self.c, dtype=dt)
    @classmethod
    def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
        """Construct a spline without making checks.

        Accepts same parameters as the regular constructor. Input arrays
        `t` and `c` must of correct shape and dtype.
        """
        # Bypasses __init__ (and all of its validation) entirely.
        self = object.__new__(cls)
        self.t, self.c, self.k = t, c, k
        self.extrapolate = extrapolate
        self.axis = axis
        return self
    @property
    def tck(self):
        """Equivalent to ``(self.t, self.c, self.k)`` (read-only).

        Returns the knot/coefficient/degree tuple in the order expected by
        the ``splder``/``splantider`` style routines used in this module.
        """
        return self.t, self.c, self.k
@classmethod
def basis_element(cls, t, extrapolate=True):
"""Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
Parameters
----------
t : ndarray, shape (k+2,)
internal knots
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval,
``t[0] .. t[k+1]``, or to return nans.
If 'periodic', periodic extrapolation is used.
Default is True.
Returns
-------
basis_element : callable
A callable representing a B-spline basis element for the knot
vector `t`.
Notes
-----
The degree of the B-spline, `k`, is inferred from the length of `t` as
``len(t)-2``. The knot vector is constructed by appending and
prepending ``k+1`` elements to internal knots `t`.
.. seealso:: :class:`scipy.interpolate.BSpline`
"""
k = len(t) - 2
t = _as_float_array(t)
t = cupy.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
c = cupy.zeros_like(t)
c[k] = 1.
return cls.construct_fast(t, c, k, extrapolate)
    @classmethod
    def design_matrix(cls, x, t, k, extrapolate=False):
        """
        Returns a design matrix as a CSR format sparse array.

        Parameters
        ----------
        x : array_like, shape (n,)
            Points to evaluate the spline at.
        t : array_like, shape (nt,)
            Sorted 1D array of knots.
        k : int
            B-spline degree.
        extrapolate : bool or 'periodic', optional
            Whether to extrapolate based on the first and last intervals
            or raise an error. If 'periodic', periodic extrapolation is used.
            Default is False.

        Returns
        -------
        design_matrix : `csr_matrix` object
            Sparse matrix in CSR format where each row contains all the basis
            elements of the input row (first row = basis elements of x[0],
            ..., last row = basis elements x[-1]).

        Notes
        -----
        In each row of the design matrix all the basis elements are evaluated
        at the certain point (first row - x[0], ..., last row - x[-1]).
        `nt` is a length of the vector of knots: as far as there are
        `nt - k - 1` basis elements, `nt` should be not less than `2 * k + 2`
        to have at least `k + 1` basis element.

        Out of bounds `x` raises a ValueError.

        .. note::
            This method returns a `csr_matrix` instance as CuPy still does not
            have `csr_array`.

        .. seealso:: :class:`scipy.interpolate.BSpline`
        """
        x = _as_float_array(x, True)
        t = _as_float_array(t, True)
        if extrapolate != 'periodic':
            extrapolate = bool(extrapolate)
        if k < 0:
            raise ValueError("Spline order cannot be negative.")
        if t.ndim != 1 or np.any(t[1:] < t[:-1]):
            raise ValueError(f"Expect t to be a 1-D sorted array_like, but "
                             f"got t={t}.")
        # There are `nt - k - 1` basis elements in a BSpline built on the
        # vector of knots with length `nt`, so to have at least `k + 1` basis
        # elements we need to have at least `2 * k + 2` elements in the vector
        # of knots.
        if len(t) < 2 * k + 2:
            raise ValueError(f"Length t is not enough for k={k}.")
        if extrapolate == 'periodic':
            # With periodic extrapolation we map x to the segment
            # [t[k], t[n]].
            n = t.size - k - 1
            x = t[k] + (x - t[k]) % (t[n] - t[k])
            extrapolate = False
        elif not extrapolate and (
                (min(x) < t[k]) or (max(x) > t[t.shape[0] - k - 1])
        ):
            # Checks from `find_interval` function
            raise ValueError(f'Out of bounds w/ x = {x}.')
        # Compute number of non-zeros of final CSR array in order to determine
        # the dtype of indices and indptr of the CSR array.
        n = x.shape[0]
        nnz = n * (k + 1)
        if nnz < cupy.iinfo(cupy.int32).max:
            int_dtype = cupy.int32
        else:
            int_dtype = cupy.int64
        # Preallocate indptr and indices
        indices = cupy.empty(n * (k + 1), dtype=int_dtype)
        # Every row has exactly k + 1 non-zeros, so indptr is a simple
        # arithmetic progression and need not be computed on the device.
        indptr = cupy.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype)
        # indptr is not passed to CUDA as it is already fully computed
        data, indices = _make_design_matrix(
            x, t, k, extrapolate, indices
        )
        return csr_matrix(
            (data, indices, indptr),
            shape=(x.shape[0], t.shape[0] - k - 1)
        )
    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate a spline function.

        Parameters
        ----------
        x : array_like
            points to evaluate the spline at.
        nu : int, optional
            derivative to evaluate (default is 0).
        extrapolate : bool or 'periodic', optional
            whether to extrapolate based on the first and last intervals
            or return nans. If 'periodic', periodic extrapolation is used.
            Default is `self.extrapolate`.

        Returns
        -------
        y : array_like
            Shape is determined by replacing the interpolation axis
            in the coefficient array with the shape of `x`.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = cupy.asarray(x)
        # Remember the input shape; evaluation works on the flattened points.
        x_shape, x_ndim = x.shape, x.ndim
        # NOTE(review): cupy.float_ is a deprecated alias of float64 in
        # recent NumPy/CuPy releases -- confirm against the supported
        # versions.
        x = cupy.ascontiguousarray(cupy.ravel(x), dtype=cupy.float_)
        # With periodic extrapolation we map x to the segment
        # [self.t[k], self.t[n]].
        if extrapolate == 'periodic':
            n = self.t.size - self.k - 1
            x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -
                                                         self.t[self.k])
            extrapolate = False
        # 2-D work buffer: one row per point, trailing dims flattened.
        out = cupy.empty(
            (len(x), int(np.prod(self.c.shape[1:]))), dtype=self.c.dtype)
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[1:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            dim_order = list(range(out.ndim))
            dim_order = (
                dim_order[x_ndim:x_ndim+self.axis] +
                dim_order[:x_ndim] +
                dim_order[x_ndim+self.axis:])
            out = out.transpose(dim_order)
        return out
def _ensure_c_contiguous(self):
if not self.t.flags.c_contiguous:
self.t = self.t.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def _evaluate(self, xp, nu, extrapolate, out):
_evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
self.k, xp, nu, extrapolate, out)
def derivative(self, nu=1):
"""
Return a B-spline representing the derivative.
Parameters
----------
nu : int, optional
Derivative order.
Default is 1.
Returns
-------
b : BSpline object
A new instance representing the derivative.
See Also
--------
splder, splantider
"""
c = self.c
# pad the c array if needed
ct = len(self.t) - len(c)
if ct > 0:
c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])]
tck = splder((self.t, c, self.k), nu)
return self.construct_fast(*tck, extrapolate=self.extrapolate,
axis=self.axis)
def antiderivative(self, nu=1):
"""
Return a B-spline representing the antiderivative.
Parameters
----------
nu : int, optional
Antiderivative order. Default is 1.
Returns
-------
b : BSpline object
A new instance representing the antiderivative.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
See Also
--------
splder, splantider
"""
c = self.c
# pad the c array if needed
ct = len(self.t) - len(c)
if ct > 0:
c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])]
tck = splantider((self.t, c, self.k), nu)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(*tck, extrapolate=extrapolate,
axis=self.axis)
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral of the spline.

        Parameters
        ----------
        a : float
            Lower limit of integration.
        b : float
            Upper limit of integration.
        extrapolate : bool or 'periodic', optional
            whether to extrapolate beyond the base interval,
            ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
            base interval. If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        I : array_like
            Definite integral of the spline over the interval ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        # Prepare self.t and self.c.
        self._ensure_c_contiguous()
        # Swap integration bounds if needed.
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1
        n = self.t.size - self.k - 1
        if extrapolate != "periodic" and not extrapolate:
            # Shrink the integration interval, if needed: outside the base
            # interval the spline is taken to be zero.
            a = max(a, self.t[self.k].item())
            b = min(b, self.t[n].item())
            # if self.c.ndim == 1:
            #     # Fast path: use FITPACK's routine
            #     # (cf _fitpack_impl.splint).
            #     integral = splint(a, b, self.tck)
            #     return integral * sign
        # Row 0/row 1 hold the antiderivative at the lower/upper bound.
        out = cupy.empty(
            (2, int(np.prod(self.c.shape[1:]))), dtype=self.c.dtype)
        # Compute the antiderivative.
        c = self.c
        ct = len(self.t) - len(c)
        if ct > 0:
            # splantider expects len(c) == len(t); zero-pad if shorter.
            c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])]
        ta, ca, ka = splantider((self.t, c, self.k), 1)
        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.
            ts, te = self.t[self.k], self.t[n]
            period = te - ts
            interval = b - a
            n_periods, left = divmod(interval, period)
            if n_periods > 0:
                # Evaluate the difference of antiderivatives.
                x = cupy.asarray([ts, te], dtype=cupy.float_)
                _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                 ka, x, 0, False, out)
                integral = out[1] - out[0]
                integral *= n_periods
            else:
                integral = cupy.zeros((1, int(np.prod(self.c.shape[1:]))),
                                      dtype=self.c.dtype)
            # Map a to [ts, te], b is always a + left.
            a = ts + (a - ts) % period
            b = a + left
            # If b <= te then we need to integrate over [a, b], otherwise
            # over [a, te] and from xs to what is remained.
            if b <= te:
                x = cupy.asarray([a, b], dtype=cupy.float_)
                _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                 ka, x, 0, False, out)
                integral += out[1] - out[0]
            else:
                # Wrap-around: [a, te] plus the leftover mapped back to the
                # start of the period.
                x = cupy.asarray([a, te], dtype=cupy.float_)
                _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                 ka, x, 0, False, out)
                integral += out[1] - out[0]
                x = cupy.asarray([ts, ts + b - te], dtype=cupy.float_)
                _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                 ka, x, 0, False, out)
                integral += out[1] - out[0]
        else:
            # Evaluate the difference of antiderivatives.
            x = cupy.asarray([a, b], dtype=cupy.float_)
            _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                             ka, x, 0, extrapolate, out)
            integral = out[1] - out[0]
        integral *= sign
        return integral.reshape(ca.shape[1:])
| cupy/cupy | cupyx/scipy/interpolate/_bspline.py | _bspline.py | py | 29,962 | python | en | code | 7,341 | github-code | 36 |
from flask import Flask, app
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
# init SQLAlchemy so we can use it later in our models
db = SQLAlchemy()
def create_app():
    """Application factory: build and configure the Flask application.

    Initializes SQLAlchemy and flask-login, registers the auth blueprint
    and installs the user loader. Returns the configured app instance.
    """
    application = Flask(__name__)
    # NOTE(review): secret key is hard-coded; for production it should come
    # from configuration / environment instead of source control.
    application.config['SECRET_KEY'] = '9OLWxND4o83j4K4iuopO'
    application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
    application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(application)
    login_manager = LoginManager()
    # Anonymous users are redirected to the auth blueprint's login view.
    login_manager.login_view = 'auth.login'
    login_manager.init_app(application)
    # Imported here (not at module level) to avoid circular imports with
    # modules that themselves import ``db`` from this package.
    from .models import User
    # blueprint for auth routes in our app
    from . import auth
    application.register_blueprint(auth.bp)
    @login_manager.user_loader
    def load_user(id):
        # since the user_id is just the primary key of our user table, use it in the query for the user
        return User.query.get(id)
    return application
from django.db import IntegrityError
from django.utils.timezone import make_aware
from datetime import datetime
import logging
from .utils import get_extras
class DatabaseHandler(logging.Handler):
    """
    A log handler to store logs into the database.
    Currently, only log entries that belong to an event are stored in the database.
    All other log entries are available in the log files / via syslog.
    """
    def __init__(self, *args, **kwargs):
        # Lazily-resolved LogEntry model class; see emit() for why it cannot
        # be imported at construction time.
        self._logentry_model = None
        super(DatabaseHandler, self).__init__(*args, **kwargs)
    def emit(self, record):
        """Persist ``record`` as a LogEntry row, if it belongs to an event.

        Records without an ``event`` attribute are ignored (they still reach
        the file/syslog handlers). Deleted events/helpers are tolerated.
        """
        # the handler is initialized by django before the database setup, so the import would fail
        # therefore, we do it here dynamically when necessary - but only once
        if not self._logentry_model:
            from .models import LogEntry
            self._logentry_model = LogEntry
        # get the event, helper and user if they are stored in the entry
        event = record.event if hasattr(record, "event") else None
        if not event:
            return
        helper = record.helper if hasattr(record, "helper") else None
        user = record.user if hasattr(record, "user") else None
        # create the entry
        entry = self._logentry_model(
            timestamp=make_aware(datetime.fromtimestamp(record.created)),
            level=record.levelname,
            message=record.getMessage(),
            event=event,
            helper=helper,
            user=user,
            extras=get_extras(record),
            module=record.name,
        )
        try:
            entry.save()
        except ValueError:
            # if the event is deleted, we cannot save. we only store logs for existing events,
            # so we can discard this event (deletions are still logged via syslog / in files if container is used)
            pass
        except IntegrityError:
            # if a helper is deleted, the helper object is still there while we prepare the entry.
            # on save, the helper may already be deleted, so we have a foreign key error.
            entry.helper = None
            entry.save()
| helfertool/helfertool | src/toollog/handlers.py | handlers.py | py | 2,156 | python | en | code | 52 | github-code | 36 |
"""
Support module for PyWikipediaBot regression tests.
"""
__version__ = '$Id: 7895f03ac2688d7155e5e94da60e51af65ee9b11 $'
import sys
# Add current directory and parent directory to module search path.
# Parent directory first, then the current directory (insert(0, ...) puts
# '.' ahead of '..' in the final search order).
sys.path.insert(0, '..')
sys.path.insert(0, '.')
# Drop the name so it does not leak via ``from test_utils import *``.
del sys
| SirComputer1/SCBot | tests/test_utils.py | test_utils.py | py | 263 | python | en | code | 1 | github-code | 36 |
from rubrix.server.apis.v0.models.commons.model import BulkResponse
from rubrix.server.apis.v0.models.text2text import (
Text2TextBulkRequest,
Text2TextRecordInputs,
Text2TextSearchResults,
)
def test_search_records(mocked_client):
    """End-to-end check of the Text2Text bulk upload and search endpoints.

    Uploads two records (one with a prediction and metadata, one plain with
    non-ASCII text) and verifies the bulk response plus the search
    aggregations.
    """
    dataset = "test_search_records"
    delete_dataset(dataset, mocked_client)
    records = [
        Text2TextRecordInputs.parse_obj(data)
        for data in [
            {
                "id": 0,
                "text": "This is a text data",
                "metadata": {
                    "field_one": "value one",
                },
                "prediction": {
                    "agent": "test",
                    "sentences": [{"text": "This is a test data", "score": 0.6}],
                },
            },
            {
                "id": 1,
                # Non-ASCII text: the original literal was mojibake-corrupted
                # and split across lines; restored per the expected
                # "ånother" token in the words aggregation below.
                "text": "Ånother data",
            },
        ]
    ]
    response = mocked_client.post(
        f"/api/datasets/{dataset}/Text2Text:bulk",
        json=Text2TextBulkRequest(
            tags={"env": "test", "class": "text classification"},
            metadata={"config": {"the": "config"}},
            records=records,
        ).dict(by_alias=True),
    )
    assert response.status_code == 200, response.json()
    bulk_response = BulkResponse.parse_obj(response.json())
    assert bulk_response.dataset == dataset
    assert bulk_response.failed == 0
    assert bulk_response.processed == 2
    response = mocked_client.post(f"/api/datasets/{dataset}/Text2Text:search", json={})
    assert response.status_code == 200, response.json()
    results = Text2TextSearchResults.parse_obj(response.json())
    assert results.total == 2
    assert results.records[0].predicted is None
    assert results.aggregations.dict(exclude={"score"}) == {
        "annotated_as": {},
        "annotated_by": {},
        "annotated_text": {},
        "metadata": {"field_one": {"value one": 1}},
        "predicted": {},
        "predicted_as": {},
        "predicted_by": {"test": 1},
        "predicted_text": {},
        "status": {"Default": 2},
        "words": {"data": 2, "ånother": 1},
    }
def test_api_with_new_predictions_data_model(mocked_client):
    """Exercise the per-agent ``predictions``/``annotations`` record fields.

    Uploads one record with a named prediction agent and one with two named
    annotators, then checks both are searchable via field-scoped queries.
    """
    dataset = "test_api_with_new_predictions_data_model"
    delete_dataset(dataset, mocked_client)
    records = [
        Text2TextRecordInputs.parse_obj(
            {
                "text": "This is a text data",
                "predictions": {
                    "test": {
                        "sentences": [{"text": "This is a test data", "score": 0.6}]
                    },
                },
            }
        ),
        Text2TextRecordInputs.parse_obj(
            {
                "text": "Another data",
                "annotations": {
                    "annotator-1": {"sentences": [{"text": "THis is a test data"}]},
                    "annotator-2": {"sentences": [{"text": "This IS the test datay"}]},
                },
            }
        ),
    ]
    response = mocked_client.post(
        f"/api/datasets/{dataset}/Text2Text:bulk",
        json=Text2TextBulkRequest(
            records=records,
        ).dict(by_alias=True),
    )
    assert response.status_code == 200, response.json()
    bulk_response = BulkResponse.parse_obj(response.json())
    assert bulk_response.dataset == dataset
    assert bulk_response.failed == 0
    assert bulk_response.processed == 2
    # Query against a specific prediction agent's sentence text.
    response = mocked_client.post(
        f"/api/datasets/{dataset}/Text2Text:search",
        json={"query": {"query_text": "predictions.test.sentences.text.exact:data"}},
    )
    assert response.status_code == 200, response.json()
    results = Text2TextSearchResults.parse_obj(response.json())
    assert results.total == 1, results
    # Query for records annotated by a specific annotator.
    response = mocked_client.post(
        f"/api/datasets/{dataset}/Text2Text:search",
        json={"query": {"query_text": "_exists_:annotations.annotator-1"}},
    )
    assert response.status_code == 200, response.json()
    results = Text2TextSearchResults.parse_obj(response.json())
    assert results.total == 1, results
def delete_dataset(dataset, mocked_client):
    """Delete *dataset* through the API and assert the call succeeded."""
    response = mocked_client.delete(f"/api/datasets/{dataset}")
    assert response.status_code == 200
import IECore
import IECoreScene
import Gaffer
import GafferScene
import GafferImage
# Add standard cycles AOVs
with IECore.IgnoredExceptions( ImportError ) :
# If cycles isn't available for any reason, this will fail
# and we won't add any unnecessary output definitions.
import GafferCycles
lightPasses = [
"emission",
"background",
"ao",
"shadow",
"diffuse_direct",
"diffuse_indirect",
"glossy_direct",
"glossy_indirect",
"transmission",
"transmission_direct",
"transmission_indirect",
"volume_direct",
"volume_indirect",
"lightgroup",
]
dataPasses = [
"depth",
"position",
"normal",
"roughness",
"uv",
"object_id",
"material_id",
"motion",
"motion_weight",
"render_time",
"cryptomatte_asset",
"cryptomatte_object",
"cryptomatte_material",
"aov_color",
"aov_value",
"adaptive_aux_buffer",
"sample_count",
"diffuse_color",
"glossy_color",
"transmission_color",
"mist",
"denoising_normal",
"denoising_albedo",
"shadow_catcher",
"shadow_catcher_sample_count",
"shadow_catcher_matte",
"bake_primitive",
"bake_differential",
]
	def __registerOutputs( aovs, halfFloat = False, denoise = False ) :

		# Register an interactive (display driver) and a batch (EXR) output
		# for every AOV name in `aovs`. When `denoise` is True, additional
		# "_Denoised" variants are registered for each AOV.
		for aov in aovs :

			label = aov.replace( "_", " " ).title().replace( " ", "_" )
			data = aov

			interactiveOutput = {
				"driverType" : "ClientDisplayDriver",
				"displayHost" : "localhost",
				"displayPort" : "${image:catalogue:port}",
				"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
				"quantize" : IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
			}
			batchOutput = {
				"quantize" : IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
				"halfFloat" : halfFloat
			}

			if data == "lightgroup":
				# Light groups require Cycles to be built with light group
				# support; skip the pass entirely otherwise.
				if not GafferCycles.withLightGroups :
					continue
				data = "lg lightgroup"
				label = "Light_Group"

			# Cycles expects a type prefix for custom AOVs, and space-separated
			# tokens for the cryptomatte layers.
			if data == "aov_color" :
				data = "aovc aov_color"
			if data == "aov_value" :
				data = "aovv aov_value"
			if data.startswith( "cryptomatte" ) :
				data = data.replace( "_", " " )

			GafferScene.Outputs.registerOutput(
				"Interactive/Cycles/" + label,
				IECoreScene.Output(
					aov,
					"ieDisplay",
					data,
					interactiveOutput
				)
			)

			GafferScene.Outputs.registerOutput(
				"Batch/Cycles/" + label,
				IECoreScene.Output(
					"${project:rootDirectory}/renders/${script:name}/%s/%s.####.exr" % ( aov, aov ),
					"exr",
					data,
					batchOutput
				)
			)

			if denoise:
				interactiveOutput["denoise"] = True
				batchOutput["denoise"] = True

				# Denoised variants
				GafferScene.Outputs.registerOutput(
					"Interactive/Cycles/" + label + "_Denoised",
					IECoreScene.Output(
						aov + "_denoised",
						"ieDisplay",
						data,
						interactiveOutput
					)
				)

				GafferScene.Outputs.registerOutput(
					"Batch/Cycles/" + label + "_Denoised",
					IECoreScene.Output(
						"${project:rootDirectory}/renders/${script:name}/%s/%s_denoised.####.exr" % ( aov, aov ),
						"exr",
						data,
						batchOutput
					)
				)
GafferScene.Outputs.registerOutput(
"Interactive/Cycles/Beauty_Denoised",
IECoreScene.Output(
"beauty_denoised",
"ieDisplay",
"rgba",
{
"driverType" : "ClientDisplayDriver",
"displayHost" : "localhost",
"displayPort" : "${image:catalogue:port}",
"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
"quantize" : IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
"denoise" : True
}
)
)
GafferScene.Outputs.registerOutput(
"Batch/Cycles/Beauty_Denoised",
IECoreScene.Output(
"${project:rootDirectory}/renders/${script:name}/beauty/beauty_denoised.####.exr",
"exr",
"rgba",
{
"quantize" : IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
"denoise" : True,
"halfFloat" : True
}
)
)
__registerOutputs( lightPasses, True )
__registerOutputs( dataPasses )
| boberfly/GafferCycles | startup/gui/outputs.py | outputs.py | py | 3,815 | python | en | code | 81 | github-code | 36 |
import pprint
import threading
from typing import Dict, TYPE_CHECKING
from PySide2.QtWidgets import QTabWidget, QTextBrowser, QWidget
from lib.comm import get_var, set_var
from widgets import PMTableView, PMGTableWidget, PMDockObject, PMGTableViewer, PMGJsonTree
if TYPE_CHECKING:
from lib.extensions.extensionlib.extension_lib import extension_lib
class AbstractViewer(object):
    """
    Abstract base class for variable viewers.

    Subclasses report whether they can display a given value (``is_valid``)
    and render it (``set_data``).
    """
    @staticmethod
    def is_valid(data) -> bool:
        """
        Return True if *data* is of a type this viewer can display.
        """
        return True
    def set_data(self, data: object, metadata: dict):
        """
        Display the value *data* together with its metadata *metadata*.
        """
        pass
class PDDataViewer(PMGTableViewer, AbstractViewer):
    """
    Viewer displaying pandas DataFrame values in a table.
    """
    def __init__(self, parent=None):
        PMGTableViewer.__init__(self, parent, table_view=PMTableView())
        AbstractViewer.__init__(self)
        # self.action_split_by_columns:QAction = self.table_view.menu.addAction('ๆๅๅฝๅๅ')
        # self.action_split_by_columns.triggered.connect(self.split_by_columns)
    def split_by_columns(self):
        # Placeholder for the (currently disabled) split-by-columns action;
        # see the commented-out menu wiring in __init__.
        # row =
        # self.table_view.data
        print('splitted!')
    @staticmethod
    def is_valid(data):
        """Return True only for pandas DataFrames."""
        import pandas as pd
        return isinstance(data, pd.DataFrame)
    def set_data(self, data: object, metadata: dict = None):
        super().set_data(data)
class NPDataViewer(PMGTableViewer, AbstractViewer):
    """
    Viewer displaying numpy.ndarray values in a table.
    """
    def __init__(self, parent=None):
        PMGTableViewer.__init__(self, parent, table_view=PMTableView())
        AbstractViewer.__init__(self)
    @staticmethod
    def is_valid(data):
        """Return True only for numpy arrays."""
        import numpy
        return isinstance(data, numpy.ndarray)
    def set_data(self, data: object, metadata: dict = None):
        super(NPDataViewer, self).set_data(data)
class JsonViewer(PMGJsonTree, AbstractViewer):
    """
    Tree-shaped viewer dedicated to dict values.
    """
    def __init__(self, parent=None):
        PMGJsonTree.__init__(self, parent)
        # AbstractViewer.__init__(self)
    @staticmethod
    def is_valid(data) -> bool:
        """Return True only for dicts."""
        return isinstance(data, dict)
    def set_data(self, data: Dict[str, object], metadata: dict = None) -> None:
        # Wrap the dict under a single root node and expand the first level.
        self.set_data_dic({self.tr('Data:'): data})
        self.expandToDepth(1)
class GeneralIterableViewer(PMGTableWidget, AbstractViewer):
    """
    Viewer for general iterable objects.

    The value may be a list, a 2-D nested list with rows of unequal
    length, etc. Parsing iterates over the first dimension, so elements
    ``data[0], data[1], ..., data[len(data)-1]`` are displayed one per row.
    If an element is not itself iterable, it fills the first column of its
    row; if it is iterable, its items ``data[i][0], data[i][1], ...`` fill
    the consecutive columns of that row.
    """
    def __init__(self, parent=None):
        PMGTableWidget.__init__(self, parent)
        AbstractViewer.__init__(self)
    @staticmethod
    def is_valid(data: object):
        """Accept table-displayable iterables, excluding numpy/pandas types."""
        import numpy
        import pandas
        if isinstance(data, numpy.ndarray) or isinstance(
                data, pandas.DataFrame):
            return False
        return PMGTableWidget.check_data_can_be_displayed_by_table(data=data)
    def set_data(self, data: 'np.ndarray', metadata: dict = None):
        super().set_data_2d(data)
class GeneralObjectViewer(QTextBrowser, AbstractViewer):
    """
    Plain-text fallback viewer.

    Pretty-prints the value and its metadata; used when no specialized
    viewer matches.
    """
    def __init__(self, parent=None):
        QTextBrowser.__init__(self, parent)
        AbstractViewer.__init__(self)
    @staticmethod
    def is_valid(data: object):
        """Accept anything that no specialized viewer handles."""
        import numpy
        import pandas
        if isinstance(data, numpy.ndarray) or isinstance(
                data, pandas.DataFrame):
            return False
        elif GeneralIterableViewer.is_valid(data):
            return False
        return True
    def set_data(self, data: object, metadata: dict = None):
        # Show the pretty-printed value followed by its metadata.
        self.setText(self.tr('value:') + '\n\n ' + pprint.pformat(data)
                     + '\n\n\n' + self.tr('meta data:') + '\n\n' + pprint.pformat(metadata))
# Candidate viewer classes, tried in order by build_viewer /
# get_viewer_class; GeneralObjectViewer is the catch-all and must stay last.
viewer_classes = [
    PDDataViewer,
    NPDataViewer,
    GeneralIterableViewer,
    JsonViewer,
    GeneralObjectViewer]
def build_viewer(data: object, metadata: object) -> 'QWidget':
    """
    Factory: create and populate the first viewer that accepts *data*.

    GeneralObjectViewer accepts everything, so a viewer is always returned
    as long as it remains last in ``viewer_classes``.
    """
    for viewer_class in viewer_classes:
        if viewer_class.is_valid(data):
            viewer = viewer_class()
            viewer.set_data(data, metadata)
            return viewer
def get_viewer_class(data):
    """Return the first viewer class that accepts *data*.

    Falls through to an implicit None only if no class matches, which does
    not happen while GeneralObjectViewer stays in ``viewer_classes``.
    """
    for viewer_class in viewer_classes:
        if viewer_class.is_valid(data):
            return viewer_class
class PMVariableViewerWidget(QTabWidget, PMDockObject):
    """
    Tabbed dock widget showing one viewer tab per inspected variable.

    Multiple inheritance is used here; note that PMDockObject must stay on
    the right-hand side of the base list.
    """
    if TYPE_CHECKING:
        lib = extension_lib
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setTabsClosable(True)
        # Maps variable name -> the viewer widget currently showing it.
        self.var_view_tables: Dict[str, object] = {}
        self.tabCloseRequested.connect(self.on_tab_close_request)
        self.variable_view_factory = None
    def is_temporary(self) -> bool:
        return True
    def get_widget_text(self) -> str:
        return self.tr('Variable Viewer')
    def set_lib(self, lib):
        '''
        Store the extension lib and register data-change callbacks.

        Note: the UI may only be refreshed from the main thread; refreshing
        from any other thread would crash the application, hence the
        main-thread checks in the callbacks below.
        '''
        self.lib = lib
        def on_changed(varname: str, variable, source: str):
            # Refresh an open tab when its variable changes (main thread only).
            if threading.current_thread() is threading.main_thread():
                if varname in self.var_view_tables:
                    self.show_data(varname, raise_window=False)
        def on_deletion(varname: str, provider: str):
            # Close the tab of a deleted variable (main thread only).
            if threading.current_thread() is threading.main_thread():
                if varname in self.var_view_tables:
                    tab = self.var_view_tables.pop(varname)
                    index = self.indexOf(tab)
                    self.removeTab(index)
        self.lib.Data.add_data_changed_callback(on_changed)
        self.lib.Data.add_data_deleted_callback(on_deletion)
    def show_data(self, dataname: str, raise_window=True):
        """
        Display a variable and optionally raise the dock to the top layer
        (useful when several dock widgets are stacked together).

        If a tab already shows this variable but the new value requires a
        different viewer class, the old tab is removed and a new one is
        built in its place.
        """
        from lib.comm.base import DataDesc
        desc: DataDesc = self.lib.Data.get_data_desc(dataname)
        # Large values are fetched as previews to avoid transferring them
        # in full.
        if desc.big:
            data = get_var(dataname, preview=True)
        else:
            data = get_var(dataname)
        try:
            dataview: 'QWidget' = self.var_view_tables.get(dataname)
            metadata = self.lib.Data.get_metadata(dataname)
        except BaseException:
            import traceback
            traceback.print_exc()
            return
        last_index = self.count()
        if dataview is not None:
            if not isinstance(dataview, get_viewer_class(data)):
                # Viewer type no longer matches the value: rebuild in place.
                index = self.indexOf(dataview)
                self.removeTab(index)
                last_index = index
                self.var_view_tables.pop(dataname)
                dataview = None
        if dataview is None:
            dataview = build_viewer(data, metadata)
            # NOTE(review): both insertTab and addTab are called with the
            # same widget; the addTab call looks redundant -- confirm
            # intended behaviour.
            self.insertTab(last_index, dataview, dataname)
            self.addTab(dataview, dataname)
            self.var_view_tables[dataname] = dataview
        dataview.set_data(data, metadata)
        if hasattr(dataview, 'data_modified_signal'):
            # Propagate in-viewer edits back to the variable store.
            def set_var_data_modified():
                set_var(dataname, dataview.get_data())
            dataview.data_modified_signal.connect(set_var_data_modified)
        dataview.setWindowTitle(dataname)
        dataview.windowTitleChanged.connect(self.on_tab_window_title_changed)
        self.setCurrentWidget(dataview)
        if raise_window:
            self.lib.UI.raise_dock_into_view('data_view_table')
    def on_tab_window_title_changed(self, title: str):
        # Keep the tab label in sync with the viewer's window title.
        widget = self.sender()
        self.setTabText(self.indexOf(widget), title)
    def on_tab_close_request(self, close_index: int):
        self.var_view_tables.pop(self.tabText(close_index))
        tab_to_close: 'QTextBrowser' = self.widget(close_index)
        tab_to_close.deleteLater()
        self.removeTab(close_index)
| pyminer/pyminer | pyminer/packages/workspace_inspector/data_viewer.py | data_viewer.py | py | 8,746 | python | en | code | 77 | github-code | 36 |
"""
Created on Sat Sep 25 00:00:00 2018
@author: Nikhil
"""
"""
If you have any questions or suggestions regarding this script,
feel free to contact me via nikhil.ss4795@gmail.com
"""
# Polynomial Regression
# Fits a linear and a degree-4 polynomial regression to the
# Position_Salaries dataset and compares their predictions.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

# Quick look at the raw data
plt.scatter(X, y, color = 'red')
plt.title('Salary vs Experience')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Fitting Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
linear_reg = LinearRegression()
linear_reg.fit(X, y)

# Visualising the Linear Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, linear_reg.predict(X), color = 'blue')
plt.title('Salary vs Experience')
plt.xlabel('Experience')
plt.ylabel('Salary')
plt.show()

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_features = PolynomialFeatures(degree = 4)
X_polynomial = poly_features.fit_transform(X)
# (Redundant refit kept for parity with the original script; fit_transform
# above already fitted poly_features.)
poly_features.fit(X_polynomial, y)
polynomial_regression = LinearRegression()
polynomial_regression.fit(X_polynomial, y)

# Visualising the Polynomial Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, polynomial_regression.predict(poly_features.fit_transform(X)), color = 'blue')
plt.title('Salary vs Experience')
plt.xlabel('Experience')
plt.ylabel('Salary')
plt.show()

# Predicting a new result with Linear Regression.
# scikit-learn estimators require a 2-D array of shape
# (n_samples, n_features); passing a bare scalar raises an error.
linear_reg.predict([[6.5]])

# Predicting a new result with Polynomial Regression
polynomial_regression.predict(poly_features.fit_transform([[6.5]]))
"""
If you have any questions or suggestions regarding this script,
feel free to contact me via nikhil.ss4795@gmail.com
"""
| Nikhil4795/Polynomial_Linear_Regression | Polynomial_regression_2/polynomial_regression.py | polynomial_regression.py | py | 1,854 | python | en | code | 0 | github-code | 36 |
import datetime
from django import forms
from django.core.exceptions import ValidationError
from .models import TimeLog, Subject, Tag
class DateForm(forms.Form):
    """Form for picking a date range bounded by the user's record dates.

    ``min_date``/``max_date`` (dates of the user's earliest and latest
    records) must be passed as keyword arguments. When they are missing
    (user has no records) both inputs are disabled and ``clean()`` rejects
    the submission.
    """

    start = forms.DateField(label='From')
    end = forms.DateField(label='To')

    def __init__(self, *args, **kwargs):
        self.min_date = kwargs.pop('min_date')
        self.max_date = kwargs.pop('max_date')
        super().__init__(*args, **kwargs)
        # Mutate ``self.fields`` (per-instance copies made by Form.__init__)
        # instead of ``self.base_fields``: base_fields is shared at class
        # level, so writing widget attrs there leaks state (including a
        # sticky ``disabled`` flag and stale min/max) into every other
        # instance of this form.
        if self.min_date and self.max_date:
            for name in ('start', 'end'):
                attrs = self.fields[name].widget.attrs
                attrs['min'] = self.min_date.isoformat()
                attrs['max'] = self.max_date.isoformat()
        else:
            # User has no records: nothing sensible to select.
            self.fields['start'].widget.attrs['disabled'] = True
            self.fields['end'].widget.attrs['disabled'] = True

    def clean(self):
        """Validate that the range is ordered and within the record bounds.

        Raises ValidationError if the user has no records, the start date
        is after the end date, or either date falls outside
        ``[min_date, max_date]``.
        """
        # if min_date or max_date is none, it means that user has no record yet
        if self.min_date is None:
            raise ValidationError("You don't have any record!")
        cleaned_data = super().clean()
        start = cleaned_data.get('start')
        end = cleaned_data.get('end')
        if start and end:
            if start > end:
                raise ValidationError('Your selected start date is greater then selected end date')
            if not (self.min_date <= start <= self.max_date and self.min_date <= end <= self.max_date):
                raise ValidationError(f'Your records date are between {self.min_date} and {self.max_date}')
        return cleaned_data
class TimeLogForm(forms.ModelForm):
    """ModelForm for a TimeLog entry with duration entered as hours+minutes.

    The computed duration (minutes) is placed in
    ``cleaned_data['duration']`` and validated against the registrant
    user's remaining 24-hour budget for the chosen date.
    """
    hours = forms.IntegerField(min_value=0, max_value=24)
    minutes = forms.IntegerField(min_value=0, max_value=59)
    def __init__(self, *args, **kwargs):
        # The owning user must be supplied by the view; choices and quota
        # checks are scoped to this user.
        self.registrant_user = kwargs.pop('registrant_user', None)
        super().__init__(*args, **kwargs)
        # add registrant user's subjects and tags to the corresponding field choices
        self.fields['subject'].queryset = self.registrant_user.subject_set.all()
        self.fields['tags'].queryset = self.registrant_user.tag_set.all()
        # add html attribute to the widget of fields
        self.fields['subject'].widget.attrs['class'] = 'form-select'
        self.fields['tags'].widget.attrs['class'] = 'form-select'
        self.fields['tags'].widget.attrs['size'] = '3'
        self.fields['date'].widget.attrs['class'] = 'form-control'
        self.fields['hours'].widget.attrs['class'] = 'form-control'
        self.fields['minutes'].widget.attrs['class'] = 'form-control'
        self.fields['description'].widget.attrs['class'] = 'form-control'
    class Meta:
        model = TimeLog
        exclude = ['user', 'duration']
        widgets = {
            'date': forms.DateInput(attrs={'type': 'date', 'max': datetime.date.today}),
        }
    def clean(self):
        """Compute the duration and enforce the 24-hour daily limit."""
        clean_data = super().clean()
        hours = clean_data.get('hours')
        minutes = clean_data.get('minutes')
        date = clean_data.get('date')
        # calculate and check if the duration is valid
        if hours is not None and minutes is not None and date:
            # calculate duration minutes based on hours and minutes
            duration = (hours * 60) + minutes
            if duration == 0:
                raise ValidationError("Both hour and minute fields can not be 0.")
            if duration > 1440:
                raise ValidationError("One day is 24 hours!")
            # check the particular date's durations doesn't exceed 24 hours
            # NOTE(review): when editing an existing TimeLog, this sum also
            # includes the instance being edited -- confirm intended.
            previous_durations_total = 0
            for timelog in self.registrant_user.timelogs.filter(date=date):
                previous_durations_total += timelog.duration
            if (previous_durations_total + duration) > 1440:
                remaind_hours = (1440 - previous_durations_total) // 60
                remaind_miuntes = (1440 - previous_durations_total) % 60
                if remaind_miuntes or remaind_hours:
                    raise ValidationError(f'Your remaind duration for '
                                          f'{date} is {remaind_hours} hours and {remaind_miuntes} minutes.')
                else:
                    raise ValidationError(f'There is no time left for {date}')
            clean_data['duration'] = duration
        return clean_data
class SubjectForm(forms.ModelForm):
    """Form for creating a Subject.

    Subject names are stored lowercased and must be unique among the
    user's existing subjects (passed in via ``user_subjects``).
    """

    def __init__(self, *args, **kwargs):
        self.user_subjects = kwargs.pop('user_subjects')
        super().__init__(*args, **kwargs)
        # apply the bootstrap styling class to every visible field
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'form-control'

    class Meta:
        model = Subject
        fields = ['name', 'description']

    def clean(self):
        clean_data = super().clean()
        name = clean_data.get('name')
        if name:
            lowered = name.lower()
            # reject duplicates of the user's existing subject names
            if any(subject.name == lowered for subject in self.user_subjects):
                raise ValidationError(f'{lowered} already exists.')
            clean_data['name'] = lowered
        return clean_data
class TagForm(forms.ModelForm):
    """Form for creating a Tag.

    Tag names are stored lowercased and must be unique among the
    user's existing tags (passed in via ``user_tags``).
    """

    def __init__(self, *args, **kwargs):
        self.user_tags = kwargs.pop('user_tags')
        super().__init__(*args, **kwargs)
        # apply the bootstrap styling class to the name field
        self.fields['name'].widget.attrs['class'] = 'form-control'

    class Meta:
        model = Tag
        fields = ['name']

    def clean(self):
        clean_data = super().clean()
        name = clean_data.get('name')
        if name:
            lowered = name.lower()
            # reject duplicates of the user's existing tag names
            if any(tag.name == lowered for tag in self.user_tags):
                raise ValidationError(f'{lowered} already exists.')
            clean_data['name'] = lowered
        return clean_data
23701537076 | import argparse
import os
import shutil
import numpy as np
import torch
import torchvision
from torch import nn as nn
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import helpers
from dcgan import generators, discriminators
from dcgan.train_config import TrainConfig
def train(dataset: Dataset, train_config: TrainConfig,
          generator, discriminator):
    """Run the (endless) DCGAN training loop.

    Alternates one discriminator update and one generator update per
    batch, logging losses and image grids to TensorBoard.  Supports
    conditional training when ``train_config.conditional_dim > 0``.
    NOTE: loops forever; stop the process externally.
    """
    global_step = epoch = 0
    # start the experiment directory from scratch when requested
    if train_config.overwrite and os.path.exists(train_config.experiment_dirpath):
        shutil.rmtree(train_config.experiment_dirpath)
    real_images_writer = SummaryWriter(f"{train_config.experiment_dirpath}/real")
    fake_images_writer = SummaryWriter(f"{train_config.experiment_dirpath}/fake")
    stats_writer = SummaryWriter(f"{train_config.experiment_dirpath}/stats")
    dataloader = DataLoader(
        dataset=dataset, batch_size=train_config.batch_size, shuffle=True, num_workers=train_config.num_workers
    )
    num_iterations_per_epoch = len(dataset) // train_config.batch_size
    generator = generator.to(device=train_config.device).train()
    discriminator = discriminator.to(device=train_config.device).train()
    criterion = torch.nn.BCELoss()
    # beta1=0.5 per the DCGAN paper
    gen_opt = torch.optim.Adam(params=generator.parameters(), lr=train_config.lr, betas=(0.5, 0.999))
    disc_opt = torch.optim.Adam(params=discriminator.parameters(), lr=train_config.lr, betas=(0.5, 0.999))
    while True:
        for batch_idx, (real_img_batch, labels) in tqdm(enumerate(dataloader), total=num_iterations_per_epoch, leave=False):
            # rescale images from [0, 1] to [-1, 1]
            img_batch = normalize(real_img_batch)
            if train_config.conditional_dim > 0:
                # append per-class conditioning planes as extra channels
                conditional_input = helpers.conditional_input_encoder_discriminator(
                    labels=labels, cardinality=train_config.conditional_dim, spatial_size=train_config.image_size
                )
                img_batch = torch.cat([img_batch, conditional_input], dim=1)
            img_batch = img_batch.to(device=train_config.device)
            # train discriminator
            noise = torch.randn(size=(len(labels), train_config.z_dim))
            if train_config.conditional_dim > 0:
                conditional_input = helpers.conditional_input_encoder_generator(
                    labels=labels, cardinality=train_config.conditional_dim
                )
                noise = torch.cat([noise, conditional_input], dim=1)
            noise = noise.to(device=train_config.device)
            fake_img_batch = generator(noise)
            if train_config.conditional_dim > 0:
                conditional_input = helpers.conditional_input_encoder_discriminator(
                    labels=labels, cardinality=train_config.conditional_dim, spatial_size=train_config.image_size
                ).to(device=train_config.device)
                fake_img_batch = torch.cat([fake_img_batch, conditional_input], dim=1)
            real_proba = discriminator(img_batch)
            # detach so the discriminator's backward pass skips the generator
            fake_proba = discriminator(fake_img_batch.detach())
            disc_loss = (criterion(real_proba, torch.ones_like(real_proba)) +
                         criterion(fake_proba, torch.zeros_like(fake_proba)))
            disc_loss = disc_loss / 2
            disc_opt.zero_grad()
            disc_loss.backward()
            disc_opt.step()
            # train generator (non-saturating loss: label fakes as real)
            fake_proba = discriminator(fake_img_batch)
            gen_loss = criterion(fake_proba, torch.ones_like(fake_proba))
            gen_opt.zero_grad()
            gen_loss.backward()
            gen_opt.step()
            if global_step % train_config.send_every == 0:
                stats_writer.add_scalar("generator loss", gen_loss, global_step=global_step)
                stats_writer.add_scalar("discriminator loss", disc_loss, global_step=global_step)
                stats_writer.add_scalar("total loss", gen_loss + disc_loss, global_step=global_step)
            if global_step % train_config.show_every == 0:
                # visualize real and generated image grids
                real_images_grid = torchvision.utils.make_grid(
                    real_img_batch, normalize=True
                )
                real_images_writer.add_image("real images", real_images_grid, global_step=epoch)
                generated_images = generate(train_config=train_config, generator=generator)
                generated_images = torchvision.utils.make_grid(
                    generated_images, normalize=True
                )
                fake_images_writer.add_image("fake images", generated_images, global_step=global_step)
            global_step += 1
        epoch += 1
def normalize(x):
    """Linearly rescale values from [0, 1] to [-1, 1]."""
    doubled = 2 * x
    return doubled - 1
def generate(train_config: TrainConfig, generator: nn.Module) -> torch.Tensor:
    """Sample one batch of images from the generator (no gradients).

    For conditional models a single random class label is drawn and
    shared by the whole batch.  The generator is assumed to already
    live on ``train_config.device``.
    """
    noise = torch.randn(train_config.batch_size, train_config.z_dim)
    if train_config.conditional_dim > 0:
        # one random label used for the entire batch
        label = np.random.randint(low=0, high=train_config.conditional_dim)
        labels = np.asarray([label] * train_config.batch_size)
        labels = torch.from_numpy(labels)
        conditional_input = helpers.conditional_input_encoder_generator(
            labels=labels, cardinality=train_config.conditional_dim
        )
        noise = torch.cat([noise, conditional_input], dim=1)
    noise = noise.to(device=train_config.device)
    with torch.no_grad():
        # reshape flat generator output to (batch, channels, H, W)
        generated_images = generator(noise).view(train_config.batch_size, -1, train_config.image_size, train_config.image_size)
    return generated_images
def get_dataset_and_in_channels(dataset_name: str, image_size: int):
    """Build the training split of MNIST or CIFAR-10, resized to image_size.

    Returns (dataset, in_channels): 1 channel for MNIST, 3 for CIFAR-10.
    Downloads the data under ./data/ when missing; raises KeyError for
    an unknown dataset name.
    """
    name_to_dataset_cls = {
        "mnist": (torchvision.datasets.MNIST, 1),
        "cifar-10": (torchvision.datasets.CIFAR10, 3)
    }
    dataset_cls, in_channels = name_to_dataset_cls[dataset_name]
    dataset = dataset_cls(
        root="data/", train=True,
        transform=torchvision.transforms.Compose([
            torchvision.transforms.Resize(image_size),
            torchvision.transforms.ToTensor()
        ]),
        download=True
    )
    return dataset, in_channels
def parse_args():
    """Parse the training script's command-line options."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--exp_name")
    cli.add_argument("--dataset", default="mnist", choices=["mnist", "cifar-10"])
    cli.add_argument("--image_size", default=32, type=int)
    return cli.parse_args()
def main():
    """Entry point: parse CLI args, build the models, start training."""
    args = parse_args()
    exp_dir = "../experiments"
    if args.exp_name is not None:
        exp_dir = f"{exp_dir}/{args.exp_name}"
    dataset, in_channels = get_dataset_and_in_channels(dataset_name=args.dataset, image_size=args.image_size)
    config = TrainConfig(
        experiment_dirpath=exp_dir,
        image_size=args.image_size,
        in_channels=in_channels
    )
    generator = generators.DCGenerator.from_train_config(config)
    discriminator = discriminators.DCDiscriminator.from_train_config(config)
    # train() loops forever; see its docstring
    train(
        dataset=dataset, train_config=config, generator=generator, discriminator=discriminator
    )
if __name__ == '__main__':
main()
| dfridman1/GANs | dcgan/train.py | train.py | py | 7,028 | python | en | code | 0 | github-code | 36 |
5515862018 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from vgg import load_pretrained_VGG16_pool5
import cifar10_utils
import tensorflow as tf
import numpy as np
LEARNING_RATE_DEFAULT = 1e-4
BATCH_SIZE_DEFAULT = 128
MAX_STEPS_DEFAULT = 15000
EVAL_FREQ_DEFAULT = 1000
CHECKPOINT_FREQ_DEFAULT = 5000
PRINT_FREQ_DEFAULT = 10
OPTIMIZER_DEFAULT = 'ADAM'
REFINE_AFTER_K_STEPS_DEFAULT = 0
DATA_DIR_DEFAULT = './cifar10/cifar-10-batches-py'
LOG_DIR_DEFAULT = './logs/cifar10'
CHECKPOINT_DIR_DEFAULT = './checkpoints'
def train_step(loss):
    """
    Defines the ops to conduct an optimization step. You can set a learning
    rate scheduler or pick your favorite optimizer here. This set of operations
    should be applicable to both ConvNet() and Siamese() objects.

    Args:
        loss: scalar float Tensor, full loss = cross_entropy + reg_loss

    Returns:
        train_op: Ops for optimization.
    """
    ########################
    # PUT YOUR CODE HERE  #
    ########################
    # Adam with the CLI-provided learning rate; minimize() creates the
    # gradient ops for all trainable variables as a side effect.
    train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)
    ########################
    # END OF YOUR CODE    #
    ########################
    return train_op
def fully_connected_layers(vgg_output):
    """Classification head on top of VGG pool5: three dense layers -> 10 logits.

    Weights use Xavier initialization with L2(0.1) regularization;
    biases start at zero.  Returns unnormalized class scores.
    """
    # dense layers
    with tf.name_scope('dense'):
        # flatten the pool5 feature maps to [batch, features]
        flat = tf.reshape(vgg_output, [vgg_output.get_shape()[0].value, -1], name='flat_out')
        xavier = tf.contrib.layers.xavier_initializer()
        const0 = tf.constant_initializer(0.)
        l2_reg = tf.contrib.layers.l2_regularizer(0.1)
        n_classes = 10
        with tf.name_scope('dense1'):
            w1 = tf.get_variable('w1', shape=[flat.get_shape()[1], 384], dtype=tf.float32,
                                 initializer=xavier, regularizer=l2_reg)
            b1 = tf.get_variable('b1', shape=[384], dtype=tf.float32,
                                 initializer=const0)
            fc1 = tf.nn.relu(tf.matmul(flat, w1) + b1, name='d1_out')
        # fc2 Multiplication [384, 192]
        # ReLU
        with tf.name_scope('dense2'):
            w2 = tf.get_variable('w2', shape=[384, 192], dtype=tf.float32,
                                 initializer=xavier, regularizer=l2_reg)
            b2 = tf.get_variable('b2', shape=[192], dtype=tf.float32,
                                 initializer=const0)
            fc2 = tf.nn.relu(tf.matmul(fc1, w2) + b2, name='d2_out')
        # fc3 Multiplication [192, 10] -- final logits, no activation
        with tf.name_scope('dense3'):
            w3 = tf.get_variable('w3', shape=[192, n_classes], dtype=tf.float32,
                                 initializer=xavier, regularizer=l2_reg)
            b3 = tf.get_variable('b3', shape=[n_classes], dtype=tf.float32,
                                 initializer=const0)
            fc3 = tf.matmul(fc2, w3) + b3
    return fc3
def vgg_loss(logits, labels):
    """Full training loss: mean softmax cross-entropy plus L2 regularization.

    Also emits scalar summaries for the individual and combined losses.

    Args:
        logits: [batch, n_classes] unnormalized class scores.
        labels: [batch, n_classes] one-hot targets.

    Returns:
        Scalar loss tensor.
    """
    ce_loss = tf.nn.softmax_cross_entropy_with_logits(logits, labels)
    ce_loss = tf.reduce_mean(ce_loss)
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    reg_loss = tf.to_float(0.)
    if None not in reg_losses:  # this IS meant to switch while building the graph
        # tf.add_n sums the collection in-graph; the original used the
        # builtin `reduce`, which is a NameError on Python 3 (it moved
        # to functools) and TF provides add_n for exactly this purpose.
        reg_loss = tf.add_n(reg_losses)
    loss = ce_loss + reg_loss
    tf.scalar_summary('ce_loss', ce_loss)
    tf.scalar_summary('reg_loss', reg_loss)
    tf.scalar_summary('full_loss', loss)
    return loss
def accuracy(logits, labels):
    """Mean classification accuracy over the batch (logged as a summary).

    Both reduce_sum(score) and size(score) are int32; the
    `from __future__ import division` at the top of this file makes `/`
    true division here.
    """
    guesses = tf.argmax(logits, dimension=1)
    targets = tf.argmax(labels, dimension=1)
    score = tf.to_int32(tf.equal(guesses, targets))
    acc = tf.reduce_sum(score) / tf.size(score)
    tf.scalar_summary('accuracy', acc)
    return acc
def train():
    """
    Performs training and evaluation of your model.

    First define your graph using vgg.py with your fully connected layer.
    Then define necessary operations such as trainer (train_step in this case),
    savers and summarizers. Finally, initialize your model within a
    tf.Session and do the training.

    ---------------------------------
    How often to evaluate your model:
    ---------------------------------
    - on training set every PRINT_FREQ iterations
    - on test set every EVAL_FREQ iterations

    ---------------------------
    How to evaluate your model:
    ---------------------------
    Evaluation on test set should be conducted over full batch, i.e. 10k images,
    while it is alright to do it over minibatch for train set.
    """
    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)
    ########################
    # PUT YOUR CODE HERE  #
    ########################
    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    data_dims = list(cifar10.train.images.shape[1:])
    n_classes = 10
    with tf.Graph().as_default():
        x_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size] + data_dims)
        y_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size, n_classes])
        stopgrads = tf.placeholder(dtype=tf.bool)
        pool5, assign_ops = load_pretrained_VGG16_pool5(x_pl, scope_name='vgg')
        # optionally freeze the pretrained trunk by cutting gradients at pool5
        pool5 = tf.cond(stopgrads, lambda: tf.stop_gradient(pool5), lambda: pool5)
        logits = fully_connected_layers(pool5)
        loss = vgg_loss(logits, y_pl)
        acc = accuracy(logits, y_pl)
        train_op = train_step(loss)
        summary_op = tf.merge_all_summaries()
        init_op = tf.initialize_all_variables()
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(init_op)
            # load the pretrained VGG weights into the graph
            sess.run(assign_ops)
            train_summary_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/train', sess.graph)
            test_summary_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/test', sess.graph)
            for step in range(FLAGS.max_steps):
                x, y = cifar10.train.next_batch(FLAGS.batch_size)
                # keep the trunk frozen for the first refine_after_k steps
                switch = True if step < FLAGS.refine_after_k else False
                feed = {x_pl: x, y_pl: y, stopgrads: switch}
                train_loss, train_acc, summary_str, _ = sess.run([loss, acc, summary_op, train_op], feed_dict=feed)
                if step == 0 or (step + 1) % FLAGS.print_freq == 0 or step + 1 == FLAGS.max_steps:
                    print('TRAIN step: ', str(step), ' err: ', str(train_loss), ' acc: ', str(train_acc))
                    train_summary_writer.add_summary(summary_str, step)
                    train_summary_writer.flush()
                if step == 0 or (step + 1) % FLAGS.eval_freq == 0 or step + 1 == FLAGS.max_steps:
                    # evaluate on the full test set, in batch-size chunks
                    x, y = cifar10.test.images, cifar10.test.labels
                    num_batches = int(np.floor(x.shape[0] / FLAGS.batch_size))
                    test_err = 0.
                    test_acc = 0.
                    for idx in range(num_batches):
                        x_batch = x[idx * FLAGS.batch_size:(idx + 1) * FLAGS.batch_size, :, :, :]
                        y_batch = y[idx * FLAGS.batch_size:(idx + 1) * FLAGS.batch_size, :]
                        feed = {x_pl: x_batch, y_pl: y_batch, stopgrads: True}
                        batch_err, batch_acc = sess.run([loss, acc], feed_dict=feed)
                        test_err += batch_err
                        test_acc += batch_acc
                        summary_str = sess.run(summary_op, feed_dict=feed) # possibly incorrect. should pool summaries
                        test_summary_writer.add_summary(summary_str, step)
                        test_summary_writer.flush()
                    test_err /= num_batches
                    test_acc /= num_batches
                    # BUGFIX: report the test metrics (the original printed
                    # train_loss/train_acc under the TEST label)
                    print('--- TEST --- step: ', str(step), ' err: ', str(test_err), ' acc: ', str(test_acc))
                    # summary_str = sess.run(summary_op, feed_dict=feed) # possibly incorrect. should pool summaries
                    # test_summary_writer.add_summary(summary_str, step)
                    # test_summary_writer.flush()
                if (step + 1) % FLAGS.checkpoint_freq == 0 or step + 1 == FLAGS.max_steps:
                    checkpoint_file = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
                    saver.save(sess, checkpoint_file, global_step=(step + 1))
    ########################
    # END OF YOUR CODE    #
    ########################
def initialize_folders():
    """
    Initializes all folders in FLAGS variable.
    """
    for dirpath in (FLAGS.log_dir, FLAGS.data_dir, FLAGS.checkpoint_dir):
        if not tf.gfile.Exists(dirpath):
            tf.gfile.MakeDirs(dirpath)
def print_flags():
    """
    Prints all entries in FLAGS variable.
    """
    for name, val in vars(FLAGS).items():
        print('{0} : {1}'.format(name, str(val)))
def main(_):
    """tf.app.run entry point: report flags, ensure dirs exist, then train."""
    print_flags()
    initialize_folders()
    train()
if __name__ == '__main__':
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type = float, default = LEARNING_RATE_DEFAULT,
help='Learning rate')
parser.add_argument('--max_steps', type = int, default = MAX_STEPS_DEFAULT,
help='Number of steps to run trainer.')
parser.add_argument('--batch_size', type = int, default = BATCH_SIZE_DEFAULT,
help='Batch size to run trainer.')
parser.add_argument('--print_freq', type = int, default = PRINT_FREQ_DEFAULT,
help='Frequency of evaluation on the train set')
parser.add_argument('--eval_freq', type = int, default = EVAL_FREQ_DEFAULT,
help='Frequency of evaluation on the test set')
parser.add_argument('--refine_after_k', type = int, default = REFINE_AFTER_K_STEPS_DEFAULT,
help='Number of steps after which to refine VGG model parameters (default 0).')
parser.add_argument('--checkpoint_freq', type = int, default = CHECKPOINT_FREQ_DEFAULT,
help='Frequency with which the model state is saved.')
parser.add_argument('--data_dir', type = str, default = DATA_DIR_DEFAULT,
help='Directory for storing input data')
parser.add_argument('--log_dir', type = str, default = LOG_DIR_DEFAULT,
help='Summaries log directory')
parser.add_argument('--checkpoint_dir', type = str, default = CHECKPOINT_DIR_DEFAULT,
help='Checkpoint directory')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run()
| frhrdr/dlc2016 | practical_3/retrain_vgg.py | retrain_vgg.py | py | 10,700 | python | en | code | 1 | github-code | 36 |
74491447145 | import time
import json
import datetime
invalid = "\n--Invalid response, please try again.--"
scheduleFile = "schedule.json"
assignmentFile = "assignment.json"
def load():
    """Animate a 40-dash loading bar on one line, then move to a new line."""
    for _ in range(40):
        time.sleep(0.00000000000001)
        print("-", end="", flush=True)
    print()
def unload():
    """Animate a shrinking dash bar, then print a goodbye message."""
    for step in range(40):
        time.sleep(0.00000000000001)
        print("-" * (40 - step))
    print("--Goodbye!--")
def anythingElse():
    """Ask whether to continue; return 1 when the user chooses to quit.

    Returns None (falls out of the loop) when the user answers Yes.
    """
    load()
    while True:
        userInput = input("Anything else?\n[0] - Yes\n[1] - No\n\nPlease choose an option: ")
        if userInput == "0":
            load()
            break
        elif userInput == "1":
            unload()
            return 1
        else:
            print(invalid)
def createSchedule():
    """Interactively build a named schedule and persist it to schedule.json.

    Prompts for a unique schedule name, the number of classes (1-7),
    then a name/teacher/description triple for each period.
    """
    with open(scheduleFile) as f:
        data = json.load(f)
    while True:
        sched = input("Schedule Name: ")
        if sched not in data:
            break
        print(f'"{sched}" is already a schedule. Please enter a different name.')
    data[sched] = {}
    while True:
        number = input("Number of Classes (1 - 7): ")
        try:
            number = int(number)
            if number > 7 or number < 1:
                print(invalid)
            else:
                break
        except Exception:
            # non-numeric input
            print(invalid)
    for i in range(1, number + 1):
        name = input(f"\nPeriod {i}: ")
        teacher = input("Teacher: ")
        description = input("Description: ")
        data[sched][name] = {}
        data[sched][name]["teacher"] = teacher
        data[sched][name]["description"] = description
    with open(scheduleFile, "w") as f:
        json.dump(data, f, indent=2)
    load()
    print(f'--Schedule "{sched}" created!--')
def seeSchedule():
    """List schedules, then show the periods of the one the user picks.

    NOTE(review): `num` is not reset before re-listing, so displayed
    indices drift on a second pass through the while loop -- confirm
    before relying on them.
    """
    with open(scheduleFile) as f:
        data = json.load(f)
    if len(data) < 1:
        print("--There are currently no schedules.--")
        return
    num = 0
    while True:
        lister = []
        for i in data:
            lister += [i]
            print(f"[{num}] {i}\nPeriods: {len(data[i])}\n")
            num += 1
        num -= 1
        userInput = input("Please choose a schedule (or press e to exit): ")
        if userInput == "e":
            return
        else:
            try:
                userInput = int(userInput)
                if userInput > -1 and userInput <= num:
                    # reuse num as the period counter for the detail view
                    num = 0
                    load()
                    for i in data[lister[userInput]]:
                        print(f"Period {num + 1}: {i}\nTeacher: {data[lister[userInput]][i]['teacher']}\nDescription: {data[lister[userInput]][i]['description']}\n")
                        num += 1
                    userInput = input("Enter any key to return: ")
                    load()
                else:
                    print(invalid)
            except Exception:
                print(invalid)
def deleteSchedule():
    """Interactively delete one schedule from the JSON store.

    Lists schedules with numeric indices, asks which one to delete, and
    requires the "13579" confirmation code before removing it.
    """
    with open(scheduleFile) as f:
        data = json.load(f)
    if len(data) < 1:
        print("--There are currently no schedules.--")
        return
    while True:
        lister = []
        num = 0  # reset every pass so re-listing shows correct indices
        for name in data:
            lister.append(name)
            print(f"[{num}] {name}\nPeriods: {len(data[name])}\n")
            num += 1
        num -= 1
        userInput = input("Please choose a schedule to delete (or press e to exit): ")
        if userInput == "e":
            return
        print()
        try:
            choice = int(userInput)
            if choice > -1 and choice <= num:
                chosen = lister[choice]
                confirm = input(f'Are you sure you want to delete "{chosen}"?\nEnter "13579" to confirm, or enter anything else to cancel: ')
                if confirm == "13579":
                    load()
                    # BUGFIX: delete the *selected* schedule; the original
                    # did `del data[i]`, which removed whichever key the
                    # listing loop happened to touch last.
                    del data[chosen]
                    with open(scheduleFile, "w") as f:
                        json.dump(data, f, indent=2)
                    input("--Schedule has been deleted.--\n\nEnter any key to return: ")
                    print()
                    break
                else:
                    return
            else:
                print(invalid)
        except Exception:
            # non-numeric input
            print(invalid)
def createAssignment():
    """Interactively create an assignment and persist it to assignment.json.

    Prompts for a unique name, class, a future due date (mm/dd/yyyy)
    and a description.
    """
    with open(assignmentFile) as f:
        data = json.load(f)
    while True:
        name = input("Assignment Name: ")
        if name not in data:
            break
        else:
            print(f'"{name}" is already an assignment. Please enter a different name.')
    classname = input("Class: ")
    while True:
        due = input('Due Date (mm/dd/yyyy): ')
        try:
            parts = due.split("/")
            # BUGFIX: the prompt is mm/dd/yyyy, so parts are
            # [month, day, year]; the original passed day as month and
            # month as day, misparsing every date.
            s = datetime.date(int(parts[2]), int(parts[0]), int(parts[1]))
            n = datetime.datetime.now().date()
            if s > n and len(parts) == 3:
                break
            elif s <= n:
                print("\n--That date has already passed. Please enter a different response.--")
            else:
                print(invalid)
        except Exception:
            # malformed date string (non-numeric or too few parts)
            print(invalid)
    description = input("Description: ")
    data[name] = {}
    data[name]["class"] = classname
    data[name]["due"] = due
    data[name]["description"] = description
    with open(assignmentFile, "w") as f:
        json.dump(data, f, indent=2)
    load()
    print(f'--Assignment "{name}" created!--')
def seeAssignment():
    """Print every stored assignment with its class, due date and description.

    NOTE(review): the header line interpolates `{i}{len(data[i])}`,
    which prints the entry's field count fused onto the assignment
    name -- looks like a leftover formatting artifact; confirm intent.
    """
    with open(assignmentFile) as f:
        data = json.load(f)
    if len(data) < 1:
        print("--There are currently no assignments.--")
        return
    num = 0
    for i in data:
        print(f"[{num}] Assignment: {i}{len(data[i])}\n{' ' * len(str(len(data[i])))} Class: {data[i]['class']}\n{' ' * len(str(len(data[i])))} Due Date: {data[i]['due']}\n{' ' * len(str(len(data[i])))} Description: {data[i]['description']}\n")
        num += 1
    userInput = input("Press any key to return: ")
def deleteAssignment():
    """Interactively delete one assignment (requires "13579" confirmation)."""
    with open(assignmentFile) as f:
        data = json.load(f)
    # snapshot of keys so a numeric index maps back to an assignment name
    lister = [x for x in data]
    if len(data) < 1:
        print("--There are currently no assignments.--")
        return
    num = 0
    for i in data:
        print(f"[{num}] Assignment: {i}{len(data[i])}\n{' ' * len(str(len(data[i])))} Class: {data[i]['class']}\n{' ' * len(str(len(data[i])))} Due Date: {data[i]['due']}\n{' ' * len(str(len(data[i])))} Description: {data[i]['description']}\n")
        num += 1
    num -= 1
    while True:
        try:
            userInput = input("Please choose an assignment to delete (or press e to exit): ")
            if userInput == "e":
                return
            elif int(userInput) > -1 and int(userInput) <= num:
                confirm = input(f'\nAre you sure you want to delete "{lister[int(userInput)]}"?\nEnter "13579" to confirm, or enter anything else to cancel: ')
                if confirm == "13579":
                    del data[lister[int(userInput)]]
                    with open(assignmentFile, "w") as f:
                        json.dump(data, f, indent=2)
                    userInput = input("--Assignment has been deleted.--\n\nEnter any key to return: ")
                    print()
                    break
            else:
                print(invalid)
        except Exception as e:
            # non-numeric input; note this prints the raw exception
            print(e)
def programChoice():
    """Main menu loop: dispatch to the chosen sub-program until the user quits."""
    # dispatch table replaces the original six-way if/elif chain
    actions = {
        "0": createSchedule,
        "1": seeSchedule,
        "2": deleteSchedule,
        "3": createAssignment,
        "4": seeAssignment,
        "5": deleteAssignment,
    }
    # BUGFIX: options 4 and 5 were mislabeled in the original menu
    # ("Create an assignment" / "Delete a schedule" copy-paste errors)
    prompt = ("[0] - Create a schedule\n[1] - See existing schedules\n"
              "[2] - Delete a schedule\n[3] - Create an assignment\n"
              "[4] - See existing assignments\n[5] - Delete an assignment\n\n"
              "Please choose the program you would like to use: ")
    while True:
        userInput = input(prompt)
        if userInput in actions:
            load()
            actions[userInput]()
            if anythingElse() == 1:
                break
        else:
            print(invalid)
def main():
    """Welcome menu: begin the program or quit."""
    print("\n\n-----Welcome to Scheduler.py, a program made to schedule classes and assignments.-----")
    while True:
        userInput = input("[0] - Begin\n[1] - Quit\n\nPlease choose an option: ")
        if userInput == "0":
            load()
            programChoice()
            break
        elif userInput == "1":
            unload()
            break
        else:
            print(invalid)
main()
| BenVN123/PythonScheduler | scheduler.py | scheduler.py | py | 8,943 | python | en | code | 1 | github-code | 36 |
1417017554 | import csv # import csv library
'''This code takes a file input and header input
the function utilises those inputs to open the file then checks the header
the header is added to a dictionary called unique_list and stores the count of the header
it then gets printed'''
# a function that takes the file and header and checks it adding the selected header into a list and the count of the header into a dictionary
def read_and_check(file, header):
    """Count occurrences of each distinct value in one CSV column.

    Opens *file* with csv.DictReader, tallies how often each value
    appears in the *header* column, prints the tally and returns it.

    Args:
        file: path to the CSV file (first row must be a header row).
        header: name of the column to tally.

    Returns:
        dict mapping each distinct column value to its count.
    """
    unique_list = {}
    # `with` guarantees the file handle is closed even on error
    # (the original used open/close and leaked the handle on exceptions)
    with open(file, "r") as file_infile:
        for row in csv.DictReader(file_infile):
            value = row[header]
            unique_list[value] = unique_list.get(value, 0) + 1
    print(unique_list)
    return unique_list
# Testing
read_and_check("Google Play Store.csv", "Category")
read_and_check("Google Play Store.csv", "Rating") | Kaizuu08/PythonShowcase2023Semester1 | Week 8/csv_dictreader.py | csv_dictreader.py | py | 1,178 | python | en | code | 0 | github-code | 36 |
8460276839 | from time import sleep
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from appium.webdriver.extensions.android.gsm import GsmCallActions
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
class TestBrowser():
    """Appium smoke test against the Xueqiu Android app on a local emulator."""
    def setup(self):
        """Open an Appium session with the desired capabilities below."""
        des_caps = {
            'platformName':'android',
            'platformVersion':'6.0',
            'appPackage':'com.xueqiu.android',
            'appActivity':'com.xueqiu.android.common.MainActivity',
            # 'browserName':'Browser',
            # noReset: do not stop the app, clear its data, or uninstall it
            'noReset':True,
            # fullReset: stop the app, clear its data and uninstall it
            # 'fullReset':True,
            # keep the app-under-test process running across resets
            'dontStopAppOnReset':True,
            'deviceName':'127.0.0.1:7555',
            'autoGrantPermissions':True,
            # auto-start the AVD named Pixel_23_6 (see `emulator -list-avds`);
            # only works with the stock Android emulator, not third-party ones
            # 'avd':'Pixel_23_6'
            'newCommandTimeout':300
        }
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', des_caps)
        self.driver.implicitly_wait(10)
    def teardown(self):
        """Close the Appium session."""
        self.driver.quit()
    def test_mobile(self):
        """Placeholder test; the commented lines show device-API examples."""
        pass
        # self.driver.make_gsm_call('15910852286',GsmCallActions.CALL)
        # self.driver.send_sms('15910852286','hello appium api')
        # # screen recording needs Android 8.0+; not supported on Huawei
        # self.driver.start_recording_screen()
        # # enable airplane mode
        # self.driver.set_network_connection(1)
        # self.driver.get_screenshot_as_file('./photos/img.png')
        # sleep(3)
        # # restore normal connectivity
        # self.driver.set_network_connection(4)
        # sleep(3)
        # self.driver.stop_recording_screen()
| yyw15910852287/hogwarts_appium | ไบคไบapi/test_jiaohu.py | test_jiaohu.py | py | 1,876 | python | zh | code | 0 | github-code | 36 |
33920201413 | from math import ceil
# Price table: restaurant -> sushi type -> price per portion.
# Replaces the original 50-line if/elif chain with a single lookup.
MENU = {
    "Sushi Zone": {"sashimi": 4.99, "maki": 5.29, "uramaki": 5.99, "temaki": 4.29},
    "Sushi Time": {"sashimi": 5.49, "maki": 4.69, "uramaki": 4.49, "temaki": 5.19},
    "Sushi Bar": {"sashimi": 5.25, "maki": 5.55, "uramaki": 6.25, "temaki": 4.75},
    "Asian Pub": {"sashimi": 4.50, "maki": 4.80, "uramaki": 5.50, "temaki": 5.50},
}

type_sushi = input()
name_restaurant = input()
number_portions = int(input())
delivery = input()

is_invalid_restaurant = False
# same fallback the original used: price stays 1 for an unknown sushi
# type at a known restaurant
price = 1

if name_restaurant in MENU:
    price = MENU[name_restaurant].get(type_sushi, 1)
else:
    print(f"{name_restaurant} is invalid restaurant!")
    is_invalid_restaurant = True

price_order = price * number_portions
if delivery == "Y":
    # 20% delivery surcharge
    price_order *= 120 / 100
# totals are always rounded up to a whole lev
price_order = ceil(price_order)

if not is_invalid_restaurant:
    print(f"Total price: {price_order} lv.")
14195362149 | #!/usr/bin/python3
"""
Unittest for review module
"""
import os
import unittest
from models.review import Review
from models.base_model import BaseModel
from models.engine.file_storage import FileStorage
class Test_Review(unittest.TestCase):
    """Unit tests for the Review model (attributes, inheritance, docstrings)."""
    # class-level instance shared by the attribute/type checks below
    m = Review()
    def setUp(self):
        """Redirect storage to a scratch file and create one saved Review."""
        FileStorage._FileStorage__file_path = "test.json"
        self.rev = Review()
        self.rev.place_id = "666"
        self.rev.user_id = "666"
        self.rev.text = "666"
        self.rev.save()
    def test_atrr_type_review(self):
        """place_id, user_id and text default to str."""
        self.assertEqual(type(self.m.place_id), str)
        self.assertEqual(type(self.m.user_id), str)
        self.assertEqual(type(self.m.text), str)
    def test_attribute_place_id(self):
        """Review exposes place_id, user_id and text attributes."""
        self.assertEqual(hasattr(self.m, "place_id"), True)
        self.assertEqual(hasattr(self.m, "user_id"), True)
        self.assertEqual(hasattr(self.m, "text"), True)
    def test_subcls_Review(self):
        """Review instances subclass BaseModel."""
        self.assertTrue(issubclass(self.rev.__class__, BaseModel), True)
        self.assertIsInstance(self.rev, Review)
    def test_docstring_Review(self):
        """Review itself carries a docstring."""
        self.assertIsNotNone(Review.__doc__)
    def testpublic(self):
        """id is a public string attribute on new instances."""
        self.assertEqual(str, type(Review().id))
if __name__ == "__main__":
unittest.main()
| Drixner/holbertonschool-AirBnB_clone | tests/test_models/test_review.py | test_review.py | py | 1,509 | python | en | code | 4 | github-code | 36 |
14597675926 | from sys import stdin
# DP table: ways[n][length] = number of length-`length` symbol strings whose
# weights sum to n, where two symbols weigh 1 and one symbol each weighs 2
# and 3 (per-symbol recurrence f(n) = 2*f(n-1) + f(n-2) + f(n-3)).
# Python's arbitrary-precision ints handle the huge counts for free.
ways = [[0 for length in range(1001)] for n in range(1001)]
ways[0][0] = 1
for n in range(1001):
    for length in range(1, 1001):
        # NOTE: at n == 0 the `ways[n - 1]` term reads ways[-1] (the last,
        # still all-zero row), which is harmless since rows fill in order.
        ways[n][length] += 2 * ways[n - 1][length - 1]
        if n >= 2:
            ways[n][length] += ways[n - 2][length - 1]
        if n >= 3:
            ways[n][length] += ways[n - 3][length - 1]
# collapse over lengths: total count for each n regardless of string length
ways = [sum(row) for row in ways]
# one query per input line: print the precomputed answer
for line in stdin:
    print(ways[int(line)])
| vfolunin/archives-solutions | UVa Online Judge/10198.py | 10198.py | py | 440 | python | en | code | 0 | github-code | 36 |
14566552628 | from django.contrib.auth.models import User
from django.db import models
import cover.models
from documents.models import (Book, Chunk, Image, BookPublishRecord,
ImagePublishRecord)
from documents.signals import post_publish
from dvcs.signals import post_publishable
def book_changed(sender, instance, created, **kwargs):
    """On Book save: touch the book and every chunk it contains."""
    instance.touch()
    for c in instance:
        c.touch()
models.signals.post_save.connect(book_changed, sender=Book)
def chunk_changed(sender, instance, created, **kwargs):
    """On Chunk save: touch the chunk and its parent book."""
    instance.book.touch()
    instance.touch()
models.signals.post_save.connect(chunk_changed, sender=Chunk)
def image_changed(sender, instance, created, **kwargs):
    """On Image save: touch the image."""
    instance.touch()
models.signals.post_save.connect(image_changed, sender=Image)
def publish_listener(sender, *args, **kwargs):
    """After publishing: touch the published book (plus chunks) or image."""
    if isinstance(sender, BookPublishRecord):
        sender.book.touch()
        for c in sender.book:
            c.touch()
    elif isinstance(sender, ImagePublishRecord):
        sender.image.touch()
post_publish.connect(publish_listener)
def chunk_publishable_listener(sender, *args, **kwargs):
    """On publishable-state change: touch the tree, and its book for chunks."""
    sender.tree.touch()
    if isinstance(sender.tree, Chunk):
        sender.tree.book.touch()
post_publishable.connect(chunk_publishable_listener)
def publishable_listener(sender, *args, **kwargs):
    """On Image publishable-state change: touch the tree."""
    sender.tree.touch()
post_publishable.connect(publishable_listener, sender=Image)
def listener_create(sender, instance, created, **kwargs):
    """Give every newly created Book an initial chunk (number=1, slug='1')."""
    if created:
        instance.chunk_set.create(number=1, slug='1')
models.signals.post_save.connect(listener_create, sender=Book)
def cover_changed(sender, instance, created, **kwargs):
    """On cover image save: rebuild covers of all books that use it."""
    for book in instance.book_set.all():
        book.build_cover()
models.signals.post_save.connect(cover_changed, sender=cover.models.Image)
| fnp/redakcja | src/documents/models/listeners.py | listeners.py | py | 1,794 | python | en | code | 4 | github-code | 36 |
25969368475 | """
Given an array consisting of n integers, find the contiguous subarray of given length k that has the maximum average value. And you need to output the maximum average value.
Example 1:
Input: [1,12,-5,-6,50,3], k = 4
Output: 12.75
Explanation: Maximum average is (12-5-6+50)/4 = 51/4 = 12.75
Note:
1 <= k <= n <= 30,000.
Elements of the given array will be in the range [-10,000, 10,000].
"""
class Solution(object):
    def findMaxAverage(self, nums, k):
        """Return the maximum average of any contiguous subarray of length k.

        Uses a sliding-window running sum: O(n) time, O(1) extra space.

        :type nums: List[int]
        :type k: int
        :rtype: float
        """
        # Degenerate case: the whole array is the only (or too-short) window.
        if len(nums) <= k:
            return sum(nums) / float(k)
        window = sum(nums[:k])
        best = window
        # Slide the window right one step at a time, adding the new element
        # and dropping the one that fell out on the left.
        for right in range(k, len(nums)):
            window += nums[right] - nums[right - k]
            if window > best:
                best = window
        return best / float(k)
5921271913 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 18:57:26 2019
@author: Mico
"""
import pandas as pd
import os
import numpy as np
def enconde_string_category(df, df_clean, mapping, col_name):
    """Label-encode a string column (case-insensitively) into df_clean.

    Integer codes are written to df_clean[col_name]; the list of unique
    lowercase labels (index == code) is recorded in mapping[col_name].
    Returns the updated (df_clean, mapping) pair.
    """
    codes, uniques = pd.factorize(df[col_name].str.lower())
    mapping[col_name] = uniques.tolist()
    df_clean[col_name] = codes
    return df_clean, mapping
def column_to_float(df, col_name):
    """Coerce one column of ``df`` to floats and return it as a new DataFrame.

    Each value is stringified, a trailing ' in' unit suffix is stripped, and
    an accidental doubled decimal point ('..') is collapsed to a single one.

    NOTE(review): the original chained a no-op ``replace('.', '.')`` which
    has been removed; it may have been *intended* as ``replace(',', '.')``
    (decimal-comma input), but that would change behavior, so it is
    deliberately not introduced here — verify against the source data.
    """
    cleaned = [
        float(str(row).replace(' in', '').replace('..', '.'))
        for row in df[col_name]
    ]
    return pd.DataFrame(cleaned, columns=[col_name])
# --- Load and clean the raw blasting (tronadura) database -------------------
data_path = os.path.join('.','BaseDatosHistorica_Tronadura_Hackathon.xlsx')
df = pd.read_excel(data_path,skiprows=2)  # the real header is on the third row
df = df.dropna()  # drop rows containing NaN values
headers = df.columns  # header names
# Bench-geometry constants.
altura_banco = 15  # bench height, 15 meters
pasadura = 1  # sub-drill, 1 meter
largo_pozo = altura_banco + pasadura  # total hole length (computed but unused below)
# Column groups: categorical columns get label-encoded, numeric ones get
# coerced to float.
# NOTE(review): 'Diรกmetro' looks like mojibake of 'Diámetro' — verify the
# column name against the actual spreadsheet header.
categorias = ['Fase','Tipo de tronadura','Tipo Material','M','Dominio Estructural','Tipo Explosivo']
floats = ['Banco','Diรกmetro','Fc','P10','P20','P30','P40','P50','P60','P70','P80','P90','P100','Este','Norte','Cota']
otros = ['BxS','Tiempo entre Pozos Filas ms']
clean_dataframe = pd.DataFrame()
class_mapping = {}
# Encode categorical columns and convert numeric columns.
for header_name in headers:
    if header_name in categorias:
        clean_dataframe, class_mapping = enconde_string_category(df,clean_dataframe,class_mapping,header_name)
    else:
        if header_name in floats:
            clean_dataframe[header_name] = column_to_float(df,header_name)
            #clean_dataframe[header_name] = pd.to_numeric(df[header_name])
# Composite columns that need custom parsing.
# 'BxS' encodes burden x spacing, e.g. "4x5".
burden_list = []
espaciamiento_list = []
for bxs in df['BxS']:
    burden,espaciamiento = (bxs.lower()).split('x')
    burden_list.append(float(burden))
    espaciamiento_list.append(float(espaciamiento))
clean_dataframe['Burden'] = burden_list
clean_dataframe['Espaciamiento'] = espaciamiento_list
clean_dataframe['Area'] = clean_dataframe['Burden']*clean_dataframe['Espaciamiento']
# 'Tiempo entre Pozos Filas ms' encodes inter-hole / inter-row delays, e.g. "5-100".
tx_list = []
ty_list = []
for txy in df['Tiempo entre Pozos Filas ms']:
    tx,ty = txy.split('-')
    tx_list.append(float(tx))
    ty_list.append(float(ty))
clean_dataframe['t_x'] = tx_list
clean_dataframe['t_y'] = ty_list
# Persist the cleaned table and the label-encoding map.
clean_dataframe.to_excel('clean_database.xls', index = False)
np.save('mapping.npy',class_mapping)
| larosi/hackathon-enaex-2019 | 0_Data_cleansing.py | 0_Data_cleansing.py | py | 2,522 | python | en | code | 0 | github-code | 36 |
34846072669 | #!/usr/bin/env python
# coding: utf-8
# refer to https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
#
# to tune parameters
# refer to http://yangguang2009.github.io/2017/01/08/deeplearning/grid-search-hyperparameters-for-deep-learning/
# In[1]:
from __future__ import print_function
import json
import numpy as np
import os
import pandas as pd
import urllib
import math
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
# Fetch 5-minute USDT/BTC candles from Poloniex's public chart-data API.
# NOTE(review): '¤cyPair' looks like a mangled '&currencyPair' (the
# '&curren' run was turned into the ¤ HTML entity somewhere upstream);
# the request likely fails as written — verify against the original script.
url = 'https://poloniex.com/public?command=returnChartData¤cyPair=USDT_BTC&start=1546300800&end=9999999999&period=300&resolution=auto'
# Parse the JSON returned from the API into a pandas DataFrame.
openUrl = urllib.request.urlopen(url)
r = openUrl.read()
openUrl.close()
d = json.loads(r.decode())
df = pd.DataFrame(d)
# Select Poloniex's lowercase OHLCV columns and rename to capitalized names.
original_columns=[u'date', u'close', u'high', u'low', u'open', u'volume']
new_columns = ['Timestamp','Close','High','Low','Open','Volume']
df = df.loc[:,original_columns]
df.columns = new_columns
df.to_csv('bitcoin201901to201905.csv',index=None)
# In[2]: index by timestamp.
df = df.set_index('Timestamp')
df.head()
# In[3]:
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import seaborn as sns
import numpy as np
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table.

    Produces lag columns var{j}(t-n_in)..var{j}(t-1) followed by forecast
    columns var{j}(t)..var{j}(t+n_out-1). Rows with NaNs introduced by the
    shifting are dropped when ``dropnan`` is True.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    frame = DataFrame(data)
    cols = []
    names = []
    # Input sequence: t-n_in ... t-1.
    for lag in range(n_in, 0, -1):
        cols.append(frame.shift(lag))
        names.extend('var%d(t-%d)' % (v + 1, lag) for v in range(n_vars))
    # Forecast sequence: t, t+1 ... t+n_out-1.
    for step in range(n_out):
        cols.append(frame.shift(-step))
        if step == 0:
            names.extend('var%d(t)' % (v + 1) for v in range(n_vars))
        else:
            names.extend('var%d(t+%d)' % (v + 1, step) for v in range(n_vars))
    result = concat(cols, axis=1)
    result.columns = names
    if dropnan:
        result.dropna(inplace=True)
    return result
# In[4]: plot the raw closing-price series.
pyplot.plot(df['Close'].values, label='price')
pyplot.legend()
pyplot.show()
# In[5]: feature-correlation heatmap.
sns.heatmap(df.corr(), annot=True, cmap='RdYlGn', linewidths=0.1, vmin=0)
# In[6]: keep only the Close column (univariate model input).
#dataset = read_csv('update_20190301_bitbank_f.csv', header=0, index_col=0)
#values = dataset.values
#dataset.head()
values = df['Close'].values
values = values.reshape(-1, 1)  # column vector expected by the scaler
print(values)
# In[7]: scale to [0, 1] and frame as supervised learning (t-1 -> t).
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
#test = series_to_supervised(values, 1, 1)
#print(test.head())
#print(test.shape)
# In[8]: 70/30 train/test split, reshaped to the LSTM's
# [samples, timesteps, features] layout.
print(values.shape)
print(reframed.shape)
print('---------')
#print(reframed.columes)
# split into train and test sets
values = reframed.values
print(values.shape)
n_train_rate = 0.7
n_train = values.shape[0] * n_train_rate
n_train = math.floor(n_train)
print(n_train)
train = values[:n_train, :]
test = values[n_train:, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# In[9]:
# NOTE(review): this cell repeats the split above verbatim (values, train,
# test, train_X, ... are recomputed with identical results). It looks like a
# leftover duplicate notebook cell and could be removed.
import math
# drop columns we don't want to predict (keep only the Close column)
#reframed.drop(reframed.columns[[6, 7, 8, 10, 11]], axis=1, inplace=True)
#print(reframed.head())
# split into train and test sets
values = reframed.values
print(values.shape)
n_train_rate = 0.7
n_train = values.shape[0] * n_train_rate
n_train = math.floor(n_train)
print(n_train)
train = values[:n_train, :]
test = values[n_train:, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# In[10]: (one-off pip installs kept commented out)
#!pip install tqdm --upgrade
#!pip install hyperopt --upgrade
#!pip install hyperas --upgrade
type(train_X)
# In[16]: hyperparameter search with hyperas.
# NOTE: hyperas parses the *source text* of these functions, so the
# {{choice(...)}} placeholders are template syntax, not valid Python —
# do not refactor these bodies.
def data():
    """Return the train/test arrays for hyperas (reads module globals)."""
    global train_X, test_X, train_y, test_y
    return train_X, test_X, train_y, test_y
# design network
def model(train_X, train_Y, test_X, test_Y):
    """Build and train one LSTM candidate; hyperas fills the {{choice}} slots.

    NOTE(review): the parameters shadow the globals with different case
    (train_Y vs train_y) and the body actually uses the lowercase globals.
    Also, the model is compiled with a loss only (no metrics), so
    model.evaluate returns a single scalar — unpacking into (score, acc)
    may fail at runtime; verify.
    """
    model = Sequential()
    model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='adam')
    history = model.fit(train_X, train_y, epochs={{choice([10, 25, 50])}}, batch_size={{choice([8, 16, 32,50])}}, validation_data=(test_X, test_y), verbose=2, shuffle=False)
    score, acc = model.evaluate(test_X, test_y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials(),
                                      notebook_name='LSTMsinKeras-VirtualCurrency-Simple')
print("Evalutation of best performing model:")
print(best_model.evaluate(test_X, test_y))
# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# In[ ]:
# make a prediction
yhat = model.predict(test_X)
print('yhat.shape', yhat.shape, yhat[0:5, :])
test_X_reshape = test_X.reshape((test_X.shape[0], test_X.shape[2]))
print(test_X_reshape.shape, test_X_reshape[0:5, -7:])
# invert scaling for forecast
inv_yhat = concatenate((yhat, test_X_reshape[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
print('inv_yhat.shape', inv_yhat.shape, inv_yhat[0:5, :])
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X_reshape[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
# calculate RMSE
# ๅ ไธบinv_y ้ขๆตๆฏไธไธๆถๅป็ๅผ๏ผๆไปฅ้่ฆๆ inv_yhat ๅพๅ shift ไธไธชๆถๅป
rmse = sqrt(mean_squared_error(inv_y[:-1], inv_yhat[1:]))
print('Test RMSE: %.3f' % rmse)
# In[ ]:
print(test_X.shape)
#print(range(test_X.shape))
#pyplot.plot( inv_y[-100:-1], label='predict')
#pyplot.plot( inv_yhat[-99:], label='actual')
pyplot.plot( inv_y, label='predict')
pyplot.plot( inv_yhat, label='actual')
pyplot.legend()
pyplot.show()
# Up/down direction hit rate:
# take the element-wise differences of the predicted and actual series
# and check whether each step is positive (i.e. moved in the same direction).
a = np.diff(inv_y) > 0  # actual up/down moves
b = np.diff(inv_yhat) > 0  # predicted up/down moves
# Fraction of steps where predicted and actual directions agree.
print(sum(a ==b)/a.shape[0])
# In[14]: scratch cell demonstrating the `global` keyword (notebook leftover).
x = 6
def func():
    """Print and return the module-level ``x``."""
    global x
    print(x)
    return x
func()
# In[ ]:
| dxcv/TradingAlgo | Multi-LSTM/LSTMsinKeras-VirtualCurrency-Simple.py | LSTMsinKeras-VirtualCurrency-Simple.py | py | 7,561 | python | en | code | 0 | github-code | 36 |
class Item:
    """A simple inventory item with a name, unit price and stock quantity."""

    def __init__(self, name, price, quantity=0):
        # Runs automatically when an instance is created (the Python
        # counterpart of a Java constructor). ``quantity`` defaults to 0 so
        # callers may omit it without an error.
        print(f"Instance is created : {name}")
        # Attributes are assigned dynamically from the constructor arguments.
        self.name = name
        self.price = price
        self.quantity = quantity

    def calculate_total_price(self):
        """Return the total value of this item (unit price x quantity)."""
        # The attributes are already on ``self``, so no extra arguments are
        # needed here.
        return self.price * self.quantity
'''instead of hardcoding attributes to avoid it
__init__(self) function is used'''
# Demo: create two items and show their attributes and computed totals.
item1 = Item("Phone", 55900, 5)
print(item1.name, item1.price, item1.quantity)
print("Total price:", item1.calculate_total_price())
item2 = Item("Laptop", 129900, 3)
print(item2.name, item2.price, item2.quantity)
print("Total price:", item2.calculate_total_price())
# Instance-only attribute: only item2 gets has_numpad; other Item
# instances will not have this attribute.
item2.has_numpad = False
# what if it would have been easier instead of declaring instances like above
# an instance can be created only by passing those attributes values
40497969641 | from PRISMRenderingShaders.CustomShader import CustomShader
"""PlaneIntersectingShader Class containing the code for the Plane intersecting shader.
:param CustomShader: Parent class containing the function to access the parameters of the shader.
:type CustomShader: class.
"""
class PlaneIntersectingShader(CustomShader):
    """Volume-rendering shader that skips voxels around the entry->target
    axis, exposing the anatomy along a surgical approach plane."""

    # Scalar parameter: position of the cut along the entry->target axis.
    shaderfParams = { 'relativePosition' : { 'displayName' : 'Relative Position', 'min' : 0.0, 'max' : 1.0, 'defaultValue' : 1.0 }}
    # 4-float parameters: entry and target points (presumably RAS homogeneous
    # coordinates — confirm against CustomShader's parameter handling).
    shader4fParams = {'entry': {'displayName': 'Entry', 'defaultValue': {'x': 0.0, 'y': 0.0, 'z': 0.0, 'w': 0.0}}, \
        'target': {'displayName': 'Target', 'defaultValue': {'x': 0.0, 'y': 0.0, 'z': 0.0, 'w': 0.0}}}
    # Boolean parameter: whether a third (horizontal) cutting plane is applied.
    shaderbParams = { 'plane' : { 'displayName' : 'Third Plane', 'defaultValue' : 0, 'optionalWidgets' : []}}

    def __init__(self, shaderPropertyNode, volumeNode = None):
        # volumeNode is accepted for interface compatibility but unused here.
        CustomShader.__init__(self,shaderPropertyNode)

    @classmethod
    def GetBasicDescription(cls):
        """Function to get a basic description of the current shader.

        :return: Description of the current shader.
        :rtype: str
        """
        return 'Allows to visualize the anatomy along the approach plane for surgery'

    @classmethod
    def GetDisplayName(cls):
        """Return the shader name shown in the UI."""
        return 'Plane intersecting'

    def setupShader(self):
        """Install the fragment-shader cropping logic.

        Replaces VTK's ``//VTK::Cropping::Impl`` hook with GLSL that sets
        ``g_skip`` for voxels on the camera side of the vertical (and
        optionally horizontal) planes through the entry->target axis.
        """
        super(PlaneIntersectingShader,self).setupShader()
        replacement = """
            vec4 texCoordRAS = in_volumeMatrix[0] * in_textureDatasetMatrix[0] * vec4(g_dataPos, 1.);
            vec3 dirVect = normalize(entry.xyz - target.xyz);
            bool skipAlongAxis = dot(texCoordRAS.xyz - entry.xyz, dirVect) + length(entry.xyz - target.xyz) * relativePosition > 0;

            vec3 vPlaneN = cross(vec3(0.0,0.0,1.0), dirVect);
            float dirCoord = dot(texCoordRAS.xyz - entry.xyz, vPlaneN);
            float dirCam = dot(in_cameraPos - entry.xyz, vPlaneN);
            bool skipVertical = dirCoord * dirCam > 0.0;

            vec3 hPlaneN = cross(vPlaneN, dirVect);
            dirCoord = dot(texCoordRAS.xyz - entry.xyz, hPlaneN);
            dirCam = dot(in_cameraPos - entry.xyz, hPlaneN);
            bool skipHorizontal = dirCoord * dirCam > 0.0;

            if (plane == 1)
              g_skip = skipAlongAxis && skipVertical && skipHorizontal;
            else
              g_skip = skipAlongAxis && skipVertical;
            """
        self.shaderProperty.AddFragmentShaderReplacement("//VTK::Cropping::Impl", True, replacement, False)
#shaderreplacement | andrey-titov/SlicerPRISMRendering | PRISMRendering/PRISMRenderingShaders/PlaneIntersectingShader.py | PlaneIntersectingShader.py | py | 2,384 | python | en | code | null | github-code | 36 |
39974821125 |
# USAGE
# python align_faces.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg
# import the necessary packages
from imutils.face_utils import FaceAligner
from imutils.face_utils import rect_to_bb
import argparse
import imutils
import dlib
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
    help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
    help="path to input image")
args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor and the face aligner
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# desiredLeftEye controls the zoom (0.25 here); output faces are 112x112.
fa = FaceAligner(predictor, desiredLeftEye=(0.25, 0.25),desiredFaceWidth=112)
# load the input image, resize it, and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=800)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Second argument is dlib's upsampling factor for detection.
rects = detector(gray, 2)
# loop over the face detections (rects holds the bounding boxes)
for rect in rects:
    # extract the ROI of the *original* face, then align the face
    # using facial landmarks
    (x, y, w, h) = rect_to_bb(rect)  # NOTE(review): computed but unused
    faceAligned = fa.align(image, gray, rect)
    #faceAligned = cv2.resize(faceAligned, (224, 224))
    import uuid
    f = str(uuid.uuid4())  # random unique file name per detected face
    # write resulting image
    # NOTE(review): hard-coded absolute output directory — consider making
    # this a CLI argument before reuse.
    cv2.imwrite("/home/monete/monete@gmail.com/studying/IA/thesis/deeplearning/dataset/fer2013/output/7-surprise/" + f + ".png", faceAligned)
    # display the output images
    #cv2.imshow("Aligned", faceAligned)
    #cv2.waitKey(0)
| juanluisrosaramos/dataset_tuning | align_faces.py | align_faces.py | py | 1,729 | python | en | code | 1 | github-code | 36 |
4778253189 | import os
import tempfile
import pytest
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn as nn
from typing import Optional, Union, Tuple, List
import transformer_engine.pytorch as te
from transformer_engine.common import recipe
import transformer_engine_extensions as tex
from transformer_engine.pytorch.cpp_extensions import gemm, fp8_gemm, gelu, cast_to_fp8, cast_from_fp8
from transformer_engine.pytorch.module.base import get_workspace
import transformer_engine.pytorch.cpp_extensions as texcpp
import transformer_engine.pytorch.softmax as softmax_defs
from transformer_engine.pytorch.utils import get_default_init_method
from transformer_engine.pytorch.export import is_in_onnx_export_mode
from transformer_engine.pytorch.fp8 import FP8GlobalStateManager
# Global test configuration knobs.
# Enable this to serialize test inputs and outputs to file (as a Polygraphy RunResults instance).
SAVE_TEST_IO = bool(int(os.getenv("NVTE_ONNX_EXPORT_SAVE_TEST_IO", "0")))
if SAVE_TEST_IO:
    # Polygraphy is only required when IO serialization is requested.
    from polygraphy.json import save_json
    from polygraphy.comparator import RunResults
# The directory where generated ONNX test models are stored.
NVTE_TEST_ARTIFACTS_DIR = os.environ.get('NVTE_TEST_ARTIFACTS_DIR')
NVTE_TEST_ARTIFACTS_DIR = NVTE_TEST_ARTIFACTS_DIR or os.path.join(tempfile.gettempdir(), "./gen_onnx_models")
# The directory where this file is stored.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
# ScaledUpperTriangMaskedSoftmax is exported via ONNX::Trilu which was introduced in opset 14.
TRILU_OPSET = 14
# Opset used in the ONNX files generated by the tests.
OPSET = 17
assert OPSET >= TRILU_OPSET
# Shared library implementing custom FP8 Q/DQ operators for ONNX Runtime (ORT).
ORT_CUSTOM_OPS_LIB = os.path.join(TESTS_DIR, "./libcustom_ort_fp8_qdq_ops.so")
# Skip marker applied to tests that need FP8-capable hardware.
fp8_available, reason_for_no_fp8 = FP8GlobalStateManager.is_fp8_available()
skip_FP8 = pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)
supported_activations = ["gelu", "relu", "reglu", "geglu", "swiglu"]
all_normalizations = ["LayerNorm", "RMSNorm"]
@pytest.fixture()
def seed_default_rng():
    """Reseed the PRNG deterministically for test reproducibility.

    The previous implementation called ``torch.random.seed()``, which seeds
    from a *non-deterministic* source and therefore did not make the tests
    reproducible as the docstring claimed; a fixed manual seed does.
    """
    torch.manual_seed(1234)
@pytest.fixture()
def set_max_seq_len(max_seq_len=128):
    """Set the maximum sequence length that can be used for attention masking."""
    # The TE ONNX-export path reads this env var (KV-cache sizing).
    os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = f"{max_seq_len}"
def create_fp8_recipe():
    """Return the delayed-scaling FP8 recipe (E4M3 format) shared by all tests."""
    return recipe.DelayedScaling(margin=0, interval=1, fp8_format=recipe.Format.E4M3)
def do_export(
    model: torch.nn.Module,
    inp: torch.Tensor,
    fname: str,
    use_fp8: bool=True,
    opset: int=OPSET,
    input_names: List[str]=None,
    output_names: List[str]=None,
    dynamic_axes: List[str]=None
):
    """Export `model` to ONNX under NVTE_TEST_ARTIFACTS_DIR/`fname`.

    The export runs under inference mode and te.fp8_autocast (enabled per
    `use_fp8`), with TE's ONNX-export mode switched on. TracerWarnings are
    suppressed because tracing TE modules is expected to emit them.
    """
    fp8_recipe = create_fp8_recipe()
    input_names = input_names or ["input"]
    output_names = output_names or ["output"]
    with torch.inference_mode(), te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
        warnings.filterwarnings(
            action='ignore',
            category=torch.jit.TracerWarning,
            module=r'.*'
        )
        model.cuda().eval()
        os.makedirs(NVTE_TEST_ARTIFACTS_DIR, exist_ok=True)
        fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
        # Normalize the input(s) to a tuple for torch.onnx.export.
        inps = inp if isinstance(inp, list) or isinstance(inp, tuple) else (inp,)
        assert len(inps) == len(input_names)
        # Inputs that are None are not exported; drop their names too.
        inds_to_del = [i for i in range(len(inps)) if inps[i] is None]
        input_names = [input_names[i] for i in range(len(inps)) if i not in inds_to_del]
        with te.onnx_export(True):
            torch.onnx.export(
                model,
                inps,
                fname,
                verbose=True,
                dynamic_axes=dynamic_axes,
                opset_version=opset,
                input_names=input_names,
                output_names=output_names,
                do_constant_folding=True,
                operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH)
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array; pass non-tensors through.

    bfloat16 has no numpy equivalent, so it is upcast to float32 first.
    """
    if not isinstance(tensor, torch.Tensor):
        return tensor
    if tensor.dtype == torch.bfloat16:
        tensor = tensor.type(torch.float32)
    return tensor.detach().cpu().numpy()
def set_layer_scale(module: torch.nn.Module, scale: float, num_gemms: int):
    """Initialize the FP8 quantization scales in module"""
    NB_SCALES_PER_GEMM = 3 # One scale per: input, weights, and output GEMM tensors.
    nb_total_scales = num_gemms * NB_SCALES_PER_GEMM
    module.fp8_init(num_gemms)
    # scale and scale_inv are kept mutually inverse so Q followed by DQ is
    # (numerically) the identity. Note: `scale` ends up as 1/scale in the
    # `scale` field and as `scale` in `scale_inv`.
    module.fp8_meta["scaling_fwd"].scale = torch.ones(
        nb_total_scales, dtype=torch.float32, device="cuda") / scale
    module.fp8_meta["scaling_fwd"].scale_inv = torch.ones(
        nb_total_scales, dtype=torch.float32, device="cuda") * scale
def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.tensor], torch.tensor], is_fp8: bool):
    """Transformer Engine forward propagation.

    Runs `model` under inference mode and fp8_autocast (per `is_fp8`) and
    always returns the outputs as a tuple.
    """
    fp8_recipe = create_fp8_recipe()
    with torch.inference_mode(), te.fp8_autocast(enabled=is_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
        te_outputs = model(*inps if isinstance(inps, tuple) else (inps,))
        # Normalize a single-output model to a 1-tuple.
        if not isinstance(te_outputs, tuple):
            te_outputs = (te_outputs,)
    return te_outputs
def compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname):
    """ Compare ORT and TE outputs.

    Raises ValueError when more than `allow_cnt_errors` elements diverge
    beyond the (atol, rtol) tolerance; up to `max_errors_printed` offending
    values are printed for debugging.
    """
    assert len(onnx_outputs) == len(te_outputs)
    # Compare ORT and PyTorch outputs.
    for onnx_output, te_output in zip(onnx_outputs, te_outputs):
        # np.isclose: abs(a - b) <= (atol + rtol * abs(b))
        te_output = to_numpy(te_output)
        onnx_output = to_numpy(onnx_output)
        ac = ~np.isclose(onnx_output, te_output, atol=atol, rtol=rtol)
        mismatches = ac.nonzero()
        mismatched_ids = [loc for loc in zip(*mismatches)]
        if mismatched_ids:
            # Log some information in case of error.
            print("*" * 100)
            nb_errors = len(mismatched_ids)
            nb_vals = min(nb_errors, max_errors_printed)
            print(f"Detected {nb_errors} diverging values (output shape={onnx_output.shape})")
            print(f"Showing first {nb_vals} errors (ONNX -- TE):")
            abs_err = np.abs(onnx_output - te_output)
            errors = abs_err[mismatches]
            for loc in mismatched_ids[:nb_vals]:
                ref = te_output[loc]
                print(f"{onnx_output[loc]} -- {te_output[loc]} err={abs_err[loc]} > {atol + rtol * abs(ref)}")
            print(f"Max error: {np.max(errors)}")
            # A handful of outliers is tolerated (kernel implementation
            # differences between TE and ORT); more than that is a failure.
            if nb_errors > allow_cnt_errors:
                raise ValueError(f"Output validation of {fname} failed with {nb_errors} errors")
def serialize_inputs_outputs(
    fname: str,
    inputs: Union[Tuple[torch.Tensor], torch.Tensor],
    te_outputs: List[torch.Tensor],
    input_names: Optional[List[str]] = None,
    output_names: Optional[List[str]] = None,
):
    """Save test inputs/outputs as Polygraphy JSON next to the ONNX file.

    No-op unless SAVE_TEST_IO (env NVTE_ONNX_EXPORT_SAVE_TEST_IO) is set.
    """
    if not SAVE_TEST_IO:
        return
    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
    input_names = input_names or ["input"]
    output_names = output_names or ["output"]
    inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
    # None inputs are skipped (they are also not exported to ONNX).
    named_inputs = zip(input_names, inputs)
    input_data = [{k: v.cpu() for k, v in named_inputs if v is not None}]
    json_fname = fname[:-len(".onnx")] + "_inputs.json"
    save_json(input_data, json_fname, description="custom input data")
    json_fname = fname[:-len(".onnx")] + "_output.json"
    named_outputs = zip(output_names, te_outputs)
    output_data = {k: v.detach().cpu() for k, v in named_outputs if v is not None}
    custom_outputs = RunResults()
    custom_outputs.add([output_data], runner_name="custom_runner")
    custom_outputs.save(json_fname)
def validate_result(
    fname: str,
    inps: Union[Tuple[torch.Tensor], torch.Tensor],
    model: torch.nn.Module,
    atol: float=1.e-8, # np.isclose default atol
    rtol: float=1.e-5, # np.isclose default rtol
    max_errors_printed: int=10,
    is_fp8: bool=False,
    allow_cnt_errors: int=0,
    input_names: List[str]=None,
    output_names: List[str]=None,
    te_outputs: List[torch.Tensor]=None,
):
    """Compare the outputs of a Transformer Engine (TE) module vs the outputs of its ONNX
    representation using ONNX Runtime (ORT) and ensure they are close.

    The purpose of the output comparison is to validate that TE models are converted to
    their correct ONNX representation by testing that TE and ORT outputs match within some
    small threshold (allowing for finite precision errors).

    Argument `allow_cnt_errors` reduces test failure noise due to spurious errors by ignoring,
    a very small number (0-3) of outliers. This is fine to do because these outliers are due to
    small kernel implementation differences between TE and ORT and do not imply an incorrect ONNX
    representation (the tests assume both ORT or TE kernels are correct).

    Argument `te_outputs` can be used to provide pre-computed TE outputs.
    """
    def create_ort_session(fname: str, is_fp8: bool):
        def load_custom_ops(session_opts: ort.SessionOptions):
            """For FP8 validation with ORT we need to load our custom FP8 Q/DQ extension."""
            if not os.path.exists(ORT_CUSTOM_OPS_LIB):
                raise FileNotFoundError(f"Unable to find {ORT_CUSTOM_OPS_LIB}")
            session_opts.register_custom_ops_library(ORT_CUSTOM_OPS_LIB)
            print("registered custom FP8 Q/DQ ops!")

        """Create an ONNX Runtime session for validation."""
        kwargs = {"providers": ['CUDAExecutionProvider', 'CPUExecutionProvider']}
        if is_fp8:
            sess_options = ort.SessionOptions()
            load_custom_ops(sess_options)
            kwargs["sess_options"] = sess_options
        s = ort.InferenceSession(fname, **kwargs)
        return s

    def create_ort_input_dict(session, inputs):
        # Map the session's declared input names to numpy arrays, skipping
        # None inputs (they were dropped at export time).
        inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
        input_names = [x.name for x in session.get_inputs()]
        inps = [to_numpy(x) for x in inputs if x is not None]
        inp_dict = dict(zip(input_names, inps))
        return inp_dict

    input_names = input_names or ["input"]
    output_names = output_names or ["output"]

    # Run ORT session and TE model.
    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
    if not te_outputs:
        te_outputs = te_infer(model, inps, is_fp8)
    ort_s = create_ort_session(fname, is_fp8)
    input_feed = create_ort_input_dict(ort_s, inps)
    onnx_outputs = ort_s.run(None, input_feed=input_feed)
    compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname)
def create_meta(scale_factor: float, size: int=1):
    """Build an FP8TensorMeta with `size` scales fixed at `scale_factor`.

    scale and scale_inv are kept mutually inverse; the amax history is zeroed.
    """
    meta = tex.FP8TensorMeta()
    meta.amax_history = torch.zeros(1, size, dtype=torch.float32, device="cuda")
    meta.scale_inv = torch.ones(size, dtype=torch.float32, device="cuda") / scale_factor
    meta.scale = torch.ones(size, dtype=torch.float32, device="cuda") * scale_factor
    return meta
def dtype2str(dtype: torch.dtype, fake_bf16_io=False):
    """Map a torch dtype to the file-name suffix used for exported models.

    `fake_bf16_io` denotes bf16 data carried through fp32 I/O; it is only
    valid together with torch.bfloat16.
    """
    if fake_bf16_io:
        assert dtype == torch.bfloat16
        return "_fake_bf16"
    suffixes = {
        torch.float32: "_fp32",
        torch.float16: "_fp16",
        torch.bfloat16: "_bf16",
    }
    return suffixes[dtype]
def as_te_type(dtype: torch.dtype):
    """Map a torch dtype to the corresponding Transformer Engine DType enum."""
    return {
        torch.float32: tex.DType.kFloat32,
        torch.float16: tex.DType.kFloat16,
        torch.bfloat16: tex.DType.kBFloat16,
    }[dtype]
def get_attn_mask_str(use_mask, attn_mask_type):
    """Build the attention-mask portion of exported model file names."""
    # See FusedScaleMaskSoftmax::forward_fused_softmax for logic behind names.
    if attn_mask_type is None:
        return "_mask" if use_mask else "_no-mask"
    if attn_mask_type == "causal":
        return "_causal-mask"
    if use_mask and attn_mask_type == "arbitrary":
        return "_arbitrary-mask"
    return "_arbitrary-no-mask"
"""
Tests cases begin here.
"""
@skip_FP8
@pytest.mark.parametrize("scale_factor", [1, 224])
@pytest.mark.parametrize(
    "precision, atol", [
        [torch.float32, 1e-7],
        [torch.float16, 1e-7],
        [torch.bfloat16, 5e-3],
        ["fake-torch.bfloat16", 5e-3],
    ])
def test_export_cast_ops(seed_default_rng, scale_factor: float, atol: float, precision: torch.dtype):
    """Export and validate an FP8 cast (Q) + cast-back (DQ) round trip."""
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    class TestFP8_QDQ(nn.Module):
        # Casts the input to FP8 (E4M3) and back to high precision; in
        # fake-bf16 mode the result is additionally upcast to float32.
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io

        def forward(self, inp):
            ret = cast_to_fp8(
                inp,
                self.meta,
                self.fp8_tensor,
                self.fp8_type)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    inp = torch.randn(hidden_size, in_features, device="cuda",
        dtype=torch.float if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"te.cast_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_QDQ(fake_bf16_io)
    do_export(model, inp, fname)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs)
    # True bf16 I/O is not validated against ORT (no numpy bf16 support).
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(fname, inp, model, atol=atol, is_fp8=True, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize("scale_factor", [448])
@pytest.mark.parametrize(
    "precision, atol", [
        [torch.float32, 1e-5],
        [torch.float16, 1e-5],
        [torch.bfloat16, 5e-3],
        ["fake-torch.bfloat16", 5e-3]
    ])
def test_export_gelu_fp8(scale_factor: float, precision: torch.dtype, atol: float):
    """Export and validate a fused GeLU-with-FP8-output followed by DQ."""
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    class TestFP8_Gelu(nn.Module):
        # GeLU producing FP8 (E4M3) output, then cast back to high precision.
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io

        def forward(self, inp):
            ret = gelu(
                inp,
                self.meta,
                self.fp8_tensor,
                self.fp8_type)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    inp = torch.randn(hidden_size, in_features, device="cuda",
        dtype=torch.float if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"te.gelu_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_Gelu(fake_bf16_io)
    do_export(model, inp, fname)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs)
    # True bf16 I/O is not validated against ORT (no numpy bf16 support);
    # a couple of outlier elements are tolerated (kernel differences).
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(fname, inp, model, rtol=0, atol=atol, is_fp8=True, allow_cnt_errors=2, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factors",
    [(224, 224,),
])
@pytest.mark.parametrize(
    "precision, use_fp8, use_bias, use_gelu", [
    (torch.float32, False, False, False),
    (torch.float16, False, False, False),
    (torch.bfloat16, False, False, False),
    (torch.float32, False, True, False),
    (torch.float16, False, True, False),
    (torch.bfloat16, False, True, False),
    (torch.float32, False, True, True),
    (torch.float16, False, True, True),
    (torch.bfloat16, False, True, True),
    # For FP8 GEMM GeLU is not used.
    (torch.float32, True, False, False),
    (torch.float16, True, False, False),
    (torch.bfloat16, True, False, False),
    # When enabling bias we must use float16 or bfloat16 (because of kernel limitations)
    (torch.float16, True, True, False),
    (torch.bfloat16, True, True, False),
])
def test_export_gemm(
    seed_default_rng,
    precision, # Precision of inputs, weights, output and bias
    use_fp8,
    use_bias,
    use_gelu,
    scale_factors
):
    """Export a bare GEMM (FP8 or high precision, optional bias/GeLU) to ONNX
    and compare ONNX Runtime results against the TE reference output."""
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    class TestFP8_GEMM(nn.Module):
        """cast_to_fp8 + fp8_gemm wrapper so the exported graph is the FP8 path."""
        def __init__(self, precision, use_bias, gelu, scale_factors):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
            self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
            # One quantization scale for the activations, one per output
            # channel for the weights.
            nb_inp_scales, nb_weight_scales = 1, out_features
            act_scale_factor, weight_scale_factor = scale_factors
            self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
            self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)
            bias_size = nb_weight_scales
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
            self.inp_type = tex.DType.kFloat8E4M3
            self.weights_type = tex.DType.kFloat8E4M3
            self.outp_type = precision
        def forward(self, inp, weight):
            inp_fp8 = cast_to_fp8(
                inp,
                self.meta_inp,
                self.fp8_tensor_inp,
                self.inp_type)
            weight_fp8 = cast_to_fp8(
                weight,
                self.meta_weight,
                self.fp8_tensor_weight,
                self.weights_type)
            # NOTE(review): self.inp_type occupies the weight-dtype slot and
            # self.weights_type the input-dtype slot below. Harmless while both
            # are kFloat8E4M3, but verify against fp8_gemm's signature.
            ret, _ = fp8_gemm(
                weight_fp8,
                self.meta_weight.scale_inv,
                self.fp8_tensor_weight,
                self.inp_type,
                inp_fp8,
                self.meta_inp.scale_inv,
                self.fp8_tensor_inp,
                self.weights_type,
                self.outp_type,
                get_workspace(),
                bias=self.bias,
                use_bias=self.use_bias,
                use_split_accumulator=False)
            return ret
    class Test_GEMM(nn.Module):
        """Plain high-precision GEMM wrapper around TE's gemm()."""
        def __init__(self, precision, use_bias=False, gelu=False):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            bias_size = out_features
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
        def forward(self, inp, weight):
            outp_type = self.precision
            # note: due to logic in lines 104:116 and L129 in cpp_extensions.py
            # it appears either bias OR gelu can be activated, not both
            ret, _, _ = gemm(
                weight,
                inp,
                outp_type,
                get_workspace(),
                # test bias
                bias=self.bias,
                use_bias=self.use_bias,
                # test gelu
                gelu=self.gelu,
                gelu_input=self.gelu_input,
                grad=False, # only True for backward pass
                accumulate=False,
            )
            return ret
    # If gelu is applied then bias must be added, as defined by TE kernel.
    if use_gelu: assert use_bias
    # Set dimensions (these are arbitrary).
    out_features = 128
    hidden_size = 256
    in_features = 64
    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    weight = torch.randn(out_features, in_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    gelu_str = "_gelu" if use_gelu else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.gemm{fp8_str}{bias_str}{gelu_str}{high_prec_str}.onnx"
    input_names = ['input', 'weight']
    if use_fp8:
        model = TestFP8_GEMM(precision, use_bias, use_gelu, scale_factors)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
        serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
        # BF16 is skipped because ORT cannot execute BF16 graphs directly.
        if precision != torch.bfloat16:
            validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2,
                            is_fp8=True, input_names=input_names, te_outputs=te_outputs)
    else:
        model = Test_GEMM(precision, use_bias, use_gelu)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
        serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
        if precision != torch.bfloat16:
            validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2,
                            input_names=input_names, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize(
    "use_fp8, precision, atol", [
    [False, torch.float32, 1e-7],
    [False, torch.float16, 1e-7],
    [False, torch.bfloat16, 1e-7],
    [False, "fake-torch.bfloat16", 1e-7],
    [True, torch.float32, 1e-7],
    [True, torch.float16, 1e-7],
    [True, torch.bfloat16, 1e-2],
    [True, "fake-torch.bfloat16", 1e-2]
])
def test_export_layernorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    atol: float
):
    """Export LayerNorm (TE module or FP8 fused kernel) to ONNX and validate.

    "fake-torch.bfloat16" runs the model in FP32 while exercising the BF16
    code paths, because ORT cannot execute BF16 graphs directly.
    """
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]
    class Test_Layernorm(nn.Module):
        """Thin wrapper so the exported graph is exactly one te.LayerNorm."""
        def __init__(self) -> None:
            super().__init__()
            eps = 1e-6 # An arbitrary small value
            dtype = torch.float if fake_bf16_io else precision
            # Bug fix: honor the parametrized zero_centered_gamma. It was
            # hard-coded to False, so the non-FP8 path never tested True
            # (the FP8 path below already used the parameter).
            self.ln = te.LayerNorm(inp_shape[1], eps, params_dtype=dtype,
                                   zero_centered_gamma=zero_centered_gamma).eval().cuda()
        def forward(self, inp):
            ret = self.ln(inp)
            return ret
    class TestFP8_Layernorm(nn.Module):
        """Fused FP8 LayerNorm kernel followed by a cast back to `precision`."""
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.bias = torch.zeros(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.eps = 1e-6 # An arbitrary small value
            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
        def forward(self, inp):
            ret = texcpp.layernorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                zero_centered_gamma)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                as_te_type(precision))
            if fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret
    inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    model = TestFP8_Layernorm() if use_fp8 else Test_Layernorm()
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    # Real BF16 cannot run in ORT; fake BF16 runs as FP32, so it is validated.
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize(
    "use_fp8, precision, atol", [
    [False, torch.float32, 1e-7],
    [False, torch.float16, 1e-7],
    [False, torch.bfloat16, 1e-7],
    [False, "fake-torch.bfloat16", 1e-7],
    [True, torch.float32, 1e-7],
    [True, torch.float16, 1e-7],
    [True, torch.bfloat16, 1e-2],
    [True, "fake-torch.bfloat16", 1e-2]
])
def test_export_rmsnorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    atol: float
):
    """Export RMSNorm (TE module or FP8 fused kernel) to ONNX and validate.

    "fake-torch.bfloat16" runs the model in FP32 while exercising the BF16
    code paths, because ORT cannot execute BF16 graphs directly.
    """
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]
    class Test_RMSnorm(nn.Module):
        """Thin wrapper so the exported graph is exactly one te.RMSNorm."""
        def __init__(self) -> None:
            super().__init__()
            eps = 1e-6 # An arbitrary small value
            dtype = torch.float if fake_bf16_io else precision
            self.ln = te.RMSNorm(inp_shape[1], eps, params_dtype=dtype).eval().cuda()
        def forward(self, inp):
            ret = self.ln(inp)
            return ret
    class TestFP8_RMSnorm(nn.Module):
        """Fused FP8 RMSNorm kernel followed by a cast back to `precision`."""
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.eps = 1e-6 # An arbitrary small value
            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
        def forward(self, inp):
            ret = texcpp.rmsnorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                False)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                as_te_type(precision))
            if fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret
    inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    model = TestFP8_RMSnorm() if use_fp8 else Test_RMSnorm()
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    # Bug fix: the artifact was named "te.layernorm...", which collided with
    # (and overwrote) test_export_layernorm's serialized ONNX/IO files.
    fname = f"te.rmsnorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    # Real BF16 cannot run in ORT; fake BF16 runs as FP32, so it is validated.
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [1])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize(
    "precision, use_bias",[
    (torch.float32, False),
    (torch.float32, True),
    (torch.float16, False),
    (torch.float16, True),
    # Todo: cannot configure BF16 when bias is disabled (ORT issue?)
    (torch.bfloat16, False),
    # Todo: cannot configure BF16 when bias is enabled (ORT issue?)
    (torch.bfloat16, True),
])
def test_export_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    precision: torch.dtype
):
    """Export a single te.Linear layer to ONNX and validate ORT against TE."""
    # FP8 requires device support.
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Arbitrary problem sizes.
    in_features = 64
    out_features = 256
    hidden_size = 256

    class Test_Linear(nn.Module):
        """Thin wrapper so the exported graph is exactly one te.Linear."""
        def __init__(self,
                     in_features,
                     out_features,
                     use_bias,
                     return_bias,
                     precision
        ):
            super().__init__()
            self.linear = te.Linear(
                in_features,
                out_features,
                bias=use_bias,
                return_bias=return_bias,
                params_dtype=precision)
        def forward(self, inp):
            return self.linear(inp)

    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    fname = "te.linear{}{}{}.onnx".format(
        "_fp8" if use_fp8 else "",
        "_bias" if use_bias else "",
        dtype2str(precision))
    with te.fp8_autocast(enabled=use_fp8):
        model = Test_Linear(
            in_features, out_features, use_bias, return_bias, precision
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model.linear, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        # ORT cannot execute BF16 graphs, so validation stops here for BF16.
        if precision in (torch.bfloat16, ):
            return
        if use_fp8:
            validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8, te_outputs=te_outputs)
        else:
            validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision, use_bias",[
    (torch.float32, False),
    (torch.float32, True),
    (torch.float16, True),
    (torch.float16, False),
    (torch.bfloat16, True),
    (torch.bfloat16, False),
])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    normalization: str,
):
    """Export te.LayerNormLinear to ONNX and validate ORT output against TE."""
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    if normalization == "RMSNorm" and zero_centered_gamma:
        pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256
    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_linear{fp8_str}{bias_str}{high_prec_str}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormLinear(
            hidden_size,
            3 * hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
            normalization=normalization,
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        # ORT cannot execute BF16 graphs, so validation stops here for BF16.
        if precision in (torch.bfloat16, ):
            return
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
        else:
            # Cleanup: the old `elif precision != torch.bfloat16` guard was
            # dead code — BF16 already returned above — so a plain `else`
            # expresses the same behavior.
            # NOTE(review): atol=1e-6 is far tighter than the other FP8 tests
            # (1e-2/1e-3) — confirm this is intentional.
            validate_result(fname, inp, model, atol=1e-6, is_fp8=use_fp8, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision, use_bias",[
    (torch.float32, False),
    (torch.float32, True),
    (torch.float16, True),
    (torch.float16, False),
    (torch.bfloat16, True),
    (torch.bfloat16, False),
])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_mlp(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    activation: str,
    normalization: str,
):
    """Export te.LayerNormMLP to ONNX and compare ORT output with TE."""
    # FP8 needs device support; RMSNorm has no zero-centered gamma yet.
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    if normalization == "RMSNorm" and zero_centered_gamma:
        pytest.skip("RMSNorm does not support zero_centered_gamma yet!")

    # Arbitrary layer geometry.
    in_features, out_features = 64, 256
    hidden_size, ffn_hidden_size = 256, 256

    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    fname = "te.layernorm_mlp{}{}{}_{}.onnx".format(
        "_fp8" if use_fp8 else "",
        "_bias" if use_bias else "",
        dtype2str(precision),
        activation,
    )
    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormMLP(
            hidden_size,
            ffn_hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
            activation=activation,
            normalization=normalization,
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model, scale_factor, num_gemms=2)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        # ORT cannot execute BF16 graphs, so validation stops here for BF16.
        if precision in (torch.bfloat16, ):
            return
        # High-precision swiglu is the least accurate combination.
        if use_fp8:
            tolerance = 1e-6
        elif activation == "swiglu":
            tolerance = 5e-1
        else:
            tolerance = 1e-3
        validate_result(fname, inp, model, atol=tolerance, is_fp8=use_fp8, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize(
    "precision, use_mask, attn_mask_type", [
    (torch.float32, True, "arbitrary"), # calls forward_torch_softmax (apply user mask)
    (torch.float32, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
    (torch.float16, False, "causal"), # calls forward_torch_softmax (apply dynamic onnx mask)
    (torch.float16, True, "arbitrary"), # calls forward_torch_softmax (apply user mask)
    (torch.float16, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
    (torch.bfloat16, False, "causal"), # calls forward_torch_softmax (apply dynamic onnx mask)
    (torch.bfloat16, True, "arbitrary"), # calls forward_torch_softmax (apply user mask)
    (torch.bfloat16, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
])
def test_export_core_attention(
    seed_default_rng,
    set_max_seq_len,
    precision: torch.dtype,
    use_mask: bool,
    attn_mask_type: str,
):
    """Export DotProductAttention to ONNX and validate ORT output against TE."""
    # Arbitrary attention geometry, "sbhd" layout.
    seq_len, batch_size, num_attention_heads, kv_channels = (64, 4, 1, 64)
    qkv_size = (seq_len, batch_size, num_attention_heads, kv_channels)
    qkv_format = "sbhd"

    # Q/K/V are drawn in this order to keep the RNG stream unchanged.
    query_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    key_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    value_layer = torch.randn(qkv_size, dtype=precision, device="cuda")

    input_names = ["query", "key", "value", "attention_mask"]
    attention_mask = None
    if use_mask:
        # Random boolean mask: each position is kept with probability 0.5.
        mask_probs = 0.5 * torch.ones(batch_size, 1, 1, seq_len, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(mask_probs).to("cuda", dtype=torch.bool)
    inp = (query_layer, key_layer, value_layer, attention_mask)

    fname = "te.core_attention{}{}.onnx".format(
        get_attn_mask_str(use_mask, attn_mask_type), dtype2str(precision))
    model = te.attention.DotProductAttention(
        num_attention_heads=num_attention_heads,
        kv_channels=kv_channels,
        attention_dropout=0.5,
        qkv_format=qkv_format,
        attn_mask_type=attn_mask_type,
    ).to(device='cuda')
    do_export(model, inp, fname, input_names=input_names, use_fp8=True)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    # ORT cannot execute BF16 graphs, so validation stops here for BF16.
    if precision in (torch.bfloat16, ):
        return
    validate_result(fname, inp, model, is_fp8=True, atol=1e-2, input_names=input_names, te_outputs=te_outputs)
# Mask configurations exercised by the multi-head attention / transformer
# layer export tests below.
test_configs_multihead_attention = [
    #"use_mask, attn_mask_type"
    (False, "no_mask"), # calls ScaledSoftmax
    (True, "arbitrary"), # calls ScaledMaskedSoftmax
]
# Cross-product of input-layernorm placement, attention type (self/cross)
# and QKV parameter fusion used by test_export_multihead_attention.
test_configs_attention_type = [
    #"input_layernorm, attention_type, fuse_qkv_params"
    (True, "self", True),
    (False, "self", True),
    (True, "self", False),
    (False, "self", False),
    (True, "cross", True),
    (False, "cross", True),
    (True, "cross", False),
    (False, "cross", False),
]
@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize("input_layernorm, attention_type, fuse_qkv_params", test_configs_attention_type)
def test_export_multihead_attention(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    precision: torch.dtype,
    return_layernorm_output: bool,
    input_layernorm: bool,
    attention_type: str,
    fuse_qkv_params: bool
):
    """Export te.MultiheadAttention to ONNX and validate ORT output against TE,
    both for a full-length ("context") input and for a shorter generative-phase
    input run through the same exported graph."""
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    hidden_size = 256
    sequence_length = 128
    batch_size = 4
    num_attention_heads = 32
    kv_channels = 8
    attention_dropout = 0.1
    layernorm_epsilon = 1e-5
    init_method = output_layer_init_method = get_default_init_method()
    # Positional arguments for te.MultiheadAttention, in constructor order.
    attention_args = (
        hidden_size,
        num_attention_heads,
        kv_channels,
        attention_dropout,
        layernorm_epsilon,
        init_method,
        output_layer_init_method,
    )
    hidden_states_context = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    encoder_output = None
    if attention_type == "cross":
        # Cross-attention attends to a separate encoder output tensor.
        encoder_output = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    fp8_str = "_fp8" if use_fp8 else ""
    dtype_str = dtype2str(precision)
    attn_type_str = "_self-attention" if attention_type == "self" else "_cross-attention"
    fuse_qkv_str = "_fused-qkv" if fuse_qkv_params else ""
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    input_ln_str = "_input-ln" if input_layernorm else ""
    fname = f"te.multihead_attention{fp8_str}{attn_mask_str}{attn_type_str}{input_ln_str}{fuse_qkv_str}{dtype_str}.onnx"
    model = te.MultiheadAttention(
        *attention_args,
        attn_mask_type=attn_mask_type,
        params_dtype=precision,
        return_layernorm_output=return_layernorm_output,
        input_layernorm=input_layernorm,
        attention_type=attention_type,
        fuse_qkv_params=fuse_qkv_params,
        return_bias=True,
    ).to(device='cuda')
    inp_context = (hidden_states_context, attention_mask, encoder_output)
    input_names = ["hidden_states", "attention_mask", "encoder_output"]
    output_names=["attention_output", "attention_bias"]
    # Dynamic seq/batch axes allow the generative-phase re-validation below.
    do_export(model, inp_context, fname, use_fp8, input_names=input_names, output_names=output_names,
        dynamic_axes={"hidden_states": {0: "seq", 1:"bs"},
                      "attention_output": {0: "seq", 1:"bs"}})
    te_outputs = te_infer(model, inp_context, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp_context, te_outputs, input_names=input_names, output_names=output_names)
    # ORT cannot execute BF16 graphs, so validation stops here for BF16.
    if precision in (torch.bfloat16, ):
        return
    if not use_fp8:
        validate_result(fname, inp_context, model, atol=1e-3, input_names=input_names,
            output_names=output_names, te_outputs=te_outputs)
    else:
        validate_result(fname, inp_context, model, atol=1e-2, is_fp8=use_fp8,
            input_names=input_names, output_names=output_names, allow_cnt_errors=3,
            te_outputs=te_outputs)
    # In GPT generative phase (inference) the input sequence is smaller than the maximum
    # allowed sequence length and we want to test this condition.
    # Pretend that we're in generative phase when it makes sense (causal mask and self-attention).
    is_generative_phase = (attn_mask_type == "causal" and attention_type == "self")
    if is_generative_phase:
        seq_len_offset = 8
        hidden_states_generative = torch.randn(sequence_length-seq_len_offset, batch_size, hidden_size, dtype=precision, device="cuda")
        inp_generative = (hidden_states_generative, attention_mask, encoder_output)
        # NOTE(review): no te_outputs are passed here — presumably
        # validate_result recomputes the TE reference internally; confirm.
        if not use_fp8:
            validate_result(fname, inp_generative, model, atol=1e-3, input_names=input_names, output_names=output_names)
        else:
            validate_result(fname, inp_generative, model, atol=1e-2, is_fp8=use_fp8,
                input_names=input_names, output_names=output_names, allow_cnt_errors=3)
@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("output_layernorm", [
    #True, # TO DO: handle this
    False
])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("fuse_qkv_params", [False, True])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
def test_export_transformer_layer(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    output_layernorm: bool,
    precision: torch.dtype,
    fuse_qkv_params: bool,
    zero_centered_gamma: bool,
    activation: str,
):
    """Export a full te.TransformerLayer to ONNX and validate ORT against TE."""
    # FP8 requires device support.
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Layer configuration (arbitrary, kept small for test speed).
    hidden_size, ffn_hidden_size = 64, 256
    sequence_length, batch_size = 128, 1
    num_attention_heads = 4

    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    input_names = ["input", "attention_mask"]
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Random boolean mask: each position is kept with probability 0.5.
        keep_probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(keep_probs).to("cuda", dtype=torch.bool)
    inp = (input_tensor, attention_mask)

    fname = "te.transformer_layer{}{}{}{}_{}.onnx".format(
        "_fp8" if use_fp8 else "",
        get_attn_mask_str(use_mask, attn_mask_type),
        "_fused-qkv" if fuse_qkv_params else "",
        dtype2str(precision),
        activation,
    )
    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma,
        activation=activation).to(device='cuda')
    do_export(model, inp, fname, use_fp8, input_names=input_names)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    # ORT cannot execute BF16 graphs, so validation stops here for BF16.
    if precision in (torch.bfloat16, ):
        return
    # FP8 and swiglu both need a loose tolerance.
    if use_fp8 or activation == "swiglu":
        tolerance = 5e-1
    else:
        tolerance = 1e-3
    validate_result(fname, inp, model, atol=tolerance, is_fp8=use_fp8, input_names=input_names, te_outputs=te_outputs)
@pytest.mark.parametrize("use_fp8", [True])
@pytest.mark.parametrize("ln_scale_factor", [448*2])
@pytest.mark.parametrize("gemm_scale_factors", [(224, 224,),])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_gemm_layernorm(
    seed_default_rng,
    use_fp8: bool,
    ln_scale_factor: float,
    gemm_scale_factors: Tuple[float, float],
    precision: torch.dtype,
    zero_centered_gamma: bool
):
    """This is a regression test for testing that all LN inputs have the same type.
    The test sets up GEMM with FP32 output which feeds into an LN that is configured
    with FP16 or BF16 weights and bias.
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    class TestFP8_GemmLayernorm(nn.Module):
        """FP8 GEMM followed by a fused FP8 LayerNorm, then a cast back."""
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda")
            self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda")
            self.eps = 1e-6 # An arbitrary small value
            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(ln_scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
            # TestFP8_GEMM is defined below; the name is resolved at call time.
            self.gemm = TestFP8_GEMM(
                precision, use_bias=False, gelu=False, scale_factors=gemm_scale_factors)
        def forward(self, inp, weight):
            x = self.gemm(inp, weight)
            x = texcpp.layernorm_fwd_fp8_inf(
                x,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                zero_centered_gamma)
            # BF16 precision intentionally maps to kFloat16 here (see docstring).
            x = cast_from_fp8(
                x,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                tex.DType.kFloat32 if precision == torch.float32 else tex.DType.kFloat16)
            return x
    # Set dimensions (these are arbitrary).
    out_features = 128
    hidden_size = 128
    in_features = 128
    # NOTE(review): this duplicates the TestFP8_GEMM helper inside
    # test_export_gemm — consider hoisting a single shared definition.
    class TestFP8_GEMM(nn.Module):
        def __init__(self, precision, use_bias, gelu, scale_factors):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
            self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
            # One quantization scale for activations, one per output channel
            # for the weights.
            nb_inp_scales, nb_weight_scales = 1, out_features
            act_scale_factor, weight_scale_factor = scale_factors
            self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
            self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)
            bias_size = nb_weight_scales
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
            self.inp_type = tex.DType.kFloat8E4M3
            self.weights_type = tex.DType.kFloat8E4M3
            self.outp_type = precision
        def forward(self, inp, weight):
            inp_fp8 = cast_to_fp8(
                inp,
                self.meta_inp,
                self.fp8_tensor_inp,
                self.inp_type)
            weight_fp8 = cast_to_fp8(
                weight,
                self.meta_weight,
                self.fp8_tensor_weight,
                self.weights_type)
            # NOTE(review): inp_type/weights_type occupy swapped dtype slots;
            # harmless while both are kFloat8E4M3 — verify vs fp8_gemm's signature.
            ret, _ = fp8_gemm(
                weight_fp8,
                self.meta_weight.scale_inv,
                self.fp8_tensor_weight,
                self.inp_type,
                inp_fp8,
                self.meta_inp.scale_inv,
                self.fp8_tensor_inp,
                self.weights_type,
                self.outp_type,
                get_workspace(),
                bias=self.bias,
                use_bias=self.use_bias,
                use_split_accumulator=False)
            return ret
    inp = torch.randn(hidden_size, in_features, dtype=precision, device="cuda")
    weight = torch.randn(out_features, in_features, dtype=precision, device="cuda")
    model = TestFP8_GemmLayernorm()
    high_prec_str = dtype2str(precision)
    fp8_str = f"_fp8" if use_fp8 else ""
    fname = f"te.gemm_layernorm{fp8_str}{high_prec_str}.onnx"
    input_names = ['input', 'weight']
    do_export(model, (inp, weight), fname, use_fp8=use_fp8, input_names=input_names)
    te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
    serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
    # ORT cannot execute BF16 graphs, so BF16 skips the numeric validation.
    if precision not in (torch.bfloat16, ):
        validate_result(
            fname, (inp, weight), model, atol=5e-2, is_fp8=use_fp8, allow_cnt_errors=2,
            input_names=input_names, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize("use_fp8", [True, False])
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [True])
def test_export_gpt_generation(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
):
    """Test that the ONNX model can correctly handle inputs with different shapes and that
    the attention mask it adjusted on-the-fly to different sequence lengths.
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4
    attention_mask = None
    use_mask = True
    attn_mask_type = "causal"
    fuse_qkv_params = True
    output_layernorm = False
    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer_generative{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}.onnx"
    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma).to(device='cuda')
    # "Context phase": use full input sequence length
    input_names = ["input"]
    output_names = ["output"]
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    inp = (input_tensor,)
    # Dynamic seq/batch axes let the same graph accept the shorter input below.
    do_export(model, inp, fname, use_fp8,
        input_names=input_names, output_names=output_names,
        dynamic_axes={"input": {0: "seq", 1:"bs"},
                      "output": {0: "seq", 1:"bs"}, })
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names, output_names=output_names)
    # ORT cannot execute BF16 graphs, so BF16 skips the numeric validation.
    if precision not in (torch.bfloat16, ):
        validate_result(fname, inp, model, atol=6e-3, is_fp8=use_fp8, input_names=input_names,
            te_outputs=te_outputs)
    # "Generative phase": use a single input (sequence len=1). For FP8 we need to pad the sequence to mult of 8.
    sequence_length = 1 if not use_fp8 else 8
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    # NOTE(review): attention_mask is always None here and "attention_mask" is
    # not in input_names — confirm the extra tuple element is intentional.
    inp = (input_tensor, attention_mask)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision not in (torch.bfloat16, ):
        validate_result(fname, inp, model, atol=6e-3, is_fp8=use_fp8, input_names=input_names,
            te_outputs=te_outputs)
@pytest.mark.parametrize("enabled", [True, False])
def test_export_ctx_manager(enabled):
    """te.onnx_export() must enable export mode only inside the context."""
    # Idiom fix: PEP 8 discourages `== False`; use `not ...` instead.
    assert not is_in_onnx_export_mode()
    with te.onnx_export(enabled):
        assert is_in_onnx_export_mode() == enabled
    # The disabled state must be restored when the context exits.
    assert not is_in_onnx_export_mode()
| NVIDIA/TransformerEngine | tests/pytorch/test_onnx_export.py | test_onnx_export.py | py | 55,538 | python | en | code | 1,056 | github-code | 36 |
21756414147 | while 1:
try:
numbers = input()
data = [int(i) for i in input().split()]
#create variable
max_by_far = data[0]
min_by_far = data[0]
max_location = 0
min_location = 0
current_index = 1
# now data is a map object , but also iterable
for i in data[1:]:
#check max
if i == max_by_far:
#if equal , don't need to change max_location
pass
elif i > max_by_far:
max_by_far = i
max_location = current_index
#check min
if i == min_by_far:
# if equal , change the min_location since it's near the end
min_location = current_index
elif i < min_by_far:
min_by_far = i
min_location = current_index
current_index += 1
max_move = max_location
#min location is index , so it's actual place is index +1
min_move = len(data) - (min_location+1)
if max_location > min_location:
# because they cross each other
answer = (max_move + min_move) - 1
print(answer)
# print('type 1')
# print("max_info :",max_by_far,max_location,max_move)
# print('min_info :',min_by_far,min_location,min_move)
else:
answer = max_move + min_move
print(answer)
# print('type 2')
# print("max_info :",max_by_far,max_location,max_move)
# print('min_info :',min_by_far,min_location,min_move)
except:
break | nikita-sunyata/codeforces | 144A/144A.py | 144A.py | py | 1,655 | python | en | code | 0 | github-code | 36 |
42854245545 | #!/usr/bin/env python3
import tkinter as tk
root = tk.Tk()  # Main application window.
root.geometry("400x480")  # Fixed window size: width x height in pixels.
root.resizable(width=False, height=False)  # Disallow user resizing.
root.title("Calculator")
def btn1():
val1 = valVar.get()+'1'
notOk = True
while notOk:
if val1[0] == '0' and val1[1] == '.':
notOk = False
elif val1[0] == '0' and val1[1] != '.':
val1 = val1[1:]
elif val1[0] != '0':
notOk = False
valVar.set(val1)
def btn2():
val2 = valVar.get()+'2'
notOk = True
while notOk:
if val2[0] == '0' and val2[1] == '.':
notOk = False
elif val2[0] == '0' and val2[1] != '.':
val2 = val2[1:]
elif val2[0] != '0':
notOk = False
valVar.set(val2)
def btn3():
val3 = valVar.get()+'3'
notOk = True
while notOk:
if val3[0] == '0' and val3[1] == '.':
notOk = False
elif val3[0] == '0' and val3[1] != '.':
val3 = val3[1:]
elif val3[0] != '0':
notOk = False
valVar.set(val3)
def btn4():
val4 = valVar.get()+'4'
notOk = True
while notOk:
if val4[0] == '0' and val4[1] == '.':
notOk = False
elif val4[0] == '0' and val4[1] != '.':
val4 = val4[1:]
elif val4[0] != '0':
notOk = False
valVar.set(val4)
def btn5():
val5 = valVar.get()+'5'
notOk = True
while notOk:
if val5[0] == '0' and val5[1] == '.':
notOk = False
elif val5[0] == '0' and val5[1] != '.':
val5 = val5[1:]
elif val5[0] != '0':
notOk = False
valVar.set(val5)
def btn6():
val6 = valVar.get()+'6'
notOk = True
while notOk:
if val6[0] == '0' and val6[1] == '.':
notOk = False
elif val6[0] == '0' and val6[1] != '.':
val6 = val6[1:]
elif val6[0] != '0':
notOk = False
valVar.set(val6)
def btn7():
val7 = valVar.get()+'7'
notOk = True
while notOk:
if val7[0] == '0' and val7[1] == '.':
notOk = False
elif val7[0] == '0' and val7[1] != '.':
val7 = val7[1:]
elif val7[0] != '0':
notOk = False
valVar.set(val7)
def btn8():
val8 = valVar.get()+'8'
notOk = True
while notOk:
if val8[0] == '0' and val8[1] == '.':
notOk = False
elif val8[0] == '0' and val8[1] != '.':
val8 = val8[1:]
elif val8[0] != '0':
notOk = False
valVar.set(val8)
def btn9():
val9 = valVar.get()+'9'
notOk = True
while notOk:
if val9[0] == '0' and val9[1] == '.':
notOk = False
elif val9[0] == '0' and val9[1] != '.':
val9 = val9[1:]
elif val9[0] != '0':
notOk = False
valVar.set(val9)
def btn0():
val0 = valVar.get()+'0'
if val0[0] == '0' and val0[1] == '0':
val0 = val0[1:]
valVar.set(val0)
def reset():
valVar.set("0")
# valVar holds the calculator's display text; every handler reads/writes it.
valVar = tk.StringVar(root)
valVar.set("0")
# Results display Frame
display_lbl_frame = tk.LabelFrame(root)
display_lbl_frame.grid(row=0,column=0,columnspan=2,padx=10, pady=5)
display_label = tk.Entry(display_lbl_frame,font=('10'), textvariable=valVar,highlightthickness=5,bd=5,width=35, justify="right")
display_label.pack()
# Numbers Frame
nums_frame = tk.LabelFrame(root)
nums_frame.grid(row=1,column=0,sticky='N',padx=5,pady=5)
# Math Symbols Frame
math_sym_frame = tk.LabelFrame(root,pady=3)
math_sym_frame.grid(row=1,column=1,columnspan=1,sticky='N',padx=5,pady=5)
# Digit buttons laid out in a 3x3 grid plus a wide zero key; each button's
# command is the matching btnN handler defined above.
# 1,2,3
b1 = tk.Button(nums_frame, text='1',font=(12),padx=25,pady=25, command=btn1)
b1.grid(row=0,column=0,pady=2,padx=2)
b2 = tk.Button(nums_frame, text='2',font=(12),padx=25,pady=25, command=btn2)
b2.grid(row=0,column=1,pady=2,padx=2)
b3 = tk.Button(nums_frame, text='3',font=(12),padx=25,pady=25, command=btn3)
b3.grid(row=0,column=2,pady=2,padx=2)
# 4,5,6
b4 = tk.Button(nums_frame, text='4',font=(12),padx=25,pady=25, command=btn4)
b4.grid(row=1,column=0,pady=2,padx=2)
b5 = tk.Button(nums_frame, text='5',font=(12),padx=25,pady=25, command=btn5)
b5.grid(row=1,column=1,pady=2,padx=2)
b6 = tk.Button(nums_frame, text='6',font=(12),padx=25,pady=25, command=btn6)
b6.grid(row=1,column=2,pady=2,padx=2)
# 7,8,9
b7 = tk.Button(nums_frame, text='7',font=(12),padx=25,pady=25, command=btn7)
b7.grid(row=2,column=0,pady=2,padx=2)
b8 = tk.Button(nums_frame, text='8',font=(12),padx=25,pady=25, command=btn8)
b8.grid(row=2,column=1,pady=2,padx=2)
b9 = tk.Button(nums_frame, text='9',font=(12),padx=25,pady=25, command=btn9)
b9.grid(row=2,column=2,pady=2,padx=2)
# 0
b0 = tk.Button(nums_frame, text='0',font=(12),padx=96,pady=23, command=btn0)
b0.grid(row=3,column=0,columnspan=3,pady=2,padx=2)
def _append_symbol(symbol):
    # Append one operator / decimal-point character to the display text.
    valVar.set(valVar.get() + symbol)


# One named wrapper per operator key (Tk Button commands take no arguments).
# This replaces five copy-pasted implementations of the same append logic.
def addition():
    _append_symbol('+')


def subtraction():
    _append_symbol('-')


def multiplication():
    _append_symbol('*')


def division():
    _append_symbol('/')


def dot():
    _append_symbol('.')
def open_bracket():
openBracketVar = valVar.get()
operations = ['+','-','*','/']
for operation_char in operations:
if openBracketVar[-1] == operation_char:
openBracketVar += '('
elif openBracketVar[-1] == '(':
openBracketVar += '('
break
elif openBracketVar[-1].isnumeric():
openBracketVar += '*('
break
valVar.set(openBracketVar)
def close_bracket():
    # Append a closing bracket to the current display text.
    valVar.set(valVar.get() + ')')
def equals():
    # Evaluate the displayed expression with Python's eval().
    # NOTE(review): eval() executes arbitrary Python; acceptable here only
    # because the input string comes from the calculator's own buttons.
    eqVar = eval(valVar.get())
    valVar.set(eqVar)
def removeChar():
    # Backspace: drop the last character, falling back to "0" when the
    # display would become empty.
    trimmed = valVar.get()[:-1]
    valVar.set(trimmed if trimmed else '0')
# Symbols Buttons
additionBtn = tk.Button(math_sym_frame, text = "+",font=(12),padx=25,pady=25, command=addition)
additionBtn.grid(row=0,column=0,padx=2,pady=2)
subtractionBtn = tk.Button(math_sym_frame, text = "-",font=('TkDefaultFont',12,'bold'),padx=25,pady=25, command=subtraction)
subtractionBtn.grid(row=0,column=1,padx=2,pady=2)
multiplicationBtn = tk.Button(math_sym_frame, text = "x",font=(12),padx=25,pady=25, command=multiplication)
multiplicationBtn.grid(row=1,column=0,padx=2,pady=2)
divisionBtn = tk.Button(math_sym_frame, text = ":",font=(12),padx=25,pady=25, command=division)
divisionBtn.grid(row=1,column=1,padx=2,pady=2)
open_bracketBtn = tk.Button(math_sym_frame, text= "(",font=(12), padx=25, pady=25, command=open_bracket)
open_bracketBtn.grid(row=2,column=0)
close_bracketBtn = tk.Button(math_sym_frame, text= ")",font=(12), padx=25, pady=25, command=close_bracket)
close_bracketBtn.grid(row=2,column=1)
dotBtn = tk.Button(math_sym_frame, text= ".",font=('TkDefaultFont',12,'bold'), padx=24, pady=22, command=dot)
dotBtn.grid(row=3,column=0,sticky='N',padx=2,pady=2)
delBtn = tk.Button(math_sym_frame, text= "<-",font=("TkDefaultFont",12,'bold'),padx=17,pady=22,command=removeChar)
delBtn.grid(row=3,column=1,sticky='N',padx=2,pady=2)
# Equal Frame
equalFrame = tk.LabelFrame(root)
equalFrame.grid(row=2,column=1,sticky='N')
equalsBtn = tk.Button(equalFrame, text = "=",font=("TkDefaultFont",12,'bold'), padx=58, pady=27, command=equals)
equalsBtn.grid(row=4,column=0,columnspan=2,padx=2,pady=2)
# Exit & Reset Frame
exit_frame = tk.LabelFrame(root)
exit_frame.grid(row=2)
resetBtn = tk.Button(exit_frame, text="Reset",bd=3,font=(12),padx=20,pady=25,command=reset)
resetBtn.grid(row=2,column=0,sticky='N',padx=8,pady=2)
exitBtn = tk.Button(exit_frame, text="Exit",font=(12),bd=3,padx=20,pady=25,command=root.destroy)
exitBtn.grid(row=2,column=1,sticky='N',padx=8,pady=2)
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
| cezarnegru/Calculator_python | main.py | main.py | py | 7,162 | python | en | code | 0 | github-code | 36 |
11476729859 | """AD&D Second Edition Combat Simulator"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py, used to locate README.rst.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
# NOTE(review): `open` here is codecs.open (imported above), a legacy
# Python-2 idiom; the builtin open supports encoding= on Python 3.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='adnd2e-combat-simulator',
    version='1.0.2',
    description='A tool to simulate combat in AD&D 2nd Edition',
    long_description=long_description,
    url='https://github.com/gene1wood/adnd2e-combat-simulator',
    author='Gene Wood',
    author_email='gene_wood@cementhorizon.com',
    license='GPL-3.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: End Users/Desktop',
        'Topic :: Games/Entertainment :: Role-Playing',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='ad&d d&d adnd dnd combat',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['PyYAML', 'dice', 'colorama'],
    package_data={
        'adnd2e_combat_simulator': ['combatants.example.yaml'],
    },
    entry_points={
        'console_scripts': [
            'battle=adnd2e_combat_simulator:main',
        ],
    },
)
| gene1wood/adnd2e-combat-simulator | setup.py | setup.py | py | 1,409 | python | en | code | 2 | github-code | 36 |
quiz = {
    "stimulus": "Answer the following algebra questions:",
    "stem": "If x = 8, then what is the value of 4(x+3)?",
    "choices": ["1.35", "2.36", "3.40", "4.44"],
    "right choice": 4,
}

# Keep asking until the number of the right choice is entered.
while True:
    print(quiz["stimulus"])
    print(quiz["stem"])
    for choice in quiz["choices"]:
        print(choice)
    answer = input("Your choice: ")
    if answer.isdigit() and int(answer) == quiz["right choice"]:
        print("Bingo")
        break
    print(":(((")
| VuThiThuyB/vuthithuy-fundamental-c4e22 | session4/hw/serious3.py | serious3.py | py | 516 | python | en | code | 0 | github-code | 36 |
15868980621 | from collections import deque
import sys
# Row/column offsets for the four grid neighbours: down, up, left, right.
dx = [1,-1,0,0]
dy = [0,0,-1,1]
def iswall(x, y):
    # True when (x, y) lies inside the n x m grid and the cell is nonzero
    # (0 marks a wall or an already-visited cell).
    if not (0 <= x < n and 0 <= y < m):
        return False
    return matrix[x][y] != 0
def bfs(x, y):
    """Breadth-first search from (x, y) over the global `matrix`.

    Each reachable cell receives the path length from the start (the start
    cell keeps its initial 1); returns the value written into the goal
    cell (n-1, m-1).  Stray debug `print(queue)` calls were removed.
    """
    queue = deque()
    queue.append((x, y))
    while queue:
        x, y = queue.popleft()
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            # Only expand in-bounds cells still holding their initial 1.
            if iswall(nx, ny) and matrix[nx][ny] == 1:
                matrix[nx][ny] = matrix[x][y] + 1
                queue.append((nx, ny))
    return matrix[n - 1][m - 1]
n,m = map(int,input().split())
# NOTE(review): the maze is hard-coded to a 3x3 grid, so the n and m read
# from stdin must be "3 3" for the result to make sense.
matrix = [[1, 1, 0], [0, 1, 0], [0, 1, 1]]
print(bfs(0,0))
| HYEONAH-SONG/Algorithms | ํ์ด์ฌ ์๊ณ ๋ฆฌ์ฆ ์ธํฐ๋ทฐ/๋ฏธ๋กํ์ถ.py | ๋ฏธ๋กํ์ถ.py | py | 815 | python | en | code | 0 | github-code | 36 |
class Classy:
    """Minimum-window-substring solver (LeetCode 76 style)."""

    def __init__(self):
        pass

    def minSlidingWindow(self, s, t):
        """Return (length, window) for the smallest substring of `s` that
        contains every character of `t` WITH multiplicity, in O(n).

        Example: S = "ADOBECODEBANC", T = "ABC" -> (4, "BANC").
        When no such window exists, the original fallback of
        (len(s), "") is returned.

        Fixes the previous version, which tested membership per character
        (ignoring duplicates in `t`) and never shrank its window.
        """
        import collections
        # Counts still required inside the window; `missing` is the number
        # of characters (with multiplicity) not yet covered.
        need = collections.Counter(t)
        missing = len(t)
        left = 0
        start = 0
        best_len = 0  # 0 means "no valid window found yet"
        for right, ch in enumerate(s, 1):
            if need[ch] > 0:
                missing -= 1
            need[ch] -= 1
            if missing == 0:
                # Shrink from the left while the window stays valid.
                while need[s[left]] < 0:
                    need[s[left]] += 1
                    left += 1
                if best_len == 0 or right - left < best_len:
                    best_len = right - left
                    start = left
                # Drop the leftmost character to resume the search.
                need[s[left]] += 1
                missing += 1
                left += 1
        if best_len == 0:
            return len(s), ""
        return best_len, s[start:start + best_len]
S = "ADOBECODEBANC"
T = "ABC"
S1 ="a"
T1 = "b"
obj = Classy()
print (obj.minSlidingWindow (S1, T1)) | Akashdeepsingh1/project | 2020/MinSlidingWindow.py | MinSlidingWindow.py | py | 1,175 | python | en | code | 0 | github-code | 36 |
38449435175 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from move_base_msgs.msg import MoveBaseGoal
from move_base_msgs.msg import MoveBaseAction
import re
from Command import Command
from Queue import Queue
import actionlib
from tf import transformations
from geometry_msgs.msg import Quaternion
from sound_play.libsoundplay import SoundClient
import genpy
class CommandScheduler:
    """
    Scheduler class for the multi-step speech processing
    """
    def __init__(self):
        rospy.init_node('command_scheduler', anonymous=True)
        self.rate = rospy.Rate(10) # 10hz
        # NOTE(review): self.rate is never used below; the loop runs unthrottled.
        self.command_listener = rospy.Subscriber('/autospeech/run', String, self.received_command)
        # Maps the command verb to the builder producing a Command object.
        self.typeSwitch = {
            'go': self.navigate,
            'turn': self.turn,
            'say': self.say
        }
        self.queue = Queue()
        self.sound_client = SoundClient()
        # Drain commands enqueued by received_command() until shutdown.
        while not rospy.is_shutdown():
            # NOTE(review): Queue.not_empty is a threading.Condition object
            # and is always truthy, so this guard never skips; the actual
            # blocking happens inside self.queue.get() below.
            if self.queue.not_empty:
                current = self.queue.get()
                if current.get_data_type() == SoundClient:
                    rospy.loginfo("Saying " + current.get_data())
                    self.sound_client.say(current.get_data())
                    rospy.sleep(2)
                else:
                    # Forward the goal to the matching actionlib server and
                    # wait for it to finish before handling the next command.
                    ac = actionlib.SimpleActionClient(current.get_path(), current.get_data_type())
                    ac.wait_for_server()
                    ac.send_goal_and_wait(current.get_data())
        rospy.spin()
    def received_command(self, data):
        # Messages look like "<verb>///<argument>"; dispatch on the verb.
        split = re.split('///', data.data)
        command = self.typeSwitch[split[0]](split[1])
        self.queue.put(command)
    @staticmethod
    def navigate(location):
        # Build a 1 m forward/backward move goal relative to the robot base.
        goal = MoveBaseGoal()
        goal.target_pose.header.stamp = genpy.Time()
        goal.target_pose.header.frame_id = "/base_link"
        dirs = {
            'forward': 1.0,
            'backward': -1.0
        }
        goal.target_pose.pose.position.x = dirs[location]
        goal.target_pose.pose.orientation.w = 1.0
        return Command('/move_base', MoveBaseAction, goal)
    @staticmethod
    def say(string):
        # Speech is handled directly by SoundClient rather than actionlib.
        return Command('', SoundClient, string)
    @staticmethod
    def turn(direction):
        # Build an in-place rotation goal.
        # NOTE(review): quaternion_from_euler expects radians; the +/-90
        # values below look like degrees -- confirm the intended angle.
        goal = MoveBaseGoal()
        goal.target_pose.header.stamp = genpy.Time()
        goal.target_pose.header.frame_id = "/base_link"
        dirs = {
            'left': 90,
            'right': -90
        }
        quaternion = transformations.quaternion_from_euler(0, 0, dirs[direction])
        goal.target_pose.pose.orientation.x = quaternion[0]
        goal.target_pose.pose.orientation.y = quaternion[1]
        goal.target_pose.pose.orientation.z = quaternion[2]
        goal.target_pose.pose.orientation.w = quaternion[3]
        return Command('/move_base', MoveBaseAction, goal)
# Run the scheduler node; a ROS interrupt (Ctrl-C) exits quietly.
if __name__ == '__main__':
    try:
        CommandScheduler()
    except rospy.ROSInterruptException:
        pass
| elmdecoste/ros_advanced_voice | scripts/speech_queue.py | speech_queue.py | py | 2,950 | python | en | code | 0 | github-code | 36 |
4062917788 | import pickle
import random
def main():
    """Deal a 13-card bridge hand, print it and report its suit counts."""
    ## Analyze a bridge hand.
    bridgeHand = getHandOfCards(13)
    displayBridgeHand(bridgeHand)
    analyzeBridgeHand(bridgeHand)
def getHandOfCards(numberOfCards):
    """Deal `numberOfCards` distinct random cards from the pickled deck.

    Fix: the deck file is now closed via a context manager instead of
    being leaked by an inline open() call.
    """
    with open("deckOfCardsList.dat", 'rb') as deck_file:
        deckOfCards = pickle.load(deck_file)
    return random.sample(deckOfCards, numberOfCards)
def displayBridgeHand(bridgeHand):
    """Print the hand as one comma-separated line."""
    print(*bridgeHand, sep=", ")
def analyzeBridgeHand(bridgeHand):
    """Print and return the per-suit card counts, most populous suit first.

    Each card's suit is its final character.  Returns a list of
    (suit, count) tuples sorted by descending count; ties keep the order in
    which the suits first appear in the hand (the old version iterated a
    set, making tie order nondeterministic, and also contained a dead
    `tSorted = sorted(t)` that was immediately overwritten).  The original
    returned None, so returning the list is backward compatible.
    """
    suit_counts = {}
    for card in bridgeHand:
        suit = card[-1]
        suit_counts[suit] = suit_counts.get(suit, 0) + 1
    ranked = sorted(suit_counts.items(), key=lambda item: item[1], reverse=True)
    for suit, count in ranked:
        print("Number of", suit, "is", count)
    return ranked
main()
| guoweifeng216/python | python_design/pythonprogram_design/Ch6/6-PP-3.py | 6-PP-3.py | py | 783 | python | en | code | 0 | github-code | 36 |
39430112426 | #
# @lc app=leetcode.cn id=189 lang=python3
#
# [189] ๆ่ฝฌๆฐ็ป
#
# @lc code=start
class Solution:
    def rotate(self, nums: list, k: int) -> None:
        """Rotate `nums` right by k positions in place, O(1) extra space.

        Uses the three-reversal trick: reverse the first len-k elements,
        then the last k, then the whole list.  Fixes two defects: the
        `List[int]` annotation referenced an unimported name (NameError at
        import outside the LeetCode judge), and an empty list caused
        ZeroDivisionError in `k %= len(nums)`.
        """
        def reverse_range(lo: int, hi: int) -> None:
            # Reverse nums[lo..hi] in place.
            while lo < hi:
                nums[lo], nums[hi] = nums[hi], nums[lo]
                lo += 1
                hi -= 1

        if not nums:
            return
        k %= len(nums)
        reverse_range(0, len(nums) - k - 1)
        reverse_range(len(nums) - k, len(nums) - 1)
        reverse_range(0, len(nums) - 1)
# @lc code=end
| RoseCabbage/Leetcode_Solutions | Solutions/189.ๆ่ฝฌๆฐ็ป.py | 189.ๆ่ฝฌๆฐ็ป.py | py | 540 | python | en | code | 0 | github-code | 36 |
74512068263 | import math
def fun(a, first, last, key):
    """Recursive binary search for `key` in the sorted slice a[first..last].

    Returns the index of `key`, or -1 when it is absent.  Elements on the
    search path must be comparable with `key`.
    """
    if first > last:
        return -1
    # Integer floor-division replaces math.floor over a float quotient.
    mid = (first + last) // 2
    if key == a[mid]:
        return mid
    if key < a[mid]:
        return fun(a, first, mid - 1, key)
    return fun(a, mid + 1, last, key)
ls = [2, 5, 7, 10, 12, "apple"]
size = 6
print(fun(ls, 0, size-1, 10))
# can't do this kind of type
#print("apple"< 7)
# can't do this kind of type either
#size = "Hello!"
print(size)
print("Python doesn't need any type infront of parameters of the functions")
print("\nso cool")
| heatherThida/Function-and-Compiler-Languages-comparison | mystery.py | mystery.py | py | 633 | python | en | code | 0 | github-code | 36 |
26297680284 | import random
def rand_white():
    """Return one whitespace character (space, tab or newline), each with
    probability 1/3.  A single randrange draw indexes the tuple, so seeded
    sequences match the original if/elif implementation."""
    return (" ", "\t", "\n")[random.randrange(0, 3)]
# Build a list of random numbers containing d_number twice (when dup is
# True) and write them to `fil` separated by random whitespace.
amounts = 5
fil = "duplicate.txt"
dup = True
d_number = 19
if dup:
    # Reserve two slots for the duplicated number.
    amounts -= 2
lista = []
for i in range(amounts):
    num = random.randrange(1, 10001)
    lista.append(num)
# Insert the duplicate at a random position (append when the chosen index
# is the current last position).
inx1 = random.randrange(0, len(lista))
if inx1 != len(lista) - 1:
    lista.insert(inx1, d_number)
else:
    lista.append(d_number)
inx2 = random.randrange(0, len(lista))
if inx2 != len(lista) - 1:
    lista.insert(inx2, d_number)
else:
    lista.append(d_number)
lista = map(str, lista)
# Each number is followed by 10 random whitespace characters.
with open(fil, "w") as f:
    for i in lista:
        f.write(i)
        for k in range(10):
            f.write(rand_white())
    f.write("\n")
| hadi-ansari/TDP002 | gamla_tentor_tdp002/2018_jan/uppgift5.py | uppgift5.py | py | 825 | python | en | code | 0 | github-code | 36 |
27037909109 | import torch
from torch import nn
from fuxictr.pytorch.models import MultiTaskModel
from fuxictr.pytorch.layers import FeatureEmbedding, MLP_Block
class SharedBottom(MultiTaskModel):
    """Shared-Bottom multi-task model: one shared MLP trunk over the
    flattened feature embeddings, feeding one tower (scalar head) per task."""
    def __init__(self,
                 feature_map,
                 model_id="SharedBottom",
                 gpu=-1,
                 task=["binary_classification"],
                 num_tasks=1,
                 loss_weight='EQ',
                 learning_rate=1e-3,
                 embedding_dim=10,
                 bottom_hidden_units=[64, 64, 64],
                 tower_hidden_units=[64, ],
                 hidden_activations="ReLU",
                 net_dropout=0,
                 batch_norm=False,
                 embedding_regularizer=None,
                 net_regularizer=None,
                 **kwargs):
        # NOTE(review): the mutable defaults (task and the hidden-unit
        # lists) are shared across instances; safe only if never mutated.
        super(SharedBottom, self).__init__(feature_map,
                                           task=task,
                                           loss_weight=loss_weight,
                                           num_tasks=num_tasks,
                                           model_id=model_id,
                                           gpu=gpu,
                                           embedding_regularizer=embedding_regularizer,
                                           net_regularizer=net_regularizer,
                                           **kwargs)
        self.embedding_layer = FeatureEmbedding(feature_map, embedding_dim)
        # Shared trunk over the flattened field embeddings.
        self.bottom = MLP_Block(input_dim=embedding_dim * feature_map.num_fields,
                                hidden_units=bottom_hidden_units,
                                hidden_activations=hidden_activations,
                                output_activation=None,
                                dropout_rates=net_dropout,
                                batch_norm=batch_norm)
        # One tower with a single-unit output per task.
        self.tower = nn.ModuleList([MLP_Block(input_dim=bottom_hidden_units[-1],
                                              output_dim=1,
                                              hidden_units=tower_hidden_units,
                                              hidden_activations=hidden_activations,
                                              output_activation=None,
                                              dropout_rates=net_dropout,
                                              batch_norm=batch_norm)
                                    for _ in range(num_tasks)])
        self.compile(kwargs["optimizer"], kwargs["loss"], learning_rate)
        self.reset_parameters()
        self.model_to_device()
    def forward(self, inputs):
        """Return {"<label>_pred": tensor} with one prediction per task."""
        X = self.get_inputs(inputs)
        # feature_emb: presumably (batch, num_fields, embedding_dim) --
        # confirm against FeatureEmbedding before relying on this.
        feature_emb = self.embedding_layer(X)
        bottom_output = self.bottom(feature_emb.flatten(start_dim=1)) # (?, bottom_hidden_units[-1])
        tower_output = [self.tower[i](bottom_output) for i in range(self.num_tasks)]
        y_pred = [self.output_activation[i](tower_output[i]) for i in range(self.num_tasks)]
        return_dict = {}
        labels = self.feature_map.labels
        for i in range(self.num_tasks):
            return_dict["{}_pred".format(labels[i])] = y_pred[i]
        return return_dict
| xue-pai/FuxiCTR | model_zoo/multitask/SharedBottom/src/SharedBottom.py | SharedBottom.py | py | 3,155 | python | en | code | 671 | github-code | 36 |
1925887546 | #!/bin/python
import collections
import os
import re
import subprocess
import time
# Executable paths: the ghostly game server and the allie bot builds
# (current debug build plus archived release binaries).
GHOSTLY_PATH = '/usr/bin/ghostly'
ALLIE_DBG = '../target/debug/allie'
# Old versions
ALLIE_1_1 = './bin/allie_v1.1'
ALLIE_1_0 = './bin/allie_v1.0'
ALLIE_0_9 = './bin/allie_v0.9'
ALLIE_0_8 = './bin/allie_v0.8'
ALLIE_0_7 = './bin/allie_v0.7'
ALLIE_0_6 = './bin/allie_v0.6'
ALLIE_0_5 = './bin/allie_v0.5'
ALLIE_0_4 = './bin/allie_v0.4'
ALLIE_0_3 = './bin/allie_v0.3'
ALLIE_0_2 = './bin/allie_v0.2'
ALLIE_0_1 = './bin/allie_v0.1'
RESULT_RE = re.compile(r'^name:(?P<name>[^;]+);wins:(?P<wins>\d+);score:(?P<score>\d+)$')
ROUNDS = 25
Score = collections.namedtuple('Score', ['wins', 'score'])


def parse_result(server_output):
    """Decode the server's stdout bytes into a {bot_name: Score} mapping.

    Lines not matching RESULT_RE are skipped silently."""
    scores = {}
    for line in server_output.decode("utf-8").split('\n'):
        match = RESULT_RE.match(line)
        if match is None:
            continue
        scores[match.group('name')] = Score(int(match.group('wins')),
                                            int(match.group('score')))
    return scores
def benchmark():
    """Run one ghostly match between two allie versions and print each
    bot's share of the total wins and score."""
    # Start the game server.
    server = subprocess.Popen([GHOSTLY_PATH
                               # , '--headless'
                               , '--start-at', '2'
                               , '--tickless'
                               , '--rounds', str(ROUNDS)]
                              , stdout=subprocess.PIPE
                              , stderr=subprocess.PIPE)
    time.sleep(1)
    # Start the bots, discarding their output.  subprocess.DEVNULL replaces
    # an open(os.devnull, 'w') handle that was never closed.
    subprocess.Popen([ALLIE_1_0], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    subprocess.Popen([ALLIE_1_1])
    # Block until the match is over, then parse the final scoreboard.
    out, _ = server.communicate()
    results = parse_result(out)
    total_wins = sum(t.wins for t in results.values())
    total_score = sum(t.score for t in results.values())
    # Print each bot's absolute and percentage results (guarding /0).
    for name, result in results.items():
        print(name + ":")
        print('\tWins: {}/{} {:.2f}%'
              .format(result.wins
                      , total_wins
                      , result.wins / total_wins * 100 if total_wins > 0 else 0))
        print('\tScore: {}/{} {:.2f}%'
              .format(result.score
                      , total_score
                      , result.score / total_score * 100 if total_score > 0 else 0))
# Run the benchmark when executed as a script.
if __name__ == '__main__':
    benchmark()
| Kwarf/Allie-2017 | benchmarker/bench.py | bench.py | py | 2,332 | python | en | code | 0 | github-code | 36 |
25209486610 | #!/usr/local/bin/python3
import socket
import struct
import crcmod
#from dataservice.datawave_produce.waveproduce import sin_wave,triangle_wave
import random
def crccreate(b,length):
    # Compute CRC-16 (poly 0x8005, reflected, init 0xFFFF) over b[:length].
    crc16_func = crcmod.mkCrcFun(0x18005, initCrc=0xFFFF, rev=True, xorOut=0x0000)
    return crc16_func(b[0:length])
def crccheckhole(b,length):
    # Compare the hex CRC of the payload with the two trailing CRC bytes.
    # NOTE(review): bytesToHex is not defined anywhere in this module; this
    # function raises NameError as written -- confirm the missing helper.
    crc16_func = crcmod.mkCrcFun(0x18005, initCrc=0xFFFF, rev=True, xorOut=0x0000)
    return hex(crc16_func(b[0:length]))==bytesToHex(b[length],b[length+1])
def crccheck(b,length):
    # Verify the CRC of b[:length] against the two trailing CRC bytes.
    # NOTE(review): bytesToInt is also undefined in this module (NameError).
    print('ไผ ่ฟๆฅ็b๏ผๅlenght',b,' ',length)
    crc16_func = crcmod.mkCrcFun(0x18005, initCrc=0xFFFF, rev=True, xorOut=0x0000)
    return crc16_func(b[0:length]) == bytesToInt(b[length], b[length + 1])
def get_send_msgflowbytes(slave, func, register, length, data):
    """Build a Modbus-style frame: !bbbbf payload plus a trailing CRC-16.

    Returns None when length != 4 (matching the original fall-through)."""
    if length != 4:
        return None
    frame = struct.pack('!bbbbf', slave, func, register, length, data)
    frame += struct.pack('H', crccreate(frame[0:8], length=8))
    return frame
if __name__ == '__main__':
    # Repairs dump corruption in this block: wrapped Chinese comment lines
    # had lost their '#' prefix (syntax errors) and a stray bare name `tim`
    # would have raised NameError on the first loop iteration.  Comments
    # are translated to English; commented-out code is preserved.
    tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create the socket
    tcp_server_socket.bind(('127.0.0.1', 5000))  # bind the local address and receive port
    tcp_server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    print('Waiting connecting')
    # tcp_server_socket.listen(1)  # listen; argument is the maximum backlog
    # client_socket, client_addr = tcp_server_socket.accept()  # accept (no arguments)
    # print('Someone has connected to this sever')
    # xsin, ysin = sin_wave(0, 100, 1, 2, 2)
    # xtri, ytri = triangle_wave(0, 100, 1, 2, 2)
    # ysin = ysin - 0.5
    # ytri = 10 * ytri
    data = 0.0
    # sinindex = 0
    # triindex = 0
    while True:
        # b = client_socket.recv(10)
        # print('receiving msg:', b)
        # if b[1] == 0x03:
        #     print('we are receiving setting command', b)
        #     # client_socket.send(b)
        # elif b[2] == 0x01:  # sine-wave producing branch
        #     # unpack the received binary byte stream
        #     slave, func, register, length = struct.unpack('!bbbb', b[0:4])
        # sinindex += 1
        data = random.uniform(10, 11)
        print(data)
        # The packet format here is fixed by the EPICS protocol file.
        # msg = get_send_msgflowbytes(slave, func, register, length, data)
        # print('sending msg:', msg)
        # print(b)
        # client_socket.send(msg)
        # if sinindex == 99:
        #     sinindex = 0
2111005207 | import machine
# Sensor is completly unreliable for me and showing extremely different values in same condition when trying to get the max and min values
class MoistureSensor:
    """Reads the moisture sensor wired to pin P16 of a LoPy4."""

    # Manually calibrated ADC endpoints: fully wet (in water) and fully
    # dry (in air); range_sensor_value spans the usable interval.
    max_moisture_sensor_value = 1000
    min_moisture_sensor_value = 4095
    range_sensor_value = min_moisture_sensor_value - max_moisture_sensor_value

    def __init__(self):
        """Configure the ADC channel for pin P16 (11 dB attenuation)."""
        adc = machine.ADC()
        adc.vref(1100)
        self.pin16 = adc.channel(pin='P16', attn=machine.ADC.ATTN_11DB)

    def get_value_in_procent(self):
        """Return the moisture level as a fraction in [0, 1] relative to
        the fixed calibration endpoints (1 = fully wet, 0 = fully dry)."""
        raw = self.pin16.value()
        if raw < self.max_moisture_sensor_value:
            return 1
        if raw > self.min_moisture_sensor_value:
            return 0
        above_wet = raw - self.max_moisture_sensor_value
        return round(1 - (above_wet / self.range_sensor_value), 2)
| christoffergranstedt/lnu-iot-moisture-thing | lib/sensors/MoistureSensor.py | MoistureSensor.py | py | 1,152 | python | en | code | 1 | github-code | 36 |
# Advent of Code 2019 day 8: analyse and render a Space Image Format file.
# Fixes: the input file is now closed (it was leaked) and the part-1 loop
# no longer shadows the builtin `list` with its loop variable.
with open("D8-input.txt") as image_file:
    pixel_data = image_file.read().strip()

width = 25
height = 6
row_length = width * height

# Slice the digit stream into full layers of width*height pixels.
layer_count = len(pixel_data) // row_length
layers = [pixel_data[i * row_length:(i + 1) * row_length] for i in range(layer_count)]
print("Number of layers: ", layer_count)

# Part 1: tally the '0', '1' and '2' digits of every layer.
counts = []
for layer in layers:
    counts.append((layer.count('0'), layer.count('1'), layer.count('2')))

least_zeros = None
zeros_count = None
for layer in counts:
    if (layer[0] + layer[1] + layer[2]) != 150:
        print(layer, "is not 150 in size")
    if zeros_count is None or layer[0] < zeros_count:
        least_zeros = layer
        zeros_count = layer[0]
print('The digit count for layer with least amount of zeros (0, 1, 2): ', least_zeros)
print('Count of ones multiplied with count of twos: ', least_zeros[1] * least_zeros[2])

# Part 2: flatten the layers; for each pixel the first non-transparent
# (!= 2) layer value wins.
print('Rendering image:\n')
image = [[2] * width for _ in range(height)]
for h in range(height):
    for w in range(width):
        if image[h][w] == 2:
            layer_pos = h * width + w
            for layer in layers:
                pixel = int(layer[layer_pos])
                if pixel != 2:
                    image[h][w] = pixel
                    break

# Draw '#' for lit pixels and a space otherwise.
for row in image:
    for pos in row:
        print('#' if pos == 1 else ' ', end='')
    print()
29772321096 | import unittest
import HtmlTestRunner
from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class LoginTest(unittest.TestCase):
    """End-to-end Selenium check: create a report through the web UI."""
    baseURL = "https://test-bitdef.web.app"
    # NOTE(review): the Chrome driver is created at class-definition
    # (import) time, so merely importing this module opens a browser.
    driver = webdriver.Chrome(executable_path = "..\drivers\chromedriver.exe")
    @classmethod
    def setUpClass(cls):
        # Open the application once for all tests in this class.
        cls.driver.get(cls.baseURL)
        cls.driver.maximize_window()
    def test_createReport(self):
        """Fill the 'create report' form via keyboard-driven dropdowns and
        assert the success toast appears."""
        wait = WebDriverWait(self.driver, 15)
        #Assert Title Page
        assert self.driver.title == "TestFrontend"
        #Create Report
        wait.until(EC.presence_of_element_located((By.XPATH,"//span[text()=' CREATE REPORT ']"))).click()
        #Details
        detailsType = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select type']")))
        detailsType.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ENTER)
        detailsCompany = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select Company']")))
        detailsCompany.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ENTER)
        wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Enter name']"))).send_keys("Bogdan Eugen")
        #Settings
        wait.until(EC.presence_of_element_located((By.XPATH,"//label[@for = 'mat-radio-2-input']//span[@class='mat-radio-container']"))).click()
        settingsReccurance = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select reccurence']")))
        settingsReccurance.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ENTER)
        settingsOn = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select day']")))
        settingsOn.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ARROW_DOWN,Keys.ENTER)
        settingInterval = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select interval']")))
        settingInterval.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ENTER)
        wait.until(EC.presence_of_element_located((By.XPATH,"//label[@for = 'mat-checkbox-1-input']"))).click()
        wait.until(EC.presence_of_element_located((By.XPATH,"//span[text()=' SAVE ']"))).click()
        #Assert Raport SAVE
        # NOTE(review): fixed sleeps are flaky; prefer an explicit wait for
        # the toast element instead of time.sleep.
        time.sleep(1)
        successSave = self.driver.find_element_by_xpath("//div[text()=' Successfully saved the report ']").text
        self.assertEqual("Successfully saved the report", successSave)
        #Sleep to see ending
        time.sleep(3)
    @classmethod
    def tearDownClass(cls):
        # Close the browser window opened at class-definition time.
        cls.driver.close()
if __name__== "__main__":
unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output='..\\reports'))
| degea78/Bitdefender | test-bitdef/testCases/testBitdef.py | testBitdef.py | py | 2,917 | python | en | code | 0 | github-code | 36 |
73881080745 | from flask import Flask, request
import json
from jwt.exceptions import JWTException
from jwt.jwt import JWT
from jwt.jwk import OctetJWK
def login(app: Flask):
    """Register the POST /api/auth/login route on `app` and return the view.

    The handler validates the request's `__secure_key` JWT (signed with the
    hard-coded octet key b'123') and answers with a placeholder token
    payload; missing fields or an invalid key yield a 400 JSON error.
    """
    @app.post("/api/auth/login")
    def test():
        reqest_data = request.get_json()
        try:
            jwt = JWT()
            # NOTE(review): login/password are extracted but never checked
            # against any user store yet.
            login = reqest_data["login"]
            password = reqest_data["password"]
            secure_key = reqest_data["__secure_key"]
            jwt.decode(secure_key, key=OctetJWK(b'123'))
            return json.dumps({
                "access_token": "None",
                "logout_hash": "None",
                "user_id": 0
            }), 200, {
                'Content-Type': 'application/json'
            }
        except KeyError as e:
            return json.dumps({
                "error": str(e),
                "error_code": 0
            }), 400, {
                'Content-Type': 'application/json'
            }
        except JWTException:
            # Bug fix: the Content-Type header was misspelled
            # 'applicaiton/json', so clients would not parse it as JSON.
            return '{"error":"secure_key is invalid", "error_code": 0}', 400, {
                'Content-Type': 'application/json'
            }
    return test
| Axime/Aska2.0 | server/routes/auth/login.py | login.py | py | 1,121 | python | en | code | 0 | github-code | 36 |
35623428711 | from django.urls import path
from .views import *
app_name = "Mentor"
urlpatterns = [
path("", view=MentorListView.as_view(), name="listar y crear mentores"),
path("user/", view=MentorByUserRUD.as_view(), name="traer mentor por id de usuario"),
path("<int:pk>/", view=MentorRUDView.as_view(), name="Obtener, actualizar y eliminar mentor"),
path("mentoria/", view=MentoriaListView.as_view(), name="listar y crear mentorias"),
path("mentoria/<int:pk>/", view=MentoriaRUDView.as_view(), name="Obtener, actualizar y eliminar mentoria")
] | DiegoStevenVera/MentorTic | apps/mentor/urls.py | urls.py | py | 554 | python | es | code | 0 | github-code | 36 |
36887287548 | from flask import Flask, request
from . import db
app = Flask(__name__)
@app.route("/api/message", methods=["GET"])
def get_random_message():
"""Return a random message to play the part of 'message in a bottle'."""
return { "content": db.get_random_message() }
@app.route("/api/message", methods=["POST"])
def create_message():
content = request.get_json()["content"]
if not 2 <= len(content) <= 1023:
raise Exception(f"Message must be between 2 and 1023 characters. It was {len(content)} characters.")
db.create_message(content)
return "", 201
| mshenfield/swellnote | swellnote/__init__.py | __init__.py | py | 584 | python | en | code | 1 | github-code | 36 |
15560736212 | #!/usr/bin/env python3
# This is a simple script that takes in an scurve file produced by
# csvcolumn_to_scurve and produces a png graph of the scurve.
import argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
# CSV column names: x axis (N/total) and y axis (New/Old ratio).
FIELDS = ['N/total', 'New/Old']


def get_data(input_file):
    """Yield (x, y) float pairs taken from the CSV columns named in FIELDS.

    The needless `global FIELDS` declaration was removed: the function
    only reads the module-level constant.
    """
    for row in csv.DictReader(input_file):
        yield (float(row[FIELDS[0]]), float(row[FIELDS[1]]))
def main():
    """Parse CLI arguments, load the scurve CSV and write the plot image."""
    p = argparse.ArgumentParser()
    p.add_argument('input_csv_file', type=argparse.FileType('r'))
    p.add_argument('output_file', type=str)
    p.add_argument('-y-axis-num-tick-marks', type=int,
                   help='The number of y tick marks to use above/below zero.')
    p.add_argument('-y-axis-min', type=float,
                   help='Override the min y axis that we use')
    p.add_argument('-y-axis-max', type=float,
                   help='Override the min y axis that we use')
    p.add_argument('-title', type=str,
                   help='Title of the graph')
    p.add_argument('-x-axis-title', type=str,
                   help='The title to use on the x-axis of the graph')
    p.add_argument('-y-axis-title', type=str,
                   help='The title to use on the x-axis of the graph')
    args = p.parse_args()
    # data: one (x, y) row per CSV record; both columns must be >= 0.
    data = np.array(list(get_data(args.input_csv_file)))
    assert np.all(data >= 0)
    x = data[:, 0]
    y = data[:, 1]
    x_axis_title = args.x_axis_title or FIELDS[0]
    y_axis_title = args.y_axis_title or FIELDS[1]
    title = args.title or "{} vs {}".format(x_axis_title, y_axis_title)
    fig, ax = plt.subplots()
    fig.set_size_inches(18.5, 18.5)
    fig.suptitle(title, fontsize=20)
    ax.set_xlabel(x_axis_title, fontsize=20)
    ax.set_ylabel(y_axis_title, fontsize=20)
    ax.plot(x, y)
    ax.scatter(x, y)
    # To get good bounds, we:
    #
    # 1. Re-center our data at 0 by subtracting 1. This will give us the %
    #    difference in between new and old (i.e. (new - old)/old)
    #
    # 2. Then we take the maximum absolute delta from zero and round to a
    #    multiple of 5 away from zero. Lets call this value limit.
    #
    # 3. We set [min_y, max_y] = [1.0 - limit, 1.0 + limit]
    recentered_data = y - 1.0
    max_magnitude = int(np.max(np.abs(recentered_data)) * 100.0)
    y_limit = float(((max_magnitude // 5) + 1) * 5) * 0.01
    ax.set_xlim(0.0, 1.0)
    # NOTE(review): `or` treats an explicit 0.0 override as "unset";
    # compare against None if 0.0 must be a valid override.
    y_min = args.y_axis_min or 1.0 - y_limit
    y_max = args.y_axis_max or 1.0 + y_limit
    assert y_min <= y_max
    ax.set_ylim(y_min, y_max)
    ax.grid(True)
    ax.xaxis.set_ticks(np.arange(0.0, 1.0, 0.05))
    if args.y_axis_num_tick_marks:
        y_delta = y_max - y_min
        y_tickmark_frequency = y_delta / float(args.y_axis_num_tick_marks)
        ax.yaxis.set_ticks(np.arange(y_min, y_max, y_tickmark_frequency))
    plt.savefig(args.output_file)
if __name__ == "__main__":
main()
| apple/swift | utils/dev-scripts/scurve_printer.py | scurve_printer.py | py | 2,875 | python | en | code | 64,554 | github-code | 36 |
39279840802 | from astropy.io import fits
from astropy.convolution import convolve, Box1DKernel
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
import glob
# Plot one normalised LAMOST spectrum per MK spectral class (M..O, bottom to
# top), mark the Balmer lines, and save the stacked figure to MK.pdf.
# The two string blocks below are author notes: per-class counts and the
# candidate file indices considered for each class.
'''
O 436
B 582
A 745
F 766
G 596
K 759
M 306
'''
'''
O 476, 8773, 9818
B 96, 378, 462, 489, 492
A 17, 114, 120, 136
F 52, 158
G 25, 27, 30, 85
K 61, 65
M 256, 291, 300
'''
# Chosen file index per class, one per class O..M; both lists are reversed so
# that M is plotted first (at the bottom of the stack).
i = [476, 378, 17, 158, 30, 61, 256]
c = ['O', 'B', 'A', 'F', 'G', 'K', 'M'][::-1]
# Wavelength (Angstroms) used for a searchsorted index below (result unused).
loc = 5891
files = [glob.glob('/data2/cpb405/Training_2/*.fits')[j] for j in i][::-1]
# A4-ish aspect ratio: height = width * sqrt(2) * 0.9.
# NOTE(review): sp.sqrt/sp.arange/etc. are scipy's deprecated NumPy aliases,
# removed in modern SciPy — consider switching to numpy directly.
fig, ax = plt.subplots(figsize = (5,0.9*5*sp.sqrt(2)))
# Mark the Balmer series: H-alpha, H-beta, H-gamma.
ax.axvline(6565, c = 'r', alpha = 0.1)
ax.text(6600, 7, 'Ha', color = 'r')
ax.axvline(4862, c = 'r', alpha = 0.1)
ax.text(4900, 7, 'Hb', color = 'r')
ax.axvline(4342, c = 'r', alpha = 0.1)
ax.text(4400, 7, 'Hg', color = 'r')
for idx in range(len(files)):
    with fits.open(files[idx]) as hdulist:
        # First row of the primary HDU is the flux; COEFF0/COEFF1 define a
        # log10 wavelength solution: lambda = 10**(COEFF0 + COEFF1 * pixel).
        flux = hdulist[0].data[0]
        init = hdulist[0].header['COEFF0']
        disp = hdulist[0].header['COEFF1']
        CLS = hdulist[0].header['CLASS']
        SCLS = hdulist[0].header['SUBCLASS'][0]
    #print('{}, {}, {}'.format(idx, CLS, SCLS))
    wavelength = 10**sp.arange(init, init+disp*(len(flux)-0.9), disp)
    # Drop the last 100 pixels (noisy red end of the detector).
    wavelength = wavelength[:-100]
    flux = flux[:-100]
    flux = sp.array(flux)
    wi = sp.searchsorted(wavelength, loc)
    #wi = -1
    # Normalise to peak flux, then offset each spectrum by its index to stack.
    flux = flux/sp.amax(flux)
    ax.plot(wavelength, flux + idx, label = c[idx], c = '#1f77b4')
    # Label each trace near its peak.
    ax.annotate(c[idx], xy = (wavelength[sp.argmax(flux)]-75, idx+1.03))
ax.set_title('Stellar Spectra')
# NOTE(review): '\ ' is an invalid escape sequence (works, but warns on
# newer Pythons) — a raw string or '\\' would be cleaner.
ax.set_xlabel('Wavelength \ Angstroms')
ax.set_ylabel('Normalised Flux')
# Hide y ticks: absolute flux values are meaningless after offset-stacking.
plt.yticks([]," ")
#ax.set_yticklabels([])
#ax.get_yaxis().set_visible(False)
plt.tight_layout()
plt.savefig('MK.pdf')
plt.show()
12675528453 | #!/usr/bin/python3
from tkinter import *
from tkinter import messagebox
from tkinter import simpledialog
from decimal import *
entries = []
class LoanCalculator:
    """Tkinter GUI loan/amortization calculator.

    Builds the whole window in __init__ and enters the Tk main loop there,
    so constructing the object blocks until the window is closed. Supports
    an "additional payment" and a "reinvest" scenario where each extra
    property's mortgage also receives the previous properties' payments.
    Schedule lines are accumulated in the module-level `entries` list.
    """

    def __init__(self):
        """Create all widgets, wire up callbacks, and run the event loop."""
        self.window = Tk() # Create Window
        self.window.title("Loan Calculator")
        # Create Labels
        Label(self.window, text="Annual Interest Rate").grid(row=1, column=1, sticky=W)
        Label(self.window, text="Number of Years").grid(row=2, column=1, sticky=W)
        Label(self.window, text="Loan Amount").grid(row=3, column=1, sticky=W)
        Label(self.window, text="Monthly Payment").grid(row=4, column=1, sticky=W)
        Label(self.window, text="Total Payment W/O Additional").grid(
            row=5, column=1, sticky=W
        )
        Label(self.window, text="Total Payment w Additional").grid(
            row=5, column=3, sticky=W
        )
        Label(self.window, text="Additional Payment").grid(row=6, column=1, sticky=W)
        Label(self.window, text="Reinvest Times").grid(row=6, column=3, sticky=W)
        Label(self.window, text="Total Years").grid(row=3, column=3, sticky=W)
        Label(self.window, text="Total Properties").grid(row=4, column=3, sticky=W)
        # Create the text widget with a scroll bar
        self.text = Text(self.window)
        self.text.grid(row=8, column=1, columnspan=6, sticky=W)
        scrollbar = Scrollbar(self.window)
        scrollbar.config(command=self.text.yview)
        self.text.config(yscrollcommand=scrollbar.set)
        scrollbar.grid(row=8, column=7, columnspan=10, stick=NS)
        # Create Entries (StringVars hold the user input / computed output)
        self.annualInterestRateVar = StringVar()
        self.annualInterestRateVar.set("11")
        Entry(self.window, textvariable=self.annualInterestRateVar, justify=RIGHT).grid(
            row=1, column=2
        )
        self.numberOfYearsVar = StringVar()
        self.numberOfYearsVar.set("20")
        Entry(self.window, textvariable=self.numberOfYearsVar, justify=RIGHT).grid(
            row=2, column=2
        )
        self.loanAmountVar = StringVar()
        self.loanAmountVar.set("500000")
        Entry(self.window, textvariable=self.loanAmountVar, justify=RIGHT).grid(
            row=3, column=2
        )
        # NOTE: .grid() returns None, so the lbl* names below are all None;
        # the Label widgets themselves are kept alive by their Tk parent.
        self.monthlyPaymentVar = StringVar()
        lblMonthlyPayment = Label(
            self.window, textvariable=self.monthlyPaymentVar
        ).grid(row=4, column=2, sticky=E)
        self.totalPaymentVar = StringVar()
        lblTotalPayment = Label(self.window, textvariable=self.totalPaymentVar).grid(
            row=5, column=2, sticky=E
        )
        self.totalPaymentWithVar = StringVar()
        lblTotalPaymentWith = Label(
            self.window, textvariable=self.totalPaymentWithVar
        ).grid(row=5, column=4, sticky=E)
        self.totalYears = StringVar()
        lblTotalYears = Label(self.window, textvariable=self.totalYears).grid(
            row=3, column=4, sticky=E
        )
        # NOTE(review): lblTotalYears is reassigned here (harmless since the
        # value is always None, but probably meant to be lblTotalProperties).
        self.totalProperties = StringVar()
        lblTotalYears = Label(self.window, textvariable=self.totalProperties).grid(
            row=4, column=4, sticky=E
        )
        self.additionalPayment = StringVar()
        self.additionalPayment.set("5000")
        Entry(self.window, textvariable=self.additionalPayment, justify=RIGHT).grid(
            row=6, column=2
        )
        self.reInvestTimes = StringVar()
        self.reInvestTimes.set("0")
        Entry(self.window, textvariable=self.reInvestTimes, justify=RIGHT).grid(
            row=6, column=4
        )
        # Create Button callback
        btComputePayment = Button(
            self.window, text="Compute Payment", command=self.computePayment
        ).grid(row=7, column=1, sticky=E)
        # Added a button to save a loan
        btSaveLoan = Button(
            self.window, text="Save Loan to File", command=self.saveLoanFile
        ).grid(row=7, column=2, sticky=E)
        btSaveLoan = Button(
            self.window, text="Clear File", command=self.clearFile
        ).grid(row=7, column=3, sticky=E)
        self.window.mainloop() # Create an event loop

    def valueCheck(self):
        """Validate the three numeric entry fields.

        On any non-numeric value, shows an error dialog, destroys the
        window and constructs a brand-new LoanCalculator (which blocks in
        its own mainloop).
        NOTE(review): after the error path the remaining try blocks (and
        the caller, computePayment) still run against the destroyed window.
        """
        interest = self.annualInterestRateVar.get()
        years = self.numberOfYearsVar.get()
        loan = self.loanAmountVar.get()
        try:
            float(interest)
        except ValueError:
            messagebox.showerror(
                "Calculation Error",
                "Please make sure to enter numeric values for interest rate, years, and loan amount",
            )
            self.window.destroy()
            LoanCalculator()
        try:
            float(loan)
        except ValueError:
            messagebox.showerror(
                "Calculation Error",
                "Please make sure to enter numeric values for interest rate, years, and loan amount",
            )
            self.window.destroy()
            LoanCalculator()
        try:
            int(years)
        except ValueError:
            messagebox.showerror(
                "Calculation Error",
                "Please make sure to enter numeric values for interest rate, years, and loan amount",
            )
            self.window.destroy()
            LoanCalculator()

    def computePayment(self): # Compute Payment
        """Callback for "Compute Payment": fill all output fields.

        Computes the standard annuity payment, the total paid without
        extra payments, and then runs one amortization per property
        (reinvest scenario); property k+1 additionally receives k monthly
        payments' worth of extra principal each month.
        """
        self.valueCheck()
        self.totalMonths = 0
        monthlyPayment = self.getMonthlyPayment(
            float(self.loanAmountVar.get()),
            float(self.annualInterestRateVar.get()) / 1200,
            int(self.numberOfYearsVar.get()),
        ) # Error fix
        self.monthlyPaymentVar.set(
            format(monthlyPayment, "10.2f")
        ) # Set monthly payment
        totalPayment = (
            float(self.monthlyPaymentVar.get()) * 12 * int(self.numberOfYearsVar.get())
        )
        self.totalPaymentVar.set(
            "{:,.2f}".format(totalPayment).replace(",", " ")
        ) # Set total payment
        times = int(self.reInvestTimes.get())
        self.totalProperties.set("%d" % (times + 1))
        for time in range(0, times + 1):
            self.calcAmortization(
                float(self.loanAmountVar.get()),
                float(self.annualInterestRateVar.get()) / 1200,
                int(self.numberOfYearsVar.get()),
                float(self.monthlyPaymentVar.get()),
                float(self.additionalPayment.get()) + time * monthlyPayment,
                time + 1,
            )
        totalPaymentWith = self.totalMonths * (
            float(self.additionalPayment.get()) + monthlyPayment
        )
        self.totalPaymentWithVar.set(
            "{:,.2f}".format(totalPaymentWith).replace(",", " ")
        ) # Set total payment with additional payment

    def getMonthlyPayment(
        self, loanAmount, monthlyInterestRate, numberOfYears
    ): # Get monthly payment
        """Return the fixed monthly annuity payment.

        Standard amortization formula: P*r / (1 - (1+r)^-n) where r is the
        monthly rate and n the number of monthly payments.
        """
        monthlyPayment = (
            loanAmount
            * monthlyInterestRate
            / (1 - 1 / (1 + monthlyInterestRate) ** (numberOfYears * 12))
        )
        return monthlyPayment

    def calcAmortization(
        self,
        balance,
        monthlyInterestRate,
        numberOfYears,
        monthlyPayment,
        additionalPayment,
        investment,
    ):
        """Append one property's amortization schedule to global `entries`
        and render the accumulated schedule into the text widget.

        Also adds the months taken to pay off this property to
        self.totalMonths and updates the "Total Years" display.
        """
        # NOTE(review): the Decimal context precision is set but no Decimal
        # arithmetic is used below — this line has no effect.
        getcontext().prec = 2
        self.payNum = 1
        global entries
        entries = entries + ["Property %d" % investment]
        for payNum in range(1, numberOfYears * 12 + 1):
            interest = monthlyInterestRate * balance
            principal = monthlyPayment - interest
            balance = balance - principal - additionalPayment
            if balance <= 0:
                balance = 0
            # One tab-separated schedule line: payment #, interest,
            # principal, additional payment, remaining balance.
            entries = entries + [
                str(self.payNum)
                + " => %dy %dm" % (self.payNum // 12, self.payNum % 12)
                + "\t\t"
                + "$"
                + "%.2f" % interest
                + "\t\t"
                + "$"
                + "%.2f" % principal
                + "\t"
                + " $"
                + "%.2f" % additionalPayment
                + "\t\t$"
                + "%.2f" % balance
            ]
            if balance == 0:
                break
            self.payNum += 1
        self.totalMonths += self.payNum
        self.totalYears.set(
            "%d Years %d Months" % (self.totalMonths // 12, self.totalMonths % 12)
        )
        # Clear the widget only from the second property on, then re-render
        # every accumulated entry.
        if investment > 1:
            self.text.delete(1.0, END)
        self.text.insert(END, "Amortization Schedule\n")
        self.text.insert(
            END, "Pmt #\t\t Interest\t\tPrin Pmt\t Adtn Pay\t Remaining Prin\n"
        )
        for i in entries:
            self.text.insert(END, i + "\n")

    def clearFile(self):
        """Callback for "Clear File": drop all schedule lines and clear the
        text widget."""
        global entries
        entries.clear()
        self.text.delete(1.0, END)

    def saveLoanFile(self):
        """Callback for "Save Loan to File": prompt for a recipient name and
        write the current schedule to "<name> Loan Document.txt".

        NOTE(review): cancelling the dialog returns None, which makes the
        string concatenations below raise TypeError; the empty-name retry
        is also not re-validated. The "Interes Rate" typo is user-visible
        output and is deliberately left unchanged here.
        """
        filename = simpledialog.askstring(
            "Save Schedule To Recipient", "Enter Recipient Name"
        )
        if filename == "":
            messagebox.showerror(
                "Input Error", "Please make sure to enter the name of the recipient"
            )
            filename = simpledialog.askstring(
                "Save Schedule To Recipient", "Enter Recipient Name"
            )
        print(filename + " Loan Document.txt has been saved")
        f = open(filename + " Loan Document.txt", "w+")
        global entries
        f.write("\t\t\tLoan Document For " + filename + "\n")
        f.write(
            "------------------------------------------------------------------\n\n"
        )
        f.write(
            "Loan Amount: "
            + "$"
            + str(self.loanAmountVar.get())
            + "\t\t"
            + "Interes Rate: "
            + str(self.annualInterestRateVar.get())
            + "%"
            + "\t"
            + "Nbr Years: "
            + str(self.numberOfYearsVar.get())
            + "\n"
        )
        f.write(
            "Monthly Payment: "
            + "$"
            + str(self.monthlyPaymentVar.get())
            + "\t\t"
            + "Total Payment: "
            + "$"
            + str(self.totalPaymentVar.get())
            + "\n\n"
        )
        f.write("Amortization Schedule\n")
        f.write(
            "Pmt #"
            + "\t\t"
            + " Interest"
            + "\t"
            + "Prin Pmt"
            + "\t"
            + "Remaining Prin\n"
        )
        f.write("\n".join(map(lambda x: str(x), entries)))
        f.close()
LoanCalculator() # Create GUI
| ZimboPro/scripts | pythonScripts/homeloan/homeloan.py | homeloan.py | py | 10,534 | python | en | code | 0 | github-code | 36 |
43156623065 | #!/usr/bin/env python
import unittest
import mock
from quadcopter_brain import QuadcopterBrain
class TestQuadcopterBrain(unittest.TestCase):
    """Unit tests for QuadcopterBrain with its hardware-facing collaborators
    (Quadcopter, LandingSite) replaced by mocks."""

    @mock.patch('landing_site.LandingSite')
    @mock.patch('quadcopter.Quadcopter')
    def setUp(self, quadcopter_mock, landing_site_mock):
        """Build a brain whose quadcopter/landing_site attributes are mocks."""
        self.quadcopter_brain = QuadcopterBrain()
        self.quadcopter_mock = self.quadcopter_brain.quadcopter
        self.landing_site_mock = self.quadcopter_brain.landing_site

    @mock.patch('rospy.sleep')
    @mock.patch('waypoint_tools.WaypointTools.build_waypoint')
    def test_go_to_waypoints(self, build_waypoint_mock, sleep_mock):
        """Each datum is built into a waypoint and sent, in order."""
        waypoint_data = [0, 1]
        build_waypoint_mock.side_effect = [10, 11]
        self.quadcopter_brain.go_to_waypoints(waypoint_data)
        expected = [mock.call(0), mock.call(1)]
        self.assertEqual(build_waypoint_mock.call_args_list, expected)
        expected = [mock.call(10), mock.call(11)]
        self.assertEqual(
            self.quadcopter_mock.send_waypoint.call_args_list, expected)

    @mock.patch('quadcopter_brain.QuadcopterBrain.go_to_waypoints')
    def test_fly_path(self, go_to_waypoints_mock):
        """fly_path launches, flies the waypoints, then lands."""
        waypoint_data = [0, 1]
        self.quadcopter_brain.fly_path(waypoint_data)
        self.quadcopter_mock.launch.assert_called_once_with()
        go_to_waypoints_mock.assert_called_once_with(waypoint_data)
        self.quadcopter_mock.land.assert_called_once_with()

    @mock.patch('quadcopter_brain.QuadcopterBrain.go_to_waypoints')
    def test_go_to_waypoint_given_metered_offset(self, go_to_waypoint_mock):
        """Metered east/north offsets convert to the expected lat/long.

        Expected coordinates were read off Google Maps; 6 decimal places
        of latitude/longitude is roughly 0.1 m.
        """
        delta_east = 10  # Meters
        delta_north = -10  # Meters
        self.quadcopter_brain.quadcopter.current_lat = 42.0
        self.quadcopter_brain.quadcopter.current_long = -71.0
        self.quadcopter_brain.quadcopter.current_rel_alt = 4.5
        self.quadcopter_brain.go_to_waypoint_given_metered_offset(delta_east,
                                                                  delta_north)
        called_waypoint = go_to_waypoint_mock.call_args[0][0][0]
        actual_waypoint = {"latitude": 41.999912, "longitude": -70.999877,
                           "altitude": 4.5}  # Taken from google maps
        self.assertAlmostEqual(called_waypoint["latitude"],
                               actual_waypoint["latitude"], 6)
        self.assertAlmostEqual(called_waypoint["longitude"],
                               actual_waypoint["longitude"], 6)
        self.assertAlmostEqual(called_waypoint["altitude"],
                               actual_waypoint["altitude"])
        wait_time = go_to_waypoint_mock.call_args[0][1]
        self.assertAlmostEqual(wait_time, 15)

        # Second case: opposite offsets, altitude change and explicit sleep.
        delta_east = -10  # Meters
        delta_north = 10  # Meters
        delta_alt = 2  # Meters
        sleep_time = 10  # Seconds
        self.quadcopter_brain.go_to_waypoint_given_metered_offset(delta_east,
                                                                  delta_north,
                                                                  delta_alt,
                                                                  sleep_time)
        called_waypoint = go_to_waypoint_mock.call_args[0][0][0]
        actual_waypoint = {"latitude": 42, "longitude": -71,
                           "altitude": 6.5}  # Taken from google maps
        # NOTE(review): assertNotEqual takes no "places" argument — the
        # trailing 6 here is silently treated as the failure message.
        self.assertNotEqual(called_waypoint["latitude"],
                            actual_waypoint["latitude"], 6)
        self.assertNotEqual(called_waypoint["longitude"],
                            actual_waypoint["longitude"], 6)
        self.assertAlmostEqual(called_waypoint["altitude"],
                               actual_waypoint["altitude"])
        wait_time = go_to_waypoint_mock.call_args[0][1]
        self.assertAlmostEqual(wait_time, 10)

    # # Ask Kyle what's up
    # @mock.patch('rospy.sleep')
    # def test_find_landing_site(self, sleep_mock):
    #     # Test what happens when seen
    #     self.landing_site_mock.in_view = True
    #     self.landing_site_mock.lat_long.result = (-42, 71)
    #     res = self.quadcopter_brain.find_landing_site()
    #     self.assertEqual(res, (True, -42, 71))

    #     # Test what happens when not seen
    #     self.landing_site_mock.in_view = False
    #     self.landing_site_mock.lat_long.result = (-42, 71)
    #     res = self.quadcopter_brain.find_landing_site()
    #     self.assertEqual(res, (False, 0, 0))

    #     # Test what happens when seen after a few tries
    #     in_view_mock = mock.PropertyMock(side_effect=[False, False, True])
    #     type(self.landing_site).in_view = in_view_mock
    #     res = self.quadcopter_brain.find_landing_site()
    #     expected = [mock.call(0.1), mock.call(0.1)]
    #     self.assertEqual(res, (True, -42, 71))
    #     self.assertEqual(sleep_mock.call_args_list, expected)

    @mock.patch('quadcopter_brain.QuadcopterBrain.go_to_waypoints')
    @mock.patch('quadcopter_brain.QuadcopterBrain.find_landing_site')
    def test_land_on_fiducial_simple(self, find_mock, go_to_mock):
        """Land at the fiducial when found; land in place when not."""
        # Fiducial found during landing
        find_mock.return_value = True, 42, 71
        self.quadcopter_brain.land_on_fiducial_simple()
        wpt = {'latitude': 42,
               'longitude': 71,
               'altitude': 1.0}
        go_to_mock.assert_called_once_with([wpt])
        self.quadcopter_mock.land.assert_called_once_with()

        # Fiducial not found during landing
        go_to_mock.reset_mock()
        self.quadcopter_mock.land.reset_mock()
        find_mock.return_value = False, 0, 0
        self.quadcopter_brain.land_on_fiducial_simple()
        assert not go_to_mock.called
        self.quadcopter_mock.land.assert_called_once_with()

    @mock.patch('quadcopter_brain.QuadcopterBrain.find_landing_site')
    @mock.patch('quadcopter_brain.QuadcopterBrain.go_to_waypoints')
    def test_find_landing_site_at_waypoints(self, go_to_mock, find_site_mock):
        """Search stops at the first waypoint where the site is found."""
        waypoint_data = [0, 1]
        find_site_mock.return_value = False, 0, 0
        res = \
            self.quadcopter_brain.find_landing_site_at_waypoints(waypoint_data)
        go_to_expected = [mock.call([pt]) for pt in waypoint_data]
        self.assertEqual(go_to_mock.call_args_list, go_to_expected)
        find_site_expected = [mock.call(15) for point in waypoint_data]
        self.assertEqual(find_site_mock.call_args_list, find_site_expected)
        self.assertEqual(res, (False, 0, 0))

        go_to_mock.reset_mock()
        find_site_mock.reset_mock()
        find_site_mock.return_value = True, 42.0, -71.0
        res = \
            self.quadcopter_brain.find_landing_site_at_waypoints(waypoint_data)
        go_to_mock.assert_called_once_with([0])
        find_site_mock.assert_called_once_with(15)
        self.assertEqual(res, (True, 42.0, -71.0))
# Run the suite when executed directly (e.g. `python test_quadcopter_brain.py`).
if __name__ == '__main__':
    unittest.main()
| vpreston/mission_runner | quadcopter_brain/src/quadcopter_brain/test_quadcopter_brain.py | test_quadcopter_brain.py | py | 6,854 | python | en | code | 0 | github-code | 36 |
24012640957 | import random
def random_network_creator(n, file_name="random_networks/demofile2.BIFXML"):
    ''' Creates a random network for a given number of variables.

    Writes a BIFXML document containing boolean variables
    NODE_0 .. NODE_{n-1} and one probability DEFINITION per variable, so
    every node appears at least once as a child (<FOR>). Parents are drawn
    uniformly from the *other* nodes.

    Bug fixes versus the original:
      * parents were drawn with random.randint(0, n), which could emit
        <GIVEN>NODE_n</GIVEN> for a node that was never declared;
      * a single re-roll could still leave parent == child (a self-loop).

    Args:
        n: number of variables in the network (>= 2 for the no-self-loop
           guarantee to hold).
        file_name: output path; the default preserves the original
           hard-coded location, so existing callers are unaffected.
    '''
    # NOTE: append mode is kept from the original; point file_name at a
    # fresh path per network, since appending to an existing file yields
    # multiple concatenated XML documents.
    with open(file_name, "a") as f:
        # Fixed BIFXML prolog, DTD and network header.
        f.writelines([
            '<?xml version="1.0" encoding="US-ASCII"?>' '\n',
            '<!DOCTYPE BIF [''\n',
            ' <!ATTLIST BIF VERSION CDATA #REQUIRED>''\n',
            ' <!ELEMENT NETWORK ( NAME, ( PROPERTY | VARIABLE | DEFINITION )* )>''\n',
            ' <!ELEMENT NAME (#PCDATA)>''\n',
            ' <!ELEMENT VARIABLE ( NAME, ( OUTCOME | PROPERTY )* ) >''\n',
            ' <!ATTLIST VARIABLE TYPE (nature|decision|utility) "nature">''\n',
            ' <!ELEMENT OUTCOME (#PCDATA)>''\n',
            ' <!ELEMENT DEFINITION ( FOR | GIVEN | TABLE | PROPERTY )* >''\n',
            ' <!ELEMENT FOR (#PCDATA)>''\n',
            ' <!ELEMENT GIVEN (#PCDATA)>''\n',
            ' <!ELEMENT TABLE (#PCDATA)>''\n',
            ' <!ELEMENT PROPERTY (#PCDATA)>''\n',
            ']>''\n',
            '\n',
            '\n',
            '<BIF VERSION="0.3">''\n',
            '<NETWORK>''\n',
            '<NAME>random-network</NAME>''\n',
            '\n',
            '<!-- Variables -->',
            '\n'])

        # One boolean VARIABLE element per node, with a random layout hint.
        for i in range(n):
            f.writelines([
                '<VARIABLE TYPE="nature">' '\n',
                ' <NAME>NODE_'+str(i)+'</NAME>''\n',
                ' <OUTCOME>true</OUTCOME>''\n',
                ' <OUTCOME>false</OUTCOME>''\n',
                ' <PROPERTY>position = ('+str(random.randint(-500, 500))+', '+str(random.randint(-500, 500))+')</PROPERTY>''\n',
                '</VARIABLE>''\n',
                '\n'])

        f.writelines(['<!-- Probability distributions -->',
                      '\n'])

        # One DEFINITION per node. Shuffling guarantees every node appears
        # exactly once as a child, so there are no isolated leaf-only nodes.
        children = list(range(n))
        random.shuffle(children)
        for child in children:
            parent = random.randrange(n)   # valid indices are 0 .. n-1
            while n > 1 and parent == child:
                parent = random.randrange(n)
            f.writelines([
                '<DEFINITION>' '\n',
                ' <FOR>NODE_'+ str(child) +'</FOR>' '\n',
                ' <GIVEN>NODE_'+ str(parent) +'</GIVEN>' '\n',
                ' <TABLE>0.6 0.4 0.05 0.95 </TABLE>' '\n',
                '</DEFINITION>' '\n',
                '\n'])

        # Close the document properly.
        f.writelines([
            '</NETWORK>' '\n',
            '</BIF>'])
random_network_creator(15) # change size of the network to be created here
| ORickL/KR-Bayesian-network | generating_networks.py | generating_networks.py | py | 3,283 | python | en | code | 0 | github-code | 36 |
23563895086 | from sys import argv
from os.path import join
from define import define
from resources import ResourceTimestamp,resources_dirname
from storage import StorageAccessor
def upload(filename, filepath):
    """Push one resource file to storage and record its remote timestamp.

    Uploads *filepath* under the name *filename* via the module-level
    ``storage`` accessor, then persists the storage-side timestamp through
    a ResourceTimestamp record. Always returns True.
    """
    storage.upload_resource(filename, filepath)
    record = ResourceTimestamp(filename)
    remote_stamp = storage.get_resource_timestamp(filename)
    record.write_timestamp(remote_stamp)
    return True
if __name__ == '__main__':
    # Require at least one selection flag on the command line.
    if len(argv) == 1:
        print('please argment.')
        exit()

    storage = StorageAccessor()

    # Each flag maps to one resource basename; '-all' selects every
    # resource. This replaces three copy-pasted blocks that differed only
    # in the flag and resource name. Order matches the original:
    # informations, details, musictable.
    targets = (
        ('-informations', define.informations_resourcename),
        ('-details', define.details_resourcename),
        ('-musictable', define.musictable_resourcename),
    )
    for flag, resourcename in targets:
        if '-all' in argv or flag in argv:
            filename = f'{resourcename}.res'
            filepath = join(resources_dirname, filename)
            if upload(filename, filepath):
                print(f'Upload complete {filename}')
| kaktuswald/inf-notebook | resources_upload.py | resources_upload.py | py | 1,554 | python | en | code | 4 | github-code | 36 |
35855718282 | from __future__ import print_function
import scrapy
from scrapy.http.cookies import CookieJar
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.http import Request,FormRequest
from mytest.items import myItem
class mySpider(scrapy.Spider):
    """Scrape product title and star rating from an Amazon search page.

    Starts from a fixed "milk" keyword search and yields one ``myItem``
    per result container.
    """

    name = "myspider"
    allowed_domains = ["www.amazon.com"]
    start_urls = [
        "https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=milk"
    ]
    # Browser-like headers / form data, kept available for use with
    # Request/FormRequest (e.g. a start_requests override).
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
        "Referer": "https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=milk"
    }
    formdata = {
        'url': 'search-alias=aps',
        'field-keywords': 'milk'
    }

    def parse(self, response):
        """Yield one myItem (title, stars) per search-result container.

        Bug fix: the original created an empty ``myItem()`` and printed
        ``myitem['title']`` after the loop, which raised KeyError on every
        response because the item was never populated; that debug code and
        the unused title/price locals are removed.
        """
        for result in Selector(response).css('.s-item-container'):
            # e.g. "4.5 out of 5 stars" -> "4.5" (drop trailing 15 chars).
            stars = str(result.css('.a-icon-alt::text').extract_first())[:-15]
            yield myItem(
                title=result.css('h2::text').extract_first(),
                stars=stars,
            )
34598487595 |
# IntesisHome Inegration with Domoticz
#
# Author: CV8R
#
"""
<plugin key="BasePlug" name="IntesisBox WMP-1 Protocol" author="CV8R" version="0.0.9" >
<description>
<h2>IntesisBox WMP-1</h2><br/>
<ul style="list-style-type:square">
<li>IntesisBox WMP-1 interface for air conditioners into IP based control systems</li>
</ul>
<ul style="list-style-type:square">
<h3>Configuration</h3><br/>
<li>IP Address and Port number default 3310 </li>
</ul>
</description>
<params>
<param field="Address" label="IP Address" width="200px" required="true" default=""/>
<param field="Port" label="Port" width="30px" required="true" default="3310"/>
<param field="Mode1" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
from typing import List
# Global var definitions
InitHeartbeatCount = 0   # heartbeats since (re)connect; staggers the startup GET queries
unitmode = "N/A"         # last MODE reported by the unit (AUTO/HEAT/DRY/COOL/FAN)
oustandingPings = -1     # heartbeats since the last message was received; reset in onMessage
lastHeartbeat = 0        # replaced with a datetime when the first heartbeat fires
# Limits as Global vars
# Setpoint limits in tenths of a degree C (180 = 18.0); refreshed from the
# unit's LIMITS:SETPTEMP reply in onMessage.
minTempLimit = 180
maxTempLimit = 280
import Domoticz
import base64
import datetime
import re
class BasePlugin:
enabled = True
powerOn = 0
runCounter = 0
WMPConn = None
oustandingPings = 0
lastHeartbeat = datetime.datetime.now()
    def __init__(self):
        """No per-instance setup; real initialisation happens in onStart."""
        #self.var = 123
        return
    def onStart(self):
        """Domoticz plugin start hook.

        Slows the heartbeat to 20 s, enables debug logging when configured,
        and creates the seven plugin devices (power, ambient temp, mode and
        fan selectors, setpoint, error LED, error text) on first run only.
        """
        Domoticz.Log("onStart called")
        Domoticz.Heartbeat(20) # Set heartbeat interval slower than default
        if Parameters["Mode1"] == "Debug":
            Domoticz.Debugging(1)
        # Only create devices the first time the plugin runs.
        if (len(Devices) == 0):
            Domoticz.Device(Name="Power", Unit=1, Image=16, TypeName="Switch", Used=1).Create()
            Domoticz.Device(Name="Ambient Temp", Unit=2, TypeName="Temperature", Used=1).Create()
            Options = {"LevelActions" : "|||||",
                       "LevelNames" : "|Auto|Heat|Dry|Cool|Fan",
                       "LevelOffHidden" : "true",
                       "SelectorStyle" : "0"}
            Domoticz.Device(Name="Mode", Unit=3, TypeName="Selector Switch", Image=16, Options=Options, Used=1).Create()
            Options = {"LevelActions" : "||||",
                       "LevelNames" : "|Auto|L1|L2|L3",
                       "LevelOffHidden" : "true",
                       "SelectorStyle" : "0"}
            Domoticz.Device(Name="Fan Speed", Unit=4, TypeName="Selector Switch", Image=7, Options=Options, Used=1).Create()
            Domoticz.Device(Name="Set Temp", Unit=5, Type=242, Subtype=1, Image=16, Used=1).Create()
            Domoticz.Device(Name="Error LED", Unit=6, Image=13, TypeName="Switch", Used=1).Create()
            Domoticz.Device(Name="Error Text", Unit=7, TypeName="Text", Used=1).Create()
            Domoticz.Log("Device created.")
        DumpConfigToLog()
    def onStop(self):
        """Domoticz plugin stop hook; logging only."""
        Domoticz.Log("onStop called")
    def onConnect(self, Connection, Status, Description):
        """Connection-established callback.

        On success (Status == 0) requests the device ID; on failure logs
        the reason and drops the stored connection so onHeartbeat retries.
        """
        Domoticz.Log("onConnect called")
        # NOTE(review): ConnectState is declared global here but never
        # defined or assigned anywhere visible — likely leftover code.
        global ConnectState
        Domoticz.Log("Connecting")
        if (Connection == self.WMPConn):
            if (Status == 0):
                Domoticz.Log("Connected successfully to: " + Connection.Address + ":" + Connection.Port)
                self.WMPConn.Send('ID\n') # Get ID at startup
            else:
                if (Description.find("Only one usage of each socket address") > 0):
                    Domoticz.Log(Connection.Address + ":" + Connection.Port + " is busy, waiting.")
                else:
                    Domoticz.Log("Failed to connect (" + str(Status) + ") to: " + Connection.Address + ":" + Connection.Port + " with error: " + Description)
                self.WMPConn = None
    def onMessage(self, Connection, Data):
        """Parse an incoming WMP protocol message and update devices.

        Splits the raw payload on ':', ',', '[' and ']' into tokens, then
        dispatches on the first token: ACK/ERR (command results), LIMITS
        (capability ranges, including the setpoint min/max kept in the
        module globals) and CHN (status updates for power, temperatures,
        mode, fan speed, vanes and error status). Receiving any message
        resets the outstanding-ping counter used by the heartbeat
        keep-alive check.
        """
        Domoticz.Debug("onMessage called")
        global unitmode
        global oustandingPings
        global lastHeartbeat
        global minTempLimit
        global maxTempLimit
        strData = Data.decode("utf-8", "ignore")
        Domoticz.Debug("onMessage called with Data: '" + str(strData) + "'")
        #msgDataListRaw = re.split(r':+|,', strData)  # type: List[str]
        msgDataListRaw = re.split(r':+|,+|\[+|\]', strData) # split string to list of strings
        msgDataList = list(filter(None, msgDataListRaw)) # Remove consecutive delimiters note: filter does not return a list, use list to turn into list
        # Dump stripped messages in to Domoticz Log
        count = 0
        for msgData in msgDataList:
            Domoticz.Debug("Stripped Message[" + str(count) + "] = " + msgData ) # Log the messages incoming and their stripped count
            count = count + 1
        Domoticz.Debug("Resetting Ping to 0")
        oustandingPings = 0 # Reset ping counter onmessage for making sure connection is up in Heartbeat
        # Is it a status update
        if (msgDataList[0] == 'ACK'):
            Domoticz.Debug("Message Acknowledged with response: " + msgDataList[0])
        elif (msgDataList[0] == 'ERR'):
            Domoticz.Error("WMP Message ########## SENDING MESSAGE ERROR ########## with response: " + msgDataList[0])
            Devices[6].Update(nValue=1, sValue="100") # Set the Error LED switch to ON to flag for a send error
        elif (msgDataList[0] == 'LIMITS'): #Get the limits from the AC unit
            DataValues = '|'.join(msgDataList[2:])
            if (msgDataList[1] == 'ONOFF'): #Get the ONOFF limits from the AC unit
                Domoticz.Log("ONOFF Limits from unit: " + DataValues)
            elif (msgDataList[1] == 'MODE'): #Get the MODE limits from the AC unit
                Domoticz.Log("MODE Limits from unit: " + DataValues)
            elif (msgDataList[1] == 'FANSP'): #Get the FANSP limits from the AC unit
                Domoticz.Log("FANSP Limits from unit: " + DataValues)
            elif (msgDataList[1] == 'VANEUD'): #Get the VANEUD limits from the AC unit
                Domoticz.Log("VANEUD Limits from unit: " + DataValues)
            elif (msgDataList[1] == 'VANELR'): #Get the VANELR limits from the AC unit
                Domoticz.Log("VANELR Limits from unit: " + DataValues)
            elif (msgDataList[1] == 'SETPTEMP'): #Get the SETPTEMP temp limits from the AC unit
                # Values arrive in tenths of a degree; cached globally and
                # used to clamp user setpoints in onCommand.
                Domoticz.Debug("SETPTEMP Temp limit values from unit: " + DataValues)
                minTempLimit = int(msgDataList[2])
                maxTempLimit = int(msgDataList[3])
                Domoticz.Status("Min Temp Limit: " + str(minTempLimit) + " Max Temp Limit: " + str(maxTempLimit))
        # CHN,<unit>:<function>,<value> — device status update.
        if (msgDataList[0] == 'CHN'):
            Domoticz.Debug("Status Update - Unit: " + msgDataList[1] + " Function: " + msgDataList[2] + " Value = " + msgDataList[3])
            # Update the status to Domoticz
            if (msgDataList[2] == 'ONOFF'):
                if (msgDataList[3] == 'ON'):
                    Domoticz.Status("Update status to On")
                    Devices[1].Update(nValue=1, sValue="100") # AC Power
                elif (msgDataList[3] == 'OFF'):
                    Domoticz.Status("Update status to Off")
                    Devices[1].Update(nValue=0, sValue="0")
            elif (msgDataList[2] == 'AMBTEMP'):
                # Ambient temperature arrives in tenths of a degree.
                ambtemp = str(float(msgDataList[3])/10)
                Domoticz.Log("Ambient temp")
                Domoticz.Debug("Current ambient temp: " + ambtemp + " Degrees")
                Devices[2].Update(nValue=0, sValue=ambtemp)
                #Domoticz.Debug("Resetting Ping to 0")  # using AMBTEMP
                #oustandingPings = 0 # Reset ping counter for making sure connection is up in Heartbeat
            elif (msgDataList[2] == 'SETPTEMP'):
                settemp = str(int(msgDataList[3])/10)
                # In FAN mode a setpoint is meaningless, so show a fixed value.
                if (unitmode != 'FAN'):
                    Domoticz.Status("Set temp is set to: " + settemp + " Degrees")
                    Devices[5].Update(nValue=1, sValue=settemp) # Update the temp display in the set temp device
                else:
                    Domoticz.Debug("FAN MODE setting temp to not display")
                    Devices[5].Update(nValue=1, sValue="22") # N/A to have a temp displayed
            elif (msgDataList[2] == 'MODE'):
                # Map the unit's mode to the selector-switch levels (10..50).
                unitmode = msgDataList[3]
                if (unitmode == "AUTO"):
                    Domoticz.Status("Mode to: " + unitmode)
                    Devices[3].Update(nValue=1, sValue="10") # Auto
                elif (unitmode == "HEAT"):
                    Domoticz.Status("Mode to: " + unitmode)
                    Devices[3].Update(nValue=1, sValue="20") # Heat
                elif (unitmode == "DRY"):
                    Domoticz.Status("Mode to: " + unitmode)
                    Devices[3].Update(nValue=1, sValue="30") # Dry
                elif (unitmode == "COOL"):
                    Domoticz.Status("Mode to: " + unitmode)
                    Devices[3].Update(nValue=1, sValue="40") # Cool
                elif (unitmode == "FAN"):
                    Domoticz.Status("Mode to: " + unitmode)
                    Devices[3].Update(nValue=1, sValue="50") # Fan
                Devices[3].Refresh()
            elif (msgDataList[2] == 'FANSP'):
                # Map fan speed (AUTO/1/2/3) to selector levels (10..40).
                fspeed = msgDataList[3]
                if (fspeed == "AUTO"):
                    Domoticz.Status("Fan Speed to: " + fspeed)
                    Devices[4].Update(nValue=1, sValue="10") # Fan Auto
                elif (fspeed == "1"):
                    Domoticz.Status("Fan Speed to: " + fspeed)
                    Devices[4].Update(nValue=1, sValue="20") # Fan Level 1
                elif (fspeed == "2"):
                    Domoticz.Status("Fan Speed to: " + fspeed)
                    Devices[4].Update(nValue=1, sValue="30") # Fan Level 2
                elif (fspeed == "3"):
                    Domoticz.Status("Fan Speed to: " + fspeed)
                    Devices[4].Update(nValue=1, sValue="40") # Fan Level 3
                Devices[4].Refresh()
            elif (msgDataList[2] == 'VANEUD'):
                vaneud = msgDataList[3]
                Domoticz.Status("Vane Up/Down: " + vaneud)
            elif (msgDataList[2] == 'VANELR'):
                vanelr = msgDataList[3]
                Domoticz.Status("Vane Left/Right: " + vanelr)
            elif (msgDataList[2] == 'ERRSTATUS'):
                errorstatus = msgDataList[3]
                if (errorstatus != "OK"):
                    Domoticz.Status("Error Status: " + errorstatus)
                    Devices[6].Update(nValue=1, sValue="100") # Set the Error LED switch to ON to flag for an ERROR
                elif (errorstatus == "OK"):
                    Domoticz.Status("Error Status: " + errorstatus)
                    Devices[6].Update(nValue=0, sValue="0") # Set the Error LED switch to OFF to clear ERROR
            elif (msgDataList[2] == 'ERRCODE'):
                errorcode = msgDataList[3]
                Domoticz.Status("Error Code: " + errorcode)
                Devices[7].Update(nValue=1, sValue=errorcode) # Set error text
            else:
                Domoticz.Error("Unrecognised status command")
    def onCommand(self, Unit, Command, Level, Hue):
        """Translate a Domoticz device command into a WMP SET message.

        Unit 1 = power on/off, unit 3 = mode selector, unit 4 = fan-speed
        selector, unit 5 = temperature setpoint (clamped to the unit's
        reported limits and rounded to the nearest 0.5 degree), unit 6 =
        user clearing the error LED (local only, nothing sent).
        """
        Domoticz.Log("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
        if (Unit == 1):
            if (Command == "On"):
                Domoticz.Status("Sending Power ON")
                self.powerOn = 1
                self.WMPConn.Send('SET,1:ONOFF,ON\n')
            elif(Command == "Off"):
                Domoticz.Status("Sending Power OFF")
                self.powerOn = 0
                self.WMPConn.Send('SET,1:ONOFF,OFF\n')
        elif (Unit == 3):
            # Selector levels 10..50 -> auto/heat/dry/cool/fan.
            if (Command == "Set Level"):
                Domoticz.Debug("Sending Mode")
                if (str(Level) == '10'):
                    Domoticz.Status("Sending Mode Auto")
                    self.WMPConn.Send('SET,1:MODE,auto\n')
                elif (str(Level) == '20'):
                    Domoticz.Status("Sending Mode Heat")
                    self.WMPConn.Send('SET,1:MODE,heat\n')
                elif (str(Level) == '30'):
                    Domoticz.Status("Sending Mode Dry")
                    self.WMPConn.Send('SET,1:MODE,dry\n')
                elif (str(Level) == '40'):
                    Domoticz.Status("Sending Mode Cool")
                    self.WMPConn.Send('SET,1:MODE,cool\n')
                elif (str(Level) == '50'):
                    Domoticz.Status("Sending Mode Fan")
                    self.WMPConn.Send('SET,1:MODE,fan\n')
                self.WMPConn.Send('LIMITS:SETPTEMP\n') # Check temp limits again when changing modes
        elif (Unit == 4):
            # Selector levels 10..40 -> fan AUTO/1/2/3.
            if (Command == "Set Level"):
                Domoticz.Debug("Sending Fan Speed")
                if (str(Level) == '10'):
                    Domoticz.Status("Sending Fan Speed Auto")
                    self.WMPConn.Send('SET,1:FANSP,AUTO\n')
                elif (str(Level) == '20'):
                    Domoticz.Status("Sending Fan Speed Level 1")
                    self.WMPConn.Send('SET,1:FANSP,1\n')
                elif (str(Level) == '30'):
                    Domoticz.Status("Sending Fan Speed Level 2")
                    self.WMPConn.Send('SET,1:FANSP,2\n')
                elif (str(Level) == '40'):
                    Domoticz.Status("Sending Fan Speed Level 3")
                    self.WMPConn.Send('SET,1:FANSP,3\n')
        elif (Unit == 5):
            if (Command == "Set Level"):
                settemp = Level
                Domoticz.Debug("String of Set Temp raw value = " + str(Level))
                # Convert degrees to tenths and round to the nearest 5
                # (i.e. nearest 0.5 degree), as the protocol expects.
                settemp = round((int((float(settemp) * 10)))/5)*5 #includes complex rounding to nearest 5
                Domoticz.Debug("Set Temp converted value = " + str(settemp))
                if settemp < minTempLimit: #Adjusting for minLimit of unit
                    Domoticz.Status("Set temp point less than min limit setting to min value = " + str(minTempLimit / 10) + " Degrees")
                    settemp = minTempLimit #Send the minimum of unit
                if settemp > maxTempLimit: #Adjusting for minLimit of unit
                    Domoticz.Status("Set temp point greater than max limit setting to max value = " + str(maxTempLimit / 10) + " Degrees")
                    settemp = maxTempLimit
                Domoticz.Status("Setting Temp to: " + str(settemp / 10) + " Degrees")
                Domoticz.Debug("Sending Set Temp to: " + str(settemp))
                self.WMPConn.Send('SET,1:SETPTEMP,' + str(settemp) + '\n')
        elif (Unit == 6):
            if (Command == "Off"):
                Domoticz.Log("User cleared the ERROR Status LED")
                Devices[6].Update(nValue=0, sValue="0") # Set the Error LED switch to Off
        else:
            Domoticz.Error("No command available to send")
def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
Domoticz.Log("Notification: " + Name + "," + Subject + "," + Text + "," + Status + "," + str(Priority) + "," + Sound + "," + ImageFile)
def onDisconnect(self, Connection):
Domoticz.Log("onDisconnect called")
self.WMPConn = None
def onHeartbeat(self):
global InitHeartbeatCount # Counter for first heartbeats
global oustandingPings # Counter for the Pings for check alive using AMBTEMP
global lastHeartbeat
Domoticz.Debug("onHeartbeat called")
Domoticz.Debug("onHeartbeat called, last response seen " + str(oustandingPings) + " heartbeats ago.")
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount))
lastHeartbeat = datetime.datetime.now()
if (self.WMPConn == None):
Domoticz.Log("Connect to WMP")
InitHeartbeatCount = 0 # reset heartbeat count
oustandingPings = -1 # reset ping count
self.handleConnect()
else:
if (self.WMPConn.Name == "WMP_Connection") and (self.WMPConn.Connected()):
oustandingPings = oustandingPings + 1 # Increment Ping Counter, reset at AMPTEMP Status
if InitHeartbeatCount <= 6:
InitHeartbeatCount = InitHeartbeatCount + 1
Domoticz.Debug("Heartbeat Init Count Incremented now = " + str(InitHeartbeatCount))
if InitHeartbeatCount == 1: #Need to delay these inital messages or some are missed
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting ONOFF")
self.WMPConn.Send('GET,1:ONOFF\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting MODE")
self.WMPConn.Send('GET,1:MODE\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting SETPTEMP")
self.WMPConn.Send('GET,1:SETPTEMP\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting FANSP")
self.WMPConn.Send('GET,1:FANSP\n')
if InitHeartbeatCount == 3:
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting VANEUD")
self.WMPConn.Send('GET,1:VANEUD\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting VANELR")
self.WMPConn.Send('GET,1:VANELR\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting ERRSTATUS")
self.WMPConn.Send('GET,1:ERRSTATUS\n')
if InitHeartbeatCount == 4:
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting ERRCODE")
self.WMPConn.Send('GET,1:ERRCODE\n')
if InitHeartbeatCount == 5:
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS ONOFF")
self.WMPConn.Send('LIMITS:ONOFF\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS MODE")
self.WMPConn.Send('LIMITS:MODE\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS FANSP")
self.WMPConn.Send('LIMITS:FANSP\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS VANEUD")
self.WMPConn.Send('LIMITS:VANEUD\n')
if InitHeartbeatCount == 6:
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS VANELR")
self.WMPConn.Send('LIMITS:VANELR\n')
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS SETPTEMP")
self.WMPConn.Send('LIMITS:SETPTEMP\n')
Domoticz.Heartbeat(20) # Extending heartbeat at last Limit
if InitHeartbeatCount == 7: # when count gets to this number and is connected, it will not increment and commence AMBTEMP Heartbeats
Domoticz.Debug("Getting Ambient Temp")
self.WMPConn.Send('GET,1:AMBTEMP\n') # Get AMBTEMP at Heartbeat to confirm connected
if (oustandingPings == 3):
Domoticz.Log(self.WMPConn.Name + " has not responded to 3 heartbeats terminating connection.")
if (self.WMPConn.Connected()):
self.WMPConn.Disconnect()
Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount))
self.WMPConn = None
def handleConnect(self):
self.WMPConn = None
Domoticz.Debug("Settings shorter heartbeat to speed up initialisation")
Domoticz.Heartbeat(5) # Setting the inital hearbeat timeout used for delaying startup messages - extended in onHeartbeat after counter reached
self.WMPConn = Domoticz.Connection(Name="WMP_Connection", Transport="TCP/IP", Protocol="Line", Address=Parameters["Address"], Port=Parameters["Port"])
self.WMPConn.Connect()
# Single module-level plugin instance driven by the callback shims below.
# (The original `global _plugin` here was removed: `global` is a no-op at
# module scope.)
_plugin = BasePlugin()
def onStart():
    """Domoticz callback shim: forward to the plugin instance.

    The `global _plugin` declaration was removed: it is unnecessary for a
    read-only access."""
    _plugin.onStart()
def onStop():
    """Domoticz callback shim: forward to the plugin instance (read-only
    access needs no `global` declaration)."""
    _plugin.onStop()
def onConnect(Connection, Status, Description):
    """Domoticz callback shim: forward to the plugin instance (read-only
    access needs no `global` declaration)."""
    _plugin.onConnect(Connection, Status, Description)
def onMessage(Connection, Data):
    """Domoticz callback shim: forward to the plugin instance (read-only
    access needs no `global` declaration)."""
    _plugin.onMessage(Connection, Data)
def onCommand(Unit, Command, Level, Hue):
    """Domoticz callback shim: forward to the plugin instance (read-only
    access needs no `global` declaration)."""
    _plugin.onCommand(Unit, Command, Level, Hue)
def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
    """Domoticz callback shim: forward to the plugin instance (read-only
    access needs no `global` declaration)."""
    _plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)
def onDisconnect(Connection):
    """Domoticz callback shim: forward to the plugin instance (read-only
    access needs no `global` declaration)."""
    _plugin.onDisconnect(Connection)
def onHeartbeat():
    """Domoticz callback shim: forward to the plugin instance (read-only
    access needs no `global` declaration)."""
    _plugin.onHeartbeat()
# Generic helper functions
def DumpConfigToLog():
    """Write every non-empty plugin parameter and each device's state to the
    Domoticz debug log."""
    for key, value in Parameters.items():
        if value != "":
            Domoticz.Debug("'" + key + "':'" + str(value) + "'")
    Domoticz.Debug("Device count: " + str(len(Devices)))
    for unit in Devices:
        device = Devices[unit]
        Domoticz.Debug("Device: " + str(unit) + " - " + str(device))
        Domoticz.Debug("Device ID: '" + str(device.ID) + "'")
        Domoticz.Debug("Device Name: '" + device.Name + "'")
        Domoticz.Debug("Device nValue: " + str(device.nValue))
        Domoticz.Debug("Device sValue: '" + device.sValue + "'")
        Domoticz.Debug("Device LastLevel: " + str(device.LastLevel))
    return
| luismalddonado/IntesishomewithDomoticz | plugin.py | plugin.py | py | 18,692 | python | en | code | 3 | github-code | 36 |
class MagicDictionary:
    """LeetCode 676: dictionary supporting search for words that differ from a
    stored word by exactly one character.

    Note: the original first line carried a stray dataset id prefix; repaired.
    """

    def __init__(self):
        # Maps word length -> list of stored words of that length, so search
        # only compares candidates of matching length.
        self.wordsdict = {}

    def buildDict(self, dictionary: List[str]) -> None:
        """Index every word by its length."""
        for word in dictionary:
            # setdefault + append avoids the original's O(len) list copy
            # (`get(...) + [word]`) on every insertion.
            self.wordsdict.setdefault(len(word), []).append(word)

    def search(self, searchWord: str) -> bool:
        """Return True if some stored word matches searchWord with exactly
        one character changed (zero differences does NOT count)."""
        for candidate in self.wordsdict.get(len(searchWord), []):
            mismatches = 0
            for a, b in zip(candidate, searchWord):
                if a != b:
                    mismatches += 1
                    if mismatches > 1:
                        break  # more than one difference -> candidate fails
            if mismatches == 1:
                return True
        return False
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dictionary)
# param_2 = obj.search(searchWord) | LittleCrazyDog/LeetCode | 676-implement-magic-dictionary/676-implement-magic-dictionary.py | 676-implement-magic-dictionary.py | py | 750 | python | en | code | 2 | github-code | 36 |
28987537714 | import threading as td
import RPi.GPIO as GPIO
import datetime as dt
import time
from helpers import TimeMeasure
import elemental_api_class as liveapi
class StreamAvailController:
    """Drives ad-break ("avail") start/stop cues on an Elemental Live event in
    response to GPI edge interrupts, with a lock interval acting as a debounce.

    Fixes applied: integer comparisons with `is` (which relies on CPython
    small-int interning and raises SyntaxWarning on modern Pythons) were
    replaced with `==`; a commented-out dead method was removed.
    """

    def __init__(self, gpi_trigger, event_id, elemental_ip, lock_interval = 3, in_cue = False):
        self.gpi_trigger = gpi_trigger          # GPI pin watched for edges
        self.event_id = event_id                # Elemental Live event to cue
        self.elemental_api = liveapi.Elemental_api(elemental_ip)
        self.lock_interval = lock_interval      # seconds the stream stays locked after a cue
        self.in_cue = in_cue                    # True while an avail is running
        self.stream_locked = False              # debounce flag; cues are refused while locked
        self.splice_counter = 0                 # successful cue starts
        self.interrupt_counter = 0              # GPI edges seen
        self.reaction_time = TimeMeasure()

    def __str__(self):
        return "GPI: {}, event_id: {}, in_cue: {}".format(self.gpi_trigger, self.event_id, self.in_cue)

    def start_cue(self):
        """Start an avail unless the stream is locked; returns 1 when locked,
        otherwise the API response."""
        if self.stream_locked:
            return 1
        response = self.elemental_api.start_cue(self.event_id)
        self.in_cue = True
        self.lock_stream()
        print("3. Starting cue")
        return response

    def stop_cue(self):
        """Stop the running avail unless the stream is locked; returns 1 when
        locked, otherwise the API response."""
        if self.stream_locked:
            return 1
        response = self.elemental_api.stop_cue(self.event_id)
        self.in_cue = False
        self.lock_stream()
        print("3. Stopping cue")
        return response

    def start_stop_avail(self, gpi_triggered):
        """GPIO interrupt handler: a rising edge starts an avail, a falling
        edge stops it. Always returns 0."""
        time.sleep(0.001)
        edge = GPIO.input(gpi_triggered) # Read if rising or falling edge
        self.reaction_time.start_measure()
        self.interrupt_counter += 1
        print('--------------------------------------------\n')
        print("1.{} / {} Event detcted / Number: {}".format(dt.datetime.now(), edge, self.interrupt_counter))
        print("2. Stream is in cue: {}".format(self.in_cue))
        # Rising edge detected and Stream is NOT in Cue => Start cue
        if edge and not self.in_cue:
            response = self.start_cue()
            # BUG FIX: was `response is 1` (identity test on an int).
            if response == 1:
                print('Stream is locked!')
                return 0
            self.reaction_time.end_measure()
            self.splice_counter += 1
            print('4. AD STARTED: Splice count:{} / Event Num: {}\n'.format(self.splice_counter, self.interrupt_counter))
            print(response.text)
            self.reaction_time.print_measure()
            print('--------------------------------------------\n')
            return 0
        # Falling edge detected and Stream is in Cue => Stop cue
        elif not edge and self.in_cue:
            response = self.stop_cue()
            self.reaction_time.end_measure()
            # BUG FIX: was `response is 1` (identity test on an int).
            if response == 1:
                print('Stream is locked!')
                return 0
            print('4. AD STOPPED: Splice count:{} / Event Num: {}\n'.format(self.splice_counter, self.interrupt_counter))
            print(response.text)
            self.reaction_time.print_measure()
            print('--------------------------------------------\n')
            return 0
        return 0

    def lock_stream(self):
        """Lock the stream and schedule unlock_stream after lock_interval seconds."""
        self.stream_locked = True
        unlock_timer = td.Timer(self.lock_interval, self.unlock_stream)
        unlock_timer.start()

    def unlock_stream(self):
        """Timer callback: unlock, then reconcile the cue state with the
        current GPI level in case an edge was swallowed while locked."""
        self.stream_locked = False
        # If stream was locked on entering an avail (GPI went to 1)
        if self.in_cue:
            # GPI still high -> nothing to do; GPI dropped to 0 -> stop the cue.
            return 0 if GPIO.input(int(self.gpi_trigger)) else self.stop_cue()
        # Or stream was locked on exiting an avail (GPI went to 0)
        elif not self.in_cue:
            # GPI still low -> nothing to do; GPI rose to 1 -> start the cue.
            return 0 if not GPIO.input(int(self.gpi_trigger)) else self.start_cue()
| Hristiyan-Andreev/gpi_0.7_hw_reworked | s_av_ctrl.py | s_av_ctrl.py | py | 4,401 | python | en | code | 2 | github-code | 36 |
10331225638 | import json
import requests
class SSEStatsOnTime(object):
    """
    Fetches the real-time northbound trading quota/balance snapshot from the
    Shanghai Stock Exchange endpoint.
    See http://www.sse.com.cn/services/hkexsc/home/
    """
    def __init__(self):
        self.url = 'http://yunhq.sse.com.cn:32041//v1/hkp/status/amount_status'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        }

    def get_balance_info(self):
        """Return a dict with market category, timestamp, daily limit and
        remaining balance; returns None when the request is not HTTP 200."""
        # BUG FIX: self.headers was defined but never sent with the request.
        resp = requests.get(self.url, headers=self.headers)
        if resp.status_code == 200:
            datas = json.loads(resp.text)
            item = dict()
            # Market this quota belongs to (Shanghai).
            item['Category'] = "SH"
            # Date and time of the snapshot.
            m_today = str(datas['date'])
            m_today = "-".join([m_today[:4], m_today[4:6], m_today[6:8]])
            m_time = str(datas['status'][0][1])
            # The packed time field has a 2-digit hour from 10:00 onwards but
            # only 1 digit before 10:00, so the slicing offsets differ.
            if len(m_time) >= 9:
                m_time = ":".join([m_time[:2], m_time[2:4], m_time[4:6]])
            else:
                m_time = ":".join([m_time[:1], m_time[1:3], m_time[3:5]])
            _time = " ".join([m_today, m_time])
            item['Time'] = _time
            # Daily quota.
            item['DailyLimit'] = datas['status'][3]
            # Remaining balance for the day.
            item['Balance'] = datas['status'][4]
            return item
if __name__ == "__main__":
sse = SSEStatsOnTime()
sse.get_balance_info()
| wilsonkrum/DataFactory | hkland_flow/stock_hu_ontime.py | stock_hu_ontime.py | py | 1,796 | python | en | code | 0 | github-code | 36 |
from copy import deepcopy

# 4 x 4 board; each cell stores [fish number, direction] (BOJ "Teen Shark").
# (The original first line carried a stray dataset id prefix and a comment
# wrapped onto its own line; both repaired, Korean comments translated.)
fish_array = [[None] * 4 for _ in range(4)]
for i in range(4):
    fish = list(map(int, input().split()))
    # Each input row lists four fish as (number, direction) pairs.
    for j in range(4):
        # Input directions are 1-based; store them 0-based.
        fish_array[i][j] = [fish[j * 2], fish[j * 2 + 1] - 1]

# The 8 movement directions, rotating counter-clockwise starting from "up".
dx = [-1, -1, 0, 1, 1, 1, 0, -1]
dy = [0, -1, -1, -1, 0, 1, 1, 1]
# ํ์ฌ ์์น์์ ์ผ์ชฝ์ผ๋ก ํ์ ๋ ๊ฒฐ๊ณผ ๋ฐํ
def turn_left(direction):
return (direction + 1) % 8
result = 0  # final answer: the maximum total of eaten fish numbers
# ํ์ฌ ๋ฐฐ์ด์์ ํน์ ํ ๋ฒํธ์ ๋ฌผ๊ณ ๊ธฐ ์์น ์ฐพ๊ธฐ
def find_fish(array, index):
for i in range(4):
for j in range(4):
if array[i][j][0] == index:
return (i, j)
return None # ์ก์ ๋จนํ์ผ๋ฉด -1์ด๊ธฐ ๋๋ฌธ์ None ๋ฐํ
# ๋ชจ๋ ๋ฌผ๊ณ ๊ธฐ๋ฅผ ํ์ ๋ฐ ์ด๋์ํค๋ ํจ์
def move_all_fishes(array, now_x, now_y):
# 1๋ฒ๋ถํฐ 16๋ฒ๊น์ง์ ๋ฌผ๊ณ ๊ธฐ๋ฅผ ์ฐจ๋ก๋๋ก (๋ฎ์๋ฒํธ๋ถํฐ) ํ์ธ
for i in range(1, 17):
# ํด๋น ๋ฌผ๊ณ ๊ธฐ์ ์์น ์ฐพ๊ธฐ
position = find_fish(array, i)
if position != None: # ๋ฌผ๊ณ ๊ธฐ๊ฐ ์ ์ก์ ๋จนํ๋ค๋ฉด
x, y = position[0], position[1]
direction = array[x][y][1] # ํด๋น ๋ฌผ๊ณ ๊ธฐ๊ฐ ํฅํ ๋ฐฉํฅ์ ํ์ธ
# ํด๋น ๋ฌผ๊ณ ๊ธฐ์ ๋ฐฉํฅ์ ์ผ์ชฝ์ผ๋ก ๊ณ์ ํ์ ์ํค๋ฉฐ ์ด๋์ด ๊ฐ๋ฅํ์ง ํ์ธ
for _ in range(8):
nx = x + dx[direction]
ny = y + dy[direction]
# ํด๋น ๋ฐฉํฅ์ผ๋ก ์ด๋์ด ๊ฐ๋ฅํ๋ค๋ฉด ์ด๋์ํค๊ธฐ
if 0 <= nx and nx < 4 and 0 <= ny and ny < 4: # 4 x 4 ์ ์ฌ๊ฐํ์ ๋ฒ์ด๋์ง ์๊ณ ,
if not (nx == now_x and ny == now_y): # ๊ฐ๊ณ ์ ํ๋ ๋ฐฉํฅ์ ์์ด๊ฐ ์๋ค๋ฉด
array[x][y][1] = direction # ๋ฐฉํฅ์ด ์ ํ ๋๋ค๋ฉด ํด๋น ๋ฐฉํฅ์ผ๋ก ์ด๊ธฐํ
array[x][y], array[nx][ny] = array[nx][ny], array[x][y] # ๋ฌผ๊ณ ๊ธฐ๋ผ๋ฆฌ ์๋ฆฌ ๋ฐ๊ฟ
break
direction = turn_left(direction) # ์ ์ฌ๊ฐํ์ ๋ฒ์ด๋ฌ๊ฑฐ๋ ์์ด๊ฐ ์์๋ค๋ฉด ๋ฐฉํฅ ์ ํ
# ์์ด๋ฅผ ์ด๋์ํค๋ ํจ์
def get_possible_positions(array, now_x, now_y):
positions = []
direction = array[now_x][now_y][1] # ์์ด๊ฐ ํฅํ ๋ฐฉํฅ ํ์ธ
# ํ์ฌ์ ๋ฐฉํฅ์ผ๋ก ๊ณ์ ์ด๋์ํค๊ธฐ
for i in range(4):
now_x += dx[direction]
now_y += dy[direction]
# ๋ฒ์๋ฅผ ๋ฒ์ด๋์ง ์๋์ง ํ์ธํ๋ฉฐ
if 0 <= now_x and now_x < 4 and 0 <= now_y < 4:
# ๋ฌผ๊ณ ๊ธฐ๊ฐ ์กด์ฌํ๋ ๊ฒฝ์ฐ
if array[now_x][now_y][0] != -1:
positions.append((now_x, now_y)) # ๋ฌผ๊ณ ๊ธฐ๊ฐ ์๋ ์ขํ๋ฅผ ๋ฐํ
return positions
# ๋ชจ๋ ๊ฒฝ์ฐ๋ฅผ ํ์ํ๊ธฐ ์ํ DFS ํจ์
def dfs(array, now_x, now_y, total):
global result
array = deepcopy(array) # ๋ฆฌ์คํธ๋ฅผ ํต์งธ๋ก ๋ณต์ฌ
total += array[now_x][now_y][0] # ํ์ฌ ์์น์ ๋ฌผ๊ณ ๊ธฐ ๋จน๊ธฐ
array[now_x][now_y][0] = -1 # ๋ฌผ๊ณ ๊ธฐ๋ฅผ ๋จน์์ผ๋ฏ๋ก ๋ฒํธ ๊ฐ์ -1๋ก ๋ณํ
move_all_fishes(array, now_x, now_y) # ์ ์ฒด ๋ฌผ๊ณ ๊ธฐ ์ด๋์ํค๊ธฐ
# ์ด์ ๋ค์ ์์ด๊ฐ ์ด๋ํ ์ฐจ๋ก์ด๋ฏ๋ก, ์ด๋ ๊ฐ๋ฅํ ์์น ์ฐพ๊ธฐ
positions = get_possible_positions(array, now_x, now_y)
# ๋ ์ด์ ๋ฌผ๊ณ ๊ธฐ๋ฅผ ๋จน์ ์ ์๋ค๋ฉด
if len(positions) == 0:
result = max(result, total) # ์ต๋๊ฐ ์ ์ฅ
return
# ๋ชจ๋ ์ด๋ํ ์ ์๋ ์์น๋ก ์ฌ๊ท์ ์ํ
for next_x, next_y in positions:
dfs(array, next_x, next_y, total)
# ์ฒญ์๋
์์ด์ ์์ ์์น(0, 0)์์๋ถํฐ ์ฌ๊ท์ ์ผ๋ก ๋ชจ๋ ๊ฒฝ์ฐ ํ์
dfs(fish_array, 0, 0, 0)
print(result)
| raddaslul/basic_algoritm | hikers/adolescent_shark.py | adolescent_shark.py | py | 4,047 | python | ko | code | 0 | github-code | 36 |
71911570984 | # Import tools and libraries
import random
from words import words
import string # Import pre-dertermined list of uppercased characteres
# Getting a valid word with only letters from our WORDS list
def get_valid_word(words):
    """Pick a random word containing no dashes or spaces, in upper case."""
    while True:
        word = random.choice(words)
        if "-" not in word and " " not in word:
            return word.upper()
# Keep track of possible guesses and letters already guessed
def hangman():
    """Play one interactive round of hangman on stdin/stdout.

    The player starts with 7 lives; each alphabetic guess that is not in the
    word costs one life. Repeated or non-alphabetic guesses cost nothing.
    """
    # Add a counter for lives
    lives = 7
    word = get_valid_word(words) # Call get_valid_word function
    word_letters = set(word) # Set of letters in the word
    alphabet = set(string.ascii_uppercase) # Import pre-dertermined list
    used_letters = set() # Keep track of what user has guessed
    while len(word_letters) > 0 and lives > 0: # Loops until finds all letters
        # Join and print letters already used
        # NOTE(review): this prints every guessed letter, hits included, not
        # only the misses - confirm whether that is intended.
        print("*** Welcome to Hangman Game ***")
        print(f"You have {lives} lives left!")
        print("Misses: ", " ".join(used_letters))
        # Show what current word is
        word_l = [letter if letter in used_letters else "-" for letter in word]
        print("Secret Word: ", " ".join(word_l))
        # Getting user input
        user_letter = input("Your Guess:").upper() # User Input
        if user_letter in alphabet - used_letters:
            used_letters.add(user_letter) # Add valid letter
            if user_letter in word_letters:
                word_letters.remove(user_letter) # Remove letter from word
            else:
                lives = lives - 1 # Removes a life if wrong
                print("Letter is not in word.")
        elif user_letter in used_letters: # Check for repeated letters
            print("\nYou have already used this letter. Please try again: ")
        else: # Check for invalid characteres
            print("\nInvalid character. Please try again: ")
    # Print when the word is guessed correctly
    if lives == 0:
        print("You lost, sorry. The word was", word)
    elif len(word_letters) == 0:
        print("\nCongratulations ** YOU WON **")
        print("The word is", word)
# Run the game again: start a fresh round forever (interrupt the process to quit).
while True:
    # Call function for the game to start
    hangman()
| Luciano2712/game_hangman | run.py | run.py | py | 2,360 | python | en | code | 0 | github-code | 36 |
40961448639 | # coding: utf-8
import itertools
import re
from simpleai.search import (backtrack, CspProblem, LEAST_CONSTRAINING_VALUE,
min_conflicts, MOST_CONSTRAINED_VARIABLE)
# Answer length of each crossword slot; key = clue number plus orientation
# (H = horizontal across, V = vertical down).
largos = {
    '1H': 2, '2H': 3, '4H': 2, '5H': 2, '7H': 2, '8H': 2, '10H': 3, '11H': 2,
    '1V': 2, '2V': 2, '3V': 3, '4V': 2, '6V': 3, '7V': 2, '8V': 2, '9V': 2,
}
palabras = set(re.sub(r'[^\w] ', '', '''Este es un texto para sacar palabras y asi
emular las claves del diccionario expuesto en el ejercicio.
Artificial Intelligence (AI) is a big field, and this is a big book. We have tried to explore the
full breadth of the field, which encompasses logic, probability, and continuous mathematics;
perception, reasoning, learning, and action; and everything from microelectronic devices to
robotic planetary explorers. The book is also big because we go into some depth.
The subtitle of this book is โA Modern Approach.โ The intended meaning of this rather
empty phrase is that we have tried to synthesize what is now known into a common frame-
work, rather than trying to explain each subfield of AI in its own historical context. We
apologize to those whose subfields are, as a result, less recognizable.
How to use Machine Learning on a Very Complicated Problem
So far in Part 1, 2 and 3, weโve used machine learning to solve isolated problems that have only
one step โ estimating the price of a house, generating new data based on existing data and telling
if an image contains a certain object. All of those problems can be solved by choosing one machine
learning algorithm, feeding in data, and getting the result.
But face recognition is really a series of several related problems:
First, look at a picture and find all the faces in it
Second, focus on each face and be able to understand that even if a face is turned in a weird
direction or in bad lighting, it is still the same person.
Third, be able to pick out unique features of the face that you can use to tell it apart from other
peopleโ like how big the eyes are, how long the face is, etc.
Finally, compare the unique features of that face to all the people you already know to determine
the personโs name.
As a human, your brain is wired to do all of this automatically and instantly. In fact, humans are
too good at recognizing faces and end up seeing faces in everyday objects:
Computers are not capable of this kind of high-level generalization (at least not yetโฆ), so we have
to teach them how to do each step in this process separately.
We need to build a pipeline where we solve each step of face recognition separately and pass the
result of the current step to the next step. In other words, we will chain together several machine
learning algorithms:
''').lower().split())
variables = []
dominios = {}
for var, largo in largos.items():
    # Register the CSP variable.
    variables.append(var)
    # Instead of adding length constraints later, restrict each domain up
    # front to the vocabulary words of exactly the required length.
    dominios[var] = [x for x in palabras if len(x) == largo]
restricciones = []
def distinto_valor(variables, valores):
    """Binary constraint: the two variables must take different words."""
    first, second = valores
    return first != second
# Every pair of variables must take different words. Per the original author:
# the sample vocabulary may be too small to satisfy this constraint; comment
# it out or enlarge the source text above if no solution is found.
for var1, var2 in itertools.combinations(variables, 2):
    restricciones.append(((var1, var2), distinto_valor))
def interseccion(pos1, pos2):
    """Build a binary constraint requiring character *pos1* of the first word
    to equal character *pos2* of the second word (the crossing cell)."""
    def restriccion(variables, valores):
        primera, segunda = valores
        return primera[pos1] == segunda[pos2]
    return restriccion
# Add the crossing constraints: the shared cell of each pair must match.
restricciones.append((('1H', '1V'), interseccion(0, 0)))
restricciones.append((('2H', '2V'), interseccion(0, 0)))
restricciones.append((('2H', '3V'), interseccion(2, 0)))
restricciones.append((('4H', '4V'), interseccion(0, 0)))
restricciones.append((('4H', '2V'), interseccion(1, 1)))
restricciones.append((('5H', '4V'), interseccion(1, 1)))
restricciones.append((('7H', '7V'), interseccion(0, 0)))
restricciones.append((('8H', '8V'), interseccion(0, 0)))
restricciones.append((('8H', '7V'), interseccion(1, 1)))
restricciones.append((('6V', '10H'), interseccion(2, 0)))
restricciones.append((('10H', '8V'), interseccion(2, 1)))
restricciones.append((('11H', '9V'), interseccion(1, 1)))
problem = CspProblem(variables, dominios, restricciones)
print('backtrack:')
result = backtrack(problem,
                   variable_heuristic=MOST_CONSTRAINED_VARIABLE,
                   value_heuristic=LEAST_CONSTRAINING_VALUE,
                   inference=True)
# Top-left (row, column) of every slot in the 6x6 grid.
posiciones = {
    '1H': (0, 0), '2H': (0, 3), '4H': (1, 2), '5H': (2, 1), '7H': (3, 3),
    '8H': (4, 2), '10H': (5, 0), '11H': (5, 4),
    '1V': (0, 0), '2V': (0, 3), '3V': (0, 5), '4V': (1, 2), '6V': (3, 0),
    '7V': (3, 3), '8V': (4, 2), '9V': (4, 5),
}
posiciones_letras = {}
# Start from a grid full of black squares (U+25A0).
crucigrama = [['\u25A0'] * 6 for x in range(6)]
# Write each solved word into the grid, letter by letter, advancing along the
# column for H slots and along the row for V slots.
for palabra, (fila, columna) in posiciones.items():
    for letra in range(largos[palabra]):
        fila_letra = fila
        columna_letra = columna
        if palabra.endswith('H'):
            columna_letra += letra
        else:
            fila_letra += letra
        crucigrama[fila_letra][columna_letra] = result[palabra][letra]
print(result)
print('\n'.join(['| ' + ' | '.join(palabra) + ' |' for palabra in crucigrama]))
| ucse-ia/ucse_ia | practicas/crucigramas.py | crucigramas.py | py | 5,727 | python | en | code | 5 | github-code | 36 |
10834212692 | from turtle import Turtle
STARTING_POSITION = (0, -280)  # player's spawn point near the bottom of the screen
MOVE_DISTANCE = 20  # pixels the player advances per hop
FINISH_LINE_Y = 280  # y coordinate that counts as reaching the far side
class Player(Turtle):
    """Turtle the player controls: starts at the bottom and hops upward."""
    # create a turtle
    def __init__(self):
        """Create the player facing up (north) at the starting position."""
        super().__init__()
        self.shape('turtle')
        self.penup()  # don't draw a trail while moving
        self.shapesize(1)
        self.setheading(90)  # face north so forward() moves up the screen
        self.goto(STARTING_POSITION)
    # create a function for when the up arrow is pressed
    def up(self):
        """Hop one step toward the finish line."""
        self.forward(MOVE_DISTANCE)
    def reset(self):
        """Return to the starting position (e.g. after crossing the finish line)."""
        self.goto(STARTING_POSITION)
| joshrivera116/crossyRoad | player.py | player.py | py | 532 | python | en | code | 0 | github-code | 36 |
34638872728 | import requests
from bs4 import BeautifulSoup
""" https://www.youtube.com/watch?v=PzWIdcFY9YQ """
url = 'https://url.com/sitemap.xml'

# Collect every <loc> URL listed in the sitemap.
sitemapsoup = BeautifulSoup(requests.get(url).content, 'lxml')
sitemapurls = sitemapsoup.find_all("loc")
xml_urls = [sitemapurl.text for sitemapurl in sitemapurls]

count = 0   # pages that exposed a canonical link
cerror = 0  # pages that did not
# `with` guarantees the report file is closed even if a request raises.
with open("FILEPATH/data.txt", "w") as mydata:
    for websiteurls in xml_urls:
        source = BeautifulSoup(requests.get(websiteurls).text, 'html.parser')
        try:
            # BUG FIX: extract the canonical href *before* writing "yes!";
            # previously a page without one left a half-written "yes!\n"
            # record followed by the "no!" record.
            canonical = source.find('link', {'rel': 'canonical'})['href']
            count += 1
            mydata.write("yes!")
            mydata.write("\n")
            mydata.write(canonical)
            mydata.write("\n")
            print(count)
        except Exception:  # narrowed from a bare `except:` (no canonical tag)
            mydata.write("no!")
            mydata.write(websiteurls)
            cerror += 1
            print(cerror)
| martamc-sp/PythonforSEO | lessons/4-urls-canonical.py | 4-urls-canonical.py | py | 824 | python | en | code | 0 | github-code | 36 |
39013711189 | # https://www.acmicpc.net/problem/15649
# N๊ณผ M (1)
def seq(idx):
    """Print every length-M sequence of distinct numbers 1..N (BOJ 15649),
    building the module-level `arr` one position at a time."""
    # A full sequence of length M has been built: print it.
    if idx == M:
        print(*arr)
        return
    for i in range(1, N+1):
        # Recurse only on numbers not already used in this sequence.
        if not used[i]:
            # Place i at position idx and mark it as taken.
            arr[idx] = i
            used[i] = 1
            seq(idx+1)
            used[i] = 0
# Read N (value range 1..N) and M (sequence length), then start the search.
N, M = map(int, input().split())
arr = [0]*M  # current partial sequence
used = [0]*(N+1)  # used[i] == 1 while i is placed in the current sequence
seq(0)
31802052529 | # /usr/bin/python3.6
# -*- coding:utf-8 -*-
def get(stack):
    """Remove and return the BOTTOM element of *stack*, recursively.

    Prints "result:<bottom>" when the bottom element is reached."""
    top = stack.pop()
    if not stack:
        print("result:" + str(top))
        return top
    bottom = get(stack)
    stack.append(top)  # put everything except the bottom element back
    return bottom
def reverse_stack(stack):
    """Reverse *stack* in place using recursion only (no auxiliary list)."""
    if not stack:
        return
    bottom = get(stack)       # pull out the bottom element
    reverse_stack(stack)      # reverse what is left
    stack.append(bottom)      # the old bottom becomes the new top
def main():
    """Demo: reverse a stack of 0..9 and show it before and after."""
    stack = list(range(10))
    print(stack)
    reverse_stack(stack)
    print(stack)
# Run the demo only when executed directly.
if __name__ == '__main__':
    main()
| bobcaoge/my-code | python/face_programs/codes/03_usingrecursivefunctiontoreservestack.py | 03_usingrecursivefunctiontoreservestack.py | py | 544 | python | en | code | 0 | github-code | 36 |
11525663576 | import sqlite3
# Bulk-insert sample rows into the pre-existing `person` table of example.db.
con = sqlite3.connect('example.db')
cursor = con.cursor()
# (name, age, email) tuples; executemany binds one row per tuple via the
# parameterized placeholders below.
persons = [("kiran", 21, "kiran@gmail.com"),
           ("anu", 29, "anu@yahoo.com"),
           ("sathis", 65, "satish@rediff.com")]
cursor.executemany("INSERT INTO person values (?, ?, ?)", persons)
print(cursor.rowcount)  # number of rows inserted by the executemany call
con.commit()
con.close()
| avinash431/IntroductionToPython | databases/database-3.py | database-3.py | py | 325 | python | en | code | 0 | github-code | 36 |
4109037837 | from sys import stdin
input = stdin.readline  # fast reads for contest-style I/O

# nodes = vertex count, n = number of directed edges a -> b.
nodes, n = [int(x) for x in input().split()]
isEntrance = [1] * nodes  # becomes 0 once a vertex has an incoming edge
isDest = [1] * nodes  # becomes 0 once a vertex has an outgoing edge
connections = [[] for _ in range(nodes)]
for _ in range(n):
    a, b = [int(x) for x in input().split()]
    isEntrance[b] = 0
    isDest[a] = 0
    connections[a].append(b)
q = [[]]  # stack of BFS "levels"; level 0 holds every entrance vertex
counter = 0  # number of levels processed so far
people = 0  # count of edge traversals that arrive at a destination vertex
fast = [696969696969] * nodes  # sentinel; earliest level each destination is reached
for i in range(nodes):
    if isEntrance[i]:
        q[0].append(i)
while q:
    counter += 1
    queued = q.pop()
    added = []
    for node in queued:
        for next in connections[node]:
            if isDest[next]:
                people += 1
                fast[next] = min(fast[next], counter + 1)
            else:
                added.append(next)
    if added:
        q.append(added)
# NOTE(review): `people` counts arrivals at destinations modulo 1e9+7; the
# second line prints the earliest arrival level of each reached destination.
print(people % 1000000007)
print(" ".join([str(x) for x in fast if x != 696969696969]))
| AAZZAZRON/DMOJ-Solutions | tsoc15c2p4.py | tsoc15c2p4.py | py | 862 | python | en | code | 1 | github-code | 36 |
33911980837 | #!/usr/bin/env python3
import queries
import connection_handler
from IPython import embed
import mysql.connector
import grammar_format
from dotenv import load_dotenv
from managers.word_manager import Word_Manager
import re
load_dotenv()
class Phrase_Manager:
    """Parses a phrase typed by a user, decides whether it is a question or a
    'teach' command, and reads/writes phrase data through the MySQL helpers.

    Fixes applied versus the original:
    - check_exists_result contained a stray statement between ``return True``
      and ``else:`` (a syntax error); removed.
    - a second, one-argument ``learn_phrase`` stub shadowed the interactive
      one, breaking ``teach_phrase``; removed.
    - check_for_phrase / check_if_known discarded their results and called
      nonexistent ``phrase_known``/``phrase_not_known`` methods; they now
      return booleans.
    - update_phrase used an undefined local ``phrase`` and never opened a
      connection.
    - the static ``is_confirmation`` used an undefined ``cursor``; it now
      opens its own connection.
    """

    def __init__(self, phrase="None", person="None", person_manager="None"):
        self.person = person
        self.person_manager = person_manager
        lower_phrase = phrase.lower()
        result = lower_phrase.find("?")
        self.new_phrase = self.remove_bad_chars(lower_phrase)
        self.parsed_phrase = self.new_phrase.split()
        if lower_phrase == 'teach':
            self.teach_phrase()
        else:
            # A "?" anywhere in the raw input marks it as a question.
            if result == -1:
                print("That is a statement")
            else:
                self.get_question_format()

    def get_question_format(self):
        """Build and return a part-of-speech pattern for the parsed phrase."""
        question_format = ''
        for word in self.parsed_phrase:
            word_manager = Word_Manager(word)
            question_format += grammar_format.assign_part_of_speech(
                word_manager.word)
        # BUG FIX: the computed pattern used to be discarded (method always
        # returned None); return it so callers can use it.
        return question_format

    def check_for_assigning_attribute(self):
        """Persist an attribute assignment when the phrase is possessive."""
        if self.possessive and self.check_for_attribute():
            self.assign_attribute()

    def check_for_attribute(self):
        """Extract the attribute name before 'is'; True when the person has it."""
        attribute_reference = self.parsed_phrase.index("is")
        attribute_index = attribute_reference - 1
        self.attribute = self.parsed_phrase[attribute_index]
        if self.attribute == 'name':
            # "first name is ..." / "last name is ..." map onto the
            # first_name / last_name attributes.
            try:
                first_or_last = self.parsed_phrase[attribute_index - 1]
                self.attribute = first_or_last + "_" + self.attribute
            except Exception as e:
                print("Exception has occured 48: " + str(e))
        self.get_new_value()
        return hasattr(self.person, self.attribute)

    def get_new_value(self):
        """Capture the word after 'is' as the new attribute value."""
        self.new_value_index = self.parsed_phrase.index("is") + 1
        self.new_value = self.parsed_phrase[self.new_value_index]
        if self.attribute == 'first_name' or self.attribute == 'last_name':
            self.new_value = self.new_value.capitalize()

    def assign_attribute(self):
        """Write the new attribute value through the person manager."""
        self.person_manager.update_person(
            self.person.id, self.attribute, self.new_value)
        print("Updated!")

    def determine_if_possessive(self):
        """Set self.possessive: True when the first word is a stored
        possessive and the phrase contains 'is'."""
        self.establish_new_connection()
        word = self.parsed_phrase[0]
        try:
            self.cursor.execute(queries.check_possessive(word))
        except Exception as e:
            print("Exception has occured 40: " + str(e))
        result = self.cursor.fetchall()
        self.list_result = [list(i) for i in result]
        if 'is' in self.parsed_phrase:
            self.possessive = self.check_exists_result(self.list_result)
        else:
            # BUG FIX: self.possessive was left unset when the phrase
            # contained no 'is'.
            self.possessive = False

    def get_questions(self):
        """Fetch every stored question and print the result rows."""
        self.establish_new_connection()
        try:
            self.cursor.execute(queries.get_questions())
        except Exception as e:
            print("Exception has occured 40: " + str(e))
        result = self.cursor.fetchall()
        self.list_result = [list(i) for i in result]
        print("Results: " + str(self.list_result))

    def save_new_phrase(self, phrase=None):
        """Persist a phrase (defaults to self.phrase, as set by teach_phrase)
        and link it to the current person."""
        if phrase is None:
            phrase = self.phrase
        self.establish_new_connection()
        try:
            self.cursor.execute(queries.save_new_phrase(
                phrase, self.person.id))
            phrase_id = self.cursor.lastrowid
        except Exception as e:
            print("Exception has occured 54: " + str(e))
        self.cnx.commit()
        try:
            self.cursor.execute(queries.save_person_phrase_matrix(
                phrase_id, self.person.id))
        except Exception as e:
            print("Exception has occured 61: " + str(e))
        self.cnx.commit()
        self.cursor.close()
        self.cnx.close()

    def remove_bad_chars(self, phrase):
        """Strip punctuation characters we never want in a parsed phrase."""
        bad_chars = [';', ':', '!', "*", "?"]
        for i in bad_chars:
            phrase = phrase.replace(i, '')
        return phrase

    def teach_phrase(self):
        """Interactively ask for a new phrase; learn it unless already known."""
        self.phrase = input(
            f"What new phrase would you like to teach me?")
        if self.check_if_known():
            print(f"I already know the phrase {self.phrase}")
        else:
            self.learn_phrase()

    def learn_phrase(self):
        """Ask for the meaning of self.phrase and store it."""
        self.definition = input(
            f"What does the phrase {self.phrase} mean? ")
        print("Thanks! I'll remember that.")
        self.save_new_phrase()

    def check_if_known(self):
        """Return True when self.phrase is already in the database."""
        # BUG FIX: the original called nonexistent phrase_known /
        # phrase_not_known methods and returned None.
        return bool(self.check_for_phrase())

    def check_for_phrase(self):
        """Query the database; True when self.phrase already exists."""
        self.establish_new_connection()
        try:
            self.cursor.execute(queries.check_for_phrase(self.phrase))
        except Exception as e:
            print("Exception has occured 102: " + str(e))
        result = self.cursor.fetchall()
        # BUG FIX: the existence result used to be computed and discarded.
        return self.check_exists_result(result)

    def check_exists_result(self, result):
        """True when a COUNT(...)-style query in *result* found any rows."""
        result_list = [list(i) for i in result]
        number_returned = result_list[0][0]
        # BUG FIX: a stray self.update_phrase() call sat between `return True`
        # and `else:` here, which was a syntax error; removed.
        return int(number_returned) > 0

    def update_phrase(self):
        """Update the stored phrase for the current person."""
        # BUG FIX: cursor/cnx were used without a connection being opened.
        self.establish_new_connection()
        try:
            # BUG FIX: referenced an undefined local `phrase` and a
            # `person.person_id` attribute (other methods use person.id) --
            # TODO confirm against the Person model.
            self.cursor.execute(queries.update_phrase(
                self.phrase, self.person.id))
        except Exception as e:
            print("Exception has occured: 120 " + str(e))
        self.cnx.commit()
        self.cursor.close()
        self.cnx.close()

    def establish_new_connection(self):
        """Open a fresh MySQL connection, storing cnx and cursor on self."""
        connection = connection_handler.establish_connection()
        self.cnx = connection[0]
        self.cursor = connection[1]

    @staticmethod
    def is_confirmation(word_or_phrase):
        """True when word_or_phrase is registered as a confirmation."""
        # BUG FIX: this static method called the instance-level
        # establish_new_connection() and read an undefined `cursor`;
        # open a local connection instead.
        connection = connection_handler.establish_connection()
        cnx, cursor = connection[0], connection[1]
        try:
            cursor.execute(queries.check_for_confirmation(word_or_phrase))
        except Exception as e:
            print("Exception has occured 144: " + str(e))
        result = cursor.fetchall()
        return Phrase_Manager.confirmation_exists(result)

    @staticmethod
    def confirmation_exists(result):
        """True when the confirmation query in *result* found any rows."""
        result_list = [list(i) for i in result]
        number_returned = result_list[0][0]
        return int(number_returned) > 0
| aburk3/Brain | managers/phrase_manager.py | phrase_manager.py | py | 6,593 | python | en | code | 1 | github-code | 36 |
41907946588 | import torch
import torch.nn as nn
class CNN(nn.Module):
    """Small binary classifier: three depthwise-separable conv blocks feeding
    a two-layer fully-connected head with a sigmoid output."""

    def __init__(self):
        super(CNN, self).__init__()
        # Attribute names are kept so existing state_dicts still load.
        self.conv_1 = self._con_dw_sep(3, 16)
        self.conv_2 = self._con_dw_sep(16, 32)
        self.conv_3 = self._con_dw_sep(32, 64)
        self.fc1 = nn.Linear(10816, 512)  # 10816 = 64 * 13 * 13 feature map
        self.fc2 = nn.Linear(512, 1)
        self.dropout = nn.Dropout(0.5)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def _con_dw_sep(self, C_in, C_out):
        """Depthwise-separable block: 4x4 depthwise conv (groups=C_in),
        1x1 pointwise conv, ReLU, then 2x2 max-pooling."""
        return nn.Sequential(
            nn.Conv2d(C_in, C_in, kernel_size=4, groups=C_in),
            nn.Conv2d(C_in, C_out, kernel_size=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

    def forward(self, x):
        features = self.conv_3(self.conv_2(self.conv_1(x)))
        flat = features.view(-1, 10816)
        hidden = self.relu(self.fc1(self.dropout(flat)))
        logits = self.fc2(self.dropout(hidden))
        # squeeze() drops the trailing size-1 dim; note that a batch of one
        # collapses to a 0-d tensor here, matching the original behaviour.
        # (sigmoid is elementwise, so applying it after the squeeze is
        # equivalent to the original squeeze-then-sigmoid ordering.)
        return self.sigmoid(logits.squeeze()).float()
| CSID-DGU/2022-2-SCS4031-EZ_SW | age_prediction_model/model.py | model.py | py | 1,212 | python | en | code | 0 | github-code | 36 |
8525512026 | import tensorflow as tf
import os
import sys
import data_generation
import networks
import scipy.io as sio
import param
import util
import truncated_vgg
from keras.backend.tensorflow_backend import set_session
from keras.optimizers import Adam
import scipy.misc
def train(model_name, gpu_id):
    """Train the posewarp generator and periodically checkpoint weights.

    model_name -- checkpoint sub-directory created under params['model_save_dir'].
    gpu_id     -- device index exported through CUDA_VISIBLE_DEVICES.
    """
    with tf.Session() as sess:
        params = param.get_general_params()
        network_dir = params['model_save_dir'] + '/' + model_name
        # Creates models directory if not exist.
        if not os.path.isdir(network_dir):
            os.mkdir(network_dir)
        train_feed = data_generation.create_feed(params, params['data_dir'], 'train')
        test_feed = data_generation.create_feed(params, params['data_dir'], 'test')
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        # NOTE(review): this registers a *second* session with Keras while the
        # outer `sess` is used for summaries/graph reads -- confirm both are
        # intended to coexist.
        set_session(tf.Session(config=config))
        # Frozen VGG network used for the perceptual (activation) loss.
        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')
        model = networks.network_posewarp(params)
        model.compile(optimizer=Adam(lr=1e-4), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
        n_iters = params['n_training_iter']
        # NOTE(review): hard-coded Windows log path -- presumably developer-specific.
        summary_writer = tf.summary.FileWriter("D:\Proyectos\JEJU2018\Code\posewarp-cvpr2018\code\logs", graph=sess.graph)
        # Fixed batches kept for qualitative monitoring across iterations.
        tr_x, tr_y = next(train_feed)
        te_x, te_y = next(test_feed)
        # Prepare output directories if they don't exist.
        output_dir = '../output/' + model_name + '/'
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        scipy.misc.imsave('../output/tr_orig_image.png', tr_x[0][0, :, :, :])
        scipy.misc.imsave('../output/tr_targ_image.png', tr_y[0, :, :, :])
        scipy.misc.imsave('../output/te_orig_image.png', te_x[0][0, :, :, :])
        scipy.misc.imsave('../output/te_targ_image.png', te_y[0, :, :, :])
        for step in range(0, n_iters):
            x, y = next(train_feed)
            train_loss = model.train_on_batch(x, y)
            util.printProgress(step, 0, train_loss)
            # out = sess.run(conv, feed_dict={"input_1:0" : x[0]})
            # plt.matshow(out[0, :, :, 0])
            # plt.show()
            # Pull tensors by their graph names to log input/output/generated images.
            gen = tf.get_default_graph().get_tensor_by_name("loss/add_2_loss/lambda_5/add:0")
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
            out = tf.get_default_graph().get_tensor_by_name("in_img1:0")
            p_s = tf.get_default_graph().get_tensor_by_name("mask_src/truediv:0")
            # p_t = tf.get_default_graph().get_tensor_by_name("in_pose1:0")
            image_summary_1 = tf.summary.image('images', [inp[0, :, :, :], out[0, :, :, :], gen[0, :, :, :]],
                                               max_outputs=100)
            # image_summary_2 = tf.summary.image('pose', [tf.reduce_sum(p_s[0, :, :, :], 2, keepdims=True)], max_outputs=100)
            image_summary_1 = sess.run(image_summary_1,
                                       feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1], "in_pose1:0": x[2],
                                                  "mask_prior:0": x[3], "trans_in:0": x[4], "in_img1:0": y})
            # image_summary_2 = sess.run(image_summary_2, feed_dict={"in_img0:0" : x[0], "in_pose0:0" : x[1], "in_pose1:0" : x[2],
            #                                             "mask_prior:0" : x[3], "trans_in:0" : x[4], "in_img1:0" : y})
            summary_writer.add_summary(image_summary_1)
            # summary_writer.add_summary(image_summary_2)
            # NOTE(review): train_image/test_image are computed every step but
            # never used or saved -- dead work unless something was removed.
            train_image = sess.run(gen, feed_dict={"in_img0:0": tr_x[0], "in_pose0:0": tr_x[1], "in_pose1:0": tr_x[2],
                                                   "mask_prior:0": tr_x[3], "trans_in:0": tr_x[4], "in_img1:0": tr_y})
            test_image = sess.run(gen, feed_dict={"in_img0:0": te_x[0], "in_pose0:0": te_x[1], "in_pose1:0": te_x[2],
                                                  "mask_prior:0": te_x[3], "trans_in:0": te_x[4], "in_img1:0": te_y})
            if step > 0 and step % params['model_save_interval'] == 0:
                model.save_weights(network_dir + '/' + str(step) + '.h5')
if __name__ == "__main__":
    # CLI: posewarp_train.py <model_name> <gpu_id>
    if len(sys.argv) == 3:
        train(sys.argv[1], sys.argv[2])
    else:
        print("Need model name and gpu id as command line arguments.")
| TZebin/Deep-Learning-Camp-JEJU2018 | Code/posewarp-cvpr2018/code/posewarp_train.py | posewarp_train.py | py | 4,468 | python | en | code | 0 | github-code | 36 |
410387277 | """gistandard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.views.static import serve
from gistandard.settings import MEDIA_ROOT
import xadmin
from users.views_user import LoginView, IndexView, LogoutView
from system.views import SystemView
from adm.views import AdmView
from personal import views as personal_views
from personal import views_work_order as order
urlpatterns = [
    # Admin backend and user-uploaded media.
    url(r'^xadmin/', xadmin.site.urls),
    url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
    url(r'^$', IndexView.as_view(), name='index'),
    # User login / logout.
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', LogoutView.as_view(), name="logout"),
    # System administration (users, RBAC, tools).
    url(r'^system/$', SystemView.as_view(), name="system"),
    url(r'^system/basic/', include('users.urls', namespace='system-basic')),
    url(r'^system/rbac/', include('rbac.urls', namespace='system-rbac')),
    url(r'^system/tools/', include('system.urls', namespace='system-tools')),
    # Administrative management (BSM, equipment, assets).
    url(r'^adm/$', AdmView.as_view(), name="adm-main"),
    url(r'^adm/bsm/', include('adm.urls', namespace='adm-bsm')),
    url(r'^adm/equipment/', include('adm.urls_equipment', namespace='adm-equipment')),
    url(r'^adm/asset/', include('adm.urls_asset', namespace='adm-asset')),
    # Personal area: profile, avatar, password, phone book.
    url(r'^personal/$', personal_views.PersonalView.as_view(), name="personal"),
    url(r'^personal/userinfo', personal_views.UserInfoView.as_view(), name="personal-user_info"),
    url(r'^personal/uploadimage', personal_views.UploadImageView.as_view(), name="personal-uploadimage"),
    url(r'^personal/passwordchange', personal_views.PasswdChangeView.as_view(), name="personal-passwordchange"),
    url(r'^personal/phonebook', personal_views.PhoneBookView.as_view(), name="personal-phonebook"),
    # Work orders created by the current user (Icrt = "I created").
    url(r'^personal/workorder_Icrt/$', order.WorkOrderView.as_view(), name="personal-workorder_Icrt"),
    url(r'^personal/workorder_Icrt/list', order.WorkOrderListView.as_view(), name="personal-workorder-list"),
    url(r'^personal/workorder_Icrt/create', order.WorkOrderCreateView.as_view(), name="personal-workorder-create"),
    url(r'^personal/workorder_Icrt/detail', order.WorkOrderDetailView.as_view(), name="personal-workorder-detail"),
    url(r'^personal/workorder_Icrt/delete', order.WorkOrderDeleteView.as_view(), name="personal-workorder-delete"),
    url(r'^personal/workorder_Icrt/update', order.WorkOrderUpdateView.as_view(), name="personal-workorder-update"),
    # Work orders awaiting approval (app) and received (rec).
    url(r'^personal/workorder_app/$', order.WorkOrderView.as_view(), name="personal-workorder_app"),
    url(r'^personal/workorder_app/send', order.WrokOrderSendView.as_view(), name="personal-workorder-send"),
    url(r'^personal/workorder_rec/$', order.WorkOrderView.as_view(), name="personal-workorder_rec"),
    url(r'^personal/workorder_rec/execute', order.WorkOrderExecuteView.as_view(), name="personal-workorder-execute"),
    url(r'^personal/workorder_rec/finish', order.WorkOrderFinishView.as_view(), name="personal-workorder-finish"),
    url(r'^personal/workorder_rec/upload', order.WorkOrderUploadView.as_view(), name="personal-workorder-upload"),
    url(r'^personal/workorder_rec/return', order.WorkOrderReturnView.as_view(), name="personal-workorder-return"),
    url(r'^personal/workorder_Icrt/upload', order.WorkOrderProjectUploadView.as_view(),
        name="personal-workorder-project-upload"),
    # Overview of all work orders and attached documents.
    url(r'^personal/workorder_all/$', order.WorkOrderView.as_view(), name="personal-workorder_all"),
    url(r'^personal/document/$', order.WorkOrderDocumentView.as_view(), name="personal-document"),
    url(r'^personal/document/list', order.WorkOrderDocumentListView.as_view(), name="personal-document-list"),
]
| RobbieHan/gistandard | gistandard/urls.py | urls.py | py | 4,296 | python | en | code | 546 | github-code | 36 |
def fibonacci(n):
    """Return the last decimal digit of the n-th Fibonacci number.

    Uses the Pisano period: Fibonacci numbers mod 10 repeat with period
    60, so only n % 60 iterations are ever needed -- O(1) time instead
    of O(n) for huge n.  fibonacci(0) == 0, fibonacci(1) == 1.
    """
    a, b = 0, 1
    for _ in range(n % 60):  # 60 = Pisano period for modulus 10
        a, b = b, (a + b) % 10
    return a
n = int(input())
print(fibonacci(n)) | calkikhunt/algorithmic-toolbox | fibonacci_last_digit.py | fibonacci_last_digit.py | py | 184 | python | en | code | 0 | github-code | 36 |
18844787736 | import sys,time,unittest
from selenium.webdriver.common.by import By
from selenium import webdriver
sys.path.append(".//")
sys.path.append(sys.path[0].split("ATQ้กน็ฎ")[0] + 'ATQ้กน็ฎ\\02.ๆนๆณๆจกๅ')
import Function_temp as F
class ATQtest(unittest.TestCase):
#driverๅ
จๅฑๅ้
option =webdriver.FirefoxOptions()
option.set_headless()
dr1 = webdriver.Firefox(firefox_options=option)#่ฎพ็ฝฎๅฏน่ฑก
dr = webdriver.Firefox()#่ฎพ็ฝฎๅฏน่ฑก
F.driver=dr
def setUp(self,driver = dr):
self.driver =driver#่ฎพ็ฝฎๅฏน่ฑก
self.base_url = "http://11.8.127.248:8080/atq/frame.jsp"#็ฝๅ
self.username='sunhongbin'#็ปๅฝ็จๆทๅ
def test_01_login(self):
#data
print("็ปๅฝATQ๏ผ")
username = self.username
#page_element
็จๆทๅ่พๅ
ฅๆก=(By.NAME,'loginId')
ๅฏ็ ่พๅ
ฅๆก=(By.NAME,'password')
็ปๅฝๆ้ฎ=(By.LINK_TEXT,'็ปๅฝ')
#Script
driver = self.driver
self.driver.get(self.base_url)
F.find_element(็จๆทๅ่พๅ
ฅๆก).clear()
F.find_element(็จๆทๅ่พๅ
ฅๆก).send_keys(username)
F.find_element(ๅฏ็ ่พๅ
ฅๆก).send_keys('123456')
F.find_element(็ปๅฝๆ้ฎ).click()
#็ญๅพ
time.sleep(3)
def test_02_case_import(self):
#data
print("่ฟๅ
ฅๆกไพ็ฎก็้กต้ข๏ผ")
driver = self.driver
#case_path=self.case_path
#page
ๆกไพ็ฎก็่ๅ=(By.XPATH,"//span[contains(text(),'ๆกไพ็ฎก็')]")
้ๆฑ็ฎก็่ๅ=(By.XPATH,"//span[contains(text(),'้ๆฑ็ฎก็')]")
xf=(By.XPATH,"/html/body/div[3]/div/div/div[2]/div[2]/div/iframe")
ๅ ่ฝฝๆ็คบ=(By.XPATH,"//div[contains(text(),'Loading')]")
ๅฏผๅ
ฅๆกไพๆ้ฎ=(By.XPATH,"//span[text()='ๅฏผๅ
ฅๆกไพ']")
#Script
print("ๅฏผๅ
ฅๆกไพ๏ผ")
F.find_element(ๆกไพ็ฎก็่ๅ).click()
time.sleep(2)
#ๅๆขiframe้กต้ข
#driver.switch_to.frame(F.find_element(xf).find_element())
F.switch_to.frame(xf)
print("ๅๆขๆๅ๏ผ")
time.sleep(5)
if F.find_element(้ๆฑ็ฎก็่ๅ):
F.find_element(้ๆฑ็ฎก็่ๅ).highlight()
print(F.find_element(้ๆฑ็ฎก็่ๅ))
print("ๅคฑ่ดฅ")
else:
print("ๆๅ")
print("็ญๅพ
ใๆกไพ็ฎก็_ๆกไพๅ่กจใ้กต้ขๅ ่ฝฝ...")
print(F.find_elements(ๅ ่ฝฝๆ็คบ))
for i in range(0,30):
refresh=F.find_elements(ๅ ่ฝฝๆ็คบ).find_elements()
print()
if len(refresh)<2:
print("ใๆกไพ็ฎก็_ๆกไพๅ่กจใ้กต้ขๅ ่ฝฝๅฎๆ๏ผ")
F.find_element(ๅฏผๅ
ฅๆกไพๆ้ฎ).click()
break
else:
print(i)
time.sleep(3)
time.sleep(2)
#ไธไผ ๆ้ฎ
upload=driver.find_element_by_id("upload")
upload.send_keys(case_path)
value=upload.get_attribute('value')
if value!="":
print("ๆไปถไธไผ ๆๅ๏ผ")
driver.find_element_by_xpath("//span[text()='็กฎๅฎ']").click()
time.sleep(2)
#ๅคๆญ้กต้ข็ๅ
็ด ๆฃๆฅๆฏๅฆๆญฃๅจๅฏผๅ
ฅ๏ผ้ป่ฎคๅ
็ญ30s
if len(driver.find_elements_by_xpath("//div[contains(text(),'ๅฏผๅ
ฅไธญ๏ผ่ฏท็จๅ')]"))>0:
print("ๅฏผๅ
ฅไธญ๏ผ่ฏท่ๅฟ็ญๅ...")
time.sleep(30)
#30sๅ้่ฟๅคๆญๅฏผๅ
ฅ็ปๆ็ๅผนๅบ็ชๅฃๅคๆญๆฏๅฆๅฏผๅ
ฅๅฎๆฏ๏ผๅฆๆๆฒกๆๆพๅฐ็ชๅฃๅ็ญๅพ
็ปง็ปญๅฏปๆพ็ชๅฃ๏ผ็ด่ณๅฏปๆพๆๅ
#ๅๅฐไธปframe้กต
driver.switch_to.default_content()
for i in range(0,100):
try:
text=driver.find_element_by_xpath("/html/body/div[10]/div[2]").text
print("ๆกไพๅฏผๅ
ฅๅฎๆฏ๏ผ")
break
except:
time.sleep(0.01)
time.sleep(5)
#้่ฟๅคๆญLoadingๅ
็ด ๏ผ็ฎๅฝๆ ้กต้ขๆฏๅฆๅ ่ฝฝๆๅ๏ผๅฆๆๆชๅ ่ฝฝๆๅๅ็ญๅพ
2s๏ผๅๅคๅพช็ฏ
self.driver.switch_to.frame(xf)
print("็ญๅพ
ใๆกไพ็ฎก็_ๆกไพ็ฎๅฝใ้กต้ขๅ ่ฝฝ...")
for i in range(0,100):
refresh= driver.find_elements_by_xpath("//div[contains(text(),'Loading')]")
if len(refresh)>0:
time.sleep(2)
else:
print("ใๆกไพ็ฎก็_ๆกไพ็ฎๅฝใ้กต้ขๅ ่ฝฝๅฎๆ๏ผ")
break
#ๅๅปบexcelๅฏน่ฑกๅนถๅexcelไธญ็ๆฐๆฎๅปๅคๆญ็ฎๅฝๆ ๅฏนๅบ็็ฎๅฝๆฏๅฆๅทฒ็ปๅญๅจ๏ผไปฅๆญคๅคๆญๆกไพๆฏๅฆๅฏผๅ
ฅๆๅ
ex=openpyxl.load_workbook(case_path)
sh=ex[ex.sheetnames[0]]
print("ๆกไพ็ฎๅฝๆฃๆฅ...")
if self.isElementExist("by.xpath","//span[text()='"+sh['C2'].value+"']/../../../following-sibling::tr[1]//span[text()='"+sh['D2'].value+"']"):
print("ๆกไพ็ฎๅฝๆฃๆฅๅฎๆฏ๏ผๆกไพ็ฎๅฝๅญๅจ๏ผๆกไพๅฏผๅ
ฅๆๅ๏ผ")
else:print("ๆกไพ็ฎๅฝๆฃๆฅๅฎๆฏ๏ผๆชๅ็ฐๆกไพ็ฎๅฝ๏ผๆกไพๅฏผๅ
ฅๅคฑ่ดฅ๏ผ")
#ๅๅฐไธปframe้กต
driver.switch_to.default_content()
time.sleep(5)
def tearDown(self):
pass
#self.driver.quit()#่ฟ้ๆๅคไธชtest้่ฆ็จๅฐdriver๏ผๆไปฅtearDownไธญ๏ผไธ่ฆๅ
ณ้ญๆต่งๅจ
if __name__ == "__main__":
unittest.main() | cainiaosun/study | ๆต่ฏ/UI่ชๅจๅ/ๆต่ฏๅทฅๅ
ท__Selenium/selenium/Selenium/ATQ้กน็ฎ/01.่ๆฌๆไปถ/็ปๅฝ.py | ็ปๅฝ.py | py | 5,365 | python | zh | code | 0 | github-code | 36 |
16989917132 | from logging import Logger
from typing import List
from pypika import Table # type: ignore
from pypika import PostgreSQLQuery as Q
from app.models.mart import engine_mart
from app.models.askue import AccountPoint
from app.models.mart import RegPointModel, RsPointModel, BalanceModel, BalanceRegModel
from sqlalchemy.engine import Transaction
from pypika.functions import Max # type: ignore
class DalMart:
    """DAL (data-access layer) for the Data Mart database objects."""

    def __init__(self, logger: Logger):
        self._logger = logger

    def get_max_rv_point_list(self, point_table: str) -> int:
        """Return the maximum `rv` (row version) stored in point_table.

        Returns 0 for an empty table or on any query error.
        """
        rv = 0
        try:
            p = Table(point_table)
            q = (Q.from_(p)
                 .select(Max(p.rv)))
            sql = q.get_sql()
            self._logger.debug(f'SQL: {sql}')
            rv = engine_mart.scalar(sql)
            if rv is None:
                # Empty table: treat "no rows" as version 0.
                rv = 0
        except Exception as e:
            self._logger.error(e)
        return rv

    def insert_points(self, points: List[AccountPoint], dest_table: str) -> None:
        """Upsert AccountPoint records into dest_table.

        For 'askue_reg_point' the DisplayName is parsed as a 4-part
        backslash-separated name (res\\fes\\ps\\vl); any other table is
        treated as a distribution-network ('rs') point with a 6-part
        name plus extra attributes.  Malformed names are logged and
        skipped; on a write error the whole transaction is rolled back.
        """
        con = engine_mart.connect()
        self._logger.debug(f'DalMart.insert_point() dest_table:{dest_table}')
        if dest_table == 'askue_reg_point':
            data_result: List[RegPointModel] = []
            tran: Transaction = con.begin()
            try:
                for p in points:
                    reg_string = p.DisplayName.split('\\')
                    if len(reg_string) < 4:
                        self._logger.warning(f"Имя объекта ({p.DisplayName}) не соответствует формату")
                        continue
                    reg_object = RegPointModel(id_point=p.Id, display_name=p.DisplayName, res=reg_string[0],
                                               fes=reg_string[1], ps=reg_string[2], vl=reg_string[3], rv=p.Rv)
                    data_result.append(reg_object)
            except Exception as e:
                self._logger.error(f'convert to model failed {e}')
            try:
                for elem in data_result:
                    d = Table(dest_table)
                    # Upsert: insert, or update every column on id conflict.
                    q = Q.into(d).insert(elem.Id, elem.DisplayName, elem.Res, elem.Fes, elem.Ps, elem.Vl, elem.Rv) \
                        .on_conflict(d.id) \
                        .do_update(d.object_name, elem.DisplayName) \
                        .do_update(d.fes, elem.Fes) \
                        .do_update(d.res, elem.Res) \
                        .do_update(d.ps, elem.Ps) \
                        .do_update(d.vl, elem.Vl) \
                        .do_update(d.rv, elem.Rv)
                    sql = q.get_sql()
                    self._logger.debug(f'SQL: {sql}')
                    con.execute(sql)
                tran.commit()
            except Exception as e:
                self._logger.error(f'DalMart.insert_point() {e}')
                tran.rollback()
        else:
            data_result: List[RsPointModel] = []
            tran: Transaction = con.begin()
            try:
                for p in points:
                    rs_string = p.DisplayName.split('\\')
                    if len(rs_string) < 6:
                        self._logger.warning(f"Имя объекта ({p.DisplayName}) не соответствует формату")
                        continue
                    rs_object = RsPointModel(id_point=p.Id, display_name=p.DisplayName, res=rs_string[0],
                                             fes=rs_string[1], ps=rs_string[2], vl=rs_string[3], tp=rs_string[4],
                                             sch=rs_string[5], ktt=p.Ktt, str_ra=p.Str_ra, rxx=p.Rxx, region=p.Locality,
                                             number_point=p.Number_point, driver=p.Driver, rv=p.Rv, country=p.Country)
                    data_result.append(rs_object)
            except Exception as e:
                self._logger.error(f'convert to model failed {e}')
            try:
                for elem in data_result:
                    d = Table(dest_table)
                    # Upsert: insert, or update every column on id conflict.
                    q = Q.into(d).insert(elem.Id, elem.DisplayName, elem.Res, elem.Fes, elem.Ps, elem.Vl,
                                         elem.Tp, elem.Sch, elem.Rv, elem.Str_ra, elem.Rxx, elem.Ktt, elem.Region,
                                         elem.Number_point, elem.Driver, elem.Country) \
                        .on_conflict(d.id) \
                        .do_update(d.object_name, elem.DisplayName) \
                        .do_update(d.fes, elem.Fes) \
                        .do_update(d.res, elem.Res) \
                        .do_update(d.ps, elem.Ps) \
                        .do_update(d.vl, elem.Vl) \
                        .do_update(d.tp, elem.Tp) \
                        .do_update(d.sch, elem.Sch) \
                        .do_update(d.rv, elem.Rv) \
                        .do_update(d.str_ra, elem.Str_ra) \
                        .do_update(d.rxx, elem.Rxx) \
                        .do_update(d.ktt, elem.Ktt) \
                        .do_update(d.locality, elem.Region) \
                        .do_update(d.number_point, elem.Number_point) \
                        .do_update(d.driver, elem.Driver) \
                        .do_update(d.country, elem.Country)
                    sql = q.get_sql()
                    self._logger.debug(f'SQL: {sql}')
                    con.execute(sql)
                tran.commit()
            except Exception as e:
                self._logger.error(f'DalMart.insert_point() {e}')
                tran.rollback()

    def read_rs_points(self) -> List[RsPointModel]:
        """Read all distribution-network ('rs') accounting points.

        :return: list of accounting points (empty on error)
        """
        p = Table('askue_rs_point', alias='p')
        q = (Q.from_(p)
             .select(p.id, p.object_name, p.fes, p.res, p.ps, p.vl, p.tp, p.sch, p.rv, p.str_ra, p.rxx, p.ktt,
                     p.locality, p.number_point, p.driver, p.country))
        sql_query = q.get_sql()
        return_values: List[RsPointModel] = []
        try:
            self._logger.debug(f'SQL: {sql_query}')
            result = engine_mart.execute(sql_query)
            for row in result:
                data = RsPointModel(id_point=row['id'], display_name=row['object_name'], fes=row['fes'], res=row['res'],
                                    ps=row['ps'], vl=row['vl'], tp=row['tp'], sch=row['sch'], rv=row['rv'],
                                    str_ra=row['str_ra'], rxx=row['rxx'], ktt=row['ktt'], region=row['locality'],
                                    number_point=row['number_point'], driver=row['driver'], country=row['country'])
                return_values.append(data)
        except Exception as e:
            self._logger.error(e)
        return return_values

    def read_reg_points(self) -> List[RegPointModel]:
        """Read all regional ('reg') accounting points.

        (The original docstring was copy-pasted from read_rs_points.)
        :return: list of accounting points (empty on error)
        """
        p = Table('askue_reg_point', alias='p')
        q = (Q.from_(p)
             .select(p.id, p.object_name, p.fes, p.res, p.ps, p.vl, p.rv))
        sql_query = q.get_sql()
        return_values: List[RegPointModel] = []
        try:
            self._logger.debug(f'SQL: {sql_query}')
            result = engine_mart.execute(sql_query)
            for row in result:
                data = RegPointModel(row['id'], row['object_name'], row['fes'], row['res'], row['ps'], row['vl'],
                                     row['rv'])
                return_values.append(data)
        except Exception as e:
            self._logger.error(e)
        return return_values

    def insert_balance_calc(self, points: List[BalanceModel]):
        """Upsert all balance calculations into the database."""
        con = engine_mart.connect()
        self._logger.debug("insert_balance_calc()... start")
        tran: Transaction = con.begin()
        try:
            for elem in points:
                d = Table('calc_balance')
                # Upsert: insert, or update every column on id conflict.
                q = Q.into(d).insert(elem.Id, elem.Id_tu, elem.Dtp, elem.Locality, elem.NameOfAccountingPoint,
                                     elem.STrRa,
                                     elem.Pxx,
                                     elem.LossXX, elem.Ktt, elem.HeadOfCounter, elem.StartPeriod,
                                     elem.QSlim, elem.Time_Start_Write, elem.Country, elem.Driver) \
                    .on_conflict(d.id) \
                    .do_update(d.id_tu, elem.Id_tu) \
                    .do_update(d.dtp, elem.Dtp) \
                    .do_update(d.locality, elem.Locality) \
                    .do_update(d.name_of_accounting_point, elem.NameOfAccountingPoint) \
                    .do_update(d.str_ra, elem.STrRa) \
                    .do_update(d.pxx, elem.Pxx) \
                    .do_update(d.loss_xx, elem.LossXX) \
                    .do_update(d.ktt, elem.Ktt) \
                    .do_update(d.head_of_counter, elem.HeadOfCounter) \
                    .do_update(d.start_period, elem.StartPeriod) \
                    .do_update(d.q_slim, elem.QSlim) \
                    .do_update(d.time_start_write, elem.Time_Start_Write) \
                    .do_update(d.country, elem.Country) \
                    .do_update(d.driver, elem.Driver)
                sql = q.get_sql()
                self._logger.debug(f'SQL: {sql}')
                con.execute(sql)
            tran.commit()
        except Exception as e:
            self._logger.error(f'DalMart.insert_balance_calc() {e}')
            tran.rollback()

    def insert_balance_reg_calc(self, points: List[BalanceRegModel]):
        """Upsert all regional balance calculations into the database."""
        con = engine_mart.connect()
        # NOTE(review): log message reuses insert_balance_calc's text.
        self._logger.debug("insert_balance_calc()... start")
        tran: Transaction = con.begin()
        try:
            for elem in points:
                d = Table('calc_reg_balance')
                q = Q.into(d).insert(elem.Id, elem.Id_tu, elem.StartPeriod, elem.Time_Start_Write) \
                    .on_conflict(d.id) \
                    .do_update(d.id_tu, elem.Id_tu) \
                    .do_update(d.start_period, elem.StartPeriod) \
                    .do_update(d.time_start_write, elem.Time_Start_Write)
                sql = q.get_sql()
                self._logger.debug(f'SQL: {sql}')
                con.execute(sql)
            tran.commit()
        except Exception as e:
            self._logger.error(f'DalMart.insert_balance_reg_calc() {e}')
            tran.rollback()
73223886825 | #!/usr/bin/env python3
import argparse
import cv2
import pic
import sys
import time
from PIL import *
def clearscreen(n):
    """Erase the last *n* terminal lines using ANSI escape sequences."""
    erase_one = '\033[1A\033[K'  # cursor-up one line + erase-line
    sys.stdout.write(erase_one * n)
def main(filename, resize, colors=None, webcam=False, invert=False,
         scale=(1, 1), nosleep=False):
    """Play a video file (or webcam stream) as emoji art in the terminal.

    filename -- path/index accepted by cv2.VideoCapture.
    resize   -- target size passed through to EmojiImage.make().
    webcam/nosleep -- when either is set, frames are shown as fast as
    possible instead of pacing to the source frame rate.
    """
    vc = cv2.VideoCapture(filename)
    tpf = 1.0/vc.get(cv2.CAP_PROP_FPS)  # seconds per frame of the source
    ei = pic.EmojiImage(colors=colors, invert=invert, scale=scale)
    rval = False
    height = 0
    # Get the first frame to read the properties.
    # NOTE(review): assumes the first read() succeeds when isOpened() is
    # true; a failed first read leaves `frame` as None -- confirm upstream.
    if vc.isOpened():
        rval, frame = vc.read()
        ei.fromarray(frame)
        res, height = ei.make(resize)
        print(res, end='')
    while rval:
        start = time.time()
        # Rewind the cursor over the previous frame before printing the next.
        clearscreen(height*scale[1])
        rval, frame = vc.read()
        if rval:
            ei.fromarray(frame)
            res, height = ei.make(resize)
            print(res, end='')
        # determine if we need to sleep. Not really that accurate, but i'm
        # lazy and this is good enough.
        diff = time.time()-start
        if webcam is False and nosleep is False and diff < tpf:
            time.sleep(tpf-diff)
    vc.release()
| bahorn/emojipic | emojipic/ani.py | ani.py | py | 1,143 | python | en | code | 1 | github-code | 36 |
30478416347 | import pprint
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_ranking as tfr
import tensorflow_recommenders as tfrs
import collections
class RankingModel(tfrs.Model):
    """Ranking model that scores candidates from precomputed embeddings.

    loss -- a TF-Ranking loss object handed to the tfrs Ranking task.
    """

    def __init__(self, loss):
        super().__init__()
        # Compute predictions.
        self.score_model = tf.keras.Sequential([
            # Learn multiple dense layers.
            tf.keras.layers.Dense(256, activation="relu"),
            tf.keras.layers.Dense(64, activation="relu"),
            # Make rating predictions in the final layer.
            tf.keras.layers.Dense(1)
        ])
        # Ranking task evaluated with NDCG and RMSE.
        self.task = tfrs.tasks.Ranking(
            loss=loss,
            metrics=[
                tfr.keras.metrics.NDCGMetric(name="ndcg_metric"),
                tf.keras.metrics.RootMeanSquaredError()
            ]
        )

    def call(self, features):
        # Score each candidate from its "embeddings" feature.
        return self.score_model(features['embeddings'])

    def compute_loss(self, features, training=False):
        # "ranking" holds the relevance labels; the scorer outputs shape
        # (..., 1), so squeeze the trailing axis to match the labels.
        labels = features.pop("ranking")
        scores = self(features)
        return self.task(
            labels=labels,
            predictions=tf.squeeze(scores, axis=-1),
        )
FIRST_SECTOR = "A"


def print_seats(last_sector, first_sector_rows, odd_places):
    """Print every seat label sector-by-sector and return the seat count.

    Sectors run from 'A' to last_sector inclusive; each sector has one
    more row than the previous one.  Odd rows get odd_places seats, even
    rows get odd_places + 2; seats are labelled 'a', 'b', ...  Output
    order and format are identical to the original script.
    """
    total = 0
    rows_in_sector = first_sector_rows
    for sector_code in range(ord(FIRST_SECTOR), ord(last_sector) + 1):
        for row in range(1, rows_in_sector + 1):
            places = odd_places + 2 if row % 2 == 0 else odd_places
            for place in range(1, places + 1):
                total += 1
                # chr(place + 96): 1 -> 'a', 2 -> 'b', ...
                print(f"{chr(sector_code)}{row}{chr(place + 96)}")
        rows_in_sector += 1
    return total


if __name__ == "__main__":
    # Read the three inputs and run; guarding makes the module importable
    # (and testable) without blocking on stdin.
    last_sector = input()
    first_sector_rows_count = int(input())
    odd_places_count = int(input())
    print(print_seats(last_sector, first_sector_rows_count, odd_places_count))
#ะะปะฐะดะพะถะตะฝัะธัะต ะธัะบะฐั ะดะฐ ะฝะฐะฟัะฐะฒัั ัะฟะธััะบ ะบะพะน ะฝะฐ ะบะพะต ะผัััะพ ัะต ัะตะดะธ ะฝะฐ ัะฒะฐัะฑะตะฝะฐัะฐ ัะตัะตะผะพะฝะธั.
#ะะตััะฐัะฐ ัะฐ ัะฐะทะดะตะปะตะฝะธ ะฝะฐ ัะฐะทะปะธัะฝะธ ัะตะบัะพัะธ. ะกะตะบัะพัะธัะต ัะฐ ะณะปะฐะฒะฝะธัะต ะปะฐัะธะฝัะบะธ ะฑัะบะฒะธ ะบะฐัะพ ะทะฐะฟะพัะฒะฐั ะพั A.
#ะัะฒ ะฒัะตะบะธ ัะตะบัะพั ะธะผะฐ ะพะฟัะตะดะตะปะตะฝ ะฑัะพะน ัะตะดะพะฒะต. ะั ะบะพะฝะทะพะปะฐัะฐ ัะต ัะตัะต ะฑัะพัั ะฝะฐ ัะตะดะพะฒะตัะต ะฒ ะฟััะฒะธั ัะตะบัะพั (A),
#ะบะฐัะพ ะฒัะฒ ะฒัะตะบะธ ัะปะตะดะฒะฐั ัะตะบัะพั ัะตะดะพะฒะตัะต ัะต ัะฒะตะปะธัะฐะฒะฐั ั 1. ะะฐ ะฒัะตะบะธ ัะตะด ะธะผะฐ ะพะฟัะตะดะตะปะตะฝ ะฑัะพะน ะผะตััะฐ -
#ััั
ะฝะฐัะฐ ะฝะพะผะตัะฐัะธั ะต ะฟัะตะดััะฐะฒะตะฝะฐ ั ะผะฐะปะบะธัะต ะปะฐัะธะฝัะบะธ ะฑัะบะฒะธ. ะัะพั ะฝะฐ ะผะตััะฐัะฐ ะฝะฐ ะฝะตัะตัะฝะธัะต ัะตะดะพะฒะต ัะต ะฟัะพัะธัะฐ ะพั ะบะพะฝะทะพะปะฐัะฐ,
#ะฐ ะฝะฐ ัะตัะฝะธัะต ัะตะดะพะฒะต ะผะตััะฐัะฐ ัะฐ ั 2 ะฟะพะฒะตัะต.
#ะั
ะพะด
#ะั ะบะพะฝะทะพะปะฐัะฐ ัะต ัะตัaั 3 ัะตะดะฐ:
#ะะพัะปะตะดะฝะธั ัะตะบัะพั ะพั ัะตะบัะพัะธัะต - ัะธะผะฒะพะป (B-Z)
#ะัะพัั ะฝะฐ ัะตะดะพะฒะตัะต ะฒ ะฟััะฒะธั ัะตะบัะพั - ััะปะพ ัะธัะปะพ (1-100)
#ะัะพัั ะฝะฐ ะผะตััะฐัะฐ ะฝะฐ ะฝะตัะตัะตะฝ ัะตะด - ััะปะพ ัะธัะปะพ (1-24)
#ะะทั
ะพะด
#ะะฐ ัะต ะพัะฟะตัะฐัะฐ ะฝะฐ ะบะพะฝะทะพะปะฐัะฐ ะฒััะบะพ ะผัััะพ ะฝะฐ ะพัะดะตะปะตะฝ ัะตะด ะฟะพ ัะปะตะดะฝะธั ัะพัะผะฐั:
#{ัะตะบัะพั}{ัะตะด}{ะผัััะพ}
#ะะฐะบัะฐั ัััะฑะฒะฐ ะดะฐ ะพัะฟะตัะฐัะฐ ะฑัะพั ะฝะฐ ะฒัะธัะบะธ ะผะตััะฐ.
#ะั
ะพะด ะะทั
ะพะด
#B A1a
#3 A1b
#2 A2a
# A2b
# A2c
# A2d
# A3a
# A3b
# B1a
# B1b
# B2a
# B2b
# B2c
# B2d
# B3a
# B3b
# B4a
# B4b
# B4c
# B4d
# 20
| ivoivanov0830006/1.1.Python_BASIC | 6.Nested_loops/**06.Wedding_seats.py | **06.Wedding_seats.py | py | 2,824 | python | bg | code | 1 | github-code | 36 |
43509623765 | #!/usr/bin/env python3
"""
Fake module for testing.
Imitiates link-parser bindings.
"""
__author__ = "Mark Birger"
__date__ = "4 Apr 2015"
def parse(string):
    """Return a canned parse result for the handful of known test sentences.

    Unknown sentences yield an empty parse: {'links': [], 'words': []}.
    A fresh dict is built on every call so callers may mutate the result.
    """
    canned = {
        "Hello world": {
            'links': [[0, 2, 'Wa'], [1, 2, 'AN']],
            'words': ['LEFT-WALL', 'hello.n', 'world.n']},
        "Another string for testing": {
            'links': [[0, 4, 'Wa'], [2, 4, 'AN']],
            'words': ['LEFT-WALL', '[Another]', 'string.s', '[for]', 'testing.n-u']},
        "word is word": {
            'links': [[0, 3, 'Wa'], [1, 3, 'AN']],
            'words': ['LEFT-WALL', 'word.n', '[is]', 'word.n']},
        "my name is Mark and i do like cats": {
            'links': [[0, 5, 'WV'], [0, 2, 'Wd'], [2, 5, 'Ss'], [1, 2, 'Ds**c'], [3, 5, 'VJlsi'],
                      [3, 4, 'Osm'], [5, 8, 'MVp'], [5, 7, 'VJrsi'], [8, 9, 'Jp']],
            'words': ['LEFT-WALL', 'my.p', 'name.n', 'is.v', 'Mark.b', 'and.j-v', '[i]', 'do.v',
                      'like.p', 'cats.n']},
        "my name is Mark": {
            'links': [[0, 3, 'WV'], [0, 2, 'Wd'], [2, 3, 'Ss'], [1, 2, 'Ds**c'], [3, 4, 'Ost']],
            'words': ['LEFT-WALL', 'my.p', 'name.n', 'is.v', 'Mark.b']},
        "my name is John": {
            'words': ['LEFT-WALL', 'my.p', 'name.n', 'is.v', 'John.m'],
            'links': [[0, 3, 'WV'], [0, 2, 'Wd'], [2, 3, 'Ss'], [1, 2, 'Ds**c'], [3, 4, 'Ost']]},
    }
    return canned.get(string, {'links': [], 'words': []})
def substitute(sentence):
    """
    Real method. Too simple to be fake.

    Resolve the word indices of each link to the actual word strings:
    [i, j, tag] -> [words[i], words[j], tag].
    """
    words = sentence["words"]
    return [[words[i], words[j], tag] for i, j, tag in sentence["links"]]
def extract(idx, sentence1, sentence2):
    # Stub for the fake module: always returns a fixed token regardless
    # of the arguments.
    return "example"
37635424970 | # Given an array nums which consists of non-negative integers and an integer m, you can split the array into m non-empty continuous subarrays.
# Write an algorithm to minimize the largest sum among these m subarrays.
# Example 1:
# Input: nums = [7,2,5,10,8], m = 2
# Output: 18
# Explanation:
# There are four ways to split nums into two subarrays.
# The best way is to split it into [7,2,5] and [10,8],
# where the largest sum among the two subarrays is only 18.
# Example 2:
# Input: nums = [1,2,3,4,5], m = 2
# Output: 9
# Example 3:
# Input: nums = [1,4,4], m = 3
# Output: 4
# Constraints:
# 1 <= nums.length <= 1000
# 0 <= nums[i] <= 106
# 1 <= m <= min(50, nums.length)
from functools import cache
class Solution(object):
    def splitArray(self, nums, m):
        """
        Minimize the largest subarray sum over all splits of nums into
        m contiguous non-empty subarrays.

        Binary-searches the answer: for a candidate cap, greedily count
        how many pieces are needed so no piece's sum exceeds cap; the
        smallest feasible cap is the answer.  O(n log(sum(nums))) time
        and O(1) extra space, replacing the memoized DP (which was
        O(n^2 * m) and kept an @cache that retained state).

        :type nums: List[int]
        :type m: int
        :rtype: int
        """
        def pieces_needed(cap):
            # Greedy: start a new piece only when adding the next value
            # would overflow cap.  cap >= max(nums), so each value fits.
            count, running = 1, 0
            for value in nums:
                if running + value > cap:
                    count += 1
                    running = value
                else:
                    running += value
            return count

        lo, hi = max(nums), sum(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            if pieces_needed(mid) <= m:
                hi = mid        # feasible: try a smaller cap
            else:
                lo = mid + 1    # infeasible: cap must grow
        return lo
| sunnyyeti/Leetcode-solutions | 410 Split Array Largest Sum.py | 410 Split Array Largest Sum.py | py | 1,656 | python | en | code | 0 | github-code | 36 |
11875963511 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 17 12:34:56 2019
@author: stark
"""
import requests
from PageLinker import LinkFinder
from domain import *
from utility import *
class Spider:
    """Single-domain crawler; all state is class-level and shared.

    queue/crawled are persisted to text files in the project directory,
    so a crawl can resume across runs.  Thread workers call crawlPage().
    """

    # Class-level state: every Spider instance (and worker thread) shares it.
    projectName = ''
    baseURL = ''
    domainName = ''
    queueFile = ''
    crawledFile = ''
    queue = set()
    crawled = set()
    failed = set()

    def __init__(self,projectName,baseURL,domainName):
        # Constructing a Spider (re)configures the shared class state and
        # immediately crawls the base URL.
        Spider.projectName = projectName
        Spider.baseURL = baseURL
        Spider.domainName = domainName
        Spider.queueFile = pathJoin(Spider.projectName,'queue.txt')
        Spider.crawledFile = pathJoin(Spider.projectName,'crawled.txt')
        Spider.boot()
        Spider.crawlPage('First Page', Spider.baseURL)

    #Creates directory and files for the first run and starts the spider
    @staticmethod
    def boot():
        createProjectDir(Spider.projectName)
        createDataFiles(Spider.projectName,Spider.baseURL)
        # Restore any state left over from a previous run.
        Spider.queue = fileToSet(Spider.queueFile)
        Spider.crawled = fileToSet(Spider.crawledFile)
        Spider.queue.add(Spider.baseURL)

    #Updates user display, fills queue and update files
    @staticmethod
    def crawlPage(threadName,pageURL):
        if pageURL not in Spider.crawled:
            print(threadName +': now crawling : '+ pageURL)
            print('Queue : ' + str(len(Spider.queue)) + ' | Crawled : ' + str(len(Spider.crawled)))
            Spider.queue.remove(pageURL)
            Spider.addLinksToQueue(Spider.gatherLinks(pageURL))
            Spider.crawled.add(pageURL)
            Spider.updateFiles()

    #COnverts raw response data into readable information and checks for proper html formating
    @staticmethod
    def gatherLinks(pageURL):
        # Returns the set of links found in the page's HTML.  On any
        # failure the URL is re-queued once (tracked via Spider.failed)
        # and an empty set is returned.
        try:
            response = requests.get(pageURL)
            if response.status_code == 200:
                if 'text/html' in response.headers['Content-Type']:
                    response.encoding = 'UTF-8'
                    htmlString = response.text
                    finder = LinkFinder(Spider.baseURL,pageURL,Spider.projectName)
                    finder.feeder(htmlString)
                else:
                    # Non-HTML content: nothing to parse.
                    return set()
            else:
                raise Exception('Request staus code' , response.status_code)
        except Exception as e:
            print(str(e))
            if(pageURL not in Spider.failed):
                # First failure: give the URL one retry by re-queueing it.
                Spider.queue.add(pageURL)
                Spider.failed.add(pageURL)
            print(Spider.failed)
            return set()
        return finder.returnLinks()

    #Save queue data to project files
    @staticmethod
    def addLinksToQueue(links):
        for url in links:
            if (url in Spider.queue) or (url in Spider.crawled):
                continue
            if(Spider.domainName != get_domain_name(url)):
                # Stay on the configured domain.
                continue
            Spider.queue.add(url)

    @staticmethod
    def updateFiles():
        setToFile(Spider.queueFile,Spider.queue)
        setToFile(Spider.crawledFile,Spider.crawled)
setToFile(Spider.crawledFile,Spider.crawled)
| pandafy/WebCrawler | spider.py | spider.py | py | 3,354 | python | en | code | 0 | github-code | 36 |
12487779900 | """
Programming Fundamentals Final Exam Preparation - 24 July 2019
link: https://judge.softuni.bg/Contests/Practice/Index/1759#0
Name: 01. Concert
"""
class Band:
def __init__(self, name: str, new_members=None, time=0):
self.name = name
self.members = []
self.add_members(new_members)
self.time = time
def add_members(self, new_members):
if new_members:
for member in new_members:
if member not in self.members:
self.members.append(member)
all_bands = []
while True:
command = input().split("; ")
if command[0] == "start of concert":
break
if command[0] == "Add":
add_name = command[1]
members_to_add = command[2].split(", ")
band_is_present = False
for band in all_bands:
if band.name == add_name:
band_is_present = True
band.add_members(new_members=members_to_add)
if not band_is_present:
all_bands.append(Band(name=add_name, new_members=members_to_add))
elif command[0] == "Play":
play_band_name = command[1]
play_time = int(command[2])
play_band_is_present = False
for band in all_bands:
if band.name == play_band_name:
play_band_is_present = True
band.time += play_time
if not play_band_is_present:
all_bands.append(Band(name=play_band_name, time=play_time))
print(f"Total time: {sum([band.time for band in all_bands])}")
for band in sorted(all_bands, key=lambda x: (-x.time, x.name)):
print(f"{band.name} -> {band.time}")
band_to_print = input()
for band in all_bands:
if band.name == band_to_print:
print(band.name)
for band_member in band.members:
print(f"=> {band_member}")
| SimeonTsvetanov/Coding-Lessons | SoftUni Lessons/Python Development/Python Fundamentals September 2019/Problems And Files/41 PAST EXAMS/Final Exams/04. 24 July 2019 Preparation Final Exam/01. Concert.py | 01. Concert.py | py | 1,901 | python | en | code | 9 | github-code | 36 |
40961271099 | import argparse
import os
import shutil
from subprocess import run
from probar_entrega1 import probar
import pandas as pd
BASE_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__)))
def bajar_repositorio(info_grupo):
    """Clone a group's repository into BASE_PATH/<grupo>, replacing any old checkout.

    Args:
        info_grupo: pandas row with 'grupo', 'cvs' ('git' or 'hg'),
            'servicio' and 'url' fields (from repos.config).

    Raises:
        ValueError: if 'cvs' names an unsupported version-control tool.
    """
    print("Cloning", info_grupo['grupo'])
    grupo_path = os.path.join(BASE_PATH, info_grupo.grupo)
    # Start from a clean tree so stale files never leak between runs.
    if os.path.exists(grupo_path):
        shutil.rmtree(grupo_path)
    if info_grupo.cvs == 'git':
        cmd = '{cvs} clone {cvs}@{servicio}:{url} {grupo}'.format(**info_grupo.to_dict())
    elif info_grupo.cvs == 'hg':
        cmd = '{cvs} clone ssh://{cvs}@{servicio}/{url} {grupo}'.format(**info_grupo.to_dict())
    else:
        # BUG FIX: previously fell through with `cmd` undefined -> NameError.
        raise ValueError(f"Unsupported cvs {info_grupo.cvs!r} (expected 'git' or 'hg')")
    print("About to execute:", cmd)
    # NOTE(review): cmd is built from repos.config (trusted input), so
    # shell=True is tolerated here; do not reuse with untrusted data.
    run(cmd, shell=True)
def correr_pruebas(info_grupo):
    """Run the entrega-1 test harness (probar_entrega1.probar) for this group."""
    probar(grupo=info_grupo.grupo)
def main(grupo=None, mantener_repositorio=False):
    """Clone (unless skipped) and test every group listed in repos.config.

    Args:
        grupo: restrict the run to this single group name, or None for all.
        mantener_repositorio: when True, reuse the existing checkout instead
            of re-cloning.
    """
    grupos = pd.read_csv('repos.config', sep='|')
    if grupo is not None:
        grupos = grupos[grupos.grupo == grupo]
    for _, info_grupo in grupos.iterrows():
        print("#"*160)
        print("#"*160)
        print("Grupo ", info_grupo.grupo)
        if not mantener_repositorio:
            bajar_repositorio(info_grupo)
        else:
            print("Se saltea la actualizaciรณn del repositorio")
        correr_pruebas(info_grupo)
        print("#"*160)
        print("#"*160)
if __name__ == '__main__':
    # CLI: optionally restrict to one group and/or skip the fresh clone.
    parser = argparse.ArgumentParser()
    parser.add_argument('--grupo', help='Grupo en particular')
    parser.add_argument('--mantener_repositorio', action='store_true', help='Evita volver a clonar el repo')
    args = parser.parse_args()
    main(args.grupo, args.mantener_repositorio)
| ucse-ia/ucse_ia | 2018/corrector.py | corrector.py | py | 1,647 | python | es | code | 5 | github-code | 36 |
2809455803 | class Scope:
    """A name-to-value mapping with lexical chaining to a parent scope.
    Lookups that miss locally are delegated to the parent; a name missing
    from the whole chain resolves to None rather than raising KeyError.
    """
    def __init__(self, parent=None):
        # parent: enclosing Scope, or None for the outermost (global) scope.
        self.dct = {}
        self.parent = parent
    def __getitem__(self, item):
        if not item in self.dct:
            if self.parent:
                # Not bound here — walk up the chain.
                return self.parent[item]
            else:
                # Unbound everywhere: this interpreter returns None, not an error.
                return None
        return self.dct[item]
    def __setitem__(self, item, x):
        # Assignment always binds locally; it never rebinds a parent entry.
        self.dct[item] = x
class Number:
    """A numeric literal; it is its own fully-evaluated value."""

    def __init__(self, value):
        self.value = value

    def evaluate(self, scope):
        # Nothing to compute: a number evaluates to itself.
        return self
class Function:
    """A function value: parameter names plus a body of expressions.

    Evaluating runs every body expression in the given scope and yields the
    last expression's value (None when the body is empty).
    """

    def __init__(self, args, body):
        self.args = args
        self.body = body

    def evaluate(self, scope):
        result = None
        for expression in self.body:
            result = expression.evaluate(scope)
        return result
class FunctionDefinition:
    """Binds a function object to a name in the current scope.

    Evaluating registers the function under `name` and returns the function,
    so a definition can appear wherever an expression is expected.
    """

    def __init__(self, name, function):
        self.name = name
        self.function = function

    def evaluate(self, scope):
        scope[self.name] = self.function
        return self.function
class Conditional:
    """An if/else expression over lists of body expressions.

    The condition must evaluate to an object whose .value decides the branch.
    A selected branch that is None yields Number(0); otherwise the branch's
    expressions run in order and the last value is returned (None if empty).
    """

    def __init__(self, condtion, if_true, if_false=None):
        # NOTE: the misspelled "condtion" is kept — renaming it would break
        # callers that pass it as a keyword argument.
        self.condtion = condtion
        self.if_true = if_true
        self.if_false = if_false

    def evaluate(self, scope):
        branch = self.if_true if self.condtion.evaluate(scope).value else self.if_false
        if branch is None:
            return Number(0)
        result = None
        for expression in branch:
            result = expression.evaluate(scope)
        return result
class Print:
    """Evaluates an expression, prints its .value to stdout, and returns it."""

    def __init__(self, expr):
        self.expr = expr

    def evaluate(self, scope):
        value = self.expr.evaluate(scope).value
        print(value)
        return value
class Read:
    """Reads an integer from stdin, binds it in the scope, and returns it.

    The value is wrapped in a Number and stored under `name`.
    """

    def __init__(self, name):
        self.name = name

    def evaluate(self, scope):
        number = Number(int(input()))
        scope[self.name] = number
        return number
class FunctionCall:
    """Applies a function expression to a list of argument expressions.

    Arguments are evaluated in the calling scope, then bound to the
    function's parameter names in a fresh child scope in which the
    function body runs.
    """

    def __init__(self, fun_expr, args):
        self.fun_expr = fun_expr
        self.args = args

    def evaluate(self, scope):
        function = self.fun_expr.evaluate(scope)
        call_scope = Scope(scope)
        for param, arg in zip(function.args, self.args):
            call_scope[param] = arg.evaluate(scope)
        return function.evaluate(call_scope)
class Reference:
    """Looks a name up in the current scope chain when evaluated."""

    def __init__(self, name):
        self.name = name

    def evaluate(self, scope):
        return scope[self.name]
class BinaryOperation:
    """A binary expression over two sub-expressions.

    Both operands must evaluate to objects with a numeric .value; the raw
    result is wrapped in a Number (so comparisons yield Number(True) /
    Number(False), which are truthy/falsy as expected).
    """

    oper = {
        '+': (lambda x, y: x + y),
        '*': (lambda x, y: x * y),
        '-': (lambda x, y: x - y),
        '/': (lambda x, y: x // y),  # integer (floor) division by design
        '%': (lambda x, y: x % y),
        '==': (lambda x, y: x == y),
        '!=': (lambda x, y: x != y),
        '<': (lambda x, y: x < y),
        '>': (lambda x, y: x > y),
        '<=': (lambda x, y: x <= y),
        # BUG FIX: was "x >+ y", which parses as x > (+y), i.e. strict ">",
        # so 5 >= 5 wrongly evaluated to False.
        '>=': (lambda x, y: x >= y),
        '&&': (lambda x, y: x and y),
        '||': (lambda x, y: x or y)
    }

    def __init__(self, lhs, op, rhs):
        self.lhs = lhs
        self.op = op
        self.rhs = rhs

    def evaluate(self, scope):
        lhs = self.lhs.evaluate(scope)
        rhs = self.rhs.evaluate(scope)
        return Number(self.oper[self.op](lhs.value, rhs.value))
class UnaryOperation:
    """A unary expression: arithmetic negation '-' or logical not '!'.

    The operand must evaluate to an object with a .value; the result is
    wrapped in a fresh Number.
    """

    oper = {
        '-': (lambda x: -x),
        '!': (lambda x: not x)
    }

    def __init__(self, op, expr):
        self.op = op
        self.expr = expr

    def evaluate(self, scope):
        operand = self.expr.evaluate(scope)
        return Number(self.oper[self.op](operand.value))
def example():
    """Demo: scope shadowing plus a two-argument function call (prints 2)."""
    parent = Scope()
    # foo(hello, world) prints hello + world.
    body = [Print(BinaryOperation(Reference('hello'), '+', Reference('world')))]
    parent["foo"] = Function(('hello', 'world'), body)
    parent["bar"] = Number(10)
    scope = Scope(parent)
    assert 10 == scope["bar"].value
    scope["bar"] = Number(20)  # shadows only in the child scope
    assert scope["bar"].value == 20
    print('It should print 2: ', end=' ')
    call = FunctionCall(FunctionDefinition('foo', parent['foo']),
                        [Number(5), UnaryOperation('-', Number(3))])
    call.evaluate(scope)
def my_tests():
    """Interactive checks: reads two integers a and b from stdin, then
    prints a*b, |a|, -(b//a), and finally n*m and n+m for n=2, m=4."""
    scope = Scope()
    a = Read('a').evaluate(scope)
    b = Read('b').evaluate(scope)
    Print(BinaryOperation(a, '*', b)).evaluate(scope)
    scope['zero'] = Number(0)
    # Print a when positive, otherwise its negation (i.e. |a|).
    cond = BinaryOperation(scope['a'], '>', scope['zero'])
    Conditional(cond,
                [Print(scope['a'])],
                [Print(UnaryOperation('-', scope['a']))]).evaluate(scope)
    Print(UnaryOperation('-', BinaryOperation(scope['b'], '/', scope['a']))).evaluate(scope)
    # A two-statement function body: prints x*y then x+y.
    func = Function(('x', 'y'), [
        Print(BinaryOperation(Reference('x'), '*', Reference('y'))),
        Print(BinaryOperation(Reference('x'), '+', Reference('y'))),
    ])
    scope['n'] = Number(2)
    scope['m'] = Number(4)
    FunctionCall(FunctionDefinition("func", func), [scope['n'], scope['m']]).evaluate(scope)
if __name__ == '__main__':
    # Run the demo and the interactive checks only when executed as a script.
    example()
    my_tests()
| GrigoryBartosh/au01_paradigms2016 | HW_04/model.py | model.py | py | 5,374 | python | en | code | 0 | github-code | 36 |
5843240330 | import sublime, sublime_plugin, os
# Relative path (from each chapter file) to the master LaTeX document.
mainfilepath="../main.tex"
# Directive prefix recognized by LaTeX build tools to locate the root file.
texcommand="%!TEX root"
# Full directive line inserted at the top of chapter files.
texroot = texcommand + " = " + mainfilepath
class TexRootCommand(sublime_plugin.TextCommand):
    """Sublime command: prepend the "%!TEX root" directive if it is absent."""

    def run(self, edit):
        first_line = self.view.substr(self.view.line(0))
        if not first_line.startswith(texcommand):
            self.view.insert(edit, 0, texroot + "\n")
class Loader(sublime_plugin.EventListener):
    """Runs the tex_root command on freshly opened .tex chapter files,
    skipping the master document and anything under FrontBackmatter."""

    def on_load(self, view):
        name = view.file_name()
        if "main.tex" in name or "FrontBackmatter" in name:
            return
        _, extension = os.path.splitext(name)
        if extension == ".tex":
            view.run_command('tex_root')
| saspre/SublimeLatexTopping | Pratex.py | Pratex.py | py | 647 | python | en | code | 0 | github-code | 36 |
3745893127 | """Add finding.column_start / finding.column_end and widen the unique constraint.
Revision ID: 9c5fa6db20f1
Revises: ar399258p714
Create Date: 2023-03-06 13:56:47.958406
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic to build the migration graph —
# do not edit these values.
revision = '9c5fa6db20f1'         # this migration
down_revision = 'ar399258p714'    # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Add column_start/column_end to `finding` and widen its unique constraint."""
    # server_default 0 backfills the new NOT NULL columns for existing rows.
    op.add_column('finding', sa.Column('column_start', sa.Integer(), nullable=False, server_default=sa.text("0")))
    op.add_column('finding', sa.Column('column_end', sa.Integer(), nullable=False, server_default=sa.text("0")))
    # The old constraint must be dropped before the widened one is created.
    op.drop_constraint('uc_finding_per_branch', 'finding', type_='unique')
    op.create_unique_constraint('uc_finding_per_branch', 'finding',
                                ['commit_id', 'branch_id', 'rule_name', 'file_path', 'line_number',
                                 'column_start', 'column_end'])
def downgrade():
    """Revert upgrade(): restore the narrower constraint, drop the new columns."""
    op.drop_constraint('uc_finding_per_branch', 'finding', type_='unique')
    op.create_unique_constraint('uc_finding_per_branch', 'finding',
                                ['commit_id', 'branch_id', 'rule_name', 'file_path', 'line_number'])
    op.drop_column('finding', 'column_start')
    op.drop_column('finding', 'column_end')
| abnamro/repository-scanner | components/resc-backend/alembic/versions/9c5fa6db20f1_finding_column.py | 9c5fa6db20f1_finding_column.py | py | 1,202 | python | en | code | 137 | github-code | 36 |
13324525829 | # 에라토스테네스의 체 (Sieve of Eratosthenes)
def list_prime(n):
    """Return all primes below n using the Sieve of Eratosthenes."""
    is_prime = [True] * n  # index i marks whether i is (still considered) prime
    # Any composite below n has a factor <= sqrt(n), so crossing out stops there.
    for candidate in range(2, int(n ** 0.5) + 1):
        if is_prime[candidate]:
            # Cross out every multiple of the candidate, starting at 2*candidate.
            for multiple in range(candidate + candidate, n, candidate):
                is_prime[multiple] = False
    return [i for i in range(2, n) if is_prime[i]]
print(list_prime(10)) | ipcoo43/baekjoon | lesson115.py | lesson115.py | py | 507 | python | ko | code | 0 | github-code | 36 |
4889121541 | from utils import parseDate, checkDateInTheFuture, checkDateFromNotTooBig, s3Query
from http_response import okResponse, badRequestResponse
from typing import Union
import os
import boto3
BucketName = os.environ.get('BUCKET_NAME')
FileName = os.environ.get('PROV_FILE_NAME')
s3 = boto3.client('s3')
maxMonths = 5
def lambda_handler(event, context):
    """AWS Lambda entry point: COVID case totals for one province.

    Expects query-string parameters "prov" (required) and optionally
    "date-from".  Responds 400 for missing/invalid input, 200 with the
    query result otherwise, and 500 on unexpected errors.
    """
    try:
        params = event['queryStringParameters']
        if params is None or 'prov' not in params:
            return badRequestResponse('Province is missing')
        prov = params['prov']
        dateFrom = parseDate(params, 'date-from')
        if not checkDateFromNotTooBig(maxMonths, dateFrom):
            return badRequestResponse(f'date-from should be max {maxMonths} months in the past')
        if checkDateInTheFuture(dateFrom):
            return badRequestResponse(f'date-from should not be in the future')
        return okResponse(queryData(prov, dateFrom))
    except Exception as e:
        print(e)
        return {
            "statusCode": 500,
        }
def queryData(prov: str, dateFrom: str) -> str:
    """Run an S3 Select query for one province's case rows after dateFrom.

    Args:
        prov: province initials (user-supplied — escaped below).
        dateFrom: ISO date string; rows strictly after it are returned.

    Returns:
        The result string produced by s3Query.
    """
    # SECURITY FIX: S3 Select has no bound parameters, so escape single
    # quotes to keep user-supplied values from breaking out of the SQL
    # string literal (previously `prov` was interpolated verbatim).
    safe_prov = prov.replace("'", "''")
    safe_date = dateFrom.replace("'", "''")
    query = f"""
    SELECT
        denominazione_regione AS region,
        denominazione_provincia AS province,
        sigla_provincia AS province_initials,
        totale_casi AS total,
        data AS reporting_date
    FROM s3object s
    WHERE sigla_provincia ='{safe_prov}' AND data > '{safe_date}'
    """
    return s3Query(s3, query, BucketName, FileName)
| menalb/covid-data-app | api/bucketquery/coviddata/app_prov.py | app_prov.py | py | 1,582 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.