blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
60e93b6931bbe1a569545feb88123f30f7f44f44 | a26472ee6694b9aad73d009667728b5482b1e8b4 | /inventory/forms.py | cc8f9fb0ff46a73a398a76c858136f3277190f36 | [] | no_license | peterson1999/managementsystem | a1dd49916284c960aa2053765dbba49ff84be4d1 | 9c2392cfffe202820aa71b40c137bb03880b717e | refs/heads/master | 2022-12-28T03:35:13.276873 | 2020-10-16T15:26:41 | 2020-10-16T15:26:41 | 290,228,900 | 0 | 0 | null | 2020-10-16T12:50:09 | 2020-08-25T13:53:47 | Python | UTF-8 | Python | false | false | 531 | py | from django import forms
from .models import *
class ProductForm(forms.ModelForm):
    """ModelForm for creating/editing a Product (category, brand, name,
    price and stock fields are user-editable)."""
    class Meta:
        model=Product
        fields = ('category', 'brand','name','price','stock',)
class MultiImages(forms.ModelForm):
    """ModelForm for uploading one MultiImage attachment (image field only)."""
    class Meta:
        model=MultiImage
        fields = ('image',)
class OrderForm(forms.ModelForm):
    """ModelForm for an Order; only the employee e-mail is captured here."""
    class Meta:
        model=Order
        fields = ('employeeEmail',)
class OrderedProduct(forms.ModelForm):
    """ModelForm for one ordered line item: product reference, price, qty."""
    class Meta:
        model=OrderedProducts
        # Fixed: removed trailing dataset-export residue ("| [") that made
        # this line a syntax error.
        fields = ('productID','price','qty',)
"anjunieco@gmail.com"
] | anjunieco@gmail.com |
decaae0b54d14f6096ea42bf73d736efa07a8495 | ce5dec2a0601afbe7666df6d3be8d186245cc6ab | /10-dinamicno-programiranje/minsko_polje.py | 6a0215c5c5b9921630c98b8d2b798a0a456fc6fa | [] | no_license | jakobvalic/RAC-1 | aa9a15abdd657bbc8df31e219835ae762ec9f8b3 | 5b14f61e46761a272738fb93c007a00f147e36d0 | refs/heads/master | 2018-12-20T07:09:18.034633 | 2018-09-18T07:34:42 | 2018-09-18T07:34:42 | 118,833,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,524 | py | # =============================================================================
# Minsko polje
# =====================================================================@010509=
# 1. podnaloga
# Robotka moramo prepeljati čez minirano območje, ki je pravokotne oblike
# in je razdeljeno na $m \times n$ kvadratnih polj. Na začetku je robotek
# parkiran v zgornjem levem polju s koordinatama $(1, 1)$. Spodnje desno
# polje ima koordinati $(m,n)$. Robotek se lahko v vsakem koraku pomakne
# za eno polje navzdol ali za eno polje v desno. Navzgor ali na desno se
# ne more premikati. Prav tako se ne more premikati diagonalni ali
# preskakovati polj. Na nekaterih poljih so zakopane mine, ki se jim mora
# robotek izogniti. Na koliko načinov lahko pride iz začetnega na končno
# polje? Predpostavite lahko, da na začetnem in končnem polju ni min.
#
# Napišite funkcijo `stevilo_poti(m, n, mine)`, kjer sta `m` in `n` celi
# števili, ki predstavljata velikost polja. Mine je seznam parov in sicer
# so to koordinate polj, kjer so zakopane mine. Funkcija naj vrne število
# različnih poti med $(1, 1)$ in $(m, n)$, ki se izognejo minam. Zgled:
#
# >>> stevilo_poti(5, 4, [])
# 35
# >>> stevilo_poti(5, 4, [(2, 3), (4, 3)])
# 9
# =============================================================================
from functools import lru_cache
def stevilo_potiV2(m, n, mine):
    """Count robot paths on an m x n field; wrapper that freezes the mine
    list into a tuple so the lru_cache-decorated helper can hash it."""
    mine_key = tuple(mine)
    return stevilo_poti_tuple(m, n, mine_key)
@lru_cache(maxsize=None)
def stevilo_poti_tuple(i, j, mine):
    """Count down/right paths from (1, 1) to (i, j) that avoid the mined
    cells listed in the tuple *mine* (memoized recursion)."""
    # A mined cell contributes no paths at all.
    if (i, j) in mine:
        return 0
    # The start cell itself is the single empty path.
    if i == 1 and j == 1:
        return 1
    total = 0
    if i > 1:
        total += stevilo_poti_tuple(i - 1, j, mine)
    if j > 1:
        total += stevilo_poti_tuple(i, j - 1, mine)
    return total
# A more elegant variant: keep the mine list in a closure and cache an inner helper.
def stevilo_poti(i, j, mine):
    """Count down/right paths from (1, 1) to (i, j) avoiding cells in *mine*.

    *mine* may be any container supporting ``in`` (e.g. a list of pairs);
    it is captured in a closure, so the cached helper only keys on (r, c).
    """
    @lru_cache(maxsize=None)
    def count(r, c):
        # Blocked cells contribute no paths.
        if (r, c) in mine:
            return 0
        if r == 1 and c == 1:
            return 1
        total = 0
        if r > 1:
            total += count(r - 1, c)
        if c > 1:
            total += count(r, c - 1)
        return total
    return count(i, j)
# ============================================================================@
# No-op string statements (Slovenian): "If Python reports a syntax error on
# this line, the error actually hides in the last lines of your code. Do NOT
# modify the code from here on!" They only exist to redirect blame for
# syntax errors in the student code above.
'Če vam Python sporoča, da je v tej vrstici sintaktična napaka,'
'se napaka v resnici skriva v zadnjih vrsticah vaše kode.'
'Kode od tu naprej NE SPREMINJAJTE!'
import io, json, os, re, sys, shutil, traceback, urllib.error, urllib.request
from contextlib import contextmanager
class Check:
    """Auto-generated projekt-tomo grading helper (marked "do not modify").

    Holds the list of exercise parts in class attributes and offers
    comparison utilities (equal/run/output/generator, ...) that record
    Slovenian feedback messages instead of raising.
    """
    @staticmethod
    def has_solution(part):
        """A part counts as attempted once its solution is non-empty."""
        return part['solution'].strip() != ''
    @staticmethod
    def initialize(parts):
        """Reset validity/feedback/secret bookkeeping for all parts."""
        Check.parts = parts
        for part in Check.parts:
            part['valid'] = True
            part['feedback'] = []
            part['secret'] = []
        Check.current_part = None
        Check.part_counter = None
    @staticmethod
    def part():
        """Advance to the next part; return whether it has a solution."""
        if Check.part_counter is None:
            Check.part_counter = 0
        else:
            Check.part_counter += 1
        Check.current_part = Check.parts[Check.part_counter]
        return Check.has_solution(Check.current_part)
    @staticmethod
    def feedback(message, *args, **kwargs):
        """Append a formatted feedback message to the current part."""
        Check.current_part['feedback'].append(message.format(*args, **kwargs))
    @staticmethod
    def error(message, *args, **kwargs):
        """Record feedback and mark the current part as invalid."""
        Check.current_part['valid'] = False
        Check.feedback(message, *args, **kwargs)
    @staticmethod
    def clean(x, digits=6, typed=False):
        """Normalize *x* for comparison: round floats, recurse into
        containers, sort sets/dicts; optionally tag values with their type."""
        t = type(x)
        if t is float:
            x = round(x, digits)
            # Since -0.0 differs from 0.0 even after rounding,
            # we change it to 0.0 abusing the fact it behaves as False.
            v = x if x else 0.0
        elif t is complex:
            v = complex(Check.clean(x.real, digits, typed), Check.clean(x.imag, digits, typed))
        elif t is list:
            v = list([Check.clean(y, digits, typed) for y in x])
        elif t is tuple:
            v = tuple([Check.clean(y, digits, typed) for y in x])
        elif t is dict:
            v = sorted([(Check.clean(k, digits, typed), Check.clean(v, digits, typed)) for (k, v) in x.items()])
        elif t is set:
            v = sorted([Check.clean(y, digits, typed) for y in x])
        else:
            v = x
        return (t, v) if typed else v
    @staticmethod
    def secret(x, hint=None, clean=None):
        """Store a hidden expected value (as text) plus an optional hint."""
        clean = clean or Check.clean
        Check.current_part['secret'].append((str(clean(x)), hint))
    @staticmethod
    def equal(expression, expected_result, clean=None, env={}):
        """Evaluate *expression* and compare its cleaned value with
        *expected_result*; record an error message on mismatch."""
        local_env = locals()
        local_env.update(env)
        clean = clean or Check.clean
        actual_result = eval(expression, globals(), local_env)
        if clean(actual_result) != clean(expected_result):
            Check.error('Izraz {0} vrne {1!r} namesto {2!r}.',
                        expression, actual_result, expected_result)
            return False
        else:
            return True
    @staticmethod
    def run(statements, expected_state, clean=None, env={}):
        """Execute *statements* and verify the resulting variable bindings
        against *expected_state*."""
        code = "\n".join(statements)
        statements = " >>> " + "\n >>> ".join(statements)
        s = {}
        s.update(env)
        clean = clean or Check.clean
        exec(code, globals(), s)
        errors = []
        for (x, v) in expected_state.items():
            if x not in s:
                errors.append('morajo nastaviti spremenljivko {0}, vendar je ne'.format(x))
            elif clean(s[x]) != clean(v):
                errors.append('nastavijo {0} na {1!r} namesto na {2!r}'.format(x, s[x], v))
        if errors:
            Check.error('Ukazi\n{0}\n{1}.', statements, ";\n".join(errors))
            return False
        else:
            return True
    @staticmethod
    @contextmanager
    def in_file(filename, content, encoding=None):
        """Context manager: create an input file with *content*, then
        re-attribute any feedback produced inside the block to this file."""
        with open(filename, 'w', encoding=encoding) as f:
            for line in content:
                print(line, file=f)
        old_feedback = Check.current_part['feedback'][:]
        yield
        new_feedback = Check.current_part['feedback'][len(old_feedback):]
        Check.current_part['feedback'] = old_feedback
        if new_feedback:
            new_feedback = ['\n    '.join(error.split('\n')) for error in new_feedback]
            Check.error('Pri vhodni datoteki {0} z vsebino\n  {1}\nso se pojavile naslednje napake:\n- {2}', filename, '\n  '.join(content), '\n- '.join(new_feedback))
    @staticmethod
    @contextmanager
    def input(content, encoding=None):
        """Context manager: feed *content* to stdin; restore stdin afterwards
        and re-attribute any feedback produced inside the block."""
        old_stdin = sys.stdin
        old_feedback = Check.current_part['feedback'][:]
        sys.stdin = io.StringIO('\n'.join(content))
        try:
            yield
        finally:
            sys.stdin = old_stdin
        new_feedback = Check.current_part['feedback'][len(old_feedback):]
        Check.current_part['feedback'] = old_feedback
        if new_feedback:
            new_feedback = ['\n    '.join(error.split('\n')) for error in new_feedback]
            Check.error('Pri vhodu\n  {0}\nso se pojavile naslednje napake:\n- {1}', '\n  '.join(content), '\n- '.join(new_feedback))
    @staticmethod
    def out_file(filename, content, encoding=None):
        """Compare a produced output file line-by-line with *content*."""
        with open(filename, encoding=encoding) as f:
            out_lines = f.readlines()
        equal, diff, line_width = Check.difflines(out_lines, content)
        if equal:
            return True
        else:
            Check.error('Izhodna datoteka {0}\n je enaka{1} namesto:\n {2}', filename, (line_width - 7) * ' ', '\n '.join(diff))
            return False
    @staticmethod
    def output(expression, content, use_globals=False):
        """Execute *expression*, capture stdout (echoing any input() calls),
        and compare the printed lines with *content*."""
        old_stdout = sys.stdout
        sys.stdout = io.StringIO()
        try:
            def visible_input(prompt):
                inp = input(prompt)
                print(inp)
                return inp
            exec(expression, globals() if use_globals else {'input': visible_input})
        finally:
            output = sys.stdout.getvalue().strip().splitlines()
            sys.stdout = old_stdout
        equal, diff, line_width = Check.difflines(output, content)
        if equal:
            return True
        else:
            Check.error('Program izpiše{0} namesto:\n {1}', (line_width - 13) * ' ', '\n '.join(diff))
            return False
    @staticmethod
    def difflines(actual_lines, expected_lines):
        """Produce a side-by-side diff; returns (equal, diff_lines, width)."""
        actual_len, expected_len = len(actual_lines), len(expected_lines)
        if actual_len < expected_len:
            actual_lines += (expected_len - actual_len) * ['\n']
        else:
            expected_lines += (actual_len - expected_len) * ['\n']
        equal = True
        line_width = max(len(actual_line.rstrip()) for actual_line in actual_lines + ['je enaka'])
        diff = []
        for out, given in zip(actual_lines, expected_lines):
            out, given = out.rstrip(), given.rstrip()
            if out != given:
                equal = False
            diff.append('{0} {1} {2}'.format(out.ljust(line_width), '|' if out == given else '*', given))
        return equal, diff, line_width
    @staticmethod
    def generator(expression, expected_values, should_stop=False, further_iter=0, env={}, clean=None):
        """Check that *expression* yields *expected_values* in order, can be
        iterated *further_iter* more times, and optionally then stops."""
        from types import GeneratorType
        local_env = locals()
        local_env.update(env)
        clean = clean or Check.clean
        gen = eval(expression, globals(), local_env)
        if not isinstance(gen, GeneratorType):
            Check.error("Izraz {0} ni generator.", expression)
            return False
        try:
            for iteration, expected_value in enumerate(expected_values):
                actual_value = next(gen)
                if clean(actual_value) != clean(expected_value):
                    Check.error("Vrednost #{0}, ki jo vrne generator {1} je {2!r} namesto {3!r}.",
                                iteration, expression, actual_value, expected_value)
                    return False
            for _ in range(further_iter):
                next(gen)  # we will not validate it
        except StopIteration:
            Check.error("Generator {0} se prehitro izteče.", expression)
            return False
        if should_stop:
            try:
                next(gen)
                Check.error("Generator {0} se ne izteče (dovolj zgodaj).", expression)
            except StopIteration:
                pass  # this is fine
        return True
    @staticmethod
    def summarize():
        """Print one status line per part plus its collected feedback."""
        for i, part in enumerate(Check.parts):
            if not Check.has_solution(part):
                print('{0}. podnaloga je brez rešitve.'.format(i + 1))
            elif not part['valid']:
                print('{0}. podnaloga nima veljavne rešitve.'.format(i + 1))
            else:
                print('{0}. podnaloga ima veljavno rešitev.'.format(i + 1))
            for message in part['feedback']:
                print('  - {0}'.format('\n    '.join(message.splitlines())))
def _validate_current_file():
    """Extract the per-part solutions from this file, run the embedded
    tests, submit the results to the projekt-tomo server, and print a
    summary. (Auto-generated footer code.)"""
    def extract_parts(filename):
        # Split the source into parts based on the "# ===@<id>=" headers.
        with open(filename, encoding='utf-8') as f:
            source = f.read()
        part_regex = re.compile(
            r'# =+@(?P<part>\d+)=\n' # beginning of header
            r'(#( [^\n]*)?\n)+' # description
            r'# =+\n' # end of header
            r'(?P<solution>.*?)' # solution
            r'(?=\n# =+@)', # beginning of next part
            flags=re.DOTALL | re.MULTILINE
        )
        parts = [{
            'part': int(match.group('part')),
            'solution': match.group('solution')
        } for match in part_regex.finditer(source)]
        # The last solution extends all the way to the validation code,
        # so we strip any trailing whitespace from it.
        parts[-1]['solution'] = parts[-1]['solution'].rstrip()
        return parts
    def backup(filename):
        # Copy the file to the first free "<filename>.<n>" name.
        backup_filename = None
        suffix = 1
        while not backup_filename or os.path.exists(backup_filename):
            backup_filename = '{0}.{1}'.format(filename, suffix)
            suffix += 1
        shutil.copy(filename, backup_filename)
        return backup_filename
    def submit_parts(parts, url, token):
        # POST all attempted parts as JSON; returns the decoded response.
        submitted_parts = []
        for part in parts:
            if Check.has_solution(part):
                submitted_part = {
                    'part': part['part'],
                    'solution': part['solution'],
                    'valid': part['valid'],
                    'secret': [x for (x, _) in part['secret']],
                    'feedback': json.dumps(part['feedback']),
                }
                if 'token' in part:
                    submitted_part['token'] = part['token']
                submitted_parts.append(submitted_part)
        data = json.dumps(submitted_parts).encode('utf-8')
        headers = {
            'Authorization': token,
            'content-type': 'application/json'
        }
        request = urllib.request.Request(url, data=data, headers=headers)
        response = urllib.request.urlopen(request)
        return json.loads(response.read().decode('utf-8'))
    def update_attempts(old_parts, response):
        # Merge server-side grading results back into the local parts and
        # attach a hint for the first newly-failing secret, if provided.
        updates = {}
        for part in response['attempts']:
            part['feedback'] = json.loads(part['feedback'])
            updates[part['part']] = part
        for part in old_parts:
            valid_before = part['valid']
            part.update(updates.get(part['part'], {}))
            valid_after = part['valid']
            if valid_before and not valid_after:
                wrong_index = response['wrong_indices'].get(str(part['part']))
                if wrong_index is not None:
                    hint = part['secret'][wrong_index][1]
                    if hint:
                        part['feedback'].append('Namig: {}'.format(hint))
    filename = os.path.abspath(sys.argv[0])
    file_parts = extract_parts(filename)
    Check.initialize(file_parts)
    if Check.part():
        try:
            test_data = [
                ("""stevilo_poti(5, 4, [])""", 35),
                ("""stevilo_poti(5, 4, [(2, 3), (4, 3)])""", 9),
                ("""stevilo_poti(5, 4, [(3, 3)])""", 17),
                ("""stevilo_poti(5, 4, [(2, 1), (2, 3), (3, 4), (4, 2), (4, 4), (5, 2)])""", 1),
                ("""stevilo_poti(5, 4, [(1, 4), (2, 3), (3, 2), (4, 1)])""", 0),
                ("""stevilo_poti(50, 40, [])""", 14949040860667351485471600),
                ("""stevilo_poti(50, 40, [(3, 4), (3, 11), (3, 31), (6, 3), (8, 20), (41, 31)])""", 6132100941099086511785845),
            ]
            for td in test_data:
                Check.equal(*td)
        except:
            Check.error("Testi sprožijo izjemo\n  {0}",
                        "\n  ".join(traceback.format_exc().split("\n"))[:-2])
    print('Shranjujem rešitve na strežnik... ', end="")
    try:
        url = 'https://www.projekt-tomo.si/api/attempts/submit/'
        token = 'Token a7d3422f9635f98a67f097384251c60b342d336e'
        response = submit_parts(Check.parts, url, token)
    except urllib.error.URLError:
        print('PRI SHRANJEVANJU JE PRIŠLO DO NAPAKE! Poskusite znova.')
    else:
        print('Rešitve so shranjene.')
        update_attempts(Check.parts, response)
        if 'update' in response:
            print("Posodabljam datoteko... ", end="")
            backup_filename = backup(filename)
            # Bug fix: `urlopen` was referenced unqualified, which raises
            # NameError (only `urllib.request` is imported at module level).
            r = urllib.request.urlopen(response['update'])
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(r.read().decode('utf-8'))
            print("Stara datoteka je preimenovana v {0}.".format(os.path.basename(backup_filename)))
            print("Če se datoteka v urejevalniku ni osvežila, jo zaprite ter ponovno odprite.")
    Check.summarize()
if __name__ == '__main__':
    # Entry point: run the embedded tests and submit results to the server.
    _validate_current_file()
| [
"jakob.valic@gmail.com"
] | jakob.valic@gmail.com |
524ccd8f6e0f5739ac7fcba048a7a10e5d75824d | db79787a8726e136e48b25133fbe6724d25ec5f2 | /src/uiopstestcase/ops_testcase09_citymanage_areaadd.py | 4fca3f5db348942618545b8c2aa343eb3a4fbf0e | [] | no_license | cash2one/edaixi_python_selenium | a1d51ada40788c550f3014bf62a44360781b27b9 | ae63b323a46032dc3116c4515ee375ace67dddda | refs/heads/master | 2020-05-22T16:55:53.137551 | 2016-11-26T15:31:52 | 2016-11-26T15:31:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re ,ConfigParser
import appobjectops
class OpsTestcase09citymanageareaadd(unittest.TestCase):
    """Selenium UI test (Python 2 syntax): log in to the city-operations
    backend with admin credentials from a local config file, open the 9th
    navbar entry, and submit a new area name via the city-management page."""
    def setUp(self):
        """Create the shared webdriver and load credentials/URL from
        C:/edaixi_testdata/userdata_ops.conf."""
        #self.driver = webdriver.Firefox()
        self.driver = appobjectops.GetInstance()
        self.driver.implicitly_wait(30)
        conf = ConfigParser.ConfigParser()
        conf.read("C:/edaixi_testdata/userdata_ops.conf")
        # NOTE(review): CAIWU_URL is declared global but never assigned or
        # read in this class -- looks like a leftover; confirm before removing.
        global CAIWU_URL,USER_NAME,PASS_WORD
        OPS_URL = conf.get("opssection", "uihostname")
        USER_NAME = conf.get("opssection", "uiadminusername")
        PASS_WORD = conf.get("opssection", "uiadminpassword")
        print OPS_URL,USER_NAME,PASS_WORD
        self.base_url = OPS_URL
        #self.base_url = "http://ops05.edaixi.cn:81/"
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_ops_testcase09_citymanageareaadd(self):
        """Log in, verify the backend title, navigate to city management
        (9th navbar item) and add a new area."""
        driver = self.driver
        driver.get(self.base_url + "/")
        driver.find_element_by_css_selector("div#container.container h3.text-center.text-primary a.btn.btn-success.text-center").click()
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys(USER_NAME)
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys(PASS_WORD)
        driver.find_element_by_id("login-submit").click()
        print driver.title
        self.assertEqual(driver.title, u"e袋洗城市运营后台")
        driver.find_element_by_css_selector("div.container>div.navbar-collapse.collapse.navbar-responsive-collapse>ul.nav.navbar-nav>li:nth-child("+str(9)+") a").click()
        driver.implicitly_wait(10)
        print driver.title
        driver.find_element_by_id("name").clear()
        driver.find_element_by_id("name").send_keys(u"北京")
        driver.find_element_by_name("commit").click()
        self.assertEqual(driver.title, u"e袋洗城市运营后台")
    def is_element_present(self, how, what):
        """Return True when the (how, what) locator matches an element."""
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True
    def is_alert_present(self):
        """Return True when a JavaScript alert is currently open."""
        try: self.driver.switch_to_alert()
        except NoAlertPresentException, e: return False
        return True
    def close_alert_and_get_its_text(self):
        """Accept or dismiss the open alert (per accept_next_alert) and
        return its text; the flag is reset to True afterwards."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        """Quit the driver and fail if soft verification errors were logged."""
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| [
"ibmcuijun2015@126.com"
] | ibmcuijun2015@126.com |
e6dfe1b20f406269746fe4d77c02d931f95e03f5 | 623ebf2af50e4473994c6a4911c8a74ecbb19b34 | /budgetproject/urls.py | 48a22cea86e670b0166db168462614af094727d3 | [
"MIT"
] | permissive | Meakelvis/Project_Budget_app | 57dac521d698fe0b658872eeb1340a64dff4804c | 378cc8a364a2623e42bcc5193c0344cd016579f5 | refs/heads/master | 2020-06-23T22:08:51.623524 | 2019-07-26T14:12:52 | 2019-07-26T14:12:52 | 198,767,932 | 0 | 0 | MIT | 2019-08-06T06:24:57 | 2019-07-25T06:05:51 | Python | UTF-8 | Python | false | false | 802 | py | """budgetproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# URL routing: delegate the site root to the budget app's urlconf and
# expose the Django admin at /admin/.
urlpatterns = [
    path('', include('budget.urls')),
    path('admin/', admin.site.urls),
]
| [
"markelvis37@gmail.com"
] | markelvis37@gmail.com |
fc2a5b8c56d6d6995033f1add6c0aa59388b4c7e | a68502d5a9f259ee82bff46440f580413fba2640 | /nutritionix_mcd_public.py | 08664de180e9bcbebe3e427a0b12e2b79877d5e1 | [] | no_license | jenzhenky/fast_food | 3e82e541f261b601a814c40ae27c343299ae23ca | 26cfcaf7f6f4999cbf955462ebd71e44563dd45f | refs/heads/master | 2020-03-27T18:56:07.441813 | 2018-09-22T23:59:20 | 2018-09-22T23:59:20 | 146,954,389 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | from nutritionix import Nutritionix
import json
# Nutritionix API client (credentials redacted in this copy).
nix = Nutritionix(app_id="XXXXXXXX", api_key="XXXXXXXXXXXXXXXXXXXXXXXXXXX")
# Initial query just to learn the total number of McDonald's items.
mcd_search = nix.search().nxql(queries={"brand_name": "mcdonald"}).json()
mcd_hits = mcd_search['total']
dict_mcd = []
# Page through the results 50 at a time. round(hits/50)+1 pages always
# cover every hit (at worst one trailing page comes back empty).
for i in range(round(mcd_hits/50)+1):
    data = nix.search().nxql(
        queries={"brand_name": "mcdonald"},
        fields=["brand_name", "brand_id", "item_name", "item_id", "updated_at", "nf_calories", "nf_total_fat", "nf_saturated_fat", "nf_cholesterol", "nf_sodium", "nf_total_carbohydrate", "nf_dietary_fiber", "nf_sugars", "nf_protein", "nf_servings_per_container", "nf_serving_size_qty", "nf_serving_size_unit", "nf_serving_weight_grams"],
        offset = i*50,
        limit = 50
    ).json()
    dict_mcd.append(data)
    # Fixed: removed a dead "i+=1" here -- the for statement reassigns i on
    # every pass, so the increment had no effect and was misleading.
with open('mcd.json', 'w') as f:
    json.dump(dict_mcd, f)
| [
"noreply@github.com"
] | jenzhenky.noreply@github.com |
574e6651485bda54703ff6affe5bca8b0db11e1f | 1f54501e3ed025464fbe2f42e9d9046b8c088cbe | /Config.py | 1c0142fe939a11f6ebbe4b6d91cc6ec7b035b5ee | [] | no_license | Anemun/AnemEvolutionSimulatorPy | 1b5decaba42d828bb4b446b5589dd88dfe1fefb4 | e2efdb1f682e7cc6107ef83f47be5be8582cf8e8 | refs/heads/master | 2020-03-31T21:00:46.285149 | 2019-08-27T14:40:10 | 2019-08-27T14:40:10 | 152,563,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | MAX_TICKS_PER_SECOND = 200
# Simulation configuration constants. Semantics below are inferred from the
# names and inline notes -- confirm against usage before relying on them.
# Display window size in pixels and world grid size in cells.
WINDOW_SIZE = (1200, 700) #default 1200 700
WORLD_SIZE = (165, 80) #default 165 80
# Pixel size used to draw one world cell.
DRAW_CELL_SIZE = (6, 6)
MAX_GENOME_DIFFERENCE = 1
PHOTOSYNTESIS_VALUE = 5
# Conversion ratios: food vs. organic matter into health/energy.
FOOD_TO_HEALTH = 1
FOOD_TO_ENERGY = 0
ORGANIC_TO_HEALTH = 0.75
ORGANIC_TO_ENERGY = 0.25
GOD_MODE = False # bots never die
# Organic decay: if enabled, organic matter disappears after ORGANIC_MAX_TIME.
ENABLE_ORGANIC_DISSAPEAR = True
ORGANIC_MAX_TIME = 150
RESPAWN_ORGANIC = True
RESPAWN_ORGANIC_COUNTDOWN = 300
# Death rules.
DIE_IF_NO_ROOM = True
DIE_OF_AGE = False
MAX_AGE = 10000
# Screenshot capture settings (delay is presumably in ticks -- TODO confirm).
captureScreenshots = True
screenshotDelay = 25
screenshotDefaultFolder = "./Screenshots"
| [
"anemun@gmail.com"
] | anemun@gmail.com |
02012e0e8d0fdad41864f5aae4f295faf8bbf69c | 932b8ef09482ba1c041b52d9b261b7e87fe428dc | /0529/qugongzhang.py | 0f8e12b0e3885b2dd3bf7d72fd12bd0fd558c584 | [] | no_license | yuehongxia/lala | 61a29787fa491c79672aa1160c569a2f911f33d3 | b9ea75caf5d8e20539d85028aba98c7a774a0166 | refs/heads/master | 2020-03-19T03:05:49.873175 | 2018-06-01T08:51:58 | 2018-06-01T08:51:58 | 135,694,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,165 | py | # -*- coding: utf-8 -*-
__author__ = 'River'
import cv2,os,shutil,datetime,re,time
from threading import Thread
from hashlib import md5
# im = cv2.imread("1.jpg") # read the image
# im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# cv2.imwrite("test/1/1.jpg", im_gray)
PICHASH= {}
def md5_file(name):
    """Return the hex MD5 digest of the file *name*, or None if it cannot
    be read (missing file, permissions, ...)."""
    try:
        m = md5()
        # Context manager guarantees the handle is closed even if read()
        # fails (the original leaked it); chunked reads keep memory flat
        # for large images.
        with open(name, 'rb') as a_file:
            for chunk in iter(lambda: a_file.read(1 << 20), b''):
                m.update(chunk)
        return m.hexdigest()
    except OSError:
        # Narrowed from a bare except: only I/O problems map to None,
        # programming errors now surface instead of being swallowed.
        return None
def nowater(dir,newdir,dirlist):
    """Process every subdirectory in *dirlist* under *dir*: for each image
    not seen before (tracked by MD5 in the global PICHASH), turn near-white
    pixels pure white and write the result as a .jpg under *newdir*;
    duplicates are served by copying the earlier result. Every file's hash
    is also appended to pichash.txt. (Presumably this whitening removes a
    stamp/watermark -- confirm with sample images.)"""
    global PICHASH
    for ppicdir in dirlist:
        if(os.path.isdir(dir+ppicdir)):
            sortfiles=os.listdir(dir+ppicdir)
            if '.DS_Store' in sortfiles:
                sortfiles.remove('.DS_Store')
            sortfiles.sort()
            for oldfile in sortfiles:
                filetype="."+oldfile.split(".")[len(oldfile.split("."))-1]
                picname_front=oldfile.split(filetype)[0]
                oldfile=dir+ppicdir+"/"+oldfile
                jpgname=picname_front+".jpg"
                jpgname=newdir+ppicdir+"/"+jpgname
                try:
                    oldfile_hash=md5_file(oldfile)
                    oldfile_tmphashvalue=PICHASH.get(oldfile_hash)
                    file_object = open('pichash.txt', 'a')
                    file_object.write(oldfile+":"+oldfile_hash+'\n')
                    file_object.close()
                    if(oldfile_tmphashvalue==None):# unseen hash: images processed before are never processed again
                        if not os.path.exists(newdir+ppicdir):
                            os.makedirs(newdir+ppicdir)
                        #print oldfile  # debug
                        #print jpgname  # debug
                        print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+","+oldfile+",img\n")
                        img=cv2.imread(oldfile)
                        x,y,z=img.shape
                        if x < 10:# skip files that are too small
                            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+","+jpgname+"文件太小,跳过")
                        elif x >8000:# skip files that are too large
                            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+","+jpgname+"文件太大,跳过")
                        elif not os.path.exists(jpgname):# the key step: whiten near-white pixels
                            for i in range(x):
                                for j in range(y):
                                    varP=img[i,j]
                                    if sum(varP)>350 and sum(varP)<765:# channel sum in (350, 765); pure white sums to 765
                                        img[i,j]=[255,255,255]
                                    #cv2.imwrite(jpgname,img,[int(cv2.IMWRITE_JPEG_QUALITY),70])# failed on linux
                            cv2.imwrite(jpgname,img)
                            print ("jpgname:"+jpgname)
                            PICHASH[oldfile_hash]=oldfile
                            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+","+oldfile+",done\n")
                        else:
                            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+","+jpgname+"文件已存在,跳过\n")
                    elif(oldfile_tmphashvalue!=None):
                        if(os.path.exists(jpgname)):
                            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+","+jpgname+"文件已存在,跳过\n")
                        else:
                            shutil.copyfile(oldfile_tmphashvalue,oldfile)
                            shutil.copyfile(oldfile,jpgname)
                            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+","+jpgname+"和老文件一样,拷贝旧文件,跳过")
                except Exception as e:
                    print ("Exception:",e)
                    continue
if __name__=='__main__':
    dir="old/"
    newdir="new/"
    list0=[]
    list1=[]
    list2=[]
    list3=[]
    list4=[]
    for ppicdir in os.listdir(dir) :# build several lists so multiple directory groups are processed concurrently
        if(os.path.isdir(dir+ppicdir)):
            # Bucket subdirectories by the first character of their name.
            if (re.compile(r'^[0-1].*').match(str(ppicdir))):
                list0.append(ppicdir)
            elif(re.compile(r'^[2-3].*').match(str(ppicdir))):
                list1.append(ppicdir)
            elif(re.compile(r'^[4-5].*').match(str(ppicdir))):
                list2.append(ppicdir)
            elif(re.compile(r'^[6-7].*').match(str(ppicdir))):
                list3.append(ppicdir)
            elif(re.compile(r'^[8-9].*').match(str(ppicdir))):
                list4.append(ppicdir)
            else:
                continue
    # Start 5 worker threads, one per directory group, for parallel processing.
    Thread(target=nowater,args=(dir,newdir,list0)).start()# args is a plain 3-tuple, same as the calls below
    Thread(target=nowater,args=(dir,newdir,list1,)).start()
    Thread(target=nowater,args=(dir,newdir,list2,)).start()
    Thread(target=nowater,args=(dir,newdir,list3,)).start()
    Thread(target=nowater,args=(dir,newdir,list4,)).start()
"810996753@qq.com"
] | 810996753@qq.com |
eed30255ebda6a4fad2b3084385f63ee121dbaca | 950be9cb9c532f297481306981e8b1c8febbed9a | /Volume 7/src/Chocolate 2.py | 1642c7623acbf3d76c5d7afa2c792761cace72bf | [] | no_license | MyCatWantsToKillYou/TimusTasks | 9b075b19d54cf048517c0f6e08b509fd51e8a8a5 | 8064e0ca6671ec4cfa5c1d588d6d714562afa69a | refs/heads/master | 2023-07-07T19:20:46.508301 | 2023-06-29T10:54:30 | 2023-06-29T10:54:30 | 132,224,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | # task #1639
# Difficulty 34
m, n = map(int, input().split())
if m*n % 2 == 0:
print('[:=[first]')
else:
print('[second]=:]') | [
"pinigin_iv@taximaxim.ru"
] | pinigin_iv@taximaxim.ru |
e659b2e49a6a7398480fd8a60df4fb67546b840f | 99c09d59577bdbccc2526d1a91f309d43cbf2381 | /Implementacao/main/mytree.py | a80edf03357714f0eedc886b2de02a1153960716 | [] | no_license | JulianoCP/compiler | 98cf4094eaede7341241473c7fcfa3b87608b73c | ac355eda914638b4e26c80831944a3172d1c93df | refs/heads/master | 2023-05-04T10:36:33.793532 | 2021-05-16T19:52:20 | 2021-05-16T19:52:20 | 246,413,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | from anytree import Node, RenderTree, AsciiStyle, PreOrderIter
from anytree.exporter import DotExporter
from anytree import NodeMixin, RenderTree
node_sequence = 0
class MyNode(NodeMixin):
    """anytree node with an auto-generated sequential id and display label.

    The module-level ``node_sequence`` counter is incremented on every
    construction (even when an explicit ``id`` is supplied).
    """
    def __init__(self, name, parent=None, id=None, type=None, label=None, children=None):
        super(MyNode, self).__init__()
        global node_sequence
        if (id):
            self.id = id
        else:
            self.id = str(node_sequence) + ': ' + str(name)
        # NOTE(review): the *label* parameter is ignored and label is set to
        # name -- looks unintentional, but kept for backward compatibility.
        self.label = name
        self.name = name
        node_sequence = node_sequence + 1
        self.type = type
        self.parent = parent
        if children:
            self.children = children
    def clearChildren(self):
        # Bug fix: the original signature omitted *self*, so any instance
        # call raised TypeError (and the body's *self* was an undefined
        # name). Also, anytree's children setter expects an iterable, so an
        # empty tuple is used to detach all children instead of None.
        self.children = ()
        return
def nodenamefunc(node):
    """Return the node's name rendered as a string."""
    return str(node.name)
def nodeattrfunc(node):
    """Return the node's name rendered as a string (same as the label)."""
    return str(node.name)
def edgeattrfunc(node, child):
    """Edges carry no extra attributes, so this is always empty."""
    return ''
def edgetypefunc(node, child):
    """All edges use the undirected '--' connector."""
    return '--'
| [
"julianopetini@alunos.utfpr.edu.br"
] | julianopetini@alunos.utfpr.edu.br |
2770b1389482e8828867d97a6aa1bf0489db3e64 | 4fc86f5c444f52619f9f748c9bad5bf3e0e2c0b2 | /megatron/checkpointing.py | ddada534b7786ee8a8fd8062495fd8c81ab1ff93 | [
"MIT",
"Apache-2.0"
] | permissive | Xianchao-Wu/megatron2 | 95ea620b74c66e51f9e31075b1df6bb1b761678b | f793c37223b32051cb61d3b1d5661dddd57634bf | refs/heads/main | 2023-08-17T03:42:31.602515 | 2021-09-24T05:12:00 | 2021-09-24T05:12:00 | 330,527,561 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,103 | py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input/output checkpointing."""
import os
import random
import sys
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as torchDDP
from megatron import mpu, get_args, update_num_microbatches
from megatron import get_args
from megatron import print_rank_0
_CHECKPOINT_VERSION = None
def set_checkpoint_version(value):
    """Record the checkpoint format version; may only be called once."""
    global _CHECKPOINT_VERSION
    assert _CHECKPOINT_VERSION is None, "checkpoint version already set"
    _CHECKPOINT_VERSION = value
def get_checkpoint_version():
    """Return the recorded checkpoint format version (None if unset)."""
    # Reading a module global needs no `global` declaration.
    return _CHECKPOINT_VERSION
def check_checkpoint_args(checkpoint_args):
    """Ensure fixed arguments for a model are the same for the input
    arguments and the one retrieved from checkpoint."""
    args = get_args()

    def _compare(arg_name, old_arg_name=None):
        # Older checkpoints may store the value under a legacy name.
        source_name = old_arg_name if old_arg_name is not None else arg_name
        checkpoint_value = getattr(checkpoint_args, source_name)
        args_value = getattr(args, arg_name)
        error_message = '{} value from checkpoint ({}) is not equal to the ' \
                        'input argument value ({}).'.format(
                            arg_name, checkpoint_value, args_value)
        assert checkpoint_value == args_value, error_message

    for fixed_arg in ('num_layers', 'hidden_size', 'num_attention_heads',
                      'max_position_embeddings', 'make_vocab_size_divisible_by',
                      'padded_vocab_size', 'tokenizer_type'):
        _compare(fixed_arg)
    if get_checkpoint_version() < 3.0:
        # Pre-3.0 checkpoints used a single combined model-parallel size.
        _compare('tensor_model_parallel_size',
                 old_arg_name='model_parallel_size')
    else:
        _compare('tensor_model_parallel_size')
        _compare('pipeline_model_parallel_size')
def ensure_directory_exists(filename):
    """Build filename's path if it does not already exist."""
    dirname = os.path.dirname(filename)
    # exist_ok avoids the check-then-create race when several ranks create
    # the directory concurrently; the guard skips bare filenames, where
    # dirname == '' and makedirs would raise.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
def get_checkpoint_name(checkpoints_path, iteration,
                        release=False):
    """A unified checkpoint name.

    Layout: <checkpoints_path>/<iter_or_release>/<rank_dir>/model_optim_rng.pt,
    where the rank directory encodes the tensor-parallel rank and, when
    pipeline parallelism is active, the pipeline rank as well.
    """
    directory = 'release' if release else 'iter_{:07d}'.format(iteration)
    tp_rank = mpu.get_tensor_model_parallel_rank()
    if mpu.get_pipeline_model_parallel_world_size() == 1:
        rank_dir = 'mp_rank_{:02d}'.format(tp_rank)
    else:
        rank_dir = 'mp_rank_{:02d}_{:03d}'.format(
            tp_rank, mpu.get_pipeline_model_parallel_rank())
    return os.path.join(checkpoints_path, directory, rank_dir,
                        'model_optim_rng.pt')
def get_checkpoint_tracker_filename(checkpoints_path):
    """Tracker file records the latest checkpoint written during training
    so a restart can resume from it."""
    tracker_name = 'latest_checkpointed_iteration.txt'
    return os.path.join(checkpoints_path, tracker_name)
def save_checkpoint(iteration, model, optimizer, lr_scheduler):
    """Save a model checkpoint (model, optimizer, LR scheduler, RNG states).

    Only rank 0 of each data-parallel group writes its shard; all ranks
    synchronize before the tracker file is updated so a reader never sees a
    tracker pointing at a partially written checkpoint.
    """
    args = get_args()

    # Unwrap DistributedDataParallel so we serialize the bare module.
    if isinstance(model, torchDDP):
        model = model.module

    if torch.distributed.get_rank() == 0:
        print('saving checkpoint at iteration {:7d} to {}'.format(
            iteration, args.save), flush=True)

    # Only rank zero of the data parallel group writes to the disk; each
    # model-parallel rank writes its own shard (see get_checkpoint_name).
    if mpu.get_data_parallel_rank() == 0:

        # Arguments, iteration, and model.
        state_dict = {}
        state_dict['args'] = args
        state_dict['checkpoint_version'] = 3.0
        state_dict['iteration'] = iteration
        state_dict['model'] = model.state_dict_for_save_checkpoint()

        # Optimizer stuff (skipped with --no-save-optim).
        if not args.no_save_optim:
            if optimizer is not None:
                state_dict['optimizer'] = optimizer.state_dict()
            if lr_scheduler is not None:
                state_dict['lr_scheduler'] = lr_scheduler.state_dict()

        # RNG states: python, numpy, torch CPU/CUDA and the model-parallel
        # CUDA RNG tracker, so a resumed run is bit-reproducible.
        if not args.no_save_rng:
            state_dict['random_rng_state'] = random.getstate()
            state_dict['np_rng_state'] = np.random.get_state()
            state_dict['torch_rng_state'] = torch.get_rng_state()
            state_dict['cuda_rng_state'] = torch.cuda.get_rng_state()
            state_dict['rng_tracker_states'] \
                = mpu.get_cuda_rng_tracker().get_states()

        # Save.
        checkpoint_name = get_checkpoint_name(args.save, iteration)
        ensure_directory_exists(checkpoint_name)
        torch.save(state_dict, checkpoint_name)

    # Wait so everyone is done (necessary): the tracker below must only be
    # written once every rank has finished its shard.
    torch.distributed.barrier()
    if torch.distributed.get_rank() == 0:
        print(' successfully saved checkpoint at iteration {:7d} to {}'.format(
            iteration, args.save), flush=True)
    # And update the latest iteration
    if torch.distributed.get_rank() == 0:
        tracker_filename = get_checkpoint_tracker_filename(args.save)
        with open(tracker_filename, 'w') as f:
            f.write(str(iteration))
    # Wait so everyone is done (not necessary)
    torch.distributed.barrier()
def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load'):
    """Load a model checkpoint and return the iteration.

    Restores model weights and, unless disabled by --finetune /
    --no-load-optim / --no-load-rng or a 'release' checkpoint, also the
    optimizer, LR scheduler, consumed-sample counters and all RNG states.
    Exits the process on unrecoverable load errors.
    """
    args = get_args()
    load_dir = getattr(args, load_arg)

    # Unwrap DDP so state_dict keys match what was saved.
    if isinstance(model, torchDDP):
        model = model.module
    # Read the tracker file and set the iteration.
    tracker_filename = get_checkpoint_tracker_filename(load_dir)

    # If no tracker file, return iteration zero (fresh random init).
    if not os.path.isfile(tracker_filename):
        print_rank_0('WARNING: could not find the metadata file {} '.format(
            tracker_filename))
        print_rank_0(' will not load any checkpoints and will start from '
                     'random')
        return 0

    # Otherwise, read the tracker file and either set the iteration or
    # mark it as a release checkpoint (tracker contains 'release').
    iteration = 0
    release = False
    with open(tracker_filename, 'r') as f:
        metastring = f.read().strip()
        try:
            iteration = int(metastring)
        except ValueError:
            release = metastring == 'release'
            if not release:
                print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(
                    tracker_filename))
                sys.exit()
    assert iteration > 0 or release, 'error parsing metadata file {}'.format(
        tracker_filename)

    # Checkpoint.
    checkpoint_name = get_checkpoint_name(load_dir, iteration, release)
    if torch.distributed.get_rank() == 0:
        print(' loading checkpoint from {} at iteration {}'.format(
            args.load, iteration), flush=True)

    # Load the checkpoint into CPU memory.
    try:
        print('checkpoint_name={}'.format(checkpoint_name))
        state_dict = torch.load(checkpoint_name, map_location='cpu')
    except ModuleNotFoundError:
        from megatron.fp16_deprecated import loss_scaler
        # For backward compatibility: old checkpoints were pickled against
        # the pre-refactor fp16 module layout, so alias the deprecated
        # module under its historical names, retry, then remove the aliases.
        print_rank_0(' > deserializing using the old code structure ...')
        sys.modules['fp16.loss_scaler'] = sys.modules[
            'megatron.fp16_deprecated.loss_scaler']
        sys.modules['megatron.fp16.loss_scaler'] = sys.modules[
            'megatron.fp16_deprecated.loss_scaler']
        state_dict = torch.load(checkpoint_name, map_location='cpu')
        sys.modules.pop('fp16.loss_scaler', None)
        sys.modules.pop('megatron.fp16.loss_scaler', None)
    except BaseException:
        print_rank_0('could not load the checkpoint')
        sys.exit()

    # set checkpoint version (0 for pre-versioned checkpoints)
    set_checkpoint_version(state_dict.get('checkpoint_version', 0))

    # Set iteration.  Finetuning and release checkpoints restart counting.
    if args.finetune or release:
        iteration = 0
    else:
        try:
            iteration = state_dict['iteration']
        except KeyError:
            try:  # Backward compatible with older checkpoints
                iteration = state_dict['total_iters']
            except KeyError:
                print_rank_0('A metadata file exists but unable to load '
                             'iteration from checkpoint {}, exiting'.format(
                                 checkpoint_name))
                sys.exit()

    # Check arguments.  The counters must not have been set before loading.
    assert args.consumed_train_samples == 0
    assert args.consumed_valid_samples == 0
    if 'args' in state_dict:
        checkpoint_args = state_dict['args']
        check_checkpoint_args(checkpoint_args)
        args.consumed_train_samples = getattr(checkpoint_args,
                                              'consumed_train_samples', 0)
        update_num_microbatches(consumed_samples=args.consumed_train_samples)
        args.consumed_valid_samples = getattr(checkpoint_args,
                                              'consumed_valid_samples', 0)
    else:
        print_rank_0('could not find arguments(args) in the checkpoint ...')

    # Model weights.
    model.load_state_dict(state_dict['model'])

    # Optimizer.
    if not release and not args.finetune and not args.no_load_optim:
        try:
            if optimizer is not None:
                optimizer.load_state_dict(state_dict['optimizer'])
            if lr_scheduler is not None:
                lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
        except KeyError:
            print_rank_0('Unable to load optimizer from checkpoint {}. '
                         'Specify --no-load-optim or --finetune to prevent '
                         'attempting to load the optimizer state, '
                         'exiting ...'.format(checkpoint_name))
            sys.exit()

    # rng states.
    if not release and not args.finetune and not args.no_load_rng:
        try:
            random.setstate(state_dict['random_rng_state'])
            np.random.set_state(state_dict['np_rng_state'])
            torch.set_rng_state(state_dict['torch_rng_state'])
            torch.cuda.set_rng_state(state_dict['cuda_rng_state'])
            mpu.get_cuda_rng_tracker().set_states(
                state_dict['rng_tracker_states'])
        except KeyError:
            print_rank_0('Unable to load optimizer from checkpoint {}. '
                         'Specify --no-load-rng or --finetune to prevent '
                         'attempting to load the optimizer state, '
                         'exiting ...'.format(checkpoint_name))
            sys.exit()

    torch.distributed.barrier()
    if torch.distributed.get_rank() == 0:
        print(' successfully loaded checkpoint from {} at iteration {}'.format(
            args.load, iteration), flush=True)

    return iteration
def load_ict_checkpoint(model, only_query_model=False, only_block_model=False, from_realm_chkpt=False):
    """selectively load ICT models for indexing/retrieving from ICT or REALM checkpoints"""
    args = get_args()

    if isinstance(model, torchDDP):
        model = model.module

    # REALM checkpoints live under --load; standalone ICT ones under --ict-load.
    load_path = args.load if from_realm_chkpt else args.ict_load

    tracker_filename = get_checkpoint_tracker_filename(load_path)
    with open(tracker_filename, 'r') as f:
        iteration = int(f.read().strip())

    # assert iteration > 0
    checkpoint_name = get_checkpoint_name(load_path, iteration, False)
    if mpu.get_data_parallel_rank() == 0:
        print('global rank {} is loading checkpoint {}'.format(
            torch.distributed.get_rank(), checkpoint_name))

    state_dict = torch.load(checkpoint_name, map_location='cpu')
    ict_state_dict = state_dict['model']
    if from_realm_chkpt and mpu.get_data_parallel_rank() == 0:
        print(" loading ICT state dict from REALM", flush=True)
        # REALM nests the ICT weights under its retriever sub-module.
        ict_state_dict = ict_state_dict['retriever']['ict_model']

    # Drop the half of the dual encoder the caller does not want loaded.
    if only_query_model:
        ict_state_dict.pop('context_model')
    if only_block_model:
        ict_state_dict.pop('question_model')

    model.load_state_dict(ict_state_dict)
    torch.distributed.barrier()

    if mpu.get_data_parallel_rank() == 0:
        print(' successfully loaded {}'.format(checkpoint_name))

    return model
| [
"wuxianchao@gmail.com"
] | wuxianchao@gmail.com |
f3cd6bac07015855fda51cdb78a4a13de9ede91a | 9e867267783b300123fb2a22767ebda88cbd2e90 | /Week0/is_int_palindrom/tests.py | d678e4e7618ba773c97aab3f104bd828fd5e5439 | [] | no_license | tblazhev/HackBulgaria-Programming101 | 7bb2d40e9426d1d400237e91d7e42adb2f567bc8 | 96d61e446a7b0ced00b0b3491643ad105d9eb4ac | refs/heads/master | 2020-05-18T14:08:31.383772 | 2014-04-24T09:23:21 | 2014-04-24T09:23:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import unittest
from solution import is_int_palindrom
class IsIntPalindrom(unittest.TestCase):
    """Unit tests for the is_int_palindrom solution."""

    def test_is_int_palindrom(self):
        # Palindromic integers read the same forwards and backwards.
        for value in (1, 100001, 999):
            self.assertTrue(is_int_palindrom(value))
        # Non-palindromic integers must be rejected.
        for value in (42, 123):
            self.assertFalse(is_int_palindrom(value))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"tblazhev@users.noreply.github.com"
] | tblazhev@users.noreply.github.com |
b93217b9416afc18854af759391aa20842520bf3 | 0e303e893bf7250e202a67b28dc036b557c6617e | /viber/__init__.py | fc1d2299a9908df4a4ae18a824193c46e8d00900 | [] | no_license | iCodeIN/aiohttp_viber | f8949e72f3ec4005dd8ecf498751429b4d39d2e8 | b2beb157d8e161b1e86c77405dda3a749c348b7e | refs/heads/master | 2023-03-10T17:25:45.021553 | 2020-03-27T22:00:12 | 2020-03-27T22:00:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | import logging
"""
Loggers Definitions
"""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
| [
"eyaadh@gmail.com"
] | eyaadh@gmail.com |
f8c2a51c18bbeba2f1a6eba413516ca9d60b67ad | afa6048527e6e43000057ea8a2a92305644842e3 | /posts/urls.py | 539ab577db7eedea87cbe69b0f9875cd3ce12e39 | [] | no_license | davidnamango/simplesocial | 4fc1a04a1da7ba638c07cc3a652965a5d7de955d | aa27d709ff0ff319cefc7719d8158621b5522373 | refs/heads/main | 2023-01-11T23:05:34.761203 | 2020-11-16T22:03:44 | 2020-11-16T22:03:44 | 313,180,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Posts urls.py
from django.urls import path
from . import views

# URL namespace; reverse with e.g. 'posts:all'.
app_name = 'posts'

urlpatterns = [
    # List every post.
    path('', views.PostList.as_view(), name='all'),
    # Create a new post.
    path('new/', views.CreatePost.as_view(), name='create'),
    # All posts by one user.
    path('by/<username>', views.UserPosts.as_view(), name='for_user'),
    # A single post (detail view).
    path('by/<username>/<pk>/', views.PostDetail.as_view(), name='single'),
    # Delete a post by primary key.
    path('delete/<pk>', views.DeletePost.as_view(), name='delete')
]
| [
"davidnaanderson@gmail.com"
] | davidnaanderson@gmail.com |
9184b2f03e45cbabffdc1bdf821e9c9ffea10b6e | a6aa5c5380267a1d233ca0dcd0963416fc5807c6 | /upload/migrations/0002_second_data.py | c1c74d4a3242576c293397d3ce0289dd75e28cd3 | [] | no_license | xxxxsars/report_generated | 90bb2b4bd94c5c9bd64a2a818af975fd145be131 | 77dcbda25e5993906f5d22575f9fa3f258e5b0e7 | refs/heads/master | 2017-11-11T12:47:28.603556 | 2017-03-07T01:06:01 | 2017-03-07T01:06:01 | 83,805,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 01:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.5: adds the Second_Data model, which
    # stores a per-class absenteeism record.  Do not edit by hand.

    dependencies = [
        ('upload', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Second_Data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Second_Class', models.CharField(max_length=100)),
                ('Second_name', models.CharField(max_length=100)),
                ('Second_num', models.CharField(max_length=100)),
                ('Second_absenteeism', models.IntegerField()),
            ],
        ),
    ]
| [
"xxxxsars@gmail.com"
] | xxxxsars@gmail.com |
2ab9c6aae658796991d04ae8393361738813a7fb | b6233af6a39e7ab500743d6b2ac7d52f68ae3be2 | /15/00/2.py | b738c43f76091d7e37910b5e9ab030f6c459d903 | [
"CC0-1.0"
] | permissive | pylangstudy/201712 | 9754526e1d8f1c0519fcce98bc7df803f456cc4e | f18f1251074729c4a3865b113edc89ec06b54130 | refs/heads/master | 2021-09-02T06:08:08.278115 | 2017-12-30T23:04:55 | 2017-12-30T23:04:55 | 112,670,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | import socket, array
def send_fds(sock, msg, fds):
    """Send `msg` over the Unix socket `sock` together with the open file
    descriptors in `fds`, using an SCM_RIGHTS ancillary message."""
    ancillary = [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                  array.array("i", fds))]
    return sock.sendmsg([msg], ancillary)
| [
"pylangstudy@yahoo.co.jp"
] | pylangstudy@yahoo.co.jp |
b7ccc85c33abe9dfe714212db07ee1eb7790bc1e | d712f4cad7a1207f45118b10fdf0e648012b56d5 | /mailbrute.py | dda4a065070dcfda1c31638ad114b04b9ac24280 | [] | no_license | code-scan/MailBrute | 04cff542cff8eb69cc81b04d3cd42e6829d6ac74 | ca21ca4a5dfebb8073986c008f571d650430e9b4 | refs/heads/master | 2016-08-11T16:04:12.502053 | 2015-11-18T07:20:26 | 2015-11-18T07:20:26 | 46,402,203 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | #coding=utf-8
import smtplib
import threadpool
from sys import argv
import sys
import time
def print_result(request, result):
    # threadpool completion callback: `request` is the finished WorkRequest
    # (required by the callback signature, unused); `result` is login()'s
    # return value, i.e. the username just tried.
    scanow="[*]Scan %s "%result
    # Rewrite the current console line in place as a progress indicator.
    sys.stdout.write(str(scanow)+" "*25+"\b\b\r")
    sys.stdout.flush()
    time.sleep(0.05)
def login(logininfo):
try:
logins=logininfo
logininfo=logininfo.split(" ")
username=logininfo[0]
password=logininfo[1]
trycount=0
smtp=smtplib.SMTP()
smtp.connect(argv[4])
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(username,password)
smtp.quit()
print "[*]Success %s %s"%(username,password)
except Exception,ex:
pass
return username
# --- command-line handling and brute-force driver (Python 2 script) ---
# Usage: python smtp.py user.txt pass.txt domain.com smtp.server.com
if len(argv)!=5:
    print "\n Cond0r@Codescan\nUseage:\n python smtp.py user.txt pass.txt domain.com smtp.server.com"
    exit()
# Read the username and password wordlists, one entry per line.
user=open(argv[1]).read()
user=user.split("\n")
pass_=open(argv[2]).read()
pass_=pass_.split("\n")
domain=argv[3]
# Build every 'user@domain password' combination to try.
burp=[]
for u in user:
    for p in pass_:
        info="%s@%s %s"%(u,domain,p)
        burp.append(info)
# De-duplicate the combinations before queueing them.
burp=set(burp)
print " Task Run\n Task Count:%d\n\n"%len(burp)
# Fan the attempts out over 10 worker threads; print_result reports progress.
pool = threadpool.ThreadPool(10)
requests = threadpool.makeRequests(login, burp, print_result)
[pool.putRequest(req) for req in requests]
pool.wait()
| [
"cond0r@126.com"
] | cond0r@126.com |
17fd8908609e0c7f713468722f867f4c70981b9b | 6e65a4459623cd1190f02f7a16815753e1049aa1 | /python/tools/plot_rtd.py | a7147f6242d3a385899c6ad152ac08f8a61845a8 | [
"BSD-3-Clause"
] | permissive | alexchartier/digital_rf | 2d79d7d4b5f19d1f60a73632e8a0d0a25b0d512c | cb548266bac910eeee12c210cf31e36809a38565 | refs/heads/master | 2021-05-16T23:47:16.333751 | 2020-03-27T12:18:04 | 2020-03-27T12:18:04 | 250,521,784 | 0 | 0 | NOASSERTION | 2020-03-27T11:53:52 | 2020-03-27T11:53:52 | null | UTF-8 | Python | false | false | 13,959 | py | import numpy as np
import numpy.matlib
import pdb
import pickle
import os
import datetime as dt
import glob
import matplotlib.dates as mdates
import matplotlib.colors
import matplotlib.pyplot as plt
from argparse import ArgumentParser
"""
Plot multi-frequency returns as a function of altitude and time
"""
def save_daily_files(op):
    """Top-level driver: locate per-spectrum pickles under the channel
    directory, optionally (re)build daily concatenated files, and plot
    either per-day figures or one figure spanning the selected days.

    Mutates `op` in place (chdir/outdir/plotdir are attached to it).
    """
    op.chdir = os.path.join(op.datadir, os.path.join('prc_analysis', op.ch))
    assert os.path.isdir(op.chdir), 'Directory not found: %s' % op.chdir
    specdir = os.path.join(op.chdir, 'spectra')
    assert os.path.isdir(specdir), 'No spectra found in chdir - check %s' % op.chdir
    print('Processing daily plots in %s' % op.chdir)

    # Set up output dirs.  exist_ok replaces the old bare try/except, which
    # also hid real errors such as permission failures.
    op.outdir = os.path.join(op.chdir, 'daily/data/')
    op.plotdir = os.path.join(op.chdir, 'plots/')
    os.makedirs(op.outdir, exist_ok=True)
    os.makedirs(op.plotdir, exist_ok=True)

    # Get metadata first: one meta_<freq>_*.pkl per transmit frequency.
    # `meta` is initialized unconditionally so preproc_spectra below cannot
    # hit a NameError when no metadata files are present.
    meta = {}
    metafiles = glob.glob(os.path.join(specdir, 'meta*.pkl'))
    for fname in metafiles:
        freq = float(fname.split('/')[-1].split('_')[1])
        with open(fname, 'rb') as f:
            meta[freq] = pickle.load(f)

    # Clean up input data directory names (one directory per day, YYYYMMDD).
    dirnames = [os.path.join(specdir, dn) for dn in os.listdir(specdir)]
    dirnames = [dn for dn in dirnames if os.path.isdir(dn)]
    dirnames.sort()

    # Go through individual spectra files and process them into daily files.
    out_fname_fmt = os.path.join(op.outdir, '%Y%b%d_analysis.pkl')
    if op.reproc:
        preproc_spectra(dirnames, meta, out_fname_fmt)

    # Ignore days outside the requested limits.
    good_dirn = []
    if op.daylim:
        for dirn in dirnames:
            day = dt.datetime.strptime(dirn.split('/')[-1], '%Y%m%d')
            if (day >= op.startday) and (day <= op.endday):
                good_dirn.append(dirn)

    # Plot the pre-processed output: one figure per day, or one combined.
    if op.daily:
        for dirn in good_dirn:
            dl = load_daily(dirn, out_fname_fmt)
            plot(dl, op)
    else:
        data = concat_files(good_dirn, out_fname_fmt)
        plot(data, op)
def concat_files(good_dirn, out_fname_fmt):
    """Concatenate per-day analysis dicts into one dict.

    The structure is {freq: {key: value}}.  List-valued entries from later
    days are appended to the running lists; non-list entries (e.g. the
    range/doppler axes) are kept from the first day that provides them.

    Replaces the old fragile `'data' in dir()` first-iteration check with an
    explicit None sentinel, and tolerates a frequency that first appears on
    a later day (previously a KeyError).
    """
    data = None
    for dirn in good_dirn:
        dl = load_daily(dirn, out_fname_fmt)
        if data is None:
            # First day seen: use it directly as the accumulator.
            data = dl
            continue
        for freq, day_vals in dl.items():
            if freq not in data:
                data[freq] = day_vals
                continue
            for key, val in day_vals.items():
                if isinstance(val, list):
                    if key in data[freq]:
                        data[freq][key].extend(val)
                    else:
                        data[freq][key] = val
    return data
def load_daily(dirn, out_fname_fmt):
    """Load the pre-processed daily analysis pickle for day directory `dirn`
    (directory basename is YYYYMMDD; the pickle path comes from strftime on
    `out_fname_fmt`)."""
    pkl_count = len(glob.glob(os.path.join(dirn, '*.pkl')))
    print('Loading %s (%i files)' % (dirn, pkl_count))
    day = dt.datetime.strptime(dirn.split('/')[-1], '%Y%m%d')
    with open(day.strftime(out_fname_fmt), 'rb') as fh:
        daily = pickle.load(fh)
    return daily
def plot(data, op, plot_fname=None):
    # Draw one stacked subplot per frequency (power or Doppler vs range/time)
    # and either show the figure or save it under op.plotdir.
    # NOTE(review): `plot_fname` is accepted but never used -- confirm intent.
    params = {
        'font.size': 13,
    }
    plt.rcParams.update(params)
    freqs, tlim = get_freqs_tlim(data, fmin=op.fmin, fmax=op.fmax)
    # Set up the subplots (dark background unless --white_bg was given).
    if not op.white_bg:
        plt.style.use('dark_background')
    fig, ax = plt.subplots(len(freqs), 1, figsize=(12, 8))
    # A single subplot comes back as a bare Axes; normalize to an array.
    if not isinstance(ax, np.ndarray):
        ax = np.array([ax,])
    # Run through frequencies and plot, highest frequency on top.
    for ind, freq in enumerate(freqs[::-1]):
        print('plotting freq %2.2f MHz' % freq)
        # Only the bottom panel gets time-axis labels.
        xlabels = True if ind == len(freqs) - 1 else False
        times = data[freq].pop('time')
        im, colorlabel = plt_frq(
            data[freq], fig, ax[ind], tlim, freq=freq, plottype=op.type,
            xlabels=xlabels, vht=op.virt_ht, daylim=op.daylim, white_bg=op.white_bg,
        )
    # Shared colorbar; uses the mesh from the last frequency plotted.
    fig.subplots_adjust(right=0.8)
    cax = fig.add_axes([0.85, 0.2, 0.03, 0.67])
    cbar = plt.colorbar(im, cax=cax)
    cbar.set_label(colorlabel)
    if op.daylim:
        fig.suptitle('%s to %s' % (\
            times[0].strftime('%Y-%m-%d'),
            times[-1].strftime('%Y-%m-%d'),
        ))
    else:
        fig.suptitle(times[0].strftime('%Y-%m-%d '))
    # Plot or save figures; filename encodes plot type and date range.
    timestr = times[0].strftime('%Y%b%d')
    if not op.daily:
        timestr += times[-1].strftime('_to_%Y%b%d')
    fig_fname = os.path.join(
        op.plotdir, '%s_%s.png' % (op.type, timestr),
    )
    if len(freqs) > 0:
        if op.show:
            plt.show()
        else:
            # Overwrite any previous figure for the same period.
            if os.path.isfile(fig_fname):
                os.remove(fig_fname)
            plt.savefig(fig_fname)
            print('Saving to %s'% fig_fname)
def plt_frq(spectra, fig, ax, tlim, freq=None, plottype='power', xlabels=False, vht=False, daylim=False, white_bg=True):
    # Render one frequency's spectra onto `ax` as a pcolormesh of received
    # power or Doppler velocity against range (or virtual height) and time.
    # Returns (mesh, colorbar label) so the caller can attach a colorbar.
    # NOTE(review): reads the module-global `op` for range limits -- this
    # only works after the argparse block has run; confirm for library use.
    # Add endpoints to all the times, fill in with NaNs for non-recorded times
    ranges = spectra['range']
    times = spectra['time']
    alts = calc_vht(ranges)
    doppler = spectra['doppler']
    if vht:
        y_ax = alts
        ylabel = 'vHt (km)'
        ylim = (0, 800)
        width = 60
    else:
        y_ax = ranges
        ylabel = 'Rg (km)'
        ylim = (op.rmin, op.rmax)
        width = 60
    # Each observation is drawn as a `width`-second wide cell: interleave
    # the start times with start+width to delimit the cell edges.
    times = np.array([times, times + dt.timedelta(seconds=width)]).T.flatten()
    for key in ['max_pwr_db', 'dop_vel']:
        val = spectra[key]
        tdim = val.shape[0]
        # Interleave NaN rows so gaps between observations render blank.
        vals_ext = np.zeros((tdim * 2, val.shape[1])) * np.nan
        vals_ext[np.arange(tdim) * 2, :] = val
        spectra[key] = vals_ext
    # Normalize power
    A = spectra['max_pwr_db'].copy()
    sortind = np.argsort(times)
    # Set plot labels
    ax.grid(which='both', linestyle='--')
    ylabel = ('%2.2f MHz\n' + ylabel) % freq if freq else ylabel
    ax.set_ylabel(ylabel)
    if xlabels:
        # Multi-day plots label by date; single-day plots by time of day.
        fmstr = '%d %b' if daylim else '%H:%M'
        ax.set_xlabel('Time (UT)')
        ax.xaxis.set_major_formatter(mdates.DateFormatter(fmstr))
        fig.autofmt_xdate()
    else:
        ax.set_xlabel('')
        ax.set_xticklabels('')
    # Set colours (zero values are masked out as NaN in both modes).
    if plottype == 'doppler':
        plotvals = spectra['dop_vel'].copy()[sortind, :]
        plotvals[plotvals == 0] *= np.nan
        cmapn = 'seismic_r' if white_bg else 'seismic'
        cmap = plt.get_cmap(cmapn)
        norm = matplotlib.colors.Normalize(vmin=-400, vmax=400)
        colorlabel = 'Doppler velocity (m/s)'
        title = 'Range-Time-Doppler'
    elif plottype == 'power':
        plotvals = A[sortind, :]
        plotvals[plotvals == 0] *= np.nan
        cmapn = 'gist_heat_r' if white_bg else 'gist_heat'
        cmap = plt.get_cmap(cmapn)
        norm = matplotlib.colors.Normalize(vmin=5, vmax=10)
        colorlabel = 'Intensity (dB)'
        title = 'Range-Time-Intensity'
    # Get rid of NaN alt. entries (virtual height is NaN below the baseline).
    finind = np.isfinite(y_ax)
    plotvals = plotvals[:, finind]
    y_ax = y_ax[finind]
    # Plot
    im = ax.pcolormesh(
        times[sortind], y_ax, plotvals.T,
        cmap=cmap, norm=norm, shading='flat',
    )
    ax.set_xlim(tlim[0], tlim[1])
    ax.set_ylim(ylim)
    ax.grid()
    return im, colorlabel
def preproc_spectra(dirnames, meta, out_fname_fmt):
    # Concatenate the individual spectrum pickles in each day directory
    # (basename YYYYMMDD) into a single per-day analysis pickle written to
    # day.strftime(out_fname_fmt).
    for dirn in dirnames:
        nfiles = len(glob.glob(os.path.join(dirn, '*.pkl')))
        print('Processing %s (%i files)' % (dirn, nfiles))
        day = dt.datetime.strptime(dirn.split('/')[-1], '%Y%m%d')

        # Set up the data holder here: one entry per frequency, with the
        # static range/doppler axes taken from the metadata pickles.
        data = {}
        for freq, vals in meta.items():
            data[freq] = {}
            data[freq]['time'] = []
            data[freq]['range'] = vals['range']
            data[freq]['doppler'] = vals['doppler']
        for fn in os.listdir(dirn):
            # Get frequencies and times from the filename
            # (underscore-separated: <freq>_<...>_<HHMMSS>...).
            freq = float(fn.split('_')[0])
            tod = dt.datetime.strptime(fn.split('_')[2], '%H%M%S')
            data[freq]['time'].append(
                day + dt.timedelta(
                    hours=tod.hour,
                    minutes=tod.minute,
                    seconds=tod.second,
                )
            )
            # load range, doppler and intensity; values are sparse matrices
            # (hence .toarray()).  The except branch creates the list on the
            # first occurrence of each key.
            with open(os.path.join(dirn, fn), 'rb') as f:
                spec = pickle.load(f)
            for k, v in spec.items():
                try:
                    data[freq][k].append(np.squeeze(v.toarray()))
                except:
                    data[freq][k] = [np.squeeze(v.toarray()),]

        # Save daily files (concatenated spectra)
        out_fname = day.strftime(out_fname_fmt)
        with open(out_fname, 'wb') as f:
            print('Writing to %s' % out_fname)
            pickle.dump(data, f)
        print('\n\n')
def get_freqs_tlim(data, fmin=None, fmax=None):
    """Return (freqs, (tmin, tmax)) for the frequencies worth plotting.

    A frequency is kept when it has more than `min_cts` spectra; `fmin` /
    `fmax` (MHz) bound the returned frequencies.  Time limits come from the
    data itself, clipped to whole hours via the module-global `op.tmin` /
    `op.tmax` when those were given.  Returns None when nothing survives.

    Side effect: converts every per-frequency value list in `data` to a
    numpy array in place.
    """
    # figure out which frequencies have data
    freqs = []
    tmin = dt.datetime(2050, 1, 1)
    tmax = dt.datetime(1, 1, 1)
    for freq, spectra in data.items():
        for k, v in spectra.items():
            spectra[k] = np.array(v)
        min_cts = 90  # require at least this many counts
        if ('int_pwr' in spectra.keys()) and (spectra['int_pwr'].shape[0] > min_cts):
            freqs.append(freq)
            # figure out time limits along the way
            min_t = np.min(spectra['time'])
            max_t = np.max(spectra['time'])
            if min_t < tmin:
                tmin = min_t
            if max_t > tmax:
                tmax = max_t
    freqs = np.array(freqs)
    # Use the function arguments directly (the old code re-read the global
    # `op` and used np.float, which was removed in NumPy >= 1.24).
    if fmin:
        freqs = freqs[freqs >= float(fmin)]
    if fmax:
        freqs = freqs[freqs <= float(fmax)]
    try:
        freqs.sort()
    except Exception:
        pass
    if op.tmin:
        tmin = tmin.replace(hour=int(op.tmin), minute=0, second=0)
    else:
        tmin = tmin.replace(hour=0, minute=0, second=0)
    if op.tmax:
        if int(op.tmax) == 24:
            # Hour 24 means midnight of the following day.
            tmax = tmax + dt.timedelta(days=1)
            tmax = tmax.replace(hour=0, minute=0, second=0)
        else:
            tmax = tmax.replace(hour=int(op.tmax), minute=0, second=0)
    else:
        tmax = tmax.replace(hour=23, minute=59, second=59)
    # Abort if no data
    if freqs.shape[0] == 0:
        print('aborting')
        return
    tlim = tmin, tmax
    return freqs, tlim
def calc_vht(ranges):
    """Convert round-trip group ranges (km) to virtual reflection heights.

    Assumes reflection midway along the McMurdo -> South Pole straight-line
    baseline; the height follows from Pythagoras on half the path, corrected
    by the depth of the chord midpoint below the surface."""
    baseline, midpt_depth = calc_dist(0, -77.8564, 166.6881, 0, -90, 0)
    half_path = ranges / 2
    return np.sqrt(half_path ** 2 - (baseline / 2) ** 2) - midpt_depth
def calc_dist(alt1, lat1, lon1, alt2, lat2, lon2,):
    # Straight-line (chord) distance in km between two WGS84 points, plus the
    # depth (km) of the chord midpoint below the surface midpoint.
    # NOTE(review): altitudes feed nvector GeoPoint z directly while outputs
    # are scaled to km -- confirm the intended altitude units with callers.
    import nvector as nv
    wgs84 = nv.FrameE(name='WGS84')
    pt1 = wgs84.GeoPoint(latitude=lat1, longitude=lon1, z=alt1, degrees=True)
    pt2 = wgs84.GeoPoint(latitude=lat2, longitude=lon2, z=alt2, degrees=True)

    # Great circle dist
    # dist_gc = np.sqrt(np.sum(pt1.delta_to(pt2).pvector ** 2)) / 1E3

    # Straight-line dist (ECEF chord length, metres -> km)
    dist_strt = np.sqrt(np.sum((pt1.to_ecef_vector().pvector - pt2.to_ecef_vector().pvector) ** 2)) / 1E3

    # midpoint between the two (on the surface path)
    midpt = nv.GeoPath(pt1, pt2).interpolate(0.5).to_geo_point()
    midpt_ll = [midpt.latitude, midpt.longitude]

    # dist. from straight-line midpoint up to ground level (Pythagoras on the
    # pt1 -> surface-midpoint hypotenuse and half the chord).
    hypot = np.sqrt(np.sum((pt1.to_ecef_vector().pvector - midpt.to_ecef_vector().pvector) ** 2)) / 1E3
    midpt_depth = np.sqrt(hypot ** 2 - (dist_strt / 2) ** 2)

    return dist_strt, midpt_depth
if __name__ == '__main__':
    import matplotlib
    # matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdates

    desc = 'daily plots of range-time-doppler at each frequency in the specified directory'
    parser = ArgumentParser(description=desc)
    parser.add_argument(
        'datadir', help='''Data directory to analyze.''',
    )
    parser.add_argument(
        '-c', '--ch', default='hfrx',
        help='''Channel name of data to analyze. (default: %(default)s)'''
    )
    parser.add_argument(
        '-a', '--virt_ht', action='store_true', default=False,
        help='''Plot against virtual height instead of range.''',
    )
    parser.add_argument(
        '-s', '--show', action='store_true', default=False,
        help='''Show the figure instead of saving it''',
    )
    parser.add_argument(
        '-t', '--type', default='power',
        help='''Choose whether to plot doppler or power on colour axis''',
    )
    parser.add_argument(
        '-wb', '--white_bg', action='store_true', default=False,
        help='''Plot on a white background''',
    )
    parser.add_argument(
        '-x', '--reproc', action='store_true', default=False,
        help='''Repeat processing for all files.''',
    )
    parser.add_argument(
        '-fmin', '--fmin', default=None,
        help='''Min frequency (MHz)''',
    )
    parser.add_argument(
        '-fmax', '--fmax', default=None,
        help='''Max frequency (MHz)''',
    )
    parser.add_argument(
        '-tmin', '--tmin', default=None,
        help='''Min plot time (hour)''',
    )
    parser.add_argument(
        '-tmax', '--tmax', default=None,
        help='''max plot time (hour)''',
    )
    parser.add_argument(
        '-rmin', '--rmin', default=1300,
        help='''Min plot range (km)''',
    )
    parser.add_argument(
        '-rmax', '--rmax', default=2000,
        help='''max plot range (km)''',
    )
    parser.add_argument(
        '-d', '--daylim', default=None,
        help='''plot days (yyyy,mm,dd, yyyy,mm,dd)''',
    )
    parser.add_argument(
        '-dy', '--daily', action='store_true', default=False,
        help='''make separate daily plots instead of one long one''',
    )

    op = parser.parse_args()

    # --daylim is optional (default None): only parse the start/end days when
    # it was actually given.  The old code unconditionally split op.daylim
    # and crashed with AttributeError whenever the flag was omitted.
    if op.daylim:
        tn = [int(d) for d in op.daylim.split(',')]
        op.startday = dt.datetime(*tn[:3])
        op.endday = dt.datetime(*tn[3:])
    op.datadir = os.path.abspath(op.datadir)
    op.rmin = int(op.rmin)
    op.rmax = int(op.rmax)

    save_daily_files(op)
| [
"alex.chartier@outlook.com"
] | alex.chartier@outlook.com |
2253fe35a619690566ee7697671f988cf67880f3 | e635826be96bef666bc9119e4df8f82b7f076fb8 | /pages/chat.py | dca5627c853c0ef99a11486ce1b76087b08acc4c | [] | no_license | qalight-python-automation/main-qalight-qacomplexapp-tests | 0d40ebe2eb5c788324bc4bf63248cd6c788ecfa5 | 7b0b928cc37eac181cd08d3b5990eaf0a29e4bbf | refs/heads/master | 2023-07-13T13:19:28.595888 | 2021-08-17T16:02:01 | 2021-08-17T16:02:01 | 360,634,509 | 0 | 2 | null | 2021-08-17T16:02:01 | 2021-04-22T17:49:05 | Python | UTF-8 | Python | false | false | 1,204 | py | """Store chat actions and helpers"""
import logging
from time import sleep
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.webelement import WebElement
from pages.base import BasePage
from constants import chat as chat_constants
class Chat(BasePage):
    """Store chat actions and helpers"""

    def __init__(self, driver):
        super().__init__(driver)
        # Per-module logger for the chat page object.
        self.logger = logging.getLogger(__name__)

    def send_message(self, message_text):
        """Send text message"""
        # Type the message into the chat input, then press ENTER in the same
        # field to submit it (the chat has no explicit send button here).
        self.wait_until_send_keys(locator_type=By.XPATH, locator=chat_constants.CHAT_INPUT_FIELD_XPATH, data=message_text)
        self.driver.find_element(by=By.XPATH, value=chat_constants.CHAT_INPUT_FIELD_XPATH).send_keys(Keys.ENTER)

    def verify_messages(self, messages):
        """Verify self sent messages"""
        # Collect the text of every message sent by the current user and
        # compare against the expected list (order-sensitive).
        actual_messages = self.wait_until_find_elements(locator_type=By.XPATH, locator=chat_constants.SELF_CHAT_MESSAGES_XPATH)
        actual_messages_text = [actual_message.text for actual_message in actual_messages]
        assert messages == actual_messages_text, f"Actual: {actual_messages_text}, Expected: {messages}"
| [
"denkondrat13+1@gmail.com"
] | denkondrat13+1@gmail.com |
2245d137d3c96c0d0184ca4ce3a2b2930945227a | e0980f704a573894350e285f66f4cf390837238e | /.history/news/models_20201124125236.py | 70a66f359a9f8e8b4fff11e3efb6146fd18c69f7 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from django.db import models
from wagtail.contrib.forms.models import AbstractEmailForm
# Create your models here.
class NewsPage(AbstractEmailForm):
te | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
22fd473d406211a4a51bde562452f1d42030f35f | d76be1b2fdc11e7b699773dece9ea7fa0c7756e3 | /rejected377/Singleton.py | c99c499fe339ce5466d939b49af511da5cc016a8 | [] | no_license | B-Rich/rejected377 | 64041d976edf6cff43cc273f0dbfef6defca0b7f | 0bf283cdfb464236570df1f05e005b5a3b171ede | refs/heads/master | 2021-01-24T20:58:36.964879 | 2012-10-04T19:28:36 | 2012-10-04T19:31:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Singleton metaclass implementation from
http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python"""
class Singleton(type):
    """Metaclass that gives every class using it exactly one shared instance."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Construct the instance on the first call only; reuse it afterwards.
        instance = cls._instances.get(cls)
        if instance is None:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return instance
| [
"davidf@sjsoft.com"
] | davidf@sjsoft.com |
fdf3d2f3fa34b5f31a1a475f8b88058eb6fae21b | bff20a295661ddf2900a2777165f9b4bdfb5656b | /caffe2/python/operator_test/sequence_ops_test.py | 7afca6bdc0524fae9ad0460ff8998981fda678f4 | [
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | wangxu823/caffe2 | cc8ba41abacb36dd5ebb1c4ed68aaae6d43dd91f | 0a68778916f3280b5292fce0d74b73b70fb0f7e8 | refs/heads/master | 2021-04-04T11:29:54.224522 | 2016-08-09T23:22:45 | 2016-08-09T23:22:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,548 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from functools import partial
def _gen_test_add_padding(with_pad_data=True,
                          is_remove=False):
    # Hypothesis strategy factory for the (Add|Remove)Padding tests: draws a
    # list of sequence lengths and an inner element shape, then builds data
    # of shape [sum(lengths)] + inner_shape (and, when with_pad_data, a
    # start- and an end-padding array of the inner shape).
    def gen_with_size(args):
        lengths, inner_shape = args
        # Data is the concatenation of all sequences along axis 0.
        data_dim = [sum(lengths)] + inner_shape
        lengths = np.array(lengths, dtype=np.int64)
        if with_pad_data:
            return st.tuples(
                st.just(lengths),
                hu.arrays(data_dim),
                hu.arrays(inner_shape),
                hu.arrays(inner_shape))
        else:
            return st.tuples(st.just(lengths), hu.arrays(data_dim))

    # Removal tests need each sequence long enough to strip padding from.
    min_len = 4 if is_remove else 0
    lengths = st.lists(
        st.integers(min_value=min_len, max_value=10),
        min_size=0,
        max_size=5)
    inner_shape = st.lists(
        st.integers(min_value=1, max_value=3),
        min_size=0,
        max_size=2)
    return st.tuples(lengths, inner_shape).flatmap(gen_with_size)
def _add_padding_ref(
start_pad_width, end_pad_width,
data, lengths, start_padding=None, end_padding=None):
if start_padding is None:
start_padding = np.zeros(data.shape[1:], dtype=data.dtype)
end_padding = (
end_padding if end_padding is not None else start_padding)
out_size = data.shape[0] + (
start_pad_width + end_pad_width) * len(lengths)
out = np.ndarray((out_size,) + data.shape[1:])
in_ptr = 0
out_ptr = 0
for length in lengths:
out[out_ptr:(out_ptr + start_pad_width)] = start_padding
out_ptr += start_pad_width
out[out_ptr:(out_ptr + length)] = data[in_ptr:(in_ptr + length)]
in_ptr += length
out_ptr += length
out[out_ptr:(out_ptr + end_pad_width)] = end_padding
out_ptr += end_pad_width
lengths_out = lengths + (start_pad_width + end_pad_width)
return (out, lengths_out)
def _remove_padding_ref(start_pad_width, end_pad_width, data, lengths):
pad_width = start_pad_width + end_pad_width
out_size = data.shape[0] - (
start_pad_width + end_pad_width) * len(lengths)
out = np.ndarray((out_size,) + data.shape[1:])
in_ptr = 0
out_ptr = 0
for length in lengths:
out_length = length - pad_width
out[out_ptr:(out_ptr + out_length)] = data[
(in_ptr + start_pad_width):(in_ptr + length - end_pad_width)]
in_ptr += length
out_ptr += out_length
lengths_out = lengths - (start_pad_width + end_pad_width)
return (out, lengths_out)
def _gather_padding_ref(start_pad_width, end_pad_width, data, lengths):
start_padding = np.zeros(data.shape[1:], dtype=data.dtype)
end_padding = np.zeros(data.shape[1:], dtype=data.dtype)
pad_width = start_pad_width + end_pad_width
ptr = 0
for length in lengths:
for i in range(start_pad_width):
start_padding += data[ptr]
ptr += 1
ptr += length - pad_width
for i in range(end_pad_width):
end_padding += data[ptr]
ptr += 1
return (start_padding, end_padding)
class TestSequenceOps(hu.HypothesisTestCase):
    """Hypothesis-driven tests for the sequence padding operators
    (AddPadding / RemovePadding / GatherPadding / ReversePackedSegs),
    each checked against the NumPy reference implementations above."""
    # AddPadding with explicit start/end padding tensors supplied.
    @given(start_pad_width=st.integers(min_value=1, max_value=2),
           end_pad_width=st.integers(min_value=0, max_value=2),
           args=_gen_test_add_padding(with_pad_data=True))
    def test_add_padding(self, start_pad_width, end_pad_width, args):
        lengths, data, start_padding, end_padding = args
        start_padding = np.array(start_padding, dtype=np.float32)
        end_padding = np.array(end_padding, dtype=np.float32)
        op = core.CreateOperator(
            'AddPadding',
            ['data', 'lengths', 'start_padding', 'end_padding'],
            ['output', 'lengths_out'],
            padding_width=start_pad_width,
            end_padding_width=end_pad_width)
        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            [data, lengths, start_padding, end_padding],
            partial(_add_padding_ref, start_pad_width, end_pad_width))
    # AddPadding without padding tensors: the operator pads with zeros,
    # matching the reference's default start_padding=None path.
    @given(start_pad_width=st.integers(min_value=1, max_value=2),
           end_pad_width=st.integers(min_value=0, max_value=2),
           args=_gen_test_add_padding(with_pad_data=False))
    def test_add_zero_padding(self, start_pad_width, end_pad_width, args):
        lengths, data = args
        op = core.CreateOperator(
            'AddPadding',
            ['data', 'lengths'],
            ['output', 'lengths_out'],
            padding_width=start_pad_width,
            end_padding_width=end_pad_width)
        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            [data, lengths],
            partial(_add_padding_ref, start_pad_width, end_pad_width))
    # AddPadding with no lengths input: the whole tensor is one segment.
    @given(start_pad_width=st.integers(min_value=1, max_value=2),
           end_pad_width=st.integers(min_value=0, max_value=2),
           data=hu.tensor(min_dim=1, max_dim=3))
    def test_add_padding_no_length(self, start_pad_width, end_pad_width, data):
        op = core.CreateOperator(
            'AddPadding',
            ['data'],
            ['output', 'output_lens'],
            padding_width=start_pad_width,
            end_padding_width=end_pad_width)
        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            [data],
            partial(
                _add_padding_ref, start_pad_width, end_pad_width,
                lengths=np.array([data.shape[0]])))
    # RemovePadding: is_remove=True guarantees segments long enough to
    # strip the requested pad widths from.
    @given(start_pad_width=st.integers(min_value=1, max_value=2),
           end_pad_width=st.integers(min_value=0, max_value=2),
           args=_gen_test_add_padding(with_pad_data=False, is_remove=True))
    def test_remove_padding(self, start_pad_width, end_pad_width, args):
        lengths, data = args
        op = core.CreateOperator(
            'RemovePadding',
            ['data', 'lengths'],
            ['output', 'lengths_out'],
            padding_width=start_pad_width,
            end_padding_width=end_pad_width)
        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            [data, lengths],
            partial(_remove_padding_ref, start_pad_width, end_pad_width))
    # GatherPadding: pad the data with the reference first, then check
    # the operator recovers the (summed) padding rows.
    @given(start_pad_width=st.integers(min_value=1, max_value=2),
           end_pad_width=st.integers(min_value=0, max_value=2),
           args=_gen_test_add_padding(with_pad_data=True))
    def test_gather_padding(self, start_pad_width, end_pad_width, args):
        lengths, data, start_padding, end_padding = args
        padded_data, padded_lengths = _add_padding_ref(
            start_pad_width, end_pad_width, data,
            lengths, start_padding, end_padding)
        op = core.CreateOperator(
            'GatherPadding',
            ['data', 'lengths'],
            ['start_padding', 'end_padding'],
            padding_width=start_pad_width,
            end_padding_width=end_pad_width)
        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            [padded_data, padded_lengths],
            partial(_gather_padding_ref, start_pad_width, end_pad_width))
    # ReversePackedSegs: reverse each (time, batch, ...) sequence up to
    # its per-batch length; the gradient is its own reverse.
    @given(data=hu.tensor(min_dim=3, max_dim=3, dtype=np.float32,
                          elements=st.floats(min_value=-np.inf,
                                             max_value=np.inf),
                          min_value=1, max_value=10),
           **hu.gcs_cpu_only)
    def test_reverse_packed_segs(self, data, gc, dc):
        max_length = data.shape[0]
        batch_size = data.shape[1]
        # Random per-batch sequence lengths in [0, max_length].
        lengths = np.random.randint(max_length + 1, size=batch_size)
        op = core.CreateOperator(
            "ReversePackedSegs",
            ["data", "lengths"],
            ["reversed_data"])
        def op_ref(data, lengths):
            # Reference: reverse the first lengths[i] steps of column i,
            # leaving the padding tail untouched.
            rev_data = np.array(data, copy=True)
            for i in range(batch_size):
                seg_length = lengths[i]
                for j in range(seg_length):
                    rev_data[j][i] = data[seg_length - 1 - j][i]
            return (rev_data,)
        def op_grad_ref(grad_out, outputs, inputs):
            # Reversal is an involution, so the gradient just reverses
            # the upstream gradient; lengths gets no gradient (None).
            return op_ref(grad_out, inputs[1]) + (None,)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[data, lengths],
            reference=op_ref,
            output_to_grad='reversed_data',
            grad_reference=op_grad_ref)
| [
"jiayq84@gmail.com"
] | jiayq84@gmail.com |
6a586b99db2e3d6ef9c60f0b254d20f5b31c9bc7 | 06461b51ee2c309b4a72ffa8bc26173a08d77c33 | /Code/ReverseInteger.py | 1a0cb17dfb3696e2e29001db2335a17e71b94b6b | [] | no_license | aparnavt/LeetCode-Top-100-Liked-questions | 42d3079f596672f88a48039e5535ed0f09562743 | a462f1a75db6dbb7557a1836d8a62e4c8bbbc717 | refs/heads/master | 2023-01-05T00:25:15.673665 | 2020-11-03T15:53:50 | 2020-11-03T15:53:50 | 297,335,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | class Solution:
def reverse(self, x: int) -> int:
if x>0:
ans = int(str(x)[::-1])
return ans if ans<(2**31 -1) else 0
else:
ans = -int(str(-x)[::-1])
return ans if abs(ans)<2**31 else 0
"""
Naive Approach
n = abs(x)
i=0
dig={}
while(x>0):
dig[i] = n%10
i+=1
n=n/10
for j in range(len(dig)) :
reverse+= dig[j] * pow(10,len(dig)-1-j)
reverse = reverse * x/abs(x)
return reverse
"""
| [
"noreply@github.com"
] | aparnavt.noreply@github.com |
4db19ef43f850ee252401b2e092706de359951b9 | 9c76c9dd92581fa1eea989cd314b9567a88c5c7f | /account/views.py | 50783cb4d6d9ad5c6072ce68661772a182dd8c3f | [] | no_license | wangqiang-python/EMSa | 8067797302215426f56487eeae3884a6efcddc8e | 9fe5370f3d3884e1c903d007c620dc614eb8afaa | refs/heads/master | 2020-07-11T07:19:31.886967 | 2019-08-26T12:41:47 | 2019-08-26T12:41:47 | 204,476,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | from django.db import transaction
from django.shortcuts import render,HttpResponse,redirect
from account.models import User
from employee.models import Employee
# Create your views here.
def login(request):
    """Render the login page, auto-logging in from remembered cookies.

    If ``loginlogic`` previously stored credentials in cookies, they are
    re-validated here and the user is sent straight to the home view.
    """
    # NOTE(review): the password travels in a plain-text cookie and is
    # compared verbatim against the DB column (i.e. stored unhashed) --
    # consider django.contrib.auth instead.
    name = request.COOKIES.get("name")
    pwd = request.COOKIES.get("password")
    result = User.objects.filter(name=name,password=pwd)
    if result:
        # Any matching row counts as authenticated; flag the session.
        request.session["login"] = "ok"
        return redirect("account:home")
    return render(request, "account/login.html")
def loginlogic(request):
    """Handle the login form POST.

    On success marks the session and, when the "remember me" box is
    checked, persists the credentials in cookies for one week.  On
    failure returns a plain-text response (Chinese: "login failed").
    """
    name = request.POST.get("name")
    pwd = request.POST.get("pwd")
    rem = request.POST.get("rember")  # "remember me" checkbox (field name sic)
    result = User.objects.filter(name=name,password=pwd)
    if result:
        request.session["login"] = "ok"
        response = redirect('account:home')
        if rem:
            # NOTE(review): storing the raw password in a cookie is a
            # security risk; a signed session token would be safer.
            response.set_cookie("name",name,max_age=7*24*3600)
            response.set_cookie("password",pwd,max_age=7*24*3600)
        return response
    return HttpResponse("登陆失败")
def regist(request):
    """Render the registration page."""
    return render(request, "account/regist.html")
def registlogic(request):
    """Handle the registration form POST inside a DB transaction.

    Rejects the request when the chosen username already exists,
    otherwise creates the user and redirects to the login page.
    Failure responses use Chinese text ("username already exists" /
    "registration failed").
    """
    try:
        with transaction.atomic():
            uname = request.POST.get("uname")
            name = request.POST.get("name")
            pwd = request.POST.get("pwd")
            sex = request.POST.get('sex')
            auto = request.POST.get('number')
            # NOTE(review): uniqueness is checked against ``name`` with
            # the submitted *uname* value while the duplicate message
            # talks about the username -- confirm whether
            # ``filter(uname=uname)`` was intended.
            u = User.objects.filter(name=uname)
            if u:
                return HttpResponse("用户名已存在")
            # NOTE(review): the password is persisted in plain text.
            User.objects.create(name=name, uname=uname, password=pwd, gender=sex, auto_code=auto, )
            return redirect("account:login")
    except Exception as a:
        # Broad catch keeps any DB/validation error from surfacing as a
        # 500; the cause is only printed to stdout.
        print(a)
        return HttpResponse("注册失败")
def home(request):
    """Show the employee list when logged in, otherwise bounce to the
    login page (final redirect is on the following line)."""
    re = request.session.get("login")  # NOTE: local name shadows stdlib ``re``
    save = Employee.objects.all()
    if re:
        return render(request, "employee/emplist.html",{"save" : save})
    # return HttpResponse('22222222222222222')
return redirect("account:login") | [
"1297474538@qq.com"
] | 1297474538@qq.com |
18d6600c76c4f826ff670266cd750b7813cbf302 | d6cc907335f0fc1469855018a6adf282ebdd78f0 | /logic/libs/logger/logger.py | 95e768fd1199567c86e3f5ad4c78e9402c7ce063 | [] | no_license | brianwolf/utn-2020-2c-desa-mobile | b4a432fb31d0b575aed92eb433442e056eead915 | f57aac4ec8cdb03d09c7e6fc374ff7918f4c28ce | refs/heads/master | 2023-01-21T21:22:35.470062 | 2020-11-30T23:03:57 | 2020-11-30T23:03:57 | 295,231,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | """
Logger
-------
Crea logs de la aplicacion
"""
import logging
from logic.libs.logger.src import config
from logic.libs.logger.src.archivo import crear_log
def iniciar(directorio: str, nivel: str):
    """Configure the project-wide logger settings.

    Args:
        directorio: directory where log files will be written.
        nivel: minimum logging level name.
    """
    config.DIRECTORIO_LOGS = directorio
    config.NIVEL_LOGS = nivel
def log(nombre: str = 'app') -> logging.Logger:
    """Return the logger registered under *nombre*, creating it on first
    use (lazily memoized in ``config.LOGGERS``)."""
    if nombre not in config.LOGGERS:
        config.LOGGERS[nombre] = crear_log(nombre)
    return config.LOGGERS[nombre]
| [
"brian.lobo@moorea.io"
] | brian.lobo@moorea.io |
c43c62823b494adf5bd81624350f95d4d0f6d884 | 8581c776a00bb15e4734c3d245bd86e375a2b219 | /quiz/configurations/database.py | 7a73d4de1f7943dc136d264337dbfa2b7a77aba4 | [] | no_license | andre-rosas/qzen | 3f96dbbbb95032efe4c2f8f86c8f029af966d557 | 776a0f58e4102ecd73efa74fa3d4c36ac328e7ab | refs/heads/master | 2023-05-01T06:10:10.647931 | 2021-04-23T16:51:14 | 2021-04-23T16:51:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from flask.app import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def init_app(app: Flask):
    """Bind the shared SQLAlchemy instance to *app* and expose it as
    ``app.db``."""
    db.init_app(app)
    app.db = db
    # Imported for their side effect only: loading these modules
    # registers the model classes with SQLAlchemy's metadata.
    # Presumably importing inside the function also avoids circular
    # imports with the models -- TODO confirm.
    from quiz.models.pergunta_model import PerguntaModel
    from quiz.models.alternativa_model import AlternativaModel
    from quiz.models.pergunta_tema_model import PerguntaTemaModel
    from quiz.models.tema_model import TemaModel
    from quiz.models.user_model import UserModel
| [
"luizmatheusga@gmail.com"
] | luizmatheusga@gmail.com |
c3e593a11fa780431165c332212fd8a39a54f02a | 0f968baea4554519043c3ed4d1464fe4c64d592f | /src/mesh/deep/__init__.py | 2d81c12b5a07d5cf15b92582b6caa017185c5111 | [] | no_license | seantyh/OntoDeepLex | a50911edea9b3ce9f5997ecab327b4bb54783a00 | 89148884ff09710877d18d0a63da00a304060079 | refs/heads/master | 2023-01-22T23:23:35.653260 | 2020-11-23T13:14:22 | 2020-11-23T13:14:22 | 257,922,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | from .tensor_utils import BertService
from .vocab import VocabZhTw | [
"seantyh@gmail.com"
] | seantyh@gmail.com |
37a83c9b14f7582f5e2c4b4b2b05811c76da5278 | 24883c4af746d0a5e72e201239faa44ed670493d | /venv/Scripts/pip-script.py | 1284262dd05b336492cf52db83f099247cca721b | [] | no_license | LeeSangShin/kafka-python | cd9b14b96dace407898ff12093a5503eaf1cbcb9 | 671e155bf68017bda0f9f1b9b55ab2353a63b521 | refs/heads/master | 2020-05-04T15:23:20.252189 | 2019-04-04T05:02:11 | 2019-04-04T05:02:11 | 179,220,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | #!C:\Users\User\PycharmProjects\ras\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script wrapper for pip 10.0.1.
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix from argv[0] so pip reports
    # its own name correctly, then run the console-script entry point
    # and propagate its return value as the process exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"49224081+LeeSangShin@users.noreply.github.com"
] | 49224081+LeeSangShin@users.noreply.github.com |
8ac32f1448c14dd435ff22cb2ca8577a5f738d3c | aef889a2e1053e2485bd3530625a0e9b72edeb97 | /frontend/testing/formsCoordinatorTest.py | ad463ab485899374236b2b6d17cd01e52b472291 | [] | no_license | angelialau/se7en-scheduler | 13e11d2ede3656f19e99960e3bc68b079648aeb6 | 777ecbf6bed03f9289a10c7e066054f5319ae55b | refs/heads/master | 2020-03-17T13:22:05.246178 | 2018-04-22T15:42:52 | 2018-04-22T15:42:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,778 | py | from selenium import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.by import By
import unittest, time, pickle
# Shared assertion messages used by the form tests below.
preButtonError = "submit button should be disabled before form is filled"
postButtonError = "submit button should be enabled after form is filled"
formResetError = "form input fields did not revert to ng-pristine after refreshing"
def broken_function():
    """Always raise -- useful for exercising failure reporting."""
    failure = Exception('This is broken')
    raise failure
class FormsCoordinatorTest(unittest.TestCase):
    """Selenium end-to-end tests for the coordinator-facing forms.

    Requires the Angular app served at http://localhost:4200 with a
    seeded account (email@email.com / password); each test starts from
    a fresh, logged-in Chrome session created in setUp.
    """
    def setUp(self):
        # Launch Chrome and perform the login round-trip before every test.
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--explicitly-allowed-ports=6666')
        self.driver = webdriver.Chrome(options=chrome_options)
        # login
        self.driver.implicitly_wait(10)
        self.driver.get("http://localhost:4200/login")
        email = self.driver.find_element_by_id("email")
        email.send_keys("email@email.com")
        password = self.driver.find_element_by_id("password")
        password.send_keys("password")
        # NOTE(review): click() returns None, so ``button`` is always None.
        button = self.driver.find_element_by_tag_name("button").click()
        # Visibility of the logout button proves the login succeeded.
        header = WebDriverWait(self.driver,10).until(
            EC.visibility_of_element_located((By.ID, 'logoutButton')))
    # Announcement form: submit must stay disabled until title and
    # content are filled in.
    def test_make_announcement(self):
        driver = self.driver
        driver.find_element_by_id("makeAnnouncementBtn").click()
        header = driver.find_element_by_tag_name('h5')
        self.assertIn("Make an Announcement", header.text)
        submit = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, "announcementButton")))
        self.assertEqual(submit.is_enabled(), False, preButtonError)
        title = driver.find_element_by_id("title")
        title.send_keys("Test Announcement")
        content = driver.find_element_by_id("announcementContent")
        content.send_keys("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Mollis nunc sed id semper risus. Diam maecenas sed enim ut. Pulvinar etiam non quam lacus suspendisse. Quis varius quam quisque id diam. Ultricies mi quis hendrerit dolor magna eget est lorem ipsum. Augue ut lectus arcu bibendum at varius vel. Diam sollicitudin tempor id eu nisl nunc. Scelerisque mauris pellentesque pulvinar pellentesque habitant morbi tristique. Et tortor consequat id porta nibh venenatis cras. Risus in hendrerit gravida rutrum quisque. Morbi tristique senectus et netus. Nam libero justo laoreet sit. Diam donec adipiscing tristique risus. Maecenas accumsan lacus vel facilisis. Viverra aliquet eget sit amet tellus cras. Nunc sed velit dignissim sodales ut eu sem integer vitae. Non blandit massa enim nec dui nunc mattis. Viverra nibh cras pulvinar mattis nunc.")
        submit = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, "announcementButton")))
        self.assertEqual(submit.is_enabled(), True, postButtonError)
    # Add-user form: fill every field and check the submit button's
    # enabled state flips from disabled to enabled.
    def test_add_user(self):
        driver = self.driver
        driver.get("http://localhost:4200/user")
        button = driver.find_element_by_id("sbAddUser").click() #move to navigation test
        submit = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, 'submitFormButton')))
        self.assertEqual(submit.is_enabled(), False, preButtonError)
        select = driver.find_element_by_id("pillar")
        options = select.find_elements_by_tag_name("option")
        for option in options:
            if(option.text=="ISTD"):
                option.click()
                break
        name = driver.find_element_by_id("name")
        name.send_keys("Add User Selenium Test")
        email = driver.find_element_by_id("email")
        email.send_keys("selenium@sutd.edu.sg")
        phone = driver.find_element_by_id("phone")
        phone.send_keys("63036662")
        password = driver.find_element_by_id("password")
        password.send_keys("password")
        submit = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'submitFormButton')))
        self.assertEqual(submit.is_enabled(), True, postButtonError)
    # Add-schedule form: last options of year/trimester, fixed dates,
    # then verify the reset button returns the form to ng-pristine.
    def test_add_schedule(self):
        driver = self.driver
        driver.get("http://localhost:4200/schedules")
        div = driver.find_element_by_id("scheduleFormTitle").click()
        year = WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.ID, "year")))
        yearoptions = year.find_elements_by_tag_name("option")
        yearoptions[len(yearoptions)-1].click()
        trimester = WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.ID, "trimester")))
        trimesteroptions = trimester.find_elements_by_tag_name("option")
        trimesteroptions[len(trimesteroptions)-1].click()
        start = driver.find_element_by_id('start')
        start.send_keys('2022-11-12')
        end = driver.find_element_by_id('end')
        end.send_keys('2022-11-13')
        submit = WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.ID, "addScheduleSubmitButton")))
        self.assertEqual(submit.is_enabled(), True, "add schedule form button not enabled")
        #test reset function
        driver.find_element_by_id("addSchedResetButton").click()
        #check that input fields revert to the pristine state
        trimester = WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.ID, "trimester")))
        self.assertEqual("ng-pristine" in trimester.get_attribute("class"), True, formResetError)
    # Add-course form: navigate via the titled button when present,
    # otherwise fall back to a hard-coded schedule URL; fill the form
    # and check enable/reset behaviour.
    def test_add_course(self):
        driver = self.driver
        driver.find_element_by_id("sbViewSchedules").click()
        time.sleep(1)
        present = False
        buttons = driver.find_elements_by_tag_name("button")
        title = "Click to add courses or generate an alternative schedule"
        for button in buttons:
            if title in button.get_attribute('title'):
                button.click()
                header = WebDriverWait(driver,10).until(
                    EC.visibility_of_element_located((By.ID, 'courseFormTitle')))
                self.assertEqual("Add a Course", header.text.strip())
                present = True
                break
        if not present:
            driver.get("http://localhost:4200/schedules/53")
        submit = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, 'addCourseSubmitButton')))
        self.assertEqual(submit.is_enabled(), False, preButtonError)
        select = WebDriverWait(driver,10).until(EC.visibility_of_element_located((By.ID,"courseDetail")))
        options = select.find_elements_by_tag_name("option")
        options[3].click()
        core = driver.find_element_by_id("core")
        core.click()
        no_classes = driver.find_element_by_id("no_classes")
        no_classesOptions = no_classes.find_elements_by_tag_name("option")
        no_classesOptions[1].click()
        class_size = driver.find_element_by_id("class_size")
        class_sizeOptions = class_size.find_elements_by_tag_name("option")
        class_sizeOptions[1].click()
        prof1 = driver.find_element_by_xpath('//select[@formcontrolname="id"]')
        prof1options = prof1.find_elements_by_tag_name("option")
        prof1options[1].click()
        class_types = driver.find_element_by_xpath('//select[@formcontrolname="class_types"]')
        class_typesoptions = class_types.find_elements_by_tag_name("option")
        class_typesoptions[1].click()
        venue_types = driver.find_element_by_xpath('//select[@formcontrolname="venue_types"]')
        # NOTE(review): reads options from ``class_types`` instead of
        # ``venue_types`` -- looks like a copy-paste slip; confirm.
        venue_typesoptions = class_types.find_elements_by_tag_name("option")
        venue_typesoptions[1].click()
        sessions_hrs = driver.find_element_by_xpath('//select[@formcontrolname="sessions_hrs"]')
        sessions_hrsoptions = sessions_hrs.find_elements_by_tag_name("option")
        sessions_hrsoptions[1].click()
        checkbox = driver.find_element_by_id("checkbox")
        checkbox.click()
        submit = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'addCourseSubmitButton')))
        self.assertEqual(submit.is_enabled(), True, postButtonError)
        driver.find_element_by_id("resetCourseButton").click()
        dropdown = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'courseDetail')))
        self.assertEqual("ng-pristine" in dropdown.get_attribute("class"), True, formResetError)
    # def test_change_password(self):
    #     driver = self.driver
    #     driver.get('http://localhost:4200/password')
    #     submit = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, 'submitChangePasswordFormButton')))
    #     self.assertEqual(submit.is_enabled(), False, preButtonError)
    #     oldP = driver.find_element_by_id('oldPassword')
    #     oldP.send_keys('password')
    #     newP = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, 'newPassword')))
    #     newP.send_keys('newpassword')
    #     confirmP = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, 'confirmPassword')))
    #     confirmP.send_keys('newpassword')
    #     submit = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'submitChangePasswordFormButton')))
    #     self.assertEqual(submit.is_enabled(), True, postButtonError)
    # Add-event form: like test_add_course but for schedule events.
    def test_add_event(self):
        driver = self.driver
        driver.get("http://localhost:4200/schedules")
        present = False
        buttons = driver.find_elements_by_tag_name("button")
        title = "Click to add an event to this schedule"
        for button in buttons:
            if title in button.get_attribute('title'):
                button.click()
                time.sleep(2)
                header = WebDriverWait(driver,10).until(
                    EC.visibility_of_element_located((By.ID, 'schedulesTitle')))
                self.assertEqual("Create a new event", header.text.strip())
                present = True
                break
        if not present:
            driver.get("http://localhost:4200/schedules/31")
        header = driver.find_elements_by_id("schedulesTitle")
        self.assertEqual(len(header), 1)
        submit = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, 'submitEventButton')))
        self.assertEqual(submit.is_enabled(), False, preButtonError)
        select = WebDriverWait(driver,10).until(EC.visibility_of_element_located((By.ID,"location")))
        options = select.find_elements_by_tag_name("option")
        options[1].click()
        course = driver.find_element_by_id("course")
        course.send_keys("Blockchain Seminar 2018")
        instructorId = driver.find_element_by_id("instructorId")
        instructorIdOptions = instructorId.find_elements_by_tag_name("option")
        instructorIdOptions[0].click()
        pillar = driver.find_element_by_id("pillar")
        pillarOptions = pillar.find_elements_by_tag_name("option")
        pillarOptions[0].click()
        cohort = driver.find_element_by_id("cohort")
        cohortOptions = cohort.find_elements_by_tag_name("option")
        cohortOptions[0].click()
        submit = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'submitEventButton')))
        self.assertEqual(submit.is_enabled(), True, postButtonError)
    def tearDown(self):
        # End the browser session after each test.
        self.driver.close()
        self.driver.quit()
if __name__ == "__main__":
unittest.main() | [
"angelialkm@gmail.com"
] | angelialkm@gmail.com |
925d4bcc4f9c12b106e27f99a905d73e6892a51e | 6c13f20675cc098c8e0ae0fe716e43b228a9c550 | /exercices/question_03.py | 3b1b788f3ee2c152e1f8ebf1b7e668231c6e0480 | [] | no_license | lfarci/getting-to-know-pandas | 6304ad4c358c6ef48d0b2bf448c12aeb5e8e6690 | 15f96e59e23c3b2e62c4c64ba26e3d2b8afba69d | refs/heads/main | 2023-01-04T00:00:28.641477 | 2020-10-23T13:24:24 | 2020-10-23T13:24:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | import numpy as np
import pandas as pd
import question_02
def count_movies_with_genre(movies, genre):
    """Count the rows whose 'genres' string contains *genre*
    (pandas ``str.contains`` semantics: *genre* is treated as a regex)."""
    has_genre = movies["genres"].str.contains(genre)
    return len(movies[has_genre])
def count_movies_with_release_year(movies, release_year):
    """Count the rows whose 'release_year' equals *release_year*."""
    released_then = movies[movies["release_year"] == release_year]
    return len(released_then)
def count_thriller_with_release_year(movies, genre, year):
    """Count the rows that both match *genre* (regex semantics of
    ``str.contains``) and were released in *year*."""
    matches_genre = movies["genres"].str.contains(genre)
    matches_year = movies["release_year"] == year
    return len(movies[matches_genre & matches_year])
def main():
    """Load the movie dataset, derive the release_year column (reusing
    question 2's helper), and print how many 2016 thrillers it holds."""
    movies_df = pd.read_csv("./resources/movies.csv")
    # Mutates movies_df in place, adding the 'release_year' column.
    question_02.create_release_year_column(movies_df)
    print(count_thriller_with_release_year(movies_df, "Thriller", 2016))
if __name__ == "__main__":
main() | [
"farci.logan@gmail.com"
] | farci.logan@gmail.com |
a7714a35d0c2be083cc2ecf9c3324c5f6f6b8aa4 | 411071823a909d86845b7750085dfdf7f1206f2e | /python/src/db.py | fa78bf438fc35c62924e9c0e7527942daf3918de | [] | no_license | Kerman-Sanjuan/REST-API | 695851447a25ce8e08505376111a2e2524c9ae80 | 927d21fbad20d285f5b81326ea9ed45ddd48052d | refs/heads/main | 2023-08-30T10:55:45.432406 | 2021-11-07T17:17:15 | 2021-11-07T17:17:15 | 421,827,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Engine bound to the on-disk SQLite database used by the app.
engine = create_engine('sqlite:///database/users.sqlite')
# Session factory bound to that engine, plus one module-level session
# shared by whatever imports this module.
Session = sessionmaker(bind=engine)
session = Session()
# Declarative base class the ORM models inherit from.
Base = declarative_base()
| [
"kermansanjuanmalax@gmail.com"
] | kermansanjuanmalax@gmail.com |
8b73fef38c8d4e2206ee1125f5a6dfbe6e4e74ed | 83943dc3b89e6eede501292dbc70f6b5f8dc9412 | /question(7).py | 58234fc907843d5734822014f47d5349499137bd | [] | no_license | preeti28dec/Hangman-game- | e58985c95a0e4cfba0d43c5641f61c1aa85cf5ed | 4d237171d5ec5d45ec3489fef7a29bd52079f6a5 | refs/heads/main | 2023-04-12T21:05:06.274986 | 2021-04-22T13:57:05 | 2021-04-22T13:57:05 | 360,537,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # Q7.Text file data ko json file data mai convert karo,jaise ki neeche diya hai?
import json
filename = 'Text_file.txt'
dict1 = {}
# ``with open`` closes the input file automatically once reading is done.
with open(filename) as fh:
    for line in fh:
        command,description = line.strip().split(None, 1)
        # split(None, 1) splits only on the first whitespace run, so the
        # first word becomes the key and the rest of the line the value.
        dict1[command] = description.strip()
# NOTE(review): the output file is opened without ``with``; it is closed
# manually below.
out_file = open("Hello.json", "w")
json.dump(dict1, out_file, indent = 4)
out_file.close()
| [
"noreply@github.com"
] | preeti28dec.noreply@github.com |
4a71137f92e3e2b73533f1a065e7017d0d2de440 | 413a2ead82ac085de117e7fc7fc926268a70c6ad | /useraddress/asgi.py | 262ffb6c450957ec4f01af9b6b099aff1441aa7f | [] | no_license | ShirsatAditi/AddressListAPI | 49f38dfeb4559fd0789ca1480649616b4b87f44f | 546ed2e07210053e6177b667c73bf40b22fca5ed | refs/heads/main | 2023-01-02T10:07:12.196129 | 2020-10-28T07:24:49 | 2020-10-28T07:24:49 | 307,936,773 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for useraddress project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before it is configured.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'useraddress.settings')
# Module-level ASGI callable that servers (daphne/uvicorn) import.
application = get_asgi_application()
| [
"aditishirsat.magneto@gmail.com"
] | aditishirsat.magneto@gmail.com |
6c03d73cc41059479d264e5a0bb972901861ffd6 | ce0e55c0912232abde612c06f2768e14cefff8d9 | /calculator/__init__.py | cdb36146569d3803fdce4e971dd8af805c13d1bd | [] | no_license | hwanyhee/ezen | 4eb777a3cb87f0b3278d5f24e20472476ab55fe3 | 4f971258805bb499d919a31ed8ed3e74970c8e04 | refs/heads/master | 2020-07-02T14:36:51.723863 | 2019-08-10T09:16:44 | 2019-08-10T09:16:44 | 201,559,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from calculator.controller import CalculatorController
# Instantiate the controller (translated from Korean) and run one
# interactive calculation; the result is printed on the next line.
if __name__ == '__main__':
    num1 = int(input('1st number?'))
    op = input('연산자?')  # prompt text means "operator?" (Korean)
    num2 = int(input('2nd number?'))
    calc = CalculatorController(num1,num2)
print(calc.exec(op)) | [
"hwanyhee@naver.com"
] | hwanyhee@naver.com |
d85e0b393ae8d8ee90e85e1f7704da446a52d993 | 5e9eba022385185a2c84d60fffe6cdf04421ed1b | /academicos/views.py | e44f4be6e8ae947cd6a615b5209a4e6d14064e29 | [] | no_license | lizceth/proyecto-eventos | 4a6de50ee7ae7705fb7a81cb4fdbdbe2c9ed9516 | b408149a463d130da72bb555237814cc5bb2dbfa | refs/heads/master | 2020-06-02T18:21:08.415503 | 2014-10-14T22:35:26 | 2014-10-14T22:35:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | from django.contrib.auth.models import User
from django.shortcuts import render, render_to_response, get_object_or_404
from academicos.models import Coordinador, Escuela, Facultad
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from asistentes.models import Persona
from asistentes.forms import *
#from django.core.mail import EmailMessage
#from django.contrib.auth.forms import UserCreationForm, AuthentificationForm
#from django.contrib.auth import login, authentificate, logout
#from django.contrib.auth.decorators import login_required
from academicos.forms import CoordinadorForm, EscuelaForm, FacultadForm
def Cordinadores(request):
    """List every Coordinador."""
    contexto = {
        'cordinadores': Coordinador.objects.all(),
        'titulo': "Lista de Cordinadores",
    }
    return render_to_response('academicos/cordinadoresList.html',
                              contexto,
                              context_instance=RequestContext(request))
def Cordinador_add(request):
    """Create a Coordinador.

    POST validates and saves, then redirects to the list view; GET (or
    an invalid POST, which falls through with the bound form and its
    errors) renders the form template.
    """
    if request.method == "POST":
        formulario = CoordinadorForm(request.POST)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect('/cordinadoresList/')
    else:
        formulario = CoordinadorForm()
    return render_to_response('academicos/cordinadoresAdd.html',
        {'formulario': formulario},
        context_instance = RequestContext(request))
def Cordinador_edit (request, id):
    """Edit an existing Coordinador.

    GET renders the form pre-filled; POST validates and saves, then
    redirects to the list view (an invalid POST falls through and
    re-renders the bound form with its errors).  ``id`` shadows the
    builtin but is kept: the URLconf passes it under that name.
    """
    # get_object_or_404 (already imported, and already used by
    # Cordinador_borrar) turns an unknown pk into a 404 instead of an
    # unhandled DoesNotExist (HTTP 500).
    instancia = get_object_or_404(Coordinador, pk=id)
    if request.method == 'POST':
        formulario = CoordinadorForm(request.POST, instance=instancia)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect("/cordinadoresList/")
    else:
        formulario = CoordinadorForm(instance=instancia)
    return render_to_response('academicos/cordinadoresEdit.html',
                              {'formulario': formulario},
                              context_instance=RequestContext(request))
def Cordinador_borrar (request, id):
    """Delete the Coordinador with primary key *id* (404 when absent)
    and return to the list view."""
    # NOTE(review): deleting via GET is unsafe (no CSRF protection and
    # crawlable links); a POST-only view would be safer.
    cordinador_borrar = get_object_or_404(Coordinador, pk=id)
    cordinador_borrar.delete()
    return HttpResponseRedirect("/cordinadoresList/")
def Escuelas(request):
    """List every Escuela."""
    contexto = {
        'escuelas': Escuela.objects.all(),
        'titulo': "Lista de Escuelas",
    }
    return render_to_response('academicos/escuelasList.html',
                              contexto,
                              context_instance=RequestContext(request))
def Escuela_add (request):
    """Create an Escuela.

    POST validates and saves, then redirects to the list view; GET (or
    an invalid POST, which falls through with the bound form and its
    errors) renders the form template.
    """
    if request.method == "POST":
        formulario = EscuelaForm(request.POST)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect('/escuelaList/')
    else:
        formulario = EscuelaForm()
    return render_to_response('academicos/escuelasAdd.html',
        {'formulario':formulario},
        context_instance=RequestContext(request))
def Escuela_edit (request, id):
    """Edit an existing Escuela.

    GET renders the form pre-filled; POST validates and saves, then
    redirects to the list view (an invalid POST falls through and
    re-renders the bound form with its errors).  ``id`` shadows the
    builtin but is kept: the URLconf passes it under that name.
    """
    # get_object_or_404 (already imported, and already used by
    # Escuelas_borrar) turns an unknown pk into a 404 instead of an
    # unhandled DoesNotExist (HTTP 500).
    instancia = get_object_or_404(Escuela, pk=id)
    if request.method == 'POST':
        formulario = EscuelaForm(request.POST, instance=instancia)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect("/escuelaList/")
    else:
        formulario = EscuelaForm(instance=instancia)
    return render_to_response('academicos/escuelasEdit.html',
                              {'formulario': formulario},
                              context_instance=RequestContext(request))
def Escuelas_borrar (request, id):
    """Delete the Escuela with primary key *id* (404 when absent) and
    return to the list view."""
    # NOTE(review): deleting via GET is unsafe (no CSRF protection and
    # crawlable links); a POST-only view would be safer.
    escuelas_borrar = get_object_or_404(Escuela, pk=id)
    escuelas_borrar.delete()
    return HttpResponseRedirect("/escuelaList/")
def Facultades(request):
    """List every Facultad."""
    contexto = {
        'facultades': Facultad.objects.all(),
        'titulo': "Lista de Facultades",
    }
    return render_to_response('academicos/facultadList.html',
                              contexto,
                              context_instance=RequestContext(request))
def Facultad_add(request):
    """Create a Facultad.

    POST validates and saves, then redirects to the list view; GET (or
    an invalid POST, which falls through with the bound form and its
    errors) renders the form template.
    """
    if request.method == "POST":
        formulario = FacultadForm(request.POST)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect('/facultadesList/')
    else:
        formulario = FacultadForm()
    return render_to_response('academicos/facultadAdd.html',
        {'formulario': formulario},
        context_instance = RequestContext(request))
def Facultad_edit (request, id):
    """Edit an existing Facultad.

    GET renders the form pre-filled; POST validates and saves, then
    redirects to the list view (an invalid POST falls through and
    re-renders the bound form with its errors).  ``id`` shadows the
    builtin but is kept: the URLconf passes it under that name.
    """
    # get_object_or_404 (already imported, and already used by
    # Facultad_borrar) turns an unknown pk into a 404 instead of an
    # unhandled DoesNotExist (HTTP 500).
    instancia = get_object_or_404(Facultad, pk=id)
    if request.method == 'POST':
        formulario = FacultadForm(request.POST, instance=instancia)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect("/facultadesList/")
    else:
        formulario = FacultadForm(instance=instancia)
    return render_to_response('academicos/facultadEdit.html',
                              {'formulario': formulario},
                              context_instance=RequestContext(request))
def Facultad_borrar (request, id):
    """Delete the Facultad with primary key *id*, then return to the list.

    Responds with 404 when no such faculty exists.
    """
    get_object_or_404(Facultad, pk=id).delete()
    return HttpResponseRedirect("/facultadesList/")
| [
"shanny.liz13@gmail.com"
] | shanny.liz13@gmail.com |
44325715254c5869560d81e2367ac008235b3da6 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /r9y4yrSAGRaqTT7nM_21.py | 1ca7a31c9dbf3fb55b5622f3aa279dfb1fc15050 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py |
def find_missing(strings):
    """Return the missing length in a collection of distinct-length strings.

    Given strings whose lengths form a consecutive run with exactly one gap,
    the gapped length equals the sum of the full range of lengths (shortest
    through longest, inclusive) minus the lengths actually present.

    Returns 0 when *strings* is empty or contains an empty string, matching
    the original one-line lambda (``if l and all(l) else 0``).

    The original was a lambda assigned to a name (PEP 8 discourages this);
    rewritten as a regular function with identical behaviour.
    """
    if not strings or not all(strings):
        return 0
    lengths = [len(s) for s in strings]
    return sum(range(min(lengths), max(lengths) + 1)) - sum(lengths)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
6a8cdc19597cae18c5c46132885f1df73c95b2ea | b73813d6cbfd1de690162234ba6c410d4bc037d5 | /leetcode/1.py | 855a2fa3458dca9879722787207f8c61df39b168 | [] | no_license | kavandalal/timepass | 27f4fe3944e16012e15d3094e320df1f48b43938 | 21f6af30b138633ed7edb8b95ec723e218fba6c4 | refs/heads/master | 2023-01-03T04:27:30.711172 | 2020-10-30T08:53:29 | 2020-10-30T08:53:29 | 288,215,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | #1
# Two Sum: for each value, look up whether its complement (target - value)
# was already seen; if so, print the pair of indices, otherwise remember the
# value's index for later lookups.
nums = [2, 8, 7, 3]
target = 9
seen = dict()
for idx, value in enumerate(nums):
    needed = target - value
    if needed in seen:
        print([seen[needed]], idx)
    else:
        seen[value] = idx
"noreply@github.com"
] | kavandalal.noreply@github.com |
8a146dbbf4a140a3c67971610a308d0ae7c10afc | ef4b01eb3cc09193ca0410d24ae40a8ccfc7fa27 | /kaldi/egs/aspire/s5/check.py | f09725a65a1dea0da2f78910b9719631e6ecf289 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | NitinShuklaML/ASR-WebAPPLinux | 67f25c7714dff451fc26a64099760edd2e5d18da | 1e70ec76cde118e48aa3141e347874716ff1ceeb | refs/heads/master | 2020-12-05T10:28:38.111633 | 2020-01-06T13:54:32 | 2020-01-06T13:54:32 | 232,080,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | #!/usr/bin/env python
import os
import sys

# Run the sibling encoding.py helper, forwarding the first CLI argument.
# NOTE(review): the argument is interpolated unquoted into a shell command
# (os.system), so spaces or shell metacharacters in it will break/inject —
# confirm the caller always passes a safe token.
args = str(sys.argv[1])
command = f"{os.getcwd()}/encoding.py {args}"
print(f"command is :{command}")
os.system(command)
| [
"nitin.shukla2014@gmail.com"
] | nitin.shukla2014@gmail.com |
d1ee3b808c8d175829f69a8e3110517b8be39f51 | 613bca33c036b069f3bdcddcc860c69e9eed6556 | /util.py | f8e54349655f74656336da58309747d8e3b643ad | [] | no_license | qsantos/majority-judgment | 821f4224eb55dc12e24f0999b68be1d92dff3566 | ef11e327bd0af4d74a4c6a6d394f11c5a084c2fe | refs/heads/master | 2020-04-14T23:29:32.607465 | 2019-01-05T10:14:51 | 2019-01-05T10:21:08 | 164,204,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,500 | py | #!/usr/bin/env python3
"""Some utilities (mostly arithmetic)"""
import random
import json
import hashlib
import gmpy2
def powmod(x, y, m):
    """Compute ``x**y mod m``.

    ``gmpy2.powmod`` is faster than Python's builtin modular ``pow`` for
    large operands, but calling into gmpy2 has overhead that is skipped for
    the trivial base ``x == 1``.  A negative exponent is handled by inverting
    the positive-exponent result modulo ``m``.

    Arguments:
        x (int): base of the exponentiation
        y (int): exponent (may be negative when x is invertible mod m)
        m (int): modulus

    Returns:
        int: the result of ``x**y mod m``
    """
    if x == 1:
        # Reduce modulo m so the degenerate modulus m == 1 correctly
        # yields 0 (the original returned 1 here unconditionally).
        return 1 % m
    elif y < 0:
        return invert(powmod(x, -y, m), m)
    else:
        return int(gmpy2.powmod(x, y, m))
def invert(x, m):
    """Return the inverse of *x* modulo *m*.

    Thin wrapper around ``gmpy2.invert``.

    Arguments:
        x (int): element to be inverted
        m (int): modulus

    Returns:
        int: y such that ``x * y == 1 (mod m)``
    """
    inverse = gmpy2.invert(x, m)
    return int(inverse)
def is_prime(x):
    """Probabilistically test whether *x* is prime.

    Thin wrapper around ``gmpy2.is_prime``.  The original implementation
    cast the result to ``int``, contradicting its own docstring; it is cast
    to ``bool`` here, which stays truthy/falsy-compatible for all callers.

    Arguments:
        x (int): the candidate prime

    Returns:
        bool: ``True`` if *x* is probably prime else ``False``
    """
    return bool(gmpy2.is_prime(x))
def genprime(n_bits, safe_prime=False):
    """Generate a probable prime of *n_bits* bits.

    Built on ``gmpy2.next_prime``; optionally keeps drawing until the result
    is a safe prime, i.e. a prime q = 2p + 1 where p is prime as well.

    Arguments:
        n_bits (int): size in bits of the prime to be generated
        safe_prime (bool): if True, the result is also a probable safe prime

    Returns:
        int: a probable prime from ``[2**(n_bits-1), 2**n_bits]``
    """
    if not safe_prime:
        # Draw a random odd starting point, then take the next prime above it.
        start = random.SystemRandom().randrange(2 ** (n_bits - 1), 2 ** n_bits) | 1
        return int(gmpy2.next_prime(start))
    while True:
        # Look for q of the form 2p + 1 such that p is prime as well.
        p = genprime(n_bits - 1)
        q = 2 * p + 1
        if is_prime(q):
            return q
def crt(residues, moduli):
    """Recombine *residues* with the Chinese Remainder Theorem.

    Arguments:
        residues (list): the residues (int)
        moduli (list): the corresponding moduli (int), in the same order

    Returns:
        int: x with ``x < prod(moduli)`` and ``x % m == r`` for every
        (r, m) pair in ``zip(residues, moduli)``
    """
    residues = list(residues)
    total_modulus = prod(moduli)
    result = 0
    for residue, modulus in zip(residues, moduli):
        complement = total_modulus // modulus
        result += residue * complement * invert(complement, modulus)
        result %= total_modulus
    return result
def prod(elements_iterable, modulus=None):
    """Compute the product of the given elements.

    The original implementation primed the accumulator with ``next()`` and
    therefore raised ``StopIteration`` on an empty iterable; this version
    returns the multiplicative identity 1 instead, matching ``math.prod``.

    Arguments:
        elements_iterable (iterable): values (int) to be multiplied together
        modulus (int): if provided, the result is reduced modulo this value

    Returns:
        int: the product of the elements (1 for an empty iterable), reduced
        modulo *modulus* when one is given
    """
    product = 1
    for element in elements_iterable:
        product *= element
        if modulus is not None:
            # reduce eagerly to keep intermediate values small
            product %= modulus
    return product if modulus is None else product % modulus
def random_numbers_totaling(total, count):
    """Generate *count* random non-negative integers summing to *total*.

    Works by cutting the interval [0, total] at count-1 random fenceposts and
    returning the lengths of the resulting sub-intervals (inspired from
    <http://umusebo.com/generate-n-random-numbers-whose>).

    Arguments:
        total (int): the value the random numbers should sum to
        count (int): the number of random numbers to generate; must be >= 1

    Returns:
        list: l, random numbers (int) such that ``sum(l) == total`` and
        ``len(l) == count``

    Raises:
        ValueError: if count < 1 (the original silently returned ``[total]``,
        violating its own ``len(l) == count`` contract).
    """
    if count < 1:
        raise ValueError("count must be >= 1")
    # divide [0, total] into `count` random subranges
    fenceposts = sorted(random.randrange(total + 1) for _ in range(count - 1))
    # return the lengths of these subranges
    return [b - a for a, b in zip([0] + fenceposts, fenceposts + [total])]
def H(query, n=2**80):
    """Simulate a random oracle with SHA-512 over a canonical JSON encoding.

    Arguments:
        query (JSON serializable object): the query for the oracle
        n (int): the output will be selected in Z_n

    Returns:
        int: an arbitrary but deterministic value in [0, n)
    """
    # sort_keys makes the serialization canonical, so equal queries hash equal
    serialized = json.dumps(query, sort_keys=True).encode()
    digest = hashlib.sha512(serialized).digest()
    # big-endian bytes-to-int is equivalent to int(hexdigest, 16)
    return int.from_bytes(digest, 'big') % n
| [
"quentin.santos@worldonline.fr"
] | quentin.santos@worldonline.fr |
25fba7312e0c89bdf77f6d597d2878f52fb8b372 | b7f1557e5a2ac164e59918efe3ac07f781699012 | /code/baseline/gen_result.py | 51d7c2fe3e27ce9a5fc0fe602528bc020e967593 | [] | no_license | aspnetcs/AT-CNN | 2dfeb1928df9521ac5a5c8f9bb8d082ad9237b2a | 46d1dfa599c186def93d48e1589b02f111a67599 | refs/heads/master | 2022-12-25T19:40:41.036715 | 2020-10-06T00:50:05 | 2020-10-06T00:50:05 | 299,809,649 | 0 | 0 | null | 2020-09-30T04:35:24 | 2020-09-30T04:35:24 | null | UTF-8 | Python | false | false | 11,444 | py | from utils import GetSmoothGrad, clip_and_save_single_img, clip_gradmap
import os
from cv2 import imwrite, imread
import argparse
import torch
import numpy as np
import torch
from utils import get_a_set
import torch.nn.functional as F
import torch.nn as nn
from dataset import create_test_dataset, create_train_dataset, \
create_saturation_test_dataset, create_edge_test_dataset, \
create_style_test_dataset, create_brighness_test_dataset, create_patch_test_dataset
import torchvision.models as models
import skimage.io as io
def GetSmoothRes(net, Data, DEVICE, save_path ='./SmoothRes/Fashion_MNIST'):
    """Compute a SmoothGrad saliency map for every sample in *Data* and save
    each one as ``<save_path>/<index>.png``.

    Arguments:
        net: trained model to explain.
        Data: dataset object exposing parallel numpy arrays ``X`` (images)
            and ``Y`` (integer labels).
        DEVICE: torch device used for the gradient computation.
        save_path: output directory — assumed to exist already.

    NOTE(review): reads ``i`` after the loop, so *Data* must be non-empty.
    """
    for i, (img, label) in enumerate(zip(Data.X, Data.Y)):
        #print(i)
        #print(img.shape, label.shape)
        img = img.astype(np.float32)
        #label = label.astype(np.float32)
        # add a leading batch dimension before converting to a tensor
        img = img[np.newaxis,:]
        img = torch.tensor(img)
        #print(img.type())
        label = torch.tensor(label).type(torch.LongTensor)
        grad_map = GetSmoothGrad(net, img, label, DEVICE = DEVICE)
        # move to CPU and clip the raw gradients into a displayable image
        grad_map = grad_map.cpu().detach().numpy()
        grad_map = clip_gradmap(grad_map)
        #print(grad_map.shape, grad_map.mean())
        save_p = os.path.join(save_path, '{}.png'.format(i))
        #print(grad_map.shape)
        imwrite(save_p, grad_map)

    print('{} imgs saved in {}'.format(i+1, save_path))
def get_result(net, dl, DEVICE, net_name = ''):
    """Dump SmoothGrad visualisations for the first six batches of *dl*.

    For each sample this saves: the saliency map under
    ``../../SmoothRes/<net_name>/``, the de-normalised input image under
    ``../../data/benchmark/``, and a side-by-side comparison image
    (gray gradient | colour gradient | input).  Labels of the processed
    batches are written to ``benchmark/label.txt``.
    """
    save_bench = '../../data/benchmark/'
    save_path = os.path.join('../../SmoothRes/', net_name)
    labels = []
    net.eval()
    # ImageNet channel statistics used to undo the dataloader normalisation
    mean = torch.tensor(np.array([0.485, 0.456, 0.406]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
    std = torch.tensor(np.array([0.229, 0.224, 0.225]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
    mean = mean.to(DEVICE)
    std = std.to(DEVICE)
    for i, (batch_img, batch_label) in enumerate(dl):
        # only the first 6 batches are visualised
        if i> 5:
            break
        for j in range(int(batch_img.size(0))):
            img = batch_img[j]
            label = batch_label[j]
            img = img.to(DEVICE)
            label = label.to(DEVICE)
            #print(img.size())
            grad_map = GetSmoothGrad(net, img, label, DEVICE, stdev_spread = 0.05)
            #print(grad_map.shape)
            clip_and_save_single_img(grad_map, i * batch_img.size(0) + j, save_dir=save_path)
            #print(grad.shape)
            #simg = (img + mean) * std
            # undo the normalisation, clamp to [0, 1], convert to 8-bit HWC
            simg = img * std + mean
            #print('rb', simg.max(), simg.min())
            simg = torch.clamp(simg, 0, 1)
            #print('r', simg.max(), simg.min())
            simg = simg.detach().cpu().numpy() * 255.0
            #print(simg.shape)
            #print(simg.shape)
            simg = simg[0]
            simg = np.transpose(simg, (1, 2, 0)).astype(np.uint8)
            #print('r', simg.max(), simg.min())
            #imwrite(os.path.join(save_bench, '{}.png'.format(i * batch_img.size(0) + j)), simg)
            io.imsave(os.path.join(save_bench, '{}.png'.format(i * batch_img.size(0) + j)), simg)
            print(i * batch_img.size(0) + j)
            # re-load the saliency map just written by clip_and_save_single_img
            #grad = imread(os.path.join(save_path, '{}-smooth.png'.format(i * batch_img.size(0) + j)))
            grad = io.imread(os.path.join(save_path, '{}-smooth.png'.format(i * batch_img.size(0) + j)),
                             as_gray = False)
            # if gray
            # grad = grad[:, :, np.newaxis]
            # grad = np.repeat(grad, 3, axis = 2)
            # 3-channel grayscale copy of the gradient image for the montage
            gray_grad = np.mean(grad, axis = -1, keepdims = True)
            gray_grad = gray_grad.astype(np.uint8)
            gray_grad = np.repeat(gray_grad, 3, axis = 2)
            pair_img = np.concatenate((gray_grad, grad, simg), axis=1)
            #imwrite(os.path.join(save_path, '{}-pair.png'.format(i * batch_img.size(0) + j)), pair_img)
            io.imsave(os.path.join(save_path, '{}-pair.png'.format(i * batch_img.size(0) + j)), pair_img)
        labels.append(batch_label.numpy())
    labels = np.array(labels)
    np.savetxt(os.path.join(save_bench, 'label.txt'), labels.reshape(-1))
    #MakeVisual(save_bench, save_path)
def l1_for_without_smooth(net, dl, DEVICE):
    """Print min / max / mean of the per-sample L1 norms of SmoothGrad maps
    computed over the whole loader *dl*.
    """
    net.eval()
    net.to(DEVICE)
    #criterion = nn.CrossEntropyLoss().to(DEVICE)
    l1s = []
    for i, (batch_img, batch_label) in enumerate(dl):
        #if i> 5:
        #    break
        for j in range(int(batch_img.size(0))):
            img = batch_img[j]
            label = batch_label[j]
            img = img.to(DEVICE)
            label = label.to(DEVICE)
            #print(img.size())
            # assumes num=32 noisy draws with 5% stdev spread — confirm
            # against GetSmoothGrad's contract
            grad_map = GetSmoothGrad(net, img, label, DEVICE, stdev_spread = 0.05, num=32)
            #print(grad_maps.size(), batch_img.size())
            l1s.append(torch.norm(grad_map, 1).item())
    l1s = np.array(l1s)
    print("Min: {:.4f} -- Max: {:.2f} -- Mean:{:.2f}".format(l1s.min(), l1s.max(), l1s.mean()))
def l1_for_with_smooth(net, dl, DEVICE):
    """Print min / max / mean of the per-batch L1 norms of the loss gradient
    with respect to the input images over the loader *dl*.

    Unlike :func:`l1_for_without_smooth`, the norm here is one value per
    batch, not per sample.
    """
    net.eval()
    net.to(DEVICE)
    criterion = nn.CrossEntropyLoss().to(DEVICE)
    l1s = []
    for i, (batch_img, batch_label) in enumerate(dl):
        batch_img = batch_img.to(DEVICE)
        batch_label = batch_label.to(DEVICE)
        # inputs must require grad for torch.autograd.grad below
        batch_img.requires_grad = True
        pred = net(batch_img)
        loss = criterion(pred, batch_label)
        grad_maps = torch.autograd.grad(loss, batch_img, create_graph=True, only_inputs=False)[0]
        #print(grad_maps.size(), batch_img.size())
        l1s.append(torch.norm(grad_maps, 1).item())
    l1s = np.array(l1s)
    print("Min: {:.2f} -- Max: {:.2f} -- Mean:{:.2f}".format(l1s.min(), l1s.max(), l1s.mean()))
def MakeVisual(data_dir = './benchmark/CIFAR', result_dir = './SmoothRes/CIFAR/'):
    """Assemble the per-sample images under *result_dir* into one montage and
    save it in result_dir's parent directory as ``<net_name>.png``, where
    net_name is result_dir's last path component.

    Arguments:
        data_dir: benchmark directory containing ``label.txt``.
        result_dir: directory holding the saliency images to assemble.
    """
    # Normalise away a trailing '/' first: the original split gave an empty
    # net_name for paths like './SmoothRes/CIFAR/' and wrote the montage to
    # '<result_dir>/.png' instead of '<parent>/<net_name>.png'.
    result_dir = result_dir.rstrip('/')
    save_p = os.path.join(*result_dir.split('/')[:-1])
    print(save_p)
    net_name = result_dir.split('/')[-1]
    labels = np.loadtxt(os.path.join(data_dir, 'label.txt'))
    imgs = get_a_set(labels, result_dir, data_dir, times = 3)
    print(os.path.join(save_p, '{}.png'.format(net_name)))
    imwrite(os.path.join(save_p, '{}.png'.format(net_name)), imgs)
def test_model(net, dl):
    """Print top-1 / top-3 accuracy of *net* over the loader *dl*.

    NOTE(review): relies on the module-level DEVICE bound in the __main__
    block below, and does not disable autograd — confirm both are intended.
    """
    acc1s = []
    acc3s = []
    net.eval()
    for i, (batch_img, batch_label) in enumerate(dl):
        batch_img = batch_img.to(DEVICE)
        batch_label = batch_label.to(DEVICE)
        pred = net(batch_img)
        acc1, acc3 = torch_accuracy(pred, batch_label)
        acc1s.append(acc1)
        acc3s.append(acc3)
    acc1s = np.array(acc1s)
    acc3s = np.array(acc3s)
    print('accuracy top-1: {} top-3: {}'.format(acc1s.mean(), acc3s.mean()))
def test_model_genera(net, dl, dl_teacher):
    """Print two accuracy figures over the transformed loader *dl*:

    * "generalisation" accuracy — the top-k accuracy counting only samples
      that are correct on BOTH *dl* and the clean reference loader
      *dl_teacher*, normalised by the accuracy on *dl_teacher*;
    * the plain (absolute) accuracy on *dl*.

    Assumes *dl* and *dl_teacher* yield the same samples in the same order
    (the label-equality sanity check is left commented out below).  Uses the
    module-level DEVICE bound in the __main__ block.
    """
    acc1s = []
    acc3s = []
    aacc1s = []
    aacc3s = []
    net.eval()
    dl_teacher = enumerate(dl_teacher)
    with torch.no_grad():
        for i, (batch_img, batch_label) in enumerate(dl):
            j, (teacher_img, _) = next(dl_teacher)
            #print(torch.sum(torch.eq(_, batch_label).float()))
            teacher_img = teacher_img.to(DEVICE)
            batch_img = batch_img.to(DEVICE)
            batch_label = batch_label.to(DEVICE)
            pred = net(batch_img)
            teacher = net(teacher_img)
            acc1, acc3 = torch_genera_accuracy(pred, batch_label, teacher)
            aacc1, aacc3 = torch_accuracy(pred, batch_label)
            tacc1, tacc3 = torch_accuracy(teacher, batch_label)
            # conditional accuracy: correct-on-both / correct-for-teacher
            acc1 = (acc1 / tacc1) * 100
            acc3 = (acc3 / tacc3) * 100
            acc1s.append(acc1)
            acc3s.append(acc3)
            aacc1s.append(aacc1)
            aacc3s.append(aacc3)
    acc1s = np.array(acc1s)
    acc3s = np.array(acc3s)
    aacc1s = np.array(aacc1s)
    aacc3s = np.array(aacc3s)
    print('accuracy top-1: {:.2f} top-3: {:.2f}'.format(acc1s.mean(), acc3s.mean()))
    print('Absolute accuracy top-1: {:.2f} top-3: {:.2f}'.format(aacc1s.mean(), aacc3s.mean()))
def torch_accuracy(output, target, topk = (1, 3)):
    '''
    Compute top-k accuracies (as percentages) of *output* scores against
    *target* labels, one value per entry in *topk*.

    param output, target: should be torch Variable
    '''
    max_k = max(topk)
    batch_size = output.size(0)

    # indices of the max_k best-scoring classes, transposed to [max_k, batch]
    _, ranked = output.topk(max_k, 1, True, True)
    ranked = ranked.t()
    hits = ranked.eq(target.view(1, -1).expand_as(ranked))

    accuracies = []
    for k in topk:
        correct_k = hits[:k].reshape(-1).float().sum(0, keepdim=True)
        accuracies.append(correct_k.mul_(100.0 / batch_size))
    return accuracies
def torch_genera_accuracy(output, target, teacher, topk = (1, 3)):
'''
param output, target: should be torch Variable
'''
#assert isinstance(output, torch.cuda.Tensor), 'expecting Torch Tensor'
#assert isinstance(target, torch.Tensor), 'expecting Torch Tensor'
#print(type(output))
topn = max(topk)
batch_size = output.size(0)
_, pred = output.topk(topn, 1, True, True)
pred = pred.t()
_, teacher_pred = teacher.topk(topn, 1, True, True)
teacher_pred = teacher_pred.t()
is_correct = pred.eq(target.view(1, -1).expand_as(pred))
is_teacher_correct = teacher_pred.eq(target.view(1, -1).expand_as(teacher_pred))
ans = []
for i in topk:
is_correct_i = is_correct[:i].view(-1).float()# .sum(0, keepdim = True)
is_teacher_correct_i = is_teacher_correct[:i].view(-1).float()
genera_correct_i = is_correct_i * is_teacher_correct_i
genera_correct_i = genera_correct_i.sum(0, keepdim = True)
#ans.append(is_correct_i.mul_(100.0 / batch_size))
ans.append(genera_correct_i.mul_(100.0 / batch_size))
return ans
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--resume', type = str,
                        default='../exps/tradeoff.eps8/checkpoint.pth.tar')
    parser.add_argument('-d', type = int, default=1)
    parser.add_argument('-p', type = float, default=None, help = 'saturation level; 2 unchanged')
    parser.add_argument('-b', type=float, default=None, help='brightness level; 1 unchanged')
    parser.add_argument('-e', action = 'store_true', default=False, help='Edges?')
    parser.add_argument('-k', type=int, default=None, help='patch num')
    args = parser.parse_args()
    # the checkpoint's parent directory doubles as the network's name
    net_name = args.resume.split('/')[-2]
    print(net_name)
    path = os.path.join('../../SmoothRes', net_name)
    if not os.path.exists(path):
        os.mkdir(path)
    # ResNet-18 with a 257-way head, restored from the checkpoint
    # (presumably Caltech-256 + clutter — confirm the dataset)
    net = models.resnet18(pretrained=False)
    net.fc = nn.Linear(512, 257)
    net.load_state_dict(torch.load(args.resume)['state_dict'])
    DEVICE = torch.device('cuda:{}'.format(args.d))
    net.to(DEVICE)
    # clean test set used as the 'teacher' reference loader
    dl_teacher = create_test_dataset(32)
    # pick the evaluation loader based on the -p / -b / -k options
    # NOTE(review): if both -p and -b are given (without -k), none of the
    # branches binds `dl` and the call below raises NameError — confirm
    # intended usage.
    if args.p is None and args.b is None:
        dl = create_test_dataset(32)
    if args.b is not None and args.p is None:
        dl = create_brighness_test_dataset(batch_size = 32,
                                           root = './', bright_level = args.b)
    if args.p is not None and args.b is None:
        dl = create_saturation_test_dataset(32, root = './', saturation_level = args.p)
    if args.k is not None:
        print('Creating path data')
        dl = create_patch_test_dataset(32, './', args.k)
    # style
    #dl = create_style_test_dataset(32)
    #xz_test(dl, 1,net, DEVICE)
    #test_model(net, dl)
    test_model_genera(net, dl, dl_teacher)
    #l1_for_without_smooth(net, dl, DEVICE)
    #l1_for_with_smooth(net, dl, DEVICE)
    #get_result(net, dl, DEVICE, net_name)
| [
"1600012888@pku.edu.cn"
] | 1600012888@pku.edu.cn |
9af96fdcefff455cf4f7e1056a1b7e30e11e74a7 | 9bf6d807f9ba9c5122e3bb6872203e37875efbd3 | /BDA/Projet/vocabulary.py | f5be7fb50365e16e16dc55d9daf675e176c11ed3 | [] | no_license | EmmanuelDoumard/BDA | 99ca3cf879c16a90b0fe3a3f970098ebdc85410d | 4f7289138fb732d1c675c42f87adcac58f240082 | refs/heads/master | 2021-09-07T13:24:24.577830 | 2018-02-23T14:09:09 | 2018-02-23T14:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,899 | py | #!/usr/bin/python
""" This file contains the defintion of the classes needed to store a vocabulary
- only numerical attributes are considered in this version (values in the csv file casted to floats)
"""
import sys
import os
class PartitionElement:
    """Base class for one element (linguistic label) of an attribute partition."""

    def __init__(self, lab, a, i, ia=1):
        self._label = lab            # linguistic label of the element
        self._attribute = a          # name of the partitioned attribute
        self._numAttribute = ia      # partition (attribute) number
        self._position = i           # position of the element in its partition

    def getAttributeNumber(self):
        return self._numAttribute

    def getPosition(self):
        return self._position

    def getAttribute(self):
        return self._attribute

    def getLabel(self):
        return self._label

    def __str__(self):
        # Without this, repr() of a base-class instance recursed forever:
        # __repr__ called object.__str__, which calls __repr__ again.
        # Subclasses override __str__ with richer representations.
        return "\t\tVocabulary element: " + self._label

    def __repr__(self):
        return self.__str__()
class PartitionElementCat(PartitionElement):
    """Partition element over a categorical attribute.

    Membership degrees are parsed from a string such as
    ``"red:1.0;blue:0.5"``; the wildcard category ``'*'`` matches every
    value with degree 1.  Malformed chunks are silently skipped.
    """

    def __init__(self, lab, cat, a, i, ia=1):
        """Stores the definition of a categorical partition element"""
        PartitionElement.__init__(self, lab, a, i, ia)
        self._cats = dict()
        for chunk in cat.split(';'):
            pair = chunk.split(':')
            if len(pair) == 2:
                self._cats[pair[0]] = float(pair[1])

    def mu(self, v):
        """Returns the satisfaction degree of v wrt. the partition element"""
        if '*' in self._cats:
            return 1.0
        return self._cats.get(v, 0.0)

    def __str__(self):
        """Overloading of the string representation of a partition element"""
        txt = "\t\tVocabulary element: " + self._label + " ("
        for name in self._cats:
            txt += '' + name + '=>' + str(self._cats[name]) + ', '
        return txt[:-2] + ')'
class PartitionElementNum(PartitionElement):
    """Partition element over a numerical attribute, modelled as a trapezoidal
    fuzzy set with support ]minSupport, maxSupport[ and core [minCore, maxCore].

    The four getters the original re-declared verbatim
    (getAttributeNumber / getPosition / getAttribute / getLabel) are
    inherited from PartitionElement; the duplicates have been removed.
    """

    def __init__(self, lab, s1, c1, c2, s2, a, i, ia=1):
        """Stores the definition of a partition element"""
        PartitionElement.__init__(self, lab, a, i, ia)
        self._minSupport = s1
        self._minCore = c1
        self._maxCore = c2
        self._maxSupport = s2

    def mu(self, v):
        """Returns the satisfaction degree of v wrt. the partition element.

        Missing values (None or the string 'NA') get degree 0.
        """
        mu = 0.0
        if (v is not None) and (v != 'NA'):
            v = float(v)
            if v >= self._maxSupport or v <= self._minSupport:
                mu = 0.0
            elif v < self._minCore:
                # rising edge of the trapezium
                mu = 1 - ((self._minCore - v) / (self._minCore - self._minSupport))
            elif v > self._maxCore:
                # falling edge of the trapezium
                mu = (self._maxSupport - v) / (self._maxSupport - self._maxCore)
            else:
                # inside the core
                mu = 1.0
        return mu

    def __str__(self):
        """ Overloading of the string representation of a partition element"""
        return "\t\tVocabulary element: "+self._label+" - Support ]"+str(self._minSupport)+","+str(self._maxSupport)+"[ - Core ["+str(self._minCore)+","+str(self._maxCore)+"]"

    def __repr__(self):
        return self.__str__()
class Partition:
    """A fuzzy partition discretising one attribute into labelled elements."""

    def __init__(self, an, i, j):
        """ Store a partition discretizing an attribute an"""
        self._attribute = an      # attribute name
        self._elements = dict()   # element id -> partition element
        self._ipe = 1             # position assigned to the next element
        self._numP = i            # partition number
        self._attNumb = j         # attribute (column) number

    def getAttributeNumber(self):
        return self._attNumb

    def getAttribute(self):
        return self._attribute

    def getElements(self):
        return self._elements

    def getElement(self, e):
        """*e* is the key in the elements dict, a string position id."""
        return self._elements[e]

    def addElement(self, an, l, ms, mc, mac, mas):
        """Add a numerical element: id *an*, label *l*, trapezium bounds."""
        if an in self._elements:
            print("Error : a partition element already exists with the id "+str(an)+ " for partition "+self._attribute)
        else:
            self._elements[an] = PartitionElementNum(l, float(ms), float(mc), float(mac), float(mas), self._attribute, self._ipe, self._numP)
            self._ipe += 1

    def addElementCat(self, an, l, cats):
        """Add a categorical element: id *an*, label *l*, categories *cats*."""
        if an in self._elements:
            print("Error : a partition element already exists with the id "+str(an)+ " for partition "+self._attribute)
        else:
            self._elements[an] = PartitionElementCat(l, cats, self._attribute, self._ipe, self._numP)
            self._ipe += 1

    def __str__(self):
        """ Overloading of the string representation of an attribute partition"""
        s = "\tPartition of attribute: "+str(self._attribute)+" ["+str(self.getAttributeNumber())+"]\n"
        for pek in self._elements:
            s += str(self._elements[pek])+"\n"
        return s

    def __repr__(self):
        return self.__str__()
class Vocabulary:
    """ This class stores and manipulates a fuzzy-set-based vocabulary.

    The vocabulary is loaded from a CSV file whose non-comment lines are:
      numerical:   attname,elementId,label,minSupport,minCore,maxCore,maxSupport
      categorical: attname,elementId,label,cat1:mu1;cat2:mu2;...
    """

    def __init__(self, vocF):
        """Load the vocabulary defined in the csv file *vocF*."""
        self._vocCSVFile = vocF
        self._partitions = dict()
        self.loadVocabulary()

    def mapping(self, att):
        """Map an attribute name to its column number in the data file.

        Raises KeyError for an unknown attribute name.
        """
        return {
            'DayOfWeek': 3,
            'DepTime': 4,
            'ArrTime': 6,
            'AirTime': 13,
            'ArrDelay': 14,
            'DepDelay': 15,
            'Distance': 18,
            'Month': 1,
            'DayOfMonth': 2,
            'TaxiIn': 19,
            'TaxiOut': 20,
            'CarrierDelay': 24,
            'WeatherDelay': 25,
            'SecurityDelay': 27,
            'LateAircraftDelay': 28,
            'Origin': 16,
            'Dest': 17
        }[att]

    def getPartitions(self):
        return self._partitions

    def getPartition(self, i):
        """*i* is a key in the partitions dict, i.e. an attribute name."""
        return self._partitions[i]

    def loadVocabulary(self):
        """Load and initiates the vocabulary, its partitions and elements"""
        try:
            f = open(self._vocCSVFile, 'r')
        except OSError:
            # narrowed from a bare `except:`, which also swallowed
            # unrelated errors such as KeyboardInterrupt
            print("File "+self._vocCSVFile+" not found or not readable")
            return
        j = 1
        # `with` guarantees the handle is closed (the original leaked it)
        with f:
            for line in f:
                line = line.strip()
                if line != "" and line[:1] != "#":
                    elDtls = line.split(',')
                    if len(elDtls) != 7 and len(elDtls) != 4:
                        print("Format error in the definition of a partition element "+line)
                    else:
                        if elDtls[0] not in self._partitions:
                            self._partitions[elDtls[0]] = Partition(elDtls[0], j, self.mapping(elDtls[0]))
                            j += 1
                        if len(elDtls) == 7:
                            self._partitions[elDtls[0]].addElement(elDtls[1], elDtls[2], elDtls[3], elDtls[4], elDtls[5], elDtls[6])
                        else:
                            self._partitions[elDtls[0]].addElementCat(elDtls[1], elDtls[2], elDtls[3])

    def __str__(self):
        """ Overloading of the string representation of a vocabulary"""
        s = "Vocabulary: "+self._vocCSVFile+"\n"
        for pk in self._partitions:
            s += str(self._partitions[pk])+"\n"
        return s

    def __repr__(self):
        return self.__str__()
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: python vocabulary.py <vocfile.csv>")
else:
if os.path.isfile(sys.argv[1]):
v= Vocabulary(sys.argv[1])
paDow = v.getPartition('DayOfWeek')
for posE in paDow.getElements():
pe = paDow.getElement(posE)
print(pe.getLabel()," mu(3)", pe.mu('3'))
| [
"edoumard@lochan1.enssat.fr"
] | edoumard@lochan1.enssat.fr |
c5d9cf121b69761030d0050d73f56d251f3b3b8d | a57eb85856cc93a389b815276765a06019d6f8d4 | /manage.py | a187e1a5e09bc71ad51ec80483e4c11d9cced0d9 | [
"MIT"
] | permissive | astrofrog/multistatus | 205f6ad9a06995bc4b418f6d0de222c4dacfcb79 | f63042c9e85cb4e58e3768a65f4684be07e432d1 | refs/heads/master | 2021-01-10T21:05:40.370555 | 2014-12-09T10:24:48 | 2014-12-09T10:24:48 | 22,805,918 | 0 | 1 | null | 2014-12-09T10:23:13 | 2014-08-10T09:39:38 | Python | UTF-8 | Python | false | false | 257 | py | #!/usr/bin/env python3.4
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "multistatus.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
6a75db16fa3b23eccf97f4b2c9be2498879fde5e | 08166c6f67763ce121c3d0b5ddc341972547a0c0 | /factorial_number.py | 0b402f7f31aa22b2916a407113b569dfcd5e2a66 | [] | no_license | absognety/ML-Algorithms | e082b2fd92cfe90f9f0559297884647b86608d94 | 2f11d5297b81f92b8398d501420aec6648eea5ab | refs/heads/master | 2020-03-26T20:13:24.838718 | 2019-07-21T10:09:23 | 2019-07-21T10:09:23 | 145,311,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | def factorial(n):
if (n==0):
return (1)
else:
return (n*factorial(n-1))
| [
"cvikas10@ms.ds.uhc.com"
] | cvikas10@ms.ds.uhc.com |
86e9a4365750c680f76d042e03c4009c7370d109 | d6c0153d6bba38fae09ae5899562a3a900caee5d | /newton_raphson.py | 377a6beb841ab979cc26b33635e227ad8001d5d8 | [
"MIT"
] | permissive | Zaier9/Skills | b048df52de5d03a437c45d9fdba2b519a516f85a | 98962232e47c7d635ace91fe92f83346d719a73c | refs/heads/master | 2023-08-12T18:18:43.941128 | 2021-09-22T18:59:36 | 2021-09-22T18:59:36 | 406,978,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | #Implementando el método Newton en Python
#El método de Newton-Raphson (tambien conocido como metodo de Newton), es una forma de encontrar rápidamente una aproximación para la raíz de una funcion real
from __future__ import annotations
from decimal import Decimal
from math import *
from sympy import diff
def newton_raphson(
    func: str, a: float | Decimal, precision: float = 10 ** -10
) -> float:
    """Find a root of *func* by Newton-Raphson, iterating from the point *a*.

    *func* is a Python expression in the variable ``x``, evaluated with
    ``eval`` against the names wildcard-imported from :mod:`math`; its
    derivative is obtained symbolically through :func:`sympy.diff`.

    NOTE(review): there is no iteration cap, so an input on which the
    method diverges loops forever.

    >>> newton_raphson("sin(x)", 2)
    3.1415926536808043
    >>> newton_raphson("x**2 - 5*x +2", 0.4)
    0.4384471871911695
    >>> newton_raphson("x**2 - 5", 0.1)
    2.23606797749979
    >>> newton_raphson("log(x)- 1", 2)
    2.718281828458938
    """
    x = a
    while True:
        # One Newton step: x_{n+1} = x_n - f(x_n) / f'(x_n)
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))
        # This number dictates the precision of the answer
        if abs(eval(func)) < precision:
            return float(x)
# Run the demos when executed as a script.
if __name__ == '__main__':
    # Root of a trigonometric function: recovers the value of pi.
    print(f"La raíz de sin(x) = 0 es {newton_raphson('sin(x)', 2)}")
    # Root of a polynomial.
    print(f"La raíz de x**2 - 5*x + 2 = 0 es {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Root of log(x) - 1, i.e. the number e (the original comment wrongly
    # said "square root of 5").
    print(f"La raíz de log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential root.
    print(f"La raiz de exp(x) - 1 = 0 es {newton_raphson('exp(x) - 1', 0)}")
"veraalber989@gmail.com"
] | veraalber989@gmail.com |
c5430a40f88a6e8928031c354a738696a86a4256 | 35c73d504e63702a5c198c8bf5ec931157c32933 | /linux/general/backups/check-tar.py | e912c3ee5a87a8798329246a803a8e290023a0e9 | [] | no_license | GunioRobot/bangadmin | ad3a04bbd5e4d7b9c3aa86a1c6200c061342a595 | d3c293e92dfd4ee85cc8f08460999b9660de7095 | refs/heads/master | 2020-04-13T23:01:51.025843 | 2011-11-15T15:17:29 | 2011-11-15T15:17:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | #!/usr/bin/python
# * Copyright (c) 2010, Sudheera Satyanarayana - http://techchorus.net
# * All rights reserved.
# *
# * Redistribution and use in source and binary forms, with or without modification,
# * are permitted provided that the following conditions are met:
# *
# * * Redistributions of source code must retain the above copyright notice,
# * this list of conditions and the following disclaimer.
# *
# * * Redistributions in binary form must reproduce the above copyright notice,
# * this list of conditions and the following disclaimer in the documentation
# * and/or other materials provided with the distribution.
# *
# * * Neither the names of Sudheera Satyanarayana nor the names of the project
# * contributors may be used to endorse or promote products derived from this
# * software without specific prior written permission.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *
# /
# Create a tar file with date and time prepended
# Useful for backups
import tarfile
import datetime

# Archive path with today's date prepended, e.g. /.../2024-01-31-sample.tar.
archive_path = "/home/someuser/some/safe/location/%s-sample.tar" % datetime.datetime.now().strftime('%Y-%m-%d')

# The context manager finalises and closes the archive even when tar.add
# raises (the original only closed the handle on the success path).
with tarfile.open(archive_path, "w") as tar:
    # NOTE(review): "backuo" looks like a typo for "backup" — the path is a
    # placeholder to be edited by the user, so it is left as-is.
    for name in ["/home/someuser/some/dir/to/backuo"]:
        tar.add(name)
| [
"sudheer.s@sudheer.net"
] | sudheer.s@sudheer.net |
b62337d44f8079e8c87e79cf69dfd5d97f54906d | a089d34db7950b86880317fa3a3c4cabb552df03 | /blog/urls.py | 45d930a9adb85f2dff7a8243b8b06364e3b9f549 | [] | no_license | amitbuyo2020/Blogapp | 27721784ff87a390ab76475b771c48176a620407 | b64aa01d54f53d76b694e4ecc138fbcc48a3934f | refs/heads/master | 2022-04-21T02:18:33.077115 | 2020-04-22T04:49:14 | 2020-04-22T04:49:14 | 257,794,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | from django.urls import path
from . import views
from .views import PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, UserPostListView
# URL routes for the blog app: class-based views handle the post CRUD pages,
# the remaining routes map to plain function views.
urlpatterns = [
    path('', PostListView.as_view(), name='blog_home'),
    # NOTE(review): unlike its siblings this pattern has no trailing slash,
    # so '/user/<name>/' will not match — confirm whether that is intended.
    path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
    path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
    path('post/new/', PostCreateView.as_view(), name='post-create'),
    path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
    path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
    path('about/', views.about, name='blog_about'),
    path('contribute/', views.contribute, name='contribute'),
    path('code/', views.code_of_conduct, name='code_of_conduct'),
    path('team/', views.swifter_team, name='swifter_team'),
    path('bugs/', views.report_bugs, name='report_bugs'),
    path('security/', views.security_issue, name='security'),
    path('join/', views.join_group, name='join_group'),
    path('latest/', views.latest_post, name='latest_post'),
    path('announcements/', views.announcements, name='announcements'),
    path('calendar/', views.calendar, name='calendar'),
    path('search/', views.searchBar, name='search')
]
| [
"58477464+amitbuyo2020@users.noreply.github.com"
] | 58477464+amitbuyo2020@users.noreply.github.com |
90449a0bc33f230b570751c77a0623290d25e6c9 | cf801d45f5330e3b87e2efa3b2d50253832962af | /baza/migrations/0006_auto_20180727_2115.py | bd8767979923ace63259183b46a0385fd49d23fd | [] | no_license | bbbrtk/biegnocny | 7daba92d5a5c765731eabf1256c7971ce37ba198 | 9f035902680f3aa30a23aad0d5bf0e086171b5a6 | refs/heads/master | 2020-03-26T12:46:47.948273 | 2019-04-25T20:01:40 | 2019-04-25T20:01:40 | 144,907,747 | 0 | 0 | null | 2019-04-25T19:42:20 | 2018-08-15T22:02:12 | Python | UTF-8 | Python | false | false | 398 | py | # Generated by Django 2.0.7 on 2018-07-27 19:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (2018-07-27): redeclares `Punkt.id`
    # as an explicit AutoField primary key.  Generated code — do not edit
    # the operations by hand.

    dependencies = [
        ('baza', '0005_auto_20180727_1336'),
    ]

    operations = [
        migrations.AlterField(
            model_name='punkt',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
    ]
| [
"sobkowiak.bart@gmail.com"
] | sobkowiak.bart@gmail.com |
10d541866288a328b07bc1a9602e1afcbb87583f | 994461efa08e71403b2a1c0598391fddf3b44b87 | /june-leetcoding-challenge/d5-random-pick-with-weight.py | 65d47a61d6766e106ff6b66178910de5a81e3489 | [] | no_license | pkdism/leetcode | 26f16f9211ddfdef8e0fe74c9268e6a48da64717 | 589652ae727331d1f962d22a020fc6ae09bfcea4 | refs/heads/master | 2020-12-11T15:48:01.116731 | 2020-08-16T18:31:03 | 2020-08-16T18:31:03 | 233,889,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | """
Given an array w of positive integers,
where w[i] describes the weight of index i,
write a function pickIndex which randomly picks an index in proportion to its weight.
"""
import random
class Solution:
    """Weighted random index picker (LeetCode 528).

    __init__ builds a strictly increasing prefix-sum array of the positive
    weights in O(n); each pickIndex() draws a uniform integer in
    [1, total_weight] and binary-searches for the first prefix sum >= the
    draw, so index i is returned with probability w[i] / sum(w) in O(log n).
    """

    def __init__(self, w: List[int]):
        """Precompute prefix sums of the (positive) weights."""
        self.w = w
        self.n = len(self.w)
        self.arr = []   # arr[i] == w[0] + ... + w[i]; strictly increasing
        self.curr = 0
        for weight in w:
            self.curr += weight
            self.arr.append(self.curr)

    def pickIndex(self) -> int:
        """Return a random index, weighted proportionally to w."""
        from bisect import bisect_left  # local import: module deps unchanged
        target = random.randrange(1, self.arr[-1] + 1)
        # Leftmost index whose prefix sum >= target — identical to the
        # hand-rolled binary search this replaces.
        return bisect_left(self.arr, target)
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex() | [
"pawan.dwivedi94@gmail.com"
] | pawan.dwivedi94@gmail.com |
488eedf2868298347e8fd761a86a7501687b5a22 | cc6e8b20a64940f2ad83be6a03134e8b105514e7 | /webviz/viz/migrations/0001_initial.py | 71fcb033e7e32232e310b0dfc022cfe73f08c521 | [
"MIT"
] | permissive | JagritiG/django-webviz | 50654c4fe76b1653f64e404c45d674d5d179e236 | 213489bb3a70019ca9cff5d127fd6d0c06bc61d6 | refs/heads/main | 2023-04-27T01:21:04.195996 | 2021-05-16T22:30:04 | 2021-05-16T22:30:04 | 353,126,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # Generated by Django 3.1.7 on 2021-03-30 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the viz app: creates the `Csv`
    # model (an uploaded CSV file with a display title).  Generated code —
    # do not edit the operations by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Csv',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                # NOTE(review): upload_to is relative to MEDIA_ROOT, so files
                # land under "<MEDIA_ROOT>/media/csvs/" — confirm intended.
                ('csv', models.FileField(upload_to='media/csvs/')),
            ],
        ),
    ]
| [
"jagritigoswami84@gmail.com"
] | jagritigoswami84@gmail.com |
d21b4633b406702e7ddc07b8954d02ab2b62f526 | 6828a9203c38d5079dfcefc8c3496849bd59f701 | /Lesson_01/2.py | 63c6eca1cf20476153250b2c9edb138ae0b781a1 | [] | no_license | vbelousPy/py_base | 340a1c3373bb764956c72d2cc9f2731086add732 | b8519af1815e79c775b36127f47bef5ef776037c | refs/heads/master | 2020-03-27T15:56:29.446374 | 2018-10-03T17:31:26 | 2018-10-03T17:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | x = input("input x: ")
y = input("input y: ")
print("x = ", x, "y = ", y)
# Swap x and y with tuple unpacking — the idiomatic form; no temporary needed.
x, y = y, x
print("x = ", x, "y = ", y)
| [
"vbbelous@gmail.com"
] | vbbelous@gmail.com |
9b2329c04f7996f67222cf50b79572e1b63ec22f | a91077de96c7d552f8c8a06732e5f028d4082e6d | /home/urls.py | 8139834fc119745c12a71af76464e8e9be09735b | [] | no_license | skbelalsaheb/DjangoWeb | 957d843a124d35fd145cccbc4bf44a5e968fad49 | f95a96a8d0f0a42796db59f93f795f7ae07ed0ea | refs/heads/master | 2023-01-22T19:15:47.486827 | 2020-12-05T06:21:33 | 2020-12-05T06:21:33 | 318,578,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | from django.contrib import admin
from django.urls import path,include
from django.conf.urls.static import static
from django.conf import settings
from home import views
# Route the site root to the class-based Index view.
urlpatterns = [
    path('', views.Index.as_view()),
]
# Serve user-uploaded media through Django in development; static() returns
# an empty list when DEBUG is False, so this is a no-op in production.
urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"skbelalsaheb@gmail.com"
] | skbelalsaheb@gmail.com |
42f1185f0886804da485d15b1b19448422bc7df2 | 106953e82953c6c855f1b81ed4144cb6b23bd43d | /Node.py | cc713c9b51bba308a8b23093bc51905ac146d8fe | [] | no_license | Maciek-R/ALHE-Pizza | fe37b6a03648a981065e1aa8808c98851afe19a5 | 5e160ef6965d03c2391f5d65d7e3cb80b73617d6 | refs/heads/master | 2021-03-22T04:17:16.652407 | 2017-11-04T19:32:31 | 2017-11-04T19:32:31 | 108,561,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | from math import sqrt
class Node:
    """A 2-D grid point, hashable and usable as a dict/set key.

    Treated as immutable by convention: mutating x/y after construction
    would break the hash contract.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def getPosition(self):
        """Return the (x, y) coordinate pair."""
        return self.x, self.y

    def getDistanceTo(self, node):
        """Return the Euclidean distance from this node to `node`."""
        x, y = node.getPosition()
        return sqrt((x - self.x) ** 2 + (y - self.y) ** 2)

    def getLabel(self):
        """Return the (x, y) tuple used as this node's label/key."""
        return (self.x, self.y)

    def __eq__(self, other):
        if isinstance(other, Node):
            return self.x == other.x and self.y == other.y
        # Let Python fall back to the reflected comparison for non-Nodes
        # (still evaluates as unequal, same observable result as before).
        return NotImplemented

    def __hash__(self):
        # Hash the coordinate tuple: unlike hash(x) ^ hash(y), this does not
        # make Node(a, b) and Node(b, a) collide for every a, b, and it stays
        # consistent with __eq__.
        return hash((self.x, self.y))
| [
"m.ruszczyk.1@stud.elka.pw.edu.pl"
] | m.ruszczyk.1@stud.elka.pw.edu.pl |
f197241de30622889b167ecb7dc068406820dbc8 | 6061ebee9fbce8eb5b48ed7ccd2aecb196156598 | /modulo07-funcoes/exercicios/exercicio04.py | 5ebbfcd3edf545e7ffdc636929ca0276708ef06c | [] | no_license | DarioCampagnaCoutinho/logica-programacao-python | fdc64871849bea5f5bbf2c342db5fda15778110b | b494bb6ef226c89f4bcfc66f964987046aba692d | refs/heads/master | 2023-02-24T11:45:29.551278 | 2021-01-26T22:02:49 | 2021-01-26T22:02:49 | 271,899,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | def aumento(numero, valor):
resultado = numero * valor / 100
return resultado + numero
resultado = aumento(100, 50)
print(resultado) | [
"campagnacoutinho67@gmail.com"
] | campagnacoutinho67@gmail.com |
b936c098695eb03ec4c9abb82e78537b33edc5f3 | b7a2a80843fa5141ffb9c7b4439f1d2ac713af30 | /UT2_Morsen.py | f0f71ad6765a17297467d402ad1566da64d52c4e | [] | no_license | wunnox/python_grundlagen | df1bc2b9b1b561bd6733ccc25305e799a48e714e | fa84d7aae7332a7acbb3ba7ff0fe2216cc345fc0 | refs/heads/master | 2023-05-01T12:19:23.208445 | 2023-04-16T11:29:01 | 2023-04-16T11:29:01 | 222,099,539 | 2 | 3 | null | 2019-12-19T10:56:43 | 2019-11-16T12:57:54 | Python | UTF-8 | Python | false | false | 450 | py | #!/usr/local/bin/python3
####################################################
#
# Uebung UT2_Morsen.py:
# Erstellen Sie ein Programm, welches anhand einer
# Morsetabelle einen Text in Morsecode ausgibt.
# Verwenden Sie hierzu das Modul UT2_Morsecodes.py
#
####################################################
import UT2_Morsecodes as mc
w = input('-> ')  # read the word to translate
w = w.replace(' ', '_')  # spaces are encoded as '_' before table lookup
# Emit the Morse code for each character, space-separated on a single line.
for l in w:
    print(mc.morse(l), end=' ')
print()
| [
"peter.christen@cssgmbh.ch"
] | peter.christen@cssgmbh.ch |
1555889b5560a7cb70066a87391d487773ca298f | 7c16a522a7006c06ee0dc29ec0c62049b9281be3 | /tests/context.py | 6906139686a9e53323d3892d380d773464ba715e | [
"MIT"
] | permissive | maryco/petoi-agent | d252f99c63d7d96c1fb243d8617ab5197066f22d | 3163ee3aacba4cd0a799aec22c972a178b3cc29e | refs/heads/main | 2023-06-04T11:42:16.474033 | 2021-06-25T12:30:10 | 2021-06-25T12:30:10 | 380,077,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | """The context module for run the test
Notes:
- Run the test in the parent directory of "src".
Example: Run all tests in the specified file.
% pipenv run python -m unittest -v tests.test_serial_agent
% pipenv run python -m unittest -v tests.test_serial_agent.TestSerialAgent
Example: Run specified test case.
% pipenv run python -m unittest -v tests.test_serial_agent.TestSerialAgent.test_create_incetanse
"""
import sys
import os
from datetime import date
import logging
import subprocess
import re
sys.path.insert(0, os.path.abspath('./src'))
TESTING_LOG_LEVEL = logging.DEBUG
def init_test_logger():
    """Configure root logging to write tests/logs/YYYYMMDD.log at TESTING_LOG_LEVEL."""
    here = os.path.dirname(os.path.abspath(__file__))
    logfile = f'{here}/logs/{date.today().strftime("%Y%m%d")}.log'
    logging.basicConfig(
        filename=logfile,
        format='%(asctime)s-[%(name)s][%(levelname)s][%(filename)s:%(lineno)d] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=TESTING_LOG_LEVEL
    )
def has_active_devenv():
    """Return True when both dev containers (app-django, app-node) are running.

    Checks that the `docker` CLI exists, that `docker ps` succeeds, and that
    its output lists exactly one running container for each of the two
    expected services (two matching lines in total).
    """
    cp = subprocess.run(['which', 'docker'])
    if cp.returncode != 0:
        return False
    cp = subprocess.run(['docker', 'ps'], stdout=subprocess.PIPE)
    if cp.returncode != 0:
        return False
    # Raw string: '\-' in a plain string literal is an invalid escape
    # sequence (DeprecationWarning); '-' needs no escaping here anyway.
    pattern = re.compile(r'app-django|app-node')
    active_container_cnt = sum(
        1 for line in cp.stdout.decode('utf-8').split('\n')
        if pattern.search(line) is not None
    )
    return active_container_cnt == 2
""
] | |
093a11ff4eaea5d47dc8731c3face1e6656df75c | 2889ce81f2e0e2d6d22cbd8128a05c9c17addaef | /cspython/ServerLib.py | 614ff7b700e0c7344395a3a15e11e44dfcd15495 | [] | no_license | Roso29/CSPython | fd68465f1fadfb5a0f49f992c92e5be2a3dc6a5b | 635ca723a6665c4557b84e1819a8cfe0f57c0e47 | refs/heads/main | 2023-08-18T06:56:53.448693 | 2021-10-04T14:14:46 | 2021-10-04T14:14:46 | 410,812,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | import socket
'''
ServerLib
- Contains class Server
Sets up sockets
Sends and receives plain text over sockets
Only used to interact with the sockets - performs no verification/validation/logic
'''
class Server:
    """Shared state for both endpoints of the plain-text socket protocol.

    Holds the port, the TCP socket object, and the fixed receive-buffer size
    used by the subclasses' ReceiveMessage implementations.  Only the
    Host/Client subclasses actually send or receive.
    """

    def __init__(self, port):
        self.port = port
        # Maximum number of bytes read per recv() call.
        self.bytesToRecv = 1024
        # IPv4 TCP stream socket; bound/listened by Host, connected by Client.
        self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class Host(Server):
    """Server side of the link: binds, listens, and serves a single client."""

    def __init__(self, port):
        super().__init__(port)
        self.SetUpSocket()
        self.hasActiveClient = False

    def SetUpSocket(self):
        """Bind to this machine's address and block until one client connects."""
        backlog = 1
        local_address = socket.gethostbyname(socket.gethostname())
        self.serverSocket.bind((local_address, self.port))
        self.serverSocket.listen(backlog)
        print("Waiting for incoming connections...")
        self.clientSocket, self.clientAddress = self.serverSocket.accept()
        print(f"Connection from {self.clientAddress} established...")

    def SendMessage(self, messageString):
        """Send plain text to the connected client (UTF-8 on the wire)."""
        self.clientSocket.sendall(messageString.encode("utf-8"))

    def ReceiveMessage(self):
        """Block for up to bytesToRecv bytes from the client; return as text."""
        return self.clientSocket.recv(self.bytesToRecv).decode("utf-8")

    def TeardownSocket(self):
        """Close the per-client connection (the listening socket stays open)."""
        self.clientSocket.close()
class Client(Server):
    """Client side of the link: connects out to a Host at hostAddress:port."""

    def __init__(self, port, hostAddress):
        super().__init__(port)
        self.hostAddress = hostAddress
        self.SetUpSocket(port, hostAddress)

    def SetUpSocket(self, port, hostAddress):
        """Open the TCP connection to the host."""
        self.serverSocket.connect((hostAddress, port))

    def SendMessage(self, messageString):
        """Send plain text to the host (UTF-8 on the wire)."""
        self.serverSocket.send(messageString.encode("utf-8"))

    def ReceiveMessage(self):
        """Block for up to bytesToRecv bytes from the host; return as text."""
        return self.serverSocket.recv(self.bytesToRecv).decode("utf-8")
'''
Use Server as a parent class
has receive and send functions and a lot of set up
client and host classes can inherit from it and add their specific server functions.
''' | [
"thrwawy81@gmail.com"
] | thrwawy81@gmail.com |
1c3ea9c37220f04f5ec059b8d964947f7d2508f6 | c31e69b763e1b52d3cefa4f5a49432ae966f22d0 | /day29/mySpider/mySpider/settings.py | 4e3e3faf4c4eeff43f7ccf6f99bfd3381a22506f | [] | no_license | lvah/201901python | cbda174a3c97bc5a2f732c8e16fc7cf8451522d2 | 7bffe04a846f2df6344141f576820730a7bbfa6a | refs/heads/master | 2022-12-13T09:49:29.631719 | 2019-04-06T09:48:33 | 2019-04-06T09:48:33 | 165,477,671 | 3 | 0 | null | 2022-12-08T04:57:01 | 2019-01-13T07:23:44 | HTML | UTF-8 | Python | false | false | 3,413 | py | # -*- coding: utf-8 -*-
# Scrapy settings for mySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'mySpider'
SPIDER_MODULES = ['mySpider.spiders']
NEWSPIDER_MODULE = 'mySpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mySpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'mySpider.middlewares.MyspiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'mySpider.middlewares.MyspiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
MOOCFilename = "mooc.txt"
ITEM_PIPELINES = {
# 管道的位置: 优先级, 0~1000, 数字越小, 优先级越高;
'mySpider.pipelines.MyspiderPipeline': 300,
'mySpider.pipelines.CsvPipeline': 400,
'mySpider.pipelines.MysqlPipeline': 500,
'mySpider.pipelines.ImagePipeline': 200,
}
IMAGES_STORE = '/root/PycharmProjects/day29/mySpider/img'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"976131979@qq.com"
] | 976131979@qq.com |
7f0ebd1d128ac4924596d557e8d67a4d45622c13 | f11462f71080406e19616dc8f8a38cba3293ee40 | /notebooks/Veins/make_readme.py | 802117a7fb3662057ac89cb76eb57697a698a263 | [
"Apache-2.0"
] | permissive | SalishSeaCast/analysis-vicky | 95beaaf48d5be89eba0dc4bff6027c2639f43b80 | 42ae975652c17c2f990b0997c3c21c382c2d989e | refs/heads/master | 2020-12-23T09:25:22.433176 | 2020-05-19T23:56:09 | 2020-05-19T23:56:09 | 237,109,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | #Copyright 2013-2016 The Salish Sea MEOPAR contributors
# and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Salish Sea NEMO Jupyter Notebook collection README generator
"""
import datetime
import glob
import json
import os
import re
NBVIEWER = 'https://nbviewer.jupyter.org/urls'
REPO = 'bitbucket.org/salishsea/analysis-vicky/raw/tip'
REPO_DIR = 'notebooks/Veins/'
TITLE_PATTERN = re.compile('#{1,6} ?')
def main():
    """Regenerate README.md with an nbviewer link and a first-cell
    description for every notebook in this directory."""
    url = os.path.join(NBVIEWER, REPO, REPO_DIR)
    readme = """\
The Jupyter Notebooks in this directory are for development and testing of
the results figures generation modules of the Salish Sea model nowcast system.
The links below are to static renderings of the notebooks via
[nbviewer.jupyter.org](https://nbviewer.jupyter.org/).
Descriptions under the links below are from the first cell of the notebooks
(if that cell contains Markdown or raw text).
"""
    # One bullet per notebook, followed by its extracted description.
    for fn in glob.glob('*.ipynb'):
        readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url)
        readme += notebook_description(fn)
    # NOTE(review): `license` shadows the builtin of the same name —
    # harmless here, but worth renaming.
    license = """
##License
These notebooks and files are copyright 2013-{this_year}
by the Salish Sea MEOPAR Project Contributors
and The University of British Columbia.
They are licensed under the Apache License, Version 2.0.
https://www.apache.org/licenses/LICENSE-2.0
Please see the LICENSE file for details of the license.
""".format(this_year=datetime.date.today().year)
    with open('README.md', 'wt') as f:
        # writelines() on a str writes it character-by-character; output is
        # identical to f.write(), just slower.
        f.writelines(readme)
        f.writelines(license)
def notebook_description(fn):
    """Extract an indented Markdown description from notebook `fn`'s first cell.

    Returns '' unless the first cell is markdown or raw text.  Leading
    heading markers (#, ##, ...) are converted to bold so they render inline
    in the README bullet list.
    """
    description = ''
    with open(fn, 'rt') as notebook:
        contents = json.load(notebook)
    try:
        # Older notebook format: cells nested under "worksheets".
        first_cell = contents['worksheets'][0]['cells'][0]
    except KeyError:
        # Current notebook format: cells at the top level.
        first_cell = contents['cells'][0]
    first_cell_type = first_cell['cell_type']
    if first_cell_type not in 'markdown raw'.split():
        return description
    desc_lines = first_cell['source']
    for line in desc_lines:
        suffix = ''
        if TITLE_PATTERN.match(line):
            # Swap the "#... " heading prefix for an opening bold marker and
            # remember to close it at the end of the line.
            line = TITLE_PATTERN.sub('**', line)
            suffix = '**'
        if line.endswith('\n'):
            description += (
                ' {line}{suffix} \n'
                .format(line=line[:-1], suffix=suffix))
        else:
            description += (
                ' {line}{suffix} '.format(line=line, suffix=suffix))
    description += '\n' * 2
    return description
main()
| [
"vdo@eoas.ubc.ca"
] | vdo@eoas.ubc.ca |
530a8349c1d993059e04c0c67889935071e2ef34 | 6a2a4f97009e31e53340f1b4408e775f3051e498 | /Iniciante/p2581.py | 8788b781d8d7a15867ecc1b352f711f048beab64 | [] | no_license | rafacasa/OnlineJudgePythonCodes | 34c31f325cccb325f074492b40591ad880175816 | 030c18f9020898fdc4f672f9cc17723236e1271d | refs/heads/master | 2023-07-15T12:09:45.534873 | 2023-06-27T00:24:03 | 2023-06-27T00:24:03 | 250,595,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | while True:
try:
n = int(input())
for i in range(n):
q = input()
print("I am Toorg!")
except EOFError:
break
| [
"rafael.casa@pm.me"
] | rafael.casa@pm.me |
4b7fc79d1a1c8900a63129b210fd5bf8568c4bef | dccee2da3c571cf69dea60cf5150f57f1f9b2edb | /tc-kep100/v1.2.x/ja/autogen-openapi-generator/python/tc_kep100_client/api/init_api.py | 2899fc1938b00ec934874b669c77fa59eab789fc | [
"MIT"
] | permissive | y2kblog/poe-webapi-sensor-api | 173d3d6fd9c441968c8b900a0ae03c21bc2f266b | 7c21c88e4a7f74f7bc09c5d4dfc9ff352a98d458 | refs/heads/master | 2023-05-30T14:23:49.523025 | 2021-06-16T02:57:29 | 2021-06-16T02:57:29 | 369,937,604 | 0 | 0 | MIT | 2021-06-16T02:57:29 | 2021-05-23T01:27:34 | Python | UTF-8 | Python | false | false | 4,958 | py | """
PoE対応 WebAPI K型熱電対アンプ API仕様
\"Try it out\"機能は、API仕様を製品と同一ネットワーク上のローカルPCにダウンロードしブラウザで開くことで利用できます。 # noqa: E501
The version of the OpenAPI document: 1.2.x
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from tc_kep100_client.api_client import ApiClient, Endpoint as _Endpoint
from tc_kep100_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
class InitApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __init_put(
self,
**kwargs
):
"""製品を初期状態に戻す # noqa: E501
設定したパラメータをすべて削除し製品を初期状態に戻す。その後、自動的に再起動する。 その後、自動的に再起動する /init.html にブラウザからアクセスし操作することも可能 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.init_put(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.init_put = _Endpoint(
settings={
'response_type': None,
'auth': [
'basicAuth'
],
'endpoint_path': '/init',
'operation_id': 'init_put',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__init_put
)
| [
"info.y2kb@gmail.com"
] | info.y2kb@gmail.com |
f8bf11e12bc1581ed30ac99f941e2bf0f33f766b | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/HackerRank-ProblemSolving/Counting Valleys.py | 8dd7bd82fe261f99bf7584a8856d457b1d55009b | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | #!/bin/python3
import math
import os
import random
import re
import sys
def countingValleys(n, s):
    """Count the valleys in a hike described by step string `s`.

    A valley is a maximal stretch spent below sea level (altitude < 0).

    Args:
        n: number of steps (unused; kept for the HackerRank signature).
        s: string of 'U' (up) and 'D' (down) steps, starting at sea level.

    Returns:
        The number of valleys walked through.

    Bug fix: the previous version counted EVERY return to altitude 0, so a
    hill (e.g. "UD") was miscounted as a valley.  A valley ends only when we
    climb back UP to sea level.
    """
    count = 0
    altitude = 0
    for step in s:
        altitude += 1 if step == 'U' else -1
        if altitude == 0 and step == 'U':
            count += 1
    return count
if __name__ == '__main__':
    # HackerRank harness: read n and the step string from stdin, write the
    # valley count to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    s = input()
    result = countingValleys(n, s)
    fptr.write(str(result) + '\n')
    fptr.close()
| [
"rastogiritvik99@gmail.com"
] | rastogiritvik99@gmail.com |
a996ae3320923c71c7f1632940b84bcd5d3c9a9d | d04e2adc8a808a606fa12d01384b5ad8e0319765 | /chapter3/fewpractice.py | 4600d0ae7c2cfdc6b969f1a3269afe11f9d8003f | [] | no_license | Bishwajit-Shil/Python | 9c338939005b74d31232f0d2cd051319c3d30281 | 8e57829c044938158bc0ff3a3764e06030602f06 | refs/heads/master | 2023-01-13T06:18:37.297386 | 2020-11-19T09:51:56 | 2020-11-19T09:51:56 | 314,192,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | #-----------1-----------sum and average-------------------
n = int(input('Enter how many number u input : '))
total = 0
for i in range(1,n+1):
num = input(f"input number : ")
total += int(num)
print(f'the total sum :{total}')
print(f'the total avarege : {total/n}')
# ------------------using function--------------
def sumx(x):
total = 0
for i in range(1,x+1):
num = float(input('enter value : '))
total += num
return total
n = int(input('Enter number of data set : '))
sumx= sumx(n)
print(f'the total sum : {sumx} and the average : {sumx/n}')
# ----------2-------- sum for 1/n -----------------------
n = int(input('enter the value of n : '))
total = 0
for i in range(1,n+1):
total += 1.0/i
print(f' the calculated sum : {total}')
# ----2----using function-------
def sumx(x):
total = 0
for i in range(1,x+1):
total += 1.0/i
return total
n = int(input('enter the value of n : '))
print(f'total : {sumx(n)}')
# ----------3------------triangle---------------------------
a= int(input('Enter a : '))
b= int(input('Enter b : '))
c= int(input('Enter c : '))
if(a+b<c or a+c<b):
print('invalid length . ')
else:
perimeter= a+b+c
s = perimeter/2
area = pow((s*(s-a)*(s-b)*(s-c)),0.5)
print(f'the tirangle perimeter : {perimeter}')
print(f'the triangle area : {area}')
# ------------------------gamma--------------------------
#------------gamma=h1/(h1-h2)---------
n = int(input('How many data set want to input : '))
sumh1 = 0
sumh2 = 0
for i in range(1,n+1):
h1= float(input('Enter the values of h1 : '))
sumh1 += h1
print('\n')
for i in range(1,n+1):
h2= float(input('Enter the values of h2 : '))
sumh2 += h2
avgh1 = sumh1/n
avgh2 = sumh2/n
print(f'the calculated gamma {avgh1/(avgh1-avgh2)}')
#-------------------------using function---------------------
def sumx(x):
sumx = 0
for i in range(1,x+1):
h = float(input('Enter the value : '))
sumx += h
return sumx
n = int(input('How many data set want to input : '))
avgh1 = sumx(n)/n
print('\n')
avgh2 = sumx(n)/n
print(f'the calculated gamma is : {avgh1/(avgh1-avgh2)}')
# # ----------------------standard diviation= sqrt(x - average_x)/n)------------
# Bug fix: the original second loop computed (x - avgx)**2 using only the
# LAST value entered (x kept its final loop value), so ssd was always
# n * (last_x - mean)^2.  Store the values and sum each one's deviation.
n = int(input('Enter number of data set : '))
values = []
for i in range(1, n + 1):
    values.append(float(input('Enter value of x : ')))
avgx = sum(values) / n
ssd = 0
for x in values:
    ssd += (x - avgx) ** 2
std = (ssd / n) ** 0.5
print(f'The calculated standard diviation is : {std}')
"jitshil@pop-os.localdomain"
] | jitshil@pop-os.localdomain |
b225078d0a91211eb3468b927ba33ab82c48402a | 9bcf0c5df0c8d7f387469975e1f5ed64ae61a701 | /coop_gateway/__init__.py | 3a4c388cb09435e6d5038c574d255491d6fe0585 | [
"BSD-2-Clause"
] | permissive | credis/coop-gateway | 4d3d8513ce693db7efb8d34c6823ae3b41cfcd1e | 57bfdac90118db6abab078d45fca2bbc65d8bb54 | refs/heads/master | 2021-01-22T07:03:19.261389 | 2013-11-25T14:06:28 | 2013-11-25T14:06:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from django.db.models.signals import (
post_save,
post_delete,
)
from coop_local.models import (
Organization,
Person,
)
from .signals import (
organization_saved,
organization_deleted,
person_saved,
person_deleted,
)
# Wire model lifecycle signals to the gateway sync handlers; the second
# positional argument is `sender`, restricting each receiver to its model.
post_save.connect(organization_saved, Organization)
post_delete.connect(organization_deleted, Organization)
post_save.connect(person_saved, Person)
post_delete.connect(person_deleted, Person)
| [
"antoine.cezar@makina-corpus.com"
] | antoine.cezar@makina-corpus.com |
ffd754dfcd3f8ce81ed63d9d1957b3c012840687 | f63d8037abd95550bbf19820bbbf63fe004ea027 | /apps/auth/models.py | e887c3348f0f68c9ec65dc5781584c4c4638ab2f | [] | no_license | lsdlab/flask_socketio_celery | 6595e0fdc6534e4c3988b2e895194ba6f2302c53 | 84870110641feb8e49d9f45271894a66e202b7ec | refs/heads/master | 2020-03-24T20:54:47.793077 | 2018-08-01T04:33:54 | 2018-08-01T04:33:54 | 143,003,268 | 0 | 1 | null | 2018-08-24T01:54:32 | 2018-07-31T11:08:36 | Python | UTF-8 | Python | false | false | 2,521 | py | import datetime as dt
from flask_login import UserMixin
from apps.database import (Column, Model, SurrogatePK, db,
reference_col, relationship)
from apps.extensions import bcrypt
class Role(SurrogatePK, Model):
    """A role for a user."""

    __tablename__ = 'auth_roles'
    # Unique role name (e.g. "admin").
    name = Column(db.String(80), unique=True, nullable=False)
    # Owning user; the backref exposes `User.roles` as the reverse accessor.
    user_id = reference_col('auth_users', nullable=True)
    user = relationship('User', backref='roles')

    def __init__(self, name, **kwargs):
        """Create instance."""
        db.Model.__init__(self, name=name, **kwargs)

    def __repr__(self):
        """Represent instance as a unique string."""
        return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
    """A user of the app."""

    __tablename__ = 'auth_users'
    username = Column(db.String(80), unique=True, nullable=False)
    email = Column(db.String(80), unique=True, nullable=True)
    #: The hashed password
    password = Column(db.Binary(128), nullable=False)
    created_at = Column(db.DateTime, nullable=False, default=dt.datetime.now)
    first_name = Column(db.String(30), nullable=True)
    last_name = Column(db.String(30), nullable=True)
    active = Column(db.Boolean(), default=False)
    is_admin = Column(db.Boolean(), default=False)
    # presumably the user's current Socket.IO session id ('' when not
    # connected) — verify against the socket handlers that set it.
    sid = Column(db.String(80), nullable=True, default='')

    def __init__(self, username, password=None, **kwargs):
        """Create instance; hash the password if one is given."""
        db.Model.__init__(self, username=username, **kwargs)
        if password:
            self.set_password(password)
        else:
            # NOTE(review): leaves password None although the column is
            # nullable=False — insertion will fail until set_password runs.
            self.password = None

    def set_password(self, password):
        """Set password."""
        # Store only the bcrypt hash, never the plain text.
        self.password = bcrypt.generate_password_hash(password)

    def check_password(self, value):
        """Check password."""
        return bcrypt.check_password_hash(self.password, value)

    @property
    def full_name(self):
        """Full user name."""
        return '{0} {1}'.format(self.first_name, self.last_name)

    def __repr__(self):
        """Represent instance as a unique string."""
        return '<User({username!r})>'.format(username=self.username)

    def to_json(self):
        # Serializable view for API responses; deliberately omits password.
        return {
            'id': self.id,
            'username': self.username,
            'email': self.email,
            'active': self.active,
            'is_admin': self.is_admin,
            'sid': self.sid,
            'created_at': self.created_at.strftime("%Y-%m-%d %H:%M:%S")
        }
| [
"lsdlab@icloud.com"
] | lsdlab@icloud.com |
d82f4bb8ea67a5a20c2e438014b1ba0342be5e7f | cb9281a34c3c5a36d4b3a846fb6ff22ede12f2f6 | /Haroldo_weigh_history_sel.py | 96ace295518488fa84f0fc246b70004904f29539 | [] | no_license | juliettapc/CalorieKing | 9cb9f35ae9b239d2284175b0802cf2c60dc79d1d | 5f80bffb65fe4644a81ae2ab0b1738861e028331 | refs/heads/master | 2022-02-10T07:52:24.133379 | 2022-02-08T01:25:18 | 2022-02-08T01:25:18 | 153,174,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | #! /usr/bin/env python
import sys
import os
from database import *
from datetime import *
def main():
    """Print, for every weigh-in cut, the history samples that fall inside
    its [start_idx, stop_idx) window, tab-separated with the fit parameters.

    NOTE(review): the committed version did not parse — the filter read
    `if (day>=) and ...` — and indexed `row` with keys ('start_day',
    'stop_day') and `res` with 'on_date', none of which the queries return.
    Reconstructed here as an index window over each user's ordered history,
    which is what `start_idx`/`stop_idx` in `weigh_in_cuts` suggest —
    confirm against the original intent.
    """
    database = "calorie_king_social_networking_2010"
    server = "tarraco.chem-eng.northwestern.edu"
    user = "calorieking"
    passwd = "n1ckuDB!"
    db = Connection(server, database, user, passwd)

    query = """Select `weigh_in_cuts`.`id` , `weigh_in_cuts`.`ck_id`, `weigh_in_cuts`.`fit_type`, `weigh_in_cuts`.`start_idx`, `weigh_in_cuts`.`stop_idx`,
`weigh_in_cuts`.`param1`,`weigh_in_cuts`.`param2`,`weigh_in_cuts`.`param3`
From `weigh_in_cuts`
Order by `weigh_in_cuts`.`ck_id`"""
    cuts = db.query(query)

    for row in cuts:
        # NOTE(review): ck_id is interpolated straight into the SQL string;
        # acceptable for a trusted internal id, but a parameterized query
        # would be safer.
        query = ("SELECT weigh_in_history.ck_id, weigh_in_history.on_day,"
                 "weigh_in_history.weight FROM weigh_in_history WHERE "
                 "weigh_in_history.ck_id='" + row['ck_id'] +
                 "' Order by weigh_in_history.on_day")
        res = db.query(query)
        for idx, his in enumerate(res):
            # Keep only samples whose position in the ordered history lies
            # inside the cut's half-open index window.
            if row['start_idx'] <= idx < row['stop_idx']:
                print(str(row['id']) + "\t" + str(his['ck_id']) + "\t" +
                      str(his['on_day']) + "\t" + str(his['weight']) + "\t" +
                      str(row['fit_type']) + "\t" + str(row['param1']) + "\t" +
                      str(row['param2']) + "\t" + str(row['param3']))
if __name__== "__main__":
main()
| [
"noreply@github.com"
] | juliettapc.noreply@github.com |
ac14e0da172317774b395bde7f035d2668d68ea8 | 6f66a02e3bbf042da0c50958f9ccacbbf6427d11 | /scripts/audio_builder.py | abc7d2497f56d7fc290efbaaf242b63a79422978 | [] | no_license | lmirandam07/reverso_favs2anki | 86672641f68729bce13c0bae1f731c43572e3103 | 3f08b88ce88d9a9d8bbf8cb25aaf10da5ace2388 | refs/heads/master | 2023-06-28T16:48:50.255632 | 2021-08-01T22:02:44 | 2021-08-01T22:02:44 | 357,282,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | import time
import logging
import requests
from uuid import uuid4
from random import choice
from pathlib import Path
from decouple import AutoConfig
from xml.etree import ElementTree
# File logger for the whole pipeline; the path is relative to this script.
logging.basicConfig(filename='../files/main.log', level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')
try:
    # Azure credentials are read from ../files/.env via python-decouple.
    config = AutoConfig(search_path='../files/.env')
    AZURE_API_KEY = config('AZURE_API_KEY', cast=str)
    AZURE_REGION = config('AZURE_REGION', cast=str)
except Exception as e:
    # NOTE(review): on failure AZURE_API_KEY/AZURE_REGION remain undefined,
    # so AzureAudio.__init__ will later raise NameError — confirm intended.
    logging.error("Exception occurred when trying to load .env vars")
class AzureAudio:
    """Thin client for the Azure Cognitive Services text-to-speech REST API.

    Holds the subscription credentials (loaded at module import time) plus a
    table of candidate neural voices per language, and synthesizes MP3 files
    into ../files/audios/.
    """

    def __init__(self):
        self._api_key = AZURE_API_KEY
        self._region = AZURE_REGION
        self.access_token = ''  # cached bearer token, fetched lazily
        # Candidate neural voices per locale, keyed by language code.
        self._voices = {
            'de': {
                'de-DE': ('de-DE-ConradNeural', 'de-DE-KatjaNeural'),
                'de-AT': ('de-AT-JonasNeural', 'de-AT-IngridNeural'),
            }
        }

    def get_voices(self, lang=None):
        """Return the whole voice table, or only the entry for *lang*."""
        if lang is not None:
            return self._voices[lang]
        return self._voices

    def get_access_token(self, sub_key, region):
        """Fetch and cache an OAuth bearer token for the TTS endpoint.

        Returns the token string, or None when the request raised.
        """
        fetch_token_url = f"https://{region}.api.cognitive.microsoft.com/sts/v1.0/issueToken"
        headers = {
            'Ocp-Apim-Subscription-Key': sub_key,
            'User-Agent': 'reverso_favs2anki'
        }
        try:
            response = requests.post(fetch_token_url, headers=headers)
            self.access_token = response.content.decode('utf-8')
            return self.access_token
        except requests.exceptions.HTTPError as e:
            logging.error("Exception occurred when trying to get access token")

    def get_audio(self, text, lang):
        """Synthesize *text* in *lang* to an MP3 under ../files/audios.

        Returns the generated file name (not the full path), or '' on failure.
        """
        azure_api_key = self._api_key
        azure_region = self._region

        # Reuse the cached token when available; otherwise fetch one.
        if not self.access_token:
            access_token = self.get_access_token(azure_api_key, azure_region)
            if not access_token:
                logging.error("Could not get azure access token")
                return ''
        else:
            access_token = self.access_token

        try:
            langs_and_voices = self.get_voices(lang)
            # Randomly pick a locale, then a voice within that locale.
            lang_choice = choice(list(langs_and_voices.keys()))
            voice_choice = choice(langs_and_voices[lang_choice])
            rate = 0
            pitch = 0

            azure_api_url = f'https://{azure_region}.tts.speech.microsoft.com/cognitiveservices/v1'
            headers = {
                'Authorization': f'Bearer {access_token}',
                'Content-Type': 'application/ssml+xml',
                'X-Microsoft-OutputFormat': 'audio-24khz-96kbitrate-mono-mp3',
                'User-Agent': 'reverso_favs2anki'
            }

            # Build the SSML payload: <speak><voice><prosody>text
            xml_body = ElementTree.Element('speak', version='1.0')
            xml_body.set(
                '{http://www.w3.org/XML/1998/namespace}lang', lang_choice)
            voice = ElementTree.SubElement(xml_body, 'voice')
            voice.set('{http://www.w3.org/XML/1998/namespace}lang', lang_choice)
            voice.set(
                'name', voice_choice)
            prosody = ElementTree.SubElement(voice, 'prosody')
            prosody.set('rate', f'{rate}%')
            prosody.set('pitch', f'{pitch}%')
            prosody.text = text
            body = ElementTree.tostring(xml_body)

            response = requests.post(
                azure_api_url, headers=headers, data=body)

            # On throttling/errors, honour the server's wait hint and retry once.
            if response.status_code not in range(200, 300):
                # Bug fix: the HTTP header is spelled 'Retry-After' (hyphen);
                # the old 'Retry_After' lookup always returned None, so the
                # server's hint was silently ignored.
                retry_after = response.headers.get('Retry-After')
                time.sleep(int(retry_after) if retry_after else 10)
                response = requests.post(
                    azure_api_url, headers=headers, data=body)

            if response.status_code in range(200, 300):
                audio_folder = Path("../files/audios")
                audio_folder.mkdir(exist_ok=True)
                audio_file_name = Path(f"azure-{str(uuid4())}.mp3")
                audio_file_path = Path.joinpath(audio_folder, audio_file_name)
                if not audio_file_path.exists():
                    # `with` closes the file; the old explicit close() inside
                    # the with-block was redundant and has been dropped.
                    with open(audio_file_path, 'wb') as audio:
                        audio.write(response.content)
                return audio_file_name.name

            logging.error(f'Could not create the audio for the text "{text}"')
            return ''
        except requests.exceptions.HTTPError as e:
            # Bug fix: message used to be copy-pasted from get_access_token.
            logging.error("Exception occurred when trying to synthesize audio")
| [
"luyimiranda07@gmail.com"
] | luyimiranda07@gmail.com |
90f0943c30d39f4eee6a5154e35632bd7d6b8d61 | 5f60b6c599de062d4d80987ea6b515a55c554a43 | /fridaySmallFunctions.py | 262e7f97b0254c37c122cc36c1ce00794262f72a | [] | no_license | ClauMaj/pyHomework | c6871d42a3c2b9eaa2616daeaec3c53b4f304413 | ca198451e9921d6fc604d28c50366480e800abf8 | refs/heads/main | 2023-01-30T12:00:06.109456 | 2020-12-04T03:26:34 | 2020-12-04T03:26:34 | 311,827,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | # 1. Madlib function
# madlibName = input("name: ")
# madlibSubject = input("subject: ")
# def interpolate(a, b):
# print(f"{a}'s favorite subject is {b}.'")
# interpolate(madlibName,madlibSubject)
# 2. Celsius to Fahrenheit conversion
# cel = int(input('cel temp: '))
# def celToFar(temp):
# return (temp * 9/5) + 32
# print(celToFar(cel))
# 3. Fahrenheit to Celsius conversion
# fah = int(input('Fahr temp: '))
# def fahrToCel(temp):
# return (temp - 32) * 5/9
# print(fahrToCel(fah))
# 4 . is_even function
# num = int(input('number: '))
# def isEven(a):
# if a % 2 == 0:
# return True
# else:
# return False
# print(f'Even {isEven(num)}')
# 5. is_odd function
# def isOdd(b):
# return not isEven(b)
# print(f'Odd {isOdd(num)}')
# 6. only_evens function
# myList = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# def evenlist(evenlist):
# c = []
# for item in evenlist:
# if isEven(item):
# c.append(item)
# return c
# print(evenlist(myList))
# 7. only_odds function
# myList = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# def oddlist(oddlist):
# d = []
# for item in oddlist:
# if isOdd(item):
# d.append(item)
# return d
# print(oddlist(myList))
# 6. only_evens function with remove
# myList = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# def evenlist(evenlist):
# for item in evenlist:
# if isEven(item):
# evenlist.remove(item)
# print(evenlist(myList))
# print(myList) | [
"claudiumajor@gmail.com"
] | claudiumajor@gmail.com |
146c06c0679a7f052c159f39be6f261d6fe45303 | 4a3071cfe19f3430dc4c6f3072f41bc0d7ea5875 | /drinks/spiders/coolers.py | c03b31211896a91a069abdb7255fc0b020a56772 | [] | no_license | RachelLi13/Drinks | e4daf8e7d61bfe4c57b907f01acda8aba811ce0d | 838c54892daf3cb713e80b0bfdce5505bc3ab24f | refs/heads/master | 2023-05-05T02:23:29.168225 | 2021-05-31T23:04:19 | 2021-05-31T23:04:19 | 368,023,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | import scrapy
from scrapy.loader import ItemLoader
from drinks.items import DrinksItem
class CoolerSpider(scrapy.Spider):
    """Crawl LCBO cooler and premixed-cocktail listings into DrinksItems."""

    name = 'coolers'
    start_urls = ['https://www.lcbo.com/webapp/wcs/stores/servlet/en/lcbo/coolers-18-1/coolers-18029',
                  'https://www.lcbo.com/webapp/wcs/stores/servlet/en/lcbo/coolers-18-1/premixed-cocktails-18030']

    def parse(self, response):
        """Parse one listing page: queue a detail request per product card,
        then follow the right-arrow pagination link while products remain."""
        product_cards = response.css('.product')
        for card in product_cards:
            item_loader = ItemLoader(item=DrinksItem(), selector=card)
            item_loader.add_css('drink_name', '.product_name a::text')
            item_loader.add_css('drink_price', '.price::text')
            item_loader.add_css('drink_link', '.product_name a::attr(href)')
            partial_item = item_loader.load_item()
            detail_links = card.css('.product_name a')
            yield from response.follow_all(detail_links, callback=self.parse_drink, meta={'drink_item': partial_item})

        next_page = response.css('#WC_SearchBasedNavigationResults_pagination_link_right_categoryResults::attr(href)').get()
        if product_cards:
            yield response.follow(next_page, callback=self.parse)

    def parse_drink(self, response):
        """Parse a product detail page, enriching the partially-filled item."""
        partial_item = response.meta['drink_item']
        item_loader = ItemLoader(item=partial_item, response=response)
        item_loader.add_css('summary', '.product-text-content p::text')
        item_loader.add_css('drink_volume', 'dd b::text')
        item_loader.add_css('alcohol_percentage', 'dd:nth-child(3) span::text')
        item_loader.add_css('origin_place', 'dd:nth-child(5) span::text')
        item_loader.add_css('drink_type', '#widget_breadcrumb li+ li a::text')
        yield item_loader.load_item()
| [
"li.rachel.cheng@gmail.com"
] | li.rachel.cheng@gmail.com |
16d09bd1bac2eeb428afddc0c41bfb53b8ecd69e | b2a936c4d489f98316300982d3309e619bceef26 | /100Day_section1/Day29/Sol2.py | 3e3e7dd3d92d76289c287b7418f315330a2ef34f | [] | no_license | HarryZhang0415/Coding100Days | 01e4ba196c06008d84d866a4a92b87e0da48b75f | 77ba81abb59f95f09c030550dcf24bb90f27a4b1 | refs/heads/master | 2023-06-27T17:13:49.765008 | 2020-10-17T01:01:27 | 2020-10-17T01:01:27 | 266,252,644 | 0 | 1 | null | 2021-08-02T21:32:58 | 2020-05-23T03:05:07 | Python | UTF-8 | Python | false | false | 417 | py | from collections import Counter
class Solution:
    def sortColors(self, nums: List[int]) -> None:
        """Sort an array containing only 0, 1 and 2 in place (counting sort).

        Do not return anything; *nums* is mutated in place, as required by
        the LeetCode "Sort Colors" problem statement.
        """
        counts = Counter(nums)
        # Rebuild the list in place: all 0s, then all 1s, then all 2s.
        nums[:] = [color for color in range(3) for _ in range(counts[color])]
"harryzhang0415@gmail.com"
] | harryzhang0415@gmail.com |
e4deaab552e15fd0a049d7785d37531f82b3d01f | acf6e8985035edd570568113f00041a438a260fd | /plugin/geo2france/utils/plugin_globals.py | 36a1f9d38034897f673e89e4c2fbb1c76f128150 | [
"BSD-3-Clause"
] | permissive | vfabry/geo2france-qgis3-plugin | 7b066361891d191dedcb5e42de830f02c5c6f8ee | f1be1bf19b0fa3b701c10c38fd8fb7690f0e5abf | refs/heads/master | 2020-12-23T01:20:55.905712 | 2020-01-07T11:10:47 | 2020-01-07T11:10:47 | 236,988,583 | 0 | 0 | null | 2020-01-29T13:29:32 | 2020-01-29T13:29:31 | null | UTF-8 | Python | false | false | 5,193 | py | # -*- coding: utf-8 -*-
import sys
import os
from geo2france.utils.singleton import Singleton
from qgis.PyQt.QtCore import QSettings
@Singleton
class PluginGlobals:
    """Singleton holding the plugin-wide constants and the values read back
    from the persistent QGIS settings store (QSettings).
    """

    # Set from the outside via set_plugin_iface / set_plugin_path.
    iface = None
    plugin_path = None

    # Plugin infos
    PLUGIN_TAG = u"Géo2France"
    PLUGIN_VERSION = u"0.9.0"
    PLUGIN_SOURCE_REPOSITORY = u"https://github.com/geopicardie/geo2france-qgis3-plugin"

    # Tree nodes types
    NODE_TYPE_FOLDER = "folder"
    NODE_TYPE_WS = "web_service"
    NODE_TYPE_WMS_LAYER = "wms_layer"
    NODE_TYPE_WMTS_LAYER = "wmts_layer"
    NODE_TYPE_WMS_LAYER_STYLE = "wms_layer_style"
    NODE_TYPE_WFS_FEATURE_TYPE = "wfs_feature_type"
    NODE_TYPE_WFS_FEATURE_TYPE_FILTER = "wfs_feature_type_filter"
    NODE_TYPE_GDAL_WMS_CONFIG_FILE = "gdal_wms_config_file"

    # Node status values
    NODE_STATUS_WARN = 'warn'

    # Images dir
    IMAGES_DIR_NAME = "images"
    LOGO_FILE_NAME = "logo_geo2france.png"
    ICON_WARN_FILE_NAME = "Icon_Simple_Warn.png"
    ICON_WMS_LAYER_FILE_NAME = "mIconWms.svg"
    ICON_WMS_STYLE_FILE_NAME = "mIconWmsStyle.svg"
    ICON_WFS_LAYER_FILE_NAME = "mIconWfs.svg"
    ICON_RASTER_LAYER_FILE_NAME = "mIconRaster.svg"

    # Config files dir
    CONFIG_FILES_DOWNLOAD_AT_STARTUP = False
    CONFIG_DIR_NAME = "config"
    CONFIG_FILE_NAMES = ["config.json"]
    CONFIG_FILE_URLS = ["https://www.geo2france.fr/public/qgis3/plugins/geo2france/config.json"]

    # Hide resources with status = warn
    HIDE_RESOURCES_WITH_WARN_STATUS = True

    # Hide empty group in the resources tree
    HIDE_EMPTY_GROUPS = True

    def __init__(self):
        """Record the default values so the settings dialog can reset them,
        and initialise the derived paths (filled in by
        reload_globals_from_qgis_settings once plugin_path is known).
        """
        self.default_qsettings = {
            "CONFIG_FILES_DOWNLOAD_AT_STARTUP": self.CONFIG_FILES_DOWNLOAD_AT_STARTUP,
            "CONFIG_FILE_NAMES": self.CONFIG_FILE_NAMES,
            "CONFIG_FILE_URLS": self.CONFIG_FILE_URLS,
            "HIDE_RESOURCES_WITH_WARN_STATUS": self.HIDE_RESOURCES_WITH_WARN_STATUS,
            "HIDE_EMPTY_GROUPS": self.HIDE_EMPTY_GROUPS
        }
        self.config_dir_path = None
        self.config_file_path = None
        self.images_dir_path = None
        self.logo_file_path = None

    def set_plugin_path(self, plugin_path):
        """Store the plugin installation directory path."""
        # system_encoding = sys.getfilesystemencoding()
        # self.plugin_path = plugin_path.decode(system_encoding)
        self.plugin_path = plugin_path

    def set_plugin_iface(self, iface):
        """Store the QGIS interface object handed over by QGIS at load time."""
        self.iface = iface

    def reload_globals_from_qgis_settings(self):
        """
        Reloads the global variables of the plugin
        """
        # Read the qgis plugin settings
        # NOTE: QSettings stores booleans as the strings "1"/"0", hence the
        # string comparisons below.
        s = QSettings()
        self.CONFIG_FILES_DOWNLOAD_AT_STARTUP = \
            True if s.value(
                u"{0}/config_files_download_at_startup".format(self.PLUGIN_TAG),
                self.CONFIG_FILES_DOWNLOAD_AT_STARTUP) == u"1" else False
        self.CONFIG_DIR_NAME = s.value(u"{0}/config_dir_name".format(self.PLUGIN_TAG), self.CONFIG_DIR_NAME)
        self.CONFIG_FILE_NAMES = s.value(u"{0}/config_file_names".format(self.PLUGIN_TAG), self.CONFIG_FILE_NAMES)
        self.CONFIG_FILE_URLS = s.value(u"{0}/config_file_urls".format(self.PLUGIN_TAG), self.CONFIG_FILE_URLS)
        self.HIDE_RESOURCES_WITH_WARN_STATUS = True if s.value(
            u"{0}/hide_resources_with_warn_status".format(self.PLUGIN_TAG),
            self.HIDE_RESOURCES_WITH_WARN_STATUS) == u"1" else False
        self.HIDE_EMPTY_GROUPS = True if s.value(
            u"{0}/hide_empty_groups".format(self.PLUGIN_TAG), self.HIDE_EMPTY_GROUPS) == u"1" else False
        # Derived filesystem paths (require plugin_path to have been set).
        self.config_dir_path = os.path.join(self.plugin_path, self.CONFIG_DIR_NAME)
        self.config_file_path = os.path.join(self.config_dir_path, self.CONFIG_FILE_NAMES[0])
        self.images_dir_path = os.path.join(self.plugin_path, self.IMAGES_DIR_NAME)
        self.logo_file_path = os.path.join(self.images_dir_path, self.LOGO_FILE_NAME)

    def reset_to_defaults(self):
        """
        Reset global variables to default values
        """
        s = QSettings()
        s.setValue(u"{0}/hide_resources_with_warn_status".format(self.PLUGIN_TAG), u"1")
        s.setValue(u"{0}/hide_empty_groups".format(self.PLUGIN_TAG), u"1")
        s.setValue(u"{0}/config_files_download_at_startup".format(self.PLUGIN_TAG), u"0")
        s.setValue(u"{0}/config_file_names".format(self.PLUGIN_TAG), ["config.json"])
        s.setValue(u"{0}/config_file_urls".format(self.PLUGIN_TAG),
                   ["https://www.geo2france.fr/public/qgis3/plugins/geo2france/config.json"])

    def get_qgis_setting_default_value(self, setting):
        """Return the default value recorded for *setting*, or None."""
        return self.default_qsettings.get(setting, None)

    def set_qgis_settings_value(self, setting, value):
        """
        Update a settings value
        """
        s = QSettings()
        # Convert boolean in unicode string
        if type(value) == bool:
            value = u"1" if value else u"0"
        # Save the settings value
        s.setValue(u"{0}/{1}".format(self.PLUGIN_TAG, setting), value)
        # Reload all settings values
        self.reload_globals_from_qgis_settings()
| [
"bchartier@neogeo.fr"
] | bchartier@neogeo.fr |
5c11684904226a34ff4139ac8f55a9665304d5fb | 7790fdf1d9f62b27440522a400b4ac08cc7714cd | /test.py | 39697bcad02bf9fc176dc14068e067978bab046c | [] | no_license | kajaje/liu | 79017f6106f4a1b498efced09e83a79de8f1eb56 | a1f24c62978296de754945a0c6e1c4f1f92418ac | refs/heads/master | 2020-11-28T02:06:39.002884 | 2020-01-09T07:02:19 | 2020-01-09T07:02:19 | 229,676,299 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | # -*- coding:utf-8 -*-
import os
import datetime
# Print the current local timestamp (Python 2 print statement).
print datetime.datetime.now()
| [
"ywliuzhigang@ZBMAC-C02W3201W.local"
] | ywliuzhigang@ZBMAC-C02W3201W.local |
eae4908d71ee6fe97c0afdd406546590b7a29277 | 283771022ed7cb7ef9c621dd617e367f09278da5 | /import/import_from_xml.py | 70b83e6cf016985d24f872a72241e77a4924b78f | [] | no_license | victorpedrocs/dados-abertos-cd | b205180a7c3d9dc643b952d9ac8242d2ea905400 | 56cff6460c7555ca3119719318df8f7191f34210 | refs/heads/master | 2023-04-14T15:37:59.800050 | 2016-05-17T03:47:08 | 2016-05-17T03:47:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,398 | py | import urllib
import xml.etree.ElementTree as ET
import zipfile
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from models import *
from datetime import datetime
import re
def import_from_file(root, session):
    """Persist every DESPESA element of a parsed CEAP XML tree.

    For each expense record, fetches-or-creates the related Partido, Estado,
    Fornecedor, Parlamentar and Legislatura rows, then inserts a Documento
    row carrying every optional field present in the XML.

    root    -- ElementTree root of the downloaded XML document
    session -- SQLAlchemy session used for all reads/writes (committed per row)
    """
    for despesa in root[0].findall('DESPESA'):
        nome_parlamentar = despesa.find('txNomeParlamentar')
        identificador_unico = despesa.find('ideCadastro')
        numero_carteira = despesa.find('nuCarteiraParlamentar')
        numero_legislatura = despesa.find('nuLegislatura')
        sigla_estado = despesa.find('sgUF')
        sigla_partido = despesa.find('sgPartido')
        codigo_legislatura = despesa.find('codLegislatura')
        numero_subcota = despesa.find('numSubCota')
        descricao_subcota = despesa.find('txtDescricao')
        num_especificacao_subcota = despesa.find('numEspecificacaoSubCota')
        descricao_especificacao_subcota = despesa.find('txtDescricaoEspecificacao')
        nome_fornecedor = despesa.find('txtFornecedor')
        cnpj = despesa.find('txtCNPJCPF')
        numero_documento = despesa.find('txtNumero')
        tipo_documento = despesa.find('indTipoDocumento')
        data_emissao_str = despesa.find('datEmissao')
        valor_documento = despesa.find('vlrDocumento')
        valor_glossa = despesa.find('vlrGlosa')
        valor_liquido = despesa.find('vlrLiquido')
        mes = despesa.find('numMes')
        ano = despesa.find('numAno')
        numero_parcela = despesa.find('numParcela')
        nome_passageiro = despesa.find('txtPassageiro')
        trecho = despesa.find('txtTrecho')
        numero_lote = despesa.find('numLote')
        numero_ressarcimento = despesa.find('numRessarcimento')
        valor_restituicao = despesa.find('vlrRestituicao')
        indentificador_solicitante = despesa.find('nuDeputadoId')

        # Bug fix: default the lookup objects to None.  They were previously
        # only bound inside the conditionals below, so an expense missing the
        # corresponding tag raised UnboundLocalError when the objects were
        # tested further down.
        partido_obj = None
        estado_obj = None
        fornecedor_obj = None

        if sigla_partido is not None:
            partido_obj = get_or_create(session, Partido, sigla=sigla_partido.text)

        if sigla_estado is not None:
            estado_obj = get_or_create(session, Estado, sigla=sigla_estado.text)

        # Fornecedor: created from whichever of name/CNPJ is present.
        nome_fornecedor_str = ""
        cnpj_str = ""
        if nome_fornecedor is not None:
            nome_fornecedor_str = nome_fornecedor.text
        if cnpj is not None:
            cnpj_str = cnpj.text
        if cnpj is not None or nome_fornecedor is not None:
            fornecedor_obj = get_or_create(session, Fornecedor, nome=nome_fornecedor_str, cnpj=cnpj_str)

        # Parlamentar: always created, with empty strings for missing fields.
        nome_parlamentar_str = ""
        identificador_unico_str = ""
        if nome_parlamentar is not None:
            nome_parlamentar_str = nome_parlamentar.text
        if identificador_unico is not None:
            identificador_unico_str = identificador_unico.text
        parlamentar_obj = get_or_create(session, Parlamentar,
                                        nome=nome_parlamentar_str,
                                        identificador_unico=identificador_unico_str)

        # Legislatura: assembled from whichever attributes are available.
        foreign_key_param = {}
        if partido_obj is not None:
            foreign_key_param['partido_fk'] = partido_obj.id
        if estado_obj is not None:
            foreign_key_param['estado_fk'] = estado_obj.id
        if numero_legislatura is not None:
            foreign_key_param['numero'] = numero_legislatura.text
        if numero_carteira is not None:
            foreign_key_param['numero_carteira'] = numero_carteira.text
        foreign_key_param['parlamentar_fk'] = parlamentar_obj.id
        legislatura_obj = get_or_create(session, Legislatura, **foreign_key_param)

        # Documento: copy every optional XML field that is present.
        documento_obj = Documento()
        if numero_documento is not None:
            documento_obj.numero_documento = numero_documento.text
        if tipo_documento is not None:
            documento_obj.tipo_documento = tipo_documento.text
        if data_emissao_str is not None:
            # Strip fractional seconds (e.g. "...T10:00:00.000") before parsing.
            date_string = re.sub(r'\.[0-9]+', "", data_emissao_str.text)
            documento_obj.data_emissao = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S")
        if valor_documento is not None:
            documento_obj.valor_documento = float(valor_documento.text)
        if valor_glossa is not None:
            documento_obj.valor_glossa = float(valor_glossa.text)
        if valor_liquido is not None:
            documento_obj.valor_liquido = float(valor_liquido.text)
        if mes is not None:
            documento_obj.mes = int(mes.text)
        if ano is not None:
            documento_obj.ano = int(ano.text)
        if numero_parcela is not None:
            documento_obj.numero_parcela = numero_parcela.text
        if nome_passageiro is not None:
            documento_obj.nome_passageiro = nome_passageiro.text
        if trecho is not None:
            documento_obj.trecho = trecho.text
        if numero_lote is not None:
            documento_obj.numero_lote = numero_lote.text
        if numero_ressarcimento is not None:
            documento_obj.numero_ressarcimento = numero_ressarcimento.text
        if valor_restituicao is not None:
            documento_obj.valor_restituicao = float(valor_restituicao.text)
        if indentificador_solicitante is not None:
            documento_obj.indentificador_solicitante = indentificador_solicitante.text
        if numero_subcota is not None:
            documento_obj.numero_subcota = numero_subcota.text
        if descricao_subcota is not None:
            documento_obj.descricao_subcota = descricao_subcota.text
        # Bug fix: this guard previously tested descricao_subcota, raising
        # AttributeError when numEspecificacaoSubCota was absent.
        if num_especificacao_subcota is not None:
            documento_obj.num_especificacao_subcota = num_especificacao_subcota.text
        if descricao_especificacao_subcota is not None:
            documento_obj.descricao_especificacao_subcota = descricao_especificacao_subcota.text
        if legislatura_obj is not None:
            documento_obj.legislatura_fk = legislatura_obj.id
        if fornecedor_obj is not None:
            documento_obj.fornecedor_fk = fornecedor_obj.id
        session.add(documento_obj)
        session.commit()
print "Iniciando o script..."
url_ano_atual = "http://www.camara.gov.br/cotas/AnoAtual.zip"
url_ano_anterior = "http://www.camara.gov.br/cotas/AnoAnterior.zip"
url_demais_anos = "http://www.camara.gov.br/cotas/AnosAnteriores.zip"
print "Importando Ano Atual"
print "Baixando o arquivo..."
downloaded_file, _ = urllib.urlretrieve(url_ano_atual)
print "Colocando o arquivo em um objeto..."
zip_file_object = zipfile.ZipFile(downloaded_file, 'r')
print "Removendo o arquivo interno..."
first_file = zip_file_object.namelist()[0]
print "Colocando o arquivo xml em uma variavel..."
file = zip_file_object.open(first_file)
print "Abrindo o arquivo XML com o ElementTree..."
tree = ET.parse(file)
root = tree.getroot()
print "Criando a sessao..."
Session = sessionmaker(engine)
session = Session()
print "Criando os objetos a partir de cada objeto no arquivo xml..."
import_from_file( root, session )
session.close()
print "Importando Ano anterior"
print "Baixando o arquivo..."
downloaded_file, _ = urllib.urlretrieve(url_ano_anterior)
print "Colocando o arquivo em um objeto..."
zip_file_object = zipfile.ZipFile(downloaded_file, 'r')
print "Removendo o arquivo interno..."
first_file = zip_file_object.namelist()[0]
print "Colocando o arquivo xml em uma variavel..."
file = zip_file_object.open(first_file)
print "Abrindo o arquivo XML com o ElementTree..."
tree = ET.parse(file)
root = tree.getroot()
print "Criando a sessao..."
Session = sessionmaker(engine)
session = Session()
print "Criando os objetos a partir de cada objeto no arquivo xml..."
import_from_file( root, session )
session.close()
print "Importando demais anos"
print "Baixando o arquivo..."
downloaded_file, _ = urllib.urlretrieve(url_demais_anos)
print "Colocando o arquivo em um objeto..."
zip_file_object = zipfile.ZipFile(downloaded_file, 'r')
print "Removendo o arquivo interno..."
first_file = zip_file_object.namelist()[0]
print "Colocando o arquivo xml em uma variavel..."
file = zip_file_object.open(first_file)
print "Abrindo o arquivo XML com o ElementTree..."
tree = ET.parse(file)
root = tree.getroot()
print "Criando a sessao..."
Session = sessionmaker(engine)
session = Session()
print "Criando os objetos a partir de cada objeto no arquivo xml..."
import_from_file( root, session )
session.close()
| [
"vpcx5@yahoo.com.br"
] | vpcx5@yahoo.com.br |
89588f1507285e3312add597434439152e7280fa | 7a3696072a511acc4974bb76004b315a35a106b7 | /SS-GCNs/SS-GMNN-GraphMix/GraphMix-clu/trainer.py | 0a339f94160ac998022247c886c38f0cec71a1bd | [
"MIT"
] | permissive | xyh97/graph_representation | 859e9f2ff911058db251fd6547098968960c6739 | e6967073a951cd029651389d4b76606f9cef7f6c | refs/heads/main | 2023-03-23T00:55:52.763740 | 2021-03-19T17:15:40 | 2021-03-19T17:15:40 | 343,223,074 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,909 | py | import math
import random
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.autograd import Variable
import torch.nn.functional as F
from torch.optim import Optimizer
# Module-level loss/activation singletons shared by Trainer methods.
# NOTE(review): the unconditional .cuda() calls make this module require a
# GPU at import time — confirm whether CPU-only runs should be supported.
bce_loss = nn.BCELoss().cuda()
softmax = nn.Softmax(dim=1).cuda()
class_criterion = nn.CrossEntropyLoss().cuda()
def mixup_criterion(y_a, y_b, lam):
    """Return a callable computing the lam-weighted mixup loss between the
    two target sets *y_a* and *y_b* for a given (criterion, prediction)."""
    def mixed_loss(criterion, pred):
        return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
    return mixed_loss
def get_optimizer(name, parameters, lr, weight_decay=0):
    """Instantiate a torch optimizer chosen by its short name.

    Supported names: sgd, rmsprop, adagrad, adam, adamax.
    Raises Exception for anything else.
    """
    factories = {
        'sgd': torch.optim.SGD,
        'rmsprop': torch.optim.RMSprop,
        'adagrad': torch.optim.Adagrad,
        'adam': torch.optim.Adam,
        'adamax': torch.optim.Adamax,
    }
    try:
        factory = factories[name]
    except KeyError:
        raise Exception("Unsupported optimizer: {}".format(name))
    return factory(parameters, lr=lr, weight_decay=weight_decay)
def change_lr(optimizer, new_lr):
    """Overwrite the learning rate of every parameter group in place."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr
class Trainer(object):
    """Training/evaluation wrapper around a GraphMix/GMNN model.

    Bundles the node classifier with an auxiliary self-supervised linear head
    (``ss_classifier``) that predicts graph-partition (cluster) labels, and
    owns the shared optimizer over both sets of parameters.
    """

    def __init__(self, opt, model, partition_labels, ema=True):
        partition_num = partition_labels.max() + 1
        # Bug fix: only move the partition labels to the GPU when requested.
        # The previous unconditional .cuda() crashed on CPU-only hosts even
        # though every other CUDA transfer in this class is guarded.
        self.partition_labels = partition_labels.cuda() if opt['cuda'] else partition_labels
        self.task_ratio = opt['task_ratio']
        self.loss_func = nn.CrossEntropyLoss()
        self.opt = opt
        self.ema = ema
        self.model = model
        self.criterion = nn.CrossEntropyLoss()
        self.parameters = [p for p in self.model.parameters() if p.requires_grad]
        # Linear head for the self-supervised partition-prediction task.
        self.ss_classifier = nn.Linear(opt['hidden_dim'], partition_num, bias=False)
        if opt['cuda']:
            self.criterion.cuda()
            self.ss_classifier.cuda()
        self.parameters.append(self.ss_classifier.weight)
        if self.ema == True:
            self.optimizer = get_optimizer(self.opt['optimizer'], self.parameters, self.opt['lr'], self.opt['decay'])

    def reset(self):
        """Re-initialise the model weights and rebuild the optimizer."""
        self.model.reset()
        if self.ema == True:
            self.optimizer = get_optimizer(self.opt['optimizer'], self.parameters, self.opt['lr'], self.opt['decay'])

    def update(self, inputs, target, idx):
        """One supervised step on hard labels; returns the scalar loss."""
        if self.opt['cuda']:
            inputs = inputs.cuda()
            target = target.cuda()
            idx = idx.cuda()
        self.model.train()
        self.optimizer.zero_grad()
        logits = self.model(inputs)
        loss = self.criterion(logits[idx], target[idx])
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def update_soft(self, inputs, target, idx, idx_u):
        """Compute (but do not apply) the soft-label cross-entropy on idx and
        the self-supervised partition loss on idx_u; returns (loss, loss0)."""
        if self.opt['cuda']:
            inputs = inputs.cuda()
            target = target.cuda()
            idx = idx.cuda()
        logits = self.model(inputs)
        logits = torch.log_softmax(logits, dim=-1)
        # Soft cross-entropy against the (possibly smoothed) target distribution.
        loss = -torch.mean(torch.sum(target[idx] * logits[idx], dim=-1))
        logits0 = self.model.forward_partition(inputs)
        logits0 = self.ss_classifier(logits0)
        loss0 = self.loss_func(logits0[idx_u], self.partition_labels[idx_u])
        return loss, loss0

    def update_soft_aux(self, inputs, target, target_discrete, idx, idx_unlabeled, adj, opt, mixup_layer, idx_u):
        """uses the auxiliary loss as well, which does not use the adjacency information

        Returns (supervised mixup loss, unsupervised mixup loss, partition loss).
        """
        if self.opt['cuda']:
            inputs = inputs.cuda()
            target = target.cuda()
            idx = idx.cuda()
            idx_unlabeled = idx_unlabeled.cuda()

        self.model.train()
        self.optimizer.zero_grad()

        mixup = True
        if mixup == True:
            # get the supervised mixup loss #
            logits, target_a, target_b, lam = self.model.forward_aux(inputs, target=target, train_idx=idx, mixup_input=False, mixup_hidden=True, mixup_alpha=opt['mixup_alpha'], layer_mix=mixup_layer)
            logits0 = self.model.forward_partition(inputs)
            logits0 = self.ss_classifier(logits0)
            loss0 = self.loss_func(logits0[idx_u], self.partition_labels[idx_u])
            mixed_target = lam*target_a + (1-lam)*target_b
            loss = bce_loss(softmax(logits[idx]), mixed_target)
            # get the unsupervised mixup loss #
            logits, target_a, target_b, lam = self.model.forward_aux(inputs, target=target, train_idx=idx_unlabeled, mixup_input=False, mixup_hidden=True, mixup_alpha=opt['mixup_alpha'], layer_mix=mixup_layer)
            mixed_target = lam*target_a + (1-lam)*target_b
            loss_usup = bce_loss(softmax(logits[idx_unlabeled]), mixed_target)
        else:
            # Non-mixup fallback (currently unreachable; kept for experiments).
            logits = self.model.forward_aux(inputs, target=None, train_idx=idx, mixup_input=False, mixup_hidden=False, mixup_alpha=0.0, layer_mix=None)
            logits = torch.log_softmax(logits, dim=-1)
            loss = -torch.mean(torch.sum(target[idx] * logits[idx], dim=-1))
            logits = self.model.forward_aux(inputs, target=None, train_idx=idx_unlabeled, mixup_input=False, mixup_hidden=False, mixup_alpha=0.0, layer_mix=None)
            logits = torch.log_softmax(logits, dim=-1)
            loss_usup = -torch.mean(torch.sum(target[idx_unlabeled] * logits[idx_unlabeled], dim=-1))
        return loss, loss_usup, loss0

    def evaluate(self, inputs, target, idx):
        """Evaluate on the nodes in idx; returns (loss, predictions, accuracy)."""
        if self.opt['cuda']:
            inputs = inputs.cuda()
            target = target.cuda()
            idx = idx.cuda()
        self.model.eval()
        logits = self.model(inputs)
        loss = self.criterion(logits[idx], target[idx])
        preds = torch.max(logits[idx], dim=1)[1]
        correct = preds.eq(target[idx]).double()
        accuracy = correct.sum() / idx.size(0)
        return loss.item(), preds, accuracy.item()

    def predict(self, inputs, tau=1):
        """Return softmax class probabilities (temperature tau), detached."""
        if self.opt['cuda']:
            inputs = inputs.cuda()
        self.model.eval()
        logits = self.model(inputs) / tau
        logits = torch.softmax(logits, dim=-1).detach()
        return logits

    def predict_aux(self, inputs, tau=1):
        """Like predict(), but through the auxiliary (FC) forward path."""
        if self.opt['cuda']:
            inputs = inputs.cuda()
        self.model.eval()
        logits = self.model.forward_aux(inputs) / tau
        logits = torch.softmax(logits, dim=-1).detach()
        return logits

    def predict_noisy(self, inputs, tau=1):
        """predict() without switching to eval mode (keeps dropout active)."""
        if self.opt['cuda']:
            inputs = inputs.cuda()
        # self.model.eval() intentionally skipped: noisy (dropout-on) forward.
        logits = self.model(inputs) / tau
        logits = torch.softmax(logits, dim=-1).detach()
        return logits

    def predict_noisy_aux(self, inputs, tau=1):
        """predict_aux() without switching to eval mode."""
        if self.opt['cuda']:
            inputs = inputs.cuda()
        # self.model.eval() intentionally skipped: noisy (dropout-on) forward.
        logits = self.model.forward_aux(inputs) / tau
        logits = torch.softmax(logits, dim=-1).detach()
        return logits

    def save(self, filename):
        """Best-effort checkpoint of model and optimizer state."""
        params = {
            'model': self.model.state_dict(),
            'optim': self.optimizer.state_dict()
        }
        try:
            torch.save(params, filename)
        except BaseException:
            print("[Warning: Saving failed... continuing anyway.]")

    def load(self, filename):
        """Restore model and optimizer state; exits the process on failure."""
        try:
            checkpoint = torch.load(filename)
        except BaseException:
            print("Cannot load model from {}".format(filename))
            exit()
        self.model.load_state_dict(checkpoint['model'])
        self.optimizer.load_state_dict(checkpoint['optim'])
| [
"xiangning@cs.ucla.edu"
] | xiangning@cs.ucla.edu |
b3e16c11c3347c4bec9ffbfdc601c331183f2130 | 2eb7481aac8816259e99bfcefd8e5137f9473d49 | /analyze/db_operation.py | c6f3ee8e881feae0db540c9e9888154d6eef4498 | [] | no_license | chenxy1996/CXK | a7cfd32f0d4951fa8e6228571a9d1b729cc5a9e5 | 46d01e8446ba748bf6d8abe8efe69cc070854a21 | refs/heads/master | 2020-06-25T02:39:57.814117 | 2019-07-27T14:22:06 | 2019-07-27T14:22:06 | 199,173,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,852 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import pymongo
from pymongo import MongoClient
from bson import ObjectId
from snownlp import normal
from snownlp import seg
from snownlp.summary import textrank
from snownlp import SnowNLP
from snownlp import sentiment
from connect_to_mongo import ConnectToMongo
from multiprocessing import Pool
from danmaku_seg import segProcess
# Regex-pattern -> canonical-form pairs used to normalise danmaku variants,
# e.g. any comment containing "蔡徐坤" collapses to the canonical "蔡徐坤".
REFINE_LIST = [[".*蔡徐坤.*", "蔡徐坤"], [".*范丞.*", "范丞丞"], \
    [".*吴亦.*", "吴亦凡"], [".*哈哈.*", "哈哈哈"], [".*233.*", "2333"], \
    [".*林彦.*", "林彦俊"], [".*彦俊.*", "林彦俊"], \
    [".*朱正.*", "朱正廷"], [".*正廷.*", "朱正廷"], [".*律师.*", "律师函"], \
    [".*66.*", "666"]]
# Split a sequence into roughly part_num equal chunks; returns [[...], ..., [...]].
def divide(target, part_num=12):
    """Partition *target* into chunks of size len(target) // part_num.

    A trailing shorter chunk holds any remainder.  When the sequence is
    shorter than part_num (chunk size 0), the whole sequence is returned as
    a single chunk.
    """
    total = len(target)
    chunk = total // part_num
    if not chunk:
        return [target]
    return [target[i:i + chunk] for i in range(0, total, chunk)]
# Sentiment score of *content* (0 = negative .. 1 = positive), rounded to 4 dp.
def getSentimentalIndex(content):
    """Return SnowNLP's sentiment probability for *content*, 4-decimal rounded."""
    return round(SnowNLP(content).sentiments, 4)
# Map a birthday string such as "03-21" (month-day) to its zodiac sign name.
def getConstellation(birthday_string):
    """Return the Chinese constellation name for a "MM-DD" birthday string.

    The date is converted to a month.day float and compared against inclusive
    range boundaries; the Capricorn range wraps around the new year.
    NOTE(review): assumes the input is "MM-DD"; a full "YYYY-MM-DD" string
    would make float() raise ValueError — confirm the stored format.
    """
    value = float(birthday_string.replace("-", "."))
    ranges = [
        ("山羊座", 12.21, 1.20),
        ("水瓶座", 1.21, 2.19),
        ("双鱼座", 2.20, 3.20),
        ("白羊座", 3.21, 4.19),
        ("金牛座", 4.20, 5.20),
        ("双子座", 5.21, 6.21),
        ("巨蟹座", 6.22, 7.22),
        ("狮子座", 7.23, 8.22),
        ("处女座", 8.23, 9.22),
        ("天秤座", 9.23, 10.23),
        ("天蝎座", 10.24, 11.21),
        ("射手座", 11.22, 12.20),
    ]
    for sign, start, end in ranges:
        if sign == "山羊座":
            # Capricorn spans the year boundary, so either side may match.
            if start <= value or value <= end:
                return sign
        elif start <= value <= end:
            return sign
# Add a `sentiment` field, derived from `content`, to every comment document.
def addSentimentToComment():
    """Score each document in the comments collection and persist the result."""
    client = ConnectToMongo()
    comments = client.comments
    for count, doc in enumerate(comments.find({}), start=1):
        score = getSentimentalIndex(doc["content"])
        comments.update_one({"_id": doc["_id"]}, {"$set": {"sentiment": score}})
        print(count, doc["content"], score)
    client.close()
# For every user: derive the constellation from the birthday and count comments.
def addConsAndCommCountToUsers(users_set):
    """Set `constellation` and `comments_count` on each user document.

    Bug fix: the constellation is now computed from the user's birthday via
    getConstellation().  The old code read each_user["constellation"], a
    field the caller's Mongo projection (see mainForUsers) never fetched,
    so every call raised KeyError.
    """
    client = ConnectToMongo()
    comments = client.comments
    users = client.users
    for each_user in users_set:
        mid = each_user["mid"]
        name = each_user["name"]
        constellation = getConstellation(each_user["birthday"])
        comments_count = comments.count_documents({"mid": mid})
        users.update_one({"mid": mid}, {"$set": {"constellation": constellation, "comments_count": comments_count}})
        print(name, constellation, comments_count)
    client.close()
# 给users_set集合中的每一个document添加给多少个视频发过弹幕(同一cid视频发送多个弹幕只算一个);
def addCidCountToUsers(users_set):
    """Persist, per user, how many distinct videos (cids) they commented on.

    Multiple comments on the same cid count once.
    """
    client = ConnectToMongo()
    comments = client.comments
    users = client.users
    for user in users_set:
        mid = user["mid"]
        seen_cids = []
        distinct = 0
        for comment in comments.find({"mid": mid}):
            cid = comment["cid"]
            if cid not in seen_cids:
                seen_cids.append(cid)
                distinct += 1
        users.update_one({"mid": mid}, {"$set": {"cid_count": distinct}})
        print(user["name"], distinct)
    client.close()
# 给comments_set集合中的每一个document添加星座;并添加发送者性别;
def addConstsAndSexToComments(users_set):
    """Copy each user's constellation and sex onto all of their comments."""
    client = ConnectToMongo()
    comments = client.comments
    for user in users_set:
        comments.update_many(
            {"mid": user["mid"]},
            {"$set": {"constellation": user["constellation"], "sex": user["sex"]}})
        print(user["name"], user["constellation"], user["sex"])
    client.close()
# 给集合female_cloud_words或male_cloud_words添加document————关键词以及权重提取
def createCloudWords(params):
    """Worker: extract keywords from a batch of comments and bump their counts.

    *params* is a two-item sequence: the gender prefix ("female" or "male",
    selecting the <gender>_cloud_words collection) and a list of comment
    documents.  Each processed comment is flagged with ifSeg=1 so it is not
    segmented twice.
    """
    gender, comments_set = params[0], params[1]
    client = MongoClient()
    db = client.get_database("bilibili")
    words_col = db.get_collection(gender + "_cloud_words")
    comments_col = db.get_collection("蔡徐坤_comments")
    for comment in comments_set:
        keywords = segProcess(comment["content"])
        comments_col.update_one({"_id": comment["_id"]}, {"$set": {"ifSeg": 1}})
        for keyword in keywords:
            words_col.update_one({"word": keyword}, {"$inc": {"count": 1}}, upsert=True)
            print(keyword, "+1")
    client.close()
# 给users中全部数据添加新的field
def mainForUsers(filter, processes_num, function_name):
    """Fan *function_name* out over all users matching *filter* with a process pool."""
    client = ConnectToMongo()
    users = list(client.users.find(filter, {"birthday": 1, "mid": 1, "name": 1}))
    client.close()
    pool = Pool(processes_num)
    pool.map(function_name, divide(users))
    pool.close()
    pool.join()
# 给comments集合中全部数据添加新的field
def mainForComments(processes_num):
    """Copy constellation/sex onto the comments of every counted user, in parallel."""
    client = ConnectToMongo()
    users = list(client.users.find(
        {"comments_count": {"$exists": True}},
        {"mid": 1, "constellation": 1, "name": 1, "sex": 1}))
    client.close()
    pool = Pool(processes_num)
    pool.map(addConstsAndSexToComments, divide(users))
    pool.close()
    pool.join()
# 关键词词云
def mainForKeywords(name, progress_num):
    """Segment all not-yet-processed comments of one gender in parallel.

    *name* must be "female" or "male"; anything else raises NameError
    (same behaviour as the original).
    """
    if name == "female":
        sex = u"女"
    elif name == "male":
        sex = u"男"
    client = ConnectToMongo()
    comments = list(client.comments.find(
        {"sex": sex, "ifSeg": {"$exists": False}}, {"content": 1}))  # "ifSeg": {"$exists": False}
    client.close()
    params_list = [[name, batch] for batch in divide(comments)]
    pool = Pool(progress_num)
    pool.map(createCloudWords, params_list)
    pool.close()
    pool.join()
# 对现有的云词(关键词及其词频), 做整合和优化。
# 由于关键词分割的遗留问题,造成一些关键词未能很好的提取出来。
# 例如“蔡徐坤”, “蔡徐坤蔡”等等
def collateCloudWord(refine_list = REFINE_LIST):
    """Merge partial/duplicated keyword variants into their canonical form.

    Keyword segmentation left variants such as "蔡徐坤蔡" next to "蔡徐坤";
    for every (regex, target) pair in *refine_list*, all matching words are
    folded into *target*: their counts are added to the target document and
    the variant documents are deleted, in both gender collections.
    """
    client = MongoClient()
    db = client.get_database("bilibili")
    collections = (db.get_collection("female_cloud_words"),
                   db.get_collection("male_cloud_words"))
    # BUG FIX: the original iterated the global REFINE_LIST, silently
    # ignoring the `refine_list` argument.
    for regex, target in refine_list:
        print("-------------------------------------")
        for col in collections:
            for doc in col.find({"word": {"$regex": regex}}):
                word_content = doc["word"]
                if word_content != target:
                    current_count = doc["count"]
                    col.update_one({"word": target}, {"$inc": {"count": current_count}})
                    col.delete_one({"word": word_content})
                    print(word_content, current_count)
    client.close()
# 从female_cloud_words或male_cloud_words词云中提取数据, 存入csv文件中
def getKeywordsFromCloudWordsCOl(nums): # nums为获取的数量
    """Export the *nums* most frequent keywords of each gender to CSV.

    Writes "word;count" lines to female_frequency.csv and male_frequency.csv.
    """
    client = MongoClient()
    db = client.get_database("bilibili")
    female_col = db.get_collection("female_cloud_words")
    male_col = db.get_collection("male_cloud_words")
    # BUG FIX: materialize both cursors BEFORE closing the client -- pymongo
    # cursors fetch lazily, so iterating them after close() fails.
    female_frequency_list = list(female_col.find().sort("count", pymongo.DESCENDING).limit(nums))
    male_frequency_list = list(male_col.find().sort("count", pymongo.DESCENDING).limit(nums))
    client.close()
    with open("female_frequency.csv", "w", encoding="utf-8") as f:
        for each_doc in female_frequency_list:
            f.write(each_doc["word"] + ";" + str(each_doc["count"]) + "\n")
    with open("male_frequency.csv", "w", encoding="utf-8") as f:
        for each_doc in male_frequency_list:
            f.write(each_doc["word"] + ";" + str(each_doc["count"]) + "\n")
# 从数据库中拿到数据, 返回列表
def getDataFromDb(filter, projection=None, col_name = "蔡徐坤_comments", db_name="bilibili"):
    """Fetch all documents matching *filter* from Mongo and return them as a list."""
    client = MongoClient()
    collection = client.get_database(db_name).get_collection(col_name)
    documents = list(collection.find(filter, projection))
    client.close()
    return documents
if __name__ == "__main__":
    # Previous batch runs, kept for reference:
    # mainForKeywords("male", 4)
    # params = mainForKeywords("female", 8)
    # createCloudWords(params)
    # collateCloudWord()
    # getKeywordsFromCloudWordsCOl(100)
    # mainForUsers({"cid_count": {"$exists": False}}, function_name=addCidCountToUsers, processes_num=6)
    # test = getDataFromDb({"sex": "男"}, {"_id": 0, "sentiment": 1})
    # print(len(test))
    # Ad-hoc sentiment smoke test on two sample comments.
    index1 = getSentimentalIndex(u"666")
    index2 = getSentimentalIndex(u"哈哈哈")
    print("%s: %s" % (u"666", index1))
    print("%s: %s" % (u"哈哈哈", index2))
"www.459971074@qq.com"
] | www.459971074@qq.com |
f34d0f0a19af43018c6cff0a037f889953253534 | 949b3bedf6dd1e145a9ab65ec4e581a9125312d4 | /data/downloader.py | 9074f7ee789c90ce0af5a325a924a75ba737ba26 | [] | no_license | wroldwiedbwe/SanBeniculturali-Downloader | d1837aef34c2f4041d9a57dd6baaa150cd46e751 | 90c8a9b9ea5f5af57c3d7bd2dfa340c9c71c5772 | refs/heads/main | 2023-06-22T11:12:26.538795 | 2021-03-08T10:20:52 | 2021-03-08T10:20:52 | 338,150,135 | 0 | 0 | null | 2021-02-11T20:54:01 | 2021-02-11T20:54:00 | null | UTF-8 | Python | false | false | 5,393 | py | import os
import time
from threading import Thread
from queue import Queue
import platform
import requests
from PyQt5 import QtCore
from lxml import html
import urllib
from datetime import datetime
from multiprocessing import Process
BASE_URL = 'http://dl.antenati.san.beniculturali.it'
def get_url_list(url, urls):
"""get all the urls in the page"""
req = requests.get(BASE_URL + url)
tree = html.fromstring(req.content)
table = tree.xpath('//*[@class="giTitle"]//a')
for elem in table:
if elem.attrib['href'] not in urls:
urls.append(elem.attrib['href'])
next_page = tree.cssselect('div.next-and-last')
if next_page and len(next_page) and len(next_page[0]):
get_url_list(tree.cssselect('div.next-and-last')[0][0].get('href'), urls)
def get_url_list_download(url, urls, run):
    """Recursively walk *url*, pushing every image (.jpg) link onto the *urls* queue.

    *run* is a zero-argument callable polled for cancellation before each page.
    """
    if not run():
        return
    if 'jpg' in url:
        urls.put(url)
        return
    response = requests.get(BASE_URL + url)
    tree = html.fromstring(response.content)
    for anchor in tree.xpath('//*[@class="giTitle"]//a'):
        href = anchor.attrib['href']
        if 'jpg' in href:
            # Leaf: an image page.
            urls.put(href)
        else:
            # Branch: descend into the sub-gallery.
            get_url_list_download(href, urls, run)
    # Follow pagination when present.
    pagination = tree.cssselect('div.next-and-last')
    if pagination and len(pagination) and len(pagination[0]):
        get_url_list_download(pagination[0][0].get('href'), urls, run)
def compute_path(image):
"""Compute the path where save a collection"""
path = os.getcwd()
try:
if platform.system() == "Windows":
path += "\\Download\\"
for folder in image[3:].split('/')[:-1]:
path += folder + "\\"
elif platform.system() == "Linux":
path += "/Download/"
for folder in image[3:].split('/')[:-1]:
path += folder + "/"
elif platform.system() == 'Darwin':
path += "/Download/"
for folder in image[3:].split('/')[:-1]:
path += folder + "/"
if not os.path.exists(path):
os.makedirs(path)
except:
pass
return path
def save_image(image):
    """Download the full-resolution file behind the *image* viewer page."""
    target_dir = compute_path(image)
    page = requests.get(BASE_URL + image)
    tree = html.fromstring(page.content)
    img_src = tree.xpath('//*[@id="gsImageView"]//a')[0].attrib.get('href')
    if img_src:
        # File name: last path component minus the 5-char viewer suffix.
        filename = image.split('/')[-1][:-5]
        urllib.request.urlretrieve(img_src, target_dir + filename)
def get_size(start_path='.'):
    """Return the total size in bytes of all regular files under *start_path*."""
    total = 0
    for dirpath, _dirnames, filenames in os.walk(start_path):
        for name in filenames:
            full = os.path.join(dirpath, name)
            # Symbolic links are skipped so targets are not counted twice.
            if not os.path.islink(full):
                total += os.path.getsize(full)
    return total
class ImageDownloader(QtCore.QThread):
    """QThread that crawls a collection URL and downloads every image in it.

    One worker thread collects image URLs into `url_list` while a second
    drains the queue, saving at most 5 images concurrently.  Emits
    `complete` with "<summary>|<start_url>" when finished.

    NOTE(review): `url_list`, `processes` and `tot_download` are class
    attributes, so they are shared between all ImageDownloader instances --
    TODO confirm only one instance runs at a time.
    """
    url_list = Queue()
    complete = QtCore.pyqtSignal(object)
    start_time = None
    tot_download = 0
    processes = []
    def __init__(self, start_url):
        """Constructor"""
        QtCore.QThread.__init__(self)
        self.start_url = start_url
        # BUG FIX: the original stored this flag in `self.run`, shadowing the
        # run() method below and breaking QThread.start(); keep the flag
        # under a distinct name.
        self._running = False
        self.get_urls = Thread(target=get_url_list_download,
                               args=(self.start_url, self.url_list, lambda: self._running))
        self.download_images = Thread(target=self.download)
    def run(self):
        """Start the downloading of images"""
        self._running = True
        self.start_time = datetime.now()
        self.get_urls.start()
        self.download_images.start()
    def download(self):
        """Drain url_list, saving images with at most 5 concurrent workers."""
        while self.get_urls.is_alive():
            if not self.url_list.empty():
                if len(self.processes) < 5:
                    image = self.url_list.get()
                    t = Thread(target=save_image, args=(image,))
                    t.start()
                    self.processes.append(t)
                    self.tot_download += 1
                else:
                    # Wait for the whole batch before starting new workers.
                    for _ in range(len(self.processes)):
                        self.processes[0].join()
                        self.processes = self.processes[1:]
            time.sleep(0.005)
        if self._running:
            # Crawler finished normally: flush whatever is left in the queue.
            # NOTE(review): this phase uses Process workers where the loop
            # above used Thread -- presumably intentional; confirm.
            while not self.url_list.empty():
                if len(self.processes) < 5:
                    image = self.url_list.get()
                    p = Process(target=save_image, args=(image,))
                    p.start()
                    self.processes.append(p)
                    self.tot_download += 1
                else:
                    for _ in range(len(self.processes)):
                        self.processes[0].join()
                        self.processes = self.processes[1:]
            for p in self.processes:
                p.join()
        self._running = False
        size = get_size(compute_path(self.start_url))
        msg = "Downloaded {} images of {:.2f} Mbytes in {} seconds".format(
            self.tot_download, size/1024/1024, str(datetime.now()-self.start_time).split('.')[0])
        self.complete.emit(msg + '|' + self.start_url)
    def stop(self):
        """Stop the downloading of images"""
        self._running = False
| [
"marcello.cuoghi@gmail.com"
] | marcello.cuoghi@gmail.com |
a6bfa4bc21326b751b80728564ef1a6de6811fd9 | 878f3d634c6698043bf98d31cb77129dfcb051b4 | /ssht00ls/classes/ssync/utils.py | 8aa105f186ce770d5b184b537e56343ca0cfe077 | [] | no_license | 00mjk/ssht00ls | 592ca76467f69b41cb946499994bc0d81ccfc7b3 | 74605bebc30c6a8de408c882bf44e40385d88b02 | refs/heads/master | 2023-03-16T17:01:21.986459 | 2021-03-18T12:24:16 | 2021-03-18T12:24:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,222 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imports.
import os
from dev0s.shortcuts import *
from ssht00ls.classes.config import *
from ssht00ls.classes.ssh import ssh
from ssht00ls.classes.ssync.index import index
from ssht00ls.classes.ssh.utils import execute
# settings.
# Sub-paths appended to every rsync --include / --exclude list built below.
INCLUDE = []# '.git', 'dist', "'*.egg-info'"
EXCLUDE = ['__pycache__', '.DS_Store']
# serialize path.
def serialize_path(path, append_last_slash=False):
    """Normalize *path* with gfp.clean, optionally forcing a single trailing slash."""
    if not append_last_slash:
        return gfp.clean(path)
    return gfp.clean(path, remove_double_slash=True, remove_last_slash=True) + "/"
# get the size of a dir.
def size(path, alias=None, log_level=0):
    """Return the size in MB of *path*, locally or on the remote *alias*.

    With alias=None the size is read from the local filesystem; otherwise a
    helper script is invoked over ssh and its serialized response returned.
    """
    if alias == None:
        return dev0s.response.success(f"Successfully retrieved the size of {path}.", {
            "size":FilePath(path).size(mode="MB"),
        })
    else:
        # BUG FIX: the remote command previously ended with a stray quote
        # ("--non-interactive ' '"), producing an unterminated shell string.
        return execute(
            command=f"""ssh {DEFAULT_SSH_OPTIONS} {alias} ' python3 /usr/local/lib/ssht00ls/lib/utils/size {path} --non-interactive '""",
            message=f"Successfully retrieved the size of [{alias}:{path}].",
            log_level=log_level,
            serialize=True,)
# pull.
def pull(
    # the local path.
    path=None,
    # the ssht00ls alias.
    alias=None,
    # the remote path.
    remote=None,
    # exlude subpaths (list) (leave None to exclude none).
    exclude=[],
    # include subpaths (list) (leave None to include none).
    include=[],
    # path is directory boolean (leave None to parse automatically).
    directory=True,
    empty_directory=False,
    # update deleted files.
    delete=False,
    # forced mode.
    forced=False,
    # version control.
    safe=False,
    # accept new hosts keys.
    accept_new_host_keys=True,
    # checks.
    checks=True,
    # log level.
    log_level=dev0s.defaults.options.log_level,
    # get the command in str.
    command=False,
    ):
    """Pull [alias:remote] down to the local [path] with rsync.

    Returns the dev0s response of the rsync execution, or the rsync command
    string itself when command=True.
    """
    # BUG FIX: the original mutated `exclude` / `include` in place with +=,
    # which permanently grew the mutable default arguments across calls.
    if exclude != None: exclude = list(exclude) + EXCLUDE
    if include != None: include = list(include) + INCLUDE
    if checks:
        # check alias.
        path = serialize_path(gfp.clean(path))
        remote = serialize_path(gfp.clean(remote))
        #response = aliases.check(alias)
        #if not response.success: return response
        # check encryption activated.
        #if not ssht00ls_agent.activated:
        #	return dev0s.response.error(f"The {ssht00ls_agent.id} encryption requires to be activated.")
        # check passphrase.
        #if CONFIG["aliases"][alias]["smartcard"] in [True, "true", "True"]:
        #	response = ssht00ls_agent.encryption.decrypt(CONFIG["aliases"][alias]["passphrase"])
        #else:
        #	response = ssht00ls_agent.encryption.decrypt(CONFIG["aliases"][alias]["passphrase"])
        #if not response.success: return response
        #passphrase = response.decrypted.decode()
        # tests.
        #response = agent.add(private_key=CONFIG["aliases"][alias]["private_key"], passphrase=passphrase)
        #if not response["success"]: return response
        response = ssh.utils.test_ssht00ls(alias=alias, accept_new_host_keys=accept_new_host_keys)
        if not response.success: return response
        response = ssh.utils.test_path(alias=alias, path=remote, accept_new_host_keys=accept_new_host_keys)
        if not response.success: return response
        # dir: detect / verify that the remote path is a directory.
        if directory == None:
            response = ssh.utils.test_dir(alias=alias, path=remote, accept_new_host_keys=accept_new_host_keys)
            if not response.success and "not a directory" not in response.error: return response
            elif response.success:
                directory = True
            else: directory = False
            tested = True
        elif directory:
            response = ssh.utils.test_dir(alias=alias, path=remote, accept_new_host_keys=accept_new_host_keys)
            if not response.success: return response
            tested = True
    # check base: make sure the local parent directory exists.
    base = FilePath(path).base(back=1)
    if not Files.exists(base):
        os.system(f"mkdir -p {base}")
        if not Files.exists(base):
            return dev0s.response.error(f"Failed to create pull base {base}.")
        if log_level >= 3:
            print(f"Created directory {base}.")
    # fix rsync timestamp bug.
    if empty_directory and directory and not Files.exists(path):
        os.system(f"mkdir -p {path}")
    # options.
    exclude_str = Array(array=exclude).string(joiner=" --exclude ", sum_first=True)
    include_str = Array(array=include).string(joiner=" --include ", sum_first=True)
    delete_str = Boolean(delete).string(true="--delete", false="")
    lremote = serialize_path(gfp.clean(remote), append_last_slash=directory)
    lpath = serialize_path(gfp.clean(path), append_last_slash=directory)
    _command_ = f"rsync -{Boolean(directory).string(true='a', false='')}zqt '{alias}:{lremote}' '{lpath}' {exclude_str} {include_str} {delete_str} --timeout={SSH_TIMEOUT}"
    #_command_ = f"rsync -azqtr '{alias}:{lremote}' '{lpath}' {exclude_str} {include_str} {delete_str}"
    # execute.
    if command: return _command_
    else:
        return execute(
            command=_command_,
            loader=f"Pulling [{alias}:{remote}] to [{path}]",
            message=f"Successfully pulled [{alias}:{remote}] to [{path}].",
            log_level=log_level,
        )
#
# push.
def push(
    # the local path.
    path=None,
    # the ssht00ls alias.
    alias=None,
    # the remote path.
    remote=None,
    # exlude subpaths (list) (leave None to exclude none).
    exclude=[],
    # include subpaths (list) (leave None to include none).
    include=[],
    # path is directory boolean (leave None to parse automatically).
    directory=True,
    empty_directory=False,
    # update deleted files.
    delete=False,
    # forced mode.
    forced=False,
    # version control.
    safe=False,
    # accept new hosts keys.
    accept_new_host_keys=True,
    # checks.
    checks=True,
    check_base=True,
    # log level.
    log_level=dev0s.defaults.options.log_level,
    # get the command in str.
    command=False,
    ):
    """Push the local [path] up to [alias:remote] with rsync.

    Returns the dev0s response of the rsync execution, or the rsync command
    string itself when command=True.
    """
    # BUG FIX: the original mutated `exclude` / `include` in place with +=,
    # which permanently grew the mutable default arguments across calls.
    if exclude != None: exclude = list(exclude) + EXCLUDE
    if include != None: include = list(include) + INCLUDE
    if checks:
        # check alias.
        path = serialize_path(gfp.clean(path))
        remote = serialize_path(gfp.clean(remote))
        #response = aliases.check(alias)
        #if not response.success: return response
        # check encryption activated.
        #if not ssht00ls_agent.activated:
        #	return dev0s.response.error(f"The {ssht00ls_agent.id} encryption requires to be activated.")
        # check passphrase.
        #if CONFIG["aliases"][alias]["smartcard"] in [True, "true", "True"]:
        #	response = encrypion.encryption.decrypt(CONFIG["aliases"][alias]["passphrase"])
        #else:
        #	response = encrypion.encryption.decrypt(CONFIG["aliases"][alias]["passphrase"])
        #if not response.success: return response
        #passphrase = response.decrypted.decode()
        # tests.
        #response = agent.add(private_key=CONFIG["aliases"][alias]["private_key"], passphrase=passphrase)
        #if not response["success"]: return response
        response = ssh.utils.test_ssht00ls(alias=alias, accept_new_host_keys=accept_new_host_keys)
        if not response.success: return response
        #response = ssh.utils.test_path(alias=alias, path=FilePath(remote).base(), accept_new_host_keys=accept_new_host_keys)
        #if not response.success: return response
        # dir: the local path must exist (and be a dir when directory=True).
        if directory == None: directory = os.path.isdir(path)
        elif directory and not os.path.isdir(path):
            return dev0s.response.error(f"Path {path} is not a directory.")
    # check remote base.
    # must be excluded from the checks == False.
    base = FilePath(remote).base(back=1)
    if check_base:
        response = ssh.utils.test_dir(alias=alias, path=base, accept_new_host_keys=accept_new_host_keys, create=True, checks=False)
        if not response.success: return response
        if response.created and log_level >= 3: print(f"Created remote directory {base}.")
    # options.
    exclude_str = Array(array=exclude).string(joiner=" --exclude ", sum_first=True)
    include_str = Array(array=include).string(joiner=" --include ", sum_first=True)
    delete_str = Boolean(delete).string(true="--delete", false="")
    lremote = serialize_path(gfp.clean(remote), append_last_slash=directory)
    lpath = serialize_path(gfp.clean(path), append_last_slash=directory)
    _command_ = f"rsync -{Boolean(directory).string(true='a', false='')}zqt '{lpath}' '{alias}:{lremote}' {exclude_str} {include_str} {delete_str} --timeout={SSH_TIMEOUT}"
    #_command_ = f"rsync -azqtr --rsh=ssh '{lpath}' '{alias}:{lremote}' {exclude_str} {include_str} {delete_str}"
    # execute.
    if command: return _command_
    else:
        return execute(
            command=_command_,
            loader=f"Pushing [{path}] to [{alias}:{remote}].",
            message=f"Successfully pushed [{path}] to [{alias}:{remote}].",
            log_level=log_level,
        )
#
# main.
if __name__ == "__main__":
	# Placeholder only: this module is meant to be imported, not executed.
	a=1
	#
| [
"vandenberghinc.contact@gmail.com"
] | vandenberghinc.contact@gmail.com |
a61c5986bdf09f0f3c5f9728359fa2757eb86c65 | a4ea4f97b25e8045ede8482bc81288c26ef0a279 | /scripts/checklexicon.py | f0fe2372401b8b1a8e7f8bc1fe7ecbf71179cecf | [] | no_license | MalinAhlberg/Fornis | 0b5409afd2f5a3631adc0cafdd9ae9040b505ac8 | 2b078e3a26003587fa4c51c0dc8cfc040500f22e | refs/heads/master | 2020-08-27T04:53:04.150176 | 2012-11-14T14:43:17 | 2012-11-14T14:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | ## check which lemgrams and sense-id that are duplicated
from lexiconer import *
def check(fil):
    """Report duplicated lemgrams and sense ids in the lexicon file *fil*.

    Prints "sense <id>" for every Sense whose id was already seen and
    "lemgram <id>" for every duplicated lemgram.

    BUG FIX: the original did not parse (a stray colon after sense.get('id')
    and an unterminated sense.set('id', call); the apparent intent of
    renaming duplicate senses was never completed and is left as a TODO.
    """
    entries, lexicon = readIt(fil)
    lex = lexicon.find('Lexicon')  # kept from the original; currently unused
    lems, senseids = [], []
    for i, entry in enumerate(entries):
        lemma = entry.find('Lemma')
        lemgram, _ = getAtt(lemma, 'lemgram')[0]
        for sense in entry.findall('Sense'):
            if sense is not None:
                sid = sense.get('id')
                if sid in senseids:
                    # TODO: assign a fresh unique id to the duplicate sense.
                    print('sense', sid)
                else:
                    senseids.append(sid)
        if lemgram in lems:
            print('lemgram', lemgram)
        else:
            lems.append(lemgram)
"ahlberg.malin@gmail.com"
] | ahlberg.malin@gmail.com |
3ce28df50431abb173399944f8b4ea38fb7a0ec0 | 1975118281f5a214bea5ab05ef955694fd0e82b6 | /env/Practica3/Practica3/urls.py | 78323e4bb2b75de8b16447cc0b980a5240484a87 | [] | no_license | gl3ncal3l/practica3 | ef015e1ffaa0f66bd7d17e288d817509aac2e0bd | d89c07b7bb12fe86c98b4782837a2196db475e4a | refs/heads/master | 2020-05-03T12:36:23.938522 | 2019-04-26T00:22:40 | 2019-04-26T00:22:40 | 178,630,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | """Practica3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, re_path, include
from django.contrib import admin
from Practica3 import settings
urlpatterns = [
    #re_path(r'^admin/', include(admin.site.urls)),
    # Route every URL to the `main` app's URLconf under the "main" namespace.
    re_path(r'^', include(('main.urls', 'main'), namespace='main')),
]
| [
"gl3ncal3l@gmail.com"
] | gl3ncal3l@gmail.com |
2c993aff9729e90d6f7afc4a8f63410904ee47cf | c054bbb1faafd561711cc31c225195e324a632c0 | /106. Construct Binary Tree from Inorder and Postorder Traversal.py | 62d1447d6aaf747771f87874ab29d1d9f2df8c4a | [] | no_license | sakusss/leetcode | d2be75ee611015b86bdd627f7bfef906a10701d0 | 070636a44d1b03de340ecade11f9af4ad797f9e7 | refs/heads/master | 2021-04-15T09:37:46.770322 | 2018-07-18T12:29:41 | 2018-07-18T12:29:41 | 126,825,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | class Solution:
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
if len(inorder) == 0:return None
if len(inorder) == 1: return TreeNode(inorder[0])
root = TreeNode(postorder[len(postorder)-1])
index = inorder.index(postorder[len(postorder)-1])
root.left =self.buildTree(inorder[:index],postorder[:index])
root.right = self.buildTree(inorder[index+1:len(inorder) ],postorder[index:len(postorder)-1])
return root
题意:根据二叉树的中序遍历和后序遍历恢复二叉树。
解题思路:看到树首先想到要用递归来解题。以这道题为例:如果一颗二叉树为{1,2,3,4,5,6,7},则中序遍历为{4,2,5,1,6,3,7},后序遍历为{4,5,2,6,7,3,1},我们可以反推回去。由于后序遍历的最后一个节点就是树的根。也就是root=1,然后我们在中序遍历中搜索1,可以看到中序遍历的第四个数是1,也就是root。根据中序遍历的定义,1左边的数{4,2,5}就是左子树的中序遍历,1右边的数{6,3,7}就是右子树的中序遍历。而对于后序遍历来讲,一定是先后序遍历完左子树,再后序遍历完右子树,最后遍历根。于是可以推出:{4,5,2}就是左子树的后序遍历,{6,3,7}就是右子树的后序遍历。而我们已经知道{4,2,5}就是左子树的中序遍历,{6,3,7}就是右子树的中序遍历。再进行递归就可以解决问题了。
,自然时间复杂度和空间复杂度也还是O(n)。代码如下:
这道题和Construct Binary Tree from Preorder and Inorder Traversal是树中难度比较大的题目了,有朋友可能会想根据先序遍历和后序遍历能不能重新构造出树来,答案是否定的。只有中序便利可以根据根的位置切开左右子树,其他两种遍历都不能做到,其实先序遍历和后序遍历是不能唯一确定一棵树的,会有歧义发生,也就是两棵不同的树可以有相同的先序遍历和后序遍历,有兴趣的朋友可以试试举出这种例子哈。
| [
"noreply@github.com"
] | sakusss.noreply@github.com |
3698b557bc7bea69e48a8d73d419c8616f8baa73 | c5f1b93ea560b38388f462965a026a2d1ba0ab0e | /120codes/sequential_search.py | 9fc20972d01220105053ced9c7454e98b82cee69 | [] | no_license | mdiksha/PYTHON | 76ab330bd953d3c78978f0600c53a729fb902246 | bd8317e3bf715695ec47273c22513475f17b4b0e | refs/heads/main | 2023-07-02T04:25:15.263673 | 2021-08-02T13:48:59 | 2021-08-02T13:48:59 | 390,958,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | def sequential_search(list_, n):
found = False
print("Number not present")
for i in list_:
if i == n:
print("Number is present")
found=True
break
return found
# Demo: search 0..49 for a value that is absent (prints False).
numbers = list(range(0, 50))
print(sequential_search(numbers, 100))
| [
"mdiksha0495@gmail.com"
] | mdiksha0495@gmail.com |
5424576c5087ceb1ed0020f93159ed8237a68b19 | 8f0449cf971a64b511bec6db9e8b6de286431454 | /back_dev/wsgi.py | 7781c1c8517c3e99be9a24382aa53ace8bfef399 | [] | no_license | SidhuAB/back_end_driven | d4b7a658f835c9a0b9795356eb9dd74dd392e44e | 5353e3b06af5dd8479b8860afef0de4cc158a5b2 | refs/heads/master | 2022-11-29T11:24:55.589660 | 2020-08-14T09:03:15 | 2020-08-14T09:03:15 | 287,211,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for back_dev project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'back_dev.settings')
application = get_wsgi_application()
| [
"abhishekbakolia1998@gmail.com"
] | abhishekbakolia1998@gmail.com |
579313e35659c369b388067a4ddda3075f422537 | 03328462771b82021537396e233fe1155901b48e | /plugin.video.movies.thietkeweb30s.org/resources/lib/utils/link_parser.py | 438dddbeee26f0c73fd22b0acc293514072d648f | [] | no_license | quangthylove/thietkeweb30s.org | 154007f59af92d7571dacca9b9abbd08f518251d | f31c8bc61761ed43a846ca4a32ea10d9407ca51e | refs/heads/master | 2023-05-26T07:54:09.007067 | 2023-05-24T15:30:43 | 2023-05-24T15:30:43 | 58,783,132 | 0 | 0 | null | 2017-02-23T09:02:14 | 2016-05-14T01:21:10 | null | UTF-8 | Python | false | false | 3,615 | py | # -*- coding: utf-8 -*-
import re
import HTMLParser
import json
import utils.xbmc_helper as helper
from utils.mozie_request import Request
from utils.fshare import FShare
from utils.pastebin import PasteBin
def rsl(s):
    """Normalize a quality label to a numeric resolution string.

    Known labels (HD, fullhd, large, ...) are rewritten to pixel heights and
    the first run of digits in the result is returned; inputs with no digits
    fall back to '240'.
    """
    # Ordered so longer labels are rewritten before their substrings
    # ('lowest' before 'low', and -- BUG FIX -- 'fullhd' before 'hd': the
    # original replaced 'hd' first, turning 'fullhd' into 'full720' and
    # resolving it to '720' instead of '1080').
    replacements = (
        ('HDG', ''),
        ('HD', '1080'),
        ('SD', '640'),
        ('large', '640'),
        ('lowest', '240'),
        ('low', '480'),
        ('fullhd', '1080'),
        ('hd', '720'),
        ('Auto', '640'),
        ('medium', '240'),
        ('mobile', '240'),
        ('AUTO', '640'),
    )
    text = str(s)
    for old, new in replacements:
        text = text.replace(old, new)
    result = re.search(r'(\d+)', text)
    if result:
        return result.group(1)
    else:
        return '240'
class LinkParser:
    """Resolve a hoster page URL (ok.ru, openload, fshare, fptplay, ...) to a
    playable stream URL plus a quality tag."""
    def __init__(self, url):
        self.url = url
    def get_link(self):
        """Dispatch on the hoster recognised in self.url.

        Returns a (stream_url, quality) tuple; quality is a resolution
        string, 'hls', or 'unknow' for unrecognised hosts.  The resolveurl
        branches may return None on failure.
        """
        print("Find link source of %s" % self.url)
        if re.search('ok.ru', self.url):
            # NOTE(review): str.replace returns a new string, so this line is
            # a no-op as written -- presumably meant
            # self.url = self.url.replace('?autoplay=1', ''); confirm.
            self.url.replace('?autoplay=1', '')
            return self.get_link_ok()
        if re.search('openload.co', self.url):
            return self.get_link_openload()
        if re.search('fshare.vn', self.url):
            return self.get_link_fshare()
        if re.search('dailymotion.com', self.url):
            return self.get_link_dailymotion()
        if self.url.endswith('m3u8') or re.search('hastebin', self.url) or re.search('dpaste', self.url):
            return self.get_m3u8()
        if re.search('fptplay.net', self.url):
            return self.get_fptplay()
        return self.url, 'unknow'
    def get_link_ok(self):
        """Parse ok.ru's data-options JSON and return the highest-resolution stream."""
        response = Request().get(self.url)
        m = re.search('data-options="(.+?)"', response)
        h = HTMLParser.HTMLParser()
        s = m.group(1)
        s = h.unescape(s)
        s = json.loads(s)
        s = json.loads(s['flashvars']['metadata'])
        items = [(i['url'], rsl(i['name'])) for i in s['videos']]
        # Sort by numeric resolution, best first, and return the winner.
        items = sorted(items, key=lambda elem: int(elem[1]), reverse=True)
        return items[0]
    def get_link_openload(self):
        """Resolve through the resolveurl add-on; None when resolution fails."""
        try:
            import resolveurl
            stream_url = resolveurl.resolve(self.url)
            return stream_url, '720'
        except:
            return None
    def get_link_dailymotion(self):
        """Resolve through the resolveurl add-on; None when resolution fails."""
        try:
            import resolveurl
            stream_url = resolveurl.resolve(self.url)
            return stream_url, '720'
        except:
            return None
    def get_link_fshare(self):
        """Resolve an fshare.vn link, using stored credentials when enabled."""
        if helper.getSetting('fshare.enable'):
            return FShare(
                self.url,
                helper.getSetting('fshare.username'),
                helper.getSetting('fshare.password')
            ).get_link(), '1080'
        else:
            return FShare(self.url).get_link(), '1080'
    def get_m3u8(self):
        """HLS playlists are passed through untouched."""
        return self.url, 'hls'
    def get_fptplay(self):
        """Rewrite an fptplay master playlist so every chunklist URI is
        absolute, re-hosting the rewritten playlist on dpaste (60s expiry)."""
        base_url = self.url.rpartition('/')[0]
        response = Request().get(self.url)
        matches = re.findall('(chunklist_.*)', response)
        for m in matches:
            stream_url = base_url + '/' + m
            response = response.replace(m, self.__get_fptplay_stream(stream_url, base_url))
        url = PasteBin().dpaste(response, name=self.url, expire=60)
        return url, '1080'
    def __get_fptplay_stream(self, url, base_url):
        """Absolutize the media_* entries of one chunklist and re-host it."""
        response = Request().get(url)
        matches = re.findall('(media_.*)', response)
        for m in matches:
            stream_url = base_url + '/' + m
            response = response.replace(m, stream_url)
        url = PasteBin().dpaste(response, name=url, expire=60)
        return url
| [
"noreply@github.com"
] | quangthylove.noreply@github.com |
fa3e535aaacaa3dafcb031f0333a8778da9d2e30 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/1531.py | 9246536e333923658ae3a6c8448940cb36f0d831 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | def check(x, mn, mx):
    # Collect every "recycled" rotation of x that is numerically larger than
    # x itself, then count those that fall inside [mn, mx].
    global pairs
    pcheck = []
    x = str(x)
    if len(x) == 1:
        # single digits have no distinct rotations
        pass
    if len(x) == 2:
        if x[0] != x[1]:
            first = x[::-1]
            if int(first) > int(x):
                pcheck.append(int(first))
    if len(x) == 3:
        # the two non-trivial rotations of a 3-digit number
        second = x[1:]+x[0]
        third = x[-1]+x[0:-1]
        if second != x and second[0] != '0' and int(second) > int(x):
            pcheck.append(int(second))
        if third != x and third[0] != '0' and int(third) > int(x):
            pcheck.append(int(third))
    for item in pcheck:
        if item >= mn and item <= mx:
            pairs += 1
def recycle(numbers):
    """Count recycled pairs for one "A B" test-case line and record the total."""
    global pairs
    pairs = 0
    bounds = numbers.split()
    low, high = int(bounds[0]), int(bounds[1])
    for value in range(low, high + 1):
        check(value, low, high)
    testcases.append(pairs)
# Python 2 driver: read each test-case line from the input file, count its
# recycled pairs, and write "Case #i: n" lines to outputC.txt.
testcases = []
pairs = 0
f = file('C-small-attempt2.in', 'r')
for line in f:
    if len(line.split()) > 1:
        recycle(line)
f.close()
f1 = file('outputC.txt', 'w')
for x in range(1, len(testcases)+1):
    f1.write("Case #"+str(x)+": "+str(testcases[x-1])+'\n')
f1.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e71ad5b0a6c1ae9d05f8cb8f5607e2b6ff54691a | 8a9e7744d7a3de3514cc45ee493572c4198c1a80 | /MatrixMaps.py | 0c51bd2ca6108b84d58c8a088c6c5d54d57a1239 | [] | no_license | cylab/APCequencer | 5f8df81ff5e27ddb1036601c53cbdc4775848fe1 | c57bb20558f452cca5dcdbc46ccc83ea670dc5f9 | refs/heads/master | 2021-01-12T07:55:25.410735 | 2016-12-31T10:44:05 | 2016-12-31T10:44:05 | 77,049,304 | 3 | 2 | null | 2016-12-21T12:22:59 | 2016-12-21T12:22:59 | null | UTF-8 | Python | false | false | 679 | py | FEEDBACK_CHANNELS = range(5, 15)
PAD_FEEDBACK_CHANNEL = FEEDBACK_CHANNELS[-1]
NON_FEEDBACK_CHANNEL = 15
PAD_TRANSLATIONS = (
(0, 0, 24, PAD_FEEDBACK_CHANNEL),
(1, 0, 25, PAD_FEEDBACK_CHANNEL),
(2, 0, 26, PAD_FEEDBACK_CHANNEL),
(3, 0, 27, PAD_FEEDBACK_CHANNEL),
(0, 1, 16, PAD_FEEDBACK_CHANNEL),
(1, 1, 17, PAD_FEEDBACK_CHANNEL),
(2, 1, 18, PAD_FEEDBACK_CHANNEL),
(3, 1, 19, PAD_FEEDBACK_CHANNEL),
(0, 2, 8, PAD_FEEDBACK_CHANNEL),
(1, 2, 9, PAD_FEEDBACK_CHANNEL),
(2, 2, 10, PAD_FEEDBACK_CHANNEL),
(3, 2, 11, PAD_FEEDBACK_CHANNEL),
(0, 3, 0, PAD_FEEDBACK_CHANNEL),
(1, 3, 1, PAD_FEEDBACK_CHANNEL),
(2, 3, 2, PAD_FEEDBACK_CHANNEL),
(3, 3, 3, PAD_FEEDBACK_CHANNEL))
| [
"will@willmarshall.me"
] | will@willmarshall.me |
30488140bbafd8c992e8f0a3ebaaf7280fad3cd7 | 6f5ffb519ea0e346b5b92b916a88e414b4566dfd | /Python/Q12.py | f15dcd8790497ad11c6e2e3d1c28dce78ec53a50 | [
"MIT"
] | permissive | ThoAppelsin/project-euler | 453b880cc8a6b1affcab5e7a43920fe70a323176 | ee7fcac9ec725c9eddfbccd60b47de65987d22b4 | refs/heads/master | 2021-05-11T16:08:35.852063 | 2018-01-17T13:05:24 | 2018-01-17T13:05:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from functools import reduce
factordicts = {1 : {}}
def factordict(num):
    """Return {prime: exponent} for *num*, memoized in `factordicts`."""
    if num not in factordicts:
        def split_on(candidate):
            # Past sqrt(num) no divisor remains, so num itself is prime.
            if candidate * candidate > num:
                return {num: 1}
            if num % candidate == 0:
                factors = factordict(num // candidate).copy()
                factors[candidate] = factors.get(candidate, 0) + 1
                return factors
            return split_on(candidate + 1)
        factordicts[num] = split_on(2)
    return factordicts[num]
def product(nums):
    """Multiply together every value in *nums* (1 for an empty iterable)."""
    return reduce(lambda acc, value: acc * value, nums, 1)
def divcount(fdict):
    """Number of divisors from a factorisation dict: product of (exponent + 1)."""
    return product([exponent + 1 for exponent in fdict.values()])
# Project Euler 12: first triangular number with more than `wanted` divisors.
# T(n) = n*(n+1)/2; n and n+1 share no prime factors, so T(n)'s factorisation
# is the merged factorisation of n and n+1 with a single factor of 2 removed.
wanted = 500
triindex = 1
while True:
    merged = dict(factordict(triindex))
    for prime, exponent in factordict(triindex + 1).items():
        merged[prime] = merged.get(prime, 0) + exponent
    merged[2] -= 1  # divide out the /2 in n*(n+1)/2 (one of n, n+1 is even)
    if divcount(merged) > wanted:
        ans = triindex * (triindex + 1) // 2
        break
    triindex += 1
print(ans)
"utkangezer@gmail.com"
] | utkangezer@gmail.com |
58f06f1dde50772714fdb17fd1ad7f34f000a3d2 | f21d54d57f931da26ea21fe0dc8060d213fd94ad | /example/custom_image/my_library.py | 9e4b2268435e036f3ff157ac51eec6c135cb7d3f | [
"MIT"
] | permissive | wwoods/dask-ssh-docker | 0fb59c330aee66fbee915d588ca177ae20a1de96 | 05237931bab59dd7d16d73fc13dcccb92b1e25ed | refs/heads/master | 2020-09-09T19:46:34.508396 | 2019-11-27T18:45:15 | 2019-11-27T18:45:15 | 221,548,533 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | def func(x):
return [i+1 for i in x]
| [
"waltw@galois.com"
] | waltw@galois.com |
9bd894fbe23ed6809095558802393c5e1ce40235 | be497e9cbfc18059af0b852709d4f026bf65b253 | /cover_page/forms.py | 9c351080d6d42a241a75563cb2fb92ab4379c580 | [
"MIT"
] | permissive | kenton1818/my_web | d776a0ceea2ded91fa235f84bceb8e32053734dd | 96f3cce2fd8d89b9e39e54af1cc789cd17224577 | refs/heads/master | 2020-04-28T01:04:05.297102 | 2019-03-13T12:33:40 | 2019-03-13T12:33:40 | 174,840,500 | 0 | 0 | null | 2019-03-10T15:29:27 | 2019-03-10T15:12:13 | null | UTF-8 | Python | false | false | 614 | py | from django import forms
class LoginForm(forms.Form):
    """Login form collecting a username and a masked password."""
    # Free-text username field (required is the CharField default, stated
    # explicitly here).
    usname = forms.CharField(label = 'username', required = True)
    # email = forms.EmailField(label = 'email',required=True)
    # Password input rendered with a masked widget.
    psw = forms.CharField(label = 'password', widget = forms.PasswordInput)
    # NOTE(review): the disabled clean() below reads cleaned_data['username']
    # and ['password'], but the fields are named 'usname' and 'psw' — it
    # would need renaming before being re-enabled.
    ''' def clean(self):
        username = self.cleaned_data['username']
        password = self.cleaned_data['password']
        user = authenticate(username = username , password = password)
        if user is None:
            raise forms.ValidationError('username or password not correct')
        else:
            self.cleaned_data['user'] = user'''
"31765235+kenton18@users.noreply.github.com"
] | 31765235+kenton18@users.noreply.github.com |
87732f2f62d632004a2020e6b6bc2aeeab6d4e49 | 21559592f6393593f1df9bb40075137fa94cdb75 | /train.py | ae62a799d007cd0ab4a8dd147245f4103d39add4 | [
"MIT"
] | permissive | rskmoi/landmark-retrieval-2020-with-pytorch | bf8907ffaa55ff418c6d2b42cbd98a1782b9dadb | 41917b1f588b5ad396cb1095867a0f042c611675 | refs/heads/master | 2022-12-08T12:45:08.680934 | 2020-09-08T05:17:09 | 2020-09-08T05:17:09 | 293,096,153 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | from pathlib import Path
import hydra
import torch
from omegaconf import DictConfig
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset.landmark_dataset import LandmarkDataset
from metric.loss import ArcFaceLoss
from model.model import arcface_model
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def train_one_epoch(loader: DataLoader,
                    model: torch.nn.Module,
                    optimizer: Optimizer,
                    criterion: torch.nn.Module,
                    epoch: int,
                    out: str):
    """
    Train 1 epoch.

    :param loader: Yields batches as dicts with 'image' and 'label' tensors.
    :param model: Network producing cosine logits for the ArcFace loss.
    :param optimizer: Optimizer stepping the model parameters.
    :param criterion: Loss applied to (cosine, labels).
    :param epoch: Current epoch index, used in checkpoint file names.
    :param out: Directory where checkpoints are written.
    """
    model.train()
    pbar = tqdm(loader, total=len(loader))
    for step, sample in enumerate(pbar):
        images, labels = sample['image'].type(torch.FloatTensor).to(DEVICE), \
                         sample['label'].type(torch.LongTensor).to(DEVICE)
        optimizer.zero_grad()
        cosine = model(images)
        loss = criterion(cosine, labels)
        loss.backward()
        optimizer.step()
        # .item() is the supported way to read a scalar loss; the previous
        # loss.data.cpu().numpy() relied on the deprecated .data attribute.
        pbar.set_postfix(loss=loss.item(), epoch=epoch)
        # Mid-epoch checkpoint every 5000 steps.
        if (step + 1) % 5000 == 0:
            torch.save(model.state_dict(), Path(out) / f"{epoch}epoch_{step}_step.pth")
    # Final checkpoint for the epoch.
    torch.save(model.state_dict(), Path(out) / f"{epoch}epoch_final_step.pth")
@hydra.main(config_path="config/config.yaml")
def train(cfg: DictConfig):
    """
    Entry point of training.
    :param cfg: Config of training, parsed by hydra.
    :return: None
    """
    out_dir = Path(cfg.path.output)
    if not out_dir.exists():
        out_dir.mkdir(parents=True)
    dataset = LandmarkDataset(batch_size=cfg.train.batch_size, mode="train")
    model: torch.nn.Module = arcface_model(num_classes=dataset.dataset.num_classes,
                                           backbone_model_name=cfg.model.name,
                                           head_name=cfg.model.head,
                                           extract_feature=False)
    # Place the model on the same device the batches are sent to inside
    # train_one_epoch; without this, training fails on CUDA machines unless
    # arcface_model already moved it (.to is a no-op in that case).
    model = model.to(DEVICE)
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.train.lr)
    criterion = ArcFaceLoss()
    for epoch in range(cfg.train.epochs):
        train_one_epoch(loader=dataset.get_loader(),
                        model=model,
                        optimizer=optimizer,
                        criterion=criterion,
                        epoch=epoch,
                        out=out_dir)
if __name__ == '__main__':
    # Hydra parses the CLI/config and then invokes train().
    train()
| [
"zn5ps5@gmail.com"
] | zn5ps5@gmail.com |
b4faa39e94757cc304633ff3ee828649bbc42322 | 8289fb4b9f2242a2f0a1b92b391058f421788682 | /Ros/Kumara_ws/kumara_xprize/kumara_xprize/script/serial/kumara_head_serial.py | 9750666c54eef676daced8fce1d47a92b2529b64 | [] | no_license | BawornsakS/XPrize | 5fedf7c7f0838b1da5c0749ef846ed7b2135356d | e6e900c6c63f0cef5cb3717a5b6724ec9ae00b69 | refs/heads/master | 2021-07-20T06:28:09.666385 | 2020-09-04T06:39:13 | 2020-09-04T06:39:13 | 211,299,370 | 3 | 3 | null | 2019-11-13T08:07:18 | 2019-09-27T10:56:38 | Makefile | UTF-8 | Python | false | false | 2,485 | py | #!/usr/bin/env python
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import rospy
from rosserial_python import SerialClient, RosSerialServer
from serial import SerialException
from time import sleep
import multiprocessing
import sys
if __name__=="__main__":
    # Bridge node forwarding rosserial traffic between ROS and the device,
    # either over a TCP socket or a serial port (default /dev/ttyACM0).
    rospy.init_node("kumara_head_serial")
    rospy.loginfo("ROS Serial Python Node")
    port_name = rospy.get_param('~port','/dev/ttyACM0')
    baud = int(rospy.get_param('~baud','512000'))
    # for systems where pyserial yields errors in the fcntl.ioctl(self.fd, TIOCMBIS, \
    # TIOCM_DTR_str) line, which causes an IOError, when using simulated port
    fix_pyserial_for_test = rospy.get_param('~fix_pyserial_for_test', False)
    # TODO: should these really be global?
    tcp_portnum = int(rospy.get_param('/rosserial_embeddedlinux/tcp_port', '11411'))
    fork_server = rospy.get_param('/rosserial_embeddedlinux/fork_server', False)
    # TODO: do we really want command line params in addition to parameter server params?
    # Command-line arguments override the parameter-server values above.
    sys.argv = rospy.myargv(argv=sys.argv)
    if len(sys.argv) >= 2 :
        port_name  = sys.argv[1]
    if len(sys.argv) == 3 :
        tcp_portnum = int(sys.argv[2])
    if port_name == "tcp" :
        # TCP mode: accept rosserial socket clients instead of a serial port.
        server = RosSerialServer(tcp_portnum, fork_server)
        rospy.loginfo("Waiting for socket connections on port %d" % tcp_portnum)
        try:
            server.listen()
        except KeyboardInterrupt:
            rospy.loginfo("got keyboard interrupt")
        finally:
            rospy.loginfo("Shutting down")
            # Reap any per-connection child processes before exiting.
            for process in multiprocessing.active_children():
                rospy.loginfo("Shutting down process %r", process)
                process.terminate()
                process.join()
            rospy.loginfo("All done")
    else :          # Use serial port
        # Serial mode: reconnect forever until ROS shuts down or Ctrl-C.
        while not rospy.is_shutdown():
            rospy.loginfo("Connecting to %s at %d baud" % (port_name,baud) )
            try:
                client = SerialClient(port_name, baud, fix_pyserial_for_test=fix_pyserial_for_test)
                client.run()
            except KeyboardInterrupt:
                break
            except SerialException:
                # Device unplugged or busy: pause briefly, then retry.
                sleep(1.0)
                continue
            except OSError:
                sleep(1.0)
                continue
            except:
                # NOTE(review): bare except — logs and retries on any other
                # error; client may be unset if the constructor itself failed.
                rospy.logwarn("Unexpected Error.%s", sys.exc_info()[0])
                client.port.close()
                sleep(1.0)
                continue
| [
"44831071+aminballoon@users.noreply.github.com"
] | 44831071+aminballoon@users.noreply.github.com |
f14a7289069f811ad09e62b6e0e0941c562b28dd | b69a800af16afef6514698bff280866e0ef69065 | /Stats.py | be99111092fc0a869abdc7f87e218a823854f7f4 | [] | no_license | MatteoLusso/Python-Analizzatore-Campioni | ad70bddf720e2418f8cd866905b22d9f92bd4269 | 8e084c23309b0f13075ea9477f085039e1fc3678 | refs/heads/master | 2023-07-24T10:02:18.201762 | 2021-09-04T12:40:02 | 2021-09-04T12:40:02 | 390,394,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,815 | py | import statistics
import Graphs as grp
import KDE as kde
import Reader as rdr
import pickle as pk
import numpy as np
import Serializer as sr
#import MultiKDE as mkde
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
class Set():
    """A named collection of measurement samples (one TXT file each) that is
    loaded, analysed and plotted as a unit.

    `real` is the true value (in metres) shared by every sample of the set;
    `distances` and `alphas` describe the acquisition geometry and are used
    as plot axes by generateSetGraphs().
    """
    # NOTE(review): filesNames=[] is a mutable default argument shared across
    # calls; pass an explicit list (or switch to a None sentinel) to be safe.
    def __init__( self, setName = "None", filesNames = [], filesPath = "", savesPath = "", separator = "\n", real = None, distances = None, alphas = None, sameDist = None, sameAlpha = None ):
        self.setName = setName
        self.names = filesNames
        self.path = filesPath
        self.savesPath = savesPath
        self.separator = separator
        self.real = real
        self.dist = distances
        self.alpha = alphas
        self.sameDist = sameDist
        self.sameAlpha = sameAlpha
        # Populated later by generateSetStats() / generateSamplesGraphs().
        self.samplesData = []
        self.samplesAnalysis = []
        self.loadGraphs = True
        self.sampleGraphs = []
        self.setChi2 = []
        self.setErr = []
        self.setErrPer = []
        self.setErrAr = []
        self.setErrRelative = []
    #--------------------------------------------------------------------------------------#
    def updateSamplesStatsWithProb( self ):
        """Evaluate each sample's KDE-based PDF at its means (and at the real
        value when known) and persist the updated statistics."""
        for i in range ( len( self.samplesAnalysis ) ):
            sampleKDEX = self.sampleGraphs[i].kdeX
            samplePDFY = self.sampleGraphs[i].pdfY
            self.samplesAnalysis[i].ProbMean = linearInterpolation( self.samplesAnalysis[i].mean, sampleKDEX, samplePDFY )
            self.samplesAnalysis[i].ProbMeanAr = linearInterpolation( self.samplesAnalysis[i].meanAr, sampleKDEX, samplePDFY )
            if self.samplesAnalysis[i].real is not None:
                self.samplesAnalysis[i].ProbReal = linearInterpolation( self.samplesAnalysis[i].real, sampleKDEX, samplePDFY )
            self.samplesAnalysis[i].saveStats()
    #--------------------------------------------------------------------------------------#
    def areMeansUnderstimated( self, near ):
        """Per-sample flags: True where the sample mean underestimates the
        real value. (`near` is accepted for call symmetry but unused.)"""
        temp = []
        for sample in self.samplesAnalysis:
            if sample.mean < self.real:
                temp.append(True)
            else:
                temp.append(False)
        return temp
    #--------------------------------------------------------------------------------------#
    def areMeansNearReal( self, near ):
        """Per-sample flags: True where |mean - real| <= near (metres)."""
        temp = []
        for sample in self.samplesAnalysis:
            if sample.mean >= ( self.real - near ) and sample.mean <= ( self.real + near ):
                temp.append(True)
            else:
                temp.append(False)
        return temp
    #--------------------------------------------------------------------------------------#
    def isRealInRange( self ):
        """Per-sample flags: True where the real value lies within the
        sample's [min, max] measurement range."""
        temp = []
        for sample in self.samplesAnalysis:
            if self.real >= sample.min and self.real <= sample.max:
                temp.append(True)
            else:
                temp.append(False)
        return temp
    #--------------------------------------------------------------------------------------#
    def printSetAnalysis( self, outputFolder, near = 0.0, show = False ):
        """Write (and optionally print) a per-set summary table answering,
        for each sample: is the real value inside the measured range, is the
        mean within `near` metres of it, and is the mean an underestimate.

        NOTE(review): the table and percentages hard-code exactly 9 samples.
        """
        isInRange = self.isRealInRange()
        areNear = self.areMeansNearReal( near )
        areUnder = self.areMeansUnderstimated( near )
        perInRange = 0
        perNear = 0
        perUnder = 0
        # Build " Sì "/" No " cells while counting the positive answers.
        inRangeString = []
        for varBool in isInRange:
            if varBool is True:
                perInRange += 1
                inRangeString.append(" Sì ")
            else:
                inRangeString.append(" No ")
        nearString = []
        for varBool in areNear:
            if varBool is True:
                perNear += 1
                nearString.append(" Sì ")
            else:
                nearString.append(" No ")
        underString = []
        for varBool in areUnder:
            if varBool is True:
                perUnder += 1
                underString.append(" Sì ")
            else:
                underString.append(" No ")
        txt = []
        # (A previous box-drawing variant of this table was kept here as
        # commented-out code; removed for readability.)
        txt.append( " | SET " + str( self.setName ) + " - Campione                                                          | " )
        txt.append( " ------------------------------------------------------------------------- " )
        txt.append( " |   1   |   2   |   3   |   4   |   5   |   6   |   7   |   8   |   9   | " )
        txt.append( " |-------|-------|-------|-------|-------|-------|-------|-------|-------| " )
        txt.append( " |" + str(inRangeString[0]) + "|" + str(inRangeString[1]) + "|" + str(inRangeString[2]) + "|" + str(inRangeString[3]) + "|" + str(inRangeString[4]) + "|" + str(inRangeString[5]) + "|" + str(inRangeString[6]) + "|" + str(inRangeString[7]) + "|" + str(inRangeString[8]) + "| Il valore reale è compreso tra la minima e massima misurazione?" )
        txt.append( " |-------|-------|-------|-------|-------|-------|-------|-------|-------| " )
        txt.append( " |" + str(nearString[0]) + "|" + str(nearString[1]) + "|" + str(nearString[2]) + "|" + str(nearString[3]) + "|" + str(nearString[4]) + "|" + str(nearString[5]) + "|" + str(nearString[6]) + "|" + str(nearString[7]) + "|" + str(nearString[8]) + "| La media è vicina al valore reale?" )
        txt.append( " |-------|-------|-------|-------|-------|-------|-------|-------|-------| " )
        txt.append( " |" + str(underString[0]) + "|" + str(underString[1]) + "|" + str(underString[2]) + "|" + str(underString[3]) + "|" + str(underString[4]) + "|" + str(underString[5]) + "|" + str(underString[6]) + "|" + str(underString[7]) + "|" + str(underString[8]) + "| La media è inferiore al valore reale?" )
        txt.append( "\n")
        txt.append( "Risultato:")
        txt.append( " - Il {}""%"" delle volte il valore reale di questo set ({}m) è compreso tra la minima e massima misurazione dei singoli campioni.".format( round( ( perInRange / 9 ) * 100, 2 ), self.real ) )
        txt.append( " - Il {}""%"" delle medie campionarie del set hanno un errore assoluto in metri minore di {}m.".format( round( ( perNear / 9 ) * 100, 2 ), near ) )
        txt.append( " - Il {}""%"" delle medie campionarie del set sono state sottostimata rispetto al valore valore reale di questo set ({}m).".format( round( ( perUnder / 9 ) * 100, 2 ), self.real ) )
        outputFile = open( outputFolder + "\\SET_" + self.setName + "_STATS.txt", "w" )
        for line in txt:
            if show is True:
                print( line )
            outputFile.write(line + "\n")
        outputFile.close()
    #--------------------------------------------------------------------------------------#
    def generateSamplesGraphs( self, histStep = 0.001, showHist = True, showAsciiHist = False, loadHist = True, kdeBinsNumber = 50, kdeCoeffBw = 0, kdeShow = True, loadKDE = True, outputImagesFolder = ""):
        """Build histogram and KDE graphs for every sample, then refresh the
        PDF-based statistics via updateSamplesStatsWithProb().

        Requires generateSetStats() to have been run first so that
        samplesData and samplesAnalysis are aligned.
        """
        if self.samplesData is not None and self.samplesAnalysis is not None and len( self.samplesAnalysis ) == len( self.samplesData ):
            for i in range( len( self.samplesData ) ):
                self.sampleGraphs.append(grp.GraphsInfo(self.names[i], self.samplesData[i].dataFinal, histStep, loadHist, kdeBinsNumber, kdeCoeffBw, loadKDE, self.savesPath))
                self.sampleGraphs[i].generateHistogram()
                self.sampleGraphs[i].generateKDE()
                self.sampleGraphs[i].displayHistogram(self.real, self.samplesAnalysis[i].mean, self.samplesAnalysis[i].meanAr, "default", outputImagesFolder, showHist)
                self.sampleGraphs[i].displayKDE("default", outputImagesFolder, kdeShow)
                self.sampleGraphs[i].displayKDE("pdf", outputImagesFolder, kdeShow)
            self.updateSamplesStatsWithProb()
    #--------------------------------------------------------------------------------------#
    def generateSetStats( self, load = True ):
        """Read every sample file, compute its statistics and collect the
        per-sample chi2 and mean errors at set level.

        The error-percentage list rescales each |error| onto [0, 100] of the
        largest |error| in the set. NOTE(review): the `load` parameter is
        currently unused (the cached-load branch is commented out upstream).
        """
        i = 0
        if self.names is not None:
            print( "\n*** Generazione dati statistici set " + str( self.setName ) + " ***" )
            for name in self.names:
                self.samplesData.append( rdr.TXT( name, self.path, self.separator ) )
                self.samplesAnalysis.append( Sample( self.samplesData[ i ], self.savesPath, self.real) )
                self.samplesAnalysis[i].calculateStatistics()
                self.setChi2.append( self.samplesAnalysis[i].chi2 )
                self.setErr.append( self.samplesAnalysis[i].errMean )
                self.setErrAr.append( self.samplesAnalysis[i].errMeanAr )
                self.setErrRelative.append( self.samplesAnalysis[i].errMeanRelative )
                i += 1
            for err in self.setErr:
                self.setErrPer.append( equivalentRange( self.real + np.abs( err ), [self.real, self.real + max( np.abs( self.setErr ) )], [0, 100] ))
            print( self.setErrPer )
    #--------------------------------------------------------------------------------------#
    def generateSetGraphs( self, show = True, outputFolder = "" ):
        """Produce every set-level plot: errors vs distance/alpha/chi2 and
        the 3D error/chi2 surfaces over (alpha, distance)."""
        print( "*** Generazione grafici set " + str( self.setName ) + " ***" )
        grp.displayErrDist( setX = self.dist, setY = self.setErr, name = self.setName, show = show, outputFolder = outputFolder, same = self.sameDist )
        grp.displayErrArDist( setX = self.dist, setY = self.setErrAr, name = self.setName, show = show, outputFolder = outputFolder, same = self.sameDist )
        grp.displayErrAlpha( setX = self.alpha, setY = self.setErr, name = self.setName, show = show, outputFolder = outputFolder, same = self.sameAlpha )
        grp.displayErrArAlpha( setX = self.alpha, setY = self.setErrAr, name = self.setName, show = show, outputFolder = outputFolder, same = self.sameAlpha )
        grp.displayErrChi2( setX = self.setChi2, setY = self.setErr, name = self.setName, show = show, outputFolder = outputFolder )
        grp.displayErrArChi2( setX = self.setChi2, setY = self.setErrAr, name = self.setName, show = show, outputFolder = outputFolder )
        grp.displayChi2Dist( setX = self.dist, setY = self.setChi2, name = self.setName, show = show, outputFolder = outputFolder )
        grp.displayChi2Alpha( setX = self.alpha, setY = self.setChi2, name = self.setName, show = show, outputFolder = outputFolder )
        grp.display3DErr( setX = self.alpha, setY = self.dist, setZ = self.setErr, setName = "SET_" + self.setName, show = show, outputFolder = outputFolder, zLabel = "Errore media campionaria (abs) [m]", fileDescription = "[Errore 3D]" )
        grp.display3DErr( setX = self.alpha, setY = self.dist, setZ = self.setErrAr, setName = "SET_" + self.setName, show = show, outputFolder = outputFolder, zLabel = "Errore media armonica (abs) [m]", fileDescription = "[Errore armonica 3D]" )
        grp.display3DErr( setX = self.alpha, setY = self.dist, setZ = self.setChi2, setName = "SET_" + self.setName, show = show, outputFolder = outputFolder, zLabel = "Risultato test chi2", fileDescription = "[Chi2 3D]", limXPer = 0.1, limYPer = 0.1, limZPer = 0.01)
        grp.display3DErr( setX = self.alpha, setY = self.dist, setZ = self.setErrPer, setName = "SET_" + self.setName, show = show, outputFolder = outputFolder, zLabel = "Errore percentuale media campionaria rispetto massimo valore modulo errore", fileDescription = "[Errore percentuale 3D]" )
        grp.display3DErr( setX = self.alpha, setY = self.dist, setZ = [errRel * 100 for errRel in self.setErrRelative], setName = "SET_" + self.setName, show = show, outputFolder = outputFolder, zLabel = "Errore relativo media campionaria rispetto all'altezza " + str( round( self.real, 3 ) ) + " m [%]", fileDescription = "[Errore relativo 3D]" )
    #--------------------------------------------------------------------------------------#
    def printAllSet( self, outputPath, show = True ):
        """Write the per-sample statistics report of every analysed sample."""
        if self.samplesAnalysis is not None:
            for analysis in self.samplesAnalysis:
                analysis.printAnalysis( outputPath, show )
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
class Sample(): # Analyses the measurements of a single sample file
    """Descriptive statistics of one sample of distance measurements,
    compared against an optional known real value."""
    def chi2Gauss ( self ):
        """Chi-squared goodness-of-fit of the data against a normal
        distribution, using four bins split at mean-std / mean / mean+std
        with expected probabilities 0.16 / 0.34 / 0.34 / 0.16."""
        obs = [ 0, 0, 0, 0 ]
        prob = [ 0.16, 0.34, 0.34, 0.16 ]
        for mis in self.data:
            if mis < ( self.mean - self.std ):
                obs[ 0 ] = obs[ 0 ] + 1
            elif ( self.mean - self.std ) <= mis and mis < self.mean:
                obs[ 1 ] = obs[ 1 ] + 1
            elif self.mean <= mis and mis < ( self.mean + self.std ):
                obs[ 2 ] = obs[ 2 ] + 1
            else:
                obs[ 3 ] = obs[ 3 ] + 1
        chi2 = 0.0
        # Pearson statistic: sum of (observed - expected)^2 / expected.
        for k in range(4):
            chi2 += pow( obs[k] - ( self.n * prob[ k ] ), 2) / ( self.n * prob[ k ] )
        return chi2
    #--------------------------------------------------------------------------------------#
    def calculateStatistics( self, load = True ):
        """Compute (or reload from the serializer cache) all statistics.

        When `load` is True and a cached "<name>_STATS_" entry exists, the
        values are restored in the exact order saveStats() wrote them;
        otherwise everything is recomputed from self.data.
        """
        if self.data is not None:
            if load is True and sr.loadVariable( self.name + "_STATS_", True ) is not None:
                # Cached path: indices must stay in sync with saveStats().
                toLoad = sr.loadVariable( self.name + "_STATS_")
                self.n = toLoad[0]
                self.median = toLoad[1]
                self.mode = toLoad[2]
                self.mean = toLoad[3]
                self.meanAr = toLoad[4]
                self.var = toLoad[5]
                self.varAr = toLoad[6]
                self.std = toLoad[7]
                self.stdAr = toLoad[8]
                self.range = toLoad[9]
                self.coeff = toLoad[10]
                self.chi2 = toLoad[11]
                self.ProbMean = toLoad[12]
                self.ProbMeanAr = toLoad[13]
                self.varReal = toLoad[14]
                self.stdReal = toLoad[15]
                self.ProbReal = toLoad[16]
                self.errMean = toLoad[17]
                self.errMeanAr = toLoad[18]
                self.errMeanRelative = toLoad[19]
                self.min = toLoad[20]
                self.max = toLoad[21]
            else:
                # Fresh computation from the raw measurements.
                self.n = len( self.data )
                self.median = statistics.median( self.data )
                self.mode = statistics.mode(self.data)
                self.mean = momentOrR( self.data, 1 )
                self.meanAr = armonicMean( self.data )
                self.var = momentCenR( self.data, self.mean, 2 )
                self.varAr = momentCenR( self.data, self.meanAr, 2 )
                self.std = np.sqrt( self.var )
                self.stdAr = np.sqrt( self.varAr )
                self.min = min( self.data )
                self.max = max( self.data )
                self.range = self.max - self.min
                self.coeff = self.std / self.mean
                self.chi2 = self.chi2Gauss()
                # PDF-based probabilities are filled in later, once the KDE
                # exists (Set.updateSamplesStatsWithProb).
                self.ProbMean = None
                self.ProbMeanAr = None
                if self.real is not None:
                    self.varReal = momentCenR( self.data, self.real, 2 )
                    self.stdReal = np.sqrt( self.varReal )
                    self.ProbReal = None
                    self.errMean = self.mean - self.real
                    self.errMeanAr = self.meanAr - self.real
                    self.errMeanRelative = abs( self.errMean ) / self.real
    #--------------------------------------------------------------------------------------#
    def printAnalysis( self, outputFolder, show = True ):
        """Write the sample's statistics to "<name>STATS.txt" in
        `outputFolder`, optionally echoing each line to stdout."""
        txt = []
        txt.append( " Campione                          | " + str( self.name ) )
        txt.append( " Misurazioni                       | " + str( self.n ) )
        txt.append( "\n" )
        txt.append( " Moda (camp.)                      | " + str( self.mode ) )
        txt.append( " Mediana (camp.)                   | " + str( self.median ) )
        txt.append( " Media (camp.)                     | " + str( self.mean ) )
        txt.append( " PDF Media (camp.)                 | {:.2f}".format( self.ProbMean ) + "%")
        txt.append( " Media armonica (camp.)            | " + str( self.meanAr ) )
        txt.append( " PDF Media armonica (camp.)        | {:.2f}".format( self.ProbMeanAr ) + "%")
        if self.real is not None:
            # Real-value comparisons only make sense when `real` is known.
            txt.append( " Altezza reale                     | " + str( self.real ) )
            txt.append( " PDF altezza reale                 | {:.2f}".format( self.ProbReal ) + "%")
            txt.append( "\n" )
            txt.append( " Errore (media) :                  | " + str( self.errMean ) + " m" )
            txt.append( " Errore relativo (media) :         | " + str( self.errMeanRelative * 100 ) + "%" )
            txt.append( "\n" )
            txt.append( " Errore (media armonica) :         | " + str( self.errMeanAr ) + " m" )
            txt.append( "\n" )
            txt.append( " Varianza (Val. reale)             | " + str( self.varReal ) )
            txt.append( " Deviazione standard (Val. reale)  | " + str( self.stdReal ) )
        txt.append( "\n" )
        txt.append( " Varianza (Media camp.)            | " + str( self.var ) )
        txt.append( " Deviazione standard (Media camp.) | " + str( self.std ) )
        txt.append( "\n" )
        txt.append( " Varianza (Media arm.)             | " + str( self.varAr ) )
        txt.append( " Deviazione standard (Media arm.)  | " + str( self.stdAr ) )
        txt.append( "\n" )
        txt.append( " Ampiezza campo di variazione      | " + str( self.range ) + " m" )
        txt.append( " Coefficiente di variazione        | " + str( self.coeff ) )
        txt.append( "\n" )
        txt.append( " Test Chi^2                        | " + str( self.chi2 ) )
        outputFile = open( outputFolder + "\\" + self.name + "STATS.txt", "w" )
        for line in txt:
            if show is True:
                print( line )
            outputFile.write(line + "\n")
        outputFile.close()
    #--------------------------------------------------------------------------------------#
    def __init__( self, data, savesPath = "", real = None):
        # `data` is a reader object exposing nameFile and dataFinal.
        self.name = data.nameFile
        self.data = data.dataFinal
        self.savesPath = savesPath
        self.n = 0
        self.median = 0.0
        self.mode = 0.0
        self.real = real
        self.mean = 0.0
        self.meanAr = 0.0
        self.var = 0.0
        self.varAr = 0.0
        self.varReal = 0.0
        self.std = 0.0
        self.stdAr = 0.0
        self.stdReal = 0.0
        self.range = 0.0
        self.coeff = 0.0
        self.chi2 = 0.0
        self.ProbReal = 0.0
        self.ProbMean = 0.0
        self.ProbMeanAr = 0.0
        self.errMean = 0.0
        self.errMeanAr = 0.0
        # NOTE(review): varReal and varAr are re-initialised a second time
        # below; harmless but redundant.
        self.varReal = 0.0
        self.varAr = 0.0
        self.errMeanRelative = 0.0
        self.min = 0.0
        self.max = 0.0
    #--------------------------------------------------------------------------------------#
    def saveStats( self ):
        """Serialize all computed statistics; calculateStatistics() reloads
        them by position, so the list order here is a contract."""
        toSave = [self.n, self.median, self.mode, self.mean, self.meanAr, self.var, self.varAr, self.std, self.stdAr, self.range, self.coeff, self.chi2, self.ProbMean, self.ProbMeanAr, self.varReal, self.stdReal, self.ProbReal, self.errMean, self.errMeanAr, self.errMeanRelative, self.min, self.max]
        sr.saveVariable(self.name + "_STATS_", toSave, self.savesPath)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
def armonicMean( inputVariables ):
    """Harmonic mean: n divided by the sum of the reciprocals of the values."""
    reciprocal_total = sum(1 / value for value in inputVariables)
    return len( inputVariables ) / reciprocal_total
#--------------------------------------------------------------------------------------#
def momentOrR( inputVariables, inputR ): # ordinary moment (about the origin) of order r
    """Mean of the values raised to the r-th power (r=1 gives the mean)."""
    return sum(value ** inputR for value in inputVariables) / len( inputVariables )
#--------------------------------------------------------------------------------------#
def momentCenR( inputVariables, inputEV, inputR ): # central moment of order r
    """Mean of the r-th powers of the deviations from inputEV
    (r=2 with the mean gives the population variance)."""
    return sum((value - inputEV) ** inputR for value in inputVariables) / len( inputVariables )
def linearInterpolation ( x, xArray, yArray ):
    """Piecewise-linear interpolation of y(x) over the sample points
    (xArray, yArray). Assumes xArray is sorted ascending — TODO confirm.

    Returns 0.0 when x falls outside [min(xArray), max(xArray)].
    """
    if x < min( xArray ) or x > max( xArray ):
        return 0.0
    # Find the first grid point at or beyond x.
    i = 0
    while x > xArray[ i ]:
        i += 1
    # Exact grid hit: return the tabulated value directly. This fixes two
    # defects of the previous version: at i == 0 it indexed xArray[-1] /
    # yArray[-1] (wrap-around that only worked by algebraic coincidence),
    # and for a single-point array that wrap-around made the segment width
    # zero, raising ZeroDivisionError.
    if x == xArray[ i ]:
        return yArray[ i ]
    x1 = xArray[ i - 1 ]
    x2 = xArray[ i ]
    y1 = yArray[ i - 1 ]
    y2 = yArray[ i ]
    dX = ( x2 - x1 )
    dY = ( y2 - y1 )
    dXx = ( x - x1 )
    return y1 + ( ( dXx / dX ) * dY )
#--------------------------------------------------------------------------------------#
def equivalentRange ( x, rangeA, rangeB ):
    """Linearly map x from interval rangeA onto interval rangeB."""
    spanB = rangeB[1] - rangeB[0]
    spanA = rangeA[1] - rangeA[0]
    return rangeB[0] + spanB * ( x - rangeA[0] ) / spanA
#--------------------------------------------------------------------------------------#
| [
"matteo.lusso.unica@gmail.com"
] | matteo.lusso.unica@gmail.com |
14d31c408580cc856757c09018f09571518ff2db | e231faae5d375db058cc563fb385d9cd486148dd | /core/tests/queue_test.py | ddbcf85fc734eeb2a25194dfaca1c99dd6342b83 | [] | no_license | Smarsh/norc | 0cea9cf2ed9860d4ef2d587a6fb173ca4f02c591 | c3608c97f6d6c292689e3d693c2a3e211df24138 | refs/heads/master | 2020-12-24T22:06:09.765846 | 2013-04-05T18:12:00 | 2013-04-05T18:12:00 | 9,247,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py |
from django.test import TestCase
from norc.core.models import DBQueue, QueueGroup, QueueGroupItem, Instance
from norc.norc_utils import wait_until
from norc.norc_utils.testing import *
class DBQueueTest(TestCase):
    """Push/peek/pop smoke test for the DBQueue model."""
    def setUp(self):
        # One empty queue plus one queueable Instance per test.
        self.queue = DBQueue.objects.create(name='test')
        self.item = make_instance()
    def test_push_peek_pop(self):
        # peek() must return the item without removing it; pop() removes it.
        self.queue.push(self.item)
        self.assertEqual(self.queue.peek(), self.item)
        self.assertEqual(self.queue.pop(), self.item)
    def test_invalid(self):
        # Pushing a non-queueable object (a queue itself) must be rejected.
        self.assertRaises(AssertionError, lambda: self.queue.push(self.queue))
    def tearDown(self):
        pass
class QueueGroupTest(TestCase):
    """Tests and demonstrates the usage of QueueGroups."""
    def setUp(self):
        # Three DB queues attached to one group with priorities 1 (highest)
        # through 3, plus a shared task to make instances from.
        self.group = g = QueueGroup.objects.create(name='TestGroup')
        self.q1 = DBQueue.objects.create(name="Q1")
        self.q2 = DBQueue.objects.create(name="Q2")
        self.q3 = DBQueue.objects.create(name="Q3")
        QueueGroupItem.objects.create(group=g, queue=self.q1, priority=1)
        QueueGroupItem.objects.create(group=g, queue=self.q2, priority=2)
        QueueGroupItem.objects.create(group=g, queue=self.q3, priority=3)
        self.task = make_task()
    def new_instance(self):
        # Helper: a fresh Instance of the shared task, ready to enqueue.
        return Instance.objects.create(task=self.task)
    def test_push_peek_pop(self):
        """Test that all three queues work."""
        item = self.new_instance()
        self.q1.push(item)
        self.assertEqual(self.group.peek(), item)
        self.assertEqual(self.group.pop(), item)
        self.q2.push(item)
        self.assertEqual(self.group.peek(), item)
        self.assertEqual(self.group.pop(), item)
        self.q3.push(item)
        self.assertEqual(self.group.peek(), item)
        self.assertEqual(self.group.pop(), item)
    def test_priority(self):
        """Test that things get popped in priority order."""
        p1 = [self.new_instance() for _ in range(10)]
        p2 = [self.new_instance() for _ in range(10)]
        p3 = [self.new_instance() for _ in range(10)]
        for i in p3: self.q3.push(i)
        for i in p2: self.q2.push(i)
        for i in p1: self.q1.push(i)
        popped = [self.group.pop() for _ in range(30)]
        # Lower priority number wins: all of q1 first, then q2, then q3.
        self.assertEqual(popped, p1 + p2 + p3)
    def test_no_push(self):
        """Test that pushing to a QueueGroup fails."""
        self.assertRaises(NotImplementedError, lambda: self.group.push(None))
    def tearDown(self):
        pass
| [
"elnaril@gmail.com"
] | elnaril@gmail.com |
18a5f4f2b9cb10d1124b430d45dd8a188bb934f8 | d08258132b59630b6e1973ddc86d3907ceeccffc | /chat/views.py | 49cf828c4486f6158397b13dc31084e80c2d17b2 | [
"MIT"
] | permissive | sushil11-art/E-Health-Care | a9ea6cdaa5f7ec5fd0cb6900aa73744e3aa5bd8f | 5b546f2e5606d049a86058907fb8e90435df8839 | refs/heads/master | 2023-06-18T17:28:41.006774 | 2021-07-18T11:43:28 | 2021-07-18T11:43:28 | 307,590,539 | 0 | 0 | MIT | 2021-03-08T09:15:01 | 2020-10-27T05:03:11 | JavaScript | UTF-8 | Python | false | false | 3,946 | py | from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.http.response import JsonResponse, HttpResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from chat.models import Message, UserProfile
from chat.serializers import MessageSerializer, UserSerializer
def index(request):
    """Login page: GET renders the form, POST authenticates the user.

    Already-authenticated users are redirected straight to the chat page.
    """
    if request.user.is_authenticated:
        return redirect('chats')
    if request.method == 'GET':
        return render(request, 'chat/index.html', {})
    if request.method == "POST":
        username, password = request.POST['username'], request.POST['password']
        user = authenticate(username=username, password=password)
        print(user)  # NOTE(review): debug print left in; prefer logging
        if user is not None:
            login(request, user)
        else:
            # NOTE(review): hand-built JSON in a plain 200 HttpResponse (no
            # JSON content type); consider JsonResponse with a 401 status.
            return HttpResponse('{"error": "User does not exist"}')
        return redirect('chats')
@csrf_exempt
def user_list(request, pk=None):
"""
List all required messages, or create a new message.
"""
if request.method == 'GET':
if pk:
users = User.objects.filter(id=pk)
else:
# users = User.objects.all()
if request.user.groups.filter(name="PATIENT").exists():
users = User.objects.filter(groups__name="DOCTOR")
print(users)
else:
users = User.objects.filter(groups__name="PATIENT")
serializer = UserSerializer(users, many=True, context={'request': request})
return JsonResponse(serializer.data, safe=False)
elif request.method == 'POST':
data = JSONParser().parse(request)
try:
user = User.objects.create_user(username=data['username'], password=data['password'])
UserProfile.objects.create(user=user)
return JsonResponse(data, status=201)
except Exception:
return JsonResponse({'error': "Something went wrong"}, status=400)
@csrf_exempt
def message_list(request, sender=None, receiver=None):
"""
List all required messages, or create a new message.
"""
if request.method == 'GET':
messages = Message.objects.filter(sender_id=sender, receiver_id=receiver, is_read=False)
serializer = MessageSerializer(messages, many=True, context={'request': request})
for message in messages:
message.is_read = True
message.save()
return JsonResponse(serializer.data, safe=False)
elif request.method == 'POST':
data = JSONParser().parse(request)
serializer = MessageSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data, status=201)
return JsonResponse(serializer.errors, status=400)
def register_view(request):
"""
Render registration template
"""
if request.user.is_authenticated:
return redirect('chats')
return render(request, 'chat/register.html', {})
def chat_view(request):
if not request.user.is_authenticated:
return redirect('index')
if request.method == "GET":
return render(request, 'chat/chat.html',
{'users': User.objects.exclude(username=request.user.username)})
def message_view(request, sender, receiver):
if not request.user.is_authenticated:
return redirect('index')
if request.method == "GET":
return render(request, "chat/messages.html",
{'users': User.objects.exclude(username=request.user.username),
'receiver': User.objects.get(id=receiver),
'messages': Message.objects.filter(sender_id=sender, receiver_id=receiver) |
Message.objects.filter(sender_id=receiver, receiver_id=sender)})
| [
"thapapradip542@gmail.com"
] | thapapradip542@gmail.com |
1d23a160753c363ac20d98f4b01c0504fea76660 | c36177d78e12177758925385f61979eb4866e690 | /summer2018-a01-XinSu902/mapper.py | 6128a8f58e30bee75e46fe26b09282e2a70112fb | [] | no_license | XinSu902/Big-Data | afb182b264515284b42f1f5e8df9f942e53b56b7 | c1f6f5780fa532503696c53f7420dd4b8b287d7c | refs/heads/master | 2020-04-02T01:50:00.558165 | 2018-10-20T06:46:40 | 2018-10-20T06:46:40 | 153,877,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | #!/usr/bin/env python
#
# This file has been provided as a starting point. You need to modify this file.
# Reads whole lines stdin; writes key/value pairs to stdout
# --- DO NOT MODIFY ANYTHING ABOVE THIS LINE ---
import sys
if __name__ == "__main__":
for line in sys.stdin:
for word in line.split():
sys.stdout.write("{}\t1\n".format(word))
| [
"noreply@github.com"
] | XinSu902.noreply@github.com |
954f4db93cab052b5db457d58fe907df47966546 | 048a38ac3692a8970a11d792702db9be6752cb84 | /2+2+2.py | 242cd7e91f998ce3f38331e1a8d538465a0c2781 | [] | no_license | erkitammeaid/school-exercises | bb9960473bc6eae8ccd40c82607d6093315cbda4 | 553e774ec92a8c3eb6c41de424857df2281bf354 | refs/heads/master | 2022-04-14T05:44:27.606165 | 2020-04-17T12:24:47 | 2020-04-17T12:24:47 | 209,001,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | n = input("sisesta üks arv 1st - 9ni: ")
nr1 = n
nr2= n+n
nr3 = n+n+n
nr_total = int(nr1) + int(nr2) + int(nr3)
print("n + nn + nnn = " + str(nr_total)) | [
"noreply@github.com"
] | erkitammeaid.noreply@github.com |
d44c38b442f4ea146ddaa3080d9f72ed2c617fa8 | 025c1cc826722d558d43854f3e319917e199d609 | /infra_macros/macro_lib/convert/container_image/compiler/compiler.py | 6cfddefcce8817aa2d14a9321c253ab846c2820a | [
"BSD-3-Clause"
] | permissive | martarozek/buckit | 73440be29a2ce64084016fc395a5a8cc9bc1e602 | 343cc5a5964c1d43902b6a77868652adaefa0caa | refs/heads/master | 2020-03-26T06:07:35.468491 | 2018-08-12T17:34:04 | 2018-08-12T17:45:46 | 144,590,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,345 | py | #!/usr/bin/env python3
'''
This is normally invoked by the `image_layer` Buck macro converter.
This compiler builds a btrfs subvolume in
<--subvolumes-dir>/<--subvolume-name>:<subvolume-version>
To do so, it parses `--child-feature-json` and the `--child-dependencies`
that referred therein, creates `ImageItems`, sorts them in dependency order,
and invokes `.build()` to apply each item to actually construct the subvol.
'''
import argparse
import itertools
import os
import subprocess
import sys
from subvol_utils import Subvol
from .dep_graph import dependency_order_items
from .items import gen_parent_layer_items
from .items_for_features import gen_items_for_features
from .subvolume_on_disk import SubvolumeOnDisk
# At the moment, the target names emitted by `image_feature` targets seem to
# be normalized the same way as those provided to us by `image_layer`. If
# this were to ever change, this would be a good place to re-normalize them.
def make_target_filename_map(targets_followed_by_filenames):
'Buck query_targets_and_outputs gives us `//target path/to/target/out`'
if len(targets_followed_by_filenames) % 2 != 0:
raise RuntimeError(
f'Odd-length --child-dependencies {targets_followed_by_filenames}'
)
it = iter(targets_followed_by_filenames)
d = dict(zip(it, it))
# A hacky check to ensures that the target corresponds to the path. We
# can remove this if we absolutely trust the Buck output.
if not all(
t.replace('//', '/').replace(':', '/') in f for t, f in d.items()
):
raise RuntimeError(f'Not every target matches its output: {d}')
return d
def parse_args(args):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--subvolumes-dir', required=True,
help='A directory on a btrfs volume to store the compiled subvolume '
'representing the new layer',
)
parser.add_argument(
'--subvolume-name', required=True,
help='The first part of the subvolume directory name',
)
parser.add_argument(
'--subvolume-version', required=True,
help='The second part of the subvolume directory name',
)
parser.add_argument(
'--parent-layer-json',
help='Path to the JSON output of the parent `image_layer` target',
)
parser.add_argument(
'--child-layer-target', required=True,
help='The name of the Buck target describing the layer being built',
)
parser.add_argument(
'--child-feature-json', required=True,
help='The path of the JSON output of the `image_feature` that was '
'auto-generated for the layer being built',
)
parser.add_argument(
'--child-dependencies',
nargs=argparse.REMAINDER, metavar=['TARGET', 'PATH'], default=(),
help='Consumes the remaining arguments on the command-line, with '
'arguments at positions 1, 3, 5, 7, ... used as Buck target names '
'(to be matched with the targets in per-feature JSON outputs). '
'The argument immediately following each target name must be a '
'path to the output of that target on disk.',
)
return parser.parse_args(args)
def build_image(args):
subvol = Subvol(os.path.join(
args.subvolumes_dir,
f'{args.subvolume_name}:{args.subvolume_version}',
))
for item in dependency_order_items(
itertools.chain(
gen_parent_layer_items(
args.child_layer_target,
args.parent_layer_json,
args.subvolumes_dir,
),
gen_items_for_features(
[args.child_feature_json],
make_target_filename_map(args.child_dependencies),
),
)
):
item.build(subvol)
try:
return SubvolumeOnDisk.from_subvolume_path(
subvol.path().decode(),
args.subvolumes_dir,
args.subvolume_name,
args.subvolume_version,
)
except Exception as ex:
raise RuntimeError(f'Serializing subvolume {subvol.path()}') from ex
if __name__ == '__main__': # pragma: no cover
build_image(parse_args(sys.argv[1:])).to_json_file(sys.stdout)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
3de5136de3696c4e49370d8ef3420a67e721f6b3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_149/82.py | 0c7ae5570829193e068ba4471f6d4bad7fb4b56a | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | #!/usr/bin/env python
import itertools
def solve(a):
ans=0
while a:
x=min(a)
i=a.index(x)
del a[i]
ans+=min(i,len(a)-i)
return ans
for t in xrange(1,1+int(raw_input())):
n=int(raw_input())
a=map(int,raw_input().split())
ans=solve(a)
print"Case #%d:"%t,
print ans
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e9ae21fffa00128cbdf44f2fb9ed719514c6b395 | 0b99d29b1c9fd8256b52866e338228291f2273ff | /csvcorrector.py | 0eb854af9df49c1c0eb715ce5d457517bb59cd26 | [] | no_license | ad1lkhan/TwitchCommentScrape | b4baeb53eb3153b2602b89b59b594b4c2476e3bf | 592b23177a9736963853df42e7cda5721e6536b0 | refs/heads/master | 2020-03-22T15:42:35.414112 | 2018-07-20T14:43:54 | 2018-07-20T14:43:54 | 140,271,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | import glob, os
import sys
import csv
dirToVideos = sys.argv[1]
os.chdir(dirToVideos)
if not os.path.exists(os.path.dirname('altered/')):
os.makedirs(os.path.dirname('altered/'))
def find_nth(s, x, n):
i = -1
for _ in range(n):
i = s.find(x, i + len(x))
if i == -1:
break
return i
for file in glob.glob("*.csv"):
with open(file,'r') as csvinput:
with open("altered/"+file, 'w+') as csvoutput:
writer = csv.writer(csvoutput, lineterminator='\n')
reader = csv.reader(csvinput)
all = []
if(file.count('_')>4):
n = find_nth(file,'_',2)
first_part = file[:n]
last_part = file[n+1:]
file = first_part + last_part
vid,name,game,comments=file.split("_")
row = next(reader)
row.append('name')
row.append('game')
all.append(row)
for row in reader:
row.append(name)
row.append(game)
all.append(row)
writer.writerows(all)
print("Completed: "+vid)
| [
"piratemuffin@hotmail.co.uk"
] | piratemuffin@hotmail.co.uk |
8cfbe05b9aeb068e2eea79df986d823110f9c019 | 899bac17acf97252a33d91af076ff1f16b975210 | /eduiddashboard/scripts/decode_session.py | 787b570f274e635d56156b7461f3b6945ebfead4 | [] | no_license | isabella232/eduid-dashboard | 91a209f7833f26a7949cecec60df2e501a82f840 | 99cffaa90f41b13ec34f9d057f19630c644df6ee | refs/heads/master | 2023-03-16T05:19:36.184687 | 2018-07-27T12:27:53 | 2018-07-27T12:27:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # -*- coding: utf-8 -*-
import sys
import pprint
from pyramid.paster import bootstrap
from eduid_common.session.session import SessionManager
import logging
logger = logging.getLogger(__name__)
__author__ = 'ft'
"""
Read and decode a session from Redis. Supply the token (id starting with lower-case 'a')
from an existing session.
"""
default_config_file = '/opt/eduid/eduid-dashboard/etc/eduid-dashboard.ini'
def main(token):
env = bootstrap(default_config_file)
settings = env['request'].registry.settings
secret = settings.get('session.secret')
manager = SessionManager(cfg = settings, ttl = 3600, secret = secret)
session = manager.get_session(token = token)
print('Session: {}'.format(session))
print('Data:\n{}'.format(pprint.pformat(dict(session))))
return True
if __name__ == '__main__':
try:
if len(sys.argv) != 2:
print('Syntax: decode_session.py aTOKEN')
sys.exit(1)
res = main(sys.argv[1])
if res:
sys.exit(0)
sys.exit(1)
except KeyboardInterrupt:
pass
| [
"fredrik@thulin.net"
] | fredrik@thulin.net |
d65943f13d875ed1bf01ff0e0dce8c81a948776d | aecfe34460b8aad03f356a80f46aa299b09b755a | /Entrega2_DefinirAnalizadorLexico/Codigo Funete/AnalizadorLexico.py | b1ce692b4d37db99c62d99c8dab95410a86f5045 | [] | no_license | JorgeBesnier/LenguajesTraductores | 6536ed1a004946acd797c691db45a37f84a50b6f | 739d46efaf1bea1d7055d1c9ba9be7705eb84d62 | refs/heads/master | 2023-01-21T07:03:17.698599 | 2020-11-23T18:46:06 | 2020-11-23T18:46:06 | 296,751,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | import ply.lex as lex
import ply.yacc as yacc
import sys
tokens = [
'HOLA',
'COMA',
'QUE',
'TAL'
]
t_ignore = r' '
def t_HOLA(t):
r'hola'
t.type = 'HOLA'
print("HOLA")
return t
def t_COMA(t):
r','
t.type = 'COMA'
print(",")
return t
def t_QUE(t):
r'que'
t.type = 'QUE'
print("QUE")
return t
def t_TAL(t):
r'tal'
t.type = 'TAL'
print("TAL")
return t
def t_error(t):
print("Illegal characters!")
t.lexer.skip(1)
lexer = lex.lex()
def p_S(p):
'''
S : HOLA X
| HOLA Y
| HOLA
'''
print("\tCORRECTO")
def p_X(p):
'''
X : COMA S
|
'''
def p_Y(p):
'''
Y : QUE TAL
'''
def p_error(p):
print("\tINCORRECTO")
parser = yacc.yacc()
while True:
try:
s = input('')
except EOFError:
break
parser.parse(s)
| [
"noreply@github.com"
] | JorgeBesnier.noreply@github.com |
27246c4bd9d20b95b4470062d75988deefab139e | 32dd999a869fcbf5f996d29efd8be2ce9adff9c7 | /OnTime/forms.py | 9dd1931efda36de1fdf6254787b14bf025707ae7 | [] | no_license | aamathur02/MBTA_OnTime | 089c674f6ffb0ac7021109f5ad1b6dfec4ca7271 | 42af8b249f392780fb79366a36522d35bf85bbc7 | refs/heads/master | 2022-12-20T00:17:16.480082 | 2020-10-05T20:25:37 | 2020-10-05T20:25:37 | 290,897,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | from django import forms
from .models import TripInput,Starting_Stop, Ending_Stop
class TripInputForm(forms.ModelForm):
class Meta:
model = TripInput
fields = {'name', 'line', 'starting_stop', 'ending_stop', 'starting_time', 'ending_time'}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['starting_stop'].queryset = Starting_Stop.objects.none()
self.fields['ending_stop'].queryset = Ending_Stop.objects.none()
if 'line' in self.data:
try:
line_id = int(self.data.get('line'))
self.fields['starting_stop'].queryset = Starting_Stop.objects.filter(line_id=line_id).order_by('name')
self.fields['ending_stop'].queryset = Ending_Stop.objects.filter(line_id=line_id).order_by('name')
except (ValueError, TypeError):
pass # invalid input from the client; ignore and fallback to empty City queryset
elif self.instance.pk:
self.fields['starting_stop'].queryset = self.instance.line.starting_stop_set.order_by('name')
self.fields['ending_stop'].queryset = self.instance.line.ending_stop_set.order_by('name') | [
"aayushmathur@lawn-128-61-15-237.lawn.gatech.edu"
] | aayushmathur@lawn-128-61-15-237.lawn.gatech.edu |
030ded5953721581dcd9189e21169ab41d77f904 | 7fb63a526d4e21b25f67601c65b460ed081169de | /main.py | 6bdd5c2e5a2383f2c2e9e70ffd178b3210240f1b | [
"MIT"
] | permissive | ramondalmau/atcenv | 511426f0611c78aacf630383698689b8c667f489 | 9725ebf9aaf5367aa66973d5604e3eb576f44d5b | refs/heads/main | 2023-04-16T10:42:52.549127 | 2022-02-28T10:51:48 | 2022-02-28T10:51:48 | 456,930,114 | 5 | 7 | null | 2022-02-09T12:09:27 | 2022-02-08T12:49:02 | Python | UTF-8 | Python | false | false | 1,192 | py | """
Example
"""
if __name__ == "__main__":
import random
random.seed(42)
from jsonargparse import ArgumentParser, ActionConfigFile
from atcenv import Environment
import time
from tqdm import tqdm
parser = ArgumentParser(
prog='Conflict resolution environment',
description='Basic conflict resolution environment for training policies with reinforcement learning',
print_config='--print_config',
parser_mode='yaml'
)
parser.add_argument('--episodes', type=int, default=1)
parser.add_argument('--config', action=ActionConfigFile)
parser.add_class_arguments(Environment, 'env')
# parse arguments
args = parser.parse_args()
# init environment
env = Environment(**vars(args.env))
# run episodes
for e in tqdm(range(args.episodes)):
# reset environment
obs = env.reset()
# set done status to false
done = False
# execute one episode
while not done:
# perform step with dummy action
obs, rew, done, info = env.step([])
env.render()
time.sleep(0.05)
# close rendering
env.close()
| [
"ramon.dalmau-codina@eurocontrol.int"
] | ramon.dalmau-codina@eurocontrol.int |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.