blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9bcd33a91d4146f6fd25fb08d24604e3dcc54c6 | 49091095f5a603b5b71dda5e3d8ca326d6ca3cee | /catalog/migrations/0008_productparameter_product.py | 032a07cab77622701267597967c62c201741009a | [] | no_license | akhmad-john/viessmann | 790804dc41d0b50f03bd1d3589784243a26be1cf | 2ae23f6a472174cc5d2bfd2a0bee23157f082b11 | refs/heads/master | 2022-12-28T03:12:48.830919 | 2020-10-10T11:28:48 | 2020-10-10T11:28:48 | 302,362,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # Generated by Django 3.1.2 on 2020-10-07 07:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add a ForeignKey ``product`` to ProductParameter."""

    # Must be applied after 0007, whose schema this migration builds on.
    dependencies = [
        ('catalog', '0007_auto_20201007_1227'),
    ]

    operations = [
        migrations.AddField(
            model_name='productparameter',
            name='product',
            # default=None with preserve_default=False: the default is used only
            # to populate existing rows during the migration, then discarded.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='catalog.product'),
            preserve_default=False,
        ),
    ]
| [
"shansudinov_a@domain.artelgroup.org"
] | shansudinov_a@domain.artelgroup.org |
a3c5e1837cf34a200cf1242deeb65b071638745a | b1dd6135a84ad80301b8c85731ba277f6da4d44d | /todo/testapp/admin.py | 45e9691ba5f74e03b8f1b98bae679672012aad1e | [] | no_license | chiragkaushik/TodoList | 018c7bcfbaa255dc293d8e070a665178bd6e907e | 12e0ec1293eadb29e21201a46b3a7c3f354bcd54 | refs/heads/master | 2022-11-17T20:45:16.837171 | 2020-07-15T01:53:31 | 2020-07-15T01:53:31 | 279,262,953 | 0 | 0 | null | 2020-07-13T11:21:43 | 2020-07-13T09:51:01 | Python | UTF-8 | Python | false | false | 227 | py | from django.contrib import admin
from . import models

# Register your models here.
# Expose each model in the Django admin site with the default ModelAdmin.
# NOTE(review): `models.user` has a lowercase class name — presumably a custom
# user model; confirm against testapp/models.py.
admin.site.register(models.user)
admin.site.register(models.TodoList)
admin.site.register(models.Access)
admin.site.register(models.TodoItem)
| [
"chirag@Chirags-MacBook-Pro.local"
] | chirag@Chirags-MacBook-Pro.local |
7fd0208b9639daf5011460a29c186bc7c01996ad | 64159d10f8db2f6a0719de2493d970e1c2624efc | /split.py | 0ecbaeda6f61ee8d08a8074d9953d8736e4664e0 | [] | no_license | AlexisK/ws_dsua_2017 | 50521b3bb1122325a788a1b759499fab1cd1c5eb | a91017b42097561c25389be21b7250d5650ee65f | refs/heads/master | 2021-08-19T12:49:03.547930 | 2017-11-26T11:12:30 | 2017-11-26T11:12:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | import sys
import codecs
import math
class WrongFileLength(Exception):
    """Raised when the two corpus files do not contain the same number of lines."""
def file_len(data_path):
    """Count the number of lines in a UTF-8 text file.

    Parameters
    ----------
    data_path : str
        Path of the file to measure.

    Returns
    -------
    int
        Number of lines in the file (0 for an empty file).
    """
    with codecs.open(data_path, encoding='utf-8', mode='r') as fname:
        # Lazy count: never holds the whole file in memory.
        count = sum(1 for _ in fname)
    # Bug fix: the original read the loop variable after the loop, which
    # raised NameError on an empty file; an empty file now returns 0.
    print('File %s length: %s' % (data_path, str(count)))
    return count
def run(data_path1, data_path2, train_percent):
    """Split an aligned input/output corpus into train/devel files.

    The first ``trunc(total * train_percent)`` sentences of each file go to
    ``train.inp`` / ``train.out``; the remainder go to ``devel.inp`` /
    ``devel.out``. All four files are written to the current directory.

    Parameters
    ----------
    data_path1 : str
        Path to the source-side ("input") corpus file.
    data_path2 : str
        Path to the target-side ("output") corpus file.
    train_percent : float
        Fraction (0..1) of sentences placed in the training split.

    Raises
    ------
    WrongFileLength
        If the two corpus files do not have the same number of lines.
    """
    total1 = file_len(data_path1)
    total2 = file_len(data_path2)
    if total1 != total2:
        raise WrongFileLength('Input and output files must have the same length!')

    train_count = math.trunc(total1 * train_percent)
    print('Number of items for training %s' % str(train_count))

    print('Splitting input file.')
    _split_corpus_file(data_path1, 'train.inp', 'devel.inp', train_count)
    print('Splitting output file.')
    _split_corpus_file(data_path2, 'train.out', 'devel.out', train_count)


def _split_corpus_file(src_path, train_path, devel_path, train_count):
    """Copy the first train_count lines of src_path into train_path and the
    rest into devel_path, reporting progress on stdout."""
    # Bug fix: the original never closed any of the six file handles; the
    # context managers guarantee the outputs are flushed and closed.
    with codecs.open(src_path, encoding='utf-8', mode='r') as src, \
         codecs.open(train_path, encoding='utf-8', mode='w') as train_out, \
         codecs.open(devel_path, encoding='utf-8', mode='w') as devel_out:
        for sent_num, line in enumerate(src):
            target = train_out if sent_num < train_count else devel_out
            target.write(line)
            sys.stdout.write("\rSentence number %s." % sent_num)
            sys.stdout.flush()
if __name__ == '__main__':
path1 = str(sys.argv[1])
path2 = str(sys.argv[2])
train_percent = float(sys.argv[3])
run(path1, path2, train_percent) | [
"korobov.alex@gmail.com"
] | korobov.alex@gmail.com |
7a81a710368d8388719fd9da8283fa4d6989e5c2 | d13ee6238418d047f9fe6ddbd5525fd0487d4233 | /hc/front/tests/test_channels.py | 1007a821f8abff8784f2b2d318f195c0357cf4d7 | [
"BSD-3-Clause"
] | permissive | iphoting/healthchecks | b4ffb7cd2a254c1a8daa490608ff4d5a96c560da | 924fc7df60dbf97b82a1f82989507459802f7028 | refs/heads/heroku | 2022-03-06T08:32:11.626016 | 2019-10-07T14:37:20 | 2022-02-19T09:37:57 | 82,822,882 | 11 | 7 | BSD-3-Clause | 2021-09-28T07:59:39 | 2017-02-22T15:51:02 | Python | UTF-8 | Python | false | false | 6,130 | py | import json
from hc.api.models import Channel
from hc.test import BaseTestCase
class ChannelsTestCase(BaseTestCase):
    """View tests for the channels (integrations) list page.

    Each test creates a Channel of a given kind, logs a user in, fetches
    the channels page and asserts on rendered fragments.
    NOTE(review): `self.project`, `self.channels_url`, `self.bobs_membership`
    and the alice/bob accounts are presumably fixtures provided by
    hc.test.BaseTestCase — confirm there.
    """

    def test_it_formats_complex_slack_value(self):
        # Slack stores the raw OAuth response; the page should surface the
        # team name and target channel from inside it.
        ch = Channel(kind="slack", project=self.project)
        ch.value = json.dumps(
            {
                "ok": True,
                "team_name": "foo-team",
                "incoming_webhook": {"url": "http://example.org", "channel": "#bar"},
            }
        )
        ch.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertContains(r, "foo-team", status_code=200)
        self.assertContains(r, "#bar")

    def test_it_shows_webhook_post_data(self):
        # Webhook details (URLs and request body) are rendered in a modal.
        ch = Channel(kind="webhook", project=self.project)
        ch.value = json.dumps(
            {
                "method_down": "POST",
                "url_down": "http://down.example.com",
                "body_down": "foobar",
                "headers_down": {},
                "method_up": "GET",
                "url_up": "http://up.example.com",
                "body_up": "",
                "headers_up": {},
            }
        )
        ch.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertEqual(r.status_code, 200)

        # These are inside a modal:
        self.assertContains(r, "http://down.example.com")
        self.assertContains(r, "http://up.example.com")
        self.assertContains(r, "foobar")

    def test_it_shows_pushover_details(self):
        # Pushover value format is "<key>|<priority>"; 0 renders as "normal".
        ch = Channel(kind="po", project=self.project)
        ch.value = "fake-key|0"
        ch.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertEqual(r.status_code, 200)

        self.assertContains(r, "(normal priority)")

    def test_it_shows_unconfirmed_email(self):
        # A bare (non-JSON) email value means the address is not yet verified.
        channel = Channel(project=self.project, kind="email")
        channel.value = "alice@example.org"
        channel.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertEqual(r.status_code, 200)
        self.assertContains(r, "Unconfirmed")

    def test_it_shows_down_only_note_for_email(self):
        channel = Channel(project=self.project, kind="email")
        channel.value = json.dumps(
            {"value": "alice@example.org", "up": False, "down": True}
        )
        channel.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertEqual(r.status_code, 200)
        self.assertContains(r, "(down only)")

    def test_it_shows_up_only_note_for_email(self):
        channel = Channel(project=self.project, kind="email")
        channel.value = json.dumps(
            {"value": "alice@example.org", "up": True, "down": False}
        )
        channel.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertEqual(r.status_code, 200)
        self.assertContains(r, "(up only)")

    def test_it_shows_sms_number(self):
        ch = Channel(kind="sms", project=self.project)
        ch.value = json.dumps({"value": "+123"})
        ch.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertEqual(r.status_code, 200)

        self.assertContains(r, "SMS to +123")

    def test_it_shows_channel_issues_indicator(self):
        # A channel with last_error set should trigger the warning indicator.
        Channel.objects.create(kind="sms", project=self.project, last_error="x")

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertContains(r, "broken-channels", status_code=200)

    def test_it_hides_actions_from_readonly_users(self):
        # Role "r" = read-only member: no add/delete/edit controls rendered.
        self.bobs_membership.role = "r"
        self.bobs_membership.save()

        Channel.objects.create(project=self.project, kind="webhook", value="{}")

        self.client.login(username="bob@example.org", password="password")
        r = self.client.get(self.channels_url)

        self.assertNotContains(r, "Add Integration", status_code=200)
        self.assertNotContains(r, "ic-delete")
        self.assertNotContains(r, "edit_webhook")

    def test_it_shows_down_only_note_for_sms(self):
        channel = Channel(project=self.project, kind="sms")
        channel.value = json.dumps({"value": "+123123123", "up": False, "down": True})
        channel.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertEqual(r.status_code, 200)
        self.assertContains(r, "(down only)")

    def test_it_shows_up_only_note_for_sms(self):
        channel = Channel(project=self.project, kind="sms")
        channel.value = json.dumps({"value": "+123123123", "up": True, "down": False})
        channel.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertEqual(r.status_code, 200)
        self.assertContains(r, "(up only)")

    def test_it_shows_disabled_note(self):
        # Disabled channels get a red "label-danger" badge.
        ch = Channel(kind="slack", project=self.project)
        ch.value = "https://example.org"
        ch.disabled = True
        ch.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertContains(r, "label-danger", status_code=200)

    def test_it_shows_fix_button_for_disabled_email(self):
        # Disabled email channels additionally offer a "Fix…" action.
        ch = Channel(kind="email", project=self.project)
        ch.value = "bob@example.org"
        ch.disabled = True
        ch.save()

        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.channels_url)
        self.assertContains(r, "Fix…", status_code=200)
| [
"cuu508@gmail.com"
] | cuu508@gmail.com |
dd204fb7755f02a4ca68858415abff22df84df26 | 1b48a489647fc11352c5f44ab25d1cc32df17499 | /bot/migrations.py | 6b1dd3ec9c614effb2996fa32b588d1876b35166 | [] | no_license | mircearoata/TimetableDiscordBot | 120c6982a29f7745563ba273b04aa0315f4425b3 | 0416e316ffa36d8ae7bcb723c985660fd968d965 | refs/heads/master | 2023-08-22T00:15:08.170776 | 2021-10-20T07:48:40 | 2021-10-20T07:48:40 | 320,065,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | def no_notify_migration(configs):
for guild_id, config in configs.items():
courses = config.get('courses', [])
for day in courses:
for timeSlotCourses in courses[day]:
for course in timeSlotCourses:
if 'noNotify' not in course:
course['noNotify'] = False
config.save('courses', courses)
def migrations(configs):
no_notify_migration(configs) | [
"mircearoatapalade@yahoo.ro"
] | mircearoatapalade@yahoo.ro |
6eacf0ad7871a5a01c4b7178181798f3abaf6a48 | 1536a6cabaa9b6429603d57df6705a14063e016a | /src/networkx/algorithms/isomorphism/vf2weighted.py | a3142485aa277bd6563c933fd41e6917b9d77885 | [] | no_license | lorenzoriano/Graph-Evolve | ac7da40f7b27afdca7d929ae3e10308874929f1c | 11b6f43beaa88254776da959da7bcd9c4cd21eba | refs/heads/master | 2020-05-02T20:10:38.282396 | 2011-12-16T11:01:45 | 2011-12-16T11:01:45 | 1,967,115 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,794 | py | """
VF2 implementations for weighted graphs.
"""
import networkx as nx
from networkx.algorithms.isomorphism.isomorphvf2 \
import GraphMatcher,DiGraphMatcher,GMState,DiGMState
__all__ = ['WeightedGraphMatcher',
'WeightedDiGraphMatcher',
'WeightedMultiGraphMatcher',
'WeightedMultiDiGraphMatcher']
## VF2 is a recursive algorithm, so the call/lookup overhead is already high.
## Each implementation needs to be as fast as possible.
##
## Within the semantic feasibility function, we provide local variables
## Also, we don't want the function checking if the graph is a multigraph
## or if it is directed each time it is called. So we provide separate
## implementations.
def close(x, y, rtol, atol):
    """Return True when x and y differ by at most ``atol + rtol * |y|``.

    Parameters
    ----------
    rtol
        The relative tolerance.
    atol
        The absolute tolerance.

    Notes
    -----
    Assumes finite weights; the comparison is asymmetric in y.
    """
    tolerance = atol + rtol * abs(y)
    return abs(x - y) <= tolerance
class WeightedGraphMatcher(GraphMatcher):
    """VF2 matcher for undirected graphs whose edges carry a 'weight'."""

    def __init__(self, G1, G2, rtol=1e-6, atol=1e-9):
        """Initialize WeightedGraphMatcher.

        Parameters
        ----------
        G1, G2 : nx.Graph instances
            G1 and G2 must be weighted graphs.
        rtol : float, optional
            The relative tolerance used to compare weights.
        atol : float, optional
            The absolute tolerance used to compare weights.
        """
        self.rtol = rtol
        self.atol = atol
        GraphMatcher.__init__(self, G1, G2)

    def semantic_feasibility(self, G1_node, G2_node):
        """Returns True if mapping G1_node to G2_node is semantically feasible."""
        rtol, atol = self.rtol, self.atol
        adj1 = self.G1.adj
        adj2 = self.G2.adj
        mapping = self.core_1
        for nbr in adj1[G1_node]:
            # A self-loop compares against G2_node's own loop; an
            # already-mapped neighbor compares against its image. Unmapped
            # neighbors are skipped (syntactic check handles symmetry).
            if nbr is G1_node:
                partner = G2_node
            elif nbr in mapping:
                partner = mapping[nbr]
            else:
                continue
            w1 = adj1[G1_node][nbr].get('weight', 1)
            w2 = adj2[G2_node][partner].get('weight', 1)
            if not close(w1, w2, rtol, atol):
                return False
        return True
class WeightedDiGraphMatcher(DiGraphMatcher):
    """VF2 matcher for directed graphs whose edges carry a 'weight'."""

    def __init__(self, G1, G2, rtol=1e-6, atol=1e-9):
        """Initialize WeightedDiGraphMatcher.

        Parameters
        ----------
        G1, G2 : nx.DiGraph instances
            G1 and G2 must be weighted graphs.
        rtol : float, optional
            The relative tolerance used to compare weights.
        atol : float, optional
            The absolute tolerance used to compare weights.
        """
        self.rtol = rtol
        self.atol = atol
        DiGraphMatcher.__init__(self, G1, G2)

    def semantic_feasibility(self, G1_node, G2_node):
        """Returns True if mapping G1_node to G2_node is semantically feasible."""
        rtol, atol = self.rtol, self.atol
        mapping = self.core_1
        # Check outgoing edges first, then incoming edges, with one loop body.
        directions = ((self.G1.succ, self.G2.succ), (self.G1.pred, self.G2.pred))
        for adj1, adj2 in directions:
            for other in adj1[G1_node]:
                if other is G1_node:
                    partner = G2_node
                elif other in mapping:
                    partner = mapping[other]
                else:
                    # Unmapped endpoint: symmetry already verified syntactically.
                    continue
                w1 = adj1[G1_node][other].get('weight', 1)
                w2 = adj2[G2_node][partner].get('weight', 1)
                if not close(w1, w2, rtol, atol):
                    return False
        return True
class WeightedMultiGraphMatcher(GraphMatcher):
    """Implementation of VF2 algorithm for undirected, weighted multigraphs."""

    def __init__(self, G1, G2, rtol=1e-6, atol=1e-9):
        """Initialize WeightedMultiGraphMatcher.

        Parameters
        ----------
        G1, G2 : nx.MultiGraph instances
            G1 and G2 must be weighted graphs.
        rtol : float, optional
            The relative tolerance used to compare weights.
        atol : float, optional
            The absolute tolerance used to compare weights.
        """
        self.rtol = rtol
        self.atol = atol
        GraphMatcher.__init__(self, G1, G2)

    def semantic_feasibility(self, G1_node, G2_node):
        """Returns True if mapping G1_node to G2_node is semantically feasible.

        For a self-loop or an already-mapped neighbor, the sorted multisets of
        parallel-edge weights must agree pairwise within tolerance.
        """
        G1_adj = self.G1.adj
        G2_adj = self.G2.adj
        core_1 = self.core_1
        rtol, atol = self.rtol, self.atol
        for neighbor in G1_adj[G1_node]:
            if neighbor is G1_node:
                partner = G2_node
            elif neighbor in core_1:
                partner = core_1[neighbor]
            else:
                # Not yet mapped: nothing to compare at this stage.
                continue
            data1 = sorted(d.get('weight', 1)
                           for d in G1_adj[G1_node][neighbor].values())
            data2 = sorted(d.get('weight', 1)
                           for d in G2_adj[G2_node][partner].values())
            # Bug fix: zip() silently truncates to the shorter list, so a
            # differing number of parallel edges could previously be accepted.
            # Require equal edge multiplicity explicitly.
            if len(data1) != len(data2):
                return False
            for x, y in zip(data1, data2):
                if not close(x, y, rtol, atol):
                    return False
        return True
class WeightedMultiDiGraphMatcher(DiGraphMatcher):
    """Implementation of VF2 algorithm for directed, weighted multigraphs."""

    def __init__(self, G1, G2, rtol=1e-6, atol=1e-9):
        """Initialize WeightedMultiDiGraphMatcher.

        Parameters
        ----------
        G1, G2 : nx.MultiDiGraph instances
            G1 and G2 must be weighted graphs.
        rtol : float, optional
            The relative tolerance used to compare weights.
        atol : float, optional
            The absolute tolerance used to compare weights.
        """
        self.rtol = rtol
        self.atol = atol
        DiGraphMatcher.__init__(self, G1, G2)

    def semantic_feasibility(self, G1_node, G2_node):
        """Returns True if mapping G1_node to G2_node is semantically feasible.

        Both the successor and predecessor edge sets are checked: the sorted
        multisets of parallel-edge weights must agree pairwise within tolerance.
        """
        core_1 = self.core_1
        rtol, atol = self.rtol, self.atol
        # Outgoing edges first, then incoming edges, sharing one loop body.
        directions = ((self.G1.succ, self.G2.succ), (self.G1.pred, self.G2.pred))
        for adj1, adj2 in directions:
            for other in adj1[G1_node]:
                if other is G1_node:
                    partner = G2_node
                elif other in core_1:
                    partner = core_1[other]
                else:
                    # Not yet mapped: nothing to compare at this stage.
                    continue
                data1 = sorted(d.get('weight', 1)
                               for d in adj1[G1_node][other].values())
                data2 = sorted(d.get('weight', 1)
                               for d in adj2[G2_node][partner].values())
                # Bug fix: zip() silently truncates to the shorter list, so a
                # differing number of parallel edges could previously be
                # accepted. Require equal edge multiplicity explicitly.
                if len(data1) != len(data2):
                    return False
                for x, y in zip(data1, data2):
                    if not close(x, y, rtol, atol):
                        return False
        return True
| [
"lorenzo.riano@gmail.com"
] | lorenzo.riano@gmail.com |
131da4ef6887fa5704722436717046f8e50c0a34 | 2f0bde4d37b7ea1aad91ab44b5b4526d0bec30ce | /examples/strike-slip-example/okada_driver.py | b09fae0728d457d440530d09f1f90b57ca4f9062 | [
"MIT"
] | permissive | kmaterna/Elastic_stresses_py | 5c78a628136f610ec68e7ee38d8bc76515319e4f | 549a13c6c7fa3c80aac9d63548fdbf3b1ec7b082 | refs/heads/master | 2023-08-28T21:54:42.500337 | 2023-08-18T01:45:18 | 2023-08-18T01:45:18 | 141,371,162 | 42 | 11 | MIT | 2022-08-09T14:22:15 | 2018-07-18T02:37:59 | Python | UTF-8 | Python | false | false | 1,128 | py | #!/usr/bin/env python
import Elastic_stresses_py.PyCoulomb.fault_slip_object as fso
from Elastic_stresses_py.PyCoulomb import run_dc3d, configure_calc, output_manager, io_additionals

# Definitions
# Reference origin of the local coordinate system and region of interest.
# NOTE(review): presumably lon/lat in degrees (Parkfield, CA area) — confirm
# against the Elastic_stresses_py conventions.
lon0_sys, lat0_sys = -120.5, 36;
bbox = (-121.5, -119.5, 35.2, 36.8);
lonlatfile = "Inputs/lon_lats.txt";
# SRCMOD finite-fault slip distribution for the 2004 Parkfield earthquake.
source_slip_dist = "Inputs/s2004PARKFI01CUST.fsp";

# Inputs
# Read the slip model, convert it to Coulomb-format fault patches, and load
# the observation points where displacements will be computed.
parkfield_faults = fso.file_io.io_srcmod.read_srcmod_distribution(source_slip_dist);
coulomb_fault_model = fso.fault_slip_object.fault_object_to_coulomb_fault(parkfield_faults, lon0_sys, lat0_sys);
disp_points = io_additionals.read_disp_points(lonlatfile);

# Configure, Compute, Output
# Default displacement-only parameters; the stress computation (dc3d) is run
# on the fault model and results are written by the output manager.
params = configure_calc.configure_default_displacement_params();
inputs = configure_calc.configure_default_displacement_input(coulomb_fault_model, zerolon=lon0_sys,
                                                             zerolat=lat0_sys, bbox=bbox, domainsize=100);
outobj = run_dc3d.do_stress_computation(params, inputs, disp_points=disp_points, strain_points=[]);
output_manager.produce_outputs(params, inputs, disp_points, obs_strain_points=[], out_object=outobj);
| [
"kathrynmaterna@gmail.com"
] | kathrynmaterna@gmail.com |
82e10eeddf1ba15903d965b66e49673fcd4caa9d | f9081a6428cf3cbbefe8de70abede996a03387d5 | /app/migrations/0009_remove_account_is_patient.py | 49faa2e009c25a9172927f63c862587602ff65d6 | [] | no_license | AThomas99/haha | c00b72d66382b935628991eeaad364f3e7d1cf2b | 4c67bc95d0dcf811fa350fbb052e55116b6bad17 | refs/heads/master | 2023-06-17T17:13:26.119063 | 2021-07-13T11:37:55 | 2021-07-13T11:37:55 | 383,420,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # Generated by Django 3.2.5 on 2021-07-13 08:51
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``is_patient`` flag added in 0008."""

    dependencies = [
        ('app', '0008_account_is_patient'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='account',
            name='is_patient',
        ),
    ]
| [
"rishaelibenson@gmail.com"
] | rishaelibenson@gmail.com |
f3e8df6eeb1ec9952a151a19f157255fcab78423 | 1ee9081e345c125eddaa88931197aed0265aafb8 | /glearn/task_scheduler/__init__.py | 0dcd17d4997e7fd770ca54277a53d1ef15fe2dca | [] | no_license | WeiShiwei/tornado_classify | 1d45bc16473842fea8d853ba5e2c57a773fed978 | 57faa997c205630c7f84a64db0c2f5ffd8fda12a | refs/heads/master | 2021-01-01T04:44:53.981312 | 2016-05-02T12:06:29 | 2016-05-02T12:06:29 | 57,887,029 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # -*- coding: utf-8 -*-
from tasks_classify import gldjc, gldzb
IDENTITY_APP_DICT = {
'gldjc':gldjc,
'gldzb':gldzb
}
class TaskScheduler(object):
    """Dispatches classification jobs to the celery app registered for an identity."""

    def __init__(self, arg):
        super(TaskScheduler, self).__init__()
        self.arg = arg

    @classmethod
    def apply_async(cls, identity, docs):
        """Queue ``docs`` on the celery queue named after ``identity``.

        Returns the celery AsyncResult, or None when ``identity`` has no
        registered application in IDENTITY_APP_DICT.
        """
        try:
            # Route the task to the queue dedicated to this identity.
            res = IDENTITY_APP_DICT[identity].predict.apply_async((identity, docs), queue=identity)
        except KeyError as e:
            # Fix: "except KeyError, e" and "print e" were Python-2-only
            # syntax; this form works on both Python 2 and 3.
            print(e)
            res = None
        return res
| [
"weishiwei920@163.com"
] | weishiwei920@163.com |
6ba36b1becb11ae39d0aabcd7da73e7d8f7fe1c4 | c106d822a3895e4775e229a82fa44fc5a98dc732 | /bin/todo-sort.py | 4fd694a411fdf7f3ca99234b8580242e2dc5dbc2 | [
"MIT"
] | permissive | derek-pryor/dotfiles | f356af2ff88f4929abb30d01ed99f7f758a7fabc | 1fbbb2bbf80c007fc9d6c9f1ef3a2bf5b5841d41 | refs/heads/master | 2023-06-11T19:11:54.615467 | 2023-06-01T20:27:05 | 2023-06-01T20:27:05 | 162,472,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | import sys
import re
# Matches a todo.txt-style due-date tag, capturing the ISO date (YYYY-MM-DD).
dueDate = re.compile(r"due:(\d{4}-\d{2}-\d{2})")

if __name__ == '__main__':
    # Group stdin lines by their due date; key None collects undated tasks.
    lines = {}
    for line in sys.stdin.readlines():
        match = dueDate.search(line)
        key = match.group(1) if match is not None else None
        lines.setdefault(key, []).append(line)

    # Dated tasks first in chronological order, undated tasks last.
    # Bug fix: the original called keys.remove(None) unconditionally, which
    # raised ValueError whenever every line carried a due date.
    keys = sorted(k for k in lines if k is not None)
    if None in lines:
        keys.append(None)

    for key in keys:
        for line in sorted(lines[key]):
            # Lines keep their trailing newlines, so suppress print's own.
            print(line, end='', flush=True)
| [
"derek.m.pryor@gmail.com"
] | derek.m.pryor@gmail.com |
a386ccedf20d5d2d49c4c93a445d9977d09b5804 | 20c6cb5af100c0208ba66b995655cebc661d6916 | /options.py | baecbda8c6727ac4d3bc16c05218570675b17f57 | [] | no_license | Anonymous-2611/AdaRec | 35cb231cf8b1baf364aa3df9198cb7109dca2b71 | 501c0082e0090e3bb5831197b9c054592b513a7a | refs/heads/master | 2023-02-20T16:08:53.824420 | 2021-01-21T08:08:04 | 2021-01-21T08:08:04 | 331,169,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,859 | py | import argparse
"""
==========================
All options' parsers.
==========================
Except for global setting (e.g. name/gpu/seed/seed/etc.), options are designed like `{scope}_{option}`.
Note: `scope` is not something related to real code, they just make
options easy to arrange and look pretty when printing.
"""
def add_common_args(parser):
    """Register global run options (name, seed, preset/resume, aux output)."""
    add = parser.add_argument
    add("--name", type=str, default="NoName")
    add("--seed", type=int, default=42)
    add("--preset", type=str, default=None, help="Use preset template, see presets.py for detail.")
    add("--resume", type=str, default=None, help="path/to/FOLDER/from/where/you/want/to/resume")
    add("--aux_store_root", type=str, help="/path/to/store", default=None)
    add("--aux_console_output", action="store_true", help="Print logger info to console")
    add("--aux_eval_ks", nargs="+", type=int, default=None, help="ks for Metric@k")
    return parser
def add_dataset_args(parser):
    """Register dataset and dataloader options."""
    add = parser.add_argument
    add("--dataset", type=str, default=None, help="Preset for dataset and dataloader.")
    # mask : masked-language-model style data loader
    # seq  : plain next-item data loader
    add("--dataset_type", type=str, default="mask", choices=["mask", "seq"])
    add("--data_folder", type=str, default=None)
    add("--data_main", type=str, default=None)
    add("--data_neg", type=str, default=None)
    add("--loader_generate_sub_session", action="store_true", default=None)
    add("--loader_train_batch_size", type=int, default=None)
    add("--loader_val_batch_size", type=int, default=None)
    add("--loader_test_batch_size", type=int, default=None)
    add("--loader_mask_prob", type=float, default=None)
    add("--loader_max_len", type=int, default=None)
    add("--loader_num_items", type=int, default=None, help="Number of real items, without PAD and MASK")
    add("--loader_num_aux_vocabs", type=int, default=None, help="+1 when seq, +2 when mask")
    return parser
def add_bert_args(parser):
    """Register BERT backbone hyper-parameters."""
    add = parser.add_argument
    add("--bert_hidden_units", type=int, default=None, help="Size of hidden vectors (d_model)")
    add("--bert_num_blocks", type=int, default=None, help="Number of transformer layers")
    add("--bert_num_heads", type=int, default=None, help="Number of heads for multi-attention")
    add("--bert_dropout", type=float, default=None, help="Dropout probability")
    add("--bert_use_eps", action="store_true", default=None, help="Use x_{i+1} = x_{i} + eps*F(x_{i})")
    return parser
def add_nin_args(parser):
    """Register NextItNet backbone hyper-parameters."""
    add = parser.add_argument
    add("--nin_num_blocks", type=int, default=None)
    add("--nin_block_dilations", nargs="+", type=int, default=None)
    add("--nin_hidden_units", type=int, default=None)
    add("--nin_kernel_size", type=int, default=None)
    add("--nin_use_eps", action="store_true", default=None)
    return parser
def add_sas_args(parser):
    """Register SASRec backbone hyper-parameters."""
    add = parser.add_argument
    add("--sas_num_blocks", type=int, default=None)
    add("--sas_hidden_units", type=int, default=None)
    add("--sas_num_heads", type=int, default=None)
    add("--sas_dropout", type=float, default=None)
    add("--sas_use_eps", action="store_true", default=None)
    return parser
def add_student_model_args(parser):
    """Register architecture options of the searched student model."""
    add = parser.add_argument
    add("--model_dropout", type=float, default=None, help="Dropout probability in cells.")
    add("--model_num_hidden", type=int, default=None, help="Hidden in cells.")
    add("--model_num_cell", type=int, default=None, help="Number of cells.")
    add("--model_num_node", type=int, default=None, help="Number of intermediate node in a cell.")
    return parser
def add_training_args(parser, is_search=False):
    """Register optimizer options.

    When ``is_search`` is True (architecture search), two optimizers are
    configured: one for the model weights and one for the architecture
    alphas; otherwise a single optimizer is used (teacher or finetune).
    """
    add = parser.add_argument
    add("--train_iter", type=int, default=None, help="Number of epochs for training")
    add("--train_log_every", type=int, default=None, help="Log every T*b.")
    add("--train_grad_clip_norm", type=float, default=None, help="Clip gradient by norm.")
    if is_search:
        add("--train_model_lr", type=float, default=None, help="Initial learning rate for model")
        add("--train_model_lr_decay_step", type=int, default=None)
        add("--train_model_lr_decay_gamma", type=float, default=None)
        add("--train_model_wd", type=float, default=None, help="l2 regularization for model")
        add("--train_alpha_lr", type=float, default=None, help="Initial learning rate for alpha")
        add("--train_alpha_lr_decay_step", type=int, default=None)
        add("--train_alpha_lr_decay_gamma", type=float, default=None)
        add("--train_alpha_wd", type=float, default=None, help="l2 regularization for alpha")
    else:
        add("--train_lr", type=float, default=None, help="Learning rate")
        add("--train_lr_decay_step", type=int, default=None, help="Decay step for StepLR")
        add("--train_lr_decay_gamma", type=float, default=None, help="Gamma for StepLR")
        add("--train_wd", type=float, default=None, help="l2 regularization")
    return parser
def add_gru4rec_args(parser):
    """Register GRU4Rec backbone hyper-parameters."""
    add = parser.add_argument
    add("--gru_num_layers", type=int, default=None)
    add("--gru_hidden_units", type=int, default=None)
    add("--gru_dropout", type=float, default=None)
    return parser
def add_caser_args(parser):
    """Register Caser backbone hyper-parameters."""
    add = parser.add_argument
    add("--caser_hidden_units", type=int, default=None)
    add("--caser_dropout", type=float, default=None)
    add("--caser_num_hf", type=int, default=None)
    add("--caser_num_vf", type=int, default=None)
    add("--caser_hf_size", type=int, nargs="+", default=None)
    return parser
def gru4rec_parser():
    """Build the full CLI parser for training the GRU4Rec baseline."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", type=int, default=0)
    for register in (add_common_args, add_dataset_args, add_gru4rec_args):
        parser = register(parser)
    return add_training_args(parser, is_search=False)
def caser_parser():
    """Build the full CLI parser for training the Caser baseline."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", type=int, default=0)
    for register in (add_common_args, add_dataset_args, add_caser_args):
        parser = register(parser)
    return add_training_args(parser, is_search=False)
def bert4rec_parser():
    """Build the full CLI parser for training the teacher BERT network."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", type=int, default=0)
    for register in (add_common_args, add_dataset_args, add_bert_args):
        parser = register(parser)
    return add_training_args(parser, is_search=False)
def nextitnet_parser():
    """Build the full CLI parser for training the teacher NextItNet network."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", type=int, default=0)
    for register in (add_common_args, add_dataset_args, add_nin_args):
        parser = register(parser)
    return add_training_args(parser, is_search=False)
def nextitnet_distill_parser():
    """CLI parser for distilling a NextItNet teacher into a NextItNet student.

    Distillation uses the EMD objective.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", type=int, default=0)
    # Student-side architecture overrides.
    parser.add_argument("--nin_student_hidden_units", type=int, default=None)
    parser.add_argument("--nin_student_num_blocks", type=int, default=None)
    parser.add_argument("--nin_student_block_dilations", nargs="+", type=int, default=None)
    for register in (add_common_args, add_dataset_args, add_nin_args):
        parser = register(parser)
    parser = add_training_args(parser, is_search=False)
    parser.add_argument("--distill_loss_gamma", type=float, default=None, help="Trade off between CE and KD.")
    parser.add_argument("--distill_loss_gamma_decay", type=float, default=None, help="Gamma decay every.")
    parser.add_argument("--distill_teacher_folder", type=str, default=None)
    return parser
def sasrec_distill_parser():
    """Build the CLI parser for distilling SASRec into a smaller SASRec."""
    p = argparse.ArgumentParser()
    p.add_argument("--gpu", type=int, default=0)
    # Student architecture overrides (None means: reuse teacher's setting).
    p.add_argument("--sas_student_num_heads", type=int, default=None)
    p.add_argument("--sas_student_hidden_units", type=int, default=None)
    p.add_argument("--sas_student_num_blocks", type=int, default=None)
    p = add_common_args(p)
    p = add_dataset_args(p)
    p = add_sas_args(p)
    p = add_training_args(p, is_search=False)
    # Knowledge-distillation hyper-parameters (EMD distillation method).
    p.add_argument("--distill_loss_gamma", type=float, default=None, help="Trade off between CE and KD.")
    p.add_argument("--distill_loss_gamma_decay", type=float, default=None, help="Gamma decay every.")
    p.add_argument("--distill_teacher_folder", type=str, default=None)
    return p
def bert4rec_distill_parser():
    """Build the CLI parser for distilling BERT4Rec into a smaller BERT4Rec."""
    p = argparse.ArgumentParser()
    p.add_argument("--gpu", type=int, default=0)
    # Student architecture overrides (None means: reuse teacher's setting).
    p.add_argument("--bert_student_num_heads", type=int, default=None)
    p.add_argument("--bert_student_hidden_units", type=int, default=None)
    p.add_argument("--bert_student_num_blocks", type=int, default=None)
    p = add_common_args(p)
    p = add_dataset_args(p)
    p = add_bert_args(p)
    p = add_training_args(p, is_search=False)
    # Knowledge-distillation hyper-parameters (EMD distillation method).
    p.add_argument("--distill_loss_gamma", type=float, default=None, help="Trade off between CE and KD.")
    p.add_argument("--distill_loss_gamma_decay", type=float, default=None, help="Gamma decay every.")
    p.add_argument("--distill_teacher_folder", type=str, default=None)
    return p
def sasrec_parser():
    """Build the CLI parser for training the Teacher-SASRec network."""
    p = argparse.ArgumentParser()
    p.add_argument("--gpu", type=int, default=0)
    p = add_common_args(p)
    p = add_dataset_args(p)
    p = add_sas_args(p)
    return add_training_args(p, is_search=False)
def student_search_preset_parser():
    """Minimal preset parser: required -T (teacher type) and -D (dataset name)."""
    preset = argparse.ArgumentParser()
    preset.add_argument("-T", type=str, required=True, help="teacher's type")
    preset.add_argument("-D", type=str, required=True, help="dataset's name")
    return preset
def student_search_parser(teacher_type):
    """Build the CLI parser for searching a student architecture under a
    pretrained teacher.

    ``teacher_type`` must be one of "bert", "nin" or "sas" (case-insensitive,
    surrounding whitespace tolerated); it selects which teacher-specific
    argument group is appended last.
    """
    teacher_type = teacher_type.strip().lower()
    assert teacher_type in ["bert", "nin", "sas"]
    p = argparse.ArgumentParser()
    p.add_argument("--gpu_teacher", type=int, default=0)
    p.add_argument("--gpu_student", type=int, default=0)
    # Student-search (gumbel sampling / NAS) hyper-parameters.
    p.add_argument("--search_temperature", type=float, default=None, help="Initial gumbel sampling temperature.")
    p.add_argument("--search_temperature_decay_rate", type=float, default=None, help="Temperature decay rate.")
    p.add_argument("--search_temperature_decay_epochs", type=float, default=None, help="Temperature decay every.")
    p.add_argument("--search_teacher_folder", type=str, default=None)
    p.add_argument("--search_teacher_layers", type=int, default=None, help="Number of layers in teacher network.")
    p.add_argument("--search_teacher_hidden", type=int, default=None, help="Hidden units in teacher network.")
    p.add_argument("--search_distill_loss", type=str, default=None, help="KD loss type.")  # hierarchical|emd|ada
    p.add_argument("--search_hierarchical_select_method", type=str, default=None, help="Hierarchical KD method.")
    p.add_argument("--search_loss_gamma", type=float, default=None, help="Trade off between CE and KD.")
    p.add_argument("--search_loss_gamma_decay", type=float, default=None, help="Gamma decay every.")
    p.add_argument("--search_loss_beta", type=float, default=None, help="Loss factor for model efficiency.")
    p = add_common_args(p)
    p = add_dataset_args(p)
    p = add_training_args(p, is_search=True)
    p = add_student_model_args(p)
    # Append the argument group matching the teacher family.
    teacher_args = {"bert": add_bert_args, "nin": add_nin_args, "sas": add_sas_args}
    return teacher_args[teacher_type](p)
def student_finetune_parser():
    """Build the CLI parser for finetuning a student once its architecture
    has been generated by the search stage."""
    p = argparse.ArgumentParser()
    p.add_argument("--gpu", type=int, default=0)
    p.add_argument("--search_folder", type=str, default=None)
    p.add_argument("--search_teacher_type", type=str, default=None)
    p = add_common_args(p)
    p = add_dataset_args(p)
    p = add_student_model_args(p)
    return add_training_args(p, is_search=False)
def student_augment_parser():
    """Build the CLI parser for augmenting (training) a student network,
    e.g. reusing the alpha searched on 30music to train on ml2k."""
    p = argparse.ArgumentParser()
    p.add_argument("--gpu", type=int, default=0)
    p.add_argument("--augment_source_folder", type=str, default=None)  # where the searched alpha (arch) comes from
    p.add_argument("--augment_target_folder", type=str, default=None)  # where the target embedding and linear come from
    p = add_common_args(p)
    p = add_dataset_args(p)
    p = add_student_model_args(p)
    return add_training_args(p, is_search=False)
def student_augment_preset_parser():
    """Minimal preset parser: required -T (teacher type) and -D_src (dataset
    whose searched alpha file is reused)."""
    preset = argparse.ArgumentParser()
    preset.add_argument("-T", type=str, required=True, help="teacher's type")
    preset.add_argument("-D_src", type=str, required=True, help="from which alpha file is searched")
    return preset
def str2bool(v):
    """argparse-friendly boolean conversion.

    Maps common yes/no spellings (case-insensitive) to True/False and raises
    ``argparse.ArgumentTypeError`` for anything else, so argparse reports a
    clean usage error instead of a traceback.
    """
    token = v.lower()
    if token in ("yes", "true", "t", "y", "1"):
        return True
    if token in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Unsupported value encountered.")
def empty_parser():
    """Return a fresh ArgumentParser with no arguments registered."""
    bare = argparse.ArgumentParser()
    return bare
| [
"cl15179779122@163.com"
] | cl15179779122@163.com |
745408aababcdbfb801e2e1fb73f38bf07d71784 | bc2834d254b555787b5802cff09cc405d0f4f2c4 | /uco/venv/lib/python3.5/stat.py | 48a9727e0595edab35b72576524d33f30626491c | [] | no_license | lindatxia/UCO-Verison-Checker | a491bc82c7eb8da37536ae3d1262ec9153e50e9e | 3e4610dc00420e9bab88f8cd730782fccb55fbb3 | refs/heads/master | 2021-03-30T16:31:52.006291 | 2018-05-05T02:44:50 | 2018-05-05T02:44:50 | 122,348,339 | 0 | 0 | null | 2018-05-05T02:44:51 | 2018-02-21T14:41:49 | Python | UTF-8 | Python | false | false | 48 | py | /Users/lindaxia/miniconda3/lib/python3.5/stat.py | [
"lindax@andrew.cmu.edu"
] | lindax@andrew.cmu.edu |
172c8c6387685566c45d7452523e04e900cd8301 | 765465c33a4dc02774a3a0d10594fe9d7defe84b | /quiz_ex.py | ac892312244be4b99f6b4d57014d72fb2bed3098 | [] | no_license | Ijinyanen/Y7_Quiz | d287d8151db5133dc42baf92eb10dc090c5699d4 | cbda42f28145c849197d9794ea3b068204e0a959 | refs/heads/main | 2023-02-26T16:52:21.412645 | 2021-01-27T11:29:06 | 2021-01-27T11:29:06 | 331,145,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | # challenge 1
print("\nWelcome to Mr Higgin's ultimate Star Trek Quiz!")
print("There will be 10 questions.")
print("You shalt be scored for each.\n\n")
# challenge 2
bucket = input("Please enter your first name. -> ")
surname = input("Please enter your surname. -> ")
age = input("Please enter your age. -> ")
print(bucket, surname, age)
print("\nWelcome to the Ulitmate Quiz, ", bucket, "\n")
# challeng 3/4
answer1 = input("Q1. What is the name of the ship that features in most Star Trek series?\n-> ").upper()
if answer1 == "ENTERPRISE":
print("\ncorrect.\n")
else:
print("\nwrong.\n")
#challenge 5
answer2 = input("\nQ2 What is the surname of the captain of the Entprise D?\n-> ").lower()
if answer2 == "picard":
print("\nCorrect\n")
else:
print("\nwrong\n") | [
"ryanhchiba@hotmail.com"
] | ryanhchiba@hotmail.com |
21b1364b9ce6a60ca2445bf020c1f3135e4512ad | e3c6348e8683353994ca76c434a23c96176a0c4e | /backend/wsgi.py | ce5cf66ea0253d0a263484bcd0d09e2cdeb59e4c | [] | no_license | achudy/bd_biblioteka | ffab2f44dfb2aa8dbc9adacee7dba0bfde24ee22 | 24569ff03c4937be1712a6119e6672af258a37a3 | refs/heads/main | 2023-02-19T00:41:04.815067 | 2021-01-19T15:48:32 | 2021-01-19T15:48:32 | 321,102,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | from project import create_app
# WSGI entry point: the module-level ``application`` object that WSGI servers
# (gunicorn, uWSGI, mod_wsgi) look up and serve.
application = create_app()
| [
"chudy.arkadiusz@gmail.com"
] | chudy.arkadiusz@gmail.com |
923f312b0fe97996949434d897235ae1e3af93db | 1d21f23e6b8bfbcae07f41f4566c617975e8a34f | /application/main/services/user_service.py | 34fcef2760b6cbedb1652a2f6b356bfb46a880e9 | [] | no_license | timmy224/react-slack-server | 55cd78669ae708c92122b9e8410f56e09abe45f6 | f9eeda7c77c8f236bbcda729957db1d4f75d808b | refs/heads/master | 2023-05-09T00:19:14.663603 | 2020-11-05T17:07:28 | 2020-11-05T17:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from ... import db
from ...models.User import User
def get_user(username):
    """Return the single User whose ``username`` matches exactly.

    Uses the query's ``.one()``, which (per the ORM in use -- presumably
    SQLAlchemy, confirm) raises when zero or multiple rows match.
    """
    matching = User.query.filter_by(username=username)
    return matching.one()
"kodezen@protonmail.com"
] | kodezen@protonmail.com |
ca34e03ef4a90a8d5d4c34a0ada17be32fc3c867 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_evan176_solve.py | 176dc3d014d5ea7ed28dc4e8bea96de713789acf | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 581 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import math
import time
def compute(word):
result = [word[0]]
for i in range(1, len(word)):
alpha = word[i]
if alpha >= result[0]:
result.insert(0, alpha)
else:
result.append(alpha)
return ''.join(result)
if __name__ == "__main__":
with open(sys.argv[1], 'r') as f:
cases = int(f.readline())
for i in range(cases):
word = f.readline().strip()
result = compute(word)
print('Case #{}: {}'.format(i+1, result))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
fd2392ab49d5f7a023acdd65aee092160c44e8e4 | 4133fad3c4da7e6db4de4b36e87c994273ef4b81 | /indexModificado.py | 67640a7a68dd38d3b2dacfcf2b0b41027ca34ed4 | [] | no_license | manuelARuiz9712/sincronizadores | 5462278538460dd67362266c2fd21a7ee1f5836f | 31733523fe57a9656d442f6486d47101ca341378 | refs/heads/master | 2022-12-26T12:13:16.623296 | 2020-10-02T05:21:15 | 2020-10-02T05:21:15 | 300,509,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,301 | py | import pyodbc
import requests
import datetime
import json
server = '192.168.1.252\SQL2008, 60313'
database = 'POSManu'
username = 'DeveloperWeb2'
password = 'Web163BDis'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
print("Connected")
cursor = cnxn.cursor()
UrlApi = "https://pitagoras.dismelltda.com/apitest/public"
#cursor.execute("select top 100* from Ex_POS_UsuarioServicio")
#rows = cursor.fetchall()
#print(rows)
def GetDateFacturacion():
x = datetime.datetime.now()
return ""+x.strftime("%Y")+x.strftime("%m")+x.strftime("%d")
def NonHumanFacturation (Pedido,Turno,Consecutivo,IdDocumento,ExtraData):
QueryItems = ""
ConceptosIcos = 0
Fecha =GetDateFacturacion()
IdentificacionUsuario = Pedido['Info']["NumeroIdentificacion"]
TotalIvaDef = 0
PLaza = ''
Banco == ''
if Pedido['Info']['NombreTipoPago'] == 'Datafono':
PLaza = 'CTG'
for ItemPedido in Pedido['items']:
if ItemPedido["Tipo"] == 'Producto':
TotalIva = (( int( ItemPedido["IVA"] )/100) * ItemPedido['PrecioBase'])* int( ItemPedido['Cantidad'])
TotalIvaDef = TotalIvaDef + TotalIva
PrecioUnidad = ItemPedido['PrecioBase'] + TotalIva
PrecioTotal = ItemPedido['PrecioBase'] * int( ItemPedido['Cantidad'] )
ConceptosIcos += int( ItemPedido['ICO'] )*int( ItemPedido['Cantidad'] )
QueryItems += """
Exec spItemsDocumentos @Op = 'I', @IDEN_Documentos = @Iden_Documento_Select, @Cantidad = {Cantidad}, @PrecioUnidad = {PrecioUnidad}, @PrecioTotal = {PrecioTotal}, @CostoTotal = 0,
@PorcentajeIVA = {IVA}, @PorcentajeDescuento = 0, @PorcentajeSobreDescuento = 0, @TotalDescuento = 0, @TotalIVA = {TotalIva}, @IdArticulo = {IdArticulo}, @VentaPorEmpaque = 0,
@Bodega = '13', @Ubicacion = '', @Lote = '0', @Clasificacion = 0, @Serie = '', @PrecioTotalOtraMoneda = 0, @PrecioUnidadOtraMoneda = 0, @Requerido = 1,
@SobreDescuento = 0, @Detalle = '', @PorcentajeINC = 0, @TotalINC = 0, @Serial = null
""".format(TotalIva=TotalIva,Cantidad=ItemPedido['Cantidad'],PrecioUnidad=PrecioUnidad,PrecioTotal=PrecioTotal,IVA=ItemPedido['IVA'],IdArticulo=ItemPedido['Codigo'])
else:
for SubItem in ItemPedido["Composicion"]:
UnidadesTotal = int( SubItem['Cantidad'] ) * int( ItemPedido['Cantidad'] )
TotalIva = (int( SubItem["Iva"] )/100) * SubItem['PrecioBase'] * UnidadesTotal
TotalIvaDef = TotalIvaDef + TotalIva
PrecioUnidad = SubItem['PrecioBase'] + TotalIva
PrecioTotal = SubItem['PrecioBase'] * UnidadesTotal
ConceptosIcos += int( SubItem['Ico'] ) * UnidadesTotal
QueryItems += """
Exec spItemsDocumentos @Op = 'I', @IDEN_Documentos = @Iden_Documento_Select, @Cantidad = {Cantidad}, @PrecioUnidad = {PrecioUnidad}, @PrecioTotal = {PrecioTotal}, @CostoTotal = 0,
@PorcentajeIVA = {IVA}, @PorcentajeDescuento = 0, @PorcentajeSobreDescuento = 0, @TotalDescuento = 0, @TotalIVA = {TotalIva}, @IdArticulo = {IdArticulo}, @VentaPorEmpaque = 0,
@Bodega = '13', @Ubicacion = '', @Lote = '0', @Clasificacion = 0, @Serie = '', @PrecioTotalOtraMoneda = 0, @PrecioUnidadOtraMoneda = 0, @Requerido = 1,
@SobreDescuento = 0, @Detalle = '', @PorcentajeINC = 0, @TotalINC = 0, @Serial = null
""".format(TotalIva=TotalIva,Cantidad= UnidadesTotal,PrecioUnidad=PrecioUnidad,PrecioTotal=PrecioTotal,IVA=SubItem['Iva'],IdArticulo=SubItem['Codigo'])
#print( QueryItems ,"\n")
QueryConsecutivos = """
EXEC spGenerarMovimientoInventario @TipoDocumento = 1,@Consecutivo=@Consecutivo_Select
EXEC spContabilizador @TipoDocumento=1,@Consecutivo=@Consecutivo_Select
EXEC SpSincronizarVenta @TipoDocumento=1,@Consecutivo=@Consecutivo_Select
---------------------------------------------
IF @@TRANCOUNT>0 BEGIN COMMIT TRAN END
"""
QueryMedioPago = """
Exec spPagosDocumentos @Op='I',@IDen_Documentos=@Iden_Documento_Select,@FormaPago='{MedioPago}',@Valor={ValorTotalPedido},@Cambio=0,@Comision=0,@Retencion=0,@IvaInformado={TotalIvaDef},@ReteIVA=0,
@Vencimiento='{Fecha}',@Cliente='99 ',@Banco='{Banco}',@Plaza='{Plaza}',@DocumentoPago='{DocumentoPago}',@EsUnAnticipo=0
""".format(DocumentoPago= Pedido['Info']['CodigoTransaccion'],Plaza=PLaza,Banco=Pedido['Info']['Banco'],IdDocumento =IdDocumento,ValorTotalPedido =float( Pedido['Info']['ValorPedido'] ),Fecha=Fecha,TotalIvaDef=TotalIvaDef,MedioPago=Pedido['Info']["CodigoPago"])
QueryConeptiosIcos = """
Exec spConceptosDocumentos @Op='I', @IDEN_Documentos=@Iden_Documento_Select, @Concepto='44', @Cantidad=1, @Valor={ValorIco}, @BaseRetencion=0, @PorcentajeImpuesto=0, @ValorUnidad1=0,
@ValorUnidad2=0, @ValorUnidad3=0, @cliente=' ', @Proveedor=' ', @DocumentoCartera='', @TipoDocumentoCartera='', @Vencimiento='20200403',
@Referencia='', @Vendedor='', @Moneda='', @DocumentoCaja='', @Fecha='{Fecha}', @Banco=' ', @plaza=' ', @AuxiliarAbierto=' ',
@CentroCosto='', @Autorizacion=' ', @ReferenciaCaja=' ', @Tercero='99 ', @ItemsContable='', @ValorEnOtraMoneda={ValorIco},
@SubTotal={ValorIco}, @TotalDescuento=0, @Total={ValorIco}, @TotalIVA=0, @TotalConcepto={ValorIco}, @PorcentajeIVA=0, @PorcentajeDcto=0, @Detalle='IMPOCONSUMO LICORES',
@IdenTiquetera = 0, @ConsecutivoTiquetera = 0
""".format(ValorIco=ExtraData['TotalIco'],Fecha=Fecha)
QueryConeptiosDomicilio = """
Exec spConceptosDocumentos @Op='I', @IDEN_Documentos=@Iden_Documento_Select, @Concepto='{IdConcepto}', @Cantidad=1, @Valor={ValorIco}, @BaseRetencion=0, @PorcentajeImpuesto=0, @ValorUnidad1=0,
@ValorUnidad2=0, @ValorUnidad3=0, @cliente=' ', @Proveedor=' ', @DocumentoCartera='', @TipoDocumentoCartera='', @Vencimiento='20200403',
@Referencia='', @Vendedor='', @Moneda='', @DocumentoCaja='', @Fecha='{Fecha}', @Banco=' ', @plaza=' ', @AuxiliarAbierto='DOMICILIOS ',
@CentroCosto='', @Autorizacion=' ', @ReferenciaCaja=' ', @Tercero='99 ', @ItemsContable='', @ValorEnOtraMoneda={ValorIco},
@SubTotal={ValorIco}, @TotalDescuento=0, @Total={ValorIco}, @TotalIVA=0, @TotalConcepto={ValorIco}, @PorcentajeIVA=0, @PorcentajeDcto=0, @Detalle='{DetalleConcepto}',
@IdenTiquetera = 0, @ConsecutivoTiquetera = 0
""".format(ValorIco=Pedido['Info']['ValorConceptoDomicilio'],Fecha=Fecha,IdConcepto=Pedido['Info']['IdConceptoDismel'],DetalleConcepto=Pedido['Info']['NombreConcepto'])
QueryIdentificacionUsuario = """
Exec spPOS_ManejoDeTerceros @xOperacion = 'IR', @xCaja = '01', @xIdentificacion = '99-{DocumentoItem}', @xTipoTercero = '0', @xTipoIdentificacion = null,
@xNombre1 = null, @xNombre2 = null, @xApellido1 = null, @xApellido2 = null, @xTipoRazonSocial = 'NA'
""".format(DocumentoItem=IdentificacionUsuario)
LargeConsulta = """
BEGIN TRAN
Declare @Error Int
Declare @Iden_Documento_Select Numeric
Declare @Consecutivo_Select Numeric
Declare @Comprobante_Select Varchar(10)
Set @Error = -1
---------------------------------------------CABECERA DE DOCUMENTO-------------------------------------------------------------------
Execute @Error = spDocumentos @Op = 'I', @Turno = {TurnoPedido}, @Estado = 'Procesado', @Fecha = '{FechaPedido} 00:00:00', @TipoDocumento = 1,
@Moneda = 'EFE', @TasaCambio = 1, @Cliente = '99 ', @Vendedor = '82 ', @UsuariodeServicio = '99-{IdentificacionUsuario}', @NombreUsuariodeServicio = 'Turista ',
@TelefonoUsuariodeServicio = '1', @direccionUsuarioServicio = '.', @Identificacion = '{IdentificacionUsuario}', @FechaNacimiento = '19000101 00:00:00',
@PrestadordeServicio = null, @NombrePrestadordeServicio = null, @NroAutorizacion = null, @Fuente = '51', @Inicio = '00', @Comprobante = null,
@DescuentoComercial = 0, @Cargo1 = 0, @cargo2 = 0, @cargo3 = 0, @Usuario = 117, @AutorizadoPor = 0, @ValorDevuelto = 0, @Cupo = 1, @DocumentoconPendiente = 0,
@Anotaciones = null, @DocumentoDevuelto = 0, @ConsecutivoRelacionado = 0, @ValorAnticipo = 0, @PlanSepare_Consecutivo = null,
@PlanSepare_Consecutivo_Pago = null, @PlanSepare_Valor = 0, @Iden_Documento_Select = @Iden_Documento_Select OUTPUT,
@Consecutivo_Select = @Consecutivo_Select OUTPUT, @Comprobante_Select = @Comprobante_Select OUTPUT, @ModalidadesVentas = null, @Asociado = null
{ItesmPedidoQuery}
{ConceptosQuery}
{ConceptosDomiclioQuery}
{MedioPagoQuery}
{IdentificacionQuery}
{QueryConsecutivosEx}
""".format(MedioPago=Pedido['Info']["CodigoPago"],ConceptosDomiclioQuery=QueryConeptiosDomicilio,QueryConsecutivosEx=QueryConsecutivos,IdentificacionUsuario=IdentificacionUsuario,TurnoPedido=Turno,FechaPedido=Fecha,ItesmPedidoQuery=QueryItems,MedioPagoQuery=QueryMedioPago,ConceptosQuery=QueryConeptiosIcos,IdentificacionQuery=QueryIdentificacionUsuario)
#print("PEDIDO::","\n",LargeConsulta)
return LargeConsulta
def QueryExistUsuarios(identificacion):
    """Return the SQL that looks up a service user by identification number.

    SECURITY NOTE(review): the value is interpolated straight into the SQL
    text; prefer a parameterised query (``cursor.execute(sql, params)``).
    """
    template = "select Codigo,Identificacion from Ex_POS_UsuarioServicio where Identificacion = '{}'"
    return template.format(identificacion)
def QueryInsertUsuario(Identificacion,Nombre,Direccion,Telefono):
    """Return the SQL that inserts a new service user.

    ``Codigo`` is derived as '99-' + identification; the user group is fixed
    to 'vc99'.
    SECURITY NOTE(review): values are string-interpolated; prefer a
    parameterised query.
    """
    CodigoIdentificacion = '99-'+Identificacion
    campos = (CodigoIdentificacion, Nombre, Identificacion, Direccion, 'vc99', Telefono)
    template = "insert into Ex_POS_UsuarioServicio(Codigo,Nombre,Identificacion,Direccion,GrupoUsuarioServicio,Telefono) values('{}','{}','{}','{}','{}','{}')"
    return template.format(*campos)
def QueryGetTurno():
    """Return the SQL listing shift ids newest-first; callers fetch row one."""
    sql = "select IDEN from Turno order by IDEN desc"
    return sql
def QueryDocumentoInfo():
    """Return the SQL listing documents newest-first (id + consecutive)."""
    sql = "select IDEN,Consecutivo from Documentos order by IDEN desc"
    return sql
def ComprobarExistencias(items):
    """Check warehouse-13 stock for every order line and total IVA/ICO.

    items :: list of dicts; lines with Tipo 'Actividad' carry a 'Composicion'
    list of sub-items, everything else is treated as a plain product.
    Returns {'TotalIva', 'TotalIco', 'CanFacure'}; 'CanFacure' flips to False
    as soon as any article is unknown or short on stock.
    Relies on the module-level DB ``cursor``.
    """
    EstadoComprobado = True
    TotalIva = 0
    TotalIco = 0
    Respuesta = {
        "TotalIva":0,
        "TotalIco":0,
        "CanFacure":True
    }
    for itemPedido in items:
        if itemPedido['Tipo'] == 'Actividad':
            for subItems in itemPedido["Composicion"]:
                cursor.execute("""select Articulo.IdArticulo,Articulo.Nombre,Existencia.Existencias from Articulo
                inner join Existencia on Articulo.IdArticulo = Existencia.Articulo
                where Existencia.Bodega = 13 and IdArticulo = {}""".format(subItems['Codigo']))
                ResultExistencias = cursor.fetchone()
                if ResultExistencias == None:
                    # Article not found in warehouse 13 -> order not billable.
                    EstadoComprobado = False
                    break
                else:
                    if int( subItems['Cantidad']) <= int( ResultExistencias[2] ) :
                        # NOTE(review): both totals below multiply by
                        # subItems['Cantidad'] twice, whereas NonHumanFacturation
                        # bills sub-quantity x parent quantity (UnidadesTotal).
                        # Likely the outer factor was meant to be
                        # itemPedido['Cantidad'] -- confirm before changing.
                        # NOTE(review): the stock comparison above also ignores
                        # the parent item quantity.
                        TotalIva = TotalIva + ((int( subItems['Iva'] )/100 * subItems['PrecioBase'])*subItems["Cantidad"])*subItems['Cantidad']
                        TotalIco = TotalIco + (int( subItems['Ico'] ) * subItems["Cantidad"] )*subItems['Cantidad']
                    else:
                        EstadoComprobado = False
                        break
            if EstadoComprobado == False:
                break
        else:
            cursor.execute("""select Articulo.IdArticulo,Articulo.Nombre,Existencia.Existencias from Articulo
            inner join Existencia on Articulo.IdArticulo = Existencia.Articulo
            where Existencia.Bodega = 13 and IdArticulo = {}""".format(itemPedido['Codigo']))
            ResultExistencias = cursor.fetchone()
            if ResultExistencias == None:
                EstadoComprobado = False
                break
            else:
                if int( itemPedido['Cantidad']) <= int( ResultExistencias[2] ) :
                    TotalIva = TotalIva + (int( itemPedido['IVA'] )/100 * itemPedido['PrecioBase'])*itemPedido["Cantidad"]
                    TotalIco = TotalIco + (int( itemPedido['ICO'] ) * itemPedido["Cantidad"] )
                else:
                    EstadoComprobado = False
                    break
    Respuesta['TotalIva'] = TotalIva;
    Respuesta['TotalIco'] = TotalIco;
    Respuesta['CanFacure'] = EstadoComprobado;
    return Respuesta
# --- Synchronisation run -----------------------------------------------------
# Pull the pending web orders, invoice each one whose stock check passes, and
# report the per-order outcome back to the API.
r = requests.get( UrlApi+'/api/mobile/Prueba/GetPedidosDisponibles')
pedidosDisponibles = r.json()
PedidosCompletados = []
for pedido in pedidosDisponibles:
    CanFacturePedido = ComprobarExistencias(pedido['items'])
    print( CanFacturePedido )
    if CanFacturePedido["CanFacure"] == True:
        # Latest shift plus latest document id/consecutive seed the invoice.
        cursor.execute( QueryGetTurno() )
        Turno = int( cursor.fetchone()[0] )
        cursor.execute( QueryDocumentoInfo() )
        DocumentoInfo = cursor.fetchone()
        # Create the service user on the fly if this identification is new.
        queyconsulta = QueryExistUsuarios(pedido['Info']['NumeroIdentificacion'])
        cursor.execute( queyconsulta )
        usuario = cursor.fetchone()
        if usuario == None:
            cursor.execute( QueryInsertUsuario( pedido['Info']['NumeroIdentificacion'] ,pedido['Info']['Nombres'],pedido['Info']['DireccionDomicilo'],pedido['Info']['Telefono']) )
            cnxn.commit()
        ConsultaCompleta = NonHumanFacturation(pedido,Turno,int( DocumentoInfo[1] ),int( DocumentoInfo[0] ),CanFacturePedido)
        try:
            cursor.execute( ConsultaCompleta )
            cnxn.commit()
            PedidosCompletados.append( {
                "pedidoId":pedido['Info']['idPedido'],
                "estado":True
            } )
        # BUGFIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort per-order handling is preserved,
        # but only for Exception subclasses.
        except Exception:
            PedidosCompletados.append( {
                "pedidoId":pedido['Info']['idPedido'],
                "estado":False
            } )
            print("None")
print("Pedidos completados \n",PedidosCompletados)
payLoad = {"ArrayPedidos":json.dumps(PedidosCompletados)}
r = requests.post(UrlApi+"/api/mobile/SyncronizarFacturacionPedidos", data=payLoad)
print("Pedidos Completados","\n",r) | [
"manuelanguloruiz@gmail.com"
] | manuelanguloruiz@gmail.com |
0af923ce9c5d19f830d123e01cb16ea667201bcf | ec2d21b9cb56bad8608e8bf724c86956f929c70d | /Mission_to_Mars.py | f9e719b6edadcddc93b9ba577f1b9d86e21322aa | [] | no_license | Natasha-Nelson/W10-Splinter-Mars | 319801eb9bb210eda5c4b751907c12ebfe3423fb | fd81ff4cb04ac81253bf903787707b6a99400d7b | refs/heads/main | 2023-06-03T06:20:18.663798 | 2021-06-27T21:19:31 | 2021-06-27T21:19:31 | 379,471,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | # import dependencies
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
# Set up Splinter: install a matching chromedriver and open a visible browser.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# Visit the mars nasa news site
url = 'https://redplanetscience.com'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
# Scrape the latest article's title and teaser from the first list item.
html = browser.html
news_soup = soup(html, 'html.parser')
slide_elem = news_soup.select_one('div.list_text')
news_title = slide_elem.find('div', class_='content_title').text
news_title  # NOTE: bare expression -- Jupyter-notebook leftover, no effect in a script
news_p = slide_elem.find('div', class_='article_teaser_body').text
news_p  # NOTE: notebook leftover
#### Featured Images
# Visit URL
url = 'https://spaceimages-mars.com'
browser.visit(url)
# Find and click the full image button
full_image_elem = browser.find_by_tag('button')[1]
full_image_elem.click()
# Parse the resulting html with soup
html = browser.html
img_soup = soup(html, 'html.parser')
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
img_url_rel  # NOTE: notebook leftover
# Use the base URL to create an absolute URL
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
img_url  # NOTE: notebook leftover
# ### Table Data Scrape: Mars/Earth facts table into a DataFrame.
df = pd.read_html('https://galaxyfacts-mars.com')[0]
df.columns=['Description','Mars','Earth']
df.set_index('Description', inplace=True)
df  # NOTE: notebook leftover
df.to_html()
browser.quit() | [
"natasha.leah.nelson@gmail.com"
] | natasha.leah.nelson@gmail.com |
dd667c800eb393916b21a98a3a859f389237f10f | b2e88f2791b1a118657db84bb5a60cf26d2b6327 | /knapSack_problem/script/knapSack.py | f8ec9158e5dcb6eaa24edba1b4f2ecefc6745999 | [] | no_license | carola173/Knapsack_Algorithm | 52b3519924c2b9b5968a6885a334a3aaa0ae2e89 | 4ce9076e6c2aedc8bf766bc6935661c7be5888ec | refs/heads/master | 2022-03-06T11:47:50.221143 | 2019-11-19T10:56:25 | 2019-11-19T10:56:25 | 222,665,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | '''
Using the dynamic programming for implementing the knapsack problem
Time complexity of the algorithm is : O(nW) where n is the number of items and W is the capacity of knapsack.
'''
def knapSack(capacity, volume, value, n):
K = [[0 for x in range(capacity + 1)] for x in range(n + 1)]
for i in range(n + 1):
for w in range(capacity + 1):
if i == 0 or w == 0:
K[i][w] = 0
elif volume[i-1] <= w:
K[i][w] = max(value[i-1] + K[i-1][w-volume[i-1]], K[i-1][w])
else:
K[i][w] = K[i-1][w]
for i in range(n+1):
for j in range(capacity+1):
print(K[i][j], end=" ")
print()
return K[n][capacity]
# Driver program to test above function
try_count=0
while(try_count < 5):
try:
try_count+=1
value=list(map(int,input("Enter the list of value :").split(' ')))
volume=list(map(int,input("Enter the list of volume :").split(' ')))
capacity=int(input("Enter the total knapsack capacity :"))
if(len(value)!=len(volume)):
print("Length of value and volumne parameter doesn't matches")
else:
n = len(value)
break
except:
print("Enter valid integer")
print(knapSack(capacity, volume, value, n)) | [
"carola.amu@gmail.com"
] | carola.amu@gmail.com |
2eb7c0f1ac90448da79ffa3061e1de8a208d3d51 | 83fc27eb524842ae12bed6a5f75cfd2557cffbc2 | /antcolor/WIP/WIP Segmentations/MorphSnakesTest.py | e0f19e57263c7c04fe639bbc31ff48a4534ff2ef | [] | no_license | calacademy-research/antcolor | c6dd4891b20bf8f055d640326e7ca4ff78e313a4 | 93832e0c570faed1c629834f4adaa83d7f3998cd | refs/heads/master | 2021-11-23T11:37:48.460709 | 2021-10-30T04:53:05 | 2021-10-30T04:53:05 | 140,758,363 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | from itertools import cycle
import cv2
import numpy as np
from Tests import morphsnakes
from scipy import ndimage
from scipy.ndimage import binary_dilation, binary_erosion, \
gaussian_filter, gaussian_gradient_magnitude
from scipy.misc import imread
from matplotlib import pyplot as ppl
from Tests import tests
#WIP implementation of morphsnakes
def rgb2gray(img):
"""Convert a RGB image to gray scale."""
return 0.2989*img[:,:,0] + 0.587*img[:,:,1] + 0.114*img[:,:,2]
def circle_levelset(shape, center, sqradius, scalerow=1.0):
"""Build a binary function with a circle as the 0.5-levelset."""
grid = np.mgrid[list(map(slice, shape))].T - center
phi = sqradius - np.sqrt(np.sum((grid.T)**2, 0))
u = np.float_(phi > 0)
return u
# Load the image.
imgcolor = imread("AntImages/highhead1.jpg") / 255.0
img = rgb2gray(imgcolor)
# g(I)
gI = morphsnakes.gborders(img, alpha=1000, sigma=2)
# Morphological GAC. Initialization of the level-set.
mgac = morphsnakes.MorphGAC(gI, smoothing=2, threshold=0.3, balloon=-1)
mgac.levelset = circle_levelset(img.shape, (163, 137), 135, scalerow=0.75)
# Visual evolution.
ppl.figure()
morphsnakes.evolve_visual(mgac, num_iters=110, background=imgcolor)
ppl.show()
| [
"idecjj@hendrix.edu"
] | idecjj@hendrix.edu |
58a0b89d9a44e9b44d96235ba45354df6142d209 | b15848c78b6ed07d27cae74b90ae99a27d7acf24 | /DataParser/DataParser/settings.py | dae1081c2349a6f3414aead9e32dbee48c5bbd29 | [
"MIT"
] | permissive | CodeBoss86/DataParser | ba988462de6e1cc1ae156e3407fbdea06fa5efc8 | c9e09f0975145a4ca0a3645699ee91adee49cd2c | refs/heads/main | 2023-01-19T01:51:31.178645 | 2020-11-17T13:38:47 | 2020-11-17T13:38:47 | 316,596,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,872 | py | """
Django settings for DataParser project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
# from corsheaders.defaults import default_headers
import os
from dotenv import load_dotenv
from pathlib import Path
# from celery.schedules import crontab
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
ENV_PATH = BASE_DIR / '.env'
load_dotenv(ENV_PATH)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): os.getenv returns a *string*; any non-empty value here
# (including "False") is truthy to Django -- confirm the intended parsing.
DEBUG = os.getenv('DEBUG')
# NOTE(review): raises AttributeError at import time if ALLOWED_HOSTS is
# unset (getenv returns None); consider os.getenv('ALLOWED_HOSTS', '').
splittedHosts = os.getenv('ALLOWED_HOSTS').split(',')
ALLOWED_HOSTS = splittedHosts
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
DJANGO_ALLOW_ASYNC_UNSAFE = True
ROOT_URLCONF = 'DataParser.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DataParser.wsgi.application'
ASGI_APPLICATION = 'DataParser.asgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': os.getenv('DB_HOST'),
'PORT': os.getenv('DB_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): ASGI_APPLICATION is assigned twice in this module (earlier as
# 'DataParser.asgi.application'); this later routing value is the one that wins.
ASGI_APPLICATION = 'DataParser.routing.application'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = False
| [
"mattode@outlook.com"
] | mattode@outlook.com |
d067e6af959341425c4f4216701f299296312454 | 8fefb8371047173a69576f994cf6e4a911d05f6f | /results/acorr_xx_comparision.py | 0c20ef8c3613348633a6e42e27bfa78c2f7a3d8d | [] | no_license | mannisen/Hybrid-Monte-Carlo | 0ebf8b1b054ea5df6735c3636ed004e8816bfdac | 3b9626bd8e39b7f5a66cce81b431f6bd7fd2dd6e | refs/heads/master | 2022-04-30T02:03:13.479369 | 2018-06-20T08:17:03 | 2018-06-20T08:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,631 | py | #!/usr/bin/env python
import numpy as np
from results.data.store import load, store
from hmc.potentials import Klein_Gordon as KG
from correlations.corr import twoPoint
from correlations.acorr import acorr as myAcorr
from correlations.errors import gW, windowing, uWerr, getW, autoWindow, acorrnErr
from common.acorr import plot
from common.utils import saveOrDisplay
# --- run configuration for the autocorrelation comparison script ---
file_name = __file__
save = True  # save the plot instead of displaying it (see saveOrDisplay)
m = 1.0  # mass parameter for the Klein-Gordon potential
n_steps = 40
step_size = 1/((3.*np.sqrt(3)-np.sqrt(15))*m/2.)/float(n_steps)
n, dim = 10, 1  # lattice size and dimension
spacing = 1.
n_samples = 100000
n_burn_in = 25
c_len = 10000  # number of autocorrelation separations computed
pot = KG(m=m)
x0 = np.random.random((n,)*dim)  # random initial lattice configuration
separations = range(c_len) # define how many separations for a/c
tau = n_steps*step_size  # trajectory length
# operator measured on each sample: zero-separation two-point function
opFn = lambda samples: twoPoint(samples, separation=0)
op_name = r'$\hat{O} = \sum_{pq} \Omega \phi_p\phi_q :\Omega = \delta_{p0}\delta_{q0}$'
my_loc = "results/data/numpy_objs/acorr_xx_comparison_alex.json"
comparison_loc = 'results/data/other_objs/acorr_xx_comparison_christian.pkl'
mixing_angle = .5*np.pi
subtitle = r"Potential: {}; Lattice: ${}$; $a={:.1f}; \delta\tau={:.2f}; n={}$".format(
    pot.name, x0.shape, spacing, step_size, n_steps)
def reObtain():
    """Re-obtains the MCMC samples - note that these are position samples
    not the sampled operator!

    Expectations :: this func relies on lots of variables above!

    NOTE(review): `Model`, `acorr` and `rand_steps` are not defined by the
    imports visible in this file — presumably supplied elsewhere (hmc /
    correlations packages); this function is not called by the script below.
    Python 2 print statements are used throughout.
    """
    print 'Running Model: {}'.format(file_name)
    rng = np.random.RandomState()
    model = Model(x0, pot=pot, spacing=spacing, rng=rng, step_size = step_size,
        n_steps = n_steps, rand_steps=rand_steps)
    c = acorr.Autocorrelations_1d(model)
    c.runModel(n_samples=n_samples, n_burn_in=n_burn_in, mixing_angle = mixing_angle, verbose=True)
    c._getSamples()
    # non-normalised autocorrelations so uWerr can do its own normalisation
    acs = c.getAcorr(separations, opFn, norm = False) # non norm for uWerr
    store(c.samples, file_name, '_alex')
    print 'Finished Running Model: {}'.format(file_name)
    # convenience locals pulled from the run (not returned)
    traj = c.trajs
    p = c.model.p_acc
    xx = c.op_mean
    return c.samples
#
def preparePlot(my_data, comparison_data, my_err=None, comparison_err=None):
    """Compares and plots the two datasets

    Required Inputs
    my_data :: np array :: y data as a benchmark
    comparison_data :: np array :: y data to compare to my_data
    my_err :: np array :: y errs as a benchmark
    comparison_err :: np array :: y errs to compare to my_data

    Returns the keyword dict consumed by common.acorr.plot().
    """
    # x axis: separation index converted to HMC time (tau = n_steps*step_size)
    x = np.asarray(separations)*step_size*n_steps
    my_x = x[:my_data.size]
    # NOTE(review): comparision_x is computed but never used below — the
    # comparison series is plotted against my_x; confirm that is intended.
    comparision_x = x[:my_data.size]
    # create the dictionary item to pass to plot()
    acns = {}
    acns[r'My $C_{\phi^2}(\theta = \pi/2)$'] = (my_x, my_data, my_err) # add my data
    acns[r'C.H. $C_{\phi^2}(\theta = \pi/2)$'] = (my_x, comparison_data, comparison_err) # add christians data
    # Bundle all data ready for Plot() and store data as .pkl or .json for future use
    all_plot = {'acns':acns, 'lines':{}, 'subtitle':subtitle, 'op_name':op_name}
    return all_plot
import math as math
class Christian_Autocorrelation(object):
    """Written by Christian Hanauer

    Reference implementation of the normalised autocorrelation function and
    of the integrated autocorrelation time, kept for cross-checking.
    NOTE(review): Python 2 only — acf() relies on map() returning a list,
    and tauintW() uses `plt`, which is never imported in this file.
    """
    def __init__(self,data):
        self.data = np.asarray(data)
        self.N = len(data)
    def acf(self,t=None):
        # Normalised autocorrelation function according to (E.7) in [2].
        # With an explicit lag t, returns a single coefficient; otherwise
        # returns the whole array of coefficients for lags 0..N-1.
        mean = np.mean(self.data)
        var = np.sum((self.data - mean) ** 2) / float(self.N)
        if t!=None:
            return ((self.data[:self.N - t] - mean) * (self.data[t:] - mean)).sum() / float(self.N-t) / var
        else:
            def r(t):
                acf_lag = ((self.data[:self.N - t] - mean) * (self.data[t:] - mean)).sum() / float(self.N-t) / var
                return acf_lag
            x = np.arange(self.N) # Avoiding lag 0 calculation
            # NOTE(review): Python 2 map() — under Python 3 this wraps a map
            # object into a 0-d array instead of the coefficient array.
            acf_coeffs = np.asarray(map(r, x))
            return acf_coeffs
    def tauintW(self,plateau_plot=True,spare=100,S=1):
        # Calculation of tau_int, finding the summation window W according
        # to [1] (automatic windowing: stop when g(W) changes sign).
        g = 1.
        mathexp = math.exp
        mathsqrt = math.sqrt
        mathlog = math.log
        tint = 0.5
        tplot = [0.5]
        for i in range(1, self.N):
            tint += self.acf(i)
            tplot = np.append(tplot,tint)
            tau = S/mathlog( (2.*tint+1)/(2.*tint-1) )
            g = mathexp(-i/tau) - tau/mathsqrt(i*self.N)
            if g < 0:
                # window found; keep accumulating `spare` extra points so the
                # plateau is visible in the diagnostic plot
                W = i
                thelp = tint
                for j in range(spare):
                    thelp += self.acf(i)
                    tplot = np.append(tplot,thelp)
                break
        # NOTE(review): if g never becomes negative, W is unbound below.
        if plateau_plot == True:
            fig_plateau = plt.figure()
            ax_plateau = fig_plateau.add_subplot(111)
            ax_plateau.plot(tplot)
            ax_plateau.plot((W, W), (0,tplot[W]))
            ax_plateau.set_xlabel(r'$W$')
            ax_plateau.set_ylabel(r'$\tau_{int}$')
            ax_plateau.set_title(r'$\tau_{int}$ in dependence of $W$')
        # error of tau_int per the standard windowing estimate
        dtint = np.sqrt(4*(W + 0.5-tint)/float(self.N)) * tint
        return tint,dtint,W
# --- script body (Python 2: bare print statements) ---
# load data from christian and use his data
comparison_xx = load(comparison_loc)
comparison_xx = np.asarray(comparison_xx)
print 'Comparing autocorrelation calculations...'
# assert that the autocorrelation routine is the same
av_xx = comparison_xx.mean()
# NOTE(review): `norm` is computed but never used below.
norm = ((comparison_xx-av_xx)**2).mean()
my_acorr = np.asarray(map(lambda s: myAcorr(comparison_xx, av_xx, s), np.asarray(separations)))
christian_class = Christian_Autocorrelation(comparison_xx)
christian_acorr = christian_class.acf()[:c_len]
christian_acorr = np.asarray(christian_acorr)
# compare the two autocorrelation implementations point by point
diffs = christian_acorr[:my_acorr.size] - my_acorr
print " > av. difference: {}".format(diffs.mean())
print 'Checking integration window calculation:'
christian_tint, christian_dtint, christian_w = christian_class.tauintW(False)
_,_,my_w = windowing(comparison_xx, av_xx, 1.0, comparison_xx.size, fast=True)
print "Christian Window:{}".format(christian_w)
print "My Window:{}".format(my_w)
ans = uWerr(comparison_xx, acorr=my_acorr) # get errors
_, _, _, itau, itau_diff, _, acns = ans # extract data
my_w = getW(itau, itau_diff, n=comparison_xx.size) # get window length
my_err = acorrnErr(acns, my_w, comparison_xx.size) # get autocorr errors
# (already an ndarray at this point; conversion repeated defensively)
christian_acorr = np.asarray(christian_acorr)
christian_err = acorrnErr(christian_acorr, christian_w, comparison_xx.size)
# plot the first 2*W points of both series with their error bars
all_plot = preparePlot(christian_acorr[:2*my_w], acns[:2*my_w], my_err[:2*my_w], christian_err[:2*my_w])
store(all_plot, file_name, '_allPlot')
plot(save = saveOrDisplay(save, file_name+"_compareAc"), **all_plot)
"alexander.mcfarlane@physics.org"
] | alexander.mcfarlane@physics.org |
f8b71f47242faeeccc05326262d862d05d57a7fe | e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1 | /BinarySearch/q374_guess_number_higher_or_lower.py | 000686ff073c0f98c294124c4f8a8ca531d32f01 | [] | no_license | sevenhe716/LeetCode | 41d2ef18f5cb317858c9b69d00bcccb743cbdf48 | 4a1747b6497305f3821612d9c358a6795b1690da | refs/heads/master | 2020-03-16T16:12:27.461172 | 2019-04-22T13:27:54 | 2019-04-22T13:27:54 | 130,221,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | # Time: O(n)
# Space: O(1)
# 解题思路:
# 二分查找
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
import bisect
pick = 0


def guess(num):
    """Oracle for the guessing game.

    Returns 0 when num equals the hidden number ``pick``, -1 when num is
    above it (my number is lower), and 1 when num is below it.
    """
    if num == pick:
        return 0
    return -1 if num > pick else 1
class Solution(object):
    # Classic binary search driven by the guess() oracle.
    def guessNumber(self, n):
        """
        :type n: int
        :rtype: int
        """
        low, high = 1, n
        while low <= high:
            middle = (low + high) // 2
            verdict = guess(middle)
            if verdict == 0:
                # the oracle confirms the hidden number
                return middle
            if verdict == 1:
                # hidden number is higher: drop the lower half
                low = middle + 1
            else:
                # hidden number is lower: drop the upper half
                high = middle - 1
        return -1
# Ternary search: shrinks the interval to a third each round, lowering the
# oracle-call complexity to roughly log3(2n).
class Solution1:
    def guessNumber(self, n):
        """
        :type n: int
        :rtype: int
        """
        low, high = 1, n
        while low <= high:
            # two probes splitting [low, high] into three parts
            mid1 = low + (high - low) // 3
            mid2 = high - (high - low) // 3
            res1, res2 = guess(mid1), guess(mid2)
            if res1 == 0:
                return mid1
            if res2 == 0:
                return mid2
            elif res1 < 0:
                # guess(mid1) == -1 -> hidden number is below mid1
                high = mid1 - 1
            elif res2 > 0:
                # guess(mid2) == 1 -> hidden number is above mid2
                low = mid2 + 1
            else:
                # hidden number lies strictly between the two probes
                low, high = mid1 + 1, mid2 - 1
        return -1
    def guessNumber1(self, n):
        # Treat guess() as a virtual sorted sequence C with C[i] = -guess(i):
        # -1 below the pick, 0 at it, +1 above it. bisect (bisect_right) for
        # -1 over [1, n) then lands exactly on the picked number.
        class C: __getitem__ = lambda _, i: -guess(i)
        return bisect.bisect(C(), -1, 1, n)
| [
"429134862@qq.com"
] | 429134862@qq.com |
7a511eb135175ba2fd1d17a4e839bf0087c8804b | 975cead91fffdfa58cc17e9e29b1bc26442af8d8 | /monitor.py | a3b93d2ab58c6d77d7590d66db4b97612aedaea3 | [] | no_license | Hellfire01/PostreSQL-Monitor | a3167d884b32a963fc6361ccef7f89f514271c8c | a911f18c0a0ada692b99f4c3de566907086a8f9c | refs/heads/master | 2022-11-21T12:36:32.802563 | 2020-07-24T01:22:35 | 2020-07-24T01:22:35 | 282,025,466 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,941 | py | # pip install psycopg2-binary
import psycopg2
import os
import re
class Monitor:
    """Reports PostgreSQL query statistics collected by pg_stat_statements.

    The report (top queries by calls, accumulated/average time, rows) is
    written to the console and/or a text file depending on the flags set
    in __init__.
    """
    # prepares the output file, truncating any previous one; returns the open
    # handle, or None when file output is disabled
    def prep_file(self):
        if self.writeToFile:
            try:
                os.remove(self.filename)
            except OSError:
                pass
            return open(self.filename, "w+")
        else:
            return None
    # runs the query and returns all result rows
    def get_records(self, query):
        self.cursor.execute(query)
        return self.cursor.fetchall()
    # writes the text to either the console and / or the output file
    def output_text(self, text):
        if self.writeOnConsole:
            print(text)
        if self.writeToFile:
            self.output_file.write(text)
    # adds a '0' in front of values < 10 in order to make the time values as
    # easy to read as possible
    def format_time_value(self, value):
        if value < 10:
            return '0' + str(value)
        else:
            return str(value)
    # formats a duration given in milliseconds as "[HHh ][MMm ][SSs ]X.XXms"
    def milliseconds_to_time(self, duration):
        # BUGFIX: this used to be `(duration % 1000) / 100`, which reported
        # the millisecond remainder 100x too small (500ms showed as "5.00ms").
        milliseconds = duration % 1000
        seconds = (duration / 1000) % 60
        minutes = (duration / (1000 * 60)) % 60
        hours = (duration / (1000 * 60 * 60)) % 24
        # once a larger unit is non-zero the smaller units are always shown,
        # keeping the columns aligned
        dispRest = False
        if int(hours) != 0:
            hours_s = self.format_time_value(int(hours)) + "h "
            dispRest = True
        else:
            hours_s = ''
        if int(minutes) != 0 or dispRest:
            minutes_s = self.format_time_value(int(minutes)) + "m "
            dispRest = True
        else:
            minutes_s = ''
        if int(seconds) != 0 or dispRest:
            seconds_s = self.format_time_value(int(seconds)) + "s "
            dispRest = True
        else:
            seconds_s = ''
        if dispRest:
            return hours_s + minutes_s + seconds_s + "{0:.2f}".format(milliseconds) + "ms"
        else:
            # sub-second durations get extra precision
            return hours_s + minutes_s + seconds_s + "{0:.4f}".format(milliseconds) + "ms"
    # only formats the value if it is a time value
    def format_time_values(self, value, timeValue):
        if timeValue:
            return self.milliseconds_to_time(value)
        else:
            return str(value)
    # right-pads / aligns the query id so the report columns line up
    def format_id(self, id):
        ref_id = id
        if ref_id < 0:
            ref_id = ref_id * -1  # ensures that the buffering will correctly take into account the length of the id
            ref_id = ref_id * 10  # to take into account the '-' at the start of a negative number
        i = 0
        value = 20
        buff = ''
        ref = 10
        # count the decimal digits of the id to compute the padding
        while i < value:
            if ref >= ref_id:
                break
            i += 1
            ref = ref * 10
        value = value - i
        while value > -1:
            value -= 1
            buff += ' '
        return buff + str(id)
    # displays just the start of the query (single line, collapsed whitespace)
    def print_start_of_query(self, query):
        buff = re.sub(' +', ' ', ''.join(query.splitlines()))
        if len(buff) > self.truncatedQuerySize:
            buff = buff[0 : self.truncatedQuerySize - 4] + " ..."
        return buff
    # gets the rows of the queries as tuples and makes them more user friendly
    def tuple_to_readable_string(self, _tuple, timeValue):
        buff = "Query id : " + self.format_id(_tuple[0]) + " => "
        indent = self.format_time_values(_tuple[2], timeValue)
        while len(indent) < self.indentBeforeCalls:
            indent += ' '
        buff += indent + " | " + str(_tuple[3])
        if _tuple[3] > 1:
            buff += " calls"
        else:
            buff += " call"
        if len(buff) < self.indentBeforeQuery:
            while len(buff) < self.indentBeforeQuery:
                buff += ' '
        buff += " Query : " + self.print_start_of_query(_tuple[1]) + "\n"
        return buff
    # makes a human-readable display of the query results
    def format_and_display(self, results, test, timeFormat):
        buff = "\n"
        buff += " =========\n"
        buff += test + "\n"
        buff += " =========\n"
        buff += "\n"
        for x in results:
            buff += self.tuple_to_readable_string(x, timeFormat)
        self.output_text(buff)
    # generates the WHERE clause that filters out the ignored query patterns
    def generate_ignore_queries(self):
        if self.useIgnore is False:
            return ""
        i = 0
        buff = " WHERE query NOT LIKE "
        for x in self.ignore:
            if i != 0:
                buff += " AND query NOT LIKE "
            buff += "'%" + x + "%'"
            i += 1
        return buff
    # generates the query while taking into account the parameters
    def get_query(self, query, orderBy):
        buff = "SELECT queryid, query, "
        buff += query + self.generate_ignore_queries() + " " + orderBy + " limit " + str(self.limit) + ";"
        if self.displayUsedQueries:
            self.output_text("\nused query : " + buff + "\n")
        return buff
    # gets the most used queries
    def get_most_used(self):
        self.format_and_display(self.get_records(self.get_query("mean_time, calls FROM pg_stat_statements", "ORDER BY calls DESC")),
                                'Queries that where the most used by amount', True)
    # gets the queries that required the most time accumulated
    def get_biggest_time_accumulated(self):
        self.format_and_display(self.get_records(self.get_query("total_time, calls FROM pg_stat_statements", "ORDER BY total_time DESC")),
                                'Queries that required the most time accumulated', True)
    # gets the queries that required the most time on average
    def get_biggest_time_average(self):
        self.format_and_display(self.get_records(self.get_query("mean_time, calls FROM pg_stat_statements", "ORDER BY mean_time DESC")),
                                'Queries that required the most time per use on average', True)
    # gets the queries that returned the most rows on average
    def get_most_rows_returned_average(self):
        self.format_and_display(self.get_records(self.get_query("rows / calls, calls FROM pg_stat_statements", "ORDER BY rows / calls DESC")),
                                'Queries that returned the most rows on average', False)
    # gets the queries that returned the most rows accumulated
    def get_most_rows_returned_accumulated(self):
        self.format_and_display(self.get_records(self.get_query("rows, calls FROM pg_stat_statements", "ORDER BY rows DESC")),
                                'Queries that returned the most rows accumulated', False)
    # adds the extension to the database should it not exist (needed in order
    # to get the stats)
    def init_the_database(self):
        self.cursor.execute('CREATE EXTENSION IF NOT EXISTS pg_stat_statements;')
        self.dbConnexion.commit()
    # checks if the string is an int
    def is_int(self, string):
        try:
            int(string)
            return True
        except ValueError:
            return False
    # parses all of the arguments given to the script in order to know what
    # to display; returns False (and prints an error) on bad input
    def parse_instructions(self, argv):
        i = 0
        for x in argv:
            i += 1
            if i < 2:  # ignoring the first argument ( connection string )
                continue
            if i == len(argv):  # extracting the amount of required queries
                if self.is_int(x) is False:
                    self.output_text("The limit ( last argument ) was not recognised as an integer. Got '" + x + "'")
                    return False
                self.limit = int(x)
                if self.limit <= 0:
                    self.output_text("Error : the limit should not be <= 0, got : " + str(self.limit))
                    return False
                continue
            if x == "mostUsed":
                self.displayMostUsed = True
                continue
            if x == "longestTimeAccumulated":
                self.displayLongestTimeAccumulated = True
                continue
            if x == "longestTimeOnAverage":
                self.displayLongestTimeOnAverage = True
                continue
            if x == "mostRowsReturnedAccumulated":
                self.displayMostRowsReturnedAccumulated = True
                continue
            if x == "mostRowsReturnedAverage":
                self.displayMostRowsReturnedAverage = True
                continue
            if x == "all":
                self.displayMostUsed = True
                self.displayLongestTimeAccumulated = True
                self.displayLongestTimeOnAverage = True
                self.displayMostRowsReturnedAccumulated = True
                self.displayMostRowsReturnedAverage = True
                continue
            self.output_text("Error : unknown argument '" + x + "'\n'")
            self.output_text("Known arguments are : 'mostUsed', 'longestTimeAccumulated', 'longestTimeOnAverage', 'mostRowsReturnedAccumulated', 'mostRowsReturnedAverage' and 'all' \n")
            return False
        return True
    # prints the query text matching the given queryid
    def print_query_from_id(self, argv):
        if self.is_int(argv[1]) is False:
            self.output_text("Error : unknown argument '" + argv[1] + "\nExpected a query id as argument if no other instructions are given")
            return False
        buff = "\n"
        buff += " =========\n"
        buff += "displaying query of queryid " + str(argv[1]) + "\n"
        buff += " =========\n"
        buff += "\n"
        self.cursor.execute("SELECT query FROM pg_stat_statements WHERE queryid = %s", (str(argv[1]),))
        res = self.cursor.fetchall()
        if len(res) == 0:
            buff += "Error : there is no queries matching that id"
        else:
            buff += res[0][0]
        buff += "\n"
        self.output_text(buff)
        return
    # runs the displays selected by parse_instructions()
    def exec_instructions(self):
        if self.displayMostUsed:
            self.get_most_used()
        if self.displayLongestTimeAccumulated:
            self.get_biggest_time_accumulated()
        if self.displayLongestTimeOnAverage:
            self.get_biggest_time_average()
        if self.displayMostRowsReturnedAverage:
            self.get_most_rows_returned_average()
        if self.displayMostRowsReturnedAccumulated:
            self.get_most_rows_returned_accumulated()
    # executes the instruction with the given arguments
    # argv[0] is the libpq connection string; with exactly one extra argument
    # it is treated as a queryid to display, otherwise as display instructions
    def run(self, *argv):
        if len(argv) < 2:
            self.output_text("This class needs at least 2 arguments to be able to run")
            exit(0)
        try:
            # get a connection, if a connect cannot be made an exception will be raised here
            self.dbConnexion = psycopg2.connect(argv[0])
            self.cursor = self.dbConnexion.cursor()
            self.init_the_database()
        except Exception as e:
            self.output_text("Error : could not connect to the database with connection string \"" + argv[0] + "\"")
            self.output_text("Message is : " + str(e))
            exit(2)
        if len(argv) == 2:
            self.print_query_from_id(argv)
        else:
            if self.parse_instructions(argv):
                self.exec_instructions()
    def __init__(self):
        # ============================
        # ========== CONFIG ==========
        # ============================
        # bool to choose to use ( True ) the ignore array or not ( False )
        self.useIgnore = True
        # array of strings used to ignore queries that have at least one of these strings
        self.ignore = [
            "pg_stat_statements",  # ignoring these requests as they are generated while looking for relevant information
            "pg_catalog",  # ignoring these requests as they are generated while looking for relevant information
            "ALTER TABLE",
            "TRUNCATE TABLE",
            "CREATE TABLE"
        ]
        # === output configuration ===
        # bool used to determine if the output is to be written on the console or not
        self.writeOnConsole = True
        # bool used to determine if the output is to be written in a file or not
        self.writeToFile = True
        # path of the file in witch the output can be written. Must include the file name
        self.filename = "output.txt"
        # === display ===
        # Size of the truncated query being displayed ( must be an integer > 5 to work properly )
        # Will not truncate queries that are shorter than the truncatedQuerySize
        self.truncatedQuerySize = 60
        # size of the alignment ( start of line until the Query keyword ) for the queries ( must me a positive integer to work )
        self.indentBeforeQuery = 65
        # size of the alignment for calls ( from end of '=>' until '|', must me a positive integer to work )
        self.indentBeforeCalls = 10
        # === debug ===
        # bool used to display the queries that the program uses itself to get the displayed information ( True ) or not ( False )
        self.displayUsedQueries = False
        # ============================
        # ==== RUNTIME VARIABLES =====
        # ============================
        # preparing the output file
        self.output_file = self.prep_file()
        # booleans used to determine what needs printing ( set to true with arguments of the same name passed to the script )
        # these values are NOT configuration related, they are overwritten by user input by command line
        self.displayMostUsed = False
        self.displayLongestTimeAccumulated = False
        self.displayLongestTimeOnAverage = False
        self.displayMostRowsReturnedAccumulated = False
        self.displayMostRowsReturnedAverage = False
        self.limit = -1
| [
"matthieu.raynaud-de-fitte@epitech.eu"
] | matthieu.raynaud-de-fitte@epitech.eu |
cffa411da03c28e702e520074cc6b27270806d47 | 163efa79c0e9e7b05de1896f04568149a0c2a3c1 | /class2/mavros培训课程 - mavros源码/mavros/devel/lib/python2.7/dist-packages/mavros_msgs/msg/_VFR_HUD.py | 133140eed9c0bde170fb0ab23c2174568d373196 | [] | no_license | ssb418r/PX4_class | 123e4a896d94481a63eecf97e07c10925894bd82 | e6ca2ccfc7e643186fea1edd65064804eff943e2 | refs/heads/master | 2020-06-02T00:44:13.158636 | 2018-05-20T05:50:28 | 2018-05-20T05:50:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | /home/nvidia/mavros/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/msg/_VFR_HUD.py | [
"sundxfansky@sjtu.edu.cn"
] | sundxfansky@sjtu.edu.cn |
b1347c88770f1eb0a81a06dfaf9e693cbf5b465a | b4afd14e3b4e9cff0a99906a69587e348b243aeb | /mocc/beida/pythonds/stackop.py | 424989f76facb6d29739792e959118a1d1b1b7d9 | [] | no_license | zhankq/pythonlearn | d694df23826cda6ba662e852e531e96a10ab2092 | cb714fbb8257193029f958e73e0f9bd6a68d77f1 | refs/heads/master | 2021-12-16T13:51:23.381206 | 2021-12-03T01:13:36 | 2021-12-03T01:13:36 | 205,632,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self,item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items)-1]
def size(self):
return len(self.items)
| [
"zhankq@163.com"
] | zhankq@163.com |
838c794f9381fde24a555dad49568e7e201e913b | 6bd9630ac5b8454b0e82417d515f849f7781de9d | /github_practice/github_practice/asgi.py | fba9048d342befee7fcb5752636a4ef5a7025093 | [] | no_license | sumin1007/github_practice | 8738d52aa5f8300f2ab144738b89c8f64f5bc566 | ee9deccc78b84fd1ecd304cc78210d4f1209453a | refs/heads/master | 2022-12-07T16:12:34.745446 | 2020-08-24T07:05:26 | 2020-08-24T07:05:26 | 289,850,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | """
ASGI config for github_practice project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Default settings module for ASGI servers; a pre-set environment value wins.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'github_practice.settings')

# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"suminmusic99@gmail.com"
] | suminmusic99@gmail.com |
6c9c986c0aa21d179925835cfeb9197301d89797 | fcd83de98a15b86d8da5c2f1fb06c4539384b24d | /피보나치/동적계획법-기본(2).py | ec2f3f2167a964a3d1da28651809a23a724e56a7 | [] | no_license | ckdgns3167/Algorithm-study | ce81197763e283fe9ae547a90a5844531a014724 | d5c64cf5349ae68602ffc64894d2b0c1e2e68579 | refs/heads/main | 2023-03-04T15:23:58.918116 | 2021-02-14T13:15:06 | 2021-02-14T13:15:06 | 338,556,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | def fibo(n):
if n < 2:
return n
cache = [0 for _ in range(n + 1)]
cache[1] = 1
for i in range(2, n + 1):
cache[i] = cache[i - 1] + cache[i - 2]
return cache[n]
print(fibo(10))
| [
"ckdgns3167@gmail.com"
] | ckdgns3167@gmail.com |
e2c838e054da771fdf7fc3c86b52fcdc82e340ba | f4407ed3bdca1b6dfb2387c83dc965f8afefe70b | /venv/Scripts/tabulate-script.py | 67ed7da0790e6645ef01b55722a4e04d5bd2316f | [] | no_license | HanshengT/Event-Receiver | 0932619c468624406b27cb1400ef76bce5b57e35 | cdbd15373a032bbb450d6e97da1333031c2afabd | refs/heads/master | 2021-05-25T21:59:01.998846 | 2020-04-15T09:21:20 | 2020-04-15T09:21:20 | 253,937,011 | 0 | 0 | null | 2021-05-06T20:05:28 | 2020-04-07T23:37:06 | Python | UTF-8 | Python | false | false | 461 | py | #!"C:\Users\15819\Desktop\CIT\3855 Architectures\sharing_power_bank\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'tabulate==0.8.6','console_scripts','tabulate'
__requires__ = 'tabulate==0.8.6'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('tabulate==0.8.6', 'console_scripts', 'tabulate')()
)
| [
"hansheng.tao@gmail.com"
] | hansheng.tao@gmail.com |
3cd329b8c34f33fda57e67ec19ffd58aa08cc7d6 | 6044266e775c87afed99397c8bb88366fbbca0e7 | /scrapy_projt/python_itertools/zip_longest_fillvalue.py | b9edce215a1bab2bb5e70645bae16021409cd99a | [] | no_license | ranafge/all-documnent-projects | e4434b821354076f486639419598fd54039fb5bd | c9d65ddea291c53b8e101357547ac63a36406ed9 | refs/heads/main | 2023-05-08T20:01:20.343856 | 2021-05-30T10:44:28 | 2021-05-30T10:44:28 | 372,186,355 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | from itertools import zip_longest
import re
from itertools import chain
# Interleave text fragments with dates; fillvalue='' pads the shorter list
# and the `if w` filter drops those empty padding items.
dates = ['21/11/2044', '31/12/2018', '23/9/3000', '25/12/2007']
text = ['What are dates? ', ', is an example.\n', ', is another format as well.\n',
        ', also exists, but is a bit ludicrous\n', ', are examples but more commonly used']
print([w for x in zip_longest(text, dates, fillvalue='') for w in x if w])
ls = ['1 Paris-SG 42 20 13 3 4 +33',
      '2 Lille 42 20 12 6 2 +20',
      '3 Lyon 40 20 11 7 2 +20',
      '4 Monaco 36 20 11 3 6 +10']
# split each row into at most 3 fields: rank, team name, remaining stats
convert_2d_list = [i.split(maxsplit=2) for i in ls]
print(convert_2d_list)
# sample mapping used by check_value_return_key / find_key below
my_list_dict = {
    'L1': ['a', 'b', 'c', 'd'],
    'L2': ['e', 'f', 'g', 'h']
}
def check_value_return_key(c, mapping=None):
    """Return the key of the first list in *mapping* containing ``c``.

    Falls back to the module-level ``my_list_dict`` when *mapping* is None
    (backward compatible with the original single-argument call).
    Returns None when no list contains ``c``.
    """
    if mapping is None:
        mapping = my_list_dict
    for k, v in mapping.items():
        if c in v:
            return k
    # BUGFIX: the original returned None inside the loop, so only the FIRST
    # dict entry was ever inspected; give up only after checking them all.
    return None
# expected: 'L2' ('g' lives in the L2 list of my_list_dict)
print(check_value_return_key('g'))
def find_key(c, mapping=None):
    """Return the key whose value list contains ``c``.

    Falls back to the module-level ``my_list_dict`` when *mapping* is None
    (backward compatible with the original single-argument call).
    Raises Exception when ``c`` appears in no list.
    """
    if mapping is None:
        mapping = my_list_dict
    for k, v in mapping.items():
        if c in v:
            return k
    # BUGFIX: the original raised as soon as the first entry missed, so keys
    # stored in later entries could never be found; raise only after the
    # whole mapping has been searched.
    raise Exception("value '{}' not found".format(c))
a = [[[5],[3]],[[4],[5]],[[6],[7]]]
# flatten one level of nesting inside each sublist: [[5],[3]] -> [5, 3]
print([list(chain.from_iterable(l)) for l in a])
my_list = [0, 1, 2, 2, 1, 20, 21, 21, 20, 3, 23, 22]
# dense-rank mapping: each distinct value -> its index in sorted order
num_map = {j:i for i, j in enumerate(sorted(set(my_list)))}
print(num_map)
| [
"ranafge@gmail.com"
] | ranafge@gmail.com |
567e938c3da300c10dac470fe7bba73fefa308e1 | 8ca34f6da28f4b2cb2ae7a242e2156581426a950 | /apps/customer/migrations/0006_remove_job_job_type_remove_job_status.py | 501fd52e93fbb98072802b9b099caa2cb8297ea6 | [] | no_license | gray-adeyi/prime | 7e2360424560beb24742f93aa3f7b3b5cd484150 | 83b728db767e6f1b2237e10400fa95861ce1c8f3 | refs/heads/main | 2022-06-17T19:00:52.432315 | 2022-05-19T10:19:56 | 2022-05-19T10:19:56 | 225,469,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # Generated by Django 4.0.3 on 2022-05-04 10:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('customer', '0005_alter_job_copies'),
]
operations = [
migrations.RemoveField(
model_name='job',
name='job_type',
),
migrations.RemoveField(
model_name='job',
name='status',
),
]
| [
"adeyigbenga005@gmail.com"
] | adeyigbenga005@gmail.com |
3c8b15aa00bf6c7f9730f113b9984681a6c8a727 | e5744bf041d5cb2ae5521ad4596c8d32f10332bf | /simpleBlockchain/lib/python3.6/__future__.py | 9ee18e45166aa7670a767c30efba5fe9825c771f | [] | no_license | barkbaek/blockchains | 8678bd1f47993c6f72a0c5562efe81f7bc2f06ca | a79ac433fa97db7a4e48bee6acc7dce8770ceb21 | refs/heads/master | 2020-04-11T11:48:45.400140 | 2018-12-16T10:38:30 | 2018-12-16T10:38:30 | 161,760,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | /Users/maway/.pyenv/versions/3.6.0/lib/python3.6/__future__.py | [
"barkbaek@gmail.com"
] | barkbaek@gmail.com |
62fc3c89e7939ee66309da0c228d3a0ca205b6c6 | 71eb367210e8ffd3b4964a8c99e3ac6f2920fdbb | /wedding/management/commands/make_backup.py | f92cd208fa723e3a4afbcc78c347424c2bb91e03 | [
"MIT"
] | permissive | jsayles/wedding | 392771dc894fb311414b2d34ceb4319318d8eefb | 242d28d0271d58909b2c5ff5457d909efaecd3c0 | refs/heads/master | 2020-04-18T01:26:57.433729 | 2015-09-04T15:18:03 | 2015-09-04T15:18:03 | 28,720,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | import os
import time
import urllib
import sys
import datetime
from django.core.management.base import BaseCommand, CommandError
from wedding.backup import BackupManager
class Command(BaseCommand):
    """Management command: create a backup (SQL dump + media files) via BackupManager
    and print the result. Python 2 syntax (bare print statement in handle)."""
    help = "Creates a backup containing an SQL dump and the media files."
    args = ""
    # NOTE(review): requires_model_validation was removed in later Django
    # releases — confirm against the project's Django version.
    requires_model_validation = False

    def handle(self, *labels, **options):
        manager = BackupManager()
        print manager.make_backup()
# Copyright 2011 Trevor F. Smith (http://trevor.smith.name/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| [
"jsayles@gmail.com"
] | jsayles@gmail.com |
b581261136eb5820caa1c37ee4e42eee9145a808 | 32dda10669e459cf37c31f426fa709001d2c75b0 | /leetcode_cn/solved/pg_709.py | 3d384ea50d36704b8ae5931bf4436c70958659b5 | [] | no_license | fastso/learning-python | 3300f50d06871245d0bfcbe9d201224580f70852 | d21dbd1b9f31017cdb1ed9b9ffd1e53ffe326572 | refs/heads/master | 2023-02-10T14:43:53.726247 | 2023-01-26T10:14:59 | 2023-01-26T10:14:59 | 193,454,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | class Solution:
def toLowerCase(self, s: str) -> str:
l = list(s)
for i in range(len(l)):
o = ord(l[i])
if 64 < o < 91:
o += 32
l[i] = chr(o)
return ''.join(l)
| [
"fastso.biko@gmail.com"
] | fastso.biko@gmail.com |
f31d850912894f5a92afd874db04ad58b6356a80 | 0c0e152a3e2fc5670635beb0ec96024cd2863ca6 | /e2e/test_mkldnn.py | 5a928bf07df258dc4f461defe4850880cb060548 | [] | no_license | yzhliu/topi-intel | 69ec599c18b83b93691ca724ec6a9b42beb3c9ef | 67e273bd4bb10a316b50cc1879c1cebac1ea91f8 | refs/heads/master | 2021-09-15T10:36:01.825237 | 2018-05-30T19:26:17 | 2018-05-30T19:26:17 | 116,441,458 | 1 | 0 | null | 2018-03-29T14:13:14 | 2018-01-06T00:48:47 | Python | UTF-8 | Python | false | false | 3,148 | py | import mxnet as mx
import numpy as np
import time
import nnvm.testing
import tvm
from collections import namedtuple
from tvm.contrib import graph_runtime
from mxnet.gluon.model_zoo.vision import get_model
# Batch = namedtuple('Batch', ['data'])
num_pass = 1000
def end2end_benchmark(model, target, batch_size):
    """Benchmark one Gluon model end-to-end on CPU, MXNet/MKL vs TVM.

    Loads the pretrained Gluon model, runs it `num_pass` times through an
    MXNet executor and 20 times through a TVM graph runtime built for
    `target`, prints both mean latencies (ms) and cross-checks the outputs.

    NOTE(review): the MKL loop runs num_pass (=1000) iterations while the
    TVM loop runs only 20 — confirm that asymmetry is intentional.
    """
    num_classes = 1000
    image_shape = (3, 224, 224)
    data_shape = (batch_size,) + image_shape
    out_shape = (batch_size, num_classes)
    # random input image batch shared by both backends
    data_array = np.random.uniform(0, 255, size=data_shape).astype("float32")
    mx_data = mx.nd.array(data_array)
    block = get_model(model, pretrained=True)
    block.hybridize()
    block(mx_data)  # one forward pass so the hybridized graph is built
    # convert the Gluon block to an NNVM graph and compile it for `target`
    net, params = nnvm.frontend.from_mxnet(block)
    ctx = tvm.cpu()
    opt_level = 2
    with nnvm.compiler.build_config(opt_level=opt_level):
        graph, lib, params = nnvm.compiler.build(net, target, shape={"data": data_shape}, params=params)
    module = graph_runtime.create(graph, lib, ctx)
    module.set_input(**params)
    # export + reload the symbol so a plain MXNet executor can be bound
    block.export("symbol/" + model)
    sym, arg_params, aux_params = mx.model.load_checkpoint("symbol/" + model, 0)
    args = dict(arg_params, **{'data': mx.nd.empty(data_shape)})
    exec_ = sym.bind(ctx=mx.cpu(), args=args, aux_states=aux_params)
    mx_data.copyto(exec_.arg_dict['data'])
    mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)
    mod.bind(for_training=False, data_shapes=[('data', data_shape)],
             label_shapes=mod._label_shapes)
    mod.set_params(arg_params, aux_params, allow_missing=True)
    # --- MXNet (MKL) timing loop ---
    times = []
    for i in range(num_pass):
        s = time.time()
        exec_.forward()
        for output in exec_.outputs:
            output.wait_to_read()  # force sync so the timing is real
        mkl_time = time.time() - s
        times.append(mkl_time)
        mxnet_out = output
    print("MKL %s inference time for batch size of %d: %f" % (model, batch_size, np.mean(times) * 1000))
    # --- TVM timing loop ---
    input_data = tvm.nd.array(data_array, ctx=ctx)
    module.set_input('data', input_data)
    times = []
    for i in range(20):
        s = time.time()
        module.run()
        tvm_time = time.time() - s
        times.append(tvm_time)
    print("TVM %s inference time for batch size of %d: %f" % (model, batch_size, np.mean(times) * 1000))
    tvm_out = module.get_output(0, out=tvm.nd.empty(out_shape))
    # both backends must agree to ~2 decimals on the logits
    np.testing.assert_array_almost_equal(tvm_out.asnumpy(), mxnet_out.asnumpy(), decimal=2)
if __name__ == "__main__":
    import logging
    # logging.basicConfig(level=logging.DEBUG)
    import sys
    batch_size = 1
    # alternative targets kept for reference:
    # target = "llvm"
    # target = "llvm -mcpu=core-avx2"
    target = 'llvm -mcpu=skylake-avx512' # export TVM_NUM_THREADS=4 on c5xlarge
    # with one CLI argument, benchmark that model; otherwise the default below
    if len(sys.argv) == 2:
        end2end_benchmark(sys.argv[1], target, batch_size)
    else:
        # end2end_benchmark('mobilenet1.0', target, batch_size)
        # end2end_benchmark('resnet18_v1', target, batch_size)
        # end2end_benchmark('resnet34_v1', target, batch_size)
        end2end_benchmark('resnet50_v1', target, batch_size)
        # end2end_benchmark('resnet101_v1', target, batch_size)
        # end2end_benchmark('resnet152_v1', target, batch_size)
| [
"yizhiliu@amazon.com"
] | yizhiliu@amazon.com |
b6c69394d9cb24e853932d6a9d1f96608694f81a | 79b1d3d8ffbda5297fff6fefe2528e303bf2110a | /RSGGenFragment/RSToQQ/RSGravitonToQuarkQuark_W-0p25_M_1500_TuneCUETP8M1_13TeV_pythia8_cfi.py | 6e503b562929e62717577f7d52137212a9732aca | [] | no_license | yguler/MCFragments-1 | 25745a043653d02be3a4c242c1a85af221fc34b3 | 7c4d10ee59e00f997221109bf006819fd645b92f | refs/heads/master | 2021-01-13T14:09:12.811554 | 2016-12-11T15:57:37 | 2016-12-11T15:57:37 | 76,184,433 | 0 | 0 | null | 2016-12-11T15:59:22 | 2016-12-11T15:59:22 | null | UTF-8 | Python | false | false | 1,323 | py | import FWCore.ParameterSet.Config as cms
# Standard Pythia8 common settings and CUETP8M1 tune parameter blocks.
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 generator filter configuration:
#   * comEnergy 13000 -> 13 TeV collisions.
#   * process switch ExtraDimensionsG*:ffbar2G* produces particle id 5100039
#     with m0 = 1500 (the M_1500 mass point of this fragment's sample name;
#     presumably GeV in Pythia conventions -- confirm against Pythia docs).
#   * onMode off / onIfAny 1 2 3 4 5 restricts its decays to ids 1-5,
#     i.e. quark pairs, matching "GravitonToQuarkQuark" in the sample name.
#   * crossSection / filterEfficiency are untracked bookkeeping values.
generator = cms.EDFilter("Pythia8GeneratorFilter",
        comEnergy = cms.double(13000.0),
        crossSection = cms.untracked.double(0.00000782),
        filterEfficiency = cms.untracked.double(1),
        maxEventsToPrint = cms.untracked.int32(0),
        pythiaHepMCVerbosity = cms.untracked.bool(False),
        pythiaPylistVerbosity = cms.untracked.int32(1),
        PythiaParameters = cms.PSet(
            pythia8CommonSettingsBlock,
            pythia8CUEP8M1SettingsBlock,
            processParameters = cms.vstring(
                'ExtraDimensionsG*:ffbar2G* = on',
                'ExtraDimensionsG*:kappaMG = 2.276101242',
                '5100039:m0 = 1500',
                '5100039:onMode = off',
                '5100039:onIfAny = 1 2 3 4 5'
            ),
            parameterSets = cms.vstring('pythia8CommonSettings',
                'pythia8CUEP8M1Settings',
                'processParameters',
            )
        )
)
# Sequence consumed by the production configuration.
ProductionFilterSequence = cms.Sequence(generator)
| [
"emine.gurpinar@cern.ch"
] | emine.gurpinar@cern.ch |
a1ce232ec915a2c3e2d5b990c4d6988132fc0f29 | 760376c7f94710f415eefb5445410b609d64b85a | /Actions/keypairCheck.py | cd55b0764412aabd0e5d4c5e2285a9b8e7ce59f0 | [] | no_license | paoloromagnoli/cas-blueprint | 5e1d4507b4827dbdb87276d2b7ef769f391bcfc1 | 3055197711b3d1542dd406f60f2424cd4770ebe6 | refs/heads/master | 2020-05-18T17:57:33.532885 | 2019-09-20T16:08:12 | 2019-09-20T16:08:12 | 184,571,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | import boto3
import json
def handler(context, inputs):
    """Check whether the EC2 key pair named by the 'Deploy' tag exists.

    :param context: action execution context (unused here).
    :param inputs: dict containing a "tags" mapping; tags["Deploy"] holds the
        key-pair name to look up.
    :return: dict with "keypair_exists" set to 1 or 0 and, when the key pair
        was found, the raw describe_key_pairs "response".
    """
    ec2 = boto3.client('ec2')
    tags = inputs["tags"]
    key_name = tags["Deploy"]  # Name of the tag used to create the key pair
    outputs = {}
    try:
        response = ec2.describe_key_pairs(KeyNames=[key_name])
    except ec2.exceptions.ClientError:
        # describe_key_pairs raises a ClientError (InvalidKeyPair.NotFound)
        # for a missing key pair.  The original bare `except:` also swallowed
        # unrelated failures -- even KeyboardInterrupt/SystemExit -- so catch
        # only the AWS API error here.
        outputs["keypair_exists"] = 0
    else:
        if response is not None:
            outputs["keypair_exists"] = 1
            outputs["response"] = response
    return outputs
| [
"noreply@github.com"
] | noreply@github.com |
12f95b7b1627cb9a90694bcbf2fbb0476b9970c6 | b47835df229ca830d0c9723c869ba489a10d62cc | /exercise/exercise5.py | 594e79b951a28572467e11c7cedb4c66d13c391a | [
"MIT"
] | permissive | LilyHeAsamiko/Audio-signal-processing | 0e734529c86cbc1e9bb709d874da511c8fd58fa8 | 7401b851f4cad73f2d48f20cf5d8dd2c239278cc | refs/heads/master | 2020-06-02T05:12:04.910365 | 2019-09-30T09:22:47 | 2019-09-30T09:22:47 | 191,048,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,532 | py |
# coding: utf-8
import numpy
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import pi
import scipy
from scipy import signal
from scipy.io import wavfile
import librosa
# Q1: math exercise (1 pt in total)
## A. (0.5 pt)
# If the vocal tract length is 15cm, and you discretize it with a 48 kHz sampling rate,
# how many discrete sampling periods it does take for a sound wave (340 m/s) to travel the vocal tract?
#0.15/340*48000 = 21.176470588235293
## B. (0.5 pt)
# What is the reflection coefficient k when a sound passes from section with area 1cm^2 to 2cm^2?
# k=1/3
# Q2: programming exercise (1 pt in total)
# read in the audio file
# fs,x = wavfile.read('rhythm_birdland.wav')
fs,x = wavfile.read('oboe59.wav')
# Downsample by a factor of 4 with an IIR anti-aliasing filter; the sampling
# rate is reduced to match on the next line.
x = signal.decimate(x,4,ftype='iir')
# NOTE(review): under Python 3 this division yields a float (the py2 raise
# syntax in smooth() suggests this file targets Python 2) -- int(fs) below
# compensates for piptrack, but confirm the intended interpreter.
fs=fs/4
# normalize x so that its value is between [-1.00, 1.00] (0.1 pt)
x = x.astype('float64') / float(numpy.max(numpy.abs(x)))
## A. (0.5 pt)
# MFCCs are useful features in many speech applications.
# Follow instructions below to practice your skills in feature extraction.
# use librosa to extract 13 MFCCs
mfccs = librosa.feature.mfcc(y=x, sr=fs, S=None, n_mfcc=13)
# Visualize the MFCC series
plt.figure(figsize=(10, 4))
plt.pcolormesh(mfccs)
plt.colorbar()
plt.title('MFCC')
plt.tight_layout()
plt.show()
## B. (0.5 pt)
# extract pitch using librosa
# set a windowsize of 30 ms
window_time = 30
# Pitch search range in Hz.
fmin = 80
fmax = 350
# set an overlap of 10 ms
overlap = 10
total_samples = len(x)
# there are sample_f/1000 samples per ms
# calculate number of samples in one window
window_size = fs/1000 * window_time
# NOTE(review): despite the name, this is total_samples divided by the window
# size (a window count, not a per-frame sample hop), and piptrack expects an
# integer hop_length -- under Python 3 this division produces a float.
hop_length = total_samples / window_size
# calculate number of windows needed
needed_nb_windows = total_samples / (window_size - overlap)
n_fft = needed_nb_windows * 2.0
# extract pitch
# th_value is sensitive in pitch tracking.
# change the th_value and check if the pitch track is what you desired.
th_value = 100
pitches, magnitudes = librosa.core.piptrack(x, int(fs), n_fft= int(n_fft), hop_length=hop_length, fmin=fmin, fmax=fmax, threshold=th_value)
# piptrack returns (frequency-bin, frame) matrices.
shape = numpy.shape(pitches)
nb_samples = shape[0]
nb_windows = shape[1]
# some post-processing
def extract_max(pitches, magnitudes, shape):
    """Collapse each frame (column) of the piptrack matrices to its peak.

    :param pitches: (bins, frames) pitch matrix from librosa.piptrack.
    :param magnitudes: matching (bins, frames) magnitude matrix.
    :param shape: the matrices' shape tuple; shape[1] is the frame count.
    :return: tuple (per-frame max pitches, per-frame max magnitudes).
    """
    num_frames = shape[1]
    peak_pitches = [numpy.max(pitches[:, frame]) for frame in range(num_frames)]
    peak_magnitudes = [numpy.max(magnitudes[:, frame]) for frame in range(num_frames)]
    return (peak_pitches, peak_magnitudes)
def smooth(x, window_len=11, window='hanning'):
    """Smooth a 1-D signal with a reflected-edge windowed moving average.

    The signal is mirror-extended at both ends (so the convolution does not
    introduce edge transients), convolved with a normalised window, then
    trimmed back to the original length.

    :param x: 1-D numpy array to smooth (needs len(x) >= window_len).
    :param window_len: smoothing window size; values < 3 disable smoothing
        and return ``x`` unchanged.
    :param window: one of 'flat', 'hanning', 'hamming', 'bartlett',
        'blackman'; 'flat' gives a plain moving average.
    :return: smoothed signal with the same length as ``x``.
    :raises ValueError: if ``window`` is not a recognised window name.
    """
    if window_len < 3:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        # Fixed: Python 3 raise syntax (the original used the Python 2
        # "raise ValueError, msg" form) and the "on of" typo in the message.
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Mirror the signal about both end points to suppress edge effects.
    s = numpy.r_[2 * x[0] - x[window_len - 1::-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Look the window function up by name instead of using eval().
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode='same')
    # Trim the mirrored padding so the output matches the input length.
    return y[window_len:-window_len + 1]
# Keep only the strongest pitch/magnitude per frame, then smooth the track.
pitches, magnitudes = extract_max(pitches, magnitudes, shape)
pitches1 = smooth(pitches,window_len=30)
plt.figure(figsize=(20, 22))
# Time axis (seconds) matching the smoothed pitch track's length.
t_vec_pitch = np.linspace(0,float(len(x))/float(fs),len(pitches1))
# Spectrogram of the signal with a 1024-sample Hann window.
f,t,X = signal.spectrogram(x,fs=fs,window=scipy.signal.get_window('hann',1024))
# Plot in dB; the 1e-6 floor avoids log10(0) for silent bins.
plt.pcolormesh(t,f,20*np.log10(1e-6+np.abs(X)))
plt.xlabel('time (s)')
plt.ylabel('Frequency (Hz)')
# Overlay the pitch track as magenta dots on top of the spectrogram.
plt.plot(t_vec_pitch,pitches1,'m.')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
6e638314f02ee8aa6919f68c5b79ab506004a312 | df9a467c0d47eafde9bf5d2181347ad00bf53c06 | /leetcode/most_liked/739_daily_temperatures.py | b1783bb29cf96d7abdb26011f592ae371ea26b9f | [] | no_license | eunjungchoi/algorithm | 63d904d92e16ab0917faa585326e9281d61d6000 | 1c9528e26752b723e1d128b020f6c5291ed5ca19 | refs/heads/master | 2023-01-06T20:54:06.567512 | 2020-11-14T11:13:05 | 2020-11-14T11:13:05 | 288,323,344 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | # Given a list of daily temperatures T, return a list such that, for each day in the input,
# tells you how many days you would have to wait until a warmer temperature.
# If there is no future day for which this is possible, put 0 instead.
#
# For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73],
# your output should be [1, 1, 4, 2, 1, 1, 0, 0].
#
# Note: The length of temperatures will be in the range [1, 30000].
# Each temperature will be an integer in the range [30, 100].
# 매일의 화씨 온도(F) 리스트 T를 받아, 더 따듯한 날씨를 위해서는 며칠을 더 기다려야 하는 지를 출력하라
from typing import List
class Solution:
def dailyTemperatures(self, T: List[int]) -> List[int]:
# 스택값 비교
stack = [0]
results = [0] * len(T)
# 현재의 인덱스를 계속 스택에 쌓아두다가, 이전보다 상승하는 지점에서 현재 온도와 스택에 쌓아둔 인덱스 지점의 온도 차이를 비교해서,
# 더 높다면 스택의 값을 pop으로 꺼내고, 현재 인덱스와 스택에 쌓아둔 인덱스의 차이를 정답으로 처리한다.
for i, temp in enumerate(T):
while stack and temp > T[stack[-1]]:
last = stack.pop()
results[last] = i - last
stack.append(i)
return results
# 37 / 37 test cases passed.
# Status: Accepted
# Runtime: 492 ms
# Memory Usage: 17.2 MB
#
# Your runtime beats 71.54 % of python3 submissions.
# Your memory usage beats 89.19 % of python3 submissions.
# <파이썬 알고리즘 인터뷰> 참고.
| [
"im.your.energy@gmail.com"
] | im.your.energy@gmail.com |
f863c0011b6e40babc977ab674974c286bd9f909 | 9b84085a3f08441148d4808f400cf3fdf98be5cb | /some_termos_serie_02.py | 7a9542abe16298a28039c2c54e7a515321d31f5a | [] | no_license | gabipires/ExerciciosImpactaADS | 546da7d77af8d37f11a9fb8447f1a0985ab8aebd | c57683f90d085d2129800d0b647a683b4607a399 | refs/heads/master | 2022-07-02T03:16:57.059847 | 2020-05-06T01:05:55 | 2020-05-06T01:05:55 | 261,590,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # Escreva um programa em Python 3 para somar os n primeiros termos da seguinte série:
# - 1 + 1/2 - 1/3 + 1/4 - 1/5 + ...
# A saída deve ser uma unica linha contendo apenas o resultado da somatória formatado para exibir 6 casas de precisão.
n = int(input("Quantos termos você deseja somar? "))
soma = 0
for i in range(1,n+1):
fracao= 1/(i)
if i%2 == 1:
fracao = -fracao
soma += fracao
print("{:6f}".format(soma)) | [
"gabrielatrindadepires@gmail.com"
] | gabrielatrindadepires@gmail.com |
c949fe10046ed1243b9b5b457337815e7cd492b2 | 124df74bce796598d224c4380c60c8e95756f761 | /pythonPackages/matplotlib/doc/conf.py | f5e23c3021a3bf6281ee5318d4e0041ff5fd7269 | [] | no_license | Mapoet/AWIPS-Test | 19059bbd401573950995c8cc442ddd45588e6c9f | 43c5a7cc360b3cbec2ae94cb58594fe247253621 | refs/heads/master | 2020-04-17T03:35:57.762513 | 2017-02-06T17:17:58 | 2017-02-06T17:17:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,924 | py | # -*- coding: utf-8 -*-
#
# Matplotlib documentation build configuration file, created by
# sphinx-quickstart on Fri May 2 12:33:25 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# (The local sphinxext/ directory supplies several of the custom extensions
# named in `extensions` below.)
sys.path.append(os.path.abspath('sphinxext'))
# Import support for ipython console session syntax highlighting (lives
# in the sphinxext directory defined above)
import ipython_console_highlighting
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['matplotlib.sphinxext.mathmpl', 'math_symbol_table',
              'sphinx.ext.autodoc', 'matplotlib.sphinxext.only_directives',
              'matplotlib.sphinxext.plot_directive', 'inheritance_diagram',
              'gen_gallery', 'gen_rst']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Matplotlib'
copyright = '2008, John Hunter, Darren Dale, Michael Droettboom'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
# Version/release are taken from the installed matplotlib itself so the docs
# always match the library being documented.
import matplotlib
version = matplotlib.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Plot directive configuration
# ----------------------------
plot_formats = ['png', 'hires.png', 'pdf']
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'matplotlib.css'
html_style = 'mpl.css'
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = 'logo.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If nonempty, this is the file name suffix for generated HTML files. The
# default is ``".html"``.
html_file_suffix = '.html'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Custom sidebar templates, maps page names to templates.
html_sidebars = {'index': 'indexsidebar.html',
                 }
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': 'index.html', 'gallery':'gallery.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.
# NOTE(review): this is the *string* 'False', not the boolean False; Sphinx
# treats any non-empty value as the OpenSearch base URL -- confirm intended.
html_use_opensearch = 'False'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Matplotlibdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '11pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
    ('contents', 'Matplotlib.tex', 'Matplotlib', 'Darren Dale, Michael Droettboom, Eric Firing, John Hunter', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = None
# Additional stuff for the LaTeX preamble.
# NOTE: a raw string is required here -- in an ordinary string literal the
# "\u" of "\usepackage" is parsed as a unicode escape under Python 3 and is
# a SyntaxError.  The raw string has byte-identical content on Python 2.
latex_preamble = r"""
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{txfonts}
"""
# Documents to append as an appendix to all manuals.
latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
# Use \part sectioning (rather than chapters only) in the LaTeX output.
latex_use_parts = True
# Show both class-level docstring and __init__ docstring in class
# documentation
autoclass_content = 'both'
| [
"joshua.t.love@saic.com"
] | joshua.t.love@saic.com |
446ce17555820f29c98376ca99908115ac9d473d | fd465e82d9b9f4ea36842fc15c0a3afc8f1d535c | /app/__init__.py | 58b4e7874583397e2373759849bd87b4c9d81731 | [] | no_license | raghav198/arugula | 792012a92ad8ec4ec90098e332e5f82d57164f0f | 98f51b0583e6b1008a0eeda2ec48f88b132a6742 | refs/heads/master | 2023-05-08T07:56:15.763272 | 2021-06-02T17:53:54 | 2021-06-02T17:53:54 | 369,033,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | import os
# Flask application module: builds and configures the shared `app` and its
# SQLAlchemy handle `db` for the rest of the package.
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
# Public API of this module.
__all__ = ['app', 'db']
# instance_relative_config makes config file paths resolve against the
# instance folder (created below).
app = Flask(__name__, instance_relative_config=True)
# Baseline configuration.  SECRET_KEY='dev' is a development placeholder and
# should be overridden (e.g. via instance/config.py) for production.
app.config.from_mapping(
    SECRET_KEY='dev',
    SQLALCHEMY_DATABASE_URI='sqlite:///data.db')
db = SQLAlchemy(app)
# silent=True: a missing instance/config.py is not an error.
app.config.from_pyfile('config.py', silent=True)
# make sure the app directories exist
# (EAFP: makedirs raises OSError if the directory already exists; ignored)
try:
    os.makedirs(app.instance_path)
except OSError:
    pass
# Add some basic routing handlers
@app.route('/')
@app.route('/index.html')
def index():
    """Serve the landing page for both the root URL and /index.html."""
    landing_template = 'index.html'
    return render_template(landing_template)
| [
"maliragh@amazon.com"
] | maliragh@amazon.com |
0abd41508f48aed20dd2896d2d078b491b31a910 | 8e9707c2f01d8ff08117a48e35092fbfcec59f44 | /lexer.py | 1351e24183b0093a1dffed38ea1ac0d993e2d4f0 | [] | no_license | aallai/WIG-Compiler | 42319d1f2f4bb09131268484cfd1b03aab76aaf8 | 2c855d76244c919fa6d8dccc104ea96b73feb54c | refs/heads/master | 2020-04-07T18:23:12.368319 | 2012-10-20T15:53:10 | 2012-10-20T15:53:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,535 | py | # ply lexing
import ply.lex as lex
# Lexer states: the default (INITIAL) state tokenizes Wig code; 'html',
# 'tags' and 'holes' are exclusive states entered while scanning embedded
# HTML text, the inside of an HTML tag, and <[ ... ]> template holes.
states = (
    ('html', 'exclusive'),
    ('tags', 'exclusive'),
    ('holes', 'exclusive'),
    )
# Wig keywords: identifiers found in this map are re-tagged with their own
# token type by t_IDENTIFIER below.
reserved = {
    'service' : 'SERVICE',
    'const' : 'CONST',
    'html' : 'HTML',
    'schema' : 'SCHEMA',
    'int' : 'INT',
    'bool' : 'BOOL',
    'string' : 'STRING',
    'void' : 'VOID',
    'tuple' : 'TUPLE',
    'session' : 'SESSION',
    'show' : 'SHOW',
    'receive' : 'RECEIVE',
    'exit' : 'EXIT',
    'return' : 'RETURN',
    'if' : 'IF',
    'else' : 'ELSE',
    'while' : 'WHILE',
    'plug' : 'PLUG',
    'true' : 'TRUE',
    'false' : 'FALSE',
    }
# reserved words in html tags
# (applied only by t_tags_IDENTIFIER, i.e. inside the 'tags' state)
tag_reserved = {
    'input' : 'INPUT',
    'select' : 'SELECT',
    }
# Full token list handed to ply: the explicit names below plus every keyword
# token from the two reserved-word tables.
tokens = [
    'START_HTML_LITERAL',
    'END_HTML_LITERAL',
    'INT_LITERAL',
    'STRING_LITERAL',
    'IDENTIFIER',
    'META',
    'WHATEVER',
    'START_TAG',
    'START_CLOSE_TAG',
    'END_TAG',
    'START_HOLE',
    'END_HOLE',
    'OR',
    'AND',
    'LSHIFT',
    'EQ',
    'NEQ',
    'LTEQ',
    'GTEQ',
    'TADD',
    'TSUB',
    # NOTE: Python 2 code -- dict.values() returns lists there; under
    # Python 3 concatenating dict views with `+` would raise TypeError.
    ] + reserved.values() + tag_reserved.values()
# Single-character tokens that ply returns as their literal character.
literals = ('+', '-', '/', '*', '%', '(', ')', '{', '}', '[', ']', '=', '<', '>', ';', ',', '!', '.')
#
# html
#
# In ply, a token function's first string literal is its regular expression
# (do not edit those strings), and function rules match in definition order.
# <html> switches the lexer into the exclusive 'html' state.
def t_START_HTML_LITERAL(t) :
    '<html>'
    t.lexer.push_state('html')
    return t
def t_html_END_HTML_LITERAL(t) :
    '</html>'
    t.lexer.pop_state()
    return t
# Simple string-valued rules for the 'holes' and 'html' states.
t_holes_IDENTIFIER = r'[a-zA-Z_][a-zA-Z0-9_]*'
t_html_META = r'<!--(.|\n)*-->'
t_html_WHATEVER = r'[^<>]+'
# '</' enters the 'tags' state until t_tags_END_TAG sees the closing '>'.
def t_html_START_CLOSE_TAG(t) :
    r'</'
    t.lexer.push_state('tags')
    return t
# '<[' opens a template hole; t_holes_END_HOLE leaves the state again.
def t_html_START_HOLE(t) :
    r'<\['
    t.lexer.push_state('holes')
    return t
def t_holes_END_HOLE(t) :
    r'\]>'
    t.lexer.pop_state()
    return t
# Bare '<' is defined after the '</' and '<[' rules so the longer prefixes
# are tried first.
def t_html_START_TAG(t) :
    r'<'
    t.lexer.push_state('tags')
    return t
def t_tags_END_TAG(t) :
    r'>'
    t.lexer.pop_state()
    return t
# Identifiers inside a tag may be re-tagged as INPUT/SELECT keywords.
def t_tags_IDENTIFIER(t) :
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    t.type = tag_reserved.get(t.value, 'IDENTIFIER')
    return t
#
# Wig
#
# Identifiers in Wig code are re-tagged as keywords when found in `reserved`.
def t_IDENTIFIER(t) :
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    t.type = reserved.get(t.value, 'IDENTIFIER')
    return t
# Integer literals: 0, or a non-zero digit followed by digits (the pattern
# deliberately rejects leading zeros).
def t_INT_LITERAL(t) :
    r'0|([1-9][0-9]*)'
    t.value = int(t.value)
    return t
# String literals are valid both in Wig code (INITIAL) and inside tags.
def t_tags_INITIAL_STRING_LITERAL(t) :
    r'"[^"]*"'
    # get rid of "s
    t.value = t.value[1:-1]
    return t
# Multi-character operators (ply sorts these string rules by regex length,
# so '==' wins over the '=' literal).
t_EQ = '=='
t_NEQ = '!='
t_LTEQ = '<='
t_GTEQ = '>='
t_AND = '&&'
t_OR = r'\|\|'
t_LSHIFT = '<<'
# tuple add and subtract??
t_TADD = r'\\\+'
t_TSUB = r'\\-'
# Track line numbers in every state so errors can be located.
def t_ANY_newline(t) :
    r'\n+'
    t.lexer.lineno += len(t.value)
# Non-newline whitespace is skipped in every state.
t_ANY_ignore = ' \t'
# print token and skip ahead?
# NOTE: the Python 2 print statement below confirms this module targets py2.
def t_ANY_error(t) :
    tok = t.value.split()[0]
    print 'Illegal token ' + tok
    t.lexer.skip(len(tok))
# Build the lexer from the rules defined in this module's namespace.
lexer = lex.lex()
| [
"aallai@functor.local"
] | aallai@functor.local |
5b1a7e60318fa305ae7fd21a00322a47c06c2deb | c6630662769c54de64fbce86b1bb64549922072c | /crawlGitHubForStar.py | 2090d5fba5cbace5956768b8a0c544147872e2aa | [] | no_license | wangying2016/CrawlGitHubForStar | efca9c9538b52822e705360944c3293fa9a6ee46 | 67acfda47d561682909404a21030b63c8039060e | refs/heads/master | 2021-08-06T20:12:58.911589 | 2017-11-07T01:21:11 | 2017-11-07T01:21:11 | 108,128,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | # This version just crawl stars.
# from urllib.request import urlopen
# from urllib.error import HTTPError
# from bs4 import BeautifulSoup
#
#
# print('Please input github user name:')
# userName = input()
# url = userName + '?tab=repositories'
# openFailed = False
# count = 0
#
# while True:
# # 1. Open repositories page.
# try:
# html = urlopen('https://github.com/' + url)
# bsObj = BeautifulSoup(html, 'html.parser')
# except HTTPError as e:
# print('open ' + 'https://github.com/' + url + ' failed.')
# openFailed = True
# break
#
# # 2. Count stars at one page.
# for star in bsObj.findAll('svg', {'aria-label': 'star'}):
# count += int(star.parent.get_text().replace(',', ''))
#
# # 3. Find next page.
# nextPage = bsObj.find('a', {'class': 'next_page'})
# if nextPage is None:
# break
# else:
# url = nextPage.attrs['href']
#
# if openFailed is False:
# print(userName + ' has ' + str(count) + ' stars.')
# This version crawl stars and forks, in addition output repository name.
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
# Interactive scraper: walks a GitHub user's paginated repository list and
# totals the star and fork counts shown on each page.
# NOTE(review): this depends on GitHub's current HTML markup (svg aria-labels,
# the h3/a layout, the next_page class) and will silently break if it changes.
print('Please input github user name:')
userName = input()
url = userName + '?tab=repositories'
openFailed = False
countStars = 0
countForks = 0
while True:
    # 1. Open repositories page.
    try:
        html = urlopen('https://github.com/' + url)
        bsObj = BeautifulSoup(html, 'html.parser')
    except HTTPError as e:
        # `e` is unused; the failure is reported by URL only.
        print('open ' + 'https://github.com/' + url + ' failed.')
        openFailed = True
        break
    # 2. Count stars at one page.
    for star in bsObj.findAll('svg', {'aria-label': 'star'}):
        # i. Count star numbers (strip thousands separators before int()).
        starNumber = int(star.parent.get_text().replace(',', ''))
        countStars += starNumber
        # ii. Print the repository name with its star count.
        print(star.parent.parent.parent.h3.a['href'] + ' has ' + str(starNumber)
              + (' stars.' if starNumber > 1 else ' star.'))
    # 3. Count forks at one page.
    for fork in bsObj.findAll('svg', {'aria-label': 'fork'}):
        # i. Count fork numbers.
        forkNumber = int(fork.parent.get_text().replace(',', ''))
        countForks += forkNumber
        # ii. Print the repository name with its fork count.
        print(fork.parent.parent.parent.h3.a['href'] + ' has ' + str(forkNumber)
              + (' forks.' if forkNumber > 1 else ' fork.'))
    # 4. Find next page; stop when the pagination link disappears.
    nextPage = bsObj.find('a', {'class': 'next_page'})
    if nextPage is None:
        break
    else:
        url = nextPage.attrs['href']
# Print the grand totals unless a page failed to load mid-way.
if openFailed is False:
    print()
    print('In totally:')
    print(userName + ' has ' + str(countStars) + ' stars.')
    print(userName + ' has ' + str(countForks) + ' forks.')
| [
"wy867504146@outlook.com"
] | wy867504146@outlook.com |
f49f5161688bb91518b40963e42484af38165648 | d529b7cbcc818db8d419e91776ae35d7d31e42ad | /weatherapp.api.py | 5bdd4bff95508355af6011ec42a33e0e594ed31e | [] | no_license | tamsynsteed/My.Weather.App | 5be24b3581048f0b05a8c804b1ac56c143a0cfca | 6ea6f98a7711efc9002671e08721aef956ab8dcf | refs/heads/main | 2023-01-24T11:14:32.546199 | 2020-11-20T12:15:13 | 2020-11-20T12:15:13 | 314,544,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,471 | py | import PIL
import requests
from datetime import datetime
import tkinter as Tk
from tkinter import *
from tkinter import simpledialog ,messagebox
#weather app data
def weather_app():
    """Search-button callback: fetch current weather for the city in `en1`.

    Queries the OpenWeatherMap current-weather endpoint, then writes the
    readings into the module-level labels lb2..lb6 and recolours the window
    background by temperature.
    """
    location = str(en1.get())
    #api.openweathermap.org/data/2.5/weather?q={city name}&appid={API key}
    api_address = "http://api.openweathermap.org/data/2.5/weather?appid=4d5c07cefc1bd6ffd727c23d9fd03ea5&q="
    url = api_address + location
    try:
        # A timeout keeps the Tk event loop from hanging indefinitely when
        # the service is unreachable (the original request had no timeout).
        json_data = requests.get(url, timeout=10).json()
    except requests.RequestException as exc:
        # Surface network problems in the GUI instead of an uncaught
        # traceback inside the Tk callback (messagebox was already imported).
        messagebox.showerror('Weather App', 'Could not reach the weather service: {}'.format(exc))
        return
    #function to display data and extract the relevant data from the json file.
    if json_data['cod'] == '404':
        # OpenWeatherMap reports an unknown city with the string code "404".
        print("Invalid City: {} Please check your city name".format(location))
    else:
        #create variables to store and display json data
        temp_city = ((json_data['main']['temp'])-273.15) #kelvin - 273.15 gives us the tempreture in celcius
        weather_desc = json_data['weather'][0]['description']
        hmdt= json_data['main']['humidity']
        wnd_spd= json_data['wind']['speed']
        date_time=datetime.now().strftime ("%d %b %Y | %I %M %S %p")
        lb2.config(text=str("Weather Stats for - {} | {}".format(location.upper(), date_time)))
        lb3.config(text=str("Current Weather Description:" + str( weather_desc)))
        lb4.config(text=str("Current Humidity :"+ str(hmdt)+"%"))
        lb5.config(text=str("Current Temperature is: {:.2f}\N{DEGREE SIGN}C ".format(temp_city)))
        lb6.config(text=str("Current Wind Speed:" + str(wnd_spd)+ "kmph"))
        # Warm days get an orange background, cooler days powder blue.
        if temp_city > 15:
            master.configure(bg="orange")
        else:
            master.configure(bg="powder blue")
# --- Tk UI construction (widgets are module-level so weather_app can reach them)
master= Tk()
master.title('Weather App')
master.config(bg="cadet blue", relief="solid")
master.geometry("700x600")
lbheading = Label(master, text="My Weather App" ,font="arial 22 bold", bg='powderblue')
lbheading.pack()
lb1 = Label(master,text="Enter City:", font='arial 18 bold')
lb1.pack(pady=20)
# City-name entry read by weather_app().
en1= Entry(master)
en1.pack(pady=20)
# The Search button triggers the API lookup.
checkbutton = Button(master, text="Search",font= "bold", command=weather_app)
checkbutton.pack(pady=20)
# lb2..lb6 are empty placeholders that weather_app() fills with results.
lb2 = Label(master, font="arial 16", bg="cadet blue")
lb2.pack(pady=20)
lb3 = Label(master, font="arial 14", bg="cadet blue")
lb3.pack(pady=20)
lb4 = Label(master, font="arial 14", bg="cadet blue")
lb4.pack(pady=20)
lb5 = Label(master, font="arial 14", bg="cadet blue")
lb5.pack(pady=20)
lb6 = Label(master, font="arial 14", bg="cadet blue")
lb6.pack(pady=20)
# Enter the Tk event loop (blocks until the window is closed).
master.mainloop()
| [
"tamsynsteed@gmail.com"
] | tamsynsteed@gmail.com |
92e1a0f58228447205c83797e9822ff91f7960bf | 80352efd5dd9f2ed186a42b30813be3fde9a417d | /trunk/modules/python/ae/encoder.py | a2173e0edd41557019afb08e2580f10e07606c52 | [
"MIT"
] | permissive | mbojrab/playbox | 4e5042f91b1f4d00cd38bb12c825e7c0c33c7aa5 | abbe116fc943e727119ffb47faeb41d29ffe56bb | refs/heads/master | 2020-04-04T05:59:58.158037 | 2017-07-09T02:58:00 | 2017-07-09T02:58:00 | 52,119,608 | 1 | 2 | null | 2017-07-09T02:47:54 | 2016-02-19T22:05:10 | Python | UTF-8 | Python | false | false | 960 | py | class AutoEncoder () :
def __init__ (self, forceSparse=True, contractionRate=0.01) :
'''This class describes an abstract AutoEncoder.
contractionRate : variance (dimensionality) reduction rate
'''
self._contractionRate = contractionRate
self._forceSparse = forceSparse
def buildDecoder(self, input) :
raise NotImplementedError('Implement the buildDecoder() method')
def getUpdates(self) :
raise NotImplementedError('Implement the getUpdates() method')
def getCostLabels() :
raise NotImplementedError('Implement the getCostLabels() method')
# DEBUG: For Debugging purposes only
def saveReconstruction(self, image, ii) :
from dataset.debugger import saveNormalizedImage
saveNormalizedImage(np.resize(self.reconstruction(image),
image.shape[-2:]),
'chip_' + str(ii) + '_reconst.png')
| [
"micah.bojrab@mdaus.com"
] | micah.bojrab@mdaus.com |
50833987fe1c99942c6386dcbf5dab13fb529e2c | 920075678b5141129d1b0f9c6a17411ea1844ab8 | /08/04_拓展.py | d4ced51754a884331471c29683329dcb73c3b08b | [] | no_license | ucookie/basic-python | e8869e74c7429e938291689257c7c6d17aa75187 | b7f29743883739e3b298d49a170f367944ee0d9a | refs/heads/master | 2020-04-20T14:36:23.483379 | 2019-07-07T23:49:06 | 2019-07-07T23:49:06 | 168,904,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def add(new_item, save_list=[]):
save_list.append(new_item)
return save_list
print(add('1'))
print(add('2'))
# 初始化后,就有了函数名到函数对象这样一个映射关系,
# 可以通过函数名访问到函数对象了,并且,
# 函数的一切属性也确定下来,包括所需的参数,默认参数的值 | [
"huangsihan.cn@gmail.com"
] | huangsihan.cn@gmail.com |
af02fbc6bb99ed2342dfc37bdb51a8589e25000e | 72402cd547a1c3309ea72b8d37f8a0f57a0d73e3 | /leetcode/queue_stack/code/day_temperature.py | 098fd38af27e3ecb5771894688f7ac0a4241f0ae | [] | no_license | skyxyz-lang/CS_Note | a5a4d428e17b56e2fc37118e4a14ceea39f7435e | a75310a96d2b165b15d5ee10ec409a17cdc880ba | refs/heads/master | 2023-02-24T02:53:53.557058 | 2021-01-23T06:54:47 | 2021-01-23T16:11:03 | 302,805,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: skyxyz-lang
@file: day_temperature.py
@time: 2020/11/17 23:27
@desc: 每日温度
https://leetcode-cn.com/leetbook/read/queue-stack/genw3/
"""
class Solution(object):
    """LeetCode 739 solver: monotonic-stack scan over the temperature list."""

    def dailyTemperatures(self, T):
        """For each day, return how many days until a warmer temperature
        (0 when no warmer day follows).

        :type T: List[int]
        :rtype: List[int]
        """
        answer = [0] * len(T)
        pending = []  # indices of days still waiting for a warmer reading
        for day, reading in enumerate(T):
            # Every stacked day colder than today's reading is resolved now.
            while pending and T[pending[-1]] < reading:
                cooler_day = pending.pop()
                answer[cooler_day] = day - cooler_day
            pending.append(day)
        return answer
if __name__ == '__main__':
    # Quick smoke test with the LeetCode example; expected output:
    # [1, 1, 4, 2, 1, 1, 0, 0]
    obj = Solution()
    # print() with a single parenthesised argument behaves identically on
    # Python 2 and fixes the SyntaxError the bare print statement caused on
    # Python 3.
    print(obj.dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]))
"yuanxiangsky@bupt.edu.cn"
] | yuanxiangsky@bupt.edu.cn |
88749037d67309bfe19ab2998e0794b415132d8f | 0d16ed8dffd7b951abd66bf895c254749ed8195a | /Lab4_Data_Structure__And_Iternation/Tuple/Seven_4th_item.py | a638c7a55bb24fd21cf7e867c3f38fb403b96413 | [] | no_license | sanjiv576/LabExercises | 1cfea292e5b94537722b2caca42f350ab8fc7ab8 | ef5adb4fbcff28162fe9e3e80782172b93127b33 | refs/heads/master | 2023-07-22T08:13:04.771784 | 2021-08-17T12:23:21 | 2021-08-17T12:23:21 | 372,397,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | # Write a Python program to get the 4th element and 4th element from last of a tuple.
t = (0, 1, 2, 3, '4th', 5, 6)
# The 4th element (1-based) is index 3 from the front and index -4 from the
# back.  The original sliced t[-3:3:-1], which yields ('4th',) -- neither of
# the two items the exercise asks for.
fourth_item = t[3]
fourth_from_last = t[-4]
print(fourth_item)
print(fourth_from_last)
"83968516+sanjiv576@users.noreply.github.com"
] | 83968516+sanjiv576@users.noreply.github.com |
679396908814bf47132bc58065644309db986e62 | 8135c6dbf52ceb4d2b95e77e0f7e9eff770d3267 | /stadsbudsservice/urls.py | 2feb23cf8901f4f6195cc3f0a18316d4c1fc6d63 | [] | no_license | charleslofblad/stadsbudsservice-site | 19deb2b4da01f4029fcd693d6a4e58b4d88ee046 | 2bf94c69a64a917c8067faa75799b4c11b243592 | refs/heads/master | 2016-08-11T17:36:23.290644 | 2016-04-09T13:48:39 | 2016-04-09T13:48:39 | 53,272,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | """stadsbudsservice URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
# Import only the names actually used instead of the original wildcard
# `from django.conf.urls import *`, which hid where url/include came from
# (the module docstring above already shows this explicit form).
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Delegates to the app's own URLconf for the main site routes.
    url(r'', include('stadsbudsite.urls')),
    # Static pages rendered directly by TemplateView (no app view needed).
    url(r'^index/', TemplateView.as_view(template_name="index.html")),
    url(r'^flytt_calculator/', TemplateView.as_view(template_name="flytt_calculator/flytt_calculator.html")),
    url(r'^kontakt/', TemplateView.as_view(template_name="kontakt.html")),
    url(r'^komponenter/', TemplateView.as_view(template_name="komponenter.html")),
]
| [
"charleslofblad@gmail.com"
] | charleslofblad@gmail.com |
6cc3477aa0bab5aadbc0a0e54d07b45a6c8092b1 | ce3e6ef48b9cfd7f40191a436b0fa7ecfa2de7e2 | /mysite/settings.py | 63f31cd36335bbb71639130b22711ac4f76ce051 | [] | no_license | Benknightdark/webapp | 73f7424de7ca65a615d358d7e37a76f0bd2ede78 | 812d0a83fff4bc5a94c9f402e28687731324aae3 | refs/heads/master | 2022-07-18T05:33:52.657230 | 2017-02-27T13:23:20 | 2017-02-27T13:23:20 | 83,312,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,170 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'q(%6jho5&c@w!swc9anodou64f5-5))r2!^a_5t-i_l=mjw116'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts this site may serve; must be populated once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'yuan'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"Ben@gongyanbinde-MacBook-Pro.local"
] | Ben@gongyanbinde-MacBook-Pro.local |
25eed9a8841c9c6e18e8892558a3112696f21804 | a3687607bcd67a5e18093ab7354c58615470ebbb | /estructuras_de_datos/grafo/bfs.py | 2718a7108cfb50b38de5f327535b2a57a8a82ff5 | [
"MIT"
] | permissive | ilitteri/7541-Algo2 | cef7f0dfbbd42920502d92af8f35455b4842dfe4 | 6835e40818b9f84fd0271b6341f6fd3aaa46df5e | refs/heads/main | 2023-04-13T04:15:15.778082 | 2021-04-21T21:52:43 | 2021-04-21T21:52:43 | 350,750,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | def bfs(grafo, origen):
visitados = set()
padres = {}
orden = {}
q = Cola()
visitados.add(origen)
padres[origen] = None
orden[origen] = 0
q.encolar(origen)
while not q.esta_vacia():
v = q.desencolar()
for w in grafo.adyacentes(v):
visitados.add(w)
padres[w] = v
orden[w] = orden[v] + 1
q.encolar(w)
return padres, orden, visitados
| [
"ilitteri@fi.uba.ar"
] | ilitteri@fi.uba.ar |
a98d4898506c6d720e656ed8f12c44d8c8dd7c6b | ce5a16be85587556d57cc5cb15fd4aa1f43b56ad | /tasks/models.py | e8ac8b5dba47495a2a444a76aa5abc9563ab6a54 | [] | no_license | lncr/todolist | ce119260c2bdb7f737600567e519f824586c9308 | 28dc1bf970f3234aedaacbcb5600c7cb0fa0c902 | refs/heads/main | 2023-07-08T20:10:33.975846 | 2021-08-14T14:37:02 | 2021-08-14T14:37:02 | 394,661,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Task(models.Model):
    """A to-do item owned by a user."""
    # Owner of the task; deleting the user cascades to their tasks.
    # null=True presumably kept for pre-existing rows - TODO confirm.
    creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name='tasks', null=True)
    # Free-text description of what needs to be done.
    body = models.TextField()
    # Deadline the user expects to finish the task by.
    estimated_finish_time = models.DateTimeField()
    # Set automatically on insert, never updated afterwards.
    created_datetime = models.DateTimeField(auto_now_add=True)
    is_completed = models.BooleanField(default=False)
| [
"b.soltobaev@disoft.dev"
] | b.soltobaev@disoft.dev |
5d36b195a2408d0a9aba4e9f22e3db6b82313d49 | 50ceb4a100bf0193cc2ae3d9dac62985c6b82947 | /commons/fileio/handlers/base.py | 9b4dbec0fde89bf261e1f148b1cf796cca3d0aef | [] | no_license | splionar/soho | ed7341fd7acc607ce68b31d9334464353efc5307 | d98b2ba52ffda2ba857aa4bc0d4e9239efcfd806 | refs/heads/master | 2023-07-03T08:52:52.185285 | 2021-07-21T12:17:07 | 2021-07-21T12:17:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from abc import ABCMeta, abstractmethod
class BaseFileHandler(object):
    """Abstract base for serialization handlers that load/dump objects.

    NOTE(review): `__metaclass__` only activates ABCMeta on Python 2; on
    Python 3 this is an ordinary attribute and the @abstractmethod markers
    are not enforced - confirm which interpreters must be supported.
    """
    __metaclass__ = ABCMeta  # python 2 compatibility
    @abstractmethod
    def load_from_fileobj(self, file, **kwargs):
        """Deserialize and return an object read from the open file object."""
        pass
    @abstractmethod
    def dump_to_fileobj(self, obj, file, **kwargs):
        """Serialize `obj` into the open file object."""
        pass
    @abstractmethod
    def dump_to_str(self, obj, **kwargs):
        """Return `obj` serialized as a string."""
        pass
    def load_from_path(self, filepath, mode='r', **kwargs):
        """Open `filepath` (text mode by default) and delegate to load_from_fileobj."""
        with open(filepath, mode) as f:
            return self.load_from_fileobj(f, **kwargs)
    def dump_to_path(self, obj, filepath, mode='w', **kwargs):
        """Open `filepath` for writing and delegate to dump_to_fileobj."""
        with open(filepath, mode) as f:
            self.dump_to_fileobj(obj, f, **kwargs)
| [
"664587718@qq.com"
] | 664587718@qq.com |
d5b520dadfbbdd4d46f80f779e68c7bee555ae7c | 0613b082bd90462e190bc51943356ce6ce990815 | /baseinfo/forms.py | f92317a93a34016d026958d648ff845db9dae301 | [] | no_license | Hamidnet220/salary | 1068aac4bc921436c03b627899370a86ca5e99be | 4dc1f32dfa1d990e6c9f527b4a8d0e1df939262a | refs/heads/master | 2020-05-04T18:09:24.086491 | 2019-04-22T20:22:32 | 2019-04-22T20:22:32 | 179,342,004 | 0 | 1 | null | 2019-04-11T10:43:29 | 2019-04-03T17:53:36 | Python | UTF-8 | Python | false | false | 6,158 | py | from django import forms
from .models import *
from django.utils.translation import ugettext_lazy as _
class EmployerForm(forms.ModelForm):
    """ModelForm exposing every Employer field."""
    class Meta:
        model= Employer
        fields='__all__'
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        Employer.objects.create(**self.cleaned_data)
    def update_record(self,id):
        # Bulk update: bypasses model save() hooks and signals.
        Employer.objects.filter(id=id).update(**self.cleaned_data)
class EmployeeStatusForm(forms.Form):
    """Plain form for creating an EmployeeStatus record (Persian labels)."""
    title = forms.CharField(label="عنوان وضعیت کارکنان:",max_length=50)
    description = forms.CharField(label="توضیحات:",widget=forms.Textarea)
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        EmployeeStatus.objects.create(**self.cleaned_data)
class WorkStatusForm(forms.Form):
    """Plain form for creating a WorkStatus record."""
    title = forms.CharField(label="عنوان وضعیت کاری:",max_length=50)
    description = forms.CharField(label="توضیحات:",widget=forms.Textarea,required=False)
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        WorkStatus.objects.create(**self.cleaned_data)
class MaritalStatusForm(forms.Form):
    """Plain form for creating a MaritalStatus record."""
    title = forms.CharField(label="عنوان وضعیت تاهل:",max_length=20)
    description = forms.CharField(label="توضیحات:",widget=forms.Textarea,required=False)
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        MaritalStatus.objects.create(**self.cleaned_data)
class BankForm(forms.Form):
    """Plain form for creating a Bank record."""
    title = forms.CharField(label="نام بانک:",max_length=50)
    description = forms.CharField(label="توضیحات:",required=False,widget=forms.Textarea)
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        Bank.objects.create(**self.cleaned_data)
class WorkGroupForm(forms.Form):
    """Plain form for creating a WorkGroup (job group) record with its
    per-child, housing and bonus benefit amounts."""
    title = forms.CharField(label="عنوان گروه شغلی:",max_length=100)
    child_benefit = forms.DecimalField(label="مبلغ حق اولاد برای یک نفر:",max_digits=50,decimal_places=2)
    dwelling_benefit= forms.DecimalField(label="مبلغ حق مسکن:",max_digits=50,decimal_places=2)
    Bon_benefit = forms.DecimalField(label="مبلغ بن:",max_digits=50,decimal_places=2)
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        WorkGroup.objects.create(**self.cleaned_data)
class WorkPlaceForm(forms.Form):
    """Plain form for creating a WorkPlace record."""
    title = forms.CharField(label="عنوان محل کار:",max_length=60)
    description = forms.CharField(label="توضیحات:",required=False,widget=forms.Textarea)
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        WorkPlace.objects.create(**self.cleaned_data)
class PostPlaceForm(forms.Form):
    """Plain form for creating a PostPlace record."""
    title = forms.CharField(label="عنوان محل پست:",max_length=60)
    number_of_employee = forms.IntegerField(label="تعداد نفرات پست")
    post_status = forms.ModelChoiceField(WorkStatus.objects.all(),label="وضعیت پست")
    # NOTE(review): `decription` is a typo for `description`, but renaming it
    # would change the submitted form field name; fix together with templates.
    decription = forms.CharField(label="توضیحات:",required=False,widget=forms.Textarea)
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        PostPlace.objects.create(**self.cleaned_data)
class AddMilitarySerStatus(forms.ModelForm):
    """ModelForm exposing every MilitaryServiceStat field."""
    class Meta:
        model=MilitaryServiceStat
        fields= '__all__'
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        MilitaryServiceStat.objects.create(**self.cleaned_data)
    def update_record(self,id):
        # Bulk update: bypasses model save() hooks and signals.
        MilitaryServiceStat.objects.filter(id=id).update(**self.cleaned_data)
class AddCityForm(forms.ModelForm):
    """ModelForm exposing every City field."""
    class Meta:
        model=City
        fields= '__all__'
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        City.objects.create(**self.cleaned_data)
    def update_record(self,id):
        # Bulk update: bypasses model save() hooks and signals.
        City.objects.filter(id=id).update(**self.cleaned_data)
class AddCountryForm(forms.ModelForm):
    """ModelForm exposing every Country field."""
    class Meta:
        model=Country
        fields= '__all__'
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        Country.objects.create(**self.cleaned_data)
    def update_record(self,id):
        # Bulk update: bypasses model save() hooks and signals.
        Country.objects.filter(id=id).update(**self.cleaned_data)
class EmployeeForm(forms.Form):
    """Plain form collecting all fields needed to create an Employee.

    NOTE(review): `indsurence_exempt` is a typo for `insurance_exempt`; the
    `tax_exempt`/`indsurence_exempt` BooleanFields are required by default in
    Django, which forces the boxes to be checked for the form to validate -
    confirm whether required=False was intended. `"توضسحات:"` also looks like
    a typo of `"توضیحات:"` but is a runtime label, so it is left unchanged.
    """
    employer = forms.ModelChoiceField(Employer.objects.all(),label="نام کارفرما:")
    firstname = forms.CharField(label="نام:",max_length=50)
    lastname = forms.CharField(label="نام خانوادگی:",max_length=50)
    fathername = forms.CharField(label="نام پدر:",max_length=50)
    national_code = forms.CharField(label="شماره ملی:",max_length=10)
    id_number = forms.CharField(label="شماره شناسنامه:",max_length=10)
    insurance_id = forms.CharField(label="کد بیمه:",max_length=10)
    employee_status = forms.ModelChoiceField(EmployeeStatus.objects.all(),label="وضعیت پرسنل:")
    work_place = forms.ModelChoiceField(WorkPlace.objects.all(),label="محل کار:")
    post_place = forms.ModelChoiceField(PostPlace.objects.all(),label="محل پست:")
    work_status = forms.ModelChoiceField(WorkStatus.objects.all(),label="وضعیت شغلی:")
    marital_status = forms.ModelChoiceField(MaritalStatus.objects.all(),label="وضعیت تاهل:")
    children_count = forms.IntegerField(label="تعداد فرزند")
    work_group = forms.ModelChoiceField(WorkGroup.objects.all(),label="گروه شغلی:")
    tax_exempt = forms.BooleanField(label="معافیت از پرداخت مالیات:")
    indsurence_exempt= forms.BooleanField(label="معافیت از پرداخت بیمه:")
    tel = forms.CharField(label="تلفن تماس:",max_length=19,required=False)
    mobile = forms.CharField(label="شماره همراه:",max_length=19,required=False)
    description = forms.CharField(label="توضسحات:",required=False,widget=forms.Textarea)
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        Employee.objects.create(**self.cleaned_data)
class EmployeeFormModel(forms.ModelForm):
    """ModelForm counterpart of EmployeeForm, used for updates."""
    class Meta:
        model=Employee
        fields='__all__'
    def update_record(self,id):
        # Bulk update: bypasses model save() hooks and signals.
        Employee.objects.filter(id=id).update(**self.cleaned_data)
# Constant form
class ConstantForm(forms.ModelForm):
    """ModelForm exposing every Constant field."""
    class Meta:
        model=Constant
        fields="__all__"
    def save_record(self):
        # Assumes is_valid() was called first so cleaned_data is populated.
        Constant.objects.create(**self.cleaned_data)
    def update_record(self,id):
        # Bulk update: bypasses model save() hooks and signals.
        Constant.objects.filter(id=id).update(**self.cleaned_data)
| [
"kiani.hamidreza@gmail.com"
] | kiani.hamidreza@gmail.com |
805056a25de493b432d80c6096bb9e9609fc3573 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /EjjBGn7hkmhgxqJej_11.py | ad87f45d4681248fbbf11c2febfac2a7ccef7ffa | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | """
A word nest is created by taking a starting word, and generating a new string
by placing the word _inside_ itself. This process is then repeated.
Nesting 3 times with the word "incredible":
start = incredible
first = incre|incredible|dible
second = increin|incredible|credibledible
third = increinincr|incredible|ediblecredibledible
The final nest is `"increinincrincredibleediblecredibledible"` (depth = 3).
Given a _starting word_ and the _final word nest_ , return the _depth_ of the
word nest.
### Examples
word_nest("floor", "floor") ➞ 0
word_nest("code", "cocodccococodededeodeede") ➞ 5
word_nest("incredible", "increinincrincredibleediblecredibledible") ➞ 3
### Notes
N/A
"""
def word_nest(word, nest, c=0):
    """Return the nesting depth of `nest` built from `word`.

    Each level strips every occurrence of `word` out of `nest`; the depth
    is the number of passes needed until only the bare word remains.
    `c` carries the depth accumulated so far across recursive calls.
    """
    if nest == word:
        return c
    return word_nest(word, nest.replace(word, ''), c + 1)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
272868baa2c8585d06965009e810346a179ce280 | d9cb1459c7a8086517659714d43b44b049ac9d5a | /res/assets/readRes.py | 658ef0b11355b9a160207a2c4fcc7c2e02f0ddfb | [] | no_license | abelHan/xcGane | 9b528108b297efd5bf1f3f6c385bca37a8756ed0 | 6b51519b28e62b9fae7f95d0346c89a1ff8c8948 | refs/heads/master | 2020-04-14T15:23:09.096595 | 2015-08-31T10:50:11 | 2015-08-31T10:50:11 | 40,640,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,011 | py |
import re
import os
import time
import json
lua_file_name = "ResManager.lua"
lua_file_head = "local ResManager =nil \n ResManager = {"
lua_file_end = "}\n return ResManager"
lua_root = "g_load_assets .. "
# Return the current working directory.
def getcwd():
    """Return the process's current working directory (wraps os.getcwd)."""
    return os.getcwd()
# List every file and directory name directly under dir_path.
def listall(dir_path):
    """Return os.listdir(dir_path); if the path does not exist, print a
    warning (original Chinese message preserved) and return None."""
    if not os.path.exists(dir_path):
        print(dir_path + "路径不存在")
        return None
    return os.listdir(dir_path)
# Tell whether a path refers to a regular file.
def isfile(file_path):
    """Return os.path.isfile(file_path) when the path exists; otherwise
    print an empty line and return None (original best-effort behavior)."""
    if not os.path.exists(file_path):
        print("")
        return None
    return os.path.isfile(file_path)
# Tell whether a path refers to a directory.
def isdir(dir_path):
    """Return os.path.isdir(dir_path) when the path exists; otherwise
    print an empty line and return None (original best-effort behavior)."""
    if not os.path.exists(dir_path):
        print("")
        return None
    return os.path.isdir(dir_path)
# Collect only the directory names directly under dir_path.
def listdir(dir_path):
    """Return the entries of dir_path that are directories (per isdir),
    joining names with the Windows-style backslash separator."""
    return [name for name in listall(dir_path)
            if isdir(dir_path + "\\" + name)]
# Collect only the file names directly under dir_path.
def listfile(dir_path):
    """Return the entries of dir_path that are regular files (per isfile),
    joining names with the Windows-style backslash separator."""
    return [name for name in listall(dir_path)
            if isfile(dir_path + "\\" + name)]
# Convert an absolute path into the Lua resource expression used by ResManager.
def toluapath(path):
    """Strip the current-working-directory prefix from `path`, normalize
    backslashes to forward slashes, and return it as a quoted Lua string
    concatenated onto the module-level `lua_root` prefix."""
    relative = path[len(os.getcwd() + "\\"):]
    return lua_root + "\"" + relative.replace("\\", "/") + "\""
if __name__ == '__main__':
    print('当前的工作空间是:{0}'.format(getcwd()))
    print('#' * 40)
    print('#' * 40)
    # Breadth-first walk of the directory tree below the cwd, at most 4 levels deep.
    rootDir = [getcwd()]
    filePath = {}
    for index in range(4):
        #print("搜寻目录:",rootDir)
        newDir = {}
        for value in rootDir:
            print("搜寻目录:",value)
            print("文件:",listfile(value))
            # Record the files found in this directory.
            tempfile = listfile(value)
            if len(tempfile) != 0:
                filePath[value] = tempfile
            # Record the subdirectories for the next level of the walk.
            templist = listdir(value)
            if len(templist) != 0:
                newDir[value] = templist
        # Replace the frontier with the next level's directories.
        del rootDir[:]
        for key,value in newDir.items():
            if value:
                for v in value:
                    rootDir.append(key + "\\" + v)
        if len(rootDir) <= 0:
            break
    print('#' * 40)
    print('#' * 40)
    # Write the results out as a Lua module mapping resource names
    # (file names without extension) to their load paths.
    curPath = getcwd()
    fp = open(curPath + "\\" + lua_file_name,'w+')
    fp.write(lua_file_head)
    for key,value in filePath.items():
        if value:
            for v in value:
                fullPath = key + "\\" + v
                name = os.path.splitext(v)
                #content = json.dumps({"name":name[0],"fullPath":fullPath})
                #fp.write(content + "\n")
                fp.write("\t" + name[0] + " = " + toluapath(fullPath) + ",\n")
    fp.write(lua_file_end)
    fp.flush()
    fp.close()
| [
"talance@126.com"
] | talance@126.com |
7f90feadd2bcc8ba24ab32df96035f424b84f81b | b283ef9c499668555be643975fccf6f8fe1864a6 | /ud120-projects/decision_tree/dt_author_id_alf3.py | 193ac92f639f416043113c5e6331ecb7c49835cd | [] | no_license | alf808/dandy-machine | f874edd23bf0dba551d8246dbe04a028ea3f67a6 | 1e96d5936c80dfc99272cedcfd1911f6268a84c4 | refs/heads/master | 2021-05-01T03:43:21.709233 | 2016-07-01T15:47:34 | 2016-07-01T15:47:34 | 60,564,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | #!/usr/bin/python
"""
This is the code to accompany the Lesson 1 (Naive Bayes) mini-project.
Use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
## this is with ../tools/email_preprocess percentile of 1
from sklearn.metrics import accuracy_score
from sklearn import tree
# Decision tree with min_samples_split=40 to limit overfitting on the
# TF-IDF email features (Udacity ud120 decision-tree mini-project).
clf_minsplt40 = tree.DecisionTreeClassifier(min_samples_split = 40)
clf_minsplt40.fit(features_train, labels_train)
pred40 = clf_minsplt40.predict(features_test)
acc_min_samples_split_40 = accuracy_score(pred40, labels_test)
# Python 2 print statements: accuracy, then the feature-vector length.
print acc_min_samples_split_40
print len(features_train[0])
| [
"alf@akademe.net"
] | alf@akademe.net |
4d2f360e4a216628c60f184aecf8d775eacc2f41 | 93c59b527d34147b995fbf8f3f3ab792f00ad06e | /dj4e/wizards/admin.py | 05b681fedb427be43a050a29df58620ca022aa38 | [] | no_license | shtsai97/django_projects | b9f3c1a9d713af1d2837c063acf6bfe537a475ab | 14aae2a3d0e83886c1205f5e2767b58304d2bf7f | refs/heads/master | 2020-04-17T01:10:59.904950 | 2019-04-19T14:19:53 | 2019-04-19T14:19:53 | 165,890,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from django.contrib import admin
from wizards.models import Wizard, House
# Register your models here.
# Expose House in the Django admin with the default ModelAdmin options.
admin.site.register(House)
admin.site.register(Wizard) | [
"shtsai@umich.com"
] | shtsai@umich.com |
c32add492b99ec0945daa765e931a7f2b3a0fa5c | c542fc153346270ae7e30e68bc574a97341a293e | /dbhelper.py | 6f3d3762a71273db053803bbd7d1f30e3a9da19f | [] | no_license | daehub/crimemap | 005861a322299b7e3366f2f4cc2832fe3ff68f86 | e2ef4a7fc0e085dca602fb3f452545bc09da529d | refs/heads/master | 2020-03-11T06:27:23.423152 | 2018-04-23T06:52:48 | 2018-04-23T06:52:48 | 129,676,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,643 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Daehub'
import datetime
import pymysql
import dbconfig
class DBHelper(object):
    """Data-access layer for the crimemap MySQL schema (via pymysql).

    Each method opens its own connection and always closes it in a
    finally block; write methods commit before returning.
    """
    def connect(self, database = 'crimemap'):
        """Open and return a new connection to `database` on localhost,
        using the credentials kept in the untracked dbconfig module."""
        return pymysql.connect(host = 'localhost',
                               user = dbconfig.db_user,
                               passwd = dbconfig.db_password,
                               db=database)
    def get_all_inputs(self):
        """Return every crime description as the driver's row tuples."""
        connection = self.connect()
        try:
            query = 'SELECT description FROM crimes;'
            with connection.cursor() as cursor:
                cursor.execute(query)
                return cursor.fetchall()
        finally:
            connection.close()
    def add_input(self,data):
        """Insert a description-only crime row.

        NOTE(review): an earlier comment here warned of a deliberate SQL
        injection flaw; this version already uses a parameterized query,
        so `data` is escaped by the driver and the warning is stale.
        """
        connection = self.connect()
        try:
            query = "INSERT INTO crimes (description) VALUES(%s);"
            with connection.cursor() as cursor:
                cursor.execute(query,data)
                connection.commit()
        finally:
            connection.close()
    def clear_all(self):
        """Delete every row from the crimes table."""
        connection = self.connect()
        try:
            query = 'DELETE FROM crimes'
            with connection.cursor() as cursor:
                cursor.execute(query)
                connection.commit()
        finally:
            connection.close()
    def add_crime(self,category,date,latitude,longitude,description):
        """Insert a fully-described crime row (parameterized query)."""
        connection = self.connect()
        try:
            query = "INSERT INTO crimes (category,date,latitude,longitude,description) VALUES(%s,%s,%s,%s,%s);"
            with connection.cursor() as cursor:
                cursor.execute(query,(category,date,latitude,longitude,description))
                connection.commit()
        finally:
            connection.close()
    def get_all_crimes(self):
        """Return all crimes as dicts, with the date rendered as
        'YYYY-MM-DD' so the result is JSON serializable."""
        connection = self.connect()
        try:
            query = 'SELECT latitude,longitude,date,category,description FROM crimes;'
            with connection.cursor() as cursor:
                cursor.execute(query)
                named_crimes = []
                for crime in cursor:
                    named_crime = {
                        'latitude':crime[0],
                        'longitude':crime[1],
                        'date':datetime.datetime.strftime(crime[2],'%Y-%m-%d'),
                        'category':crime[3],
                        'description':crime[4]
                    }
                    named_crimes.append(named_crime)
                return named_crimes
        finally:
connection.close() | [
"daehub@126.com"
] | daehub@126.com |
9506d331e19fb7b3de2ea2f31d64527873b94667 | 5d2c6ee38cea33cd436d9817939e0ced248ddad0 | /modules/editorconfig/mod.py | e72f7c3006f8fe2091dc3f05eb77c8858ec9d1c1 | [] | no_license | fhofherr/dot-files | 34c37bf0e6a45be927ef2b84a37f60bbb7ac192f | a71edf212d190f47d13870a213ac716d471a256f | refs/heads/main | 2022-10-08T20:42:46.750167 | 2022-10-05T05:28:02 | 2022-10-05T05:28:02 | 1,208,202 | 3 | 0 | null | 2022-09-28T07:14:26 | 2010-12-30T11:00:51 | Python | UTF-8 | Python | false | false | 434 | py | import os
from dotfiles import fs, module
class Editorconfig(module.Definition):
    """Dotfiles module that links ~/.editorconfig to the copy shipped here."""
    @property
    def editorconfig_src(self):
        # Source file bundled inside this module's directory.
        return os.path.join(self.mod_dir, "editorconfig")
    @property
    def editorconfig_dest(self):
        # Link target in the user's home directory.
        return os.path.join(self.home_dir, ".editorconfig")
    @module.update
    @module.install
    def configure(self):
        # Runs on both install and update. safe_link_file presumably refuses
        # to clobber a real (non-link) file at the destination - TODO confirm.
        fs.safe_link_file(self.editorconfig_src, self.editorconfig_dest)
| [
"mail@ferdinandhofherr.de"
] | mail@ferdinandhofherr.de |
bceec50928f3d2382b8e0575b6918c9538c23f91 | 6bd223ac5bbfe95d45a5f2f052b8b26cf4a4722d | /hydrocode/scripts/dump_replayer.py | bf86ab19442023e3bed9a08314cbb4866c61ebf3 | [
"BSD-3-Clause"
] | permissive | ajaykumarr123/software | ff2ddf9589571e5ed62f6f1e2325e4553686f436 | e0b46eed87636afedc9be3a671edf70fc6cc6cb5 | refs/heads/master | 2022-04-23T11:36:55.535254 | 2020-04-27T02:16:34 | 2020-04-27T02:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | #!/usr/bin/env python3
#Script for replaying raw FPGA data dumps. Read Hydrophones Code wiki entry.
import socket, time, sys
import scipy.io
import numpy
PKT_LEN = 512 #total number of samples in an FPGA packet
NO_CH = 4 #number of channels
SAMPL_RATE = 200000 #sample rate in Hz (per channel, given the sleep computation below)
ADDR = "127.0.0.1" #local host because we are sending the data to the same machine
PORT = 8899 #hydromathd listens on this port
#loading mat file specified from terminal
data = scipy.io.loadmat(sys.argv[1])
#initializing UDP networking
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#sending packets
for pkt_no in range(len(data["raw_samples_interleaved"]) // PKT_LEN):
    #forming a packet from the data. 'H' is unsigned 16 bit integer
    send_buff = data["raw_samples_interleaved"][pkt_no * PKT_LEN : (pkt_no + 1) * PKT_LEN].astype('H')
    #converting packet into a bytes array
    payload = numpy.asarray(send_buff)
    # NOTE(review): the return value of tobytes() is discarded; sendto()
    # accepts the ndarray through the buffer protocol, so this call is a no-op.
    payload.tobytes()
    #sending packet
    sock.sendto(payload, (ADDR, PORT))
    #waiting for the amount of time the FPGA would take to send another packet
    time.sleep(float(PKT_LEN) / float(NO_CH) / float(SAMPL_RATE))
| [
"leader@cuauv.org"
] | leader@cuauv.org |
4dcff123efe2a6ff65429e07f16b006e506d493a | 9617a10ff1d60b2423ea21466704d4a2b3fbc79c | /matplotlib/m05.py | 9016d8dc4c54078443733e0e97cc23e716418bf0 | [] | no_license | oxxostudio/python | 4e7ed29e2b5a03ad69618a0391d5268bb6adcbd5 | aefde16a72582787901c7ff201fd37dcc0208e03 | refs/heads/master | 2022-12-10T11:35:43.068072 | 2020-01-20T08:48:54 | 2020-01-20T08:48:54 | 218,332,949 | 0 | 0 | null | 2022-06-21T23:33:01 | 2019-10-29T16:28:13 | Python | UTF-8 | Python | false | false | 1,199 | py | import numpy as np
from matplotlib import pyplot as plt
ax = np.linspace(-20, 20, 100)
ay = ax*0.5
by = np.sin(ax)
# Point to highlight: (cx, cy) lies on the y = 0.5x line.
cx = 10
cy = cx*0.5
plt.plot(ax, ay, color='red', linewidth=3.0, linestyle='dashed', label='x0.5', zorder=2)
plt.plot(ax, by, color='blue', linewidth=2.0, linestyle='solid', label='sin', zorder=2)
# Draw a dashed vertical guide line from the x-axis up to the point.
plt.plot([cx, cx,],[cy, 0,], color='black', linewidth=1.0, linestyle='dashed', zorder=1, alpha=0.5)
# Mark the point with a single scatter dot.
# https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.scatter.html
plt.scatter(cx, cy, s=100, color='red', zorder=2)
# Attach a label with a curved arrow pointing at the dot.
# https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.annotate.html
plt.annotate('test', xy=(cx+0.5, cy-0.2), xycoords='data', xytext=(+36, -36),
             textcoords='offset points', fontsize=12,
             arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=.2"))
plt.legend(loc='best')
plt.ylim((-10, 10)) # Set the x and y axis limits.
plt.xlim((-20, 20))
xx = plt.gca() # Move the spines so the axes cross at the origin.
xx.spines['right'].set_color('none')
xx.spines['top'].set_color('none')
xx.spines['bottom'].set_position(('data', 0))
xx.spines['left'].set_position(('data', 0))
plt.show()
| [
"oxxo.studio@gmail.com"
] | oxxo.studio@gmail.com |
683a28d0340f87a8191e1927fc8070ca8b4046cd | 636d6125dc2a926915507a438b54f2b4a0bc8f5c | /app/view_models/book.py | 773f3eb057a3171d803c13a411e2a31cd5186499 | [] | no_license | YihuiLu/fishbook | e58b890ec0e2248673f4ce056d1005f0a5393432 | 7147ed5780be5c27b7382092ff6eeeae70813207 | refs/heads/master | 2020-04-07T09:16:31.751186 | 2018-12-05T14:29:12 | 2018-12-05T14:29:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | # -*- coding: utf-8 -*-
# @Time : 2018/11/20 12:45
# @Author : YH
# @Site :
# @File : book.py
# @Software: PyCharm
class BookViewModel:
    """Flattens one raw book dict (YuShu/Douban API shape) for template use."""
    def __init__(self, book):
        """Copy the fields the templates need, normalizing a missing
        `pages`/`summary` to '' and joining authors with an ideographic comma."""
        self.title = book['title']
        self.publisher = book['publisher']
        self.pages = book['pages'] or ''
        self.price = book['price']
        self.isbn = book['isbn']
        self.author = '、'.join(book['author'])
        self.summary = book['summary'] or ''
        self.image = book['image']
        self.pubdate = book['pubdate']
        self.binding = book['binding']
    @property
    def intro(self):
        """Short 'author / publisher / price' line, skipping empty parts."""
        parts = (self.author, self.publisher, self.price)
        return ' / '.join(part for part in parts if part)
class BookCollection:
    """One page of search results: total hit count, keyword, view models."""
    def __init__(self):
        self.total = 0
        self.books = []
        self.keyword = ''
    def fill(self, yushu_book, keyword):
        """Populate from a YuShuBook result object and the search keyword."""
        self.total = yushu_book.total
        self.keyword = keyword
        self.books = list(map(BookViewModel, yushu_book.books))
class _BookViewModel():
@classmethod
def package_single(cls, data, keyword):
returned = {
'books': [],
'total': 0,
'keyword': keyword
}
if data:
returned['total'] = 1
returned['books'] = [cls.__cut_book_data(data)]
return returned
@classmethod
def package_collection(cls, data, keyword):
returned = {
'books': [],
'total': 0,
'keyword': keyword
}
if data:
returned['total'] = data['total']
returned['books'] = [cls.__cut_book_data(book) for book in data['books']]
return returned
@classmethod
def __cut_book_data(cls, data):
book = {
'title': data['title'],
'publisher': data['publisher'],
'pages': data['pages'] or '',
'price': data['price'],
'author': '、'.join(data['author']),
'summary': data['summary'] or '',
'image': data['image']
}
return book
| [
"yihuiwork1@163.com"
] | yihuiwork1@163.com |
da871d42db342efdd70e5b459051f4d9f36e5a77 | 329fc210c90e50eda4876a59a2a7a0ce10e48c51 | /EDMScripts/MapAppliedBField.py | 02aa72d62e295b9c56b7ccb8230005e41f5765b7 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | ColdMatter/EDMSuite | b111914e0d5a8171feebbca6e9ca91d6b3607f5a | aa7c584cb35f871fa3af71118ce2b5eed1e0aebf | refs/heads/master | 2023-08-25T02:52:27.287796 | 2023-08-23T14:07:18 | 2023-08-23T14:07:18 | 6,336,704 | 9 | 14 | MIT | 2023-04-14T13:03:50 | 2012-10-22T14:44:44 | C# | UTF-8 | Python | false | false | 1,225 | py | # MapLoop - asks ScanMaster to make a series of scans with one of the pg
# parameters incremented scan to scan
from DAQ.Environment import *
def prompt(text):
    """Write `text` to stdout (no newline) and return the next stdin line,
    stripped of surrounding whitespace.

    NOTE(review): relies on `sys` being provided by the
    `from DAQ.Environment import *` star import - confirm.
    """
    sys.stdout.write(text)
    reply = sys.stdin.readline()
    return reply.strip()
def mapLoop(start, end, step, numScans):
    """For each rf2 centre time in range(start, end, step): apply the pulse
    timings and the matching attenuator voltage, acquire numScans scans,
    and save them to a numbered zip next to the generated data file name.

    The attenuator voltages are entered interactively as a comma-separated
    list with one value per step.
    """
    powers_input = prompt("Enter attenuator volts for rf2: ")
    powers = powers_input.split(",")
    # setup: build the output path from ScanMaster's data directory.
    fileSystem = Environs.FileSystem
    file = \
        fileSystem.GetDataDirectory(\
        fileSystem.Paths["scanMasterDataPath"])\
        + fileSystem.GenerateNextDataFileName()
    print("Saving as " + file + "_*.zip")
    print("")
    # start looping over the requested centre times.
    r = range(start, end, step)
    for i in range(len(r)):
        print "pg:rf2CentreTime -> " + str(r[i])
        print "pg:rf2BlankingCentreTime -> " + str(r[i])
        print "rf2 attenuator voltage -> " + powers[i]
        sm.AdjustProfileParameter("pg", "rf2CentreTime", str(r[i]), False)
        sm.AdjustProfileParameter("pg", "rf2BlankingCentreTime", str(r[i]), False)
        sm.AdjustProfileParameter("out", "externalParameters", powers[i], False)
        hc.SetRF2AttCentre(float(powers[i]))
        sm.AcquireAndWait(numScans)
        scanPath = file + "_" + str(i) + ".zip"
        sm.SaveData(scanPath)
def run_script():
    # Entry point invoked by the EDMSuite script host; just prints usage.
    print "Use mapLoop(start, end, step, numScans)"
| [
"cjh211@ic.ac.uk"
] | cjh211@ic.ac.uk |
d22bb3b79c30e7e91a8ff5cecff9f0d78f81ba18 | 41ecc15e475d7f47ed4fae44cb5ffe773766db06 | /python-problems/hackerrank/python/merge-tool/run.py | c61c27492339459122ffc05ea6b53ce1a65c4d31 | [] | no_license | smoonmare/object_50071 | 98d99d89b5c2f85a6f8dbc6038b1fb24b6deb8f3 | d1cc63de48655e0a3d88e3ca14a69c4e014bfbf3 | refs/heads/main | 2023-02-18T05:07:20.967182 | 2021-01-21T22:09:31 | 2021-01-21T22:09:31 | 314,103,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | def merge_the_tools(string, k):
# your code goes here
split = len(string) / k
for i in range(0, len(string), k):
string_new = string[i:i+k]
string_sub = ''
for s in string_new:
if s not in string_sub:
string_sub += s
print(string_sub)
if __name__ == '__main__':
    # HackerRank harness format: the string, then the chunk size, on stdin.
    string, k = input(), int(input())
    merge_the_tools(string, k)
"smoonmare@gmail.com"
] | smoonmare@gmail.com |
7f99dc8567d50aa19286f5b9c64c3a5a862304e9 | 4f1bc750c3b84bb03860689ab0e37b46c566a76c | /number/floor.py | 7428daf29fe4db1701dbe3ec5971816fd8f9b547 | [] | no_license | snowcity1231/python_learning | 6ceee0b9275b7414ca195169ce309b50a371fe71 | b69b72d4442a8baf7f78454f47e5008265225e32 | refs/heads/master | 2020-05-02T06:14:26.813717 | 2019-09-25T13:40:19 | 2019-09-25T13:40:19 | 177,790,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # -*- coding: UTF-8 -*-
# floor() returns the largest integer value <= the argument
# (as a float on Python 2; this file uses Python 2 print statements).
import math
print "math.floor(-45.12): ", math.floor(-45.12)
print "math.floor(47.68): ", math.floor(47.68)
"xuechen911231@aliyun.com"
] | xuechen911231@aliyun.com |
7a3625294b8e51942c023f2066d9e6c05005bf18 | 009ed5abc0fa16ea4afe0786c5ce9f15a8b08a09 | /02_Python_Data_Structures/week_3/progAssign07-01.py | eeea3c00fa44a9c19cc29527113edce1cfc2c737 | [] | no_license | LeeBaker3/Python_for_Everybody_Specialization | f0ac2b2bcd9c0149d23b6c48373865b9591c1555 | 28c885fde2cc14925c0952168dcc966992a52302 | refs/heads/master | 2020-05-18T06:27:21.374951 | 2019-05-02T11:43:37 | 2019-05-02T11:43:37 | 184,234,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | # Use words.txt as the file name
fname = raw_input("Enter file name: ")
try:
fh = open(fname)
except:
print ("Invalid file name")
quit()
for line in fh:
line = line.rstrip()
line = line.upper()
print line
| [
"lee.baker1@ymail.com"
] | lee.baker1@ymail.com |
f51940f105996e1de20f3f71f7d8c8906b77e4da | dcb219809f8d4b4f94d949a87c814fd9e35ff5f8 | /old/study-old/evaluation/mturk_validation.py | d06b407a9baf596a8899fd395b1aa0f58be8f8ef | [] | no_license | shilad/cartograph-alg | 00606e255c5aa9d1adf2d0a021eb7b8672cb21a9 | 4a36e79b6bc0d600c2eed886c376013a4f686531 | refs/heads/master | 2021-07-03T14:23:31.164855 | 2020-06-19T15:44:10 | 2020-06-19T15:44:10 | 190,458,606 | 2 | 1 | null | 2020-06-19T04:55:56 | 2019-06-05T19:48:52 | HTML | UTF-8 | Python | false | false | 2,210 | py | import pandas as pd
import re
# Gold-standard "attention check" articles planted in each HIT; a worker's
# batch is kept only if they flag enough of these (see validation_articles).
VALIDATION_ARTICLES = ['Naruto', 'Gmail', 'Urdu', 'Mathematical Statistics', 'Computer Science', 'Blush', 'Painting',
                       'Earbuds', 'Braces', 'Hairstyle', 'Tamarind', 'Diapers', 'Baby Powder', 'Lmao', 'Satellite',
                       'Quiz', 'Vanilla', 'Mistake', 'Four-leaf clover', 'Mac n\' Cheetos', 'Bleach', 'Aroma of Tacoma',
                       'Cowboy', 'Birthday Cake', 'The Moon is made of Green Cheese', 'Vampire', '1896 Summer Olympics',
                       'Caribbean', 'Beach', 'Ramen', 'Braces', 'Chocolate', 'American Revolutionary War', 'Serum',
                       'Old Town Road', 'Sailor Moon', 'Limbo', 'The Lion King', 'Braces', 'Necklace', 'Abdomen',
                       'Bumblebee']
def check_workers(responses):
    """Drop every row belonging to a worker who fails the attention-check
    validation, and return the filtered DataFrame."""
    for worker_id in responses['WorkerId'].unique():
        worker_rows = responses.loc[responses['WorkerId'] == worker_id]
        if not validation_articles(worker_rows):
            responses = responses[responses['WorkerId'] != worker_id]
    return responses
def validation_articles(worker_responses):
    """
    Input: one worker's HIT responses (DataFrame).
    Output: True when the worker flagged at least half of the planted
    validation articles while discarding no more than half of all articles.
    """
    flagged = 0
    caught = 0
    answer_columns = [c for c in worker_responses.columns if 'Answer.dont-belong' in c]
    for _, row in worker_responses.iterrows():
        for column in answer_columns:
            if not row[column]:
                continue
            flagged += 1
            # The article index is embedded in the column name between '_' and '.'.
            article_num = re.findall("(?<=_)(.*)(?=\.)", column)[0]
            if row['Input.article_' + str(article_num)] in VALIDATION_ARTICLES:
                caught += 1
    # Each HIT (row) contains 4 validation articles out of 120 total.
    max_catchable = worker_responses.shape[0] * 4
    all_articles = worker_responses.shape[0] * 120
    return flagged / all_articles <= 0.5 <= caught / max_catchable
def main(responses):
cleaned_responses = check_workers(responses)
cleaned_responses.to_csv('study/evaluation/cleaned_mturk_results.csv')
responses = pd.read_csv('study/evaluation/mturk_results.csv')
main(responses)
| [
"lily.irvin@gmail.com"
] | lily.irvin@gmail.com |
9ab1f8167a282c96f3abdd82c8c3ceb07cf12556 | bf7b8efa7cb20e2fe885b70dc4bac2f2c42eb0a6 | /LVQ.py | 50a2d450b655ac9a5375b0a01ce0ce2447b4beff | [] | no_license | WilliamRayJohnson/LVQ | b03f6387714acd38afdbc6a8a68393d32480a16a | ea21cc53574458b3155d7f9c1f3627ebfa08cf32 | refs/heads/master | 2021-07-09T22:09:00.148214 | 2017-10-09T20:09:58 | 2017-10-09T20:09:58 | 106,273,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | '''
William Johnson
'''
import random
import LVQNet
def main():
pValues = [[-3,2], [0,3], [3,2], [-3,-2], [0,-3], [3,-2]]
outputLayer = ['a', 'b', 'c', 'c', 'b', 'a']
learningRate = 0.1
trainingThreshold = 0.15
valuesPerNode = 50
testValues = 5
inputs = []
inputCategories = []
testInputs = []
testInputCategories = []
for pValue in pValues:
for input in range(valuesPerNode):
newInput = []
for value in pValue:
newInput.append(random.uniform(value - 2, value + 2))
inputs.append(newInput)
inputCategories.append(outputLayer[pValues.index(pValue)])
print("Training....")
net = LVQNet.LVQNet(pValues, inputs, inputCategories, outputLayer, learningRate)
net.train(trainingThreshold)
for pValue in pValues:
for input in range(testValues):
newInput = []
for value in pValue:
newInput.append(random.uniform(value - 2, value + 2))
testInputs.append(newInput)
testInputCategories.append(outputLayer[pValues.index(pValue)])
print("\nResults:")
for weight in range(len(net.layer1)):
print("Weight for " + str(pValues[weight]) + ": " + str(net.layer1[weight]))
if __name__ == '__main__':
main() | [
"willrayjohnson@gmail.com"
] | willrayjohnson@gmail.com |
76cb59292f081ff5becc2899888ab0732f78b585 | c849fa997c2a41d27af1d7aa21e3f0c7d7b4523b | /plot_N_IPSM.py | bee85069c3cb70ed90358d5ce7c3817efb609e03 | [] | no_license | kinuskia/Heavy-Ion-Collision-Analysis | 8045ce9969872006f4f59a8d6938409658e5adb6 | 47ae07a9b219d9873975b30c89ac6da18652e526 | refs/heads/master | 2021-06-25T00:33:16.942994 | 2020-11-01T15:04:10 | 2020-11-01T15:04:10 | 154,057,951 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | import matplotlib.pyplot as plt
import numpy as np
# Read in IPSM fit result
percentiles = np.arange(100)
N_values = np.zeros(len(percentiles))
for k in range(0, len(percentiles)):
centrality_class = str(percentiles[k]) + '-' + str(percentiles[k]+1)
filename_fit = "../IPSM-Fit_One/output/" + centrality_class + ".txt"
N_value = np.loadtxt(filename_fit, unpack=True)
N_values[k] = N_value
plt.figure(2)
plt.figure(figsize=(10,5))
plt.rcParams.update({'font.size': 23})
plt.rcParams['axes.titlepad'] = 10
plt.plot(percentiles, N_values)
plt.xlabel("centrality class $[p, p+1]$ in %")
plt.ylabel("$\\mu_N$")
plt.title("Number $\\mu_N$ of sources")
plt.savefig("plots/N_IPSM.pdf", format="pdf", bbox_inches="tight")
| [
"kianusch@web.de"
] | kianusch@web.de |
10ae30770a42dcc775c02d1bdf25f569ef1c21f7 | 5d7b6a1ee7d2696e272a5dd08117f8a2a7163a84 | /sendmail/urls.py | 1b1e14641a6978b7a48668ec73ee94b7ef048f0c | [] | no_license | HassanNaseer14/Qari2 | ba094ee43105588d423ebcc9fab32988381c7c6e | cb981d8228aabbeea52ec288ecf1cdf21143dbe9 | refs/heads/master | 2023-01-08T11:10:08.394314 | 2020-11-02T09:42:21 | 2020-11-02T09:42:21 | 288,016,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from django.urls import path
from . import views
urlpatterns = [
path('contact/', views.contact, name="contact"),
path('success/', views.success, name="success")
]
| [
"hasrayrah970@gmail.com"
] | hasrayrah970@gmail.com |
fccd134ed2431e7cce33642e9fc7705ec4904734 | 9092e62932da86fb2af69e0529e4cbb082cfea22 | /wifiName32Pwd63.py | 9c30ac8432ecedb77930c68c8c6746ec52684028 | [] | no_license | FengZiQ/flushbonding | d09915ce4285530e3d082c0aaea029790ffbdd9d | 5ce631c9d09790846a31332eb8e76460e5f3f08e | refs/heads/master | 2020-04-01T22:29:13.256997 | 2019-06-05T02:25:14 | 2019-06-05T02:25:14 | 153,711,075 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | # coding=utf-8
import time
from to_log import to_log
from QRCodeOfNetworkConfig import wifi_mode
from dmSupport import get_device_attribute
from configFile import data_for_networkTest, open_picture
from honorRouter import Configuration
rc = Configuration()
to_log('SSID长度32/密码长度63网络配置测试\n')
if rc.wc(name='123a'*8, pwd='12'*30 + 'abc', secure=2):
# 生成SSID长度32/密码长度63网络配置二维码
wifi_mode(name='123a'*8, pwd='12'*30 + 'abc', pr='usb', dh='dhcp')
# 配网时间
time.sleep(15)
# 获取系统当前时间
nowTimestamp = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
# 获取设备属性
da = get_device_attribute(data_for_networkTest.get('deviceNo'))
# 修正时间
correction_time = nowTimestamp[:-4] + str(int(nowTimestamp[-4]) + 1)
if da.get('time', 'failed')[:-3] == nowTimestamp[:-3] or da.get('time', 'failed')[:-3] == correction_time:
if da.get('persist.net.type') == 'wifi' and da.get('persist.net.dhcp') == 'true':
to_log('SSID长度32/密码长度63网络配置测试Pass\n')
to_log('配网方式:'+da.get('persist.net.type', ''))
to_log('DHCP:' + da.get('persist.net.dhcp', ''))
to_log('IP:' + da.get('sys.net.ip', ''))
to_log('MAC:' + da.get('system.net.wifi.mac', '') + '\n')
else:
to_log('请检查断言参数\n')
# 打开设备信息码
open_picture('deviceInfoCode.png')
else:
to_log('SSID长度32/密码长度63网络配置测试Failed\n')
rc.finished()
| [
"feng1025352529@qq.com"
] | feng1025352529@qq.com |
beae20a9e51d2812be3dc14ea324a5bf5983b7b4 | 75f6b7ad98674ccc60c10a548eecf9c0382adde3 | /applications/departamento/migrations/0004_auto_20200730_1640.py | 07864186282a2e9678b2246e24854766fd60afdc | [] | no_license | cdazaparra/empleadosproj | f20fa71b43d1968be64d7d69b0a90f86983e4f29 | dc3ddc0032fedf850fdce81344d81c4013a3362c | refs/heads/master | 2022-12-04T09:54:48.633978 | 2020-08-08T16:51:33 | 2020-08-08T16:51:33 | 285,849,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | # Generated by Django 3.0.8 on 2020-07-30 21:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('departamento', '0003_auto_20200730_1302'),
]
operations = [
migrations.AlterUniqueTogether(
name='departamento',
unique_together=set(),
),
]
| [
"cdazaparra@gmail.com"
] | cdazaparra@gmail.com |
d94ad0d4184ebc4fb4df9f9e567f480fa0b69e93 | 5a7375bdcd7fba344d9d8e424c42e4ff6e58e5cd | /00_algo_prob/2529_ineuality.py | f007230e9a61a1f36461d2b4bf68aa212163e80e | [] | no_license | jhee514/Algorithms | 1d9d9f8bf11b957393ad1a169fa1a61f86d77da5 | 0ebed8f99a63eae2f9122033ab4e13b2b499fb52 | refs/heads/master | 2021-07-21T01:33:22.838431 | 2020-10-28T15:21:19 | 2020-10-28T15:21:19 | 226,996,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | import sys
sys.stdin = open("2529_input.txt", "r")
"""
10개의 숫자 중에서 k 개를 순열로 가져와
부등호를 사이사이 넣어봐
중간에 가지치기 해주면서 쭉 돌아야
"""
import itertools
def sol(k, data):
nums = list(range(10))
min_num, max_num = 10 ** (k + 1), 0
perms = itertools.permutations(nums, k + 1)
for p in perms:
if p == (1, 0, 2, 3, 4, 5, 6, 7, 9, 8):
a = 1
for i in range(k):
if data[i] == '>' and p[i] < p[i + 1]:
break
elif data[i] == '<' and p[i] > p[i + 1]:
break
# > < < < > > > < <
else:
str_num = ''
for pp in p:
str_num += str(pp)
if int(str_num) < min_num:
min_num = int(str_num)
str_min = str_num
if int(str_num) > max_num:
max_num = int(str_num)
str_max = max_num
print(str_max)
print(str_min)
T = 2
for tc in range(T):
k = int(input())
data = list(map(str, input().split()))
sol(k, data)
| [
"514kim@gmail.com"
] | 514kim@gmail.com |
2d240f9380a6ea2106fea6e525ee26ecd7e8bb55 | 9a46c27eb89f690b0ab47f3beabb9492c894ebb0 | /test_Y3--Y5,Y2,Y4.py | 4e51a86f4949d6914f3cc7d9bad0fcbd6a1b72e7 | [] | no_license | Cavin-Lee/1SMT | 06c5f99847e695c76e53f59f9f84537f3478d1fb | 3b1ab012bab3699acec194305fb4ff9ae9d7fe20 | refs/heads/master | 2020-07-24T18:24:14.614498 | 2019-09-18T10:25:50 | 2019-09-18T10:25:50 | 208,008,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,330 | py | # -*- encoding:utf-8 -*-
'''
one source:
Y3:38
two targets:
Y5: 18 class:1-18
Y2:18 class:7-24
Y4: 18 class:7-18,25-30
'''
import numpy as np
import scipy.io
import source_domain,target_domain,RandIndex,source,target
from sklearn import metrics
from sklearn.decomposition import KernelPCA
from sklearn import preprocessing
classnum_source=38; classnum_target1=18; classnum_target2=18; classnum_target3=18
M=3 #the number of target domain
Source=scipy.io.loadmat('YaleB/3/Y3--Y5,Y2,Y4,Y1/Y3.mat')
Target1=scipy.io.loadmat('YaleB/3/Y3--Y5,Y2,Y4,Y1/Y5.mat') # first target
Target2 =scipy.io.loadmat('YaleB/3/Y3--Y5,Y2,Y4,Y1/Y2.mat') # second target
Target3 =scipy.io.loadmat('YaleB/3/Y3--Y5,Y2,Y4,Y1/Y4.mat') # third target
data_source=Source['Xs']; data_target1=Target1['Xt']; data_target2=Target2['Xt']; data_target3=Target3['Xt']
label_s=Source['Ys']; label_t1=Target1['Yt']; label_t2=Target2['Yt']; label_t3=Target3['Yt']
U_Source=np.mat(Source['Us'])
'''Dimensionality Reduction'''
pca_s=KernelPCA(n_components=100,kernel='rbf')
pca_t1=KernelPCA(n_components=100,kernel='rbf')
pca_t2=KernelPCA(n_components=100,kernel='rbf')
pca_t3=KernelPCA(n_components=100,kernel='rbf')
data_source=pca_s.fit_transform(data_source) #大小均为n*d'
data_target1=pca_t1.fit_transform(data_target1)
data_target2=pca_t2.fit_transform(data_target2)
data_target3=pca_t3.fit_transform(data_target3)
scipy.io.savemat('YaleB/3/Y3--Y5,Y2,Y4,Y1/Y3524_100.mat',{'Xs':data_source, 'X5':data_target1,
'X2': data_target2, 'X4':data_target3})
'''Preprocessing
data_source=np.mat(preprocessing.scale(data_source)).T #大小均为d'*n
data_target1=np.mat(preprocessing.scale(data_target1)).T
data_target2=np.mat(preprocessing.scale(data_target2)).T
'''
data_source=np.mat(preprocessing.StandardScaler().fit_transform(data_source)).T
data_target1=np.mat(preprocessing.StandardScaler().fit_transform(data_target1)).T
data_target2=np.mat(preprocessing.StandardScaler().fit_transform(data_target2)).T
data_target3=np.mat(preprocessing.StandardScaler().fit_transform(data_target3)).T
label_source=[]; label_target1=[]; label_target2=[]; label_target3=[]
for i in range(len(label_s)):
d=int(label_s[i])
label_source.append(d)
for i in range(len(label_t1)):
d=int(label_t1[i])
label_target1.append(d)
for i in range(len(label_t2)):
d=int(label_t2[i])
label_target2.append(d)
for i in range(len(label_t3)):
d=int(label_t3[i])
label_target3.append(d)
'''Imagenet:WS'''
Lambda_source=0.5
W_source=source_domain.SLMC_W(data_source,U_Source,Lambda_source,classnum_source)
Maxiter = 200; epsilon = 10**(-7); Lambda = 0.5
Y_target1 = np.mat(np.eye(classnum_target1, dtype=int))
Y_target2=np.mat(np.eye(classnum_target2, dtype=int))
Y_target3=np.mat(np.eye(classnum_target3, dtype=int))
U_target1 = np.mat(source_domain.Normization(np.random.random((classnum_target1,data_target1.shape[1]))))
U_target2=np.mat(source_domain.Normization(np.random.random((classnum_target2, data_target2.shape[1]))))
U_target3=np.mat(source_domain.Normization(np.random.random((classnum_target3, data_target3.shape[1]))))
U1 = U_target1; U2=U_target2; U3=U_target3
print("-------------------Y5 domain-------------------")
J = J_new = float('inf')
for j in range(Maxiter):
W1 = source.SLMC_W(data_target1, U1, Lambda, classnum_target1)
U1 = source.SLMC_U(data_target1, classnum_target1, W1)
J2 = 0
J = J_new
for k in range(classnum_target1):
for i in range(data_target1.shape[1]):
J2 = J2 + U1[k, i] ** 2 * np.linalg.norm((W1.T * data_target1[:, i] - Y_target1[:, k]), ord=2) ** 2
J_new = 0.5 * np.linalg.norm(W1, ord=2) ** 2 + 0.5 * Lambda * J2
# print(abs((J_new - J) / J_new))
if (abs((J_new - J) / J_new) < epsilon):
break
print(j)
Y1 = (W1.T * data_target1).T
raw_1, column_1 = Y1.shape
pre1 = []
for i in range(raw_1):
_positon = np.argmax(Y1[i])
m, n = divmod(_positon, column_1)
pre1.append(n + 1)
NMI_1 = metrics.normalized_mutual_info_score(label_target1, pre1)
RI_1 = RandIndex.rand_index_score(label_target1, pre1)
print('NMI:',round(NMI_1,4))
print('RI:',round(RI_1,4))
print("-------------------Y2 Domain-------------------")
J = J_new = float('inf')
for j in range(Maxiter):
W2 = source.SLMC_W(data_target2, U2, Lambda, classnum_target2)
U2 = source.SLMC_U(data_target2, classnum_target2, W2)
J2 = 0
J = J_new
for k in range(classnum_target2):
for i in range(data_target2.shape[1]):
J2 = J2 + U2[k, i] ** 2 * np.linalg.norm((W2.T * data_target2[:, i] - Y_target2[:, k]), ord=2) ** 2
J_new = 0.5 * np.linalg.norm(W2, ord=2) ** 2 + 0.5 * Lambda * J2
# print(abs((J_new - J) / J_new))
if (abs((J_new - J) / J_new) < epsilon):
break
print(j)
Y2 = (W2.T * data_target2).T
raw_2, column_2 = Y2.shape
pre2 = []
for i in range(raw_2):
_positon = np.argmax(Y2[i])
m, n = divmod(_positon, column_2)
pre2.append(n + 1)
#print(pre2)
NMI_2 = metrics.normalized_mutual_info_score(label_target2, pre2)
RI_2 = RandIndex.rand_index_score(label_target2, pre2)
print('NMI:',round(NMI_2,4))
print('RI:',round(RI_2,4))
print("-------------------Y4 Domain-------------------")
J = J_new = float('inf')
for j in range(Maxiter):
W3 = source.SLMC_W(data_target3, U3, Lambda, classnum_target3)
U3 = source.SLMC_U(data_target3, classnum_target3, W3)
J2 = 0
J = J_new
for k in range(classnum_target3):
for i in range(data_target3.shape[1]):
J2 = J2 + U3[k, i] ** 2 * np.linalg.norm((W3.T * data_target3[:, i] - Y_target3[:, k]), ord=2) ** 2
J_new = 0.5 * np.linalg.norm(W3, ord=2) ** 2 + 0.5 * Lambda * J2
# print(abs((J_new - J) / J_new))
if (abs((J_new - J) / J_new) < epsilon):
break
print(j)
Y3 = (W3.T * data_target3).T
raw_3, column_3 = Y3.shape
pre3 = []
for i in range(raw_3):
_positon = np.argmax(Y3[i])
m, n = divmod(_positon, column_3)
pre3.append(n + 1)
#print(pre2)
NMI_3 = metrics.normalized_mutual_info_score(label_target3, pre3)
RI_3 = RandIndex.rand_index_score(label_target3, pre3)
print('NMI:',round(NMI_3,4))
print('RI:',round(RI_3,4))
'''Target Domain'''
print('-------------------Target Domain-------------------')
Lambda_target1=1; Lambda_target2=1; Lambda_target3=1
beta = 0.1
gama = 0.01
eta =0.01
r = [20]
VT = [np.mat(np.random.random((classnum_target1, classnum_source))),
np.mat(np.random.random((classnum_target2, classnum_source))),
np.mat(np.random.random((classnum_target3, classnum_source)))]
for param_r in range(len(r)):
gama_tar = [gama, gama, gama]
print(' ')
print('the value of parameter:', r[param_r])
# gama_tar=[gama[param_eta],gama[param_eta]]
D = np.mat(np.random.random((data_target1.shape[0], r[param_r]))) # 随机初始化公共字典
V = [np.mat(np.random.random((r[param_r], classnum_target1))),
np.mat(np.random.random((r[param_r], classnum_target2))),
np.mat(np.random.random((r[param_r], classnum_target3)))]
VT_tar = VT;
U_target1_tar = U_target1;
U_target2_tar = U_target2;
U_target3_tar = U_target3;
D_tar = D;
V_tar = V
J_target1 = J_new_target1 = float("inf")
J_target2 = J_new_target2 = float("inf")
J_target3 = J_new_target3 = float("inf")
for m in range(Maxiter):
W_target1 = target_domain.target_WTj(data_target1, U_target1_tar, W_source, VT_tar[0], V_tar[0],
D_tar, Lambda_target1, gama_tar[0], beta)
W_target2 = target_domain.target_WTj(data_target2, U_target2_tar, W_source, VT_tar[1], V_tar[1],
D_tar, Lambda_target2, gama_tar[1], beta)
W_target3 = target_domain.target_WTj(data_target3, U_target3_tar, W_source, VT_tar[2], V_tar[2],
D_tar, Lambda_target3, gama_tar[2], beta)
U_target1_tar = target_domain.target_U(data_target1, classnum_target1, W_target1)
U_target2_tar = target_domain.target_U(data_target2, classnum_target2, W_target2)
U_target3_tar = target_domain.target_U(data_target3, classnum_target3, W_target3)
VT_tar[0] = target_domain.Source_Target_VTj(W_target1, W_source, beta, eta)
VT_tar[1] = target_domain.Source_Target_VTj(W_target2, W_source, beta, eta)
VT_tar[2] = target_domain.Source_Target_VTj(W_target3, W_source, beta, eta)
WT = [W_target1, W_target2, W_target3]
try:
D_tar = target_domain.Target_D(WT, V_tar, gama_tar, r[param_r])
except Exception as e:
print(e)
try:
D_tar = D_tar / (np.sum(D_tar, axis=0))
except Exception as e:
print(e)
V_tar[0] = target_domain.Target_Vj(W_target1, D_tar, gama_tar[0], eta)
V_tar[1] = target_domain.Target_Vj(W_target2, D_tar, gama_tar[1], eta)
V_tar[2] = target_domain.Target_Vj(W_target3, D_tar, gama_tar[2], eta)
J_target1 = J_new_target1
J_target2 = J_new_target2
J_target3 = J_new_target3
J2_target1 = 0
for k in range(classnum_target1):
for i in range(data_target1.shape[1]):
J2_target1 = J2_target1 + U_target1_tar[k, i] ** 2 * np.linalg.norm(
(W_target1.T * data_target1[:, i] - Y_target1[:, k]),
ord=2) ** 2
J2_target2 = 0
for k in range(classnum_target2):
for i in range(data_target2.shape[1]):
J2_target2 = J2_target2 + U_target2_tar[k, i] ** 2 * np.linalg.norm(
(W_target2.T * data_target2[:, i] - Y_target2[:, k]), ord=2) ** 2
J2_target3 = 0
for k in range(classnum_target3):
for i in range(data_target3.shape[1]):
J2_target3 = J2_target3 + U_target3_tar[k, i] ** 2 * np.linalg.norm(
(W_target3.T * data_target3[:, i] - Y_target3[:, k]), ord=2) ** 2
J3 = 0
for j in range(M):
J3 = J3 + 0.5 * beta * np.linalg.norm(W_source - WT[j] * VT_tar[j], ord=2) ** 2
+ 0.5 * gama_tar[j] * np.linalg.norm(WT[j] - D_tar * V_tar[j], ord=2) ** 2
classnum_Target = [classnum_target1, classnum_target2, classnum_target3]
J5 = 0
for j in range(M):
V_temp = V_tar[j];
VT_temp = VT_tar[j]
for i in range(r[param_r]):
J5 = J5 + np.linalg.norm(V_temp[i, :], ord=2)
for i in range(classnum_Target[j]):
J5 = J5 + np.linalg.norm(VT_temp[i, :], ord=2)
J5 = eta * J5
J_new_target1 = 0.5 * (np.linalg.norm(W_target1, ord=2) ** 2 + Lambda_target1 * J2_target1) + J3 + J5
J_new_target2 = 0.5 * (np.linalg.norm(W_target2, ord=2) ** 2 + Lambda_target2 * J2_target2) + J3 + J5
J_new_target3 = 0.5 * (np.linalg.norm(W_target3, ord=2) ** 2 + Lambda_target3 * J2_target3) + J3 + J5
# print(abs((J_new_Dslr - J_Dslr) / J_new_Dslr))
# print(abs((J_new_Webcam - J_Webcam) / J_new_Webcam))
if (abs((J_new_target1 - J_target1) / J_new_target1) < epsilon and
abs((J_new_target2 - J_target2) / J_new_target2) < epsilon and
abs((J_new_target3 - J_target3) / J_new_target3) < epsilon):
break
print(m)
print('--------------------------Y5 Domain--------------------------')
Y_t1 = (W_target1.T * data_target1).T
raw_t1, column_t1 = Y_t1.shape
target1_pre = []
for i in range(raw_t1):
_positon = np.argmax(Y_t1[i])
m, n = divmod(_positon, column_t1)
target1_pre.append(n + 1)
NMI_t1 = metrics.normalized_mutual_info_score(label_target1, target1_pre)
RI_t1 = RandIndex.rand_index_score(label_target1, target1_pre)
print('NMI:', round(NMI_t1, 4))
print('RI:', round(RI_t1, 4))
print('--------------------------Y2 Domain--------------------------')
Y_t2 = (W_target2.T * data_target2).T
raw_t2, column_t2 = Y_t2.shape
target2_pre = []
for i in range(raw_t2):
_positon = np.argmax(Y_t2[i])
m, n = divmod(_positon, column_t2)
target2_pre.append(n + 1)
NMI_t2 = metrics.normalized_mutual_info_score(label_target2, target2_pre)
RI_t2 = RandIndex.rand_index_score(label_target2, target2_pre)
print('NMI:', round(NMI_t2, 4))
print('RI:', round(RI_t2, 4))
print('--------------------------Y4 Domain--------------------------')
Y_t3 = (W_target3.T * data_target3).T
raw_t3, column_t3 = Y_t3.shape
target3_pre = []
for i in range(raw_t3):
_positon = np.argmax(Y_t3[i])
m, n = divmod(_positon, column_t3)
target3_pre.append(n + 1)
NMI_t3 = metrics.normalized_mutual_info_score(label_target3, target3_pre)
RI_t3 = RandIndex.rand_index_score(label_target3, target3_pre)
print('NMI:', round(NMI_t3, 4))
print('RI:', round(RI_t3, 4))
| [
"noreply@github.com"
] | noreply@github.com |
111f867f7740d9819866da0ee4464566409dd22c | 7c457dbba7d37f912c1136e464622c5b61eed1d7 | /x_temporal/train.py | d7d3386d5ca56441eec64e38c195094810fa8aec | [
"MIT"
] | permissive | Xlsean/X-Temporal | c6ffebe68a34675dd11120ff379fd8912118f4cc | 221b98bcedae86ed4eebec888d70c19da0a721d4 | refs/heads/master | 2022-10-01T08:23:04.756525 | 2020-05-18T10:31:38 | 2020-05-18T10:31:38 | 257,547,671 | 0 | 0 | MIT | 2020-05-18T10:31:39 | 2020-04-21T09:38:44 | null | UTF-8 | Python | false | false | 1,577 | py | import argparse
import yaml
from easydict import EasyDict
import torch
import numpy as np
from x_temporal.interface.temporal_helper import TemporalHelper
from x_temporal.utils.multiprocessing import mrun
parser = argparse.ArgumentParser(description='X-Temporal')
parser.add_argument('--config', type=str, help='the path of config file')
parser.add_argument("--shard_id", help="The shard id of current node, Starts from 0 to num_shards - 1",
default=0, type=int)
parser.add_argument("--num_shards", help="Number of shards using by the job",
default=1, type=int)
parser.add_argument("--init_method", help="Initialization method, includes TCP or shared file-system",
default="tcp://localhost:9999", type=str)
parser.add_argument('--dist_backend', default='nccl', type=str)
def main():
args = parser.parse_args()
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config = EasyDict(config['config'])
if config.gpus > 1:
torch.multiprocessing.spawn(
mrun,
nprocs=config.gpus,
args=(config.gpus,
args.init_method,
args.shard_id,
args.num_shards,
args.dist_backend,
config,
'train',
),
daemon=False)
else:
temporal_helper = TemporalHelper(config)
temporal_helper.train()
if __name__ == '__main__':
torch.multiprocessing.set_start_method("forkserver")
main()
| [
"shaohao@sensetime.com"
] | shaohao@sensetime.com |
3b8df220c66e6bc64e99ddd5f73dcca6bdeccc90 | e616c5b4ded0b3df45cdd221a38622d85c2f6b22 | /send_me_email.py | b8509cf0d3264a2a939f8c28d241d34665dd05c9 | [] | no_license | sharnett/AzizComedyCellarEmails | b385d7e33b0048d9c4ddf4396a76cbdfdefbb3a6 | 325726a376dec5f958ee3c8fda9c95a00d2319b4 | refs/heads/master | 2016-09-01T17:39:59.077337 | 2011-10-24T05:03:06 | 2011-10-24T05:03:06 | 2,558,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | def send_me_email(date):
import smtplib, string
SUBJECT = "Aziz in the house!"
TO = "srharnett@gmail.com"
FROM = "srharnett@gmail.com"
text = "He's comin to town! Be there be there be there! Clear you schedule on %s" % date
BODY = string.join((
"From: %s" % FROM,
"To: %s" % TO,
"Subject: %s" % SUBJECT ,
"",
text
), "\r\n")
server = smtplib.SMTP("localhost")
server.sendmail(FROM, [TO], BODY)
server.quit()
| [
"sean@professorMurder.(none)"
] | sean@professorMurder.(none) |
f1dbcbaefd816eb6775aa3ac94503874610c340d | 8a6beae86b2b276c9685dfa2b06a5ffacd99ab4b | /app.py | bae66da8ab80c74e6911cbbade66a7fac8f382b6 | [] | no_license | stuhunter4/red_window_news | 4fd47b4f1436cb47b38b936a6fce8859fad97fa1 | cb34457c5682b3e667382a74603b69baae7dde19 | refs/heads/main | 2023-03-25T10:44:44.239758 | 2021-03-24T23:54:33 | 2021-03-24T23:54:33 | 311,017,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_news
# Create an instance of Flask
app = Flask(__name__)
# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/rednews_app")
# Route to render index.html template using data from Mongo
@app.route("/")
def home():
new_listings = mongo.db.collection.find_one()
# Return template and data
return render_template("index.html", news=new_listings)
# Route that will trigger the scrape function
@app.route("/scrape")
def scrape():
# run the scrape function and save the results to a varialbe
news_data = scrape_news.scrape_info()
# update the mongo database using update and upsert=True
mongo.db.collection.update({}, news_data, upsert=True)
# Redirect back to home page
return redirect("/")
if __name__ == "__main__":
app.run(debug=True) | [
"stuhunter4@users.noreply.github.com"
] | stuhunter4@users.noreply.github.com |
16840e785de669798985dd9040d55e3037b2f01a | 66a82c2eb7f9facff4cb0aa72f21a713dbb1cf61 | /devices/SIP04_FZJ/01_Test/test_sip04_01.py | 8f9e8dfee1412bdb8d75db1ffa146684f3c7300e | [
"MIT"
] | permissive | geophysics-ubonn/reda_testing | 894eefa8f5cddf288c639c00404c6bd12339dad7 | c32f3faa685b77974b88ba1126a02afabfe5fd2d | refs/heads/master | 2023-06-04T00:16:43.503287 | 2020-12-21T13:23:48 | 2020-12-21T13:23:48 | 110,421,246 | 0 | 1 | NOASSERTION | 2019-06-25T09:50:57 | 2017-11-12T09:50:26 | Python | UTF-8 | Python | false | false | 314 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import reda
basepath = os.path.dirname(__file__) + os.sep
def test_load_mat():
sip = reda.SIP()
sip.import_sip04(basepath + 'Data/sip_dataA.mat')
def test_load_csv():
sip2 = reda.SIP()
sip2.import_sip04(basepath + 'Data/sip_dataA.csv')
| [
"mweigand@geo.uni-bonn.de"
] | mweigand@geo.uni-bonn.de |
569ff5538e3b8de9ef46a5e70148a5035c0fe606 | aa83a19c80fbf37ec446f8987adfab76aebb0007 | /lionproject/lionproject/settings.py | acdc0e2b03a052392e56602a144ed69bb751a5a7 | [] | no_license | clark1015/likelion-blog | 42101ac54d3f9c0b030c6b484ed1dca9c45240a5 | e059a39bb5b8081680b3de2789f974f64871f4a8 | refs/heads/main | 2023-04-21T07:15:52.706997 | 2021-05-14T12:53:24 | 2021-05-14T12:53:24 | 364,558,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,580 | py | """
Django settings for lionproject project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-ro7jyznfek3z_pq%!%9dn6o4*zf8kh2k%s%hh#98-jxds17gy9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'account.CustomUSer'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'account',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lionproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['lionproject/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lionproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'blog', 'static')
#현재 static 파일들이 어디에 있는지
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
#static 파일들을 어디에 모을건지
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#이용자가 업로드한 파일을 모으는 곳
MEDIA_URL = '/media/'
#이용자에게 사진을 보여줄 때의 url을 설정 | [
"seok626898@gmail.com"
] | seok626898@gmail.com |
75f1fa3b18d8add3c4ddf62520c0563fc4abb9b5 | 9d278423cb7c81cf0e7579428d580c47a1ecc4af | /claw_machine/version3.py | 79617b3cc72cc9f0660722ccb883aec1f7fd864f | [] | no_license | SanChainn/Claw_Machine | a940aa34b16725617ec6cadfd5d9f32b7b97f649 | 8a8c92b863edfce6b65d9d0646cca9682c117d80 | refs/heads/main | 2023-07-12T10:43:19.777673 | 2021-08-22T09:59:33 | 2021-08-22T09:59:33 | 398,766,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | from time import sleep
import pigpio
DIR = 20 # Direction GPIO Pin
STEP = 21 # Step GPIO Pin
SWITCH = 16 # GPIO pin of switch
# Connect to pigpiod daemon
pi = pigpio.pi()
# Set up pins as an output
pi.set_mode(DIR, pigpio.OUTPUT)
pi.set_mode(STEP, pigpio.OUTPUT)
# Set up input switch
pi.set_mode(SWITCH, pigpio.INPUT)
pi.set_pull_up_down(SWITCH, pigpio.PUD_UP)
MODE = (14, 15, 18) # Microstep Resolution GPIO Pins
RESOLUTION = {'Full': (0, 0, 0),
'Half': (1, 0, 0),
'1/4': (0, 1, 0),
'1/8': (1, 1, 0),
'1/16': (0, 0, 1),
'1/32': (1, 0, 1)}
for i in range(3):
pi.write(MODE[i], RESOLUTION['Full'][i])
# Set duty cycle and frequency
pi.set_PWM_dutycycle(STEP, 128) # PWM 1/2 On 1/2 Off
pi.set_PWM_frequency(STEP, 500) # 500 pulses per second
try:
while True:
pi.write(DIR,pi.read(SWITCH )) # Set direction
sleep(1)
print("FULL STEPPING ")
except KeyboardInterrupt:
print ("\nCtrl-C pressed. Stopping PIGPIO and exiting...")
finally:
pi.set_PWM_dutycycle(STEP, 0) # PWM off
pi.stop()
| [
"noreply@github.com"
] | noreply@github.com |
b9f684d5b42556870a228170a9c9ad958dad7159 | 31c091b12e03297bec1e81974e3b5facc766a50a | /lib/vcrparser.py | ec7b7a95e5693b1293801c7fc7c1896ad838dcab | [
"MIT"
] | permissive | huntdog1541/aRNAPipe | f1a72c7603fdb0f84df6857ce4d8ce2a3563a16f | 87c6f9695830e8b9357f62530772f41ffcbabcce | refs/heads/master | 2020-12-30T15:21:08.754192 | 2016-12-12T19:35:33 | 2016-12-12T19:35:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,142 | py | # -*- coding: utf-8 -*-
import os
import config
import time
if config.mode == "LSF":
import sys_LSF as manager
elif config.mode == "LOCAL":
import sys_single as manager
else:
import sys_OTHER as manager
def change_environment(env):
if len(env) > 0:
for var, val in env.iteritems():
if val[1] == "overwrite":
os.environ[var] = val[0]
else:
if os.environ.has_key(var):
os.environ[var] = val[0] + ":" + os.environ[var]
else:
os.environ[var] = val[0]
def project_process(path_base, folder):
samples = get_samples(path_base, folder, path_base + "/" + folder + "/samples.list")
# Check main process
print "## MAIN PROCESS ###########################"
try:
f = open(path_base + "/" + folder + "/pid.txt", 'r')
i = f.readline().strip("\n").split("\t")[1]
f.close()
k = manager.job_status(i)
if k == 1:
st = "DONE"
elif k == 0:
st = "RUN"
else:
st = "ERROR"
print "- Main process (" + i + ") status: " + st
except:
print "- Main process not found or already finished"
# Check subprocesses
print "## SUBPROCESSES ###########################"
pids = dict()
try:
f = open(path_base + "/" + folder + "/temp/pids.txt", 'r')
for i in f:
i = i.strip("\n").split("\t")
pids[i[0]] = [i[1].split("|"),i[2].split("|")]
f.close()
except:
print "- No subprocesses file found"
f = open(path_base + "/" + folder + "/config.txt", 'r')
config = dict()
for i in f:
if not i.startswith("%"):
i = i.strip("\n").split("\t")
if i[0] in ["trimgalore", "fastqc", "kallisto", "star", "star-fusion", "picard", "htseq-gene", "htseq-exon", 'sam2sortbam', "picard_IS", "varscan", "gatk", "jsplice"]:
i[1] = i[1].split("/")[0]
if i[1] != "0":
config[i[0]] = i[1]
if (config.has_key("varscan") or config.has_key("gatk") or config.has_key("picard_IS")) and (not config.has_key("sam2sortbam")):
config["sam2sortbam"] = 1
if len(config) > 0:
for pg in ["trimgalore", "fastqc", "kallisto", "star", "star-fusion", "picard", "htseq-gene", "htseq-exon", "sam2sortbam", "picard_IS", "varscan", "gatk", "jsplice"]:
if config.has_key(pg):
print "Process: " + pg
if not pids.has_key(pg):
print "- Already done or waiting for previous module output"
else:
pid = pids[pg]
print "- ID: " + "|".join(pid[0])
n = list()
for i in pid[1]:
k = manager.job_status(i)
if k == 1:
n.append("DONE")
elif k == 0:
n.append("RUN")
else:
n.append("ERROR")
print "- Status: " + "|".join(n)
samples_v, stats = check_samples(samples, path_base, folder, pg, "update")
sok = str(round(100 * float(stats[1])/float(stats[0]),2))
sko = str(round(100 * float(stats[2])/float(stats[0]),2))
pending = str(round(100 * float(stats[0]-stats[1]-stats[2])/float(stats[0]),2))
print "- Progress: " + sok + "% succeeded / " + sko + "% exited / " + pending + "% pending"
#############################################################################
# MAIN FUNCTION FOR KILLING ALL THE PROCESSES RELATED TO A PROJECT RUN
#############################################################################
def project_kill(path_base, folder):
print "Main process:"
uds = ""
try:
f = open(path_base + "/" + folder + "/pid.txt",'r')
uds = f.readline().strip("\n").split("\t")[1]
f.close()
print "- Killing main process ("+uds+")"
manager.job_kill(uds)
except:
print "- No main process to kill. Already finished (" + uds + ")"
print "Submodule processes:"
try:
f = open(path_base + "/" + folder + "/temp/pids.txt", 'r')
for i in f:
i = i.strip("\n").split("\t")
if len(i) > 1:
j = i[1].split("|")
for jj in j:
print "- Killing process " + i[0] + " ("+jj+")"
manager.job_kill(jj)
f.close()
except:
print "No submodule processes to kill"
return 1
#############################################################################
## PARSES DE CONFIGURATION FILE
#############################################################################
def config_file(config, path_base, folder, paths):
    """Parse the tab-delimited pipeline configuration file.

    Arguments:
    - config: path to the configuration file
    - path_base: absolute path to the location of the project folder
    - folder: name of the project folder located in `path_base`
    - paths: not used here; kept for interface compatibility

    Lines starting with '%' are treated as comments; every other line is
    expected to be 'key<TAB>value'. Aborts via exit() as soon as one of the
    mandatory fields is absent. Returns [config, settings_dict].
    """
    mandatory_fields = ['genome_build', 'strandedness', 'trimgalore', 'fastqc',
                        'star', 'star-fusion', 'picard', 'htseq-gene', 'htseq-exon',
                        'kallisto', 'sam2sortbam', 'picard_IS', 'gatk', 'varscan',
                        'q', 'wt', 'star_args', 'star2pass', 'starfusion_args', 'kalboot',
                        'varscan_args', 'gatk_args', 'htseq-gene-mode', 'htseq-exon-mode', "jsplice"]
    settings = dict()
    handle = open(config, 'r')
    for line in handle:
        if line.startswith("%"):
            continue  # comment line
        parts = line.strip('\n').split("\t")
        if len(parts) > 1:
            settings[parts[0]] = parts[1]
    handle.close()
    for field in mandatory_fields:
        if field not in settings:
            exit('Field "' + field + '" is missing in the configuration file.')
    return [config, settings]
def job_wait(path, secs):
    """Block until every job listed in `path` has finished successfully.

    Arguments:
    - path: one log-file path, or several joined with '|' for a job array
      (i.e. '/some_path/job1.log' or '/some_path/job1.log|/some_path/job2.log')
    - secs: number of seconds between subsequent status checks

    Each job's status is polled through the resource manager. Returns 1 once
    all jobs are done; aborts the run with exit() as soon as any job reports
    an error.
    """
    log_files = path.split("|") if "|" in path else [path]
    for log_file in log_files:
        while True:
            status = manager.job_status(log_file)
            if status == 1:
                break  # this job finished successfully
            if status == -1:
                exit("Error on: " + log_file)
            time.sleep(secs)  # still running; poll again later
    return 1
#############################################################################
## PARSES DE SAMPLES FILE
#############################################################################
def get_samples(path_base, folder, samplefile, get_phenos=False, no_check=False):
    """Parse the tab-delimited samples file and return the sample table.

    Arguments:
    - path_base: absolute path to the project folder location (not referenced here)
    - folder: name of the project folder (not referenced here)
    - samplefile: path to the samples file; the header row must contain
      'SampleID' plus either 'FASTQ' (single-end) or 'FASTQ_1' and 'FASTQ_2'
      (paired-end); optional 'PHENO_*' columns carry per-sample phenotypes
    - get_phenos: when True, also return the phenotype columns
    - no_check: when True, skip existence/size checks on the FASTQ files
      (their sizes are then recorded as 0)

    Returns:
    - samples: dict sample_id -> [fastq, size] for single-end data or
      [fastq_1, fastq_2, size_1, size_2] for paired-end data
    - phenos (only when get_phenos is True): dict PHENO_name -> {sample_id: value}

    Aborts via exit() on a missing samples file, missing mandatory headers,
    wrong FASTQ extensions, or any accumulated per-sample error
    (duplicated IDs, missing input files, empty input files).
    """
    # SCAN FOR FASTQ SAMPLE FILES
    try:
        f = open(samplefile, 'r')
    except:
        exit("Error: Samples file not found")
    samples = dict()
    # CHECK COLUMN HEADERS AND SINGLE-END/PAIRED-END DATA
    # idx holds the column positions of [SampleID, FASTQ/FASTQ_1, FASTQ_2];
    # -1 means "column not present".
    i = f.readline().strip("\n").split("\t")
    idx = [-1, -1, -1]
    idx_pheno = []
    pheno_names = []
    for j in range(len(i)):
        if i[j] == "SampleID":
            idx[0] = j
        elif i[j] == "FASTQ_1":
            idx[1] = j
        elif i[j] == "FASTQ_2":
            idx[2] = j
        elif i[j] == "FASTQ":
            idx[1] = j
        elif i[j].startswith('PHENO_'):
            pheno_names.append(i[j])
            idx_pheno.append(j)
    # 'SampleID' AND 'FASTQ' COLUMNS ARE REQUIRED
    if (idx[0] < 0) or (idx[1] < 0):
        exit("Error: Samples file headers must contain 'SampleID' and 'FASTQ' columns for single-end or 'SampleID', 'FASTQ_1' and 'FASTQ_2' for paired-end data")
    # PARSE SAMPLE DATA
    # Errors are accumulated per category and reported all at once at the end.
    errors = dict({"ID duplication errors":[],"Missing input files":[], "Empty input files":[]})
    phenos = {i: {} for i in pheno_names}
    for i in f:
        i = i.strip("\n").split("\t")
        if len(i) > 1:
            # PAIRED-END (TWO FASTQ FILES PROVIDED PER SAMPLE
            # A 'FASTQ_2' header column (idx[2] >= 0) marks the file as paired-end.
            if idx[2] >= 0:
                # CHECKS FILE EXTENSION OK (*.fastq or *.fastq.gz)
                if (i[idx[1]].endswith("fastq") and i[idx[2]].endswith("fastq")) or (i[idx[1]].endswith("fastq.gz") and i[idx[2]].endswith("fastq.gz")):
                    # NO DUPLICATE IDS
                    if samples.has_key(i[idx[0]]):
                        errors["ID duplication errors"].append(i[idx[0]])
                    else:
                        try:
                            if not no_check:
                                # Open each FASTQ to verify it exists and record
                                # empty files. NOTE: 'f' is rebound here; the outer
                                # 'for i in f' keeps iterating the samples file
                                # because the loop holds its own iterator reference.
                                for ifile in range(1,3):
                                    f = open(i[idx[ifile]], 'r')
                                    f.close()
                                    S = os.stat(i[idx[ifile]]).st_size
                                    if S == 0:
                                        errors["Empty input files"].append(i[idx[ifile]])
                                samples[i[idx[0]]] = [i[idx[1]], i[idx[2]], os.stat(i[idx[1]]).st_size, os.stat(i[idx[2]]).st_size]
                            else:
                                # no_check: trust the paths and record sizes as 0.
                                samples[i[idx[0]]] = [i[idx[1]], i[idx[2]], 0, 0]
                            if len(idx_pheno):
                                for ifil in range(len(idx_pheno)):
                                    phenos[pheno_names[ifil]][i[idx[0]]] = i[idx_pheno[ifil]]
                        except:
                            # An unreadable/missing FASTQ raises above and lands here.
                            # NOTE(review): if the exception is raised outside the
                            # 'for ifile' loop (e.g. in the phenotype block while
                            # no_check=True), 'ifile' may be unbound here — confirm.
                            errors["Missing input files"].append(i[idx[ifile]])
                else:
                    exit("Error: Input sample files must be '.fastq' or '.fastq.gz'")
            # SINGLE-END (ONE FASTQ FILES PROVIDED PER SAMPLE
            else:
                # CHECKS FILE EXTENSION OK (*.fastq or *.fastq.gz)
                if i[idx[1]].endswith("fastq") or i[idx[1]].endswith("fastq.gz"):
                    # NO DUPLICATE IDS
                    if samples.has_key(i[idx[0]]):
                        errors["ID duplication errors"].append(i[idx[0]])
                    else:
                        try:
                            if not no_check:
                                # Same existence/size check as above, single file.
                                for ifile in range(1,2):
                                    f = open(i[idx[ifile]], 'r')
                                    f.close()
                                    S = os.stat(i[idx[ifile]]).st_size
                                    if S == 0:
                                        errors["Empty input files"].append(i[idx[ifile]])
                                samples[i[idx[0]]] = [i[idx[1]], os.stat(i[idx[1]]).st_size]
                            else:
                                samples[i[idx[0]]] = [i[idx[1]], 0]
                            if len(idx_pheno):
                                for ifil in range(len(idx_pheno)):
                                    phenos[pheno_names[ifil]][i[idx[0]]] = i[idx_pheno[ifil]]
                        except:
                            errors["Missing input files"].append(i[idx[ifile]])
                else:
                    exit("Error: Input sample files must be '.fastq' or '.fastq.gz'")
    if len(samples) == 0:
        exit("Error: No available samples identified.")
    # Report the accumulated per-sample errors and abort if any were found.
    r = 0
    for i,j in errors.iteritems():
        if len(j) > 0:
            r += 1
            print i + ":"
            for k in j:
                print "- " + k
    if r > 0:
        exit("Samples file errors detected")
    if get_phenos:
        return samples, phenos
    else:
        return samples
#############################################################################
## CHECKS ARGUMENTS
#############################################################################
def check_args(path_base, folder):
    """Validate and normalise the project-location arguments.

    Arguments:
    - path_base: directory holding the project folder; when empty, the
      current working directory is used
    - folder: name of the project folder; any '/' characters are stripped

    Returns [path_base, folder] with path_base guaranteed to end in '/'.
    Aborts via exit() when the project folder does not exist under path_base.
    """
    if path_base == "":
        # Default to the current working directory when no base path is given.
        path_base = os.getcwd()
    if not path_base.endswith("/"):
        path_base = path_base + "/"
    folder = folder.replace("/", "")
    if folder not in os.listdir(path_base):
        exit("Error: Project folder '" + folder + "' not found at path_base: '" + path_base + "'")
    return [path_base, folder]
#############################################################################
# CHECKS OUTPUT FILES RELATED TO EACH ANALYSIS
#############################################################################
def check_samples(samples, path_base, folder, pg, m):
    """Return the samples that still need processing by module `pg`.

    Arguments:
    - samples: dict sample_id -> sample data (as built by get_samples)
    - path_base: absolute path to the location of the project folder
    - folder: name of the project folder
    - pg: module name whose output lives under '<project>/results_<pg>'
    - m: mode; 'update' skips samples already finished successfully,
      'new' keeps every sample

    Reads 'samples_ok.txt' / 'samples_ko.txt' written by the module to
    classify finished samples. Returns (pending_samples, [total, ok, ko]).
    Aborts via exit() on an invalid mode.
    """
    ld = os.listdir(path_base + "/" + folder)
    if m == "update":
        if "results_" + pg in ld:
            results_dir = path_base + "/" + folder + "/results_" + pg
            v_samples = dict()
            sok = dict()  # samples that finished successfully
            sko = dict()  # samples that exited with an error
            if os.path.exists(results_dir + "/samples_ok.txt"):
                f = open(results_dir + "/samples_ok.txt", 'r')
                for i in f:
                    i = i.strip("\n")
                    if len(i) > 0:
                        sok[i] = 1
                f.close()
            if os.path.exists(results_dir + "/samples_ko.txt"):
                f = open(results_dir + "/samples_ko.txt", 'r')
                for i in f:
                    i = i.strip("\n")
                    if len(i) > 0:
                        sko[i] = 1
                        # A sample recorded as failed must not count as succeeded.
                        if i in sok:
                            sok.pop(i)
                f.close()
            k = [len(samples), len(sok), len(sko)]  # total, ok, ko
            # Pending = never succeeded, or failed on a previous attempt.
            for sample_name in samples.keys():
                if (sample_name not in sok) or (sample_name in sko):
                    v_samples[sample_name] = samples[sample_name]
            return v_samples, k
        else:
            # Module has not produced any output yet: everything is pending.
            return samples, [len(samples), 0, 0]
    elif m == "new":
        return samples, [len(samples), 0, 0]
    else:
        exit("Error: Mode not valid ('update' or 'new')")
| [
"arnald.alonso@gmail.com"
] | arnald.alonso@gmail.com |
13588d94d6dca1b2727031036d756a83fd7954a9 | 604ad28d730882f44d3a52e4e8f6a4438e51fd01 | /Recognize.py | f11232a343279db7091b6d95af37a87c245a2022 | [] | no_license | domjanbaric/EESTech2017 | ffffb5be2ed2078c87820013bf4fcbe62635ef94 | 62dfa1e6129638c26d7dbe76e95c267d5df0177b | refs/heads/master | 2021-06-22T09:27:27.127548 | 2017-08-10T08:55:52 | 2017-08-10T08:55:52 | 99,901,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 10 07:23:57 2017
@author: Ana
"""
import numpy as np
from PIL import Image
from keras.models import load_model
import os
import argparse as ap
model = load_model("boja3.h5")
def test(slija="18_krava.jpg"):
ime=["Adele","Babette","Cecile","Doerte","Elsa","Fabala","Gesa","Helvetia","Isabella","Janette","Kiera","Letitia"]
root=os.getcwd()+'\\'
im = Image.open(root + slija)
im=im.resize((270,180))
xtest=np.zeros((1,180,270,3),dtype=np.int)
xtest[0,]=np.array(im)
xtest=xtest.astype('float32')
xtest/=255
t=model.predict(xtest)
br=np.argmax(t)
print(ime[br])
if __name__=="__main__":
parser=ap.ArgumentParser()
parser.add_argument('-i',"--image",help="Name of picture",required=True)
args=vars(parser.parse_args())
image_path=args["image"]
test(image_path) | [
"domjanbaric@live.com"
] | domjanbaric@live.com |
c8ae48a6f79a42bf74407f3d6801a041d64be011 | 6a63e40b1d30b6a810c89d910ac3f8f5954002ee | /src/pretalx/submission/migrations/0039_submission_created.py | c73cbfb2440b187adbb54d325d4ffb85e8724bf3 | [
"Apache-2.0"
] | permissive | orlando/pretalx | 47b7ab3e3258d667183066b84227b785199711b2 | 15f90dc2545f210eaf870ffbdfe0a27c70bfa0ec | refs/heads/master | 2020-09-10T20:26:49.867462 | 2019-11-15T01:19:07 | 2019-11-15T01:19:07 | 221,826,314 | 2 | 0 | NOASSERTION | 2019-11-15T02:21:05 | 2019-11-15T02:21:04 | null | UTF-8 | Python | false | false | 411 | py | # Generated by Django 2.2.1 on 2019-05-01 20:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submission', '0038_auto_20190429_0750'),
]
operations = [
migrations.AddField(
model_name='submission',
name='created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
| [
"r@rixx.de"
] | r@rixx.de |
98f6efc042e1bdf171342027a6aa29fcfa032a39 | 8f1e134108bdcad95c61e1e5c588fdfed9b8236a | /tests/infrastructure/test_cluster.py | bbe0d3ee1c8b5a88b74a65090e07415cf3ab74f6 | [] | no_license | azmathasan92/elk-automation-challenge | 77bc82e74d0109289c8b2e6bd96abdbb29c9fa93 | b42b034f3a1ea78148504212b52224b6700b7ce2 | refs/heads/master | 2023-05-09T03:45:20.062542 | 2021-03-26T17:42:20 | 2021-03-26T17:42:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | from google.oauth2 import service_account
from google.cloud.container import ClusterManagerClient
from kubernetes import client, config
import os
import unittest
import config as cfg
class TestSetup(unittest.TestCase):
def setUp(self):
self.project_id = cfg.project_id
self.zone = cfg.zone
self.cluster_id = cfg.cluster_id
self.namespace = cfg.namespace
def test_cluster(self):
SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
credentials = service_account.Credentials.from_service_account_file('./service-account.json', scopes=SCOPES)
cluster_manager_client = ClusterManagerClient(credentials=credentials)
cluster = cluster_manager_client.get_cluster(self.project_id, self.zone, self.cluster_id)
configuration = client.Configuration()
configuration.host = "https://" + cluster.endpoint + ":443"
configuration.verify_ssl = False
configuration.api_key = {"authorization": "Bearer " + credentials.token}
client.Configuration.set_default(configuration)
self.assertEqual(cluster.name,"interview-cluster") | [
"imuge94@gmail.com"
] | imuge94@gmail.com |
f8d400ba97d30bfe07598361713b02eb48a9b54a | 4bd5c760337aee32f882c7dec53ac02767b63249 | /parser2.py | 17cc50e7a121a7da070e532a500499990f6fd375 | [] | no_license | denisfruza/PythonProject | a852024fac7964daf41a9d2d9a39d5359e2dd7e6 | 90b6ca9b343f7289ee25f5e8b7f32add0afdc709 | refs/heads/master | 2022-04-13T18:29:40.828964 | 2020-02-28T20:50:07 | 2020-02-28T20:50:07 | 241,696,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,384 | py | # -*- coding: utf-8 -*-
import re
import os
from html.parser import HTMLParser
class Parser(HTMLParser):
"""
Parser HTML dokumenata
Upotreba:
parser = Parser()
parser.parse(FILE_PATH)
"""
def handle_starttag(self, tag, attrs):
"""
Metoda beleži sadržaj href atributa
Poziv metode vrši se implicitno prilikom nailaska na tag
unutar HTML fajla. Ukoliko je u pitanju anchor tag, beleži
se vrednost href atributa.
Argumenti:
- `tag`: naziv taga
- `attrs`: lista atributa
"""
if tag == 'a':
# typecast da izbegnem looping
attrs = dict(attrs)
link = attrs['href']
# ignoriši spoljnje linkove i uzmi u obzir samo html fajlove
if not link.startswith('http'):
# ukloni sekciju iz linka
hash_index = link.rfind('#')
if hash_index > -1:
link = link[:hash_index]
if link.endswith('html') or link.endswith('htm'):
relative_path = os.path.join(self.path_root, link)
link_path = os.path.abspath(relative_path)
self.links.append(link_path)
def handle_data(self, data):
"""
Metoda beleži pronađene reči
Poziv metode vrši se implicitno prilikom nailaska na sadržaj
HTML elemenata. Sadržaj elementa se deli u reči koje se beleže
u odgovarajuću listu.
Argument:
- `data`: dobijeni sadržaj elementa
"""
stripped_text = re.sub('[\W]', ' ', data).split()
if stripped_text:
self.words.extend(stripped_text)
def parse(self, path):
"""
Metoda učitava sadržaj fajla i prosleđuje ga parseru
Argument:
- `path`: putanja do fajla
"""
self.links = []
self.words = []
try:
with open(path, encoding = "utf-8") as document: #izmenjeno
self.path_root = os.path.abspath(os.path.dirname(path))
content = document.read()
self.feed(content)
# očisti duplikate
self.links = list(set(self.links))
except IOError as e:
print(e)
finally:
return self.links, self.words
| [
"denisfruza98@hotmail.com"
] | denisfruza98@hotmail.com |
fb7be942dde3ebb78f195e731981df98417bf374 | 01df468685c9f393b9559cb68df349ef7abcf5a6 | /panelapp/panels/urls.py | 2d9e2ae0ab52028ab81a18a96b567f8bf2b09c0b | [
"Apache-2.0"
] | permissive | victorskl/panelapp | 481af901472cd960da2d0abf17239b8d484524be | 4dfdd31f6036db5cb4e692961ef9bcbe92d39a23 | refs/heads/master | 2020-05-07T16:28:08.946472 | 2019-01-23T11:04:41 | 2019-01-23T11:04:41 | 180,684,104 | 1 | 0 | null | 2019-04-11T00:28:26 | 2019-04-11T00:28:26 | null | UTF-8 | Python | false | false | 10,310 | py | from django.conf.urls import url
from django.views.generic import RedirectView
from .views import AdminView
from .views import AdminUploadGenesView
from .views import AdminUploadPanelsView
from .views import AdminUploadReviewsView
from .views import EntitiesListView
from .views import CreatePanelView
from .views import EntityDetailView
from .views import GenePanelView
from .views import PanelsIndexView
from .views import UpdatePanelView
from .views import PromotePanelView
from .views import PanelAddEntityView
from .views import PanelEditEntityView
from .views import PanelMarkNotReadyView
from .views import GenePanelSpanshotView
from .views import EntityReviewView
from .views import MarkEntityReadyView
from .views import DownloadPanelTSVView
from .views import DownloadPanelVersionTSVView
from .views import MarkGeneNotReadyView
from .views import ComparePanelsView
from .views import CompareGeneView
from .views import CopyReviewsView
from .views import DownloadAllGenes
from .views import DownloadAllPanels
from .views import ActivityListView
from .views import DownloadAllSTRs
from .views import DownloadAllRegions
from .views import GeneDetailRedirectView
from .views import RedirectGenesToEntities
from .views import OldCodeURLRedirect
from .ajax_views import ClearPublicationsAjaxView
from .ajax_views import ClearPhoenotypesAjaxView
from .ajax_views import ClearModeOfPathogenicityAjaxView
from .ajax_views import ClearSourcesAjaxView
from .ajax_views import ClearSingleSourceAjaxView
from .ajax_views import DeletePanelAjaxView
from .ajax_views import DeleteEntityAjaxView
from .ajax_views import RejectPanelAjaxView
from .ajax_views import ApprovePanelAjaxView
from .ajax_views import UpdateEntityTagsAjaxView
from .ajax_views import UpdateEntityMOPAjaxView
from .ajax_views import UpdateEntityMOIAjaxView
from .ajax_views import UpdateEntityPhenotypesAjaxView
from .ajax_views import UpdateEntityPublicationsAjaxView
from .ajax_views import UpdateEntityRatingAjaxView
from .ajax_views import DeleteEntityEvaluationAjaxView
from .ajax_views import GetEntityCommentFormAjaxView
from .ajax_views import DeleteEntityCommentAjaxView
from .ajax_views import SubmitEntityCommentFormAjaxView
from .ajax_views import ApproveEntityAjaxView
app_name = 'panels'
entity_regex = '[\w\-\.\$\~\@\#\ ]+'
entity_types = 'gene|str|region'
urlpatterns = [
url(r'^$', PanelsIndexView.as_view(), name="index"),
url(r'^compare/$', ComparePanelsView.as_view(), name="compare_panels_form"),
url(r'^compare/(?P<panel_1_id>[0-9]+)/(?P<panel_2_id>[0-9]+)$', ComparePanelsView.as_view(), name="compare"),
url(r'^compare/(?P<panel_1_id>[0-9]+)/(?P<panel_2_id>[0-9]+)/(?P<gene_symbol>[\w\-]+)$',
CompareGeneView.as_view(), name="compare_genes"),
url(r'^copy/(?P<panel_1_id>[0-9]+)/(?P<panel_2_id>[0-9]+)$', CopyReviewsView.as_view(), name="copy_reviews"),
url(r'^(?P<pk>[0-9]+)/$', GenePanelView.as_view(), name="detail"),
url(r'^(?P<pk>[0-9]+)/update$', UpdatePanelView.as_view(), name="update"),
url(r'^(?P<pk>[0-9]+)/promote$', PromotePanelView.as_view(), name="promote"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/add'.format(types=entity_types), PanelAddEntityView.as_view(), name="add_entity"),
url(r'^(?P<pk>[0-9]+)/delete$', DeletePanelAjaxView.as_view(), name="delete_panel"),
url(r'^(?P<pk>[0-9]+)/reject$', RejectPanelAjaxView.as_view(), name="reject_panel"),
url(r'^(?P<pk>[0-9]+)/approve$', ApprovePanelAjaxView.as_view(), name="approve_panel"),
url(r'^(?P<pk>[0-9]+)/download/(?P<categories>[0-4]+)/$',
DownloadPanelTSVView.as_view(), name="download_panel_tsv"),
url(r'^(?P<pk>[0-9]+)/download_version/$',
DownloadPanelVersionTSVView.as_view(), name="download_old_panel_tsv"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_name>{})/$'.format(entity_regex), RedirectGenesToEntities.as_view(), name="redirect_previous_structure"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/$'.format(types=entity_types, regex=entity_regex), GenePanelSpanshotView.as_view(), name="evaluation"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/edit$'.format(types=entity_types, regex=entity_regex), PanelEditEntityView.as_view(), name="edit_entity"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/review$'.format(types=entity_types, regex=entity_regex), EntityReviewView.as_view(), name="review_entity"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/mark_as_ready$'.format(types=entity_types, regex=entity_regex),
MarkEntityReadyView.as_view(), name="mark_entity_as_ready"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/mark_as_not_ready$'.format(types=entity_types, regex=entity_regex),
MarkGeneNotReadyView.as_view(), name="mark_entity_as_not_ready"),
# AJAX endpoints
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/delete$'.format(types=entity_types, regex=entity_regex), DeleteEntityAjaxView.as_view(), name="delete_entity"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/approve$'.format(types=entity_types, regex=entity_regex), ApproveEntityAjaxView.as_view(), name="approve_entity"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/clear_entity_sources$'.format(types=entity_types, regex=entity_regex),
ClearSourcesAjaxView.as_view(), name="clear_entity_sources"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/clear_entity_source/(?P<source>(.*))/$'.format(types=entity_types, regex=entity_regex),
ClearSingleSourceAjaxView.as_view(), name="clear_entity_source"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/clear_entity_phenotypes$'.format(types=entity_types, regex=entity_regex),
ClearPhoenotypesAjaxView.as_view(), name="clear_entity_phenotypes"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/clear_entity_publications$'.format(types=entity_types, regex=entity_regex),
ClearPublicationsAjaxView.as_view(), name="clear_entity_publications"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/clear_entity_mode_of_pathogenicity$'.format(types=entity_types, regex=entity_regex),
ClearModeOfPathogenicityAjaxView.as_view(), name="clear_entity_mode_of_pathogenicity"),
# AJAX Review endpoints
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/update_entity_tags/$'.format(types=entity_types, regex=entity_regex),
UpdateEntityTagsAjaxView.as_view(), name="update_entity_tags"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/update_entity_rating/$'.format(types=entity_types, regex=entity_regex),
UpdateEntityRatingAjaxView.as_view(), name="update_entity_rating"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/update_entity_moi/$'.format(types=entity_types, regex=entity_regex),
UpdateEntityMOIAjaxView.as_view(), name="update_entity_moi"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/update_entity_mop/$'.format(types=entity_types, regex=entity_regex),
UpdateEntityMOPAjaxView.as_view(), name="update_entity_mop"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/update_entity_phenotypes/$'.format(types=entity_types, regex=entity_regex),
UpdateEntityPhenotypesAjaxView.as_view(), name="update_entity_phenotypes"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/update_entity_publications/$'.format(types=entity_types, regex=entity_regex),
UpdateEntityPublicationsAjaxView.as_view(), name="update_entity_publications"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/delete_evaluation/(?P<evaluation_pk>[0-9]+)/$'.format(types=entity_types, regex=entity_regex),
DeleteEntityEvaluationAjaxView.as_view(), name="delete_evaluation_by_user"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/edit_comment/(?P<comment_pk>[0-9]+)/$'.format(types=entity_types, regex=entity_regex),
GetEntityCommentFormAjaxView.as_view(), name="edit_comment_by_user"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/submit_edit_comment/(?P<comment_pk>[0-9]+)/$'.format(types=entity_types, regex=entity_regex),
SubmitEntityCommentFormAjaxView.as_view(), name="submit_edit_comment_by_user"),
url(r'^(?P<pk>[0-9]+)/(?P<entity_type>({types}))/(?P<entity_name>{regex})/delete_comment/(?P<comment_pk>[0-9]+)/$'.format(types=entity_types, regex=entity_regex),
DeleteEntityCommentAjaxView.as_view(), name="delete_comment_by_user"),
url(r'^(?P<pk>[0-9]+)/mark_not_ready$'.format(entity_regex), PanelMarkNotReadyView.as_view(), name="mark_not_ready"),
url(r'^(?P<pk>[a-z0-9]{24})/(?P<uri>.*|$)', OldCodeURLRedirect.as_view(), name="old_code_url_redirect"),
url(r'^create/', CreatePanelView.as_view(), name="create"),
url(r'^entities/$', EntitiesListView.as_view(), name="entities_list"),
url(r'^genes/$', RedirectView.as_view(url='/panels/entities'), name="genes_list"),
url(r'^entities/(?P<slug>{regex})$'.format(types=entity_types, regex=entity_regex), EntityDetailView.as_view(), name="entity_detail"),
url(r'^genes/(?P<slug>{regex})$'.format(types=entity_types, regex=entity_regex), GeneDetailRedirectView.as_view()),
url(r'^activity/$', ActivityListView.as_view(), name="activity"),
url(r'^admin/', AdminView.as_view(), name="admin"),
url(r'^upload_genes/', AdminUploadGenesView.as_view(), name="upload_genes"),
url(r'^download_genes/', DownloadAllGenes.as_view(), name="download_genes"),
url(r'^download_strs/', DownloadAllSTRs.as_view(), name="download_strs"),
url(r'^download_regions/', DownloadAllRegions.as_view(), name="download_regions"),
url(r'^upload_panel/', AdminUploadPanelsView.as_view(), name="upload_panels"),
url(r'^download_panel/', DownloadAllPanels.as_view(), name="download_panels"),
url(r'^upload_reviews/', AdminUploadReviewsView.as_view(), name="upload_reviews"),
]
| [
"oleg.gerasimenko@nhs.net"
] | oleg.gerasimenko@nhs.net |
ecff1d9109504140a699cf7d7ca9105c4e445e9b | f642417833054583556c450ff69d4d17e55011e1 | /users/models.py | f258e7326bfd35945b92ecc14cf8128f17bb3a70 | [] | no_license | dragonStar712/blog | efa1583829322853de1c0a310c2e4c8796170568 | 9f103ce8dba44365d179b59ad1938e994abe36cf | refs/heads/master | 2023-05-14T00:25:19.548419 | 2021-06-05T05:38:34 | 2021-06-05T05:38:34 | 374,030,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from django.db import models
from django.contrib.auth.models import User
from PIL import Image
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg',upload_to = 'profile_pics')
def __str__(self):
return f'{self.user.username} Profile'
def save(self,*args, **kawrgs):
super().save(*args, **kawrgs)
img = Image.open(self.image.path)
if img.height>300 and img.width >300:
output_size = (300,300)
img.thumbnail(output_size)
img.save(self.image.path) | [
"maulikprajapati712@gmail.com"
] | maulikprajapati712@gmail.com |
e0b559b5929289ad8e62f95b425db9fb2268066b | a1da2caf2addcddadd5ce731c752c4e783502f14 | /custom/isolation_test_1.py | fdd8d344d7d6803b8974778c97c4c51992708cd6 | [
"Apache-2.0"
] | permissive | yunmingxiao/TUS_old | bf25d4b1482d5c22eac8c0c2e1fb6aab40e204c5 | 3865491f3fbd6eb438eae793a801b6fc18313cef | refs/heads/master | 2020-05-04T20:44:10.069990 | 2019-04-11T10:20:38 | 2019-04-11T10:20:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | from ryu.base import app_manager
from ryu import tus_core
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
class ISOtest1(tus_core.TUSInterface):
#class ISOtest1(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(ISOtest1, self).__init__(*args, **kwargs)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
ofp_parser = dp.ofproto_parser
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_FLOOD)]
out = ofp_parser.OFPPacketOut(
datapath=dp, buffer_id=msg.buffer_id, in_port=msg.in_port,
actions=actions)
dp.send_msg(out)
print(actions)
fo = open("test.out", 'a+')
fo.write("This is test 1. Action sent: " + str(actions) + "\n")
fo.close() | [
"xyunming@live.com"
] | xyunming@live.com |
43178e2ed1238f75334f622fe978141d5825a140 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/certificateregistration/v20190801/app_service_certificate_order_certificate.py | bc9c20160c568bfb9fc5b6e93455dd3c70b706ac | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,243 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['AppServiceCertificateOrderCertificateArgs', 'AppServiceCertificateOrderCertificate']
@pulumi.input_type
class AppServiceCertificateOrderCertificateArgs:
def __init__(__self__, *,
certificate_order_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
key_vault_id: Optional[pulumi.Input[str]] = None,
key_vault_secret_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a AppServiceCertificateOrderCertificate resource.
:param pulumi.Input[str] certificate_order_name: Name of the certificate order.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] key_vault_id: Key Vault resource Id.
:param pulumi.Input[str] key_vault_secret_name: Key Vault secret name.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] location: Resource Location.
:param pulumi.Input[str] name: Name of the certificate.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "certificate_order_name", certificate_order_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if key_vault_id is not None:
pulumi.set(__self__, "key_vault_id", key_vault_id)
if key_vault_secret_name is not None:
pulumi.set(__self__, "key_vault_secret_name", key_vault_secret_name)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="certificateOrderName")
def certificate_order_name(self) -> pulumi.Input[str]:
"""
Name of the certificate order.
"""
return pulumi.get(self, "certificate_order_name")
@certificate_order_name.setter
def certificate_order_name(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_order_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="keyVaultId")
def key_vault_id(self) -> Optional[pulumi.Input[str]]:
"""
Key Vault resource Id.
"""
return pulumi.get(self, "key_vault_id")
@key_vault_id.setter
def key_vault_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_vault_id", value)
@property
@pulumi.getter(name="keyVaultSecretName")
def key_vault_secret_name(self) -> Optional[pulumi.Input[str]]:
"""
Key Vault secret name.
"""
return pulumi.get(self, "key_vault_secret_name")
@key_vault_secret_name.setter
def key_vault_secret_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_vault_secret_name", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the certificate.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class AppServiceCertificateOrderCertificate(pulumi.CustomResource):
    # Auto-generated Pulumi resource class.  Two constructor overloads
    # (keyword arguments vs. a typed Args object) both funnel into
    # _internal_init, which builds the property bag and registers the
    # resource with the Pulumi engine.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 certificate_order_name: Optional[pulumi.Input[str]] = None,
                 key_vault_id: Optional[pulumi.Input[str]] = None,
                 key_vault_secret_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Key Vault container ARM resource for a certificate that is purchased through Azure.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] certificate_order_name: Name of the certificate order.
        :param pulumi.Input[str] key_vault_id: Key Vault resource Id.
        :param pulumi.Input[str] key_vault_secret_name: Key Vault secret name.
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] location: Resource Location.
        :param pulumi.Input[str] name: Name of the certificate.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: AppServiceCertificateOrderCertificateArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Key Vault container ARM resource for a certificate that is purchased through Azure.
        :param str resource_name: The name of the resource.
        :param AppServiceCertificateOrderCertificateArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: when an Args object is
        # supplied, unpack its fields into keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(AppServiceCertificateOrderCertificateArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       certificate_order_name: Optional[pulumi.Input[str]] = None,
                       key_vault_id: Optional[pulumi.Input[str]] = None,
                       key_vault_secret_name: Optional[pulumi.Input[str]] = None,
                       kind: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        # Normalize and validate the resource options.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # No opts.id means a new resource is being created: build the
        # full property bag from the caller-supplied inputs.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AppServiceCertificateOrderCertificateArgs.__new__(AppServiceCertificateOrderCertificateArgs)
            # Required inputs are only enforced when not looking up by URN.
            if certificate_order_name is None and not opts.urn:
                raise TypeError("Missing required property 'certificate_order_name'")
            __props__.__dict__["certificate_order_name"] = certificate_order_name
            __props__.__dict__["key_vault_id"] = key_vault_id
            __props__.__dict__["key_vault_secret_name"] = key_vault_secret_name
            __props__.__dict__["kind"] = kind
            __props__.__dict__["location"] = location
            __props__.__dict__["name"] = name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are resolved by the engine.
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        # Aliases let the engine treat resources from other API versions /
        # provider names as the same logical resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:certificateregistration/v20190801:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-native:certificateregistration:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-nextgen:certificateregistration:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-native:certificateregistration/v20150801:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-nextgen:certificateregistration/v20150801:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-native:certificateregistration/v20180201:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-nextgen:certificateregistration/v20180201:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-native:certificateregistration/v20200601:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-nextgen:certificateregistration/v20200601:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-native:certificateregistration/v20200901:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-nextgen:certificateregistration/v20200901:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-native:certificateregistration/v20201001:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-nextgen:certificateregistration/v20201001:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-native:certificateregistration/v20201201:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-nextgen:certificateregistration/v20201201:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-native:certificateregistration/v20210101:AppServiceCertificateOrderCertificate"), pulumi.Alias(type_="azure-nextgen:certificateregistration/v20210101:AppServiceCertificateOrderCertificate")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(AppServiceCertificateOrderCertificate, __self__).__init__(
            'azure-native:certificateregistration/v20190801:AppServiceCertificateOrderCertificate',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'AppServiceCertificateOrderCertificate':
        """
        Get an existing AppServiceCertificateOrderCertificate resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them in from the
        # existing resource's state.
        __props__ = AppServiceCertificateOrderCertificateArgs.__new__(AppServiceCertificateOrderCertificateArgs)
        __props__.__dict__["key_vault_id"] = None
        __props__.__dict__["key_vault_secret_name"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return AppServiceCertificateOrderCertificate(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="keyVaultId")
    def key_vault_id(self) -> pulumi.Output[Optional[str]]:
        """
        Key Vault resource Id.
        """
        return pulumi.get(self, "key_vault_id")
    @property
    @pulumi.getter(name="keyVaultSecretName")
    def key_vault_secret_name(self) -> pulumi.Output[Optional[str]]:
        """
        Key Vault secret name.
        """
        return pulumi.get(self, "key_vault_secret_name")
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource Location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        Status of the Key Vault secret.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | noreply@github.com |
26e617807d9d999827f851c37be1d219170066df | 484c462c29e3c2f8ac280b79c11db6982c6a8ca6 | /python-driver-master/tests/unit/test_policies.py | 56fd5440fee65e8a31452c8ce87cf75ac5064999 | [
"Apache-2.0"
] | permissive | thelma1944/Python_Stuff | b5fa53bf008bb5e865204201b144fe20e7f87565 | 077131a2c9f247396dca86fdf18933d38ae8d501 | refs/heads/master | 2021-06-05T12:25:35.779070 | 2020-10-03T18:20:16 | 2020-10-03T18:20:16 | 16,077,931 | 0 | 1 | null | 2021-03-26T00:30:14 | 2014-01-20T17:36:16 | Python | UTF-8 | Python | false | false | 31,810 | py | try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from itertools import islice, cycle
from mock import Mock
from random import randint
import sys
import struct
from threading import Thread
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.metadata import Metadata
from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy,
TokenAwarePolicy, SimpleConvictionPolicy,
HostDistance, ExponentialReconnectionPolicy,
RetryPolicy, WriteType,
DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy,
LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy)
from cassandra.pool import Host
from cassandra.query import Statement
class TestLoadBalancingPolicy(unittest.TestCase):

    def test_non_implemented(self):
        """Every interface method of the abstract base class must raise NotImplementedError."""
        policy = LoadBalancingPolicy()
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        self.assertRaises(NotImplementedError, policy.distance, host)
        self.assertRaises(NotImplementedError, policy.populate, None, host)
        self.assertRaises(NotImplementedError, policy.make_query_plan)
        # The four host-state callbacks share the same signature.
        for callback in (policy.on_up, policy.on_down, policy.on_add, policy.on_remove):
            self.assertRaises(NotImplementedError, callback, host)
class TestRoundRobinPolicy(unittest.TestCase):
    # NOTE(review): this module targets Python 2 — it relies on xrange,
    # eagerly-evaluated map(), and sys.setcheckinterval below.
    def test_basic(self):
        """A single query plan should visit every live host."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), hosts)
    def test_multiple_query_plans(self):
        """Repeated query plans should keep covering the full host set."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        for i in xrange(20):
            qplan = list(policy.make_query_plan())
            self.assertEqual(sorted(qplan), hosts)
    def test_single_host(self):
        """A one-host cluster yields a one-host plan."""
        policy = RoundRobinPolicy()
        policy.populate(None, [0])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [0])
    def test_status_updates(self):
        """Down/removed hosts leave the plan; up/added hosts join it."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        policy.on_down(0)
        policy.on_remove(1)
        policy.on_up(4)
        policy.on_add(5)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), [2, 3, 4, 5])
    def test_thread_safety(self):
        """Concurrent query plans must each still cover all hosts."""
        hosts = range(100)
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        def check_query_plan():
            for i in range(100):
                qplan = list(policy.make_query_plan())
                self.assertEqual(sorted(qplan), hosts)
        threads = [Thread(target=check_query_plan) for i in range(4)]
        # map() is eager in Python 2, so these start/join all threads.
        map(lambda t: t.start(), threads)
        map(lambda t: t.join(), threads)
    def test_thread_safety_during_modification(self):
        """Query plans must not raise while hosts churn up/down concurrently."""
        hosts = range(100)
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        errors = []
        def check_query_plan():
            try:
                for i in xrange(100):
                    list(policy.make_query_plan())
            except Exception as exc:
                errors.append(exc)
        def host_up():
            for i in xrange(1000):
                policy.on_up(randint(0, 99))
        def host_down():
            for i in xrange(1000):
                policy.on_down(randint(0, 99))
        threads = []
        for i in range(5):
            threads.append(Thread(target=check_query_plan))
            threads.append(Thread(target=host_up))
            threads.append(Thread(target=host_down))
        # make the GIL switch after every instruction, maximizing
        # the chance of race conditions
        original_interval = sys.getcheckinterval()
        try:
            sys.setcheckinterval(0)
            map(lambda t: t.start(), threads)
            map(lambda t: t.join(), threads)
        finally:
            sys.setcheckinterval(original_interval)
        if errors:
            self.fail("Saw errors: %s" % (errors,))
    def test_no_live_nodes(self):
        """
        Ensure query plan for a downed cluster will execute without errors
        """
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        for i in range(4):
            policy.on_down(i)
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
class DCAwareRoundRobinPolicyTest(unittest.TestCase):
    def test_no_remote(self):
        """With only local-DC hosts, the plan covers all of them."""
        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)
        policy = DCAwareRoundRobinPolicy("dc1")
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), sorted(hosts))
    def test_with_remotes(self):
        """Local hosts come first; remote hosts are capped by used_hosts_per_remote_dc."""
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        local_hosts = set(h for h in hosts if h.datacenter == "dc1")
        remote_hosts = set(h for h in hosts if h.datacenter != "dc1")
        # allow all of the remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2)
        policy.populate(Mock(spec=Metadata), hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), local_hosts)
        self.assertEqual(set(qplan[2:]), remote_hosts)
        # allow only one of the remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(Mock(spec=Metadata), hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), local_hosts)
        used_remotes = set(qplan[2:])
        self.assertEqual(1, len(used_remotes))
        self.assertIn(qplan[2], remote_hosts)
        # allow no remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
        policy.populate(Mock(spec=Metadata), hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(2, len(qplan))
        self.assertEqual(local_hosts, set(qplan))
    def test_get_distance(self):
        """distance() is LOCAL for the home DC and REMOTE/IGNORED elsewhere."""
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        policy.populate(Mock(spec=Metadata), [host])
        self.assertEqual(policy.distance(host), HostDistance.LOCAL)
        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # dc2 isn't registered in the policy's live_hosts dict
        policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # make sure the policy has both dcs registered
        policy.populate(Mock(spec=Metadata), [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(Mock(spec=Metadata), [host, remote_host, second_remote_host])
        distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
    def test_status_updates(self):
        """Host up/down/add/remove events reshape the plan, locals first."""
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(Mock(spec=Metadata), hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])
        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)
        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)
        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
        policy.on_down(new_local_host)
        policy.on_down(hosts[1])
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
        policy.on_down(new_remote_host)
        policy.on_down(hosts[3])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
    def test_no_live_nodes(self):
        """
        Ensure query plan for a downed cluster will execute without errors
        """
        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(Mock(spec=Metadata), hosts)
        for host in hosts:
            policy.on_down(host)
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
    def test_no_nodes(self):
        """
        Ensure query plan for an empty cluster will execute without errors
        """
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(None, [])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
class TokenAwarePolicyTest(unittest.TestCase):
    def test_wrap_round_robin(self):
        """Replicas for the routing key come first, then the child policy's hosts."""
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()
        def get_replicas(keyspace, packed_key):
            # Fake replica lookup: two consecutive hosts starting at the
            # int encoded in the routing key.
            index = struct.unpack('>i', packed_key)[0]
            return list(islice(cycle(hosts), index, index + 2))
        cluster.metadata.get_replicas.side_effect = get_replicas
        policy = TokenAwarePolicy(RoundRobinPolicy())
        policy.populate(cluster, hosts)
        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i))
            qplan = list(policy.make_query_plan(None, query))
            replicas = get_replicas(None, struct.pack('>i', i))
            other = set(h for h in hosts if h not in replicas)
            self.assertEquals(replicas, qplan[:2])
            self.assertEquals(other, set(qplan[2:]))
        # Should use the secondary policy
        for i in range(4):
            qplan = list(policy.make_query_plan())
            self.assertEquals(set(qplan), set(hosts))
    def test_wrap_dc_aware(self):
        """Local replica first, then local non-replicas, then capped remotes."""
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        def get_replicas(keyspace, packed_key):
            index = struct.unpack('>i', packed_key)[0]
            # return one node from each DC
            if index % 2 == 0:
                return [hosts[0], hosts[2]]
            else:
                return [hosts[1], hosts[3]]
        cluster.metadata.get_replicas.side_effect = get_replicas
        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(cluster, hosts)
        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i))
            qplan = list(policy.make_query_plan(None, query))
            replicas = get_replicas(None, struct.pack('>i', i))
            # first should be the only local replica
            self.assertIn(qplan[0], replicas)
            self.assertEquals(qplan[0].datacenter, "dc1")
            # then the local non-replica
            self.assertNotIn(qplan[1], replicas)
            self.assertEquals(qplan[1].datacenter, "dc1")
            # then one of the remotes (used_hosts_per_remote_dc is 1, so we
            # shouldn't see two remotes)
            self.assertEquals(qplan[2].datacenter, "dc2")
            self.assertEquals(3, len(qplan))
    class FakeCluster:
        # Minimal stand-in for Cluster: only the metadata attribute is
        # read by TokenAwarePolicy.populate().
        def __init__(self):
            self.metadata = Mock(spec=Metadata)
    def test_get_distance(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_get_distance()
        Except a FakeCluster is needed for the metadata variable and
        policy.child_policy is needed to change child policy settings
        """
        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        policy.populate(self.FakeCluster(), [host])
        self.assertEqual(policy.distance(host), HostDistance.LOCAL)
        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # dc2 isn't registered in the policy's live_hosts dict
        policy._child_policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # make sure the policy has both dcs registered
        policy.populate(self.FakeCluster(), [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
        distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
    def test_status_updates(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_status_updates()
        """
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(self.FakeCluster(), hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])
        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)
        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)
        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
        policy.on_down(new_local_host)
        policy.on_down(hosts[1])
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
        policy.on_down(new_remote_host)
        policy.on_down(hosts[3])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
class ConvictionPolicyTest(unittest.TestCase):

    def test_not_implemented(self):
        """The ConvictionPolicy base class is interface-only: both methods raise."""
        policy = ConvictionPolicy(1)
        for unimplemented, call_args in ((policy.add_failure, (1,)), (policy.reset, ())):
            self.assertRaises(NotImplementedError, unimplemented, *call_args)
class SimpleConvictionPolicyTest(unittest.TestCase):

    def test_basic_responses(self):
        """SimpleConvictionPolicy convicts on every failure; reset returns None."""
        policy = SimpleConvictionPolicy(1)
        self.assertEqual(True, policy.add_failure(1))
        self.assertEqual(None, policy.reset())
class ReconnectionPolicyTest(unittest.TestCase):

    def test_basic_responses(self):
        """new_schedule() is abstract on the ReconnectionPolicy base class."""
        base = ReconnectionPolicy()
        self.assertRaises(NotImplementedError, base.new_schedule)
class ConstantReconnectionPolicyTest(unittest.TestCase):

    def test_bad_vals(self):
        """
        A negative delay must be rejected at construction time.
        """
        self.assertRaises(ValueError, ConstantReconnectionPolicy, -1, 0)

    def test_schedule(self):
        """
        Test ConstantReconnectionPolicy schedule: it must yield exactly
        max_attempts entries, each equal to the configured delay.
        """
        delay = 2
        max_attempts = 100
        policy = ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
        schedule = list(policy.new_schedule())
        self.assertEqual(len(schedule), max_attempts)
        # Bug fix: the original loop unpacked into a variable also named
        # ``delay``, so it asserted ``delay == delay`` — vacuously true.
        # Compare each scheduled value against the configured delay instead.
        for scheduled_delay in schedule:
            self.assertEqual(scheduled_delay, delay)

    def test_schedule_negative_max_attempts(self):
        """
        A negative max_attempts must raise ValueError.
        """
        delay = 2
        max_attempts = -100
        # assertRaises replaces the original try/self.fail/except pattern.
        self.assertRaises(ValueError, ConstantReconnectionPolicy,
                          delay=delay, max_attempts=max_attempts)
class ExponentialReconnectionPolicyTest(unittest.TestCase):

    def test_bad_vals(self):
        """Negative delays and max_delay < base_delay are all rejected."""
        for bad_args in ((-1, 0), (0, -1), (9000, 1)):
            self.assertRaises(ValueError, ExponentialReconnectionPolicy, *bad_args)

    def test_schedule(self):
        """Delays double from base_delay until they saturate at max_delay."""
        policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100)
        schedule = list(policy.new_schedule())
        self.assertEqual(64, len(schedule))
        previous = None
        for index, current in enumerate(schedule):
            if index == 0:
                self.assertEqual(2, current)
            elif index < 6:
                self.assertEqual(previous * 2, current)
            else:
                self.assertEqual(100, current)
            previous = current
class RetryPolicyTest(unittest.TestCase):
    def test_read_timeout(self):
        """Default policy retries a read only when enough replicas responded but no data came back."""
        policy = RetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # if we didn't get enough responses, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # if we got enough responses, but also got a data response, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # we got enough responses but no data response, so retry
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2, received_responses=2,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, 'ONE')
    def test_write_timeout(self):
        """Default policy retries a write only for BATCH_LOG writes on the first attempt."""
        policy = RetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # if it's not a BATCH_LOG write, don't retry it
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # retry BATCH_LOG writes regardless of received responses
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.BATCH_LOG,
            required_responses=10000, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, 'ONE')
    def test_unavailable(self):
        """
        The default policy never retries on Unavailable: every case RETHROWs.
        """
        policy = RetryPolicy()
        retry, consistency = policy.on_unavailable(
            query=None, consistency="ONE",
            required_replicas=1, alive_replicas=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_unavailable(
            query=None, consistency="ONE",
            required_replicas=1, alive_replicas=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_unavailable(
            query=None, consistency="ONE",
            required_replicas=10000, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
class FallthroughRetryPolicyTest(unittest.TestCase):
    """
    Runs the same scenarios as RetryPolicyTest, but FallthroughRetryPolicy
    must RETHROW in every case.
    """
    def test_read_timeout(self):
        """All read-timeout scenarios RETHROW, even the retryable one."""
        policy = FallthroughRetryPolicy()
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # RetryPolicy would RETRY here; Fallthrough must still RETHROW.
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE", required_responses=2, received_responses=2,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
    def test_write_timeout(self):
        """All write-timeout scenarios RETHROW, including BATCH_LOG writes."""
        policy = FallthroughRetryPolicy()
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # RetryPolicy would RETRY BATCH_LOG writes; Fallthrough must not.
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=WriteType.BATCH_LOG,
            required_responses=10000, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
    def test_unavailable(self):
        """All unavailable scenarios RETHROW."""
        policy = FallthroughRetryPolicy()
        retry, consistency = policy.on_unavailable(
            query=None, consistency="ONE",
            required_replicas=1, alive_replicas=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_unavailable(
            query=None, consistency="ONE",
            required_replicas=1, alive_replicas=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_unavailable(
            query=None, consistency="ONE",
            required_replicas=10000, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
class DowngradingConsistencyRetryPolicyTest(unittest.TestCase):
def test_read_timeout(self):
policy = DowngradingConsistencyRetryPolicy()
# if this is the second or greater attempt, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency="ONE", required_responses=1, received_responses=2,
data_retrieved=True, retry_num=1)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# if we didn't get enough responses, retry at a lower consistency
retry, consistency = policy.on_read_timeout(
query=None, consistency="ONE", required_responses=4, received_responses=3,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ConsistencyLevel.THREE)
# if we didn't get enough responses, retry at a lower consistency
retry, consistency = policy.on_read_timeout(
query=None, consistency="ONE", required_responses=3, received_responses=2,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ConsistencyLevel.TWO)
# retry consistency level goes down based on the # of recv'd responses
retry, consistency = policy.on_read_timeout(
query=None, consistency="ONE", required_responses=3, received_responses=1,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, ConsistencyLevel.ONE)
# if we got no responses, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency="ONE", required_responses=3, received_responses=0,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
# if we got enough response but no data, retry
retry, consistency = policy.on_read_timeout(
query=None, consistency="ONE", required_responses=3, received_responses=3,
data_retrieved=False, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETRY)
self.assertEqual(consistency, 'ONE')
# if we got enough responses, but also got a data response, rethrow
retry, consistency = policy.on_read_timeout(
query=None, consistency="ONE", required_responses=2, received_responses=2,
data_retrieved=True, retry_num=0)
self.assertEqual(retry, RetryPolicy.RETHROW)
self.assertEqual(consistency, None)
def test_write_timeout(self):
    """Write timeouts: rethrow on retries and unknown write types, ignore
    simple/batch/counter writes, downgrade unlogged batches, and retry
    batch-log writes at the original consistency level."""
    policy = DowngradingConsistencyRetryPolicy()
    # A second (or later) attempt is always rethrown.
    decision = policy.on_write_timeout(
        query=None, consistency="ONE", write_type=WriteType.SIMPLE,
        required_responses=1, received_responses=2, retry_num=1)
    self.assertEqual(decision, (RetryPolicy.RETHROW, None))
    # Failures of these write types are ignored outright.
    for ignorable_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
        decision = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=ignorable_type,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(decision[0], RetryPolicy.IGNORE)
    # Unlogged batch writes are retried at a downgraded consistency level.
    decision = policy.on_write_timeout(
        query=None, consistency="ONE", write_type=WriteType.UNLOGGED_BATCH,
        required_responses=3, received_responses=1, retry_num=0)
    self.assertEqual(decision, (RetryPolicy.RETRY, ConsistencyLevel.ONE))
    # Batch-log writes are retried, keeping the original consistency.
    decision = policy.on_write_timeout(
        query=None, consistency="ONE", write_type=WriteType.BATCH_LOG,
        required_responses=3, received_responses=1, retry_num=0)
    self.assertEqual(decision, (RetryPolicy.RETRY, "ONE"))
    # An unrecognised write type is rethrown.
    decision = policy.on_write_timeout(
        query=None, consistency="ONE", write_type=None,
        required_responses=1, received_responses=2, retry_num=0)
    self.assertEqual(decision, (RetryPolicy.RETHROW, None))
def test_unavailable(self):
    """Unavailable exceptions: rethrow on retries, otherwise retry at a
    consistency level downgraded to the number of live replicas."""
    policy = DowngradingConsistencyRetryPolicy()
    # A second (or later) attempt is always rethrown.
    decision = policy.on_unavailable(
        query=None, consistency="ONE", required_replicas=3, alive_replicas=1, retry_num=1)
    self.assertEqual(decision, (RetryPolicy.RETHROW, None))
    # First attempt: downgrade consistency to match the alive replica count.
    decision = policy.on_unavailable(
        query=None, consistency="ONE", required_replicas=3, alive_replicas=1, retry_num=0)
    self.assertEqual(decision, (RetryPolicy.RETRY, ConsistencyLevel.ONE))
| [
"thelma1944@gmail.com"
] | thelma1944@gmail.com |
01f5fea49d6cc68be50473d37f2a1db47236cf90 | 4d804319cb29d4b1fcd4396a87616f8ed5a20e84 | /algorithm/lin_ucb.py | 4de99b87938578c088c17953951b06968654d502 | [] | no_license | satoshi03/bandits | 77fa477acae8023d7ac4d6a281cfec7648574431 | fa74c30b87e2dd3d8f2c76fdeff7564d8c5016aa | refs/heads/master | 2022-10-27T07:25:08.064582 | 2020-06-13T12:12:57 | 2020-06-13T12:12:57 | 270,251,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | import numpy as np
from .base import BaseAlgorithm
class LinUCBAlgorithm(BaseAlgorithm):
    """Disjoint LinUCB contextual bandit (Li et al., 2010, Algorithm 1).

    One ridge-regression design matrix ``A`` and response vector ``b`` are
    kept per arm; ``select`` picks the arm maximising the UCB score
    ``theta_hat . x + alpha * sqrt(x^T A^-1 x)``.
    """

    def __init__(self, K, d, alpha=0.2):
        # K: number of arms; d: context-feature dimension;
        # alpha: exploration weight on the confidence bound.
        self.K = K
        self.b = np.zeros((self.K, d))
        self.A = np.array([np.identity(d) for a in range(K)])
        self.th_hat = np.zeros((self.K, d))
        self.alpha = alpha

    def select(self, x_i):
        """Return the index of the arm with the highest UCB score for context ``x_i``."""
        selected = 0
        # BUG FIX: previously initialised to 0, so whenever every arm's score
        # was negative (possible with negative contexts/rewards) arm 0 always
        # won regardless of the actual maximiser.
        max_p = -np.inf
        for a in range(self.K):
            A_inv = np.linalg.inv(self.A[a])
            # Ridge estimate of the arm's coefficient vector, cached on self
            # so callers can inspect it after a call to select().
            self.th_hat[a] = A_inv.dot(self.b[a])
            ta = x_i.dot(A_inv).dot(x_i)
            upperbound = self.alpha * np.sqrt(ta)
            linear_model = self.th_hat[a].dot(x_i)
            p = linear_model + upperbound
            if p > max_p:
                selected = a
                max_p = p
        return selected

    def update(self, arm_num, x_i, reward):
        """Fold one observed (context, reward) pair into arm ``arm_num``'s statistics."""
        self.A[arm_num] += np.outer(x_i, x_i)
        self.b[arm_num] += reward * x_i
| [
"innamisatoshi@gmail.com"
] | innamisatoshi@gmail.com |
3bdc61be7d60443b703c9034725d3e8a20781528 | a03024423d1664f937baab2d3fda01ba1f57596a | /MedAR/images/serializers/__init__.py | 576d4efd0277f8bc162575aba71cf8cc56045fa0 | [] | no_license | TheChipperDev/MedAR | d74d0451ec420443f0369027302cdd0e0787f2f2 | 17f284895cedfa0f4f17917303e96200f9e9cdef | refs/heads/main | 2023-08-30T04:15:20.087614 | 2021-09-12T03:10:50 | 2021-09-12T03:10:50 | 405,212,905 | 0 | 0 | null | 2021-09-12T03:10:51 | 2021-09-10T21:07:21 | Python | UTF-8 | Python | false | false | 64 | py | from .image import ImageSerializer
# BUG FIX: __all__ entries must be *strings* naming the public symbols;
# listing the class object itself breaks `from package import *`.
__all__ = ["ImageSerializer"]
| [
"TheChipperDev@gmail.com"
] | TheChipperDev@gmail.com |
450fbc924c74dc360419f8b44fe73a51637ee67a | 7cb099a861a25768ba8c82f5bc27149e887c1525 | /models/e4e/psp.py | bf9f75dbaa66997abfc1e3e0e4f19ddfec7fedac | [
"BSD-2-Clause",
"MIT",
"Apache-2.0"
] | permissive | johndpope/PTI | 8aaaf4e05511aacfabc22393c45c3e8edcf71b73 | b2594cdfbc02b73eb3bd4ecae7d7a77d5d369234 | refs/heads/main | 2023-08-18T01:08:56.602486 | 2021-09-13T04:46:11 | 2021-09-13T04:46:11 | 393,591,516 | 0 | 0 | MIT | 2021-08-20T03:38:56 | 2021-08-07T05:44:29 | null | UTF-8 | Python | false | false | 3,952 | py | import matplotlib
from configs import paths_config
matplotlib.use('Agg')
import torch
from torch import nn
from models.e4e.encoders import psp_encoders
from models.e4e.stylegan2.model import Generator
def get_keys(d, name):
    """Return the sub-state-dict for module *name* with the prefix stripped.

    If *d* is a full checkpoint containing a ``'state_dict'`` entry, that
    inner dict is used.  Keys whose first ``len(name)`` characters equal
    *name* are kept, with ``name`` plus the following separator character
    removed from the key.
    """
    if 'state_dict' in d:
        d = d['state_dict']
    prefix_len = len(name)
    filtered = {}
    for key, value in d.items():
        if key[:prefix_len] == name:
            filtered[key[prefix_len + 1:]] = value
    return filtered
class pSp(nn.Module):
    """pSp/e4e inversion network: an encoder that maps an image to W+ latent
    codes plus a StyleGAN2 generator that decodes them back to an image."""
    def __init__(self, opts):
        super(pSp, self).__init__()
        # opts: namespace of options; fields used here include encoder_type,
        # stylegan_size, checkpoint_path, stylegan_weights, device and
        # start_from_latent_avg.
        self.opts = opts
        # Define architecture
        self.encoder = self.set_encoder()
        self.decoder = Generator(opts.stylegan_size, 512, 8, channel_multiplier=2)
        # Output images are pooled down to 256x256 when resize=True in forward().
        self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        # Load weights if needed
        self.load_weights()
    def set_encoder(self):
        """Instantiate the encoder selected by ``opts.encoder_type``."""
        if self.opts.encoder_type == 'GradualStyleEncoder':
            encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts)
        elif self.opts.encoder_type == 'Encoder4Editing':
            encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts)
        else:
            raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type))
        return encoder
    def load_weights(self):
        """Load encoder/decoder weights, either from a combined e4e checkpoint
        or (when no checkpoint is given) from the pretrained irse50 encoder
        and raw StyleGAN generator weights."""
        if self.opts.checkpoint_path is not None:
            print('Loading e4e over the pSp framework from checkpoint: {}'.format(self.opts.checkpoint_path))
            ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
            # get_keys() strips the 'encoder.'/'decoder.' prefixes from the
            # combined checkpoint's state dict.
            self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True)
            self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True)
            self.__load_latent_avg(ckpt)
        else:
            print('Loading encoders weights from irse50!')
            encoder_ckpt = torch.load(paths_config.ir_se50)
            self.encoder.load_state_dict(encoder_ckpt, strict=False)
            print('Loading decoder weights from pretrained!')
            ckpt = torch.load(self.opts.stylegan_weights)
            self.decoder.load_state_dict(ckpt['g_ema'], strict=False)
            # repeat the single average latent once per style layer.
            self.__load_latent_avg(ckpt, repeat=self.encoder.style_count)
    def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True,
                inject_latent=None, return_latents=False, alpha=None):
        """Encode ``x`` (or treat it as latent codes when ``input_code``) and
        decode through StyleGAN2.  ``latent_mask`` selects style indices that
        are overwritten by ``inject_latent`` (blended by ``alpha``) or zeroed."""
        if input_code:
            codes = x
        else:
            codes = self.encoder(x)
            # normalize with respect to the center of an average face
            if self.opts.start_from_latent_avg:
                if codes.ndim == 2:
                    codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :]
                else:
                    codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)
        if latent_mask is not None:
            for i in latent_mask:
                if inject_latent is not None:
                    if alpha is not None:
                        # convex blend between injected and encoded code.
                        codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i]
                    else:
                        codes[:, i] = inject_latent[:, i]
                else:
                    codes[:, i] = 0
        input_is_latent = not input_code
        images, result_latent = self.decoder([codes],
                                             input_is_latent=input_is_latent,
                                             randomize_noise=randomize_noise,
                                             return_latents=return_latents)
        if resize:
            images = self.face_pool(images)
        if return_latents:
            return images, result_latent
        else:
            return images
    def __load_latent_avg(self, ckpt, repeat=None):
        """Cache the generator's average latent from the checkpoint (or None
        if absent); optionally tile it ``repeat`` times for W+ layers."""
        if 'latent_avg' in ckpt:
            self.latent_avg = ckpt['latent_avg'].to(self.opts.device)
            if repeat is not None:
                self.latent_avg = self.latent_avg.repeat(repeat, 1)
        else:
            self.latent_avg = None
| [
"danielroich@gmail.com"
] | danielroich@gmail.com |
54d94ac985c12d3fd4de8e5215f21d92676cded3 | 338284f2a26a3f43ea99142792c2f930cf033a1f | /YoutubeDjango/myporject/YoutubeFlow/views.py | 3f289b5be82bebb4112a1d781760f96466545b5f | [] | no_license | valosz66842/valosz66842 | 78c0781faef2e7ee5a7bf2292a28ebaebd630cbe | ca208f95ac281e0be5262c07459288626adf6235 | refs/heads/master | 2021-07-21T17:01:18.584448 | 2020-04-13T12:59:36 | 2020-04-13T12:59:36 | 250,428,346 | 0 | 0 | null | 2021-06-10T22:41:42 | 2020-03-27T03:11:12 | CSS | UTF-8 | Python | false | false | 2,930 | py | from django.shortcuts import render
from django import template
from django.contrib.auth import get_user_model
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from django.http import HttpResponse
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import json
from urllib import request, parse
import os
import time
import lxml.html
import re
import urllib.parse
import pandas as pd
import numpy as np
import csv
import jieba
import pymysql
from django.template.loader import get_template
from django.shortcuts import render_to_response
from django.http import JsonResponse
from sqlalchemy import create_engine
from confluent_kafka import Producer
from confluent_kafka import Consumer, KafkaException, KafkaError
# Create your views here.
# Create your views here.
import joblib
from django.views.decorators.csrf import csrf_exempt
import sys
pwd=os.path.dirname(__file__)
# sys.path.append(pwd+"../kafka_producer.py")
# import kafka_producer
def prediction(MainFollow, VideoAvgLike, FeatFollow, FeatAvgLike):
    """Run the four pre-trained regressors on one feature row and return their
    predictions as (linear, random-forest, KNN, decision-tree).

    Models are loaded from .sav files next to this module on every call.
    """
    rf_model = joblib.load(pwd + r"/RF_rg_model.sav")
    lr_model = joblib.load(pwd + r"/LR_rg_model.sav")
    dt_model = joblib.load(pwd + r"/DT_rg_model.sav")
    knn_model = joblib.load(pwd + r"/KNN_rg_model.sav")
    # Single-sample feature matrix in the order the models were trained on.
    features = [[MainFollow, VideoAvgLike, FeatFollow, FeatAvgLike]]
    lr_pred = lr_model.predict(features)
    rf_pred = rf_model.predict(features)
    knn_pred = knn_model.predict(features)
    dt_pred = dt_model.predict(features)
    # NOTE: the RF value is truncated via int() while the others are rounded
    # floats -- preserved as-is from the original behaviour.
    return (round(float(lr_pred), 0), int(rf_pred),
            round(float(knn_pred), 0), round(float(dt_pred), 0))
def YoutubeFlow(request):
    """Render the YoutubeFlow page; when the query string carries the four
    channel metrics, run the regression models and include predictions.

    NOTE(review): the template receives ``locals()``, so the exact local
    variable names below (VideoAvgLike, ChannelFollow, OneHourFlow,
    ChannelMedian, LnPredict, RfPredict, KNNPredict, DTPredict) are part of
    the template contract -- do not rename them.
    """
    # Only run the models when the form was actually submitted with a value.
    if 'VideoAvgLike' in request.GET and request.GET['VideoAvgLike'] != '':
        # NOTE(review): float() raises ValueError on non-numeric input;
        # presumably the form enforces numeric fields -- confirm.
        VideoAvgLike=float(request.GET["VideoAvgLike"])
        ChannelFollow =float(request.GET["ChannelFollow"])
        OneHourFlow=float(request.GET["OneHourFlow"])
        ChannelMedian=float(request.GET["ChannelMedian"])
        # YoutubeFlow = {"YoutubeFlow":{
        #     "ChannelVideoLookMedian": ChannelMedian,
        #     "ChannelVideoLookAvg": VideoAvgLike,
        #     "ChannelFollow": ChannelFollow,
        #     "OneHourFlow": OneHourFlow
        #         }
        # }
        # try:
        #     ip = request.META['REMOTE_ADDR']
        # except:
        #     ip = request.META['HTTP_X_FORWARDED_FOR']
        # try:
        #     kafka_producer.producer(kafka_producer.Els(ip,"YoutubeFlow", YoutubeFlow["YoutubeFlow"]))
        # except:
        #     pass
        # Argument order matches prediction(MainFollow, VideoAvgLike,
        # FeatFollow, FeatAvgLike).
        LnPredict,RfPredict,KNNPredict,DTPredict=prediction(ChannelMedian,VideoAvgLike,ChannelFollow,OneHourFlow)
        return render_to_response('YoutubeFlow.html',locals())
    else:
        # No submitted metrics: render the empty form.
        return render_to_response('YoutubeFlow.html',locals())
def ajax_youtube(request):
    """AJAX endpoint; currently always answers with an empty JSON object."""
    return JsonResponse({})
def ajax_youtube_Img(request):
    """AJAX endpoint for image data; currently always answers with an empty JSON object."""
    return JsonResponse({})
"valosz66842@gmail.com"
] | valosz66842@gmail.com |
8d8d61fd098f88052462a8ffca8726bedc8e0be5 | 25faf278aa86c2650a2b8725a2c16b82e0d8711c | /python/main.py | dbc049276a73410ea4bf67f57571a4dfff131207 | [] | no_license | E-Cell-VSSUT/coders | 9fae0a468d7756608311079ede0debe5b46fa8a9 | e220626ca8a65d35513ef129f17369634a8aed11 | refs/heads/main | 2023-09-04T09:53:31.435810 | 2021-10-31T15:17:44 | 2021-10-31T15:17:44 | 415,492,971 | 1 | 16 | null | 2021-10-31T15:17:45 | 2021-10-10T05:17:30 | C | UTF-8 | Python | false | false | 3,453 | py | Lefthand_player1 = 1
# Each hand starts with one finger raised (a hand is "dead" at >= 5).
Lefthand_player2 = 1
Righthand_player1 = 1
Righthand_player2 = 1
# Explain the two input formats the game loop expects.
print("Input1:Enter A for attack and S for split")
print("Input2:Enter attack combination LL or LR or RL or RR")
def error():
    """Tell the current player their input was not understood; turn is forfeited."""
    # Message text is user-visible CLI output -- kept byte-identical.
    print('\nInvalid input,Wait for your move.')
def status():
    """Print both players' current hand counts (left then right)."""
    print("Current Status:")
    print(f"Player1: {Lefthand_player1} {Righthand_player1}")
    print(f"Player2: {Lefthand_player2} {Righthand_player2}")
# Main game loop ("chopsticks"): players alternate until one player's hands
# are both empty.  count doubles as the loop flag (set to 0 on a win).
count = 1
while count > 0:
    # ---- Player 1's turn -------------------------------------------------
    first_move = input("player 1's move: ").upper()
    if first_move == 'A':
        # Attack: first letter is the attacker's hand, second the target hand.
        # A hand reaching 5 or more fingers dies (reset to 0).
        # NOTE(review): there is no check that the attacking or attacked hand
        # is still alive (non-zero) -- TODO confirm intended rules.
        combination_a = input("Enter the combination: ")
        combination_1 = combination_a.upper()
        if combination_1 == 'LR':
            Righthand_player2 = Righthand_player2 + Lefthand_player1
            if Righthand_player2 >= 5:
                Righthand_player2 = 0
        elif combination_1 == 'RL':
            Lefthand_player2 = Lefthand_player2 + Righthand_player1
            if Lefthand_player2 >= 5:
                Lefthand_player2 = 0
        elif combination_1 == 'LL':
            Lefthand_player2 = Lefthand_player2 + Lefthand_player1
            if Lefthand_player2 >= 5:
                Lefthand_player2 = 0
        elif combination_1 == 'RR':
            Righthand_player2 = Righthand_player2 + Righthand_player1
            if Righthand_player2 >= 5:
                Righthand_player2 = 0
        else:
            error()
    elif first_move == 'S':
        # Split: expects three whitespace-separated tokens, e.g. "S 2 3".
        # NOTE(review): no validation that a+b preserves the finger total, and
        # a malformed line raises ValueError -- TODO harden input parsing.
        z, a, b = input("Enter your combination:").split()
        a = int(a)
        b = int(b)
        Lefthand_player1 = a
        Righthand_player1 = b
    else:
        error()
    status()
    # Check for a winner after player 1's move.
    if (Lefthand_player1 == 0 and Righthand_player1 == 0) or (Lefthand_player2 == 0 and Righthand_player2 == 0):
        count = 0
        if Lefthand_player1 == 0 and Righthand_player1 == 0:
            print("Player2 has won!")
        else:
            print("Player1 has won!")
        break
    # ---- Player 2's turn (mirror of player 1's) --------------------------
    second_move = input("player 2's move: ").upper()
    if second_move == 'A':
        combination_b = input("Enter the combination: ")
        combination_2 = combination_b.upper()
        if combination_2 == 'LR':
            Righthand_player1 = Righthand_player1 + Lefthand_player2
            if Righthand_player1 >= 5:
                Righthand_player1 = 0
        elif combination_2 == 'RL':
            Lefthand_player1 = Lefthand_player1 + Righthand_player2
            if Lefthand_player1 >= 5:
                Lefthand_player1 = 0
        elif combination_2 == 'LL':
            Lefthand_player1 = Lefthand_player1 + Lefthand_player2
            if Lefthand_player1 >= 5:
                Lefthand_player1 = 0
        elif combination_2 == 'RR':
            Righthand_player1 = Righthand_player1 + Righthand_player2
            if Righthand_player1 >= 5:
                Righthand_player1 = 0
        else:
            error()
    elif second_move == 'S':
        z, a, b = input("Enter your combination:").split()
        a = int(a)
        b = int(b)
        Lefthand_player2 = a
        Righthand_player2 = b
    else:
        error()
    status()
    # Check for a winner after player 2's move (loop exits via count == 0).
    if (Lefthand_player1 == 0 and Righthand_player1 == 0) or (Lefthand_player2 == 0 and Righthand_player2 == 0):
        count = 0
        if Lefthand_player1 == 0 and Righthand_player1 == 0:
            print("Player2 has won!")
        else:
            print("Player1 has won!")
"noreply@github.com"
] | noreply@github.com |
c04f540c224f089a209045fa1a8830c5b29a8c1e | 49859591ead0d2241d83bd4102e1c803c0b804a2 | /mdann_deepshinx/deepshinx/data.py | 2d2cea60f24e0aae058bd9b99f7f889f4927d969 | [] | no_license | joshinh/Domain-Adaptation | 73f5a0dfd7c97fe1ddaefee37028032ce3f120a5 | b0a81b8c8d1df3f5161db578697144e1666a5fb9 | refs/heads/master | 2021-03-27T08:49:36.731352 | 2018-05-04T17:04:01 | 2018-05-04T17:04:01 | 112,062,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,048 | py | '''Data utilities'''
import threading
import random
import numpy as np
from python_speech_features.base import fbank, delta
import tensorflow as tf
from deepsphinx.vocab import VOCAB_TO_INT
from deepsphinx.utils import FLAGS
from deepsphinx.fst import in_fst
import soundfile as sf
import csv
def get_features(audio_file):
    '''Compute log filterbank + delta + delta-delta + energy features for one file.

    Returns an array of shape (frames, 3 * FLAGS.nfilt + 1).
    '''
    handle = tf.gfile.FastGFile(audio_file, 'rb')
    signal, sample_rate = sf.read(handle)
    log_fbank, energy = fbank(signal, sample_rate, nfilt=FLAGS.nfilt)
    log_fbank = np.log(log_fbank)
    d1 = delta(log_fbank, 2)
    d2 = delta(d1, 2)
    energy = np.expand_dims(energy, 1)
    return np.concatenate([log_fbank, d1, d2, energy], axis=1)
def get_speaker_stats(set_ids):
    '''Compute per-speaker mean and variance of features over all transcript files.

    Args:
        set_ids: collection of dataset-split ids; only rows whose set id is in
            this collection contribute to the statistics.

    Returns:
        (mean, var): two dicts keyed by speaker id, each holding an array of
        length 3 * FLAGS.nfilt + 1.
    '''
    tf.logging.info('Getting speaker stats')
    # BUG FIX: source_file_4 was read (as trans5) but accidentally left out of
    # the concatenation, so speakers appearing only in that file had no stats
    # and the corresponding reader thread crashed with a KeyError.
    trans_files = [FLAGS.source_file_1, FLAGS.dann_file, FLAGS.source_file_2,
                   FLAGS.source_file_3, FLAGS.source_file_4]
    trans = []
    for trans_file in trans_files:
        trans.extend(tf.gfile.FastGFile(trans_file).readlines())
    sum_speaker = {}
    sum_sq_speaker = {}
    count_speaker = {}
    n_feat = 3 * FLAGS.nfilt + 1
    for _, set_id, speaker, audio_file in csv.reader(trans):
        if set_id in set_ids:
            if speaker not in sum_speaker:
                sum_speaker[speaker] = np.zeros(n_feat)
                sum_sq_speaker[speaker] = np.zeros(n_feat)
                count_speaker[speaker] = 0
            feat = get_features(audio_file)
            # Accumulate per-utterance means; stats are means of means, as in
            # the original implementation.
            sum_speaker[speaker] += np.mean(feat, 0)
            sum_sq_speaker[speaker] += np.mean(np.square(feat), 0)
            count_speaker[speaker] += 1
    mean = {k: sum_speaker[k] / count_speaker[k] for k in sum_speaker}
    # var = E[x^2] - (E[x])^2
    var = {k: sum_sq_speaker[k] / count_speaker[k] - np.square(mean[k])
           for k in sum_sq_speaker}
    return mean, var
def read_data_queue(
        set_id,
        queue1,
        queue2,
        queue3,
        queue4,
        queue_,
        sess,
        mean_speaker,
        var_speaker,
        fst):
    '''Start one daemon reader thread per domain, each feeding its own queue.

    The four source-domain queues and the target-domain queue each get their
    own placeholders, enqueue/close ops and reader thread.  This replaces five
    copy-pasted placeholder/thread stanzas with a single loop; behaviour is
    unchanged (all threads are daemons and are started immediately).

    Args:
        set_id: dataset-split id the readers should select rows for.
        queue1..queue4: TF queues for source domains 1-4.
        queue_: TF queue for the target (DANN) domain.
        sess: the TF session used to run the enqueue/close ops.
        mean_speaker, var_speaker: per-speaker normalisation stats.
        fst: language-model FST used for filtering when FLAGS.use_train_lm.
    '''
    feat_dim = FLAGS.nfilt * 3 + 1
    readers = [
        (queue1, read_data_thread1),
        (queue2, read_data_thread2),
        (queue3, read_data_thread3),
        (queue4, read_data_thread4),
        (queue_, read_data_thread_),
    ]
    for queue, target in readers:
        input_data = tf.placeholder(dtype=tf.float32, shape=[None, feat_dim])
        input_length = tf.placeholder(dtype=tf.int32, shape=[])
        output_data = tf.placeholder(dtype=tf.int32, shape=[None])
        output_length = tf.placeholder(dtype=tf.int32, shape=[])
        enqueue_op = queue.enqueue(
            [input_data, input_length, output_data, output_length])
        close_op = queue.close()
        thread = threading.Thread(
            target=target,
            args=(set_id, sess, input_data, input_length, output_data,
                  output_length, enqueue_op, close_op, mean_speaker,
                  var_speaker, fst))
        thread.daemon = True  # Thread will close when parent quits.
        thread.start()
def _enqueue_transcripts(
        trans_path,
        set_id,
        sess,
        input_data,
        input_length,
        output_data,
        output_length,
        enqueue_op,
        close_op,
        mean_speaker,
        var_speaker,
        fst):
    '''Shared worker body for all domain reader threads.

    Reads the CSV transcript file at ``trans_path``, shuffles it, and for each
    row belonging to ``set_id`` (and, when FLAGS.use_train_lm, accepted by the
    FST) computes speaker-normalised features and enqueues them together with
    the integer-encoded transcript.  Rows containing characters outside the
    vocabulary are skipped.  Closes the queue when the file is exhausted.
    '''
    trans = tf.gfile.FastGFile(trans_path).readlines()
    random.shuffle(trans)
    for text, set_id_trans, speaker, audio_file in csv.reader(trans):
        try:
            text = [VOCAB_TO_INT[c]
                    for c in list(text)] + [VOCAB_TO_INT['</s>']]
        except KeyError:
            # Transcript uses a character not in the vocabulary: skip the row.
            continue
        if (set_id == set_id_trans and
                ((not FLAGS.use_train_lm) or in_fst(fst, text))):
            feat = get_features(audio_file)
            # Per-speaker mean/variance normalisation.
            feat = feat - mean_speaker[speaker]
            feat = feat / np.sqrt(var_speaker[speaker])
            sess.run(enqueue_op, feed_dict={
                input_data: feat,
                input_length: feat.shape[0],
                output_data: text,
                output_length: len(text)})
    sess.run(close_op)


def read_data_thread1(
        set_id,
        sess,
        input_data,
        input_length,
        output_data,
        output_length,
        enqueue_op1,
        close_op1,
        mean_speaker,
        var_speaker,
        fst):
    '''Enqueue data to queue for source domain 1'''
    _enqueue_transcripts(
        FLAGS.source_file_1, set_id, sess, input_data, input_length,
        output_data, output_length, enqueue_op1, close_op1, mean_speaker,
        var_speaker, fst)


def read_data_thread2(
        set_id,
        sess,
        input_data,
        input_length,
        output_data,
        output_length,
        enqueue_op2,
        close_op2,
        mean_speaker,
        var_speaker,
        fst):
    '''Enqueue data to queue for source domain 2'''
    _enqueue_transcripts(
        FLAGS.source_file_2, set_id, sess, input_data, input_length,
        output_data, output_length, enqueue_op2, close_op2, mean_speaker,
        var_speaker, fst)


def read_data_thread3(
        set_id,
        sess,
        input_data,
        input_length,
        output_data,
        output_length,
        enqueue_op3,
        close_op3,
        mean_speaker,
        var_speaker,
        fst):
    '''Enqueue data to queue for source domain 3'''
    _enqueue_transcripts(
        FLAGS.source_file_3, set_id, sess, input_data, input_length,
        output_data, output_length, enqueue_op3, close_op3, mean_speaker,
        var_speaker, fst)


def read_data_thread4(
        set_id,
        sess,
        input_data,
        input_length,
        output_data,
        output_length,
        enqueue_op4,
        close_op4,
        mean_speaker,
        var_speaker,
        fst):
    '''Enqueue data to queue for source domain 4'''
    _enqueue_transcripts(
        FLAGS.source_file_4, set_id, sess, input_data, input_length,
        output_data, output_length, enqueue_op4, close_op4, mean_speaker,
        var_speaker, fst)


def read_data_thread_(
        set_id,
        sess,
        input_data,
        input_length,
        output_data,
        output_length,
        enqueue_op_,
        close_op_,
        mean_speaker,
        var_speaker,
        fst):
    '''Enqueue data to queue for the target domain'''
    _enqueue_transcripts(
        FLAGS.dann_file, set_id, sess, input_data, input_length,
        output_data, output_length, enqueue_op_, close_op_, mean_speaker,
        var_speaker, fst)
| [
"noreply@github.com"
] | noreply@github.com |
58c9a5f0e565ccdbaf918e996f7f4175077c440a | fea9d6b8186cf07fb66229988fadc0d066ee5d86 | /pynrm/rl_deconv.py | 80ebf95585f52741f4125fd1b98f1c5e4814fa8b | [
"MIT"
] | permissive | mikeireland/pynrm | 71859f5e3f962a57c5078935adca58832a46b075 | ae25b01ba590ee581232dacdb906de6effc7870b | refs/heads/master | 2021-01-10T06:40:01.379315 | 2018-07-01T15:07:15 | 2018-07-01T15:07:15 | 35,964,865 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,547 | py | """This makes a simple Richardson-Lucy deconvolution on a cleaned data cube, with
some reference calibrator images. Input data have to be neatly packaged in a single
data cube.
To make a "good_ims.fits" file, run "choose_psfs.py" after cleaning the data
(e.g. with process_block called in a script go.py or run_clean)."""
from __future__ import print_function, division
import astropy.io.fits as pyfits
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pdb
import aplpy
import opticstools as ot
plt.ion()
def rl_deconv(tgt_fn=None, cal_fn=None, good_ims_fn=None, niter=50):
    '''Deconvolve a target using RL deconvolution and either a pair of target and
    calibrator cubes, or a set of manually selected "good" files

    Writes 'deconv_image.fits' and 'deconv_image_sub.fits' to the current
    directory and opens several matplotlib/aplpy figures as a side effect.

    Parameters
    ----------
    tgt_fn: string
        Name of the target cube file
    cal_fn: string
        Name of the calibrator cube file
    good_ims_fn: string
        Name of the "good_ims" filename, from choose_psfs.
    niter: int
        Number of Richardson-Lucy iterations per (target, calibrator) pair.
    '''
    # Load images and a mean parallactic angle, from either the tgt/cal pair
    # of cubes or a single choose_psfs "good_ims" file.
    if good_ims_fn is None:
        header = pyfits.getheader(tgt_fn)
        radec = [header['RA'],header['DEC']]
        pa = np.mean(pyfits.getdata(tgt_fn,1)['pa'])
        tgt_ims = pyfits.getdata(tgt_fn)
        cal_ims = pyfits.getdata(cal_fn)
    else:
        header = pyfits.getheader(good_ims_fn)
        radec = [header['RA'],header['DEC']]
        pas = pyfits.getdata(good_ims_fn,2)['pa']
        #Check for too much sky rotation.
        pa_diffs = pas - pas[0]
        # Wrap differences into (-180, 180] before averaging.
        pa_diffs = ((pa_diffs + 180) % 360) - 180
        if np.max(np.abs(pa_diffs)) > 30:
            raise UserWarning("Too much sky rotation! Re-write code or reduce number of files.")
        #Average the pas modulo 360
        pa = pas[0] + np.mean(pa_diffs)
        tgt_ims = pyfits.getdata(good_ims_fn, 0)
        cal_ims = pyfits.getdata(good_ims_fn, 1)
    subtract_median=True
    sz = tgt_ims.shape[1]
    best_models = np.zeros( tgt_ims.shape )
    best_rms = np.zeros( tgt_ims.shape[0] )
    # Remove a per-image median background (row by row) from both cubes.
    if subtract_median:
        for i in range(len(cal_ims)):
            for j in range(len(cal_ims[i])):
                cal_ims[i][j] -= np.median(cal_ims[i])
        for i in range(len(tgt_ims)):
            for j in range(len(tgt_ims[i])):
                tgt_ims[i][j] -= np.median(tgt_ims[i])
    #Loop through all target images, and make the best deconvolution possible for each image.
    for i in range(tgt_ims.shape[0]):
        print("Working on image {0:d}".format(i))
        #Create a blank model image
        model_ims = np.zeros( cal_ims.shape )
        #Create a blank array of RMS of the model fits to the data
        rms = np.zeros( cal_ims.shape[0] )
        #Extract the data image from the cube and normalise it
        data = tgt_ims[i,:,:]
        data /= np.sum(data)
        #In order for RL deconvolution to work, we need to put in a background offset for
        #flux. We'll subtract this at the end.
        data += 1.0/data.size
        #Find the peak pixel in the data.
        max_ix_data = np.argmax(data)
        max_ix_data = np.unravel_index(max_ix_data,data.shape)
        #Try to deconvolve with each calibrator image one at a time.
        for j in range(cal_ims.shape[0]):
            #Extract and normalise the Point-Spread Function
            psf = cal_ims[j,:,:]
            psf /= np.sum(psf)
            #Find the maximum pixel for the PSF, and roll the PSF so that (0,0) is the
            #peak pixel.
            max_ix = np.argmax(psf)
            max_ix = np.unravel_index(max_ix, psf.shape)
            psf = np.roll(np.roll(psf, -max_ix[0], axis=0), -max_ix[1],axis=1)
            #To save computational time, pre-compute the Fourier transform of the PSF
            psf_ft = np.fft.rfft2(psf)
            #The initial model just has a "star" at the location of the data maximum
            model = np.zeros(data.shape)
            model += 1.0/data.size
            model[max_ix_data] = 1.0
            #Do the RL magical algorithm. See
            for k in range(niter):
                # u (convolved) p is our model of the data. Compute this first.
                model_convolved = np.fft.irfft2(np.fft.rfft2(model)*psf_ft)
                # Update the model according to the RL algorithm
                model *= np.fft.irfft2(np.fft.rfft2(data / model_convolved)*np.conj(psf_ft))
            model_convolved = np.fft.irfft2(np.fft.rfft2(model)*psf_ft)
            #Record the RMS difference between the model and the data.
            rms[j] = np.sqrt(np.mean( (model_convolved - data)**2)) * data.size
            #Subtract off our offset.
            model -= 1.0/data.size
            #Shift the final model to the middle, so we can add together target images on
            #different pixel coordinates
            model_ims[j,:,:] = np.roll(np.roll(model,sz//2-max_ix_data[0], axis=0), sz//2-max_ix_data[1], axis=1)
        #Only use the calibrator with the best RMS. i.e. we assume this is the best PSF for our data.
        best_cal = np.argmin(rms)
        best_models[i,:,:] = model_ims[best_cal,:,:]
        best_rms[i] = rms[best_cal]
    # Central pixel holds the point-source flux; save it, then zero it so the
    # extended structure dominates the averaged image.
    ptsrc_fluxes = best_models[:,sz//2,sz//2].copy()
    #set the central pixel to zero.
    best_models[:,sz//2,sz//2]=0
    final_image = np.mean(best_models,axis=0)
    image = final_image/np.max(final_image)
    # Subtract the 180-degree-rotated image to highlight asymmetric structure.
    image_sub = image - np.roll(np.roll(image[::-1,::-1],1,axis=0),1,axis=1)
    image[sz//2,sz//2]=1.0
    image_sub[sz//2,sz//2]=1.0
    plt.imshow(np.arcsinh(image/0.1), interpolation='nearest', cmap=cm.cubehelix)
    plt.plot(sz//2,sz//2, 'r*', markersize=20)
    # Colour-bar tick positions, evenly spaced in arcsinh stretch.
    tic_min = np.min(image)
    tic_max = np.max(image)
    tics = np.arcsinh(tic_min/0.1) + np.arange(8)/7.0*(np.arcsinh(tic_max/0.1) - np.arcsinh(tic_min/0.1))
    tics = np.sinh(tics)*0.1
    # Build a WCS header: 0.01 arcsec/pixel, rotated by the mean parallactic angle.
    hdu = pyfits.PrimaryHDU(image)
    costerm = np.cos(np.radians(pa))*0.01/3600.
    sinterm = np.sin(np.radians(pa))*0.01/3600.
    hdu.header['CRVAL1']=radec[0]
    hdu.header['CRVAL2']=radec[1]
    hdu.header['CTYPE1']='RA---TAN'
    hdu.header['CTYPE2']='DEC--TAN'
    hdu.header['CRPIX1']=sz//2
    hdu.header['CRPIX2']=sz//2
    hdu.header['CD1_1']=-costerm
    hdu.header['CD2_2']=costerm
    hdu.header['CD1_2']=sinterm
    hdu.header['CD2_1']=sinterm
    #hdu.header['RADECSYS']='FK5'
    hdulist = pyfits.HDUList([hdu])
    hdu.data = image
    # NOTE(review): 'clobber' is deprecated in newer astropy in favour of
    # 'overwrite' -- confirm the installed astropy version before changing.
    hdulist.writeto('deconv_image.fits', clobber=True)
    fig = aplpy.FITSFigure('deconv_image.fits')
    fig.show_colorscale(cmap=cm.cubehelix, stretch='arcsinh',vmax=1, vmid=0.05)
    fig.add_colorbar()
    fig.add_grid()
    hdu.data=image_sub
    hdulist.writeto('deconv_image_sub.fits', clobber=True)
    fig2 = aplpy.FITSFigure('deconv_image_sub.fits')
    fig2.show_colorscale(cmap=cm.cubehelix, stretch='arcsinh',vmax=1, vmid=0.05)
    fig2.add_colorbar()
    fig2.add_grid()
    fig3 = aplpy.FITSFigure('deconv_image.fits')
    fig3.show_colorscale(cmap=cm.cubehelix, stretch='linear',vmax=1, vmin=0.0)
    fig3.add_colorbar()
    fig3.add_grid()
    # Azimuthally averaged radial profile with annotated radii.
    # NOTE(review): center=[64,64] assumes sz == 128 -- TODO confirm for other sizes.
    plt.figure(1)
    plt.clf()
    rr, ii = ot.azimuthalAverage(image,returnradii=True,center=[64,64],binsize=0.7)
    plt.plot(rr*0.01,ii)
    plt.axis([0,.3,-0.05,0.8])
    plt.xlabel('Radius (arcsec)')
    plt.ylabel('Azi. Ave. Intensity (rel. to disk peak)')
    plt.plot([0.11,0.11],[-0.1,1],'r')
    plt.plot([0.17,0.17],[-0.1,1],'r')
    plt.annotate("Companion Radius", [0.11,0.6],[0.18,0.6],arrowprops={"arrowstyle":"->"})
    plt.annotate("Wall Radius", [0.17,0.3],[0.2,0.3],arrowprops={"arrowstyle":"->"})
"michael.ireland@anu.edu.au"
] | michael.ireland@anu.edu.au |
ffbdf922a169191795e21b24f226334344e6b2b8 | 8a08d39142c7b5c7dc9300717f0db6dad295ec92 | /antelope_core/providers/parse_math.py | 8fb0f24a50ac68f93528c7d0a658cd62da7d7e04 | [
"BSD-3-Clause"
] | permissive | msm-sardar/core | 3eac85248914ada808882b9dedefd889756be504 | bc88a1ed3e4c1defcbc83fa86356451ac34c178c | refs/heads/master | 2023-08-24T03:56:31.892812 | 2021-10-14T01:12:02 | 2021-10-14T01:12:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | import ast
def parse_math(expression):
    """Safely evaluate a purely arithmetic expression entered by the user.

    Only numeric literals, unary operators and binary operators are allowed;
    anything else (names, calls, attribute access, comparisons) is rejected,
    so arbitrary code cannot be executed even though eval() is used.

    Args:
        expression: the user-supplied string.

    Returns:
        The numeric value of the expression, or None if the string is not a
        valid, purely arithmetic Python expression.
    """
    try:
        tree = ast.parse(expression, mode='eval')
    except SyntaxError:
        return None  # not a Python expression
    # ast.Num is deprecated (removed in future Pythons); literals now parse as
    # ast.Constant, so accept only Constants holding numeric values.
    allowed = (ast.Expression, ast.UnaryOp, ast.unaryop, ast.BinOp, ast.operator)
    for node in ast.walk(tree):
        if isinstance(node, ast.Constant):
            if not isinstance(node.value, (int, float, complex)):
                return None  # string/None/... literal: not arithmetic
        elif not isinstance(node, allowed):
            return None  # not a mathematical expression (numbers and operators)
    return eval(compile(tree, filename='', mode='eval'))
| [
"brandon.kuczenski@301south.net"
] | brandon.kuczenski@301south.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.