blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e57241ea2f17581520fe4e0c508b3724e1a22b6 | cfb1cfac4bac4a2bc73caf0e86719ff50da04c1d | /rrc_scraper.py | 7e8eded2a5e89d349874db305af26d6dae02e4c7 | [] | no_license | areed145/petroleous | b3456ecbbb926a5c95d4f9736d91adb9f4105d98 | 4de7f2be5f554c69d4db4a8ef642d5ffff97a92d | refs/heads/master | 2020-03-27T00:46:17.953207 | 2018-12-18T05:09:49 | 2018-12-18T05:09:49 | 145,655,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,273 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 7 18:47:10 2018
@author: areed145
"""
import urllib.request
import urllib.parse
import re
import io
import csv
import pandas as pd
import matplotlib.pyplot as plt
# Endpoints of the Texas RRC 'Electronic Warrant Application' (EWA) web app
URL_BASE = 'http://webapps2.rrc.state.tx.us/EWA/'
WELLBORE_SEARCH_URL = URL_BASE + 'wellboreQueryAction.do'
LEASE_PRODUCTION_URL = URL_BASE + 'specificLeaseQueryAction.do'
DRILLING_PERMIT_URL = URL_BASE + 'drillingPermitsQueryAction.do'
# Public RRC GIS map viewer (takes an 8-digit API number as a query param)
GIS_BASE = 'http://wwwgisp.rrc.texas.gov/GISViewer2/index.html'
def production_from_lease(lease, district, well_type):
    """Fetch monthly production for a lease and return it as a DataFrame.

    Queries the RRC site via rrc_production_query, parses the CSV
    response, and annotates each row with lease/district/well-type
    columns.  The 'Month' column is converted to datetime.
    """
    raw_csv = rrc_production_query(lease, district, well_type)
    records = parse_production_csv(raw_csv, well_type)
    frame = pd.DataFrame(records)
    frame['Lease'] = lease
    frame['District'] = district
    frame['Month'] = pd.to_datetime(frame['Month'])
    frame['Well Type'] = well_type
    return frame
def lease_from_API(api):
    """Resolve an API number to a (lease, district, well_type) tuple.

    Raises RuntimeError when the API number does not have 10, 12 or 14
    digits.  The well type is inferred from the lease-number length:
    6-digit lease numbers belong to gas ('G') leases, others to oil ('O').
    """
    if len(api) not in (10, 12, 14):
        raise RuntimeError('Invalid API number.')
    result_html = rrc_lease_query(api)
    lease = extract_lease_no(result_html)
    district = extract_district(result_html)
    result_html = rrc_permit_query(api)
    # depth = extract_depth(result_html)
    well_type = 'G' if len(lease) == 6 else 'O'
    return (lease, district, well_type)
def gis_query(api):
    """GET the RRC GIS viewer page for an API number.

    The two-digit state prefix is dropped because the viewer expects
    the 8-digit county+well portion only.  Returns the decoded response
    body; raises RuntimeError on a non-200 HTTP status.
    """
    trimmed = api[2:]
    url = GIS_BASE + '?api=' + trimmed
    req = urllib.request.Request(url)
    with urllib.request.urlopen(req) as resp:
        if resp.status != 200:
            raise RuntimeError('HTTP request failed.')
        body = resp.read()
    return body.decode()
def rrc_permit_query(api):
    """POST a drilling-permit search for an API number.

    The two-digit state prefix is stripped before submission.  Returns
    the decoded HTML result page; raises RuntimeError on a non-200
    HTTP status.
    """
    trimmed = api[2:]
    payload = urllib.parse.urlencode({
        'searchArgs.apiNoHndlr.inputValue': trimmed,
        'methodToCall': 'search',
    }).encode('utf-8')
    req = urllib.request.Request(DRILLING_PERMIT_URL, payload,
                                 {'user-agent': 'Mozilla/5.0'},
                                 method='POST')
    with urllib.request.urlopen(req) as resp:
        if resp.status != 200:
            raise RuntimeError('HTTP request failed.')
        body = resp.read()
    return body.decode()
def rrc_lease_query(api):
    """POST a wellbore search for an API number and return the result HTML.

    The API number is split into the 3-digit prefix (county code) and
    6-digit suffix expected by the EWA wellbore form.  Raises
    RuntimeError on a non-200 HTTP status.
    """
    prefix, suffix = api[2:5], api[5:11]
    payload = urllib.parse.urlencode({
        'searchArgs.apiNoPrefixArg': prefix,
        'searchArgs.apiNoSuffixArg': suffix,
        'methodToCall': 'search',
    }).encode('utf-8')
    req = urllib.request.Request(WELLBORE_SEARCH_URL, payload,
                                 {'user-agent': 'Mozilla/5.0'},
                                 method='POST')
    with urllib.request.urlopen(req) as resp:
        if resp.status != 200:
            raise RuntimeError('HTTP request failed.')
        body = resp.read()
    return body.decode()
def extract_depth(permit_query_result):
    """Extract a 'depth' value from a drilling-permit query result page.

    NOTE(review): the regex below matches ``leaseno=<digits>`` --
    apparently copy-pasted from extract_lease_no -- so the returned
    value is a lease number, not a depth.  The only call site (in
    lease_from_API) is commented out.  Confirm the intended pattern
    before enabling this function.
    """
    # compile once and cache the pattern on the function object
    if 'rgx' not in extract_depth.__dict__:
        extract_depth.rgx = re.compile(r'leaseno=(\d+)', re.IGNORECASE)
    match = extract_depth.rgx.search(permit_query_result)
    if not match:
        raise RuntimeError('Unable to find depth!')
    return match.group(1)
def extract_lease_no(lease_query_result):
    """Return the first ``leaseno=<digits>`` value found in result HTML.

    The compiled pattern is memoized on the function object.  Raises
    RuntimeError when no lease number is present.
    """
    rgx = getattr(extract_lease_no, 'rgx', None)
    if rgx is None:
        rgx = re.compile(r'leaseno=(\d+)', re.IGNORECASE)
        extract_lease_no.rgx = rgx
    found = rgx.search(lease_query_result)
    if found is None:
        raise RuntimeError('Unable to find lease number!')
    return found.group(1)
def extract_district(lease_query_result):
    """Return the first ``district=<digits>`` value found in result HTML.

    The compiled pattern is memoized on the function object.  Raises
    RuntimeError when no district code is present.
    """
    rgx = getattr(extract_district, 'rgx', None)
    if rgx is None:
        rgx = re.compile(r'district=(\d+)', re.IGNORECASE)
        extract_district.rgx = rgx
    found = rgx.search(lease_query_result)
    if found is None:
        raise RuntimeError('Unable to find district!')
    return found.group(1)
def extract_well_type(lease_query_result):
    """Return the well type string for a lease.

    Finds the lease-detail link in the wellbore query result HTML,
    performs an extra HTTP GET against that detail page, then scrapes
    the 'Well Type:' field out of it.

    Raises RuntimeError when the detail link or well type cannot be
    found, or when the HTTP request fails.
    """
    # compiled patterns are cached on the function object (compile once)
    if 'detail_link_rgx' not in extract_well_type.__dict__:
        extract_well_type.detail_link_rgx = re.compile(
            r'href="(leaseDetailAction.do[^"]+)"', re.IGNORECASE)
    match = extract_well_type.detail_link_rgx.search(lease_query_result)
    if not match:
        raise RuntimeError('No detail link found!')
    detail_url = URL_BASE + match.group(1)
    # follow the link to the lease detail page
    request = urllib.request.urlopen(detail_url)
    if (request.status != 200):
        raise RuntimeError('HTTP request failed.')
    lease_detail = request.read().decode()
    if 'well_type_rgx' not in extract_well_type.__dict__:
        extract_well_type.well_type_rgx = re.compile(
            r'Well Type:\s+<[^>]+>\s+(\w+)', re.IGNORECASE)
    match = extract_well_type.well_type_rgx.search(lease_detail)
    if not match:
        raise RuntimeError('Unable to find well type!')
    return match.group(1)
def rrc_production_query(lease, district, well_type,
                         start_month='01', start_year='1993',
                         end_month='12', end_year='2018'):
    """POST a specific-lease production CSV query to the RRC EWA site.

    Parameters
    ----------
    lease : str
        RRC lease number (as returned by extract_lease_no).
    district : str
        RRC district code.
    well_type : str
        'O' for oil leases or 'G' for gas leases.
    start_month, start_year, end_month, end_year : str, optional
        Reporting window.  Defaults preserve the previously hard-coded
        range of 01/1993 through 12/2018.

    Returns
    -------
    str
        Decoded CSV report body.

    Raises
    ------
    RuntimeError
        If the HTTP status is not 200.
    """
    # The parameters below mirror the hidden form fields of the EWA
    # specific-lease query page; most are boilerplate required by the site.
    request_params = {
        'MIME Type': 'application/x-www-form-urlencoded;charset=utf-8',
        'actionManager.actionRcrd[0].actionDisplayNmHndlr.inputValue': 'Search Criteria',
        'actionManager.actionRcrd[0].actionHndlr.inputValue': '/specificLeaseQueryAction.do',
        'actionManager.actionRcrd[0].actionMethodHndlr.inputValue': 'unspecified',
        'actionManager.actionRcrd[0].actionParameterHndlr.inputValue': 'methodToCall',
        'actionManager.actionRcrd[0].actionParametersHndlr.inputValue': '',
        'actionManager.actionRcrd[0].contextPathHndlr.inputValue': '/EWA',
        'actionManager.actionRcrd[0].hostHndlr.inputValue': 'webapps2.rrc.state.tx.us:80',
        'actionManager.actionRcrd[0].pagerParameterKeyHndlr.inputValue': '',
        'actionManager.actionRcrd[0].returnIndexHndlr.inputValue': '0',
        'actionManager.currentIndexHndlr.inputValue': '0',
        'actionManager.recordCountHndlr.inputValue': '1',
        'methodToCall': 'generateSpecificLeaseCSVReport',
        'searchArgs.activeTabsFlagwordHndlr.inputValue': '0',
        'searchArgs.leaseNumberArg': lease,
        'searchArgs.districtCodeArg': district,
        'searchArgs.oilOrGasArg': well_type,
        'searchArgs.startMonthArg': start_month,
        'searchArgs.startYearArg': start_year,
        'searchArgs.endMonthArg': end_month,
        'searchArgs.endYearArg': end_year,
        'searchArgs.orderByHndlr.inputValue': '',
        'searchArgs.searchType': 'specificLease',
        'searchType': 'specificLease',
        'submit': 'Submit',
        'viewType': 'init'
    }
    request = urllib.request.Request(
        LEASE_PRODUCTION_URL,
        urllib.parse.urlencode(request_params).encode('utf-8'),
        {'user-agent': 'Mozilla/5.0'},
        method='POST')
    with urllib.request.urlopen(request) as response:
        if response.status != 200:
            raise RuntimeError('HTTP request failed.')
        data = response.read()
    return data.decode()
def parse_production_csv(csv_data, well_type):
    """Parse the CSV report returned by the RRC lease-production query.

    Parameters
    ----------
    csv_data : str
        Raw CSV text as returned by rrc_production_query.  The first
        10 lines are report boilerplate and are skipped.
    well_type : str
        'O' (columns: Oil/Gas) or 'G' (columns: Gas/Condensate).

    Returns
    -------
    list of dict
        One record per month with production/disposition volumes as
        floats (thousands separators removed; unparseable cells become
        0.0).  'Operator' and 'Field' are carried forward from the
        previous row when absent.  The trailing totals row is dropped.

    Raises
    ------
    RuntimeError
        If well_type is neither 'O' nor 'G'.
    """
    # Column labels for the two numeric column pairs differ by lease type.
    labels = {'O': ('Oil', 'Gas'), 'G': ('Gas', 'Condensate')}
    if well_type not in labels:
        raise RuntimeError('Invalid well type!')
    first, second = labels[well_type]

    def _num(cell):
        # strip thousands separators; default unparseable cells to 0.0
        return try_parse(cell.replace(',', ''), float, 0.0)

    csv_reader = csv.reader(io.StringIO(csv_data))
    for _ in range(10):
        next(csv_reader)  # skip report header boilerplate
    data = []
    for row in csv_reader:
        data.append({
            'Month': row[0],
            first + ' Production': _num(row[1]),
            first + ' Disposition': _num(row[2]),
            second + ' Production': _num(row[3]),
            second + ' Disposition': _num(row[4]),
            # short rows omit Operator/Field: carry forward previous value
            'Operator': (row[5] if len(row) > 5
                         else (data[-1]['Operator'] if data else '')),
            'Field': (row[7] if len(row) > 7
                      else (data[-1]['Field'] if data else ''))
        })
    if data:  # guard: empty report would otherwise raise IndexError
        del data[-1]  # remove totals row
    return data
def try_parse(val, typ, default):
    """Convert val with typ(); return default when a ValueError occurs.

    Other exceptions (e.g. TypeError for None input) propagate.
    """
    try:
        result = typ(val)
    except ValueError:
        result = default
    return result
def get_prod(api):
    """Look up lease info for an API number and fetch/plot its production.

    Returns (lease, district, well_type, production) where production
    is a pandas DataFrame, or the empty string when the production
    query or plotting failed.

    Note: gas leases have no 'Oil Production' column, so plotting can
    raise KeyError; the try block keeps this a best-effort operation.
    """
    lease, district, well_type = lease_from_API(api)
    p = ''
    try:
        p = production_from_lease(lease, district, well_type)
        plt.plot(p['Month'], p['Oil Production'])
        plt.plot(p['Month'], p['Gas Production'])
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behavior but only
        # for ordinary exceptions
        pass
    return lease, district, well_type, p
# Example 10-digit API numbers (2-digit state prefix + county + well id)
apis = ['4205130712',
        '4230132329',
        '4230130721',
        '4213536313']
# Fetch (and plot) the production history for each example well
for api in apis:
    lease, district, well_type, prod = get_prod(api)
resp = gis_query(api) | [
"areed145@gmail.com"
] | areed145@gmail.com |
05147268a82b27e5086c58f0ccec58fd4ba8cfb7 | 72a9d5019a6cc57849463fc315eeb0f70292eac8 | /Python-Algorithms/Clustering/SOM/mvpa2/tests/test_surfing.py | f194e68a89fd6eb3f531edb3dd3199b891bf3468 | [] | no_license | lydiawawa/Machine-Learning | 393ce0713d3fd765c8aa996a1efc9f1290b7ecf1 | 57389cfa03a3fc80dc30a18091629348f0e17a33 | refs/heads/master | 2020-03-24T07:53:53.466875 | 2018-07-22T23:01:42 | 2018-07-22T23:01:42 | 142,578,611 | 1 | 0 | null | 2018-07-27T13:08:47 | 2018-07-27T13:08:47 | null | UTF-8 | Python | false | false | 43,007 | py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA surface searchlight and related utilities"""
from mvpa2.testing import *
skip_if_no_external('nibabel')
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
import nibabel as nb
import os
import tempfile
from mvpa2.testing.datasets import datasets
from mvpa2 import cfg
from mvpa2.base import externals
from mvpa2.datasets import Dataset, hstack
from mvpa2.measures.base import Measure
from mvpa2.datasets.mri import fmri_dataset
from mvpa2.misc.surfing import volgeom, volsurf, \
volume_mask_dict, surf_voxel_selection, \
queryengine
from mvpa2.support.nibabel import surf, surf_fs_asc, surf_gifti
from mvpa2.measures.searchlight import sphere_searchlight, Searchlight
from mvpa2.misc.neighborhood import Sphere
if externals.exists('h5py'):
from mvpa2.base.hdf5 import h5save, h5load
class SurfTests(unittest.TestCase):
"""Test for surfaces
NNO Aug 2012
'Ground truth' is whatever output is returned by the implementation
as of mid-Aug 2012"""
@with_tempfile('.asc', 'test_surf')
def test_surf(self, temp_fn):
    """Some simple testing with surfaces

    Covers surface construction, arithmetic, topology comparison,
    neighbor and Dijkstra distances, low-to-high resolution mapping,
    ASCII/HDF5 I/O roundtrips, and generated plane/bar shapes.
    Expected values are hard-coded ground truth (see class docstring).
    """
    s = surf.generate_sphere(10)
    assert_true(s.nvertices == 102)
    assert_true(s.nfaces == 200)
    v = s.vertices
    f = s.faces
    assert_true(v.shape == (102, 3))
    assert_true(f.shape == (200, 3))
    # another surface: scaling/offset keeps topology, transforms vertices
    t = s * 10 + 2
    assert_true(t.same_topology(s))
    assert_array_equal(f, t.faces)
    assert_array_equal(v * 10 + 2, t.vertices)
    # allow updating, but should not affect original array
    # CHECKME: maybe we want to throw an exception instead
    assert_true((v * 10 + 2 == t.vertices).all().all())
    assert_true((s.vertices * 10 + 2 == t.vertices).all().all())
    # a few checks on vertices and nodes (spot-check coordinates/indices)
    v_check = {40: (0.86511144, -0.28109175, -0.41541501),
               10: (0.08706015, -0.26794358, -0.95949297)}
    f_check = {10: (7, 8, 1), 40: (30, 31, 21)}
    vf_checks = [(v_check, lambda x:x.vertices),
                 (f_check, lambda x:x.faces)]
    eps = .0001
    for cmap, f in vf_checks:
        for k, v in cmap.iteritems():
            surfval = f(s)[k, :]
            assert_true((abs(surfval - v) < eps).all())
    # make sure same topology fails with different topology
    u = surf.generate_cube()
    assert_false(u.same_topology(s))
    # check that neighbours are computed correctly
    # even if we nuke the topology afterwards
    for _ in [0, 1]:
        nbrs = s.neighbors
        n_check = [(0, 96, 0.284629),
                   (40, 39, 0.56218349),
                   (100, 99, 0.1741202)]
        for i, j, k in n_check:
            assert_true(abs(nbrs[i][j] - k) < eps)

    def assign_zero(x):
        # faces must not be mutable in place; used below via assert_raises
        x.faces[:, :] = 0
        return None

    assert_raises((ValueError, RuntimeError), assign_zero, s)
    # see if mapping to high res works
    h = surf.generate_sphere(40)
    low2high = s.map_to_high_resolution_surf(h, .1)
    partmap = {7: 141, 8: 144, 9: 148, 10: 153, 11: 157, 12: 281}
    for k, v in partmap.iteritems():
        assert_true(low2high[k] == v)
    # ensure that slow implementation gives same results as fast one
    low2high_slow = s.map_to_high_resolution_surf(h, .1)
    for k, v in low2high.iteritems():
        assert_true(low2high_slow[k] == v)
    # should fail if epsilon is too small
    assert_raises(ValueError,
                  lambda x:x.map_to_high_resolution_surf(h, .01), s)
    n2f = s.node2faces
    for i in xrange(s.nvertices):
        nf = [10] if i < 2 else [5, 6]  # number of faces expected
        assert_true(len(n2f[i]) in nf)
    # test dijkstra distances from node 2 (spot-check a few nodes)
    ds2 = s.dijkstra_distance(2)
    some_ds = {0: 3.613173280799, 1: 0.2846296765, 2: 0.,
               52: 1.87458018, 53: 2.0487004817, 54: 2.222820777,
               99: 3.32854360, 100: 3.328543604, 101: 3.3285436042}
    eps = np.finfo('f').eps
    for k, v in some_ds.iteritems():
        assert_true(abs(v - ds2[k]) < eps)
    # test I/O (through ascii files)
    surf.write(temp_fn, s, overwrite=True)
    s2 = surf.read(temp_fn)
    # test i/o and ensure that the loaded instance is trained
    if externals.exists('h5py'):
        h5save(temp_fn, s2)
        s2 = h5load(temp_fn)
    assert_array_almost_equal(s.vertices, s2.vertices, 4)
    assert_array_almost_equal(s.faces, s2.faces, 4)
    # test plane (new feature end of August 2012)
    s3 = surf.generate_plane((0, 0, 0), (2, 0, 0), (0, 1, 0), 10, 20)
    assert_equal(s3.nvertices, 200)
    assert_equal(s3.nfaces, 342)
    assert_array_almost_equal(s3.vertices[-1, :], np.array([18., 19, 0.]))
    assert_array_almost_equal(s3.faces[-1, :], np.array([199, 198, 179]))
    # test bar
    p, q = (0, 0, 0), (100, 0, 0)
    s4 = surf.generate_bar(p, q, 10, 12)
    assert_equal(s4.nvertices, 26)
    assert_equal(s4.nfaces, 48)
def test_surf_border(self):
    """Border detection: none on a closed sphere, edge nodes on a plane."""
    sphere = surf.generate_sphere(3)
    # a closed sphere has no border nodes at all
    assert_array_equal(sphere.nodes_on_border(), [False] * 11)
    plane = surf.generate_plane((0, 0, 0), (0, 1, 0), (1, 0, 0), 10, 10)
    coords = plane.vertices
    # border nodes are exactly those on the plane's four edges
    on_edge = np.logical_or(
        np.logical_or(coords[:, 0] == 0, coords[:, 1] == 0),
        np.logical_or(coords[:, 0] == 9, coords[:, 1] == 9))
    assert_array_equal(plane.nodes_on_border(), on_edge)
    # the single-node query form works too (corner node 0 is on border)
    assert_true(plane.nodes_on_border(0))
def test_surf_border_nonconnected_nodes(self):
    """Nodes of removed faces are border; an isolated node is not."""
    cube = surf.generate_cube()
    # append one vertex that belongs to no face at all
    verts = np.vstack((cube.vertices, [2, 2, 2]))
    # drop the last two faces so nodes 4..7 lose full connectivity
    trimmed = surf.Surface(verts, cube.faces[:-2])
    expected = np.asarray([False] * 4 + [True] * 4 + [False])
    assert_array_equal(trimmed.nodes_on_border(), expected)
def test_surf_normalized(self):
    """surf.normalized returns unit-norm vectors in the same direction."""
    def check_unit_norm(vec):
        # squared norm must be 1 and the vector one-dimensional
        assert_almost_equal(1., np.sum(vec * vec))
        assert_equal(vec.shape, (len(vec),))

    def check_same_direction(a, b):
        # dot product equals product of norms iff vectors are parallel
        assert_almost_equal(a.dot(b), (a.dot(a) * b.dot(b)) ** .5)

    def check_vector(vec):
        normed = surf.normalized(vec)
        check_unit_norm(normed)
        check_same_direction(vec, normed)
        return normed

    for shape in [(8,), (7, 4)]:
        data = np.random.normal(size=shape)
        if len(shape) == 1:
            check_vector(data)
        else:
            # matrix input: each row is normalized independently
            normed_all = surf.normalized(data)
            for i in xrange(data.shape[1]):
                row_normed = check_vector(data[i, :])
                assert_array_almost_equal(row_normed, normed_all[i, :])
@with_tempfile('.asc', 'test_surf')
def test_surf_fs_asc(self, temp_fn):
    """Roundtrip a sphere through FreeSurfer ASCII format, then check
    left/right sphere mapping and hemisphere repositioning utilities."""
    s = surf.generate_sphere(5) * 100
    surf_fs_asc.write(temp_fn, s, overwrite=True)
    t = surf_fs_asc.read(temp_fn)
    assert_array_almost_equal(s.vertices, t.vertices)
    # NOTE(review): this line duplicates the previous assertion;
    # presumably faces were meant to be compared -- confirm
    assert_array_almost_equal(s.vertices, t.vertices)
    # rotate 180 deg around z to mimic the opposite hemisphere
    theta = np.asarray([0, 0., 180.])
    r = s.rotate(theta, unit='deg')
    l2r = surf.get_sphere_left_right_mapping(s, r)
    l2r_expected = [0, 1, 2, 6, 5, 4, 3, 11, 10, 9, 8, 7, 15, 14, 13, 12,
                    16, 19, 18, 17, 21, 20, 23, 22, 26, 25, 24]
    assert_array_equal(l2r, np.asarray(l2r_expected))
    # reposition with each supported side-facing mode:
    # (a)nterior, (p)osterior, (i)nferior, (s)uperior, (m)edial
    sides_facing = 'apism'
    for side_facing in sides_facing:
        l, r = surf.reposition_hemisphere_pairs(s + 10., t + (-10.),
                                                side_facing)
        m = surf.merge(l, r)
        # not sure at the moment why medial rotation
        # messes up - but leave for now
        eps = 666 if side_facing == 'm' else .001
        assert_true((abs(m.center_of_mass) < eps).all())
@with_tempfile('.nii', 'test_vol')
def test_volgeom(self, temp_fn):
    """Exercise VolGeom coordinate conversions (lin/ijk/xyz), shape
    comparison, containment tests, rounding behavior and NIfTI I/O."""
    sz = (17, 71, 37, 73) # size of 4-D 'brain volume'
    d = 2. # voxel size
    xo, yo, zo = -6., -12., -20. # origin
    mx = np.identity(4, np.float) * d # affine transformation matrix
    mx[3, 3] = 1
    mx[0, 3] = xo
    mx[1, 3] = yo
    mx[2, 3] = zo
    vg = volgeom.VolGeom(sz, mx) # initialize volgeom
    # shape equality ignores the 4th (time) dimension; voxel-count
    # equality only needs matching 3-D extent
    eq_shape_nvoxels = {(17, 71, 37): (True, True),
                        (71, 17, 37, 1): (False, True),
                        (17, 71, 37, 2): (True, True),
                        (17, 71, 37, 73): (True, True),
                        (2, 2, 2): (False, False)}
    for other_sz, (eq_shape, eq_nvoxels) in eq_shape_nvoxels.iteritems():
        other_vg = volgeom.VolGeom(other_sz, mx)
        assert_equal(other_vg.same_shape(vg), eq_shape)
        assert_equal(other_vg.nvoxels_mask == vg.nvoxels_mask, eq_nvoxels)
    nv = sz[0] * sz[1] * sz[2] # number of voxels
    nt = sz[3] # number of time points
    assert_equal(vg.nvoxels, nv)
    # a couple of hard-coded test cases
    # last two are outside the volume
    linidxs = [0, 1, sz[2], sz[1] * sz[2], nv - 1, -1, nv]
    subidxs = ([(0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0),
                (sz[0] - 1, sz[1] - 1, sz[2] - 1)]
               + [(sz[0], sz[1], sz[2])] * 2)
    xyzs = ([(xo, yo, zo), (xo, yo, zo + d), (xo, yo + d, zo),
             (xo + d, yo, zo),
             (xo + d * (sz[0] - 1), yo + d * (sz[1] - 1), zo + d * (sz[2] - 1))]
            + [(np.nan, np.nan, np.nan)] * 2)
    for i, linidx in enumerate(linidxs):
        lin = np.asarray([linidx])
        ijk = vg.lin2ijk(lin)
        ijk_expected = np.reshape(np.asarray(subidxs[i]), (1, 3))
        assert_array_almost_equal(ijk, ijk_expected)
        xyz = vg.lin2xyz(lin)
        xyz_expected = np.reshape(np.asarray(xyzs[i]), (1, 3))
        assert_array_almost_equal(xyz, xyz_expected)
    # check that some identities hold
    ab, bc, ac = vg.lin2ijk, vg.ijk2xyz, vg.lin2xyz
    ba, cb, ca = vg.ijk2lin, vg.xyz2ijk, vg.xyz2lin
    identities = [lambda x: ab(ba(x)),
                  lambda x: bc(cb(x)),
                  lambda x: ac(ca(x)),
                  lambda x: ba(ab(x)),
                  lambda x: cb(bc(x)),
                  lambda x: ca(ac(x)),
                  lambda x: bc(ab(ca(x))),
                  lambda x: ba(cb(ac(x)))]
    # 0=lin, 1=ijk, 2=xyz
    identities_input = [1, 2, 2, 0, 1, 0, 2, 0]
    # voxel indices to test
    linrange = [0, 1, sz[2], sz[1] * sz[2]] + range(0, nv, nv // 100)
    lin = np.reshape(np.asarray(linrange), (-1,))
    ijk = vg.lin2ijk(lin)
    xyz = vg.ijk2xyz(ijk)
    for j, identity in enumerate(identities):
        inp = identities_input[j]
        x = {0: lin,
             1: ijk,
             2: xyz}[inp]
        assert_array_equal(x, identity(x))
    # check that masking works
    assert_true(vg.contains_lin(lin).all())
    assert_false(vg.contains_lin(-lin - 1).any())
    assert_true(vg.contains_ijk(ijk).all())
    assert_false(vg.contains_ijk(-ijk - 1).any())
    # ensure that we have no rounding issues: offsets under half a voxel
    # round back to the same voxel, larger ones must not
    deltas = [-.51, -.49, 0., .49, .51]
    should_raise = [True, False, False, False, True]
    for delta, r in zip(deltas, should_raise):
        xyz_d = xyz + delta * d
        lin_d = vg.xyz2lin(xyz_d)
        if r:
            assert_raises(AssertionError,
                          assert_array_almost_equal, lin_d, lin)
        else:
            assert_array_almost_equal(lin_d, lin)
    # some I/O testing
    img = vg.get_empty_nifti_image()
    img.to_filename(temp_fn)
    assert_true(os.path.exists(temp_fn))
    vg2 = volgeom.from_any(img)
    vg3 = volgeom.from_any(temp_fn)
    assert_array_equal(vg.affine, vg2.affine)
    assert_array_equal(vg.affine, vg3.affine)
    assert_equal(vg.shape[:3], vg2.shape[:3], 0)
    assert_equal(vg.shape[:3], vg3.shape[:3], 0)
    # string/repr building must not fail
    assert_true(len('%s%r' % (vg, vg)) > 0)
def test_volgeom_masking(self):
    """Check masked voxel counts and mask dilation in VolGeom.

    Builds a volume where every `maskstep`-th x-slab is in the mask,
    then verifies that dilating the mask grows monotonically and that
    Sphere-based and scalar dilation arguments agree."""
    maskstep = 5
    vg = volgeom.VolGeom((2 * maskstep, 2 * maskstep, 2 * maskstep), np.identity(4))
    mask = vg.get_empty_array()
    sh = vg.shape
    # mask a subset of the voxels (a regular grid of slabs)
    rng = range(0, sh[0], maskstep)
    for i in rng:
        for j in rng:
            for k in rng:
                mask[i, j, k] = 1
    # make a new volgeom instance
    vg = volgeom.VolGeom(vg.shape, vg.affine, mask)
    data = vg.get_masked_nifti_image(nt=1)
    msk = vg.get_masked_nifti_image()
    dset = fmri_dataset(data, mask=msk)
    vg_dset = volgeom.from_any(dset)
    # ensure that the mask is set properly and
    assert_equal(vg.nvoxels, vg.nvoxels_mask * maskstep ** 3)
    assert_equal(vg_dset, vg)
    dilates = range(0, 8, 2)
    nvoxels_masks = [] # keep track of number of voxels for each size
    for dilate in dilates:
        # True once the dilation radius exceeds the mask grid diagonal
        covers_full_volume = dilate * 2 >= maskstep * 3 ** .5 + 1
        # constr gets values: None, Sphere(0), 2, Sphere(2), ...
        for i, constr in enumerate([Sphere, lambda x:x if x else None]):
            dilater = constr(dilate)
            img_dilated = vg.get_masked_nifti_image(dilate=dilater)
            data = img_dilated.get_data()
            assert_array_equal(data, vg.get_masked_array(dilate=dilater))
            n = np.sum(data)
            # number of voxels in mask is increasing
            assert_true(all(n >= p for p in nvoxels_masks))
            # results should be identical irrespective of constr
            if i == 0:
                # - first call with this value of dilate: has to be more
                # voxels than very previous dilation value, unless the
                # full volume is covered - then it can be equal too
                # - every next call: ensure size matches
                cmp = lambda x, y:(x >= y if covers_full_volume else x > y)
                assert_true(all(cmp(n, p) for p in nvoxels_masks))
                nvoxels_masks.append(n)
            else:
                # same size as previous call
                assert_equal(n, nvoxels_masks[-1])
        # if dilate is not None or zero, then it should
        # have selected all the voxels if the radius is big enough
        assert_equal(np.sum(data) == vg.nvoxels, covers_full_volume)
def test_volsurf(self):
    """Node-to-voxel mapping for increasingly dense grey-matter sampling.

    Uses two concentric spheres as white/pial surfaces and verifies
    hard-coded expected voxel counts for several (steps, start, stop)
    sampling settings."""
    vg = volgeom.VolGeom((50, 50, 50), np.identity(4))
    density = 40
    outer = surf.generate_sphere(density) * 25. + 25
    inner = surf.generate_sphere(density) * 20. + 25
    # increasingly select more voxels in 'grey matter'
    steps_start_stop = [(1, .5, .5), (5, .5, .5), (3, .3, .7),
                        (5, .3, .7), (5, 0., 1.), (10, 0., 1.)]
    mp = None
    expected_keys = set(range(density ** 2 + 2))
    selection_counter = []
    voxel_counter = []
    for sp, sa, so in steps_start_stop:
        vs = volsurf.VolSurfMaximalMapping(vg, outer, inner, (outer + inner) * .5, sp, sa, so)
        n2v = vs.get_node2voxels_mapping()
        if mp is None:
            mp = n2v
        # every node must be a key in the mapping
        assert_equal(expected_keys, set(n2v.keys()))
        counter = 0
        for k, v2pos in n2v.iteritems():
            for v, pos in v2pos.iteritems():
                # should be close to grey matter
                assert_true(-1. <= pos <= 2.)
                counter += 1
        selection_counter.append(counter)
        img = vs.voxel_count_nifti_image()
        voxel_counter.append(np.sum(img.get_data() > 0))
    # hard coded number of expected voxels
    selection_expected = [1602, 1602, 4618, 5298, 7867, 10801]
    assert_equal(selection_counter, selection_expected)
    voxel_expected = [1498, 1498, 4322, 4986, 7391, 10141]
    assert_equal(voxel_counter, voxel_expected)
    # check that string building works
    assert_true(len('%s%r' % (vs, vs)) > 0)
def test_volsurf_surf_from_volume(self):
    """Volume-derived intermediate surface and VolumeBasedSurface agree
    on circlearound_n2d queries for a range of centers and radii."""
    affine = np.eye(4)
    affine[0, 0] = affine[1, 1] = affine[2, 2] = 3
    shape = (40, 40, 40)
    geom = volgeom.VolGeom(shape, affine)
    from_vol = volsurf.from_volume(geom).intermediate_surface
    vol_based = volsurf.VolumeBasedSurface(geom)
    # include in-volume, out-of-volume and tuple-style centers
    for center in [0, 10, 10000, (-1, -1, -1), (5, 5, 5)]:
        for radius in [0, 10, 20, 100]:
            assert_equal(from_vol.circlearound_n2d(center, radius),
                         vol_based.circlearound_n2d(center, radius))
def test_volume_mask_dict(self):
    # also tests the outside_node_margin feature
    """Voxel selection on a slab-masked volume: checks the number of
    selected centers per outside_node_margin value, and that
    target2nearest_source is consistent with Euclidean distances."""
    sh = (10, 10, 10)
    msk = np.zeros(sh)
    # mask every other x-slab
    for i in xrange(0, sh[0], 2):
        msk[i, :, :] = 1
    vol_affine = np.identity(4)
    vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 2
    vg = volgeom.VolGeom(sh, vol_affine, mask=msk)
    density = 10
    outer = surf.generate_sphere(density) * 10. + 5
    inner = surf.generate_sphere(density) * 5. + 5
    intermediate = outer * .5 + inner * .5
    xyz = intermediate.vertices
    radius = 50
    # margins None/0 exclude far-outside nodes; large/inf/True keep all
    outside_node_margins = [None, 0, 100., np.inf, True]
    expected_center_count = [87] * 2 + [intermediate.nvertices] * 3
    for k, outside_node_margin in enumerate(outside_node_margins):
        sel = surf_voxel_selection.run_voxel_selection(radius, vg, inner,
                                                       outer, outside_node_margin=outside_node_margin)
        assert_equal(intermediate, sel.source)
        assert_equal(len(sel.keys()), expected_center_count[k])
        assert_true(set(sel.aux_keys()).issubset(set(['center_distances',
                                                      'grey_matter_position'])))
        msk_lin = msk.ravel()
        sel_msk_lin = sel.get_mask().ravel()
        for i in xrange(vg.nvoxels):
            if msk_lin[i]:
                src = sel.target2nearest_source(i)
                # voxel has a source iff it is in the selection mask
                assert_false((src is None) ^ (sel_msk_lin[i] == 0))
                if src is None:
                    continue
                # index of node nearest to voxel i
                src_anywhere = sel.target2nearest_source(i,
                                                         fallback_euclidean_distance=True)
                # coordinates of node nearest to voxel i
                xyz_src = xyz[src_anywhere]
                # coordinates of voxel i
                xyz_trg = vg.lin2xyz(np.asarray([i]))
                # distance between node nearest to voxel i, and voxel i
                # this should be the smallest distancer
                d = volgeom.distance(np.reshape(xyz_src, (1, 3)), xyz_trg)
                # distances between all nodes and voxel i
                ds = volgeom.distance(xyz, xyz_trg)
                # order of the distances
                # NOTE(review): is_ds and lin_src below are computed but
                # never used -- presumably leftovers from a stricter check
                is_ds = np.argsort(ds.ravel())
                # go over all the nodes
                # require that the node is in the volume
                # mask
                # index of node nearest to voxel i
                ii = np.argmin(ds)
                xyz_min = xyz[ii]
                lin_min = vg.xyz2lin([xyz_min])
                # linear index of voxel that contains xyz_src
                lin_src = vg.xyz2lin(np.reshape(xyz_src, (1, 3)))
                # when using multi-core support,
                # pickling and unpickling can reduce the precision
                # a little bit, causing rounding errors
                eps = 1e-14
                delta = np.abs(ds[ii] - d)
                assert_false(delta > eps and ii in sel and
                             i in sel[ii] and
                             vg.contains_lin(lin_min))
def test_surf_voxel_selection(self):
    """End-to-end surface-based voxel selection.

    Runs voxel selection with several distance metrics, radii and
    numbers of searchlight centers, then checks: voxel counts against
    hard-coded ground truth, HDF5 I/O roundtrips, agreement between
    in-memory and file-based selection, searchlight execution through
    SurfaceVerticesQueryEngine, nearest-node consistency, and handling
    of duplicated features via hstack'ed datasets."""
    vol_shape = (10, 10, 10)
    vol_affine = np.identity(4)
    vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 5
    vg = volgeom.VolGeom(vol_shape, vol_affine)
    density = 10
    outer = surf.generate_sphere(density) * 25. + 15
    inner = surf.generate_sphere(density) * 20. + 15
    vs = volsurf.VolSurfMaximalMapping(vg, outer, inner)
    nv = outer.nvertices
    # select under variety of parameters
    # parameters are distance metric (dijkstra or euclidean),
    # radius, and number of searchlight centers
    params = [('d', 1., 10), ('d', 1., 50), ('d', 1., 100), ('d', 2., 100),
              ('e', 2., 100), ('d', 2., 100), ('d', 20, 100),
              ('euclidean', 5, None), ('dijkstra', 10, None)]
    # function that indicates for which parameters the full test is run
    test_full = lambda x: len(x[0]) > 1 or x[2] == 100
    expected_labs = ['grey_matter_position',
                     'center_distances']
    voxcount = []
    tested_double_features = False
    for param in params:
        distance_metric, radius, ncenters = param
        srcs = range(0, nv, nv // (ncenters or nv))
        sel = surf_voxel_selection.voxel_selection(vs, radius,
                                                   source_surf_nodes=srcs,
                                                   distance_metric=distance_metric)
        # see how many voxels were selected
        vg = sel.volgeom
        datalin = np.zeros((vg.nvoxels, 1))
        mp = sel
        for k, idxs in mp.iteritems():
            if idxs is not None:
                datalin[idxs] = 1
        voxcount.append(np.sum(datalin))
        if test_full(param):
            assert_equal(np.sum(datalin), np.sum(sel.get_mask()))
            assert_true(len('%s%r' % (sel, sel)) > 0)
            # see if voxels containing inner and outer
            # nodes were selected
            for sf in [inner, outer]:
                for k, idxs in mp.iteritems():
                    xyz = np.reshape(sf.vertices[k, :], (1, 3))
                    linidx = vg.xyz2lin(xyz)
                    # only required if xyz is actually within the volume
                    assert_equal(linidx in idxs, vg.contains_lin(linidx))
            # check that it has all the attributes
            labs = sel.aux_keys()
            assert_true(all([lab in labs for lab in expected_labs]))
            if externals.exists('h5py'):
                # some I/O testing
                fd, fn = tempfile.mkstemp('.h5py', 'test'); os.close(fd)
                h5save(fn, sel)
                sel2 = h5load(fn)
                os.remove(fn)
                assert_equal(sel, sel2)
            else:
                sel2 = sel
            # check that mask is OK even after I/O
            assert_array_equal(sel.get_mask(), sel2.get_mask())
            # test I/O with surfaces
            # XXX the @tempfile decorator only supports a single filename
            # hence this method does not use it
            fd, outerfn = tempfile.mkstemp('outer.asc', 'test'); os.close(fd)
            fd, innerfn = tempfile.mkstemp('inner.asc', 'test'); os.close(fd)
            fd, volfn = tempfile.mkstemp('vol.nii', 'test'); os.close(fd)
            surf.write(outerfn, outer, overwrite=True)
            surf.write(innerfn, inner, overwrite=True)
            img = sel.volgeom.get_empty_nifti_image()
            img.to_filename(volfn)
            sel3 = surf_voxel_selection.run_voxel_selection(radius, volfn, innerfn,
                                                            outerfn, source_surf_nodes=srcs,
                                                            distance_metric=distance_metric)
            outer4 = surf.read(outerfn)
            inner4 = surf.read(innerfn)
            vsm4 = vs = volsurf.VolSurfMaximalMapping(vg, inner4, outer4)
            # check that two ways of voxel selection match
            sel4 = surf_voxel_selection.voxel_selection(vsm4, radius,
                                                        source_surf_nodes=srcs,
                                                        distance_metric=distance_metric)
            assert_equal(sel3, sel4)
            os.remove(outerfn)
            os.remove(innerfn)
            os.remove(volfn)
            # compare sel3 with other selection results
            # NOTE: which voxels are precisely selected by sel can be quite
            # off from those in sel3, as writing the surfaces imposes
            # rounding errors and the sphere is very symmetric, which
            # means that different neighboring nodes are selected
            # to select a certain number of voxels.
            sel3cmp_difference_ratio = [(sel, .2), (sel4, 0.)]
            for selcmp, ratio in sel3cmp_difference_ratio:
                nunion = ndiff = 0
                for k in selcmp.keys():
                    p = set(sel3.get(k))
                    q = set(selcmp.get(k))
                    nunion += len(p.union(q))
                    ndiff += len(p.symmetric_difference(q))
                assert_true(float(ndiff) / float(nunion) <= ratio)
            # check searchlight call
            # as of late Aug 2012, this is with the fancy query engine
            # as implemented by Yarik
            mask = sel.get_mask()
            keys = None if ncenters is None else sel.keys()
            dset_data = np.reshape(np.arange(vg.nvoxels), vg.shape)
            dset_img = nb.Nifti1Image(dset_data, vg.affine)
            dset = fmri_dataset(samples=dset_img, mask=mask)
            qe = queryengine.SurfaceVerticesQueryEngine(sel,
                                                        # you can optionally add additional
                                                        # information about each near-disk-voxels
                                                        add_fa=['center_distances',
                                                                'grey_matter_position'])
            # test i/o ensuring that when loading it is still trained
            if externals.exists('h5py'):
                fd, qefn = tempfile.mkstemp('qe.hdf5', 'test'); os.close(fd)
                h5save(qefn, qe)
                qe = h5load(qefn)
                os.remove(qefn)
            assert_false('ERROR' in repr(qe))   # to check if repr works
            voxelcounter = _Voxel_Count_Measure()
            searchlight = Searchlight(voxelcounter, queryengine=qe, roi_ids=keys, nproc=1,
                                      enable_ca=['roi_feature_ids', 'roi_center_ids'])
            sl_dset = searchlight(dset)
            selected_count = sl_dset.samples[0, :]
            mp = sel
            for i, k in enumerate(sel.keys()):
                # check that number of selected voxels matches
                assert_equal(selected_count[i], len(mp[k]))
            assert_equal(searchlight.ca.roi_center_ids, sel.keys())
            assert_array_equal(sl_dset.fa['center_ids'], qe.ids)
            # check nearest node is *really* the nearest node
            allvx = sel.get_targets()
            intermediate = outer * .5 + inner * .5
            for vx in allvx:
                nearest = sel.target2nearest_source(vx)
                xyz = intermediate.vertices[nearest, :]
                sqsum = np.sum((xyz - intermediate.vertices) ** 2, 1)
                idx = np.argmin(sqsum)
                assert_equal(idx, nearest)
            if not tested_double_features:   # test only once
                # see if we have multiple features for the same voxel, we would get them all
                dset1 = dset.copy()
                dset1.fa['dset'] = [1]
                dset2 = dset.copy()
                dset2.fa['dset'] = [2]
                dset_ = hstack((dset1, dset2), 'drop_nonunique')
                dset_.sa = dset1.sa
                # dset_.a.imghdr = dset1.a.imghdr
                assert_true('imghdr' in dset_.a.keys())
                assert_equal(dset_.a['imghdr'].value, dset1.a['imghdr'].value)
                roi_feature_ids = searchlight.ca.roi_feature_ids
                sl_dset_ = searchlight(dset_)
                # and we should get twice the counts
                assert_array_equal(sl_dset_.samples, sl_dset.samples * 2)
                # compare old and new roi_feature_ids
                assert(len(roi_feature_ids) == len(searchlight.ca.roi_feature_ids))
                nfeatures = dset.nfeatures
                for old, new in zip(roi_feature_ids,
                                    searchlight.ca.roi_feature_ids):
                    # each new ids should comprise of old ones + (old + nfeatures)
                    # since we hstack'ed two datasets
                    assert_array_equal(np.hstack([(x, x + nfeatures) for x in old]),
                                       new)
                tested_double_features = True
    # check whether number of voxels were selected is as expected
    expected_voxcount = [22, 93, 183, 183, 183, 183, 183, 183, 183]
    assert_equal(voxcount, expected_voxcount)
def test_h5support(self):
sh = (20, 20, 20)
msk = np.zeros(sh)
for i in xrange(0, sh[0], 2):
msk[i, :, :] = 1
vg = volgeom.VolGeom(sh, np.identity(4), mask=msk)
density = 20
outer = surf.generate_sphere(density) * 10. + 5
inner = surf.generate_sphere(density) * 5. + 5
intermediate = outer * .5 + inner * .5
xyz = intermediate.vertices
radius = 50
backends = ['native', 'hdf5']
for i, backend in enumerate(backends):
if backend == 'hdf5' and not externals.exists('h5py'):
continue
sel = surf_voxel_selection.run_voxel_selection(radius, vg, inner,
outer, results_backend=backend)
if i == 0:
sel0 = sel
else:
assert_equal(sel0, sel)
def test_agreement_surface_volume(self):
'''test agreement between volume-based and surface-based
searchlights when using euclidean measure'''
# import runner
def sum_ds(ds):
return np.sum(ds)
radius = 3
# make a small dataset with a mask
sh = (10, 10, 10)
msk = np.zeros(sh)
for i in xrange(0, sh[0], 2):
msk[i, :, :] = 1
vg = volgeom.VolGeom(sh, np.identity(4), mask=msk)
# make an image
nt = 6
img = vg.get_masked_nifti_image(6)
ds = fmri_dataset(img, mask=msk)
# run the searchlight
sl = sphere_searchlight(sum_ds, radius=radius)
m = sl(ds)
# now use surface-based searchlight
v = volsurf.from_volume(ds)
source_surf = v.intermediate_surface
node_msk = np.logical_not(np.isnan(source_surf.vertices[:, 0]))
# check that the mask matches with what we used earlier
assert_array_equal(msk.ravel() + 0., node_msk.ravel() + 0.)
source_surf_nodes = np.nonzero(node_msk)[0]
sel = surf_voxel_selection.voxel_selection(v, float(radius),
source_surf=source_surf,
source_surf_nodes=source_surf_nodes,
distance_metric='euclidean')
qe = queryengine.SurfaceVerticesQueryEngine(sel)
sl = Searchlight(sum_ds, queryengine=qe)
r = sl(ds)
# check whether they give the same results
assert_array_equal(r.samples, m.samples)
@with_tempfile('.h5py', '_qe')
def test_surf_queryengine(self, qefn):
s = surf.generate_plane((0, 0, 0), (0, 1, 0), (0, 0, 1), 4, 5)
# add second layer
s2 = surf.merge(s, (s + (.01, 0, 0)))
ds = Dataset(samples=np.arange(20)[np.newaxis],
fa=dict(node_indices=np.arange(39, 0, -2)))
# add more features (with shared node indices)
ds3 = hstack((ds, ds, ds))
radius = 2.5
# Note: sweepargs it not used to avoid re-generating the same
# surface and dataset multiple times.
for distance_metric in ('euclidean', 'dijkstra', '<illegal>', None):
builder = lambda: queryengine.SurfaceQueryEngine(s2, radius,
distance_metric)
if distance_metric in ('<illegal>', None):
assert_raises(ValueError, builder)
continue
qe = builder()
# test i/o and ensure that the untrained instance is not trained
if externals.exists('h5py'):
h5save(qefn, qe)
qe = h5load(qefn)
# untrained qe should give errors
assert_raises(ValueError, lambda:qe.ids)
assert_raises(ValueError, lambda:qe.query_byid(0))
# node index out of bounds should give error
ds_ = ds.copy()
ds_.fa.node_indices[0] = 100
assert_raises(ValueError, lambda: qe.train(ds_))
# lack of node indices should give error
ds_.fa.pop('node_indices')
assert_raises(ValueError, lambda: qe.train(ds_))
# train the qe
qe.train(ds3)
# test i/o and ensure that the loaded instance is trained
if externals.exists('h5py'):
h5save(qefn, qe)
qe = h5load(qefn)
for node in np.arange(-1, s2.nvertices + 1):
if node < 0 or node >= s2.nvertices:
assert_raises(KeyError, lambda: qe.query_byid(node))
continue
feature_ids = np.asarray(qe.query_byid(node))
# node indices relative to ds
base_ids = feature_ids[feature_ids < 20]
# should have multiples of 20
assert_equal(set(feature_ids),
set((base_ids[np.newaxis].T + \
[0, 20, 40]).ravel()))
node_indices = list(s2.circlearound_n2d(node,
radius, distance_metric or 'dijkstra'))
fa_indices = [fa_index for fa_index, node in
enumerate(ds3.fa.node_indices)
if node in node_indices]
assert_equal(set(feature_ids), set(fa_indices))
# smoke tests
assert_true('SurfaceQueryEngine' in '%s' % qe)
assert_true('SurfaceQueryEngine' in '%r' % qe)
def test_surf_ring_queryengine(self):
s = surf.generate_plane((0, 0, 0), (0, 1, 0), (0, 0, 1), 4, 5)
# add second layer
s2 = surf.merge(s, (s + (.01, 0, 0)))
ds = Dataset(samples=np.arange(20)[np.newaxis],
fa=dict(node_indices=np.arange(39, 0, -2)))
# add more features (with shared node indices)
ds3 = hstack((ds, ds, ds))
radius = 2.5
inner_radius = 1.0
# Makes sure it raises error if inner_radius is >= radius
assert_raises(ValueError,
lambda: queryengine.SurfaceRingQueryEngine(surface=s2,
inner_radius=2.5,
radius=radius))
distance_metrics = ('euclidean', 'dijkstra', 'euclidean', 'dijkstra')
for distance_metric, include_center in zip(distance_metrics, [True, False]*2):
qe = queryengine.SurfaceRingQueryEngine(surface=s2, radius=radius,
inner_radius=inner_radius, distance_metric=distance_metric,
include_center=include_center)
# untrained qe should give errors
assert_raises(ValueError, lambda: qe.ids)
assert_raises(ValueError, lambda: qe.query_byid(0))
# node index out of bounds should give error
ds_ = ds.copy()
ds_.fa.node_indices[0] = 100
assert_raises(ValueError, lambda: qe.train(ds_))
# lack of node indices should give error
ds_.fa.pop('node_indices')
assert_raises(ValueError, lambda: qe.train(ds_))
# train the qe
qe.train(ds3)
for node in np.arange(-1, s2.nvertices + 1):
if node < 0 or node >= s2.nvertices:
assert_raises(KeyError, lambda: qe.query_byid(node))
continue
feature_ids = np.asarray(qe.query_byid(node))
# node indices relative to ds
base_ids = feature_ids[feature_ids < 20]
# should have multiples of 20
assert_equal(set(feature_ids),
set((base_ids[np.newaxis].T + \
[0, 20, 40]).ravel()))
node_indices = s2.circlearound_n2d(node,
radius, distance_metric or 'dijkstra')
fa_indices = [fa_index for fa_index, inode in
enumerate(ds3.fa.node_indices)
if inode in node_indices and node_indices[inode] > inner_radius]
if include_center and node in ds3.fa.node_indices:
fa_indices += np.where(ds3.fa.node_indices == node)[0].tolist()
assert_equal(set(feature_ids), set(fa_indices))
def test_surf_pairs(self):
o, x, y = map(np.asarray, [(0, 0, 0), (0, 1, 0), (1, 0, 0)])
d = np.asarray((0, 0, .1))
n = 10
s1 = surf.generate_plane(o, x, y, n, n)
s2 = surf.generate_plane(o + d, x, y, n, n)
s = surf.merge(s1, s2)
# try for small surface
eps = .0000001
pw = s.pairwise_near_nodes(.5)
for i in xrange(n ** 2):
d = pw.pop((i, i + 100))
assert_array_almost_equal(d, .1)
assert_true(len(pw) == 0)
pw = s.pairwise_near_nodes(.5)
for i in xrange(n ** 2):
d = pw.pop((i, i + 100))
assert_array_almost_equal(d, .1)
assert_true(len(pw) == 0)
# bigger one
pw = s.pairwise_near_nodes(1.4)
for i in xrange(n ** 2):
p, q = i // n, i % n
offsets = sum(([] if q == 0 else [-1],
[] if q == n - 1 else [+1],
[] if p == 0 else [-n],
[] if p == n - 1 else [n],
[0]), [])
for offset in offsets:
ii = i + offset + n ** 2
d = pw.pop((i, ii))
assert_true((d < .5) ^ (offset > 0))
assert_true(len(pw) == 0)
@with_tempfile('surf.surf.gii', 'surftest')
def test_surf_gifti(self, fn):
# From section 14.4 in GIFTI Surface Data Format Version 1.0
# (with some adoptions)
test_data = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE GIFTI SYSTEM "http://www.nitrc.org/frs/download.php/1594/gifti.dtd">
<GIFTI
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="http://www.nitrc.org/frs/download.php/1303/GIFTI_Caret.xsd"
Version="1.0"
NumberOfDataArrays="2">
<MetaData>
<MD>
<Name><![CDATA[date]]></Name>
<Value><![CDATA[Thu Nov 15 09:05:22 2007]]></Value>
</MD>
</MetaData>
<LabelTable/>
<DataArray Intent="NIFTI_INTENT_POINTSET"
DataType="NIFTI_TYPE_FLOAT32"
ArrayIndexingOrder="RowMajorOrder"
Dimensionality="2"
Dim0="4"
Dim1="3"
Encoding="ASCII"
Endian="LittleEndian"
ExternalFileName=""
ExternalFileOffset="">
<CoordinateSystemTransformMatrix>
<DataSpace><![CDATA[NIFTI_XFORM_TALAIRACH]]></DataSpace>
<TransformedSpace><![CDATA[NIFTI_XFORM_TALAIRACH]]></TransformedSpace>
<MatrixData>
1.000000 0.000000 0.000000 0.000000
0.000000 1.000000 0.000000 0.000000
0.000000 0.000000 1.000000 0.000000
0.000000 0.000000 0.000000 1.000000
</MatrixData>
</CoordinateSystemTransformMatrix>
<Data>
10.5 0 0
0 20.5 0
0 0 30.5
0 0 0
</Data>
</DataArray>
<DataArray Intent="NIFTI_INTENT_TRIANGLE"
DataType="NIFTI_TYPE_INT32"
ArrayIndexingOrder="RowMajorOrder"
Dimensionality="2"
Dim0="4"
Dim1="3"
Encoding="ASCII"
Endian="LittleEndian"
ExternalFileName="" ExternalFileOffset="">
<Data>
0 1 2
1 2 3
0 1 3
0 2 3
</Data>
</DataArray>
</GIFTI>'''
with open(fn, 'w') as f:
f.write(test_data)
# test I/O
s = surf.read(fn)
surf.write(fn, s)
s = surf.read(fn)
v = np.zeros((4, 3))
v[0, 0] = 10.5
v[1, 1] = 20.5
v[2, 2] = 30.5
f = np.asarray([[0, 1, 2], [1, 2, 3], [0, 1, 3], [0, 2, 3]],
dtype=np.int32)
assert_array_equal(s.vertices, v)
assert_array_equal(s.faces, f)
class _Voxel_Count_Measure(Measure):
# used to check voxel selection results
is_trained = True
def __init__(self, **kwargs):
Measure.__init__(self, **kwargs)
def _call(self, dset):
return dset.nfeatures
def suite(): # pragma: no cover
"""Create the suite"""
return unittest.makeSuite(SurfTests)
if __name__ == '__main__': # pragma: no cover
import runner
runner.run()
| [
"amir.h.jafari@okstate.edu"
] | amir.h.jafari@okstate.edu |
6f04fcd0cfa9b88135fdc8ea7230811503151a5e | 5ae040aa76f2b72fc4aec556480c41e79853c303 | /sk/auth/models.py | 1111bf8a8473bcdd0da2ba57b3eda06282d6906e | [] | no_license | captDaylight/sk | 59759b2b3ae1de0f7a13a47cd4022b61ce315463 | 6e7cd450209670527418b00c6eac51e9ca825bbb | refs/heads/master | 2020-05-17T22:22:07.859067 | 2011-11-15T01:26:44 | 2011-11-15T01:26:44 | 2,607,030 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | from django.db import models
from django import forms
class SignupForm(forms.Form):
username = forms.CharField(max_length = 100)
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput(render_value=False),max_length=100)
| [
"paul.christophe6@gmail.com"
] | paul.christophe6@gmail.com |
fd3ff28f99ec1d307668e368027869859a779fd7 | 9078942d3596480beb3198d78022e11661cb462f | /day02/06列表.py | 9f6577199a75dbf4a38c62145f5e885c271a3aaf | [] | no_license | chenyangbin/pywork | 905ed0dd4e157cc80850e62a72add12c11985bfe | c93accaaf130dbed97510492780e7358ae67efbf | refs/heads/master | 2020-05-02T09:59:22.850935 | 2019-05-24T15:21:04 | 2019-05-24T15:21:04 | 177,420,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | # 工程目录:c:\Users\bin\OneDrive\share\pywork\day02\06列表.py
# 创建日期: 2019.02.14
# 工程目标:列表的使用
# 创建作者:binyang
# -*- coding:utf-8 -*-
# 序列是Python中最基本的数据结构。序列中的每个元素都分配一个数字 - 它的位置,或索引,第一个索引是0,第二个索引是1,依此类推。
'''
# Python有6个序列的内置类型,但最常见的是列表和元组
# 列表是最常用的Python数据类型,它可以作为一个方括号内的逗号分隔值出现。
# 列表的数据项不需要具有相同的类型
# 创建一个列表,只要把逗号分隔的不同的数据项使用方括号括起来即可
'''
# 01访问列表元素值
list1 = [1,2,3,4,5,'nihao',"中"]
print("访问元素:list1 的第一个元素,所引0:", list1[6])
print("单独测试:",list1)
for i in range(7):
print("循环访问测试:",list1[i])
# 02更新列表元素 增删改查
# 改:list[索引下标]
print("原列表:", list1)
list1[6] = 6
print("新列表:", list1)
# 03 删除元素
# 删 del list[索引]
del list1[2]
print("删除第三个元素的新列表:", list1)
# 04 列表的操作符,长度,组合,重复,判断元素存在,迭代访问
'''
Python 表达式 结果 描述
len([1, 2, 3]) 3 长度
[1, 2, 3] + [4, 5, 6] [1, 2, 3, 4, 5, 6] 组合
['Hi!'] * 4 ['Hi!', 'Hi!', 'Hi!', 'Hi!'] 重复
3 in [1, 2, 3] True 元素是否存在于列表中
for x in [1, 2, 3]: print(x, end=" ") 1 2 3 迭代
'''
# 05 拼接截取
# 拼接
list2 = ['ni', 'hao', 'shijie', 'haha']
list3 = [1, 2, 3, 4, 5, 6, 7]
list4 = list2 + list3 #全拼接
list5 = list3[2:] + list2 # 从第三个元素开始拼接
print(list2)
print(list3)
print("全拼接:", list4)
print("截取拼接:",list5)
# 06嵌套列表
'''
使用嵌套列表即在列表里创建其它列表,例如:
>>>a = ['a', 'b', 'c']
>>> n = [1, 2, 3]
>>> x = [a, n]
>>> x
[['a', 'b', 'c'], [1, 2, 3]]
>>> x[0]
['a', 'b', 'c']
>>> x[0][1]
'b'
'''
# 07列表操作函数 返回元素个数,返回元素最大值,返回最小值, 将元素不可更改的元组,转换尾列表
'''
Python包含以下函数:
序号 函数
1 len(list) 列表元素个数
2 max(list) 返回列表元素最大值
3 min(list) 返回列表元素最小值
4 list(seq) 将元组转换为列表
'''
print("str5的长度:", len(list5))
# 08 列表的相关方法
'''
Python包含以下方法:
序号 方法
1 list.append(obj) 在列表末尾添加新的对象
2 list.count(obj) 统计某个元素在列表中出现的次数
3 list.extend(seq) 在列表末尾一次性追加另一个序列中的多个值(用新列表扩展原来的列表)
4 list.index(obj) 从列表中找出某个值第一个匹配项的索引位置
5 list.insert(index, obj) 将对象插入列表
6 list.pop([index=-1]) 移除列表中的一个元素(默认最后一个元素),并且返回该元素的值
7 list.remove(obj) 移除列表中某个值的第一个匹配项
8 list.reverse() 反向列表中元素
9 list.sort(cmp=None, key=None, reverse=False) 对原列表进行排序
10 list.clear() 清空列l表
11 list.copy() 复制列表
'''
list5.append('89')
print("append 在末尾插入元素:",list5)
| [
"342529137@qq.com"
] | 342529137@qq.com |
993fdb71b1cfd755ab19dfa75580530c9d7055fc | c6548d34568618afa7edc4bfb358d7f22426f18b | /project-addons/acp_contrato_bufete/__init__.py | 8d0e6954a36c6247a7571913dfbe95f5bf9a15b6 | [] | no_license | Comunitea/CMNT_00071_2016_JUA | 77b6cbb6ec8624c8ff7d26b5833b57b521d8b2a4 | 206b9fb2d4cc963c8b20001e46aa28ad38b2f7f0 | refs/heads/master | 2020-05-21T16:22:32.569235 | 2017-10-04T12:10:00 | 2017-10-04T12:10:00 | 62,816,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import acp_contrato
import res_partner
import sale_order
import wizard
import account_voucher
import account_invoice
import product
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"javierjcf@gmail.com"
] | javierjcf@gmail.com |
575034b371248054308cb2e6b02a6973dfc3768e | c8eb2007865a6918194214168c3018a6b8ff888a | /SE_Inception_resnet_v2_test.py | 8b4f21f42e87cf5372225cd3f4f715811765eff9 | [
"MIT"
] | permissive | dishen12/whale | c341aa27c4f59df365d131d3c041aaf6199195a4 | 2d5bc3aaccab87ecebe31663d2d6c99d52563cc8 | refs/heads/master | 2020-04-29T07:27:07.531557 | 2019-03-16T09:46:10 | 2019-03-16T09:46:10 | 175,953,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,945 | py | import tensorflow as tf
from tflearn.layers.conv import global_avg_pool
from tensorflow.contrib.layers import batch_norm, flatten
from tensorflow.contrib.framework import arg_scope
from whale import *
import numpy as np
weight_decay = 0.0005
momentum = 0.9
init_learning_rate = 0.1
reduction_ratio = 4
batch_size = 128
iteration = 936
# 128 * 936 ~ 119699
test_iteration = 10
total_epochs = 1000
def conv_layer(input, filter, kernel, stride=1, padding='SAME', layer_name="conv", activation=True):
with tf.name_scope(layer_name):
network = tf.layers.conv2d(inputs=input, use_bias=True, filters=filter, kernel_size=kernel, strides=stride, padding=padding)
if activation :
network = Relu(network)
return network
def Fully_connected(x, units=class_num, layer_name='fully_connected') :
with tf.name_scope(layer_name) :
return tf.layers.dense(inputs=x, use_bias=True, units=units)
def Relu(x):
return tf.nn.relu(x)
def Sigmoid(x):
return tf.nn.sigmoid(x)
def Global_Average_Pooling(x):
return global_avg_pool(x, name='Global_avg_pooling')
def Max_pooling(x, pool_size=[3,3], stride=2, padding='VALID') :
return tf.layers.max_pooling2d(inputs=x, pool_size=pool_size, strides=stride, padding=padding)
def Batch_Normalization(x, training, scope):
with arg_scope([batch_norm],
scope=scope,
updates_collections=None,
decay=0.9,
center=True,
scale=True,
zero_debias_moving_mean=True) :
return tf.cond(training,
lambda : batch_norm(inputs=x, is_training=training, reuse=None),
lambda : batch_norm(inputs=x, is_training=training, reuse=True))
def Concatenation(layers) :
return tf.concat(layers, axis=3)
def Dropout(x, rate, training) :
return tf.layers.dropout(inputs=x, rate=rate, training=training)
def Evaluate(sess):
test_acc = 0.0
test_loss = 0.0
test_pre_index = 0
add = 1000
for it in range(test_iteration):
test_batch_x = test_x[test_pre_index: test_pre_index + add]
test_batch_y = test_y[test_pre_index: test_pre_index + add]
test_pre_index = test_pre_index + add
test_feed_dict = {
x: test_batch_x,
label: test_batch_y,
learning_rate: epoch_learning_rate,
training_flag: False
}
loss_, acc_ = sess.run([cost, accuracy], feed_dict=test_feed_dict)
test_loss += loss_
test_acc += acc_
test_loss /= test_iteration # average loss
test_acc /= test_iteration # average accuracy
summary = tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss),
tf.Summary.Value(tag='test_accuracy', simple_value=test_acc)])
return test_acc, test_loss, summary
class SE_Inception_resnet_v2():
def __init__(self, x, training):
self.training = training
self.model = self.Build_SEnet(x)
def Stem(self, x, scope):
with tf.name_scope(scope) :
x = conv_layer(x, filter=32, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_conv1')
x = conv_layer(x, filter=32, kernel=[3,3], padding='VALID', layer_name=scope+'_conv2')
block_1 = conv_layer(x, filter=64, kernel=[3,3], layer_name=scope+'_conv3')
split_max_x = Max_pooling(block_1)
split_conv_x = conv_layer(block_1, filter=96, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv1')
x = Concatenation([split_max_x,split_conv_x])
split_conv_x1 = conv_layer(x, filter=64, kernel=[1,1], layer_name=scope+'_split_conv2')
split_conv_x1 = conv_layer(split_conv_x1, filter=96, kernel=[3,3], padding='VALID', layer_name=scope+'_split_conv3')
split_conv_x2 = conv_layer(x, filter=64, kernel=[1,1], layer_name=scope+'_split_conv4')
split_conv_x2 = conv_layer(split_conv_x2, filter=64, kernel=[7,1], layer_name=scope+'_split_conv5')
split_conv_x2 = conv_layer(split_conv_x2, filter=64, kernel=[1,7], layer_name=scope+'_split_conv6')
split_conv_x2 = conv_layer(split_conv_x2, filter=96, kernel=[3,3], padding='VALID', layer_name=scope+'_split_conv7')
x = Concatenation([split_conv_x1,split_conv_x2])
split_conv_x = conv_layer(x, filter=192, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv8')
split_max_x = Max_pooling(x)
x = Concatenation([split_conv_x, split_max_x])
x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
x = Relu(x)
return x
def Inception_resnet_A(self, x, scope):
with tf.name_scope(scope) :
init = x
split_conv_x1 = conv_layer(x, filter=32, kernel=[1,1], layer_name=scope+'_split_conv1')
split_conv_x2 = conv_layer(x, filter=32, kernel=[1,1], layer_name=scope+'_split_conv2')
split_conv_x2 = conv_layer(split_conv_x2, filter=32, kernel=[3,3], layer_name=scope+'_split_conv3')
split_conv_x3 = conv_layer(x, filter=32, kernel=[1,1], layer_name=scope+'_split_conv4')
split_conv_x3 = conv_layer(split_conv_x3, filter=48, kernel=[3,3], layer_name=scope+'_split_conv5')
split_conv_x3 = conv_layer(split_conv_x3, filter=64, kernel=[3,3], layer_name=scope+'_split_conv6')
x = Concatenation([split_conv_x1,split_conv_x2,split_conv_x3])
x = conv_layer(x, filter=384, kernel=[1,1], layer_name=scope+'_final_conv1', activation=False)
x = x*0.1
x = init + x
x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
x = Relu(x)
return x
def Inception_resnet_B(self, x, scope):
with tf.name_scope(scope) :
init = x
split_conv_x1 = conv_layer(x, filter=192, kernel=[1,1], layer_name=scope+'_split_conv1')
split_conv_x2 = conv_layer(x, filter=128, kernel=[1,1], layer_name=scope+'_split_conv2')
split_conv_x2 = conv_layer(split_conv_x2, filter=160, kernel=[1,7], layer_name=scope+'_split_conv3')
split_conv_x2 = conv_layer(split_conv_x2, filter=192, kernel=[7,1], layer_name=scope+'_split_conv4')
x = Concatenation([split_conv_x1, split_conv_x2])
x = conv_layer(x, filter=1152, kernel=[1,1], layer_name=scope+'_final_conv1', activation=False)
# 1154
x = x * 0.1
x = init + x
x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
x = Relu(x)
return x
def Inception_resnet_C(self, x, scope):
with tf.name_scope(scope) :
init = x
split_conv_x1 = conv_layer(x, filter=192, kernel=[1,1], layer_name=scope+'_split_conv1')
split_conv_x2 = conv_layer(x, filter=192, kernel=[1, 1], layer_name=scope + '_split_conv2')
split_conv_x2 = conv_layer(split_conv_x2, filter=224, kernel=[1, 3], layer_name=scope + '_split_conv3')
split_conv_x2 = conv_layer(split_conv_x2, filter=256, kernel=[3, 1], layer_name=scope + '_split_conv4')
x = Concatenation([split_conv_x1,split_conv_x2])
x = conv_layer(x, filter=2144, kernel=[1,1], layer_name=scope+'_final_conv2', activation=False)
# 2048
x = x * 0.1
x = init + x
x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
x = Relu(x)
return x
def Reduction_A(self, x, scope):
with tf.name_scope(scope) :
k = 256
l = 256
m = 384
n = 384
split_max_x = Max_pooling(x)
split_conv_x1 = conv_layer(x, filter=n, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv1')
split_conv_x2 = conv_layer(x, filter=k, kernel=[1,1], layer_name=scope+'_split_conv2')
split_conv_x2 = conv_layer(split_conv_x2, filter=l, kernel=[3,3], layer_name=scope+'_split_conv3')
split_conv_x2 = conv_layer(split_conv_x2, filter=m, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv4')
x = Concatenation([split_max_x, split_conv_x1, split_conv_x2])
x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
x = Relu(x)
return x
def Reduction_B(self, x, scope):
with tf.name_scope(scope) :
split_max_x = Max_pooling(x)
split_conv_x1 = conv_layer(x, filter=256, kernel=[1,1], layer_name=scope+'_split_conv1')
split_conv_x1 = conv_layer(split_conv_x1, filter=384, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv2')
split_conv_x2 = conv_layer(x, filter=256, kernel=[1,1], layer_name=scope+'_split_conv3')
split_conv_x2 = conv_layer(split_conv_x2, filter=288, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv4')
split_conv_x3 = conv_layer(x, filter=256, kernel=[1,1], layer_name=scope+'_split_conv5')
split_conv_x3 = conv_layer(split_conv_x3, filter=288, kernel=[3,3], layer_name=scope+'_split_conv6')
split_conv_x3 = conv_layer(split_conv_x3, filter=320, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv7')
x = Concatenation([split_max_x, split_conv_x1, split_conv_x2, split_conv_x3])
x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
x = Relu(x)
return x
def Squeeze_excitation_layer(self, input_x, out_dim, ratio, layer_name):
with tf.name_scope(layer_name) :
squeeze = Global_Average_Pooling(input_x)
excitation = Fully_connected(squeeze, units=out_dim / ratio, layer_name=layer_name+'_fully_connected1')
excitation = Relu(excitation)
excitation = Fully_connected(excitation, units=out_dim, layer_name=layer_name+'_fully_connected2')
excitation = Sigmoid(excitation)
excitation = tf.reshape(excitation, [-1,1,1,out_dim])
scale = input_x * excitation
return scale
def Build_SEnet(self, input_x):
#----------------------------------------------------------------------------------------------------------
#input_x = tf.pad(input_x, [[0, 0], [32, 32], [32, 32], [0, 0]])
#----------------------------------------------------------------------------------------------------------
# size 96x96
print(np.shape(input_x))
# only cifar10 architecture
x = self.Stem(input_x, scope='stem')
for i in range(5) :
x = self.Inception_resnet_A(x, scope='Inception_A'+str(i))
channel = int(np.shape(x)[-1])
x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_A'+str(i))
x = self.Reduction_A(x, scope='Reduction_A')
channel = int(np.shape(x)[-1])
x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_A')
for i in range(10) :
x = self.Inception_resnet_B(x, scope='Inception_B'+str(i))
channel = int(np.shape(x)[-1])
x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_B'+str(i))
x = self.Reduction_B(x, scope='Reduction_B')
channel = int(np.shape(x)[-1])
x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_B')
for i in range(5) :
x = self.Inception_resnet_C(x, scope='Inception_C'+str(i))
channel = int(np.shape(x)[-1])
x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_C'+str(i))
# channel = int(np.shape(x)[-1])
# x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_C')
x = Global_Average_Pooling(x)
x = Dropout(x, rate=0.2, training=self.training)
x = flatten(x)
x = Fully_connected(x, layer_name='final_fully_connected')
return x
def test(data_dir="/nfs/project/whale/",model_file = "./model/backup1/Inception_resnet_v2_.ckpt-36"):
cls_txt = open(os.path.join(data_dir,"cls.txt"),"r")
lines = cls_txt.readlines()
cls_txt.close()
my_cls = []
for line in lines:
my_cls.append(line.strip())
test_images= loadTestData(data_dir)
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
training_flag = tf.placeholder(tf.bool)
logits = SE_Inception_resnet_v2(x, training=training_flag).model
pred = logits
#pred = tf.argmax(logits,1)
#pred = np.argsort(-logits,1)
sess=tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=3)
saver.restore(sess,model_file)
res_txt = open(os.path.join(data_dir,"test_res.csv"),"w")
print("Image,Id",file=res_txt)
for step in range(0,63):
#print(step)
test_batch_x,image_names = loadTestDataBatch(test_images,batch_size,step)
test_feed_dict = {
x: test_batch_x,
training_flag: False
}
predict = sess.run([pred],feed_dict=test_feed_dict)
print("pred:",predict)
for i,img_name in enumerate(image_names):
top5 = np.argsort(-predict[0])[i,0:5]
#print("top5:",top5)
s = img_name+"," + my_cls[top5[0]]+" "+my_cls[top5[1]]+" "+my_cls[top5[2]]+" "+my_cls[top5[3]]+" "+my_cls[top5[4]]
print(s,file=res_txt)
print(s)
res_txt.close()
def train(data_dir="/nfs/project/whale/"):
#train_x, train_y, test_x, test_y = prepare_data()
#train_x, test_x = color_preprocessing(train_x, test_x)
train_images,train_labels_id = loadTrainData(data_dir)
print("p1")
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
label = tf.placeholder(tf.float32, shape=[None, class_num])
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
logits = SE_Inception_resnet_v2(x, training=training_flag).model
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits))
l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
train = optimizer.minimize(cost + l2_loss * weight_decay)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("p2")
saver = tf.train.Saver(tf.global_variables(),max_to_keep=10)
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state('./model')
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter('./logs', sess.graph)
print("p3")
epoch_learning_rate = init_learning_rate
for epoch in range(1, total_epochs + 1):
if epoch % 30 == 0 :
epoch_learning_rate = epoch_learning_rate / 10
pre_index = 0
train_acc = 0.0
train_loss = 0.0
print("p4")
for step in range(0, iteration):
batch_x,batch_y = loadTrainDataBatch(train_images,train_labels_id,batch_size,step)
print(step)
#batch_x = data_augmentation(batch_x)
train_feed_dict = {
x: batch_x,
label: batch_y,
learning_rate: epoch_learning_rate,
training_flag: True
}
_, batch_loss = sess.run([train, cost], feed_dict=train_feed_dict)
batch_acc = accuracy.eval(feed_dict=train_feed_dict)
train_loss += batch_loss
train_acc += batch_acc
pre_index += batch_size
print("iters:",pre_index," total_train_loss:",train_loss," total_train_acc",train_acc," batch_loss",batch_loss,"batch_acc",batch_acc)
train_loss /= iteration # average loss
train_acc /= iteration # average accuracy
train_summary = tf.Summary(value=[tf.Summary.Value(tag='train_loss', simple_value=train_loss),
tf.Summary.Value(tag='train_accuracy', simple_value=train_acc)])
#test_acc, test_loss, test_summary = Evaluate(sess)
summary_writer.add_summary(summary=train_summary, global_step=epoch)
#summary_writer.add_summary(summary=test_summary, global_step=epoch)
summary_writer.flush()
#line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f \n" % (
#epoch, total_epochs, train_loss, train_acc, test_loss, test_acc)
line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f" % (
epoch, total_epochs, train_loss, train_acc)
print(line)
with open('logs.txt', 'a') as f:
f.write(line)
f.close()
saver.save(sess=sess, save_path='./model/Inception_resnet_v2_.ckpt',global_step=epoch)
test()
#train() | [
"1178151687@qq.com"
] | 1178151687@qq.com |
7f7ff906673435c35bb3b7e76398d31528a9594a | b04e2c3d03697f9e35bbd336166ac5e5e07d2253 | /kml_generator.py | 897f90542322bb072ea909d0dcfae8e10873cbe4 | [] | no_license | UCSD-AUVSI/PathfinderV2 | e6a58a2b812c6c5e00d821e89ace91d3a23e1d1c | b275be5f2af1b8e33be9c85468ea7b54bdad92ab | refs/heads/master | 2020-05-14T10:49:26.963875 | 2014-06-18T00:56:36 | 2014-06-18T00:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,231 | py | class KMLGenerator:
"""
Outputs the path waypoints to a format that can be displayed by
Google Earth.
"""
def __init__(self, pathfinder):
self.pathfinder = pathfinder
def export_kml(self):
def print_header():
print '<?xml version="1.0" encoding="UTF-8"?>'
print '<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">'
print '<Folder>'
def print_boundaries():
print '<Style id="boundarystyle">'
print '\t<PolyStyle>'
print '\t\t<color>8000ff00</color>'
print '\t</PolyStyle>'
print '\t</Style>'
print '<Placemark>'
print '\t<name>Flight Boundaries</name>'
print '\t<styleUrl>#boundarystyle</styleUrl>'
print '\t<Polygon>'
print '\t\t<extrude>1</extrude>'
print '\t\t<altitudeMode>clampToGround</altitudeMode>'
print '\t\t<outerBoundaryIs>'
print '\t\t\t<LinearRing>'
print '\t\t\t\t<coordinates>'
for index, (x,y) in enumerate(self.pathfinder.get_boundaries()):
lat = "%.6f"%x
lng = "%.6f"%y
print '\t\t\t\t\t%s,%s,%s'%(lng, lat, "0.0")
print '\t\t\t\t</coordinates>'
print '\t\t\t</LinearRing>'
print '\t\t</outerBoundaryIs>'
print '\t</Polygon>'
print '</Placemark>'
def print_searcharea():
print '<Style id="searchareastyle">'
print '\t<PolyStyle>'
print '\t\t<color>ccff0000</color>'
print '\t</PolyStyle>'
print '\t</Style>'
print '<Placemark>'
print '\t<name>Search Area</name>'
print '\t<styleUrl>#searchareastyle</styleUrl>'
print '\t<Polygon>'
print '\t\t<color>ccff0000</color>'
print '\t\t<extrude>1</extrude>'
print '\t\t<altitudeMode>clampToGround</altitudeMode>'
print '\t\t<outerBoundaryIs>'
print '\t\t\t<LinearRing>'
print '\t\t\t\t<coordinates>'
for index, (x,y) in enumerate(self.pathfinder.get_searcharea()):
lat = "%.6f"%x
lng = "%.6f"%y
print '\t\t\t\t\t%s,%s,%s'%(lng, lat, "0.0")
print '\t\t\t\t</coordinates>'
print '\t\t\t</LinearRing>'
print '\t\t</outerBoundaryIs>'
print '\t</Polygon>'
print '</Placemark>'
def print_path():
print '<Placemark>'
print '\t<name>Flight Path</name>'
print '\t<LineString>'
print '\t<extrude>1</extrude>'
print '\t<tesselate>1</tesselate>'
print '\t<coordinates>'
for index, (x, y) in enumerate(self.pathfinder.get_path()):
lat = "%.6f"%x
lng = "%.6f"%y
alt = str(self.pathfinder.get_altitude())
print '\t\t%s,%s,%s'%(lng, lat, alt)
print '\t</coordinates>'
print '\t</LineString>'
print '</Placemark>'
def print_points():
    # Emit one <Placemark>/<Point> per waypoint, named "WP <index>", so the
    # individual waypoints of the path are selectable in Google Earth.
    for index, (x, y) in enumerate(self.pathfinder.get_path()):
        lat = "%.6f"%x
        lng = "%.6f"%y
        alt = str(self.pathfinder.get_altitude())
        print '<Placemark>'
        print '\t<name>WP %i</name>'%index
        print '\t<Point>'
        print '\t\t<coordinates>%s,%s,%s</coordinates>'%(lng, lat, alt)
        print '\t</Point>'
        print '\t</Placemark>'
def print_footer():
    # Close the <Folder> and <kml> containers opened by the header.
    print '</Folder>'
    print '</kml>'
print_header()
print_searcharea()
print_boundaries()
print_path()
print_points()
print_footer()
| [
"eric.lo.fh@gmail.com"
] | eric.lo.fh@gmail.com |
07c82d394da3ba0b803626b06fcf4e7201837ff3 | a6f393215fc105918742ddb8cc1506b4602c58be | /code/tango_with_django_project/rango/migrations/0005_userprofile_test_field.py | f446cf9a2be34f95c6938e3b5eb1f1eb295abe5a | [] | no_license | STAbraham/ProjectTango | 353435f0a237ec9048dc7bc0c4af4ce040a605e1 | 018386781ba68b2566b104a30d1a96625f2badb3 | refs/heads/master | 2020-06-06T03:04:29.678478 | 2016-01-27T00:11:11 | 2016-01-27T00:11:11 | 35,189,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: adds an IntegerField named
    # ``test_field`` (default 0) to the rango ``UserProfile`` model.
    # Declarative content must match the applied migration history --
    # do not edit the operations by hand.

    dependencies = [
        ('rango', '0004_auto_20151106_1438'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='test_field',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
    ]
| [
"steve.abraham@gmail.com"
] | steve.abraham@gmail.com |
22f40985b1fdd1d6176a76b811066b016d020fd4 | d607b74c70840bf21780edfae14310ece7448df7 | /server/server.py | 87c5e48a84a998d64bbd2d0825ebbe84a4fcdf98 | [] | no_license | decko/hackday | 4beb60ef7765c4c6c8f540bfb10a54e93a28409f | 9ee41e61b841e87b9afb4af3e1dd66ef84b9505f | refs/heads/master | 2020-12-25T14:12:45.045522 | 2014-06-09T07:37:17 | 2014-06-09T07:37:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | from flask import Flask, render_template, request, url_for, redirect, abort
from tools import jsonify, diasatras
from flask.ext.pymongo import PyMongo
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'monitorlegislativo'
#app.config["SECRET_KEY"] = "KeepThisS3cr3t"
app.jinja_env.filters['diasatras'] = diasatras
mongo = PyMongo(app)
@app.route("/")
def index():
return render_template('front.html')
@app.route("/busca")
def busca():
termo = request.args.get('termo', '')
try:
t = termo.split()
numero = str(int(t[0].strip())).zfill(4) #transforma em int e depois em str de novo com 4 casas
ano = str(int(t[1].strip()))
return redirect(url_for("projeto", tipo='pl',numero=numero, ano=ano))
except:
return termo #todo
@app.route('/legis/<tipo>/<numero>/<ano>')
@app.route('/legis/<tipo>/<numero>/<ano>/<json>')
def projeto(tipo, numero, ano, json=False):
    """Show a single bill, either as an HTML page or as raw JSON."""
    document_id = "-".join((tipo, numero, ano))
    document = mongo.db.legis.find_one({"_id": document_id})
    if not document:
        abort(404)
    if json == 'json':
        return jsonify(document)
    # Deliberately ``== False`` (value equality, matching the original):
    # presumably any other path suffix falls through and yields no response.
    if json == False:
        return render_template('legis.html', p=document)
if __name__ == "__main__":
app.run(debug=True) | [
"pedro@markun.com.br"
] | pedro@markun.com.br |
05485cae78eacda90f4eaa677725069a338af70a | 85a1dff39582bd46fbdc32dc07a5a7a2ea874cc3 | /python/hmm/__init__.py | 29d77e7d94b5214097ab7786fb4648657544b7f6 | [] | no_license | ravalan/AprendizajeAutomatico | 864f597a365cd2a3f19e396f58bb7c0e5b28229e | e06fd05a674b6ca2cbdec7d805032429c9051df0 | refs/heads/master | 2021-08-23T22:01:45.443238 | 2017-12-06T18:53:37 | 2017-12-06T18:53:37 | 113,353,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | """
Author: Jon Ander Gomez Adrian (jon@dsic.upv.es, http://www.dsic.upv.es/~jon)
Version: 2.0
Date: September 2016
Universitat Politecnica de Valencia
Technical University of Valencia TU.VLC
"""
from .Constants import Constants
from .Transitions import Transitions
from .State import State
from .HMM import HMM
#from .AcousticModel import AcousticModel
#__all__ = [ 'Utils', 'State', 'HMM', 'AcousticModel' ]
__all__ = [ 'Transitions', 'State', 'HMM' ]
| [
"rafavallejo85@gmail.com"
] | rafavallejo85@gmail.com |
18af1e5f960e1c94d2b3f181e850cbad5de73524 | efb1ccfa2312c59fe9a828e645490cdbd5ac979c | /pypan/pysegs/__init__.py | 671e81efbc2ad7288ff34aa786c84ef476748be0 | [] | no_license | jsgounot/PyPan | dd5be114b598a65208f06b908d6afd054b9b1426 | b8168478718300e670544873fb5fd36ac34f30eb | refs/heads/master | 2020-06-03T13:10:32.037457 | 2019-11-13T12:49:07 | 2019-11-13T12:49:07 | 191,578,960 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | __version__ = "0.0.1"
from pypan.pysegs.segments import * | [
"jsgounot@gmail.com"
] | jsgounot@gmail.com |
26df2198a4baa0285dd2905739ffa4925c2ea514 | 8d47bab52185041c574868fb4eed225c0c3d72e7 | /hw3/models.py | d89af8655a6b7586522d86c1ecdb48b731e6e773 | [] | no_license | clab/sp2013.11-731 | bb9bfe192e5e69d656b26f6db1cd2e5d0d3fc59c | 32da31c6f40cca49233f052575e2aa3efb7e2bbf | refs/heads/master | 2021-01-16T01:01:43.507444 | 2013-04-05T04:14:08 | 2013-04-05T04:14:08 | 7,564,821 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | #!/usr/bin/env python
# Simple translation model and language model data structures
import sys
from collections import namedtuple
# A translation model is a dictionary where keys are tuples of French words
# and values are lists of (english, logprob) named tuples. For instance,
# the French phrase "que se est" has two translations, represented like so:
# tm[('que', 'se', 'est')] = [
# phrase(english='what has', logprob=-0.301030009985),
# phrase(english='what has been', logprob=-0.301030009985)]
# k is a pruning parameter: only the top k translations are kept for each f.
phrase = namedtuple("phrase", "english, logprob")
def TM(filename, k):
sys.stderr.write("Reading translation model from %s...\n" % (filename,))
tm = {}
for line in open(filename).readlines():
(f, e, logprob) = line.strip().split(" ||| ")
tm.setdefault(tuple(f.split()), []).append(phrase(e, float(logprob)))
for f in tm: # prune all but top k translations
tm[f].sort(key=lambda x: -x.logprob)
del tm[f][k:]
return tm
# # A language model scores sequences of English words, and must account
# # for both beginning and end of each sequence. Example API usage:
# lm = models.LM(filename)
# sentence = "This is a test ."
# lm_state = lm.begin() # initial state is always <s>
# logprob = 0.0
# for word in sentence.split():
# (lm_state, word_logprob) = lm.score(lm_state, word)
# logprob += word_logprob
# logprob += lm.end(lm_state) # transition to </s>, can also use lm.score(lm_state, "</s>")[1]
ngram_stats = namedtuple("ngram_stats", "logprob, backoff")
class LM:
def __init__(self, filename):
sys.stderr.write("Reading language model from %s...\n" % (filename,))
self.table = {}
for line in open(filename):
entry = line.strip().split("\t")
if len(entry) > 1 and entry[0] != "ngram":
(logprob, ngram, backoff) = (float(entry[0]), tuple(entry[1].split()), float(entry[2] if len(entry)==3 else 0.0))
self.table[ngram] = ngram_stats(logprob, backoff)
def begin(self):
return ("<s>",)
def score(self, state, word):
ngram = state + (word,)
score = 0.0
while len(ngram)> 0:
if ngram in self.table:
return (ngram[-2:], score + self.table[ngram].logprob)
else: #backoff
score += self.table[ngram[:-1]].backoff if len(ngram) > 1 else 0.0
ngram = ngram[1:]
return ((), score + self.table[("<unk>",)].logprob)
def end(self, state):
return self.score(state, "</s>")[1]
| [
"cdyer@Chriss-MacBook-Air.local"
] | cdyer@Chriss-MacBook-Air.local |
ef2fee0e7afb514b19649960ca4548afc68cb456 | c3ddf42e1abce4c122dab546fa053b201ac3c447 | /aplpy_wrapper/overlays.py | 1d0515b0f6c53349879c05f02c80ffd4cab8600a | [] | no_license | anizami/aplpy_wrapper | a92aab69b72338e57e18ca3d2ba39266e3fa0d01 | 651f7defe61c1e79e7301cf3383c4674782aca53 | refs/heads/master | 2021-01-10T18:30:56.813410 | 2014-07-17T21:28:42 | 2014-07-17T21:28:42 | 21,249,192 | 1 | 0 | null | 2014-07-09T19:19:19 | 2014-06-26T17:37:21 | Python | UTF-8 | Python | false | false | 16,701 | py | from __future__ import absolute_import, print_function, division
import warnings
from mpl_toolkits.axes_grid.anchored_artists \
import AnchoredEllipse, AnchoredSizeBar
import numpy as np
from matplotlib.patches import FancyArrowPatch
from matplotlib.font_manager import FontProperties
from . import wcs_util
# from decorators import auto_refresh
from .decorators import auto_refresh
corners = {}
corners['top right'] = 1
corners['top left'] = 2
corners['bottom left'] = 3
corners['bottom right'] = 4
corners['right'] = 5
corners['left'] = 6
corners['bottom'] = 8
corners['top'] = 9
class Scalebar(object):
def __init__(self, parent):
# Retrieve info from parent figure
self._ax = parent.ax
self._wcs = parent._wcs
self._figure = parent._figure
# Save plotting parameters (required for @auto_refresh)
# self._parameters = parent._parameters
# Initialize settings
self._base_settings = {}
self._scalebar_settings = {}
self._label_settings = {}
self._label_settings['fontproperties'] = FontProperties()
# LAYOUT
# @auto_refresh
def show(self, length, label=None, corner='bottom right', frame=False,
borderpad=0.4, pad=0.5, **kwargs):
'''
Overlay a scale bar on the image.
Parameters
----------
length : float
The length of the scalebar
label : str, optional
Label to place below the scalebar
corner : int, optional
Where to place the scalebar. Acceptable values are:, 'left',
'right', 'top', 'bottom', 'top left', 'top right', 'bottom
left' (default), 'bottom right'
frame : str, optional
Whether to display a frame behind the scalebar (default is False)
kwargs
Additional arguments are passed to the matplotlib Rectangle and
Text classes. See the matplotlib documentation for more details.
In cases where the same argument exists for the two objects, the
argument is passed to both the Text and Rectangle instance.
'''
self._length = length
self._base_settings['corner'] = corner
self._base_settings['frame'] = frame
self._base_settings['borderpad'] = borderpad
self._base_settings['pad'] = pad
degrees_per_pixel = wcs_util.degperpix(self._wcs)
length = length / degrees_per_pixel
try:
self._scalebar.remove()
except:
pass
if isinstance(corner, basestring):
corner = corners[corner]
self._scalebar = AnchoredSizeBar(self._ax.transData, length, label,
corner, pad=pad, borderpad=borderpad,
sep=5, frameon=frame)
self._ax.add_artist(self._scalebar)
self.set(**kwargs)
# @auto_refresh
def _remove(self):
self._scalebar.remove()
# @auto_refresh
def hide(self):
'''
Hide the scalebar.
'''
try:
self._scalebar.remove()
except:
pass
    # @auto_refresh
    def set_length(self, length):
        '''
        Set the length of the scale bar.
        '''
        # Redraw with the stored layout options, then re-apply the styling
        # that show() resets.
        self.show(length, **self._base_settings)
        self._set_scalebar_properties(**self._scalebar_settings)
        # NOTE(review): this passes *scalebar* settings to the label setter;
        # presumably it should be ``**self._label_settings`` — confirm before
        # changing (matplotlib Text rejects rectangle-only kwargs).
        self._set_label_properties(**self._scalebar_settings)
# @auto_refresh
def set_label(self, label):
'''
Set the label of the scale bar.
'''
self._set_label_properties(text=label)
# @auto_refresh
def set_corner(self, corner):
'''
Set where to place the scalebar.
Acceptable values are 'left', 'right', 'top', 'bottom', 'top left',
'top right', 'bottom left' (default), and 'bottom right'.
'''
self._base_settings['corner'] = corner
self.show(self._length, **self._base_settings)
self._set_scalebar_properties(**self._scalebar_settings)
self._set_label_properties(**self._scalebar_settings)
# @auto_refresh
def set_frame(self, frame):
'''
Set whether to display a frame around the scalebar.
'''
self._base_settings['frame'] = frame
self.show(self._length, **self._base_settings)
self._set_scalebar_properties(**self._scalebar_settings)
self._set_label_properties(**self._scalebar_settings)
# APPEARANCE
# @auto_refresh
def set_linewidth(self, linewidth):
'''
Set the linewidth of the scalebar, in points.
'''
self._set_scalebar_properties(linewidth=linewidth)
# @auto_refresh
def set_linestyle(self, linestyle):
'''
Set the linestyle of the scalebar.
Should be one of 'solid', 'dashed', 'dashdot', or 'dotted'.
'''
self._set_scalebar_properties(linestyle=linestyle)
# @auto_refresh
def set_alpha(self, alpha):
'''
Set the alpha value (transparency).
This should be a floating point value between 0 and 1.
'''
self._set_scalebar_properties(alpha=alpha)
self._set_label_properties(alpha=alpha)
# @auto_refresh
def set_color(self, color):
'''
Set the label and scalebar color.
'''
self._set_scalebar_properties(color=color)
self._set_label_properties(color=color)
# @auto_refresh
def set_font(self, family=None, style=None, variant=None, stretch=None,
weight=None, size=None, fontproperties=None):
'''
Set the font of the tick labels
Parameters
----------
common: family, style, variant, stretch, weight, size, fontproperties
Notes
-----
Default values are set by matplotlib or previously set values if
set_font has already been called. Global default values can be set by
editing the matplotlibrc file.
'''
if family:
self._label_settings['fontproperties'].set_family(family)
if style:
self._label_settings['fontproperties'].set_style(style)
if variant:
self._label_settings['fontproperties'].set_variant(variant)
if stretch:
self._label_settings['fontproperties'].set_stretch(stretch)
if weight:
self._label_settings['fontproperties'].set_weight(weight)
if size:
self._label_settings['fontproperties'].set_size(size)
if fontproperties:
self._label_settings['fontproperties'] = fontproperties
self._set_label_properties(fontproperties=self._label_settings['fontproperties'])
# @auto_refresh
def _set_label_properties(self, **kwargs):
'''
Modify the scalebar label properties.
All arguments are passed to the matplotlib Text class. See the
matplotlib documentation for more details.
'''
for kwarg in kwargs:
self._label_settings[kwarg] = kwargs[kwarg]
self._scalebar.txt_label.get_children()[0].set(**kwargs)
# @auto_refresh
def _set_scalebar_properties(self, **kwargs):
'''
Modify the scalebar properties.
All arguments are passed to the matplotlib Rectangle class. See the
matplotlib documentation for more details.
'''
for kwarg in kwargs:
self._scalebar_settings[kwarg] = kwargs[kwarg]
self._scalebar.size_bar.get_children()[0].set(**kwargs)
# @auto_refresh
def set(self, **kwargs):
'''
Modify the scalebar and scalebar properties.
All arguments are passed to the matplotlib Rectangle and Text classes.
See the matplotlib documentation for more details. In cases where the
same argument exists for the two objects, the argument is passed to
both the Text and Rectangle instance.
'''
for kwarg in kwargs:
kwargs_single = {kwarg: kwargs[kwarg]}
try:
self._set_label_properties(**kwargs_single)
except AttributeError:
pass
try:
self._set_scalebar_properties(**kwargs_single)
except AttributeError:
pass
# DEPRECATED
# @auto_refresh
def set_font_family(self, family):
warnings.warn("scalebar.set_font_family is deprecated - use scalebar.set_font instead", DeprecationWarning)
self.set_font(family=family)
# @auto_refresh
def set_font_weight(self, weight):
warnings.warn("scalebar.set_font_weight is deprecated - use scalebar.set_font instead", DeprecationWarning)
self.set_font(weight=weight)
# @auto_refresh
def set_font_size(self, size):
warnings.warn("scalebar.set_font_size is deprecated - use scalebar.set_font instead", DeprecationWarning)
self.set_font(size=size)
# @auto_refresh
def set_font_style(self, style):
warnings.warn("scalebar.set_font_style is deprecated - use scalebar.set_font instead", DeprecationWarning)
self.set_font(style=style)
# For backward-compatibility
ScaleBar = Scalebar
# Only for certain types of input files
# class Beam(object):
# def __init__(self, parent):
# # Retrieve info from parent figure
# self._figure = parent._figure
# self._header = parent._header
# self._ax = parent._ax1
# self._wcs = parent._wcs
# # Save plotting parameters (required for @auto_refresh)
# self._parameters = parent._parameters
# # Initialize settings
# self._base_settings = {}
# self._beam_settings = {}
# # LAYOUT
# # @auto_refresh
# def show(self, major='BMAJ', minor='BMIN', angle='BPA',
# corner='bottom left', frame=False, borderpad=0.4, pad=0.5,
# **kwargs):
# '''
# Display the beam shape and size for the primary image.
# By default, this method will search for the BMAJ, BMIN, and BPA
# keywords in the FITS header to set the major and minor axes and the
# position angle on the sky.
# Parameters
# ----------
# major : float, optional
# Major axis of the beam in degrees (overrides BMAJ if present)
# minor : float, optional
# Minor axis of the beam in degrees (overrides BMIN if present)
# angle : float, optional
# Position angle of the beam on the sky in degrees (overrides
# BPA if present) in the anticlockwise direction.
# corner : int, optional
# The beam location. Acceptable values are 'left', 'right',
# 'top', 'bottom', 'top left', 'top right', 'bottom left'
# (default), and 'bottom right'.
# frame : str, optional
# Whether to display a frame behind the beam (default is False)
# kwargs
# Additional arguments are passed to the matplotlib Ellipse classe.
# See the matplotlib documentation for more details.
# '''
# if isinstance(major, basestring):
# major = self._header[major]
# if isinstance(minor, basestring):
# minor = self._header[minor]
# if isinstance(angle, basestring):
# angle = self._header[angle]
# degrees_per_pixel = wcs_util.degperpix(self._wcs)
# self._base_settings['minor'] = minor
# self._base_settings['major'] = major
# self._base_settings['angle'] = angle
# self._base_settings['corner'] = corner
# self._base_settings['frame'] = frame
# self._base_settings['borderpad'] = borderpad
# self._base_settings['pad'] = pad
# minor /= degrees_per_pixel
# major /= degrees_per_pixel
# try:
# self._beam.remove()
# except:
# pass
# if isinstance(corner, basestring):
# corner = corners[corner]
# self._beam = AnchoredEllipse(self._ax.transData, width=minor,
# height=major, angle=angle, loc=corner,
# pad=pad, borderpad=borderpad,
# frameon=frame)
# self._ax.add_artist(self._beam)
# self.set(**kwargs)
# # @auto_refresh
# def _remove(self):
# self._beam.remove()
# # @auto_refresh
# def hide(self):
# '''
# Hide the beam
# '''
# try:
# self._beam.remove()
# except:
# pass
# # @auto_refresh
# def set_major(self, major):
# '''
# Set the major axis of the beam, in degrees.
# '''
# self._base_settings['major'] = major
# self.show(**self._base_settings)
# self.set(**self._beam_settings)
# # @auto_refresh
# def set_minor(self, minor):
# '''
# Set the minor axis of the beam, in degrees.
# '''
# self._base_settings['minor'] = minor
# self.show(**self._base_settings)
# self.set(**self._beam_settings)
# # @auto_refresh
# def set_angle(self, angle):
# '''
# Set the position angle of the beam on the sky, in degrees.
# '''
# self._base_settings['angle'] = angle
# self.show(**self._base_settings)
# self.set(**self._beam_settings)
# # @auto_refresh
# def set_corner(self, corner):
# '''
# Set the beam location.
# Acceptable values are 'left', 'right', 'top', 'bottom', 'top left',
# 'top right', 'bottom left' (default), and 'bottom right'.
# '''
# self._base_settings['corner'] = corner
# self.show(**self._base_settings)
# self.set(**self._beam_settings)
# # @auto_refresh
# def set_frame(self, frame):
# '''
# Set whether to display a frame around the beam.
# '''
# self._base_settings['frame'] = frame
# self.show(**self._base_settings)
# self.set(**self._beam_settings)
# # @auto_refresh
# def set_borderpad(self, borderpad):
# '''
# Set the amount of padding within the beam object, relative to the
# canvas size.
# '''
# self._base_settings['borderpad'] = borderpad
# self.show(**self._base_settings)
# self.set(**self._beam_settings)
# # @auto_refresh
# def set_pad(self, pad):
# '''
# Set the amount of padding between the beam object and the image
# corner/edge, relative to the canvas size.
# '''
# self._base_settings['pad'] = pad
# self.show(**self._base_settings)
# self.set(**self._beam_settings)
# # APPEARANCE
# # @auto_refresh
# def set_alpha(self, alpha):
# '''
# Set the alpha value (transparency).
# This should be a floating point value between 0 and 1.
# '''
# self.set(alpha=alpha)
# # @auto_refresh
# def set_color(self, color):
# '''
# Set the beam color.
# '''
# self.set(color=color)
# # @auto_refresh
# def set_edgecolor(self, edgecolor):
# '''
# Set the color for the edge of the beam.
# '''
# self.set(edgecolor=edgecolor)
# # @auto_refresh
# def set_facecolor(self, facecolor):
# '''
# Set the color for the interior of the beam.
# '''
# self.set(facecolor=facecolor)
# # @auto_refresh
# def set_linestyle(self, linestyle):
# '''
# Set the line style for the edge of the beam.
# This should be one of 'solid', 'dashed', 'dashdot', or 'dotted'.
# '''
# self.set(linestyle=linestyle)
# # @auto_refresh
# def set_linewidth(self, linewidth):
# '''
# Set the line width for the edge of the beam, in points.
# '''
# self.set(linewidth=linewidth)
# # @auto_refresh
# def set_hatch(self, hatch):
# '''
# Set the hatch pattern.
# This should be one of '/', '\', '|', '-', '+', 'x', 'o', 'O', '.', or
# '*'.
# '''
# self.set(hatch=hatch)
# # @auto_refresh
# def set(self, **kwargs):
# '''
# Modify the beam properties. All arguments are passed to the matplotlib
# Ellipse classe. See the matplotlib documentation for more details.
# '''
# for kwarg in kwargs:
# self._beam_settings[kwarg] = kwargs[kwarg]
# self._beam.ellipse.set(**kwargs)
| [
"anizami@macalester.edu"
] | anizami@macalester.edu |
1eb337a91fba49e0d21bb0111796ad7754e21348 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_340/ch40_2020_03_25_17_24_54_674946.py | 90382adbb982876f65ccf3ac36f1ba2dc7c75c02 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def soma_valores(elementos):
s=0
i=0
while i<len(elementos):
s+=elementos[i]
i+=1
return s
| [
"you@example.com"
] | you@example.com |
ea7a2ef3739aa9fc580220294c7fc7f0fb121279 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnmason.py | 808dc1dea271099aff64c07de7f8411475b2b469 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,382 | py | ii = [('CookGHP3.py', 2), ('CoolWHM2.py', 1), ('WilbRLW.py', 18), ('RennJIT.py', 6), ('LeakWTI2.py', 1), ('WilkJMC3.py', 6), ('WilbRLW5.py', 2), ('LeakWTI3.py', 2), ('MarrFDI3.py', 1), ('PeckJNG.py', 6), ('GellWPT.py', 1), ('AdamWEP.py', 6), ('WilbRLW2.py', 3), ('ClarGE2.py', 9), ('GellWPT2.py', 1), ('WilkJMC2.py', 1), ('CarlTFR.py', 3), ('SeniNSP.py', 3), ('GrimSLE.py', 1), ('RoscTTI3.py', 1), ('KiddJAE.py', 3), ('AdamHMM.py', 1), ('CoolWHM.py', 1), ('CrokTPS.py', 7), ('ClarGE.py', 3), ('IrviWVD.py', 1), ('LyelCPG.py', 3), ('GilmCRS.py', 2), ('DaltJMA.py', 13), ('WestJIT2.py', 7), ('DibdTRL2.py', 3), ('AinsWRR.py', 1), ('MedwTAI.py', 1), ('WadeJEB.py', 5), ('FerrSDO2.py', 1), ('GodwWLN.py', 1), ('SoutRD2.py', 9), ('LeakWTI4.py', 1), ('LeakWTI.py', 5), ('BachARE.py', 4), ('SoutRD.py', 2), ('WheeJPT.py', 22), ('MereHHB3.py', 2), ('HowiWRL2.py', 3), ('WilkJMC.py', 2), ('HogaGMM.py', 2), ('MartHRW.py', 5), ('MackCNH.py', 1), ('WestJIT.py', 4), ('EdgeMHT.py', 1), ('RoscTTI.py', 1), ('ThomGLG.py', 2), ('StorJCC.py', 6), ('LewiMJW.py', 1), ('MackCNH2.py', 1), ('SomeMMH.py', 1), ('WilbRLW3.py', 3), ('MereHHB2.py', 2), ('JacoWHI.py', 2), ('ClarGE3.py', 21), ('MartHRW2.py', 1), ('DibdTRL.py', 3), ('FitzRNS2.py', 9), ('HogaGMM2.py', 4), ('MartHSI.py', 4), ('NortSTC.py', 1), ('SadlMLP2.py', 2), ('LyelCPG3.py', 1), ('WaylFEP.py', 1), ('ClarGE4.py', 16), ('HowiWRL.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
f89832f2ff6b0d2d7737f3b03a9b5cbda56ea5cf | a86c4c78a38f3bdd998c52111693da1002bf79dc | /love_calculator/urls.py | 188b105c41371e1e9c09ba119bbd2a77a8803820 | [] | no_license | AshishPandagre/django-love-calculator-prank | 9d4a8ffbad14f9f3336109a557a01fa4593c8292 | 1aae01a800f0de3b9f19ba593a89fddc947c2883 | refs/heads/main | 2023-06-08T01:41:16.206821 | 2021-06-30T14:17:16 | 2021-06-30T14:17:16 | 381,719,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | """love_calculator URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from calculator.views import error, Profile
urlpatterns = [
    path('admin/', admin.site.urls),
    # Custom account pages are mounted *before* allauth's catch-all so they
    # shadow its default views.
    path('accounts/email/', error, name='error page'),  # error page coz not supporting changing email
    path('accounts/profile/', Profile.as_view(), name='profile page of a user'),
    path('accounts/', include('allauth.urls')),
    path('', include('calculator.urls')),
]

# Dotted paths to the custom error views; Django resolves these lazily.
handler404 = 'calculator.views.error_404'
handler500 = 'calculator.views.error_500'
| [
"ashishpandagre9@gmail.com"
] | ashishpandagre9@gmail.com |
9d1fc26be81915a1837b00bb59291de594a83bfb | 558aeaadc9b5994adce5c20cefcef3102850da8f | /sent-analysis.py | 8872f9369dd1cf7402dc1f1a6d6089e5f1ceab8f | [] | no_license | 10ego/VaderVaderVader | cfe27b6271d56b4e1cfdc0797b1ab70f89b452f4 | 3b0689af9f523e0ecb91ad69948943324d2ccc56 | refs/heads/master | 2020-03-25T22:46:10.590067 | 2018-08-10T05:57:07 | 2018-08-10T05:57:07 | 144,241,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
analyzer = SentimentIntensityAnalyzer()
is_neu_p = 0
is_neu_n = 0
pos_count = 0
is_pos = 0
neg_count = 0
is_neg = 0
threshold = 0.05
pos_subj = 0
neg_subj = 0
neu_subj = 0
with open('positive.txt','r') as f:
for line in f.read().split('\n'):
subj = TextBlob(line)
score = analyzer.polarity_scores(line)
#if score['neu'] > score['pos']:
if score['compound'] < threshold and score['compound'] > -threshold:
is_neu_p += 1
if subj.sentiment.subjectivity >= 0.5:
pos_subj+=1
else:
if not score['neg'] > threshold:
if score['pos']-score['neg'] > 0:
is_pos +=1
if subj.sentiment.subjectivity >= 0.5:
pos_subj+=1
pos_count +=1
with open('negative.txt','r') as f:
for line in f.read().split('\n'):
subj = TextBlob(line)
score = analyzer.polarity_scores(line)
#if score['neu'] > score['neg']:
if score['compound'] < threshold and score['compound'] > -threshold:
is_neu_n += 1
if subj.sentiment.subjectivity >= 0.5:
neg_subj+=1
else:
if not score['pos'] > threshold:
if score['neg']-score['pos'] > 0:
is_neg +=1
if subj.sentiment.subjectivity >= 0.5:
neg_subj+=1
neg_count +=1
print("Positive accuracy = {}% via {} samples".format(is_pos/pos_count*100, pos_count))
print("Negative accuracy = {}% via {} samples".format(is_neg/neg_count*100, neg_count))
print("Total of {} positive messages are subjective".format(pos_subj))
print("Total of {} negative messages are subjective".format(neg_subj))
print("{} positive messages are actually neutral".format(is_neu_p))
print("{} negative messages are actually neutral".format(is_neu_n))
| [
"noreply@github.com"
] | 10ego.noreply@github.com |
cf675bd3815ccba1199db75ba87e928c72f120d5 | ac6bca79c14d9c62f8498047337688573b6f281e | /rpp_scrapper.py | 0b4856d58ea03a4ac833422b515f284e5e09ac1d | [] | no_license | falcone-gk/RppTranslator | b845efd632846c1f36359ff8cc88c9a3fc3d6018 | b3220149e585f3df059a0a98b404a3cfabd8ba48 | refs/heads/main | 2023-03-06T19:52:33.492142 | 2021-02-22T04:15:56 | 2021-02-22T04:15:56 | 339,580,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | import requests
from bs4 import BeautifulSoup
from googletrans import Translator
def __translate_text(text):
    """Strip non-breaking spaces from *text* and translate it with
    googletrans (default target language, i.e. English)."""
    clean_text = text.replace('\xa0', '')
    # A fresh Translator per call -- presumably to avoid stale sessions;
    # confirm before reusing a shared instance.
    translator = Translator()
    return translator.translate(clean_text).text
def rpp_news(url):
    """
    Scrap the content news and image from the url which must be from Rpp webpage.

    Parameters
    ----------
    url: string
        It is the news url from Rpp webpage.
    """
    response = requests.get(url)
    page = BeautifulSoup(response.text, 'html.parser')

    # Header block holds the title, publication timestamp and summary.
    head = page.find('header', class_='story-header')
    # Article paragraphs, each translated to English.
    paragraphs = page.find(id='article-body').find_all('p')

    return {
        "title": __translate_text(head.find('h1').text),
        "date": head.find('time')['datetime'],
        "summary": __translate_text(head.find('div', class_='sumary').find('p').text),
        "img_url": page.find('div', class_='cover').find('img')['src'],
        "body": [__translate_text(p.text) for p in paragraphs],
    }
def main():
    # Manual smoke test: scrape one known RPP article and dump the result.
    val = rpp_news('https://rpp.pe/politica/gobierno/francisco-sagasti-martin-vizcarra-cambia-de-version-y-pone-en-tela-de-juicio-todo-el-proceso-de-prueba-de-las-vacunas-noticia-1320932')
    print(val)
if __name__ == '__main__':
main() | [
"falcone134@gmail.com"
] | falcone134@gmail.com |
c33243019cc60a6e407e2127e34882f56d46d5ab | f11c1ea81cfbd6af9445abc7f4947a973ee6042f | /src/modules/generic_module/module.py | 824e8ceb7fe73bf81f968b2c4b614be9b5e948bc | [] | no_license | TheBicPen/bic-bot-py | 85ca860d1f7b58d51e36df13a97b12efd01cf093 | 02eff8cd1905621ba55f01bf7efe6268352d3efa | refs/heads/master | 2021-06-16T22:26:44.539560 | 2020-11-14T03:37:03 | 2020-11-14T03:37:03 | 163,704,476 | 0 | 0 | null | 2021-03-24T19:25:05 | 2019-01-01T00:17:52 | Python | UTF-8 | Python | false | false | 1,495 | py |
import module_class
from . import adapter
def module():
    # Build the descriptor for the bot's base module: display name, help
    # text, and the command-word -> adapter-handler dispatch table that the
    # bot matches incoming commands against.
    return module_class.BicBotModule(
        name="Base module",
        module_help_string="This is the base module. It contains commands for managing the bot, and demonstrating some of its functionality",
        command_matches={
            "isbot": adapter.isbot,
            "ping": adapter.ping,
            "version": adapter.version,
            # "settings": adapter.settings,
            "hello": adapter.hello,
            "commit": adapter.commit,
            "nut": adapter.nut,
            "extrathicc": adapter.extrathicc,
            "leet": adapter.leet,
            "keeb": adapter.keeb,
            "callme": adapter.callme,
            "myname": adapter.myname,
            "call": adapter.call,
            "name": adapter.name,
            "deleteuser": adapter.deleteuser,
            "defexplicit": adapter.defexplicit,
            "defpattern": adapter.defpattern})
| [
"mashkal2000@gmail.com"
] | mashkal2000@gmail.com |
7e4b440271ac1722d7e9e00288bad57b3902c1c2 | 4f125d7e4af8d123fe0f7a5a2c81bdd3e7fb34a4 | /tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/tacotron2_evaluator.py | 51fb6c870ac64cc71845e111551a2e925f433a9c | [
"Apache-2.0"
] | permissive | vladimir-dudnik/open_model_zoo | 2f1b03d45664be43b9951658e40447c9d0f82952 | 94a2811917ffa49e69769aa214876b4cb68c089a | refs/heads/master | 2022-12-05T22:34:54.789100 | 2022-09-23T08:20:24 | 2022-09-23T08:20:24 | 202,728,205 | 2 | 2 | Apache-2.0 | 2021-09-15T13:39:23 | 2019-08-16T13:00:36 | Python | UTF-8 | Python | false | false | 27,328 | py | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .text_to_speech_evaluator import TextToSpeechEvaluator, TTSDLSDKModel, TTSOVModel
from .base_models import BaseCascadeModel, BaseONNXModel, create_model
from ...adapters import create_adapter
from ...config import ConfigError
from ...utils import contains_all, sigmoid, generate_layer_name, parse_partial_shape, postprocess_output_name
class Synthesizer(BaseCascadeModel):
    """Three-stage Tacotron2 cascade: encoder -> autoregressive decoder -> postnet.

    Each stage may be backed by a DLSDK, OpenVINO or ONNX Runtime model; the
    concrete wrapper class is chosen per launcher framework from the
    ``*_mapping`` dictionaries built in ``__init__``.
    """
    def __init__(self, network_info, launcher, models_args, adapter_info, is_blob=None, delayed_model_loading=False):
        super().__init__(network_info, launcher)
        parts = ['encoder', 'decoder', 'postnet']
        network_info = self.fill_part_with_model(network_info, parts, models_args, is_blob, delayed_model_loading)
        if not contains_all(network_info, parts) and not delayed_model_loading:
            raise ConfigError('network_info should contain encoder, decoder and postnet fields')
        # Framework name -> concrete wrapper class, one mapping per stage.
        self._encoder_mapping = {
            'dlsdk': EncoderDLSDKModel,
            'openvino': EncoderOpenVINOModel,
            'onnx_runtime': EncoderONNXModel,
        }
        self._decoder_mapping = {
            'dlsdk': DecodeDLSDKModel,
            'openvino': DecodeOpenVINOModel,
            'onnx_runtime': DecoderONNXModel
        }
        self._postnet_mapping = {
            'dlsdk': PostNetDLSDKModel,
            'openvino': PostNetOpenVINOModel,
            'onnx_runtime': PostNetONNXModel
        }
        self.encoder = create_model(network_info['encoder'], launcher, self._encoder_mapping, 'encoder',
                                    delayed_model_loading)
        self.decoder = create_model(network_info['decoder'], launcher, self._decoder_mapping, 'decoder',
                                    delayed_model_loading)
        self.postnet = create_model(network_info['postnet'], launcher, self._postnet_mapping, 'postnet',
                                    delayed_model_loading)
        self.adapter = create_adapter(adapter_info)
        self.with_prefix = False
        self._part_by_name = {'encoder': self.encoder, 'decoder': self.decoder, 'postnet': self.postnet}
        # Hard cap on autoregressive decoder iterations, and the sigmoid
        # threshold on the "finished" gate output that ends generation.
        # NOTE: the config key spelling 'gate_treshold' matches existing configs.
        self.max_decoder_steps = int(network_info.get('max_decoder_steps', 500))
        self.gate_threshold = float(network_info.get('gate_treshold', 0.6))
    def predict(self, identifiers, input_data, input_meta=None, input_names=None, callback=None):
        """Synthesize mel spectrograms for a single utterance.

        Runs the encoder once, then steps the decoder autoregressively and
        periodically flushes chunks of decoder frames through the postnet
        according to ``scheduler``. Returns ``({}, adapter_result)``.
        ``input_names`` is unused here but kept for interface compatibility.
        """
        assert len(identifiers) == 1
        encoder_outputs = self.encoder.predict(identifiers, input_data[0])
        encoder_outputs = send_callback(encoder_outputs, callback)
        postnet_outputs = []
        mel_outputs = []
        n = 0  # decoder frames accumulated since the last postnet flush
        j = 0  # index into `scheduler` for the current chunk size
        # First postnet chunk covers 20 frames, subsequent chunks 10 frames;
        # `offset` extra frames of left context accompany every chunk.
        scheduler = [20] + [10] * 200
        offset = 20
        encoder_output = encoder_outputs[self.encoder.output_mapping['encoder_outputs']]
        feed_dict = self.decoder.init_feed_dict(encoder_output)
        for _ in range(self.max_decoder_steps):
            decoder_outs, feed_dict = self.decoder.predict(identifiers, feed_dict)
            decoder_outs = send_callback(decoder_outs, callback)
            decoder_input = decoder_outs[self.decoder.output_mapping['decoder_input']]
            finished = decoder_outs[self.decoder.output_mapping['finished']]
            # padding for the first chunk for postnet
            if len(mel_outputs) == 0:
                mel_outputs = [decoder_input] * 10
            mel_outputs += [decoder_input]
            n += 1
            if n == scheduler[j]:
                # (batch, mel, time) layout expected by the postnet.
                postnet_input = np.transpose(np.array(mel_outputs[-scheduler[j] - offset:]), (1, 2, 0))
                postnet_outs = self.postnet.predict(identifiers,
                                                    {self.postnet.input_mapping['mel_outputs']: postnet_input})
                postnet_outs = send_callback(postnet_outs, callback)
                postnet_out = postnet_outs[self.postnet.output_mapping['postnet_outputs']]
                for k in range(postnet_out.shape[2]):
                    postnet_outputs.append(postnet_out[:, :, k])
                # yield here
                n = 0
                j += 1
            # process last chunk of frames, that might be shorter that scheduler
            if sigmoid(finished[0][0]) > self.gate_threshold:
                # right padding for the last chunk
                mel_outputs += [mel_outputs[-1]] * 10
                n += 10
                postnet_input = np.transpose(np.array(mel_outputs[-n - offset:]), (1, 2, 0))
                postnet_outs = self.postnet.predict(identifiers,
                                                    {self.postnet.input_mapping['mel_outputs']: postnet_input})
                postnet_outs = send_callback(postnet_outs, callback)
                postnet_out = postnet_outs[self.postnet.output_mapping['postnet_outputs']]
                for k in range(postnet_out.shape[2]):
                    postnet_outputs.append(postnet_out[:, :, k])
                break
        # 22 matches the decoders' n_mel_channels; collapse chunks to one blob.
        out_blob = {'postnet_outputs': np.array(postnet_outputs)[:, 0].reshape(1, -1, 22)}
        return {}, self.adapter.process(out_blob, identifiers, input_meta)
    def load_model(self, network_list, launcher):
        """Load all stage models, then refresh prefixed layer-name mappings."""
        super().load_model(network_list, launcher)
        self.update_inputs_outputs_info()
    def load_network(self, network_list, launcher):
        """Load all stage networks, then refresh prefixed layer-name mappings."""
        super().load_network(network_list, launcher)
        self.update_inputs_outputs_info()
    def update_inputs_outputs_info(self):
        """Detect whether converted models prefixed their layer names
        (e.g. 'encoder_') and propagate the prefix to all three stages."""
        current_name = next(iter(self.encoder.inputs))
        with_prefix = current_name.startswith('encoder_')
        if with_prefix != self.with_prefix:
            self.encoder.update_inputs_outputs_info(with_prefix)
            self.decoder.update_inputs_outputs_info(with_prefix)
            self.postnet.update_inputs_outputs_info(with_prefix)
            self.with_prefix = with_prefix
def send_callback(outs, callback):
    """Forward raw inference results to *callback* (if any) and return the
    processed outputs.

    ``outs`` is either the processed outputs alone, or a
    ``(processed, raw)`` tuple — the raw part is what the callback receives.
    """
    processed, raw = outs if isinstance(outs, tuple) else (outs, outs)
    if callback:
        callback(raw)
    return processed
class EncoderModel:
    """Mixin with encoder-specific input preparation and layer-name handling.

    Concrete subclasses supply ``infer``, ``inputs``, the ``input_mapping`` /
    ``output_mapping`` dictionaries and the ``text_enc_dim`` / ``bert_dim``
    feature sizes used below.
    """
    def predict(self, identifiers, input_data):
        """Prepare the raw annotation data and run a single inference."""
        feed_dict = self.prepare_inputs(input_data)
        return self.infer(feed_dict)
    def prepare_inputs(self, feed):
        """Reshape flat annotation arrays into batched model inputs.

        ``feed`` is positional and is zipped with ``input_mapping`` values in
        insertion order: [text_encoder_outputs, domain, f0s, bert_embedding].
        """
        feed[0] = feed[0].reshape(1, -1, self.text_enc_dim)
        feed[2] = feed[2].reshape(1, -1)
        feed[3] = feed[3].reshape(1, -1, self.bert_dim)
        return dict(zip(self.input_mapping.values(), feed))
    def update_inputs_outputs_info(self, with_prefix):
        """Rewrite layer-name mappings for a model converted with/without the
        'encoder_' name prefix."""
        for input_id, input_name in self.input_mapping.items():
            self.input_mapping[input_id] = generate_layer_name(input_name, 'encoder_', with_prefix)
        if hasattr(self, 'outputs'):
            for out_id, out_name in self.output_mapping.items():
                o_name = postprocess_output_name(
                    out_name, self.outputs, additional_mapping=self.additional_output_mapping, raise_error=False)
                # Fall back to the prefixed variant when the plain name is absent.
                if o_name not in self.outputs:
                    o_name = postprocess_output_name(
                        generate_layer_name(out_name, 'encoder_', with_prefix),
                        self.outputs, additional_mapping=self.additional_output_mapping, raise_error=False)
                self.output_mapping[out_id] = o_name
class DecoderModel:
    """Mixin with the Tacotron2 decoder step logic shared by all backends.

    Concrete subclasses supply ``prepare_inputs``, ``infer``, the
    ``input_mapping`` / ``output_mapping`` dictionaries and the state
    dimensions used by ``init_feed_dict``.
    """
    def predict(self, identifiers, input_data):
        """Run one decoder step.

        Returns ``(outputs, next_feed_dict)`` where ``next_feed_dict`` is the
        input feed updated with the recurrent state produced by this step.
        """
        feed_dict = self.prepare_inputs(input_data)
        outputs = self.infer(feed_dict)
        # `outputs` may be a plain dict or an (outputs, raw_outputs) tuple
        # depending on the backend; prepare_next_state_inputs unwraps the
        # tuple itself, so a single return covers both cases (the original
        # code duplicated the same return in both isinstance branches).
        return outputs, self.prepare_next_state_inputs(feed_dict, outputs)
    def prepare_next_state_inputs(self, feed_dict, outputs):
        """Copy outputs that double as next-step inputs back into the feed."""
        common_layers = set(self.input_mapping).intersection(set(self.output_mapping))
        if isinstance(outputs, tuple):
            outs = outputs[0]
        else:
            outs = outputs
        for common_layer in common_layers:
            feed_dict[self.input_mapping[common_layer]] = outs[self.output_mapping[common_layer]]
        return feed_dict
    def update_inputs_outputs_info(self, with_prefix):
        """Rewrite layer-name mappings for a model converted with/without the
        'decoder_' name prefix."""
        for input_id, input_name in self.input_mapping.items():
            self.input_mapping[input_id] = generate_layer_name(input_name, 'decoder_', with_prefix)
        if hasattr(self, 'outputs'):
            for out_id, out_name in self.output_mapping.items():
                o_name = postprocess_output_name(
                    out_name, self.outputs, additional_mapping=self.additional_output_mapping, raise_error=False)
                if o_name not in self.outputs:
                    # NOTE(review): the fallback prefixes o_name here, while the
                    # encoder/postnet variants prefix out_name — confirm intentional.
                    o_name = postprocess_output_name(
                        generate_layer_name(o_name, 'decoder_', with_prefix),
                        self.outputs, additional_mapping=self.additional_output_mapping, raise_error=False)
                self.output_mapping[out_id] = o_name
    def init_feed_dict(self, encoder_output):
        """Build the zero-initialised recurrent state for the first decoder step.

        ``encoder_output`` is indexed as (batch, time, features); the attention
        weight vectors are sized by its time dimension.
        """
        decoder_input = np.zeros((1, self.n_mel_channels), dtype=np.float32)
        attention_hidden = np.zeros((1, self.attention_rnn_dim), dtype=np.float32)
        attention_cell = np.zeros((1, self.attention_rnn_dim), dtype=np.float32)
        decoder_hidden = np.zeros((1, self.decoder_rnn_dim), dtype=np.float32)
        decoder_cell = np.zeros((1, self.decoder_rnn_dim), dtype=np.float32)
        attention_weights = np.zeros((1, encoder_output.shape[1]), dtype=np.float32)
        attention_weights_cum = np.zeros((1, encoder_output.shape[1]), dtype=np.float32)
        attention_context = np.zeros((1, self.encoder_embedding_dim), dtype=np.float32)
        return {
            self.input_mapping['decoder_input']: decoder_input,
            self.input_mapping['attention_hidden']: attention_hidden,
            self.input_mapping['attention_cell']: attention_cell,
            self.input_mapping['decoder_hidden']: decoder_hidden,
            self.input_mapping['decoder_cell']: decoder_cell,
            self.input_mapping['attention_weights']: attention_weights,
            self.input_mapping['attention_weights_cum']: attention_weights_cum,
            self.input_mapping['attention_context']: attention_context,
            self.input_mapping['encoder_outputs']: encoder_output
        }
class PostNetModel:
    """Common postnet behaviour: the inference entry point plus renaming of
    input/output layers when model conversion added a 'postnet_' prefix."""
    def predict(self, identifiers, input_data):
        """Run the postnet on prepared inputs and return the raw outputs."""
        return self.infer(self.prepare_inputs(input_data))
    def update_inputs_outputs_info(self, with_prefix):
        """Rewrite the layer-name mappings for a (possibly) prefixed model."""
        for key in list(self.input_mapping):
            self.input_mapping[key] = generate_layer_name(self.input_mapping[key], 'postnet_', with_prefix)
        if not hasattr(self, 'outputs'):
            return
        for key in list(self.output_mapping):
            raw_name = self.output_mapping[key]
            resolved = postprocess_output_name(
                raw_name, self.outputs, additional_mapping=self.additional_output_mapping, raise_error=False)
            if resolved not in self.outputs:
                # Fall back to the prefixed variant when the plain name is absent.
                resolved = postprocess_output_name(
                    generate_layer_name(raw_name, 'postnet_', with_prefix),
                    self.outputs, additional_mapping=self.additional_output_mapping, raise_error=False)
            self.output_mapping[key] = resolved
class EncoderDLSDKModel(EncoderModel, TTSDLSDKModel):
    """Encoder stage backed by the DLSDK (Inference Engine) launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic input/output id -> layer name in the loaded network.
        self.input_mapping = {
            'text_encoder_outputs': 'text_encoder_outputs',
            'domain': 'domain',
            'f0s': 'f0s',
            'bert_embedding': 'bert_embedding'
        }
        self.output_mapping = {'encoder_outputs': 'encoder_outputs'}
        # Feature sizes of the text-encoder output and BERT embedding inputs.
        self.text_enc_dim = 384
        self.bert_dim = 768
    def prepare_inputs(self, feed):
        """Reshape inputs; for a static network whose loaded shape does not
        match the incoming sequence length, reshape the network first."""
        feed_dict = super().prepare_inputs(feed)
        if (
            self.input_mapping['text_encoder_outputs'] in self.dynamic_inputs or
            feed_dict[self.input_mapping['text_encoder_outputs']].shape !=
            self.inputs[self.input_mapping['text_encoder_outputs']].input_data.shape
        ):
            if not self.is_dynamic:
                new_shapes = {}
                for input_name in self.inputs:
                    new_shapes[input_name] = (
                        feed_dict[input_name].shape if input_name in feed_dict else self.inputs[input_name].shape)
                self._reshape_input(new_shapes)
        return feed_dict
    def infer(self, feed_dict):
        """Synchronous inference via the executable network."""
        return self.exec_network.infer(feed_dict)
class EncoderOpenVINOModel(EncoderModel, TTSOVModel):
    """Encoder stage backed by the OpenVINO 2.0 API launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic input/output id -> layer name in the loaded model.
        self.input_mapping = {
            'text_encoder_outputs': 'text_encoder_outputs',
            'domain': 'domain',
            'f0s': 'f0s',
            'bert_embedding': 'bert_embedding'
        }
        # OpenVINO 2.0 result tensors carry a '/sink_port_N' suffix.
        self.output_mapping = {'encoder_outputs': 'encoder_outputs/sink_port_0'}
        # Feature sizes of the text-encoder output and BERT embedding inputs.
        self.text_enc_dim = 384
        self.bert_dim = 768
    def prepare_inputs(self, feed):
        """Reshape inputs; for a static model whose loaded shape does not
        match the incoming sequence length, reshape the model first."""
        feed_dict = super().prepare_inputs(feed)
        if (
            self.input_mapping['text_encoder_outputs'] in self.dynamic_inputs or
            feed_dict[self.input_mapping['text_encoder_outputs']].shape !=
            parse_partial_shape(self.inputs[self.input_mapping['text_encoder_outputs']].shape)
        ):
            if not self.is_dynamic:
                new_shapes = {}
                for input_name in self.inputs:
                    new_shapes[input_name] = (
                        feed_dict[input_name].shape if input_name in feed_dict else parse_partial_shape(
                            self.inputs[input_name].shape))
                self._reshape_input(new_shapes)
        return feed_dict
class EncoderONNXModel(BaseONNXModel, EncoderModel):
    """Encoder stage backed by ONNX Runtime."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic input/output id -> tensor name in the ONNX graph.
        self.input_mapping = {
            'text_encoder_outputs': 'text_encoder_outputs',
            'domain': 'domain',
            'f0s': 'f0s',
            'bert_embedding': 'bert_embedding'
        }
        self.output_mapping = {'encoder_outputs': 'encoder_outputs'}
        # Feature sizes of the text-encoder output and BERT embedding inputs.
        self.text_enc_dim = 384
        self.bert_dim = 768
        outputs = self.inference_session.get_outputs()
        self.output_names = [output.name for output in outputs]
    @property
    def inputs(self):
        """Mapping of input tensor name -> shape reported by the session."""
        inputs_info = self.inference_session.get_inputs()
        return {input_layer.name: input_layer.shape for input_layer in inputs_info}
    def infer(self, feed_dict):
        """Run the session and return outputs keyed by tensor name."""
        outs = self.inference_session.run(self.output_names, feed_dict)
        return dict(zip(self.output_names, outs))
class DecoderONNXModel(BaseONNXModel, DecoderModel):
    """Decoder stage backed by ONNX Runtime."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic state id -> input tensor name in the ONNX graph.
        self.input_mapping = {
            'decoder_input': 'decoder_input',
            'attention_hidden': 'attention_hidden',
            'attention_cell': 'attention_cell',
            'decoder_hidden': 'decoder_hidden',
            'decoder_cell': 'decoder_cell',
            'attention_weights': 'attention_weights',
            'attention_weights_cum': 'attention_weights_cum',
            'attention_context': 'attention_context',
            'encoder_outputs': 'encoder_outputs'
        }
        # Outputs keep the auto-generated numeric node names from conversion.
        self.output_mapping = {
            'finished': '109',
            'decoder_input': '108',
            'attention_hidden': '68',
            'attention_cell': '66',
            'decoder_hidden': '106',
            'decoder_cell': '104',
            'attention_weights': '85',
            'attention_weights_cum': '89',
            'attention_context': '88'
        }
        # Recurrent state sizes used by DecoderModel.init_feed_dict.
        self.n_mel_channels = 22
        self.attention_rnn_dim = 800
        self.encoder_embedding_dim = 512
        self.decoder_rnn_dim = 800
        # How to fill model inputs not produced by a previous step:
        # deterministic zeros or seeded random values.
        self.additional_inputs_filling = network_info.get('additional_input_filling', 'zeros')
        if self.additional_inputs_filling not in ['zeros', 'random']:
            raise ConfigError(
                'invalid setting for additional_inputs_filling: {}'.format(self.additional_inputs_filling)
            )
        self.seed = int(network_info.get('seed', 666))
        if self.additional_inputs_filling == 'random':
            np.random.seed(self.seed)
        outputs = self.inference_session.get_outputs()
        self.output_names = [output.name for output in outputs]
    @property
    def inputs(self):
        """Mapping of input tensor name -> shape reported by the session."""
        inputs_info = self.inference_session.get_inputs()
        return {input_layer.name: input_layer.shape for input_layer in inputs_info}
    def infer(self, feed_dict):
        """Run the session and return outputs keyed by tensor name."""
        outs = self.inference_session.run(self.output_names, feed_dict)
        return dict(zip(self.output_names, outs))
    @staticmethod
    def prepare_inputs(feed_dict):
        """Feeds are already keyed by tensor name; pass through unchanged."""
        return feed_dict
class DecodeDLSDKModel(DecoderModel, TTSDLSDKModel):
    """Decoder stage backed by the DLSDK (Inference Engine) launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic state id -> input layer name in the loaded network.
        self.input_mapping = {
            'decoder_input': 'decoder_input',
            'attention_hidden': 'attention_hidden',
            'attention_cell': 'attention_cell',
            'decoder_hidden': 'decoder_hidden',
            'decoder_cell': 'decoder_cell',
            'attention_weights': 'attention_weights',
            'attention_weights_cum': 'attention_weights_cum',
            'attention_context': 'attention_context',
            'encoder_outputs': 'encoder_outputs'
        }
        # Outputs keep the auto-generated numeric node names from conversion.
        self.output_mapping = {
            'finished': '109',
            'decoder_input': '108',
            'attention_hidden': '68',
            'attention_cell': '66',
            'decoder_hidden': '106',
            'decoder_cell': '104',
            'attention_weights': '85',
            'attention_weights_cum': '89',
            'attention_context': '88'
        }
        # Recurrent state sizes used by DecoderModel.init_feed_dict.
        self.n_mel_channels = 22
        self.attention_rnn_dim = 800
        self.encoder_embedding_dim = 512
        self.decoder_rnn_dim = 800
        # How to fill network inputs not produced by a previous step:
        # deterministic zeros or seeded random values.
        self.additional_inputs_filling = network_info.get('additional_input_filling', 'zeros')
        if self.additional_inputs_filling not in ['zeros', 'random']:
            raise ConfigError(
                'invalid setting for additional_inputs_filling: {}'.format(self.additional_inputs_filling)
            )
        self.seed = int(network_info.get('seed', 666))
        if self.additional_inputs_filling == 'random':
            np.random.seed(self.seed)
    def infer(self, feed_dict):
        """Synchronous inference via the executable network."""
        return self.exec_network.infer(feed_dict)
    def prepare_inputs(self, feed_dict):
        """Remap semantic keys to layer names, reshape a static network when
        the encoder sequence length changed, and fill any extra inputs."""
        if next(iter(self.input_mapping.values())) not in feed_dict:
            feed_dict_ = {self.input_mapping[input_name]: data for input_name, data in feed_dict.items()}
            feed_dict = feed_dict_
        if (
            self.input_mapping['encoder_outputs'] in self.dynamic_inputs or
            feed_dict[self.input_mapping['encoder_outputs']].shape !=
            self.inputs[self.input_mapping['encoder_outputs']].input_data.shape
        ):
            if not self.is_dynamic:
                new_shapes = {}
                for input_name in self.inputs:
                    new_shapes[input_name] = (
                        feed_dict[input_name].shape if input_name in feed_dict else
                        self.inputs[input_name].input_data.shape)
                self._reshape_input(new_shapes)
        # Fill network inputs the pipeline does not provide (see
        # additional_inputs_filling set in __init__).
        if len(feed_dict) != len(self.inputs):
            extra_inputs = set(self.inputs).difference(set(feed_dict))
            for input_layer in extra_inputs:
                shape = self.inputs[input_layer].input_data.shape
                if self.additional_inputs_filling == 'zeros':
                    feed_dict[input_layer] = np.zeros(shape, dtype=np.float32)
                else:
                    feed_dict[input_layer] = np.random.uniform(size=shape)
        return feed_dict
class DecodeOpenVINOModel(DecoderModel, TTSOVModel):
    """Decoder stage backed by the OpenVINO 2.0 API launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic state id -> input layer name in the loaded model.
        self.input_mapping = {
            'decoder_input': 'decoder_input',
            'attention_hidden': 'attention_hidden',
            'attention_cell': 'attention_cell',
            'decoder_hidden': 'decoder_hidden',
            'decoder_cell': 'decoder_cell',
            'attention_weights': 'attention_weights',
            'attention_weights_cum': 'attention_weights_cum',
            'attention_context': 'attention_context',
            'encoder_outputs': 'encoder_outputs'
        }
        # Numeric node names from conversion plus the OpenVINO 2.0
        # '/sink_port_N' result suffix.
        self.output_mapping = {
            'finished': '109/sink_port_0',
            'decoder_input': '108/sink_port_0',
            'attention_hidden': '68/sink_port_0',
            'attention_cell': '66/sink_port_0',
            'decoder_hidden': '106/sink_port_0',
            'decoder_cell': '104/sink_port_0',
            'attention_weights': '85/sink_port_0',
            'attention_weights_cum': '89/sink_port_0',
            'attention_context': '88/sink_port_0'
        }
        # Recurrent state sizes used by DecoderModel.init_feed_dict.
        self.n_mel_channels = 22
        self.attention_rnn_dim = 800
        self.encoder_embedding_dim = 512
        self.decoder_rnn_dim = 800
        # How to fill model inputs not produced by a previous step:
        # deterministic zeros or seeded random values.
        self.additional_inputs_filling = network_info.get('additional_input_filling', 'zeros')
        if self.additional_inputs_filling not in ['zeros', 'random']:
            raise ConfigError(
                'invalid setting for additional_inputs_filling: {}'.format(self.additional_inputs_filling)
            )
        self.seed = int(network_info.get('seed', 666))
        if self.additional_inputs_filling == 'random':
            np.random.seed(self.seed)
    def prepare_inputs(self, feed_dict):
        """Remap semantic keys to layer names, reshape a static model when the
        encoder sequence length changed, and fill any extra inputs."""
        if next(iter(self.input_mapping.values())) not in feed_dict:
            feed_dict_ = {self.input_mapping[input_name]: data for input_name, data in feed_dict.items()}
            feed_dict = feed_dict_
        if (
            self.input_mapping['encoder_outputs'] in self.dynamic_inputs or
            feed_dict[self.input_mapping['encoder_outputs']].shape !=
            parse_partial_shape(self.inputs[self.input_mapping['encoder_outputs']].get_partial_shape())
        ):
            if not self.is_dynamic:
                new_shapes = {}
                for input_name in self.inputs:
                    new_shapes[input_name] = (
                        feed_dict[input_name].shape if input_name in feed_dict else
                        parse_partial_shape(self.inputs[input_name].get_partial_shape()))
                self._reshape_input(new_shapes)
        # Fill model inputs the pipeline does not provide (see
        # additional_inputs_filling set in __init__).
        if len(feed_dict) != len(self.inputs):
            extra_inputs = set(self.inputs).difference(set(feed_dict))
            for input_layer in extra_inputs:
                shape = parse_partial_shape(self.inputs[input_layer].get_partial_shape())
                if self.additional_inputs_filling == 'zeros':
                    feed_dict[input_layer] = np.zeros(shape, dtype=np.float32)
                else:
                    feed_dict[input_layer] = np.random.uniform(size=shape)
        return feed_dict
class PostNetONNXModel(BaseONNXModel, PostNetModel):
    """Postnet stage backed by ONNX Runtime."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic id -> tensor name in the ONNX graph.
        self.input_mapping = {'mel_outputs': 'mel_outputs'}
        self.output_mapping = {'postnet_outputs': 'postnet_outputs'}
        outputs = self.inference_session.get_outputs()
        self.output_names = [output.name for output in outputs]
    @staticmethod
    def prepare_inputs(feed_dict):
        """Feeds are already keyed by tensor name; pass through unchanged."""
        return feed_dict
    @property
    def inputs(self):
        """Mapping of input tensor name -> shape reported by the session."""
        inputs_info = self.inference_session.get_inputs()
        return {input_layer.name: input_layer.shape for input_layer in inputs_info}
    def infer(self, feed_dict):
        """Run the session and return outputs keyed by tensor name."""
        outs = self.inference_session.run(self.output_names, feed_dict)
        return dict(zip(self.output_names, outs))
class PostNetDLSDKModel(PostNetModel, TTSDLSDKModel):
    """Postnet stage backed by the DLSDK (Inference Engine) launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic id -> layer name in the loaded network.
        self.input_mapping = {'mel_outputs': 'mel_outputs'}
        self.output_mapping = {'postnet_outputs': 'postnet_outputs'}
    def infer(self, feed_dict):
        """Synchronous inference via the executable network."""
        return self.exec_network.infer(feed_dict)
    def prepare_inputs(self, feed_dict):
        """Reshape the network when the mel chunk length differs from the
        loaded shape, and remap semantic keys to layer names if needed."""
        input_shape = next(iter(feed_dict.values())).shape
        if input_shape != tuple(self.inputs[self.input_mapping['mel_outputs']].input_data.shape):
            self._reshape_input({self.input_mapping['mel_outputs']: input_shape})
        if next(iter(self.input_mapping.values())) not in feed_dict:
            return {self.input_mapping[input_name]: data for input_name, data in feed_dict.items()}
        return feed_dict
class PostNetOpenVINOModel(PostNetModel, TTSOVModel):
    """Postnet stage backed by the OpenVINO 2.0 API launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        super().__init__(network_info, launcher, suffix, delayed_model_loading)
        # Semantic id -> layer name; OpenVINO 2.0 results carry '/sink_port_N'.
        self.input_mapping = {'mel_outputs': 'mel_outputs'}
        self.output_mapping = {'postnet_outputs': 'postnet_outputs/sink_port_0'}
    def prepare_inputs(self, feed_dict):
        """Reshape the model when the mel chunk length differs from the loaded
        shape, and remap semantic keys to layer names if needed."""
        input_shape = next(iter(feed_dict.values())).shape
        if input_shape != parse_partial_shape(self.inputs[self.input_mapping['mel_outputs']].get_partial_shape()):
            self._reshape_input({self.input_mapping['mel_outputs']: input_shape})
        if next(iter(self.input_mapping.values())) not in feed_dict:
            return {self.input_mapping[input_name]: data for input_name, data in feed_dict.items()}
        return feed_dict
class Tacotron2Evaluator(TextToSpeechEvaluator):
    """Custom evaluator wiring the Tacotron2 Synthesizer cascade into the
    generic text-to-speech evaluation pipeline."""
    @classmethod
    def from_configs(cls, config, delayed_model_loading=False, orig_config=None):
        """Build the evaluator (dataset, launcher and cascade model) from the
        evaluation config; 'adapter' in the config selects output conversion."""
        dataset_config, launcher, _ = cls.get_dataset_and_launcher_info(config)
        adapter_info = config['adapter']
        model = Synthesizer(
            config.get('network_info', {}), launcher, config.get('_models', []), adapter_info,
            config.get('_model_is_blob'), delayed_model_loading
        )
        return cls(dataset_config, launcher, model, orig_config)
| [
"noreply@github.com"
] | vladimir-dudnik.noreply@github.com |
bd5495c4a3cd0e829dc596307d57419fc6cee8ed | aaa04c1cde44b0fa5297412f6ea09b1238e65493 | /users/forms.py | 872331d795cb58084dcc506ca4e3a59a08b00742 | [] | no_license | adityabohra007/Today-Entry | fae7be96288c0116088eff504190e07fdadd7fb3 | d9a934b96d39441ceeb2f87066a6d457027d6fcf | refs/heads/master | 2020-03-08T09:13:17.727613 | 2018-04-05T15:47:34 | 2018-04-05T15:47:34 | 128,041,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm,UserChangeForm
from .models import CustomUser,Post
from django.utils import timezone
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form for the CustomUser model, exposing username and email."""
    class Meta(UserCreationForm.Meta):
        model=CustomUser
        fields=('username','email')
class CustomUserChangeForm(UserChangeForm):
    """Profile-edit form for CustomUser, reusing the stock UserChangeForm fields."""
    class Meta:
        model=CustomUser
        fields=UserChangeForm.Meta.fields
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing a Post.
    NOTE(review): the explicit CharField declarations below override the
    model-derived form fields, so any widgets/validation the Post model would
    provide are lost; 'Private' in particular is rendered as free text —
    confirm this is intentional rather than relying on Meta.fields alone.
    """
    Title=forms.CharField()
    Category=forms.CharField()
    Content=forms.CharField()
    Private=forms.CharField()
    class Meta:
        model = Post
        fields = ['Title','Category','Content','Private',]
| [
"abohra@localhost.localdomain"
] | abohra@localhost.localdomain |
93d1a025dea19bab3a773d68e348547a7a9a15ff | e433829d7d17606c9402fc1df069542dcd1f2012 | /flights/tests.py | 050d6f74dd31f4c7190d1800e8f3d8a53e02bdde | [] | no_license | shirleynelson/airline | de84062b24fe352ac5042a73948633f97c30746c | 7e6e0543eaed05c5cc985ffa504d776f4cc2e080 | refs/heads/master | 2023-01-13T11:37:12.337024 | 2019-08-16T03:22:43 | 2019-08-16T03:22:43 | 202,600,787 | 0 | 0 | null | 2022-12-27T15:36:29 | 2019-08-15T19:36:04 | HTML | UTF-8 | Python | false | false | 3,667 | py | import os
from django.db.models import Max
from django.test import Client, TestCase
from .models import Airport, Flight, Passenger, PageView
from .database import info
# Create your tests here.
class FlightsTestCase(TestCase):
    """Tests for the Airport/Flight models and the flight list/detail views.
    Fixtures: airports AAA and BBB, a flight AAA->BBB (valid) and a flight
    AAA->AAA (invalid: same origin and destination).
    """
    def setUp(self):
        # Create airports.
        a1 = Airport.objects.create(code="AAA", city="City A")
        a2 = Airport.objects.create(code="BBB", city="City B")
        # Create flights.
        Flight.objects.create(origin=a1, destination=a2, duration=100)
        Flight.objects.create(origin=a1, destination=a1, duration=200)
    def test_departures_count(self):
        """AAA is the origin of both fixture flights."""
        a = Airport.objects.get(code="AAA")
        self.assertEqual(a.departures.count(), 2)
    def test_arrivals_count(self):
        """Only the AAA->AAA flight arrives at AAA."""
        a = Airport.objects.get(code="AAA")
        self.assertEqual(a.arrivals.count(), 1)
    def test_valid_flight(self):
        """A flight between two different airports is valid."""
        a1 = Airport.objects.get(code="AAA")
        a2 = Airport.objects.get(code="BBB")
        f = Flight.objects.get(origin=a1, destination=a2)
        self.assertTrue(f.is_valid_flight())
    def test_invalid_flight_destination(self):
        """A flight whose origin equals its destination is invalid."""
        a1 = Airport.objects.get(code="AAA")
        f = Flight.objects.get(origin=a1, destination=a1)
        self.assertFalse(f.is_valid_flight())
    def test_invalid_flight_duration(self):
        """A negative duration makes an otherwise valid flight invalid."""
        a1 = Airport.objects.get(code="AAA")
        a2 = Airport.objects.get(code="BBB")
        f = Flight.objects.get(origin=a1, destination=a2)
        f.duration = -100
        self.assertFalse(f.is_valid_flight())
    def test_index(self):
        """The index view lists both fixture flights."""
        c = Client()
        response = c.get("/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["flights"].count(), 2)
    def test_valid_flight_page(self):
        """The detail page for an existing flight returns 200."""
        a1 = Airport.objects.get(code="AAA")
        f = Flight.objects.get(origin=a1, destination=a1)
        c = Client()
        response = c.get(f"/{f.id}")
        self.assertEqual(response.status_code, 200)
    def test_invalid_flight_page(self):
        """A flight id beyond the highest existing one returns 404."""
        max_id = Flight.objects.all().aggregate(Max("id"))["id__max"]
        c = Client()
        response = c.get(f"/{max_id + 1}")
        self.assertEqual(response.status_code, 404)
    def test_flight_page_passengers(self):
        """A booked passenger appears in the detail page context."""
        f = Flight.objects.get(pk=1)
        p = Passenger.objects.create(first="Alice", last="Adams")
        f.passengers.add(p)
        c = Client()
        response = c.get(f"/{f.id}")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["passengers"].count(), 1)
    def test_flight_page_non_passengers(self):
        """An unbooked passenger appears in the non_passengers context."""
        f = Flight.objects.get(pk=1)
        p = Passenger.objects.create(first="Alice", last="Adams")
        c = Client()
        response = c.get(f"/{f.id}")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["non_passengers"].count(), 1)
# These basic tests are to be used as an example for running tests in S2I
# and OpenShift when building an application image.
class PageViewModelTest(TestCase):
    """Round-trips a PageView record through the ORM."""
    def test_viewpage_model(self):
        pageview = PageView.objects.create(hostname='localhost')
        pagetest = PageView.objects.get(hostname='localhost')
        self.assertEqual(pagetest.hostname, 'localhost')
class PageViewTest(TestCase):
    """Smoke test: the index view responds with HTTP 200."""
    def test_index(self):
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 200)
class DbEngine(TestCase):
    """Checks that database settings reflect the ENGINE environment variable."""
    def setUp(self):
        # Remember the pre-test value so the override cannot leak into other
        # tests; addCleanup runs even if the test body fails.
        self._saved_engine = os.environ.get('ENGINE')
        self.addCleanup(self._restore_engine)
        os.environ['ENGINE'] = 'SQLite'
    def _restore_engine(self):
        """Restore os.environ['ENGINE'] to its pre-test state."""
        if self._saved_engine is None:
            os.environ.pop('ENGINE', None)
        else:
            os.environ['ENGINE'] = self._saved_engine
    def test_engine_setup(self):
        """info() reports the SQLite engine when ENGINE=SQLite."""
        settings = info()
        self.assertEqual(settings['engine'], 'SQLite')
        self.assertEqual(settings['is_sqlite'], True)
| [
"star2jem@gmail.com"
] | star2jem@gmail.com |
78b98468b9a7edb2ea24225120ae75d69714265e | 3359d11a2452d268d4166b91d8d6257f64cb934e | /hw1/pacmanAgents.py | 6a601b13d8c24f141eb68b1db56deababdd29def | [] | no_license | Calvin-Zikakis/Intro-to-AI-HW | 7744ef394e909fb89741f6cd737d27a2d1cef68a | 52891f059a396a4f8b4b70a3c81fe11e94a082bb | refs/heads/master | 2020-09-04T00:38:58.719120 | 2019-12-17T18:28:50 | 2019-12-17T18:28:50 | 219,619,406 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | # pacmanAgents.py
# ---------------
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from pacman import Directions
from game import Agent
import random
import game
import util
class LeftTurnAgent(game.Agent):
    """An agent that turns left at every opportunity."""
    def getAction(self, state):
        """Pick a move, preferring left turn, then straight, right, U-turn."""
        legal_moves = state.getLegalPacmanActions()
        heading = state.getPacmanState().configuration.direction
        if heading == Directions.STOP:
            heading = Directions.NORTH
        left_turn = Directions.LEFT[heading]
        # Preference order: left turn, straight ahead, right turn, U-turn.
        for move in (left_turn, heading, Directions.RIGHT[heading], Directions.LEFT[left_turn]):
            if move in legal_moves:
                return move
        return Directions.STOP
class GreedyAgent(Agent):
    """Agent that greedily picks the action whose successor state scores best."""
    def __init__(self, evalFn="scoreEvaluation"):
        self.evaluationFunction = util.lookup(evalFn, globals())
        assert self.evaluationFunction is not None
    def getAction(self, state):
        """Return a best-scoring legal action (never STOP), ties broken randomly."""
        legal = state.getLegalPacmanActions()
        if Directions.STOP in legal:
            legal.remove(Directions.STOP)
        # Score the successor state reached by each legal action.
        scored = []
        for action in legal:
            successor = state.generateSuccessor(0, action)
            scored.append((self.evaluationFunction(successor), action))
        bestScore = max(scored)[0]
        bestActions = [action for score, action in scored if score == bestScore]
        return random.choice(bestActions)
def scoreEvaluation(state):
    """Default evaluation function: the game score of the state itself."""
    return state.getScore()
| [
"cazi6864@colorado.edu"
] | cazi6864@colorado.edu |
f3fa6a313038553ec69e6b0fac7b52445884eef9 | 5a394c53a7099bc871401e32cf3fc782546f9f7d | /.history/lab1_d/lab1/exam/test_20210203181948.py | 73102cdae2950dabaa44d91e8cca8d6dfdad27c3 | [] | no_license | ajaygc95/advPy | fe32d67ee7910a1421d759c4f07e183cb7ba295b | 87d38a24ef02bcfe0f050840179c6206a61384bd | refs/heads/master | 2023-03-27T10:10:25.668371 | 2021-03-23T08:28:44 | 2021-03-23T08:28:44 | 334,614,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from collections import defaultdict, namedtuple
class Temperature:
def __init__(self):
self.data = defaultdict()
def readdata(self):
with open('temperature.csv',r):
| [
"gcajay95@gmail.com"
] | gcajay95@gmail.com |
17fe788185e85ee9c011ec72d5fe37a0a0d361ae | 6ad3a712468f88c7fb9ded5f2feee6246a5954f2 | /train.py | 408c9f9d560d320de178d1c959375396e3b90ee5 | [] | no_license | uncanny-valley/openai-car-racing-agent | 52b48c8d6906d329d47acabaf9fe091c434fc5f7 | 6648b282057bfea9cf5f6394ffd71c5c33aa0d71 | refs/heads/master | 2023-08-24T10:25:31.068522 | 2021-11-03T18:32:30 | 2021-11-03T18:32:30 | 390,816,188 | 0 | 0 | null | 2021-10-31T23:09:46 | 2021-07-29T18:26:26 | Python | UTF-8 | Python | false | false | 6,323 | py | from argparse import ArgumentParser
import logging
import numpy as np
import gym
from gym.envs.box2d import CarRacing, CarRacingV1
from pyvirtualdisplay import Display
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.optimizers import Adam
from agent import CarRacingV0Agent, CarRacingV1Agent
from experiment import Experiment
from preprocessing import SubframeQueue
display = Display(visible=0, size=(1400, 900))
display.start()
def main():
    """Entry point: parse CLI hyperparameters, build the CarRacing environment
    and DQN-style agent, then run the training experiment.
    """
    logging.basicConfig(level=logging.INFO)

    # ---- command-line interface -------------------------------------------
    parser = ArgumentParser()
    parser.add_argument('--env', type=int, default=1, help='Either CarRacing-v0 or CarRacing-v1 OpenAI gym environment')
    parser.add_argument('--rng', type=int, default=0, help='Random seed to reproduce agent stochasticity')
    parser.add_argument('-m', '--model', type=str, help='Path to load an existing model')
    parser.add_argument('-n', '--num_epochs', type=int, default=300, help='The number of epoch with which to train the agent')
    parser.add_argument('--steps_per_epoch', type=int, default=5000, help='The number of steps per epoch with which to train the agent')
    parser.add_argument('-r', '--render', action='store_true', help='Whether to render the animated display')
    parser.add_argument('-e', '--epsilon', type=np.float32, default=1., help='Initial epsilon for the agent')
    parser.add_argument('-s', '--replay-buffer-size', type=int, default=10000, help='The size of the experience replay memory buffer')
    parser.add_argument('-b', '--minibatch-size', type=int, default=128, help='The size of the minibatch that we will use to intermittently train the agent')
    parser.add_argument('-g', '--discount-factor', type=np.float32, default=0.99, help='How much the agent considers long-term future rewards relative to immediate rewards [0, 1]')
    parser.add_argument('-l', '--learning-rate', type=np.float32, default=1e-3, help='How sensitive the Q-network weights are to estimated errors during training [0, 1]')
    parser.add_argument('-p', '--phi-length', type=int, default=3, help='The number of game frames to stack together, given that the environment doesn\'t provide this automatically')
    parser.add_argument('--num-frames-to-skip', type=np.int64, default=3, help='Number of frames to skip. For example, if set to 3, wes process every 4th frame')
    parser.add_argument('--epsilon-min', type=np.float32, default=0.1, help='A lower bound for the agent\'s decaying epsilon value')
    parser.add_argument('--epsilon-decay', type=np.float32, default=0.9999, help='The proportion by which to scale the current epsilon down [0, 1]')
    parser.add_argument('-u', '--update-frequency', type=np.int64, default=2, help='How often to update the target model\'s weights in epochs')
    parser.add_argument('--save-frequency', type=int, default=25, help='How often to save the target model in epochs')
    parser.add_argument('--test-frequency', type=int, default=25, help='How often to test the agent on a hold-out set of states, in epochs')
    parser.add_argument('--update-by-episodes', action='store_true', help='Whether the specified update frequency is in episodes rather than total frames')
    parser.add_argument('--initial-epoch', type=int, default=0, help='The starting epoch')
    parser.add_argument('--initial-episode', type=int, default=0, help='The starting episode if we are running an existing model')
    parser.add_argument('--nu', type=int, default=-1, help='The maximum number of consecutive negative rewards received before exiting the episode')
    parser.add_argument('--nu-starting-frame', type=int, default=50, help='The number of frames that must complete before considering nu in early terminating the episode')
    parser.add_argument('--memory', type=str, help='Path to saved experience replay memory (.pkl)')
    args = parser.parse_args()

    # Default hyperparameters
    # (forwarded as keyword arguments into the agent constructor below)
    hyperparameters = {
        'initial_epsilon': args.epsilon,
        'model': args.model,
        'epsilon_min': args.epsilon_min,
        'epsilon_decay': args.epsilon_decay,
        'rng': args.rng,
        'nu': args.nu,
        'nu_starting_frame': args.nu_starting_frame,
        'num_epochs': args.num_epochs,
        'steps_per_epoch': args.steps_per_epoch,
        'replay_buffer_size': args.replay_buffer_size,
        'minibatch_size': args.minibatch_size,
        'discount_factor': args.discount_factor,
        'optimizer': Adam(learning_rate=args.learning_rate, clipnorm=1.0),
        'loss_function': MeanSquaredError(reduction='auto', name='mean_squared_error'),
        'phi_length': args.phi_length,
        'num_frames_to_skip': args.num_frames_to_skip,
        'update_by_episodes': args.update_by_episodes,
        'update_frequency': args.update_frequency,
        'save_frequency': args.save_frequency,
        'checkpoint_directory': './checkpoint',
        'log_directory': './log',
    }

    # Environment/agent selection: 0 -> stock gym CarRacing-v0, otherwise the
    # customised CarRacingV1 build (grayscale, discretised actions, stacked frames).
    if args.env == 0:
        env = gym.make('CarRacing-v0')
        agent = CarRacingV0Agent(env=env, **hyperparameters)
    else:
        env = CarRacingV1(
            grayscale=1,
            show_info_panel=0,
            discretize_actions='hard',
            frames_per_state=4,
            num_lanes=1,
            num_tracks=1
        )
        agent = CarRacingV1Agent(env=env, **hyperparameters)

    # Optionally resume training from an existing model and/or a saved
    # experience-replay memory (the prints show memory size before/after load).
    if args.model is not None:
        agent.load_model(args.model)
    if args.memory:
        print(len(agent.replay_memory))
        agent.load_memory(args.memory)
        print(len(agent.replay_memory))

    experiment = Experiment(env=env, env_version=args.env, agent=agent, render=args.render, frames_to_skip=args.num_frames_to_skip, phi_length=args.phi_length,
                            num_epochs=args.num_epochs, num_steps_per_epoch=args.steps_per_epoch, target_model_update_frequency=args.update_frequency,
                            initial_epoch=args.initial_epoch, initial_episode=args.initial_episode, model_test_frequency=args.test_frequency,
                            model_save_frequency=args.save_frequency, target_model_update_by_episodes=args.update_by_episodes, checkpoint_directory=hyperparameters['checkpoint_directory'],
                            nu=args.nu, nu_starting_frame=args.nu_starting_frame)
    experiment.run()
    env.close()


if __name__ == '__main__':
    main()
"brandon.fujii.diaz@gmail.com"
] | brandon.fujii.diaz@gmail.com |
82af9214e5be3dff48f2557bc198e66e6c7f4b38 | 1061216c2c33c1ed4ffb33e6211565575957e48f | /python-blueplanet/app/openapi_server/models/messages.py | d2be98d214f7226e95c1ec1fa6f9e267b77439f7 | [] | no_license | MSurfer20/test2 | be9532f54839e8f58b60a8e4587348c2810ecdb9 | 13b35d72f33302fa532aea189e8f532272f1f799 | refs/heads/main | 2023-07-03T04:19:57.548080 | 2021-08-11T19:16:42 | 2021-08-11T19:16:42 | 393,920,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,920 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from app.openapi_server.models.base_model_ import Model
from app.openapi_server.models.messages_all_of import MessagesAllOf # noqa: F401,E501
from app.openapi_server.models.messages_base import MessagesBase # noqa: F401,E501
from openapi_server import util
class Messages(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self, avatar_url: object=None, client: object=None, content: object=None, content_type: object=None, display_recipient: object=None, id: object=None, is_me_message: object=None, reactions: object=None, recipient_id: object=None, sender_email: object=None, sender_full_name: object=None, sender_id: object=None, sender_realm_str: object=None, stream_id: object=None, subject: object=None, topic_links: object=None, submessages: object=None, timestamp: object=None, type: object=None):  # noqa: E501
        """Messages - a model defined in Swagger

        :param avatar_url: The avatar_url of this Messages.  # noqa: E501
        :type avatar_url: object
        :param client: The client of this Messages.  # noqa: E501
        :type client: object
        :param content: The content of this Messages.  # noqa: E501
        :type content: object
        :param content_type: The content_type of this Messages.  # noqa: E501
        :type content_type: object
        :param display_recipient: The display_recipient of this Messages.  # noqa: E501
        :type display_recipient: object
        :param id: The id of this Messages.  # noqa: E501
        :type id: object
        :param is_me_message: The is_me_message of this Messages.  # noqa: E501
        :type is_me_message: object
        :param reactions: The reactions of this Messages.  # noqa: E501
        :type reactions: object
        :param recipient_id: The recipient_id of this Messages.  # noqa: E501
        :type recipient_id: object
        :param sender_email: The sender_email of this Messages.  # noqa: E501
        :type sender_email: object
        :param sender_full_name: The sender_full_name of this Messages.  # noqa: E501
        :type sender_full_name: object
        :param sender_id: The sender_id of this Messages.  # noqa: E501
        :type sender_id: object
        :param sender_realm_str: The sender_realm_str of this Messages.  # noqa: E501
        :type sender_realm_str: object
        :param stream_id: The stream_id of this Messages.  # noqa: E501
        :type stream_id: object
        :param subject: The subject of this Messages.  # noqa: E501
        :type subject: object
        :param topic_links: The topic_links of this Messages.  # noqa: E501
        :type topic_links: object
        :param submessages: The submessages of this Messages.  # noqa: E501
        :type submessages: object
        :param timestamp: The timestamp of this Messages.  # noqa: E501
        :type timestamp: object
        :param type: The type of this Messages.  # noqa: E501
        :type type: object
        """
        # Attribute name -> declared swagger type.  The generator emitted the
        # untyped `object` for every field here.
        self.swagger_types = {
            'avatar_url': object,
            'client': object,
            'content': object,
            'content_type': object,
            'display_recipient': object,
            'id': object,
            'is_me_message': object,
            'reactions': object,
            'recipient_id': object,
            'sender_email': object,
            'sender_full_name': object,
            'sender_id': object,
            'sender_realm_str': object,
            'stream_id': object,
            'subject': object,
            'topic_links': object,
            'submessages': object,
            'timestamp': object,
            'type': object
        }

        # Python attribute name -> JSON key on the wire (identical here).
        self.attribute_map = {
            'avatar_url': 'avatar_url',
            'client': 'client',
            'content': 'content',
            'content_type': 'content_type',
            'display_recipient': 'display_recipient',
            'id': 'id',
            'is_me_message': 'is_me_message',
            'reactions': 'reactions',
            'recipient_id': 'recipient_id',
            'sender_email': 'sender_email',
            'sender_full_name': 'sender_full_name',
            'sender_id': 'sender_id',
            'sender_realm_str': 'sender_realm_str',
            'stream_id': 'stream_id',
            'subject': 'subject',
            'topic_links': 'topic_links',
            'submessages': 'submessages',
            'timestamp': 'timestamp',
            'type': 'type'
        }
        self._avatar_url = avatar_url
        self._client = client
        self._content = content
        self._content_type = content_type
        self._display_recipient = display_recipient
        self._id = id
        self._is_me_message = is_me_message
        self._reactions = reactions
        self._recipient_id = recipient_id
        self._sender_email = sender_email
        self._sender_full_name = sender_full_name
        self._sender_id = sender_id
        self._sender_realm_str = sender_realm_str
        self._stream_id = stream_id
        self._subject = subject
        self._topic_links = topic_links
        self._submessages = submessages
        self._timestamp = timestamp
        self._type = type

    @classmethod
    def from_dict(cls, dikt) -> 'Messages':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The Messages of this Messages.  # noqa: E501
        :rtype: Messages
        """
        return util.deserialize_model(dikt, cls)

    # ------------------------------------------------------------------
    # Auto-generated accessors: one trivial property/setter pair per field.
    # (`id` and `type` intentionally mirror the wire format even though they
    # shadow Python builtins -- do not rename.)
    # ------------------------------------------------------------------

    @property
    def avatar_url(self) -> object:
        """Gets the avatar_url of this Messages.

        :return: The avatar_url of this Messages.
        :rtype: object
        """
        return self._avatar_url

    @avatar_url.setter
    def avatar_url(self, avatar_url: object):
        """Sets the avatar_url of this Messages.

        :param avatar_url: The avatar_url of this Messages.
        :type avatar_url: object
        """
        self._avatar_url = avatar_url

    @property
    def client(self) -> object:
        """Gets the client of this Messages.

        :return: The client of this Messages.
        :rtype: object
        """
        return self._client

    @client.setter
    def client(self, client: object):
        """Sets the client of this Messages.

        :param client: The client of this Messages.
        :type client: object
        """
        self._client = client

    @property
    def content(self) -> object:
        """Gets the content of this Messages.

        :return: The content of this Messages.
        :rtype: object
        """
        return self._content

    @content.setter
    def content(self, content: object):
        """Sets the content of this Messages.

        :param content: The content of this Messages.
        :type content: object
        """
        self._content = content

    @property
    def content_type(self) -> object:
        """Gets the content_type of this Messages.

        :return: The content_type of this Messages.
        :rtype: object
        """
        return self._content_type

    @content_type.setter
    def content_type(self, content_type: object):
        """Sets the content_type of this Messages.

        :param content_type: The content_type of this Messages.
        :type content_type: object
        """
        self._content_type = content_type

    @property
    def display_recipient(self) -> object:
        """Gets the display_recipient of this Messages.

        :return: The display_recipient of this Messages.
        :rtype: object
        """
        return self._display_recipient

    @display_recipient.setter
    def display_recipient(self, display_recipient: object):
        """Sets the display_recipient of this Messages.

        :param display_recipient: The display_recipient of this Messages.
        :type display_recipient: object
        """
        self._display_recipient = display_recipient

    @property
    def id(self) -> object:
        """Gets the id of this Messages.

        :return: The id of this Messages.
        :rtype: object
        """
        return self._id

    @id.setter
    def id(self, id: object):
        """Sets the id of this Messages.

        :param id: The id of this Messages.
        :type id: object
        """
        self._id = id

    @property
    def is_me_message(self) -> object:
        """Gets the is_me_message of this Messages.

        :return: The is_me_message of this Messages.
        :rtype: object
        """
        return self._is_me_message

    @is_me_message.setter
    def is_me_message(self, is_me_message: object):
        """Sets the is_me_message of this Messages.

        :param is_me_message: The is_me_message of this Messages.
        :type is_me_message: object
        """
        self._is_me_message = is_me_message

    @property
    def reactions(self) -> object:
        """Gets the reactions of this Messages.

        :return: The reactions of this Messages.
        :rtype: object
        """
        return self._reactions

    @reactions.setter
    def reactions(self, reactions: object):
        """Sets the reactions of this Messages.

        :param reactions: The reactions of this Messages.
        :type reactions: object
        """
        self._reactions = reactions

    @property
    def recipient_id(self) -> object:
        """Gets the recipient_id of this Messages.

        :return: The recipient_id of this Messages.
        :rtype: object
        """
        return self._recipient_id

    @recipient_id.setter
    def recipient_id(self, recipient_id: object):
        """Sets the recipient_id of this Messages.

        :param recipient_id: The recipient_id of this Messages.
        :type recipient_id: object
        """
        self._recipient_id = recipient_id

    @property
    def sender_email(self) -> object:
        """Gets the sender_email of this Messages.

        :return: The sender_email of this Messages.
        :rtype: object
        """
        return self._sender_email

    @sender_email.setter
    def sender_email(self, sender_email: object):
        """Sets the sender_email of this Messages.

        :param sender_email: The sender_email of this Messages.
        :type sender_email: object
        """
        self._sender_email = sender_email

    @property
    def sender_full_name(self) -> object:
        """Gets the sender_full_name of this Messages.

        :return: The sender_full_name of this Messages.
        :rtype: object
        """
        return self._sender_full_name

    @sender_full_name.setter
    def sender_full_name(self, sender_full_name: object):
        """Sets the sender_full_name of this Messages.

        :param sender_full_name: The sender_full_name of this Messages.
        :type sender_full_name: object
        """
        self._sender_full_name = sender_full_name

    @property
    def sender_id(self) -> object:
        """Gets the sender_id of this Messages.

        :return: The sender_id of this Messages.
        :rtype: object
        """
        return self._sender_id

    @sender_id.setter
    def sender_id(self, sender_id: object):
        """Sets the sender_id of this Messages.

        :param sender_id: The sender_id of this Messages.
        :type sender_id: object
        """
        self._sender_id = sender_id

    @property
    def sender_realm_str(self) -> object:
        """Gets the sender_realm_str of this Messages.

        :return: The sender_realm_str of this Messages.
        :rtype: object
        """
        return self._sender_realm_str

    @sender_realm_str.setter
    def sender_realm_str(self, sender_realm_str: object):
        """Sets the sender_realm_str of this Messages.

        :param sender_realm_str: The sender_realm_str of this Messages.
        :type sender_realm_str: object
        """
        self._sender_realm_str = sender_realm_str

    @property
    def stream_id(self) -> object:
        """Gets the stream_id of this Messages.

        :return: The stream_id of this Messages.
        :rtype: object
        """
        return self._stream_id

    @stream_id.setter
    def stream_id(self, stream_id: object):
        """Sets the stream_id of this Messages.

        :param stream_id: The stream_id of this Messages.
        :type stream_id: object
        """
        self._stream_id = stream_id

    @property
    def subject(self) -> object:
        """Gets the subject of this Messages.

        :return: The subject of this Messages.
        :rtype: object
        """
        return self._subject

    @subject.setter
    def subject(self, subject: object):
        """Sets the subject of this Messages.

        :param subject: The subject of this Messages.
        :type subject: object
        """
        self._subject = subject

    @property
    def topic_links(self) -> object:
        """Gets the topic_links of this Messages.

        :return: The topic_links of this Messages.
        :rtype: object
        """
        return self._topic_links

    @topic_links.setter
    def topic_links(self, topic_links: object):
        """Sets the topic_links of this Messages.

        :param topic_links: The topic_links of this Messages.
        :type topic_links: object
        """
        self._topic_links = topic_links

    @property
    def submessages(self) -> object:
        """Gets the submessages of this Messages.

        :return: The submessages of this Messages.
        :rtype: object
        """
        return self._submessages

    @submessages.setter
    def submessages(self, submessages: object):
        """Sets the submessages of this Messages.

        :param submessages: The submessages of this Messages.
        :type submessages: object
        """
        self._submessages = submessages

    @property
    def timestamp(self) -> object:
        """Gets the timestamp of this Messages.

        :return: The timestamp of this Messages.
        :rtype: object
        """
        return self._timestamp

    @timestamp.setter
    def timestamp(self, timestamp: object):
        """Sets the timestamp of this Messages.

        :param timestamp: The timestamp of this Messages.
        :type timestamp: object
        """
        self._timestamp = timestamp

    @property
    def type(self) -> object:
        """Gets the type of this Messages.

        :return: The type of this Messages.
        :rtype: object
        """
        return self._type

    @type.setter
    def type(self, type: object):
        """Sets the type of this Messages.

        :param type: The type of this Messages.
        :type type: object
        """
        self._type = type
| [
"suyash.mathur@research.iiit.ac.in"
] | suyash.mathur@research.iiit.ac.in |
2fd74ff61ae9db9a414df6d04e490d3eb7017386 | f49eee894ea5e35b032976661496f681fb9eb815 | /mysite/urls.py | 0aebc6a7eb4fd17b9e80498e111e97148d5b95f7 | [] | no_license | youyuge34/VoteSite | 26c5e25a88836451c9fa9e48b8ec0fafd1934ef1 | 1c79532da5c2bfa2411f03092cdf106b7e532553 | refs/heads/master | 2021-01-23T14:19:44.467282 | 2017-09-14T03:46:27 | 2017-09-14T03:46:27 | 102,683,840 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# URL routing table: the built-in Django admin plus the polls application.
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # admin site
    url(r'^polls/', include('poll.urls', namespace='poll'))  # poll app routes, reversed via the 'poll' namespace
]
| [
"1197993367@qq.com"
] | 1197993367@qq.com |
bbfbca4216e67c142d142a8e6aa8d775988c1f4d | e39117e739995759b9407a400846e5786e5b5f5d | /script/make_nopped_op_variants.py | 3b60f382adef774c554817601b4c6b2be0bb9ecd | [
"MIT"
] | permissive | perryk12/dishtiny | 5f18c5df1d97a49e58697ec7317773ae5c756ef6 | 4177f09eed90f3b73f952858677fc4001ac6175a | refs/heads/master | 2023-03-28T14:06:56.626589 | 2021-03-25T05:34:50 | 2021-03-25T05:36:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | #!/usr/bin/env python3
import copy
import gzip
import json
import sys
from keyname import keyname as kn
try:
__, applyto, basedon = sys.argv
except:
print('bad arguments')
print('USAGE: [applyto] [basedon]')
sys.exit(1)
assert 'ext' in kn.unpack( applyto )
assert 'ext' in kn.unpack( basedon )
def multiloader(target):
if kn.unpack( target )['ext'] == '.json':
with open( target, 'r') as f:
return json.load( f )
elif kn.unpack( target )['ext'] == '.json.gz':
try:
with gzip.open( target, 'rb') as f:
return json.loads( f.read().decode('ascii') )
except Exception:
pass
try:
with gzip.open( target, 'rb') as f:
return json.loads( f.read().decode('utf-8') )
except Exception:
pass
raise ValueError
applytodata = multiloader( applyto )
basedondata = multiloader( basedon )
assert (
len( applytodata['value0']['program'] )
== len( basedondata['value0']['program'] )
)
ops = [
idx
for idx, inst in enumerate(basedondata['value0']['program'])
if 'Nop-' not in inst['operation']
]
print(f'{basedon} has {len(basedondata["value0"]["program"])} instructions, {len(ops)} of which are ops')
print(f'nopping out corresponding {len(ops)} sites on {applyto}...')
for idx in ops:
variant = copy.deepcopy(applytodata)
variant['value0']['program'][ idx ]['operation'] = 'Nop-0'
attrs = kn.unpack(applyto)
attrs['variation'] = (
f'{attrs["variation"]}~i{idx}%Nop-0'
if 'variation' in attrs
and attrs['variation'] != 'master' else
f'i{idx}%Nop-0'
)
with (
open(kn.pack( attrs ), 'w', encoding='ascii')
if attrs['ext'] == '.json' else
gzip.open(kn.pack( attrs ), 'wt', encoding='ascii')
) as f:
json.dump(variant, f)
| [
"mmore500.login+git@gmail.com"
] | mmore500.login+git@gmail.com |
2f096768afa9f6f0836a187e39994e461fe13b6e | d5a3c744b70c9a68c8efcf4252c9f13eb9c9b551 | /动态下拉刷新页面爬取-demo45练习1.py | 0145b9812bcd692c970958bc542fe29ad9355c65 | [] | no_license | zhouf1234/untitled8 | 9689b33aa53c49fcd4e704976a79b1a65578f137 | c54634398800ba3c85f91885e6cf990e3645b2f6 | refs/heads/master | 2020-05-05T02:42:23.034426 | 2019-04-05T08:49:07 | 2019-04-05T08:49:07 | 179,648,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,519 | py | import requests
import re
# 某一新浪微博主内容,转发数量,留言数量,点赞数量,未完成。微博内容有点问题。建议用demo45练习2
page = 1 #选择页数:第几页
uid = 1669879400 #选择微博主网页的uid:同https://m.weibo.cn/profile/1669879400的1669879400
nurl = '/api/container/getIndex?containerid=230413'
nurl = nurl+str(uid)+'_-_WEIBO_SECOND_PROFILE_WEIBO&page_type=03&page='+str(page)
# print('https://m.weibo.cn'+nurl) #连接拼接
# 爬取页面,获取的中文是unicode码
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"}
request = requests.get('https://m.weibo.cn'+nurl,headers=header)
c = request.text
# print(c)
# 微博主微博点赞数
# patt=re.compile('"created_at".*?"attitudes_count":(\d+)',re.S)
# titles=re.findall(patt,c)
# print(len(titles))
#
# # # 微博主微博评论数
# pat=re.compile('"created_at".*?"comments_count":(\d+)',re.S)
# title=re.findall(pat,c)
# print(len(title))
#
# # # 微博主微博转发数
# pa=re.compile('"created_at".*?"reposts_count":(\d+)',re.S)
# titl=re.findall(pa,c)
# print(len(titl))
# 微博主微博内容,总共10条,只取到8条,有些没出来,有些和上一条黏在一起了,建议不用此方法取内容
p = re.sub('<a.*?>|<.*?a>|@','',c)
# print(p)
p2 = re.compile('"text":"(.*?)"',re.S)
tit = re.findall(p2,p)
print(len(tit))
for i in tit:
print(i.encode('latin-1').decode('unicode_escape'))
| [
"="
] | = |
ef83acb1830849c0e46fdb0f33f0b4ee6b03c16e | e7efae2b83216d9621bd93390959d652de779c3d | /vsphere/tests/legacy/test_metadata_cache.py | 04695b37ae258f148227b4f2b37cb78669509635 | [
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | DataDog/integrations-core | ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | refs/heads/master | 2023-08-31T04:08:06.243593 | 2023-08-30T18:22:10 | 2023-08-30T18:22:10 | 47,203,045 | 852 | 1,548 | BSD-3-Clause | 2023-09-14T16:39:54 | 2015-12-01T16:41:45 | Python | UTF-8 | Python | false | false | 1,545 | py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.vsphere.legacy.metadata_cache import MetadataCache, MetadataNotFoundError
@pytest.fixture
def cache():
return MetadataCache()
def test_contains(cache):
with pytest.raises(KeyError):
cache.contains("instance", "foo")
cache._metadata["instance"] = {"foo_id": {}}
assert cache.contains("instance", "foo_id") is True
assert cache.contains("instance", "foo") is False
def test_set_metadata(cache):
cache._metadata["foo_instance"] = {}
cache.set_metadata("foo_instance", {"foo_id": {}})
assert "foo_id" in cache._metadata["foo_instance"]
def test_set_metrics(cache):
cache._metric_ids["foo_instance"] = []
cache.set_metric_ids("foo_instance", ["foo"])
assert "foo" in cache._metric_ids["foo_instance"]
assert len(cache._metric_ids["foo_instance"]) == 1
def test_get_metadata(cache):
with pytest.raises(KeyError):
cache.get_metadata("instance", "id")
cache._metadata["foo_instance"] = {"foo_id": {"name": "metric_name"}}
assert cache.get_metadata("foo_instance", "foo_id")["name"] == "metric_name"
with pytest.raises(MetadataNotFoundError):
cache.get_metadata("foo_instance", "bar_id")
def test_get_metrics(cache):
with pytest.raises(KeyError):
cache.get_metric_ids("instance")
cache._metric_ids["foo_instance"] = ["foo"]
assert cache.get_metric_ids("foo_instance") == ["foo"]
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
aeca3fe0dc524e97b5ed0ad10014dc55a06535f9 | 6c6eba36c3a735ab6131e8068e20a0a2ba739792 | /feedreader/admin.py | 7ea1b98a2a8887dab4a87826069448b4beebd723 | [] | no_license | ahernp/ahernp.com | b078333a93d49bbb897c665d34d2b7c3ed4958cc | 48db2021ca8119709fa1074a82290bfbaa04ba6a | refs/heads/master | 2023-09-04T07:16:49.301480 | 2023-08-01T18:22:25 | 2023-08-01T18:22:25 | 17,945,197 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | from django.contrib import admin
from .models import Group, Feed, Entry
class FeedAdmin(admin.ModelAdmin):
    """Admin configuration for Feed objects."""

    # Columns shown on the feed change-list page.
    list_display = ["title", "xml_url", "group", "published_time", "last_polled_time"]
    list_filter = ["group"]
    search_fields = ["link", "title"]
    # These fields are populated automatically when the feed is polled,
    # so they are not editable by hand.
    readonly_fields = [
        "title",
        "link",
        "description",
        "published_time",
        "last_polled_time",
    ]
    fieldsets = (
        (
            None,
            {
                "fields": (
                    ("xml_url", "group"),
                    ("title", "link"),
                    ("description",),
                    ("published_time", "last_polled_time", "always_load"),
                )
            },
        ),
    )
def mark_as_read(modeladmin, request, queryset):
    """Bulk admin action: flag every entry in *queryset* as read."""
    queryset.update(read_flag=True)


# Label shown in the admin "Action" drop-down.
mark_as_read.short_description = "Mark selected entries as read"
class EntryAdmin(admin.ModelAdmin):
    """Admin configuration for Entry objects."""

    # Columns shown on the entry change-list page.
    list_display = ["title", "feed", "published_time"]
    list_filter = ["read_flag", "feed"]
    search_fields = ["title", "link"]
    # Bulk action (defined in this module) to mark selected entries as read.
    actions = [mark_as_read]
    # Entries are imported from feeds, so content fields are read-only;
    # only read_flag remains editable.
    readonly_fields = [
        "link",
        "media_link",
        "title",
        "description",
        "published_time",
        "feed",
    ]
    fieldsets = (
        (
            None,
            {
                "fields": (
                    ("link",),
                    ("media_link",),
                    ("title", "feed"),
                    ("description",),
                    ("published_time", "read_flag"),
                )
            },
        ),
    )
# Register the models with the admin site (Group uses the default ModelAdmin).
admin.site.register(Group)
admin.site.register(Feed, FeedAdmin)
admin.site.register(Entry, EntryAdmin)
| [
"ahernp@ahernp.com"
] | ahernp@ahernp.com |
19032f2105374d3ac43e9734fb0d9bde2ddee3a1 | 404ba73276fb78856a9c04ab6c13eac0b6509d46 | /run.py | bd3c0a4e9da3b1abf9b2b6499f4f79f44538a043 | [
"MIT"
] | permissive | LPLhock/apiAutoTest | c7c0e8c4bb98002706fb1b50c74c7d4a863c5ea5 | ca2267cde39b30902bc90f31de6a6c0a4b954f16 | refs/heads/master | 2023-04-18T11:21:02.095071 | 2021-05-06T15:32:31 | 2021-05-06T15:32:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | # -*- coding: utf-8 -*-
# @Time : 2021/5/2 22:26
# @Author : RanyLra
# @Wechat : RanY_Luck
# @File : run.py
import os
import shutil
from test.conftest import pytest
from tools import logger
from tools.read_file import ReadFile
from tools.send_email import EmailServe
report = ReadFile.read_config('$.file_path.report')
logfile = ReadFile.read_config('$.file_path.log')
file_path = ReadFile.read_config('$.file_path')
s_email = ReadFile.read_config('$.email')
def run():
    """Execute the API test suite and publish an Allure report.

    Steps: wipe the previous report directory, wire up file logging, run the
    pytest suite with the allure plugin, render the HTML report, then serve it
    over HTTP (blocking).
    """
    # Remove stale results so the new report is not mixed with old data.
    # NOTE(review): this hard-codes 'report/' while the output path comes from
    # the configured ``report`` value -- confirm the two always match.
    if os.path.exists('report/'):
        shutil.rmtree(path='report/')
    logger.add(logfile, enqueue=True, encoding='utf-8')
    logger.info("""
 _ _ _ _____ _
__ _ _ __ (_) / \\ _ _| |_ __|_ _|__ ___| |_
/ _` | '_ \\| | / _ \\| | | | __/ _ \\| |/ _ \\/ __| __|
| (_| | |_) | |/ ___ \\ |_| | || (_) | | __/\\__ \\ |_
\\__,_| .__/|_/_/ \\_\\__,_|\\__\\___/|_|\\___||___/\\__|
|_|
Starting ... ... ...
""")
    pytest.main(args=['test/test_api.py', f'--alluredir={report}/data'])
    # Generate the local HTML report from the raw Allure result data.
    os.system(f'allure generate {report}/data -o {report}/html --clean')
    logger.success('报告已生成,请查收')
    # Start the Allure report server.
    os.system(f'allure serve {report}/data')  # Spawns an HTTP service hosting the report and blocks this thread (comment this line out if you need to zip the report instead)
def zip_report():
    """Pack the generated HTML report directory into ``report.zip``."""
    source_dir, archive_name = 'report/html', 'report.zip'
    EmailServe.zip_report(source_dir, archive_name)
def send_email():
    """E-mail the report using the configured SMTP settings."""
    report_dir = file_path['report']
    EmailServe.send_email(s_email, report_dir)
def del_report():
    """Delete the local report attachment after it has been e-mailed."""
    attachment_path = s_email['enclosures']
    os.remove(attachment_path)
    logger.success('附件删除完成')
if __name__ == '__main__':
    run()
    # Optional post-run steps: archive the report, e-mail it, then clean up.
    # zip_report()
    # send_email()
    # del_report()
| [
"1311518086@qq.com"
] | 1311518086@qq.com |
0ea7326159f7d7d1d7ffb5aa9bed33170995aa9d | 0d28e718204984e6b1b4cd4ea203187f0514e0f0 | /ML_CNN_keras.py | 3520b09519f73c3215195bf2cbf28a80fd73cb0f | [
"Apache-2.0"
] | permissive | pauhsg/PS_SentimentAnalysis | 1fb6a0f425f6e842d84cd76a76fb3bb3738e8499 | fb7ebe4d2b05e1f86205708a13b5e238a0e1cba9 | refs/heads/master | 2022-03-27T01:01:32.607705 | 2019-12-20T07:36:25 | 2019-12-20T07:36:25 | 227,088,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,072 | py | #!/usr/bin/env python3
from __future__ import division, print_function
import numpy as np
import pandas as pd
from gensim import models
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Dropout, Reshape, Flatten, concatenate, Input, Conv1D, GlobalMaxPooling1D, Embedding
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import one_hot
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten
from keras.layers import GlobalMaxPooling1D
from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models.keyedvectors import KeyedVectors
from get_GloVe_emb_ML import *
from proj2_helpers import *
MAX_SEQUENCE_LENGTH = 50
def get_train_df_CNN(pos, neg):
    """Build a shuffled DataFrame of labelled training tweets.

    Positive tweets are labelled 1 and negative tweets -1.  Rows containing
    NaN values are dropped and the combined frame is returned in random order.

    :param pos: sequence of preprocessed positive tweets
    :param neg: sequence of preprocessed negative tweets
    :return: shuffled DataFrame with columns ['sentiment', 'twt']
    """
    print('> create a Pandas DataFrame with preprocessed and shuffled pos and neg tweets to perform CNN')
    labelled_frames = []
    for label, tweets in ((1, pos), (-1, neg)):
        frame = pd.DataFrame({'sentiment': [label] * len(tweets), 'twt': list(tweets)})
        # Drop any rows that ended up empty/NaN after preprocessing.
        frame.dropna(inplace=True)
        labelled_frames.append(frame)
    # Concatenate with fresh indices (no duplicates), then shuffle the rows.
    full_df = pd.concat(labelled_frames, ignore_index=True).sample(frac=1)
    print('full_df shape: ', full_df.shape)
    return full_df
def get_test_df_CNN(test):
    """Build a DataFrame pairing each preprocessed test tweet with a 1-based id.

    :param test: sequence of preprocessed test tweets
    :return: DataFrame with columns ['Tweet_submission_id', 'twt'] where the
        ids run from 1 to len(test)
    """
    print('> create a Pandas DataFrame with preprocessed test tweets to perform CNN')
    # One 1-based submission id per tweet.  The original hard-coded 10000 ids
    # (the Kaggle test-set size); deriving the count from the input yields the
    # same frame for 10000 tweets while supporting any input length.
    test_ids = np.arange(1, len(test) + 1, dtype=int)
    df_test = pd.DataFrame({'Tweet_submission_id': test_ids, 'twt': list(test)})
    print('df_test shape: ', df_test.shape)
    return df_test
def ConvNet(embeddings, max_sequence_length, num_words, embedding_dim, labels_index):
    '''
    Convolutional Neural Network from https://github.com/saadarshad102/Sentiment-Analysis-CNN

    Multi-filter-width 1-D CNN for sentence classification.

    Args:
        embeddings: pre-trained embedding matrix of shape (num_words, embedding_dim).
        max_sequence_length: padded token-sequence length of each input.
        num_words: vocabulary size (number of rows in `embeddings`).
        embedding_dim: dimensionality of the embedding vectors.
        labels_index: number of output classes (units of the final Dense layer).

    Returns:
        A compiled (but untrained) keras Model.

    NOTE(review): Embedding/Input/Conv1D/concatenate are not imported in the
    visible imports of this module — presumably they arrive via a wildcard
    import; confirm.
    '''
    # Embedding layer initialised with the pre-trained vectors and frozen.
    embedding_layer = Embedding(num_words,
                                embedding_dim,
                                weights=[embeddings],
                                input_length=max_sequence_length,
                                trainable=False)
    sequence_input = Input(shape=(max_sequence_length,), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    convs = []
    # Parallel convolutions with window sizes 2..6; each branch is
    # global-max-pooled, then all branches are concatenated.
    filter_sizes = [2,3,4,5,6]
    for filter_size in filter_sizes:
        l_conv = Conv1D(filters=200,
                        kernel_size=filter_size,
                        activation='relu')(embedded_sequences)
        l_pool = GlobalMaxPooling1D()(l_conv)
        convs.append(l_pool)
    l_merge = concatenate(convs, axis=1)
    x = Dropout(0.1)(l_merge)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.2)(x)
    # Sigmoid over `labels_index` units with binary cross-entropy on one-hot
    # targets — works, though softmax + categorical CE is the usual pairing.
    preds = Dense(labels_index, activation='sigmoid')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])
    model.summary()
    return model
def train_ruby_CNN(pos, neg, dim_emb, testsize, vectors_path, num_epochs, batch_size):
    '''
    given preprocessed pos, neg and test data, the embedding dimension the vectors' file path
    and the test size, runs the a convolutional neural netword (CNN) to predict if tweets
    are positive or negative!
    adapted from https://github.com/saadarshad102/Sentiment-Analysis-CNN

    Returns:
        (model, tokenizer): the trained keras Model and the fitted Tokenizer,
        which the caller needs to preprocess unseen tweets identically.

    NOTE(review): `Tokenizer` is not imported in the visible imports of this
    module — presumably it arrives via a wildcard import; confirm.
    '''
    print('> preparing data and training CNN with an embedding dimension of', dim_emb, 'and a test size of', testsize)
    # get train DataFrame
    data = get_train_df_CNN(pos, neg)
    # tokenize keeping our tags like <user> in a single token and store them in a new column of the DataFram
    tokens = [sen.split() for sen in data.twt]
    data['tokens'] = tokens
    # transform labels into one hot encoded columns (Pos/Neg)
    pos_lab = []
    neg_lab = []
    for l in data.sentiment:
        if l == -1:
            pos_lab.append(0)
            neg_lab.append(1)
        elif l == 1:
            pos_lab.append(1)
            neg_lab.append(0)
    data['Pos']= pos_lab
    data['Neg']= neg_lab
    data = data[['twt', 'tokens', 'sentiment', 'Pos', 'Neg']]
    # split data into train and test (fixed seed for reproducibility)
    data_train, data_test = train_test_split(data, test_size=testsize, random_state=42)
    # build training vocabulary
    all_training_words = [word for tokens in data_train["tokens"] for word in tokens]
    TRAINING_VOCAB = sorted(list(set(all_training_words)))
    # load GloVe pre-trained word embeddings
    print('> loading GloVe pre-trained word embeddings (this step can take a while)')
    glove2word2vec(glove_input_file=vectors_path, word2vec_output_file="./Data/produced/gensim_glove_vectors.txt")
    glove_model = KeyedVectors.load_word2vec_format("./Data/produced/gensim_glove_vectors.txt", binary=False)
    # train tokenizer on train, tokenize and pad sequences
    tokenizer = Tokenizer(num_words=len(TRAINING_VOCAB), lower=True, char_level=False)
    tokenizer.fit_on_texts(data_train['twt'].tolist())
    training_sequences = tokenizer.texts_to_sequences(data_train['twt'].tolist())
    train_word_index = tokenizer.word_index
    train_cnn_data = pad_sequences(training_sequences, maxlen=MAX_SEQUENCE_LENGTH)
    # embedding weights: pre-trained GloVe vector when available, else random
    train_embedding_weights = np.zeros((len(train_word_index)+1, dim_emb))
    for word,index in train_word_index.items():
        train_embedding_weights[index,:] = glove_model[word] if word in glove_model else np.random.rand(dim_emb)
    test_sequences = tokenizer.texts_to_sequences(data_test['twt'].tolist())
    test_cnn_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
    # get labels
    label_names = ['Pos', 'Neg']
    y_train = data_train[label_names].values
    # initialise model
    print('> Model summary: ')
    model = ConvNet(train_embedding_weights, MAX_SEQUENCE_LENGTH, len(train_word_index)+1, dim_emb, len(list(label_names)))
    # train model
    print('> Training CNN')
    hist = model.fit(train_cnn_data, y_train, epochs=num_epochs, validation_split=0.2, shuffle=True, batch_size=batch_size)
    # test model
    print('> Testing CNN')
    predictions = model.predict(test_cnn_data, batch_size=1024, verbose=1)
    # index 0 -> class 'Pos' mapped to 1, index 1 -> 'Neg' mapped to 0
    labels = [1, 0]
    prediction_labels=[]
    for p in predictions:
        prediction_labels.append(labels[np.argmax(p)])
    # convert 0, 1 labels into -1, 1 labels
    prediction_labels=[-1 if pred == 0 else 1 for pred in prediction_labels]
    # compute test accuracy
    # NOTE(review): the next statement's result is discarded (it is
    # recomputed inside the print below) — it is a no-op.
    sum(data_test.sentiment==prediction_labels)/len(prediction_labels)
    print('Obtained accuracy on test: ', sum(data_test.sentiment==prediction_labels)/len(prediction_labels))
    return model, tokenizer
def run_ruby_CNN(pos, neg, test, dim_emb, testsize, vectors_path, num_epochs, batch_size, submission_path):
    '''
    given all needed data, will perform training of CNN and then apply it to the test set
    and save a submission in Submissions folder using submission_path
    adapted from https://github.com/saadarshad102/Sentiment-Analysis-CNN
    '''
    print('>> RUNNING CNN ')
    # get test DataFrame
    df_test = get_test_df_CNN(test)
    # tokenize (whitespace split keeps tags like <user> as single tokens)
    tokens = [sen.split() for sen in df_test.twt]
    df_test['tokens'] = tokens
    # train the CNN and recover the fitted tokenizer for identical preprocessing
    model, tokenizer = train_ruby_CNN(pos, neg, dim_emb, testsize, vectors_path, num_epochs, batch_size)
    # tokenize using trained tokenizer and pad
    test_sequences_TEST = tokenizer.texts_to_sequences(df_test['twt'].tolist())
    test_cnn_data_TEST = pad_sequences(test_sequences_TEST, maxlen=MAX_SEQUENCE_LENGTH)
    # make prediction on the test set
    predictions_TEST = model.predict(test_cnn_data_TEST, batch_size=1024, verbose=1)
    # get labels (index 0 -> class 'Pos' mapped to 1, index 1 -> 'Neg' mapped to 0)
    labels_TEST = [1, 0]
    prediction_labels_TEST = []
    for p in predictions_TEST:
        prediction_labels_TEST.append(labels_TEST[np.argmax(p)])
    # transform 0, 1 labels into -1, 1
    prediction_labels_TEST = [-1 if pred == 0 else 1 for pred in prediction_labels_TEST]
    # create and save submission using submission_path
    create_submission(df_test, prediction_labels_TEST, submission_path)
    print('---> submission ready in Submissions folder')
| [
"pauline.heusghem@epfl.ch"
] | pauline.heusghem@epfl.ch |
bae73ac3f4e697d38774bf7737abe3b3a46c99a4 | 65f95bf402843b6c7970bd278e3a9fbc3e85d2ae | /cdss/urls.py | 70d545b77f89bca879ed5aa25ad2c33b7b2c75e0 | [] | no_license | amitvishv/cdss | 4526c6e544bdd6fe6ff4f98db6f565b896b10f4c | 434c5020f0b5df23332043d8e708882c1887b81b | refs/heads/master | 2023-08-18T16:49:49.220846 | 2021-10-20T07:03:23 | 2021-10-20T07:03:23 | 415,764,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | """cdss URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # REST endpoints of the doctor application.
    path('doctor/api/', include('doctorapp.urls')),
]
# Serve user-uploaded media files directly — development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"amit212316@gmail.com"
] | amit212316@gmail.com |
0d32823e3e09027dbd1a1a02366157bb10ffe36b | f57a2522139c8af7b7d1e6a240db3d239cba73e2 | /installation/catkin_ws/src/cp_gazebo/brass_cpu_monitor.py | 16047cef7a5385e4f13e2ed9dc3c049b05ed4a7d | [] | no_license | schmerl/LLStaging | 269e0f08b0786ba4b7b2558349e5e3b5823995df | f9a714be68fa8aeb725d8138fe5458f9760f8902 | refs/heads/master | 2021-01-11T04:04:00.665624 | 2017-03-21T17:24:22 | 2017-03-21T17:24:22 | 71,252,670 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | #!/usr/bin/env python
import os
import psutil
import rospy
from std_msgs.msg import Float64
import time
def find_gazebo():
    """Block until a running gzserver process appears, then return it.

    Polls the process table once per second. If several matches exist, the
    last one seen in a full scan wins (mirrors the original behaviour).

    NOTE(review): `proc.name` is used as an attribute, which matches older
    psutil releases; on modern psutil `name` is a method — confirm the
    pinned psutil version.
    """
    while True:
        time.sleep(1)
        found = None
        for proc in psutil.process_iter():
            try:
                if "gzserver" in proc.name:
                    found = proc
            except psutil.NoSuchProcess:
                # Process vanished mid-scan; ignore and keep scanning.
                continue
        if found is not None:
            return found
def report_cpu_percentage():
    """Publish the host CPU utilisation minus Gazebo's share to ROS.

    Publishes a Float64 on /energy_monitor/set_nuc_utilization roughly every
    two seconds so downstream consumers see the load excluding the simulator.
    """
    gazebo = find_gazebo()
    # Prime the counters: with interval=None the first cpu_percent() call
    # returns a meaningless value, so these establish the baseline.
    percent = psutil.cpu_percent(interval=None)
    gzP = gazebo.get_cpu_percent(interval=None)
    # For me, this returns 4
    CORES = len(psutil.cpu_percent(percpu=True))
    # latch=True: late subscribers immediately receive the last message.
    pub = rospy.Publisher("/energy_monitor/set_nuc_utilization", Float64, queue_size=10, latch=True)
    while not rospy.is_shutdown():
        time.sleep(2)
        # The overall percentage of CPU since the last time we called it
        percent = psutil.cpu_percent(interval=None)
        # The total cpu percentage of gz since last call, divide by number of cores
        gzP = gazebo.get_cpu_percent(interval=None) / CORES
        real_percent = percent - gzP
        rospy.loginfo('Percent = %s, GZ = %s, reporting = %s' %(percent, gzP,real_percent))
        msg = Float64()
        msg.data = real_percent
        pub.publish(msg)
if __name__ == "__main__":
    # Register as a ROS node, then publish utilisation until shutdown.
    # (Fixed: trailing extraction junk previously broke this line's syntax.)
    rospy.init_node('brass_cpu_monitor')
    report_cpu_percentage()
"schmerl@cs.cmu.edu"
] | schmerl@cs.cmu.edu |
4ecf3bfbc531d03993585e3be018e1e16adef30a | eae7d2a6ccdfc25d68aea798ac7b0e5172ccbe45 | /spider/wireless_headphone_jd.py | b95822969be30c49c65485fd333756d8227858d2 | [] | no_license | Mark-ThinkPad/GraduationProject-2021 | 067637a879a874db86029f981af005dbf7a42b41 | 704851555706dd165ab266d6442944e23165cd4b | refs/heads/main | 2023-05-27T11:49:35.017104 | 2021-06-08T04:47:42 | 2021-06-08T04:47:42 | 309,945,052 | 14 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,966 | py | import re
import json
from time import sleep
from json.decoder import JSONDecodeError
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException
from db.wireless_headphone_models import Commodity, JDExistedSku, JDTargetSku
from spider.utils import (get_chrome_driver, get_response_body, window_scroll_by, parse_jd_count_str,
open_second_window, back_to_first_window, waiting_content_loading)
# 获取京东无线耳机分类销量数据
def get_wireless_headphone_from_jd(browser: Chrome):
# 打开京东无线耳机分类
# url_list = [
# 'https://list.jd.com/list.html?cat=652%2C828%2C842&ev=235_58350%5E&cid3=842',
# 'https://list.jd.com/list.html?cat=652%2C828%2C842&ev=235_66906%5E&cid3=842'
# ]
# for url in url_list:
# print(f'------正在打开京东无线耳机分类页面------')
# browser.get(url)
# # 保存将要获取的所有商品SKU编号
# insert_jd_all_target_sku(browser)
# 保存所有商品信息
insert_jd_all_commodity(browser)
print('------京东无线耳机分类销量数据获取完成------')
# 京东无线耳机分类页面翻页
def turn_to_the_next_page(browser: Chrome):
while True:
try:
WebDriverWait(browser, 0.5).until(
ec.element_to_be_clickable((By.CLASS_NAME, 'pn-next'))
)
browser.execute_script('document.querySelector(".pn-next").click()')
waiting_content_loading(browser, 'gl-item')
break
except TimeoutException:
window_scroll_by(browser, 500)
# 打开并切换到当前商品页面
def switch_to_current_sku_page(browser: Chrome, sku_url: str):
open_second_window(browser)
print(f'------打开新窗口并正在加载当前商品页面: {sku_url}------')
browser.get(sku_url)
print('------当前商品页面加载完成------')
sleep(2)
# 从后端API接口获取并保存已上架的SKU
def get_jd_sku_from_api(browser: Chrome, sku: str):
try:
jd_sku_url = 'type=getstocks'
skus = get_response_body(browser, jd_sku_url, 'GET')
if skus is None:
raise WebDriverException()
skus = skus.rstrip(')')
skus = re.sub(r'^\w+?\(', '', skus)
skus = json.loads(skus)
for key in skus.keys():
JDExistedSku.get_or_create(sku=key)
print('------保存已上架SKU完成------')
except (WebDriverException, JSONDecodeError):
JDExistedSku.get_or_create(sku=sku)
print('------当前商品是单SKU商品------')
# 保存将要获取的商品SKU编号
def insert_jd_target_sku(browser: Chrome):
elements = browser.find_elements_by_class_name('gl-item')
print(f'当前页面共有{len(elements)}个商品')
for element in elements:
# 获取当前商品SKU编号
current_sku: str = element.get_attribute('data-sku')
JDTargetSku.get_or_create(sku=current_sku)
# 保存将要获取的所有商品SKU编号
def insert_jd_all_target_sku(browser: Chrome):
max_page = 141
current_page = 0
while current_page <= max_page:
# 获取最大页数和当前页数
mp_path = '/html/body/div[7]/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/span/i'
cp_path = '/html/body/div[7]/div/div[2]/div[1]/div/div[1]/div[1]/div[3]/span/b'
max_page = int(browser.find_element_by_xpath(mp_path).text)
current_page = int(browser.find_element_by_xpath(cp_path).text)
print(f'总页数: {max_page}, 当前页数: {current_page}')
# 下滑半页使页面加载后30个商品 (lazy-loading机制)
window_scroll_by(browser, 3200)
sleep(3)
# 保存将要获取的当前页面的商品SKU编号
insert_jd_target_sku(browser)
# 翻页
if current_page == max_page:
break
else:
turn_to_the_next_page(browser)
# 保存商品信息
def insert_jd_all_commodity(browser: Chrome):
for target_sku in JDTargetSku.select():
# 获取当前商品SKU编号
sku: str = target_sku.sku
# 检查当前SKU是否在数据库中保存的SKU中, 避免销量重复计数
result = JDExistedSku.get_or_none(JDExistedSku.sku == sku)
if result is not None:
# 删除已经保存的商品target_sku
delete_saved_commodity_sku(sku)
print(f'---SKU编号为 {sku} 的商品信息已保存过---')
continue
# 开始抓取商品信息
commodity = Commodity()
commodity.source = '京东'
commodity.url = 'https://item.jd.com/' + sku + '.html'
# 打开并切换到当前商品页面
switch_to_current_sku_page(browser, commodity.url)
# 从后端API接口获取并保存已上架的SKU
get_jd_sku_from_api(browser, sku)
try:
commodity.price = float(browser.find_element_by_css_selector('span.price:nth-child(2)').text)
except (ValueError, NoSuchElementException):
# 价格显示为待发布时或商品以下柜时, 抛出异常
commodity.price = -2
try:
commodity.title = browser.find_element_by_class_name('sku-name').text.strip()
except NoSuchElementException:
commodity.title = '无商品标题'
commodity.total = -1 # 商品销量预赋值
for item in browser.find_elements_by_css_selector('#detail > div.tab-main.large > ul > li'):
if '商品评价' in item.text:
total_str = item.find_element_by_tag_name('s').text.lstrip('(').rstrip(')')
commodity.total = parse_jd_count_str(total_str)
# 判断是否为京东自营
try:
self_str = browser.find_element_by_class_name('u-jd').text
if self_str == '自营':
self = True
else:
self = False
except NoSuchElementException:
self = False
commodity.is_self = self
try:
commodity.shop_name = browser.find_element_by_css_selector(
'#crumb-wrap > div > div.contact.fr.clearfix > div.J-hove-wrap.EDropdown.fr > div:nth-child(1) > div '
'> a').text
except NoSuchElementException:
commodity.shop_name = '店铺名称为空'
# 从商品介绍中获取商品信息
try:
commodity.brand = browser.find_element_by_css_selector('#parameter-brand > li > a').text
except NoSuchElementException:
commodity.brand = '未知'
intro = browser.find_elements_by_css_selector('.parameter2 > li')
intro_list = []
for i in intro:
intro_list.append(i.text)
# 预赋值, 防止注入空置报错
commodity.model = '未知'
for intro_item in intro_list:
if '商品名称' in intro_item:
commodity.model = intro_item.replace('商品名称:', '')
# 保存商品信息
commodity.save()
# 删除已经保存的商品target_sku
delete_saved_commodity_sku(sku)
print(f'------SKU编号为 {sku} 的商品信息保存完毕------')
# 回到无线耳机分类页面
back_to_first_window(browser)
# 删除已经保存的商品 target_sku
def delete_saved_commodity_sku(target_sku: str):
saved_sku = JDTargetSku.get(JDTargetSku.sku == target_sku)
saved_sku.delete_instance()
if __name__ == '__main__':
    # Create a Chrome driver instance.
    driver = get_chrome_driver()
    # Scrape JD's wireless-headphone category sales data.
    get_wireless_headphone_from_jd(driver)
    # Shut the browser instance down.
    driver.quit()
| [
"1372469698@qq.com"
] | 1372469698@qq.com |
d8539f1bf6ab8cbfd8fbabe5ef96bacc654049b3 | 0e5f7fbea53b56ddeb0905c687aff43ae67034a8 | /src/port_adapter/api/grpc/listener/BaseListener.py | fe0dc7ab7c4c74bdb126db44e244dc94027a5174 | [] | no_license | arkanmgerges/cafm.identity | 359cdae2df84cec099828719202b773212549d6a | 55d36c068e26e13ee5bae5c033e2e17784c63feb | refs/heads/main | 2023-08-28T18:55:17.103664 | 2021-07-27T18:50:36 | 2021-07-27T18:50:36 | 370,453,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | """
@author: Arkan M. Gerges<arkan.m.gerges@gmail.com>
"""
from src.resource.logging.decorator import debugLogger
class BaseListener:
    """Common helpers shared by the gRPC listener classes."""

    @debugLogger
    def _token(self, context) -> str:
        """Return the value of the 'token' entry in the gRPC invocation
        metadata, or an empty string when no token was sent.

        (Fixed: the final line previously carried trailing extraction junk
        that broke the syntax.)
        """
        metadata = context.invocation_metadata()
        for key, value in metadata:
            if "token" == key:
                return value
        return ""
"arkan.m.gerges@gmail.com"
] | arkan.m.gerges@gmail.com |
47f0db2046bb3a7eae2a1796815d4d94c1c5932d | d8a74cbba3aa14cfc813792d93e836395769deb8 | /etl/comments.py | bb39c489fc50b0aa37d95d094b4b22bd1b275d3f | [] | no_license | shurik88/datascience | 3d0ebd9067848f1b8d2aa8f383cfe53f4528fbd4 | ade1d45b07aeb24e8a21970a867e849fd4603444 | refs/heads/master | 2021-05-07T01:05:18.603959 | 2017-11-28T18:23:02 | 2017-11-28T18:23:02 | 110,322,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | from lxml import etree
from datetime import datetime
import settings
from mongoRepository import mongoRep
import argparse
# ---- Command-line arguments ----
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='absolute path for comments.xml')
args = parser.parse_args()
filePath = args.path

# ---- Target repository: MongoDB 'comments' collection ----
settingsData = settings.get()
rep = mongoRep(settingsData["connectionString"], "comments")

# Buffered bulk insert: flush every `bufferLength` parsed rows.
buffer = []
bufferLength = 1000
i = 1

# Stream-parse the comments XML dump; each 'row' element is one comment.
context = etree.iterparse(filePath, events=('end',), tag='row')
for event, elem in context:
    doc = {"_id": int(elem.attrib["Id"])}
    # UserId can be absent (e.g. deleted accounts) — store it only if present.
    if "UserId" in elem.attrib:
        doc["user"] = int(elem.attrib["UserId"])
    doc["post"] = int(elem.attrib["PostId"])
    # Truncate sub-second precision from the creation timestamp.
    doc["date"] = datetime.strptime(str(elem.attrib["CreationDate"]), "%Y-%m-%dT%H:%M:%S.%f").replace(microsecond=0)
    doc["text"] = str(elem.attrib["Text"])
    doc["score"] = int(elem.attrib["Score"])
    buffer.append(doc)
    # Free the parsed element; without this iterparse retains the whole
    # tree in memory for large dumps.
    elem.clear()
    if len(buffer) == bufferLength:
        rep.insert_many(buffer)
        print("Inserted: {0} docs".format(i * bufferLength))
        i = i + 1
        buffer = []

# Flush the final partial batch.
# (Fixed: this print was missing its closing parenthesis.)
if len(buffer) != 0:
    rep.insert_many(buffer)
    print("Inserted: {0} docs".format((i - 1) * bufferLength + len(buffer)))
"budylsky@adeptik.com"
] | budylsky@adeptik.com |
f41c5c3d5fcffe3a0e415f78ebc62849aaed51b6 | d370990233ba3518491b103647e7616d350e440a | /Scraper.py | 576a163a43b05e600a5e188ae8f9974896c2232f | [] | no_license | IsaacSamuel/tumblr_scraper | 1d1a528d208c9ca3237242ca57ca9221806dacc9 | 12ae825bc168ca1906792f07517e4ebf15a0a6d4 | refs/heads/master | 2021-01-11T08:56:52.754916 | 2017-01-13T01:20:45 | 2017-01-13T01:20:45 | 77,501,010 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | from bs4 import BeautifulSoup
from bs4 import Comment
import requests
class Scraper:
    """Fetches a Tumblr page and extracts posts by type and content length."""

    # Running count of posts found (class-level; currently unused).
    posts_found = 0

    def __init__(self, url):
        # NOTE(review): verify=False disables TLS certificate checking —
        # acceptable for a local scraping tool, but confirm this is intended.
        html = requests.get(url, verify=False).content
        self.soup = BeautifulSoup(html, "lxml")

    def has_content(self):
        """Return True if the page contains a ' .post ' marker comment.

        Used when scraping an entire blog: the comment's absence signals
        that pagination has run past the last page with posts.
        """
        for comment in self.soup.find_all(text=lambda text: isinstance(text, Comment)):
            if comment == " .post ":
                return True
        return False

    # TODO: implement sorting by date later (use the datetime module);
    # an extract_date draft previously lived here.

    def extract_matching_post_types(self, options):
        """Collect posts whose type flag is enabled in `options`.

        Args:
            options: dict with boolean flags 'text', 'image', 'video',
                'chat' and 'quote'.

        Populates self.extracted_posts (replacing any previous contents).
        """
        self.extracted_posts = []
        # Map each option flag to the CSS class Tumblr uses for that type;
        # insertion order preserves the original extraction order.
        type_classes = {
            "text": "post post-type-text",
            "image": "post post-type-image",
            "video": "post post-type-video",
            "chat": "post post-type-chat",
            "quote": "post post-type-quote",
        }
        for key, css_class in type_classes.items():
            if options[key]:
                for post in self.soup.find_all("div", {"class": css_class}):
                    self.extracted_posts.append(post)

    def extract_posts_matching_char_limit(self, less_than, char_lim):
        """Filter self.extracted_posts by the length of their content div.

        Args:
            less_than: if True keep posts at or under the limit; otherwise
                keep posts at or over it. Posts exactly at the limit are
                kept in both modes.
            char_lim: character limit; the +7 offset presumably compensates
                for surrounding markup counted by str() — confirm.
        """
        temp_posts = self.extracted_posts
        self.extracted_posts = []
        for post in temp_posts:
            soup = BeautifulSoup(str(post), 'lxml')
            for each in soup.find_all("div", {"class": "post-content"}):
                length = len(str(each))
                if less_than:
                    if length <= (char_lim + 7):
                        self.extracted_posts.append(post)
                else:
                    # Bug fix: this branch previously repeated `<=`, which
                    # made the "greater than" filter identical to the
                    # "less than" one.
                    if length >= (char_lim + 7):
                        self.extracted_posts.append(post)
| [
"getintouchwithisaac@gmail.com"
] | getintouchwithisaac@gmail.com |
93c6c7dd56c60fb13f08f2d97e65e9d1e39305a3 | c7cce6315bf8439faedbe44e2f35e06087f8dfb3 | /Lab_Excercises/Lab_06/task_1.py | 509866639df36f81fb0e45767cd470a3ad2b40b5 | [] | no_license | sipakhti/code-with-mosh-python | d051ab7ed1153675b7c44a96815c38ed6b458d0f | d4baa9d7493a0aaefefa145bc14d8783ecb20f1b | refs/heads/master | 2020-12-26T13:05:06.783431 | 2020-07-08T07:00:59 | 2020-07-08T07:00:59 | 237,517,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | str1 = list(input("Please input the string: "))
# Mask every vowel that sits at an odd index, mutating str1 in place (as the
# original did), then join the characters back into the output string.
for position, character in enumerate(str1):
    if position % 2 != 0 and character.lower() in "aeiou":
        str1[position] = "_"
encrypted_string = "".join(str1)
print(encrypted_string)
| [
"476061@gmail.com"
] | 476061@gmail.com |
d4fc54e40cb86fb6b362d9bfa28efb9f639c4f65 | 7dde8293d4ce030e4817783e3bcc144669e411a8 | /FoodWebModel/life/secondary/whiptail.py | f17db87b7c61e290e6d027afa2b3ce64c19df1ae | [] | no_license | mikeonator/MathModeling | 861143ad6c95879d38b01b7dbb4782dea9a5e096 | 0e51886f5926dca1a11a68499b78ea8e94c7fded | refs/heads/master | 2021-01-08T09:00:28.616275 | 2020-05-04T22:12:41 | 2020-05-04T22:12:41 | 241,979,065 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | import random
class whiptail():
    """Simple stochastic population model for whiptail lizards.

    Each simulate() step applies intrinsic growth and death plus random
    predation pressure from coyotes and mountain lions, then writes the new
    population back onto the shared agent.
    (Fixed: the final line previously carried trailing extraction junk.)
    """

    def __init__(self, population, time):
        # Initial population size and simulation clock.
        self.population = population
        self.time = time
        # Intrinsic per-day growth and natural-death rates.
        self.growth = 5 / 365
        self.death = 1 / (365 * 3)

    def simulate(self, agent):
        """Advance the population one step using `agent`'s predator counts.

        Side effect: updates agent.whipop with the new population.
        """
        births = self.population * self.growth
        deaths = self.population * self.death
        # Stochastic predation losses; randint is drawn once per predator,
        # coyotes first, then lions.
        coyote_loss = (agent.coyotepop / 3) * (random.randint(0, 10) / 300)
        lion_loss = (agent.lionpop / 3) * (random.randint(0, 10) / 300)
        # Carrying-capacity factor was computed here but never applied:
        # carry = 1 - (agent.whipop / 200)
        self.population += (births - deaths) - coyote_loss - lion_loss
        agent.whipop = self.population
"maudinyc@gmail.com"
] | maudinyc@gmail.com |
9d0079e1ca5505a1bfa70cf2f4e6a544afd4ead8 | eb6f1c78b7a38f5386c013a8b453ba7c07a5e76b | /textattack/shared/word_embedding.py | 02ea054cb7d3382f0b3f3bfa52a195eaeaa2aa46 | [
"MIT"
] | permissive | StatNLP/discretezoo | b143306297fe5590800853c71278cc0c4cdd5e68 | 565552b894a5c9632ac7b949d61a6f71123031e4 | refs/heads/master | 2023-07-29T04:12:36.355651 | 2021-09-17T13:21:26 | 2021-09-17T13:21:26 | 404,305,923 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,675 | py | """
Shared loads word embeddings and related distances
=====================================================
"""
from abc import ABC, abstractmethod
from collections import defaultdict
import csv
import os
import pickle
import numpy as np
import torch
from textattack.shared import utils
class AbstractWordEmbedding(ABC):
  """Interface every TextAttack word embedding must implement.

  Transformations and constraints talk to embeddings exclusively through
  these methods. To plug in a custom embedding that TextAttack does not
  ship with, subclass this and implement each abstract method — but first
  check whether the concrete `WordEmbedding` class (which already
  implements most of this machinery) fits your data.
  """

  @abstractmethod
  def __getitem__(self, index):
    """Return the 1-D embedding vector for `index` (a word string or an
    integer id), or `None` when no vector exists for it."""
    raise NotImplementedError()

  @abstractmethod
  def get_mse_dist(self, a, b):
    """Return the MSE (squared L2) distance between the vectors of `a` and
    `b` (word strings or integer ids). Must be symmetric in its
    arguments."""
    raise NotImplementedError()

  @abstractmethod
  def get_cos_sim(self, a, b):
    """Return the cosine similarity between the vectors of `a` and `b`
    (word strings or integer ids). Must be symmetric in its arguments."""
    raise NotImplementedError()

  @abstractmethod
  def word2index(self, word):
    """Map a word to its integer id (its row in the embedding matrix)."""
    raise NotImplementedError()

  @abstractmethod
  def index2word(self, index):
    """Map an integer id back to its word."""
    raise NotImplementedError()

  @abstractmethod
  def nearest_neighbours(self, index, topn):
    """Return the ids of the `topn` words nearest to the word whose id is
    `index`."""
    raise NotImplementedError()

  __repr__ = __str__ = utils.default_class_repr
class WordEmbedding(AbstractWordEmbedding):
"""Object for loading word embeddings and related distances for TextAttack.
This class has a lot of internal components (e.g. get consine similarity)
implemented. Consider using this class if you can provide the appropriate
input data to create the object.
Args:
emedding_matrix (ndarray): 2-D array of shape N x D where N represents size of vocab and D is the dimension of embedding vectors.
word2index (Union[dict|object]): dictionary (or a similar object) that maps word to its index with in the embedding matrix.
index2word (Union[dict|object]): dictionary (or a similar object) that maps index to its word.
nn_matrix (ndarray): Matrix for precomputed nearest neighbours. It should be a 2-D integer array of shape N x K
where N represents size of vocab and K is the top-K nearest neighbours. If this is set to `None`, we have to compute nearest neighbours
on the fly for `nearest_neighbours` method, which is costly.
"""
PATH = "word_embeddings"
def __init__(self, embedding_matrix, word2index, index2word, nn_matrix=None):
self.embedding_matrix = embedding_matrix
self._eps = np.finfo(self.embedding_matrix.dtype).eps
self.normalized_embeddings = self.embedding_matrix / np.expand_dims(
np.maximum(np.linalg.norm(embedding_matrix, ord=2, axis=-1), self._eps),
1)
self._word2index = word2index
self._index2word = index2word
self.nn_matrix = nn_matrix
# Dictionary for caching results
self._mse_dist_mat = defaultdict(dict)
self._cos_sim_mat = defaultdict(dict)
self._nn_cache = {}
def __getitem__(self, index):
"""Gets the embedding vector for word/id
Args:
index (Union[str|int]): `index` can either be word or integer representing the id of the word.
Returns:
vector (ndarray): 1-D embedding vector. If corresponding vector cannot be found for `index`, returns `None`.
"""
if isinstance(index, str):
try:
index = self._word2index[index]
except KeyError:
return None
try:
return self.embedding_matrix[index]
except IndexError:
# word embedding ID out of bounds
return None
def word2index(self, word):
"""
Convert between word to id (i.e. index of word in embedding matrix)
Args:
word (str)
Returns:
index (int)
"""
return self._word2index[word]
def index2word(self, index):
"""
Convert index to corresponding word
Args:
index (int)
Returns:
word (str)
"""
return self._index2word[index]
  def get_mse_dist(self, a, b):
    """Return MSE distance between vector for word `a` and vector for word
    `b`.

    Since this is a metric, `get_mse_dist(a,b)` and `get_mse_dist(b,a)` should return the same value.

    NOTE(review): unlike get_cos_sim, string inputs are NOT lower-cased
    here — confirm which behaviour is intended.

    Args:
        a (Union[str|int]): Either word or integer presenting the id of the word
        b (Union[str|int]): Either word or integer presenting the id of the word

    Returns:
        distance (float): MSE (L2) distance
    """
    if isinstance(a, str):
      a = self._word2index[a]
    if isinstance(b, str):
      b = self._word2index[b]
    # Canonicalise the pair so (a, b) and (b, a) share one cache slot.
    a, b = min(a, b), max(a, b)
    try:
      mse_dist = self._mse_dist_mat[a][b]
    except KeyError:
      e1 = self.embedding_matrix[a]
      e2 = self.embedding_matrix[b]
      e1 = torch.tensor(e1).to(utils.device)
      e2 = torch.tensor(e2).to(utils.device)
      # Sum of squared differences (squared L2), computed on utils.device.
      mse_dist = torch.sum((e1 - e2)**2).item()
      self._mse_dist_mat[a][b] = mse_dist
    return mse_dist
def get_cos_nn(self, query_point: np.ndarray, topn: int):
"""Finds the nearest neighbors to the query point using cosine similarity.
Args:
query_point: The point in space of which we want to find nearest neighbors
<float32/64>[1, embedding_size]
topn: This controls how many neighbors to return
Returns:
A list of tokens in the embedding space.
A list of distances.
"""
normalizer = max(np.linalg.norm(query_point, ord=2),
np.finfo(query_point.dtype).eps)
query_point = query_point / normalizer
cosine_similarities = np.matmul(query_point, self.normalized_embeddings.T)
if topn == 1:
nearest_neighbors = list([np.argsort(cosine_similarities)[-1]])
else:
# argsort sorts lowest to highest, we want the largest values
nearest_neighbors = list(np.argsort(cosine_similarities)[-topn:])
nearest_neighbors.reverse()
distance_list = list(cosine_similarities[nearest_neighbors])
nearest_tokens = [self.index2word(index) for index in nearest_neighbors]
return nearest_tokens, distance_list
def get_euc_nn(self, query_point: np.ndarray, topn: int):
"""Finds the nearest neighbors to the query point using cosine similarity.
Args:
query_point: The point in space of which we want to find nearest neighbors
<float32/64>[1, embedding_size]
topn: This controls how many neighbors to return
Returns:
A list of tokens in the embedding space.
A list of distances.
"""
euclidean_distances = np.linalg.norm(self.embedding_matrix - query_point,
axis=-1,
ord=2)
if topn == 1:
nearest_neighbors = list([np.argsort(euclidean_distances)[0]])
else:
# argsort sorts lowest to highest, we want the smallest distance
nearest_neighbors = list(np.argsort(euclidean_distances)[:topn])
nearest_tokens = [self.index2word(index) for index in nearest_neighbors]
distance_list = list(euclidean_distances[nearest_neighbors])
return nearest_tokens, distance_list
  def get_cos_sim(self, a, b):
    """Return cosine similarity between vector for word `a` and vector for
    word `b`.

    Since this is a metric, `get_mse_dist(a,b)` and `get_mse_dist(b,a)` should return the same value.

    NOTE(review): string inputs are lower-cased here, but get_mse_dist does
    not lower-case — confirm which behaviour is intended.

    Args:
        a (Union[str|int]): Either word or integer presenting the id of the word
        b (Union[str|int]): Either word or integer presenting the id of the word

    Returns:
        distance (float): cosine similarity
    """
    if isinstance(a, str):
      a = self._word2index[a.lower()]
    if isinstance(b, str):
      b = self._word2index[b.lower()]
    # Canonicalise the pair so (a, b) and (b, a) share one cache slot.
    a, b = min(a, b), max(a, b)
    try:
      cos_sim = self._cos_sim_mat[a][b]
    except KeyError:
      e1 = self.embedding_matrix[a]
      e2 = self.embedding_matrix[b]
      e1 = torch.tensor(e1).to(utils.device)
      e2 = torch.tensor(e2).to(utils.device)
      cos_sim = torch.nn.CosineSimilarity(dim=0)(e1, e2).item()
      self._cos_sim_mat[a][b] = cos_sim
    return cos_sim
def nearest_neighbours(self, index, topn):
"""
Get top-N nearest neighbours for a word
Args:
index (int): ID of the word for which we're finding the nearest neighbours
topn (int): Used for specifying N nearest neighbours
Returns:
neighbours (list[int]): List of indices of the nearest neighbours
"""
if isinstance(index, str):
index = self._word2index[index]
if self.nn_matrix is not None:
nn = self.nn_matrix[index][1:(topn + 1)]
else:
try:
nn = self._nn_cache[index]
except KeyError:
embedding = torch.tensor(self.embedding_matrix).to(utils.device)
vector = torch.tensor(self.embedding_matrix[index]).to(utils.device)
dist = torch.norm(embedding - vector, dim=1, p=None)
# Since closest neighbour will be the same word, we consider N+1 nearest neighbours
nn = dist.topk(topn + 1, largest=False)[1][1:].tolist()
self._nn_cache[index] = nn
return nn
  @staticmethod
  def counterfitted_GLOVE_embedding():
    """Returns a prebuilt counter-fitted GLOVE word embedding proposed by
    "Counter-fitting Word Vectors to Linguistic Constraints" (Mrkšić et
    al., 2016).

    The embedding matrix, vocabulary, precomputed nearest-neighbour table
    and cached pairwise distance/similarity tables are downloaded on first
    use and memoised in utils.GLOBAL_OBJECTS so every component shares one
    instance.
    """
    if ("textattack_counterfitted_GLOVE_embedding" in utils.GLOBAL_OBJECTS and
        isinstance(
            utils.GLOBAL_OBJECTS["textattack_counterfitted_GLOVE_embedding"],
            WordEmbedding,
        )):
      # avoid recreating same embedding (same memory) and instead share across different components
      return utils.GLOBAL_OBJECTS["textattack_counterfitted_GLOVE_embedding"]
    word_embeddings_folder = "paragramcf"
    word_embeddings_file = "paragram.npy"
    word_list_file = "wordlist.pickle"
    mse_dist_file = "mse_dist.p"
    cos_sim_file = "cos_sim.p"
    nn_matrix_file = "nn.npy"
    # Download embeddings if they're not cached.
    word_embeddings_folder = os.path.join(WordEmbedding.PATH,
                                          word_embeddings_folder)
    word_embeddings_folder = utils.download_if_needed(word_embeddings_folder)
    # Concatenate folder names to create full path to files.
    word_embeddings_file = os.path.join(word_embeddings_folder,
                                        word_embeddings_file)
    word_list_file = os.path.join(word_embeddings_folder, word_list_file)
    mse_dist_file = os.path.join(word_embeddings_folder, mse_dist_file)
    cos_sim_file = os.path.join(word_embeddings_folder, cos_sim_file)
    nn_matrix_file = os.path.join(word_embeddings_folder, nn_matrix_file)
    # loading the files
    embedding_matrix = np.load(word_embeddings_file)
    word2index = np.load(word_list_file, allow_pickle=True)
    # Invert the word->id mapping for id->word lookups.
    index2word = {}
    for word, index in word2index.items():
      index2word[index] = word
    nn_matrix = np.load(nn_matrix_file)
    embedding = WordEmbedding(embedding_matrix, word2index, index2word,
                              nn_matrix)
    # Warm the caches with the precomputed pairwise tables.
    with open(mse_dist_file, "rb") as f:
      mse_dist_mat = pickle.load(f)
    with open(cos_sim_file, "rb") as f:
      cos_sim_mat = pickle.load(f)
    embedding._mse_dist_mat = mse_dist_mat
    embedding._cos_sim_mat = cos_sim_mat
    utils.GLOBAL_OBJECTS["textattack_counterfitted_GLOVE_embedding"] = embedding
    return embedding
@staticmethod
def embeddings_from_file(path_to_embeddings):
"""Given a csv file using spaces as delimiters, use the first column
as the vocabulary and the rest of the columns as the word embeddings."""
embedding_file = open(path_to_embeddings)
lines = embedding_file.readlines()
vocab = []
vectors = []
for line in lines:
if line == "":
break
line = line.split()
vocab.append(line[0])
vectors.append([float(value) for value in line[1:]])
embedding_matrix = np.array(vectors)
word2index = {}
index2word = {}
for i, token in enumerate(vocab):
word2index[token] = i
index2word[i] = token
embedding = WordEmbedding(embedding_matrix, word2index, index2word)
return embedding
class GensimWordEmbedding(AbstractWordEmbedding):
    """Wraps Gensim's `KeyedVectors`
    (https://radimrehurek.com/gensim/models/keyedvectors.html)"""
    def __init__(self, keyed_vectors_or_path):
        # Accepts either a loaded KeyedVectors object or a path to a saved
        # word2vec-format file (treated as binary when it ends in ".bin").
        # Import gensim lazily so it is only required when this class is used.
        gensim = utils.LazyLoader("gensim", globals(), "gensim")
        if isinstance(keyed_vectors_or_path, str):
            if keyed_vectors_or_path.endswith(".bin"):
                self.keyed_vectors = gensim.models.KeyedVectors.load_word2vec_format(
                    keyed_vectors_or_path, binary=True)
            else:
                self.keyed_vectors = gensim.models.KeyedVectors.load_word2vec_format(
                    keyed_vectors_or_path)
        elif isinstance(keyed_vectors_or_path, gensim.models.KeyedVectors):
            self.keyed_vectors = keyed_vectors_or_path
        else:
            raise ValueError(
                "`keyed_vectors_or_path` argument must either be `gensim.models.KeyedVectors` object "
                "or a path pointing to the saved KeyedVector object")
        # Precompute the L2-normalized vectors (fills `vectors_norm`).
        self.keyed_vectors.init_sims()
        # Lazy per-pair caches for MSE distance and cosine similarity.
        self._mse_dist_mat = defaultdict(dict)
        self._cos_sim_mat = defaultdict(dict)
    def __getitem__(self, index):
        """Gets the embedding vector for word/id
        Args:
            index (Union[str|int]): `index` can either be word or integer representing the id of the word.
        Returns:
            vector (ndarray): 1-D embedding vector. If corresponding vector cannot be found for `index`, returns `None`.
        """
        if isinstance(index, str):
            try:
                # NOTE(review): vocab.get() returns None for an unknown word,
                # so `.index` would raise AttributeError, which this KeyError
                # handler does not catch — verify intended behavior.
                index = self.keyed_vectors.vocab.get(index).index
            except KeyError:
                return None
        try:
            return self.keyed_vectors.vectors_norm[index]
        except IndexError:
            # word embedding ID out of bounds
            return None
    def word2index(self, word):
        """
        Convert between word to id (i.e. index of word in embedding matrix)
        Args:
            word (str)
        Returns:
            index (int)
        """
        vocab = self.keyed_vectors.vocab.get(word)
        if vocab is None:
            raise KeyError(word)
        return vocab.index
    def index2word(self, index):
        """
        Convert index to corresponding word
        Args:
            index (int)
        Returns:
            word (str)
        """
        try:
            # this is a list, so the error would be IndexError
            return self.keyed_vectors.index2word[index]
        except IndexError:
            raise KeyError(index)
    def get_mse_dist(self, a, b):
        """Return MSE distance between vector for word `a` and vector for word
        `b`.
        Since this is a metric, `get_mse_dist(a,b)` and `get_mse_dist(b,a)` should return the same value.
        Args:
            a (Union[str|int]): Either word or integer presenting the id of the word
            b (Union[str|int]): Either word or integer presenting the id of the word
        Returns:
            distance (float): MSE (L2) distance
        """
        try:
            mse_dist = self._mse_dist_mat[a][b]
        except KeyError:
            # Compute once on the configured device, then memoize.
            e1 = self.keyed_vectors.vectors_norm[a]
            e2 = self.keyed_vectors.vectors_norm[b]
            e1 = torch.tensor(e1).to(utils.device)
            e2 = torch.tensor(e2).to(utils.device)
            mse_dist = torch.sum((e1 - e2)**2).item()
            self._mse_dist_mat[a][b] = mse_dist
        return mse_dist
    def get_cos_sim(self, a, b):
        """Return cosine similarity between vector for word `a` and vector for
        word `b`.
        Since this is a metric, `get_cos_sim(a,b)` and `get_cos_sim(b,a)` should return the same value.
        Args:
            a (Union[str|int]): Either word or integer presenting the id of the word
            b (Union[str|int]): Either word or integer presenting the id of the word
        Returns:
            distance (float): cosine similarity
        """
        # gensim's similarity() works on words, so ids are converted first.
        if not isinstance(a, str):
            a = self.keyed_vectors.index2word[a]
        if not isinstance(b, str):
            b = self.keyed_vectors.index2word[b]
        cos_sim = self.keyed_vectors.similarity(a, b)
        return cos_sim
    def nearest_neighbours(self, index, topn, return_words=True):
        """
        Get top-N nearest neighbours for a word
        Args:
            index (int): ID of the word for which we're finding the nearest neighbours
            topn (int): Used for specifying N nearest neighbours
        Returns:
            neighbours (list[int]): List of indices of the nearest neighbours
        """
        # NOTE(review): `return_words` is accepted but never used.
        word = self.keyed_vectors.index2word[index]
        # similar_by_word returns (word, similarity) pairs; each word is
        # mapped back to its index. list.index() is O(vocab) per neighbour.
        return [
            self.keyed_vectors.index2word.index(i[0])
            for i in self.keyed_vectors.similar_by_word(word, topn)
        ]
| [
"berger1954@gmail.com"
] | berger1954@gmail.com |
dc2296126fd9fbb96bfe024a037473fed5ce1fd4 | 46e3850c8aabeec799a7b1441bb9bc9a7846a865 | /service_logger.py | d1c0286a09df924543cb0849c22a567ee1a6a44c | [] | no_license | valiantljk/service_logger | 8fa7f79dc4a8c3845d0c172c9f97a71d7ac399d6 | 44cf1a707334cc112020e22bf316aab1d4123a36 | refs/heads/master | 2022-07-02T13:14:12.602034 | 2020-05-12T22:48:56 | 2020-05-12T22:48:56 | 263,466,709 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,650 | py | #!/usr/bin/env python
# coding: utf-8
# Author: Jialin Liu
# Python Version: 3.7
# Redis Version: 6.0.1
# About: A simple logger based on Redis
import redis
import time
import pickle
class RedisLog():
    """
    RedisLog structure
    service_name: str, service name, eg., head-detection
    func_name: str, function name within a service, e.g., detect()
    status: str, status code, 1: ok, 0: error
    error: str, error infor for the service crash, can paste from Exception
    uuid: universal id for tracing back, sending from application level, down to base service
    timestamp: auto-generated unix timestamp whenever a log is produced
    """
    def __init__(self, sname='RedisLog', fname='NA', status=1, error=None, uuid=0):
        """Capture the log fields and stamp the entry with the current time."""
        self.service_name = sname
        self.func_name = fname
        self.status = status
        self.error = error
        self.uuid = uuid
        # Unix timestamp (whole seconds) taken at construction time.
        self.timestamp = int(time.time())
    def print(self):
        """Dump every log field to stdout, one "Label:value" line each."""
        fields = (
            ("Service Name", self.service_name),
            ("Function Name", self.func_name),
            ("Status", self.status),
            ("Error", self.error),
            ("UUID", self.uuid),
            ("Timestamp", self.timestamp),
        )
        for label, value in fields:
            print("%s:%s" % (label, value))
class Redis():
    """Thin logging wrapper around a Redis connection.

    Each service key maps to a Redis list of pickled RedisLog entries.

    serialize: serialize python objects using pickle
    set_expire: set expire on a key
    get_ttl: get expire of a key
    put: put logs into redis
    get: get logs from redis
    """
    def __init__(self, host, port, password):
        """Connect to Redis.

        On failure `self.redis` is left as None; callers must check it
        before using this object.
        """
        try:
            self.redis = redis.StrictRedis(host=host,
                                           port=port,
                                           password=password)
        except Exception:
            # Redis cannot be connected; leave a sentinel for callers.
            self.redis = None
    def serialize(self, objs):
        """
        objs: list of python objects
        return: list of pickled objects, [] if failed
        """
        try:
            return [pickle.dumps(o) for o in objs]
        except Exception as e:
            print(e)
            return []
    def set_expire(self, key, ts):
        """
        key: service name
        ts: time in seconds
        return: -1 if fail (None on success)
        """
        try:
            # pexpire takes milliseconds.
            self.redis.pexpire(key, ts * 1000)
        except Exception as e:
            print(e)
            return -1
    def get_ttl(self, key):
        """
        key: service name
        return: time (seconds) before expire, -1 if fail
        """
        try:
            # pttl returns milliseconds; convert back to seconds.
            return self.redis.pttl(key) / 1000
        except Exception as e:
            print(e)
            return -1
    def put(self, key, values):
        """
        key: service name
        values: list of logs or a single log
        return: number of logs inserted, 0 if nothing inserted
        """
        if isinstance(values, list):
            if len(values) == 0:
                return 0
        else:
            if values:
                values = [values]
            else:
                # values is None
                return 0
        try:
            # Serialize first, then push all objects onto the list tail.
            vobjs = self.serialize(values)
            if self.redis:
                self.redis.rpush(key, *vobjs)
                return len(vobjs)
            else:
                return 0
        except Exception as e:
            # Best effort: record the failure itself as a log entry.
            print(e)
            rlog = RedisLog(fname='rpush', status=0, error=e)
            rlog_obj = self.serialize([rlog])
            try:
                # serialize() returns a list, so it must be unpacked here;
                # the original pushed the list object itself, which redis-py
                # rejects as an invalid value type.
                self.redis.rpush('RedisLog', *rlog_obj)
            except Exception as e:
                # redis failed with best try
                print(e)
            return 0
    def get(self, key, num=None):
        """
        key: service name
        num: number of logs to get (latest `num`); all logs when None
        return: list of RedisLog or [] if none found
        """
        try:
            if num is not None and num > 0:
                # Negative indices address the tail, i.e. the newest entries.
                objs = self.redis.lrange(key, -num, -1)
            else:
                objs = self.redis.lrange(key, 0, -1)
            return [pickle.loads(o) for o in objs]
        except Exception as e:
            print(e)
            return []
| [
"valiantljk@gmail.com"
] | valiantljk@gmail.com |
285381c4261f4c277b0fea06bba2ff9ce9ed8a11 | a55fab6a4eef12c5476a5a26eac38ade9d3b6e05 | /comms_tutorial/mqttPublisherCustom.py | 9279bc04177c31aebb943a5dec341cf2a253636c | [] | no_license | thomaskost17/180DA-WarmUp | 64dc6f8ca0d12fb420f40df85404085f323533f7 | 03bf14c4db253b331a171a74dd6081a4fc2d9a22 | refs/heads/master | 2023-01-03T14:49:07.898460 | 2020-10-29T22:19:36 | 2020-10-29T22:19:36 | 302,253,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,231 | py | import paho.mqtt.client as mqtt
import numpy as np
import time
# 0. define callbacks - functions that run when events happen.
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """Callback fired when the broker answers our connection attempt."""
    # rc == 0 means the broker accepted the connection.
    print("Connection returned result: " + str(rc))
    # Subscribing here would make subscriptions survive reconnects, e.g.:
    # client.subscribe("ece180d/test")
# The callback of the client when it disconnects.
def on_disconnect(client, userdata, rc):
    """Callback fired when the client disconnects from the broker."""
    # rc of 0 indicates a disconnect we requested; anything else is unexpected.
    message = 'Expected Disconnect' if rc == 0 else 'Unexpected Disconnect'
    print(message)
# The default message callback.
# (won't be used if only publishing, but can still exist)
def on_message(client, userdata, message):
    """Default callback for messages arriving on subscribed topics."""
    parts = (
        'Received message: "', str(message.payload),
        '" on topic "', message.topic,
        '" with QoS ', str(message.qos),
    )
    print(''.join(parts))
# 1. create a client instance.
client = mqtt.Client()
# add additional client options (security, certifications, etc.)
# many default options should be good to start off.
# add callbacks to client.
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
# 2. connect to a broker using one of the connect*() functions.
client.connect_async('mqtt.eclipse.org')
# 3. call one of the loop*() functions to maintain network traffic flow with the broker.
client.loop_start()
# 4. use subscribe() to subscribe to a topic and receive messages.
# 5. use publish() to publish messages to the broker.
# payload must be a string, bytearray, int, float or None.
# Publish a fixed JSON payload 10 times, then sleep 10 seconds, forever.
while True:
    for i in range(10):
        client.publish('ece180d/team8', '{"messages": [{"message_type": "text", "data": "some text", "sender": "John", "reciever": "Jack", "time": {"hour": 1, "minute": 14, "second": 39}}, {"message_type": "weather", "data": {"conditions": "sunny", "temp": 69, "high": 75, "low": 50}}, {"message_type": "news", "data": "https://www.youtube.com/watch?v=oHg5SJYRHA0", "relevant_text": "important information"}]}', qos=1)
    time.sleep(10)
# 6. use disconnect() to disconnect from the broker.
# NOTE(review): the `while True` loop above never breaks, so the two calls
# below are unreachable; the script must be killed externally.
client.loop_stop()
client.disconnect()
| [
"thomaskost17@ucla.edu"
] | thomaskost17@ucla.edu |
0d62c7f36575a69c9d7e3296b783aa2763b7753d | 41def1017345fa46eb395b483984cfac646b89f8 | /plugins/related_posts.py | c549fdb33ecfc1961ee7da9f321b65d0ac5d7c28 | [] | no_license | workingmirror/blog | 18f024582881b0af2365a37545dd3bb0e1bd86c4 | 64baa808246f7794d444237fe23497924f09e2d4 | refs/heads/master | 2021-01-21T17:13:27.730617 | 2017-07-10T09:33:39 | 2017-07-10T09:36:55 | 91,941,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,203 | py | import random
from pelican import signals
MIN_POSTS = 3
MAX_POSTS = 5
def find_unique(items, item_set, content, unique_key='url'):
    """Return the members of `items` whose `unique_key` attribute is new.

    An item is kept when its key differs from `content`'s key and has not
    been seen before; kept keys are added to `item_set` in place.
    """
    fresh = []
    for candidate in items:
        candidate_key = getattr(candidate, unique_key)
        if candidate_key != getattr(content, unique_key) and candidate_key not in item_set:
            item_set.add(candidate_key)
            fresh.append(candidate)
    return fresh
def find_tag_posts(generator, content, related_posts=None):
    """Collect up to MAX_POSTS articles sharing a tag with `content`.

    Articles already present in `related_posts` (and `content` itself)
    are excluded; tag order and the final pick are randomized.
    """
    # Sample tags first so we don't always favour the same ones.
    tags = random.sample(content.tags, min(len(content.tags), MAX_POSTS))
    articles = []
    article_set = set()
    if related_posts:
        # Seed the dedupe set with posts found by earlier passes.
        for article in related_posts:
            article_set.add(article.url)
    for tag in tags:
        if len(articles) > MAX_POSTS:
            break
        articles += find_unique(generator.tags[tag], article_set, content)
    return random.sample(articles, min(len(articles), MAX_POSTS))
def find_category_posts(generator, content, related_posts=None):
    """Collect up to MAX_POSTS articles from `content`'s own category,
    excluding `content` and anything already in `related_posts`."""
    article_set = set()
    # generator.categories yields (category, articles) pairs.
    category_hash = {category[0]: category[1] for category in generator.categories}
    if related_posts:
        # Seed the dedupe set with posts found by earlier passes.
        for article in related_posts:
            article_set.add(article.url)
    articles = find_unique(category_hash[content.category], article_set, content)
    return random.sample(articles, min(len(articles), MAX_POSTS))
def find_author_posts(generator, content, related_posts=None):
    """Collect up to MAX_POSTS articles sharing an author with `content`.

    Articles already present in `related_posts` (and `content` itself) are
    excluded, mirroring find_tag_posts/find_category_posts. The original
    accepted `related_posts` but ignored it, so already-selected posts
    could be suggested again.
    """
    articles = []
    article_set = set()
    if related_posts:
        # Seed the dedupe set so earlier passes' picks are not repeated.
        for article in related_posts:
            article_set.add(article.url)
    # generator.authors yields (author, articles) pairs.
    author_hash = {author[0]: author[1] for author in generator.authors}
    authors = random.sample(content.authors, min(len(content.authors), MAX_POSTS))
    for author in authors:
        if len(articles) > MAX_POSTS:
            break
        articles += find_unique(author_hash[author], article_set, content)
    return random.sample(articles, min(len(articles), MAX_POSTS))
def set_related_posts(generator, content):
    """Attach a `related_posts` list to `content`, trying shared tags
    first, then the category, then shared authors until MIN_POSTS is
    reached; cap the result at MAX_POSTS."""
    related_posts = find_tag_posts(generator, content)
    if len(related_posts) < MIN_POSTS:
        related_posts += find_category_posts(generator, content, related_posts)
    if len(related_posts) < MIN_POSTS:
        related_posts += find_author_posts(generator, content, related_posts)
    if len(related_posts) > MAX_POSTS:
        # Too many after merging passes; trim with a random sample.
        related_posts = random.sample(related_posts, MAX_POSTS)
    content.related_posts = related_posts
def register():
    """Pelican plugin entry point: run set_related_posts per article."""
    signals.article_generator_write_article.connect(set_related_posts)
| [
"mockenoff@yahoo.com"
] | mockenoff@yahoo.com |
c052861150013f827343e2af6afa78a72d9cdde7 | 1f7bf79d85e2636c50132964d38023277cd91595 | /detect.py | a0db4b834b238cc51584c8678761589fa72ebdef | [
"MIT"
] | permissive | AhsanYousaf/Video_Classification_And_Indexing | 8f320eb1b057a95d308497471aa1d4d2a2437db7 | b143c90f2fe84cc372bb9b37f3494bcc90bdb4d1 | refs/heads/master | 2023-06-26T15:49:59.547830 | 2021-07-28T11:33:56 | 2021-07-28T11:33:56 | 383,148,372 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | import os
import tensorflow as tf
import cv2
import numpy as np
# Paths to the saved Keras model and the input video file.
model_path='models'
source='C:/Users/Ahsan Yousaf/Downloads/Video/9convert.com - Prichard Colon VS Terrel Williams.mp4'
# load json and create model
json_file = open(os.path.join(model_path,'model.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
model = tf.keras.models.model_from_json(loaded_model_json)
# load weights into new model
model.load_weights(os.path.join(model_path,'model.h5'))
print("Loaded model from disk")
# Compile the restored model before running predictions.
model.compile(tf.keras.optimizers.Adam(learning_rate=0.0001, decay=1e-6),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Class index -> sport label mapping.
classes={0:'basketball',1: 'boxing',
         2:'cricket',3: 'formula1',
         4:'kabaddi', 5:'swimming',
         6:'table_tennis',7: 'weight_lifting'}
cap = cv2.VideoCapture(source)
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret == True:
        # print(frame.shape)
        # Resize to 224x224 and add a batch axis for predict().
        pred_img = cv2.resize(frame,(224,224))
        pred_img=np.expand_dims(pred_img, axis=0)
        # print(pred_img.shape)
        prediction = model.predict(pred_img)
        maxindex = int(np.argmax(prediction))
        sport=classes[maxindex]
        print("Sport is ",sport)
        # Overlay the predicted label on the frame and display it.
        image = cv2.putText(frame, sport, (35, 50), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (255, 0, 0), 2, cv2.LINE_AA)
        cv2.imshow('Predicted Sport',image)
        # Press 'q' to quit.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # NOTE(review): when the video ends, ret is False but the loop keeps
    # spinning; consider breaking when ret is False — confirm intent.
cap.release()
cv2.destroyAllWindows()
| [
"ahsanbhatti624@gmail.com"
] | ahsanbhatti624@gmail.com |
7fd847ac5dc43c77760ea5b2037fbc2c8bbd2429 | 118f0fe87dc70dd63f1c5b3f0e41be9142bafd20 | /actions.py | 636de8ec747f9883e14420ab72a0d0bdfbf396bf | [] | no_license | heraclex12/vietnamese-chat-with-rasa | c8e274b637a5060c624fdaaa2d00ce887a484a91 | ed3fae8a3b6a3434d154538c8dfaa38700981572 | refs/heads/master | 2021-07-10T00:57:05.667586 | 2020-09-30T14:56:40 | 2020-09-30T14:56:40 | 200,461,456 | 3 | 13 | null | null | null | null | UTF-8 | Python | false | false | 2,784 | py | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
import requests
import json
import re
class ActionLookUpWordDictionary(Action):
    """Rasa custom action that looks up an English word on tracau.vn and
    replies with its pronunciation, word type and meanings."""

    def name(self) -> Text:
        return 'action_lookUp_en'

    def run(self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        word = str(tracker.get_slot('enword')).lower()
        print(word)
        if not word:
            dispatcher.utter_message("Đôi lúc sự thông thái của tôi cũng có giới hạn!")
            return []
        url = 'https://api.tracau.vn/WBBcwnwQpV89/s/{}/en'.format(word)
        try:
            response = requests.get(url).text
            json_data = json.loads(response)['tratu'][0]['fields']['fulltext']
        except Exception as err:
            # Network failure or no dictionary entry for the word.
            print(err)
            dispatcher.utter_message("Đôi lúc sự thông thái của tôi cũng có giới hạn!")
            return []
        # Pronunciation ("pa") and word-type ("tl") table rows are optional.
        # The original wrote `except e1:` (an undefined name), which raised a
        # NameError, and then used `pro`/`tl` even when they were never bound.
        pro = tl = None
        try:
            pro = re.search(r"<\s*tr\s+id\s*=\s*\"pa\"[^>]*>.+?<\s*\/\s*tr>", json_data).group()
            tl = re.search(r"<\s*tr\s+id\s*=\s*\"tl\"[^>]*>.+?<\s*\/\s*tr>", json_data).group()
        except Exception as err:
            print(err)
        try:
            meanings = re.findall(r"<\s*tr\s+id\s*=\s*\"mn\"[^>]*>.+?<\s*\/\s*tr>", json_data)
        except Exception:
            dispatcher.utter_message("Đôi lúc sự thông thái của tôi cũng có giới hạn!")
            return []
        # Strip the HTML/XML tags from every extracted fragment.
        tag_pattern = r"<\s*[^>]+>"
        if pro is not None:
            pro = re.sub(tag_pattern, "", pro)
        if tl is not None:
            tl = re.sub(tag_pattern, "", tl)
        meanings = [re.sub(tag_pattern, "", mean) for mean in meanings]
        text_respond = "=> " + word.title()
        if pro is not None:
            text_respond += pro.replace("◘", " ")
        if tl is not None:
            text_respond += "\n" + tl.replace("*", "* ")
        if meanings:
            for mean in meanings:
                text_respond += "\n" + mean.replace("■", " - ")
            dispatcher.utter_message("Bằng sự thông thái của tôi, đây là thứ bạn cần tìm:\n" + text_respond)
        else:
            dispatcher.utter_message("Đôi lúc sự thông thái của tôi cũng có giới hạn!")
        return []
#
#
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
#
# dispatcher.utter_message("Hello World!")
#
# return []
| [
"heraclex12@gmail.com"
] | heraclex12@gmail.com |
2909ce92088f0c5ee4ceca1c8043f38123ed0ca4 | b0ac19e6bf6da9c9eea59d47c8b77b00cebdf0c2 | /TicTacToe.1/Board.py | 7280018bf7094eea4e3c91bf1dec35288ea34927 | [] | no_license | faatihi/event-them-all | 1c7be58a06183b082c3c6587023e84fbfacc41c4 | d38d4bca3b8186fa2e9b776050128ce5988ff6cf | refs/heads/master | 2020-04-09T08:21:36.219449 | 2019-02-01T17:22:13 | 2019-02-01T17:22:13 | 160,191,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from lib.Component import Component
from lib.Console import console
from Tile import Tile
class Board (Component):
    """3x3 tic-tac-toe board composed of Tile components."""
    def __init__ (self, name = 'board'):
        super().__init__(name)
        # [rows, columns] of the board.
        self.dimension = [3, 3]
        # Tiles are numbered 1..9 (Tile ids are 1-based).
        tiles_ids = range(self.dimension[0] * self.dimension[1])
        self.tiles = list(map(lambda tile_id: Tile(tile_id + 1), tiles_ids))
        # Redraw whenever a 'need-draw-of-board' event is dispatched.
        self.listeners.append({ 'event': 'need-draw-of-board', 'callback': self.onRender })
    def onRender (self, data):
        # Event handler: `data` is unused; simply redraw the board.
        self.render()
    def render (self):
        # Draw tiles left to right, breaking the line after each full row.
        for tile in self.tiles:
            tile.render()
            if tile.id % self.dimension[0] == 0:
                console.print('\n')
| [
"faatihi@yahoo.com"
] | faatihi@yahoo.com |
a1f19369199259fcb97d4c5f6bd64199fb6158f0 | e6ba1dde1f21e4817215668905565edc4616fff8 | /build_isolated/learning_joy/catkin_generated/generate_cached_setup.py | 79bda8756ce4741743bccc0a5bb96012e36dc36a | [] | no_license | shu-98/catkin_ws | dde162026e114dd6e96a90994e6699ac38e85e68 | 8b1094ee815227f3ca766db73103ae2306a948c7 | refs/heads/master | 2020-07-01T15:14:38.537402 | 2019-12-24T11:33:06 | 2019-12-24T11:33:06 | 201,206,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
# Auto-generated by catkin: writes an executable cached-environment script
# (setup_cached.sh) for the learning_joy package.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/sou/catkin_ws/devel_isolated/gscam;/home/sou/catkin_ws/devel_isolated/create_autonomy;/home/sou/catkin_ws/devel_isolated/cereal_port;/home/sou/catkin_ws/devel_isolated/ca_tools;/home/sou/catkin_ws/devel_isolated/ca_msgs;/home/sou/catkin_ws/devel_isolated/ca_description;/home/sou/catkin_ws/devel_isolated/arduino_roomba;/home/sou/catkin_ws/devel;/opt/ros/kinetic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/sou/catkin_ws/devel_isolated/learning_joy/env.sh')
output_filename = '/home/sou/catkin_ws/build_isolated/learning_joy/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the generated script as executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"jdayeissaw@outlook.jp"
] | jdayeissaw@outlook.jp |
f2fa7d6d85729c9220e25e76c3232185452ec7db | 32cf29b1d804fb4f42086e36928fef9536f57e65 | /nqslearn/heisenberg2d.py | 8024097b811acdea9a51a6a05722a62f84d47ca3 | [
"MIT"
] | permissive | gharib85/neural-quantum-states | 4f732f2b72d3e1bd4cb5195efe0604ab1ae7726b | 4058a120b95ea6fba64e43882b8c17cdaa8aacb3 | refs/heads/master | 2021-01-15T02:57:05.595394 | 2018-06-04T07:31:32 | 2018-06-04T07:31:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,634 | py | # Python implantation of metropolis-hastings sampler for quantum states
# The original programs we have modified require the following notice
############################ COPYRIGHT NOTICE #################################
#
# Code provided by G. Carleo and M. Troyer, written by G. Carleo, December 2016
#
# Permission is granted for anyone to copy, use, modify, or distribute the
# accompanying programs and documents for any purpose, provided this copyright
# notice is retained and prominently displayed, along with a complete citation
# of the published version of the paper:
# _____________________________________________________________________________
# | G. Carleo, and M. Troyer |
# | Solving the quantum many-body problem with artificial neural-networks |
# |___________________________________________________________________________|
#
# The programs and documents are distributed without any warranty, express or
# implied.
#
# These programs were written for research purposes only, and are meant to
# demonstrate and reproduce the main results obtained in the paper.
#
# All use of these programs is entirely at the user's own risk.
#
###############################################################################
import numpy as np
from .hamiltonian import Hamiltonian
class Heisenberg2D(Hamiltonian):
    """
    Hamiltonian of the 2D Heisenberg model on an l x l square lattice
    with exchange coupling j_z. Sites are indexed row-major, 0..l*l-1.
    (The original docstring said "1D"; the neighbour construction below
    is for a square lattice.)
    """
    def __init__(self, n_spins, lattice, j_z, periodic):
        super().__init__()
        if n_spins != lattice ** 2:
            raise ValueError('N_spins not compatible with lattice size.')
        self.l = lattice
        # Minimum number of simultaneous spin flips for a sampler move.
        self.min_flip = 2
        self.n_spins = n_spins
        self.j_z = j_z
        self.periodic = periodic
        self.nearest_neighbors, self.bonds = self.find_nearest_neighbors()
    def min_flips(self):
        return self.min_flip
    def num_spins(self):
        return self.n_spins
    def field(self):
        # Returns the exchange coupling J_z.
        return self.j_z
    def is_periodic(self):
        return self.periodic
    def pbc_h(self, nn, s):
        # Horizontal periodic wrap: given site s and its tentative
        # left/right neighbour nn, wrap around the row edges.
        if s % self.l == 0 and nn == s-1:
            # s is at left side of lattice; return rightmost element
            return s+self.l-1
        elif (s+1) % self.l == 0 and nn == (s+1):
            # s is at right side of lattice; return leftmost element
            return s-self.l+1
        else:
            return nn # s is in middle of lattice; return element to left
    def pbc_v_lower(self, nn):
        # Vertical wrap for the neighbour one row up: top-row sites wrap
        # to the bottom row.
        if nn < self.l:
            return self.l*(self.l-1) + nn
        else:
            return nn - self.l
    def pbc_v_higher(self, nn):
        # Vertical wrap for the neighbour one row down: bottom-row sites
        # wrap to the top row.
        if self.l*(self.l-1) <= nn <= self.n_spins:
            return nn - self.l*(self.l-1)
        else:
            return nn + self.l
    def find_nearest_neighbors(self):
        # Build, for every site, its 4 neighbours (left, right, up, down)
        # and the list of unique bonds (i, j) with i < j.
        nearest_neighbors = np.zeros((self.n_spins, 4))
        bonds = []
        for i in range(self.n_spins):
            nearest_neighbors[i][0] = self.pbc_h(i-1, i)
            nearest_neighbors[i][1] = self.pbc_h(i+1, i)
            nearest_neighbors[i][2] = self.pbc_v_lower(i)
            nearest_neighbors[i][3] = self.pbc_v_higher(i)
        for i in range(self.n_spins):
            for k in range(4):
                j = int(nearest_neighbors[i][k])
                if i < j:
                    # Keep each bond once (smaller index first).
                    bonds.append((i, j))
        return nearest_neighbors, bonds
    def find_matrix_elements(self, state):
        """
        inputs
        state: list of integers, with each corresponding to quantum number
        returns (in this order):
        matrix_elements: list <s|H|state> for each s in transitions
        transitions: list of states s such that <s|H|state> is nonzero.
            s are represented as a list of integers corresponding to which
            quantum variables got swapped
        """
        matrix_elements = [0]
        spin_flip_transitions = [[]]
        # computing interaction part Sz*Sz
        for i in range(len(self.bonds)):
            matrix_elements[0] += state[self.bonds[i][0]] * \
                                  state[self.bonds[i][1]]
        matrix_elements[0] *= self.j_z
        # look for spin flips
        for i in range(len(self.bonds)):
            si = self.bonds[i][0]
            sj = self.bonds[i][1]
            if state[si] != state[sj]:
                # Off-diagonal exchange term acts on anti-parallel pairs.
                matrix_elements.append(-2)
                spin_flip_transitions.append([si, sj])
        return matrix_elements, spin_flip_transitions
| [
"fischer.kevin.a@gmail.com"
] | fischer.kevin.a@gmail.com |
a1800a2b4cb03a56e3f35bd0321e3b5d031bf456 | 395a0c16a2bc72adf670f8203ec2b3c469b6267b | /models.py | c6411c4ff942a43e837eed5f810b7bad00d58a8c | [] | no_license | a-packer/blogly_app | 5c2c2b67e6cc5c35f23c6a91f65bd9674ebfeb82 | 99f301a4cb1ed052c7a2f1b5ef01dab65510d4e2 | refs/heads/main | 2023-03-12T15:23:30.647714 | 2021-03-02T23:27:23 | 2021-03-02T23:27:23 | 341,394,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py | import datetime
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def connect_db(app):
    """Bind the shared SQLAlchemy instance to the given Flask app."""
    db.app = app
    db.init_app(app)
# Models go below
class User(db.Model):
    """A blogly user with a name, an avatar image and a list of posts."""

    __tablename__ = 'users'

    def __repr__(self):
        u = self
        # The original read the nonexistent attribute `u.image_url`
        # (the column is `img_url`), so repr() raised AttributeError.
        return f"<User id={u.id} first_name={u.first_name} last_name={u.last_name} img_url={u.img_url}>"

    # table schema
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(50), nullable=False)
    last_name = db.Column(db.String(50), nullable=False)
    img_url = db.Column(db.String(3000), nullable=False, default="https://i.pinimg.com/564x/85/21/df/8521df4e1ac0c6f1af2f3ac166e5390b.jpg")
    # A user's posts; deleting the user removes their posts too.
    posts = db.relationship("Post", backref="users", cascade="all, delete-orphan")
class Post(db.Model):
    """Blog post."""
    __tablename__ = "posts"
    def __repr__(self):
        p = self
        return f"<Post {p.id} {p.title}>"
    # table schema
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.Text, nullable=False, default="Post Title")
    content = db.Column(db.Text, nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    #assignments = db.relationship('EmployeeProject', backref='employee')
    # Association rows linking this post to its tags.
    subjects = db.relationship('PostTag', backref='post')
    # projects = db.relationship('Project', secondary="employees_projects", backref="employees")
    # Direct many-to-many access to Tag via the post_tags table.
    tags = db.relationship('Tag', secondary="post_tags", backref="posts")
class Tag(db.Model):
    """Tags are connected to posts in order to search for posts about certain topics"""
    __tablename__ = "tags"
    def __repr__(self):
        return f"<Tag {self.id} {self.name}>"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    # assignments = db.relationship('EmployeeProject', backref="project")
    # Association rows linking this tag to its posts.
    subjects = db.relationship('PostTag', backref="tag")
class PostTag(db.Model):
    """Join table mapping posts to tags (composite primary key)."""
    __tablename__ = "post_tags"
    def __repr__(self):
        return f"<PostTag post-{self.post_id} tag-{self.tag_id}>"
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'), primary_key=True)
    tag_id = db.Column(db.Integer, db.ForeignKey('tags.id'), primary_key=True)
| [
"aubreypacker5@gmail.com"
] | aubreypacker5@gmail.com |
c88714140762409924946388370a228c04b4333e | 69c46463545523d288abea2d2224bf6231b79471 | /build/lib.linux-x86_64-2.7/yowsup/layers/protocol_media/protocolentities/message_media_downloadable_audio.py | 70c47429968072f0fa7e2dd86fcffc2f77a2a850 | [] | no_license | xerosanyam/myntra | 7aca60099a91670a5c1d0e8fcfa69488259f77a2 | cf3aa90665eb7991a5a53f8ae3fbdec7b37c8ae2 | refs/heads/master | 2020-12-24T18:55:34.828359 | 2016-04-17T02:11:12 | 2016-04-17T02:11:12 | 56,417,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,308 | py | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .message_media_downloadable import DownloadableMediaMessageProtocolEntity
class AudioDownloadableMediaMessageProtocolEntity(DownloadableMediaMessageProtocolEntity):
'''
<message t="{{TIME_STAMP}}" from="{{CONTACT_JID}}"
offline="{{OFFLINE}}" type="text" id="{{MESSAGE_ID}}" notify="{{NOTIFY_NAME}}">
<media type="{{DOWNLOADABLE_MEDIA_TYPE: (image | audio | video)}}"
mimetype="{{MIME_TYPE}}"
filehash="{{FILE_HASH}}"
url="{{DOWNLOAD_URL}}"
ip="{{IP}}"
size="{{MEDIA SIZE}}"
file="{{FILENAME}}"
encoding="{{ENCODING}}"
height="{{IMAGE_HEIGHT}}"
width="{{IMAGE_WIDTH}}"
> {{THUMBNAIL_RAWDATA (JPEG?)}}
</media>
</message>
'''
def __init__(self,
mimeType, fileHash, url, ip, size, fileName,
abitrate, acodec, asampfreq, duration, encoding, origin, seconds, mediaKey = None,
_id = None, _from = None, to = None, notify = None, timestamp = None,
participant = None, preview = None, offline = None, retry = None):
super(AudioDownloadableMediaMessageProtocolEntity, self).__init__("audio",
mimeType, fileHash, url, ip, size, fileName, MediaKey, None,
_id, _from, to, notify, timestamp, participant, preview, offline, retry)
self.setAudioProps(abitrate, acodec, asampfreq, duration, encoding, origin, seconds)
def __str__(self):
out = super(AudioDownloadableMediaMessageProtocolEntity, self).__str__()
out += "Bitrate: %s\n" % self.abitrate
out += "Codec: %s\n" % self.acodec
out += "Duration: %s\n" % self.duration
out += "Encoding: %s\n" % self.encoding
out += "Origin: %s\n" % self.origin
out += "Sampling freq.: %s\n" % self.asampfreq
return out
def setAudioProps(self, abitrate = None, acodec = None, asampfreq = None,
duration = None, encoding = None, origin = None, seconds = None):
self.abitrate = abitrate
self.acodec = acodec
self.asampfreq = asampfreq
self.duration = duration
self.encoding = encoding
self.origin = origin
self.seconds = seconds
self.cryptKeys = '576861747341707020417564696f204b657973'
def toProtocolTreeNode(self):
node = super(AudioDownloadableMediaMessageProtocolEntity, self).toProtocolTreeNode()
mediaNode = node.getChild("media")
if self.abitrate:
mediaNode.setAttribute("abitrate", self.abitrate)
if self.acodec:
mediaNode.setAttribute("acodec", self.acodec)
if self.asampfreq:
mediaNode.setAttribute("asampfreq", self.asampfreq)
if self.duration:
mediaNode.setAttribute("duration", self.duration)
if self.encoding:
mediaNode.setAttribute("encoding", self.encoding)
if self.origin:
mediaNode.setAttribute("origin", self.origin)
if self.seconds:
mediaNode.setAttribute("seconds", self.seconds)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = DownloadableMediaMessageProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = AudioDownloadableMediaMessageProtocolEntity
mediaNode = node.getChild("media")
entity.setAudioProps(
mediaNode.getAttributeValue("abitrate"),
mediaNode.getAttributeValue("acodec"),
mediaNode.getAttributeValue("asampfreq"),
mediaNode.getAttributeValue("duration"),
mediaNode.getAttributeValue("encoding"),
mediaNode.getAttributeValue("origin"),
mediaNode.getAttributeValue("seconds"),
)
return entity
@staticmethod
def fromFilePath(fpath, url, ip, to, mimeType = None, preview = None, filehash = None, filesize = None):
entity = DownloadableMediaMessageProtocolEntity.fromFilePath(fpath, url, DownloadableMediaMessageProtocolEntity.MEDIA_TYPE_AUDIO, ip, to, mimeType, preview)
entity.__class__ = AudioDownloadableMediaMessageProtocolEntity
entity.setAudioProps()
return entity
| [
"xerosanyam@gmail.com"
] | xerosanyam@gmail.com |
ff0ad0075662ea760759e20241a28aadbef609d1 | d9d6f6d184ec1b415da301dc8fa0a21a2228c828 | /geeksforgeeks/test/test_practice_array.py | b5b97e6e55ce4bbd2bc880289aee924e0d2e8bf9 | [] | no_license | navneettomar11/learn-py | 3a28a799a46c911d82143d4874326b36d059444a | 69a85ea2c65686d59b464f40cd64db19c050e47e | refs/heads/master | 2022-12-01T09:44:51.734432 | 2020-08-04T22:26:44 | 2020-08-04T22:26:44 | 266,992,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import unittest
from geeksforgeeks import sort_array_where_subarray_reversed, is_possible_triange, print_all_triplets
class TestPracticeTest(unittest.TestCase):
def test_sort_array_where_subarray_reversed(self):
nums = [2,5,65,55,50,70,90]
sort_array_where_subarray_reversed(nums)
self.assertListEqual(nums, [2,5,50,55,65,70,90])
def test_is_possible_triangel(self):
nums = [5, 4, 3, 1, 2]
self.assertTrue(is_possible_triange(nums))
nums = [4, 1, 2]
self.assertFalse(is_possible_triange(nums))
def test_print_all_triplets(self):
nums = [2, 6, 9, 12, 17, 22, 31, 32, 35, 42]
result = print_all_triplets(nums)
self.assertListEqual(result, [[6,9,12], [2,12,22], [12,17,22], [2,17,32],[12,22,32], [9,22,35], [2,22,42], [22,32,42]])
| [
"navneet.singh2@emc.com"
] | navneet.singh2@emc.com |
a5e3e33122423cc50cefa31db7d4974439910c3b | 795d0debe9a09d47c95ebcd0b32534f8b5d3c0b5 | /TheBeginning/Kickstart/CountryLeader.py | 61b37b74746cb6a8c75d7a65a5a83c535d54b959 | [] | no_license | Binovizer/Python-Beginning | db8afb2362369a7ca2f098af6e021551dee960b5 | f8ee01355e859264269389a0de90f700b0052904 | refs/heads/master | 2020-05-26T15:24:18.378884 | 2017-05-07T13:51:35 | 2017-05-07T13:51:35 | 82,492,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | def getNoOfUniqueChars(person):
person = ("".join(sorted(person))).strip()
prev = person[0];
count = 1;
for ch in person:
if(ch == " "):
continue
if(ch != prev):
prev = ch;
count += 1
return count
t = int(input())
for i in range(t):
n = int(input())
persons = {}
for j in range(n):
person = input()
no_of_unique_chars = getNoOfUniqueChars(person)
persons.__setitem__(person, no_of_unique_chars)
#print(persons)
list_of_persons = [v[0] for v in sorted(persons.items(), key=lambda kv: (-kv[1], kv[0]))]
#sorted_persons = sorted(persons.items(), key=operator.itemgetter(1), reverse = True)
#print(sorted_persons)
print("Case #%d: %s" % (i+1, list_of_persons[0])) | [
"mohd.nadeem3464@gmail.com"
] | mohd.nadeem3464@gmail.com |
7f94f42d0f77618d0d0375de3e14f9c423f2f05f | c530897cb72b6943c7226b25824444cad5f3503b | /usaspending_api/download/migrations/0002_auto_20180216_2047.py | 6d7ad33e243301d2bf01713d9206caf53723187b | [
"CC0-1.0"
] | permissive | fedspendingtransparency/usaspending-api | fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a | 38f920438697930ae3ac57bbcaae9034877d8fb7 | refs/heads/master | 2023-09-01T22:00:36.633612 | 2023-08-29T18:39:18 | 2023-08-29T18:39:18 | 65,394,827 | 276 | 118 | CC0-1.0 | 2023-09-14T20:33:15 | 2016-08-10T15:39:45 | Python | UTF-8 | Python | false | false | 627 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-02-16 20:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('download', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='downloadjob',
name='json_request',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='downloadjob',
name='monthly_download',
field=models.BooleanField(default=False),
),
]
| [
"hess_michael@bah.com"
] | hess_michael@bah.com |
78d2b15a55faf7418393802718a4f1794a6a0759 | 4827875497046f1c6aa0a4ecf8db6206bc7b6fd0 | /feature_MannWithney.py | 2aa0fc416a91840be71e5a6e8c4e468e2e37832d | [] | no_license | ulrichw/flare-forecasting | 95b250589a8e98a1cd39d9ad060450f4aa84a65f | 571c0f896a9dc9ceccd662067f16c166a0e7930d | refs/heads/master | 2021-01-01T16:56:34.528605 | 2015-04-15T01:39:20 | 2015-04-15T01:39:20 | 27,569,890 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,414 | py | ''' Program to perform Mann-Withney U tests on the active-region features.
Based on the feature_ttest.py code
Should work better on non-normal distributions than the independent-samples t-test.
However, the distributions of a given feature for flaring and non-flaring active regions
might not have exactly the same shape! Be careful therefore in the interpretation of the
Mann-Withney U test
'''
import numpy as np
from scipy import stats
import sys, getopt
def main():
#READ COMMAND-LINE ARGUMENTS
try:
opts, args = getopt.getopt(sys.argv[1:],"ha:",["help","a="]) #h is for help, a is for the significance level
except getopt.GetoptError as err:
print str(err)
print 'feature_ttest.py -a <significance level>'
sys.exit(2)
if len(opts) >= 1:
for opt, arg in opts:
if opt in ("-h", "--help"):
print 'feature_ttest.py -a <significance level>'
sys.exit()
elif opt in ("-a", "--a"):
alpha = float(arg)
else:
assert False, "unhandled option"
sys.exit(2)
else:
print 'wrong or missing argument:'
print 'feature_ttest.py -a <significance level>'
sys.exit(2)
#FLARE CATALOG
with open('flare_catalog_24h.txt','r') as flare_file: #the 25 SHARP parameters
Xtext=flare_file.readlines()
flare_file.close()
Xflare=[]
with open('flare_catalog_24h_times_d0_out.txt','r') as flare_file: #to add the fractal dimension
Xtext2=flare_file.readlines()
flare_file.close()
with open('flare_catalog_24h_times_beff_out.txt','r') as flare_file: #to add the B effective
Xtext3=flare_file.readlines()
flare_file.close()
for i in range(len(Xtext)):
res=Xtext[i].split()
res2=Xtext2[i].split()
res3=Xtext3[i].split()
Xflare.append([float(res[0]),float(res[1]),float(res[2]),float(res[3]),float(res[4]),float(res[5]),float(res[6]),float(res[7]),float(res[8]),float(res[9]),float(res[10]),float(res[11]),float(res[12]),float(res[13]),float(res[14]),float(res[15]),float(res[16]),float(res[17]),float(res[18]),float(res[19]),float(res[20]),float(res[21]),float(res[22]),float(res[23]),float(res[24]),float(res2[0]),float(res3[0])])
#NO-FLARE CATALOG
Xnoflare=[]
with open('noflare_catalog_48h.txt','r') as noflare_file: #the 25 SHARP parameters
Xtext=noflare_file.readlines()
noflare_file.close()
with open('noflare_catalog_48h_times_d0_out.txt','r') as noflare_file: #to add the fractal dimension
Xtext2=noflare_file.readlines()
noflare_file.close()
with open('noflare_catalog_48h_times_beff_out.txt','r') as noflare_file: #to add the B effective
Xtext3=noflare_file.readlines()
noflare_file.close()
for i in range(len(Xtext)):
res=Xtext[i].split()
res2=Xtext2[i].split()
res3=Xtext3[i].split()
Xnoflare.append([float(res[0]),float(res[1]),float(res[2]),float(res[3]),float(res[4]),float(res[5]),float(res[6]),float(res[7]),float(res[8]),float(res[9]),float(res[10]),float(res[11]),float(res[12]),float(res[13]),float(res[14]),float(res[15]),float(res[16]),float(res[17]),float(res[18]),float(res[19]),float(res[20]),float(res[21]),float(res[22]),float(res[23]),float(res[24]),float(res2[0]),float(res3[0])])
Xflare = np.array(Xflare,dtype=np.float64)
Xnoflare = np.array(Xnoflare,dtype=np.float64)
#names of SHARP features
names=['USFLUX','MEANGBT','MEANJZH','MEANPOT','SHRGT45','TOTUSJH','MEANGBH','MEANALP','MEANGAM','MEANGBZ','MEANJZD','TOTUSJZ','SAVNCPP','TOTPOT','MEANSHR','AREA_ACR','R_VALUE','TOTFX','TOTFY','TOTFZ','TOTBSQ','EPSX','EPSY','EPSZ','ABSNJZH','fractal','B effective']
names=np.array(names)
#PERFORM T-TEST
for i in range(Xflare.shape[1]):
u, p = stats.mannwhitneyu(Xflare[:,i],Xnoflare[:,i]) #Mann-Withney U test
print "t-test results for feature %s:" % names[i]
print "U statistic= %g p-value = %g" % (u, p)
if(p<alpha):
print "the p-value is lower than the significance level, therefore the null hypothesis can be rejected"
else:
print "the p-value is larger than the significance level, therefore the null hypothesis cannot be rejected"
if __name__ == '__main__':
main()
| [
"couvidat@stitch.Stanford.EDU"
] | couvidat@stitch.Stanford.EDU |
3f3868685b37e2240659631adac543f60ce7d330 | f93418daae9fd6af5329e65dd73140c711469280 | /mfdw_site_project/settings.py | 6bbc9d4efc2cf7da51f4531c3e17e7457354991f | [
"MIT"
] | permissive | hattwick/pycon2019 | 5a778edd9030531279b3593ed907c4fbdb492cd2 | b5e5d539cc6ee25cc64a24a90f167d21ee29072d | refs/heads/master | 2020-05-17T21:39:42.862108 | 2019-04-29T01:52:56 | 2019-04-29T01:52:56 | 183,979,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,095 | py | """
Django settings for mfdw_site project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@4w-$74l$h*3wg^l0(bh1l^5sd^%8k9w220n=s8n6e&a&yvcm9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mfdw_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mfdw_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"phil@hattwick.com"
] | phil@hattwick.com |
08e57f662a5ed15727ebead11dbee1da91274819 | f0ee987789f5a6fe8f104890e95ee56e53f5b9b2 | /pythia-0.8/packages/pyre/pyre/odb/fs/CodecODB.py | 2d64e24630bc15ec801aa5092f03fd4056070e69 | [] | no_license | echoi/Coupling_SNAC_CHILD | 457c01adc439e6beb257ac8a33915d5db9a5591b | b888c668084a3172ffccdcc5c4b8e7fff7c503f2 | refs/heads/master | 2021-01-01T18:34:00.403660 | 2015-10-26T13:48:18 | 2015-10-26T13:48:18 | 19,891,618 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.odb.common.Codec import Codec
class CodecODB(Codec):
def open(self, db, mode='r'):
"""open the file <db> in mode <mode> and place its contents in a shelf"""
filename = self.resolve(db)
import os
exists = os.path.isfile(filename)
if mode in ['w'] and not exists:
raise IOError("file not found: '%s'" % filename)
shelf = self._shelf(filename, False)
self._decode(shelf)
if mode == 'r':
shelf._const = True
else:
shelf._const = False
return shelf
def resolve(self, db):
return db + '.' + self.extension
def __init__(self, encoding, extension=None):
if extension is None:
extension = encoding
Codec.__init__(self, encoding, extension)
# public data
self.renderer = self._createRenderer()
# private data
self._locker = self._createLocker()
return
def _shelf(self, filename, const):
"""create a shelf for the contents of the db file"""
from Shelf import Shelf
return Shelf(filename, const, self)
def _decode(self, shelf):
"""lock and then read the contents of the file into the shelf"""
stream = file(shelf.name)
self._locker.lock(stream, self._locker.LOCK_EX)
exec stream in shelf
self._locker.unlock(stream)
return
def _createRenderer(self):
"""create a weaver for storing shelves"""
from pyre.weaver.Weaver import Weaver
weaver = Weaver()
return weaver
def _createLocker(self):
from FileLocking import FileLocking
return FileLocking()
# version
__id__ = "$Id: CodecODB.py,v 1.1.1.1 2005/03/08 16:13:41 aivazis Exp $"
# End of file
| [
"echoi2@memphis.edu"
] | echoi2@memphis.edu |
ef2263bd197b7de41252637445f0cc9e72b0d091 | 712f8dc13531bb827efb8c8934485528a39c6649 | /file_util.py | 89c05d34544f37992ef7c4c79600b810b49efd55 | [] | no_license | breez7/teslacam_auto | 0d687ee3c4e701214851e2f37810c77f5e530cf4 | f673dbe8285cec6d7d67a12a1d60a834586ab188 | refs/heads/master | 2020-07-02T23:21:13.926336 | 2019-08-11T14:06:52 | 2019-08-11T14:06:52 | 201,703,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | #cam_path = '/mnt/cam/TeslaCam'
cam_path = '/mnt/cam'
music_path = '/mnt/music'
audio_path = '/root/audio'
import os
from flask import Flask
from flask import send_from_directory
import commands
app = Flask(__name__)
@app.route('/file/<filename>')
def send_file(filename):
return send_from_directory(cam_path, filename)
@app.route('/files')
def send_files():
a,b = commands.getstatusoutput('umount /mnt/cam')
c,d = commands.getstatusoutput('sync')
e,f = commands.getstatusoutput('mount /mnt/cam')
if e != 0 :
raise Exception(f)
ret = get_files(cam_path)
print(ret)
return str(ret)
def get_http_path():
return 'http://192.168.219.183:5000'
def get_files(path):
folders = []
for root, dirs, files in os.walk(path):
ret_files = {}
dir = root.split(path)[1]
for file in files:
if file.endswith('mp4'):
ret_files[dir + '/' + file] = get_http_path() + '/file' + dir + '/' + file
if len(ret_files) > 0:
folders.append([dir, ret_files])
return folders
def get_http_stream(path):
pass
if '__main__' == __name__:
files = get_files(cam_path)
print(files)
app.run(host='192.168.219.183')
| [
"james777.lee@gmail.com"
] | james777.lee@gmail.com |
758d5ba1b42e509212e41714be460f72c95a8603 | 23935e62805f9304fa8ad7e7b7a9e0f5255338c0 | /python/TnPTreeProducer_cfg.py | 038412432508fde62cc01863a78c63874b7b8e3d | [] | no_license | choij1589/EgammaAnalysis-TnPTreeProducer | d2e6011cc467261397b70f5fa7e693db7908e80f | 11217136699408cfa735a6e676fbc320ab036ed3 | refs/heads/master | 2023-04-20T17:04:28.206386 | 2017-05-23T14:32:24 | 2017-05-23T14:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,839 | py | import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
import sys
process = cms.Process("tnpEGM")
###################################################################
## argument line options
###################################################################
varOptions = VarParsing('analysis')
varOptions.register(
"isMC", True,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"Compute MC efficiencies"
)
varOptions.register(
"doEleID", False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"Include tree for photon ID SF"
)
varOptions.register(
"doPhoID", False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"Include tree for photon ID SF"
)
varOptions.register(
"doTrigger", False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"Include tree for Trigger SF"
)
varOptions.register(
"doRECO", False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"Include tree for Reco SF"
)
varOptions.register(
"calibEn", False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"use EGM smearer to calibrate photon and electron energy"
)
varOptions.register(
"isAOD", False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"switch to run other AOD (for RECO SFs)"
)
#### HLTname is HLT2 in reHLT samples
varOptions.register(
"HLTname", "HLT",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"HLT process name (default HLT)"
)
varOptions.register(
"GT","auto",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"Global Tag to be used"
)
varOptions.parseArguments()
###################################################################
## Define TnP inputs
###################################################################
options = dict()
options['useAOD'] = cms.bool(varOptions.isAOD)
options['HLTProcessName'] = varOptions.HLTname
### set input collections
options['ELECTRON_COLL'] = "slimmedElectrons"
options['PHOTON_COLL'] = "slimmedPhotons"
options['SUPERCLUSTER_COLL'] = "reducedEgamma:reducedSuperClusters" ### not used in AOD
if options['useAOD']:
options['ELECTRON_COLL'] = "gedGsfElectrons"
options['PHOTON_COLL' ] = "gedPhotons"
options['ELECTRON_CUTS'] = "ecalEnergy*sin(superClusterPosition.theta)>5.0 && (abs(-log(tan(superClusterPosition.theta/2)))<2.5)"
options['SUPERCLUSTER_CUTS'] = "abs(eta)<2.5 && et>5.0"
options['PHOTON_CUTS'] = "(abs(-log(tan(superCluster.position.theta/2)))<=2.5) && pt> 10"
options['ELECTRON_TAG_CUTS'] = "(abs(-log(tan(superCluster.position.theta/2)))<=2.1) && !(1.4442<=abs(-log(tan(superClusterPosition.theta/2)))<=1.566) && pt >= 30.0"
options['MAXEVENTS'] = cms.untracked.int32(varOptions.maxEvents)
options['DoTrigger'] = cms.bool( varOptions.doTrigger )
options['DoRECO'] = cms.bool( varOptions.doRECO )
options['DoEleID'] = cms.bool( varOptions.doEleID )
options['DoPhoID'] = cms.bool( varOptions.doPhoID )
options['OUTPUTEDMFILENAME'] = 'edmFile.root'
options['DEBUG'] = cms.bool(False)
options['isMC'] = cms.bool(False)
options['UseCalibEn'] = varOptions.calibEn
if (varOptions.isMC):
options['isMC'] = cms.bool(True)
options['OUTPUT_FILE_NAME'] = "TnPTree_mc.root"
if varOptions.isAOD : options['OUTPUT_FILE_NAME'] = "TnPTree_mc_aod.root"
# options['TnPPATHS'] = cms.vstring("HLT*")
# options['TnPHLTTagFilters'] = cms.vstring()
# options['TnPHLTProbeFilters'] = cms.vstring()
# options['HLTFILTERTOMEASURE'] = cms.vstring("")
options['TnPPATHS'] = cms.vstring("HLT_Ele27_eta2p1_WPTight_Gsf_v*")
options['TnPHLTTagFilters'] = cms.vstring("hltEle27erWPTightGsfTrackIsoFilter")
options['TnPHLTProbeFilters'] = cms.vstring()
options['HLTFILTERTOMEASURE'] = cms.vstring("hltEle27erWPTightGsfTrackIsoFilter")
options['GLOBALTAG'] = 'auto:run2_mc'
else:
options['OUTPUT_FILE_NAME'] = "TnPTree_data.root"
options['TnPPATHS'] = cms.vstring("HLT_Ele27_eta2p1_WPTight_Gsf_v*")
options['TnPHLTTagFilters'] = cms.vstring("hltEle27erWPTightGsfTrackIsoFilter")
options['TnPHLTProbeFilters'] = cms.vstring()
options['HLTFILTERTOMEASURE'] = cms.vstring("hltEle27erWPTightGsfTrackIsoFilter")
options['GLOBALTAG'] = 'auto:run2_data'
if varOptions.GT != "auto" :
options['GLOBALTAG'] = varOptions.GT
###################################################################
## Define input files for test local run
###################################################################
from EgammaAnalysis.TnPTreeProducer.etc.tnpInputTestFiles_cff import filesMiniAOD_23Sep2016 as inputs
if options['useAOD'] : from EgammaAnalysis.TnPTreeProducer.etc.tnpInputTestFiles_cff import filesAOD_23Sep2016 as inputs
options['INPUT_FILE_NAME'] = inputs['data']
if varOptions.isMC: options['INPUT_FILE_NAME'] = inputs['mc']
###################################################################
## import TnP tree maker pythons and configure for AODs
###################################################################
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load("Configuration.Geometry.GeometryRecoDB_cff")
#process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options['GLOBALTAG'] , '')
import EgammaAnalysis.TnPTreeProducer.egmTreesSetup_cff as tnpSetup
tnpSetup.setupTreeMaker(process,options)
###################################################################
## Init and Load
###################################################################
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )
process.MessageLogger.cerr.threshold = ''
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.source = cms.Source("PoolSource",
fileNames = options['INPUT_FILE_NAME'],
)
process.maxEvents = cms.untracked.PSet( input = options['MAXEVENTS'])
if options['DoTrigger'] : print " -- Producing HLT (trigger ele) efficiency tree -- "
if options['DoRECO'] : print " -- Producing RECO SF tree -- "
if options['DoEleID'] : print " -- Producing electron SF tree -- "
if options['DoPhoID'] : print " -- Producing photon SF tree -- "
###################################################################
## Define sequences and TnP pairs
###################################################################
process.cand_sequence = cms.Sequence( process.init_sequence + process.tag_sequence )
if options['DoEleID'] or options['DoTrigger'] : process.cand_sequence += process.ele_sequence
if options['DoPhoID'] : process.cand_sequence += process.pho_sequence
if options['DoTrigger'] : process.cand_sequence += process.hlt_sequence
if options['DoRECO'] : process.cand_sequence += process.sc_sequence
process.tnpPairs_sequence = cms.Sequence()
if options['DoTrigger'] : process.tnpPairs_sequence *= process.tnpPairingEleHLT
if options['DoRECO'] : process.tnpPairs_sequence *= process.tnpPairingEleRec
if options['DoEleID'] : process.tnpPairs_sequence *= process.tnpPairingEleIDs
if options['DoPhoID'] : process.tnpPairs_sequence *= process.tnpPairingPhoIDs
##########################################################################
## TnP Trees
##########################################################################
import EgammaAnalysis.TnPTreeProducer.egmTreesContent_cff as tnpVars
if options['useAOD']: tnpVars.setupTnPVariablesForAOD()
tnpVars.mcTruthCommonStuff.isMC = cms.bool(varOptions.isMC)
process.tnpEleTrig = cms.EDAnalyzer("TagProbeFitTreeProducer",
tnpVars.CommonStuffForGsfElectronProbe, tnpVars.mcTruthCommonStuff,
tagProbePairs = cms.InputTag("tnpPairingEleHLT"),
probeMatches = cms.InputTag("genProbeEle"),
allProbes = cms.InputTag("probeEle"),
flags = cms.PSet(
passingHLT = cms.InputTag("probeElePassHLT"),
passingLoose80X = cms.InputTag("probeEleCutBasedLoose80X" ),
passingMedium80X = cms.InputTag("probeEleCutBasedMedium80X"),
passingTight80X = cms.InputTag("probeEleCutBasedTight80X" ),
),
)
process.tnpEleReco = cms.EDAnalyzer("TagProbeFitTreeProducer",
tnpVars.mcTruthCommonStuff, tnpVars.CommonStuffForSuperClusterProbe,
tagProbePairs = cms.InputTag("tnpPairingEleRec"),
probeMatches = cms.InputTag("genProbeSC"),
allProbes = cms.InputTag("probeSC"),
flags = cms.PSet(passingRECO = cms.InputTag("probeSCEle", "superclusters") ),
)
process.tnpEleIDs = cms.EDAnalyzer("TagProbeFitTreeProducer",
tnpVars.mcTruthCommonStuff, tnpVars.CommonStuffForGsfElectronProbe,
tagProbePairs = cms.InputTag("tnpPairingEleIDs"),
probeMatches = cms.InputTag("genProbeEle"),
allProbes = cms.InputTag("probeEle"),
flags = cms.PSet(
passingVeto = cms.InputTag("probeEleCutBasedVeto" ),
passingLoose = cms.InputTag("probeEleCutBasedLoose" ),
passingMedium = cms.InputTag("probeEleCutBasedMedium"),
passingTight = cms.InputTag("probeEleCutBasedTight" ),
passingVeto80X = cms.InputTag("probeEleCutBasedVeto80X" ),
passingLoose80X = cms.InputTag("probeEleCutBasedLoose80X" ),
passingMedium80X = cms.InputTag("probeEleCutBasedMedium80X"),
passingTight80X = cms.InputTag("probeEleCutBasedTight80X" ),
passingMVA80Xwp90 = cms.InputTag("probeEleMVA80Xwp90" ),
passingMVA80Xwp80 = cms.InputTag("probeEleMVA80Xwp80" ),
)
)
process.tnpPhoIDs = cms.EDAnalyzer("TagProbeFitTreeProducer",
tnpVars.mcTruthCommonStuff, tnpVars.CommonStuffForPhotonProbe,
tagProbePairs = cms.InputTag("tnpPairingPhoIDs"),
probeMatches = cms.InputTag("genProbePho"),
allProbes = cms.InputTag("probePho"),
flags = cms.PSet(
passingLoose = cms.InputTag("probePhoCutBasedLoose"),
passingMedium = cms.InputTag("probePhoCutBasedMedium"),
passingTight = cms.InputTag("probePhoCutBasedTight"),
passingMVA = cms.InputTag("probePhoMVA"),
# passingLoose80X = cms.InputTag("probePhoCutBasedLoose80X"),
# passingMedium80X = cms.InputTag("probePhoCutBasedMedium80X"),
# passingTight80X = cms.InputTag("probePhoCutBasedTight80X"),
# passingMVA80Xwp90 = cms.InputTag("probePhoMVA80Xwp90"),
# passingMVA80Xwp80 = cms.InputTag("probePhoMVA80Xwp80"),
)
)
## add pass HLT-safe flag, available for miniAOD only
if not options['useAOD'] :
setattr( process.tnpEleTrig.flags, 'passingHLTsafe', cms.InputTag("probeEleHLTsafe" ) )
setattr( process.tnpEleIDs.flags , 'passingHLTsafe', cms.InputTag("probeEleHLTsafe" ) )
tnpSetup.customize( process.tnpEleTrig , options )
tnpSetup.customize( process.tnpEleIDs , options )
tnpSetup.customize( process.tnpPhoIDs , options )
tnpSetup.customize( process.tnpEleReco , options )
process.tree_sequence = cms.Sequence()
if (options['DoTrigger']): process.tree_sequence *= process.tnpEleTrig
if (options['DoRECO']) : process.tree_sequence *= process.tnpEleReco
if (options['DoEleID']) : process.tree_sequence *= process.tnpEleIDs
if (options['DoPhoID']) : process.tree_sequence *= process.tnpPhoIDs
##########################################################################
## PATHS
##########################################################################
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string(options['OUTPUTEDMFILENAME']),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring("p"))
)
process.outpath = cms.EndPath(process.out)
if (not options['DEBUG']):
process.outpath.remove(process.out)
process.p = cms.Path(
process.hltFilter +
process.cand_sequence +
process.tnpPairs_sequence +
process.mc_sequence +
process.eleVarHelper +
process.tree_sequence
)
process.TFileService = cms.Service(
"TFileService", fileName = cms.string(options['OUTPUT_FILE_NAME']),
closeFileFast = cms.untracked.bool(True)
)
| [
"fabrice.couderc@cern.ch"
] | fabrice.couderc@cern.ch |
482d7b56dd358e962f6dedb3cd96e67a87f389dd | f6f1e8b6bf2bde4e3b9eef80cc7e942854bd2e83 | /bin_search.py | e502e1d9e10705648fe00c1841a0103e926de0a0 | [] | no_license | stevekutz/django_algo_exp1 | 178d84bda0520db39273b8f38b070c30758e222a | ef4e56b4f443868350deab7913b77678d093c6d6 | refs/heads/master | 2021-09-28T18:45:12.955842 | 2020-01-31T04:45:43 | 2020-01-31T04:45:43 | 236,881,770 | 0 | 0 | null | 2021-09-22T18:34:18 | 2020-01-29T01:35:41 | Python | UTF-8 | Python | false | false | 2,339 | py | dict_history = [];
def binary_search(list, item):
# array indices
low = 0
high = len(list) - 1
global dict_history;
def print_dic(dict):
for val in dict:
# print(val) # prints dict at index # {'item': 9, 'low': 0, 'high': 100, 'mid': 50, 'guess': 50}
# prints in nicely formatted python2
# search val: 9 low: 0 high: 100 mid: 50 guess: 50
print('search val: %s \t low: %s \t high: %s \t mid: %s \t guess: %s' % (val['item'], val['low'], val['high'], val['mid'], val['guess']))
# this will print out val
for k, v in val.items(): # we can use any variable for key, value positions
# print(f'\t Key: {k} \t Value: {v}') # python 3
print("\t Key: %s \t Value: %i " % (k, v))
while low <= high:
# print(f'search val: {item} low: {low} high: {high} mid: {mid}')
mid = (low + high) // 2 # gives floor, rounded down val
guess = list[mid] # check the middle val
# python3 syntax
# print(f'search val: {item} \t low: {low} \t high: {high} \t mid: {mid} \t guess: {guess}')
# python2 syntax
# print('search val: %s \t low: %s \t high: %s \t mid: %s \t guess: %s' % (item, low, high, mid, guess))
# dict_history.append({item: item, low: low ,high: high, mid: mid, guess: guess}) # saves k &v as same e.g. { 9: 9, 50: 50, ...}
dict_history.append({'item': item, 'low': low, 'high': high, 'mid': mid, 'guess': guess})
if guess == item:
# return mid # middle is actual item --> use with # print(binary_search(test_list, find))
# python 3 syntax
#return print(f' item located: {guess}')
# python 2 syntax
print("item located {} after {} iterations".format(guess, len(dict_history)) )
print_dic(dict_history)
return None
elif guess > item:
high = mid - 1 # look in lower half
else:
low = mid + 1 # look in upper half
return None
test_list = list(range(0,101)) # generate list 1 to 100
find = 9
# print(binary_search(test_list, find))
binary_search(test_list, find) | [
"stkutz@gmail.com"
] | stkutz@gmail.com |
a605d2a019956c7ce215d0b2d948919c7f05f214 | 3076bd73c41ed665c987d99218b8a3599fa05ec2 | /cellpylib/evoloop.py | 263af4027db03d4ceaf970ec86838890bbb9946c | [
"Apache-2.0"
] | permissive | lantunes/cellpylib | 5135a6986e68424d9ec8b09fb42421b3dcf046d1 | 743e936d48f8520f6f4ac652570ac7bb46414189 | refs/heads/master | 2023-03-07T03:31:32.380400 | 2023-02-21T12:34:28 | 2023-02-21T12:34:28 | 126,618,694 | 203 | 32 | Apache-2.0 | 2023-02-15T03:40:38 | 2018-03-24T16:33:15 | Python | UTF-8 | Python | false | false | 21,821 | py | import numpy as np
from .ctrbl_rule import CTRBLRule
class Evoloop(CTRBLRule):
"""
An implementation of H. Sayama's Evoloop. For more information, see:
.. code-block:: text
Sayama, H. (1998). Constructing evolutionary systems on a simple deterministic cellular automata space.
PhD, University of Tokyo, Department of Information Science.
"""
def __init__(self):
"""
Create an Evoloop.
"""
super().__init__(rule_table={
(0, 0, 0, 0, 1): 2,
(1, 0, 2, 0, 2): 1,
(1, 1, 2, 7, 2): 7,
(2, 0, 1, 7, 2): 2,
(2, 1, 3, 2, 2): 2,
(4, 0, 1, 2, 5): 0,
(0, 0, 0, 0, 4): 3,
(1, 0, 2, 1, 1): 1,
(1, 1, 2, 7, 3): 5,
(2, 0, 2, 0, 2): 2,
(2, 1, 4, 2, 2): 2,
(4, 0, 1, 6, 2): 0,
(0, 0, 0, 1, 2): 2,
(1, 0, 2, 1, 2): 1,
(1, 1, 3, 2, 2): 1,
(2, 0, 2, 0, 3): 2,
(2, 1, 6, 2, 2): 2,
(4, 0, 2, 1, 2): 0,
(0, 0, 0, 1, 5): 2,
(1, 0, 2, 1, 3): 1,
(1, 1, 3, 3, 2): 1,
(2, 0, 2, 0, 5): 2,
(2, 1, 7, 2, 2): 2,
(4, 0, 2, 1, 5): 0,
(0, 0, 0, 2, 1): 2,
(1, 0, 2, 2, 1): 1,
(1, 1, 5, 4, 2): 4,
(2, 0, 2, 0, 6): 5,
(2, 2, 2, 2, 4): 2,
(4, 0, 2, 2, 2): 1,
(0, 0, 0, 2, 4): 2,
(1, 0, 2, 2, 4): 4,
(1, 1, 5, 7, 2): 7,
(2, 0, 2, 0, 7): 3,
(2, 2, 2, 2, 7): 2,
(4, 0, 2, 3, 2): 1,
(0, 0, 0, 4, 2): 2,
(1, 0, 2, 2, 7): 7,
(1, 1, 6, 2, 4): 4,
(2, 0, 2, 1, 2): 2,
(2, 2, 2, 3, 4): 2,
(4, 0, 2, 6, 2): 6,
(0, 0, 0, 4, 5): 2,
(1, 0, 2, 3, 2): 4,
(1, 1, 6, 2, 7): 7,
(2, 0, 2, 1, 5): 2,
(2, 2, 2, 3, 7): 2,
(4, 0, 3, 1, 2): 0,
(0, 0, 0, 7, 5): 2,
(1, 0, 2, 4, 1): 4,
(1, 2, 2, 2, 4): 4,
(2, 0, 2, 2, 1): 2,
(2, 2, 2, 4, 3): 2,
(4, 0, 3, 2, 2): 1,
(0, 0, 1, 0, 2): 2,
(1, 0, 2, 4, 2): 4,
(1, 2, 2, 2, 7): 7,
(2, 0, 2, 2, 2): 2,
(2, 2, 2, 4, 4): 2,
(5, 0, 0, 0, 2): 5,
(0, 0, 2, 1, 4): 1,
(1, 0, 2, 4, 3): 4,
(1, 2, 2, 4, 3): 4,
(2, 0, 2, 2, 3): 2,
(2, 2, 2, 7, 3): 2,
(5, 0, 0, 1, 2): 5,
(0, 0, 2, 1, 7): 1,
(1, 0, 2, 5, 1): 1,
(1, 2, 2, 7, 3): 7,
(2, 0, 2, 3, 2): 3,
(2, 2, 2, 7, 7): 2,
(5, 0, 0, 2, 1): 5,
(0, 0, 2, 3, 2): 2,
(1, 0, 2, 5, 2): 7,
(1, 2, 3, 2, 4): 4,
(2, 0, 2, 4, 2): 2,
(2, 2, 3, 2, 4): 3,
(5, 0, 0, 2, 3): 2,
(0, 1, 1, 2, 2): 1,
(1, 0, 2, 5, 4): 3,
(1, 2, 3, 2, 7): 7,
(2, 0, 2, 4, 5): 2,
(2, 2, 3, 2, 7): 3,
(5, 0, 0, 2, 4): 5,
(0, 1, 2, 1, 2): 1,
(1, 0, 2, 5, 7): 7,
(1, 2, 4, 2, 6): 6,
(2, 0, 2, 5, 2): 5,
(3, 0, 0, 0, 1): 3,
(5, 0, 0, 2, 7): 5,
(0, 1, 2, 3, 2): 1,
(1, 0, 2, 7, 1): 7,
(1, 2, 4, 3, 3): 3,
(2, 0, 2, 6, 2): 0,
(3, 0, 0, 0, 2): 2,
(5, 0, 0, 4, 2): 5,
(0, 1, 2, 4, 2): 1,
(1, 0, 2, 7, 2): 7,
(1, 2, 6, 2, 7): 6,
(2, 0, 2, 6, 5): 0,
(3, 0, 0, 0, 3): 2,
(5, 0, 0, 7, 2): 5,
(0, 1, 2, 4, 5): 1,
(1, 0, 2, 7, 3): 5,
(2, 0, 0, 0, 1): 2,
(2, 0, 2, 7, 2): 2,
(3, 0, 0, 0, 4): 3,
(5, 0, 2, 0, 2): 2,
(0, 1, 2, 5, 2): 6,
(1, 0, 5, 1, 2): 1,
(2, 0, 0, 0, 2): 2,
(2, 0, 2, 7, 5): 2,
(3, 0, 0, 0, 7): 4,
(5, 0, 2, 0, 5): 2,
(0, 1, 2, 6, 2): 6,
(1, 0, 5, 4, 2): 4,
(2, 0, 0, 0, 4): 2,
(2, 0, 3, 1, 2): 2,
(3, 0, 0, 1, 2): 3,
(5, 0, 2, 1, 2): 5,
(0, 1, 2, 7, 2): 1,
(1, 0, 5, 7, 2): 7,
(2, 0, 0, 0, 5): 2,
(2, 0, 3, 2, 2): 2,
(3, 0, 0, 3, 2): 2,
(5, 0, 2, 1, 5): 2,
(0, 1, 2, 7, 5): 1,
(1, 0, 6, 2, 1): 1,
(2, 0, 0, 0, 6): 0,
(2, 0, 3, 4, 2): 2,
(3, 0, 0, 4, 2): 1,
(5, 0, 2, 4, 2): 5,
(0, 1, 3, 4, 2): 1,
(1, 0, 6, 2, 4): 4,
(2, 0, 0, 0, 7): 1,
(2, 0, 3, 4, 5): 2,
(3, 0, 1, 0, 2): 1,
(5, 0, 2, 7, 2): 5,
(0, 1, 3, 7, 2): 1,
(1, 0, 6, 2, 7): 7,
(2, 0, 0, 1, 2): 2,
(2, 0, 3, 7, 2): 2,
(3, 0, 1, 2, 5): 0,
(5, 0, 3, 1, 2): 0,
(0, 1, 4, 2, 2): 1,
(1, 1, 1, 1, 2): 1,
(2, 0, 0, 1, 5): 2,
(2, 0, 4, 1, 2): 2,
(3, 0, 2, 1, 2): 3,
(6, 0, 2, 0, 2): 2,
(0, 1, 4, 2, 5): 1,
(1, 1, 1, 2, 2): 1,
(2, 0, 0, 2, 1): 2,
(2, 0, 4, 2, 2): 2,
(3, 0, 2, 4, 2): 3,
(6, 0, 2, 1, 2): 2,
(0, 1, 4, 3, 2): 1,
(1, 1, 1, 2, 4): 4,
(2, 0, 0, 2, 2): 2,
(2, 0, 4, 4, 2): 2,
(3, 0, 2, 5, 2): 1,
(6, 0, 2, 2, 2): 0,
(0, 1, 4, 3, 5): 1,
(1, 1, 1, 2, 5): 1,
(2, 0, 0, 2, 3): 2,
(2, 0, 5, 1, 2): 2,
(3, 0, 2, 7, 2): 3,
(6, 0, 2, 4, 2): 2,
(0, 1, 4, 4, 2): 1,
(1, 1, 1, 2, 7): 7,
(2, 0, 0, 2, 4): 2,
(2, 0, 5, 4, 2): 5,
(3, 0, 3, 3, 2): 1,
(6, 0, 2, 7, 2): 2,
(0, 1, 4, 6, 2): 1,
(1, 1, 1, 6, 2): 1,
(2, 0, 0, 2, 6): 0,
(2, 0, 5, 7, 2): 5,
(3, 1, 2, 1, 2): 3,
(6, 1, 2, 2, 2): 0,
(0, 1, 7, 2, 2): 1,
(1, 1, 2, 1, 2): 1,
(2, 0, 0, 2, 7): 2,
(2, 0, 6, 1, 2): 5,
(3, 1, 2, 4, 2): 3,
(6, 2, 2, 2, 4): 0,
(0, 1, 7, 2, 5): 1,
(1, 1, 2, 1, 3): 1,
(2, 0, 0, 3, 2): 4,
(2, 0, 6, 2, 1): 2,
(3, 1, 2, 5, 2): 1,
(6, 2, 2, 2, 7): 0,
(0, 1, 7, 5, 6): 1,
(1, 1, 2, 1, 5): 1,
(2, 0, 0, 4, 2): 3,
(2, 0, 6, 4, 2): 5,
(3, 1, 2, 7, 2): 3,
(7, 0, 1, 0, 2): 0,
(0, 1, 7, 6, 2): 1,
(1, 1, 2, 2, 2): 1,
(2, 0, 0, 4, 5): 2,
(2, 0, 6, 7, 2): 5,
(3, 2, 4, 2, 4): 3,
(7, 0, 1, 1, 2): 0,
(0, 1, 7, 7, 2): 1,
(1, 1, 2, 2, 4): 4,
(2, 0, 0, 5, 4): 5,
(2, 0, 7, 1, 2): 2,
(3, 2, 4, 2, 5): 1,
(7, 0, 1, 2, 2): 0,
(1, 0, 0, 0, 1): 1,
(1, 1, 2, 2, 7): 7,
(2, 0, 0, 5, 7): 5,
(2, 0, 7, 2, 2): 2,
(3, 2, 4, 2, 7): 3,
(7, 0, 1, 2, 5): 0,
(1, 0, 0, 1, 2): 1,
(1, 1, 2, 3, 2): 1,
(2, 0, 0, 6, 2): 0,
(2, 0, 7, 7, 2): 2,
(3, 2, 5, 2, 7): 1,
(7, 0, 1, 6, 2): 0,
(1, 0, 0, 2, 1): 1,
(1, 1, 2, 4, 2): 4,
(2, 0, 0, 7, 2): 2,
(2, 1, 1, 2, 2): 2,
(3, 2, 7, 2, 7): 3,
(7, 0, 2, 1, 2): 0,
(1, 0, 0, 2, 4): 4,
(1, 1, 2, 4, 3): 4,
(2, 0, 0, 7, 5): 2,
(2, 1, 2, 2, 2): 2,
(4, 0, 0, 0, 0): 1,
(7, 0, 2, 1, 5): 0,
(1, 0, 0, 2, 7): 7,
(1, 1, 2, 5, 2): 7,
(2, 0, 1, 0, 2): 2,
(2, 1, 2, 2, 3): 2,
(4, 0, 0, 0, 2): 1,
(7, 0, 2, 2, 2): 1,
(1, 0, 1, 2, 1): 1,
(1, 1, 2, 5, 4): 3,
(2, 0, 1, 1, 2): 2,
(2, 1, 2, 2, 4): 2,
(4, 0, 1, 0, 2): 0,
(7, 0, 2, 3, 2): 0,
(1, 0, 1, 2, 4): 4,
(1, 1, 2, 5, 7): 7,
(2, 0, 1, 2, 2): 2,
(2, 1, 2, 2, 7): 2,
(4, 0, 1, 1, 2): 0,
(7, 0, 2, 6, 2): 6,
(1, 0, 1, 2, 7): 7,
(1, 1, 2, 6, 2): 6,
(2, 0, 1, 4, 2): 2,
(2, 1, 2, 3, 2): 3,
(4, 0, 1, 2, 2): 0,
(7, 0, 3, 1, 2): 0,
}, add_rotations=True)
def __call__(self, n, c, t):
"""
From:
Sayama, H. (1998). Constructing evolutionary systems on a simple deterministic cellular automata space.
PhD, University of Tokyo, Department of Information Science.
:param n: the neighbourhood
:param c: the index of the current cell
:param t: the current timestep
:return: the activity of the current cell at the next timestep
"""
current_activity = n[1][1]
top = n[0][1]
right = n[1][2]
bottom = n[2][1]
left = n[1][0]
key = (current_activity, top, right, bottom, left)
if key not in self._rule_table:
trbl = (top, right, bottom, left)
new_activity = None
# Let 8->0 with no condition.
if current_activity == 8:
new_activity = 0
# To all the undefined situations in whose four neighbourhood (TRBL) there is at least one site in state 8,
# apply the following:
if 8 in trbl:
# Let 0,1->8 if there is at least one site in state 2,3,...,7 in its four neighbourhood (TRBL),
# otherwise let 0->0 and 1->1
if current_activity == 0 or current_activity == 1:
if np.any([i in trbl for i in (2, 3, 4, 5, 6, 7)]):
new_activity = 8
elif current_activity == 0:
new_activity = 0
elif current_activity == 1:
new_activity = 1
# Let 2,3,5->0.
if current_activity in (2, 3, 5):
new_activity = 0
# Let 4,6,7->1.
if current_activity in (4, 6, 7):
new_activity = 1
# Clear up all the undefined situations by letting 0->0 and 1,2,...,7->8.
if new_activity is None and current_activity == 0:
new_activity = 0
if new_activity is None and current_activity in (1, 2, 3, 4, 5, 6, 7):
new_activity = 8
return new_activity
return self._rule_table[key]
@staticmethod
def init_species13_loop(dim, row, col):
"""
Create the initial conditions by specifying the a loop of species 13 and its starting position (as given by the
coordinates of the first cell of the first row of the loop).
:param dim: a 2-tuple representing the dimensions (number of rows and columns) of the CA
:param row: the row number of the loop
:param col: the column number of the loop
:return: the initial conditions
"""
initial_conditions = np.zeros(dim, dtype=np.int32)
# 1st row
initial_conditions[row][col] = 2
initial_conditions[row][col+1] = 2
initial_conditions[row][col+2] = 2
initial_conditions[row][col+3] = 2
initial_conditions[row][col+4] = 2
initial_conditions[row][col+5] = 2
initial_conditions[row][col+6] = 2
initial_conditions[row][col+7] = 2
initial_conditions[row][col+8] = 2
initial_conditions[row][col+9] = 2
initial_conditions[row][col+10] = 2
initial_conditions[row][col+11] = 2
initial_conditions[row][col+12] = 2
initial_conditions[row][col+13] = 2
initial_conditions[row][col+14] = 2
# 2nd row
initial_conditions[row+1][col-1] = 2
initial_conditions[row+1][col] = 0
initial_conditions[row+1][col+1] = 1
initial_conditions[row+1][col+2] = 7
initial_conditions[row+1][col+3] = 0
initial_conditions[row+1][col+4] = 1
initial_conditions[row+1][col+5] = 7
initial_conditions[row+1][col+6] = 0
initial_conditions[row+1][col+7] = 1
initial_conditions[row+1][col+8] = 7
initial_conditions[row+1][col+9] = 0
initial_conditions[row+1][col+10] = 1
initial_conditions[row+1][col+11] = 4
initial_conditions[row+1][col+12] = 0
initial_conditions[row+1][col+13] = 1
initial_conditions[row+1][col+14] = 4
initial_conditions[row+1][col+15] = 2
# 3rd row
initial_conditions[row+2][col-1] = 2
initial_conditions[row+2][col] = 7
initial_conditions[row+2][col+1] = 2
initial_conditions[row+2][col+2] = 2
initial_conditions[row+2][col+3] = 2
initial_conditions[row+2][col+4] = 2
initial_conditions[row+2][col+5] = 2
initial_conditions[row+2][col+6] = 2
initial_conditions[row+2][col+7] = 2
initial_conditions[row+2][col+8] = 2
initial_conditions[row+2][col+9] = 2
initial_conditions[row+2][col+10] = 2
initial_conditions[row+2][col+11] = 2
initial_conditions[row+2][col+12] = 2
initial_conditions[row+2][col+13] = 2
initial_conditions[row+2][col+14] = 0
initial_conditions[row+2][col+15] = 2
# 4th row
initial_conditions[row+3][col-1] = 2
initial_conditions[row+3][col] = 1
initial_conditions[row+3][col+1] = 2
initial_conditions[row+3][col+13] = 2
initial_conditions[row+3][col+14] = 1
initial_conditions[row+3][col+15] = 2
# 5th row
initial_conditions[row+4][col-1] = 2
initial_conditions[row+4][col] = 0
initial_conditions[row+4][col+1] = 2
initial_conditions[row+4][col+13] = 2
initial_conditions[row+4][col+14] = 1
initial_conditions[row+4][col+15] = 2
# 6th row
initial_conditions[row+5][col-1] = 2
initial_conditions[row+5][col] = 7
initial_conditions[row+5][col+1] = 2
initial_conditions[row+5][col+13] = 2
initial_conditions[row+5][col+14] = 1
initial_conditions[row+5][col+15] = 2
# 7th row
initial_conditions[row + 6][col - 1] = 2
initial_conditions[row + 6][col] = 1
initial_conditions[row + 6][col + 1] = 2
initial_conditions[row + 6][col + 13] = 2
initial_conditions[row + 6][col + 14] = 1
initial_conditions[row + 6][col + 15] = 2
# 8th row
initial_conditions[row + 7][col - 1] = 2
initial_conditions[row + 7][col] = 0
initial_conditions[row + 7][col + 1] = 2
initial_conditions[row + 7][col + 13] = 2
initial_conditions[row + 7][col + 14] = 1
initial_conditions[row + 7][col + 15] = 2
# 9th row
initial_conditions[row + 8][col - 1] = 2
initial_conditions[row + 8][col] = 7
initial_conditions[row + 8][col + 1] = 2
initial_conditions[row + 8][col + 13] = 2
initial_conditions[row + 8][col + 14] = 1
initial_conditions[row + 8][col + 15] = 2
# 10th row
initial_conditions[row + 9][col - 1] = 2
initial_conditions[row + 9][col] = 1
initial_conditions[row + 9][col + 1] = 2
initial_conditions[row + 9][col + 13] = 2
initial_conditions[row + 9][col + 14] = 1
initial_conditions[row + 9][col + 15] = 2
# 11th row
initial_conditions[row + 10][col - 1] = 2
initial_conditions[row + 10][col] = 0
initial_conditions[row + 10][col + 1] = 2
initial_conditions[row + 10][col + 13] = 2
initial_conditions[row + 10][col + 14] = 1
initial_conditions[row + 10][col + 15] = 2
# 12th row
initial_conditions[row + 11][col - 1] = 2
initial_conditions[row + 11][col] = 7
initial_conditions[row + 11][col + 1] = 2
initial_conditions[row + 11][col + 13] = 2
initial_conditions[row + 11][col + 14] = 1
initial_conditions[row + 11][col + 15] = 2
# 13th row
initial_conditions[row + 12][col - 1] = 2
initial_conditions[row + 12][col] = 1
initial_conditions[row + 12][col + 1] = 2
initial_conditions[row + 12][col + 13] = 2
initial_conditions[row + 12][col + 14] = 1
initial_conditions[row + 12][col + 15] = 2
# 14th row
initial_conditions[row + 13][col - 1] = 2
initial_conditions[row + 13][col] = 0
initial_conditions[row + 13][col + 1] = 2
initial_conditions[row + 13][col + 13] = 2
initial_conditions[row + 13][col + 14] = 1
initial_conditions[row + 13][col + 15] = 2
# 15th row
initial_conditions[row + 14][col - 1] = 2
initial_conditions[row + 14][col] = 7
initial_conditions[row + 14][col + 1] = 2
initial_conditions[row + 14][col + 2] = 2
initial_conditions[row + 14][col + 3] = 2
initial_conditions[row + 14][col + 4] = 2
initial_conditions[row + 14][col + 5] = 2
initial_conditions[row + 14][col + 6] = 2
initial_conditions[row + 14][col + 7] = 2
initial_conditions[row + 14][col + 8] = 2
initial_conditions[row + 14][col + 9] = 2
initial_conditions[row + 14][col + 10] = 2
initial_conditions[row + 14][col + 11] = 2
initial_conditions[row + 14][col + 12] = 2
initial_conditions[row + 14][col + 13] = 2
initial_conditions[row + 14][col + 14] = 1
initial_conditions[row + 14][col + 15] = 2
initial_conditions[row + 14][col + 16] = 2
initial_conditions[row + 14][col + 17] = 2
initial_conditions[row + 14][col + 18] = 2
initial_conditions[row + 14][col + 19] = 2
initial_conditions[row + 14][col + 20] = 2
initial_conditions[row + 14][col + 21] = 2
initial_conditions[row + 14][col + 22] = 2
initial_conditions[row + 14][col + 23] = 2
initial_conditions[row + 14][col + 24] = 2
initial_conditions[row + 14][col + 25] = 2
initial_conditions[row + 14][col + 26] = 2
initial_conditions[row + 14][col + 27] = 2
initial_conditions[row + 14][col + 28] = 2
# 16th row
initial_conditions[row + 15][col - 1] = 2
initial_conditions[row + 15][col] = 1
initial_conditions[row + 15][col + 1] = 0
initial_conditions[row + 15][col + 2] = 7
initial_conditions[row + 15][col + 3] = 1
initial_conditions[row + 15][col + 4] = 0
initial_conditions[row + 15][col + 5] = 7
initial_conditions[row + 15][col + 6] = 1
initial_conditions[row + 15][col + 7] = 0
initial_conditions[row + 15][col + 8] = 7
initial_conditions[row + 15][col + 9] = 1
initial_conditions[row + 15][col + 10] = 0
initial_conditions[row + 15][col + 11] = 7
initial_conditions[row + 15][col + 12] = 1
initial_conditions[row + 15][col + 13] = 0
initial_conditions[row + 15][col + 14] = 7
initial_conditions[row + 15][col + 15] = 1
initial_conditions[row + 15][col + 16] = 1
initial_conditions[row + 15][col + 17] = 1
initial_conditions[row + 15][col + 18] = 1
initial_conditions[row + 15][col + 19] = 1
initial_conditions[row + 15][col + 20] = 1
initial_conditions[row + 15][col + 21] = 1
initial_conditions[row + 15][col + 22] = 1
initial_conditions[row + 15][col + 23] = 1
initial_conditions[row + 15][col + 24] = 1
initial_conditions[row + 15][col + 25] = 1
initial_conditions[row + 15][col + 26] = 1
initial_conditions[row + 15][col + 27] = 1
initial_conditions[row + 15][col + 28] = 1
initial_conditions[row + 15][col + 29] = 2
# 17th row
initial_conditions[row + 16][col] = 2
initial_conditions[row + 16][col + 1] = 2
initial_conditions[row + 16][col + 2] = 2
initial_conditions[row + 16][col + 3] = 2
initial_conditions[row + 16][col + 4] = 2
initial_conditions[row + 16][col + 5] = 2
initial_conditions[row + 16][col + 6] = 2
initial_conditions[row + 16][col + 7] = 2
initial_conditions[row + 16][col + 8] = 2
initial_conditions[row + 16][col + 9] = 2
initial_conditions[row + 16][col + 10] = 2
initial_conditions[row + 16][col + 11] = 2
initial_conditions[row + 16][col + 12] = 2
initial_conditions[row + 16][col + 13] = 2
initial_conditions[row + 16][col + 14] = 2
initial_conditions[row + 16][col + 15] = 2
initial_conditions[row + 16][col + 16] = 2
initial_conditions[row + 16][col + 17] = 2
initial_conditions[row + 16][col + 18] = 2
initial_conditions[row + 16][col + 19] = 2
initial_conditions[row + 16][col + 20] = 2
initial_conditions[row + 16][col + 21] = 2
initial_conditions[row + 16][col + 22] = 2
initial_conditions[row + 16][col + 23] = 2
initial_conditions[row + 16][col + 24] = 2
initial_conditions[row + 16][col + 25] = 2
initial_conditions[row + 16][col + 26] = 2
initial_conditions[row + 16][col + 27] = 2
initial_conditions[row + 16][col + 28] = 2
return np.array([initial_conditions])
| [
"lantunes@gmail.com"
] | lantunes@gmail.com |
b468095583ee8de98b8833590892dca749b50194 | 362a9ebf606300ff3a0faac2a82afd89fedefea6 | /api/urls.py | f6a644a36b730b34afbb2921887f5068235d0b6b | [] | no_license | Alezzuwu/Caos_News--Javier-Quinteros--Alonso-Arteaga | 0e0991bc70fb7531812cbe8ff7659101e0631c6c | 57a22d2da00294a7e52d49479887179342cfbe22 | refs/heads/main | 2023-06-18T04:40:16.128828 | 2021-07-09T14:15:39 | 2021-07-09T14:15:39 | 384,449,932 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | from django.conf.urls import url
from rest_framework import urlpatterns
from api import views
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^api/noticia/$',views.NoticiaViewSet.as_view()),
url(r'^api/categoria/$',views.CategoriaViewSet.as_view()),
url(r'^api/buscar_noticia/(?P<titulo>.+)/$',views.NoticiaBuscarViewSet.as_view())
]
urlpatterns = format_suffix_patterns(urlpatterns) | [
"alon.arte.r@gmail.com"
] | alon.arte.r@gmail.com |
675724284077d36c8d4e2d814f985112987e18fe | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc003/A/4914205.py | be4bb822261b85d60bd74648d029ad41410d3a99 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | ans = 0
num = int(input())
for i in range(1, num+1):
ans += i*10000/num
print(int(ans)) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
ec4c1599755da578ec0f2c800a5f9e6cdb0a7a80 | 1c4c132b5a4f0b85c749123d436394ebb09bf07a | /data/tealogger/ext/TeaFiles.Py/stopwatch.py | 2b5b9adddbc31c7242fdc73001f478be086c8601 | [
"MIT"
] | permissive | mildred/venusos-peacefair-pzem | 0cbdd53fe7a0979562dd4955a6d5c8b1b6235f9e | b3d1b3a0652f2733702c590b7837988bb99a0c3a | refs/heads/master | 2022-04-24T04:57:25.326123 | 2020-04-19T22:29:02 | 2020-04-19T22:29:02 | 250,288,216 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | ''' from https://gist.github.com/1123871
modifications applied
'''
import time
class Stopwatch(object):
'''A stopwatch utility for timing execution that can be used as a regular
object or as a context manager.
NOTE: This should not be used an accurate benchmark of Python code, but a
way to check how much time has elapsed between actions. And this does not
account for changes or blips in the system clock.
Instance attributes:
start_time -- timestamp when the timer started
stop_time -- timestamp when the timer stopped
As a regular object:
>>> stopwatch = Stopwatch()
>>> stopwatch.start()
>>> time.sleep(1)
>>> 1 <= stopwatch.time_elapsed <= 2
True
>>> time.sleep(1)
>>> stopwatch.stop()
>>> 2 <= stopwatch.total_run_time
True
As a context manager:
>>> with Stopwatch() as stopwatch:
... time.sleep(1)
... print repr(1 <= stopwatch.time_elapsed <= 2)
... time.sleep(1)
True
>>> 2 <= stopwatch.total_run_time
True
'''
def __init__(self):
'''Initialize a new `Stopwatch`, but do not start timing.'''
self.start_time = None
self.stop_time = None
def start(self):
'''Start timing.'''
self.start_time = time.time()
def stop(self):
'''Stop timing.'''
self.stop_time = time.time()
@property
def time_elapsed(self):
'''Return the number of seconds that have elapsed since this
`Stopwatch` started timing.
This is used for checking how much time has elapsed while the timer is
still running.
'''
assert not self.stop_time, \
"Can't check `time_elapsed` on an ended `Stopwatch`."
return time.time() - self.start_time
@property
def total_run_time(self):
'''Return the number of seconds that elapsed from when this `Stopwatch`
started to when it ended.
'''
return self.stop_time - self.start_time
def __enter__(self):
'''Start timing and return this `Stopwatch` instance.'''
self.start()
return self
def __exit__(self, type_, value, traceback):
'''Stop timing.
If there was an exception inside the `with` block, re-raise it.
>>> with Stopwatch() as stopwatch:
... raise Exception
Traceback (most recent call last):
...
Exception
'''
self.stop()
print("execution time: " + str(self.total_run_time) + " seconds")
if type_:
raise Exception(type_, value, traceback)
| [
"mildred-pub.git@mildred.fr"
] | mildred-pub.git@mildred.fr |
9b3ac2d7b530dc6140c8a3ba781a071f7dc425e8 | db2ae9b2d769d768f685be8a1e830ad3f71e4ad3 | /torch_scatter/utils.py | fd4be5998ae87a9d375db9fc0e77353725ae74d2 | [
"MIT"
] | permissive | hmaarrfk/pytorch_scatter | 45623acd179bc309474f492d8d2358e0a9556b09 | 8d05f6108105d02b53b8fba35f28006cfdd1539f | refs/heads/master | 2023-08-24T17:32:08.331457 | 2021-10-22T13:45:02 | 2021-10-22T13:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | import torch
def broadcast(src: torch.Tensor, other: torch.Tensor, dim: int):
if dim < 0:
dim = other.dim() + dim
if src.dim() == 1:
for _ in range(0, dim):
src = src.unsqueeze(0)
for _ in range(src.dim(), other.dim()):
src = src.unsqueeze(-1)
src = src.expand_as(other)
return src
| [
"matthias.fey@tu-dortmund.de"
] | matthias.fey@tu-dortmund.de |
5938b562efd79ff1228b349b3607eec641ea1f1d | 23c244c10130a6610615486d4d11f396834d2640 | /python_coursera/Segundos1.py | 597eea05159e5796cdb3d8c7bc316e39ee66d128 | [] | no_license | carolineduarte/Training | 4f54f2278d483ac556e6873908ae131a6818566d | 34aadc5d58d0ce1ce92a1a4ce54e369ec54b2654 | refs/heads/main | 2023-05-31T17:34:44.186091 | 2021-06-14T10:21:33 | 2021-06-14T10:21:33 | 355,674,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | segundos_str=input("Por favor, entre com o número de segundos que deseja converter: ")
total_segs=int(segundos_str)
horas=total_segs//3600
segs_restantes=total_segs%3600
minutos=segs_restantes//60
segs_restantes_final=segs_restantes%60
print(horas," horas",minutos," minutos e ",segs_restantes_final," segundos.")
| [
"duarte.carol@gmail.com"
] | duarte.carol@gmail.com |
bb0bc1b070cdb39864536526363f9329311660dd | a5f0e7c09c36bb2fc91f95e5f3ec7f95c0ed305e | /cafe_backend/core/constants/sizes.py | 962e36be4636824dd4958f081aa06e3356612c30 | [] | no_license | ecmascriptguru/cafe_backend | e703047c7f04d68596f76dcbff06828afbf5cc68 | 0c4152692d68e951481b39f0789bc58e94e0d20c | refs/heads/master | 2022-10-26T00:31:50.070430 | 2020-06-18T15:30:02 | 2020-06-18T15:30:02 | 184,465,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | MAX_IMAGE_WIDTH = 1920
MAX_IMAGE_HEIGHT = 768
class DEFAULT_IMAGE_SIZE:
tiny = (int(MAX_IMAGE_WIDTH / 30), int(MAX_IMAGE_HEIGHT / 16))
small = (int(MAX_IMAGE_WIDTH / 10), int(MAX_IMAGE_HEIGHT / 8))
normal = (int(MAX_IMAGE_WIDTH / 4), int(MAX_IMAGE_HEIGHT / 4))
big = (int(MAX_IMAGE_WIDTH / 2), int(MAX_IMAGE_HEIGHT / 2))
| [
"ecmascript.guru@gmail.com"
] | ecmascript.guru@gmail.com |
14110aabdd7d4ce67fd72c6108e04444d7defc05 | 3fac8219d8b5bbb10dbe5b57cc91f322d629a155 | /api.py | dfc3faeb478c1f9cd451d24251f87a1fa3ed2877 | [] | no_license | matheusfillipe/texsolver | 28c14ea801264002e5e8de5b4cdbe51483a665b1 | fd8f93f2a623a57a46a29a06fb42e18e13dad674 | refs/heads/main | 2023-08-14T19:36:00.380538 | 2021-09-21T04:28:24 | 2021-09-21T04:28:24 | 398,383,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from flask import Flask, request
from texsuggest import solve
app = Flask(__name__)
app.config["DEBUG"] = True
@app.route("/", methods=["POST"])
def index():
if request.method == "POST":
expression = request.json.get("latex")
try:
return {"latex": solve(expression)}
except Exception as e:
return {"error": str(e)}
if __name__ == "__main__":
app.run()
| [
"matheusfillipeag@gmail.com"
] | matheusfillipeag@gmail.com |
e11dbe8861cb3a6473c3e5ba7a8db431fe3625fb | ad4362d11b710a92d81a8116e1b3d098eabbac67 | /votar/apps.py | f4ad956ea134c77ef1ab6276f92eaf3ee4d2405f | [] | no_license | CebaJuaB/forum | fac87f4f544d8cec357719b7dc53a52809e434fd | fb3a57d89f98a2e49b31162497d541a216754a3f | refs/heads/main | 2023-03-19T23:23:42.568694 | 2021-03-14T16:26:05 | 2021-03-14T16:26:05 | 337,135,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | from django.apps import AppConfig
class VoteConfig(AppConfig):
name = 'votar' | [
"juanceballos@mac.com"
] | juanceballos@mac.com |
a5f8c1e0ed8b8e671d5b82f9501e64ffe5623eb4 | 9ebfd133358cb0b1146fed2e571d31fcc15278c7 | /menu/admin.py | 6a95687b2c4c9abc2b9158d8c35ebee9e0c74249 | [] | no_license | Dhruvam-19/smartrestaurant | c90f55cba5fdfbbcac26577f93764326b056ac82 | 36bc8a8d42352a2732353d7ac1aba30e43831612 | refs/heads/master | 2023-03-30T07:44:42.863541 | 2021-04-14T06:00:25 | 2021-04-14T06:00:25 | 341,185,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from django.contrib import admin
from .models import Menu,Cart
# Register your models here.
admin.site.register(Menu)
admin.site.register(Cart) | [
"dhruvam.jhavericore@gmail.com"
] | dhruvam.jhavericore@gmail.com |
fb8c78ce35916204e06dc9f2fd71e7105d824080 | e990f3941e8fae0c863807a5a07d09b2508ec0d6 | /Botato/code_snippets.py | ff0253e2c6f659d8d8c0158d33b9e7abd005a93e | [] | no_license | Mets3D/Botato | 51815fcacd2ee8729502871cc9c13fb7200cc8a0 | 7d41d987e76eb309fdf9093f9a108f47cac0f118 | refs/heads/master | 2020-05-16T21:29:19.591046 | 2020-03-08T02:22:15 | 2020-03-08T02:22:15 | 183,308,031 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,491 | py | """This file is for code snippets that I'm unlikely to use in the future, but I'd rather not perma-delete them."""
""" Old utility functions """
def intersect_two_circles(x1,y1,r1, x2,y2,r2):
centerdx = x1 - x2
centerdy = y1 - y2
R = math.sqrt(centerdx * centerdx + centerdy * centerdy)
R2 = R*R
R4 = R2*R2
a = (r1*r1 - r2*r2) / (2 * R2)
r2r2 = (r1*r1 - r2*r2)
C = 2 * (r1*r1 + r2*r2) / R2 - (r2r2 * r2r2) / R4 - 1
if C < 0:
return
c = math.sqrt(C)
fx = (x1+x2) / 2 + a * (x2 - x1)
gx = c * (y2 - y1) / 2
ix1 = fx + gx
ix2 = fx - gx
fy = (y1+y2) / 2 + a * (y2 - y1)
gy = c * (x1 - x2) / 2
iy1 = fy + gy
iy2 = fy - gy
return [[ix1, iy1], [ix2, iy2]]
def z0(loc):
return Vector3(loc.x,loc.y,0)
def inside_arena(location) -> bool:
location = loc(location)
return abs(location.x) < arena.x and abs(location.y) < arena.y
def boost_needed(initial_speed, goal_speed):
p1 = 6.31e-06
p2 = 0.010383
p3 = 1.3183
boost_initial = p1*initial_speed**2 + p2*initial_speed + p3
boost_goal = p1*goal_speed**2 + p2*goal_speed + p3
boost_needed = boost_goal - boost_initial
return boost_needed
def rotate2D(vector, angle):
v = Vector3(vector.x,vector.y,0)
theta = math.radians(angle)
cs = math.cos(theta)
sn = math.sin(theta)
v.x = vector.x * cs - vector.y * sn
v.y = vector.x * sn + vector.y * cs
return v
def directional_angle(start, center, end, clockwise = False):
a0 = (start - center).angle
a1 = (end - center).angle
if clockwise:
return a0 - a1
else:
return a1 - a0
def get_steer_towards(s, target, dd = 1):
return clamp(dd * angle_to(s, target, dd) / 15, -1, 1)
def optimal_speed(dist, time_left, current_speed):
desired_speed = dist / max(0.01, time_left)
alpha = 1.3
return alpha * desired_speed - (alpha - 1) * current_speed
def turn_radius(speed):
spd = clamp(speed,0,2300)
return 156 + 0.1*spd + 0.000069*spd**2 + 0.000000164*spd**3 + -5.62E-11*spd**4
"""RLUtils ball prediction & rendering."""
self.game.read_game_information(packet, self.get_rigid_body_tick(), self.get_field_info())
b = Ball(self.game.ball)
ball_predictions = []
for i in range(330):
# simulate the forces acting on the ball for 1 frame for the first 100 frames, then only 5 frame at a time.
dt = (i+330)/330 * 5
b.step(dt / 120.0)
# and add a copy of new ball position to the list of predictions
ball_predictions.append(vec3(b.location))
if ball_predictions is not None:
for i in range(0, len(ball_predictions)):
prediction_slice = ball_predictions[i]
render_color = self.renderer.red()
omegalul = str(prediction_slice).split(" ")
loc = Vector3(float(omegalul[0]), float(omegalul[1]), float(omegalul[2]))
#self.renderer.draw_rect_3d(loc, 5, 5, True, render_color)
"""Old, very bad powerslide"""
"""It tries to determine at the beginning of powersliding how long we're planning to powerslide."""
"""This is bad because after the timer is up it will keep re-activating itself, so in the end the powersliding, vel_fac, yaw_fac values are completely useless and all that matters is what's in the if() requirements."""
if(self.powersliding):
controller.handbrake = True
if(self.game_seconds > self.powerslide_until):
self.powersliding=False
elif(not self.powersliding and
yaw_to_target * RAD_TO_DEG > 35 and
self.velocity.length > 300 ):
self.powersliding=True
self.drift_vel_fac = (self.velocity.length/2000/16)
self.drift_yaw_fac = (yaw_to_target * RAD_TO_DEG /65 /16)
self.powerslide_until = self.game_seconds + self.drift_vel_fac + self.drift_yaw_fac # Powerslide for some time depending on velocity and angle.
controller.handbrake = True
"""from Botimus or PythonExampleBot, I don't think I need it."""
def get_car_facing_vector(car):
pitch = float(car.physics.rotation.pitch)
yaw = float(car.physics.rotation.yaw)
facing_x = math.cos(pitch) * math.cos(yaw)
facing_y = math.cos(pitch) * math.sin(yaw)
return Vector2(facing_x, facing_y)
"""Vector2 from Botimus or PythonExampleBot or whatever"""
class Vector2:
def __init__(self, x=0, y=0):
self.x = float(x)
self.y = float(y)
def __add__(self, val):
return Vector2(self.x + val.x, self.y + val.y)
def __sub__(self, val):
return Vector2(self.x - val.x, self.y - val.y)
def correction_to(self, ideal):
# The in-game axes are left handed, so use -x
current_in_radians = math.atan2(self.y, -self.x)
ideal_in_radians = math.atan2(ideal.y, -ideal.x)
correction = ideal_in_radians - current_in_radians
# Make sure we go the 'short way'
if abs(correction) > math.pi:
if correction < 0:
correction += 2 * math.pi
else:
correction -= 2 * math.pi
return correction
"""Written for my Debug.py, but a bad idea."""
def field(car, color=None):
"""Draw a rectangle represending the field in 2D."""
r = car.renderer
color = ensure_color(r, color)
field = MyVec3(8200, 10280, 2050)
bottom_left = MyVec3(-field.x, field.y, 0) / local_ratio
bottom_right = MyVec3( field.x, field.y, 0) / local_ratio
top_left = MyVec3(-field.x, -field.y, 0) / local_ratio
top_right = MyVec3( field.x, -field.y, 0) / local_ratio
"""Local coords, don't do this :'D"""
# line_2d_local(bottom_left, bottom_right, color)
# line_2d_local(bottom_right, top_right, color)
# line_2d_local(top_right, top_left, color)
# line_2d_local(top_left, bottom_left, color)
"""Global coords with a backdrop, just as useless :)"""
# rect_2d_from_center(0, 0, width=int(field.x/local_ratio*2), height=int(field.y/local_ratio*2), color=r.gray())
# line_2d_from_center(bottom_left.x, bottom_left.y, bottom_right.x, bottom_right.y, color)
# line_2d_from_center(bottom_right.x, bottom_right.y, top_right.x, top_right.y, color)
# line_2d_from_center(top_right.x, top_right.y, top_left.x, top_left.y, color)
# line_2d_from_center(top_left.x, top_left.y, bottom_left.x, bottom_left.y, color)
"""Old shitty powerslides"""
class Powerslide1(Maneuver):
    """This tries to stop powersliding once the yaw threshold is hit. Doesn't work very well, over and under-slides are common, adjusting the threshold improves one but worsens the other."""
    yaw_threshold = 90 # We want to powerslide if we're facing more than this many degrees away from target.
    @classmethod
    def get_output(cls, car, target) -> SimpleControllerState:
        # cls.controller is presumably supplied by the Maneuver base class —
        # confirm; this method only toggles its handbrake field.
        delta_yaw = abs((car.yaw_to_target - car.last_self.yaw_to_target))*(1/car.dt) # How fast we are approaching the correct alignment, in degrees/sec
        # Tiny epsilon guards against division by zero when delta_yaw is 0.
        time_to_aligned = car.yaw_to_target / (delta_yaw+0.00000001) # How long it will take(in seconds) at our current turning speed to line up with the target. Used for Powersliding.
        time_threshold = 1 # We should keep powersliding if the estimated time to alignment based on delta_Yaw is greater than this many seconds.
        if(
            (abs(car.yaw_to_target) > cls.yaw_threshold # We're facing far away from the target.
            or time_to_aligned > time_threshold) # Or the estimated time to alignment is high.
            and car.location.z < 50 # We aren't on a wall.
            and car.wheel_contact # We are touching the ground.
        ):
            cls.controller.handbrake = True
        else:
            cls.controller.handbrake = False
        return cls.controller
class Powerslide2(Maneuver):
    """This maneuver tries to determine at the beginning of the powerslide how long the powerslide should last. (WIP: Duration is currently a constant.)"""
    powerslide_until = -1
    # NOTE(review): last_ended is never read or written in this class.
    last_ended = -1
    @classmethod
    def get_output(cls, car, target) -> SimpleControllerState:
        yaw_threshold = 25 # Yaw to target has to be greater than this.
        slide_duration = 0.3 # Max slide duration.
        time_gap = 0.5 # Time that has to pass before this maneuver can be re-activated.
        # Engage only in the band between our threshold and Powerslide1's,
        # and only while an active slide window is open OR the cooldown has
        # elapsed since the last one.
        if(
            Powerslide1.yaw_threshold > abs(car.yaw_to_target) > yaw_threshold
            and (car.game_seconds < cls.powerslide_until
            or car.game_seconds > cls.powerslide_until + time_gap)
            and car.location.z < 50 # We aren't on a wall.
            and car.wheel_contact # We are touching the ground.
        ):
            cls.controller.handbrake = True
            if( not car.powersliding ): # If We just started powersliding
                # Activate this maneuver
                print("started small powerslide")
                cls.powerslide_until = car.game_seconds + slide_duration
        elif(car.powersliding):
            # Deactivate this maneuver
            #print("ended small powerslide")
            cls.controller.handbrake=False
        return cls.controller
| [
"metssfm@gmail.com"
] | metssfm@gmail.com |
26143bd90f2fa44ed5c44914a6bc4c28ffc0b0be | bf2939e6f9974a9eae7427e6654a0738be9ba39f | /__main__.py | 26709d29ae7346e5f99b17684ea8adc366090bd9 | [] | no_license | acpaquette/mini_pf | 5fa7b97087e341db19e282e76209f287fda8ad28 | f94079a3afd461e43e098b9da50ac8f36e547c90 | refs/heads/master | 2020-04-10T01:17:39.344999 | 2018-12-11T20:17:30 | 2018-12-11T20:17:30 | 160,711,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from minipf.controllers.default_controller import create_isd
if __name__ == "__main__":
    # Generate an ISD from a hard-coded PDS label on a developer machine.
    res = create_isd("/home/acpaquette/Desktop/EN0214547236M.LBL")
    with res as r:
        # NOTE(review): this prints the context-manager object itself, not
        # the bound value ``r`` — confirm which was intended.
        print(res)
| [
"acpaquette@usgs.gov"
] | acpaquette@usgs.gov |
265956323c6df2922aa762ecb0d33775c4fdf6cc | ebe7374b080e394f0cc935c92508803fbb0cc79d | /recepty/djangoweb/urls.py | 95c26b37c8b77a8ab313dcde8dfba7cf2048128a | [] | no_license | SaikiDean/final_project | 053daa9e90acfcf14efd8ff7739caefab92310d3 | 598a964cbc8fe1a495f9beba5ee394392f94cc7b | refs/heads/main | 2023-02-23T03:32:00.998677 | 2021-01-19T22:31:34 | 2021-01-19T22:31:34 | 313,294,369 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | """djangoweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include, url
from django.views.generic import RedirectView
from django.conf import settings
from django.conf.urls.static import static
# Admin site plus the recipes app; static files are appended for dev serving.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('recipes/', include('recipes.urls')),
    # NOTE(review): r'' matches every remaining path, so the RedirectView
    # route below it can never be reached.
    url(r'', include('recipes.urls')),
    path('', RedirectView.as_view(url='recipes/')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"malinky.assassin@gmail.com"
] | malinky.assassin@gmail.com |
c934cbc782156852dd476482a5d236715cf5ff97 | 552a6f227dea50887a4bcbf1a120289f3ae90fc0 | /pandas/tests/tseries/frequencies/test_freq_code.py | 0aa29e451b1ba4513e1beb37ec82f83724339f9d | [
"BSD-3-Clause"
] | permissive | Lucifer82/pandas | bbf6132e84585aebcfefe098d14ab6fa9adcf6d3 | cdfdd77b65df350386ce27142ef3babd9e5186d2 | refs/heads/master | 2020-04-30T15:30:27.180080 | 2019-03-21T03:07:43 | 2019-03-21T03:07:43 | 176,922,084 | 1 | 0 | BSD-3-Clause | 2019-03-21T10:26:38 | 2019-03-21T10:26:38 | null | UTF-8 | Python | false | false | 4,707 | py | import pytest
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.frequencies import (
FreqGroup, _period_code_map, get_freq, get_freq_code)
import pandas.compat as compat
import pandas.tseries.offsets as offsets
@pytest.fixture(params=list(compat.iteritems(_period_code_map)))
def period_code_item(request):
    """Yield every (freqstr, period code) pair from the internal code map."""
    return request.param
@pytest.mark.parametrize("freqstr,expected", [
    ("A", 1000), ("3A", 1000), ("-1A", 1000),
    ("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
    ("W", 4000), ("W-MON", 4001), ("W-FRI", 4005)
])
def test_freq_code(freqstr, expected):
    """Frequency strings map to their numeric codes; multiples share the base code."""
    assert get_freq(freqstr) == expected
def test_freq_code_match(period_code_item):
    """get_freq agrees with the internal _period_code_map for every entry."""
    freqstr, code = period_code_item
    assert get_freq(freqstr) == code
@pytest.mark.parametrize("freqstr,expected", [
    ("A", 1000), ("3A", 1000), ("-1A", 1000), ("A-JAN", 1000),
    ("A-MAY", 1000), ("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
    ("Y-JAN", 1000), ("Y-MAY", 1000), (offsets.YearEnd(), 1000),
    (offsets.YearEnd(month=1), 1000), (offsets.YearEnd(month=5), 1000),
    ("W", 4000), ("W-MON", 4000), ("W-FRI", 4000), (offsets.Week(), 4000),
    (offsets.Week(weekday=1), 4000), (offsets.Week(weekday=5), 4000),
    ("T", FreqGroup.FR_MIN),
])
def test_freq_group(freqstr, expected):
    """Strings and offset objects alike resolve to their frequency group code."""
    assert resolution.get_freq_group(freqstr) == expected
def test_freq_group_match(period_code_item):
    """Group lookup by string and by code agree, and equal code floored to 1000s."""
    freqstr, code = period_code_item
    str_group = resolution.get_freq_group(freqstr)
    code_group = resolution.get_freq_group(code)
    assert str_group == code_group == code // 1000 * 1000
@pytest.mark.parametrize("freqstr,exp_freqstr", [
    ("D", "D"), ("W", "D"), ("M", "D"),
    ("S", "S"), ("T", "S"), ("H", "S")
])
def test_get_to_timestamp_base(freqstr, exp_freqstr):
    """Coarse frequencies convert to 'D', sub-daily ones to 'S' for timestamps."""
    tsb = libfrequencies.get_to_timestamp_base
    assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0]
_reso = resolution.Resolution
@pytest.mark.parametrize("freqstr,expected", [
    ("A", "year"), ("Q", "quarter"), ("M", "month"),
    ("D", "day"), ("H", "hour"), ("T", "minute"),
    ("S", "second"), ("L", "millisecond"),
    ("U", "microsecond"), ("N", "nanosecond")
])
def test_get_str_from_freq(freqstr, expected):
    """Each frequency letter maps to its human-readable resolution name."""
    assert _reso.get_str_from_freq(freqstr) == expected
@pytest.mark.parametrize("freq", ["A", "Q", "M", "D", "H",
                                  "T", "S", "L", "U", "N"])
def test_get_freq_roundtrip(freq):
    """freq -> resolution name -> freq round-trips for every letter."""
    result = _reso.get_freq(_reso.get_str_from_freq(freq))
    assert freq == result
@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U"])
def test_get_freq_roundtrip2(freq):
    """Round-trip via the reso-object path: freq -> reso -> str -> freq."""
    result = _reso.get_freq(_reso.get_str(_reso.get_reso_from_freq(freq)))
    assert freq == result
@pytest.mark.parametrize("args,expected", [
    ((1.5, "T"), (90, "S")), ((62.4, "T"), (3744, "S")),
    ((1.04, "H"), (3744, "S")), ((1, "D"), (1, "D")),
    ((0.342931, "H"), (1234551600, "U")), ((1.2345, "D"), (106660800, "L"))
])
def test_resolution_bumping(args, expected):
    """Fractional strides are bumped to a finer resolution with an integer stride."""
    # see gh-14378
    assert _reso.get_stride_from_decimal(*args) == expected
@pytest.mark.parametrize("args", [
    (0.5, "N"),
    # Too much precision in the input can prevent.
    (0.3429324798798269273987982, "H")
])
def test_cat(args):
    """Strides that cannot be made integral at any resolution raise ValueError."""
    msg = "Could not convert to integer offset at any resolution"
    with pytest.raises(ValueError, match=msg):
        _reso.get_stride_from_decimal(*args)
@pytest.mark.parametrize("freq_input,expected", [
    # Frequency string.
    ("A", (get_freq("A"), 1)),
    ("3D", (get_freq("D"), 3)),
    ("-2M", (get_freq("M"), -2)),
    # Tuple.
    (("D", 1), (get_freq("D"), 1)),
    (("A", 3), (get_freq("A"), 3)),
    (("M", -2), (get_freq("M"), -2)),
    ((5, "T"), (FreqGroup.FR_MIN, 5)),
    # Numeric Tuple.
    ((1000, 1), (1000, 1)),
    # Offsets.
    (offsets.Day(), (get_freq("D"), 1)),
    (offsets.Day(3), (get_freq("D"), 3)),
    (offsets.Day(-2), (get_freq("D"), -2)),
    (offsets.MonthEnd(), (get_freq("M"), 1)),
    (offsets.MonthEnd(3), (get_freq("M"), 3)),
    (offsets.MonthEnd(-2), (get_freq("M"), -2)),
    (offsets.Week(), (get_freq("W"), 1)),
    (offsets.Week(3), (get_freq("W"), 3)),
    (offsets.Week(-2), (get_freq("W"), -2)),
    (offsets.Hour(), (FreqGroup.FR_HR, 1)),
    # Monday is weekday=0.
    (offsets.Week(weekday=1), (get_freq("W-TUE"), 1)),
    (offsets.Week(3, weekday=0), (get_freq("W-MON"), 3)),
    (offsets.Week(-2, weekday=4), (get_freq("W-FRI"), -2)),
])
def test_get_freq_code(freq_input, expected):
    """get_freq_code accepts strings, tuples, and offsets, returning (code, stride)."""
    assert get_freq_code(freq_input) == expected
def test_get_code_invalid():
    """An unknown frequency string in a tuple raises 'Invalid frequency'."""
    with pytest.raises(ValueError, match="Invalid frequency"):
        get_freq_code((5, "baz"))
| [
"jeff@reback.net"
] | jeff@reback.net |
4713dbf276d8f96fd74ab881a35cd5bb4782de60 | 02e8d0ceaadd388e0bd610075ee1bb287637745e | /behavior-cloning/drive.py | ea7e3b1d2bb17de084f2a1115dfc1c0e4a7e826d | [] | no_license | stunglan/CarND-term1 | 20b333d70aba919a302dd62b09e1b04084196139 | 492d9b78198d3e7b5fb608044371538e84bdd4e8 | refs/heads/master | 2021-01-01T17:37:02.797188 | 2017-04-27T17:13:06 | 2017-04-27T17:13:06 | 78,331,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,000 | py | import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
import cv2
import math
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
# Fix error with Keras and TensorFlow
import tensorflow as tf
tf.python.control_flow_ops = tf
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
    """Handle a simulator telemetry frame: preprocess the camera image,
    predict a steering angle with the loaded Keras model, and send the
    control command back.

    The incoming telemetry values are strings; steering_angle and throttle
    read from *data* are immediately overwritten below (only *speed* from
    the sim is still used, for logging).
    """
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car
    speed = data["speed"]
    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)
    # crop the image: drop the top 30% (sky) and bottom 10% (hood)
    top = math.ceil(image_array.shape[0]*0.30)
    bot = math.ceil(image_array.shape[0]-image_array.shape[0]*0.1)
    image_array = image_array[top:bot, :]
    # color the image (same RGB->YUV conversion as training preprocessing)
    image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2YUV)
    # resize the image to the model's 20x40 input
    rows,cols = 20,40
    image_array = cv2.resize(image_array,(cols,rows))
    # Add the leading batch dimension expected by Keras.
    transformed_image_array = image_array[None, :, :, :]
    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    steering_angle = float(model.predict(transformed_image_array, batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 0.2
    print(steering_angle, throttle,speed)
    send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
    """On client connect, log the session id and zero out the controls."""
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Emit a 'steer' event to the simulator with both values stringified."""
    sio.emit("steer", data={
    'steering_angle': steering_angle.__str__(),
    'throttle': throttle.__str__()
    }, skip_sid=True)
if __name__ == '__main__':
    # Load the model architecture (JSON) named on the command line, then its
    # weights from the sibling .h5 file, and serve the control loop on :4567.
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument('model', type=str,
    help='Path to model definition json. Model weights should be on the same path.')
    args = parser.parse_args()
    with open(args.model, 'r') as jfile:
        # NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
        # then you will have to call:
        #
        model = model_from_json(json.loads(jfile.read()))\
        #
        # instead.
        #model = model_from_json(jfile.read())
    model.compile("adam", "mse")
    # NOTE(review): str.replace swaps EVERY 'json' substring in the path,
    # not just the extension — confirm paths never contain 'json' elsewhere.
    weights_file = args.model.replace('json', 'h5')
    model.load_weights(weights_file)
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| [
"stunglan@gmail.com"
] | stunglan@gmail.com |
fab8bf0bc56c68ed72518b00f009e9276733f98e | b14b4c7adc71511f24aaffd9361d49c38ca75d28 | /build_readme.py | c32545f8c63ec34e9c25d48dd090bb24e904974c | [
"Apache-2.0"
] | permissive | santiagoballadares/santiagoballadares | de3d543da2a804a0d890962e547c44c8ec059045 | cd71fb85bebaec0460d37349d1e4af92f43ef809 | refs/heads/master | 2023-01-03T00:37:01.911808 | 2020-10-23T03:18:37 | 2020-10-23T03:18:37 | 283,056,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,939 | py | from python_graphql_client import GraphqlClient
import httpx
import json
import pathlib
import re
import os
root = pathlib.Path(__file__).parent.resolve()
client = GraphqlClient(endpoint="https://api.github.com/graphql")
WORKFLOW_TOKEN = os.environ.get("WORKFLOW_TOKEN", "")
def replace_chunk(content, marker, chunk, inline=False):
    """Replace the text between '<!-- MARKER starts -->' and
    '<!-- MARKER ends -->' in *content* with *chunk*.

    The marker comments are re-emitted around the new chunk so it can be
    replaced again later.  Unless *inline* is true the chunk is placed on
    its own line(s).

    Fixes vs. the original:
    * the marker is run through re.escape() so markers containing regex
      metacharacters cannot corrupt the pattern;
    * the replacement is passed as a callable, so backslashes in *chunk*
      are inserted literally instead of being interpreted as re.sub()
      escape sequences (a literal '\\s' used to raise 'bad escape').
    """
    marker_re = re.escape(marker)
    r = re.compile(
        r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(marker_re, marker_re),
        re.DOTALL,
    )
    if not inline:
        chunk = "\n{}\n".format(chunk)
    chunk = "<!-- {} starts -->{}<!-- {} ends -->".format(marker, chunk, marker)
    return r.sub(lambda _match: chunk, content)
def make_query(after_cursor=None):
    """Return the GraphQL query for one page (100 repos) of the viewer's
    public repositories, including each repo's latest release.

    The literal AFTER placeholder is substituted with the quoted pagination
    cursor, or with GraphQL null on the first page.
    """
    return """
    query {
      viewer {
        repositories(first: 100, privacy: PUBLIC, after:AFTER) {
          pageInfo {
            hasNextPage
            endCursor
          }
          nodes {
            name
            description
            url
            releases(last:1) {
              totalCount
              nodes {
                name
                publishedAt
                url
              }
            }
          }
        }
      }
    }
""".replace(
        "AFTER", '"{}"'.format(after_cursor) if after_cursor else "null"
    )
def fetch_releases(oauth_token):
    """Page through the viewer's public repositories on the GitHub GraphQL
    API and return a list of dicts describing each repo's latest release.

    Repos are de-duplicated by name and repos without releases are skipped.
    """
    repos = []
    releases = []
    repo_names = set()
    has_next_page = True
    after_cursor = None
    while has_next_page:
        data = client.execute(
            query=make_query(after_cursor),
            headers={"Authorization": "Bearer {}".format(oauth_token)},
        )
        # Debug: dump the raw GraphQL payload for every page.
        print()
        print(json.dumps(data, indent=2))
        print()
        for repo in data["data"]["viewer"]["repositories"]["nodes"]:
            if repo["releases"]["totalCount"] and repo["name"] not in repo_names:
                repos.append(repo)
                repo_names.add(repo["name"])
                releases.append(
                    {
                        "repo": repo["name"],
                        "repo_url": repo["url"],
                        "description": repo["description"],
                        # Strip the repo name out of release titles like "<repo> v1.2".
                        "release": repo["releases"]["nodes"][0]["name"].replace(repo["name"], "").strip(),
                        "published_at": repo["releases"]["nodes"][0]["publishedAt"],
                        "published_day": repo["releases"]["nodes"][0]["publishedAt"].split("T")[0],
                        "url": repo["releases"]["nodes"][0]["url"],
                    }
                )
        has_next_page = data["data"]["viewer"]["repositories"]["pageInfo"]["hasNextPage"]
        after_cursor = data["data"]["viewer"]["repositories"]["pageInfo"]["endCursor"]
    return releases
def fetch_tils():
    """Fetch and return the TIL entries JSON from the til repo on GitHub."""
    url = "https://raw.githubusercontent.com/santiagoballadares/til/master/entries.json"
    res = httpx.get(url)
    return res.json()
if __name__ == "__main__":
    # Rebuild the marker-delimited sections of README.md and releases.md
    # from the latest GitHub releases and TIL entries.
    readme_md = root / "README.md"
    releases_md = root / "releases.md"
    all_releases = fetch_releases(WORKFLOW_TOKEN)
    all_releases.sort(key=lambda r: r["published_at"], reverse=True)
    # Update README.md file
    readme_releases = "\n".join(
        [
            "* [{repo} {release}]({url}) - {published_day}".format(**release)
            for release in all_releases[:10]
        ]
    )
    readme_md_content = readme_md.open().read()
    rewritten_readme_md = replace_chunk(readme_md_content, "releases", readme_releases)
    # entries.json is oldest-first; reverse and keep the five newest.
    last_tils = fetch_tils()[::-1][:5]
    readme_tils = "\n".join(
        [
            "* [{title}]({url}) - {created}".format(title=til["title"], url=til["url"], created=til["created"].split("T")[0])
            for til in last_tils
        ]
    )
    rewritten_readme_md = replace_chunk(rewritten_readme_md, "tils", readme_tils)
    readme_md.open("w").write(rewritten_readme_md)
    # Update releases.md file
    releases = "\n".join(
        [
            (
                "* **[{repo}]({repo_url})**: [{release}]({url}) - {published_day}\n"
                "<br>{description}"
            ).format(**release)
            for release in all_releases
        ]
    )
    releases_md_content = releases_md.open().read()
    rewritten_releases_md = replace_chunk(releases_md_content, "releases", releases)
    rewritten_releases_md = replace_chunk(rewritten_releases_md, "releases_count", str(len(all_releases)), inline=True)
releases_md.open("w").write(rewritten_releases_md) | [
"santiago.balladares@outlook.com"
] | santiago.balladares@outlook.com |
1563a85779508967b46fb2b2a86060a4e95ecf2a | a80df8e2316c589a176d2e0cf2cef91eb1be9732 | /receitas/admin.py | 94d134f4c8f2bdb8d403ba6c73a47c72084ca6ca | [] | no_license | tiagoberwanger/django_recipes_app | 932806c3a2f6a7d1e0f3e53de41adfd85a3e75d9 | 346d5fd75e5ecba2d364c573db1d6321a57a8a65 | refs/heads/master | 2023-05-21T18:18:10.618436 | 2021-06-16T15:59:19 | 2021-06-16T15:59:19 | 377,521,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | from django.contrib import admin
from .models import Receita
# Register your models here.
admin.site.register(Receita)
| [
"berwangertiago@gmail.com"
] | berwangertiago@gmail.com |
228f9ec73b55831affc0619512034b1526823b5c | 2218aeedf4cd787b64fdbff68447c8a3d9b4e2a5 | /dj_ninjas/apps/dojo_ninjas/urls.py | e1996712514d0969a2bef928ad988080b6c533c8 | [] | no_license | CDApprentiPy/lexm | 004bb126390ae55882dc57fec2a8445a3a6fb171 | fe64b489d157f8d8c261fc061857b0b3df2cf8a3 | refs/heads/master | 2021-07-07T12:25:49.574875 | 2017-10-05T05:33:04 | 2017-10-05T05:33:04 | 103,675,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from django.conf.urls import url
from . import views
# Single route: the app root (empty path) is served by views.dojo.
urlpatterns = [
    url(r'^$', views.dojo),
]
| [
"lex.myers@gmail.com"
] | lex.myers@gmail.com |
867849b4a1a74bad8e87de49c3ee8b8079072654 | 3b78d0d2dda1e316d9be02ad05884102422484cf | /exercises/19_1_blog/blogs/models.py | fd86a36ea7f815487a3761af65455c2f3bf251a8 | [] | no_license | xerifeazeitona/PCC_WebApp | 4d28caedf44f5a5b6617a75393256bb0eb9d436c | 26f73805bf20a01f3879a05bf96e8ff6db0449fe | refs/heads/main | 2023-03-06T08:40:18.422416 | 2021-02-22T21:21:38 | 2021-02-22T21:21:38 | 340,138,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from django.db import models
from django.contrib.auth.models import User
class BlogPost(models.Model):
    """Simple model of a basic blog post."""
    # Short headline, capped at 200 characters.
    title = models.CharField(max_length=200)
    # Full body of the post.
    text = models.TextField()
    # Set once, automatically, when the row is first created.
    date_added = models.DateTimeField(auto_now_add=True)
    # Deleting a user also deletes their posts.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        """Return a string representation of the model."""
        return self.title
| [
"juliano.amaral@gmail.com"
] | juliano.amaral@gmail.com |
11ee2673541bbcfbdbc0652c2d3f447c8ea2db1e | e29b9b42158b8b2dd332ad8f4c511fb6c385186d | /tests/cdk_tests.py | 0a097b9d8b544b8d1aaf0bc81969ac8e4ecfab12 | [] | no_license | thewritingstew/cdk | 3926353bf6d5e5cce17fd22b37e352ddfb75ec75 | 5621aac5ea05eaf3ead465f73503a5c76768eb3a | refs/heads/master | 2020-07-04T16:03:40.472357 | 2017-01-16T18:25:04 | 2017-01-16T18:25:04 | 74,154,036 | 0 | 0 | null | 2016-12-04T02:59:07 | 2016-11-18T17:59:25 | Python | UTF-8 | Python | false | false | 550 | py | from nose.tools import *
from cdk.Menu import *
from cdk.Engine import *
def test_menu_items():
    # Build a Menu from the default Engine's menu list and verify its
    # static presentation attributes.
    engine = Engine('default')
    menu = Menu(engine.menuList)
    assert_equal(menu.welcomeText, "Welcome to Crags and Danger Kingdom!\nWhat would you like to do today?")
    assert_equal(menu.promptText, "Please make your selection below:")
    assert_equal(menu.menuOptions, {1:"Carson's game", 2:"Davis' game", 0:"Quit"})
    assert_equal(menu.menuDecoration, (70*'='))
# NOTE(review): Python 2 print statement — this module only runs under py2.
def teardown():
    print "TEAR DOWN!"
# Smoke test: only proves the test runner executed this module.
def test_basic():
    print "I RAN!"
| [
"richard.o.stewart@gmail.com"
] | richard.o.stewart@gmail.com |
508188051f85a751455e09e12ddd895bbae5692d | b210d58b724c7199f43ddde031eba40996c82257 | /submissions/sm_012_gaurav/week_23/day_5/evaluation/backend/helpers_company.py | 09c85d20eeda4f2768aadd31c93f53c644fec0fc | [] | no_license | Tanmay53/cohort_3 | 0447efd2dc8b3c1284b03da7326b35393fdf5f93 | 351fb6e1d0c29995fb5cb3b6af411dbcf0ced64c | refs/heads/master | 2021-05-23T17:55:36.508185 | 2020-04-05T12:58:07 | 2020-04-05T12:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | def getCompany(cursor):
    # Pull every row from the company table and return them as a list.
    cursor.execute('''select * from company''')
    result = []
    for comp in cursor.fetchall():
        result.append(comp)
    return result
def addCompany(cursor, name, location):
    """Insert one (name, location) row into the company table.

    The caller owns the connection and is responsible for committing.
    """
    sql = '''insert into company(name, location) values(%s, %s)'''
    params = (name, location, )
    cursor.execute(sql, params)
| [
"gaurav.arya.1297@gmail.com"
] | gaurav.arya.1297@gmail.com |
f3d0ece36268d4230551b11ed2f6406952f6e4f2 | f3606e213aab0388eaff3a8ef3b5065f3d20d24c | /test2/test14.py | a0c1a3ecb09ce93b7fa19a01d715fad8a252ec1d | [] | no_license | linchaohao1/pythontest | 242d292be9468345ff5db8982615c08e512018f2 | ec003d6747cb01ae518166de9fa25aa3c88e26dc | refs/heads/master | 2020-04-24T06:47:37.210491 | 2019-02-21T03:17:34 | 2019-02-21T03:17:34 | 171,777,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from sys import argv
# Interactive questionnaire; expects the user's name as the one CLI argument.
script, user_name = argv
prompt = '>'
print(f"Hi {user_name},I'm the {script} script.")
print("I'd like to ask you a few questions.")
print(f"Do you like me, {user_name}?")
likes = input(prompt)
print(f"Where do you live, {user_name}?")
lives = input(prompt)
print("What kind of computer do you have?")
computer = input(prompt)
print(f"""
Alright,so you said {likes} about liking me.
You live in {lives}.Not sure where that is.
And you have a {computer} computer .Nice.
""") | [
"407410113@qq.com"
] | 407410113@qq.com |
58ef26c06bc8b02ca3b23e4a4e081a5f40f43eea | e1389a1002347f2216c29bcdbf4a26e2bd56906b | /src/stock_price_crawler.py | 0c20d667ad091396ac4a703957abd28cdaad292e | [] | no_license | goddoe/scitrader | 968a5f2a16d346963e8337fe51707503a27cb771 | 8425cf8db2aa7aa18b34ea31b782572641b5feb9 | refs/heads/master | 2021-01-01T17:44:43.476641 | 2017-07-24T03:14:27 | 2017-07-24T03:14:27 | 98,144,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | import urllib
import time
from urllib.request import urlopen
from bs4 import BeautifulSoup
# Scrape the daily price table for one ticker (005930 = Samsung Electronics)
# from Naver Finance.
stockItem = '005930'
url = 'http://finance.naver.com/item/sise_day.nhn?code='+ stockItem
html = urlopen(url)
source = BeautifulSoup(html.read(), "html.parser")
# The pager's last link (class "pgRR") encodes the final page number in its
# href; the last three characters are parsed as that number.
maxPage=source.find_all("table",align="center")
mp = maxPage[0].find_all("td",class_="pgRR")
mpNum = int(mp[0].a.get('href')[-3:])
for page in range(1, mpNum+1):
    print (str(page) )
    url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem +'&page='+ str(page)
    html = urlopen(url)
    source = BeautifulSoup(html.read(), "html.parser")
    srlists=source.find_all("tr")
    isCheckNone = None
    # NOTE(review): page % 1 is always 0, so this sleeps on every page —
    # a larger modulus was probably intended.
    if((page % 1) == 0):
        time.sleep(1.50)
    # Print date and first numeric cell of every populated table row.
    for i in range(1,len(srlists)-1):
        if(srlists[i].span != isCheckNone):
            srlists[i].td.text
            print(srlists[i].find_all("td",align="center")[0].text, srlists[i].find_all("td",class_="num")[0].text )
    # NOTE(review): this break ends the OUTER loop after the first page, so
    # pages 2..mpNum are never fetched — confirm whether intended.
    break
| [
"goddoe2@gmail.com"
] | goddoe2@gmail.com |
db0cda62e81d8b6b8e962f75d75d6c2092029278 | 700d3633f9b389557666e79c914a8d64fb81ef71 | /python/rename.py | aa4d9e503773a30b63e212566808e35da28985a8 | [] | no_license | xi-studio/ANPR-1 | e544198393c820ca292f951210d593d38b59f2f1 | 5030cd84e85d36b9353ad6ebc87a7bc514563051 | refs/heads/master | 2021-01-22T23:53:13.028877 | 2013-12-04T06:41:55 | 2013-12-04T06:41:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | #!/usr/bin/python2
import os
# Rename every file in the current directory (except this script) to a
# zero-padded sequential name: 000.jpg, 001.jpg, ...
index = 0
for filename in os.listdir("."):
    if filename != "rename.py":
        # NOTE(review): the .jpg extension is forced regardless of the
        # original type, and an existing target name may be overwritten.
        os.rename(filename, "%03d.jpg" % index)
index += 1 | [
"anton.zhv@gmail.com"
] | anton.zhv@gmail.com |
0362844276cdcce64c66e350f09c947d57457c2f | 264ce32d9eebb594cc424ecb3b8caee6cb75c2f3 | /content/hw/02_bootstrap/ok/tests/q9.py | b2cd04951595906ae26cf1f60d6afb44d329d8d3 | [] | no_license | anhnguyendepocen/psych101d | a1060210eba2849f371d754e8f79e416754890f9 | 41057ed5ef1fd91e243ab41040f71b51c6443924 | refs/heads/master | 2022-03-24T02:20:32.268048 | 2019-12-21T02:51:02 | 2019-12-21T02:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | test = {
"name": "Putting It All Together",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
>>> ## Did you define the right variables?
>>> "easy_boot_delta_means" in globals().keys()
True
>>> "hard_boot_delta_means" in globals().keys()
True
>>> "no_difference_easy" in globals().keys()
True
>>> "no_difference_hard" in globals().keys()
True
""",
"hidden": False,
"locked": False
},
{
"code": r"""
>>> ## Are the left sides located in the right spot relative to 0?
>>> np.percentile(easy_boot_delta_means, 5) < 0
True
>>> np.percentile(hard_boot_delta_means, 5) < 0
False
>>> ## Are the means reasonable?
>>> np.mean(easy_boot_delta_means) > 0.15
True
>>> np.mean(hard_boot_delta_means) > 1.5
True
>>> ## Are the final inferences correct?
>>> no_difference_easy, no_difference_hard
(True, False)
""",
"hidden": False,
"locked": False
}
],
"setup": r"""
>>> eps = 1e-5
""",
"teardown": r"""
""",
"type": "doctest"}]
}
| [
"charlesfrye@berkeley.edu"
] | charlesfrye@berkeley.edu |
636dead85b23a51077592edb5bc7d54905652abc | 4e3f9e3fc47a70c642ed19f1e2119a654bfccf0d | /xfstool.py | df23bdd853d8d70e54516d76b167b5aedd2aaf7a | [] | no_license | Seraphin-/crossbeats-tools | 9f9c477db7a4bec04d1595ac43073baf51081ca4 | 62b587c74f1bb47a0d2fd070a5851027b338ec2b | refs/heads/master | 2022-03-15T10:57:20.327811 | 2022-02-20T10:08:38 | 2022-02-20T10:08:38 | 188,546,368 | 6 | 2 | null | 2022-02-20T10:08:39 | 2019-05-25T09:26:29 | Python | UTF-8 | Python | false | false | 20,879 | py | class XFSCommon(object):
CLASSTYPES = {
0: "undefined",
1: "class",
2: "classref",
3: "bool",
4: "u8",
5: "u16",
6: "u32",
7: "u64",
8: "s8",
9: "s16",
10: "s32",
11: "s64",
12: "f32",
13: "f64",
14: "string",
15: "color",
16: "point",
17: "size",
18: "rect",
19: "matrix44",
20: "vector3",
21: "vector4",
22: "quaternion",
23: "property",
24: "event",
25: "group",
26: "pagebegin",
27: "pageend",
28: "event32",
29: "array",
30: "propertylist",
31: "groupend",
32: "cstring",
33: "time",
34: "float3",
35: "float4",
36: "float3x3",
37: "float4x3",
38: "float4x4",
39: "easecurve",
40: "line",
41: "linesegment",
43: "plane",
44: "sphere",
45: "capsule",
46: "aabb",
48: "cylinder",
49: "triangle",
50: "cone",
51: "torus",
52: "ellpsoid",
53: "range",
54: "rangef",
55: "rangeu16",
56: "hermitecurve",
57: "enumlist",
58: "float3x4",
59: "linesegment4",
60: "aabb4",
61: "oscillator",
62: "variable",
63: "vector2",
64: "matrix33",
65: "rect3d_xz",
66: "rect3d",
67: "rect3d_collision",
68: "plane_xz",
69: "ray_y",
70: "pointf",
71: "sizef",
72: "rectf",
128: "resource"
}
    def __init__(self, filename, ios=False, oldIos=False):
        """Open *filename* and validate the 4-byte XFS magic.

        ios/oldIos switch the field widths and per-subclass strides used by
        the header readers below.  NOTE(review): this class calls
        self.unpack/self.pack but defines neither; a subclass is expected
        to provide them (XFSToXML does ``from struct import unpack`` at
        class scope, but no visible code supplies ``pack``).
        """
        self.file = open(filename, "rb")
        self.ios = ios
        self.oldIos = oldIos
        self.logFile = None  # lazily opened by dPrint()
        if self.file.read(4) != b"XFS\x00":
            raise ValueError("Invalid XFS")
    def readIntDword(self):
        """Read a little-endian signed 32-bit int from the stream."""
        return self.unpack("<i",self.file.read(4))[0]
    def getAndReadIntDword(self):
        """Return (raw 4 bytes, decoded signed 32-bit value)."""
        tmp = self.file.read(4)
        return (tmp, self.unpack("<i",tmp)[0])
    def writeIntDword(self, arg):
        """Write *arg* to self.output as a little-endian signed 32-bit int."""
        self.output.write(self.pack("<i",arg))
    def formatIntDword(self, arg):
        """Return *arg* packed as a little-endian signed 32-bit int."""
        return self.pack("<i", arg)
    def readIntWord(self):
        """Read a little-endian signed 16-bit int from the stream."""
        return self.unpack("<h",self.file.read(2))[0]
    def readIntQword(self):
        """Read a little-endian signed 64-bit int from the stream."""
        return self.unpack("<q",self.file.read(8))[0]
    def getAndReadIntQword(self):
        """Return (raw 8 bytes, decoded signed 64-bit value)."""
        tmp = self.file.read(8)
        return (tmp, self.unpack("<q",tmp)[0])
    def readGeneralInt(self):
        """Read a platform-width int: 64-bit when ios is set, else 32-bit."""
        if self.ios:
            return self.readIntQword()
        else:
            return self.readIntDword()
    def getAndReadGeneralInt(self):
        """(raw bytes, value) variant of readGeneralInt()."""
        if self.ios:
            return self.getAndReadIntQword()
        else:
            return self.getAndReadIntDword()
    def formatGeneralInt(self, arg):
        """Pack a platform-width int.

        NOTE(review): the widths here are the OPPOSITE of readGeneralInt()
        (which reads 8 bytes when ios is set, 4 otherwise) — confirm which
        side is correct before relying on round-trips.
        """
        if self.ios:
            return self.pack("<i",arg)
        else:
            return self.pack("<q",arg)
    def formatIntWord(self, arg):
        """Return *arg* packed as a little-endian UNSIGNED 16-bit int."""
        return self.pack("<H", arg)
    def readStringFromOffset(self, offset):
        """Read a NUL-terminated ASCII string at *offset*, then restore the
        stream position to where it was before the call."""
        returnOffset = self.file.tell()
        self.file.seek(offset)
        output = ""
        while True:
            buf = self.file.read(1)
            if buf == b"\x00":
                break
            output += buf.decode("ascii")
        self.file.seek(returnOffset)
        return output
    def readNullTerminatedString(self):
        """Read a NUL-terminated ASCII string at the current position,
        consuming the terminator."""
        output = ""
        while True:
            buf = self.file.read(1)
            if buf == b"\x00":
                break
            output += buf.decode("ascii")
        return output
    def readSingleByteInt(self):
        """Read one unsigned byte from the stream."""
        return self.unpack("B",self.file.read(1))[0]
    def getAndReadIntWord(self):
        """Return (raw 2 bytes, decoded signed 16-bit value)."""
        try:
            tmp = self.file.read(2)
            return (tmp, self.unpack("<h",tmp)[0])
        except Exception as e:
            # Log the stream position before re-raising, to aid debugging
            # truncated reads near end-of-file.
            print(self.file.tell())
            raise e
    def writeIntWord(self, arg):
        """Write *arg* to self.output as a little-endian signed 16-bit int."""
        self.output.write(self.pack("<h",arg))
    def dPrint(self, log, *args):
        """Debug print: echo *args* to stdout when *log* is truthy, and
        always append to log.txt.

        NOTE(review): only args[0] and args[1] reach the log file (later
        args are dropped), and the log handle is never closed.
        """
        if log:
            print(args)
        if not self.logFile:
            self.logFile = open("log.txt", "w")
        self.logFile.write(args[0] + str(args[1]) + "\n")
    def readHeader(self, log=False):
        """Parse the XFS header into instance attributes.

        Populates: version, int1, xfsType, structCount, startOffset,
        structOffsets, names, and structList (a list of dicts with
        structhash/subcount/subclasses).  When *log* is true, every field
        is echoed via dPrint().
        """
        self.version = self.readIntDword()
        self.dPrint(log,"XFS Version: ", self.version)
        self.int1 = self.readIntDword()
        self.dPrint(log,"int1: ", self.int1)
        self.xfsType = self.readIntDword()
        self.dPrint(log,"XFS Type: ", self.xfsType)
        self.structCount = self.readIntDword()
        self.dPrint(log,"Struct Count: ", self.structCount)
        # Offsets in the file are relative to the 0x18-byte fixed header.
        self.startOffset = self.readIntDword() + 0x18
        self.dPrint(log,"Adjusted Start Offset: ", self.startOffset)
        self.structOffsets = []
        self.names = []
        for x in range(self.structCount):
            offset = self.readGeneralInt()
            self.dPrint(log,"Struct offset ", x, ": ", offset)
            self.structOffsets.append(offset)
        self.structList = []
        for x in range(self.structCount):
            self.dPrint(log,"==> Struct ", x, ":")
            # NOTE(review): this dict is assigned but never used.
            struct = {}
            structhash = self.readGeneralInt()
            self.dPrint(log,"==> Hash: ", structhash)
            subcount = self.readGeneralInt()
            self.dPrint(log,"==> Subclass count: ", subcount)
            subclasses = []
            # NOTE(review): the inner loop reuses the name ``x``, shadowing
            # the outer loop variable.
            for x in range(subcount):
                self.dPrint(log,"====> Subclass ", x, ":")
                nameOffset = self.readGeneralInt() + 0x18
                name = self.readStringFromOffset(nameOffset)
                self.dPrint(log,"====> Name: ", name)
                subtype = self.readSingleByteInt()
                self.dPrint(log,"====> Type: ", subtype)
                unknown = self.readSingleByteInt()
                self.dPrint(log,"====> Unknown: ", unknown)
                size = self.readSingleByteInt()
                self.dPrint(log,"====> Size: ", size)
                # Skip the remaining (unparsed) bytes of this subclass
                # record; the stride depends on the platform flags.
                if(self.ios):
                    self.file.seek(0x45, 1)
                elif (self.oldIos):
                    self.file.seek(0x21, 1)
                else:
                    self.file.seek(0x11, 1)
                self.names.append(name)
                subclasses.append({"name": name,
                                   "type": subtype,
                                   "size": size,
                                   "unk": unknown})
            self.structList.append({"structhash": structhash,
                                    "subcount": subcount,
                                    "subclasses": subclasses})
    def writeHeader(self):
        """Serialise the parsed header back out to self.output.

        Requires readHeader() to have populated the instance attributes and
        the caller to have opened self.output.  NOTE(review): the ios
        widths here (4-byte general ints, 0x18 subclass records) are the
        opposite of what readHeader() consumes when ios is set — confirm
        which side matches the real format.
        """
        #first, write out the base header
        self.output.write(b"XFS\x00")
        self.writeIntDword(self.version)
        self.writeIntDword(self.int1)
        self.writeIntDword(self.xfsType)
        self.writeIntDword(self.structCount)
        #also, we have to calculate total length first for name...
        totalLen = 0
        generalIntLen = 8
        subclassLen = 0x50
        structOffsetHeader = b""
        nameString = b""
        nameMap = {}
        # Build the deduplicated name-string table and remember each
        # name's offset within it.
        for name in self.names:
            if name not in nameMap:
                nameMap[name] = len(nameString)
                nameString += name.encode() + b"\x00"
        if self.ios:
            generalIntLen = 4
            subclassLen = 0x18
        for struct in self.structList:
            structOffsetHeader += self.formatGeneralInt(totalLen + (generalIntLen * self.structCount))
            totalLen += generalIntLen * 2
            totalLen += struct['subcount'] * subclassLen
        #now we can get where the name offset will be
        structHeader = b""
        for struct in self.structList:
            structHeader += self.formatGeneralInt(struct['structhash'])
            structHeader += self.formatGeneralInt(struct['subcount'])
            for subclass in struct['subclasses']:
                structHeader += self.formatGeneralInt(totalLen + len(structOffsetHeader) + nameMap[subclass['name']])
                # class/classref/resource sizes are pointer-width; note this
                # mutates the parsed subclass dicts in place.
                if subclass['type'] in [1,2,128]:
                    if self.ios:
                        subclass['size'] = 4
                    else:
                        subclass['size'] = 8
                structHeader += bytes([subclass['type']])
                structHeader += bytes([subclass['unk']])
                structHeader += bytes([subclass['size']])
                structHeader += b"\x00" * (subclassLen - 0x03 - generalIntLen)
        pad = (len(structHeader) + len(nameString) + len(structOffsetHeader) + 0x02) % 0x04 #pad to 0x04 width
        print("Padding:",pad)
        self.writeIntDword(len(structHeader) + pad + len(nameString) + len(structOffsetHeader))
        self.output.write(structOffsetHeader)
        self.output.write(structHeader)
        self.output.write(nameString)
        self.output.write(b"\x00" * pad)
class XFSToXML(XFSCommon):
    """Dumps a parsed binary XFS file to an XML file next to the input."""
    from struct import unpack
    typeHandlers = {}
    def __init__(self, filename, ios=False, oldIos=False):
        XFSCommon.__init__(self, filename, ios, oldIos)
        # Bug fix: give each instance its own handler table.  The class-level
        # dict was shared, so a later instance's bound methods silently
        # replaced an earlier instance's handlers.
        self.typeHandlers = {}
        self.defineHandler(1, None)
        self.defineHandler(2, None)
        self.defineHandler(3, self.boolHandler)
        self.defineHandler(6, self.u32Handler)
        self.defineHandler(12, self.f32Handler)
        self.defineHandler(10, self.s32Handler)
        self.defineHandler(16, self.pointHandler)
        self.defineHandler(32, self.cstringHandler)
        self.defineHandler(128, self.resourceHandler)
    def boolHandler(self):
        """Read one byte and render it as a true/false attribute."""
        a = self.file.read(1)
        if a != b"\x00":
            return ' value="true"'
        else:
            return ' value="false"'
    def f32Handler(self):
        """Read a little-endian float32 as a value attribute."""
        return ' value="%s"' % self.unpack("<f", self.file.read(4))[0]
    def s32Handler(self):
        """Read a little-endian signed int32 as a value attribute."""
        return ' value="%s"' % self.unpack("<i",self.file.read(4))[0]
    def u32Handler(self):
        """Read an unsigned int32 as a value attribute."""
        return ' value="%s"' % self.readIntDword()
    def pointHandler(self):
        """Read two signed int32s as x/y attributes."""
        return ' x="%s" y="%s"' % self.unpack("<ii",self.file.read(8))
    def cstringHandler(self):
        """Read a null-terminated string as a value attribute."""
        return ' value="%s"' % self.readNullTerminatedString()
    def resourceHandler(self, recursionLevel, out, name, length):
        """Write <resource> elements; *name* is unused but kept for the
        uniform handler call signature."""
        test = self.readSingleByteInt()
        # Bug fix: small-int identity checks ("is 2" / "is not 1") replaced
        # with equality -- identity only worked via CPython's int cache.
        # Also removed an unused local (output = ' value="').
        if test == 2 or length != 1:
            self.file.seek(-0x01, 1)
            for x in range(length):
                resType = self.readSingleByteInt()
                if resType != 2:
                    raise ValueError("Bad resource (list)!")
                out.write("\t" * recursionLevel + "<resource type=\"")
                out.write(self.readNullTerminatedString())
                out.write('" value="')
                out.write(self.readNullTerminatedString())
                out.write('"/>\n')
            if length == 1:
                self.file.seek(0x04, 1)
        else:
            #nothing here!
            self.file.seek(-0x01 + -0x04, 1)
    def defineHandler(self, type, function):
        """Register the formatter for an XFS field type id."""
        self.typeHandlers[type] = function
    def classHandler(self, recursionLevel, out):
        """Recursively render one serialised class instance as XML."""
        classNo = self.readIntWord() >> 1
        self.file.seek(0x02, 1)
        out.write('type="%s" length="%s">\n' % (self.structList[classNo]["structhash"],self.readGeneralInt()))
        for element in self.structList[classNo]['subclasses']:
            length = self.readIntDword()
            if length != 1:
                out.write("\t" * recursionLevel + '<array name="%s" type="%s" count="%s">\n' % (element["name"], self.CLASSTYPES[element["type"]], length))
                recursionLevel += 1
            if element["type"] == 128:
                self.resourceHandler(recursionLevel, out, element["name"], length)
            for x in range(length):
                if element["type"] not in self.typeHandlers:
                    raise ValueError("Unsupported type: %s @ %s" % (element["type"],self.file.tell()))
                if element["type"] == 1:
                    out.write("\t" * recursionLevel + '<class name="%s" ' % element["name"])
                    self.classHandler(recursionLevel + 1, out)
                    out.write("\t" * (recursionLevel) + "</class>\n")
                elif element["type"] == 2:
                    out.write("\t" * (recursionLevel) + "<classref ")
                    self.classHandler(recursionLevel + 1, out)
                    out.write("\t" * (recursionLevel) + "</classref>\n")
                elif element["type"] == 128:
                    pass
                else:
                    value = self.typeHandlers[element["type"]]()
                    out.write("\t" * recursionLevel + '<%s name="%s"%s/>\n' % (self.CLASSTYPES[element["type"]], element["name"], value))
            if length != 1:
                out.write("\t" * (recursionLevel - 1) + "</array>\n")
    def parseData(self):
        """Walk the data section and write <input>.xml beside the source file."""
        self.file.seek(self.startOffset + 0x04) #assuming top level is not array...
        out = open(self.file.name + ".xml", "w")
        out.write("""<?xml version="1.0" encoding="utf-8"?>
<xfs>
<meta name="properties">
<tag name="version">%s</tag>
<tag name="type">%s</tag>
<tag name="int1">%s</tag>
<tag name="platform">%s</tag>
</meta>
<meta name="structs">\n""" % (self.version, self.xfsType, self.int1, "ios" if self.ios else "ac"))
        for struct in self.structList:
            out.write("""\t\t<struct>
<tag name="hash">%s</tag>
<tag name="classcount">%s</tag>\n""" % (struct["structhash"], struct["subcount"]))
            for subclass in struct["subclasses"]:
                out.write("""\t\t\t<class name="%s">
<tag name="type">%s</tag>
<tag name="size">%s</tag>
<tag name="unknown">%s</tag>
</class>\n""" % (subclass["name"], subclass["type"], subclass["size"], subclass["unk"]))
            out.write("""\t\t</struct>\n""")
        out.write("""\t</meta>
\t<class name="XFS" type="%s" length="%s">\n""" % (self.structList[0]["structhash"],self.readGeneralInt()))
        for topElement in self.structList[0]["subclasses"]:
            length = self.readIntDword()
            recursionLevel = 2
            if length != 1:
                out.write("\t" * recursionLevel + '<array name="%s" type="%s" count="%s">\n' % (topElement["name"], self.CLASSTYPES[topElement["type"]], length))
                recursionLevel += 1
            if topElement["type"] == 128:
                self.resourceHandler(recursionLevel, out, topElement["name"], length)
            for x in range(length):
                if topElement["type"] not in self.typeHandlers:
                    raise ValueError("Unsupported type: %s @ %s" % (topElement["type"],self.file.tell()))
                if topElement["type"] == 1:
                    out.write("\t" * recursionLevel + '<class name="%s" ' % topElement["name"])
                    self.classHandler(recursionLevel + 1, out)
                    out.write("\t" * (recursionLevel) + "</class>\n")
                elif topElement["type"] == 2:
                    out.write("\t" * (recursionLevel) + "<classref ")
                    self.classHandler(recursionLevel + 1, out)
                    out.write("\t" * (recursionLevel) + "</classref>\n")
                elif topElement["type"] == 128:
                    pass
                else:
                    value = self.typeHandlers[topElement["type"]]()
                    out.write("\t" * recursionLevel + '<%s name="%s"%s/>\n' % (self.CLASSTYPES[topElement["type"]], topElement["name"], value))
            if length != 1:
                out.write("\t" * (recursionLevel - 1) + "</array>\n")
        out.write("\t</class>\n</xfs>")
        print("Written to %s.xml" % self.file.name)
class XMLToXFS(XFSCommon):
    """Rebuilds a binary XFS file from the XML produced by XFSToXML."""
    import xml.etree.ElementTree as ET
    from struct import pack
    formatHandlers = {}
    def __init__(self, filename, output, outputAc=False, fixChartForiOS=False):
        self.logFile = None
        self.xml = self.ET.parse(filename).getroot()
        self.output = open(output, "wb")
        self.fixIosStruct = fixChartForiOS
        # xml[0] is the <meta name="properties"> block written by XFSToXML.
        self.version, self.xfsType, self.int1, self.origin = (int(self.xml[0][0].text),
                                                int(self.xml[0][1].text),
                                                int(self.xml[0][2].text),
                                                self.xml[0][3].text)
        self.originIos = True if self.origin == "ios" else False
        self.ios = outputAc #odd name but for writeHeader kinda...
        self.classtypesFromName = {v:k for k,v in self.CLASSTYPES.items()}
        # Bug fix: give each instance its own handler table; the class-level
        # dict was shared (and overwritten) across instances.
        self.formatHandlers = {}
        self.defineHandler(1, None)
        self.defineHandler(2, None)
        self.defineHandler(3, self.boolHandler)
        self.defineHandler(6, self.u32Handler)
        self.defineHandler(12, self.f32Handler)
        self.defineHandler(10, self.s32Handler)
        self.defineHandler(16, self.pointHandler)
        self.defineHandler(32, self.cstringHandler)
        self.defineHandler(128, self.resourceHandler)
    def defineHandler(self, type, handler):
        """Register the serialiser for an XFS field type id."""
        self.formatHandlers[type] = handler
    def readHeader(self, log=False):
        """Collect struct metadata from the XML <meta name="structs"> section."""
        # Bug fix: Element.getchildren() was removed in Python 3.9; len() on
        # an Element counts its children directly.
        self.structCount = len(self.xml[1])
        if self.fixIosStruct and len(self.xml.findall("./class/class/array/classref[@type='8867325']")) > 0:
            self.structCount -= 1
        self.dPrint(log, "Struct Count: ", self.structCount)
        self.names = []
        self.structList = []
        self.structDict = {}
        self.hashToNumber = {}
        counter = 0
        for struct in self.xml[1]:
            if self.fixIosStruct and struct[0].text == "8867325":
                continue
            self.dPrint(log,"==> Struct ", ":")
            structhash = int(struct[0].text)
            self.dPrint(log,"==> Hash: ", structhash)
            subcount = int(struct[1].text)
            self.dPrint(log,"==> Subclass count: ", subcount)
            subclasses = []
            for x in range(subcount):
                self.dPrint(log,"====> Subclass ", x, ":")
                name = struct[2 + x].attrib['name']
                self.dPrint(log,"====> Name: ", name)
                subtype = int(struct[2 + x][0].text)
                self.dPrint(log,"====> Type: ", subtype)
                unknown = int(struct[2 + x][2].text)
                self.dPrint(log,"====> Unknown: ", unknown)
                size = int(struct[2 + x][1].text)
                self.dPrint(log,"====> Size: ", size)
                self.names.append(name)
                subclasses.append({"name": name,
                                   "type": subtype,
                                   "size": size,
                                   "unk": unknown})
            self.structList.append({"structhash": structhash,
                                    "subcount": subcount,
                                    "subclasses": subclasses})
            self.structDict[structhash] = {"subcount": subcount,
                                           "subclasses": subclasses}
            self.hashToNumber[structhash] = counter
            counter += 1
    def boolHandler(self, data):
        """Serialise a true/false attribute as one byte."""
        temp = True if data.attrib['value'] == "true" else False
        return self.pack("<b", int(temp))
    def f32Handler(self, data):
        """Serialise a value attribute as a little-endian float32."""
        return self.pack("<f", float(data.attrib['value']))
    def s32Handler(self, data):
        """Serialise a value attribute as a signed int32."""
        return self.pack("<i", int(data.attrib['value']))
    def u32Handler(self, data):
        """Serialise a value attribute as an unsigned int32."""
        return self.pack("<I", int(data.attrib['value']))
    def pointHandler(self, data):
        """Serialise x/y attributes as two signed int32s."""
        return self.pack("<ii", int(data.attrib['x']), int(data.attrib['y']))
    def cstringHandler(self, data):
        """Serialise a value attribute as a null-terminated string."""
        return data.attrib['value'].encode() + b"\x00"
    def resourceHandler(self, data):
        #different than others since we know there must be a resource here
        buf = b"\x02"
        buf += data.attrib['type'].encode() + b"\x00"
        buf += data.attrib['value'].encode() + b"\x00"
        return buf
    def parseSingle(self, data, eType, log=False):
        """Serialise one XML element of type id *eType*."""
        if eType in [1,2]:
            if self.fixIosStruct and data.attrib['type'] == "8867325":
                return b""
            return self.classHandler(data, log)
        else:
            return self.formatHandlers[eType](data)
    def internalClassHandler(self, data, log=False):
        """Serialise the children of a class element into a bytes buffer."""
        buf = b""
        for element in data:
            if element.tag == "array":
                length = int(element.attrib['count'])
                eType = self.classtypesFromName[element.attrib['type']]
            else:
                length = 1
                eType = self.classtypesFromName[element.tag]
            if "name" in element.attrib and element.attrib["name"] == "mNoteSum":
                length = 8 if self.ios else 4 #ok because will be at end
            if self.fixIosStruct and element.attrib['name'] == "mpArray":
                # Drop the removed iOS-only classrefs from the stored count.
                printLen = length
                printLen -= len(self.xml.findall("./class/class/array/classref[@type='8867325']"))
                buf += self.formatIntDword(printLen)
            else:
                buf += self.formatIntDword(length)
            if length > 1:
                for x in range(length):
                    buf += self.parseSingle(element[x], eType, log)
            elif length == 0:
                pass
            else:
                buf += self.parseSingle(element, eType, log)
            if eType == 128:
                buf += b"\x00" * 0x04
        return buf
    def classHandler(self, data, log=False):
        """Serialise one class element with its id / counter / length prefix."""
        classNo = self.hashToNumber[int(data.attrib['type'])]
        classNo = (classNo << 1) + 1
        base = self.formatIntWord(classNo)
        base += self.formatIntWord(self.trueCounter)
        self.trueCounter += 1
        buf = self.internalClassHandler(data, log)
        return base + self.formatGeneralInt(len(buf) + len(self.formatGeneralInt(0))) + buf
    def parseData(self, log=False):
        """Serialise the top-level <class> element and close the output file."""
        self.output.write(self.formatIntDword(1))
        self.trueCounter = 1
        buf = self.internalClassHandler(self.xml[2])
        self.output.write(self.formatGeneralInt(len(buf) + len(self.formatGeneralInt(0))) + buf)
        self.output.close()
        print("Written to %s" % self.output.name)
class ConvertACIOS(XFSCommon):
    """Re-serialises an XFS file between the AC and iOS binary layouts."""
    from struct import unpack, pack
    def __init__(self, filename, output, ios=False, oldIos=False): #ios means "is input ios"
        XFSCommon.__init__(self, filename, ios, oldIos)
        self.output = open(output, "wb")
    def resourceHandler(self, length):
        """Copy a resource entry (or list of entries) into the output layout."""
        test = self.readSingleByteInt()
        # Bug fix: small-int identity checks ("is 2" / "is not 1") replaced
        # with equality comparisons.
        if test == 2 or length != 1:
            buf = self.formatIntDword(length) + b"\x02"
            self.file.seek(-0x01, 1)
            for x in range(length):
                resType = self.readSingleByteInt()
                if resType != 2:
                    raise ValueError("Bad resource (list)!")
                buf += self.readNullTerminatedString().encode() + b"\x00"
                buf += self.readNullTerminatedString().encode() + b"\x00"
            if length == 1:
                self.file.seek(0x04, 1)
            buf += b"\x00" * 4
            return buf
        else:
            #nothing here!
            self.file.seek(-0x01 + -0x04, 1)
            return b""
    def classHandler(self, log=False):
        """Recursively copy one class instance, recomputing its length field."""
        buf = b""
        temp, classNo = self.getAndReadIntWord()
        base = temp
        classNo = classNo >> 1
        base += self.getAndReadIntWord()[0]
        self.readGeneralInt() #skip the offset
        for element in self.structList[classNo]['subclasses']:
            temp, length = self.getAndReadIntDword()
            if element["type"] == 128:
                buf += self.resourceHandler(length)
            else:
                buf += temp
            for x in range(length):
                if element["type"] in [1,2]:
                    buf += self.classHandler()
                elif element["type"] == 32:
                    # Bug fix: was a bare readNullTerminatedString() call
                    # (NameError at runtime) -- it is a method on this object.
                    buf += self.readNullTerminatedString().encode() + b"\x00"
                elif element["type"] == 128:
                    pass
                else:
                    #print(element)
                    buf += self.file.read(element["size"])
        return base + self.formatGeneralInt(len(buf) + len(self.formatGeneralInt(0))) + buf
    def parseData(self, log=False):
        """Convert the whole data section and write the re-targeted file body."""
        self.file.seek(self.startOffset) #assuming top level is not array...
        self.output.write(self.getAndReadIntDword()[0])
        #need to buffer output recursively so can get total length hh
        self.readGeneralInt()
        buf = b""
        for topElement in self.structList[0]["subclasses"]:
            temp, length = self.getAndReadIntDword()
            #specific case
            if topElement["name"] == "mNoteSum":
                length = 8 if self.ios else 4 #ok because will be at end
                # Bug fix: the padding must be bytes, not str; the original
                # b"\x04" + "\x00"*n raised TypeError on Python 3.
                temp = b"\x04" + (b"\x00" * 7 if self.ios else b"\x00" * 3)
            if topElement["type"] == 128:
                k = self.resourceHandler(length)
                #print(k)
                buf += k
            else:
                buf += temp
            for x in range(length):
                if topElement["type"] in [1,2]:
                    buf += self.classHandler(log)
                elif topElement["type"] == 32:
                    # Bug fix: same missing-self NameError as classHandler.
                    buf += self.readNullTerminatedString().encode() + b"\x00"
                elif topElement["type"] == 128:
                    pass
                else:
                    buf += self.file.read(topElement["size"])
        self.output.write(self.formatGeneralInt(len(buf) + len(self.formatGeneralInt(0))) + buf)
        self.output.close()
        print("Written to %s" % self.output.name)
"noreply@github.com"
] | Seraphin-.noreply@github.com |
0811d6891a523db246ae901e3caaa94f48a7ec08 | 8fc999f5262b5a2dadc830f1cc345f51b6dde862 | /samples/conceptual_samples/remaining/tuple.py | cc388fd0fa5107256c5ce382ac3135df515ef79c | [] | no_license | pandiyan07/python_2.x_tutorial_for_beginners_and_intermediate | 5ca5cb5fcfe7ce08d109fb32cdf8138176ac357a | a4c14deaa518fea1f8e95c2cc98783c8ca3bd4ae | refs/heads/master | 2022-04-09T20:33:28.527653 | 2020-03-27T06:35:50 | 2020-03-27T06:35:50 | 250,226,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | # this sample python script program is been created to demonstrate the tuple packing and tuple unpacking.
# Python 2 script demonstrating tuple packing (the ``data`` literal) and
# tuple unpacking (the multi-target assignment below).
data=("Name: pandiyan","Wannabe: I want to be a pythoneer","Nationality: indian","Proffession: hacker","Mothertounge: tamil")
# Unpack the five-element tuple into individually named variables.
name,wannabe,nationality,proffession,mothertounge=data
def details():
    # Print each unpacked field (Python 2 print statements).
    print name
    print wannabe
    print nationality
    print proffession
    print mothertounge
print"Are you sure that you want to see my details ..??\t(y/n)"
# raw_input is Python 2 only (renamed to input in Python 3).
option=raw_input("> ")
if option=='y':
    details()
elif option=='n':
    print'thank you for opening this file \n now just get lost..!!'
else:
    print"please enter 'y' for yes or enter 'n' for no"
#the end of the program file . happy coding..!!
| [
"becool.pandiyan@gmail.com"
] | becool.pandiyan@gmail.com |
13738c8961c445bffba50b67d9f0696f92910984 | 2f2682f778512a75a1ff49d7e267c2f4d355c48e | /geoprocess/logging.py | ebf3b24a9a5d6e989a24983429c4050591a40109 | [] | no_license | beatcovid/geoprocess | 4a44f46b900c2e0ffed0dab18008e7884e759e3b | c2a7b1e4ede06583679db9dadebe2066b0274e54 | refs/heads/master | 2023-04-13T13:45:48.572825 | 2020-05-27T03:08:14 | 2020-05-27T03:08:14 | 260,215,049 | 0 | 1 | null | 2023-03-29T00:36:19 | 2020-04-30T13:11:38 | Python | UTF-8 | Python | false | false | 108 | py | import logging
# Configure the root logger once at import time (INFO level, default format).
logging.basicConfig(level=logging.INFO,)
# Module-wide logger for the geoprocess package.
logger = logging.getLogger("beatcovid.geoprocess")
| [
"nc9@protonmail.com"
] | nc9@protonmail.com |
3a51dd45ebf1dc29cfcc24dd9e715b0b50be80f4 | 38f8507ae5679309e862cdaafc565ad5e8bd7b04 | /tests/test_instagram.py | cb3a2d437244dc4d6ec558958e776bad079b01df | [] | no_license | ishandutta2007/pyinstagram | 6abc9dbe99153c6a4d589e68df4257d24402003c | 06ca6a5d6f53dc6e1868dc68c1055f0d87b36fb2 | refs/heads/master | 2021-01-09T19:16:47.697882 | 2017-05-23T16:07:51 | 2017-05-23T16:07:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from __future__ import unicode_literals, print_function
import os
import unittest
from pyinstagram.instagram import Instagram
from pyinstagram.response import Sync, Challenge
from pyinstagram.setting import Setting
from pyinstagram.response.super import Response
class InstagramTester(unittest.TestCase):
    """Integration-style tests for the Instagram client login/sync flow."""
    @classmethod
    def setUpClass(cls):
        # Shared file-backed settings store used by every test method.
        Setting.create_instance('file', {
            'base_directory': './sessions'
        })
    @classmethod
    def tearDownClass(cls):
        # Remove the test user's session, then the (now empty) directory tree.
        Setting.instance().delete_user('testuser')
        os.removedirs('./sessions')
    def setUp(self):
        # a new Instagram instance for every test methods
        self.instagram = Instagram(Setting.instance())
    def test_set_user(self):
        """set_user should populate the username on the client."""
        self.assertIsNone(self.instagram.username)
        self.instagram.set_user('testuser', 'testpassword')
        # Bug fix: assertEquals is a deprecated alias that was removed in
        # Python 3.12; use assertEqual.
        self.assertEqual(self.instagram.username, 'testuser')
    def test_sync_features_pre_login(self):
        """Pre-login sync and signup-challenge calls both return a Response."""
        self.instagram.set_user('testuser', 'testpassword')
        response = self.instagram.sync_features(True)
        self.assertIsInstance(response, Response)
        self.instagram.set_user('testuser', 'testpassword')
        response = self.instagram.get_signup_challenge()
        self.assertIsInstance(response, Response)
| [
"me@eseom.org"
] | me@eseom.org |
b49cc96396beee95aa535d05b7ed2be3897f7ec1 | 1160a607aa9b445ba96674f4e3b86079fede9bdc | /fichasManage/utils.py | bedae7e98cc8ae74ed1ceee6136f557b2de2618a | [] | no_license | astandre/fichas-geologicas-cliente | 70820bca77c9ffa4de28d207ff84490205a8cc56 | 90ae40afd6aa4a331316e5106950a8406a38cf1f | refs/heads/master | 2022-12-12T03:05:29.945240 | 2019-02-04T18:46:44 | 2019-02-04T18:46:44 | 165,874,901 | 0 | 0 | null | 2021-06-10T21:07:43 | 2019-01-15T15:26:21 | Python | UTF-8 | Python | false | false | 6,608 | py | from .constants import *
def _map_code(container, key, mapping):
    """Replace container[key] with mapping[container[key]] in place.

    Absent keys are ignored; unknown codes print "Key error" and keep their
    original value, matching the original per-field behaviour.
    """
    if key in container:
        try:
            container[key] = mapping[container[key]]
        except KeyError:
            print("Key error")


def build_ficha_geologica(ficha):
    """Translate the coded values of a geological field sheet (ficha) into
    their display equivalents using the lookup tables from constants.

    Mutates *ficha* in place and also returns it.
    """
    _map_code(ficha, "nomenclaturaUnidadGeologica", UNIDAD_GEOLOGICA)
    # NOTE(review): the original mapped all of the fields below through
    # UNIDAD_GEOLOGICA as well -- verify dedicated tables were not intended.
    _map_code(ficha, "tipoContactoGeo", UNIDAD_GEOLOGICA)
    _map_code(ficha, "limiteContactoGeo", UNIDAD_GEOLOGICA)
    _map_code(ficha, "certezaContactoGeo", UNIDAD_GEOLOGICA)
    _map_code(ficha, "origenRoca", UNIDAD_GEOLOGICA)
    _map_code(ficha, "estructuraRoca", UNIDAD_GEOLOGICA)
    if "pliegue" in ficha:
        pliegue = ficha["pliegue"]
        _map_code(pliegue, "tipo", PLIEGUE_TIPO)
        # Bug fix: the original wrote the mapped value to ficha["posicion"]
        # instead of updating the nested dict like every sibling field.
        _map_code(pliegue, "posicion", PLIEGUE_POSICION)
        _map_code(pliegue, "anguloEntreFlancos", PLIEGUE_ANGULO_ENTRE_FLANCOS)
        _map_code(pliegue, "perfil", PLIEGUE_PERFIL)
        _map_code(pliegue, "sistema", PLIEGUE_SISTEMA)
    if "eslineal" in ficha:
        eslineal = ficha["eslineal"]
        _map_code(eslineal, "lineacion", EST_LINEAL_LINEAMIENTO)
        _map_code(eslineal, "claseEstrLineal", EST_LINEAL_CLASE)
        _map_code(eslineal, "buzamiento", EST_LINEAL_BUZAMIENTO)
        _map_code(eslineal, "asociacion", EST_LINEAL_ASOCIACION)
        _map_code(eslineal, "formacion", EST_LINEAL_FORMACION)
        _map_code(eslineal, "diaclasaClase", EST_LINEAL_DIACLASA_OR_ROCAS)
    if "esplanar" in ficha:
        esplanar = ficha["esplanar"]
        _map_code(esplanar, "buzamientoIntensidad", EST_PLANAR_BUZ_INTEN)
        _map_code(esplanar, "clivaje", EST_PLANAR_CLIVAJE)
        _map_code(esplanar, "estratificacion", EST_PLANAR_ESTRAT)
        _map_code(esplanar, "fotogeologia", EST_PLANAR_FOTO)
        _map_code(esplanar, "zonaDeCizalla", EST_PLANAR_ZONA)
        _map_code(esplanar, "rocasMetaforicas", EST_LINEAL_DIACLASA_OR_ROCAS)
        _map_code(esplanar, "rocasIgneas", EST_LINEAL_DIACLASA_OR_ROCAS)
    if "afloramiento" in ficha:
        afloramiento = ficha["afloramiento"]
        _map_code(afloramiento, "dimension", AFL_DIMEN)
        _map_code(afloramiento, "origen", AFL_ORIGEN_ROCA)
        _map_code(afloramiento, "tipoRoca", AFL_TIPO_ROCA)
        _map_code(afloramiento, "sitio", AFL_SITIO)
    return ficha
| [
"andreherrera97@hotmail.com"
] | andreherrera97@hotmail.com |
4b30719c3f5f5b493869761eb8a80fdea7bb31ee | a58d476ee560eb25ec636ce8a66c243d76d322f1 | /.history/xunit_20200217210458.py | 567232855cd75f59dcd77f60e4775aec5b61b502 | [] | no_license | shellzu/tdd | ad0eebfececb3b390b99b6ebdb9b9f224f92d8ac | 644d55c0b647ff5656de57f25048d21de0187a7c | refs/heads/master | 2021-01-05T17:39:50.284874 | 2020-03-13T12:04:41 | 2020-03-13T12:04:41 | 241,092,421 | 0 | 0 | null | 2020-03-13T11:29:00 | 2020-02-17T11:35:39 | Python | UTF-8 | Python | false | false | 205 | py | test = WasRun("testMethod")
print(test.wasRun)
test.testMethod()
print(test.wasRun)
class WasRun:
def __init__(self, name):
self.wasRun = None
def testMethod(self):
self.wasRun = 1 | [
"kaizushiori@kaitsushiorinoMacBook-Air.local"
] | kaizushiori@kaitsushiorinoMacBook-Air.local |
7e0507aea1db4aa4dd1f74e39c165a93bd220915 | a9399b8162010a5f4ba8460acba725a626436fca | /.c9/metadata/environment/cart/models.py | 69d3aeaad5fb0f4924d5a3410c274b582df23cd3 | [] | no_license | johnny-don/crafty-django | 266a480b1f76ae133b805e4d8e70fd929c7fc399 | 3526c9e5c492005491dc1d6fc602136e0943f659 | refs/heads/master | 2022-12-22T19:10:40.708724 | 2020-03-29T17:18:03 | 2020-03-29T17:18:03 | 248,786,605 | 0 | 1 | null | 2022-11-22T05:07:56 | 2020-03-20T15:16:15 | Python | UTF-8 | Python | false | false | 418 | py | {"filter":false,"title":"models.py","tooltip":"/cart/models.py","ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":0,"column":0},"end":{"row":0,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"hash":"bb2ade0b5906d4c34ad5489e26929421fa78ce5e","undoManager":{"mark":-1,"position":-1,"stack":[]},"timestamp":1585136435784} | [
"johnnydonnellan7@gmail.com"
] | johnnydonnellan7@gmail.com |
d52becaa8c882ebedbde683171421ae43a6d6d7b | 79d3fd089addc6a13ff1a83617398ffd1a0880b0 | /topics/complex_numbers.py | 5ecc6a01cc16be43797347bd88d1af7ab792b75a | [] | no_license | stoeckley/manim | 1ee27f5c73d028b5b1bd948c6067508a9e393d7b | 0af9b3005cb659c98226c8ad737bfc1e7b97517f | refs/heads/master | 2021-05-31T19:34:34.098497 | 2016-01-17T02:08:51 | 2016-01-17T02:08:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,362 | py | from helpers import *
from number_line import NumberPlane
from animation.transform import ApplyPointwiseFunction
from animation.simple_animations import Homotopy
from scene import Scene
def complex_string(complex_num):
    """Return str(complex_num) with any parentheses stripped, e.g. '1+2j'.

    Bug fix: the original used filter(), which returns a lazy iterator on
    Python 3; joining the kept characters always yields a str.
    """
    return "".join(c for c in str(complex_num) if c not in "()")
class ComplexPlane(NumberPlane):
    """A NumberPlane whose coordinates are addressed as complex numbers."""
    DEFAULT_CONFIG = {
        "color" : GREEN,
        "unit_to_spatial_width" : 1,
        "line_frequency" : 1,
        "faded_line_frequency" : 0.5,
        "number_at_center" : complex(0),
    }
    def __init__(self, **kwargs):
        """Translate complex-plane config into NumberPlane's x/y config."""
        digest_config(self, kwargs)
        kwargs.update({
            "x_unit_to_spatial_width" : self.unit_to_spatial_width,
            "y_unit_to_spatial_height" : self.unit_to_spatial_width,
            "x_line_frequency" : self.line_frequency,
            "x_faded_line_frequency" : self.faded_line_frequency,
            "y_line_frequency" : self.line_frequency,
            "y_faded_line_frequency" : self.faded_line_frequency,
            "num_pair_at_center" : (self.number_at_center.real,
                                    self.number_at_center.imag),
        })
        NumberPlane.__init__(self, **kwargs)
    def number_to_point(self, number):
        """Map a complex number to its point in the scene."""
        number = complex(number)
        return self.num_pair_to_point((number.real, number.imag))
    def get_coordinate_labels(self, *numbers):
        """Build TexMobject labels for *numbers*; defaults to the integer
        lattice points along the real and imaginary axes."""
        result = []
        nudge = 0.1*(DOWN+RIGHT)
        if len(numbers) == 0:
            # NOTE(review): Python 2 idiom -- "range(...) += list" only works
            # when range returns a list; this is not valid on Python 3.
            numbers = range(-int(self.x_radius), int(self.x_radius))
            numbers += [
                complex(0, y)
                for y in range(-int(self.y_radius), int(self.y_radius))
            ]
        for number in numbers:
            point = self.number_to_point(number)
            if number == 0:
                num_str = "0"
            else:
                # Display the mathematician's "i" rather than Python's "j".
                num_str = str(number).replace("j", "i")
            num = TexMobject(num_str)
            num.scale(self.number_scale_factor)
            num.shift(point-num.get_corner(UP+LEFT)+nudge)
            result.append(num)
        return result
    def add_coordinates(self, *numbers):
        """Add coordinate labels to the plane; returns self for chaining."""
        self.add(*self.get_coordinate_labels(*numbers))
        return self
    def add_spider_web(self, circle_freq = 1, angle_freq = np.pi/6):
        """Overlay concentric circles and radial lines (a polar grid);
        returns self for chaining."""
        self.fade(self.fade_factor)
        config = {
            "color" : self.color,
            "density" : self.density,
        }
        for radius in np.arange(circle_freq, SPACE_WIDTH, circle_freq):
            self.add(Circle(radius = radius, **config))
        for angle in np.arange(0, 2*np.pi, angle_freq):
            end_point = np.cos(angle)*RIGHT + np.sin(angle)*UP
            end_point *= SPACE_WIDTH
            self.add(Line(ORIGIN, end_point, **config))
        return self
class ComplexFunction(ApplyPointwiseFunction):
    """Animation applying a complex-valued function to a mobject's points."""
    def __init__(self, function, mobject = ComplexPlane, **kwargs):
        if "path_func" not in kwargs:
            # Default to an arc path whose angle is the argument of f(1),
            # so points sweep along arcs rather than straight lines.
            self.path_func = path_along_arc(
                np.log(function(complex(1))).imag
            )
        # NOTE: Python 2 tuple-parameter lambda below; this syntax is not
        # valid on Python 3.
        ApplyPointwiseFunction.__init__(
            self,
            lambda (x, y, z) : complex_to_R3(function(complex(x, y))),
            instantiate(mobject),
            **kwargs
        )
class ComplexHomotopy(Homotopy):
    """Homotopy driven by a complex map C x [0, 1] -> C."""
    def __init__(self, complex_homotopy, mobject = ComplexPlane, **kwargs):
        """
        Complex Homotopy: a function C x [0, 1] to C
        """
        # Takes one (x, y, z, t) tuple, matching the original Python 2
        # tuple-parameter signature (now also valid on Python 3).
        def homotopy(point_and_time):
            x, y, z, t = point_and_time
            c = complex_homotopy((complex(x, y), t))
            return (c.real, c.imag, z)
        # Bug fix: the original forwarded *args, a name that was never
        # defined in this scope (NameError at runtime).
        Homotopy.__init__(self, homotopy, mobject, **kwargs)
class ComplexMultiplication(Scene):
    """Scene animating multiplication of the complex plane by a constant."""
    @staticmethod
    def args_to_string(multiplier, mark_one = False):
        """Encode scene arguments as a string (inverse of string_to_args)."""
        num_str = complex_string(multiplier)
        arrow_str = "MarkOne" if mark_one else ""
        return num_str + arrow_str
    @staticmethod
    def string_to_args(arg_string):
        """Decode a scene-argument string into (multiplier, mark_one)."""
        # NOTE(review): this splits on whitespace, but args_to_string joins
        # the parts with no separator -- confirm the two round-trip.
        parts = arg_string.split()
        multiplier = complex(parts[0])
        mark_one = len(parts) > 1 and parts[1] == "MarkOne"
        return (multiplier, mark_one)
    def construct(self, multiplier, mark_one = False, **plane_config):
        """Set up the plane, background and dots, then (for this exact class)
        run the multiplication animation."""
        norm = np.linalg.norm(multiplier)
        arg = np.log(multiplier).imag
        plane_config["faded_line_frequency"] = 0
        plane_config.update(DEFAULT_PLANE_CONFIG)
        if norm > 1 and "density" not in plane_config:
            plane_config["density"] = norm*DEFAULT_POINT_DENSITY_1D
        if "radius" not in plane_config:
            radius = SPACE_WIDTH
            # Shrinking maps need a larger starting radius to fill the frame.
            if norm > 0 and norm < 1:
                radius /= norm
        else:
            radius = plane_config["radius"]
        plane_config["x_radius"] = plane_config["y_radius"] = radius
        plane = ComplexPlane(**plane_config)
        self.plane = plane
        self.add(plane)
        # plane.add_spider_web()
        self.anim_config = {
            "run_time" : 2.0,
            "path_func" : path_along_arc(arg)
        }
        plane_config["faded_line_frequency"] = 0.5
        background = ComplexPlane(color = "grey", **plane_config)
        # background.add_spider_web()
        labels = background.get_coordinate_labels()
        self.paint_into_background(background, *labels)
        self.mobjects_to_move_without_molding = []
        if mark_one:
            self.draw_dot("1", 1, True)
        self.draw_dot("z", multiplier)
        self.mobjects_to_multiply = [plane]
        self.additional_animations = []
        self.multiplier = multiplier
        # Subclasses set up extra state first and trigger the animation later.
        if self.__class__ == ComplexMultiplication:
            self.apply_multiplication()
    def draw_dot(self, tex_string, value, move_dot = False):
        """Place a labelled dot at complex *value*; optionally register it to
        be translated (not molded) by the multiplication."""
        dot = Dot(
            self.plane.number_to_point(value),
            radius = 0.1*self.plane.unit_to_spatial_width,
            color = BLUE if value == 1 else YELLOW
        )
        label = TexMobject(tex_string)
        label.shift(dot.get_center()+1.5*UP+RIGHT)
        arrow = Arrow(label, dot)
        self.add(label)
        self.play(ShowCreation(arrow))
        self.play(ShowCreation(dot))
        self.dither()
        self.remove(label, arrow)
        if move_dot:
            self.mobjects_to_move_without_molding.append(dot)
        return dot
    def apply_multiplication(self):
        """Play the multiplication: mold registered mobjects through the map,
        translate the others, and run any additional animations."""
        # NOTE: Python 2 tuple-parameter function; not valid Python 3 syntax.
        def func((x, y, z)):
            complex_num = self.multiplier*complex(x, y)
            return (complex_num.real, complex_num.imag, z)
        mobjects = self.mobjects_to_multiply
        mobjects += self.mobjects_to_move_without_molding
        mobjects += [anim.mobject for anim in self.additional_animations]
        self.add(*mobjects)
        full_multiplications = [
            ApplyMethod(mobject.apply_function, func, **self.anim_config)
            for mobject in self.mobjects_to_multiply
        ]
        movements_with_plane = [
            ApplyMethod(
                mobject.shift,
                func(mobject.get_center())-mobject.get_center(),
                **self.anim_config
            )
            for mobject in self.mobjects_to_move_without_molding
        ]
        self.dither()
        self.play(*reduce(op.add, [
            full_multiplications,
            movements_with_plane,
            self.additional_animations
        ]))
        self.dither()
| [
"grantsanderson7@gmail.com"
] | grantsanderson7@gmail.com |
4c09cf2886294fd42abc9d53402ca8a349491871 | 5a652f995e4c0fdce505b88d9aff713803585d29 | /experiments/k_value_sat.py | e852fdbbdd5e9aaacf9782879204745b6e515b85 | [
"CC0-1.0"
] | permissive | Enrico-Call/SAT | 5f3e74c4f870e1151563c49fc500d2cf6fc4482a | 7f3dc01455e69ba2b7edb158e60305a24b6e6aea | refs/heads/main | 2023-08-15T05:13:02.124016 | 2021-10-12T07:52:22 | 2021-10-12T07:52:22 | 416,224,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,193 | py | from sys import stdin
from copy import copy, deepcopy
import time
import argparse
import numpy
def parseFileInput(in_file, cnf):
    """Append the clauses of a DIMACS-style stream to *cnf* and return it.

    Lines beginning with "p" or "c" are skipped; a 0 token terminates the
    current clause.  The trailing (unterminated) clause is discarded.
    """
    current = []
    cnf.append(current)
    for raw_line in in_file:
        fields = raw_line.split()
        if not fields or fields[0] in ("p", "c"):
            continue
        for field in fields:
            literal = int(field)
            if literal == 0:
                current = []
                cnf.append(current)
            else:
                current.append(literal)
    cnf.pop()
    return cnf
def transformSudoku(in_file):
    # Write the givens of a sudoku description to "sudoku.txt" as DIMACS-style
    # unit clauses, encoding each cell as <row><col><digit>.
    # NOTE(review): row and col are re-initialised to 1 on EVERY iteration, so
    # every non-'.' entry is written with position "11".  The counters look
    # like they were meant to be initialised once before the loop -- confirm
    # the intended input format (one character per line?) before fixing.
    # NOTE(review): `line` still carries its trailing newline when iterating a
    # file, so str(line) embeds it in the output -- verify.
    file = open("sudoku.txt", 'w')
    for line in in_file:
        row = 1
        col = 1
        if line != '.': file.write(str(row) + str(col) + str(line) + ' 0\n')
        col += 1
        if col == 10:
            row += 1
            col = 1
            if row == 10: break
    file.close()
def parse(files):
    """Read the input file(s) and return (is_dimacs, cnf).

    One file: a leading "p" header means DIMACS CNF; otherwise the file is
    treated as a sudoku description and converted via transformSudoku.
    Two files: both are parsed as CNF into a single clause list.
    """
    if len(files) == 1:
        with open(files[0], "r") as single:
            first_line = single.readline()
            if "p" in first_line:
                # DIMACS header found: the rest of the stream is clauses.
                return True, parseFileInput(single, list())
            # No header: treat the file as a raw sudoku description.
            transformSudoku(single)
        return False, list()
    # Two files: the second file's clauses are parsed first, then the first
    # file's clauses are appended to the same list.
    with open(files[0], "r") as first, open(files[1], "r") as second:
        return True, parseFileInput(first, parseFileInput(second, list()))
def assignValue(cnf, lit):
    """Simplify *cnf* in place under the assumption that *lit* is true.

    Clauses containing *lit* are dropped; occurrences of -lit are removed.
    Each touched clause bumps the variable's score in the global `variables`
    dict, and positive assignments are appended to the global `solution`.
    Returns the (mutated) clause list.
    """
    var = abs(lit)
    for clause in list(cnf):
        if lit in clause:
            # Clause is satisfied: remove it entirely.
            cnf.remove(clause)
            variables[var] = variables.get(var, 0) + 2 ** -len(clause)
        if -lit in clause:
            # The opposite polarity can never help now: shrink the clause.
            clause.remove(-lit)
            variables[var] = variables.get(var, 0) + 2 ** -len(clause)
    if lit > 0:
        solution.append(lit)
    return cnf
def unitPropagation(cnf):
    """Assign the first unit clause found, if any.

    Returns (cnf, True) when a unit clause was assigned, else (cnf, False).
    """
    for clause in cnf:
        if len(clause) == 1:
            return assignValue(cnf, clause[0]), True
    return cnf, False
def pureLiteralElimination(cnf):
    """Assign the first pure literal found, if any.

    A literal is pure when its negation appears in no clause.  Returns
    (cnf, True) when a pure literal was assigned, else (cnf, False).
    """
    for clause in cnf:
        for candidate in clause:
            if not any(-candidate in other for other in cnf):
                return assignValue(cnf, candidate), True
    return cnf, False
def printSudoku(literals):
    """Render literals encoded as <row><col><digit> as a 9x9 grid on stdout."""
    grid = [[0] * 9 for _ in range(9)]
    for lit in literals:
        encoded = str(lit)
        grid[int(encoded[0]) - 1][int(encoded[1]) - 1] = int(encoded[2])
    for row in grid:
        print(row)
def createOutFile(filename, literals):
    """Write each literal as a DIMACS unit clause ('<lit> 0') to *filename*.

    BUG FIX: the original ended with `file.close` (missing parentheses), so
    the file was never explicitly closed/flushed; a `with` block now
    guarantees it.
    """
    with open(filename, "w") as file:
        for lit in literals:
            file.write(str(lit) + ' 0\n')
def chooseLit(cnf):
    """Select the next branching literal according to the global `heuristic`
    (1 = first literal of first clause, 2 = MOM, 3 = Jeroslow-Wang) and
    bump the global split counter."""
    globals()['splits'] += 1
    if heuristic == 1:
        return cnf[0][0]
    if heuristic == 2:
        return MOM(cnf)
    # Any other value falls through to None, as in the original chain.
    return JW(cnf, solution) if heuristic == 3 else None
def DP(cnf):
    """DPLL satisfiability check: returns True iff *cnf* is satisfiable.

    Exhaustively applies unit propagation and pure-literal elimination,
    then branches on a heuristic-chosen literal over independent copies.
    NOTE(review): `solution`/`variables` globals are still not unwound on
    backtracking, matching the original bookkeeping.
    """
    cnf, unit_clause = unitPropagation(cnf)
    while unit_clause:
        cnf, unit_clause = unitPropagation(cnf)
    # BUG FIX: the original looped on unitPropagation here, so at most one
    # pure literal was ever eliminated per call.
    cnf, pure_rule = pureLiteralElimination(cnf)
    while pure_rule:
        cnf, pure_rule = pureLiteralElimination(cnf)
    if len(cnf) == 0:
        return True   # every clause satisfied
    if [] in cnf:
        return False  # conflict: an empty clause remains
    lit = chooseLit(cnf)
    # BUG FIX: assignValue mutates its argument, and the original reused one
    # deepcopy for both branches -- the -lit branch saw the lit branch's
    # deletions.  Each branch now starts from its own copy.
    if DP(assignValue(deepcopy(cnf), lit)):
        return True
    return DP(assignValue(deepcopy(cnf), -lit))
def MOM(cnf):
    """Pick a branching literal via the MOM heuristic (Maximum Occurrences
    in clauses of Minimum size), using the global exponent `k`.

    FIXES: removed the unused `bestValue`; the returned literal no longer
    depends on the loop variable leaked from the counting pass when every
    score is 0 -- a fallback candidate is now chosen deterministically.
    """
    minClause = min(len(clause) for clause in cnf)
    # Count occurrences of each literal within the minimum-size clauses.
    count = dict()
    for clause in cnf:
        if len(clause) == minClause:
            for lit in clause:
                count[lit] = count.get(lit, 0) + 1
    best = None
    maxFunction = -1  # accept a 0 score so `best` is always bound
    for val in count:
        # NOTE(review): classic MOM scores (f(x)+f(-x))*2^k + f(x)*f(-x);
        # this keeps the original product-based formula -- confirm intent.
        function = (count[val] * count.get(-val, 0)) * 2 ** k + count[val] * count.get(-val, 0)
        if function > maxFunction:
            maxFunction = function
            best = val
    return best
def JW(cnf, literals):
    """Jeroslow-Wang heuristic: each occurrence of a variable adds
    2**-len(clause) to its score; the highest-scored variable wins.

    NOTE: `literals` is unused, and scores are accumulated into the global
    `variables` dict in place (the original aliased it as `count`).
    """
    scores = variables  # deliberate alias -- updates persist globally
    for clause in cnf:
        for lit in clause:
            scores[abs(lit)] = scores.get(abs(lit), 0) + 2 ** -len(clause)
    return max(scores, key=scores.get)
def main():
    """Time one DP run over the global `cnf` and append a stats line
    (elapsed seconds, split count, current k) to the global `ris` log."""
    started = time.time()
    DP(cnf)  # satisfiability result is computed but not reported here
    elapsed = time.time() - started
    stats = str(elapsed) + ' seconds ' + str(splits) + ' splits ' + str(k) + ' k value ' + '\n'
    ris.write(stats)
def parseArguments():
    """Parse the command line: -S selects the heuristic (int) and one or
    more positional input files are required.

    Returns (heuristic_or_None, files).  FIX: the original constructed
    `ArgumentParser()` twice; the redundant instantiation is removed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-S", type=int)
    parser.add_argument("files", nargs='+')
    args = parser.parse_args()
    return args.S, args.files
# --- Benchmark driver: sweep MOM's k exponent over 40 "Hard" sudoku files ---
solution, variables = list(), dict()  # solver state shared via globals (mutated by assignValue)
heuristic = 2  # 1 = first literal, 2 = MOM, 3 = Jeroslow-Wang (see chooseLit)
ris = open("ResultsHard.txt", 'w')  # results log; one line per (puzzle, k) run
k = 0  # MOM exponent, overwritten by the inner sweep below
for i in range(1, 41):
    globals()['splits'] = 0  # per-puzzle split counter, incremented by chooseLit
    print(i)
    files = ['sudoku-rules.txt', 'Hard%s.txt' % (i)]
    execute, cnf = parse(files)
    if execute:
        # NOTE(review): this inner loop reuses the name `i`, shadowing the
        # puzzle index; harmless here since `i` is not read again afterwards.
        for i in numpy.arange(0, 4, 0.5):
            k = i
            main()
ris.close()
| [
"noreply@github.com"
] | Enrico-Call.noreply@github.com |
37fe0c69cf9ec213d248fdf548f5f32c16d4f273 | 22f010149a94fd72a5ec86133393460bafbf6fa5 | /-Python-for-Everybody-Specialization-master/Coursera---Using-Python-to-Access-Web-Data-master/Week-2/Extracting Data With Regular Expressions.py | 262a968a7accdc78fc8d96de23da919c15607d78 | [] | no_license | Pdshende/-Python-for-Everybody-Specialization-master | ef3a53ee2a152f05a530cb9bc552c2179e6c9544 | b25747d050a1ea1034136c8db0bd26430b7417f1 | refs/heads/master | 2022-09-07T13:32:21.998515 | 2020-06-01T14:46:35 | 2020-06-01T14:46:35 | 267,398,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | import time
import re
# Sum every integer found in the input file using a regular expression.
start = time.time()
# BUG FIX: the original shadowed the builtin `sum`, never closed the file,
# and printed the builtin `list` class instead of the computed total.
total = 0
with open("regex_sum_417433.txt", 'r') as file:
    for line in file:
        for num in re.findall('[0-9]+', line):
            total += int(num)  # matched digits are always non-negative
print(total)
end = time.time()
print("The total excecution Time for this code is sec", (end - start))
# Output :-
# Answer = 331308
| [
"noreply@github.com"
] | Pdshende.noreply@github.com |
41409f82ccd2588398fdf051d1696b159d04542a | b122b0d43455c6af3344e4319bead23bb9162dac | /instagram/insta_hossem.py | 2dea5a7824669a6cb69d3c39770d92c21c404dde | [] | no_license | firchatn/scripts-python | 85c7704170404f8a2e531164258f6c8b7e0d27f8 | 25a6a298aae279f23f08c2ce4674d866c2fca0ef | refs/heads/master | 2021-03-16T06:09:23.484585 | 2018-11-02T10:09:36 | 2018-11-02T10:09:36 | 105,776,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
# NOTE(review): Selenium-driven Instagram automation.  It is tightly coupled
# to obfuscated 2017-era DOM class names (e.g. '_b93kq'), which Instagram
# rotates frequently -- expect every XPath selector below to need updating.
browser = webdriver.Firefox()
browser.get('http://instagram.com')
time.sleep(10)  # crude fixed wait for the landing page to render
browser.find_element_by_xpath("//a[contains(@class, '_b93kq')]").click()  # open the login form
compte = ''  # account username ("compte" = French for account) -- fill in before running
password = ''  # account password -- fill in before running
follow = 'css'  # search term whose first result's followers get followed
user_name = browser.find_element_by_name('username')
user_name.clear()
user_name.send_keys(compte)
password_el = browser.find_element_by_name('password')
password_el.clear()
password_el.send_keys(password)
password_el.send_keys(Keys.RETURN)  # submit the login form
time.sleep(5)
# Type the search term into the header search box.
search = browser.find_element_by_xpath("//input[contains(@class, '_avvq0 _o716c')]")
time.sleep(5)
search.send_keys(follow)
time.sleep(5)
# Click through: first search suggestion, then a post tile, then a profile link.
browser.find_element_by_xpath("//span[contains(@class, '_sgi9z')]").click()
time.sleep(5)
browser.find_element_by_xpath("//div[contains(@class, '_mck9w _gvoze _f2mse')]").click()
time.sleep(5)
browser.find_element_by_xpath("//a[contains(@class, '_nzn1h _gu6vm')]").click()
time.sleep(3)
print("list now")
# Collect Follow/Following buttons.  NOTE(review): `list` shadows the builtin.
list = browser.find_elements(By.XPATH, "//button[contains(@class, '_qv64e _gexxb _4tgw8 _njrw0')]")
time.sleep(3)
# Click "Follow" on the first five buttons; assumes at least 5 were found --
# TODO confirm, otherwise this raises IndexError.
for i in range(5):
    if list[i].text == 'Follow':
        list[i].click()
| [
"firaschaabencss@gmail.com"
] | firaschaabencss@gmail.com |
d25c31f1bf4a4fe5bfe3e31be5b3e8435213d236 | ad38d8b669a6e173773ee4eb61ace40d6b508e21 | /setup.py | 25a29626aa99ce9d64ae330b3062737e5c27f025 | [] | no_license | CJWorkbench/intercom | c3bf3eb407ea7c36460cb3ada8359e42938f31c9 | c8da8e94584af7d41e350b9bf580bcebc035cbc1 | refs/heads/main | 2021-06-19T01:16:32.996932 | 2021-03-19T20:47:58 | 2021-03-19T20:47:58 | 192,569,734 | 0 | 0 | null | 2021-03-19T20:48:41 | 2019-06-18T15:44:12 | Python | UTF-8 | Python | false | false | 392 | py | #!/usr/bin/env python
from setuptools import setup
setup(
name="intercom",
version="0.0.1",
description="Download user lists from Intercom",
author="Adam Hooper",
author_email="adam@adamhooper.com",
url="https://github.com/CJWorkbench/intercom",
packages=[""],
py_modules=["libraryofcongress"],
install_requires=["pandas==0.25.0", "cjwmodule>=1.3.0"],
)
| [
"adam@adamhooper.com"
] | adam@adamhooper.com |
341dcccb33672529c11a39e5c43c69ed1a42dcde | 9ddc7e34e287e8a072fea7eeb36de9cf91749852 | /sample.py | d924459c66bbb0ceb7b0652796fab9b0f448a5c3 | [
"MIT"
] | permissive | ntjmy/d-wave | 8bddaad2dac282bff99dbac88bd1d5f8af038087 | 479493ef3c6767a648608e468f9c9b4960d8c8ef | refs/heads/main | 2023-03-30T16:42:49.064384 | 2021-04-08T06:19:34 | 2021-04-08T06:19:34 | 355,774,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | import blueqat.wq as wq
a = wq.Opt()  # blueqat wq optimizer: QUBO front-end for annealing backends
a.dwavetoken = "your token here"  # D-Wave Leap API token -- must be set before a.dw()
# Upper-triangular QUBO matrix over 5 binary variables.
a.qubo = [[0,0,0,0,-4],[0,2,0,0,-4],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,4]]
a.dw()  # submit the problem to the D-Wave sampler (network call)
| [
"noreply@github.com"
] | ntjmy.noreply@github.com |
59bee76a747787225a3bd75e5a79388a59bf6b75 | aaf7124eff8766e83d302e162f2e3364b9fd3cc0 | /shop/models.py | b49ac3899b5bb7e808c3af4001c32bf0f5d96426 | [] | no_license | mr-vaibh/evolves--old | cfa221b61480a77335e3d621df2ec235f25357b0 | 3d28c71d4df4dd50f47524992f2ccd9b068abc42 | refs/heads/master | 2022-04-15T18:13:08.086759 | 2020-03-30T22:41:05 | 2020-03-30T22:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,954 | py | from django.db import models
from django.contrib.postgres.fields import JSONField
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
import os
# Function to make right product image file path
def get_upload_path(instance, filename):
    """Build the media path for a product image: one directory per product
    URL slug, keeping the uploaded file's own name.

    BUG FIX: the original ignored *filename* and returned a literal
    "(unknown)" final segment, so every upload for a product mapped to the
    same path.
    """
    return f"shop/img/product/{instance.url}/{filename}"
# Create your models here.
class Country(models.Model):
    """Lookup table of country names."""
    # Required country name.
    country_name = models.CharField(max_length=50, blank=False)
    def __str__(self):
        return self.country_name
class Wishlist(models.Model):
    """A named wishlist.

    NOTE(review): `wishlist` is a free-form TextField rather than a relation
    to Product -- presumably a serialized list of items; verify against the
    views that read it.
    """
    name = models.CharField(max_length=50, default='')
    wishlist = models.TextField(default=None, null=True, blank=True)
    def __str__(self):
        return self.name
class Address(models.Model):
    """A named free-form postal address (stored as unstructured text)."""
    name = models.CharField(max_length=50, default='')
    address = models.TextField(default=None, null=True, blank=True)
    def __str__(self):
        return self.name
class Category(models.Model):
    """Top-level product category (referenced by Product.category)."""
    category_name = models.CharField(max_length=50, default='', blank=False)
    def __str__(self):
        return self.category_name
class Product(models.Model):
    """A catalogue item with pricing, descriptions, and up to six images."""
    product_name = models.CharField(max_length=100, default='')
    category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='cat')
    sub_category = models.CharField(max_length=50, default='')
    price = models.PositiveIntegerField(blank=False)
    short_description = models.CharField(max_length=500)
    description = models.TextField()
    # Discounted price; None means no discount is active.
    new_price = models.PositiveIntegerField(null=True,blank=True)
    pub_date = models.DateField()
    # NOTE(review): max_length has no effect on an integer field, so the
    # intended 0-99 rating range is not actually enforced -- confirm intent.
    rate = models.PositiveSmallIntegerField(default=0, blank=False, max_length=2)
    warranty = models.CharField(max_length=20, default='No')
    replacement = models.CharField(max_length=20, default='No')
    available = models.BooleanField(default=True)
    # NOTE(review): get_random_string(...) is evaluated ONCE at import time,
    # so every Product created without an explicit url shares the same slug;
    # a callable default would generate a fresh value per instance.
    url = models.CharField(max_length=32, default=get_random_string(length=32))
    # Image slots; img1 is required, the rest optional, all under the
    # per-product path produced by get_upload_path.
    product_img1 = models.ImageField(upload_to=get_upload_path,default='shop/img/product/default.png',blank=False)
    product_img2 = models.ImageField(upload_to=get_upload_path,default='shop/img/product/default.png',blank=True)
    product_img3 = models.ImageField(upload_to=get_upload_path,default='shop/img/product/default.png',blank=True)
    product_img4 = models.ImageField(upload_to=get_upload_path,default='shop/img/product/default.png',blank=True)
    product_img5 = models.ImageField(upload_to=get_upload_path,default='shop/img/product/default.png',blank=True)
    product_img6 = models.ImageField(upload_to=get_upload_path,default='shop/img/product/default.png',blank=True)
    def __str__(self):
        return self.product_name
class Slider(models.Model):
    """A homepage carousel entry: featured product name, blurb, price, image."""
    product = models.CharField(max_length=26)
    description = models.CharField(max_length=620)
    price = models.PositiveIntegerField(blank=False)
    img = models.ImageField(upload_to='shop/img/slider/', blank=False)
    def __str__(self):
        return self.product
"mrvaibh0@gmail.com"
] | mrvaibh0@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.