blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
54e7a36e8c1c43a18f1b15cc9f17f5b2da8244a5 | 22de047f94e0739fc810a1787b959ed120518677 | /model/OurMCS/batch.py | 35886ad504a35cc5d3ac40604bad346360dc852d | [] | no_license | zhouaimin/GLSearch | 725c58bf593ed6a7c79f65a3d12eb116eaf8bd10 | 2bfd5269cf0123edd1d687c57e95a43d04fc017b | refs/heads/main | 2023-06-06T12:54:10.799459 | 2021-07-01T15:22:23 | 2021-07-01T15:22:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,683 | py | from config import FLAGS
from merged_graph import MergedGraphData
from node_feat import encode_node_features
from pair_processor import preproc_graph_pair, \
postproc_graph_pairs_assign_node_embeds
from torch_geometric.data import Data as PyGSingleGraphData
from torch_geometric.utils import to_undirected
import torch
import networkx as nx
from collections import defaultdict
class BatchData(object):
    """Mini-batch.
    We assume the following sequential model architecture: Merge --> Split.
    Merge: For efficiency, first merge graphs in a batch into a large graph.
    This is only done for the first several `NodeEmbedding` layers.
    Split: For flexibility, split the merged graph into individual pairs.
    The `split_into_pair_list` method should be called immediately
    after the last `NodeEmbedding` layer.
    """

    def __init__(self, batch_gids, dataset):
        """batch_gids: tensor-like of shape (batch_size, 2) holding (gid1, gid2)
        pairs (indexed below as batch_gids[:, 0/1] -- TODO confirm exact type);
        dataset: object exposing look_up_graph_by_gid / look_up_pair_by_gids.
        """
        self.dataset = dataset
        self.merge_data, self.pair_list = self._merge_into_one_graph(
            batch_gids)
        # self.merge = mgd['merge']
        # self.ind_list = mgd['ind_list'] # for split later

    def _merge_into_one_graph(self, batch_gids):
        """Preprocess every gid pair and merge all graphs into one big graph."""
        single_graph_list = []
        # NOTE(review): metadata_list is never populated -- the code that
        # filled it is commented out in _preproc_gid_pair, so
        # from_data_list always receives an empty metadata list here.
        metadata_list = []
        pair_list = []
        # assert len(batch_gids) == 2
        # gids1 = batch_gids[0]
        # gids2 = batch_gids[1]
        gids1 = batch_gids[:, 0]
        gids2 = batch_gids[:, 1]
        assert gids1.shape == gids2.shape
        for (gid1, gid2) in zip(gids1, gids2):
            self._preproc_gid_pair(gid1, gid2, single_graph_list, metadata_list, pair_list)
        assert len(pair_list) == gids1.shape[0] == gids2.shape[0]
        return MergedGraphData.from_data_list(single_graph_list, metadata_list), pair_list

    def _preproc_gid_pair(self, gid1, gid2, single_graph_list, metadata_list, pair_list):
        """Look up one graph pair, convert its graphs to PyG data, collect results."""
        # Convert tensor scalars to plain ints, asserting they are integral.
        gid1 = gid1.item()
        gid2 = gid2.item()
        assert gid1 - int(gid1) == 0
        assert gid2 - int(gid2) == 0
        gid1 = int(gid1)
        gid2 = int(gid2)
        g1 = self.dataset.look_up_graph_by_gid(gid1)
        g2 = self.dataset.look_up_graph_by_gid(gid2)
        pair = self.dataset.look_up_pair_by_gids(g1.gid(), g2.gid())
        preproc_g_list = preproc_graph_pair(g1, g2, pair) # possibly combine
        this_single_graph_list = [self._convert_nx_to_pyg_graph(g.get_nxgraph())
                                  for g in preproc_g_list]
        # this_metadata_list = [(g.nxgraph.graph['dists_max'], g.nxgraph.graph['dists_argmax'])
        #                       for g in preproc_g_list]
        single_graph_list.extend(this_single_graph_list)
        # metadata_list.extend(this_metadata_list)
        pair.assign_g1_g2(g1, g2)
        pair_list.append(pair)

    def _convert_nx_to_pyg_graph(self, g):  # g is a networkx graph object
        """Converts a networkx graph to a PyGSingleGraphData."""
        # Reference: https://github.com/rusty1s/pytorch_geometric/blob/master/torch_geometric/datasets/ppi.py
        if type(g) is not nx.Graph:
            raise ValueError('Input graphs must be undirected nx.Graph,'
                             ' NOT {}'.format(type(g)))
        edge_index = create_edge_index(g)
        data = PyGSingleGraphData(
            x=torch.tensor(g.init_x,
                           dtype=torch.float32,
                           # required by concat with LocalDegreeProfile()
                           device=FLAGS.device),
            edge_index=edge_index,
            edge_attr=None,
            y=None)  # TODO: add one-hot
        # print('before', data.x.shape)
        data, nf_dim = encode_node_features(pyg_single_g=data)
        assert data.is_undirected()
        assert data.x.shape[1] == nf_dim
        # print('after', data.x.shape)
        return data

    def split_into_pair_list(self, node_embed_merge, node_embed_name):
        """Split merged node embeddings back into per-pair views.

        Attaches each graph's embeddings to its pair under `node_embed_name`
        and returns the pair list.
        """
        node_embed_list = MergedGraphData.to_data_list(
            self.merge_data, node_embed_merge)
        assert len(node_embed_list) == self.merge_data['merge'].num_graphs
        postproc_graph_pairs_assign_node_embeds(
            node_embed_list, node_embed_name, self.pair_list)
        return self.pair_list
def create_edge_index(g):
    """Build an undirected PyG edge_index tensor from networkx graph g."""
    directed = torch.tensor(list(g.edges), device=FLAGS.device)
    return to_undirected(directed.t().contiguous(),
                         num_nodes=g.number_of_nodes())
def create_adj_set(g):
    """Map each node id to the set of *edge indices* incident on it.

    Note: the values are column indices into create_edge_index(g),
    not neighbor node ids.
    """
    incident = defaultdict(set)
    for idx, edge in enumerate(torch.t(create_edge_index(g))):
        u, v = edge[0].item(), edge[1].item()
        incident[u].add(idx)
        incident[v].add(idx)
    return incident
"noreply@github.com"
] | zhouaimin.noreply@github.com |
2ce048865eb41895ba98396484da75c1705672fa | 649efd9fdea0cfcdb19eb91ffbd06c29ac2595a2 | /push_notice_note.py | 6e943c33339b8d555755c0511c3eb310ce6d9e2e | [] | no_license | wangdejun/PythonNotes | 80cc0ba01e89c5198ebc381378184eb6281a62c1 | 3556e621d5fa7af3d8b3d5f56a093dabd1791ffc | refs/heads/master | 2020-07-30T22:29:09.792105 | 2018-01-25T15:03:24 | 2018-01-25T15:03:24 | 73,616,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | **需求描述
1.server按照时区发送notice
2.分别发送中英文版本
3.筛选发送国家
**解决办法是:
1.每条notice是一个任务,一个task。server定期来run这个job,用crontab文件,每过一小时执行一次。
2.把符合要求的task按照时区筛选出来,把符合要求的用户筛选出来,执行发送行为
3.每次发送都要存库,一遍轮到下一个时区的时候先查表,如果表中已经记录了,则skip掉,不筛选已经被skip掉的用户
4.服务器定期run 这个job,轮播发送,每小时定期run job,发送给符合要求的用户合适他们的Notice
**任务完结思考:
1.一开始设置的Q(query)条件变量覆盖范围非常大,query条件变量根据条件不断在缩减
2.如果有国家,筛选,没有国家,不筛选,即,query条件不变,懒变化的一个概念
3.如果语言中有中文版本,筛选复合条件的用户,发送中文任务;如果语言中有英文版本,筛选复合条件的英文用户,则发送英文消息,非常条理,丝毫不乱。
4.一种漂亮的python写法,用函数返回的元组直接赋值给目标元组,如下
#noticehelper.py
def sendNotice(arguments):
    ...
    return (a, b)
import noticehelper
#exec.py
(a,b)=noticehelper.sendNotice(arguments)
| [
"1074201799@qq.com"
] | 1074201799@qq.com |
80c7eb57076a4ad26e5d853ba9cc867fd13dac58 | a60aa25d646f410747e2d5c3d81090f72da46e17 | /studentinfoviewset/manage.py | 43a005edd0268c3bd66db05e12c84adc4680f54f | [] | no_license | skr206/github_project | b11003705353896f6aba8b637ac870bd37d3c182 | 0b5e92973e1c1f961c098f07e50c636ac80c6605 | refs/heads/main | 2023-06-16T04:27:55.407525 | 2021-07-13T18:17:44 | 2021-07-13T18:17:44 | 385,695,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks.

    Configures the Django settings module, then hands the command-line
    arguments to Django's command dispatcher.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'studentinfoviewset.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as err:
        # Re-raise with a friendlier hint, chaining the original error.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from err
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"skrao206@gmail.com"
] | skrao206@gmail.com |
f3c78f9d639bb06b62d51f7eebbe1e6c8450cadb | 76df0d7c4fd543b49b578518112a65e38dc9059b | /taxcalculator/yearlytax.py | 87f0ed183d31db58adbb73681a8a0474b0e059d2 | [] | no_license | bengodwinweb/RentalTaxCalculator | 76c3434953fac14bb29b80d7d0e851777f7fe3d0 | 7f96c1b9ceeb4908b8209db4349eb55e69d40abe | refs/heads/main | 2023-04-10T19:07:00.462933 | 2021-04-26T19:50:07 | 2021-04-26T19:50:07 | 357,010,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | import json
from taxcalculator.config import Config
from taxcalculator.quarterlytax import QuarterlyTax
class YearlyTax:
    """Aggregates rental-tax totals for Config.YEAR across four quarters."""

    def __init__(self):
        # One QuarterlyTax accumulator per calendar quarter.
        self.quarter1 = QuarterlyTax(1)
        self.quarter2 = QuarterlyTax(2)
        self.quarter3 = QuarterlyTax(3)
        self.quarter4 = QuarterlyTax(4)
        self.year = Config.YEAR
        # Year-wide running totals, updated by process_stays().
        self.gross = 0
        self.taxableIncome = 0
        self.nightsBooked = 0

    def toJSON(self):
        """Serialize this object (recursively, via __dict__) to pretty JSON."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def process_stays(self, stays):
        """Feed stays into each quarter and roll quarter totals into the year.

        Only stays whose transactionDate falls in Config.YEAR are passed on;
        presumably each quarter filters to its own months inside add_stays()
        -- confirm against QuarterlyTax.
        """
        year_stays = [stay for stay in stays if stay.transactionDate.year == Config.YEAR]
        quarters = [self.quarter1, self.quarter2, self.quarter3, self.quarter4]
        for q in quarters:
            q.add_stays(year_stays)
            self.gross += q.gross
            self.taxableIncome += q.taxableIncome
            self.nightsBooked += q.nightsBooked
| [
"ben.godwin1@gmail.com"
] | ben.godwin1@gmail.com |
1ef74bbd7592936fff5d016713ea948ded5646f2 | af4d559792c4255d5f26bc078cd176b70c0e643f | /hpsklearn/components/cross_decomposition/_pls.py | bdad86189563c644c43df8232c49ae1312194937 | [
"BSD-3-Clause"
] | permissive | hyperopt/hyperopt-sklearn | ec7d5f97ba8fd5a2c283dfec2fa9e0170b61c6ce | 4b3f6fde3a1ded2e71e8373d52c1b51a0239ef91 | refs/heads/master | 2023-08-02T07:19:20.259964 | 2022-12-15T17:53:07 | 2022-12-15T17:53:07 | 8,293,893 | 1,480 | 292 | NOASSERTION | 2022-12-15T17:53:08 | 2013-02-19T16:09:53 | Python | UTF-8 | Python | false | false | 3,586 | py | import typing
from hpsklearn.components._base import validate
from hyperopt.pyll import scope, Apply
from hyperopt import hp
from sklearn import cross_decomposition
import numpy as np
@scope.define
def sklearn_CCA(*args, **kwargs):
    """pyll wrapper: constructs a sklearn CCA estimator inside a hyperopt graph."""
    return cross_decomposition.CCA(*args, **kwargs)
@scope.define
def sklearn_PLSCanonical(*args, **kwargs):
    """pyll wrapper: constructs a sklearn PLSCanonical estimator inside a hyperopt graph."""
    return cross_decomposition.PLSCanonical(*args, **kwargs)
@scope.define
def sklearn_PLSRegression(*args, **kwargs):
    """pyll wrapper: constructs a sklearn PLSRegression estimator inside a hyperopt graph."""
    return cross_decomposition.PLSRegression(*args, **kwargs)
def _pls_n_components(name: str):
    """
    Search space for the 'n_components' parameter: choose 1 or 2 components.
    """
    return hp.choice(name, [1, 2])
def _pls_max_iter(name: str):
    """
    Search space for the 'max_iter' parameter: uniform integer in [350, 650].
    """
    return scope.int(hp.uniform(name, 350, 650))
def _pls_tol(name: str):
    """
    Search space for the 'tol' parameter: log-uniform over [1e-7, 1e-5].
    """
    return hp.loguniform(name, np.log(1e-7), np.log(1e-5))
def _pls_hp_space(
        name_func,
        n_components: typing.Union[int, Apply] = None,
        scale: bool = True,
        max_iter: typing.Union[int, Apply] = None,
        tol: typing.Union[float, Apply] = None,
        copy: bool = True
):
    """
    Common hyperparameter search space shared by
     cca
     pls canonical
     pls regression

    Any parameter left as None falls back to its default search-space
    distribution, labeled via name_func.
    """
    if n_components is None:
        n_components = _pls_n_components(name_func("n_components"))
    if max_iter is None:
        max_iter = _pls_max_iter(name_func("max_iter"))
    if tol is None:
        tol = _pls_tol(name_func("tol"))
    return {
        "n_components": n_components,
        "scale": scale,
        "max_iter": max_iter,
        "tol": tol,
        "copy": copy,
    }
def cca(name: str, **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cross_decomposition.CCA model.

    Args:
        name: name | str

    See help(hpsklearn.components.cross_decomposition._pls._pls_hp_space)
    for info on additional available pls arguments.
    """
    def _name(msg):
        return f"{name}.cca_{msg}"

    return scope.sklearn_CCA(**_pls_hp_space(_name, **kwargs))
@validate(params=["algorithm"],
          validation_test=lambda param: not isinstance(param, str) or param in ["nipals", "svd"],
          msg="Invalid parameter '%s' with value '%s'. Value must be in ['nipals', 'svd'].")
def pls_canonical(name: str, algorithm: typing.Union[str, Apply] = None, **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cross_decomposition.PLSCanonical model.

    Args:
        name: name | str
        algorithm: algorithm for first singular vectors | str

    See help(hpsklearn.components.cross_decomposition._pls._pls_hp_space)
    for info on additional available pls arguments.
    """
    def _name(msg):
        return f"{name}.pls_canonical_{msg}"

    space = _pls_hp_space(_name, **kwargs)
    if algorithm is None:
        space["algorithm"] = hp.choice(_name("algorithm"), ["nipals", "svd"])
    else:
        space["algorithm"] = algorithm
    return scope.sklearn_PLSCanonical(**space)
def pls_regression(name: str, **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cross_decomposition.PLSRegression model.

    Args:
        name: name | str

    See help(hpsklearn.components.cross_decomposition._pls._pls_hp_space)
    for info on additional available pls arguments.
    """
    def _name(msg):
        return f"{name}.pls_regression_{msg}"

    return scope.sklearn_PLSRegression(**_pls_hp_space(_name, **kwargs))
| [
"38689620+mandjevant@users.noreply.github.com"
] | 38689620+mandjevant@users.noreply.github.com |
0a40eb4aeecec7e464a939eb5bdb04727b2856f4 | 9a4df86b4ee8a8dbaaa3e7dc9d85fc3d8ca3dd48 | /common/2021.08.19/jangayeon_15927.py | 714502c27777b63dff488d3d89a280b28a311e9d | [] | no_license | AMYMEME/algorithm-study | 3ff2997ef1f50a2a6d34d46bac49b5fb3c14d7a4 | f5bd767c46a6571d2d139a946bd3603c96877edb | refs/heads/main | 2023-07-10T06:38:07.444611 | 2021-08-24T14:17:07 | 2021-08-24T14:17:07 | 324,687,765 | 0 | 1 | null | 2021-08-24T14:17:08 | 2020-12-27T04:40:54 | Python | UTF-8 | Python | false | false | 369 | py | #https://www.acmicpc.net/problem/15927
word=input().strip()
word_reverse=''.join(reversed(word))
#회문인 경우
if word !=(word_reverse): #원래 글자와 뒤집은 글자가 같은 경우
ans=len(word)
elif len(set(word)) == 1: #모두 다 같은 문자로 이루어진 경우
ans= -1
#회문인 아닌 경우
else:
ans=len(word) - 1
print(ans)
| [
"ayeon3052@ewhain.net"
] | ayeon3052@ewhain.net |
d9973e5dc09730743d44bc6c07cd7b8fe2a93e61 | c384c37ed4c9c962209f4d709b494071728d23b1 | /tests/functional/channels/netcopy/ns_server.py | 801cfe35d5a42d6e3f02796c1f752050cd2b1d58 | [] | no_license | liamstask/zerovm | 6e4a5cad49e400a2868dc8de0e34882c61021b4c | ef7dd02ceaa7639a8ad11af1f1b12575fa6cb0bd | refs/heads/master | 2021-01-18T09:14:22.249213 | 2013-06-21T15:24:44 | 2013-06-21T15:24:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | import socket
import sys
import struct
import ctypes
# UDP rendezvous server (Python 2): collects registrations until `peers`
# peers have checked in, then replies to each with a patched connect table.
peers = int(sys.argv[1])
port = 0
if len(sys.argv) > 2 and sys.argv[2]:
    port = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', port))
# Print the actual bound port (useful when port 0 let the OS choose).
print s.getsockname()[1]
bind_map = {}   # alias -> {handle: bound port}
conn_map = {}   # alias -> mutable buffer holding the peer's connect records
peer_map = {}   # alias -> {0: sender ip, 1: sender port}
while 1:
    try:
        message, address = s.recvfrom(65535)
        print [str(message), len(message)]
        # Packet layout: alias(!I), count(!I), then `count` 10-byte
        # records of (handle !I, junk !I, port !H); the remainder is the
        # peer's connect table, kept verbatim for patching below.
        offset = 0
        alias = struct.unpack_from('!I', message, offset)[0]
        print '%08x = %s:%d' % (alias, address[0], address[1])
        offset += 4
        count = struct.unpack_from('!I', message, offset)[0]
        offset += 4
        for i in range(count):
            h, _junk, port = struct.unpack_from('!IIH', message, offset)[0:3]
            bind_map.setdefault(alias, {})[h] = port
            print '%08x:%d <- %08x' % (alias, port, h)
            offset += 10
        # Mutable buffer so struct.pack_into can rewrite it in place.
        conn_map[alias] = ctypes.create_string_buffer(message[offset:])
        peer_map.setdefault(alias, {})[0] = address[0]
        peer_map.setdefault(alias, {})[1] = address[1]
        if len(peer_map) == peers:
            # Everyone has checked in: patch each stored connect table
            # and send it back to its owner.
            for src in peer_map.iterkeys():
                reply = conn_map[src]
                offset = 0
                count = struct.unpack_from('!I', reply, offset)[0]
                offset += 4
                for i in range(count):
                    h = struct.unpack_from('!I', reply, offset)[0]
                    port = bind_map[h][src]
                    # NOTE(review): this writes src's OWN address paired
                    # with the port peer `h` bound for src -- presumably
                    # all peers share a host/NAT; confirm against the
                    # client-side protocol.
                    struct.pack_into('!4sH', reply, offset + 4, socket.inet_pton(socket.AF_INET, peer_map[src][0]), port)
                    offset += 10
                s.sendto(reply, (peer_map[src][0], peer_map[src][1]))
                print ['sending to: ', peer_map[src][0], peer_map[src][1]]
    except (KeyboardInterrupt, SystemExit):
        exit(1)
| [
"bortoq@gmail.com"
] | bortoq@gmail.com |
343b0e6f641f084184ddbd949cb261d668823c78 | 0c1f94e51df6f9631be8269466f2212690b8b8ec | /manage.py | bdf1a46c073aac48c042bdf717ffa93ce64bca3d | [] | no_license | lukaszszajkowski/Django-jQuery-Example-update-part-of-page | 98d7b2ac5f9ec19c310110dc586872774fe385df | 7e0ea3c68c3e697a5c4bf8d888e079198199f8cc | refs/heads/master | 2021-01-13T13:58:48.589818 | 2014-01-16T08:06:49 | 2014-01-16T08:06:49 | 15,941,760 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before importing the
    # management machinery, then dispatch the CLI command (runserver, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "s21133135.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"lukasz@logicallysoftware.co.uk"
] | lukasz@logicallysoftware.co.uk |
b862617ad8a52f6b272921f476d1bccc551c9ac2 | 8a133a51f903873cc51314c28201708d3da2c43d | /todo_board/forms.py | f6e4600b982cb605d16bfa977af377941153f909 | [] | no_license | hyunk-go/TODOLIST | 0977c23b83c22c45aa71fae45711ff161f09f7d6 | f81993fd4fa078b17630c4f7b604420cd35c420e | refs/heads/main | 2023-07-24T07:46:33.591596 | 2021-08-27T09:14:35 | 2021-08-27T09:14:35 | 345,844,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | from django import forms
from .models import TodoList
class DateInput(forms.DateInput):
    # Override the widget's input type so browsers render an HTML5
    # date picker instead of a plain text box.
    input_type='date'
class TodoForm(forms.ModelForm):
    """ModelForm for creating/editing a TodoList entry."""
    class Meta:
        model=TodoList
        fields=('title','content','end_date')
        # Use the HTML5 date-picker widget for the deadline field.
        widgets = {
            'end_date' : DateInput()
        }
| [
"mintseijo0720@gmail.com"
] | mintseijo0720@gmail.com |
eb4a254fb36a421c34158c7005be74caf6428b30 | d56da1af021d7ebb876b70e7a7c3c3b2ac28087e | /dpxdt/server/operations.py | 4a4808f4ebcdde1cb227f4bb16d43c91e29929af | [
"Apache-2.0"
] | permissive | rudbaby/dpxdt | 7173146ea7d8e27a7db10732e4258605af8c7e82 | 8fbf14bb45ac01932fa5ca170d15c54880db5eff | refs/heads/master | 2022-11-19T18:03:57.823869 | 2013-08-05T08:23:06 | 2013-08-05T08:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,276 | py | #!/usr/bin/env python
# Copyright 2013 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cacheable operations and eviction for models in the frontend."""
import functools
import logging
# Local libraries
import sqlalchemy
# Local modules
from . import app
from . import cache
from . import db
from dpxdt.server import models
from dpxdt.server import signals
class UserOps(object):
    """Cacheable operations for user-specified information."""

    def __init__(self, user_id):
        self.user_id = user_id

    # For Flask-Cache keys (memoize(per_instance=True) uses the instance's
    # repr, so entries are scoped to this user_id).
    def __repr__(self):
        return 'caching.UserOps(user_id=%r)' % self.user_id

    @cache.memoize(per_instance=True)
    def load(self):
        """Return the User row for user_id (detached), or None if anonymous/missing."""
        if not self.user_id:
            return None
        user = models.User.query.get(self.user_id)
        if user:
            # Detach from the session so the cached object can outlive it.
            db.session.expunge(user)
        return user

    @cache.memoize(per_instance=True)
    def get_builds(self):
        """Return up to 1000 newest builds visible to this user (detached)."""
        if self.user_id:
            user = models.User.query.get(self.user_id)
            build_list = (
                user.builds
                .order_by(models.Build.created.desc())
                .limit(1000)
                .all())
        else:
            # Anonymous users see only public builds
            build_list = (
                models.Build.query
                .filter_by(public=True)
                .order_by(models.Build.created.desc())
                .limit(1000)
                .all())
        for build in build_list:
            db.session.expunge(build)
        return build_list

    @cache.memoize(per_instance=True)
    def owns_build(self, build_id):
        """Return (build, user_is_owner); build is detached, or None if absent."""
        build = models.Build.query.get(build_id)
        user_is_owner = False
        if build:
            user_is_owner = build.is_owned_by(self.user_id)
            db.session.expunge(build)
        return build, user_is_owner

    def evict(self):
        """Evict all caches related to this user."""
        logging.debug('Evicting cache for %r', self)
        cache.delete_memoized(self.load)
        cache.delete_memoized(self.get_builds)
        cache.delete_memoized(self.owns_build)
class BuildOps(object):
    """Cacheable operations for build-specific operations."""

    def __init__(self, build_id):
        self.build_id = build_id

    # For Flask-Cache keys (memoize(per_instance=True) uses the instance's
    # repr, so entries are scoped to this build_id).
    def __repr__(self):
        return 'caching.BuildOps(build_id=%r)' % self.build_id

    @staticmethod
    def sort_run(run):
        """Sort function for runs within a release."""
        # Sort errors first, then by name. Also show errors that were manually
        # approved, so the paging sort order stays the same even after users
        # approve a diff on the run page.
        if run.status in models.Run.DIFF_NEEDED_STATES:
            return (0, run.name)
        return (1, run.name)

    @staticmethod
    def get_stats_keys(status):
        """Map a Run status to the stats_dict counters it increments."""
        if status in (models.Run.DIFF_APPROVED,
                      models.Run.DIFF_NOT_FOUND):
            return ('runs_successful', 'runs_complete', 'runs_total')
        elif status == models.Run.DIFF_FOUND:
            return ('runs_failed', 'runs_complete', 'runs_total')
        elif status == models.Run.NO_DIFF_NEEDED:
            return ('runs_baseline',)
        elif status == models.Run.NEEDS_DIFF:
            return ('runs_total',)
        return ()

    @cache.memoize(per_instance=True)
    def get_candidates(self, page_size, offset):
        """Return (has_next_page, candidate_list, stats_counts) for one page.

        Fetches page_size + 1 releases so has_next_page can be determined
        without a second query; stats_counts holds per-(release, status)
        run counts for the visible candidates.
        """
        candidate_list = (
            models.Release.query
            .filter_by(build_id=self.build_id)
            .order_by(models.Release.created.desc())
            .offset(offset)
            .limit(page_size + 1)
            .all())
        stats_counts = []
        has_next_page = len(candidate_list) > page_size
        if has_next_page:
            # Drop the sentinel extra row that only existed for paging.
            candidate_list = candidate_list[:-1]
        if candidate_list:
            candidate_keys = [c.id for c in candidate_list]
            stats_counts = (
                db.session.query(
                    models.Run.release_id,
                    models.Run.status,
                    sqlalchemy.func.count(models.Run.id))
                .join(models.Release)
                .filter(models.Release.id.in_(candidate_keys))
                .group_by(models.Run.status, models.Run.release_id)
                .all())
        for candidate in candidate_list:
            db.session.expunge(candidate)
        return has_next_page, candidate_list, stats_counts

    @cache.memoize(per_instance=True)
    def get_release(self, release_name, release_number):
        """Return (release, run_list, stats_dict, approval_log), detached.

        NOTE(review): the not-found path returns a 3-tuple (None, None,
        None) while the success path returns a 4-tuple -- callers that
        unpack four values will break on the miss case; confirm call sites.
        """
        release = (
            models.Release.query
            .filter_by(
                build_id=self.build_id,
                name=release_name,
                number=release_number)
            .first())
        if not release:
            return None, None, None
        run_list = list(release.runs)
        run_list.sort(key=BuildOps.sort_run)
        stats_dict = dict(
            runs_total=0,
            runs_complete=0,
            runs_successful=0,
            runs_failed=0,
            runs_baseline=0)
        for run in run_list:
            for key in self.get_stats_keys(run.status):
                stats_dict[key] += 1
        approval_log = None
        if release.status in (models.Release.GOOD, models.Release.BAD):
            # Most recent good/bad judgement for this release, if any.
            approval_log = (
                models.AdminLog.query
                .filter_by(release_id=release.id)
                .filter(models.AdminLog.log_type.in_(
                    (models.AdminLog.RELEASE_BAD,
                     models.AdminLog.RELEASE_GOOD)))
                .order_by(models.AdminLog.created.desc())
                .first())
        for run in run_list:
            db.session.expunge(run)
        if approval_log:
            db.session.expunge(approval_log)
        return release, run_list, stats_dict, approval_log

    def _get_next_previous_runs(self, run):
        """Return (next_run, previous_run) neighbors of run in display order."""
        next_run = None
        previous_run = None
        # We sort the runs in the release by diffs first, then by name.
        # Simulate that behavior here with multiple queries.
        if run.status in models.Run.DIFF_NEEDED_STATES:
            previous_run = (
                models.Run.query
                .filter_by(release_id=run.release_id)
                .filter(models.Run.status.in_(models.Run.DIFF_NEEDED_STATES))
                .filter(models.Run.name < run.name)
                .order_by(models.Run.name.desc())
                .first())
            next_run = (
                models.Run.query
                .filter_by(release_id=run.release_id)
                .filter(models.Run.status.in_(models.Run.DIFF_NEEDED_STATES))
                .filter(models.Run.name > run.name)
                .order_by(models.Run.name)
                .first())
            if not next_run:
                # Last diff-needed run: the next one is the first non-diff run.
                next_run = (
                    models.Run.query
                    .filter_by(release_id=run.release_id)
                    .filter(
                        ~models.Run.status.in_(models.Run.DIFF_NEEDED_STATES))
                    .order_by(models.Run.name)
                    .first())
        else:
            previous_run = (
                models.Run.query
                .filter_by(release_id=run.release_id)
                .filter(~models.Run.status.in_(models.Run.DIFF_NEEDED_STATES))
                .filter(models.Run.name < run.name)
                .order_by(models.Run.name.desc())
                .first())
            next_run = (
                models.Run.query
                .filter_by(release_id=run.release_id)
                .filter(~models.Run.status.in_(models.Run.DIFF_NEEDED_STATES))
                .filter(models.Run.name > run.name)
                .order_by(models.Run.name)
                .first())
            if not previous_run:
                # First non-diff run: the previous one is the last diff-needed run.
                previous_run = (
                    models.Run.query
                    .filter_by(release_id=run.release_id)
                    .filter(
                        models.Run.status.in_(models.Run.DIFF_NEEDED_STATES))
                    .order_by(models.Run.name.desc())
                    .first())
        return next_run, previous_run

    @cache.memoize(per_instance=True)
    def get_all_runs(self, release_name, release_number):
        # NOTE(review): this method looks unfinished/dead: it references
        # `test_name`, which is not defined in this scope (calling it
        # raises NameError), and it computes `approval_log_list` but
        # never returns anything. Left untouched pending a decision on
        # the intended signature and return value.
        run_list = (
            models.Run.query
            .join(models.Release)
            .filter(models.Release.name == release_name)
            .filter(models.Release.number == release_number)
            .filter(models.Run.name == test_name)
            .all())
        run_list.sort(key=BuildOps.sort_run)
        run_ids = [run.id for run in run_list]
        approval_log_list = (
            models.AdminLog.query
            .filter(models.AdminLog.run_id.in_(run_ids))
            .filter_by(log_type=models.AdminLog.RUN_APPROVED)
            .group_by(models.AdminLog.run_id)
            .order_by(models.AdminLog.created.desc())
            .first())

    @cache.memoize(per_instance=True)
    def get_run(self, release_name, release_number, test_name):
        """Return (run, next_run, previous_run, approval_log), all detached.

        Returns (None, None, None, None) when the run does not exist.
        """
        run = (
            models.Run.query
            .join(models.Release)
            .filter(models.Release.name == release_name)
            .filter(models.Release.number == release_number)
            .filter(models.Run.name == test_name)
            .first())
        if not run:
            return None, None, None, None
        next_run, previous_run = self._get_next_previous_runs(run)
        approval_log = None
        if run.status == models.Run.DIFF_APPROVED:
            # Most recent manual approval entry for this run.
            approval_log = (
                models.AdminLog.query
                .filter_by(run_id=run.id,
                           log_type=models.AdminLog.RUN_APPROVED)
                .order_by(models.AdminLog.created.desc())
                .first())
        if run:
            db.session.expunge(run)
        if next_run:
            db.session.expunge(next_run)
        if previous_run:
            db.session.expunge(previous_run)
        if approval_log:
            db.session.expunge(approval_log)
        return run, next_run, previous_run, approval_log

    def evict(self):
        """Evict all caches relating to this build."""
        logging.debug('Evicting cache for %r', self)
        cache.delete_memoized(self.get_candidates)
        cache.delete_memoized(self.get_release)
        cache.delete_memoized(self.get_run)
# Connect Frontend and API events to cache eviction.
def _evict_user_cache(sender, user=None, build=None):
    """Signal handler: drop all cached data for the affected user."""
    UserOps(user.get_id()).evict()
def _evict_build_cache(sender, build=None, release=None, run=None):
    """Signal handler: drop all cached data for the affected build."""
    BuildOps(build.id).evict()
# Wire model-update signals to cache eviction so stale entries are purged
# as soon as the underlying rows change.
signals.build_updated.connect(_evict_user_cache, app)
signals.release_updated_via_api.connect(_evict_build_cache, app)
signals.run_updated_via_api.connect(_evict_build_cache, app)
| [
"brett@haxor.com"
] | brett@haxor.com |
208dfe857d232ac4febce91aab6e3d729df5a02e | 6fa4975949c1779c937334552b1ad7c382bb6711 | /Homework/lesson1/task2.py | be473fdee763388c81af4e446a742f6105415c6d | [] | no_license | Dmitry-Valyaykin/Python_13_10_2020 | b4c8859b32a06c99c9fe6ddd8dc8212a52934873 | 202a51e287bc1c3a9955aafcab7f2d7d07dc66ec | refs/heads/main | 2023-01-08T17:47:27.897695 | 2020-10-18T17:22:19 | 2020-10-18T17:22:19 | 304,941,726 | 0 | 0 | null | 2020-11-03T10:00:48 | 2020-10-17T18:19:06 | Python | UTF-8 | Python | false | false | 469 | py | """
Пользователь вводит время в секундах. Переведите время в часы, минуты и секунды и выведите в формате чч:мм:сс.
Используйте форматирование строк
"""
data = int(input('введите количество секунд: '))
hour = data // 3600
minut = (data % 3600) // 60
second = (data % 3600) % 60
time = f'{hour}.{minut}.{second}'
print(time)
| [
"72880960+Dmitry-Valyaykin@users.noreply.github.com"
] | 72880960+Dmitry-Valyaykin@users.noreply.github.com |
18850f5f0bba6371f9e3c22522a8498c43b54712 | 0665d8cfa9e59d3d61c04e0371526a0bf5c35f77 | /day1/passwd.py | 36cbe94d7794360b20497f842856d6876b8adf46 | [] | no_license | cccczl/oldboypy3 | 754ac683be59d95eaacb833a374f35515ea9bc92 | 1f4b568bdc7656200e3b369d319d0921aa6f7905 | refs/heads/master | 2022-11-23T15:00:04.947720 | 2019-12-29T09:29:09 | 2019-12-29T09:29:09 | 226,620,864 | 0 | 0 | null | 2022-11-17T18:33:09 | 2019-12-08T05:44:21 | Python | UTF-8 | Python | false | false | 513 | py | # Autthor:long zhang
import getpass
# NOTE(review): credentials are hard-coded in source -- acceptable for a
# lesson exercise, never for real code.
_user = "long"
_password = "791026"
username = input("username:")
# getpass hides the typed password from terminal echo.
password = getpass.getpass("password:")
if _user == username and _password == password:
    print("Welcome user {name} login...".format(name=username))
else:
    print("Invalid username or password")
# Special str.format()-based multi-line string interpolation.
# ('_passowrd' is misspelled but used consistently on both sides of the
# format call, so the output is unaffected.)
info = '''
---------info of {_username}------
Username:{_username}
Password:{_passowrd}
''' .format(_username=username,_passowrd=password)
print(info)
"cccczl@hotmail.com"
] | cccczl@hotmail.com |
f08c05a0e6558dd352966767e258ff03ee905da9 | 7a62d4b9f8ba085c69bce068d3f6750b0ac3c6ba | /stnm/stop.py | fe88fd8b0e5a88243160a8b1b4f59e5cb2ce3806 | [
"MIT"
] | permissive | talhasch/stnm | 365fb8c006c4a6b3d5b933f15f1bc36c4381bedb | 075dcf673cc7ac9c3c79687890d5f712bd44b632 | refs/heads/main | 2023-01-20T14:53:19.456207 | 2020-12-08T17:40:46 | 2020-12-08T17:40:46 | 317,970,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from stnm.response import error_response, success_response
from stnm.shell import get_node_process
def stop():
    """Stop the running node process, reporting the outcome via responses.

    Emits error_response(2) when no process is found, success_response(2)
    on a successful kill, and error_response(4) when the kill fails.
    """
    process = get_node_process()
    if process is None:
        # No running process: report and bail out. Previously execution
        # fell through to process.kill() on None, whose AttributeError
        # was swallowed by the except below as a spurious error_response(4).
        error_response(2)
        return
    try:
        process.kill()
        success_response(2)
    except Exception:
        error_response(4)
| [
"talhabugrabulut@gmail.com"
] | talhabugrabulut@gmail.com |
095425d95f8755490347f9a7cf58dfece490bccf | cd3f2ee0c4ae1e838143856c74487b18f22eeefa | /python/strings/case_swap.py | 38d9950d6b2508eb2814a3066794b310563c2595 | [] | no_license | borkovsky/DS_practice | 7866dd4358265307316022d0d704fecb30cb7657 | 4be7c2901d79b15c4a693a4b48dd5f16067a3474 | refs/heads/master | 2020-03-30T11:21:19.582490 | 2018-11-25T20:13:53 | 2018-11-25T20:13:53 | 151,169,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | def case_swap(s):
swapped = []
for elem in list(s):
if elem.istitle():
swapped.append(elem.lower())
else:
wapped.append(elem.upper())
return ''.join(swapped) | [
"i.borkovsky@gmail.com"
] | i.borkovsky@gmail.com |
6be9a2f554cfe414a831a4e52764e37f7205a2d7 | 53b9432487fbb62a5f48d37754d0404e2672a0f7 | /facebookspider-master2/facebookspider/redyurl.py | 038eeda369fb5b503d2adbd739ca578de300743c | [] | no_license | swg0110/facebook_spider | ff7c3caeb76fccd3bbc2b23b586a2b5825a77598 | a5e3b12d56cb2759942b486b7f4da3b94dfa6839 | refs/heads/master | 2021-09-17T18:33:09.636336 | 2018-07-04T11:53:14 | 2018-07-04T11:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | #-*- coding: UTF-8 -*-
import pymongo
import time
from datetime import datetime
# import sys
# reload(sys)
# sys.setdefaultencoding('utf8')
# Copy friend-list entries from the `facebook2` collection into
# `singaporeredyurl`, one document per friend, deduplicated by URL.
# NOTE(review): database credentials are hard-coded in source.
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client["singapore"]
db.authenticate("yufei", "xjtu@2017")
coll = db["facebook2"]
a = coll.find()
print (a.count())
coll = db["singaporeredyurl"]
# Unique index on url makes duplicate inserts raise, handled below.
coll.ensure_index('url', unique=True)
# Key of the "all friends" section inside each friendsList document.
str1 = '全部好友'
for i in a:
    try:
        # str.decode() here implies this script targets Python 2.
        for j in i['friendsList'][str1.decode()]:
            # Rebinding `a` is safe: the for-loop above already holds the
            # cursor's iterator.
            a = {}
            a['name'] = j['name']
            a['picture'] = j['picture']
            a['url'] = j['url']
            a['time'] = i['time']
            a['sign'] = 'f'
            try:
                coll.insert(a)
            # NOTE(review): bare except; swallows any insert failure, not
            # just the duplicate-key error it is meant to catch.
            except:
                print ('重复')
                pass
    except:
        print ('出错')
        print (i['friendsList'])
| [
"492741071@qq.com"
] | 492741071@qq.com |
9df07d530379faa00e096532c86273a74a908433 | 008110416478867f5764d2e7030d3e79b73264fc | /therminator/tests/factories.py | c5e94a79cf53b5b0b0819b31675547fe90eeec99 | [
"MIT"
] | permissive | jparker/therminator_server | 4943db5a933467934711737821e08b963ef8db1d | 578d205d539edda0416a0636b57f327e1be97572 | refs/heads/master | 2021-01-22T08:52:37.164176 | 2017-09-13T14:53:53 | 2017-09-13T14:53:53 | 92,639,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | from datetime import datetime
from itertools import count
from therminator import db
from therminator.models import *
# Infinite generators of unique default attribute values, one stream per
# factory attribute. The original dict literal repeated the 'user.name'
# key; the first entry was dead because the later duplicate silently wins
# in a dict literal, so it is removed here.
GENERATORS = {
    'user.name': map(lambda n: "User %d" % n, count()),
    'user.email': map(lambda n: "user%d@example.com" % n, count()),
    'home.name': map(lambda n: "Home %d" % n, count()),
    'sensor.name': map(lambda n: "Sensor %d" % n, count()),
}
def build_user(name=None, email=None, password='secret'):
    """Construct (without persisting) a User, generating missing defaults."""
    name = name or next(GENERATORS['user.name'])
    email = email or next(GENERATORS['user.email'])
    return User(name=name, email=email, password=password)
def create_user(commit=True, **kwargs):
    """Build a User, add it to the session, optionally commit, and return it."""
    instance = build_user(**kwargs)
    db.session.add(instance)
    if commit:
        db.session.commit()
    return instance
def build_home(user=None, name=None, timezone='PST8PDT'):
    """Return an unsaved Home, defaulting the owner and name when omitted."""
    owner = user or build_user()
    home_name = name or next(GENERATORS['home.name'])
    return Home(user=owner, name=home_name, timezone=timezone)
def create_home(commit=True, **kwargs):
    """Build a Home, add it to the session, optionally commit, and return it."""
    instance = build_home(**kwargs)
    db.session.add(instance)
    if commit:
        db.session.commit()
    return instance
def build_sensor(home=None, name=None):
    """Return an unsaved Sensor, defaulting its home and name when omitted."""
    parent_home = home or build_home()
    sensor_name = name or next(GENERATORS['sensor.name'])
    return Sensor(home=parent_home, name=sensor_name)
def create_sensor(commit=True, **kwargs):
    """Build a Sensor, add it to the session, optionally commit, and return it."""
    instance = build_sensor(**kwargs)
    db.session.add(instance)
    if commit:
        db.session.commit()
    return instance
def build_reading(
    int_temp=50.0,
    ext_temp=21.0,
    humidity=60.0,
    resistance=1500.0,
    **kwargs
):
    """Return an unsaved Reading with sensible default measurements.

    A ``sensor`` and ``timestamp`` are generated automatically unless
    supplied in ``kwargs``.

    FIX: the original signature ended with ``**kwargs,`` — a trailing comma
    after the var-keyword parameter in a ``def`` is a SyntaxError in Python
    (it is only permitted in call sites, not definitions).
    """
    if 'sensor' not in kwargs:
        kwargs['sensor'] = build_sensor()
    if 'timestamp' not in kwargs:
        kwargs['timestamp'] = datetime.utcnow()
    return Reading(
        int_temp=int_temp,
        ext_temp=ext_temp,
        humidity=humidity,
        resistance=resistance,
        **kwargs
    )
def create_reading(commit=True, **kwargs):
    """Build a Reading, add it to the session, optionally commit, and return it."""
    record = build_reading(**kwargs)
    db.session.add(record)
    if commit:
        db.session.commit()
    return record
| [
"jparker@urgetopunt.com"
] | jparker@urgetopunt.com |
261db80a7bca3e37fd05548f58346fc4def992dc | 4d428e817b581dd12cf96f26305561fecfa2d640 | /main.py | 856fae5beb4e9b83adf25c28137e29d2e11d654c | [
"CC0-1.0"
] | permissive | jumpycat/MinutiaeFingerprint | d4266a56dd94c833fbd09b223c378bdf310e3f59 | b1440e520aa1d82c084daabe2cb95bdfd97d78a7 | refs/heads/master | 2020-05-20T19:46:40.027709 | 2018-05-22T12:36:16 | 2018-05-22T12:36:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | import argparse
from myPackage import tools as tl
from myPackage import preprocess
from myPackage import minutiaeExtraction as minExtract
from enhancementFP import image_enhance as img_e
from os.path import basename, splitext, exists
import time
from numpy import mean, std
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True,
help="-p Source path where the images are stored.")
ap.add_argument("-r", "--results", required= False,
help="-r Destiny path where the results will be stored.")
args = vars(ap.parse_args())
# Configuration
image_ext = '.tif'
plot = False
path = None
# ratio = 0.2
# Create folders for results
# -r ../Data/Results/fingerprints
if args.get("results") is not None:
if not exists(args["results"]):
tl.makeDir(args["results"])
path = args["results"]
# Extract names
all_images = tl.natSort(tl.getSamples(args["path"], image_ext))
# Split train and test data
# train_data, test_data = tl.split_train_test(all_images, ratio)
print("\nAll_images size: {}\n".format(len(all_images)))
all_times= []
for image in all_images:
start = time.time()
name = splitext(basename(image))[0]
print("\nProcessing image '{}'".format(name))
cleaned_img = preprocess.blurrImage(image, name, plot)
enhanced_img = img_e.image_enhance(cleaned_img, name, plot)
cleaned_img = preprocess.cleanImage(enhanced_img, name, plot)
# skeleton = preprocess.zhangSuen(cleaned_img, name, plot)
skeleton = preprocess.thinImage(cleaned_img, name, plot)
minExtract.process(skeleton, name, plot, path)
all_times.append((time.time()-start))
mean = mean(all_times)
std = std(all_times)
print("\n\nAlgorithm takes {:2.3f} (+/-{:2.3f}) seconds per image".format(mean, std)) | [
"marcgonzmont@gmail.com"
] | marcgonzmont@gmail.com |
537c64e49caa04acd7b4a6d728ac838c195b5380 | d273ffcbf9f116d285afd07a8f55ffd3630cc20c | /tracker/middleware.py | e024eb186d3ea5d6cb9aa5c9092f3b895f3dbf0b | [] | no_license | balloonka135/covid_traker | 0c04e09ab113b39af9f29f32b74d9add1bbad3db | 715ab3388e722ce17c9e5b58f4cb0d9be4f39ec1 | refs/heads/master | 2023-05-18T12:07:22.879725 | 2020-06-15T15:20:40 | 2020-06-15T15:20:40 | 259,007,804 | 0 | 0 | null | 2021-06-10T22:49:25 | 2020-04-26T10:58:51 | HTML | UTF-8 | Python | false | false | 671 | py | from django.contrib.auth.middleware import MiddlewareMixin
from django.http import HttpResponseForbidden
from django.contrib import auth
class AutomaticUserLoginMiddleware(MiddlewareMixin):
    """Middleware that authenticates the request's user before each view.

    If the request has no authenticated user, ``auth.authenticate`` is tried;
    a failure yields 403 Forbidden, a success logs the user in and lets the
    view run.

    NOTE(review): ``MiddlewareMixin`` is imported here from
    ``django.contrib.auth.middleware``; in stock Django it lives in
    ``django.utils.deprecation`` — confirm against the project's Django version.
    """

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Already authenticated: fall through to the view (implicit None).
        if AutomaticUserLoginMiddleware._is_user_authenticated(request):
            return None
        user = auth.authenticate(request)
        if user is None:
            # Authentication failed: block the request outright.
            return HttpResponseForbidden()
        request.user = user
        auth.login(request, user)

    @staticmethod
    def _is_user_authenticated(request):
        """Return a truthy value when the request carries an authenticated user."""
        current = request.user
        return current and current.is_authenticated
| [
"iryna@gmail.com"
] | iryna@gmail.com |
48ad30ced9dff33d05da33f5507c2f4baa08a426 | a90b72cbabf1066e442ac6ee750a0c9aba2575cb | /송태경/06M02W/백준/19.큐, 덱/2164_카드2.py | 06bc08cbb672fdc2504da31140afe3a9e734ff3f | [] | no_license | song248/CodingTest-Study | 61b945ae8050a01cb9534908e1d4e24cb6ef696c | e14e5a8538104455798598d0a43034d5e729b50f | refs/heads/main | 2023-06-29T15:09:58.251506 | 2021-08-05T06:26:28 | 2021-08-05T06:26:28 | 361,177,207 | 0 | 0 | null | 2021-04-24T14:03:53 | 2021-04-24T14:03:52 | null | UTF-8 | Python | false | false | 245 | py | n = int(input())
# Card trick (BOJ 2164 style): cards 1..n sit in a queue; each pass keeps
# every second card. When the deck size is odd, the last surviving card
# wraps around to the front for the next pass. `n` is read above; the
# single survivor is printed below, so the final list must stay bound to `qq`.
qq = list(range(1, n + 1))
while len(qq) > 1:
    survivors = qq[1::2]  # every second card survives a full pass
    if len(qq) % 2 == 1:
        # Odd-sized deck: the last surviving card moves to the front.
        survivors.insert(0, qq[-1])
    qq = survivors
print(qq[0]) | [
"songteagyong@gmail.com"
] | songteagyong@gmail.com |
d060878e6239534ef6ad763124a8786216c0b6f3 | aaa9c6cfa150c3bde0f05c72f01f5a52ada1344f | /src/generate_sample.py | 9f63a68b219d1483ed8203cc176c7cd7acfd2b3d | [] | no_license | kuro-beer/missing_data | 5838853350a71ec134c9d41ba343fc9a9d35434d | 3f8aa96ee726d13f07b047409037b7aab1e393ed | refs/heads/master | 2021-09-14T20:01:08.342779 | 2018-05-18T13:36:20 | 2018-05-18T14:04:10 | 117,308,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,724 | py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import norm
# Simulation parameters (shared by main()).
missing_ratio = 0.3  # fraction of x1 observations treated as missing in each scenario
sample_size = 200  # number of simulated observations
seed = 123  # RNG seed for reproducibility
def main():
    """Simulate y = 5 + 3*x1 - 2*x2 + noise and three missing-data scenarios.

    Builds indicator columns marking which x1 values are *observed* (1) under:
      - case1: missing completely at random (independent coin flip),
      - case2: missingness driven by x2,
      - case3: missingness driven by y,
    then draws a 2x2 scatter grid and saves the figure plus the pickled frame.

    Side effects: prints the head of the frame, writes
    ../fig/sample_scatter.png and ../data/sample_data.pkl.
    """
    np.random.seed(seed)
    x1 = np.random.randn(sample_size)
    x2 = np.random.randn(sample_size)
    y = 5 + 3*x1 - 2*x2 + np.random.normal(0, 4, sample_size)
    df = pd.DataFrame({"y": y,
                       "x1": x1,
                       "x2": x2})
    # 1 = observed, 0 = missing, with P(observed) = 1 - missing_ratio.
    df['x1_case1'] = df.x1.map(
        lambda x: 1 if np.random.rand() >= missing_ratio else 0
    )
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the exact equivalent here.
    df['x1_case2'] = (df.x2 >= norm.ppf(missing_ratio)).map(
        lambda x: int(x)
    )
    df['x1_case3'] = (df.y >= df.y.quantile(missing_ratio)).map(
        lambda x: int(x)
    )
    print(df.head(10))
    fig, ((ax11, ax12), (ax21, ax22)) = plt.subplots(nrows=2,
                                                     ncols=2,
                                                     figsize=(9,9),
                                                     dpi=80)
    fig.subplots_adjust(wspace=0.3, hspace=0.3)
    # Top-left: full sample.
    xvar = df.x1
    yvar = df.y
    ax11.plot(xvar, yvar, 'o',
              color='navy',
              markerfacecolor='w',
              alpha=0.7)
    ax11.set_title('complete data')
    ax11.set_xlabel('x1')
    ax11.set_ylabel('y')
    ax11.set_xlim(-3, 3)
    ax11.set_ylim(-10, 20)
    # Top-right: observations surviving the random (MCAR) mechanism.
    xvar = df.x1.loc[df.x1_case1 == 1]
    yvar = df.y.loc[df.x1_case1 == 1]
    ax12.plot(xvar, yvar, 'o',
              color='navy',
              markerfacecolor='w',
              alpha=0.7)
    ax12.set_title('independent missing')
    ax12.set_xlabel('x1')
    ax12.set_ylabel('y')
    ax12.set_xlim(-3, 3)
    ax12.set_ylim(-10, 20)
    # Bottom-left: x2-driven mechanism, plotted in the (x1, x2) plane
    # with the truncation threshold marked.
    xvar = df.x1.loc[df.x1_case2 == 1]
    yvar = df.x2.loc[df.x1_case2 == 1]
    ax21.plot(xvar, yvar, 'o',
              color='darkgreen',
              markerfacecolor='w',
              alpha=0.7)
    ax21.set_title('x2-dependent missing')
    ax21.set_xlabel('x1')
    ax21.set_ylabel('x2', color="green")
    ax21.set_xlim(-3, 3)
    ax21.set_ylim(-3, 3)
    ax21.hlines([norm.ppf(missing_ratio)], -3, 3, "red", linestyles='dashed')
    # Bottom-right: y-driven mechanism with the y-quantile threshold marked.
    xvar = df.x1.loc[df.x1_case3 == 1]
    yvar = df.y.loc[df.x1_case3 == 1]
    ax22.plot(xvar, yvar, 'o',
              color='navy',
              markerfacecolor='w',
              alpha=0.7)
    ax22.set_title('y-dependent missing')
    ax22.set_xlabel('x1')
    ax22.set_ylabel('y')
    ax22.set_xlim(-3, 3)
    ax22.set_ylim(-10, 20)
    ax22.hlines([df.y.quantile(missing_ratio)], -10, 20, "red", linestyles='dashed')
    plt.savefig('../fig/sample_scatter.png', format='png')
    df.to_pickle('../data/sample_data.pkl')
if __name__ == "__main__":
    # Script entry point: simulate the data, plot, and pickle the frame.
    main()
| [
"kharada201612@gmail.com"
] | kharada201612@gmail.com |
30e3f22d1d8b4a6674374adab98577f321b5b556 | 2ca4670ce96145cca28183afb717c3eb5230cc0d | /newton_raphson.py | 2c31b154d4a8afb6d7e014c4bced8b68a3f1f8ab | [] | no_license | cpared/tp1_numerico | 2a01d63410929908464dbdc455e210b8f28ae1ce | c2fc4aa47878a855f5d918cab32a629b8f7970b6 | refs/heads/master | 2023-01-20T14:56:24.519378 | 2020-12-07T20:52:42 | 2020-12-07T20:52:42 | 313,680,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | """NEWTON RAPHSON"""
import numpy as np
def newton_raphson_x_cota(f, df, p0, cota_max, mostrar_tabla = False):
    """Newton-Raphson iteration until |p_n - p_{n-1}| <= cota_max.

    f/df: function and its derivative; p0: initial guess;
    mostrar_tabla: print one row per iteration.
    Returns (root approximation, iterations performed, history of (n, p_n)).
    """
    if mostrar_tabla:
        print("Iter:", 0, " Valor:", p0, " ", " |pn-pn-1|:---")
    aprox = p0
    iteracion = 0
    historia = [(0, aprox)]
    # Seed the error above the bound so the loop runs at least once.
    delta = cota_max + 1
    while delta > cota_max:
        iteracion += 1
        anterior = aprox
        aprox = anterior - f(anterior) / df(anterior)
        delta = abs(aprox - anterior)
        historia.append((iteracion, aprox))
        if mostrar_tabla:
            print("Iter:", iteracion, " Valor:", aprox, " ", " |pn-pn-1|:", delta)
    return aprox, iteracion, historia
def newton_raphson_x_iteraciones(f, df, p0, iteraciones, mostrar_tabla = False):
    """Run a fixed number of Newton-Raphson steps.

    f/df: function and its derivative; p0: initial guess;
    iteraciones: total table rows (must be >= 1; row 0 holds p0, so
    iteraciones - 1 update steps are performed);
    mostrar_tabla: print one row per iteration.
    Returns (last approximation, index of last iteration, Nx2 history array).
    """
    if mostrar_tabla:
        print("Iter:", 0, " Valor:", p0, " ", " |pn-pn-1|:---")
    pn = p0
    # FIX: initialise n before the loop; the original left it unbound when
    # iteraciones <= 1, raising UnboundLocalError at the return statement.
    n = 0
    historia = np.zeros([iteraciones, 2])
    historia[0] = (0, pn)
    for i in range(1, iteraciones):
        n = i
        pn_1 = pn
        pn = pn_1 - f(pn_1) / df(pn_1)
        cota_error = abs(pn - pn_1)
        historia[i] = (i, pn)
        if mostrar_tabla:
            print("Iter:", n, " Valor:", pn, " ", " |pn-pn-1|:", cota_error)
    return pn, n, historia
| [
"jiterman@fi.uba.ar"
] | jiterman@fi.uba.ar |
39e39e12899af657d4f5a9fb44f462f5bd8aa3b3 | ab6047ba4eda426c9e07a6575d7d10e0aa2478f8 | /communityc/community/forms.py | 6312c9531e917d0b1224701672d7c9dce71aa4a5 | [] | no_license | eermis1/SWE573 | 1e45bfa53282017d6a63fa5d63c551dd302a8ee9 | fe8d0974693a4a1f549303831791eb562edb3f1b | refs/heads/CommunityC-Test | 2022-12-08T15:13:50.950964 | 2020-01-10T21:30:42 | 2020-01-10T21:30:42 | 211,393,988 | 1 | 0 | null | 2022-12-08T07:01:27 | 2019-09-27T20:07:52 | CSS | UTF-8 | Python | false | false | 1,113 | py | from django import forms
from .models import (Community, Post, CommunityMembership, PostObject)
from django.contrib.auth.models import User
class CommunityCreateForm(forms.ModelForm):
    """ModelForm for creating a Community: name, description, and tag."""

    class Meta:
        model = Community
        fields = [
            "community_name",
            "community_description",
            "community_tag",
        ]
class PostTypeCreateForm(forms.ModelForm):
    """ModelForm for creating a Post type: title, description, and tag."""

    class Meta:
        model = Post
        fields = [
            "post_title",
            "post_description",
            "post_tag",
        ]
class UserRegistrationForm(forms.ModelForm):
    """Registration form for Django's User with a masked password input.

    NOTE(review): as a plain ModelForm this saves the password verbatim
    (no ``set_password`` hashing) — verify the view handles hashing.
    """

    password = forms.CharField(widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ["username", "email", "password"]
class CommunityMembershipForm(forms.ModelForm):
    """ModelForm for CommunityMembership; exposes no editable fields
    (the membership is created purely from the request context)."""

    class Meta:
        model = CommunityMembership
        fields = []
class PostObjectCreateForm(forms.ModelForm):
    """ModelForm for creating a PostObject: name, description, and tag."""

    class Meta:
        model = PostObject
        fields = [
            "post_object_name",
            "post_object_description",
            "post_object_tag",
        ]
class CommunityEditForm(forms.ModelForm):
    """ModelForm for editing a Community; mirrors CommunityCreateForm's fields."""

    class Meta:
        model = Community
        fields = [
            "community_name",
            "community_description",
            "community_tag",
        ]
| [
"36535914+eermis1@users.noreply.github.com"
] | 36535914+eermis1@users.noreply.github.com |
d1d2bb61c560e5e8cc6cca32013ff80ec60b9704 | fe4944d8be4ddff0860850519d3aa9bdceabe9bb | /Additional Content/Exploring/Project Code/learningzone.py | d8a42c7d63a876a89a3bb80511b31dca8dc6bcdf | [] | no_license | codingWithAndy/Thesis_Project | 4cbc73da369c7394f1b311b0a1c494572bb8ebdd | 89fc4fa9a7adc4d7c3035875db0b74a714bc5636 | refs/heads/master | 2023-09-02T23:17:12.175806 | 2021-11-20T09:14:40 | 2021-11-20T09:14:40 | 250,079,787 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,997 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'learningzonewindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
#from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import *
import mainmenu
import os
import sys
class LearningZone(object):
    """PyQt5 window hosting a web-based learning activity (k-means demo).

    Builds a 1920x1080 window with a QWebEngineView filling most of the
    screen and three buttons (home / free play / quiz) along the bottom.
    NOTE: the class-level attributes below run at import time — they create
    a QApplication and a QMainWindow as a side effect of importing this module.
    """
    app = QApplication(sys.argv)
    current_path = os.getcwd()
    MainWindow = QMainWindow()
    #print("current pathis:"+current_path)
    def setupUi(self, MainWindow):
        """Create and lay out all widgets on the given main window.

        NOTE(review): `MainWindow = MainWindow` is a no-op self-assignment;
        the parameter also shadows the class attribute of the same name.
        """
        MainWindow = MainWindow
        # Main Window set up
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1920, 1080)
        MainWindow.setStyleSheet("background-color: rgb(47, 85, 151)")
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        #Home Button set up
        self.homeButton = QPushButton(self.centralwidget)
        self.homeButton.setGeometry(QRect(670, 960, 121, 71))
        self.homeButton.setStyleSheet("background-color: rgb(3, 193, 161);"
                                           "border-radius: 15px;")
        self.homeButton.setText("")
        icon = QIcon()
        icon.addPixmap(QPixmap(self.current_path+"/Code/home-solid.svg"), QIcon.Normal, QIcon.Off)
        self.homeButton.setIcon(icon)
        self.homeButton.setIconSize(QSize(50, 60))
        self.homeButton.setObjectName("homeButton")
        # Free play button set up
        self.freePlayButton = QPushButton(self.centralwidget)
        self.freePlayButton.setGeometry(QRect(800, 960, 211, 70))
        font = QFont()
        font.setPointSize(30)
        self.freePlayButton.setFont(font)
        self.freePlayButton.setStyleSheet("background-color: rgb(3, 193, 161);\n"
                                          "border-radius: 15px;")
        icon1 = QIcon()
        icon1.addPixmap(QPixmap(self.current_path+"/Code/play-circle-regular.svg"), QIcon.Normal, QIcon.Off)
        self.freePlayButton.setIcon(icon1)
        self.freePlayButton.setIconSize(QSize(60, 60))
        self.freePlayButton.setObjectName("freePlayButton")
        # Quiz Button Set up
        self.quizButton = QPushButton(self.centralwidget)
        self.quizButton.setGeometry(QRect(1020, 960, 121, 71))
        self.quizButton.setStyleSheet("background-color: rgb(3, 193, 161);\n"
                                      "border-radius: 15px;")
        self.quizButton.setText("")
        icon2 = QIcon()
        icon2.addPixmap(QPixmap(
            self.current_path+"/Code/Screenshot 2020-06-26 at 11.46.35.png"), QIcon.Normal, QIcon.Off)
        self.quizButton.setIcon(icon2)
        self.quizButton.setIconSize(QSize(60, 60))
        self.quizButton.setObjectName("quizButton")
        #Web View Set up
        # The embedded browser loads the remote k-means activity page.
        self.widget = QWebEngineView(self.centralwidget)
        self.widget.setGeometry(QRect(20, 20, 1871, 921))
        self.widget.setObjectName("widget")
        self.widget.setStyleSheet("border-radius: 15px;")
        self.widget.setUrl(
            QUrl("https://snappygames.co.uk/Andy/kmeans.html"))
        self.widget.show()
        '''
        original code
        self.widget = QtWidgets.QWidget(self.centralwidget)
        self.widget.setGeometry(QtCore.QRect(20, 20, 1871, 921))
        self.widget.setObjectName("widget")
        '''
        # Additional features
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QMenuBar(MainWindow)
        self.menubar.setGeometry(QRect(0, 0, 1920, 22))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QMetaObject.connectSlotsByName(MainWindow)
        # Clicking Home swaps this window for the main menu.
        self.homeButton.clicked.connect(self.main_menu_clicked)
    def main_menu_clicked(self):
        """Open the main menu window and hide this one.

        NOTE(review): the bare name `MainWindow` here resolves to a module
        global, not the class attribute (class scope is not visible inside
        methods) — this likely raises NameError at runtime; confirm.
        """
        self.window = QMainWindow()
        self.ui = mainmenu.MainMenu()
        self.ui.setupUi(self.window)
        MainWindow.hide()
        self.window.show()
    def goHome(self):
        """Show a placeholder modal pop-up dialog."""
        # Create a pop up window for the test
        msg = QMessageBox()
        msg.setWindowTitle("Pop up window!")
        msg.setText("This is the main text!")
        x = msg.exec_() # This is needed to show the pop up!
    def retranslateUi(self, MainWindow):
        """Apply translatable UI strings (window title and button label)."""
        _translate = QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.freePlayButton.setText(_translate("MainWindow", "Free "))
'''
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
MainWindow = QMainWindow()
ui = LearningZone()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
'''
| [
"30691631+codingWithAndy@users.noreply.github.com"
] | 30691631+codingWithAndy@users.noreply.github.com |
89c24559f05274044212ebc28a2a067b431f986b | e5920f7c9885f24d0b3b5d64d7b31e6b8f515900 | /ePuck.py | 2d184093c08ecc6db842e72d74cae2614d715e72 | [] | no_license | Kryword/sia | fddb7a851285e45c243e9a7310db8d8f465495f1 | 4a96c1009a0235e879858e713cf84e07fb65f976 | refs/heads/master | 2020-04-27T11:50:31.978758 | 2019-04-11T08:35:43 | 2019-04-11T08:35:43 | 174,310,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,149 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ePuck.py
#
# Copyright 2010 Manuel Martín Ortiz <manuel.martin@itrblabs.eu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# -- ePuck.py --
#
# The aim of this library is to provide access to the ePuck robots
# through a bluetooth connection. Thus, you can write a program that
# read from the ePuck's sensors and write in their actuators, This
# will allow us to create advanced programs that can develop a wide
# variety of complex tasks. It is necesary that the ePuck has installed
# the Webot's fimware 1.4.2 or 1.4.3. You can find this fantastic
# simulator on this site: http://www.cyberbotics.com/
#
# This library is written in Python 2.6, and you can import it from
# any program written in Python (same version or later). In addition
# to this, you will also need two extra libraries:
#
# -> Python Bluetooth or Pybluez
# -> Python Image Library (PIL)
#
# In this package you will find some examples of how to use this library.
#
# You may expetience some problems when you work with your ePuck, We
# recommend you take into consideration the following special
# characteristic: we use a bluetooth communciation, therefore our bandwith
# is limited and we cannot expect to do too many tasks in short
# time; i.e: If you put the wheels speed to max and want
# to make a quick process of the images, you will know what I'm saying.
# So remember, you are processing in your computer, not on the ePuck,
# and you need to take the sensors data and write on the actuators
# values on the ePuck
#
# For further information and updates visit http://www.itrblabs.eu
import sys # System library
import bluetooth # Used for communications
import time # Used for image capture process
import struct # Used for Big-Endian messages
from PIL import Image # Used for the pictures of the camera
__package__ = "ePuck"
__docformat__ = "restructuredtext"
"""
:newfield company: Company
"""
__version__ = "1.2.2"
__author__ = "Manuel Martin Ortiz"
__license__ = "GPL"
__company__ = "RTDI - ITRB Labs"
__contact__ = ["manuel.martin@itrblabs.eu"]
# This dictionary have as keys the first character of the message, that
# is used to know the number of lines. If no key for the message, 1 line is assumed
DIC_MSG = {
"v": 2, # Version
"\n": 23, # Menu
"\x0c": 2, # Welcome
"k": 3, # Calibration
"R": 2 # Reset
}
# You have to use the keys of this dictionary for indicate on "enable" function
# the sensor that you want to read
DIC_SENSORS = {
"accelerometer" : "a",
"selector" : "c",
"motor_speed" : "e",
"camera" : "i",
"floor" : "m",
"proximity" : "n",
"light" : "o",
"motor_position" : "q",
"microphone" : "u"
}
# You have to use the keys of this dictionary for indicate the operating
# mode of the camera
CAM_MODE = {
"GREY_SCALE" : 0,
"RGB_365" : 1,
"YUV" : 2,
"LINEAR_CAM" : 3
}
# You can use three diferents Zoom in the camera
CAM_ZOOM = (1, 4, 8)
class ePuck():
"""
This class represent an ePuck object
"""
    def __init__(self, address, debug = False):
        """
        Constructor process. Only stores configuration and default state;
        no bluetooth traffic happens until connect() is called.

        :param address: Robot's direction in AA:BB:CC:DD:EE:FF format
        :type address: MAC Address
        :param debug: If you want more verbose information, useful for debugging
        :type debug: Boolean
        :return: ePuck object
        """
        # Monitoring Variables
        self.messages_sent = 0
        self.messages_received = 0
        self.version = __version__
        self.debug = debug
        # Connection Attributes
        self.socket = None
        self.address = address
        self.conexion_status = False
        # Camera attributes (populated by _refresh_camera_parameters())
        self._cam_width = None
        self._cam_height = None
        self._cam_enable = False
        self._cam_zoom = None
        self._cam_mode = None
        self._cam_size = None
        # Sensors and actuators lists (consumed by step())
        self._sensors_to_read = []
        self._actuators_to_write = []
        # Sensors: last values read; refreshed by _read_sensors()
        self._accelerometer = (0, 0, 0)
        self._accelerometer_filtered = False
        # NOTE: (0) is just the int 0, not a one-element tuple
        self._selector = (0)
        self._motor_speed = (0, 0) # left and right motor
        self._motor_position = (0, 0) # left and right motor
        self._camera_parameters = (0, 0, 0, 0)
        self._floor_sensors = (0, 0, 0)
        self._proximity = (0, 0, 0, 0, 0, 0, 0, 0)
        self._light_sensor = (0, 0, 0, 0, 0, 0, 0, 0)
        self._microphone = (0, 0, 0)
        self._pil_image = None
        # Leds
        self._leds_status = [False] * 8
#
# Private methods
#
    def _debug(self, *txt):
        """
        Show debug information and data, only works if debug information
        is enable (see "set_debug()")

        :param txt: Data to be showed separated by comma
        :type txt: Any
        """
        if self.debug:
            # Prefix is wrapped in ANSI escape codes so it shows in red on stderr.
            print >> sys.stderr, '\033[31m[ePuck]:\033[0m ', ' '.join([str(e) for e in txt])
        return 0
    def _recv(self, n = 4096):
        """
        Receive data from the robot over the bluetooth socket.

        :param n: Number of bytes you want to receive
        :type n: int
        :return: Data received from the robot as string if it was successful, raise an exception if not
        :rtype: String
        :raise Exception: If there is a communication problem
        """
        if not self.conexion_status:
            raise Exception, 'There is not connection'
        try:
            line = self.socket.recv(n)
            # Monitoring counter, see also messages_sent in _send().
            self.messages_received += 1
        except bluetooth.btcommon.BluetoothError, e:
            # Re-raise as a plain Exception so callers don't need pybluez types.
            txt = 'Bluetooth communication problem: ' + str(e)
            self._debug(txt)
            raise Exception, txt
        else:
            return line
    def _send(self, message):
        """
        Send data to the robot over the bluetooth socket.

        :param message: Message to be sent
        :type message: String
        :return: Number of bytes sent if it was successful. -1 if not
        :rtype: int
        """
        if not self.conexion_status:
            raise Exception, 'There is not connection'
        try:
            n = self.socket.send(message)
            self.messages_sent += 1
        except Exception, e:
            # Unlike _recv(), send failures are reported via the -1 sentinel,
            # not an exception.
            self._debug('Send problem:', e)
            return -1
        else:
            return n
    def _read_image(self):
        """
        Returns an image obtained from the robot's camera. For communication
        issues you only can get 1 image per second.
        The decoded picture is stored in self._pil_image (see get_image()).
        """
        # Thanks to http://www.dailyenigma.org/e-puck-cam.shtml for
        # the code for get the image from the camera
        # Binary command: negative ASCII code of 'I' requests an image frame.
        msg = struct.pack(">bb", - ord("I"), 0)
        try:
            n = self._send(msg)
            self._debug("Reading Image: sending " + repr(msg) + " and " + str(n) + " bytes")
            # We have to add 3 to the size, because with the image we
            # get "mode", "width" and "height"
            size = self._cam_size + 3
            # Keep reading until the full frame has arrived.
            img = self._recv(size)
            while len(img) != size:
                img += self._recv(size)
            # Create the PIL Image; raw buffer is 16-bit BGR.
            image = Image.frombuffer("RGB", (self._cam_width, self._cam_height),
                                     img, "raw",
                                     "BGR;16", 0, 1)
            # Camera is mounted upside down relative to the robot frame.
            image = image.rotate(180)
            self._pil_image = image
        except Exception, e:
            # Best-effort: a failed frame leaves the previous image in place.
            self._debug('Problem receiving an image: ', e)
    def _refresh_camera_parameters(self):
        """
        Method for refresh the camera parameters, it's called for some
        private methods. Queries the robot with the ASCII 'I' command and
        caches mode, width, height, zoom and frame size.

        :return: False on any communication/parse failure, None on success
        """
        try:
            msg = self.send_and_receive("I").split(',')
        # NOTE(review): bare except silently swallows every error here.
        except:
            return False
        else:
            self._cam_mode, \
            self._cam_width, \
            self._cam_height, \
            self._cam_zoom, \
            self._cam_size = [int(i) for i in msg[1:6]]
            self._camera_parameters = self._cam_mode, self._cam_width, self._cam_height, self._cam_zoom
    def _write_actuators(self):
        """
        Write in the robot the actuators values. Don't use directly,
        instead use 'step()'.
        LED ('L'), motor speed ('D') and motor position ('P') commands go
        out in binary (no ACK); everything else uses the ASCII protocol.
        """
        # Not all messages reply with AKC, only Ascii messages
        acks = ['j', 't']
        # We make a copy of the actuators list
        actuators = self._actuators_to_write[:]
        for m in actuators:
            if m[0] == 'L':
                # Leds
                msg = struct.pack('<bbb', - ord(m[0]), m[1], m[2])
                n = self._send(msg)
                self._debug('Binary message sent of [' + str(n) + '] bytes: ' + str(struct.unpack('<bbb', msg)))
            elif m[0] == 'D' or m[0] == 'P':
                # Set motor speed or set motor position
                msg = struct.pack('<bhh', - ord(m[0]), m[1], m[2])
                n = self._send(msg)
                self._debug('Binary message sent of [' + str(n) + '] bytes: ' + str(struct.unpack('<bhh', msg)))
            else:
                # Others actuators, parameters are separated by commas
                msg = ",".join(["%s" % i for i in m])
                reply = self.send_and_receive(msg)
                if reply == 'j':
                    # 'j' acknowledges a camera config change; re-read geometry.
                    self._refresh_camera_parameters()
                if reply not in acks:
                    self._debug('Unknown ACK reply from ePcuk: ' + reply)
            # Processed commands are removed from the pending queue.
            self._actuators_to_write.remove(m)
        return
    def _read_sensors(self):
        """
        This method is used for read the ePuck's sensors. Don't use directly,
        instead use 'step()'. Iterates over the sensor codes registered via
        enable() and refreshes the corresponding cached attribute.
        """
        # We can read sensors in two ways: Binary Mode and Ascii Mode
        # Ascii mode is slower than Binary mode, therefore, we use
        # Binary mode whenever we can. Not all sensors are available in
        # Binary mode
        def send_binary_mode(parameters):
            # Auxiliar function for sent messages in binary modes
            # Parameters: ('Char to be sent', 'Size of reply waited', 'Format of the teply')
            self._debug('Sending binary message: ', ','.join('%s' % i for i in parameters))
            message = struct.pack(">bb", - ord(parameters[0]), 0)
            self._send(message)
            # Accumulate chunks until the full fixed-size reply has arrived.
            reply = self._recv()
            while len(reply) < parameters[1]:
                reply += self._recv()
            reply = struct.unpack(parameters[2], reply)
            self._debug('Binary message recived: ', reply)
            return reply
        # Read differents sensors
        for s in self._sensors_to_read:
            if s == 'a':
                # Accelerometer: filtered mode uses 'A' (3 x uint32),
                # raw mode uses 'a' (3 x uint16).
                if self._accelerometer_filtered:
                    parameters = ('A', 12, '@III')
                else:
                    parameters = ('a', 6, '@HHH')
                reply = send_binary_mode(parameters)
                if type(reply) is tuple and type(reply[0]) is int:
                    self._accelerometer = reply
            elif s == 'n':
                # Proximity sensors
                parameters = ('N', 16, '@HHHHHHHH')
                reply = send_binary_mode(parameters)
                if type(reply) is tuple and type(reply[0]) is int:
                    self._proximity = reply
            elif s == 'm':
                # Floor sensors
                parameters = ('M', 10, '@HHHHH')
                reply = send_binary_mode(parameters)
                if type(reply) is tuple and type(reply[0]) is int:
                    self._floor_sensors = reply
            elif s == 'q':
                # Motor position sensor
                parameters = ('Q', 4, '@HH')
                reply = send_binary_mode(parameters)
                if type(reply) is tuple and type(reply[0]) is int:
                    self._motor_position = reply
            elif s == 'o':
                # Light sensors
                parameters = ('O', 16, '@HHHHHHHH')
                reply = send_binary_mode(parameters)
                if type(reply) is tuple and type(reply[0]) is int:
                    self._light_sensor = reply
            elif s == 'u':
                # Microphone
                parameters = ('u', 6, '@HHH')
                reply = send_binary_mode(parameters)
                if type(reply) is tuple and type(reply[0]) is int:
                    self._microphone = reply
            elif s == 'e':
                # Motor Speed
                parameters = ('E', 4, '@HH')
                reply = send_binary_mode(parameters)
                if type(reply) is tuple and type(reply[0]) is int:
                    self._motor_speed = reply
            elif s == 'i':
                # Do nothing for the camera, is an independent process
                pass
            else:
                # Fallback: ASCII protocol, comma-separated reply.
                reply = self.send_and_receive(s).split(",")
                t = reply[0]
                response = tuple(reply[1:len(reply)])
                if t == "c":
                    # Selector (NOTE: stored as the string token from the reply)
                    self._selector = response[0]
                else:
                    self._debug('Unknow type of sensor to read' + str(reply))
#
# Public methods
#
    def connect(self):
        """
        Connect with the physic ePuck robot over RFCOMM channel 1.

        :return: If the connexion was succesful (False when already connected)
        :rtype: Boolean
        :except Exception: If there are a communication proble, for example, the robot is off
        """
        if self.conexion_status:
            self._debug('Already connected')
            return False
        try:
            self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            self.socket.connect((self.address, 1))
            # Short timeout keeps the retry loop in send_and_receive() responsive.
            self.socket.settimeout(0.5)
        except Exception, e:
            txt = 'Connection problem: \n' + str(e)
            self._debug(txt)
            raise Exception, txt
        self.conexion_status = True
        self._debug("Connected")
        # NOTE(review): reset() is defined elsewhere in this class (not in
        # this chunk); it puts the robot in a known state after connecting.
        self.reset()
        return True
    def disconnect(self):
        """
        Disconnect from ePuck robot. Same as 'close()' (thin alias).
        """
        self.close()
    def close(self):
        """
        Close the connection with the robot. Same as 'disconnect()'

        :return: 0 if all ok (NOTE: implicitly None when there was no
                 connection to close)
        :rtype: int
        :raise Exception: if it was a problem closing the connection
        """
        if self.conexion_status:
            try:
                # Stop the robot (stop() is defined elsewhere in this class)
                self.stop()
                # Close the socket
                self.socket.close()
                self.conexion_status = False
            except Exception, e:
                raise Exception, 'Closing connection problem: \n' + str(e)
            else:
                return 0
    def set_debug(self, debug):
        """
        Set / unset debug information (see _debug()).

        :param debug: True or False, as you want or not Debug information
        :type debug: Boolean
        """
        self.debug = debug
    def send_and_receive(self, msg):
        """
        Send an Ascii message to the robot and return the reply. You can
        use it, but I don't recommend, use 'enable()', 'disable()'
        and 'step()' instead

        :param msg: The message you want to send
        :type msg: String
        :return: Response of the robot (NOTE(review): implicitly None after
                 4 failed attempts — callers should be prepared for that)
        :rtype: String
        """
        # Check the connection
        if not self.conexion_status:
            raise Exception, 'There is not connection'
        # Make sure the Message is a string
        message = str(msg)
        # Add carriage return if not
        if not message.endswith('\n'):
            message += '\n'
        # Check the lines of the waited reply (per-command table; default 1)
        if message[0] in DIC_MSG:
            lines = DIC_MSG[message[0]]
        else:
            lines = 1
        self._debug('Waited lines:', lines)
        # We make 5 tries before desist
        tries = 1
        while tries < 5:
            # Send the message
            bytes = self._send(message)
            self._debug('Message sent:', repr(message))
            self._debug('Bytes sent:', bytes)
            try:
                # Receive the reply. As we want to receive a line, we have to insist
                reply = ''
                while reply.count('\n') < lines:
                    reply += self._recv()
                if message[0] == 'R':
                    # For some reason that I don't understand, if you send a reset
                    # command 'R', sometimes you recive 1 or 2 lines of 'z,Command not found\r\n'
                    # Therefor I have to remove it from the expected message: The Hello message
                    reply = reply.replace('z,Command not found\r\n','')
                self._debug('Message received: ', reply)
                return reply.replace('\r\n','')
            except Exception, e:
                # Timeout from the 0.5s socket deadline: resend and retry.
                tries += 1
                self._debug('Communication timeout, retrying')
    def save_image(self, name = 'ePuck.jpg'):
        """
        Save image from ePuck's camera to disk

        :param name: Image name, ePuck.jpg as default
        :type name: String
        :return: Operation result (NOTE(review): PIL's Image.save() returns
                 None, so the "success" value is None, not True; only the
                 no-image case returns False)
        :rtype: Boolean
        """
        if self._pil_image:
            return self._pil_image.save(name)
        else:
            return False
    def get_accelerometer(self):
        """
        Return Accelerometer values in (x, y, z)

        :return: Accelerometer values
        :rtype: Tuple
        """
        # Cached value, refreshed by _read_sensors() when 'accelerometer' is enabled.
        return self._accelerometer
    def get_selector(self):
        """
        Return the selector position (0-15)

        :return: Selector value (NOTE(review): _read_sensors() stores the
                 raw ASCII token, so this may be a string, not an int)
        :rtype: int
        """
        return self._selector
    def get_motor_speed(self):
        """
        Return the motor speed. Correct values are in the range [-1000, 1000]

        :return: Motor speed as (left, right)
        :rtype: Tuple
        """
        # Cached value, refreshed by _read_sensors() when 'motor_speed' is enabled.
        return self._motor_speed
    def get_camera_parameters(self):
        """
        Return the camera parameters as a tuple
        (mode, width, height, zoom)

        :return: Camera parameters
        :rtype: Tuple
        """
        # Cached value, refreshed by _refresh_camera_parameters().
        return self._camera_parameters
    def get_floor_sensors(self):
        """
        Return the floor sensors values as (left, center, right)

        :return: Floor sensors values
        :rtype: Tuple
        """
        # Cached value, refreshed by _read_sensors() when 'floor' is enabled.
        return self._floor_sensors
    def get_proximity(self):
        """
        Return the values of the 8 proximity sensors

        :return: Proximity sensors values
        :rtype: Tuple
        """
        # Cached value, refreshed by _read_sensors() when 'proximity' is enabled.
        return self._proximity
    def get_light_sensor(self):
        """
        Return the value of the light sensor

        :return: Ligth sensor value (8 readings)
        :rtype: Tuple
        """
        # Cached value, refreshed by _read_sensors() when 'light' is enabled.
        return self._light_sensor
    def get_motor_position(self):
        """
        Return the position of the left and right motor as a tuple

        :return: Motor position
        :rtype: Tuple
        """
        # Cached value, refreshed by _read_sensors() when 'motor_position' is enabled.
        return self._motor_position
    def get_microphone(self):
        """
        Return the volume of the three microphones

        :return: Microphones values
        :rtype: Tuple
        """
        # Cached value, refreshed by _read_sensors() when 'microphone' is enabled.
        return self._microphone
    def is_connected(self):
        """
        Return a boolean value that indicate if the robot is connected to the PC

        :return: If the robot is connected to the PC
        :rtype: Boolean
        """
        return self.conexion_status
    def get_image(self):
        """
        Return the last image captured from the ePuck's camera (after a 'step()').
        None if there are not images captured. The image is an PIL object

        :return: Image from robot's camera
        :rtype: PIL
        """
        return self._pil_image
def get_sercom_version(self):
"""
:return: Return the ePuck's firmware version
:rtype: String
"""
return self.send_and_receive("v")
def set_accelerometer_filtered(self, filter = False):
"""
Set filtered way for accelerometer, False is default value
at the robot start
:param filter: True or False, as you want
:type filter: Boolean
"""
self._accelerometer_filtered = filter
def disable(self, *sensors):
"""
Sensor(s) that you want to get disable in the ePuck
:param sensors: Name of the sensors, take a look to DIC_SENSORS. Multiple sensors can be separated by commas
:type sensors: String
:return: Sensors enabled
:rtype: List
:except Exception: Some wrong happened
"""
for sensor in sensors:
try:
if not DIC_SENSORS.has_key(sensor):
self._debug('Sensor "' + sensor + '" not in DIC_SENSORS')
break
if sensor == "camera":
self._cam_enable = False
if DIC_SENSORS[sensor] in self._sensors_to_read:
l = list(self._sensors_to_read)
l.remove(DIC_SENSORS[sensor])
self._sensors_to_read = tuple(l)
self._debug('Sensor "' + sensor + '" disabled')
else:
self._debug('Sensor "' + sensor + '" alrady disabled')
except Exception, e:
self._debug('Something wrong happened to disable the sensors: ', e)
return self.get_sensors_enabled()
def enable(self, *sensors):
"""
Sensor(s) that you want to get enable in the ePuck
:param sensors: Name of the sensors, take a look to DIC_SENSORS. Multiple sensors can be separated by commas
:type sensors: String
:return: Sensors enabled
:rtype: List
:except Exception: Some wrong happened
"""
# Using the * as a parameters, we get a tuple with all sensors
for sensor in sensors:
try:
if not DIC_SENSORS.has_key(sensor):
self._debug('Sensor "' + sensor + '" not in DIC_SENSORS')
break
if sensor == "camera":
# If the sensor is the Camera, then we refresh the
# camera parameters
if not self._cam_enable:
try:
self._refresh_camera_parameters()
self._cam_enable = True
self.timestamp = time.time()
except:
break
if DIC_SENSORS[sensor] not in self._sensors_to_read:
l = list(self._sensors_to_read)
l.append(DIC_SENSORS[sensor])
self._sensors_to_read = tuple(l)
self._debug('Sensor "' + sensor + '" enabled')
else:
self._debug('Sensor "' + sensor + '" alrady enabled')
except Exception, e:
self._debug('Something wrong happened to enable the sensors: ', e)
return self.get_sensors_enabled()
def get_sensors_enabled(self):
"""
:return: Return a list of sensors thar are active
:rtype: List
"""
l = []
for sensor in DIC_SENSORS:
if DIC_SENSORS[sensor] in self._sensors_to_read:
l.append(sensor)
return l
def set_motors_speed(self, l_motor, r_motor):
"""
Set the motors speed. The MAX and MIN speed of the ePcuk is [-1000, 1000]
:param l_motor: Speed of left motor
:type l_motor: int
:param r_motor: Speed of right motor
:type r_motor: int
"""
# I don't check the MAX and MIN speed because this check
# will be made by the ePuck's firmware. Here we need speed
# and we lose time mading recurrent chekings
self._actuators_to_write.append(("D", int(l_motor), int(r_motor)))
return True
def set_motor_position(self, l_wheel, r_wheel):
"""
Set the motor position, useful for odometry
:param l_wheel: left wheel
:type l_wheel: int
:param r_wheel: right wheel
:type r_wheel: int
"""
self._actuators_to_write.append(("P", l_wheel, r_wheel))
def set_led(self, led_number, led_value):
"""
Turn on/off the leds
:param led_number: If led_number is other than 0-7, all leds are set to the indicated value.
:type led_number: int
:param led_value:
- 0 : Off
- 1 : On (Red)
- 2 : Inverse
:type led_value: int
"""
led = abs(led_number)
value = abs(led_value)
if led < 9:
self._actuators_to_write.append(("L", led, value))
if value == 0:
self._leds_status[led] = False
elif value == 1:
self._leds_status[led] = True
else:
self._leds_status[led] = not self._leds_status[led]
return True
else:
return False
def set_body_led(self, led_value):
"""
Turn on /off the body led
:param led_value:
- 0 : Off
- 1 : On (green)
- 2 : Inverse
:type led_value: int
"""
value = abs(led_value)
self._actuators_to_write.append(("L", 8, value))
if value == 0:
self._leds_status[8] = False
elif value == 1:
self._leds_status[8] = True
else:
self._leds_status[8] = not self._leds_status[8]
return True
def set_front_led(self, led_value):
"""
Turn on /off the front led
:type led_value: int
:param led_value:
- 0 : Off
- 1 : On (green)
- 2 : Inverse
"""
value = abs(led_value)
self._actuators_to_write.append(("L", 9, value))
if value == 0:
self._leds_status[9] = False
elif value == 1:
self._leds_status[9] = True
else:
self._leds_status[9] = not self._leds_status[9]
return True
def set_sound(self, sound):
"""
Reproduce a sound
:param sound: Sound in the range [1,5]. Other for stop
:type sound: int
"""
self._actuators_to_write.append(("T", sound))
return True
def set_camera_parameters(self, mode, width, height, zoom):
"""
Set the camera parameters
:param mode: GREY_SCALE, LINEAR_CAM, RGB_365, YUM
:type mode: String
:param width: Width of the camera
:type width: int
:param height: Height of the camera
:type height: int
:param zoom: 1, 4, 8
:type zoom: int
"""
if mode in CAM_MODE:
self._cam_mode = CAM_MODE[mode]
else:
self._debug(ERR_CAM_PARAMETERS, "Camera mode")
return -1
if int(zoom) in CAM_ZOOM:
self._cam_zoom = zoom
else:
self._debug(ERR_CAM_PARAMETERS, "Camera zoom")
return -1
if self.conexion_status and int(width) * int(height) <= 1600:
# 1600 are for the resolution no greater than 40x40, I have
# detect some problems
self._actuators_to_write.append(("J",
self._cam_mode,
width,
height,
self._cam_zoom))
return 0
def calibrate_proximity_sensors(self):
"""
Calibrate proximity sensors, keep off any object in 10 cm
:return: Successful operation
:rtype: Boolean
"""
reply = self.send_and_receive("k",tries_timeout = 25)
if reply[1] == "k":
return True
else:
return False
def reset(self):
"""
Reset the robot
:return: Successful operation
:rtype: Boolean
:raise Exception: If there is not connection
"""
if not self.conexion_status:
raise Exception, 'There is not connection'
msg = self.send_and_receive("R")
self._debug(msg)
return True
def stop(self):
"""
Stop the motor and turn off all leds
:return: Successful operation
:rtype: Boolean
:raise Exception: If there is not connection
"""
if not self.conexion_status:
raise Exception, 'There is not connection'
reply = self.send_and_receive("S")
self._debug(reply)
if reply == "s":
return True
else:
return False
def step(self):
"""
Method to update the sensor readings and to reflect changes in
the actuators. Before invoking this method is not guaranteed
the consistency of the sensors
"""
if not self.conexion_status:
raise Exception, 'There is not connection'
self._write_actuators()
self._read_sensors()
# Get an image in 1 FPS
if self._cam_enable and time.time() - self.timestamp > 1:
self._read_image()
self.timestamp = time.time()
| [
"kryword@gmail.com"
] | kryword@gmail.com |
fe4d8e834dc7bbb53e1be5010c4e21712fe83976 | 3d95af684365e097175aa037aca30f3481652c0a | /файлы цикл for/student.py | d70c4563a3d0ebd65ee0e78143b69da32826dcd1 | [] | no_license | Nazira06/first_git_lesson | 56b2577a19028a10fbbb9074b367dffa0a4d9b4e | a43103c6239a31c1d2369967ee4833a7622fcd1f | refs/heads/main | 2023-01-05T12:31:45.920032 | 2020-11-08T15:53:39 | 2020-11-08T15:53:39 | 306,528,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | num = int(input("Ииедите кол-во студентов:"))
student_file = open('student.txt', 'w')
for i in range(num):
student = input()
student_file.write(student+ ' ')
student_file.close()
student_read = open('student.txt')
student_list = student_read.readlines()
for student in student_list:
print(student) | [
"nazikkydyralieva@gmail.com"
] | nazikkydyralieva@gmail.com |
a67fb27375b43889f824b029ae74c17f764a8f1a | 02abe19ffa1b832c989911807cc01c10c83c92bc | /src/djangosaml2_spid/views.py | e3e370420663cb3759b027e457c2ff651a02319b | [
"Apache-2.0"
] | permissive | peppelinux/djangosaml2_spid | b244a8b007486f815ac86aed44fa28466bd5373e | b7bda086e8da081e28aa337e6eab1b5ef84679d0 | refs/heads/main | 2023-03-03T21:07:01.046213 | 2021-02-16T09:24:38 | 2021-02-16T09:24:38 | 335,000,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,792 | py | import base64
import logging
import random
import saml2
import string
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.dispatch import receiver
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.urls import reverse
from djangosaml2.conf import get_config
from djangosaml2.cache import IdentityCache, OutstandingQueriesCache
from djangosaml2.cache import StateCache
from djangosaml2.conf import get_config
from djangosaml2.overrides import Saml2Client
from djangosaml2.signals import post_authenticated, pre_user_save
from djangosaml2.utils import (
available_idps, get_custom_setting,
get_idp_sso_supported_bindings, get_location,
validate_referral_url
)
from djangosaml2.views import (finish_logout,
_get_subject_id,
SPConfigMixin, View)
from saml2 import BINDING_HTTP_REDIRECT, BINDING_HTTP_POST
from saml2.authn_context import requested_authn_context
from saml2.metadata import entity_descriptor, sign_entity_descriptor
from saml2.sigver import security_context
from .utils import repr_saml
logger = logging.getLogger('djangosaml2')
def index(request):
""" Barebone 'diagnostics' view, print user attributes if logged in + login/logout links.
"""
if request.user.is_authenticated:
out = "LOGGED IN: <a href={0}>LOGOUT</a><br>".format(settings.LOGOUT_URL)
out += "".join(['%s: %s</br>' % (field.name, getattr(request.user, field.name))
for field in request.user._meta.get_fields()
if field.concrete])
return HttpResponse(out)
else:
return HttpResponse("LOGGED OUT: <a href={0}>LOGIN</a>".format(settings.LOGIN_URL))
# @receiver(pre_user_save, sender=User)
# def custom_update_user(sender, instance, attributes, user_modified, **kargs):
# """ Default behaviour does not play nice with booleans encoded in SAML as u'true'/u'false'.
# This will convert those attributes to real booleans when saving.
# """
# for k, v in attributes.items():
# u = set.intersection(set(v), set([u'true', u'false']))
# if u:
# setattr(instance, k, u.pop() == u'true')
# return True # I modified the user object
def spid_sp_authn_request(conf, selected_idp, binding,
name_id_format, authn_context,
sig_alg, dig_alg, next_url=''):
client = Saml2Client(conf)
logger.debug(f'Redirecting user to the IdP via {binding} binding.')
# use the html provided by pysaml2 if no template was specified or it didn't exist
# SPID want the fqdn of the IDP, not the SSO endpoint
location_fixed = selected_idp
location = client.sso_location(selected_idp, binding)
authn_req = saml2.samlp.AuthnRequest()
authn_req.destination = location_fixed
# spid-testenv2 preleva l'attribute consumer service dalla authnRequest (anche se questo sta già nei metadati...)
authn_req.attribute_consuming_service_index = "0"
# issuer
issuer = saml2.saml.Issuer()
issuer.name_qualifier = client.config.entityid
issuer.text = client.config.entityid
issuer.format = "urn:oasis:names:tc:SAML:2.0:nameid-format:entity"
authn_req.issuer = issuer
# message id
authn_req.id = saml2.s_utils.sid()
authn_req.version = saml2.VERSION # "2.0"
authn_req.issue_instant = saml2.time_util.instant()
name_id_policy = saml2.samlp.NameIDPolicy()
# del(name_id_policy.allow_create)
name_id_policy.format = name_id_format # settings.SPID_NAMEID_FORMAT
authn_req.name_id_policy = name_id_policy
# settings.SPID_AUTH_CONTEXT
authn_context = requested_authn_context(class_ref=authn_context)
authn_req.requested_authn_context = authn_context
# force_auth = true only if SpidL >= 2
# if 'SpidL1' in authn_context.authn_context_class_ref[0].text:
# force_authn = 'false'
# else:
force_authn = 'true'
authn_req.force_authn = force_authn
# end force authn
# settings.SPID_DEFAULT_BINDING
authn_req.protocol_binding = binding
assertion_consumer_service_url = client.config._sp_endpoints['assertion_consumer_service'][0][0]
authn_req.assertion_consumer_service_url = assertion_consumer_service_url
authn_req_signed = client.sign(authn_req, sign_prepare=False,
sign_alg=sig_alg,
digest_alg=dig_alg,
)
logger.debug(f'AuthRequest to {selected_idp}: {authn_req_signed}')
relay_state = next_url or reverse('djangosaml2:saml2_echo_attributes')
http_info = client.apply_binding(binding,
authn_req_signed, location,
sign=True,
sigalg=sig_alg,
relay_state = relay_state)
return dict(http_response = http_info,
authn_request = authn_req_signed,
relay_state = relay_state,
session_id = authn_req.id
)
def spid_login(request,
config_loader_path=None,
wayf_template='wayf.html',
authorization_error_template='djangosaml2/auth_error.html'):
"""SAML Authorization Request initiator
This view initiates the SAML2 Authorization handshake
using the pysaml2 library to create the AuthnRequest.
It uses the SAML 2.0 Http POST protocol binding.
"""
logger.debug('SPID Login process started')
next_url = request.GET.get('next', settings.LOGIN_REDIRECT_URL)
if not next_url:
logger.warning('The next parameter exists but is empty')
next_url = settings.LOGIN_REDIRECT_URL
# Ensure the user-originating redirection url is safe.
if not validate_referral_url(request, next_url):
next_url = settings.LOGIN_REDIRECT_URL
if callable(request.user.is_authenticated):
redirect_authenticated_user = getattr(settings,
'SAML_IGNORE_AUTHENTICATED_USERS_ON_LOGIN',
True)
if redirect_authenticated_user:
return HttpResponseRedirect(next_url)
else:
logger.debug('User is already logged in')
return render(request, authorization_error_template, {
'came_from': next_url})
# this works only if request came from wayf
selected_idp = request.GET.get('idp', None)
conf = get_config(config_loader_path, request)
# is a embedded wayf needed?
idps = available_idps(conf)
if selected_idp is None and len(idps) > 1:
logger.debug('A discovery process is needed')
return render(request, wayf_template,
{
'available_idps': idps.items(),
'next_url': next_url
}
)
else:
# otherwise is the first one
try:
selected_idp = selected_idp or list(idps.keys())[0]
except TypeError as e:
logger.error('Unable to know which IdP to use')
return HttpResponse(text_type(e))
binding = BINDING_HTTP_POST
logger.debug(f'Trying binding {binding} for IDP {selected_idp}')
# ensure our selected binding is supported by the IDP
supported_bindings = get_idp_sso_supported_bindings(selected_idp, config=conf)
if binding != BINDING_HTTP_POST:
raise UnsupportedBinding('IDP %s does not support %s or %s',
selected_idp, BINDING_HTTP_POST, BINDING_HTTP_REDIRECT)
# SPID things here
login_response = spid_sp_authn_request(conf,
selected_idp,
binding,
settings.SPID_NAMEID_FORMAT,
settings.SPID_AUTH_CONTEXT,
settings.SPID_SIG_ALG,
settings.SPID_DIG_ALG,
next_url
)
session_id = login_response['session_id']
http_response = login_response['http_response']
# success, so save the session ID and return our response
logger.debug(f'Saving session-id {session_id} in the OutstandingQueries cache')
oq_cache = OutstandingQueriesCache(request.saml_session)
oq_cache.set(session_id, next_url)
return HttpResponse(http_response['data'])
@login_required
def spid_logout(request, config_loader_path=None, **kwargs):
"""SAML Logout Request initiator
This view initiates the SAML2 Logout request
using the pysaml2 library to create the LogoutRequest.
"""
state = StateCache(request.saml_session)
conf = get_config(config_loader_path, request)
client = Saml2Client(conf, state_cache=state,
identity_cache=IdentityCache(request.saml_session))
# whatever happens, however, the user will be logged out of this sp
auth.logout(request)
state.sync()
subject_id = _get_subject_id(request.saml_session)
if subject_id is None:
logger.warning(
'The session does not contain the subject id for user %s',
request.user)
logger.error("Looks like the user %s is not logged in any IdP/AA", subject_id)
return HttpResponseBadRequest("You are not logged in any IdP/AA")
slo_req = saml2.samlp.LogoutRequest()
binding = settings.SPID_DEFAULT_BINDING
location_fixed = subject_id.name_qualifier
location = location_fixed
slo_req.destination = location_fixed
# spid-testenv2 preleva l'attribute consumer service dalla authnRequest (anche se questo sta già nei metadati...)
slo_req.attribute_consuming_service_index = "0"
issuer = saml2.saml.Issuer()
issuer.name_qualifier = client.config.entityid
issuer.text = client.config.entityid
issuer.format = "urn:oasis:names:tc:SAML:2.0:nameid-format:entity"
slo_req.issuer = issuer
# message id
slo_req.id = saml2.s_utils.sid()
slo_req.version = saml2.VERSION # "2.0"
slo_req.issue_instant = saml2.time_util.instant()
# oggetto
slo_req.name_id = subject_id
try:
session_info = client.users.get_info_from(slo_req.name_id,
subject_id.name_qualifier,
False)
except KeyError as e:
logger.error(f'SPID Logout error: {e}')
return HttpResponseRedirect('/')
session_indexes = [session_info['session_index']]
# aggiungere session index
if session_indexes:
sis = []
for si in session_indexes:
if isinstance(si, saml2.samlp.SessionIndex):
sis.append(si)
else:
sis.append(saml2.samlp.SessionIndex(text=si))
slo_req.session_index = sis
slo_req.protocol_binding = binding
assertion_consumer_service_url = client.config._sp_endpoints['assertion_consumer_service'][0][0]
slo_req.assertion_consumer_service_url = assertion_consumer_service_url
slo_req_signed = client.sign(slo_req, sign_prepare=False,
sign_alg=settings.SPID_SIG_ALG,
digest_alg=settings.SPID_DIG_ALG)
session_id = slo_req.id
_req_str = slo_req_signed
logger.debug('LogoutRequest to {}: {}'.format(subject_id.name_qualifier,
repr_saml(_req_str)))
# get slo from metadata
slo_location = None
# for k,v in client.metadata.metadata.items():
# idp_nq = v.entity.get(subject_id.name_qualifier)
# if idp_nq:
# slo_location = idp_nq['idpsso_descriptor'][0]['single_logout_service'][0]['location']
slo_location = client.metadata.single_logout_service(subject_id.name_qualifier,
binding,
"idpsso")[0]['location']
if not slo_location:
logger.error('Unable to know SLO endpoint in {}'.format(subject_id.name_qualifier))
return HttpResponse(text_type(e))
http_info = client.apply_binding(binding,
_req_str,
slo_location,
sign=True,
sigalg=settings.SPID_SIG_ALG
)
state.sync()
return HttpResponse(http_info['data'])
def spid_sp_metadata(conf):
metadata = entity_descriptor(conf)
# this will renumber acs starting from 0 and set index=0 as is_default
cnt = 0
for attribute_consuming_service in metadata.spsso_descriptor.attribute_consuming_service:
attribute_consuming_service.index = str(cnt)
cnt += 1
cnt = 0
for assertion_consumer_service in metadata.spsso_descriptor.assertion_consumer_service:
assertion_consumer_service.is_default = 'true' if not cnt else ''
assertion_consumer_service.index = str(cnt)
cnt += 1
# nameformat patch... non proprio standard
for reqattr in metadata.spsso_descriptor.attribute_consuming_service[0].requested_attribute:
reqattr.name_format = None #"urn:oasis:names:tc:SAML:2.0:attrname-format:basic"
# reqattr.is_required = None
reqattr.friendly_name = None
# remove unecessary encryption and digest algs
# supported_algs = ['http://www.w3.org/2009/xmldsig11#dsa-sha256',
# 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256']
# new_list = []
# for alg in metadata.extensions.extension_elements:
# if alg.attributes.get('Algorithm') in supported_algs:
# new_list.append(alg)
# metadata.extensions.extension_elements = new_list
# ... Piuttosto non devo specificare gli algoritmi di firma/criptazione...
metadata.extensions = None
# attribute consuming service service name patch
service_name = metadata.spsso_descriptor.attribute_consuming_service[0].service_name[0]
service_name.lang = 'it'
service_name.text = conf._sp_name
##############
# avviso 29 v3
#
# https://www.agid.gov.it/sites/default/files/repository_files/spid-avviso-n29v3-specifiche_sp_pubblici_e_privati_0.pdf
saml2.md.SamlBase.register_prefix(settings.SPID_PREFIXES)
contact_map = settings.SPID_CONTACTS
cnt = 0
metadata.contact_person = []
for contact in contact_map:
spid_contact = saml2.md.ContactPerson()
spid_contact.contact_type = contact['contact_type']
contact_kwargs = {
'email_address' : [contact['email_address']],
'telephone_number' : [contact['telephone_number']]
}
if contact['contact_type'] == 'other':
spid_contact.loadd(contact_kwargs)
contact_kwargs['contact_type'] = contact['contact_type']
spid_extensions = saml2.ExtensionElement(
'Extensions',
namespace='urn:oasis:names:tc:SAML:2.0:metadata'
)
for k,v in contact.items():
if k in contact_kwargs: continue
ext = saml2.ExtensionElement(
k,
namespace=settings.SPID_PREFIXES['spid'],
text=v
)
spid_extensions.children.append(ext)
elif contact['contact_type'] == 'billing':
contact_kwargs['company'] = contact['company']
spid_contact.loadd(contact_kwargs)
spid_extensions = saml2.ExtensionElement(
'Extensions',
namespace='urn:oasis:names:tc:SAML:2.0:metadata'
)
elements = {}
for k,v in contact.items():
if k in contact_kwargs: continue
ext = saml2.ExtensionElement(
k,
namespace=settings.SPID_PREFIXES['fpa'],
text=v
)
elements[k] = ext
# DatiAnagrafici
IdFiscaleIVA = saml2.ExtensionElement(
'IdFiscaleIVA',
namespace=settings.SPID_PREFIXES['fpa'],
)
Anagrafica = saml2.ExtensionElement(
'Anagrafica',
namespace=settings.SPID_PREFIXES['fpa'],
)
Anagrafica.children.append(elements['Denominazione'])
IdFiscaleIVA.children.append(elements['IdPaese'])
IdFiscaleIVA.children.append(elements['IdCodice'])
DatiAnagrafici = saml2.ExtensionElement(
'DatiAnagrafici',
namespace=settings.SPID_PREFIXES['fpa'],
)
if elements.get('CodiceFiscale'):
DatiAnagrafici.children.append(elements['CodiceFiscale'])
DatiAnagrafici.children.append(IdFiscaleIVA)
DatiAnagrafici.children.append(Anagrafica)
CessionarioCommittente = saml2.ExtensionElement(
'CessionarioCommittente',
namespace=settings.SPID_PREFIXES['fpa'],
)
CessionarioCommittente.children.append(DatiAnagrafici)
# Sede
Sede = saml2.ExtensionElement(
'Sede',
namespace=settings.SPID_PREFIXES['fpa'],
)
Sede.children.append(elements['Indirizzo'])
Sede.children.append(elements['NumeroCivico'])
Sede.children.append(elements['CAP'])
Sede.children.append(elements['Comune'])
Sede.children.append(elements['Provincia'])
Sede.children.append(elements['Nazione'])
CessionarioCommittente.children.append(Sede)
spid_extensions.children.append(CessionarioCommittente)
spid_contact.extensions = spid_extensions
metadata.contact_person.append(spid_contact)
cnt += 1
#
# fine avviso 29v3
###################
# metadata signature
secc = security_context(conf)
sign_dig_algs = dict(sign_alg = conf._sp_signing_algorithm,
digest_alg = conf._sp_digest_algorithm)
eid, xmldoc = sign_entity_descriptor(metadata, None, secc, **sign_dig_algs)
return xmldoc
def metadata_spid(request, config_loader_path=None, valid_for=None):
"""Returns an XML with the SAML 2.0 metadata for this
SP as configured in the settings.py file.
"""
conf = get_config(config_loader_path, request)
xmldoc = spid_sp_metadata(conf)
return HttpResponse(content=str(xmldoc).encode('utf-8'),
content_type="text/xml; charset=utf8")
class EchoAttributesView(LoginRequiredMixin, SPConfigMixin, View):
"""Example view that echo the SAML attributes of an user
"""
def get(self, request, *args, **kwargs):
state, client = self.get_state_client(request)
subject_id = _get_subject_id(request.saml_session)
try:
identity = client.users.get_identity(subject_id, check_not_on_or_after=False)
except AttributeError:
return HttpResponse("No active SAML identity found. Are you sure you have logged in via SAML?")
return render(request, 'spid_echo_attributes.html', {'attributes': identity[0]})
| [
"giuseppe.demarco@unical.it"
] | giuseppe.demarco@unical.it |
fb9fff047ea9f91c6306fde600390b8cc180df7f | ebd6f68d47e192da7f81c528312358cfe8052c8d | /swig/Examples/test-suite/python/cpp11_uniform_initialization_runme.py | ecb468ccbab5774868fae2adf8e1162f13d56457 | [
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | inishchith/DeepSpeech | 965ad34d69eb4d150ddf996d30d02a1b29c97d25 | dcb7c716bc794d7690d96ed40179ed1996968a41 | refs/heads/master | 2021-01-16T16:16:05.282278 | 2020-05-19T08:00:33 | 2020-05-19T08:00:33 | 243,180,319 | 1 | 0 | Apache-2.0 | 2020-02-26T05:54:51 | 2020-02-26T05:54:50 | null | UTF-8 | Python | false | false | 554 | py | import cpp11_uniform_initialization
var1 = cpp11_uniform_initialization.cvar.var1
if var1.x != 5:
raise RuntimeError
var2 = cpp11_uniform_initialization.cvar.var2
if var2.getX() != 2:
raise RuntimeError
m = cpp11_uniform_initialization.MoreInit()
if m.charptr != None:
raise RuntimeError, m.charptr
m.charptr = "hello sir"
if m.charptr != "hello sir":
raise RuntimeError, m.charptr
if m.more1(m.vi) != 15:
raise RuntimeError, m.vi
if m.more1([-1, 1, 2]) != 2:
raise RuntimeError, m.vi
if m.more1() != 10:
raise RuntimeError
| [
"inishchith@gmail.com"
] | inishchith@gmail.com |
e195fdc275495ddc8b7925c4bba3563b62d1a6d0 | 10fa5efb88edb63712a7bd7afea5b0937652aa1b | /myDevice.py | b14576ae85d8d6cd1bddedd818f1865e8f5bd208 | [] | no_license | ricardoxlopes/iot_smart_home | c524dfbbd816443b87463da99d25e07784d8b559 | a98857d84011d315d9ad9e882639e30d7e8bae49 | refs/heads/master | 2020-03-12T18:42:26.441851 | 2018-06-16T01:16:22 | 2018-06-16T01:16:22 | 130,767,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,668 | py | import paho.mqtt.client as PahoMQTT
import requests
import json
from sensorReader import SensorReader
import cherrypy
import socket
from message import Msg
import os
import datetime
"""
CONFIGURATIONS
webservice
device host port
catalog endpoint: host port
Modular device, can be modified to be reused.
To be set 3 options: [host port,catalog endpoint, available resources]
Has a MQTT publisher to send messages
Handles and makes REST requests
Initialy it registers to the catalog
Raspberrypy 3 breadboard connections:
GPIO17 - dht11 sensor
GPIO18 - push button
GPIO23 - motion sensor
GPIO27 - red LED
Ground
pin 6- dht11
pin 14- push button
pin 9- motion sensor
pin 20- red LED
Power
pin 1 3v3-dht11
pin 2 5v- motion
"""
class MyDevice(object):
exposed = True
def __init__(self, endpoint, catalogEndpoint, resources, filePath):
print "Init device"
self.endpoint = endpoint
self.catalogEndpoint = catalogEndpoint
self.resources = resources
self.filePath = filePath
self.runningResources = {}
self.broker = self.getBroker()
self.myDevice = self.registerDevice()
def GET(self, *uri, **params):
if len(uri) == 0:
return Msg("Raspberry device").info()
elif len(uri) == 1:
if uri[0] == "resource":
resourceId = params["id"]
if resourceId in self.resources:
if "info" in self.myDevice:
if resourceId in self.runningResources:
return self.stopResource(resourceId)
else:
return self.startResource(resourceId)
else:
return Msg("Not registered").error()
else:
return Msg("Resource not available").error()
elif uri[0] == "reboot":
self.myDevice = self.registerDevice()
return Msg("Device rebooted").info()
else:
return Msg("Invalid uri").error()
else:
return Msg("Invalid number of uris").error()
def getBroker(self):
"Get broker from catalog"
try:
r = requests.get(self.catalogEndpoint+'/broker')
except requests.exceptions.RequestException as e:
error = Msg("Unable to get broket").error()
print e
print error
return error
else:
print "Requested broker, received: "+r.text
return r.text
def registerDevice(self):
"Register device to catalog"
print "Registering device..."
device=None
if os.path.exists(self.filePath):
print "Read device from persistence..."
jsonData = open(self.filePath).read()
jsonData = json.loads(jsonData)
device = jsonData["device"]
device["timeStamp"]=str(datetime.datetime.now())
device = json.dumps(device)
else: device = json.dumps({"endPoints": self.endpoint,
"resources": self.resources})
try:
r = requests.post(self.catalogEndpoint +
'/addDevice', data=device)
except requests.exceptions.RequestException as e:
error = Msg("unable to register").error()
print e
print error
return error
else:
info = json.loads(r.text)["info"]
deviceInfo = info["device"]
if not os.path.exists(self.filePath):
with open(self.filePath, "a+") as outfile:
json.dump(deviceInfo, outfile)
outfile.close()
print "created deviceConfiguration.json"
return Msg("New device registered.").info()
else:
return Msg("Device already registered.").info()
def startResource(self, resourceId):
"Start resources by id. Possible to add new handlers for new resources such as sensors"
if resourceId == "humidity_temperature_sensor":
name = "humidity_temperature_sensor1"
mySensor = SensorReader(name, "humidity_temperature_sensor")
mySensor.start()
self.runningResources[resourceId] = mySensor
return Msg("Resource "+name+" started").info()
elif resourceId == "motion_sensor":
name = "motion_sensor1"
mySensor = SensorReader(name, "motion_sensor")
mySensor.start()
self.runningResources[resourceId] = mySensor
return Msg("Resource "+name+" started").info()
elif resourceId == "button_sensor":
name = "button_sensor1"
mySensor = SensorReader(name, "button_sensor")
mySensor.start()
self.runningResources[resourceId] = mySensor
return Msg("Resource "+name+" started").info()
elif resourceId == "stereo":
name = "stereo1"
mySensor = SensorReader(name, "stereo")
mySensor.start()
self.runningResources[resourceId] = mySensor
return Msg("Resource "+name+" started").info()
else:
Msg("Resource "+name+" not available").error()
def stopResource(self, resourceId):
"Stop resource by id"
# stop thread
self.runningResources[resourceId].stop()
# delete element from dictionary
del self.runningResources[resourceId]
return Msg("Stopped resource "+resourceId).info()
if __name__ == '__main__':
filePath = "Configuration/deviceConfiguration.json"
if os.path.exists(filePath):
jsonData = open(filePath).read()
jsonData = json.loads(jsonData)
device = jsonData["device"]
endpoint = device["endPoints"]
resources = device["resources"]
catalogEndpoint = jsonData["catalogEndpoint"]
newstr = endpoint.replace("/","")
newstr=newstr.split(":")
host = newstr[1]
port = int(newstr[2])
print "Reading device config..."
else: print "ERROR: Missing "+filePath+"!"
# Catalog endpoint
# catalogEndpoint = "http://192.168.1.6:8080"
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True
}
}
cherrypy.tree.mount(
MyDevice(endpoint, catalogEndpoint, resources, filePath), '/', conf)
cherrypy.config.update({'server.socket_host': host})
cherrypy.config.update({'server.socket_port': port})
cherrypy.engine.start()
cherrypy.engine.block() | [
"ricardoxlopes@hotmail.com"
] | ricardoxlopes@hotmail.com |
263ca44396449877e60487e7cc4d5e0973e022e1 | 51e56d62ba688b5cc323a3ee3890b87934ae7682 | /22_ISPAIN.py | f3503b81aff6601cf5cdb37df426f357e7be3e01 | [] | no_license | BogdansProgsCo/OLIMP_FREEDOM | b239df814af90e1dc5fd0aff15ee1c5e921a61f6 | 3e631a223b6215d136104eba70bc35203dfe47cf | refs/heads/main | 2023-05-14T13:19:14.760929 | 2021-06-14T18:21:40 | 2021-06-14T18:21:40 | 376,906,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75,217 | py | import requests
from bs4 import BeautifulSoup
import re
# Spoof a desktop Firefox user agent so the stats site serves the page.
headers = {"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'}
# First team of the Spanish league (champ=3305): f_team=513 ("Alaves" below).
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=513&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
# Keep a local copy of the fetched page (debug aid).
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
# Match results live in elements with class "bluelink".
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
# Extract "F:F (H:H)" fragments (full-time score plus bracketed score —
# presumably the first-half score, per the variable names below).
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
# Separate the fragments back into full-time ('1:0 ') and
# first-half ('(1:0)') score lists.
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
# Reverse so the newest match comes first.
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
# Report file every function below appends to.
a = "SPAIN.txt"
# Minimum alternating-pattern counts before a stat is written to the report.
drw_NOdrw = 7
NOdrw_drw = 7
od_ev = 7
ev_od = 7
und15_ovr15 = 7
ovr15_und15 = 7
und25_ovr25 = 7
ovr25_und25 = 7
both_noboth = 7
noboth_both = 7
drw_NOdrw_ft = 7
NOdrw_drw_ft = 7
goal_NOgoal_ft = 7
NOgoal_goal_ft = 7
def adding_team():
    """Append the league banner and the first team's section header to ``a``."""
    league = "SPAIN"
    team = "Alaves"
    with open(a, "a+") as report:
        report.write('\n _______ ' + league + ' _______')
        report.write('\n\n --------------------------------- ' + team)
def clean_file():
    """Empty the report file (mode 'w+' truncates it on open)."""
    with open(a, 'w+') as report:
        report.seek(0)
def create_file():
    """Ensure the report file exists without changing its contents."""
    with open(a, "a+"):
        pass
def draws_NOdraws(x, threshold=None):
    """Count how well *x* (full-time scores, newest first, e.g. '1:1 ')
    fits the alternating pattern draw, no-draw, draw, ...

    Only the first 19 results are examined (as in the original unrolled
    checks); each position matching the expected alternating sign adds 1.
    When the count reaches ``threshold`` (default: module-level
    ``drw_NOdrw``) it is printed and appended to the report file ``a``.
    Returns the count; an empty list yields 0 (previously an IndexError).
    """
    draw_scores = {'0:0 ', '1:1 ', '2:2 ', '3:3 ', '4:4 ', '5:5 ', '6:6 '}
    olimp = ['+' if i in draw_scores else '-' for i in x]
    # even positions expect '+', odd positions expect '-'
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = drw_NOdrw if threshold is None else threshold
    if count >= limit:
        print(f'ничья_НЕничья = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n draws_NOdraws = ' + str(count))
    return count
def NOdraws_draws(x, threshold=None):
    """Count how well *x* (full-time scores, newest first) fits the
    alternating pattern no-draw, draw, no-draw, ...

    First 19 results only; matching positions each add 1.  Reaching
    ``threshold`` (default ``NOdrw_drw``) prints the count and appends it
    to report file ``a``.  Returns the count (0 for an empty list).
    """
    draw_scores = {'0:0 ', '1:1 ', '2:2 ', '3:3 ', '4:4 ', '5:5 ', '6:6 '}
    olimp = ['-' if i in draw_scores else '+' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = NOdrw_drw if threshold is None else threshold
    if count >= limit:
        print(f'НЕничья_ничья = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n NOdraws_draws = ' + str(count))
    return count
def odd_even(x, threshold=None):
    """Count how well *x* fits the alternating pattern odd-total,
    even-total, odd-total, ... (full-time scores, newest first).

    "Even" means membership in the explicit score list below, exactly as
    in the original code — note the list stops at '5:5 '/'7:1 ', so e.g.
    '6:6 ' is NOT listed and counts as odd (quirk preserved on purpose).
    First 19 results only; reaching ``threshold`` (default ``od_ev``)
    prints and appends the count to ``a``.  Returns the count.
    """
    even_scores = {'0:0 ', '1:1 ', '2:2 ', '3:3 ', '4:4 ', '5:5 ',
                   '2:0 ', '0:2 ', '1:3 ', '3:1 ', '4:2 ', '2:4 ',
                   '3:5 ', '5:3 ', '4:6 ', '6:4 ', '4:0 ', '0:4 ',
                   '1:5 ', '5:1 ', '2:6 ', '6:2 ', '3:7 ', '7:3 ',
                   '0:6 ', '6:0 ', '1:7 ', '7:1 '}
    olimp = ['-' if i in even_scores else '+' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = od_ev if threshold is None else threshold
    if count >= limit:
        print(f'НЕчет_чет = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n odd_even = ' + str(count))
    return count
def even_odd(x, threshold=None):
    """Count how well *x* fits the alternating pattern even-total,
    odd-total, even-total, ... (full-time scores, newest first).

    Mirror image of :func:`odd_even`; the same explicit "even" score list
    is used (so '6:6 ' again counts as odd — original quirk preserved).
    First 19 results only; reaching ``threshold`` (default ``ev_od``)
    prints and appends the count to ``a``.  Returns the count.
    """
    even_scores = {'0:0 ', '1:1 ', '2:2 ', '3:3 ', '4:4 ', '5:5 ',
                   '2:0 ', '0:2 ', '1:3 ', '3:1 ', '4:2 ', '2:4 ',
                   '3:5 ', '5:3 ', '4:6 ', '6:4 ', '4:0 ', '0:4 ',
                   '1:5 ', '5:1 ', '2:6 ', '6:2 ', '3:7 ', '7:3 ',
                   '0:6 ', '6:0 ', '1:7 ', '7:1 '}
    olimp = ['+' if i in even_scores else '-' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = ev_od if threshold is None else threshold
    if count >= limit:
        print(f'чет_НЕчет = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n even_odd = ' + str(count))
    return count
def under15_over15(x, threshold=None):
    """Count how well *x* fits the alternating pattern under-1.5 goals,
    over-1.5 goals, ... (full-time scores, newest first).

    Under 1.5 means a total of 0 or 1 goals ('0:0 ', '1:0 ', '0:1 ').
    First 19 results only; reaching ``threshold`` (default
    ``und15_ovr15``) prints and appends the count to ``a``.
    Returns the count (0 for an empty list).
    """
    under_scores = {'0:0 ', '1:0 ', '0:1 '}
    olimp = ['+' if i in under_scores else '-' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = und15_ovr15 if threshold is None else threshold
    if count >= limit:
        print(f'мен_бол 1.5 = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n under_over 1.5 = ' + str(count))
    return count
def over15_under15(x, threshold=None):
    """Count how well *x* fits the alternating pattern over-1.5 goals,
    under-1.5 goals, ... (full-time scores, newest first).

    Mirror image of :func:`under15_over15`.  First 19 results only;
    reaching ``threshold`` (default ``ovr15_und15``) prints and appends
    the count to ``a``.  Returns the count (0 for an empty list).
    """
    under_scores = {'0:0 ', '1:0 ', '0:1 '}
    olimp = ['-' if i in under_scores else '+' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = ovr15_und15 if threshold is None else threshold
    if count >= limit:
        print(f'бол_мен 1.5 = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n over_under 1.5 = ' + str(count))
    return count
def under25_over25(x, threshold=None):
    """Count how well *x* fits the alternating pattern under-2.5 goals,
    over-2.5 goals, ... (full-time scores, newest first).

    Under 2.5 means a total of at most 2 goals.  First 19 results only;
    reaching ``threshold`` (default ``und25_ovr25``) prints and appends
    the count to ``a``.  Returns the count (0 for an empty list).
    """
    under_scores = {'0:0 ', '1:1 ', '1:0 ', '0:1 ', '2:0 ', '0:2 '}
    olimp = ['+' if i in under_scores else '-' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = und25_ovr25 if threshold is None else threshold
    if count >= limit:
        print(f'мен_бол 2.5 = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n under_over 2.5 = ' + str(count))
    return count
def over25_under25(x, threshold=None):
    """Count how well *x* fits the alternating pattern over-2.5 goals,
    under-2.5 goals, ... (full-time scores, newest first).

    Mirror image of :func:`under25_over25`.  First 19 results only;
    reaching ``threshold`` (default ``ovr25_und25``) prints and appends
    the count to ``a``.  Returns the count (0 for an empty list).
    """
    under_scores = {'0:0 ', '1:1 ', '1:0 ', '0:1 ', '2:0 ', '0:2 '}
    olimp = ['-' if i in under_scores else '+' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = ovr25_und25 if threshold is None else threshold
    if count >= limit:
        print(f'бол_мен 2.5 = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n over_under 2.5 = ' + str(count))
    return count
def both_noboth_score(x, threshold=None):
    """Count how well *x* fits the alternating pattern both-teams-scored,
    not-both-scored, ... (full-time scores, newest first).

    "Not both" means one side finished on 0 (the explicit list below).
    First 19 results only; reaching ``threshold`` (default
    ``both_noboth``) prints and appends the count to ``a``.
    Returns the count (0 for an empty list).
    """
    shutout_scores = {'0:0 ', '1:0 ', '0:1 ', '2:0 ', '0:2 ', '0:3 ',
                      '3:0 ', '4:0 ', '0:4 ', '0:5 ', '5:0 ', '0:6 ',
                      '6:0 ', '0:7 ', '7:0 '}
    olimp = ['-' if i in shutout_scores else '+' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = both_noboth if threshold is None else threshold
    if count >= limit:
        print(f'обе_необе забили = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n both_noboth score = ' + str(count))
    return count
def noboth_both_score(x, threshold=None):
    """Count how well *x* fits the alternating pattern not-both-scored,
    both-teams-scored, ... (full-time scores, newest first).

    Mirror image of :func:`both_noboth_score`.  First 19 results only;
    reaching ``threshold`` (default ``noboth_both``) prints and appends
    the count to ``a``.  Returns the count (0 for an empty list).
    """
    shutout_scores = {'0:0 ', '1:0 ', '0:1 ', '2:0 ', '0:2 ', '0:3 ',
                      '3:0 ', '4:0 ', '0:4 ', '0:5 ', '5:0 ', '0:6 ',
                      '6:0 ', '0:7 ', '7:0 '}
    olimp = ['+' if i in shutout_scores else '-' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = noboth_both if threshold is None else threshold
    if count >= limit:
        print(f'необе_обе забили = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n noboth_both score = ' + str(count))
    return count
def draws_NOdraws_first_time(x, threshold=None):
    """Count how well *x* (first-half scores, newest first, e.g. '(1:1)')
    fits the alternating pattern draw, no-draw, draw, ...

    First 19 results only; reaching ``threshold`` (default
    ``drw_NOdrw_ft``) prints and appends the count to ``a``.
    Returns the count (0 for an empty list).
    """
    draw_scores = {'(0:0)', '(1:1)', '(2:2)', '(3:3)', '(4:4)', '(5:5)'}
    olimp = ['+' if i in draw_scores else '-' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = drw_NOdrw_ft if threshold is None else threshold
    if count >= limit:
        print(f'ничья_НЕничья 1-й тайм = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n draws_NOdraws 1st time = ' + str(count))
    return count
def NOdraws_draws_first_time(x, threshold=None):
    """Count how well *x* (first-half scores, newest first) fits the
    alternating pattern no-draw, draw, no-draw, ...

    Mirror image of :func:`draws_NOdraws_first_time`.  First 19 results
    only; reaching ``threshold`` (default ``NOdrw_drw_ft``) prints and
    appends the count to ``a``.  Returns the count (0 for an empty list).
    """
    draw_scores = {'(0:0)', '(1:1)', '(2:2)', '(3:3)', '(4:4)', '(5:5)'}
    olimp = ['-' if i in draw_scores else '+' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = NOdrw_drw_ft if threshold is None else threshold
    if count >= limit:
        print(f'НЕничья_ничья 1-й тайм = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n NOdraws_draws 1st time = ' + str(count))
    return count
def goal_NOgoal_first_time(x, threshold=None):
    """Count how well *x* (first-half scores, newest first) fits the
    alternating pattern goal-scored, goalless, goal-scored, ...

    Goalless means exactly '(0:0)'.  First 19 results only; reaching
    ``threshold`` (default ``goal_NOgoal_ft``) prints and appends the
    count to ``a``.  Returns the count (0 for an empty list).
    """
    olimp = ['-' if i == '(0:0)' else '+' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = goal_NOgoal_ft if threshold is None else threshold
    if count >= limit:
        print(f'гол-НЕгол 1-й тайм = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n goal_NOgoal 1st time = ' + str(count))
    return count
def NOgoal_goal_first_time(x, threshold=None):
    """Count how well *x* (first-half scores, newest first) fits the
    alternating pattern goalless, goal-scored, goalless, ...

    Mirror image of :func:`goal_NOgoal_first_time`.  First 19 results
    only; reaching ``threshold`` (default ``NOgoal_goal_ft``) prints and
    appends the count to ``a``.  Returns the count (0 for an empty list).
    """
    olimp = ['+' if i == '(0:0)' else '-' for i in x]
    count = sum(1 for idx, mark in enumerate(olimp[:19])
                if mark == ('+' if idx % 2 == 0 else '-'))
    limit = NOgoal_goal_ft if threshold is None else threshold
    if count >= limit:
        print(f'НЕгол_гол 1-й тайм = {count}')
        with open(a, "a+") as new_file:
            new_file.write('\n NOgoal_goal 1st time = ' + str(count))
    return count
# Alaves: reset the report file, write its headers, then append every
# pattern statistic computed from the results fetched above.
clean_file()
create_file()
adding_team()
# Full-time based patterns.
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
# First-half based patterns.
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
# ---- Atl_Bilbao (f_team=511): fetch results and append streak stats ----
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=511&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
    # Redefined for each team: only the section header text changes.
    b = "Atl_Bilbao"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
# ---- Atlletico_Madrid (f_team=517): fetch results and append streak stats ----
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=517&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
    # Redefined for each team: only the section header text changes.
    b = "Atlletico_Madrid"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
# ---- Barcelona (f_team=514): fetch results and append streak stats ----
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=514&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
    # Redefined for each team: only the section header text changes.
    b = "Barcelona"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
# ---- Cadis (f_team=505): fetch results and append streak stats ----
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=505&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
    # Redefined for each team: only the section header text changes.
    b = "Cadis"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
# ---- Celta_Vigo (f_team=507): fetch results and append streak stats ----
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=507&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
    # Redefined for each team: only the section header text changes.
    b = "Celta_Vigo"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
# ---- Eibar (f_team=525): fetch results and append streak stats ----
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=525&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
    # Redefined for each team: only the section header text changes.
    b = "Eibar"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
# ---- Elche (f_team=526): fetch results and append streak stats ----
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=526&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
    # Redefined for each team: only the section header text changes.
    b = "Elche"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
# ---- Getafe (f_team=510): fetch results and append streak stats ----
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=510&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
    # Redefined for each team: only the section header text changes.
    b = "Getafe"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=587&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Granada"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=567&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Huesca"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=521&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Levante"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=501&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Osasuna"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=520&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Real_Betis"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=506&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Real_Madrid"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=503&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Sevilla"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=512&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Sociedad"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=519&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Valencia"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=527&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Valladolid"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
url_both = 'http://allscores.club/soccer/new_ftour.php?champ=3305&f_team=502&chome=0&new_tid=3305'
r = requests.get(url_both, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print("_____________________________________")
print(soup.title.text)
allscores = soup.select(".bluelink")
one = ("...".join((str(i) for i in allscores)))
two = re.findall(r'[0-9]{1}[:-][0-9]{1}\s[(-][0-9]{1}[:-][0-9]{1}[\)-]', one)
three = (".".join((str(i) for i in two)))
four = (three.replace('.', ' '))
full_time = re.findall(r'[0-9]{1}[:-][0-9]{1}\s', four)
first_half_time = re.findall(r'[(][0-9]{1}[:][0-9]{1}[)]', four)
rev_full_time = list(reversed(full_time))
rev_first_half_time = list(reversed(first_half_time))
def adding_team():
b = "Villareal"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws_NOdraws(rev_full_time)
NOdraws_draws(rev_full_time)
odd_even(rev_full_time)
even_odd(rev_full_time)
under15_over15(rev_full_time)
over15_under15(rev_full_time)
under25_over25(rev_full_time)
over25_under25(rev_full_time)
both_noboth_score(rev_full_time)
noboth_both_score(rev_full_time)
draws_NOdraws_first_time(rev_first_half_time)
NOdraws_draws_first_time(rev_first_half_time)
goal_NOgoal_first_time(rev_first_half_time)
NOgoal_goal_first_time(rev_first_half_time)
| [
"noreply@github.com"
] | BogdansProgsCo.noreply@github.com |
68afe330c21ed952c00d70f50b885708fd1a5ba0 | 7ab510889b1a2fc8c81c3ffb7d0983f682b4b20a | /train_model.py | b35601becbe004d4116abad6d7accc362307fefa | [] | no_license | ravelantunes/CarND-Behavioral-Cloning-P3 | 625f87b2089297eefbcf97c8e3210efe94e7e6d7 | fd537d81060cb2cc19e2ed6b032da409bfa63b5c | refs/heads/master | 2021-01-19T18:42:13.715844 | 2017-04-17T02:49:46 | 2017-04-17T02:49:46 | 88,375,614 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,400 | py | import csv
import cv2
import numpy as np
import os
import glob
import pre_processing
# ---- data loading and train/validation split ------------------------------
path = './data'
extension = 'csv'
# Every driving-log CSV recorded under ./data.
existing_csvs = list(glob.glob('data/*.{}'.format(extension)))
print('Training with %s files' % len(existing_csvs))
# Concatenate the rows of all logs into one flat list of samples.
lines = []
for csv_file_name in existing_csvs:
    with open('./' + csv_file_name) as csvfile:
        lines.extend(csv.reader(csvfile))
print('%s data points' % len(lines))
from sklearn.model_selection import train_test_split
import sklearn
from random import shuffle
# 70/30 split of the raw log rows.
train_samples, validation_samples = train_test_split(lines, test_size=0.3)
batch_size = 64
EPOCHS = 5
WIDTH, HEIGHT = pre_processing.size[0], pre_processing.size[1]
def generator(samples, batch_size=32):
    """Endless batch generator for Keras ``fit_generator``.

    For each log row, loads the center/left/right camera frames, applies the
    project pre-processing and resize, offsets the steering angle for the
    side cameras, then doubles the batch with horizontally flipped copies.

    Yields shuffled ``(X, y)`` numpy arrays forever (one yield per batch).
    """
    # Keep the samples as a plain list: random.shuffle() on a 2-D numpy
    # array swaps *views* of rows, which can silently duplicate/clobber
    # rows instead of permuting them.
    samples = list(samples)
    num_samples = len(samples)
    # Loop-invariant tuning constants (were re-assigned on every image).
    side_camera_correction = 0.2   # steering offset for left/right frames
    side_image_threshold = 0.3
    while 1:
        shuffle(samples)  # re-shuffle once per pass over the data
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            measurements = []
            for batch_sample in batch_samples:
                for i in range(3):  # 0 = center, 1 = left, 2 = right camera
                    measurement = float(batch_sample[3])
                    # NOTE(review): this condition is always False
                    # (-0.3 > m > 0.3 is impossible), so side frames are
                    # never actually skipped.  Kept as-is to preserve
                    # behavior; the intended test was probably
                    # -threshold < m < threshold — confirm.
                    if i != 0 and (-side_image_threshold > measurement > side_image_threshold):
                        continue
                    source_path = batch_sample[i]
                    filename = source_path.split('/')[-1]
                    current_path = "./data/IMG/" + filename
                    image = cv2.imread(current_path)
                    image = pre_processing.process(image)
                    image = cv2.resize(image, (WIDTH, HEIGHT))
                    images.append(image)
                    if i == 1:   # left camera: steer right
                        measurement += side_camera_correction
                    if i == 2:   # right camera: steer left
                        measurement -= side_camera_correction
                    # Clamp to the valid steering range [-1, 1].
                    measurement = min(max(measurement, -1.0), 1.0)
                    measurements.append(float(measurement))
            # Double the batch with horizontal flips and negated steering.
            augmented_images, augmented_measurements = [], []
            for image, measurement in zip(images, measurements):
                augmented_images.append(image)
                augmented_measurements.append(measurement)
                augmented_images.append(cv2.flip(image, 1))
                augmented_measurements.append(measurement * -1.0)
            X_train = np.array(augmented_images)
            y_train = np.array(augmented_measurements)
            yield sklearn.utils.shuffle(X_train, y_train)
# Each collected image also gets a flipped copy, so the generator produces
# roughly 2x images per source row.  NOTE(review): side-camera frames can
# add more than that — confirm this factor matches the generator's output.
augmentation_factor = 2
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Convolution2D, Input, Layer
from keras.layers import Cropping2D, Dropout, Reshape
def custom_model():
    """Build, train and save an NVIDIA-style CNN steering regressor.

    Normalizes pixels to [-0.5, 0.5], stacks five conv layers (each preceded
    by dropout), then a small dense head ending in a single steering output.
    Trains with the module-level generators and writes ``model.h5``.
    """
    model = Sequential()
    model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(HEIGHT, WIDTH, 3)))
    drop_rate = 0.2
    # Strided 5x5 convolutions, each preceded by dropout.
    for n_filters in (24, 36, 48):
        model.add(Dropout(drop_rate))
        model.add(Convolution2D(n_filters, 5, 5, subsample=(2, 2), activation='relu'))
    # Two unstrided 3x3 convolutions.
    for n_filters in (64, 64):
        model.add(Dropout(drop_rate))
        model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
    model.add(Dropout(drop_rate))
    model.add(Flatten())
    # Dense head down to the single steering-angle output.
    for units in (100, 50, 10):
        model.add(Dense(units))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    # Epoch sizes scaled by the augmentation factor (the /1 and /2 are
    # preserved from the original tuning).
    model.fit_generator(train_generator, samples_per_epoch=(len(train_samples) * augmentation_factor) / 1,
                        validation_data=validation_generator,
                        nb_val_samples=(len(validation_samples) * augmentation_factor)/2, nb_epoch=EPOCHS)
    model.save('model.h5')
custom_model()
def vgg_network():
    """Train a regression head on top of a frozen ImageNet VGG16 base.

    Normalizes and crops the input, freezes all VGG16 conv layers, adds a
    dense head ending in one steering output, checkpoints after every epoch
    and saves the final model to ``model-vgg.h5``.
    """
    from keras.applications.vgg16 import VGG16
    from keras.callbacks import ModelCheckpoint
    from keras.models import Model
    from keras.layers import Dense

    input_shape = (HEIGHT, WIDTH, 3)
    input_tensor = Input(shape=input_shape)
    # Normalize pixels to [-0.5, 0.5], then trim 15 rows off the top and
    # 3 off the bottom before feeding the pretrained base.
    color_norm = Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape)(input_tensor)
    crop = Cropping2D(cropping=((15, 3), (0, 0)))(color_norm)
    base_model = VGG16(input_tensor=crop, weights='imagenet', include_top=False)
    for layer in base_model.layers:
        layer.trainable = False  # freeze the convolutional base

    # Trainable regression head.
    head = Flatten()(base_model.output)
    for units in (1024, 100, 50, 10):
        head = Dense(units)(head)
    regression = Dense(1)(head)
    model = Model(base_model.input, regression)

    # NOTE: with save_best_only=False the monitor value is irrelevant — a
    # checkpoint is written after every epoch regardless.
    checkpoint = ModelCheckpoint('model-vgg-cp.h5', monitor='val_acc', verbose=1, save_best_only=False, mode='max')
    model.compile(loss='mae', optimizer='adam')
    model.fit_generator(train_generator, samples_per_epoch=len(train_samples) * augmentation_factor,
                        validation_data=validation_generator,
                        callbacks=[checkpoint],
                        nb_val_samples=len(validation_samples) * augmentation_factor, nb_epoch=EPOCHS)
    model.save('model-vgg.h5')
# vgg_network()
def inceptionv3_network():
    """Train a regression head on top of a frozen ImageNet InceptionV3 base.

    NOTE(review): the input is hard-wired to 160x320x3 and cropped by
    (70, 10) rows, while the generator resizes frames to
    (pre_processing.size) — confirm these shapes actually agree before use.
    """
    from keras.applications.inception_v3 import InceptionV3
    from keras.callbacks import ModelCheckpoint
    from keras.models import Model
    from keras.layers import Dense
    input_tensor = Input(shape=(160, 320, 3))
    # Normalize pixels to [-0.5, 0.5] and crop sky/hood rows.
    color_norm = Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))(input_tensor)
    crop = Cropping2D(cropping=((70, 10), (0, 0)))(color_norm)
    base_model = InceptionV3(input_tensor=crop, input_shape=(160, 320, 3), weights='imagenet', include_top=False)
    for layer in base_model.layers:
        # Freeze the pretrained base; only the head below is trained.
        layer.trainable = False
    # Add top layer for regression
    x = base_model.output
    x = Flatten()(x)
    x = Dense(1024)(x)
    x = Dense(120)(x)
    regression = Dense(1)(x)
    model = Model(base_model.input, regression)
    # NOTE: save_best_only=False means a checkpoint is written every epoch,
    # so the 'val_acc' monitor (never computed — no metrics) is inert.
    checkpoint = ModelCheckpoint('model-inception-cp.h5', monitor='val_acc', verbose=1, save_best_only=False, mode='max')
    model.compile(loss='mse', optimizer='adam')
    model.fit_generator(train_generator, samples_per_epoch=len(train_samples),
                        validation_data=validation_generator, callbacks=[checkpoint],
                        nb_val_samples=len(validation_samples), nb_epoch=3)
    model.save('model-inception.h5')
# inceptionv3_network()
| [
"ravelantunes@gmail.com"
] | ravelantunes@gmail.com |
1234dc6f5996dc5dc05d4f7da7c2a600bb8d5d26 | ceb4eb2bf376ca8b83628c87307bca7c81fd0f7b | /model/utils.py | d9406f1fa14d91dc48ebf83572e62dc3ca3cc773 | [] | no_license | AhmedNasr7/FogProof | 245a5bd856d83473c18df2d7d16b7f137568121b | 9a24a240566079d5b3b513b3158c4b03d7e71470 | refs/heads/master | 2022-12-16T17:42:08.860924 | 2020-09-20T05:17:23 | 2020-09-20T05:17:23 | 296,832,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import cv2
import numpy as np
import glob
def build_video(path):
img_array = []
for filename in glob.glob(path + '/*.jpg'):
img = cv2.imread(filename)
height, width, layers = img.shape
size = (width,height)
img_array.append(img)
out = cv2.VideoWriter('project.avi',cv2.VideoWriter_fourcc(*'DIVX'), 15, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
| [
"ahmed.nasr9677@gmail.com"
] | ahmed.nasr9677@gmail.com |
5aad7e0cb8a7d3330496ab6719606b80e1ce9362 | d963fb56dbb92cc7317c0a042c9059239ebaa028 | /problems/LC31.py | f1c1a4832845498896b69b66f880c332d2a9f312 | [] | no_license | ClaudioCarvalhoo/you-can-accomplish-anything-with-just-enough-determination-and-a-little-bit-of-luck | 20572bde5482ddef379506ce298c21dd5e002492 | df287ed92a911de49ed4bc7ca5a997d18a96c3f6 | refs/heads/master | 2023-06-02T16:27:57.994351 | 2021-06-24T23:16:47 | 2021-06-24T23:16:47 | 284,845,707 | 1 | 0 | null | 2020-10-29T21:35:30 | 2020-08-04T01:24:23 | Python | UTF-8 | Python | false | false | 958 | py | # O(n)
# n = len(nums)
class Solution:
def nextPermutation(self, nums: List[int]) -> None:
breakIndex = self.findFirstDescending(nums)
if breakIndex is None:
self.reverseSubList(nums, 0)
return
swapIndex = breakIndex + 1
for i in range(breakIndex + 2, len(nums)):
if nums[i] > nums[breakIndex] and nums[i] <= nums[swapIndex]:
swapIndex = i
self.swap(nums, breakIndex, swapIndex)
self.reverseSubList(nums, breakIndex + 1)
def findFirstDescending(self, nums):
for i in range(len(nums) - 2, -1, -1):
if nums[i] < nums[i + 1]:
return i
return None
def reverseSubList(self, nums, start):
i = start
j = len(nums) - 1
while i < j:
self.swap(nums, i, j)
i += 1
j -= 1
def swap(self, nums, i, j):
nums[i], nums[j] = nums[j], nums[i] | [
"carvalhooclaudio@gmail.com"
] | carvalhooclaudio@gmail.com |
a7cfff5f449329fb72c3ef91faeeb44b5d11f079 | ca2e2adeb284ed090e24a36b633354aab5230690 | /text.py | 0d4836dea70b9d25ac79782c7171ea78e09a55c5 | [] | no_license | LelioMarcos/lel-bot | 460726940291a25285878307707c7acfaf90bcc8 | d08b6f74cd95e4ba352969caf6d08c8aba1f481e | refs/heads/main | 2023-01-18T16:44:05.794960 | 2020-11-19T15:07:06 | 2020-11-19T15:07:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | import random
import discord
from discord.ext import commands
from getHello import *
class text(commands.Cog):
    """Assorted text/chat commands for the bot."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(usage="", description="Ver o ping do bot.")
    async def ping(self, ctx):
        # Simple liveness check: reply "pong" in a red embed.
        await ctx.trigger_typing()
        await ctx.send(embed=discord.Embed(title="pong", color=0xff0000))

    @commands.command(usage="(pedra ou papel ou tesoura)", description="Jogar pedra papel tesoura com o bot.")
    async def PPT(self, ctx, arg):
        # Rock-paper-scissors (in Portuguese) against a random bot pick.
        await ctx.trigger_typing()
        options = ["pedra", "papel", "tesoura"]
        arg = arg.lower()
        if arg not in options:
            await ctx.send("Você deve jogar:\n\t-pedra\n\t-papel\n\t-tesoura")
            return
        bot_play = random.choice(options)
        if bot_play == arg:
            outcome = "Empatou :|"
        elif (bot_play, arg) in {("pedra", "tesoura"), ("tesoura", "papel"), ("papel", "pedra")}:
            # Pairs where the bot's play beats the player's play.
            outcome = "Ganhei :)"
        else:
            outcome = "Perdi :("
        await ctx.send("Joguei **" + bot_play + "**\n" + outcome)

    @commands.command(usage="", description="Recebe um biscoito da melhor personagem.")
    async def biscoito(self, ctx):
        # Send the invoker a cookie picture.
        await ctx.trigger_typing()
        await ctx.send("Para " + ctx.author.mention, file=discord.File("naoto.jpg"))

    @commands.command(usage="linguagem", description="Escreve um programa Hello World em qualquer linguagem (pelo menos os que estão no repo https://github.com/leachim6/hello-world)")
    async def hello(self, ctx, lang):
        # Look up a Hello World snippet for *lang* and post it in an embed.
        await ctx.trigger_typing()
        snippet = HelloWorld().print_hello(lang)
        embed = discord.Embed(title=f"Hello World em {lang}", color=0x9200d6)
        embed.add_field(name=snippet[1], value=f"```{snippet[0]}```")
        await ctx.send(embed=embed)
"59713688+01737@users.noreply.github.com"
] | 59713688+01737@users.noreply.github.com |
e111bc24196ec4ebceb7895409d7550354077759 | b312eb828d66e36406b361c0563142660e097f01 | /linepy/config.py | a898433d6ba43178edbc2dc6c01c172c5cf72eae | [] | no_license | hnumlove5/Hnum2 | 70df74410b3d56ae91cdc4922c176dd813edda76 | 10b5a69c5b17f0c11bfe768795639ffee4e0ba26 | refs/heads/master | 2020-09-13T21:24:20.468138 | 2020-01-20T14:43:46 | 2020-01-20T14:43:46 | 222,907,300 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | # -*- coding: utf-8 -*-
from akad.ttypes import ApplicationType
import re
class Config(object):
    """Endpoint URLs, API paths and the client identity used by this LINE client."""
    # Base service domains.
    LINE_HOST_DOMAIN = 'https://gd2.line.naver.jp'
    LINE_OBS_DOMAIN = 'https://obs-sg.line-apps.com'   # object storage (media uploads)
    LINE_TIMELINE_API = 'https://gd2.line.naver.jp/mh/api'
    LINE_TIMELINE_MH = 'https://gd2.line.naver.jp/mh'
    # Query paths appended to LINE_HOST_DOMAIN for the various services.
    LINE_LOGIN_QUERY_PATH = '/api/v4p/rs'
    LINE_AUTH_QUERY_PATH = '/api/v4/TalkService.do'
    LINE_API_QUERY_PATH_FIR = '/S4'
    LINE_POLL_QUERY_PATH_FIR = '/P4'
    LINE_CALL_QUERY_PATH = '/V4'
    LINE_CERTIFICATE_PATH = '/Q'
    LINE_CHAN_QUERY_PATH = '/CH4'
    LINE_SQUARE_QUERY_PATH = '/SQS1'
    LINE_SHOP_QUERY_PATH = '/SHOP4'
    LINE_LIFF_QUERY_PATH = '/LIFF1'
    # Channel ids keyed by LINE service name.
    CHANNEL_ID = {
        'LINE_TIMELINE': '1341209950',
        'LINE_WEBTOON': '1401600689',
        'LINE_TODAY': '1518712866',
        'LINE_STORE': '1376922440',
        'LINE_MUSIC': '1381425814',
        'LINE_SERVICES': '1459630796'
    }
    # Client identity reported to the server (poses as the Windows desktop client).
    APP_TYPE = 'DESKTOPWIN'
    APP_VER = '5.9.0'
    CARRIER = '51089, 1-0'
    SYSTEM_NAME = 'TERRORBYTE'
    SYSTEM_VER = '5.9.0600-XP-x64'
    IP_ADDR = '8.8.8.8'
    # Loose e-mail sanity check: anything@anything.anything.
    EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")

    def __init__(self):
        # Composite identity strings derived from the constants above.
        self.APP_NAME = '%s\t%s\t%s\t%s' % (self.APP_TYPE, self.APP_VER, self.SYSTEM_NAME, self.SYSTEM_VER)
        self.USER_AGENT = 'Line/%s' % self.APP_VER
"noreply@github.com"
] | hnumlove5.noreply@github.com |
b4a0e362cb132556cef9682a44f8eeaf550a6ca9 | 2a206a158d97e7b7efc33ea5d5d8e2267e9cb9ab | /src/devops-uzdo-build/py/uzdo/const.py | 1379271613024f3e50a85609a070ccbc2a6bb443 | [] | no_license | Shal1928/py-try | 63b9ffc5720352ead8ca0316b294f3c93f349c62 | 65a7ee0006cf1e9aef8593d868795d11f2676a60 | refs/heads/master | 2021-06-06T21:16:24.857508 | 2020-04-30T07:20:36 | 2020-04-30T07:20:36 | 144,483,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from core.const import Const
class IP(Const):
    """Per-environment host identifiers, stored as strings.

    NOTE(review): values look like the last octet of each environment's IP
    address; the short codes are presumably TST=test, HF=hotfix, NF=?,
    DEV=development — confirm against the deployment docs.
    """
    TST = '215'
    HF = '223'
    NF = '221'
    DEV = '252'
| [
"Shal1928@yandex.ru"
] | Shal1928@yandex.ru |
e6b9a1d190ab9a139c5f1a5c25492fc638c511dc | aacc7b1dd17e6b39d986a52e4f7ec8a07b1ee8cc | /Competition/HCP/bd.py | 7693e05b8722aaf99acc6a6b59a358db69d428e3 | [] | no_license | andreapatri/SAP-IoT | 33830c3f8b94c7fe67ec2bb414daadaca9ceab6f | e860df824b4ebe9856bc4a1044046afc90dbbb58 | refs/heads/master | 2020-07-02T14:30:24.410306 | 2019-08-10T03:46:05 | 2019-08-10T03:46:05 | 201,557,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,918 | py | import datetime
import time
import urllib3
# disable InsecureRequestWarning if your are working without certificate verification
# see https://urllib3.readthedocs.org/en/latest/security.html # be sure to use a recent enough urllib3 version if this fails
# Older urllib3 versions lack disable_warnings(); don't let that abort the
# script.  Catch Exception rather than using a bare except so Ctrl-C and
# interpreter-exit signals still propagate.
try:
    urllib3.disable_warnings()
except Exception:
    print('urllib3.disable_warnings() failed - get a recentenough urllib3 version to avoid potential InsecureRequestWarning warnings! Can and will continue though.')
# use with or without proxy
# Connection pool and target endpoint for the SAP IoT MMS HTTP data API.
http = urllib3.PoolManager()
url = 'https://iotmmsb7af91ae6.us1.hana.ondemand.com/com.sap.iotservices.mms/v1/api/http/data/'
#url = 'https://iotmmsi843568trial.hanatrial.ondemand.com/com.sap.iotservices.mms/v1/api/http/data/'
#deviceID = 'b7e75e6a-0364-494b-a6c4-7a25af775ea9'
deviceID = '34d074e3-ad45-42b7-bc9f-d3ee28765424'
url = url +deviceID
headers = urllib3.util.make_headers()
# NOTE(review): OAuth bearer token is hard-coded in source — it should be
# rotated and moved to configuration/environment before sharing this file.
#headers['Authorization'] = 'Bearer ' + '64a33a2048da5ed80a854b6789084da'
headers['Authorization'] = 'Bearer ' + 'ecde36ac5bf57a96943c3bc34c338a6b'
headers['Content-Type'] = 'application/json;charset=utf-8'
#I just started with random numbers, you can choose what ever you like
# Seed values for the per-metric counters that are incremented every loop
# iteration below (MM_* and LN_* correspond to the payload fields).
MM_water =1
MM_pages =2
MM_car=3
MM_bike = 4
LN_water = 5
LN_pauses = 6
LN_bike = 7
LN_walk = 8
LN_pages = 9
LN_car = 10
#just put in 3 rows into the DB
for x in range(0, 10):
current_time = int (time.time() *100)
timestamp =str (current_time)
inumber = 1843568
MM_water = MM_water + 1
MM_pages = MM_pages + 2
MM_car= MM_car + 3
MM_bike = MM_bike + 4
LN_water = LN_water + 5
LN_pauses = LN_pauses + 6
LN_bike = LN_bike + 7
LN_walk = LN_walk + 8
LN_pages = LN_pages + 9
LN_car = LN_car + 10
stringID = str (inumber)
stringMM_water = str (MM_water)
stringMM_pages = str (MM_pages)
stringMM_car = str (MM_car)
stringMM_bike = str (MM_bike)
stringLN_water = str (LN_water)
stringLN_pauses = str (LN_pauses)
stringLN_bike = str (LN_bike)
stringLN_walk = str (LN_walk)
stringLN_pages = str (LN_pages)
stringLN_car = str (LN_car)
print (str (current_time))
# send message body and the corresponding payload layout that you defined in the IoT Services Cockpit
# replace messagetypeid with id from IOT cockpit
body='{"messageType":"90565350a776a483b8b9","mode":"sync","messages":[{"timestamp":'
body=body+timestamp
body = body +',"ID":'+ stringID
body = body +',"MMwater":'+stringMM_water
body = body +',"MMpages":'+stringMM_pages
body = body +',"MMcar":'+stringMM_car
body = body +',"MMbike":'+stringMM_bike
body = body +',"LNwater":'+stringLN_water
body = body +',"LNpauses":'+stringLN_pauses
body = body +',"LNbike":'+stringLN_bike
body = body +',"LNwalk":'+stringLN_walk
body = body +',"LNpages":'+stringLN_pages
body = body +',"LNcar":'+stringLN_car+'}]}'
print ("")
print (body)
r = http.urlopen('POST', url, body=body, headers=headers)
print ("")
print(r.status)
print(r.data)
| [
"noreply@github.com"
] | andreapatri.noreply@github.com |
057b86a9ee008318847438b04b5024be4c838bc8 | f494ae54074cba2b2b71266f272037b9e436f2fc | /profiles/api/filters.py | 48c48b10bdd1813836207574cbcc66d7455f97dc | [
"Apache-2.0"
] | permissive | Aditya-Kolla/portal | 58b2994b48c61ce0d6e1e4dd27ad0983f01ba8b2 | 80be8034151cbf438b2d0284f507df24b323f4f7 | refs/heads/master | 2020-04-12T18:28:28.153069 | 2018-12-24T13:31:30 | 2018-12-24T13:31:30 | 162,680,099 | 0 | 0 | null | 2018-12-21T07:17:00 | 2018-12-21T07:17:00 | null | UTF-8 | Python | false | false | 219 | py | from django_filters import rest_framework as filters
from ..models import PlayerCharacter
class PlayerCharacterFilter(filters.FilterSet):
    """django-filter FilterSet restricting PlayerCharacter queries by `owner`."""
    class Meta:
        model = PlayerCharacter
        fields = ('owner', )
| [
"janusz.kamienski@deployed.pl"
] | janusz.kamienski@deployed.pl |
be52112c4d367913163a752e2fb94f6858f8d47d | 345ef1fab0e89782a7b50d1417da02f6f8210f02 | /pyThorAPT/flipper.py | 74676641ec2d59b4ac9f265d675ecbc2969a6880 | [
"Apache-2.0"
] | permissive | mfulghum/pyThorAPT | 3607b4969af159c039a63f5d5f8727c08413811f | 70eb1f5d8c8a0e32f4ca13c376476e6bdb0572b0 | refs/heads/master | 2021-01-10T06:34:12.437456 | 2016-02-22T00:21:26 | 2016-02-22T00:21:26 | 52,235,354 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,424 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 11 13:36:52 2014
@author: mfulghum
"""
from APT_device import APTdevice
import APT_messages
import numpy as np
import struct
import time
class flipper(APTdevice, object):
    """Thorlabs APT flipper device driven over the APT message protocol.

    Position is reported as 1 or 2, derived from the status bits delivered by
    periodic MOT_GET_STATUSUPDATE messages that are enabled at start-up.
    """
    def __init__(self, SN):
        """Open the device by serial number, enable channel 1 and start
        periodic status update messages."""
        APTdevice.__init__(self, SN, device_type='flipper')
        self.__status = 0
        self.__pos = 1
        self.__moving = False
        self.write({'code':'MOD_SET_CHANENABLESTATE', 'param1':0x01, 'param2':0x01, 'dest':'USB_UNIT', 'source':'HOST'})
        time.sleep(0.01)
        self.__start_updatemsgs()
        self._ready = True
    def __start_updatemsgs(self):
        # Ask the controller to stream periodic status update messages.
        self.write({'code':'HW_START_UPDATEMSGS', 'param1':0x01, 'param2':0x00, 'dest':'USB_UNIT', 'source':'HOST'})
    def __stop_updatemsgs(self):
        # Stop the periodic status update stream.
        self.write({'code':'HW_STOP_UPDATEMSGS', 'param1':0x01, 'param2':0x00, 'dest':'USB_UNIT', 'source':'HOST'})
    def _processRX(self, message):
        """Parse an incoming APT message and cache position/motion state."""
        try:
            header = message['header']
            if 'data' in message:
                data = message['data']
            # NOTE(review): if a status message arrives without a 'data' key,
            # `data` below is unbound and the resulting NameError is swallowed
            # by the except clause - confirm this is the intended behaviour.
            message_type = header['code']
            if ((message_type == 'MOT_GET_STATUSBITS') or
                (message_type == 'MOT_GET_STATUSUPDATE')):
                self.__status = data['status']
                # Check the limits of the motor: bit 0 set -> position 1,
                # bit 0 clear -> position 2.
                self.__pos = int((self.__status & 0x00000001) == 0) + 1
                # Check if the motor is moving (any of bits 4-7 set).
                if ((self.__status & 0x00000010) or (self.__status & 0x00000020)
                    or (self.__status & 0x00000040) or (self.__status & 0x00000080)):
                    self.__moving = True
                else:
                    self.__moving = False
                self._set_event('MOT_GET_STATUSBITS', 'USB_UNIT')
                self._set_event('MOT_GET_STATUSUPDATE', 'USB_UNIT')
        except Exception as ex:
            if self._debug:
                print('Error processing flipper RX: %s' % ex)
    @property
    def pos(self):
        """Current paddle position (1 or 2) from the latest status message."""
        #self.write({'code':'MOT_REQ_STATUSBITS', 'param1':0x01, 'param2':0x00, 'dest':'USB_UNIT', 'source':'HOST'})
        self.wait_for('MOT_GET_STATUSBITS', 'USB_UNIT')
        if self.require_update:
            self._clear_event('MOT_GET_STATUSBITS', 'USB_UNIT')
        return self.__pos
    @pos.setter
    def pos(self, value):
        """Request a move to position `value` via a jog command."""
        self.write({'code':'MOT_MOVE_JOG', 'param1':0x01, 'param2':value, 'dest':'USB_UNIT', 'source':'HOST'})
    @property
    def status(self):
        """Raw status bit-field from the latest status message."""
        #self.write({'code':'MOT_REQ_STATUSBITS', 'param1':0x01, 'param2':0x00, 'dest':'USB_UNIT', 'source':'HOST'})
        self.wait_for('MOT_GET_STATUSBITS', 'USB_UNIT')
        if self.require_update:
            self._clear_event('MOT_GET_STATUSBITS', 'USB_UNIT')
        return self.__status
    @property
    def moving(self):
        """True while the status bits report the motor in motion."""
        #self.write({'code':'MOT_REQ_STATUSBITS', 'param1':0x01, 'param2':0x00, 'dest':'USB_UNIT', 'source':'HOST'})
        self.wait_for('MOT_GET_STATUSBITS', 'USB_UNIT')
        if self.require_update:
            self._clear_event('MOT_GET_STATUSBITS', 'USB_UNIT')
        return self.__moving
    def identify(self, *args):
        """Ask the hardware to identify itself (usually blinks its LED)."""
        self.write({'code':'MOD_IDENTIFY', 'param1':0x01, 'param2':0x00, 'dest':'USB_UNIT', 'source':'HOST'})
| [
"matt.fulghum@oculus.com"
] | matt.fulghum@oculus.com |
f901eff5d7fb9faf694cb612fd1d8cc57d32500f | 9dc423fe2c14e3949a171b81da9d02f87b1c2063 | /day06/02_SQLAlchmey.py | c51e800634f4b59002e9eb4fdc5cfc37bf0b76a7 | [] | no_license | 1751660300/Flask | a09ca944f21070cc04116d5fb929cacf386e56cd | 9fbf6955649f0c5e2e7acd98b29e28ebfdb99cd7 | refs/heads/master | 2022-11-09T13:10:45.462516 | 2020-06-28T02:47:19 | 2020-06-28T02:47:19 | 271,776,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # -*- coding:utf-8 -*-
"""
SQLAlchmey详细:https://www.cnblogs.com/wupeiqi/articles/8259356.html
1.SQLAlchmey 是python中一种orm框架
目标:将对类/对象的操作 -> sql语句(通过pymysql模块来执行sql语句) -> 对数据库的操作
"""
import time
import threading
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
# Create a SQLAlchemy engine backed by a pymysql connection pool.
engine = create_engine(
    "mysql+pymysql://root:123@127.0.0.1:3306/t1?charset=utf8",
    max_overflow=0,  # max connections created beyond the pool size
    pool_size=5,  # connection pool size
    pool_timeout=30,  # max seconds to wait for a free connection before raising
    pool_recycle=-1  # seconds before pooled connections are recycled (reset); -1 disables
)
def task(arg):
    """Run a sample query against table t1 on a pooled connection.

    :param arg: thread index (unused; kept so Thread(target=task, args=(i,))
                callers keep working)
    """
    conn = engine.raw_connection()
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(
                "select * from t1"
            )
            # Drain the result set; the rows themselves are not used.
            cursor.fetchall()
        finally:
            cursor.close()
    finally:
        # Always return the connection to the pool, even if execute() raises;
        # the original code leaked the connection on error.
        conn.close()
# Spawn 20 threads that each borrow a pooled connection and query t1,
# exercising the pool limits configured above.
for i in range(20):
    t = threading.Thread(target=task, args=(i,))
    t.start()
| [
"1751660300@qq.com"
] | 1751660300@qq.com |
4f0f0721ae213b6c4829a043550fa70936b9ba6f | 82a35f1e5a9fbe44ad4ef2188294a956eb8ebe6f | /final.py | 0bac6beb15951ff14e7c87b6c5799c5f3c7a7d5b | [] | no_license | yurikong/Two_Detention_Ponds_in_Series | 1592d694a0a90a9fbfc7650fb485e70d3d8bb6c3 | dad9f0edb7d693bd958e4bcd367216591fe8fe94 | refs/heads/master | 2021-03-24T22:00:47.947723 | 2020-03-18T02:54:16 | 2020-03-18T02:54:16 | 247,567,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,407 | py | import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
import math
# Figure styling: LaTeX text rendering (requires a LaTeX install) + serif fonts.
plt.rc('text', usetex=True) # use latex
plt.rc('font', family='serif') # set fonts
# inflow function q_i,1(t): water inflow discharge rate
# convert unit of time from hours to seconds to match the unit of q
def q_i_1(t):
    """Inflow hydrograph q_i,1(t).

    Triangular pulse (time in seconds): zero before t = 3600, rising linearly
    to a peak at t = 12600, falling back to zero at t = 21600, zero afterwards.
    """
    if t < 3600 or t >= 21600:
        # outside the storm event there is no inflow
        return 0
    if t < 12600:
        # rising limb (t0 to t1)
        return 1 / 4500 * (t - 3600)
    # falling limb (t1 to t2)
    return -1 / 4500 * (t - 21600)
# system of ODE
# params (unit)
# D1 = D2 = outlet pipe diameters (m)
# A1 = bottom area of pond 1 (m^2)
# A2 = bottom area of pond 2 (m^2)
# H1 = max depth of pond 1 (m)
# H2 = max depth of pond 2 (m)
# h1_0 = h2_0 = initial water depth in both ponds (0)
def f(y, t, D1, D2, A1, A2, H1, H2):
    """ODE right-hand side for two detention ponds in series.

    :param y: [h1, h2] current water depths (m)
    :param t: time (s)
    :param D1: outlet pipe diameter of pond 1 (m); D2 likewise for pond 2
    :param A1: bottom area of pond 1 (m^2); A2 likewise for pond 2
    :param H1: max depth of pond 1 (m); H2 likewise for pond 2
    :return: [dh1/dt, dh2/dt]
    """
    h1, h2 = y
    # Clamp depths to the physical range: water above the rim is lost, and
    # the depth can never be negative.
    if h1 > H1:
        h1 = H1
    if h2 > H2:
        h2 = H2
    if h1 < 0:
        h1 = 0
    if h2 < 0:
        h2 = 0
    # Orifice outflow of each pond (Torricelli's law, g = 9.8 m/s^2).
    q_out_1 = np.pi * D1 ** 2 / 4 * math.sqrt(2 * 9.8 * h1)
    q_out_2 = np.pi * D2 ** 2 / 4 * math.sqrt(2 * 9.8 * h2)
    dh1_dt = (q_i_1(t) - q_out_1) / A1
    dh2_dt = (q_out_1 - q_out_2) / A2
    return [dh1_dt, dh2_dt]
# outflow function q_o,2(t): water outflow discharge rate
def q_o_2(h2, D2):
    """Outflow discharge rate of pond 2 through its outlet pipe (orifice flow)."""
    pipe_area = np.pi * D2 ** 2 / 4
    return pipe_area * math.sqrt(2 * 9.8 * h2)
# values of params
D1 = D2 = 0.2
A1 = 2000
A2 = 1000
H1 = 5
H2 = 4
h1_0 = h2_0 = 0
# time spans from 0 to 80 hours
# use seconds instead of hours to calculate
t = np.linspace(0, 80 * 3600 + 1, 1000)
IC = [h1_0, h2_0] # initial ponds depths
H = integrate.odeint(f, IC, t, args=(D1, D2, A1, A2, H1, H2)) # solve the system of ODE
h1, h2 = H.T
# excess water in each pond will be capped at the max height of the pond
# physically, water level cannot be lower than 0
# (the solver may overshoot slightly, so the solution is clamped post hoc)
for i in range(len(t)):
    if h1[i] < 0:
        h1[i] = 0
    if h2[i] < 0:
        h2[i] = 0
    if h1[i] > H1:
        h1[i] = H1
    if h2[i] > H2:
        h2[i] = H2
# plotting first graph: depth vs time
plt.plot(t / 3600, h1, 'r-', label='$h_1(t)$') # use hours instead of seconds in x-axis
plt.plot(t / 3600, h2, 'b--', label='$h_2(t)$') # use hours instead of seconds in x-axis
plt.title('Pond Water Depth vs. Time')
plt.xlabel('Time (hr)')
plt.ylabel('Depth (m)')
plt.legend() # show the legend
plt.show()
plt.close()
# plotting second graph: water discharge rates
plt.plot(t / 3600, [q_i_1(i) for i in t], 'r-', label='$q_{i,1}(t)$') # use hours instead of seconds in x-axis
plt.plot(t / 3600, [q_o_2(i, D2) for i in h2], 'b--', label='$q_{o,2}(t)$') # use hours instead of seconds in x-axis
plt.title('Pond Water Discharge Rates')
plt.xlabel('Time (hr)')
plt.ylabel('Discharge Rate ($m^3/s$)')
plt.legend()
plt.show()
| [
"noreply@github.com"
] | yurikong.noreply@github.com |
a7ee450a2d232c710316f7f9fcaa7f29ea7ffda0 | b5302ab6ce0f8dac0a190107e5b4b29a117b4ca5 | /LiaoXuefeng/Built-inModules/urllib/use_post.py | ab7044e9c3cc5eb918c791056d5efdefe0df2136 | [] | no_license | CamphortreeYH/Python | 2cc265d338b1bc6b092cbe78c0b13cc4e7be1060 | 78e4c27d9042026c092436c8e4610fb1b1e74873 | refs/heads/master | 2020-05-09T16:48:18.819935 | 2019-06-23T15:29:56 | 2019-06-23T15:29:56 | 181,284,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | # -*- coding: utf-8 -*-
from urllib import request, parse
print('Login to weibo.cn...')
email = input('Email: ')
passwd = input('Password: ')
# Form-encode the login fields in the shape the mobile Weibo login page sends.
login_data = parse.urlencode([
    ('username', email),
    ('password', passwd),
    ('entry', 'mweibo'),
    ('client_id', ''),
    ('savestate', '1'),
    ('ec', ''),
    ('pagerefer', 'https://passport.weibo.cn/signin/welcome?entery=weibo&r=http%3A%2F%2Fm.weibo.cn%2F')
])
req = request.Request('https://passport.weibo.cn/sso/login')
# Headers mimic a mobile browser so the SSO endpoint accepts the request.
req.add_header('Origin', 'http://passport.weibo.cn')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
req.add_header('Referer', 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F')
# Passing data= makes urlopen issue a POST; dump status, headers and body.
with request.urlopen(req, data=login_data.encode('utf-8'))as f:
    print('Status:', f.status, f.reason)
    for k, v in f.getheaders():
        print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8')) | [
"noreply@github.com"
] | CamphortreeYH.noreply@github.com |
e65a211ec01a8325dfbe67feed06981232e36bb2 | a082b2fa5db9947cf7bd1d56b144dbf040e07cf8 | /rules/01_yaha_bowtie_index.smk | c4fa6b44a2d158aa93d9ec40ab3a34c555a8bb4d | [] | no_license | rimjhimroy/snakeTEPID | ffe75b2db3e2ef39c3b7328f0d126c48c0bb5564 | 1b30561100ce177b6b9dfbdefe281d2abfe09c9f | refs/heads/master | 2022-12-07T09:03:59.081344 | 2020-08-27T15:13:54 | 2020-08-27T15:13:54 | 217,424,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | smk | CLUSTER = json.load(open("config/cluster.json"))
# Reference basename (path minus the final extension); yaha and bowtie2 write
# their index files alongside the reference using this prefix.
base=args['REFERENCE'].rsplit('.', 1)[0]
#basepath=args['REFERENCE'].rsplit('/', 1)[0]
# Builds both the yaha index (.nib2/.X11_01_02000S) and the bowtie2 index
# (.bt2 files) for the reference in a single rule.
rule yaha_bowtie_index:
    message: """
    --- Creates yaha and bowtie index for the reference {input.ref}
    """
    input:
        ref = args['REFERENCE']
    output:
        '%s.nib2'% base ,
        '%s.X11_01_02000S' % base ,
        '%s.rev.1.bt2' % base ,
        '%s.rev.2.bt2' % base ,
        '%s.1.bt2' % base ,
        '%s.2.bt2' % base ,
        '%s.3.bt2' % base ,
        '%s.4.bt2' % base
    params:
        basename=base
    benchmark:
        "benchmarks/index/index.json"
    conda:
        "envs/tepid.yaml"
    threads: int(CLUSTER['yaha_bowtie_index']['cpu'])
    shell:"""
    yaha -g {input.ref} -L 11 -H 2000
    bowtie2-build -f {input.ref} --threads {threads} {params.basename}
    """
| [
"rchoudhury@submit02.ubelix.unibe.ch"
] | rchoudhury@submit02.ubelix.unibe.ch |
a3f862fe3bd9ac8f7a52de2de2297a2fe4bf36b6 | fafe4925fcc99299d4295e6b4535c7fe0e308357 | /cora_gcn.py | 020e5a190065d68163a90dd24c742b94050b09ea | [] | no_license | christianlei/gcn_optimization | 53e387529fc2279ef121921beec24cb407992e94 | e2de3f8ebeecd9e9b080cf68cb23037fe307e09e | refs/heads/master | 2023-07-11T23:54:05.873341 | 2021-08-18T19:37:35 | 2021-08-18T19:37:35 | 391,147,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,655 | py | import pdb
import os
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
import networkx as nx
import tensorflow as tf
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping
from spektral.layers import GCNConv
import matplotlib.pyplot as plt
import statistics
#loading the data
# Walk ./cora: *.content files hold per-node rows, *cites files hold edges.
all_data = []
all_edges = []
for root,dirs,files in os.walk('./cora'):
    for file in files:
        if '.content' in file:
            with open(os.path.join(root,file),'r') as f:
                all_data.extend(f.read().splitlines())
        elif 'cites' in file:
            with open(os.path.join(root,file),'r') as f:
                all_edges.extend(f.read().splitlines())
#Shuffle the data because the raw data is ordered based on the label
random_state = 77
all_data = shuffle(all_data,random_state=random_state)
#_____________________________________________________________________
#parse the data
# Each .content row is tab-separated: <node id> <feature values...> <label>
labels = []
nodes = []
X = []
for i,data in enumerate(all_data):
    elements = data.split('\t')
    labels.append(elements[-1])
    X.append(elements[1:-1])
    nodes.append(elements[0])
X = np.array(X,dtype=int)
N = X.shape[0] #the number of nodes
F = X.shape[1] #the size of node features
print('X shape: ', X.shape)
#parse the edge
# Each cites row is a tab-separated pair of node ids (one edge).
edge_list=[]
for edge in all_edges:
    e = edge.split('\t')
    edge_list.append((e[0],e[1]))
print('\nNumber of nodes (N): ', N)
print('\nNumber of features (F) of each node: ', F)
print('\nCategories: ', set(labels))
num_classes = len(set(labels))
print('\nNumber of classes: ', num_classes)
#__________________________________________________________
def limit_data(labels, limit=20, val_num=500, test_num=1000):
    """Split example indices into train/validation/test index lists.

    The training set takes at most `limit` examples per class, in order of
    appearance; of the remaining indices (in order), the first `val_num`
    become validation and the next `test_num` become test.

    :param labels: sequence of class labels, one per example
    :param limit: max number of training examples per class
    :param val_num: number of validation examples
    :param test_num: number of test examples
    :return: (train_idx, val_idx, test_idx) lists of example indices
    """
    label_counter = dict((l, 0) for l in labels)
    train_idx = []
    for i, label in enumerate(labels):
        if label_counter[label] < limit:
            # add the example to the training data
            train_idx.append(i)
            label_counter[label] += 1
        # exit the loop once we found `limit` examples for each class
        if all(count == limit for count in label_counter.values()):
            break
    # indices that do not go to the training data, in order.
    # Perf fix: membership test against a set is O(1); the original tested
    # against the list, making this loop O(n^2).
    train_set = set(train_idx)
    rest_idx = [x for x in range(len(labels)) if x not in train_set]
    val_idx = rest_idx[:val_num]
    test_idx = rest_idx[val_num:(val_num + test_num)]
    return train_idx, val_idx, test_idx
def create_node_degree_graph(figure_name, adj_mat):
    """Plot and save a histogram of node degrees.

    :param figure_name: path the figure is saved to
    :param adj_mat: adjacency matrix whose rows support count_nonzero()
                    (e.g. the scipy sparse matrix returned by networkx)
    """
    node_degrees = {}
    print(adj_mat.shape)
    node_list = []
    for row in adj_mat:
        # Degree of a node = number of non-zero entries in its adjacency row.
        degrees = row.count_nonzero()
        node_list.append(int(degrees))
        if degrees in node_degrees:
            node_degrees[degrees]+=1
        else:
            node_degrees[degrees] = 1
    print("median: ", statistics.median(node_list))
    fig = plt.figure()
    fig.suptitle('Degrees of Nodes in Graph - CORA', fontsize=20)
    plt.bar(list(node_degrees.keys()), node_degrees.values(), width=1.0, color='g')
    plt.xlabel("Degrees")
    plt.ylabel("Occurrences")
    plt.xlim(0,30)
    plt.show()
    # NOTE(review): with some backends savefig() after show() writes an empty
    # figure - confirm the saved file, or save before showing.
    plt.savefig(figure_name)
train_idx,val_idx,test_idx = limit_data(labels)
#set the mask
# Boolean masks over the N nodes marking each split for semi-supervised training.
train_mask = np.zeros((N,),dtype=bool)
train_mask[train_idx] = True
val_mask = np.zeros((N,),dtype=bool)
val_mask[val_idx] = True
test_mask = np.zeros((N,),dtype=bool)
test_mask[test_idx] = True
#_____________________________
#build the graph
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edges_from(edge_list)
#obtain the adjacency matrix (A)
A = nx.adjacency_matrix(G)
print('Graph info: ', nx.info(G))
create_node_degree_graph('cora_degree_graph.png', A)
#__________________________________
def encode_label(labels):
    """One-hot encode string labels.

    :param labels: iterable of class-label strings
    :return: (one-hot label matrix, array of the original class names)
    """
    label_encoder = LabelEncoder()
    labels = label_encoder.fit_transform(labels)
    labels = to_categorical(labels)
    return labels, label_encoder.classes_
labels_encoded, classes = encode_label(labels)
#____________________________________________________________
# Parameters
channels = 16 # Number of channels in the first layer
dropout = 0.5 # Dropout rate for the features
l2_reg = 5e-4 # L2 regularization rate
learning_rate = 1e-2 # Learning rate
epochs = 200 # Number of training epochs
es_patience = 10 # Patience for early stopping
# Preprocessing operations
# (presumably the symmetric GCN normalization of A - confirm in spektral docs)
A = GCNConv.preprocess(A).astype('f4')
# Model definition
# Two-layer GCN: F input features -> `channels` (relu) -> num_classes (softmax)
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)
dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = GCNConv(channels,
                       activation='relu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=False)([dropout_1, fltr_in])
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = GCNConv(num_classes,
                       activation='softmax',
                       use_bias=False)([dropout_2, fltr_in])
# Build model
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()
dot_img_file = 'model.png'
plot_model(model, to_file=dot_img_file, expand_nested=True, show_shapes=True)
tbCallBack_GCN = tf.keras.callbacks.TensorBoard(
    log_dir='./Tensorboard_GCN_cora',
)
callback_GCN = [tbCallBack_GCN]
#_________________________________________-
# Train model
# validation_data = ([X, A], labels_encoded, val_mask)
# model.fit([X, A],
# labels_encoded,
# sample_weight=train_mask,
# epochs=epochs,
# batch_size=N,
# validation_data=validation_data,
# shuffle=False,
# callbacks=[
# EarlyStopping(patience=es_patience, restore_best_weights=True),
# tbCallBack_GCN
# ])
# # Evaluate model
# X_te = X[test_mask]
# A_te = A[test_mask,:][:,test_mask]
# y_te = labels_encoded[test_mask]
# M = X_te.shape[0]
# # print("batch size:", N)
# tf.profiler.experimental.start('logdir')
# pdb.set_trace()
# y_pred = model.predict([X_te, A_te], batch_size=M)
# tf.profiler.experimental.stop()
# report = classification_report(np.argmax(y_te,axis=1), np.argmax(y_pred,axis=1), target_names=classes)
# print('GCN Classification Report: \n {}'.format(report)) | [
"cclei@ucsc.edu"
] | cclei@ucsc.edu |
6b5ca75f51a1f02a583f4eca9408fce015c928af | 1d56d6ffae2ba19e7a1b7987ecf23d2e3ae3dd1f | /Create_datasets_for_Content-based_Filter.py | d70945fb382a34f5e2eff5c0d225d5d9229a6f42 | [] | no_license | Tonysssu/Recommendation_system | 08a43f45a9be7b6004680f0f47b88d648e7f9a34 | 94b710ec89d751773050c7bed269cb55a28270a5 | refs/heads/master | 2020-06-14T22:08:48.242082 | 2019-07-03T23:54:41 | 2019-07-03T23:54:41 | 195,140,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,610 | py | # collect the data via a collection of SQL queries from the publicly avialable Kurier.at dataset in BigQuery
import os
import tensorflow as tf
import numpy as np
import google.datalab.bigquery as bq
# PROJECT = 'cloud-training'
# BUCKET = 'cloud-training-ml'
# REGION = 'us-central1'
#
# os.environ['PROJECT'] = PROJECT
# os.environ['BUCKET'] = BUCKET
# os.environ['REGION'] = REGION
# os.environ['TFVERSION'] = '1.8'
# Helper functio to write list of info to local files
def write_list_to_disk(my_list, filename):
with open(filename, "w") as f:
for item in my_list:
line = "%s\n" % item
f.write(line.encode("utf8"))
# Pull data from BigQuery
# Content_id
import google.datalab.bigquery as bq
sql = """
SELECT
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
content_id
"""
content_ids_list = (
bq.Query(sql).execute().result().to_dataframe()["content_id"].tolist()
)
write_list_to_disk(content_ids_list, "content_ids.txt")
# print("Some sample content IDs {}".format(content_ids_list[:3]))
# print("The total number of articles is {}".format(len(content_ids_list)))
# Some sample content IDs ['299965853', '299972248', '299410466']
# The total number of articles is 15634
# Category
sql = """
SELECT
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
category
"""
categories_list = bq.Query(sql).execute().result().to_dataframe()["category"].tolist()
write_list_to_disk(categories_list, "categories.txt")
# print(categories_list)
# Only three different categories
# Author
sql = """
SELECT
REGEXP_EXTRACT((SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)), r"^[^,]+") AS first_author
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
first_author
"""
authors_list = bq.Query(sql).execute().result().to_dataframe()["first_author"].tolist()
write_list_to_disk(authors_list, "authors.txt")
# print("Some sample authors {}".format(authors_list[:10]))
# print("The total number of authors is {}".format(len(authors_list)))
# Create train and test set
## Use the concatenated values for visitor id and content id to create a farm fingerprint,
## taking approximately 90% of the data for the training set and 10% for the test set
sql = """
WITH site_history as (
SELECT
fullVisitorId as visitor_id,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category,
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,
(SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,
SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,
LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND
fullVisitorId IS NOT NULL
AND
hits.time != 0
AND
hits.time IS NOT NULL
AND
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
)
SELECT
visitor_id,
content_id,
category,
REGEXP_REPLACE(title, r",", "") as title,
REGEXP_EXTRACT(author_list, r"^[^,]+") as author,
DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id
FROM
site_history
WHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL
AND MOD(ABS(FARM_FINGERPRINT(CONCAT(visitor_id, content_id))), 10) < 9
"""
training_set_df = bq.Query(sql).execute().result().to_dataframe()
training_set_df.to_csv("training_set.csv", header=False, index=False, encoding="utf-8")
# training_set_df.head()
sql = """
WITH site_history as (
SELECT
fullVisitorId as visitor_id,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category,
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,
(SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,
SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,
LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND
fullVisitorId IS NOT NULL
AND
hits.time != 0
AND
hits.time IS NOT NULL
AND
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
)
SELECT
visitor_id,
content_id,
category,
REGEXP_REPLACE(title, r",", "") as title,
REGEXP_EXTRACT(author_list, r"^[^,]+") as author,
DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id
FROM
site_history
WHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL
AND MOD(ABS(FARM_FINGERPRINT(CONCAT(visitor_id, content_id))), 10) >= 9
"""
test_set_df = bq.Query(sql).execute().result().to_dataframe()
test_set_df.to_csv("test_set.csv", header=False, index=False, encoding="utf-8")
# test_set_df.head()
| [
"sutongok2015@gmail.com"
] | sutongok2015@gmail.com |
919764203196c49f8c1250c296620ab28b1efa2e | aa7260a4d9ed144c2c5ba32d27b164c8c9bf33b5 | /item.py | f5a7707471d8fb6ed0fd383c82df250ea3983f68 | [] | no_license | Shinara01/Text_Based_Adventure_Game | 32457664672a08fae480503a5519d5617a8216e1 | 5849197b24e017992fabb04d46cb440d0bbfefb1 | refs/heads/main | 2023-02-20T02:34:41.326910 | 2021-01-18T13:41:33 | 2021-01-18T13:41:33 | 319,338,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | class Item(object):
    def __init__ (self, item_name):
        """Create an item with the given name; description and value start unset."""
        self.name = item_name
        self.description = None
        self.value = None
    def __repr__(self):
        """Use the item's name as its repr."""
        return self.name
    def set_name(self, item_name):
        """Rename the item."""
        self.name = item_name
    def get_name(self):
        """Return the item's name."""
        return self.name
    def print_name(self):
        """Print the item's name to stdout."""
        print(self.name)
    def set_description(self, item_description):
        """Set the item's description text."""
        self.description = item_description
    def get_description(self):
        """Return the item's description (None until set)."""
        return self.description
    def print_description(self):
        """Print the item's description to stdout."""
        print(self.description)
    def set_value(self, item_value):
        """Set the item's worth (in gold coins, per print_value)."""
        self.value = item_value
    def get_value(self):
        """Return the item's value (None until set)."""
        return self.value
    def print_value(self):
        """Print the item's worth.

        NOTE(review): the message hard-codes "dagger" regardless of the item's
        actual name - confirm whether that is intended.
        """
        print(f"The dagger is worth {self.value} gold coin(s).")
| [
"tammimarron@gmail.com"
] | tammimarron@gmail.com |
263a7c9f68d5ce7a6fd9dc79cb656dde8d8af6d6 | f77cb46c23fe914bcd23f55bfd2050a9851ee5bc | /protocol.py | 87fa22942a33fd5d1daea9593972fbe14c6505e4 | [] | no_license | mnoreika/fmp | cffb216c25f91a213476e7bc8ad4d00dd236587b | d08fd86b6b0b0df367731c4a60048d45406baf51 | refs/heads/master | 2021-06-10T13:30:49.724002 | 2017-02-22T01:57:08 | 2017-02-22T01:57:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | name = 'FMP'
version = '1'
# Multicast discovery endpoint.
multicast_ip = '228.5.6.7'
multicast_group = (multicast_ip, 8889)
server_address = ('', 8889)  # bind on all interfaces, port 8889
# TCP endpoint (loopback).
tcp_ip = '127.0.0.1'
tcp_port = 9999
# Transfer tuning.
data_payload_size = 20000  # bytes of payload per data packet
window_size = 800
udp_buffer = data_payload_size + 2048  # receive buffer: payload plus header slack
tcp_buffer = 2048
transmission_delay = 0
# Single-character packet type tags (see the packet-type constants' names).
start_packet_type = 'S'
end_packet_type = 'E'
data_packet_type = 'D'
request_packet_type = 'R'
success_packet_type = 'K'
socket_timeout = 0.2  # seconds
time_to_live = 1  # presumably the multicast TTL (stay on the local subnet) - confirm
read_timeout = 0.5 | [
"mn55@st-andrews.ac.uk"
] | mn55@st-andrews.ac.uk |
f4fb185bc247bad9d2dabc53a238422af129b6fd | dc2403dc9674aae232e55bd2069e1f9863845cb6 | /data-structure/python/11-hashMap/01-hashmap/com/jqc/map/hashMap.py | 9165ef335083e42367e04b7aa3a4aec17980bebd | [] | no_license | appbanana/MachineLearningAction | ecf87fba1450c830c65f6e4a57c48ce116230dda | 0af2f354477c5cabda14950d0c6352e4a258493f | refs/heads/master | 2020-04-18T01:46:02.912154 | 2020-01-10T08:57:17 | 2020-01-10T08:57:17 | 167,133,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,639 | py | from .map import BaseMap
from enum import Enum
from typing import TypeVar
import operator
from .map import Visitor
from com.jqc.queue.queue import Queue
# from .testModel.key import Key
# from .testModel.subkey1 import SubKey1
T = TypeVar('T')
"""
思路":使用红黑树来实现,又跟之前实现的红黑树不太一样,这次node直接存放key,value
"""
class Color(Enum):
    """Node colours used by the red-black balancing rules."""
    RED = 0
    BLACK = 1
class Node(object):
    """A red-black tree node that stores a key/value pair directly."""
    def __init__(self, key, value, parent) -> None:
        """
        Build a new node (red by default, as required for insertion).

        :param key: mapping key; its hash is cached in ``self.hash``
        :param value: mapped value
        :param parent: parent node, or None for a bucket root
        """
        self.key = key
        self.value = value
        self.parent = parent
        self.color = Color.RED
        self.left = None
        self.right = None
        # NOTE: any falsy key (None, 0, "", ...) hashes to 0 here - this
        # matches the convention used by the map's own hashing code.
        if self.key:
            self.hash = hash(self.key)
        else:
            self.hash = 0
    def is_leaf(self) -> bool:
        """True when the node has no children."""
        return self.left is self.right is None
    def has_two_children(self) -> bool:
        """True when both children are present (degree == 2)."""
        return not (self.left is None or self.right is None)
    def is_left_child(self) -> bool:
        """True when this node is its parent's left child."""
        p = self.parent
        return p is not None and p.left == self
    def is_right_child(self) -> bool:
        """True when this node is its parent's right child."""
        p = self.parent
        return p is not None and p.right == self
    def sibling(self):
        """Return the other child of this node's parent, or None."""
        p = self.parent
        if p is None:
            return None
        if p.left == self:
            return p.right
        if p.right == self:
            return p.left
        return None
class HashMap(BaseMap):
    """Hash map whose buckets are red-black trees of Node objects."""
    # Default capacity of the bucket array (16).
    __DEFAULT_CAPACITY = 1 << 4
def __init__(self):
"""
__table 是个容器,每个索引对应的是一颗红黑树
"""
self.__size = 0
self.__root = None
self.__table = [None for _ in range(HashMap.__DEFAULT_CAPACITY)]
    def size(self) -> int:
        """Return the number of key/value pairs stored in the map."""
        return self.__size
def is_empty(self) -> bool:
return self.__size == 0
def clear(self):
if self.__size == 0:
return
self.__table = [None for _ in range(len(self.__table))]
self.__size = 0
def put(self, key, value) -> T:
"""
添加key:value
:param key:
:param value:
:return:
"""
self.__add(key, value)
def remove(self, key) -> T:
"""
更具key值 删除对应的节点
:param key:
:return:
"""
index = self.__get_index_with_key(key)
root = self.__table[index]
node = self.__node(root, key)
return self.__remove(node) if node else None
def get(self, key) -> T:
"""
更具key值返回value
:param key:
:return:
"""
index = self.__get_index_with_key(key)
root = self.__table[index]
node = self.__node(root, key)
return node.value if node else None
def contains_key(self, key) -> bool:
"""
是否包含某个key
:param key:
:return:
"""
index = self.__get_index_with_key(key)
root = self.__table[index]
node = self.__node(root, key)
return True if node else False
def contains_value(self, value) -> bool:
"""
是否包含某个value
:return:
"""
queue = Queue()
for i in range(len(self.__table)):
root = self.__table[i]
queue.en_queue(root)
while not queue.is_empty():
node = queue.de_queue()
if node is None:
continue
if node.value == value:
return True
if node.left:
queue.en_queue(node.left)
if node.right:
queue.en_queue(node.right)
return False
def __add(self, key, value) -> T:
"""
put实现部分
:param key:
:param value:
:return:
"""
# 由key获取对应索引
index = self.__get_index_with_key(key)
root = self.__table[index]
if root is None:
root = Node(key, value, None)
self.__table[index] = root
self.__size += 1
# 添加元素后,进行调整,使其满足红黑树性质
self.__after_add(root)
return None
# index索引对应的有元素(ps: 元素是以红黑树的形式存储),取出对应根节点
# 下面这段代码 跟前面红黑树,添加的逻辑是一样的
node = root
k1 = key
hash1 = hash(key) if key else 0
is_searched = False
cmp_result = None
while node is not None:
parent = node
k2 = node.key
hash2 = node.hash
if hash1 > hash2:
cmp_result = 1
elif hash1 < hash2:
cmp_result = -1
elif operator.eq(k1, k2):
# hash值相等 key也相等 覆盖
cmp_result = 0
elif (k1 is not None and k2 is not None) \
and (k1.__class__.__name__ == k2.__class__.__name__) \
and hasattr(k1, 'compare') \
and k1.compare(k2) != 0:
# 能走到这里,说明hash值相等, 当不eq, k1, k2都存在而且k1, k2可比较 但比较结果不相等
# compare比较结果相等,不一定是eq,要进行下面的扫描
cmp_result = k1.compare(k2)
elif is_searched:
# 如果已经扫描过一次,直接拿对象的地址的hash值比较就可以了
cmp_result = hash(id(k1)) - hash(id(k2))
else:
# 第一次扫描,拿根节点的左右子节点开始扫描
if (node.left and self.__node(node.left, key) is not None) \
or (node.right and self.__node(node.right, key) is not None):
result = self.__node(node.left, key)
if result is None:
result = self.__node(node.right, key)
node = result
cmp_result = 0
else:
is_searched = True
cmp_result = hash(id(k1)) - hash(id(k2))
if cmp_result > 0:
node = node.right
elif cmp_result < 0:
node = node.left
else:
old_val = node.value
node.key = key
node.value = value
return old_val
new_node = Node(key, value, parent)
if cmp_result > 0:
parent.right = new_node
else:
parent.left = new_node
self.__size += 1
self.__after_add(new_node)
return None
def __remove(self, node: Node):
"""
删除对应的节点
:param node:
:return:
"""
if node is None:
return
self.__size -= 1
old_val = node.value
# 度为2的节点直接删除它的前驱或者后继
# 在这里我们删除的是后继节点
if node.has_two_children():
# 找到后继节点
s = self.__successor(node)
# 将后继节点的值赋值给node
node.key = s.key
node.value = s.value
node.hash = s.hash
# 接下来让node指向s(后继节点)
node = s
index = self.__get_index_with_node(node)
# 下面删除就是度为0或者度为1的节点, 删除度为1的节点 直接让其子节点取代, 删除度为0的指点,直接删除即可
replace_node = node.left if node.left else node.right
if replace_node is not None:
# 删除度为1的节点
replace_node.parent = node.parent
if node.parent is None:
# 删除的是度为1的根节点
# self.__root = replace_node
self.__table[index] = replace_node
elif node == node.parent.left:
node.parent.left = replace_node
else:
node.parent.right = replace_node
# 删除之后调整使其满足红黑树性质
self.__after_remove(replace_node)
elif node.parent is None:
# 删除的是度为0的根节点
self.__table[index] = None
# 删除之后验证avl树
self.__after_remove(node)
else:
# 删除度为0的节点
if node == node.parent.left:
node.parent.left = None
else:
node.parent.right = None
# 删除之后调整使其满足红黑树性质
self.__after_remove(node)
return old_val
def traversal(self, visitor: Visitor):
"""
遍历
def test(key,value):
print(key, value)
return True if key == 10 else False
:param visitor: lambda函数, 类似于上面形式的
:return:
"""
if self.__size == 0:
return
assert visitor is not None, "visit不能为空,请传入一个lambda函数"
queue = Queue()
for i in range(len(self.__table)):
root = self.__table[i]
queue.en_queue(root)
while not queue.is_empty():
node = queue.de_queue()
if node is None:
continue
if visitor.visit(node.key, node.value):
return
if node.left:
queue.en_queue(node.left)
if node.right:
queue.en_queue(node.right)
@staticmethod
def __key_not_none_check(element):
"""
校验传入的元素
:param element:
:return:
"""
if element is None:
raise NameError('element 不能为None')
def __get_index_with_node(self, node):
"""
更具传进来的node 获取对应的索引
:param node:
:return:
"""
return (node.hash ^ (node.hash >> 16)) & (len(self.__table) - 1);
def __get_index_with_key(self, key) -> int:
"""
根据key生成对应的索引
:param key:
:return:
"""
if key is None:
return 0
hash_code = hash(key)
# 参考java官方的实现 虽然你实现hash 鬼知道你怎么实现的,保险起见,java官方对你的hash值又右移16在异或
return (hash_code ^ (hash_code >> 16)) & (len(self.__table) - 1)
def __after_add(self, node: Node):
"""
修复红黑树的性质
:param node:
:return:
"""
parent = node.parent
if parent is None:
# node 是根节点,直接染黑
self.__black(node)
return
if self.__is_black(parent):
# 如果添加节点的父节点是黑色 不做任何处理
return
# 能走到这里,父节点是红色的
# 获取添加节点的叔父节点和爷爷节点
uncle = parent.sibling()
grand = parent.parent
if self.__is_red(uncle):
# 叔父节点是红色,结合4阶B树,算上添加的节点,就会有4个节点
# 不满足B树性质:非根节点元素个数 1 <= y <= 3,就会长生上溢
self.__black(parent)
self.__black(uncle)
# 处理上溢
self.__after_add(self.__red(grand))
return
# 能走到这里, 叔父节点一定是黑色
if parent.is_left_child():
# L
self.__red(grand)
if node.is_left_child():
# LL
# self.__red(grand)
self.__black(parent)
# self.__rotate_right(grand)
else:
# LR
# self.__red(grand)
self.__black(node)
self.__rotate_left(parent)
# self.__rotate_right(grand)
self.__rotate_right(grand)
else:
# R
self.__red(grand)
if node.is_right_child():
# RR
# self.__red(grand)
self.__black(parent)
# self.__rotate_left(grand)
else:
# RL
# self.__red(grand)
self.__black(node)
self.__rotate_right(parent)
# self.__rotate_left(grand)
self.__rotate_left(grand)
def __after_remove(self, node: Node) -> None:
"""
删除节点,修复红黑树性质
:param node:
:return:
"""
# 真正被删除的节点一定是叶子节点
if self.__is_red(node):
# 能走到这里有两种情况:1)被删除的是红色叶子节点;2)被删除的是黑色节点但至少有一个红色叶子节点
self.__black(node)
return
# 能走到这里 删除的一定是黑色叶子节点(ps:注意黑色节点和黑色叶子节点的区别)
parent = node.parent
if parent is None:
# parent 为空,说明删除的是根节点
return
# 判断删除的节点是左还是右 不能使用node 这要结合4阶B树来理解 非根节点的子节点个数一定2 <= y <= 4
is_left = parent.left is None or node.is_left_child()
# 获取被删除节点的兄弟节点
sibling = parent.right if is_left else parent.left
if not is_left:
# 右边节点
# 删除的节点是右边黑色的叶子节点
if self.__is_red(sibling):
# 该删除的节点有红兄弟(红色的兄弟节点)
self.__black(sibling)
self.__red(parent)
# 右旋转 把红红兄弟的黑儿子变成被删除节点的黑兄弟
self.__rotate_right(parent)
# 旋转完毕 一定要更新被删除节点的兄弟节点 这样被删除的节点就有黑兄弟,就和下面处理黑兄弟的逻辑是一样的
sibling = parent.left
# 能走到这里,被删除节点的有黑兄弟(sibling是黑兄弟)
if self.__is_black(sibling.left) and self.__is_black(sibling.right):
# 黑兄弟的两个子节点都是黑色
is_parent_black = self.__is_black(parent)
self.__red(sibling)
self.__black(parent)
if is_parent_black:
# 处理下溢
self.__after_remove(parent)
else:
# 黑兄弟至少有一个红色的子节点,说明黑兄弟有可以借的元素
# 上面代码整理成下面的
if self.__is_black(sibling.left):
# 黑兄弟左子节点是黑色
self.__rotate_left(sibling)
sibling = parent.left
# 黑兄弟左子节点是红色
# 把兄弟节点染色,与父节点同色
self.__color(sibling, self.__color_of(parent))
self.__black(parent)
self.__black(sibling.left)
self.__rotate_right(parent)
else:
# 左边节点与右边节点对称
# 删除的节点是右边黑色的子节点
if self.__is_red(sibling):
# 该删除的节点有红色的兄弟节点
self.__black(sibling)
self.__red(parent)
self.__rotate_left(parent)
# 旋转玩 更新兄弟节点
sibling = parent.right
# 下面处理的是删除节点的兄弟节点是黑色兄弟(sibling是黑兄弟)
if self.__is_black(sibling.left) and self.__is_black(sibling.right):
# 黑兄弟的两个节点都是黑色
is_parent_black = self.__is_black(parent)
self.__red(sibling)
self.__black(parent)
if is_parent_black:
self.__after_remove(parent)
else:
if self.__is_black(sibling.right):
self.__rotate_right(sibling)
sibling = parent.right
self.__color(sibling, self.__color_of(parent))
self.__black(parent)
self.__black(sibling.right)
self.__rotate_left(parent)
def __rotate_left(self, grand: Node):
"""
左旋转
:param grand: 要旋转的节点
:return:
"""
parent = grand.right
child = parent.left
grand.right = child
parent.left = grand
self.__after_rotate(grand, parent, child)
def __rotate_right(self, grand: Node):
"""
右旋选
:param grand: 要旋转的节点
:return:
"""
parent = grand.left
child = parent.right
grand.left = child
parent.right = grand
# 封装后 直接使用这个方法代替下面一坨代码
self.__after_rotate(grand, parent, child)
def __after_rotate(self, grand: Node, parent: Node, child: Node):
"""
左旋,右旋之后的操作
:param grand: 爷爷节点
:param parent: 父节点
:param child: 节点
:return:
"""
# 更新grand,parent,child的父节点
# 更新parent的父节点
parent.parent = grand.parent
if grand.is_left_child():
# grand原来是它父节点的左子节点,就让grand.parent.left指向parent
grand.parent.left = parent
elif grand.is_right_child():
# grand原来是它父节点的右子节点,就让grand.parent.right指向parent
grand.parent.right = parent
else:
# grand既不是左子节点 又不是右子节点 如果grand的父节点是根节点
index = self.__get_index_with_node(grand)
self.__table[index] = parent
# 更新child, grand的父节点
if child is not None:
child.parent = grand
grand.parent = parent
def __node(self, node, key):
"""
根据root节点和key,找到key对应的节点
:param node: 根节点
:param key: key值
:return:
"""
k1 = key
hash1 = hash(key) if key else 0
while node is not None:
k2 = node.key
hash2 = node.hash
if hash1 > hash2:
node = node.right
elif hash1 < hash2:
node = node.left
elif operator.eq(k1, k2):
return node
elif (k1 and k2) \
and k1.__class__.__name__ == k2.__class__.__name__ \
and hasattr(k1, 'compare') \
and k1.compare(k2) != 0:
cmp_result = k1.compare(k2)
node = node.right if cmp_result > 0 else node.left
elif node.right and self.__node(node.right, k1):
return self.__node(node.right, k1)
print('*******' * 10)
else:
node = node.left
return None
@staticmethod
def __predecessor(node: Node):
"""
寻找前驱节点
:return:
"""
if node is None:
return None
node = node.left
# 左子节点存在 一路向右寻找
if node:
while node.right is not None:
node = node.right
return node
# 左子树为空 从他的祖先节点找前驱节点
while node.parent is not None and node == node.parent.left:
node = node.parent
return node.parent
@staticmethod
def __successor(node: Node):
"""
寻找前驱节点
:return:
"""
if node is None:
return None
node = node.right
# 右子节点存在 一路向左寻找
if node:
while node.left is not None:
node = node.left
return node
# 左子树为空 从他的祖先节点找前驱节点
while node.parent is not None and node == node.parent.right:
node = node.parent
return node.parent
def __red(self, node: Node) -> Node:
"""
节点染红
:param node:
:return:
"""
return self.__color(node, Color.RED)
def __black(self, node: Node) -> Node:
"""
节点染黑
:param node:
:return:
"""
return self.__color(node, Color.BLACK)
@staticmethod
def __color_of(node: Node) -> Color:
"""
返回传入节点的颜色
:param node:
:return:
"""
return Color.BLACK if node is None else node.color
@staticmethod
def __color(node: Node, color: Color) -> Node:
if node is None:
return None
node.color = color
return node
def __is_black(self, node: Node) -> bool:
"""
判断节点是否是黑色
:param node:
:return:
"""
return self.__color_of(node) == Color.BLACK
def __is_red(self, node: Node) -> bool:
"""
判断节点是否是红色
:param node:
:return:
"""
return self.__color_of(node) == Color.RED
| [
"1243684438@qq.com"
] | 1243684438@qq.com |
21aab26a807cf704fcfcdcca62bd673b573e2217 | b3ba90660590ba8a712962561791c4de6ae0107d | /Python_task_2.py | 53313c16249b5eee0b1a857b13f9555d8f2c5a49 | [] | no_license | Blaqnificent/python-tasks | 1c4b7027e5f0009d5cb0bbb7bd72349fa276d7ab | 7968fdf82695cd07d0458ee26ff927fd8e3e53a4 | refs/heads/master | 2021-06-14T07:20:00.907505 | 2020-04-23T18:03:20 | 2020-04-23T18:03:20 | 254,485,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,941 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 19:39:29 2020
@author: Bayo
"""
#Importing the necessary modules we need
import random
import string
#Getting the user details
def user_deets():
    """Prompt for first name, last name and e-mail; return them as a 3-item list."""
    given_name = input("Please enter your first name here: ")
    family_name = input("Please enter your last name here: ")
    email_address = input("Please enter your email address here: ")
    return [given_name, family_name, email_address]
#Random password generator
def random_password_gen(user_deets_entry):
    """Derive a password from a user-details list [first, last, email].

    The first two letters of the first and last names are concatenated with
    five random characters drawn from upper case, digits and lower case.
    """
    alphabet = string.ascii_uppercase + string.digits + string.ascii_lowercase
    random_tail = "".join(random.choice(alphabet) for _ in range(5))
    return str(user_deets_entry[0][:2]) + str(user_deets_entry[1][:2]) + random_tail
#Start of main program.
# Interactive driver: collect users until the operator answers 'no', then dump
# every [first, last, email, password] record that was gathered.
container = []
program_status = True
while program_status:
    user_deets_entry = user_deets()
    rand_pass = random_password_gen(user_deets_entry)
    print()
    print("Your randomly genearted password is " + rand_pass + ", would you like to keep it?") #Offer the password generated from the user's first and last name
    usr_choice = input ("Enter either 'yes' to keep it, or 'no' to choose your own: ")
    usr_choice = usr_choice.lower() #Ensures uniformity of user's input choice. Always lower case
    password_loop = True
    # NOTE(review): any answer other than 'yes' (including typos) is treated as
    # 'no' — confirm that is the intended behaviour.
    while password_loop:
        if usr_choice == 'yes':
            user_deets_entry.append(rand_pass) #Adds the generated password to the other user's details
            container.append(user_deets_entry) #Adds the user's details to a container
            password_loop = False
        else:
            print ("Enter a password with at least 7 characters below: ")
            usr_choice = input()
            while len(usr_choice) < 7: #Keeps asking until the password is 7 characters or longer
                print ("Your password is less than 7 characters. Choose a longer password: ")
                usr_choice =input()
            if len(usr_choice) >= 7: #Password is long enough: add it to the user's details
                user_deets_entry.append(usr_choice)
                container.append(user_deets_entry)
                password_loop = False #Breaks out of the password check loop
    new_usr_choice = input ("Would you like to enter a new user? Enter 'yes' or 'no': ")
    new_usr_choice = new_usr_choice.lower() #Ensures user's choice is always lowercase.
    if new_usr_choice == 'no':
        program_status = False
        for item in container: #Prints out all collected records once entry stops.
            print (item)
    else:
        program_status = True
##END | [
"bayoduduyemi@gmail.com"
] | bayoduduyemi@gmail.com |
3782350d80e332c21b088402c6ca88813e652637 | 744bc87b112ad8ce11b00257194360d4861ccf85 | /otp.py | 1020aa15836276b116af91f7532f3ef2fe5e1303 | [] | no_license | Naveen-Shreeki/python | 011f7a0ce76b7bf953808000bbea2cbb5d66d1f5 | 9d7c7e4c5a42ad9b053cb8b861da7c1f5aa82860 | refs/heads/master | 2020-07-04T18:22:41.130705 | 2019-08-14T15:02:50 | 2019-08-14T15:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import math
import random
import secrets

# Generate a 4-digit one-time password (OTP).
snum="0123456789"
length=len(snum)
# OTPs are security-sensitive: use the secrets module's CSPRNG instead of
# random.random(), whose Mersenne Twister output is predictable.
otp = "".join(secrets.choice(snum) for _ in range(4))
print("Generating your otp")
print("Your OTP is ",otp) | [
"noreply@github.com"
] | Naveen-Shreeki.noreply@github.com |
807939997f0ace40c9934c507a9f2500d442d8a8 | f08a263fb922b90c1ce266561a1b97213b627a18 | /week-3/binary-search/binarysearch.py | 3f768c8625d93cd5d47b50af851c6dba73ff3eb0 | [] | no_license | Kbenjamin92/Afs-210 | b5541f8bc72857f41a190a86132b405a4558c03f | a9159c2b33d0bf37eefd2c91881afc407d0d98f8 | refs/heads/main | 2023-03-13T14:48:24.167856 | 2021-03-01T14:00:59 | 2021-03-01T14:00:59 | 330,972,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from List import newBST
# Populate the tree imported above (newBST) with sample values; presumably
# append_binary_tree performs BST insertion, so inserting 4 first makes it the
# root — TODO confirm against the List module.
newBST.append_binary_tree(4)
newBST.append_binary_tree(2)
newBST.append_binary_tree(1)
newBST.append_binary_tree(3)
newBST.append_binary_tree(5)
newBST.append_binary_tree(7) | [
"kipp.benjamin@bryanuniversity.edu"
] | kipp.benjamin@bryanuniversity.edu |
dc6c52f9206fc6a6f75426f41ab80f71dce44d51 | c1e1eae64fcdb4c366c229089265c09884c3996e | /Environment.py | 98fddd9fc80b7df9f81969362a9a520d37302572 | [] | no_license | pite2019/pite2019s-t4v2-g1-frageman | aa3adbdb20d40ab2920d84008ac5e8c469ed8229 | 87367bdb20f9aa86957373beb6430dda5949b893 | refs/heads/master | 2020-05-09T04:19:23.096443 | 2019-05-09T13:50:45 | 2019-05-09T13:50:45 | 180,984,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | import random
import logging
class Environment:
    """Simulation environment that supplies random turbulence samples."""

    def __init__(self):
        # No state to initialise yet.
        pass

    def prepare_turb(self):
        """Yield 10000 Gaussian turbulence samples (mean 0, sigma 15)."""
        produced = 0
        while produced < 10000:
            produced += 1
            yield random.gauss(0, 15)
| [
"noreply@github.com"
] | pite2019.noreply@github.com |
7397afa12e4a2262330657cc6cba2765d23f42be | 3135f67392febe5f85a743d1545d00bac40beb5a | /cn_stock_holidays/cn_stock_holidays/gateway/__init__.py | 657b3d4964a852cdccfb0de7f59f08ca7fa443ff | [] | no_license | xiyongjian/gateway | b8ebda77fed3995f75c63c5f61dc520bfe9c7897 | 59d3c410ce3005c616b354f0d1ad64cf77798573 | refs/heads/master | 2022-10-22T21:01:02.063293 | 2018-07-08T04:48:49 | 2018-07-08T04:48:49 | 120,036,602 | 2 | 2 | null | 2022-10-01T12:06:20 | 2018-02-02T22:08:03 | Python | UTF-8 | Python | false | false | 231 | py | from cn_stock_holidays.gateway.exchange_calendar_hkex import HKExchangeCalendar
from cn_stock_holidays.gateway.exchange_calendar_shsz import SHSZExchangeCalendar
__all__ = [
'HKExchangeCalendar',
'SHSZExchangeCalendar',
] | [
"xiyongjian@hotmail.com"
] | xiyongjian@hotmail.com |
d30bc0c7e55dda9955dd8a25d1ead5b969592d85 | 90e02be4ea2461e4e4a0fd504ce516aadf427c69 | /old/polysem.py | 426ea7e7fdecb878ccd048fd5354c90808906cb3 | [] | no_license | thoppe/polysemous-emoji | df5150fce38663389926aff4991c85d3bc442963 | 50b2107b50a3d8ab9719d2736c8925cc68a75180 | refs/heads/master | 2021-01-10T02:34:17.728953 | 2017-06-05T20:59:27 | 2017-06-05T20:59:27 | 51,712,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | from ksvd import KSVD
import h5py, os
from gensim.models.word2vec import Word2Vec
# Load the config files
from configobj import ConfigObj
wcon = ConfigObj("config.ini")["word2vec"]
kcon = ConfigObj("config.ini")["kSVD"]
# NOTE(review): duplicate import — Word2Vec was already imported above.
from gensim.models.word2vec import Word2Vec
# Load the trained word2vec model named in the [word2vec] config section.
f_features = wcon["f_features"].format(**wcon)
clf = Word2Vec.load(f_features)
X = clf.syn0
print clf
print X.shape
# Sparse-code the embedding matrix with K-SVD dictionary learning, using the
# dictionary size / sparsity / iteration budget from the [kSVD] section.
result = KSVD(X,
              dict_size=kcon.as_int("basis_size"),
              target_sparsity=kcon.as_int("sparsity"),
              max_iterations=kcon.as_int("iterations"),
              enable_printing=True,
              enable_threading = True,
              print_interval=1)
D,gamma = result
# Persist the learned dictionary D and sparse codes gamma to HDF5.
# NOTE(review): h5 is not closed if a create_dataset call raises — consider a
# with-block once the file moves off Python 2 print statements.
f_model = kcon["f_kSVD"].format(**kcon)
h5 = h5py.File(f_model,'w')
h5.create_dataset("D",data=D, compression="gzip")
h5.create_dataset("gamma",data=gamma, compression="gzip")
# Save the arguments (maybe later?)
#for key in args:
#    g.attrs[key] = cargs[key]
h5.close()
| [
"travis.hoppe@gmail.com"
] | travis.hoppe@gmail.com |
8d4856c6c849a5bc71aa203dc7a9dd6ec06bbf27 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_042/ch44_2020_10_07_13_08_47_151008.py | 10a4ee19987b7143968c5c8eef6249ca9ea6b94d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | nome_mes = input('Qual o nome do mes?')
# Portuguese month names; index i corresponds to month number i+1.
nome_meses = ['janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho', 'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro']
i=0
# Scan all twelve months and print the 1-based number of the matching one
# (the loop increment follows on the next line; there is no early break).
while i < 12:
    if nome_meses[i] == nome_mes :
        print (i+1)
i += 1 | [
"you@example.com"
] | you@example.com |
13dd02f91f85257e3902e5a18f00535b603db19c | 8a9506cc1e49a013035750a3e4009f677c4c96d7 | /weekly.py | 4a155ec5a76bdbfe90c487aef679e18e88fe957a | [] | no_license | frgunawan82/ETLGoogleAnalytics | dd514f8532eb41cc48234f79c9f9109f42477736 | a54b130df8d118983ce209386ea01a7ba3704c3d | refs/heads/master | 2021-06-21T11:31:50.968723 | 2017-08-15T04:09:32 | 2017-08-15T04:09:32 | 100,258,614 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,775 | py | import sys, time, json, os
sys.path.append('.')
from datetime import datetime, timedelta, date
from db_connector import pg_connector
from google.google_analytics import ga_model, ga_engine

# Load the sibling "<script>_config.json" file.  A context manager closes the
# handle instead of leaking it as the previous bare open() call did.
with open(os.path.relpath(__file__).replace('.py', '_config.json')) as _config_file:
    config = json.load(_config_file)
class dbinfo:
    # Static PostgreSQL connection settings pulled from the JSON config at
    # import time (class attributes, shared by every instance).
    hostname = config['Database']['host']
    username = config['Database']['user']
    password = config['Database']['pwd']
    database = config['Database']['database']
# Single shared connection-info instance used by all queries below.
DB = dbinfo()
class week:
    """Mutable Sunday-to-Saturday week keyed by yearweek (YYYYWW)."""
    def getWeek(self, date):
        """Return the 1-based week index of *date* within its calendar year.

        Week boundaries derive from the Saturday of the week containing Jan 1
        (weekday() is Mon=0..Sun=6, so Jan 1 + (5 - weekday) lands on Saturday).
        """
        firstday = datetime(date.year, 1, 1) + timedelta(days=+ 5 - datetime(date.year, 1, 1).weekday())
        week_index = 1
        while (firstday + timedelta(days=+week_index*7)).date() < date:
            week_index += 1
        return week_index
    def nextWeek(self):
        """Advance this object in place to the following week."""
        #print(self.start_date + timedelta(days=+7))
        self.__init__(ddate = self.start_date + timedelta(days=+7))
    def setYearWeek(self, yearweek):
        """Advance week by week until self.yearweek equals *yearweek*.

        NOTE(review): loops forever if *yearweek* is in the past, or is a type
        (e.g. a string from the database) that never compares equal to the int
        self.yearweek — verify what callers pass in.
        """
        while self.yearweek != yearweek:
            self.nextWeek()
    def __init__(self, ddate):
        #Normalise datetime or date inputs to a plain date first
        ddate = date(ddate.year, ddate.month, ddate.day)
        self.start_date = ddate - timedelta(days=+ (ddate.weekday()+1)%7) #Sunday
        self.end_date = self.start_date + timedelta(days=+ 6) #Saturday
        # NOTE(review): year comes from the week's end date while getWeek
        # indexes from ddate.year — weeks spanning New Year may mix the two.
        self.year = self.end_date.year
        self.week = self.getWeek(ddate)
        self.yearweek = int(str(self.year)+ str(self.week).zfill(2))
        #print(ddate, " - ", self.start_date, " - ", self.end_date, " - ", self.yearweek)
# The current week marks the upper bound of every backfill loop below.
end_week = week(datetime.now())
if __name__ == '__main__':
    # query = open('./query/table/bbmdiscover_calculatable.sql','r+').read()
    # pg_connector.executeQuery(DB, str(query))
    # Collect every *.json job config under "<script>_config/".
    config_files = []
    for (dirpath, dirnames, filenames) in os.walk(os.path.relpath(__file__).replace('.py', '_config/')):
        for filename in filenames:
            if filename[-5:] == '.json':
                config_files.append(dirpath + filename)
    for config_file in config_files:
        # NOTE(review): the open() handle is never closed — use a with-block.
        cfg = json.load(open(config_file), strict=False)
        ids = cfg["view_ids"]
        TableName = cfg["table_name"]
        metrics = cfg["metrics"]
        dimensions = cfg["dimensions"]
        table_query = cfg["table_query"]
        # Create the destination table if it does not exist yet.
        pg_connector.executeQuery(DB, table_query)
        for view in ids:
            backfill_week = week(datetime.strptime(cfg["backfill_date"], '%Y-%m-%d').date())
            #Get Last Week on Database
            # NOTE(review): SQL is assembled by string concatenation; values
            # come from local config, but parameterising would be safer.
            lastyearweek = pg_connector.getData(DB,
                                                "select coalesce(max(yearweek),'" +
                                                str(week(datetime.strptime(cfg["backfill_date"],'%Y-%m-%d')).yearweek) +
                                                "') from " + TableName + " where view_id='" +
                                                view['view_id'] + "'" )[0][0]
            backfill_week.setYearWeek(lastyearweek)
            #Delete Last Data so we could update and insert it from there
            pg_connector.executeQuery(DB,
                                      "delete from " + TableName + " where view_id='" + str(view['view_id']) +
                                      "' and yearweek='" + str(backfill_week.yearweek) + "'")
            #Start Filling Data
            print("filling " + TableName + " with service " + view["view_name"] + " data . . .")
            while backfill_week.yearweek < end_week.yearweek:
                print("filling yearweek:" + str(backfill_week.yearweek))
                # Build one Google Analytics report request per week.
                v = ga_model.View_Template()
                v.ids = view['view_id']
                v.start_date = str(backfill_week.start_date)
                v.end_date = str(backfill_week.end_date)
                v.metrics = metrics
                v.dimensions = dimensions
                v.orderBys = [{'fieldName': 'ga:yearweek', 'orderType': 1, 'sortOrder': 1}]
                BulkDATA = ga_engine.dumpData(v)
                for DATA in BulkDATA:
                    pg_connector.storeManyData(DB, view, TableName, DATA)
                # Throttle to stay under the Analytics API rate limit — TODO confirm.
                time.sleep(1)
                backfill_week.nextWeek()
            print('filling service ' + view['view_name'] + " data is completed!")
| [
"frgunawan82@gmail.com"
] | frgunawan82@gmail.com |
3f8f76eb423bfd5c65548194330c618892396e75 | a14ccb8783d01c95b4adadbf99586e645efc8d0a | /in_class_excersizes/sum_integers.py | a64511c52a6c666795d203bdd0a493d5b188aa95 | [
"MIT"
] | permissive | JiayingWei/SoftwareDesign | 6b81ac91d53a0e3d590e1db7224ef95c8ac536c3 | 1af26d64156254e9d04a5f60167468650d312e7a | refs/heads/master | 2021-01-09T06:53:12.221535 | 2014-11-07T05:12:20 | 2014-11-07T05:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | #sums integers in range x to y
def sum_integer(x,y):
    """Return the sum of all integers from x through y inclusive.

    For x > y the range beyond the seed is empty, so x itself is returned
    (matching the original accumulator seeded with x).
    """
    # sum(range(...)) replaces the manual accumulation loop; the per-iteration
    # debug print has been removed (it polluted stdout on every call).
    return x + sum(range(x + 1, y + 1))
print sum_integer(1,5) | [
"jywei02@gmail.com"
] | jywei02@gmail.com |
8aba8f2fd1cf433ebdceec92e8249b301399eedc | c7acfad49bed848a521a2c0b38bbab2c982df914 | /cmdb_pro/urls.py | af9b91bc7c2f432b258a331321af0de538d8b326 | [] | no_license | shaoxiaUdropUUNshort/cmdb_server | f3803ff4c97d3a9c2bb88599bf9d02ed6f229cea | 7ce91f9d83eb057db11e4856639612f8fbf7b263 | refs/heads/master | 2020-05-26T16:32:59.521646 | 2019-05-24T03:31:33 | 2019-05-24T03:31:33 | 188,303,500 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | """cmdb_pro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route table: Django admin plus the "api" and "web" apps, each mounted with a
# namespaced include (the (urlconf, app_name) tuple form).
urlpatterns = [
    path('admin/', admin.site.urls),
    path("api/", include(('api.urls', 'api'))),
    path('web/', include(('web.urls', 'web'))),
]
| [
"591311452@qq.com"
] | 591311452@qq.com |
c1a761de7a32cd5bbf6b757a6d727c41f014d90d | 4ff7ec14dad9847f8d49b7a82ab5ffd4c9d6fdca | /vae/datasets.py | 2c27a6322450e94f83c6c92ba49389c7f18524b8 | [] | no_license | fwt-team/SVRAE-CvMFMM | 951582cc4fa4f0a3c204cbf3417fc4bc81395dff | 7da4eac03caaa4844ef31cf784b76eb07394bf7f | refs/heads/main | 2023-02-15T04:39:54.064840 | 2021-01-04T13:26:36 | 2021-01-04T13:26:36 | 326,687,253 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | # encoding: utf-8
"""
@Author: andy
@Contact: andy_viky@163.com
@Github: https://github.com/AndyandViky
@Csdn: https://blog.csdn.net/AndyViky
@File: cluster_process.py
@Time: 2020-03-14 15:45
@Desc: cluster_process.py
"""
try:
import torch
from nilearn import datasets
from torch.utils.data import Dataset
except ImportError as e:
print(e)
raise ImportError
class ADHD(Dataset):
    """In-memory Dataset over pre-loaded ADHD brain data.

    Each item is data[index], optionally passed through *transform*, with a
    singleton dimension inserted at axis 1.
    """

    def __init__(self, root, data, train=True, transform=None, download=False):
        super(ADHD, self).__init__()
        # Store the configuration as-is; *download* is accepted for API
        # compatibility but otherwise unused here.
        self.data = data
        self.root = root
        self.train = train
        self.transform = transform

    def __getitem__(self, index):
        sample = self.data[index]
        if self.transform is not None:
            sample = self.transform(sample)
        return sample.unsqueeze(1)

    def __len__(self):
        return len(self.data)
# Registry mapping dataset names to their Dataset classes.
DATASET_FN_DICT = {
    'adhd': ADHD,
}
# dict_keys view of the registry (reflects later additions automatically).
dataset_list = DATASET_FN_DICT.keys()
def _get_dataset(dataset_name='adhd'):
    """Resolve *dataset_name* to its registered Dataset class.

    Raises ValueError for names missing from DATASET_FN_DICT.
    """
    if dataset_name not in DATASET_FN_DICT:
        raise ValueError('Invalid dataset, {}, entered. Must be '
                         'in {}'.format(dataset_name, dataset_list))
    return DATASET_FN_DICT[dataset_name]
# get the loader of all datas
def get_dataloader(data, dataset_path='../datasets/brain',
                   dataset_name='adhd', train=True, batch_size=50):
    """Wrap *data* in the requested dataset and return a sequential DataLoader."""
    dataset_cls = _get_dataset(dataset_name)
    wrapped = dataset_cls(dataset_path, data, download=True, train=train,
                          transform=lambda x: torch.tensor(x))
    return torch.utils.data.DataLoader(wrapped, batch_size=batch_size, shuffle=False)
def get_adhd_data(data_dir='./datasets/brain', n_subjects=6):
dataset = datasets.fetch_adhd(data_dir=data_dir, n_subjects=n_subjects)
imgs = dataset.func
return imgs | [
"andy_viky@163.com"
] | andy_viky@163.com |
41f9a2d2a7c66ea609681e47f560bca49c5e415c | 5ba6f9db8e9064d0596c9abfa98a4855fce6b908 | /seed.py | 6d352182904c1272d8444155733a7e7c4784b3f8 | [] | no_license | ameliacgraham/Wanderlust-Ave | 1091192d2ca69b70d85e1e5a6f57877b41b6130d | 586416a23043fea0c91dff1a051e982253035edc | refs/heads/master | 2021-09-11T15:16:56.244161 | 2018-04-09T05:04:01 | 2018-04-09T05:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,159 | py | from sqlalchemy import func
from flask_sqlalchemy import SQLAlchemy
from model import PublicItem, Country, City, Airport, Airline
from model import connect_to_db, db
from server import app
import requests
import os
import json
# Travel Payouts API token, read from the environment at import time
# (raises KeyError immediately if TRAVEL_PAYOUTS_API is unset).
travel_payouts_api = os.environ['TRAVEL_PAYOUTS_API']
def load_public_items():
"""Load public items into database."""
print "Users"
for row in open("seed-data/public-items.py"):
row = row.rstrip()
title, address, country, latitude, longitude, image = row.split("|")
item = PublicItem(title=title,
address=address,
country=country,
latitude=latitude,
longitude=longitude,
image=image)
db.session.add(item)
db.session.commit()
def load_airports():
print "Airports"
url = "http://api.travelpayouts.com/data/airports.json?token={}".format(travel_payouts_api)
r = requests.get(url)
data = r.text
results = json.loads(data)
for key in results:
airport_name = key['name'].encode('utf-8')
code = key['code']
country_code = key['country_code'].encode('utf-8')
coordinates = key['coordinates']
if coordinates:
latitude = key['coordinates']['lat']
longitude = key['coordinates']['lon']
country = Country.query.filter(Country.code==country_code).first()
city_code = key['code'].encode('utf-8')
city = City.query.filter(City.code==city_code).first()
if country and not city and not coordinates:
country_id = country.id
airport = Airport(name=airport_name,
code=code,
country_id=country_id,
city_id=city_id)
db.session.add(airport)
if city and not country and not coordinates:
city_id = city.id
airport = Airport(name=airport_name,
code=code,
city_id=city_id)
db.session.add(airport)
if city and coordinates and not country:
latitude = key['coordinates']['lat']
longitude = key['coordinates']['lon']
city_id = city.id
airport = Airport(name=airport_name,
code=code,
city_id=city_id,
latitude=latitude,
longitude=longitude)
db.session.add(airport)
if country and coordinates and not city:
country_id = country.id
latitude = key['coordinates']['lat']
longitude = key['coordinates']['lon']
airport = Airport(name=airport_name,
code=code,
country_id=country_id,
latitude=latitude,
longitude=longitude)
db.session.add(airport)
if coordinates and not country and not city:
latitude = key['coordinates']['lat']
longitude = key['coordinates']['lon']
airport = Airport(name=airport_name,
code=code,
latitude=latitude,
longitude=longitude)
db.session.add(airport)
if city and country and coordinates:
country_id = country.id
city_id = city.id
airport = Airport(name=airport_name,
code=code,
country_id=country_id,
city_id=city_id,
latitude=latitude,
longitude=longitude)
db.session.add(airport)
db.session.commit()
def load_cities():
print "Cities"
url = "http://api.travelpayouts.com/data/cities.json?token={}".format(travel_payouts_api)
r = requests.get(url)
data = r.text
results = json.loads(data)
for key in results:
city_name = key['name'].encode('utf-8')
city_code = key['code'].encode('utf-8')
country_code = key['country_code'].encode('utf-8')
country = Country.query.filter(Country.code==country_code).first()
if country:
country_id = country.id
city = City(name=city_name, code=city_code, country_id=country_id)
else:
city = City(name=city_name, code=city_code)
db.session.add(city)
db.session.commit()
def load_countries():
print "Countries"
url = "http://api.travelpayouts.com/data/countries.json?token={}".format(travel_payouts_api)
r = requests.get(url)
data = r.text
results = json.loads(data)
for key in results:
country_name = key['name']
country_code = key['code']
country = Country(name=country_name, code=country_code)
db.session.add(country)
db.session.commit()
def load_airlines():
print "Airlines"
url = "http://api.travelpayouts.com/data/airlines.json?token={}".format(travel_payouts_api)
r = requests.get(url)
data = r.text
results = json.loads(data)
for key in results:
airline_name = key['name']
code = key['iata']
if code:
airline = Airline(name=airline_name, code=code)
db.session.add(airline)
db.session.commit()
def set_val_user_id():
    """Set value for the next public_id after seeding database"""
    # Get the max id in the database.  The original passed the boolean
    # expression PublicItem.id==id (a comparison against the *builtin* id
    # function) to func.max; the intended aggregate is over the column itself.
    result = db.session.query(func.max(PublicItem.id)).one()
    max_id = int(result[0])
    # Set the value for the next id to be max_id + 1 (PostgreSQL sequence).
    query = "SELECT setval('public_items_id_seq', :new_id)"
    db.session.execute(query, {'new_id': max_id + 1})
    db.session.commit()
if __name__ == "__main__":
    connect_to_db(app)
    # In case tables haven't been created, create them
    db.create_all()
    # Import different types of data
    # NOTE(review): load_airports() looks up Country/City rows, but countries
    # and cities are loaded afterwards — verify the intended sequence.
    load_public_items()
    load_airlines()
    load_airports()
    load_cities()
load_countries() | [
"ameliacgreen@gmail.com"
] | ameliacgreen@gmail.com |
bf08cb4392822b8b2da983e3873d092ad3c2d211 | d2965b6178ae33665ce5c062b3ab3bcf4795fbfa | /com_framework/settings.py | 7237ae49ce8601ce03f3b84cf368936d752dc9ad | [] | no_license | royscher/com_framework | 2a5bdef89ebd26bee601413277650f46f9373509 | 44400422a212e9cda891bb76490fe3a91ca021b7 | refs/heads/master | 2021-01-12T01:32:00.395437 | 2017-01-09T07:44:19 | 2017-01-09T07:44:19 | 78,400,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,892 | py | # -*- coding:utf-8 -*-
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root directory: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k5jt8kw79d256bp7*s17&86avl-0_+u0qnm#ykfmyiwzpqrs9p'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'gunicorn',
    'djcelery',
    'core'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'com_framework.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'com_framework.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# NOTE(review): root user with an empty password — development-only credentials.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'test_home',
        'HOST': '127.0.0.1',
        'PORT': '3306',
        'PASSWORD': '',
        'USER': 'root'
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'

# Celery (django-celery) configuration, backed by a local Redis broker.
import djcelery
djcelery.setup_loader()
BROKER_URL = 'redis://127.0.0.1:6379'
CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/0'
# NOTE(review): ('core.tasks') is a plain string, not a one-element tuple —
# confirm whether ('core.tasks',) was intended.
CELERY_IMPORTS = ('core.tasks')
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = TIME_ZONE
CELERY_IGNORE_RESULT = True
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
"592819178@qq.com"
] | 592819178@qq.com |
e17bacc3710a7c4cc66aeef5a6a8e2fe03ff76f1 | 0100eb15bfbf079966c7a8569ef808a710f01735 | /design-of-computer-programs/Regular_Expression/exec1.py | e1461db2d1f90f5e76627d0e5ca9f56f88d2573c | [
"MIT"
def search(pattern, text):
    "Return True if pattern appears anywhere in text."
    if pattern.startswith('^'):
        # Anchored pattern: must match at the start of text.
        return match(pattern[1:], text)
    else:
        # Bug fix: the original returned the tuple ('.*' + pattern, text)
        # instead of calling match(), so unanchored searches never matched.
        return match('.*' + pattern, text)
def match(pattern, text):
    """Return True if pattern appears at the start of text."""
    if pattern == '':
        # Empty pattern matches everything.
        return True
    if pattern == '$':
        # '$' matches only the end of the text.
        return text == ''
    if len(pattern) > 1 and pattern[1] in '*?':
        first, op, rest = pattern[0], pattern[1], pattern[2:]
        if op == '*':
            return match_star(first, rest, text)
        # op == '?': the first character is optional.
        if match(first, text) and match(rest, text[1:]):
            return True
        return match(rest, text)
    # Literal (or '.') single character followed by the rest of the pattern.
    return (match1(pattern[0], text) and
            match(pattern[1:], text[1:]))
def match1(p, text):
    """Return true if first character of text matches
    pattern character p."""
    if not text:
        return False
    # '.' is a wildcard; otherwise the literal characters must agree.
    return p in ('.', text[0])
def match_star(p, pattern, text):
    """Return true if any number of char P,
    followed by pattern, matches text."""
    # Zero repetitions of p: the remaining pattern must match here.
    if match(pattern, text):
        return True
    # Otherwise consume one p and recurse on the rest of the text.
    return match1(p, text) and match_star(p, pattern, text[1:])
"787505998@qq.com"
] | 787505998@qq.com |
0c25dd68f7abaaf858f4ffb5e3109947576dcbbe | 2e3d63726c1d05b73b9cc22e5bcbead30246a8dc | /Facepad/wsgi.py | ac7aeeae354627af07c474d0569514805a849d2c | [] | no_license | rolycg/tiny_social_network | 041f6e4ab503bb82eca4cf1efb436d3b5250343a | e7ec45d053d291d53bd9d58bbb882b4b3edb6355 | refs/heads/master | 2021-01-10T04:27:16.344700 | 2016-03-23T18:19:49 | 2016-03-23T18:19:49 | 54,581,800 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for Facepad project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before creating the handler.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Facepad.settings")
# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| [
"rolycg89@gmail.com"
] | rolycg89@gmail.com |
bc50458067d9e25e369b3c585a450d4b442ae38a | bf613947595e979a179eaf39b3eed6eee117d29b | /service/demo/train.py | 2a047eb6be19737d4908077a797e8050509729bb | [] | no_license | wlf061/search | e5c3b60f5f44299ef8a45a32472516b6f88c52d8 | c66959bc861a53fcfddc8d97693dc220d587435d | refs/heads/master | 2020-04-06T08:44:29.730156 | 2018-12-20T12:12:22 | 2018-12-20T12:12:22 | 157,314,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | import os
from collectFeatures import logFeatures, buildFeaturesJudgmentsFile
from loadFeatures import initDefaultStore, loadFeatures
from utils import Elasticsearch, ES_HOST, ES_AUTH
def trainModel(judgmentsWithFeaturesFile, modelOutput, whichModel=6):
    """Shell out to RankLib to train a ranking model.

    :param judgmentsWithFeaturesFile: training data in RankLib format
    :param modelOutput: path the trained model is written to
    :param whichModel: RankLib ``-ranker`` id (6 = LambdaMART)
    """
    # e.g. java -jar RankLib-2.8.jar -ranker 6 -train sample_judgments_wfeatures.txt -save model.txt
    cmd = "java -jar RankLib-2.8.jar -ranker %s -train %s -save %s -frate 1.0" % (whichModel, judgmentsWithFeaturesFile, modelOutput)
    print("*********************************************************************")
    print("*********************************************************************")
    print("Running %s" % cmd)
    os.system(cmd)
def saveModel(scriptName, featureSet, modelFname):
    """ Save the ranklib model in Elasticsearch """
    import requests
    import json
    from urllib.parse import urljoin

    # Payload skeleton expected by the LTR plugin's _createmodel endpoint;
    # the model definition is filled in from the file below.
    modelPayload = {
        "model": {
            "name": scriptName,
            "model": {
                "type": "model/ranklib",
                "definition": {
                }
            }
        }
    }

    with open(modelFname) as modelFile:
        modelPayload['model']['model']['definition'] = modelFile.read()

    fullPath = urljoin(ES_HOST, "_ltr/_featureset/%s/_createmodel" % featureSet)
    print("POST %s" % fullPath)
    print(json.dumps(modelPayload))
    resp = requests.post(fullPath, data=json.dumps(modelPayload),
                         headers={'Content-Type': 'application/json'},
                         auth=ES_AUTH)
    print(resp.status_code)
    if resp.status_code >= 300:
        print(resp.text)
if __name__ == "__main__":
    import configparser  # NOTE(review): imported but unused in this block
    from judgments import judgmentsFromFile, judgmentsByQid

    es = Elasticsearch(timeout=1000)
    # Load features into Elasticsearch
    initDefaultStore()
    loadFeatures()
    # Parse a judgments
    movieJudgments = judgmentsByQid(judgmentsFromFile(filename='search_sample_judgments.txt'))
    # Use proposed Elasticsearch queries (1.json.jinja ... N.json.jinja) to generate a training set
    # output as "sample_judgments_wfeatures.txt"
    logFeatures(es, judgmentsByQid=movieJudgments)
    buildFeaturesJudgmentsFile(movieJudgments, filename='search_sample_judgments_wfeatures.txt')
    # Train each ranklib model type
    for modelType in [0,1,2,3,4,5,6,7,8,9]:
        # 0, MART
        # 1, RankNet
        # 2, RankBoost
        # 3, AdaRank
        # 4, coord Ascent
        # 6, LambdaMART
        # 7, ListNET
        # 8, Random Forests
        # 9, Linear Regression
        print("*** Training %s " % modelType)
        trainModel(judgmentsWithFeaturesFile='search_sample_judgments_wfeatures.txt', modelOutput='model.txt', whichModel=modelType)
        saveModel(scriptName="test_%s" % modelType, featureSet='search_features', modelFname='model.txt')
| [
"wlf061@sina.com"
] | wlf061@sina.com |
class ClienteEmpresa:
    """A company customer: RUC id, legal name, address and phone number.

    The Java-style getter/setter API (including the ``setRucCe`` spelling)
    is preserved for existing callers.
    """

    def __init__(self, rucCE, razonSocialCE, direccionCE, telefonoCE):
        self._ruc = rucCE
        self._razon_social = razonSocialCE
        self._direccion = direccionCE
        self._telefono = telefonoCE

    # --- accessors ---------------------------------------------------
    def getRucCE(self):
        return self._ruc

    def getRazonSocialCE(self):
        return self._razon_social

    def getDireccionCE(self):
        return self._direccion

    def getTelefonoCE(self):
        return self._telefono

    # --- mutators ----------------------------------------------------
    def setRucCe(self, rucCE):
        self._ruc = rucCE

    def setRazonSocialCE(self, razonSocialCE):
        self._razon_social = razonSocialCE

    def setDireccionCE(self, direccionCE):
        self._direccion = direccionCE

    def setTelefonoCE(self, telefonoCE):
        self._telefono = telefonoCE
| [
"kennyvladim@gmail.com"
] | kennyvladim@gmail.com |
4b8054cf8513cd3e3c9a893c882c986ee8724dde | dd0b5bf63973d4b9be8bb237245e90b8bef26d8f | /py_house.py | e911e108739ef4d4fd775022e6dfad74f510d553 | [] | no_license | zelaznik/election_scraping | dc7eaacbbaae2293e378b0a0c39825b0d927fcb6 | 23fc3b1da309a63367ace866fd14fcb13e3aaaf7 | refs/heads/master | 2020-06-27T19:08:15.310899 | 2016-11-24T18:32:19 | 2016-11-24T18:32:19 | 74,523,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | from collections import OrderedDict, defaultdict
from operator import itemgetter
import pandas as pd
import json
import sys
EPSILON = 10**-6
def show(obj):
print(json.dumps(obj, indent=2))
def get_totals(df):
totals_columns = ['contested','democrat_votes','democrat_winner','republican_votes','republican_winner']
totals = pd.DataFrame(df, columns=totals_columns).sum().map(int)
def default_args(chamber, year):
base_path = '%(year)s_%(chamber)s_results' % locals()
json_path = 'output/%(chamber)s/%(year)s/%(base_path)s_politico.json' % locals()
json_path_national = 'post_processing/%(chamber)s/%(year)s/%(base_path)s_national.json' % locals()
excel_path_national = 'post_processing/%(chamber)s/%(year)s/%(base_path)s_national.xlsx' % locals()
return locals().copy()
def main(base_path, json_path, json_path_national, excel_path_national, **kwargs):
try:
with open(json_path, 'r') as f:
raw_data = json.loads(f.read(), object_pairs_hook=OrderedDict)
ct = 0
national_districts = []
for state in raw_data:
for district in state['districts']:
new_district = OrderedDict()
new_district['state_name'] = state['state_name']
new_district['district_id'] = district['district_id']
vote_totals = defaultdict(int)
for candidate in district['candidates']:
vote_totals[candidate['party']] += candidate.get('votes') or 0
for party in sorted(vote_totals, key=vote_totals.get, reverse=True):
new_district['%s_votes' % (party.lower(),)] = vote_totals[party]
national_districts.append(new_district)
ct += 1
assert (ct == 435), "expected 435, got %s" % (ct,)
with open(json_path_national, 'w') as f:
f.write(json.dumps(national_districts, indent=2))
with open(json_path_national, 'r') as f:
national_json = json.loads(f.read())
excel_fields = ['state_name','district_id','democratic_votes','republican_votes','green_votes','libertarian_votes']
df = pd.DataFrame(national_json)[excel_fields]
df.to_excel(excel_path_national, index=True)
finally:
globals().update(locals())
if __name__ == '__main__':
    # Usage: python py_house.py <year>
    year = int(sys.argv[1])
    kwargs = default_args('house', year)
    main(**kwargs)
"steve.zelaznik@iorahealth.com"
] | steve.zelaznik@iorahealth.com |
18f5b10455dd890ae37b523b85eaa5e0ed8b233c | 728e655e2291c1a03734ba1fd0d17af55e7c9b0e | /python/qa_asci_sink.py | f88b439bc14fc5d8963d6720cae0bc02d84bbd48 | [] | no_license | acrerd/gr-spectroscopy | 41ec9058d18d415ed1058187d7699945774ac936 | 9cdbfa039ccafa992ba5a27a280d2de370e6f9c4 | refs/heads/master | 2020-12-02T21:23:28.550807 | 2015-08-05T13:51:42 | 2015-08-05T13:51:42 | 39,574,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
from asci_sink import asci_sink
class qa_asci_sink (gr_unittest.TestCase):
    """Placeholder QA case for the asci_sink block.

    test_001_t only runs an empty flowgraph; no block under test is wired
    up and no assertions are made yet.
    """

    def setUp (self):
        # Fresh top block for every test case.
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def test_001_t (self):
        # set up fg
        self.tb.run ()
        # check data
# check data
if __name__ == '__main__':
    # Run this QA case through GNU Radio's unittest wrapper, writing XML results.
    gr_unittest.run(qa_asci_sink, "qa_asci_sink.xml")
| [
"frith.ronnie@gmail.com"
] | frith.ronnie@gmail.com |
689b90a020d0df47a96c87657ee3a6532ccac798 | e7e5cc4353671d4cb410acf12fb3de92e8f4ac58 | /machine-learning/tensorflow_test_SNH/SNH_run.py | b17e76f1094e00d56c971d60e846ec7c2695910c | [] | no_license | zooniverse/hco-experiments | f10d128f4258e830098564477c66bfa2a13dc5d7 | fb9f4e476f2402fd0d66fb770f6d1b5c433dafbf | refs/heads/master | 2021-04-30T16:40:11.182769 | 2017-07-12T17:22:34 | 2017-07-12T17:22:34 | 80,105,222 | 4 | 2 | null | 2021-01-08T09:56:44 | 2017-01-26T10:27:23 | Python | UTF-8 | Python | false | false | 4,846 | py | # Train a simple CNN using Keras
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator, array_to_img
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
import os
# Parameters
batch_size = 32            # samples per gradient update
num_classes = 2            # real vs. bogus
epochs = 15                # training epochs
data_augmentation = False  # toggles the ImageDataGenerator branch below
# Root directory holding one sub-directory per class ('real', 'bogus').
path_snh = "D:/Studium_GD/Zooniverse/Data/SNHuntersInception/images/"
# function to import one image
def import_one_image(path):
    """Load the image at *path* and return it as a batch-of-one numpy array.

    The result has shape ``(1,) + <shape produced by img_to_array>``, ready
    to be stacked with other samples.
    """
    pil_image = load_img(path)
    array = img_to_array(pil_image)
    return array.reshape((1,) + array.shape)
# Read all images from disk: one sub-directory per class ('real' / 'bogus').
def _load_class_images(class_dir):
    """Return (file_names, stacked image array) for every file under class_dir.

    Collects the per-image arrays in a list and stacks them once, replacing
    the original quadratic vstack-in-a-loop that was guarded by a bare
    ``except:`` (which could mask real errors).
    """
    file_names = []
    arrays = []
    for fname in os.listdir(path_snh + class_dir):
        file_names.append(fname)
        arrays.append(import_one_image(path_snh + class_dir + '/' + fname))
    return file_names, np.vstack(arrays)

real_files, x_real = _load_class_images('real')
y_real = [1] * len(real_files)          # label 1 = real
bogus_files, x_bogus = _load_class_images('bogus')
y_bogus = [0] * len(bogus_files)        # label 0 = bogus

# generate one big data set
x_data = np.vstack((x_real, x_bogus))
y_data = np.concatenate((y_real, y_bogus))

# generate train and test split using sklearn
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data,
                                                    test_size=0.33,
                                                    random_state=42)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Small VGG-style convnet: two conv blocks followed by a dense classifier.
model = Sequential()
# Block 1: two 3x3 convolutions with 32 filters, then pool + dropout.
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Block 2: same structure with 64 filters.
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Classifier head: dense layer then softmax over the two classes.
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Scale pixel values into [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        epochs=epochs,
                        validation_data=(x_test, y_test))
"will5448@umn.edu"
] | will5448@umn.edu |
aaebaa7abad66843241a8ea30c34e91f74b56156 | 055e99955ff655edc0fb7c2f92c6e26bbff99c92 | /manage.py | 5c166ef91b9bcd112a45e1040befa9f06f6bee80 | [] | no_license | Feedour/PSDB | f4d5a266f86937720026fbe693a0ffa456843c45 | 608be5edd48986830772106d370817071ff95ecd | refs/heads/master | 2021-01-19T13:25:32.507202 | 2017-02-18T13:42:26 | 2017-02-18T13:42:26 | 82,389,329 | 0 | 1 | null | 2017-02-19T13:43:30 | 2017-02-18T13:41:47 | Python | UTF-8 | Python | false | false | 802 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before dispatching the command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PSDB.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"golovay14@gmail.com"
] | golovay14@gmail.com |
a8f76511b18e57bf52b5d6514b323e8cd58885ca | 7bb4250ca81848922133dc4946d8ff94866c0d85 | /Tillämpad Programmering 1/KURSOLLE05/kursollE05.py | 43fceee9b6f1850f2e1a4187ebcf0cab5bc34aa5 | [] | no_license | olivertd/repo | 1bd3f5ade45211befdb7b19014ebb019f8aff9b8 | fc082d0210de4b942baf73b2bf3e9f3f4fa892f6 | refs/heads/master | 2023-05-06T08:54:19.130768 | 2021-05-30T10:52:43 | 2021-05-30T10:52:43 | 290,454,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,004 | py | #KURSOLLE
#SELK BANKAPP
from pathlib import Path
import time
import sys
# Welcome-banner prints (currently disabled).
#print("WELCOME TO SELK FINANCIAL BANK APP")
#print("YOU ARE STILL POOR")
#print("LEEETS GO!!!")
#time.sleep(1)
#print("3")
#time.sleep(1)
#print("2")
#time.sleep(1)
#print("1")
#print("GOOOO!!!!!!")

# Main-menu text shown on every loop iteration.
mainmenuoptioners = """Enter number
1 : View Balance
2 : Deposit
3 : Withdraw
4 : View transactions
5 : Terminate account
6 : Exit
"""

# Create the account files on first run: balance.txt seeded with 1000,
# transactions.txt empty.
if Path('balance.txt').is_file() and Path('transactions.txt').is_file():
    pass
else:
    with open('balance.txt', 'w+') as f:
        f.write("1000")
    with open('transactions.txt', 'w+') as f:
        pass

# Loop-control flag.  NOTE(review): the name shadows the built-in exit().
exit = False
# Main menu loop: repeats until the user picks option 6.
while exit != True:
    # Read a menu choice, re-prompting until it is an int in 1..6.
    while True:
        try:
            mainmenuinputer = int(input(mainmenuoptioners))
            assert mainmenuinputer >= 1 and mainmenuinputer <= 6, "FRONXY DOINKSTER TO BIG OR SMALL NUBMERO"
            break
        except AssertionError as msg:
            # Number out of range: show the assertion message.
            print(msg)
        except:
            # Not an integer at all.
            print("FRONXY ERROR [DID NOT ENTER A FRONXY]")
    # Option 6: quit the program.
    if mainmenuinputer == 6:
        print("THANK YOU FOR USING SELK BANK APP")
        print("CURRENTLY BREAKING OUT OF THE TUSK LOOP")
        # Disabled progress-bar animation:
        #for i in range(100+1):
        #time.sleep(0.1)
        #sys.stdout.write(('='*i)+(''*(100-i))+("\r [ %d"%i+"% ] "))
        #sys.stdout.flush()
        #print("BROKEN OUT!!")
        exit = True
    # Option 1: show the balance stored in balance.txt.
    if mainmenuinputer == 1:
        with open('balance.txt') as f:
            balanceerz = f.read()
        print("Your balance is currently ${}".format(balanceerz))
    # Option 2: deposit — add to balance.txt, log it in transactions.txt.
    if mainmenuinputer == 2:
        while True:
            try:
                depositamounters = int(input("Enter amount to depositors: "))
                break
            except:
                print("YOUR DEPOSIT IS NOT AN NUMBERO!!! TRY AGAIN SIRSKY!!")
        with open('balance.txt', 'r+') as f:
            balancerzdepsoit = int(f.read())
            print(balancerzdepsoit)
            depositwriter = balancerzdepsoit + depositamounters
            # Rewind and truncate so the new balance overwrites the old one.
            f.seek(0); f.truncate()
            f.write(str(depositwriter))
        with open('transactions.txt', 'a') as f:
            f.write("Deposit of ${}\n".format(depositamounters))
    # Option 3: withdraw — subtract from balance.txt, log it.
    if mainmenuinputer == 3:
        while True:
            try:
                withdrawamounters = int(input("Enter amount to withdraw: "))
                break
            except:
                print("YOUR WITHDRAWAL AMOUNT CAN*T BE THOSE LETTERS TRY INTEGER")
        with open('balance.txt', 'r+') as f:
            balancerzwithdraw = int(f.read())
            print(balancerzwithdraw)
            withdrawwriter = balancerzwithdraw - withdrawamounters
            f.seek(0); f.truncate()
            f.write(str(withdrawwriter))
        with open('transactions.txt', 'a') as f:
            f.write("Withdrawal of ${}\n".format(withdrawamounters))
    # Option 4: print the transaction log.
    if mainmenuinputer == 4:
        with open('transactions.txt') as f:
            print("\n{}".format(f.read()))
    # Option 5: terminate the account (disabled — would delete both files).
    # if mainmenuinputer == 5:
    #     for i in range(100+1):
    #         time.sleep(0.5)
    #         sys.stdout.write(('='*i)+(''*(100-i))+("\r [ %d"%i+"% ] "))
    #         sys.stdout.flush()
    #     print("TERMINATOR INCOMERS!!!!")
    #     file_path1 = Path('balance.txt')
    #     file_path1.unlink()
    #     file_path2 = Path('transactions.txt')
    #     file_path2.unlink()
    #     time.sleep(3)
    #     print("TERMINATOR SUCCESS!!!!!!!")
    #     exit = True
"70135343+olivertd@users.noreply.github.com"
] | 70135343+olivertd@users.noreply.github.com |
5bda35b353dfdc0adf3e24d9267452c649d20907 | a40ad109229e186c48cf3f7a5aaa46b55acd300d | /12_using_proxy/main_http.py | ae551d2b33cf455b12d5ae94abf473d24f5e9242 | [] | no_license | olen2006/parsing_websites | 41320e368f092e8108004e78d2ee22d4e3fb88f7 | c7b64a315f3872427c00746045589bbf5199bc06 | refs/heads/master | 2022-12-11T18:35:52.374592 | 2019-11-03T01:59:50 | 2019-11-03T01:59:50 | 217,400,931 | 0 | 0 | null | 2022-12-08T06:46:43 | 2019-10-24T21:47:31 | Python | UTF-8 | Python | false | false | 1,096 | py | import requests
from bs4 import BeautifulSoup
from random import choice
def get_proxy():
    """Scrape free-proxy-list.net and return one random scraped proxy.

    Returns a dict {'schema': 'http', 'address': 'ip:port'}.  Only rows whose
    7th table cell is 'no' are kept (presumably the "Https" column — TODO
    confirm against the site's current layout).
    """
    html = requests.get('https://free-proxy-list.net/').text
    soup = BeautifulSoup(html, 'lxml')
    trs = soup.find('tbody').find_all('tr')
    #trs = soup.find('table', id = 'proxylisttable').find_all('tr')[1:11]
    proxies = []
    for tr in trs:
        tds = tr.find_all('td')
        if tds[6].text.strip() == 'no':
            ip = tds[0].text.strip()
            port = tds[1].text.strip()
            schema = 'http'
            proxy = {'schema':schema,'address':ip + ':' + port}
            proxies.append(proxy)
        else:
            continue
    # NOTE(review): choice([]) raises IndexError if no row qualified.
    return choice(proxies)
def get_html(url):
    """Fetch *url* through a randomly scraped proxy and return its 'ip' JSON field."""
    #proxies = {'http/https':'ipaddress:port'}
    p = get_proxy()  # returns {'schema': ..., 'address': ...}
    proxy = {p['schema']:p['address']}
    # NOTE(review): get_proxy() always sets schema to 'http', so for an
    # https:// URL requests will NOT route through this proxy — confirm intent.
    r = requests.get(url, proxies=proxy, timeout=5)
    #return r.json()['origin']
    return r.json()['ip']
def main():
    """Print our apparent public IP as seen through the chosen proxy."""
    #url = 'http://httpbin.org/ip'
    url = 'https://ip4.seeip.org/json'
    print(get_html(url))


if __name__=='__main__':
    main()
| [
"olen2006@gmail.com"
] | olen2006@gmail.com |
9374b6f594a8254fc73a350ff09d8692c004e003 | 089ec3a7d17b07d388c7006cb50ce95ac746508d | /src/smali_class.py | 04eef7d9812bbf22cb3b35e70fe8bf589ab3ce04 | [
"BSD-3-Clause"
] | permissive | OXDEED/decrypticon | be10a7c97605021bf48290f86b486eded2b4d7ca | e5115261157719addd12d717f2862086dcc5462e | refs/heads/master | 2023-04-08T20:11:07.256668 | 2021-04-21T14:56:56 | 2021-04-21T14:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,008 | py | import logging
import typing as t
from src.error import Error
from src.smali_method import SmaliMethod
class SmaliClass:
    """A parsed smali class file: header lines plus a list of SmaliMethod."""

    def __init__(self, name: str):
        self.name: str = name
        # Class signature extracted from the first line during parse().
        self.sig: str = ''
        self.methods: t.List[SmaliMethod] = []
        # Every line before the first '.method' directive.
        self.header_block: t.List[str] = []

    def __parse_class_header(self, idx: int,
                             file_lines: t.List[str]
                             ) -> t.Tuple[int, t.List[str],
                                          str, t.Optional[Error]]:
        """Parse the class signature and header block.

        Returns (index of the first '.method' line, header lines, signature,
        error-or-None).  The signature is always taken from the first line of
        the file: its last whitespace-separated token with the first and last
        characters stripped.
        """
        try:
            sig = file_lines[0].split()[-1][1:-1].strip()
        except IndexError:
            return 0, [], '', Error(
                'Could not parse class header: {}'.format(self.name))
        i = 1  # Skipping the first line (immediately overwritten by the loop)
        header_block: t.List[str] = []
        # Collect everything up to (not including) the first '.method' line.
        for i in range(idx, len(file_lines)):
            line = file_lines[i]
            if '.method' in line:
                break
            header_block.append(line)
        return i, header_block, sig, None

    def __parse_method(self,
                       idx: int,
                       file_lines: t.List[str]
                       ) -> t.Tuple[int, t.Optional[SmaliMethod],
                                    t.Optional[Error]]:
        """Parse one method starting at file_lines[idx] (a '.method' line).

        Returns (index of the '.end method' line, method, None) on success,
        or (-1, None, error) if the method body fails to parse.
        """
        i = 0
        method_block: t.List[str] = []
        for i in range(idx, len(file_lines)):
            line = file_lines[i]
            method_block.append(line)
            if '.end method' in line:
                break
        # The first collected line is the '.method ...' signature line.
        method = SmaliMethod(self.name, method_block[0])
        err = method.parse(method_block[1:])
        if err:
            return -1, None, err
        return i, method, None

    def parse(self, file_path: str) -> t.Optional[Error]:
        """Parse the smali file at *file_path* into header and methods.

        Returns None on success, or an Error describing the first failure.
        """
        logging.debug("Parsing SmaliClass: [%s]...", file_path)
        with open(file_path, 'r') as fd:
            file_lines = fd.read().splitlines()

        idx = 0
        idx, self.header_block, self.sig, err = self.__parse_class_header(
            idx, file_lines)
        if err:
            return err
        while idx < len(file_lines):
            line = file_lines[idx]
            if '.method' in line:
                idx, method, err = self.__parse_method(idx, file_lines)
                if err:
                    return err
                if not method:
                    # Should be unreachable: __parse_method yields either a
                    # method or an error.
                    raise Exception('FAIL')
                self.methods.append(method)
            idx += 1
        return None

    def write(self, fd: t.IO[t.Any]):
        """Write the header lines and every method back out to *fd*."""
        logging.debug('Writing clazz [%s]', self.name)
        for line in self.header_block:
            fd.write(line)
            # splitlines() stripped the newlines; restore one per line.
            fd.write('\n')
        for method in self.methods:
            method.write(fd)

    # Removes the package from a class's name
    # `com.afjoseph.test.aaa` -> `aaa`
    def get_simple_name(self) -> str:
        if not '.' in self.name:
            return self.name
        return self.name.split('.')[-1].strip()
| [
"7126721+afjoseph@users.noreply.github.com"
] | 7126721+afjoseph@users.noreply.github.com |
ec37c5a25dfd605868df0b5af509666846b98592 | 90c2989e99e7a4d8a1a75bebfa94a9bbf96994c1 | /surf/plugin/manager.py | 172b4cdc1c0b34461596e3152cff159c67bc1a6c | [] | no_license | jurrian/surfrdf | 0373ede048978346e817441de32d532b6a1bc03c | 009689f5bf65db514efeca3a7b1e08bf95652ef8 | refs/heads/master | 2021-07-11T16:44:31.345765 | 2017-10-13T10:11:44 | 2017-10-13T10:11:44 | 106,808,951 | 0 | 0 | null | 2017-10-13T10:07:11 | 2017-10-13T10:07:11 | null | UTF-8 | Python | false | false | 5,595 | py | # Copyright (c) 2009, Digital Enterprise Research Institute (DERI),
# NUI Galway
# All rights reserved.
# author: Cosmin Basca
# email: cosmin.basca@gmail.com
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with
# the distribution.
# * Neither the name of DERI nor the
# names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY DERI ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DERI BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*-
import os
import pkg_resources
from surf.exceptions import PluginNotFoundException
from surf.plugin.reader import RDFReader
from surf.plugin.writer import RDFWriter
from surf.log import info
from .rdflib import ReaderPlugin as RdflibReader
from .rdflib import WriterPlugin as RdflibWriter
from .sparql_protocol import ReaderPlugin as SparqlReader
from .sparql_protocol import WriterPlugin as SparqlWriter
__author__ = 'Cosmin Basca'

# True once entry-point scanning has run; load_plugins() becomes a no-op
# afterwards unless called with reload=True.
_plugins_loaded = False

# setuptools entry-point group names for reader / writer plugins.
ENTRY_POINT_READER = 'surf.plugins.reader'
ENTRY_POINT_WRITER = 'surf.plugins.writer'

# name -> plugin class registries.
_readers = {}
_writers = {}
def _init_plugins(plugins, entry_point_name):
    """Load every setuptools entry point in group *entry_point_name* into *plugins*."""
    for ep in pkg_resources.iter_entry_points(entry_point_name):
        plugins[ep.name] = ep.load()
        info('loaded plugin [%s]'%ep.name)
def load_plugins(reload=False):
    """
    Call this method to load the plugins into the manager. The method is called
    by default when a :class:`surf.store.Store` is instantiated. To cause a reload, call the method with `reload`
    set to *True*

    :param bool reload: reload plugins if True
    """
    global _plugins_loaded
    # Scan the reader and writer entry-point groups only once per process
    # unless a reload is explicitly requested.
    if not _plugins_loaded or reload:
        _init_plugins(_readers, ENTRY_POINT_READER)
        _init_plugins(_writers, ENTRY_POINT_WRITER)
        _plugins_loaded = True
def register(name, reader, writer):
    """
    register reader and writer plugins

    :param str name: the plugin name
    :param reader: reader plugin class (subclass of RDFReader) or None
    :param writer: writer plugin class (subclass of RDFWriter) or None
    """
    # Bug fix: check "is None" first.  issubclass(None, ...) raises
    # TypeError, so the original operand order crashed whenever one
    # plugin side was omitted.
    assert reader is None or issubclass(reader, RDFReader)
    assert writer is None or issubclass(writer, RDFWriter)

    if reader:
        _readers[name] = reader
    if writer:
        _writers[name] = writer
def _register_surf():
    """Add the installed `surf` distribution itself to the pkg_resources
    working set, so its own entry points are discoverable."""
    import surf
    # Directory that contains the surf package.
    surf_parent = os.path.split(os.path.split(surf.__file__)[0])[0]
    for dist in pkg_resources.find_distributions(surf_parent):
        if dist.key == 'surf':
            pkg_resources.working_set.add(dist)
            break
def add_plugin_path(plugin_path):
    """
    Loads plugins from `path`. Method can be called multiple times, with different locations. (Plugins are loaded only
    once).

    :param str plugin_path: register plugin search path
    """
    _register_surf()
    for dist in pkg_resources.find_distributions(plugin_path):
        # only load SURF plugins!
        if ENTRY_POINT_READER in dist.get_entry_map() or ENTRY_POINT_WRITER in dist.get_entry_map():
            pkg_resources.working_set.add(dist)
def registered_readers():
    """
    Return the names under which reader plugins
    (:cls:`surf.plugin.reader.RDFReader` subclasses) are registered.

    :rtype: list or set
    """
    return _readers.keys()
def registered_writers():
    """
    Return the names under which writer plugins
    (:cls:`surf.plugin.writer.RDFWriter` subclasses) are registered.

    :rtype: list or set
    """
    return _writers.keys()
def get_reader(reader_id, *args, **kwargs):
    """Instantiate and return the reader plugin registered as *reader_id*.

    :raises PluginNotFoundException: if no such reader is registered.
    """
    global _readers
    if reader_id not in _readers:
        raise PluginNotFoundException('reader plugin [{0}] was not found'.format(reader_id))
    return _readers[reader_id](*args, **kwargs)
def get_writer(writer_id, reader, *args, **kwargs):
    """Instantiate and return the writer plugin registered as *writer_id*.

    :param reader: the RDFReader instance the writer is bound to
    :raises PluginNotFoundException: if no such writer is registered.

    Fix: unlike ``get_reader``, the original simply fell off the end and
    returned ``None`` for an unknown writer id; raise the same exception
    ``get_reader`` raises so callers get a clear error instead of a
    ``None`` that blows up later.
    """
    assert isinstance(reader, RDFReader), 'reader is not an instance of RDFReader!'
    global _writers
    if writer_id in _writers:
        return _writers[writer_id](reader, *args, **kwargs)
    raise PluginNotFoundException('writer plugin [{0}] was not found'.format(writer_id))
# ----------------------------------------------------------------------------------------------------------------------
#
# register builtin plugins
#
# ----------------------------------------------------------------------------------------------------------------------
# Register the built-in plugins that ship with SuRF (rdflib backend and the
# SPARQL-protocol backend), then discover any externally installed ones.
register("rdflib", RdflibReader, RdflibWriter)
register("sparql_protocol", SparqlReader, SparqlWriter)
# load the rest of the plugins
load_plugins(reload=False)
| [
"cosmin.basca@gmail.com"
] | cosmin.basca@gmail.com |
7ef205605195dd081d4c26f397c062c1884fbcad | 160bae957f7b74f0f06bbee6bf152585f6d43d5c | /event.py | a3bb24168026199cd13176544a21a6a03704e358 | [] | no_license | FHT360/image-quality | 4b98ad3c3ef994092fd3c8972414424cdf5cff9e | f8cc04f7aefcf75f295923448c8d57aa5779cfee | refs/heads/master | 2023-02-06T04:35:22.920595 | 2020-12-16T03:55:13 | 2020-12-16T03:55:13 | 321,529,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | import logging
def handler(event, context):
    """Aliyun Function Compute entry point: log the OSS trigger event and reply."""
    logging.getLogger().info('oss trigger event = %s', event)
    return 'hello world'
| [
"imyuanjian@gmail.com"
] | imyuanjian@gmail.com |
103fa63cf3d31fa4a7bbddb52e36bd232beb8598 | c3687a1efe750d228e03ad126d94d0bb082f9468 | /실패율.py | 847e3a054c24eeb5fff2d42d71c65db148827aa2 | [] | no_license | taeyoung02/Algorithm | e15973871572a4cb31be42d0cae76bfe50e2b0d6 | 3976191b82d287577dcbc2739f746ed23adcc2da | refs/heads/master | 2023-07-17T10:26:30.024619 | 2021-08-17T14:08:44 | 2021-08-17T14:08:44 | 237,749,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | def solution(N, stages):
answer = []
stages.sort()
denominator = 0
arr = [0] * N
pctg = [0] * N
for i in reversed(stages):
denominator += 1
if i <= N:
arr[i - 1] += 1
pctg[i - 1] = arr[i - 1] / denominator
for i in range(N):
pctg[i] = [pctg[i], N-i]
pctg.sort()
answer = [N-i[1]+1 for i in pctg]
answer.reverse()
return answer
# 34215
print(solution( 5, [2, 1, 2, 6, 2, 4, 3, 3]))
| [
"dhrmsry777@naver.com"
] | dhrmsry777@naver.com |
bb4ae5117c550175916508e147944f41873ecbf2 | 5fb8e914f90cd79ecd4a499b7d6936d2898d11de | /matma.py | 9b340e308eb94cf7f37ba9fb909a16132c154528 | [] | no_license | JokurPL/matma | 0d4e415d10149d1b569d308d87a8082f91e26823 | d95514b7f701e34f5d4260e2fb91c4e0330d7f64 | refs/heads/master | 2021-01-22T15:10:53.422607 | 2017-09-04T15:51:43 | 2017-09-04T15:51:43 | 102,377,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from flask import Flask, render_template
import datetime
app = Flask(__name__)
@app.route('/')
def index():
year = datetime.date.today().year
return render_template("index.html", year=year)
if __name__ == '__main__':
app.run()
| [
"noreply@github.com"
] | JokurPL.noreply@github.com |
b8791ec151ea84768734d33a57f6c55decf0f5de | 2247f2197b43c0d6dfe794bc5648c12083ab6f9a | /src/lib/rclone.py | 7cd9781bd732bf045089a63c8ce814bd20d49431 | [] | no_license | wdtgbot/rclone-heroku | 4513095f291154054cd9a4ce643ef9ad79ff0918 | 2d27f34adc0588d166acb37df0a3d317e43a926d | refs/heads/master | 2023-07-18T14:18:49.918889 | 2021-08-31T18:59:02 | 2021-08-31T18:59:02 | 400,961,497 | 1 | 0 | null | 2021-09-01T00:00:36 | 2021-08-29T05:46:39 | Python | UTF-8 | Python | false | false | 5,888 | py | import os
import shutil
import subprocess
import zipfile
import requests
from lib.dir import PROJECT_ABSOLUTE_PATH
from lib.log import Loggers
from lib.md5 import get_md5_str
class Rclone:
    """Thin wrapper around the rclone binary.

    Handles downloading/installing rclone under the project directory,
    starting detached copy jobs, and reading per-job log files. A job is
    identified by md5(src + dst); each job writes its own rclone log under
    ``log/tmp`` which the ``get_job_*`` helpers parse.
    """
    def __init__(self):
        # Path of the rclone binary inside the extracted zip (filled in by
        # __find_rclone_from_path during install).
        self.rclone_path_of_zip = None
        # Installed binary location and the directory for per-job log files.
        self.rclone_bin_path = os.path.join(PROJECT_ABSOLUTE_PATH, "rclone", "bin", "rclone")
        self.rclone_logfile_dir = os.path.join(PROJECT_ABSOLUTE_PATH, "log", "tmp")
    def job_copy(self, src, dst):
        """Start an rclone copy from *src* to *dst* as a detached background
        process, removing any previous log for the same src/dst pair.

        NOTE(review): src/dst are interpolated unquoted into a shell command;
        paths containing spaces or shell metacharacters will break.
        """
        if not os.path.exists(self.rclone_logfile_dir):
            os.makedirs(self.rclone_logfile_dir)
            Loggers().get_logger("info").info(
                "创建rclone临时日志目录: {rclone_logfile_dir}".format(rclone_logfile_dir=self.rclone_logfile_dir))
        # One log file per job, keyed by md5 of src+dst.
        log_file_path = os.path.join(self.rclone_logfile_dir, get_md5_str(src + dst) + ".log")
        if os.path.exists(log_file_path):
            os.remove(log_file_path)
            Loggers().get_logger("info").info("删除原任务: {src} {dst}".format(src=src, dst=dst))
        # nohup + trailing '&' detaches the copy so this call returns at once.
        cmd = 'nohup {rclone_bin} copy --config={config_file_path} --use-json-log -vv --stats 10s --ignore-existing --log-file={log_file_path} {src} {dst} > /rclone.log 2>&1 &'.format(
            config_file_path=os.path.join(PROJECT_ABSOLUTE_PATH, "rclone.conf"), rclone_bin=self.rclone_bin_path,
            log_file_path=log_file_path, src=src, dst=dst)
        subprocess.Popen(cmd, shell=True)
    def get_job_info(self, src, dst) -> (bool, dict):
        """
        Fetch status information for a copy job by parsing its log file.

        :param src: source path the job was started with
        :param dst: destination path the job was started with
        :return: bool: whether the job (i.e. its log file) exists;
                dict: {} for ts,tts,percentage,speed,eta,finish
        """
        logfile_name = get_md5_str(src + dst) + ".log"
        logfile_path = os.path.join(self.rclone_logfile_dir, logfile_name)
        if os.path.exists(logfile_path):
            infos = {
                'finish': 0
            }
            with open(logfile_path) as fn:
                for x in fn:
                    # rclone prints 'go routines active' on shutdown → job done.
                    if 'go routines active' in x:
                        infos['finish'] = 1
                    # Latest JSON stats line becomes the job's 'status'.
                    if 'info' in x and 'nTransferred' in x:
                        infos["status"] = x
            return True, infos
        else:
            return False, None
    def get_job_logfile(self, src, dst) -> (bool, str):
        """Return (True, full log text) for the src/dst job, or (False, None)
        if the job has no log file."""
        logfile_name = get_md5_str(src + dst) + ".log"
        logfile_path = os.path.join(self.rclone_logfile_dir, logfile_name)
        if os.path.exists(logfile_path):
            with open(logfile_path, "r") as fn:
                msg = fn.read()
            return True, msg
        else:
            return False, None
    def check_rclone_installed(self) -> bool:
        """
        Whether the rclone binary is already installed at the expected path.

        :return: bool
        """
        if os.path.exists(self.rclone_bin_path):
            return True
        else:
            return False
    def install_rclone(self) -> bool:
        """
        Download the current rclone release and install its binary under the
        project directory.

        :return: True on success, False if the download failed three times.
        """
        Loggers().get_logger("info").info("开始下载rclone")
        rclone_install_url = "https://downloads.rclone.org/rclone-current-linux-amd64.zip"
        # Up to three download attempts with a 20s timeout each.
        i = 0
        while i < 3:
            try:
                response = requests.get(rclone_install_url, timeout=20)
                break
            except requests.exceptions.RequestException:
                Loggers().get_logger("info").info("重试{i} :下载rclone".format(i=i))
                i += 1
        if i == 3:
            Loggers().get_logger("error").info("rclone 下载超时")
            return False
        # install rclone: write the zip, extract the binary, move it into place.
        rclone_zip_path = os.path.join(PROJECT_ABSOLUTE_PATH, "rclone.zip")
        if os.path.exists(rclone_zip_path):
            os.remove(rclone_zip_path)
        # NOTE(review): i < 3 is always true here (i == 3 returned above).
        if i < 3:
            with open(rclone_zip_path, "wb") as fn:
                fn.write(response.content)
            Loggers().get_logger("info").info("rclone 下载完成")
            zip_file = zipfile.ZipFile(rclone_zip_path)
            zip_list = zip_file.namelist()
            print(zip_list)
            # Extract only the entry that is the rclone binary itself.
            for f in zip_list:
                if str(f).endswith("rclone"):
                    zip_file.extract(f, os.path.join(PROJECT_ABSOLUTE_PATH, "rclone"))
                    Loggers().get_logger("info").info("rclone 解压完成")
                    print("-- rclone 解压完成")
            zip_file.close()
            # Locate the extracted binary, then move it to rclone/bin/rclone.
            self.__find_rclone_from_path(os.path.join(PROJECT_ABSOLUTE_PATH, "rclone"))
            if self.rclone_path_of_zip:
                if not os.path.exists(os.path.join(PROJECT_ABSOLUTE_PATH, "rclone", "bin")):
                    os.mkdir(os.path.join(PROJECT_ABSOLUTE_PATH, "rclone", "bin"))
                if os.path.exists(self.rclone_bin_path):
                    os.remove(self.rclone_bin_path)
                shutil.move(self.rclone_path_of_zip, self.rclone_bin_path)
                subprocess.Popen("chmod +x {rclone_path}".format(
                    rclone_path=self.rclone_bin_path), shell=True)
            # clear: remove the zip and leftover extraction directories.
            if os.path.exists(rclone_zip_path):
                os.remove(rclone_zip_path)
                Loggers().get_logger("info").info("清理临时文件: {path}".format(path=rclone_zip_path))
            rclone_extract_path = os.path.join(PROJECT_ABSOLUTE_PATH, "rclone")
            for dir in os.listdir(rclone_extract_path):
                print(dir)
                if dir != "bin":
                    print("删除")
                    # NOTE(review): os.removedirs only removes *empty* directories
                    # and raises OSError on non-empty ones — confirm intended.
                    os.removedirs(os.path.join(rclone_extract_path, dir))
                    Loggers().get_logger("info").info("清理临时文件夹: {path}".format(path=os.path.join(rclone_extract_path, dir)))
        return True
    def __find_rclone_from_path(self, path):
        """Recursively search *path* for the extracted rclone binary and record
        it in self.rclone_path_of_zip; subtrees ending in 'bin' are skipped so
        an already-installed binary is not picked up."""
        if path.endswith("bin"):
            return
        if os.path.isfile(path):
            self.rclone_path_of_zip = path
        if os.path.isdir(path):
            for x in os.listdir(path):
                self.__find_rclone_from_path(path=os.path.join(path, x))
| [
"e@e.com"
] | e@e.com |
660415f42be7ef29c4850382c665704d9b76f55e | 23d63436788f1528fe22fd391d2553261196b8b9 | /python/industry.py | fc786d9dcaf5eeff60ddfe35412f207028fa4e64 | [] | no_license | AmericasWater/fivecounties | 91b16ae840bc3f84d0518ed2e446ab660b0e0e47 | aa3c1d824b20abf5c8020431b1af63330b5e3e7d | refs/heads/master | 2021-01-13T00:51:58.096546 | 2016-01-12T20:31:28 | 2016-01-12T20:31:28 | 49,449,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py | # -*- coding: utf-8 -*-
### Industry component
## Optimizes the amount of water and fuel to industry
## Simulates manufacturing as a Cobb-Douglas of water and energy
## The partial objective sells all of the manufactured good on the market
## The sole objective includes the cost of fossil fuels
## Call as `python industry.py` to optimize usage with only industry
import pandas
import numpy as np
import lib
# Load county data for parameters
county_data= pandas.read_csv('ComplexData.csv')
#################### GLOBAL PARAMETERS #####################
N= len(county_data['county']) # number of counties
p_mn = 1 # world price of the manufactured good (normalized to 1)
Area = list(county_data['co_area']) # total land area of the county, sq.km.
alpha= list(county_data['ag_area']/county_data['co_area']) # fraction of land used for agriculture
W_surf= list(county_data['water_cap']) # total water rights (i.e. max water drawn from river)
eta = list(county_data['eta']) # elasticity of commercial output with respect to energy
# (i.e., exponent of energy resource in commercial production function)
production_scaling = 1e-2
def generate():
    """Draw a random starting point: N water demands followed by N energy demands."""
    # commercial water demand, liters per sq.km. per year — scaled so each
    # county stays within its surface-water rights
    water = np.random.uniform(0, 1, N) * np.array(W_surf) / ((1 - np.array(alpha)) * np.array(Area))
    # commercial energy demand, liters per sq.km. per year (US average ~500)
    energy = np.random.uniform(0, 1000, N)
    return water.tolist() + energy.tolist()
def simulate_county(lambda_C, sigma_C, county):
    """Simulate one county's manufacturing as a Cobb-Douglas of water and energy.

    Returns a (water draw, manufactured output, energy demand) triple for the
    county's non-agricultural land.
    """
    land_frac = 1 - alpha[county]
    water_draw = lambda_C * land_frac * Area[county]
    output = production_scaling * land_frac * Area[county] * (sigma_C ** eta[county]) * (lambda_C ** (1 - eta[county]))
    energy_demand = sigma_C * land_frac * Area[county]
    return water_draw, output, energy_demand
def simulate_all(lambda_Cs, sigma_Cs):
    """Run simulate_county for every county and return three per-county lists:
    water draws, manufactured goods and energy demands."""
    per_county = [simulate_county(lambda_Cs[c], sigma_Cs[c], c) for c in range(N)]
    water_draws = [res[0] for res in per_county]
    mfct_goods = [res[1] for res in per_county]
    energy_demands = [res[2] for res in per_county]
    return water_draws, mfct_goods, energy_demands
def partial_objective(Mfct_goods):
    """Revenue from selling every county's manufactured good at the world price."""
    return sum(Mfct_goods[county] * p_mn for county in range(N))
def sole_objective(params):
    """Industry-only objective: sales revenue minus the cost of fossil fuels.

    *params* packs the N water demands followed by the N energy demands.
    """
    water_demands = params[0:N]
    energy_demands = params[N:2*N]
    _, goods, energy_used = simulate_all(water_demands, energy_demands)
    return partial_objective(goods) - sum(energy_used) * lib.p_E
# Per-variable bounds: each county's water demand is capped by its surface-water
# rights per unit of non-agricultural land; energy demand is unbounded above.
bounds = [(0, W_surf[county] / ((1 - alpha[county]) * Area[county])) for county in range(N)] + [(0, None)] * N
if __name__ == '__main__':
    # Optimize water/energy usage with industry as the sole objective.
    # (Python 2 script — note the print statements.)
    result = lib.maximize(sole_objective, generate, bounds)
    print result
    print bounds
| [
"jarising@gmail.com"
] | jarising@gmail.com |
4d5eb0c7a5cb65168b387a0796793a4f520508ea | c354a76f55309666f37d4843528e9b08bcae6b59 | /udpserver-1.py | d9537844c78252a14f05fb11c7ec697c9b7633c5 | [] | no_license | OsayiB/AnalyzingServers | 9424d2d0e793a554a1c43d1aa08152bb3d4054b6 | d8dc812abf0ab66186bec0b93bd9ff4775281a82 | refs/heads/master | 2021-04-02T23:59:38.821746 | 2020-03-18T20:50:44 | 2020-03-18T20:50:44 | 248,338,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | import socket
serverPort = 12000
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serverSocket.bind(('',serverPort))
print('The server is ready to receive')
while 1:
message, clientAddress = serverSocket.recvfrom(2048)
modifiedMessage = message.upper()
serverSocket.sendto(modifiedMessage, clientAddress)
| [
"imasuenosayi1@gmail.com"
] | imasuenosayi1@gmail.com |
49c61346ecb26523d86c356228a4a5bf4f71667f | 79c444fe3382e3a7ad413470cbff005a069ee574 | /Python/Simulation/ForwardKinematics.py | 96dc2db2ccce2bb44ab148a6f81d0a4ea89593db | [] | no_license | sschoedel/Articulated-Arm | 3fcd3de912a5af990d1bbd914b428b453faf6950 | 9e33a93a43a50e735012bf02ea7bbc59caab0b3a | refs/heads/master | 2023-03-10T07:27:30.842214 | 2023-02-19T00:21:25 | 2023-02-19T00:21:25 | 205,483,916 | 0 | 0 | null | 2022-09-14T05:23:38 | 2019-08-31T02:26:20 | Python | UTF-8 | Python | false | false | 9,196 | py | import numpy as np
import matrixHelpers as mh
numRotations = 2
thetas = np.array([360*numRotations/2] * 6)
alpha = np.array([0, -np.pi/2, 0, -np.pi/2, -np.pi/2, np.pi/2])
toolPosition = np.array([60, 0, 0])
# r1 = 47
# r2 = 110
# r3 = 26
# d1 = 133
# d3 = 0
# d4 = 117.5
# d6 = 28
r1_1 = 22.12
r2_1 = 135.7
r3_1 = 31.8
d1_1 = 300.32
d3_1 = 36.3
d4_1 = 293
d6_1 = 62
# for comparing ik code
# r1_1 = 0
# r2_1 = 135.7
# r3_1 = 0
# d1_1 = 300.32
# d3_1 = 0
# d4_1 = 293
# d6_1 = 62
xOff = np.array([r1_1, 0, 0, d4_1, 0, d6_1])
yOff = np.array([0, r3_1, 0, d3_1, 0, 0])
zOff = np.array([r2_1, 0, d1_1, 0, 0, 0])
r1_2 = 22.12
r2_2 = 31.8
r3_2 = 300.32
d1_2 = 135.7
d3_2 = 293
d4_2 = -36.3
d6_2 = 62
# xOff = np.array([22.12, 0, 300.32, 0, 0, 0])
# yOff = np.array([0, 31.8, 0, 293, 0, -62])
# zOff = np.array([135.7, 0, 0, -36.3, 0, 0])
def getEndEffectorData(theta):  # lighter-weight forward kinematics: returns only the tool pose
    """Forward kinematics returning just the tool position and rotation.

    :param theta: 6 joint angles in degrees.
    :return: (toolPosition (3,), toolRotation (3, 3)) in base-frame coordinates.

    NOTE(review): the joint transforms here follow a DH-style convention that
    differs from updateMatrices' active matrices, and the local toolPosition
    ([0, 0, 60]) overrides the module-level value ([60, 0, 0]) — confirm which
    matches the hardware.
    """
    theta = theta * np.pi/180
    # Tool offset expressed in end-effector (frame 6) coordinates.
    toolPosition = np.array([0, 0, 60])
    # Homogeneous transforms frame i -> i+1: joint rotation plus fixed link offset.
    transform01 = np.array([[np.cos(theta[0]), -np.sin(theta[0]), 0, xOff[0]],
                            [np.sin(theta[0]), np.cos(theta[0]), 0, yOff[0]],
                            [0, 0, 1, zOff[0]],
                            [0, 0, 0, 1]])
    transform12 = np.array([[np.cos(theta[1]), -np.sin(theta[1]), 0, xOff[1]],
                            [0, 0, 1, yOff[1]],
                            [-np.sin(theta[1]), -np.cos(theta[1]), 0, zOff[1]],
                            [0, 0, 0, 1]])
    transform23 = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0, xOff[2]],
                            [np.sin(theta[2]), np.cos(theta[2]), 0, yOff[2]],
                            [0, 0, 1, zOff[2]],
                            [0, 0, 0, 1]])
    transform34 = np.array([[np.cos(theta[3]), -np.sin(theta[3]), 0, xOff[3]],
                            [0, 0, 1, yOff[3]],
                            [-np.sin(theta[3]), -np.cos(theta[3]), 0, zOff[3]],
                            [0, 0, 0, 1]])
    transform45 = np.array([[np.cos(theta[4]), -np.sin(theta[4]), 0, xOff[4]],
                            [0, 0, 1, yOff[4]],
                            [-np.sin(theta[4]), -np.cos(theta[4]), 0, zOff[4]],
                            [0, 0, 0, 1]])
    transform56 = np.array([[np.cos(theta[5]), -np.sin(theta[5]), 0, xOff[5]],
                            [0, 0, -1, yOff[5]],
                            [np.sin(theta[5]), np.cos(theta[5]), 0, zOff[5]],
                            [0, 0, 0, 1]])
    # Working position of tool in end effector coordinates
    transform6Tool = np.array([[1, 0, 0, toolPosition[0]],
                               [0, 1, 0, toolPosition[1]],
                               [0, 0, 1, toolPosition[2]],
                               [0, 0, 0, 1]])
    # Cascade every link transform from base to tool in one product.
    transform0Tool = transform01 @ transform12 @ transform23 @ transform34 @ transform45 @ transform56 @ transform6Tool
    # Extract position and rotation data for each joint
    toolPosition = np.array(transform0Tool[:-1,3])
    toolRotation = np.array(transform0Tool[:-1,:-1])
    return toolPosition, toolRotation
def updateMatrices(theta):
    """Full forward kinematics for the 6-DOF arm.

    Parameters
    ----------
    theta : array-like of 6 joint angles in degrees.

    Returns
    -------
    jointPositions : (7, 3) array — base-frame origin of frames 1-6 plus the
        tool point.
    jointRotationMatrices : (7, 3, 3) array of base-frame rotation matrices.
    baseTransforms : (7, 4, 4) array of homogeneous transforms from the base
        to each frame (and the tool).

    Notes
    -----
    Joint 1 rotates about z; joints 2, 3 and 5 about y; joints 4 and 6 about
    x. Link offsets come from the module-level xOff/yOff/zOff arrays.
    Fix: removed ~30 lines of dead, commented-out alternate joint matrices
    that made the active kinematics hard to read; logic is unchanged.
    """
    theta = theta * np.pi/180
    print(f'thetas: {theta}')  # debug trace of the converted angles (radians)
    # Homogeneous transform frame i -> i+1: joint rotation + fixed link offset.
    transform01 = np.array([[np.cos(theta[0]), -np.sin(theta[0]), 0, xOff[0]],
                            [np.sin(theta[0]), np.cos(theta[0]), 0, yOff[0]],
                            [0, 0, 1, zOff[0]],
                            [0, 0, 0, 1]])
    transform12 = np.array([[np.cos(theta[1]), 0, np.sin(theta[1]), xOff[1]],
                            [0, 1, 0, yOff[1]],
                            [-np.sin(theta[1]), 0, np.cos(theta[1]), zOff[1]],
                            [0, 0, 0, 1]])
    transform23 = np.array([[np.cos(theta[2]), 0, np.sin(theta[2]), xOff[2]],
                            [0, 1, 0, yOff[2]],
                            [-np.sin(theta[2]), 0, np.cos(theta[2]), zOff[2]],
                            [0, 0, 0, 1]])
    transform34 = np.array([[1, 0, 0, xOff[3]],
                            [0, np.cos(theta[3]), -np.sin(theta[3]), yOff[3]],
                            [0, np.sin(theta[3]), np.cos(theta[3]), zOff[3]],
                            [0, 0, 0, 1]])
    transform45 = np.array([[np.cos(theta[4]), 0, np.sin(theta[4]), xOff[4]],
                            [0, 1, 0, yOff[4]],
                            [-np.sin(theta[4]), 0, np.cos(theta[4]), zOff[4]],
                            [0, 0, 0, 1]])
    transform56 = np.array([[1, 0, 0, xOff[5]],
                            [0, np.cos(theta[5]), -np.sin(theta[5]), yOff[5]],
                            [0, np.sin(theta[5]), np.cos(theta[5]), zOff[5]],
                            [0, 0, 0, 1]])
    # Working position of tool in end effector coordinates
    transform6Tool = np.array([[1, 0, 0, toolPosition[0]],
                               [0, 1, 0, toolPosition[1]],
                               [0, 0, 1, toolPosition[2]],
                               [0, 0, 0, 1]])
    transform = np.array([transform01, transform12, transform23, transform34, transform45, transform56, transform6Tool])
    # Multiply successive transforms to express every frame in base coordinates.
    transform02 = transform[0] @ transform[1]
    transform03 = transform[0] @ transform[1] @ transform[2]
    transform04 = transform[0] @ transform[1] @ transform[2] @ transform[3]
    transform05 = transform[0] @ transform[1] @ transform[2] @ transform[3] @ transform[4]
    transform06 = transform[0] @ transform[1] @ transform[2] @ transform[3] @ transform[4] @ transform[5]
    toolPos06 = transform06 @ transform6Tool
    baseTransforms = np.array([transform01, transform02, transform03, transform04, transform05, transform06, toolPos06])
    # Split each homogeneous transform into translation and rotation parts.
    jointPositions = np.array([baseTransforms[i,:-1,3] for i in range(0, baseTransforms.shape[0])])
    jointRotationMatrices = np.array([baseTransforms[i,:-1,:-1] for i in range(0, baseTransforms.shape[0])])
    return jointPositions, jointRotationMatrices, baseTransforms
def ForwardK(theta):
    """Forward kinematics via Denavit-Hartenberg parameters.

    input: theta - 6 joint values in degrees
    output: Xfk - 6-element pose [x, y, z, a, b, c] with the orientation
        angles converted back to degrees.

    NOTE(review): uses the *_2 link-parameter set and fixed joint-angle
    offsets; confirm it agrees with updateMatrices, which uses the *_1 set.
    """
    # Link lengths r and offsets d for each of the six DH rows.
    r = np.array([r1_2, r2_2, r3_2, d3_2, 0.0, d6_2])
    d = np.array([d1_2, 0.0, 0.0, d4_2, 0.0, 0.0])
    # Denavit-Hartenberg matrix: fixed theta offsets added to the joint values.
    theTemp = np.array([0.0, 90.0, 0.0, 90.0, 0.0, -90.0])
    theta = np.add(theTemp, theta)
    alfa = np.array([-90.0, 0.0, -90.0, 90.0, -90.0, 0.0])
    # alternate (unused) parameter set kept for reference:
    # r = np.array([r1_2, r2_2, r3_2, 0.0, 0.0, 0.0])
    # d = np.array([d1_2, 0.0, d3_2, d4_2, 0.0, d6_2])
    # from deg to rad
    theta = theta * np.pi/180
    alfa = alfa * np.pi/180
    # work frame pose (all zeros: work frame == base frame)
    Xwf = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    # tool frame pose (all zeros: tool frame == flange frame)
    Xtf = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    # work frame transformation matrix
    Twf = mh.pos2tran(Xwf)
    # tool frame transformation matrix
    Ttf = mh.pos2tran(Xtf)
    # One DH homogeneous transformation matrix per joint.
    T01 = mh.DH1line(theta[0], alfa[0], r[0], d[0])
    T12 = mh.DH1line(theta[1], alfa[1], r[1], d[1])
    T23 = mh.DH1line(theta[2], alfa[2], r[2], d[2])
    T34 = mh.DH1line(theta[3], alfa[3], r[3], d[3])
    T45 = mh.DH1line(theta[4], alfa[4], r[4], d[4])
    T56 = mh.DH1line(theta[5], alfa[5], r[5], d[5])
    # Cascade from the work frame out to the tool.
    Tw1 = Twf @ T01
    Tw2 = Tw1 @ T12
    Tw3 = Tw2 @ T23
    Tw4 = Tw3 @ T34
    Tw5 = Tw4 @ T45
    Tw6 = Tw5 @ T56
    Twt = Tw6 @ Ttf
    # calculate pos from transformation matrix
    Xfk = mh.tran2pos(Twt)
    # Orientation components back to degrees (Xfk(4:6) in the original MATLAB).
    Xfk[3] = Xfk[3]/np.pi*180.0
    Xfk[4] = Xfk[4]/np.pi*180.0
    Xfk[5] = Xfk[5]/np.pi*180.0
    baseTransforms = np.array([Tw1, Tw2, Tw3, Tw4, Tw5, Tw6, Twt])
    # Extract position and rotation data for each joint
    jointPositions = np.array([baseTransforms[i,:-1,3] for i in range(0, baseTransforms.shape[0])])
    jointRotationMatrices = np.array([baseTransforms[i,:-1,:-1] for i in range(0, baseTransforms.shape[0])])
    return jointPositions, jointRotationMatrices, baseTransforms
# Quick smoke test: joint positions at the arm's zero pose.
print(updateMatrices(np.array([0,0,0,0,0,0]))[0])
if __name__ == '__main__':
    pass
"sschoedel@vt.edu"
] | sschoedel@vt.edu |
7639349ca836d91db0810259cd7cc74992dd586b | e42b3731fea3e0d08bf77ff3afdf52498aed5224 | /app.py | ec29802bf5e63c8e31b13ef4bbc6bea3873bdfc4 | [] | no_license | nityaoberoi/git-pivotal | 4f713b6d1d390e002e310b99fc23fddaed7e367a | 6d6abc0836e0df9c14be7688d2658d43fe35af15 | refs/heads/master | 2016-09-02T05:58:11.920763 | 2014-06-16T22:31:51 | 2014-06-16T22:31:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,913 | py | import requests
import logging
from lxml import etree
from os import environ
from os.path import abspath, dirname, join
from flask import Flask, request
app = Flask(__name__)
app_path = abspath(dirname(__file__))
PIVOTAL_URL = "http://www.pivotaltracker.com/services/v3/source_commits"
XML_nodes = {
'source_commit': ['message', 'author', 'commit_id', 'url']
}
def get_api_token(email):
    """
    Look up the Pivotal API token associated with *email*.

    Reads the pivotal token configuration file next to this module and
    returns the api_token of the matching user entry, falling back to the
    configured default token. This is the email that will be used when
    displaying a commit on the associated pivotal tracker ticket.

    Fixes: the original called the never-imported ``simplejson`` module
    (a guaranteed NameError at runtime), leaked the open file handle, and
    used the Python-2-only ``iteritems``; use the stdlib ``json`` with a
    context manager and ``items`` instead.
    """
    import json
    json_file_path = abspath(join(dirname(__file__), 'pivotal_tokens.json'))
    with open(json_file_path) as token_file:
        json_result = json.load(token_file)
    hook_config = json_result.get('github_hook')
    for user, user_values in hook_config.get('user_api_tokens').items():
        if user_values.get('email') == email:
            return user_values.get('api_token')
    return hook_config.get('default_api_token')
def form_xml_post_data(commit):
    """
    Build the XML payload posted to Pivotal Tracker for one commit.

    Returns bytes of the form::

        <source_commit><message>..</message><author>..</author>
        <commit_id>..</commit_id><url>..</url></source_commit>

    where ``commit_id`` is taken from the GitHub payload's ``id`` field.

    Fixes: the original created the root element *and returned* inside the
    loop over the module-level one-key ``XML_nodes`` dict, so any additional
    root entries would be silently dropped; it also pulled in the
    third-party ``lxml`` where the standard library suffices.
    """
    from xml.etree.ElementTree import Element, SubElement, tostring
    # Tracker tag name -> key in the GitHub commit payload.
    tag_to_key = (('message', 'message'), ('author', 'author'), ('commit_id', 'id'), ('url', 'url'))
    root = Element('source_commit')
    for tag, key in tag_to_key:
        SubElement(root, tag).text = str(commit.get(key))
    return tostring(root)
@app.route('/', methods=['POST'])
def process_hook():
    """Receive a GitHub post-receive hook and forward each commit to Pivotal.

    Posts one source_commit XML document per commit to PIVOTAL_URL, using the
    API token of the first commit's author for the whole batch. Always
    returns a human-readable acknowledgement string.
    """
    url = PIVOTAL_URL
    try:
        # NOTE(review): in most Flask versions request.json is a *property*;
        # calling it as request.json() raises TypeError — confirm the Flask
        # version this was written against.
        payload_json = request.json()
        commits = payload_json.get('commits')
        if commits:
            # One token for the batch, keyed off the first commit's author.
            api_token = get_api_token(commits[0].get('author').get('email'))
            for commit in commits:
                xml_data = form_xml_post_data(commit)
                req = requests.post(url, data=xml_data,
                                    headers={
                                        'X-TrackerToken': api_token,
                                        'Content-type': 'application/xml',
                                    }
                                    )
                # requests.Response is falsy for 4xx/5xx status codes.
                if not req:
                    logging.debug(
                        u"Commiting ticket to pivotal resulted in an error."
                        " %s url with data %s and api_token %s",
                        url, xml_data, api_token
                    )
    except Exception:
        logging.exception("Exception when attempting to process payload")
        return "Your hook made trouble so nothing done."
    else:
        return "Thank your for your hook"
if __name__ == '__main__':
    # Bind to PORT if defined, otherwise default to 5000.
    # (0.0.0.0 exposes the dev server on all interfaces, as Heroku expects.)
    port = int(environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| [
"nitya@quirky.com"
] | nitya@quirky.com |
e4e2e6abb44d907f1312c75ae472bed4cafb9c24 | e36bc02ebc4700b66f563f6f51024c3c1cec71ec | /vision/scripts/taco_mergeAnnotations.py | ca78f1074658326274464e5ab9961ed9ce5f93e3 | [
"BSD-3-Clause"
] | permissive | TeamAutonomousCarOffenburg/TACO_2018 | cb1a283fea3a037ddfa74963ab41bc169391a320 | ebb63e466578fc3911269d4a714ebff0a516dbf6 | refs/heads/master | 2020-04-06T08:09:51.436623 | 2018-11-16T09:14:23 | 2018-11-16T09:14:23 | 157,297,005 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | import argparse
import os
import taco_tools as tt
def get_args():
    """Parse and validate CLI arguments for the annotation-merge tool.

    Ensures source-dir ends with a path separator, exists, and does not
    already contain the target json file.
    """
    parser = argparse.ArgumentParser(description='Merges annotation files to one final list')
    parser.add_argument('-s', '--source-dir', required=True,
                        help='source directory with all annotations files that should merged to one')
    parser.add_argument('-tj', '--target-json', required=False, default="\\final_config.json",
                        help="filename for target json file, which will generated in source-dir")
    parsed = parser.parse_args()
    # normalise: guarantee a trailing path separator on the directory
    parsed.source_dir = os.path.join(parsed.source_dir, '')
    if not os.path.isdir(parsed.source_dir):
        raise NotADirectoryError("source-dir ist not a valid directory")
    if os.path.isfile(parsed.source_dir + parsed.target_json):
        raise FileExistsError("targe-json already exists")
    return parsed
def collect_json_annotations(path_to_files, json_files):
    """Read every annotation file and flatten them into one list.

    A file whose content is already a list contributes its elements; a file
    holding a single annotation object contributes that object.
    """
    merged = []
    for name in json_files:
        data = tt.read_json(path_to_files + name)
        if type(data) == list:
            merged.extend(data)
        else:
            merged.append(data)
    return merged
def main():
    """Entry point: merge every .json annotation file in source-dir into one file."""
    args = get_args()
    inputs = [name for name in os.listdir(args.source_dir) if name.endswith('.json')]
    print("OUTPUT_FILE: " + args.source_dir + args.target_json)
    print("INPUT_FILES: " + str(len(inputs)))
    annotations = collect_json_annotations(args.source_dir, inputs)
    print(len(annotations))
    tt.write_json(args.source_dir + args.target_json, annotations)
if __name__ == '__main__':
main()
| [
"ricoschillings@gmail.com"
] | ricoschillings@gmail.com |
139457e44167ae9c46d2efad9a137d44ba361061 | eb7506837e8b062309f8e3c32f3af0a63300d74b | /HomeWork/NeuralNetwork-forward.py | acdac28ac409f39e3fb8d78bd369c248bc376ad1 | [] | no_license | LiangyiHu/MachineLearning | 9a4773bf8b905b717cf6c931d4829694f7b7b140 | 351465643be800829100b133017b0fd528e6949d | refs/heads/master | 2021-01-11T18:36:57.245833 | 2017-01-20T18:13:29 | 2017-01-20T18:13:29 | 79,583,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | import os
import matplotlib.pyplot as plt
import numpy as np
import PIL
os.chdir('C:\Users\mosimtec\Desktop\MachineLearning\mlclass-ex4-007\mlclass-ex4')
datain=np.loadtxt('input.csv', delimiter=',')
dataout=np.loadtxt('output.csv', delimiter=',')
finaldata=np.hstack((np.hstack((np.ones((datain.shape[0],1)),datain)),dataout.reshape(dataout.shape[0],1)))
finaldata[finaldata[:,-1]==10,-1]=0.0
theta1=np.loadtxt('theta1_calculated.csv', delimiter=',')
theta2=np.loadtxt('theta2_calculated.csv', delimiter=',')
identifyunable=0
def convertit(inputarray):
    """Return the index of the strongest activation (the predicted digit).

    Side effect: increments the module-level ``identifyunable`` counter when
    no activation reaches 0.5, i.e. the network is unsure of its answer.
    """
    global identifyunable
    if inputarray.max() < 0.5:
        identifyunable = identifyunable + 1
    return inputarray.argmax()
def calculateoutput(inputarray, theta1, theta2):
    """Forward-propagate one labelled sample; True iff the prediction matches.

    *inputarray* holds the bias-prefixed pixel features followed by the label
    in its last slot.
    """
    features = inputarray[:-1]
    hidden = np.append(1, 1.0 / (1.0 + np.exp(-np.dot(theta1, features.transpose()))))
    output = 1.0 / (1.0 + np.exp(-np.dot(theta2, hidden.transpose())))
    return convertit(output) == inputarray[-1]
def statit(data,theta1,theta2):
right,wrong=0,0
for row in data:
if calculateoutput(row,theta1,theta2):
right+=1
else: wrong+=1
print right, wrong
def getoutput(inputarray, theta1, theta2):
    """Forward-propagate one sample; return (output activations, predicted digit)."""
    hidden = np.append(1, 1.0 / (1.0 + np.exp(-np.dot(theta1, inputarray[:-1].transpose()))))
    activations = 1.0 / (1.0 + np.exp(-np.dot(theta2, hidden.transpose())))
    return activations, convertit(activations)
statit(finaldata,theta1,theta2)
img=PIL.Image.open('newpic.png')
rsize=img.resize((20,20))
inputimg=1.0-np.average(np.asarray(rsize), axis=2)/255
inputimg=np.append(np.append(np.array([1.0]),inputimg),np.array([8.0]))
print getoutput(inputimg,theta1,theta2) | [
"larry.hu.8701@gmail.com"
] | larry.hu.8701@gmail.com |
9f63cabca3c79d7c53f05d58468c6588455bee9c | a89bb3e79e65f03d735da9b87d70aedee20222f5 | /src/wsgi.py | 0c8d93b266d836ba1a77ff5429c88a2f19496f8f | [] | no_license | timpandrews/tasllc | dcc049b5f74e37a4b441866c477e347d9b1b0f82 | 8d3316b7cb886c9ab975d04ce9765e92b90775ff | refs/heads/master | 2021-01-10T02:37:19.064130 | 2016-04-01T20:04:59 | 2016-04-01T20:04:59 | 55,258,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | """
WSGI config for tasllc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.settings")
# The WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"tim.andrews@imagineifinc.com"
] | tim.andrews@imagineifinc.com |
338d572916bfdc40d07ac5264144cefdc150ba24 | 7c76be37c9dc9e9d44e59f5fc7e51659be68f8d7 | /Ex_5/train.py | 72a02c3c119747b774fed9a84ae187af44039c96 | [] | no_license | Andrew-Ma-2001/DL_experiment | e28402bbb6055eb6172fc543add7fe8c794d48d1 | 8c76ef7ba2020759c570d090a0f5c5af9e7166cc | refs/heads/master | 2023-07-18T20:30:47.634418 | 2021-09-18T01:59:10 | 2021-09-18T01:59:10 | 352,347,740 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | import torch
import torchvision
import torch.nn as nn
import torch.utils.data as Data
from Ex_5.model import LeNet
# Reference article this training script follows (unused at runtime).
url = 'https://zhuanlan.zhihu.com/p/355527103'
model = LeNet()
# Hyperparameters.
Epoch = 5
batch_size = 64
lr = 0.001
# MNIST training set; download=False assumes ./data/ is already populated.
train_data = torchvision.datasets.MNIST(root='./data/', train=True, transform=torchvision.transforms.ToTensor(),
                                        download=False)
train_loader = Data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
torch.set_grad_enabled(True)
model.train()
# Use the first GPU when available, otherwise the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
for epoch in range(Epoch):
    # Running totals over the epoch for the periodic progress printout.
    running_loss = 0.0
    acc = 0.0
    for step, data in enumerate(train_loader):
        x, y = data
        optimizer.zero_grad()
        y_pred = model(x.to(device, torch.float))
        loss = loss_function(y_pred, y.to(device, torch.long))
        loss.backward()
        running_loss += float(loss.data.cpu())
        pred = y_pred.argmax(dim=1)
        acc += (pred.data.cpu() == y.data).sum()
        optimizer.step()
        # Report averaged loss/accuracy every 100 mini-batches.
        if step % 100 == 99:
            loss_avg = running_loss / (step + 1)
            acc_avg = float(acc / ((step + 1) * batch_size))
            print('Epoch', epoch + 1, ',step', step + 1, '| Loss_avg: %.4f' % loss_avg, '| Acc_avg:%.4f' % acc_avg)
# NOTE(review): saves the whole model object (pickle), not just state_dict;
# loading requires the identical class definition on the import path.
torch.save(model, './LeNet.pkl')
| [
"979148792@qq.com"
] | 979148792@qq.com |
147a83153243b6c3af4bed4247b1099ee9793600 | 29789d90224f4c08c5413dacb546572a44d95071 | /cases/hangar/richards_wing.py | 3f0a3b2324dbf005a9a655e6feaed338bde0da23 | [
"BSD-3-Clause"
] | permissive | Bo-Zhang1995/sharpy | 3388ef5937950a2a95f23086e0a1d1eac56b26c6 | fcc768cba988b2a2999c6ebe362ab59b84f3695c | refs/heads/master | 2020-09-13T03:36:05.537835 | 2019-11-17T17:36:04 | 2019-11-17T17:36:04 | 222,645,290 | 1 | 0 | BSD-3-Clause | 2019-11-19T08:23:50 | 2019-11-19T08:23:49 | null | UTF-8 | Python | false | false | 13,582 | py | """
Simple Horten Wing as used by Richards. Baseline and simplified models
"""
import numpy as np
from cases.hangar.horten_wing import HortenWing
import sharpy.utils.algebra as algebra
class Baseline(HortenWing):
def set_properties(self):
# Wing geometry
self.span = 20.0 # [m]
self.sweep_LE = 20 * np.pi / 180 # [rad] Leading Edge Sweep
self.c_root = 1.0 # [m] Root chord - Richards
self.taper_ratio = 0.25 # Richards
self.thrust_nodes = [self.n_node_fuselage - 1,
self.n_node_fuselage + self.n_node_wing + 1]
self.loc_cg = 0.45 # CG position wrt to LE (from sectional analysis)
# EA is the reference in NATASHA - defined with respect to the midchord. SHARPy is wrt to LE and as a pct of
# local chord
self.main_ea_root = 0.33
self.main_ea_tip = 0.33
self.n_mass = 2 * self.n_elem_wing
# FUSELAGE GEOMETRY
self.fuselage_width = 1.65/2
self.c_fuselage = self.c_root
# WASH OUT
self.washout_root = 0*np.pi/180
self.washout_tip = -2 * np.pi / 180
# Horseshoe wake
self.horseshoe = False
self.wake_type = 2
self.dt_factor = 1
self.dt = 1 / self.M / self.u_inf * self.dt_factor
# Dynamics
self.n_tstep = int(self.physical_time/self.dt)
self.gust_intensity = 0.1
# Numerics
self.tolerance = 1e-12
self.fsi_tolerance = 1e-10
self.relaxation_factor = 0.2
def update_mass_stiffness(self, sigma=1., sigma_mass=1.):
"""
Set's the mass and stiffness properties of the default wing
Returns:
"""
n_elem_fuselage = self.n_elem_fuselage
n_elem_wing = self.n_elem_wing
n_node_wing = self.n_node_wing
n_node_fuselage = self.n_node_fuselage
c_root = self.c_root
taper_ratio = self.taper_ratio
# Local chord to root chord initialisation
c_bar_temp = np.linspace(c_root, taper_ratio * c_root, n_elem_wing)
# Structural properties at the wing root section from Richards 2016
ea = 1e6
ga = 1e6
gj = 4.24e5
eiy = 3.84e5
eiz = 2.46e7
root_i_beam = IBeam()
root_i_beam.build(c_root)
root_i_beam.rotation_axes = np.array([0, self.main_ea_root-0.25, 0])
root_airfoil = Airfoil()
root_airfoil.build(c_root)
root_i_beam.rotation_axes = np.array([0, self.main_ea_root-0.25, 0])
mu_0 = root_i_beam.mass + root_airfoil.mass
j_xx = root_i_beam.ixx + root_airfoil.ixx
j_yy = root_i_beam.iyy + root_airfoil.iyy
j_zz = root_i_beam.izz + root_airfoil.izz
# Number of stiffnesses used
n_stiffness = self.n_stiffness
# Initialise the stiffness database
base_stiffness = self.base_stiffness
stiffness_root = sigma * np.diag([ea, ga, ga, gj, eiy, eiz])
stiffness_tip = taper_ratio ** 2 * stiffness_root
# Assume a linear variation in the stiffness. Richards et al. use VABS on the linearly tapered wing to find the
# spanwise properties
alpha = np.linspace(0, 1, self.n_elem_wing)
for i_elem in range(0, self.n_elem_wing):
base_stiffness[i_elem + 1, :, :] = stiffness_root*(1-alpha[i_elem]**2) + stiffness_tip*alpha[i_elem]**2
base_stiffness[0] = base_stiffness[1]
# Mass variation along the span
# Right wing centre of mass - wrt to 0.25c
cm = (root_airfoil.centre_mass * root_airfoil.mass + root_i_beam.centre_mass * root_i_beam.mass) \
/ np.sum(root_airfoil.mass + root_i_beam.mass)
cg = np.array([0, -(cm[0] + 0.25 * self.c_root - self.main_ea_root), 0]) * 1
n_mass = self.n_mass
# sigma_mass = 1.25
# Initialise database
base_mass = self.base_mass
mass_root_right = np.diag([mu_0, mu_0, mu_0, j_xx, j_yy, j_zz]) * sigma_mass
mass_root_right[:3, -3:] = -algebra.skew(cg) * mu_0
mass_root_right[-3:, :3] = algebra.skew(cg) * mu_0
mass_root_left = np.diag([mu_0, mu_0, mu_0, j_xx, j_yy, j_zz]) * sigma_mass
mass_root_left[:3, -3:] = -algebra.skew(-cg) * mu_0
mass_root_left[-3:, :3] = algebra.skew(-cg) * mu_0
mass_tip_right = taper_ratio * mass_root_right
mass_tip_left = taper_ratio * mass_root_left
ixx_dummy = []
iyy_dummy = []
izz_dummy = []
import matplotlib.pyplot as plt
for i_elem in range(self.n_elem_wing):
# Create full cross section
c_bar = self.c_root * ((1-alpha[i_elem]) + self.taper_ratio * alpha[i_elem])
x_section = WingCrossSection(c_bar)
print(i_elem)
print('Section Mass: %.2f ' %x_section.mass)
print('Linear Mass: %.2f' % (mu_0 * (1-alpha[i_elem]) + mu_0 * self.taper_ratio * alpha[i_elem]))
print('Section Ixx: %.4f' % x_section.ixx)
print('Section Iyy: %.4f' % x_section.iyy)
print('Section Izz: %.4f' % x_section.izz)
print('Linear Ixx: %.2f' % (j_xx * (1-alpha[i_elem]) + j_xx * self.taper_ratio * alpha[i_elem]))
# base_mass[i_elem, :, :] = mass_root_right*(1-alpha[i_elem]) + mass_tip_right*alpha[i_elem]
# base_mass[i_elem + self.n_elem_wing + self.n_elem_fuselage - 1] = mass_root_left*(1-alpha[i_elem]) + mass_tip_left*alpha[i_elem]
base_mass[i_elem, :, :] = np.diag([x_section.mass, x_section.mass, x_section.mass,
x_section.ixx, x_section.iyy, x_section.izz])
cg = np.array([0, -(x_section.centre_mass[0] + (0.25 - self.main_ea_root) * c_bar / self.c_root), 0]) * 1
base_mass[i_elem, :3, -3:] = -algebra.skew(cg) * x_section.mass
base_mass[i_elem, -3:, :3] = algebra.skew(cg) * x_section.mass
base_mass[i_elem + self.n_elem_wing + self.n_elem_fuselage - 1, :, :] = np.diag([x_section.mass, x_section.mass, x_section.mass,
x_section.ixx, x_section.iyy, x_section.izz])
cg = np.array([0, -(x_section.centre_mass[0] + (0.25 - self.main_ea_root) * c_bar / self.c_root), 0]) * 1
base_mass[i_elem + self.n_elem_wing + self.n_elem_fuselage - 1, :3, -3:] = -algebra.skew(-cg) * x_section.mass
base_mass[i_elem + self.n_elem_wing + self.n_elem_fuselage - 1, -3:, :3] = algebra.skew(-cg) * x_section.mass
ixx_dummy.append(x_section.ixx)
iyy_dummy.append(x_section.iyy)
izz_dummy.append(x_section.izz)
# for item in x_section.items:
# plt.plot(item.y, item.z)
# plt.scatter(x_section.centre_mass[0], x_section.centre_mass[1])
# plt.show()
# print(x_section.centre_mass)
# print(cg)
# plt.plot(range(self.n_elem_wing), ixx_dummy)
# plt.plot(range(self.n_elem_wing), iyy_dummy)
# plt.plot(range(self.n_elem_wing), izz_dummy)
# plt.show()
# Lumped mass initialisation
lumped_mass_nodes = self.lumped_mass_nodes
lumped_mass = self.lumped_mass
lumped_mass_inertia = self.lumped_mass_inertia
lumped_mass_position = self.lumped_mass_position
# Lumped masses nodal position
# 0 - Right engine
# 1 - Left engine
# 2 - Fuselage
lumped_mass_nodes[0] = 2
lumped_mass_nodes[1] = n_node_fuselage + n_node_wing + 1
lumped_mass_nodes[2] = 0
# Lumped mass value from Richards 2013
lumped_mass[0:2] = 51.445 / 9.81
lumped_mass[2] = 150 / 9.81
# lumped_mass_position[2] = [0, 0, -10.]
# Lumped mass inertia
lumped_mass_inertia[0, :, :] = np.diag([0.29547, 0.29322, 0.29547])
lumped_mass_inertia[1, :, :] = np.diag([0.29547, 0.29322, 0.29547])
lumped_mass_inertia[2, :, :] = np.diag([0.5, 1, 1]) * lumped_mass[2]
# Define class attributes
self.lumped_mass = lumped_mass * 1
self.lumped_mass_nodes = lumped_mass_nodes * 1
self.lumped_mass_inertia = lumped_mass_inertia * 1
self.lumped_mass_position = lumped_mass_position * 1
self.base_stiffness = base_stiffness
self.base_mass = base_mass
class CrossSection(object):
def __init__(self):
self.rho = 2770
self.rotation_axes = np.array([0, 0.33-0.25, 0])
self.y = np.ndarray((2,))
self.z = np.ndarray((2,))
self.t = np.ndarray((2,))
@property
def mass(self):
"""
Mass of the I beam per unit length
"""
return np.sum(self.t * self.elem_length) * self.rho
@property
def ixx(self):
ixx_ = np.sum(self.elem_length * self.t * self.rho * (self.elem_cm_y ** 2 + self.elem_cm_z ** 2))
return ixx_ + self.mass * (self.centre_mass[0] - self.rotation_axes[1]) ** 2
@property
def elem_length(self):
elem_length = np.sqrt(np.diff(self.y) ** 2 + np.diff(self.z) ** 2)
return elem_length
@property
def elem_cm_y(self):
elem_cm_y_ = np.ndarray((self.n_elem, ))
elem_cm_y_[:] = 0.5 * (self.y[:-1] + self.y[1:])
return elem_cm_y_
@property
def elem_cm_z(self):
elem_cm_z_ = np.ndarray((self.n_elem, ))
elem_cm_z_[:] = 0.5 * (self.z[:-1] + self.z[1:])
return elem_cm_z_
@property
def centre_mass(self):
y_cm = np.sum(self.elem_cm_y * self.elem_length) / np.sum(self.elem_length)
z_cm = np.sum(self.elem_cm_z * self.elem_length) / np.sum(self.elem_length)
return np.array([y_cm, z_cm])
@property
def iyy(self):
x_dom = np.linspace(-0.5, 0.5, 100)
x_cg = 0.5 * (x_dom[:-1].copy() + x_dom[1:].copy())
dx = np.diff(x_dom)[0]
iyy_ = 0
for elem in range(len(self.elem_length)):
z_cg = np.ones_like(x_cg) * self.elem_cm_z[elem]
iyy_ += np.sum(self.elem_length[elem] * self.t[elem] * dx * self.rho * (x_cg ** 2 + z_cg ** 2))
return iyy_ #np.sum(self.elem_length * self.t * self.rho * 1 * self.elem_cm_z ** 2)
@property
def izz(self):
x_dom = np.linspace(-0.5, 0.5, 100)
x_cg = 0.5 * (x_dom[:-1].copy() + x_dom[1:].copy())
dx = np.diff(x_dom)[0]
iyy_ = 0
izz_ = 0
for elem in range(len(self.elem_length)):
y_cg = np.ones_like(x_cg) * self.elem_cm_y[elem]
izz_ += np.sum(self.elem_length[elem] * self.t[elem] * dx * self.rho * (x_cg ** 2 + y_cg ** 2))
return izz_ #np.sum(self.elem_length * self.t * self.rho * 1 * self.elem_cm_y ** 2)
@property
def n_node(self):
return self.y.shape[0]
@property
def n_elem(self):
return self.n_node - 1
def build(self, y, z, t):
self.y = y
self.z = z
self.t = t
class IBeam(CrossSection):
def build(self, c_root):
t_skin = 0.127e-2
t_c = 0.12
w_I = 10e-2 * c_root # Width of the Ibeam
self.rho = 2770
self.y = np.ndarray((self.n_node, ))
self.z = np.ndarray((self.n_node, ))
self.t = np.ndarray((self.n_node, ))
z_max = t_c * c_root
y = np.array([-w_I/2, w_I/2, 0, 0, -w_I/2, w_I/2])
z = np.array([z_max/2, z_max/2, z_max/2, -z_max/2, -z_max/2, -z_max/2])
t = np.array([t_skin, 0, t_skin, 0, t_skin])
self.y = y
self.z = z
self.t = t
class Airfoil(CrossSection):
def build(self, c_root):
t_c = 0.12
t_skin = 0.127e-2 * 1.5
y_dom = np.linspace(0, c_root, 100)
y = np.concatenate((y_dom, y_dom[:-1][::-1]))
z_dom = 5 * t_c * (0.2969 * np.sqrt(y_dom/c_root) -
0.1260 * y_dom/c_root -
0.3516 * (y_dom/c_root) ** 2 +
0.2843 * (y_dom/c_root) ** 3 -
0.1015 * (y_dom/c_root) ** 4) * c_root
z = np.concatenate((z_dom, -z_dom[:-1][::-1]))
self.y = y - 0.25 * c_root
self.z = z
self.t = t_skin * np.ones(self.n_elem)
class WingCrossSection:
def __init__(self, chord):
self.chord = chord
self.items = list()
self.items.append(Airfoil())
self.items.append(IBeam())
for item in self.items:
item.build(chord)
@property
def mass(self):
return np.sum([item.mass for item in self.items])
@property
def centre_mass(self):
y = np.sum([item.mass * item.centre_mass[0] for item in self.items]) / self.mass
z = np.sum([item.mass * item.centre_mass[1] for item in self.items]) / self.mass
return np.array([y, z])
@property
def ixx(self):
return np.sum([item.ixx for item in self.items])
@property
def iyy(self):
return np.sum([item.iyy for item in self.items])
@property
def izz(self):
return np.sum([item.izz for item in self.items])
if __name__ == '__main__':
ws = Baseline(M=4,
N=11,
Mstarfactor=5,
u_inf=28,
rho=1.225,
alpha_deg=4)
# ws.clean_test_files()
ws.update_mass_stiffness()
ws.update_fem_prop()
# ws.generate_fem_file()
ws.update_aero_properties()
# ws.generate_aero_file()
# ws.set_default_config_dict() | [
"ng213@ic.ac.uk"
] | ng213@ic.ac.uk |
2263a9868f1c0b5df26b8c2242674af8936506df | 19d1ea1f04be9e8c452408ce506cade79184dbb3 | /rules/__init__.py | 1c74c9e71fbd58995baa9e02b0eaf9d879f6f1c0 | [] | no_license | trarck/repack | 81a3f851ab8039fec95a19afd27b40637307d79c | fe77bec248e1ee97781b99a3c65b9dd77ca3ff32 | refs/heads/master | 2020-03-27T13:09:49.355145 | 2019-01-23T01:17:29 | 2019-01-23T01:17:29 | 146,593,624 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | from rule import * | [
"duanhh@duanhouhaideMac-Pro.local"
] | duanhh@duanhouhaideMac-Pro.local |
b6a7e1ab9605e82b18bee3cfb0ecfb202e981749 | 07f932c30bec781f383c16ccaa4bb0188bbeb719 | /class2/exercise2.py | e002d60e4211fc64a2a64d6e4ffc14d6a138b7a4 | [] | no_license | mrreyes512/pynet_ansible | 1e4b8381080a393ef74fce35a2cd31c2e7a43011 | 98d773443dd224c580ed60fc089ce9352cae9547 | refs/heads/master | 2021-01-11T03:29:13.879845 | 2017-01-19T07:08:17 | 2017-01-19T07:08:17 | 71,002,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | #!/usr/bin/env python
"""
Write a script that connects using telnet to the pynet-rtr1 router. Execute the 'show ip int brief' command on the router and return the output.
Try to do this on your own (i.e. do not copy what I did previously). You should be able to do this by using the following items:
telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
remote_conn.read_until(<string_pattern>, TELNET_TIMEOUT)
remote_conn.read_very_eager()
remote_conn.write(<command> + '\n')
remote_conn.close()
"""
import telnetlib
import time
import socket
import sys
TELNET_PORT = 23
TELNET_TIMEOUT = 6
def send_command(remote_conn, cmd):
cmd = cmd.rstrip()
remote_conn.write(cmd + '\n')
time.sleep(1)
return remote_conn.read_very_eager()
def login(remote_conn, username, password):
output = remote_conn.read_until("sername:", TELNET_TIMEOUT)
remote_conn.write(username + '\n')
output = remote_conn.read_until("assword:", TELNET_TIMEOUT)
remote_conn.write(password + '\n')
return output
def telnet_connect(ip_addr):
try:
return telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
except socket.timeout:
sys.exit("Connection timed-out")
def main():
ip_addr = '184.105.247.70'
#ip_addr = '8.8.8.8'
username = 'pyclass'
password = '88newclass'
remote_conn = telnet_connect(ip_addr)
output = login(remote_conn, username, password)
#print output
time.sleep(1)
output = remote_conn.read_very_eager()
#print output
output = send_command(remote_conn, 'terminal length 0')
output = send_command(remote_conn, 'show ip interface brief')
print output
remote_conn.close()
if __name__ == "__main__":
main()
| [
"mark.reyes@twcable.com"
] | mark.reyes@twcable.com |
6087cf22bb3cfcbdb38c804b91caa07f0eb784f3 | 6c7753fac46d59e995bc7e48985e0845801c33a4 | /apiApp/project/urls.py | 04c830e68d014d4a0e32a9346c693ffd0795bdae | [] | no_license | MarlonJD/projectBlue | fcf56907b8ed9b3b610ee0058737aece49d1d50b | 9cefc1564597d487459c9a013bcdcffd2e4775eb | refs/heads/master | 2022-10-26T03:58:56.939155 | 2020-06-18T21:02:33 | 2020-06-18T21:02:33 | 261,889,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
# from django.conf.urls.i18n import i18n_patterns
urlpatterns = [
path('admin/', admin.site.urls),
path('login/', auth_views.LoginView.as_view(), name='login'),
path('passwordReset/', auth_views.PasswordResetView.as_view(),
name='password_reset'),
path('api/', include('api.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# urlpatterns += i18n_patterns(
# path('', include('panel.urls')),
# )
| [
"burak.karahan@mail.ru"
] | burak.karahan@mail.ru |
4837d295d67276d0a0b29eda004d84928b3040ac | bf2c390ed28071ac233265b1aa0d73cf2d48defb | /funcoesTeste.py | 24aefd19c594e242264891210060822131005c94 | [] | no_license | pedroppp/pythonTeste | feaca119aad8f521351a4fe8d514d9c38afeba2e | 62cbd91e77736ec8a59547279e304c62584a2ad2 | refs/heads/master | 2020-08-11T00:13:58.810170 | 2019-10-11T14:27:05 | 2019-10-11T14:27:05 | 214,451,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | # coding: utf-8
# teste de funções
def funcao_inicial():
print("teste funcao")
def soma(x,y):
total = x + y
print(" valor total ",total)
print(" valor total 2 %d" %total)
# parametros default
def login(usuario="teste",senha="123"):
print(" usuario: %s e senha: %s" %(usuario,senha))
def login2(sistema,usuario="teste",senha="123"):
print(" usuario: %s e senha: %s do sistema %s" %(usuario,senha,sistema))
def dados_pessoais(nome,sobrenome,idade,sexo):
print(" nome: {}\nSobrenome: {}\nIdade: {}\nSexo: {}\n"
.format(nome,sobrenome,idade,sexo))
def soma3():
return 10
def funcRetValMult():
return 1, 2
def potenciaN(x):
quadrado = x**2
cubo = x**3
return quadrado, cubo
funcao_inicial()
soma(2, 3)
login()
login("root","12345")
login2("Sistemateste")
dados_pessoais("Pedro","Pires","50","MASCULINO")
#ARGUMENTOS NOMEADOS
dados_pessoais("Pedro",sexo="MASCULINO",idade="53",sobrenome="Pires")
print(" resultado da soma3 %d" %soma3())
# retorno de valores multiplos
x, y = funcRetValMult()
print(" Valor retornado de x %d\n Valor retornado de y %d" %(x,y))
a,b = potenciaN(10)
print(" Quadrado %d "%a)
print(" Cubo %d "%b)
| [
"PedroPires@mpsp.mp.br"
] | PedroPires@mpsp.mp.br |
faf7d10c50a3d812e2b763b7f86e1ab4332a58cb | e645a5e03c2e25298042b6367025dac8109510f5 | /src/ytechblog/profiles/migrations/0002_profile_description.py | bae84ac0fc1cbb814f68f336b20735c5081d4c16 | [] | no_license | anthony-ogoke6/campushub | 39e3d0b7ae4fbc3a2d0492cb26265e1eb6dda2b8 | c156c7a8750f173349c78b9ec3b6e9de941cdb12 | refs/heads/master | 2022-10-21T22:35:24.487566 | 2018-12-27T23:33:55 | 2018-12-27T23:33:55 | 161,300,385 | 0 | 1 | null | 2022-10-21T08:19:15 | 2018-12-11T08:16:23 | Python | UTF-8 | Python | false | false | 401 | py | # Generated by Django 2.1.1 on 2018-09-25 01:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='description',
field=models.TextField(default='description default text'),
),
]
| [
"ogokeanthony187@gmail.com"
] | ogokeanthony187@gmail.com |
67d5a8da55c644e91aa539b2449116477ff95e23 | ba41dbc2183bd91e6e9a8669904b85f342775530 | /mgmt/dump-function-calls.py | d5620c7a17ded957d08beb4af09694250bb26f38 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | fish2000/libimread | 5d835f98083a897e1d0d9fde4f816cea4496e35f | 781e2484559136de5171d577d54afa624ca4c8b4 | refs/heads/master | 2022-04-28T18:14:27.189975 | 2022-03-20T23:57:15 | 2022-03-20T23:57:15 | 30,621,253 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | #!/usr/bin/env python
# dump_function_calls.py:
# Originally from: https://github.com/smspillaz/scripts/blob/master/list_non_inlined_symbols/dump_function_calls.py
#
# Copyright (c) 2014 Sam Spilsbury <smspillaz@gmail.com>
# Licenced under the MIT Licence.
#
# Looks at the DWARF data for a library and dumps to stdout where
# functions are called
#
# Usage: dump_function_calls.py object [regex]
import re
import sys
import subprocess
def get_function_calls (objdump_output, regex):
function_calls = []
for line in objdump_output.split ("\n"):
if "callq" in line and "<" in line and ">" in line:
if regex is None or (regex is not None and regex.match (line) != None):
mangled = line.split ("<")[1]
if "@" in mangled:
mangled = mangled.split("@")[0]
elif "." in mangled:
mangled = mangled.split(".")[0]
call = subprocess.check_output (["c++filt", mangled])[:-1]
function_calls.append (call)
return set (function_calls)
if (len (sys.argv) < 2):
print "Usage: dump_function_calls.py object [regex]"
object = sys.argv[1];
regex = None
if (len (sys.argv) == 3):
regex = re.compile (sys.argv[2])
objdump_output = subprocess.check_output (["gobjdump", "-S", object])
function_calls = get_function_calls (objdump_output, regex)
for call in function_calls:
print call | [
"fish2000@gmail.com"
] | fish2000@gmail.com |
87d5afbae9b740257b00478e0a65bfa7f48d916d | 9fa0a266717339cac7a819f8be0b0c0244b4bd4e | /autoencoder/data_prepocess.py | 58f083b79f5ba567b15df1947b9f78bc1a105bc2 | [] | no_license | BugHoppers/recommender | 45eb1bdf88042a38b28851ac941cd69f73a66f85 | 7fad8f7e3e1732186ef10adc8065684907a9b945 | refs/heads/master | 2020-04-18T14:51:19.875273 | 2019-01-29T17:50:50 | 2019-01-29T17:50:50 | 167,599,505 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | # Importing the libraries
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import torch
def convert(data, n_users, n_movies):
new_data = []
for id_users in range(1, n_users + 1):
id_movies = data[:,1][data[:,0] == id_users]
id_ratings = data[:,2][data[:,0] == id_users]
ratings = np.zeros(n_movies)
ratings[id_movies - 1] = id_ratings
new_data.append(list(ratings))
return new_data
def load_data(csv):
dataset = pd.read_csv(csv)
dataset = dataset.iloc[:,:-1].astype(np.int64).values
training_set, test_set = train_test_split(dataset, test_size=0.2)
n_users = int(max(max(training_set[:,0]), max(test_set[:,0])))
n_movies = int(max(max(training_set[:,1]), max(test_set[:,1])))
training_set = convert(training_set, n_users, n_movies)
test_set = convert(test_set, n_users, n_movies)
training_set = torch.FloatTensor(training_set)
test_set = torch.FloatTensor(test_set)
return training_set, test_set, n_users, n_movies | [
"sourishsarmah@gmail.com"
] | sourishsarmah@gmail.com |
11c2023832c43498c55c8d64651a8ced78c36628 | 93cee8b7fd8e0975aa8514ab815fa3898106608d | /container_service_extension/utils.py | abf70c60ff3821c6cc6f9a53952e504e8171e188 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | tsugliani/container-service-extension | 09c3e775af0cc9482c3ff4188622deeeda43c5da | 9e3e8dedf551065136caf4a58c06ee7403e4c9da | refs/heads/master | 2021-05-14T07:23:33.054372 | 2018-01-04T14:34:02 | 2018-01-04T14:34:02 | 116,264,605 | 0 | 0 | null | 2018-01-04T13:42:49 | 2018-01-04T13:42:49 | null | UTF-8 | Python | false | false | 794 | py | # container-service-extension
# Copyright (c) 2017 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import hashlib
import random
import socket
import ssl
import string
def hex_chunks(s):
return [s[i:i + 2] for i in range(0, len(s), 2)]
def get_thumbprint(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10)
wrappedSocket = ssl.wrap_socket(sock)
wrappedSocket.connect((host, port))
der_cert_bin = wrappedSocket.getpeercert(True)
thumb_sha1 = hashlib.sha1(der_cert_bin).hexdigest()
wrappedSocket.close()
return ':'.join(map(str, hex_chunks(thumb_sha1))).upper()
def random_word(length):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
| [
"contact@pacogomez.com"
] | contact@pacogomez.com |
304b54e70f42f4c9d58fbed379fc1dcc873823ea | f773c74c0301ebfcc09878a649dbe1b0d39c9de3 | /Handicapskydning/migrations/0031_auto_20191124_1738.py | 50ba38237b258fb21777ee1c023f4a5c00c2f2ed | [] | no_license | nikcio/Handicapskydning-Public | 877c67c47beb15df4745bcc1307b00dd1b01e780 | 6fdd9a65b9e32e9b16942da6fcb78f1d4c5e6acb | refs/heads/master | 2020-09-16T13:41:32.856424 | 2020-01-29T13:15:11 | 2020-01-29T13:15:11 | 223,786,759 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # Generated by Django 2.2.7 on 2019-11-24 16:38
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('Handicapskydning', '0030_auto_20191124_1737'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='date',
field=models.DateField(default=datetime.datetime(2019, 11, 24, 16, 38, 22, 820558, tzinfo=utc), verbose_name='Dato'),
),
migrations.AlterField(
model_name='news',
name='date',
field=models.DateField(default=datetime.datetime(2019, 11, 24, 16, 38, 22, 820558, tzinfo=utc), verbose_name='Dato'),
),
]
| [
"nikolajgive@gmail.com"
] | nikolajgive@gmail.com |
71233bc906c491693766d1a8f52f618ccd55b178 | a06489cf25ed924d5689fb2d125bb9c2e4c3cb60 | /UVS/Core/models.py | 2adba3d6de3307094948f535a845162f72755084 | [] | no_license | highbreed/Universty-Voting-System | f14c592b8645ada351a24b4092a25624cabe7af9 | cf4063db533957d41b6653a60e836b19ccd460af | refs/heads/master | 2020-09-06T14:41:51.659307 | 2019-11-08T11:37:31 | 2019-11-08T11:37:31 | 220,453,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | import datetime
from django.db import models
GENDER_CHOICE = (
('Male', 'Male'),
('Female', 'Female'),
)
class Student(models.Model):
"""
this is a database model that captures the students details
:returns FirstName and LastName
"""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
gender = models.CharField(choices=GENDER_CHOICE, max_length=10)
adm_number = models.CharField(max_length=100)
def __str__(self):
return "{} {}".format(self.first_name, self.last_name)
class Position(models.Model):
"""
A database model that captures the available seats for contest
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Contest(models.Model):
"""
model to set registration of voting
"""
STATUS_CHOICE = (
('active', 'active'),
('voting ongoing', 'voting ongoing'),
('registration ongoing', 'registration ongoing'),
('registration ended', 'registration ended'),
('results relesed', 'results relesed'),
('ended', 'ended'),
)
name = models.CharField(max_length=100)
open_positions = models.ManyToManyField(Position)
registration_start = models.DateField()
registration_end = models.DateField()
voting_start = models.DateField()
voting_end = models.DateField()
status = models.CharField(choices=STATUS_CHOICE, max_length=50, default='active')
def __str__(self):
return self.name
class Contestant(models.Model):
"""
model to register contestants
"""
name = models.ForeignKey(Student, on_delete=models.CASCADE)
contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
position = models.ForeignKey(Position, on_delete=models.CASCADE)
image = models.ImageField(upload_to='contestant_images', blank=True, null=True)
votes = models.IntegerField(default=0, null=True, blank=True)
def __str__(self):
return str(self.name)
class Winner(models.Model):
"""
This model stores the final winners
"""
contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
contestant = models.ForeignKey(Contestant, on_delete=models.CASCADE)
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.contestant)
| [
"noreply@github.com"
] | highbreed.noreply@github.com |
9a8d1f2c92d9086aa11eccf289f7c95d6b8f29d0 | b37d4c6ae5fa90c5afc6346088c272d3b7c8a37c | /backend/course/api/v1/viewsets.py | dafe5676cfef6a5d35ce42d0bb66ce7f52f6daf6 | [] | no_license | crowdbotics-apps/staeci-27357 | 3f18579fe25b97c64db661205c7398a8f27905e7 | e63469a44549e38d3d01046fbf75c394ef4ec435 | refs/heads/master | 2023-04-28T20:35:58.657031 | 2021-05-24T22:53:33 | 2021-05-24T22:53:33 | 370,503,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,400 | py | from rest_framework import authentication
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
from .serializers import (
RecordingSerializer,
EventSerializer,
SubscriptionSerializer,
CourseSerializer,
GroupSerializer,
ModuleSerializer,
PaymentMethodSerializer,
SubscriptionTypeSerializer,
EnrollmentSerializer,
LessonSerializer,
CategorySerializer,
)
from rest_framework import viewsets
class CourseViewSet(viewsets.ModelViewSet):
serializer_class = CourseSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Course.objects.all()
class EventViewSet(viewsets.ModelViewSet):
serializer_class = EventSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Event.objects.all()
class GroupViewSet(viewsets.ModelViewSet):
serializer_class = GroupSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Group.objects.all()
class LessonViewSet(viewsets.ModelViewSet):
serializer_class = LessonSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Lesson.objects.all()
class SubscriptionViewSet(viewsets.ModelViewSet):
serializer_class = SubscriptionSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Subscription.objects.all()
class RecordingViewSet(viewsets.ModelViewSet):
serializer_class = RecordingSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Recording.objects.all()
class PaymentMethodViewSet(viewsets.ModelViewSet):
serializer_class = PaymentMethodSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = PaymentMethod.objects.all()
class EnrollmentViewSet(viewsets.ModelViewSet):
serializer_class = EnrollmentSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Enrollment.objects.all()
class SubscriptionTypeViewSet(viewsets.ModelViewSet):
serializer_class = SubscriptionTypeSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = SubscriptionType.objects.all()
class ModuleViewSet(viewsets.ModelViewSet):
serializer_class = ModuleSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Module.objects.all()
class CategoryViewSet(viewsets.ModelViewSet):
serializer_class = CategorySerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Category.objects.all()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
870321131070ff6d322c0e1ae472628dcd8acd4e | ef9deef72e808c9bbb3e2fa05ce0b22e1c7442ce | /Week8/Integration.py | a9eb3e22034869239ac6bf3e4ddcbd3d0712f330 | [] | no_license | Sukanya-Kothapally/Data_Engineering | 3b3038d9efe2a4d9014374262341a0a9aad5c572 | 378de7126d58efba7fecb65f62a248b307579ee8 | refs/heads/main | 2023-03-08T04:03:01.915105 | 2021-02-27T05:37:42 | 2021-02-27T05:37:42 | 329,715,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | import pandas as pd
integrated_df= groubbycounties_census.join(groubbycounties_covid)
integrated_df['Totalcases'] = integrated_df.apply(lambda index: (index['cases'] * 100000) / index['TotalPop'], axis=1)
integrated_df['Totaldeaths'] = integrated_df.apply(lambda index: (index['deaths'] * 100000) / index['TotalPop'], axis=1)
#integrated_df.head(5)
| [
"noreply@github.com"
] | Sukanya-Kothapally.noreply@github.com |
5349d9c611e76c9771c545f3b6f22f418b21c9d2 | 595734a16085b52bd03e582a152f560125dbf199 | /code/main2.py | 2f46830f8d0f70034ff90b7ff56f0ae046c7db97 | [] | no_license | Fessence/tianchi_job_match | 2c6fce88c7458493b739339b8f90e947c961309e | 301b487c48b07e81d4b4169d82ca814a62a7e818 | refs/heads/master | 2022-02-26T03:04:35.172686 | 2019-10-06T13:27:59 | 2019-10-06T13:27:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,217 | py | # -*- coding:utf-8 _*-
import Levenshtein
import pandas as pd
import numpy as np
import datetime
from datetime import datetime
from sklearn.metrics import roc_auc_score, mean_absolute_error
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
import copy
import lightgbm as lgb
from scipy import sparse
from sklearn.preprocessing import Normalizer
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.metrics.pairwise import cosine_similarity
from gensim.models import TfidfModel
from gensim import corpora,similarities,models
import math
import networkx as nx
from nltk.metrics.distance import jaccard_distance, masi_distance
import difflib
from simhash import Simhash
from nltk.tokenize import word_tokenize
import re
import xgboost as xgb
from sklearn.preprocessing import StandardScaler, LabelEncoder
pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', 400)
scaler = StandardScaler()  # module-level scaler; reused by get_result() below
# Training tables: user profiles, job descriptions, and user->job actions.
train_user = pd.read_csv('../data/zhaopin_round1_train_20190716/table1_user', sep='\t')
train_jd = pd.read_csv('../data/zhaopin_round1_train_20190716/table2_jd.csv', sep='\t')
action = pd.read_csv('../data/zhaopin_round1_train_20190716/table3_action', sep='\t')
# One row per (user, jd) action, enriched with the user's and the job's attributes.
train = pd.merge(action, train_user, on='user_id', how='left')
train = pd.merge(train, train_jd, on='jd_no', how='left')
# Test set: exposed (user, jd) pairs to be scored, joined with the same tables.
test_user = pd.read_csv('../data/zhaopin_round1_test_20190716/user_ToBePredicted', sep='\t')
test = pd.read_csv('../data/zhaopin_round1_user_exposure_A_20190723', sep=' ')
test = pd.merge(test, test_user, on='user_id', how='left')
test = pd.merge(test, train_jd, on='jd_no', how='left')
# print([i for i in train.columns if i not in list(test.columns)]) #['browsed', 'delivered', 'satisfied']
def creat_feas(train, type='train'):
    """Engineer matching/gap features on the merged user-job frame, in place.

    Builds popularity counts, city/job-type match flags, education/experience/
    salary gap features, and (for the train split) the regression target.

    Args:
        train: merged action x user x jd DataFrame; mutated and returned.
        type: 'train' adds the 'label' column from satisfied/delivered.
              (The name shadows the builtin, but call sites pass it as a
              keyword — e.g. ``creat_feas(test, type='test')`` — so it stays.)

    Returns:
        (train, no_feas, encoder_feas): the feature frame, the columns to
        exclude from modelling, and the categorical columns to label-encode.
    """
    # Popularity: postings seen per user / users reached per posting.
    train['user_id_count_jd_no'] = train.groupby('user_id').jd_no.transform('count')
    train['jd_no_count_user_id'] = train.groupby('jd_no').user_id.transform('count')
    # desire_jd_city_id is a comma string of three city slots; '-' means unset.
    train['desire_city_nums'] = train['desire_jd_city_id'].apply(lambda x: len([i for i in x.split(',') if i != '-']))
    train['0_city'] = train['desire_jd_city_id'].apply(lambda x: x.split(',')[0] if x.split(',')[0] != '-' else np.nan)
    train['1_city'] = train['desire_jd_city_id'].apply(lambda x: x.split(',')[1] if x.split(',')[1] != '-' else np.nan)
    train['2_city'] = train['desire_jd_city_id'].apply(lambda x: x.split(',')[2] if x.split(',')[2] != '-' else np.nan)
    train['city'].fillna(-100, inplace=True)
    # Flags: does the job's city match a desired city / the user's home city?
    train['desire_job_city_same'] = train.apply(lambda x: 1 if str(int(x['city'])) in x['desire_jd_city_id'].split(',') else 0, axis=1)
    train['live_job_city_same'] = train.apply(lambda x: 1 if str(int(x['live_city_id'])) in x['desire_jd_city_id'].split(',') else 0, axis=1)
    train['jd_sub_type_desire_jd_type_id_same'] = train.apply(lambda x: 1 if x['jd_sub_type'] == x['desire_jd_type_id'] else 0, axis=1)
    def get_job_same_nums(x, y):
        """Count job-type tokens shared by posting and desire; -1 on bad input."""
        try:
            jd_sub_type = re.split(',|/', x)
            desire_jd_type_id = re.split(',|/', y)
            nums = len([i for i in jd_sub_type if i in desire_jd_type_id])
        except (TypeError, ValueError):
            # FIX: narrowed from a bare except; NaN inputs raise TypeError here.
            return -1
        return nums
    train['desire_jd_type_nums'] = train.apply(lambda x: get_job_same_nums(x['jd_sub_type'], x['desire_jd_type_id']), axis=1)
    # FIX: NaN is a truthy float, so the original `1 if x else 0` flagged
    # missing values as "has current jd type"; treat NaN as missing.
    train['has_cur_jd_type'] = train['cur_jd_type'].apply(lambda x: 1 if pd.notna(x) and x else 0)
    # Ordinal education scale shared by user degree and the job's minimum.
    degree_dict = {'\\N': 1, '初中': 1, '高中': 2, '其他': 2, '请选择': 2, '中专': 3, '中技': 3, '大专': 4, '本科': 5, '硕士': 6, 'MBA': 6,
                   'EMBA': 6, '博士': 7}
    train['cur_degree_id'] = train['cur_degree_id'].fillna('高中')
    train['degree'] = train['cur_degree_id'].apply(lambda x: int(degree_dict[x.strip()]))
    train['min_edu_level'] = train['min_edu_level'].fillna('高中')
    train['min_edu_level'] = train['min_edu_level'].apply(lambda x: int(degree_dict[x.strip()]))
    train['degree_diff'] = train['degree'] - train['min_edu_level'] + 7  # +7 keeps the gap non-negative
    train['start_work_date'] = train['start_work_date'].replace('-', 2019)
    train['work_year'] = train['start_work_date'].apply(lambda x: 2019 - int(x))
    def get_year(x):
        """Parse the job's required years; the field packs ranges like '35' (3-5)."""
        try:
            x = str(int(x))
            if len(x) == 1:
                return int(x)
            elif len(x) == 3:
                return int(x[0])
            else:
                return int(x[:2])
        except (TypeError, ValueError):
            # FIX: narrowed from a bare except; '-' / NaN raise ValueError.
            return 0
    train['min_years'] = train['min_years'].apply(lambda x: get_year(x))
    train['work_year_diff'] = train['work_year'] - train['min_years']
    def get_salary(x, y):
        """Extract the lower bound from a packed 'minmax' salary string; fall back to y.

        NOTE(review): strings whose length is not 9/10/11 return None implicitly —
        confirm that never happens in the data.
        """
        if x == 0:
            x = y
        if x == '-':
            x = y
        x = str(x)
        if len(x) == 10:
            return int(x[:5])
        elif len(x) == 9:
            return int(x[:4])
        elif len(x) == 11:
            return int(x[:5])
    train['desire_jd_salary_id'] = train.apply(lambda x: get_salary(x['desire_jd_salary_id'], x['cur_salary_id']), axis=1)
    train['cur_salary_id'] = train.apply(lambda x: get_salary(x['cur_salary_id'], x['desire_jd_salary_id']), axis=1)
    train['salary_diff'] = train['desire_jd_salary_id'] - train['cur_salary_id'] + 1000  # +1000 keeps the gap positive
    # Columns excluded from the model (ids, targets, free text).
    no_feas = ['user_id', 'jd_no', 'browsed', 'delivered', 'satisfied', 'company_name', 'key', 'job_description\r\n', 'label']
    # Categorical columns that get_label() will label-encode.
    encoder_feas = ['live_city_id', 'desire_jd_city_id', 'desire_jd_industry_id', 'desire_jd_type_id', 'cur_industry_id',
                    'cur_jd_type', 'cur_degree_id', 'experience', 'jd_title', 'jd_sub_type', 'start_date', 'end_date',
                    'max_edu_level', 'is_mangerial', 'resume_language_required']
    if type == 'train':
        # Weighted target: a satisfied match outweighs a mere delivery.
        train['label'] = train['satisfied']*0.7 + train['delivered']*0.3
    # FIX: return unconditionally — the original returned only on the 'train'
    # path, so creat_feas(test, type='test') yielded None and the caller's
    # three-way unpack crashed with a TypeError.
    return train, no_feas, encoder_feas
def get_label(train, test, encoder_feas):
    """Label-encode the categorical columns consistently across train and test.

    Concatenates both frames so every category seen in either split maps to
    the same integer code, then splits the rows back by position.
    """
    n_train = len(train)
    combined = pd.concat((train, test))
    for col in encoder_feas:
        combined[col] = combined[col].astype(str)
        encoder = LabelEncoder()
        encoder.fit(combined[col].unique())
        combined[col] = encoder.transform(combined[col])
    return combined.iloc[:n_train], combined.iloc[n_train:]
def lgb_para_reg_model(X_train, y_train, X_test, y_test):
    """Train a LightGBM regressor with RMSE early stopping on the held-out fold."""
    params = dict(
        boosting="gbdt",
        objective="regression",
        metric="rmse",
        learning_rate=0.01,
        max_depth=4,
        num_leaves=30,
        min_data_in_leaf=30,
        min_child_samples=30,
        feature_fraction=0.9,
        bagging_fraction=0.9,
        bagging_freq=1,
        bagging_seed=11,
        lambda_l1=0.1,
        verbosity=-1,
    )
    train_set = lgb.Dataset(X_train, y_train)
    valid_set = lgb.Dataset(X_test, y_test)
    # A very large round budget; early stopping on the validation fold decides.
    booster = lgb.train(
        params,
        train_set,
        100000,
        valid_sets=[train_set, valid_set],
        verbose_eval=200,
        early_stopping_rounds=500,
    )
    return booster
def xgb_model_re(X_train, y_train, X_test, y_test):
    """Fit an XGBoost regressor, early-stopping on the held-out fold."""
    regressor = xgb.XGBRegressor(
        n_estimators=1668,
        learning_rate=0.01,
        max_depth=4,
        min_child_weight=1.5,
        gamma=0.0,
        subsample=0.2,
        colsample_bytree=0.3,
        reg_alpha=1,
        reg_lambda=0.6,
        seed=42,
        silent=1,
    )
    eval_pairs = [(X_train, y_train), (X_test, y_test)]
    # fit() returns the fitted estimator itself.
    return regressor.fit(X_train, y_train, eval_set=eval_pairs,
                         early_stopping_rounds=30, verbose=1)
def get_result(train, test, label, my_model, need_sca=True, splits_nums=5):
    """K-fold train/predict wrapper.

    Fits ``my_model`` on each fold, collects out-of-fold predictions, and
    averages the per-fold test predictions into the global ``sub`` frame.

    Args:
        train, test: feature matrices (DataFrame or array-like).
        label: regression target, aligned with ``train`` rows.
        my_model: callable (X_train, y_train, X_test, y_test) -> fitted model.
        need_sca: standardise both sets with the module-level ``scaler`` first.
        splits_nums: number of KFold splits.

    Returns:
        (re_sub, oof): a copy of the global ``sub`` with the fold-averaged
        'score' column, and the out-of-fold prediction vector.

    Side effects: mutates the module-level ``sub`` DataFrame and prints
    per-fold diagnostics.
    """
    if need_sca:
        # NOTE(review): the scaler is fit on the full train set before the
        # fold split, so folds share scaling statistics — confirm intended.
        scaler.fit(train)
        train = scaler.transform(train)
        test = scaler.transform(test)
    elif not need_sca:
        train = np.array(train)
        test = np.array(test)
    oof = np.zeros(train.shape[0])
    score_list = []
    label = np.array(label)
    k_fold = KFold(n_splits=splits_nums, shuffle=True, random_state=1024)
    for index, (train_index, test_index) in enumerate(k_fold.split(train)):
        X_train, X_test = train[train_index], train[test_index]
        y_train, y_test = label[train_index], label[test_index]
        model = my_model(X_train, y_train, X_test, y_test)
        vali_pre = model.predict(X_test)
        oof[test_index] = vali_pre
        print(len(y_test), len(vali_pre))
        print(y_test[:10])
        print(vali_pre[:10])
        try:
            # NOTE(review): this is sqrt(MAE), not RMSE; it is only logged,
            # so it is kept as-is — confirm the intended metric.
            score = math.sqrt(mean_absolute_error(y_test, vali_pre))
            score_list.append(score)
        except Exception:
            # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
            # and SystemExit; keep the best-effort logging, narrow the catch.
            pass
        pred_result = model.predict(test)
        sub['score'] = pred_result
        if index == 0:
            re_sub = copy.deepcopy(sub)
        else:
            re_sub['score'] = re_sub['score'] + sub['score']
    re_sub['score'] = re_sub['score'] / splits_nums
    print('score list:', score_list)
    print(np.mean(score_list))
    return re_sub, oof
# Feature engineering for both splits.
train, no_feas, encoder_feas = creat_feas(train)
# NOTE(review): creat_feas only returns on the type='train' path, so this
# call yields None and the three-way unpack raises TypeError — needs a fix.
test, no_feas, encoder_feas = creat_feas(test, type='test')
feas = [i for i in train.columns if i not in no_feas]
train_df = train[feas]
test_df = test[feas]
label = train['label']
# Identifier frame that get_result() fills with a 'score' column (global).
sub = test[['user_id', 'jd_no']]
train_df, test_df = get_label(train_df, test_df, encoder_feas)
re_sub, oof = get_result(train_df, test_df, label, lgb_para_reg_model, need_sca=True, splits_nums=5)
print(re_sub)
# Rank each user's postings by predicted score and write the submission.
re_sub = re_sub.sort_values(['user_id', 'score'], ascending=False)
re_sub[['user_id', 'jd_no']].to_csv('../result/sub.csv', index=None)
print(re_sub)
| [
"lipengfei-s@inspur.com"
] | lipengfei-s@inspur.com |
0dd9f7cdf78fdb95067d0abe3a7ed2d4dc1e3bb0 | 65a74ee01d60f52dde49c1348ee8c59c1096c51f | /circulation.py | 056d241a2f10651d48962b875d13ef9668ea6bc9 | [] | no_license | observingClouds/trade-wind-course_2021 | 7c710eeb4d22bcc7577d0a643c72745f42382b73 | 2fa318909a37e3a46ceb51c9b28a02bfdbbd9e2e | refs/heads/master | 2023-03-13T18:20:35.634103 | 2021-03-05T16:03:45 | 2021-03-05T16:03:45 | 336,301,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | """
Functions to calculate circulations
"""
def streamfunction_p(omega, p_levels, firstProfile=None, alpha=1):
    """
    Horizontally integrate the vertical velocity to get the overall
    streamfunction as in Bretherton et al. (2005, p. 4282), but in
    pressure coordinates:
        psi_i(p) = psi_(i-1)(p) + alpha * omega_(i-1)(p) / g

    Parameters
    ----------
    omega : array-like, shape (n_profiles, n_levels)
        Vertical velocity [Pa/s].
    p_levels : sequence
        Pressure levels [Pa]; kept for the call signature — the integration
        now covers every column of ``omega`` directly.
    firstProfile : scalar or array, optional
        Starting streamfunction for the first profile (default 0).
    alpha : scalar, optional
        Weight applied to each omega increment (default 1).

    Returns
    -------
    psi : ndarray, shape (n_profiles + 1, n_levels)
        Streamfunction [kg/m**2/s].
    """
    import numpy as np
    g = 9.80665  # gravitational acceleration [m/s**2]
    omega = np.asarray(omega, dtype=float)
    n_profiles, n_levels = omega.shape
    psi = np.empty((n_profiles + 1, n_levels))
    # FIX: removed a leftover debug print of the first profile.
    psi[0, :] = 0.0 if firstProfile is None else firstProfile
    # The double Python loop was a cumulative sum over profiles; vectorise it.
    # FIX: this also fills every column — the old per-level loop left columns
    # beyond len(p_levels) as uninitialised np.empty garbage on a mismatch.
    psi[1:, :] = psi[0, :] + np.cumsum(omega * alpha, axis=0)
    return psi / g  # convert to kg/m**2/s
def wtg(T, Tpot, Qcool_day, dTpot_dp):
    """Vertical velocity from the weak temperature gradient approximation.

    Computes omega = Q / S with static stability S = (T/Tpot) * dTpot/dp,
    where Q is the cooling rate converted from K/day to K/s.

    Input:
        T: temperature
        Tpot: potential temperature
        Qcool_day: cooling [K/d]
        dTpot_dp: vertical gradient of potential temperature in pressure
    Output:
        vertical velocity in Pa/s
    """
    seconds_per_day = 24 * 60 * 60.0
    cooling_rate = Qcool_day / seconds_per_day
    stability = T / Tpot * (dTpot_dp)
    return cooling_rate / stability
| [
"43613877+observingClouds@users.noreply.github.com"
] | 43613877+observingClouds@users.noreply.github.com |
23da050fce3e839d574feba6c8f740f566a786ed | 87fe0e001d4a3f52a66103d3d16ab81b369d9bbb | /projecteuler/combinatoricsselections.py | 3acc15ff29d2ee755987a43c24c613aff972863e | [] | no_license | JanKesek/python-algorithm-automata-libraries | 4b25b114b29a893cf7cfe11af60ba49d2c986af7 | 8bad5e8b7a22e699479252357819490beeb251e6 | refs/heads/master | 2022-09-16T22:36:45.739611 | 2022-04-01T14:09:28 | 2022-04-01T14:09:28 | 194,309,056 | 0 | 0 | null | 2022-09-01T23:28:29 | 2019-06-28T17:14:13 | Python | UTF-8 | Python | false | false | 196 | py | import math
limit = 1000000
count=0
fact = lambda a : math.factorial(a)
for i in range(21,101):
for j in range(1,i):
comb = fact(i)//(fact(j)*fact(i-j))
if comb>=limit: count+=1
print(count) | [
"4chewbaca@gmail.com"
] | 4chewbaca@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.