blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d153edb29553b7d64ada0f59b1a91b11a9039869 | 2de54ad5b8432e3ac5e8901ffda0a7bb0935054b | /install/urdf_tutorial/share/urdf_tutorial/demo.launch1.py | 61b1c28272be3a8eee7d977ad0e452f0a96edb0b | [] | no_license | pw-eiti-anro-21l/kozubek_michalczewski | 789c4163a278db1c259d21de642050867fbaf244 | 98d821a1692fb4b8085fee3018c2fbd6295f5e01 | refs/heads/master | 2023-04-30T05:55:53.518465 | 2021-05-07T12:04:11 | 2021-05-07T12:04:11 | 345,424,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
    """Build the launch description for the urdf_tutorial demo.

    Loads the r2d21 URDF from this package's installed share directory and
    starts both robot_state_publisher and the package's own state_publisher.
    """
    # Launch-time flag toggling use of the simulated (Gazebo) clock.
    use_sim_time = LaunchConfiguration('use_sim_time', default='false')
    # Resolve the URDF shipped with the installed urdf_tutorial package.
    urdf_path = os.path.join(
        get_package_share_directory('urdf_tutorial'),
        'r2d21.urdf.xml')
    with open(urdf_path, 'r') as urdf_file:
        robot_description = urdf_file.read()
    sim_time_arg = DeclareLaunchArgument(
        'use_sim_time',
        default_value='false',
        description='Use simulation (Gazebo) clock if true')
    robot_state_publisher_node = Node(
        package='robot_state_publisher',
        executable='robot_state_publisher',
        name='robot_state_publisher',
        output='screen',
        parameters=[{'use_sim_time': use_sim_time,
                     'robot_description': robot_description}],
        arguments=[urdf_path])
    state_publisher_node = Node(
        package='urdf_tutorial',
        executable='state_publisher',
        name='state_publisher',
        output='screen')
    return LaunchDescription([
        sim_time_arg,
        robot_state_publisher_node,
        state_publisher_node,
    ])
| [
"01149402@pw.edu.pl"
] | 01149402@pw.edu.pl |
bd3f59a9d11388780a80aad702971349799580c5 | 0320ac4a623f9153468952a64af9093430801dcb | /tests/callbacks/learning_rate_test.py | 9503c866bcca0a3f3d20446c89f8d9a9d3d4676a | [
"MIT"
] | permissive | carmocca/PyLaia | 330629610569f9347de5cb3eb479c2ed5abaceb6 | 65b0dde6211f96d061ce6264e50ba316e8f0e7f3 | refs/heads/master | 2023-02-25T06:23:51.755052 | 2021-01-24T13:16:48 | 2021-01-24T13:16:48 | 277,486,482 | 1 | 1 | MIT | 2020-12-02T03:08:13 | 2020-07-06T08:32:49 | Python | UTF-8 | Python | false | false | 1,556 | py | import pytest
import torch
from laia.callbacks import LearningRate
from laia.dummies import DummyEngine, DummyLoggingPlugin, DummyMNIST, DummyTrainer
def test_learning_rate_warns(tmpdir):
    """A LearningRate callback without a supporting logger warns at fit time."""
    trainer_kwargs = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        callbacks=[LearningRate()],
    )
    trainer = DummyTrainer(**trainer_kwargs)
    expected_message = r"You are using LearningRateMonitor.*"
    # pytest.warns asserts the RuntimeWarning is actually emitted during fit.
    with pytest.warns(RuntimeWarning, match=expected_message):
        trainer.fit(DummyEngine(), datamodule=DummyMNIST())
class __TestEngine(DummyEngine):
    """DummyEngine variant that additionally attaches a StepLR scheduler."""

    def configure_optimizers(self):
        # Reuse the parent's optimizer and step the learning rate every epoch.
        base_optimizer = super().configure_optimizers()
        scheduler = torch.optim.lr_scheduler.StepLR(base_optimizer, 1)
        return [base_optimizer], [scheduler]
@pytest.mark.parametrize("num_processes", (1, 2))
def test_learning_rate(tmpdir, num_processes):
    """LearningRate logs one LR transition per epoch (single-process and ddp_cpu)."""
    log_filepath = tmpdir / "log"
    use_ddp = num_processes > 1
    trainer = DummyTrainer(
        default_root_dir=tmpdir,
        max_epochs=3,
        callbacks=[LearningRate()],
        accelerator="ddp_cpu" if use_ddp else None,
        num_processes=num_processes,
        plugins=[DummyLoggingPlugin(log_filepath)],
    )
    trainer.fit(__TestEngine(), datamodule=DummyMNIST())
    if use_ddp:
        # Only rank 0 writes entries; rank 1's log file exists but stays empty.
        rank1_log = tmpdir.join("log.rank1")
        assert rank1_log.exists()
        assert not rank1_log.read_text("utf-8")
    assert log_filepath.exists()
    logged = [entry.strip() for entry in log_filepath.readlines()]
    # StepLR(gamma default 0.1) shrinks the LR tenfold each epoch.
    for epoch in range(1, trainer.max_epochs):
        expected = f"E{epoch}: lr-Adam 1.000e-0{epoch + 2} ⟶ 1.000e-0{epoch + 3}"
        assert logged.count(expected) == 1
| [
"carlossmocholi@gmail.com"
] | carlossmocholi@gmail.com |
bf90f45077472fed8d17b9fe5b29d1377b85b63a | 837ff1dd01d0f566e9687264d51d3f3409fe4924 | /hello/app1/urls.py | 6a9f862e3a833d34a7d43d97189159630102d275 | [] | no_license | LearnerYu3/hello | 59ab96bf09d577fded9547f71d066442a13b1c4f | b8cb9ecdc7894ca3e4910873c26b836e0947e2af | refs/heads/master | 2023-01-20T05:05:09.020113 | 2020-11-21T13:13:32 | 2020-11-21T13:13:32 | 302,605,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("brian", views.brian, name="brian"),
path("ycy", views.ycy, name="ycy"),
path("<str:name>", views.greet, name="greet")
] | [
"1330286215@qq.com"
] | 1330286215@qq.com |
d5f73f1067d80b9fe7ac514995b5b213bc2a9bd9 | 104049eef9c4f67e88a2a833f5bd8e203b9782f8 | /scheduler/tasker.py | 98847202cd1a31d0bc546d07fe251045364cdc91 | [] | no_license | kirimaks/weather_charts | 70996f2557f1e9ed78794260b3042cabe43f5040 | 497267c56f1c5e44c99a91113ba1cdac5819097c | refs/heads/master | 2021-01-09T07:36:11.120321 | 2016-07-03T23:58:06 | 2016-07-03T23:58:06 | 61,840,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from apscheduler.schedulers.background import BackgroundScheduler
import socket
import logging
from scheduler.tasks import yandex_temp, sochicamera_temp, worldseatemp_temp
from scheduler.tasks import wake_up
logging.basicConfig(level=logging.DEBUG)
try:
    # Bind a fixed localhost port as a cross-process singleton lock: the bind
    # succeeds only for the first process, so periodic jobs are never doubled.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("127.0.0.1", 47200))
except socket.error:
    # Port already taken -> another scheduler process already owns the jobs.
    logging.debug("Already started, skipping...")
else:
    # We hold the lock: start the background scheduler with all periodic jobs.
    scheduler = BackgroundScheduler()
    scheduler.add_job(wake_up, "interval", minutes=25)
    scheduler.add_job(yandex_temp, "interval", hours=6)
    scheduler.add_job(sochicamera_temp, "interval", hours=6)
    scheduler.add_job(worldseatemp_temp, "interval", hours=6)
    scheduler.start()
    logging.debug("<< Scheduler started >>")
| [
"kirimaks@yahoo.com"
] | kirimaks@yahoo.com |
d9c0ac38eae4f5d3c7b46755517c7d58b8e1c72a | 30a29fe552854a838a3c3f4a87bcfca4a1716df6 | /Analysis/Analysis_TSNE_DataPoint.py | 4b8d985351be51601a1aea05d40859e2fce9a6f8 | [] | no_license | vichsuWah/Information_Extraction | 11f76c4dfa254a772280282d7b7c9c76ada62590 | eef95e14878d77182d50e95b3d3c830e277690dd | refs/heads/main | 2023-07-15T17:17:22.403923 | 2021-09-04T11:39:30 | 2021-09-04T11:39:30 | 402,982,458 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,685 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import glob
import numpy as np
import pandas as pd
from transformers import BertJapaneseTokenizer, BertModel
import re
import unicodedata
from model import *
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
###########################SETTING###################################
# Pretrained Japanese BERT checkpoint used for both tokenizer and encoder.
pretrained_weights = 'cl-tohoku/bert-base-japanese-whole-word-masking'
# The 20 field tags annotated in the Cinnamon tender-document dataset.
TAGs = ['調達年度', '都道府県', '入札件名', '施設名', '需要場所(住所)',
        '調達開始日', '調達終了日', '公告日','仕様書交付期限', '質問票締切日時',
        '資格申請締切日時', '入札書締切日時', '開札日時', '質問箇所所属/担当者', '質問箇所TEL/FAX',
        '資格申請送付先', '資格申請送付先部署/担当者名', '入札書送付先', '入札書送付先部署/担当者名', '開札場所']
# Normalize tags: strip marker characters/whitespace, then NFKC-fold widths.
# NOTE(review): the first regex alternative appears garbled here; a pattern
# literally starting with '*' raises re.error ("nothing to repeat") -- it is
# presumably the '※' reference mark in the original source. Confirm upstream.
TAGs = [unicodedata.normalize("NFKC", re.sub('*|\*|\s+', '', t)) for t in TAGs]
TAGs = tuple(TAGs)  # freeze so later .index() lookups run on an immutable sequence
cinnamon_path = '/home/md531/ADL/Final/data/release/'  # dataset root directory
FINE_TUNE_BERT_MODEL_PATH = './epoch_34.pt'  # fine-tuned model checkpoint
######################################################################
# tags_values[k] collects every annotated value whose tag is TAGs[k],
# gathered across both the train and dev splits.
tags_values = [[] for i in range(len(TAGs))]
data_type = ['train', 'dev']
for mode in data_type:
    files = glob.glob(f'{cinnamon_path}/{mode}/ca_data/*')
    for f_idx, file in enumerate(files):
        dataframe = pd.read_excel(file, encoding="utf8")
        for tag, value in zip(dataframe['Tag'], dataframe['Value']):
            # Normalize both cells the same way the TAGs constants were
            # normalized; non-string cells (NaN) are passed through untouched.
            tag = unicodedata.normalize("NFKC", re.sub('*|\*|\s+', '', tag)) if isinstance(tag, str) else tag
            value = unicodedata.normalize("NFKC", re.sub('*|\*|\s+', '', value)) if isinstance(value, str) else value
            if (not isinstance(tag, str)) or (not isinstance(value, str)):
                # Skip rows where either cell is missing (NaN).
                pass
            else:
                # A cell may hold several ';'-separated tags and/or values.
                _tags = [t for t in tag.split(';')]
                _values = [v for v in value.split(';')]
                if len(_tags) == len(_values):
                    # One-to-one pairing of tags and values.
                    for t, v in zip(_tags, _values):
                        tags_values[ TAGs.index(t) ].append(v)
                elif len(_tags) > len(_values):
                    # Several tags sharing the single value in this row.
                    assert len(_values) == 1, "the condition: diff. tags -> same value ?"
                    for idx, t in enumerate(_tags):
                        tags_values[ TAGs.index(t) ].append(_values[0])
                elif len(_tags) < len(_values):
                    # Several values all annotated with the single tag.
                    assert len(_tags) == 1, "the condition: diff. values -> same tag ?"
                    for idx, v in enumerate(_values):
                        tags_values[ TAGs.index(_tags[0]) ].append(v)
        print("\r{}:[{}/{}]".format(mode, f_idx, len(files)), end=' \r')
# tags_values: group the values having same tag
print("Finish Collecting")
# Load the tokenizer and the fine-tuned model's BERT encoder, then embed
# every collected value and project the embeddings to 2-D with t-SNE.
tokenizer = BertJapaneseTokenizer.from_pretrained(pretrained_weights, do_lower_case=True)
fine_tune_model = Model()
fine_tune_model.load(FINE_TUNE_BERT_MODEL_PATH)
model = fine_tune_model.bert_embedd
#model = BertModel.from_pretrained(pretrained_weights)
#model.load_state_dict(torch.load(FINE_TUNE_BERT_MODEL_PATH)['state_dict'])
#tags_tokens_ids = [[] for i in range(len(TAGs))]
tsne = TSNE(n_components=2, init='pca', random_state=501)
# X: one sentence embedding per row; y: the matching tag index per row.
X, y = torch.tensor([]), torch.LongTensor([])
for t_idx in range(len(TAGs)):
    #tags_values[t_idx]
    for idx, sent in enumerate(tags_values[t_idx]):
        sent_ids = torch.tensor(tokenizer.encode(sent))
        # Batch of size 1: outputs[0] is the last hidden-state sequence.
        outputs = model(input_ids=sent_ids.view(1, -1))
        last_hidden_states = outputs[0]
        # Ver.1: Take [CLS] embedding as the sentence embedding
        #sent_embedding = last_hidden_states.squeeze(0)[0, :] # shape = [768]
        # Ver.2: Average
        sent_embedding = torch.mean(last_hidden_states.squeeze(0), dim=0) # shape = [768]
        # prepare data for TSNE
        X = torch.cat((X, sent_embedding.view(1, -1)), dim=0)
        y = torch.cat((y, torch.tensor([t_idx])), dim=0)
        print("\r{}({}/{}):[{}/{}]".format(TAGs[t_idx], t_idx+1, len(TAGs), idx+1, len(tags_values[t_idx])), end=' \r')
X = X.cpu().detach().numpy()
y = y.cpu().detach().numpy()
X_tsne = tsne.fit_transform(X) # dim from '768' reduce to '2' | X_tsne.shape = (2504, 2)
# Data Visualization
# Rescale the 2-D t-SNE embedding into the unit square so both plots share axes.
x_min, x_max = X_tsne.min(0), X_tsne.max(0)
# Normalization
X_norm = (X_tsne - x_min) / (x_max - x_min)

# Build a 20-entry palette by concatenating matplotlib's Set1/Set2/Set3 maps
# (used by the text-marker plot below).
color1 = plt.cm.Set1(range(9))
color2 = plt.cm.Set2(range(8))
color3 = plt.cm.Set3(range(12))
color = np.concatenate((color1, color2, color3[:20-len(color1)-len(color2)]), axis=0)

# 1. LABELS CENTERED ON CLUSTER MEANS
plt.figure(figsize=(8, 8))
for i in range(20):
    # add data points; bug fix: the original `label=i+TAGs[i]` was int + str,
    # which raises TypeError -- the index must be stringified first.
    plt.scatter(x=X_norm[y==i,0], y=X_norm[y==i,1], c='C' + str(i), s=7,
                label=str(i) + TAGs[i], marker='o', alpha=0.2)
    # add the tag index at the cluster centroid
    plt.annotate(i, X_norm[y==i].mean(axis=0),
                 horizontalalignment='center',
                 verticalalignment='center',
                 size = 20, weight='bold',
                 color='C'+str(i) )
plt.legend(loc='upper right')
plt.savefig("figure/TSNE/TSNE_[TRAIN_DEV]_point.png")
plt.close()

# 2. TEXT Markers: draw every sample as its tag index, colored per tag.
plt.figure(figsize=(8, 8), dpi=600)
for i in range(X_norm.shape[0]):
    plt.text(X_norm[i, 0], X_norm[i, 1], str(y[i]), color=color[ y[i] ],
             fontdict={'weight': 'normal', 'size': 5})
plt.savefig('figure/TSNE/TSNE_[TRAIN_DEV]_text.png')
plt.close()
plt.close() | [
"vich.4y4gj4@gmail.com"
] | vich.4y4gj4@gmail.com |
a687dd252ff8aa94bcc3e8419e6acb74acb68ee0 | da6bf15899bff60785b4659277a94c043c669f8f | /ulesanne12.py | df8d56aa5867f4eb954a4c0235fa765ff5f7c495 | [] | no_license | sandraonne/Python2 | 97aabe49b16043e3420dda1f49a18680b7268b01 | 7a07a02282afa1bb14390edf9082ee7da7200c6e | refs/heads/master | 2021-01-24T00:33:29.414648 | 2018-02-24T19:02:40 | 2018-02-24T19:02:40 | 122,771,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | deg_f = (float(input("What is the temperature in Fahrenheit?")))
deg_c = (deg_f - 32) / (9 / 5)
print(deg_f, " degrees Fahrenheit is", deg_c, " degrees Celsius.")
| [
"sandra.onne@khk.ee"
] | sandra.onne@khk.ee |
c32161bd88210e1a6c87cb5395adf9e602d68732 | 61aa319732d3fa7912e28f5ff7768498f8dda005 | /src/cpu/testers/gpu_ruby_test/ProtocolTester.py | cf24aec71ce40a0c2c4589ca6fb05c77686a5dd2 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | TeCSAR-UNCC/gem5-SALAM | 37f2f7198c93b4c18452550df48c1a2ab14b14fb | c14c39235f4e376e64dc68b81bd2447e8a47ff65 | refs/heads/main | 2023-06-08T22:16:25.260792 | 2023-05-31T16:43:46 | 2023-05-31T16:43:46 | 154,335,724 | 62 | 22 | BSD-3-Clause | 2023-05-31T16:43:48 | 2018-10-23T13:45:44 | C++ | UTF-8 | Python | false | false | 3,587 | py | # Copyright (c) 2017-2021 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from m5.objects.ClockedObject import ClockedObject
from m5.params import *
from m5.proxy import *
class ProtocolTester(ClockedObject):
    """gem5 SimObject parameter declarations for the GPU Ruby protocol tester.

    Declares the request ports and tunable parameters consumed by the C++
    implementation in cpu/testers/gpu_ruby_test/protocol_tester.hh.
    """
    type = 'ProtocolTester'
    cxx_header = "cpu/testers/gpu_ruby_test/protocol_tester.hh"
    cxx_class = 'gem5::ProtocolTester'
    # Request ports driven by the tester's CPU, DMA and GPU threads.
    cpu_ports = VectorRequestPort("Ports for CPUs")
    dma_ports = VectorRequestPort("Ports for DMAs")
    cu_vector_ports = VectorRequestPort("Vector ports for GPUs")
    cu_sqc_ports = VectorRequestPort("SQC ports for GPUs")
    cu_scalar_ports = VectorRequestPort("Scalar ports for GPUs")
    cu_token_ports = VectorRequestPort("Token ports for GPU")
    # Topology knobs: how CUs share SQC/scalar caches and wavefront sizing.
    cus_per_sqc = Param.Int(4, "Number of CUs per SQC")
    cus_per_scalar = Param.Int(4, "Number of CUs per scalar cache")
    wavefronts_per_cu = Param.Int(1, "Number of wavefronts per CU")
    workitems_per_wavefront = Param.Int(64, "Number of workitems per wf")
    max_cu_tokens = Param.Int(4, "Maximum number of tokens, i.e., the number"
                                 " of instructions that can be uncoalesced"
                                 " before back-pressure occurs from the"
                                 " coalescer.")
    # The tester threads attached to the ports above.
    cpu_threads = VectorParam.CpuThread("All cpus")
    dma_threads = VectorParam.DmaThread("All DMAs")
    wavefronts = VectorParam.GpuWavefront("All wavefronts")
    # Random-testing workload shape.
    num_atomic_locations = Param.Int(2, "Number of atomic locations")
    num_normal_locs_per_atomic = Param.Int(1000, \
                            "Number of normal locations per atomic")
    episode_length = Param.Int(10, "Number of actions per episode")
    max_num_episodes = Param.Int(20, "Maximum number of episodes")
    debug_tester = Param.Bool(False, "Are we debugging the tester?")
    random_seed = Param.Int(0, "Random seed number. Default value (0) means \
                                using runtime-specific value.")
    log_file = Param.String("Log file's name")
    system = Param.System(Parent.any, "System we belong to")
| [
"sroger48@uncc.edu"
] | sroger48@uncc.edu |
ab874f54709718eb18eb3c5f718ae9204a92281a | b4bb9a937e0904db89c6496389f49ae555258fc5 | /apps/messages.py | b446216ae5a1105af91ba51c24960a4feb5e9fa3 | [] | no_license | vitoralves/python-api | 3e1f5f77ba61e0df2770c9d24240b46ee9c37449 | 125172ee7906392c49884f8e8fdf21bc9aa60c2c | refs/heads/master | 2020-05-24T04:21:48.857073 | 2019-05-22T16:19:52 | 2019-05-22T16:19:52 | 187,090,895 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | MSG_FIELD_REQUIRED = 'Campo obrigatório.'
# User-facing API messages (Portuguese). The '{}' placeholders are filled
# with the resource name via str.format by the callers.
# Generic validation / error messages.
MSG_INVALID_DATA = 'Ocorreu um erro nos campos informados.'
MSG_DOES_NOT_EXIST = 'Este(a) {} não existe.'
MSG_EXCEPTION = 'Ocorreu um erro no servidor. Contate o administrador.'
MSG_ALREADY_EXISTS = 'Já existe um(a) {} com estes dados.'
MSG_NO_DATA = "Os dados não podem ser nulos."
MSG_PASSWORD_WRONG = "As senhas não conferem."
# CRUD success messages.
MSG_RESOURCE_CREATED = "{} criado com sucesso."
MSG_RESOURCE_FETCHED_PAGINATED = 'Lista os/as {} paginados(as).'
MSG_RESOURCE_FETCHED = '{} retornado(a).'
MSG_RESOURCE_UPDATED = '{} atualizado(a).'
MSG_RESOURCE_DELETED = '{} deletado(a).'
# Authentication / authorization messages.
# Fix: this group was defined twice with identical values; the redundant
# second set of assignments has been removed.
MSG_TOKEN_CREATED = 'Token criado.'
MSG_INVALID_CREDENTIALS = 'As credenciais estão inválidas para log in.'
MSG_TOKEN_EXPIRED = 'Token expirou.'
MSG_PERMISSION_DENIED = 'Permissão negada.'
| [
"="
] | = |
01e1d26a9bccef926650b948be6118bd11c6dad4 | 148daf5b2e7bdff6bf43b6ea5532dd89f0a913b3 | /mozurestsdk/commerce/wishlists/wishlistitem.py | b80dc5c3704f74726f011835716b44478463b97e | [
"Apache-2.0"
] | permissive | sanjaymandadi/mozu-python-sdk | eea8ee9a88b73fd46031a4346b5bbb8988f6ff35 | ddf35791feef23705a5d84949856f59eb1c10d44 | refs/heads/master | 2021-01-21T08:44:14.790275 | 2015-09-30T19:51:29 | 2015-09-30T19:51:29 | 37,679,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,859 | py |
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
class WishlistItem(object):
	""" Client resource for the Mozu commerce wish-list item API.
	Wraps the /api/commerce/wishlists/{wishlistId}/items endpoints: each
	method builds a MozuUrl template, substitutes path/query parameters,
	executes the request through the shared client and returns the parsed
	result. Generated SDK code -- regenerate rather than hand-edit logic.
	"""
	def __init__(self, apiContext: ApiContext = None, mozuClient = None):
		# Fall back to the process-wide default client when none is supplied,
		# then attach either the given ApiContext or a fresh empty one.
		self.client = mozuClient or default_client();
		if (apiContext is not None):
			self.client.withApiContext(apiContext);
		else:
			self.client.withApiContext(ApiContext());
	def getWishlistItem(self,wishlistId, wishlistItemId, responseFields = None):
		""" Retrieves the details of an item in a shopper wish list.
		Args:
			| wishlistId (string) - Unique identifier of the wish list.
			| wishlistItemId (string) - Unique identifier of the item to remove from the shopper wish list.
			| responseFields (string) - Use this field to include those fields which are not included by default.
		Returns:
			| WishlistItem
		Raises:
			| ApiException
		"""
		url = MozuUrl("/api/commerce/wishlists/{wishlistId}/items/{wishlistItemId}?responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
		url.formatUrl("responseFields", responseFields);
		url.formatUrl("wishlistId", wishlistId);
		url.formatUrl("wishlistItemId", wishlistItemId);
		self.client.withResourceUrl(url).execute();
		return self.client.result();
	def getWishlistItems(self,wishlistId, startIndex = None, pageSize = None, sortBy = None, filter = None, responseFields = None):
		""" Retrieves a list of items in a shopper wish list according to any specified filter and sort criteria.
		Args:
			| wishlistId (string) - Unique identifier of the wish list.
			| startIndex (int) - When creating paged results from a query, this value indicates the zero-based offset in the complete result set where the returned entities begin. For example, with a PageSize of 25, to get the 51st through the 75th items, use startIndex=3.
			| pageSize (int) - The number of results to display on each page when creating paged results from a query. The maximum value is 200.
			| sortBy (string) - The property by which to sort results and whether the results appear in ascending (a-z) order, represented by ASC or in descending (z-a) order, represented by DESC. The sortBy parameter follows an available property. For example: "sortBy=productCode+asc"
			| filter (string) - A set of expressions that consist of a field, operator, and value and represent search parameter syntax when filtering results of a query. Valid operators include equals (eq), does not equal (ne), greater than (gt), less than (lt), greater than or equal to (ge), less than or equal to (le), starts with (sw), or contains (cont). For example - "filter=IsDisplayed+eq+true"
			| responseFields (string) - Use this field to include those fields which are not included by default.
		Returns:
			| WishlistItemCollection
		Raises:
			| ApiException
		"""
		url = MozuUrl("/api/commerce/wishlists/{wishlistId}/items?startIndex={startIndex}&pageSize={pageSize}&sortBy={sortBy}&filter={filter}&responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
		url.formatUrl("filter", filter);
		url.formatUrl("pageSize", pageSize);
		url.formatUrl("responseFields", responseFields);
		url.formatUrl("sortBy", sortBy);
		url.formatUrl("startIndex", startIndex);
		url.formatUrl("wishlistId", wishlistId);
		self.client.withResourceUrl(url).execute();
		return self.client.result();
	def getWishlistItemsByWishlistName(self,customerAccountId, wishlistName, startIndex = None, pageSize = None, sortBy = None, filter = None, responseFields = None):
		""" Retrieve a list of items in a customer wish list by supplying the wish list name.
		Args:
			| customerAccountId (int) - The unique identifier of the customer account for which to retrieve wish lists.
			| wishlistName (string) - The name of the wish list to retrieve.
			| startIndex (int) - When creating paged results from a query, this value indicates the zero-based offset in the complete result set where the returned entities begin. For example, with a PageSize of 25, to get the 51st through the 75th items, use startIndex=3.
			| pageSize (int) - The number of results to display on each page when creating paged results from a query. The maximum value is 200.
			| sortBy (string) - The property by which to sort results and whether the results appear in ascending (a-z) order, represented by ASC or in descending (z-a) order, represented by DESC. The sortBy parameter follows an available property. For example: "sortBy=productCode+asc"
			| filter (string) - A set of expressions that consist of a field, operator, and value and represent search parameter syntax when filtering results of a query. Valid operators include equals (eq), does not equal (ne), greater than (gt), less than (lt), greater than or equal to (ge), less than or equal to (le), starts with (sw), or contains (cont). For example - "filter=IsDisplayed+eq+true"
			| responseFields (string) - Use this field to include those fields which are not included by default.
		Returns:
			| WishlistItemCollection
		Raises:
			| ApiException
		"""
		url = MozuUrl("/api/commerce/wishlists/customers/{customerAccountId}/{wishlistName}/items?startIndex={startIndex}&pageSize={pageSize}&sortBy={sortBy}&filter={filter}&responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
		url.formatUrl("customerAccountId", customerAccountId);
		url.formatUrl("filter", filter);
		url.formatUrl("pageSize", pageSize);
		url.formatUrl("responseFields", responseFields);
		url.formatUrl("sortBy", sortBy);
		url.formatUrl("startIndex", startIndex);
		url.formatUrl("wishlistName", wishlistName);
		self.client.withResourceUrl(url).execute();
		return self.client.result();
	def addItemToWishlist(self,wishlistItem, wishlistId, responseFields = None):
		""" Adds a product in a site's catalog as an item in a shopper wish list.
		Args:
			| wishlistItem(wishlistItem) - Properties of an item in a shopper wish list.
			| wishlistId (string) - Unique identifier of the wish list.
			| responseFields (string) - Use this field to include those fields which are not included by default.
		Returns:
			| WishlistItem
		Raises:
			| ApiException
		"""
		url = MozuUrl("/api/commerce/wishlists/{wishlistId}/items?responseFields={responseFields}", "POST", UrlLocation.TenantPod, False);
		url.formatUrl("responseFields", responseFields);
		url.formatUrl("wishlistId", wishlistId);
		self.client.withResourceUrl(url).withBody(wishlistItem).execute();
		return self.client.result();
	def updateWishlistItemQuantity(self,wishlistId, wishlistItemId, quantity, responseFields = None):
		""" Updates the quantity of an item in a shopper wish list.
		Args:
			| wishlistId (string) - Unique identifier of the wish list.
			| wishlistItemId (string) - Unique identifier of the item to remove from the shopper wish list.
			| quantity (int) - The number of cart items in the shopper's active cart.
			| responseFields (string) - Use this field to include those fields which are not included by default.
		Returns:
			| WishlistItem
		Raises:
			| ApiException
		"""
		url = MozuUrl("/api/commerce/wishlists/{wishlistId}/items/{wishlistItemId}/{quantity}?responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False);
		url.formatUrl("quantity", quantity);
		url.formatUrl("responseFields", responseFields);
		url.formatUrl("wishlistId", wishlistId);
		url.formatUrl("wishlistItemId", wishlistItemId);
		self.client.withResourceUrl(url).execute();
		return self.client.result();
	def updateWishlistItem(self,wishlistItem, wishlistId, wishlistItemId, responseFields = None):
		""" Updates the details of an item in a shopper wish list.
		Args:
			| wishlistItem(wishlistItem) - Properties of an item in a shopper wish list.
			| wishlistId (string) - Unique identifier of the wish list.
			| wishlistItemId (string) - Unique identifier of the item to remove from the shopper wish list.
			| responseFields (string) - Use this field to include those fields which are not included by default.
		Returns:
			| WishlistItem
		Raises:
			| ApiException
		"""
		url = MozuUrl("/api/commerce/wishlists/{wishlistId}/items/{wishlistItemId}?responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False);
		url.formatUrl("responseFields", responseFields);
		url.formatUrl("wishlistId", wishlistId);
		url.formatUrl("wishlistItemId", wishlistItemId);
		self.client.withResourceUrl(url).withBody(wishlistItem).execute();
		return self.client.result();
	def removeAllWishlistItems(self,wishlistId):
		""" Removes all items associated with a shopper wish list.
		Args:
			| wishlistId (string) - Unique identifier of the wish list.
		Returns:
			| Wishlist
		Raises:
			| ApiException
		"""
		url = MozuUrl("/api/commerce/wishlists/{wishlistId}/items", "DELETE", UrlLocation.TenantPod, False);
		url.formatUrl("wishlistId", wishlistId);
		self.client.withResourceUrl(url).execute();
		return self.client.result();
	def deleteWishlistItem(self,wishlistId, wishlistItemId):
		""" Removes an item from the wish list specified in the request.
		Args:
			| wishlistId (string) - Unique identifier of the wish list.
			| wishlistItemId (string) - Unique identifier of the item to remove from the shopper wish list.
		Raises:
			| ApiException
		"""
		# NOTE(review): unlike the other methods this one does not return
		# self.client.result() -- presumably intentional for a void DELETE;
		# confirm against the generator template.
		url = MozuUrl("/api/commerce/wishlists/{wishlistId}/items/{wishlistItemId}", "DELETE", UrlLocation.TenantPod, False);
		url.formatUrl("wishlistId", wishlistId);
		url.formatUrl("wishlistItemId", wishlistItemId);
		self.client.withResourceUrl(url).execute();
| [
"sanjay.mandadi@gmail.com"
] | sanjay.mandadi@gmail.com |
ac15ea773e3d6ad0045f1650fbcadb80efd77afe | 4d13a5c2a7f8a0add9f3341033478d431ae378eb | /nlputils2.py | 4222065a77c8e6f01d6646d3ab571214423b2c53 | [] | no_license | piegu/language-models | d3bb016c8bb838da21b6bc9f0cef7edab8d0d793 | 73d2ec6f19d9aefdbc00617d5ac4d38cbc0f56ba | refs/heads/master | 2023-07-20T03:49:42.744163 | 2023-06-23T08:37:07 | 2023-06-23T08:37:07 | 208,856,340 | 175 | 67 | null | 2022-09-17T19:16:15 | 2019-09-16T17:18:28 | Jupyter Notebook | UTF-8 | Python | false | false | 5,999 | py | from fastai.basics import *
import re
import urllib.request
def get_wiki_download(path,lang):
    """Download the latest <lang> Wikipedia articles dump (.xml.bz2) into *path*.

    Skips the download when the archive is already present on disk.
    """
    dump_name = f'{lang}wiki'
    xml_name = f"{lang}wiki-latest-pages-articles.xml"
    archive_name = f"{xml_name}.bz2"
    archive_path = path/archive_name
    # Early return keeps re-runs idempotent.
    if archive_path.exists():
        print(f"{archive_path} already exists; not downloading")
        return
    print("downloading...")
    download_url(f'https://dumps.wikimedia.org/{dump_name}/latest/{archive_name}', archive_path)
def get_wiki_unzip(path,lang):
    """Decompress the downloaded <lang> wiki dump in *path*, unless already done."""
    xml_name = f"{lang}wiki-latest-pages-articles.xml"
    archive_name = f"{xml_name}.bz2"
    xml_path = path/xml_name
    # Early return keeps re-runs idempotent.
    if xml_path.exists():
        print(f"{xml_path} already exists; not unzip")
        return
    print("unzipping...")
    bunzip(path/archive_name)
def get_wiki_extract(path,lang):
    """Run WikiExtractor on the unzipped <lang> dump and collect the output.

    Clones the wikiextractor repo (first run only), swaps in a
    platform-independent WikiExtractor.py, runs the extraction inside *path*
    and moves the single output file to ``path/<lang>wiki``.
    """
    name = f'{lang}wiki'
    xml_fn = f"{lang}wiki-latest-pages-articles.xml"
    zip_fn = f"{xml_fn}.bz2"
    # Everything below runs inside *path* because WikiExtractor is invoked
    # with paths relative to the current directory.
    with working_directory(path):
        # get updated wikiextractor folder from albertvillanova, not attardi
        if not (path/'wikiextractor').exists(): os.system('git clone https://github.com/attardi/wikiextractor.git')
        # if not (path/'wikiextractor').exists(): os.system('git clone https://github.com/albertvillanova/wikiextractor.git')
        # if you cloned the wikiextractor folder from attardi, get the platform-independent WikiExtractor.py file with this code
        file_path = path/'wikiextractor/WikiExtractor.py'
        os.unlink(file_path) # delete existing file
        url = 'https://raw.githubusercontent.com/piegu/fastai-projects/master/WikiExtractor.py' # updated file url
        urllib.request.urlretrieve(url, file_path) # get updated file
        if (path/'wikiextractor/WikiExtractor.py').exists():
            print("extracting...")
            # -b 100G forces a single output shard (text/AA/wiki_00).
            os.system("python wikiextractor/WikiExtractor.py --processes 4 --no_templates " +
                f"--min_text_length 1800 --filter_disambig_pages --log_file log -b 100G -q {xml_fn}")
            shutil.move(str(path/'text/AA/wiki_00'), str(path/name))
            shutil.rmtree(path/'text')
        else:
            print(f"the file {path}\wikiextractor\WikiExtractor.py does not exist")
def split_wiki2(path,lang):
    """Split a WikiExtractor output file into one text file per article.

    Reads ``path/<lang>wiki`` line by line and writes each article's body to
    ``path/docs/<title>.txt``, replacing non-alphanumeric title characters
    with ``_``. Titles longer than 150 chars are appended to the previous
    article's file (original behavior, kept). Returns the destination
    directory; if it already exists nothing is split.
    """
    dest = path/'docs'
    name = f'{lang}wiki'
    if dest.exists():
        print(f"{dest} already exists; not splitting")
        return dest
    dest.mkdir(exist_ok=True, parents=True)
    title_re = re.compile(rf'<doc id="\d+" url="https://{lang}.wikipedia.org/wiki\?curid=\d+" title="([^"]+)">')
    re_punc = re.compile("([^a-zA-Z0-9])") # replace ponctuation in title
    lines = (path/name).open(encoding="utf8") # platform independent with utf8
    f = None
    for i,l in enumerate(lines):
        if i%100000 == 0: print(i)  # coarse progress indicator
        if l.startswith('<doc id="'):
            # NOTE: findall()[0] assumes every <doc header matches title_re.
            title = title_re.findall(l)[0]
            title = re_punc.sub(r"_", title)
            if len(title)>150: continue
            if title == "Com8": continue # exception
            if f: f.close()
            f = (dest/f'{title}.txt').open('w', encoding="utf8") # platform independent with utf8
        elif f:  # fix: guard against text appearing before the first <doc> header
            f.write(l)
    if f: f.close()  # fix: original crashed with AttributeError when no article was opened
    lines.close()    # fix: close the source file handle (was leaked)
    return dest
def clean_files(path,folder):
    """Strip the title line and trailing </doc> marker from every split article.

    Rewrites each file under ``path/folder`` in place: drops the first line,
    keeps everything up to the last ``</doc>`` and trims surrounding
    whitespace.
    NOTE(review): findall()[0] raises IndexError for a file that contains no
    ``</doc>`` marker -- confirm every split file ends with one.
    """
    dest = path/folder
    doc_re = re.compile(rf'([\w\W]*)<\/doc>') # delete </doc>
    for i,l in enumerate(dest.ls()):
        # open file and get content without first line which is the title
        f = l.open('r+', encoding="utf-8")
        f.readline()
        text = f.read()
        # get content without </doc> and delete empty line and whitespaces at the head and tail
        text = doc_re.findall(text)[0].strip()
        # delete file content
        f.seek(0)
        f.truncate()
        # write modificated text in file
        f.write(text)
        f.close()
def get_num_tokens(dest):
    """Count the files under *dest* and the whitespace-separated tokens they hold.

    *dest* must expose ``.ls()`` returning the contained file paths (fastai's
    ``Path.ls`` monkey-patch). Returns ``(num_files, num_tokens)``.
    """
    # Fix: initialize num_files so an empty folder returns (0, 0) instead of
    # raising UnboundLocalError on the original `num_files = i+1` after the loop.
    num_files = 0
    num_tokens = 0
    for i, l in enumerate(dest.ls()):
        # `with` guarantees the handle is closed even if read() fails.
        with l.open('r', encoding="utf-8") as f:
            num_tokens += len(f.read().split())
        num_files = i + 1
    return num_files, num_tokens
# Create a corpus of about obj_token words in a corpus_'obj_token' folder
def get_corpus(dest, path, num_tokens, obj_tokens=int(1e8)):
    """Copy the smallest articles from *dest* into a sub-corpus of ~obj_tokens words.

    Token counts per file are estimated from file sizes scaled by the known
    total *num_tokens*. Articles below ~100 estimated tokens are skipped.
    Returns the new corpus directory, or *dest* unchanged when the source
    corpus is already smaller than *obj_tokens*.
    """
    num_tokens_article_min = 100
    if num_tokens >= obj_tokens:
        # number of tokens by text
        files = dest.ls()
        sizes = []
        list_idx = []
        for i,f in enumerate(files):
            sizes.append(os.path.getsize(f))
        total_size = np.array(sizes).astype(np.int64).sum()
        # Estimate tokens per file proportionally to its byte size.
        tokens_by_file = np.array(sizes)*(num_tokens/total_size)
        # Sorted list of texts ids
        num = 0
        tokens_by_file_sorted = np.argsort(tokens_by_file)
        #for i,idx in enumerate(tokens_by_file_sorted[:-len(tokens_by_file_sorted)-1:-1]):
        # Accumulate files from smallest to largest until obj_tokens is reached.
        for i,idx in enumerate(tokens_by_file_sorted):
            if tokens_by_file[idx] >= num_tokens_article_min:
                num += tokens_by_file[idx]
                list_idx.append(i)
                if num >= obj_tokens: break
        # list_idx holds positions within the sorted order; map back to file indices.
        articles_idxs = tokens_by_file_sorted[list_idx]
        # creation of the corpus folder
        folder = 'corpus_'+str(int(obj_tokens))
        path_corpus = path/folder
        path_corpus.mkdir(exist_ok=True, parents=True)
        # copy text files to corpus folder
        for idx in articles_idxs:
            file = files[idx]
            shutil.copy(str(file), str(path_corpus))
        print(f'files copied to the new corpus folder: {path/folder}')
        return path_corpus
    else:
        # NOTE(review): message hard-codes 100 000 000 although obj_tokens is a parameter.
        print('As there are less than 100 000 000 tokens in the initial corpus, we use it.')
        return dest
| [
"noreply@github.com"
] | noreply@github.com |
6a73dd04ba81d6e84d0f974d2f9a95b9a2d41929 | ef583b730d473032f572f56c9c50a07fde9d3c03 | /CBV/CBV/settings.py | 509615f47faea6a2cd3078bd89cbe126beac8bae | [] | no_license | Kiran-MCA/django-deployment-example-2 | 94f7acfea5f77ffbcd5e95c2866c9621815b34d6 | acc80e76fe0bfe55cf03c08ee161bf6697e0ad32 | refs/heads/master | 2020-04-18T14:41:19.128830 | 2019-01-25T18:27:29 | 2019-01-25T18:27:29 | 167,595,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | """
Django settings for CBV project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project-level template and static asset directories (referenced by
# TEMPLATES['DIRS'] and STATICFILES_DIR below).
TEMPLATE_DIR = os.path.join(BASE_DIR,"templates")
STATIC_DIR = os.path.join(BASE_DIR,"static")

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'e_^!pxi^q!3*8lmcp_*2zf$@9-hiq0q!euevy1#197$+t!#hqm'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'basic_app',  # project app listed first so its templates/statics take precedence
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'CBV.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'CBV.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'

# NOTE(review): Django reads STATICFILES_DIRS (plural); this singular name
# is silently ignored by collectstatic — confirm and rename if intended.
STATICFILES_DIR = [
    STATIC_DIR,
]
| [
"kiranmca2015@gmail.com"
] | kiranmca2015@gmail.com |
a3b83d4a039ffcdc8189a315b49c2eaadfd1546c | 9943b042d7ef2c3c165a0762ce3c46facd85e1e2 | /myalert/livedemo.py | 91d4f0cac7abc67025793128c25e12c786baf19a | [] | no_license | shaikh9404/Driver-Drowsiness-Detection | 70605327e5298a5348b31f5a8a5f24200b6520a5 | f679a5f4f03142ce3ac6ba6a2f335b8b2268d0e2 | refs/heads/main | 2023-07-05T16:20:23.074174 | 2021-08-23T09:31:34 | 2021-08-23T09:31:34 | 398,802,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,929 | py | import dlib
import cv2
from imutils import face_utils
from scipy.spatial import distance
import math
import pandas as pd
import numpy as np
import pickle
import warnings
from django.shortcuts import render
# dlib frontal face detector + 68-point landmark predictor, loaded once at
# import time from the bundled model file.
p = "static/shape_predictor_68_face_landmarks0.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)
def eye_aspect_ratio(eye):
    """Eye aspect ratio: (|p1-p5| + |p2-p4|) / (2 * |p0-p3|) over a six-point
    eye contour; smaller values indicate a more closed eye."""
    vertical_a = distance.euclidean(eye[1], eye[5])
    vertical_b = distance.euclidean(eye[2], eye[4])
    horizontal = distance.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
def mouth_aspect_ratio(mouth):
    """Mouth aspect ratio: vertical gap (points 14/18) over horizontal span
    (points 12/16) of the landmark slice passed in."""
    vertical = distance.euclidean(mouth[14], mouth[18])
    horizontal = distance.euclidean(mouth[12], mouth[16])
    return vertical / horizontal
def circularity(eye):
    """Roundness measure 4*pi*Area/perimeter^2, where Area comes from a
    circle whose diameter is the eye[1]-eye[4] distance and the perimeter is
    the closed six-point contour length."""
    diameter = distance.euclidean(eye[1], eye[4])
    area = math.pi * ((diameter / 2.0) ** 2)
    perimeter = sum(
        distance.euclidean(eye[a], eye[b])
        for a, b in ((0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 0))
    )
    return 4 * math.pi * area / (perimeter ** 2)
def mouth_over_eye(eye):
    """Ratio of mouth aspect ratio to eye aspect ratio computed on the same
    landmark slice (both helpers index into the same array)."""
    return mouth_aspect_ratio(eye) / eye_aspect_ratio(eye)
def average(y_pred):
    """Smooth a 0/1 prediction sequence in place with a three-frame window:
    each kept index is replaced by 1 when the mean of itself and its two
    neighbours is >= 0.5, else 0.  Indices at 240-frame chunk boundaries are
    left untouched.  Returns the (mutated) list.

    Fix: the original also read ``y_pred[i+1]`` at the final index, raising
    IndexError whenever ``len(y_pred)`` was not a multiple of 240; the last
    index is now always skipped (consistent with the boundary rule).
    """
    n = len(y_pred)
    for i in range(n):
        # skip chunk boundaries and the final element (no right neighbour)
        if i % 240 == 0 or (i + 1) % 240 == 0 or i + 1 >= n:
            continue
        window_mean = float(y_pred[i - 1] + y_pred[i] + y_pred[i + 1]) / 3
        y_pred[i] = 1 if window_mean >= 0.5 else 0
    return y_pred
def model(landmark,clf):
    """Build the four-feature row (EAR, MAR, circularity, MAR/EAR) from the
    68-point ``landmark`` array, z-normalise it with the calibration
    ``mean``/``std``, and classify with ``clf``.

    Returns ("Drowsy" | "Alert", normalised feature row as an ndarray).

    NOTE(review): ``mean`` and ``std`` are read as module-level globals but
    are never assigned at module scope in this file (run() keeps them local)
    — confirm the wiring before use.  Also, pd.DataFrame.append was removed
    in pandas 2.0; pin pandas < 2 or migrate to pd.concat.
    """
    features = pd.DataFrame(columns=["EAR", "MAR", "Circularity", "MOE"])
    eye = landmark[36:68]
    ear = eye_aspect_ratio(eye)
    mar = mouth_aspect_ratio(eye)
    cir = circularity(eye)
    mouth_eye = mouth_over_eye(eye)
    df = features.append({"EAR": ear, "MAR": mar, "Circularity": cir, "MOE": mouth_eye}, ignore_index=True)
    # z-normalised copies of each raw feature, appended as *_N columns
    df["EAR_N"] = (df["EAR"] - mean["EAR"]) / std["EAR"]
    df["MAR_N"] = (df["MAR"] - mean["MAR"]) / std["MAR"]
    df["Circularity_N"] = (df["Circularity"] - mean["Circularity"]) / std["Circularity"]
    df["MOE_N"] = (df["MOE"] - mean["MOE"]) / std["MOE"]
    Result = clf.predict(df)
    if Result == 1:
        Result_String = "Drowsy"
    else:
        Result_String = "Alert"
    return Result_String, df.values
# Calibration
def calibration():
    """Collect facial landmarks from webcam frames until the user presses
    ESC, then return per-feature (mean, std) Series used to normalise live
    frames.

    Opens camera 0 and a preview window; blocks until ESC (key code 27).
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (10, 400)
    fontScale = 1
    fontColor = (255, 255, 255)
    lineType = 2
    data = []
    cap = cv2.VideoCapture(0)
    while True:
        _, image = cap.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rects = detector(image, 0)
        for (i, rect) in enumerate(rects):
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            data.append(shape)
            cv2.putText(image, "Calibrating...", bottomLeftCornerOfText, font, fontScale, fontColor, lineType)
            # draw each landmark point on the preview frame
            for (x, y) in shape:
                cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
        cv2.imshow("CalibrationOutput", image)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:  # ESC ends calibration
            break
    cv2.destroyAllWindows()
    cap.release()
    features_test = []
    for d in data:
        # same four features used by model(): EAR, MAR, circularity, MAR/EAR
        eye = d[36:68]
        ear = eye_aspect_ratio(eye)
        mar = mouth_aspect_ratio(eye)
        cir = circularity(eye)
        mouth_eye = mouth_over_eye(eye)
        features_test.append([ear, mar, cir, mouth_eye])
    features_test = np.array(features_test)
    x = features_test
    y = pd.DataFrame(x, columns=["EAR", "MAR", "Circularity", "MOE"])
    df_means = y.mean(axis=0)
    df_std = y.std(axis=0)
    return df_means, df_std
# live demo
def live(clf):
    """Run the live drowsiness demo: classify each webcam frame with ``clf``
    via model(), overlay the label on the preview, and loop until ESC.

    Returns (data, result): per-frame normalised feature arrays and the
    corresponding "Drowsy"/"Alert" label strings.

    NOTE(review): model() reads module-level ``mean``/``std`` that are never
    assigned at module scope — confirm calibration results are wired in
    before calling this.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (10, 400)
    fontScale = 1
    fontColor = (255, 255, 255)
    lineType = 2
    cap = cv2.VideoCapture(0)
    data = []
    result = []
    while True:
        _, image = cap.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rects = detector(image, 0)
        for (i, rect) in enumerate(rects):
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            Result_String, features = model(shape,clf)
            cv2.putText(image, Result_String, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)
            data.append(features)
            result.append(Result_String)
            # draw each landmark point on the preview frame
            for (x, y) in shape:
                cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
        cv2.imshow("Output", image)
        k = cv2.waitKey(300) & 0xFF
        if k == 27:  # ESC quits the demo
            break
    cv2.destroyAllWindows()
    cap.release()
    return data, result
def run(request,model):
    """Django-style entry point: run calibration, then the live demo with
    ``model`` as the classifier, and finally render the index page.

    NOTE(review): ``mean, std`` are assigned locally here, but model() reads
    module-level globals of the same names (never set in this module), so
    live() will raise NameError at runtime — pass them explicitly or assign
    globals.  The ``model`` parameter also shadows the module-level model()
    function within this scope.
    """
    mean, std = calibration()
    data, result = live(model)
    print(result)
    return render(request, 'index.html')
| [
"shaikhqaiser9404@gmail.com"
] | shaikhqaiser9404@gmail.com |
2678427304e86a98502f35d1db2967dda840a57b | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/can-you-eat-your-favorite-candy-on-your-favorite-day.py | bdb18cca50672369c8859fc0b27ba4afe75dc6ed | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 429 | py | # Time: O(n)
# Space: O(n)
class Solution(object):
    def canEat(self, candiesCount, queries):
        """
        :type candiesCount: List[int]
        :type queries: List[List[int]]
        :rtype: List[bool]
        """
        # running[i] holds the total number of candies of types 0..i-1.
        running = [0]
        for count in candiesCount:
            running.append(running[-1] + count)
        answers = []
        for candy_type, day, cap in queries:
            # Earliest day type `candy_type` can be reached: eat `cap` per day.
            earliest = running[candy_type] // cap
            # Latest day it is still available: eat exactly one per day.
            latest = running[candy_type + 1]
            answers.append(earliest < day + 1 <= latest)
        return answers
| [
"noreply@github.com"
] | noreply@github.com |
cbb726d681cefa841cc037490ea8f842271a57c5 | 889810adc2234ff57b35afaea14065cb29ad4b8d | /manage.py | be7195ae175ff8e5b187d75da5d4f07954c03820 | [] | no_license | mlitfin123/AmeritradeTradingApp | 6d576932069cbc33d48229e72f33483724b4fa82 | 1f5ac3416ae47a2425650a6e780b599a2291b7f4 | refs/heads/master | 2022-12-28T23:28:20.354944 | 2020-10-15T19:30:57 | 2020-10-15T19:30:57 | 297,007,341 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: point Django at this project's settings and dispatch the
    command-line arguments to Django's management CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trading_app.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"mark.litfin@yahoo.com"
] | mark.litfin@yahoo.com |
cb5e53e5a56ab93a25f7a8f711cee7480e828eed | e4e43b09ac045c1647722c9a922b55f86837eec5 | /app/api/v1/views/meetup_view.py | dd89bcb558b9554ee82fe2219fc5abfeb92f07aa | [] | no_license | tirgei/Questioner-API | 13b8655472f09264a3b4472b1393ca99855a2a54 | aa0070fcfc14d7277921fe4dc2dc4595c6bdb03c | refs/heads/develop | 2022-12-10T08:20:23.021913 | 2019-10-22T16:37:02 | 2019-10-22T16:37:02 | 164,514,589 | 0 | 6 | null | 2022-12-08T01:31:32 | 2019-01-07T23:36:35 | Python | UTF-8 | Python | false | false | 4,015 | py | from flask import jsonify, request, make_response
from ..schemas.meetup_schema import MeetupSchema
from ..models.meetup_model import Meetup as MeetupModel
from marshmallow import ValidationError
from flask_jwt_extended import (jwt_required, get_jwt_identity)
from flask_restful import Resource
db = MeetupModel()
class Meetups(Resource):
    """ Resource for meetup endpoints """

    @jwt_required
    def post(self):
        """ Endpoint to create a meetup.

        Returns 201 with the serialized meetup on success, 400 on a missing
        or invalid payload (validation errors included under 'errors').
        """
        message = ''
        status_code = 200
        response = {}

        json_data = request.get_json()

        if not json_data:
            message = 'No data provided'
            status_code = 400
        else:
            try:
                data = MeetupSchema().load(json_data)
                data['user_id'] = get_jwt_identity()

                new_meetup = db.save(data)
                result = MeetupSchema().dump(new_meetup)

                status_code = 201
                message = 'Meetup created successfully'
                # Fix: the serialized meetup was built but never added to the
                # response; include it so clients receive the created resource.
                response.update({'data': result})

            except ValidationError as err:
                errors = err.messages
                status_code = 400
                message = 'Invalid data. Please fill all required fields'
                response.update({'errors': errors})

        response.update({'status': status_code, 'message': message})
        return response, status_code

    def get(self):
        """ Endpoint to fetch all meetups """
        meetups = db.all()
        result = MeetupSchema(many=True).dump(meetups)
        return {'status':200, 'data':result}, 200
class Meetup(Resource):
    """ Resource for a single meetup item """

    def get(self, meetup_id):
        """ Fetch one meetup by id; 404 when it does not exist. """
        if not db.exists('id', meetup_id):
            return {'status': 404, 'message': 'Meetup not found'}, 404
        meetup = db.find('id', meetup_id)
        return {'status': 200, 'data': MeetupSchema().dump(meetup)}, 200

    @jwt_required
    def delete(self, meetup_id):
        """ Delete one meetup by id; 404 when it does not exist. """
        if not db.exists('id', meetup_id):
            return {'status': 404, 'message': 'Meetup not found'}, 404
        db.delete(meetup_id)
        return {'status': 200, 'message': 'Meetup deleted successfully'}, 200
class MeetupsUpcoming(Resource):
    """ Resource for upcoming meetups """

    def get(self):
        """ Endpoint to fetch meetups.

        NOTE(review): despite the resource name, this returns every meetup —
        no date filtering is applied; confirm intended behaviour.
        """
        result = MeetupSchema(many=True).dump(db.all())
        return {'status': 200, 'data': result}, 200
class MeetupRsvp(Resource):
    """ Resource for meetup rsvp """

    @jwt_required
    def post(self, meetup_id, rsvp):
        """ Endpoint to RSVP to a meetup.

        ``rsvp`` must be one of 'yes' / 'no' / 'maybe'; returns 404 when the
        meetup does not exist and 400 on an invalid rsvp value.
        """
        message = ''
        status_code = 200
        response = {}
        valid_responses = ('yes', 'no', 'maybe')

        if not db.exists('id', meetup_id):
            # Fix: removed a stray debug print() that wrote to stdout on
            # every missing-meetup request.
            status_code = 404
            message = 'Meetup not found'
        elif rsvp not in valid_responses:
            status_code = 400
            message = 'Invalid rsvp'
        else:
            meetup = db.find('id', meetup_id)
            status_code = 200
            message = 'Meetup rsvp successfully'
            response.update({
                'data': {
                    'user_id': get_jwt_identity(),
                    'meetup_id': meetup['id'],
                    'topic': meetup['topic'],
                    'status': rsvp
                }
            })

        response.update({'status': status_code, 'message': message})
        return response, status_code
"tirgeic@gmail.com"
] | tirgeic@gmail.com |
6adb8dbb66799900470cd4fd028f44790bfb5483 | 14f9b8768ae8220ef5e98f4413fcbff7464b3706 | /virtualenv/Lib/site-packages/django_elasticsearch_dsl/__init__.py | 515ee7df54d8d3bc5365201426f41a581caae93d | [
"BSD-3-Clause"
] | permissive | smbpgroup/upibo_tuan | 433fd25fa1e1180b3cce8211d6ce77b3c1f929e4 | 37abd7774fac7b45b6c6a0f0f84179937c47e19d | refs/heads/master | 2022-12-10T13:45:12.393757 | 2019-07-02T03:31:08 | 2019-07-02T03:31:08 | 162,318,482 | 0 | 0 | BSD-3-Clause | 2022-05-25T02:21:25 | 2018-12-18T16:45:52 | Python | UTF-8 | Python | false | false | 314 | py | from django.utils.module_loading import autodiscover_modules
from .documents import DocType # noqa
from .indices import Index # noqa
from .fields import * # noqa
__version__ = '0.5.0'
def autodiscover():
    # Import every installed app's `documents` module so DocType subclasses
    # register themselves (same pattern as django.contrib.admin.autodiscover).
    autodiscover_modules('documents')


# AppConfig used when the app is listed in INSTALLED_APPS without an
# explicit configuration path.
default_app_config = 'django_elasticsearch_dsl.apps.DEDConfig'
| [
"vhbdragon@gmail.com"
] | vhbdragon@gmail.com |
d306243861c84fe774348629e0f6ea3b171e152b | e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488 | /duo_admin/komand_duo_admin/actions/enroll_user/__init__.py | 7def71a20edba019a1102a426347c6cd09323cbe | [
"MIT"
] | permissive | OSSSP/insightconnect-plugins | ab7c77f91c46bd66b10db9da1cd7571dfc048ab7 | 846758dab745170cf1a8c146211a8bea9592e8ff | refs/heads/master | 2023-04-06T23:57:28.449617 | 2020-03-18T01:24:28 | 2020-03-18T01:24:28 | 248,185,529 | 1 | 0 | MIT | 2023-04-04T00:12:18 | 2020-03-18T09:14:53 | null | UTF-8 | Python | false | false | 71 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import EnrollUser
| [
"jonschipp@gmail.com"
] | jonschipp@gmail.com |
604932035e16545dfa11b6e3aa2c9bd50418efe3 | cf715ecb806aab3c40b512d6027d686647fff354 | /Deployment/flask-app/server.py | c4dd6f71aaf42f582d26a9d3ff44eba332ea56f9 | [] | no_license | zhanghaoyue/mammogram_deeplearning | 780bc975bd0844cff9ff70b7abb78c2bda1c67f8 | a283373593845024c766378bbfcfb8342c8b9e99 | refs/heads/master | 2020-05-29T11:15:51.381981 | 2019-09-27T23:22:35 | 2019-09-27T23:22:35 | 189,110,965 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,931 | py | import os.path
import sys
import flask
from flask_uploads import UploadSet, IMAGES, configure_uploads, ALL
from flask import Flask, render_template, request, url_for
from PIL import Image
import time
import cv2
import numpy as np
import config
from werkzeug.utils import secure_filename
from model import Pytorchmodel
# Make the bundled model code importable and expose the repository root.
zichen_model_dir = os.path.join(os.getcwd(), r'Model_code')
sys.path.insert(0, zichen_model_dir)
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
print(sys.path)  # NOTE(review): debug print left in; consider logging instead

app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config.from_object(config)

# Flask-Uploads destination for user-submitted photos (paths come from `config`).
photos = UploadSet('PHOTO')
configure_uploads(app, photos)

# Pre-trained weights; the model is instantiated once at import time.
segmentation_model_path = './Model_code/checkpoint/segmentation_model.pth'
classification_model_path = './Model_code/checkpoint/classification_model.pth'
model = Pytorchmodel(segmentation_model_path, classification_model_path)
@app.route('/upload_image', methods=['POST', 'GET'])
def upload():
    """Main upload page.

    POST: saves the uploaded image under a timestamped name, runs the model
    via predict_img(), writes the three result PNGs (input, attention map,
    overlay) into static/, and returns their URLs plus the prediction as
    JSON.
    GET: renders the empty upload form.
    """
    if request.method == 'POST':
        img = request.files['img'].filename
        img = secure_filename(img)
        # timestamp prefix keeps concurrent uploads from colliding
        new_name = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime()) + '_' + img
        filename = photos.save(request.files['img'], name=new_name)
        data = predict_img(photos.path(filename))
        # persist the three PIL images produced by the model
        data['img_png'].save('static/'+filename[:-4]+'img_png.png')
        data['attention_map_png'].save('static/'+filename[:-4]+'attention_map_png.png')
        data['overlay_png'].save('static/'+filename[:-4]+'overlay_png.png')
        img_path_1 = url_for('static', filename=filename[:-4]+'img_png.png')
        img_path_2 = url_for('static', filename=filename[:-4]+'attention_map_png.png')
        img_path_3 = url_for('static', filename=filename[:-4]+'overlay_png.png')
        return flask.jsonify({"result": data['predictions'], "img_path_1": img_path_1,
                              "img_path_2": img_path_2, "img_path_3": img_path_3})
    else:
        img_path_1 = None
        img_path_2 = None
        img_path_3 = None
        result = []
        return render_template('upload.html', img_path_1=img_path_1,
                               img_path_2=img_path_2, img_path_3=img_path_3, result=result)
@app.route('/predict', methods=['GET', 'POST'])
def predict():
    """JSON prediction endpoint.

    POST: decode the uploaded 'image' file into a BGR array and run the
    model through predict_img().  Any other method returns {'state': False}.
    """
    payload = {'state': False}
    if request.method == 'POST':
        raw = request.files['image'].read()
        buffer = np.fromstring(raw, np.uint8)
        frame = cv2.imdecode(buffer, flags=1)
        payload = predict_img(frame)
    return flask.jsonify(payload)
def predict_img(img):
    """Run the global pytorch model on `img` and package the outputs.

    Returns a dict with the label/probability under 'predictions', the three
    rendered images, 'state' True, and the elapsed inference 'time'.
    """
    started = time.time()
    outcome = model.predict(img)
    elapsed = time.time() - started
    return {
        'predictions': [{'label': outcome[0], 'probability': ("%.2f" % outcome[1])}],
        'img_png': outcome[2],
        'attention_map_png': outcome[3],
        'overlay_png': outcome[4],
        'state': True,
        'time': elapsed,
    }
@app.route('/')
def index():
    """Serve the initial landing page."""
    return render_template('index.html')
@app.route('/front_page')
def front_page():
    """Serve the front page view."""
    return render_template('front_page.html')
@app.route('/team_member')
def team_member():
    """Serve the team-member page view."""
    return render_template('team_member.html')
def shutdown_server():
    """Ask the embedded Werkzeug dev server to stop (Apache deployment helper)."""
    stop = request.environ.get('werkzeug.server.shutdown')
    if stop is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    stop()
@app.route('/shutdown', methods=['POST'])
def shutdown():
    """Trigger shutdown_server() and report the server status."""
    shutdown_server()
    return 'Server shutting down...'
| [
"harryzhangbruins@gmail.com"
] | harryzhangbruins@gmail.com |
db1d8e83bfc3b7b8960e859584bca4f782413fee | 6490ee652408103e865d168ce9ac79309cf662a7 | /src/camvid_pipeline.py | cc5755e6a47313ebf702e47e8a5ccd25afdad743 | [] | no_license | ml-edu/Enet | 0d5d0f3bc68ae4d135450299bc5a44b6640f83d9 | 5f8ddfae324c44ba23ae41198785d1d959f2baf2 | refs/heads/master | 2022-04-18T00:30:39.295591 | 2020-01-24T10:45:16 | 2020-01-24T10:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,345 | py | import numpy as np
import torch, cv2, os
from time import time
from tqdm import tqdm
from PIL import Image
from config import CAMVID_CONFIGS
from matplotlib import pyplot as plt
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage
def get_class_weights(loader, num_classes, c=1.02):
    """Estimate per-class weights as 1/log(c + class_frequency).

    Note: frequencies are computed from a single batch drawn off the loader,
    not the whole dataset.
    """
    _, labels = next(iter(loader))
    flat = labels.flatten()
    frequency = np.bincount(flat, minlength=num_classes) / len(flat)
    return 1 / (np.log(c + frequency))
def read_image(image_file):
    """Load an image file and return it as a float tensor of shape
    (1, C, H, W) resized to 512x512 and moved to ``device``.

    NOTE(review): relies on a module-level ``device`` that is not defined in
    this file — confirm it is set before use.  Also, cv2.resize's third
    positional argument is ``dst``, not the interpolation flag — confirm the
    intended interpolation is actually applied.
    """
    image = Image.open(image_file)
    image = np.array(image)
    image = cv2.resize(image, (512, 512), cv2.INTER_LINEAR)
    image = torch.tensor(image).unsqueeze(0).float()
    # HWC -> CHW via two axis swaps on the batched tensor
    image = image.transpose(2, 3).transpose(1, 2).to(device)
    return image
def decode_segmap(image, color_dict):
    """Map a 2-D array of class ids (0-11) to a color image.

    ``color_dict`` supplies one color triple per class under keys
    'obj0'..'obj11'; output channels are the reversed triple (BGR ordering),
    and pixels whose id falls outside 0-11 stay black.
    """
    palette = np.array([
        color_dict['obj0'], color_dict['obj1'],
        color_dict['obj2'], color_dict['obj3'],
        color_dict['obj4'], color_dict['obj5'],
        color_dict['obj6'], color_dict['obj7'],
        color_dict['obj8'], color_dict['obj9'],
        color_dict['obj10'], color_dict['obj11']
    ]).astype(np.uint8)
    rgb = np.zeros(image.shape + (3,), dtype=np.uint8)
    for cls in range(12):
        rgb[image == cls] = palette[cls][::-1]
    return rgb
def predict_rgb(model, tensor, color_dict):
    """Run ``model`` on one input tensor and return the decoded color mask
    (argmax over the class dimension fed through decode_segmap)."""
    with torch.no_grad():
        logits = model(tensor.float()).squeeze(0)
    labels = logits.data.max(0)[1].cpu().numpy()
    return decode_segmap(labels, color_dict)
def visualize_batch(loader, model, color_dict, n_images):
    '''Visualize batch from model
    Params:
        loader -> Data loader
        model -> Model for prediction
        color_dict -> Class color dict
        n_images -> Number of images (< batch size)
    '''
    x_batch, y_batch = next(iter(loader))
    fig, axes = plt.subplots(nrows = n_images, ncols = 3, figsize = (16, 16))
    plt.setp(axes.flat, xticks = [], yticks = [])
    # NOTE(review): c starts at 1, so sample 0 of the batch is never shown —
    # kept as-is to preserve the original indexing; confirm intent.
    c = 1
    for i, ax in enumerate(axes.flat):
        if i % 3 == 0:
            ax.imshow(ToPILImage()(x_batch[c]))
            ax.set_xlabel('Image_' + str(c))
        elif i % 3 == 1:
            ax.imshow(
                decode_segmap(
                    y_batch[c],
                    color_dict
                )
            )
            ax.set_xlabel('Ground_Truth_' + str(c))
        elif i % 3 == 2:
            # Fix: use the `model` argument instead of the module-level
            # `enet` global, which made the function fail unless a variable
            # named `enet` happened to exist at call time.
            ax.imshow(
                predict_rgb(
                    model,
                    x_batch[c].unsqueeze(0).to(device),
                    color_dict
                )
            )
            ax.set_xlabel('Predicted_Mask_' + str(c))
            # Fix: advance to the next sample only after completing a row;
            # previously c was incremented on every axis, so each row showed
            # the image, ground truth, and prediction of three *different*
            # samples (labels Image_1 / Ground_Truth_2 / Predicted_Mask_3).
            c += 1
    plt.show()
class CamVidDataset(Dataset):
    def __init__(self, images, labels, height, width):
        '''Camvid Dataset
        Params:
            images -> List of image files
            labels -> List of label files
            height -> Height of image and label
            width -> Width of image and label
        '''
        self.images = images
        self.labels = labels
        self.height = height
        self.width = width

    def __len__(self):
        # dataset length = number of image files
        return len(self.images)

    def __getitem__(self, index):
        # Returns (image CHW float-ish tensor, label HW tensor) for `index`.
        # NOTE(review): cv2.resize expects dsize as (width, height) but
        # (self.height, self.width) is passed — dimensions are swapped for
        # non-square sizes.  Also the third positional argument of
        # cv2.resize is `dst`, not the interpolation flag — confirm both.
        image_id = self.images[index]
        label_id = self.labels[index]
        # Read Image
        x = Image.open(image_id)
        x = np.array(x)
        x = [
            cv2.resize(
                x, (self.height, self.width),
                cv2.INTER_LINEAR
            )
        ]
        x = np.stack(x, axis=2)
        # move the channel axis into CHW order expected by the model
        x = torch.tensor(x).transpose(0, 2).transpose(1, 3)
        # Read Mask (nearest-style intent: labels must not be interpolated)
        y = Image.open(label_id)
        y = np.array(y)
        y = [cv2.resize(
            y, (self.height, self.width),
            cv2.INTER_NEAREST
        )]
        y = torch.tensor(y)
        return x.squeeze(), y.squeeze()
def train(
    model, train_dataloader, val_dataloader,
    device, criterion, optimizer, train_step_size, val_step_size,
    visualize_every, save_every, save_location, save_prefix, epochs):
    '''Training Function for Campvid
    Params:
        model -> Model
        train_dataloader -> Train Data Loader
        val_dataloader -> Validation Data Loader
        device -> Training Device
        criterion -> Loss Function
        optimizer -> Optimizer
        train_step_size -> Training Step Size
        val_step_size -> Validation Step Size
        visualize_every -> Visualization Checkpoint
        save_every -> Saving Checkpoint
        save_location -> Checkpoint Saving Location
        save_prefix -> Checkpoint Prefix
        epochs -> Number of Training epochs
    Returns (train_loss_history, val_loss_history, train_time).
    '''
    # NOTE(review): bare except silently swallows all mkdir failures
    # (permissions, bad path), not just "already exists".
    try:
        os.mkdir(save_location)
    except:
        pass
    train_loss_history, val_loss_history = [], []
    train_time = []
    for epoch in range(1, epochs + 1):
        print('Epoch {}\n'.format(epoch))
        # Training
        start = time()
        train_loss = 0
        model.train()
        # NOTE(review): next(iter(loader)) re-creates the iterator each
        # step, so the same leading batches may repeat unless the loader
        # shuffles — confirm this is intended.
        for step in tqdm(range(train_step_size)):
            x_batch, y_batch = next(iter(train_dataloader))
            x_batch = x_batch.squeeze().to(device)
            y_batch = y_batch.squeeze().to(device)
            optimizer.zero_grad()
            out = model(x_batch.float())
            loss = criterion(out, y_batch.long())
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_loss_history.append(train_loss / train_step_size)
        print('\nTraining Loss: {}'.format(train_loss_history[-1]))
        train_time.append(time() - start)
        print('Training Time: {} seconds'.format(train_time[-1]))
        # Validation
        val_loss = 0
        model.eval()
        for step in tqdm(range(val_step_size)):
            x_val, y_val = next(iter(val_dataloader))
            x_val = x_val.squeeze().to(device)
            y_val = y_val.squeeze().to(device)
            out = model(x_val.float())
            out = out.data.max(1)[1]
            # NOTE(review): this is the mean *signed* difference between
            # label ids, not a standard loss/accuracy metric, and it is not
            # divided by val_step_size before being recorded — confirm.
            val_loss += (y_val.long() - out.long()).float().mean()
        val_loss_history.append(val_loss)
        print('\nValidation Loss: {}'.format(val_loss))
        # Visualization
        if epoch % visualize_every == 0:
            visualize_batch(val_dataloader, model, CAMVID_CONFIGS['class_colors'], 1)
        # Checkpoints: epoch, both losses, and the model weights
        if epoch % save_every == 0:
            checkpoint = {
                'epoch' : epoch,
                'train_loss' : train_loss,
                'val_loss' : val_loss,
                'state_dict' : model.state_dict()
            }
            torch.save(
                checkpoint,
                '{}/{}-{}-{}-{}.pth'.format(
                    save_location, save_prefix,
                    epoch, train_loss, val_loss
                )
            )
            print('Checkpoint saved')
    print(
        '\nTraining Done.\nTraining Mean Loss: {:6f}\nValidation Mean Loss: {:6f}'.format(
            sum(train_loss_history) / epochs,
            sum(val_loss_history) / epochs
        )
    )
    return train_loss_history, val_loss_history, train_time
"19soumik.rakshit96@gmail.com"
] | 19soumik.rakshit96@gmail.com |
fe31ab89f3e3accf47cecdd7b82dfdfe1dc82ed0 | 66e6360325b781ed0791868765f1fd8a6303726f | /TB2009/WorkDirectory/5161 Profile Check/Profile_108541.py | 2258d75bb8e0e950291c863f0631348a9989fb97 | [] | no_license | alintulu/FHead2011PhysicsProject | c969639b212d569198d8fce2f424ce866dcfa881 | 2568633d349810574354ad61b0abab24a40e510e | refs/heads/master | 2022-04-28T14:19:30.534282 | 2020-04-23T17:17:32 | 2020-04-23T17:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("ProfileCleanedMIP")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108541.root"),
streams = cms.untracked.vstring('Chunk699', 'HCAL_Trigger', 'HCAL_SlowData', 'HCAL_QADCTDC', 'HCAL_DCC021')
)
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
#IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699),
HcalTDCFED = cms.untracked.int32(8),
HcalQADCFED = cms.untracked.int32(8),
HcalSlowDataFED = cms.untracked.int32(3),
ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
baselineSamples = cms.untracked.uint32(2),
useMotherBoard0 = cms.untracked.bool(True),
useMotherBoard1 = cms.untracked.bool(True),
useMotherBoard2 = cms.untracked.bool(False),
useMotherBoard3 = cms.untracked.bool(True),
usePedestalMean = cms.untracked.bool(True),
pedestalMean = cms.untracked.string('PedestalMean_108541.txt'),
mip = cms.untracked.string('SecondaryMIP.txt'),
roughmip = cms.untracked.string('PercentageCorrectedGeV.txt'),
secondaryShift = cms.untracked.string("PercentageCorrectedGeV_SecondaryShift.txt"),
beamEnergy = cms.untracked.double(150),
adcMap = cms.untracked.string('FinalAdcMapping_All.txt'),
lowestSampleSubtraction = cms.untracked.bool(True),
numberOfSamplesForSubtraction = cms.untracked.int32(16),
numberOfSamplesToSkip = cms.untracked.int32(16)
)
process.averagecharge = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
output = cms.untracked.string("TotalEnergy_108541.root"),
textOutput = cms.untracked.bool(True),
interpolate = cms.untracked.bool(False) # interpolate for missing channels by averaging neighboring channels
)
process.averagecharge_interpolated = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
output = cms.untracked.string("TotalEnergy_Interpolated_108541.root"),
textOutput = cms.untracked.bool(True),
interpolate = cms.untracked.bool(True)
)
process.filladc = cms.EDAnalyzer("FillAdcDistributionAnalyzer",
invert = cms.untracked.bool(False),
highdef = cms.untracked.bool(True),
divideMIP = cms.untracked.bool(False),
baselineSubtraction = cms.untracked.bool(True),
output = cms.untracked.string("AdcDistribution_108541.root")
)
process.ABCcut = cms.EDFilter("SingleTowerParticleFilter")
process.MessageLogger = cms.Service("MessageLogger",
default = cms.untracked.PSet(
reportEvery = cms.untracked.int32(239)
)
)
process.muonveto = cms.EDFilter("MuonVetoFilter")
process.p = cms.Path(
process.tbunpack *
process.ABCcut *
process.vlsbinfo *
process.muonveto *
process.averagecharge
# process.averagecharge_interpolated *
# process.filladc
)
| [
"yichen@positron01.hep.caltech.edu"
] | yichen@positron01.hep.caltech.edu |
6f75e28c733c33a9c4658264dc52cb90fd3e4e09 | ab9a7d2ee5d73f6661483f039bb61480afb39aaa | /kegg_miner2.py | 0b369c33bb80f9b2954b1b7dfb2b30ce6174c9a7 | [] | no_license | hajimela/keggminer2 | 36291ae326ca98c9961cb348bc396ed10dc856a2 | 506b50000619c91415c822e98cd97a46cfdd83d2 | refs/heads/master | 2020-06-05T04:10:33.404729 | 2014-11-29T05:31:13 | 2014-11-29T05:31:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | #!/usr/bin/python
#Scripted by Yi Huang on 2013.02.22
#Final edit for adding commets: 2014.11.29
import datetime
import sys
import re
import urllib2
class KeggMiner(object):
    def __init__(self,spec):
        # KEGG organism code, e.g. "hsa" (human), "mmu" (mouse), "rno" (rat)
        self.spec = spec
def check_spec(self):
# check the spec is support
if self.spec == "hsa":
return True
elif self.spec == "mmu":
return True
elif self.spec == "rno":
return True
else:
return False
def connect_to_db(self,data_type):
# Connet KEGG REST-style API
my_url = "http://rest.kegg.jp/%s" % data_type
request = urllib2.Request(my_url)
response = urllib2.urlopen(request)
data = response.read()
return data
def get_pathway_list(self):
# this function will retrieve a list of pathways for specific species
if self.check_spec() == True:
my_pathways = {}
data_type = "list/pathway/%s" % self.spec
my_data = self.connect_to_db(data_type)
kegg_path = []
# The data retrieved from KEGG API is single line, including "\n", line sep
kegg_path = my_data.split("\n")
kegg_path.pop()
for i in kegg_path:
if self.spec == "hsa":
i = re.sub(r'\s-\sHomo\ssapiens\s\(human\)','',i)
elif self.spec == "mmu":
i = re.sub(r'\s-\sMus\smusculus\s\(mouse\)','',i)
else:
i = re.sub(r'\s-\sRattus\snorvegicus\s\(rat\)','',i)
(p_id, p_name) = i.split("\t")
p_id = re.sub('path:','',p_id)
# remove all the characters that are not allowed to use in Java
p_name = re.sub('\'','_',p_name)
p_name = re.sub(',','_',p_name)
p_name = re.sub('-','_',p_name)
p_name = re.sub(r'\s-\s','_',p_name)
p_name = re.sub(r'\s/\s','_',p_name)
p_name = re.sub(' ','_',p_name)
p_name = re.sub(r'_\(\S+\)_','_',p_name)
p_name = re.sub(r'_\(\S+\)','',p_name)
p_name = re.sub(r'\(\S+\)_','_',p_name)
p_name = re.sub('__','_',p_name)
p_name = re.sub('___','_',p_name)
my_pathways[p_id] = p_name
return my_pathways
else:
print "Get pathway ERROR: Data will be retrieved for hsa/mmu/rno only!"
def get_gene_list(self):
# This function get the gene list for each pathway
if self.check_spec() == True:
my_pathway_contents = {}
my_pathways = self.get_pathway_list()
p_list = my_pathways.keys()
for p in p_list:
my_pathway_contents[p]=[]
data_type = "link/%s/pathway" % self.spec
my_data = self.connect_to_db(data_type)
temp = my_data.split("\n")
temp.pop()
for i in temp:
(p_id, g_id) = i.split("\t")
p_id = re.sub('path:','',p_id)
(spec_code,entrez_id) = g_id.split(":")
my_pathway_contents[p_id].append(entrez_id)
return my_pathway_contents
else:
print "Get gene ERROR:Data will be retrieved for hsa/mmu/rno only!"
def gmt_file_gen(self):
# Generate gmt file for GSEA
now = datetime.datetime.now()
str_time = now.strftime("%Y%m%d")
my_file = open("%s_kegg_pathways_ver_%s.gmt" % (self.spec,str_time), "wr+")
my_path = self.get_pathway_list()
my_path_contents = self.get_gene_list()
p_id_list = my_path.keys()
for p in p_id_list:
p_name = my_path[p]
p_content = my_path_contents[p]
gene_set_name = '%s_%s' % (p, p_name)
my_file.write ('%s\t' % gene_set_name)
for g in p_content:
my_file.write ('%s\t' % str(g))
my_file.write ("\n")
my_file.close()
# Entry point: the species code (hsa/mmu/rno) is taken from the first CLI argument.
km = KeggMiner(sys.argv[1])
km.gmt_file_gen()
"yi.huang@riken.jp"
] | yi.huang@riken.jp |
43a44eb94d4c3cdc0eb12a66ca6aeb7e6f8ab7c6 | 49253f12cea4b2ec1df4d68876c3c330fec3f52b | /001_数据结构相关/001_set集合_交集_并集_差集_对称差集.py | f1a21ae3614ae074ef531ce11370b4832eeadf37 | [] | no_license | FelixZFB/Python_development_skills_summary | b2877652a5396936a28d5c65fb407df201ffa158 | 998679496de8385bda34734f83d927a7d340876a | refs/heads/master | 2020-06-09T16:58:51.242686 | 2020-02-27T07:02:48 | 2020-02-27T07:02:48 | 193,472,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding:utf-8 -*-
#
# Sets support the standard algebra operations: union, intersection,
# difference and symmetric difference, e.g.:
# a = t | s   # union of t and s
# b = t & s   # intersection of t and s
# c = t - s   # difference (items in t but not in s)
# d = t ^ s   # symmetric difference (items in exactly one of t and s)
a = [1, 5, 10, 15, 10]
b = [1, 5, 10, 9, 12]

# Converting to a set removes duplicate elements.
print(set(a))
print("*" * 50)

# Union
c1 = set(a) | set(b)
print(c1)
# Intersection
c2 = set(a) & set(b)
print(c2)
# Difference a - b (original comment mislabelled this as intersection)
c3 = set(a) - set(b)
print(c3)
# Difference b - a
c3 = set(b) - set(a)
print(c3)
# Symmetric difference
c4 = set(a) ^ set(b)
print(c4)
"18200116656@qq.com"
] | 18200116656@qq.com |
af095db3e641ba111f02ffd9cd791a3093eb5f7f | ca954289cd260efb32f5133680aa461a71982b12 | /venv/bin/rst2pseudoxml.py | 5140891c06aa7b3dea6d293780619d8261d8f4e9 | [] | no_license | jackieroger/Project2 | 7657fbc2f63fae6a12c66fe418ce9b1d0bab3a6a | 309701517899706101112b1c0fddc9bc29cc774c | refs/heads/main | 2023-03-07T10:55:26.035207 | 2021-02-20T07:33:42 | 2021-02-20T07:33:42 | 333,577,470 | 0 | 0 | null | 2021-01-27T22:27:47 | 2021-01-27T22:27:47 | null | UTF-8 | Python | false | false | 654 | py | #!/Users/jroger/Documents/UCSF/classes/bmi_203/Project2/venv/bin/python
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
# Switch to the user's preferred locale so docutils emits localized
# messages; a missing/unsupported locale must not abort the tool.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Narrowed from a bare `except:`: still tolerates any locale failure,
    # but no longer swallows SystemExit/KeyboardInterrupt.
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes). ' + default_description)

# Run the Docutils publisher over sys.argv / stdin.
publish_cmdline(description=description)
| [
"jacquelynmarie@icloud.com"
] | jacquelynmarie@icloud.com |
664e156bea0f1e13ee514761f31115af2f4ca942 | 2dd0082221239fef0e0894c852f70f1eaeb62b9e | /Assignments/dustin/Django/lines/lines/views.py | a00b3a15e612ce29d7e5ddaeda6d95a545eae3e9 | [] | no_license | pjz987/2019-10-28-fullstack-night | 03097cf3dc24aeec0c326044bb0fc99385fbc333 | 4c643013de73f08d7503d62ec602d6a5c80ffa7e | refs/heads/master | 2022-11-11T19:40:00.296645 | 2020-06-25T16:14:47 | 2020-06-25T16:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | from django.shortcuts import render
import json
from .models import Line
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def lines(request):
    """Persist any line segments posted in the request body, then return
    every stored line as JSON under the 'rocketleague' key."""
    body = request.body
    if body:
        payload = json.loads(body)
        for segment in payload['lines']:
            (x0, y0), (x1, y1) = segment['start'], segment['end']
            Line(x0=x0, y0=y0, x1=x1, y1=y1).save()
    serialized = []
    for record in Line.objects.all():
        serialized.append({'start': [record.x0, record.y0], 'end': [record.x1, record.y1]})
    return JsonResponse({'rocketleague': serialized})
| [
"Xodacidal@gmail.com"
] | Xodacidal@gmail.com |
765ef51113cf42ecda4247258f4faac0ca6dfb9b | 6b1d15ff586690078d6c12e6935d783bce0f1e6c | /iliad/core/output.py | 0a0b39a6e070b1f3e6136f7908ca905bcd8b04e1 | [] | no_license | richardwhiuk/iliad | 41b12741a681da5f4ec821d546cf56a95d95f35c | 620e962bf904cd2ae24325d07c550a59762feddd | refs/heads/master | 2021-01-21T10:08:56.567708 | 2012-10-20T10:32:08 | 2012-10-20T10:32:33 | 1,278,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py |
def Get(module=None):
    """Instantiate and return the 'Output' implementation exported by *module*."""
    output_cls = module.load('Output')
    return output_cls()
class Output:
    """Base output class; concrete output backends are loaded via Get()."""

    def __init__(self):
        # No state to initialise in the base implementation.
        pass
| [
"github@richardwhiuk.com"
] | github@richardwhiuk.com |
179abb64ecae99b91d68ad254486e6d81c809460 | c8e74578c57ba3b2f9ca985a68af55cbfb670c6c | /libs/nonblocking_subprocess/setup.py | e8e8b89ac4c5f04fdf42144ec1261c2d8a788647 | [
"BSL-1.0"
] | permissive | domnli/nxpy | 244f4693e01ac3e96a3e3b45ff3ab781c3ef21e3 | fd51d2b986679424586a7220b01d94efab7e4543 | refs/heads/master | 2020-07-14T14:34:47.601336 | 2019-01-24T11:11:06 | 2019-01-24T11:11:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,961 | py | # nxpy_nonblocking_subprocess -------------------------------------------------
# Copyright Nicola Musatti 2008 - 2018
# Use, modification, and distribution are subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# See https://github.com/nmusatti/nxpy/tree/master/libs/nonblocking_subprocess.
r"""
Packaging information.
"""
import codecs
import os
from setuptools import setup
# Distribution name on PyPI.
lib_name = 'nxpy_nonblocking_subprocess'

here = os.path.abspath(os.path.dirname(__file__))
# Reuse the README as the long description shown on PyPI.
with codecs.open(os.path.join(here,'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name=lib_name,
    version="1.0.0",
    author="Nicola Musatti",
    author_email="nicola.musatti@gmail.com",
    description="subprocess.Popen subclass using non-blocking I/O",
    long_description=long_description,
    project_urls={
        "Documentation": "https://nxpy.readthedocs.io/en/latest/",
        "Source Code": "https://github.com/nmusatti/nxpy",
    },
    license="Boost Software License 1.0 (BSL-1.0)",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Boost Software License 1.0 (BSL-1.0)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries',
    ],
    # Part of the nxpy namespace-package family.
    namespace_packages=['nxpy','nxpy.core'],
    packages=['nxpy.core.nonblocking_subprocess'],
    install_requires=[
        'six',
        'pypiwin32; sys_platform == "win32"',
        'nxpy_past'
    ],
)
"nicola.musatti@gmail.com"
] | nicola.musatti@gmail.com |
b128003cda24490a60d3220cd215949c96c4d188 | 0ef2ff5a5441a2bb0a30f2242c8f6a70287d76a7 | /init/init_script.py | 439d2f08e09ecaada70f070e30985987c74b6d70 | [
"CC-BY-4.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | martinsallandm/hw-xapp-python-lenovo | 5372f8e47e1cc9eabd1447286819d972c36cb1cd | 2123289d3a5ea7122607dea8e8f0d03a348d131b | refs/heads/main | 2023-08-14T22:59:57.948670 | 2021-09-27T01:59:14 | 2021-09-27T01:59:14 | 410,374,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,240 | py | # ==================================================================================
#
# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==================================================================================
# This initialization script reads in a json from the specified config map path
# to set up the initializations (route config map, variables etc) for the main
# xapp process
import json
import sys
import os
import signal
import time
default_routing_file = "/opt/route/test_route.rt"
lport = 0
def signal_handler(signum, frame):
    """Forward *signum* to the managed xApp subprocess, if one is running."""
    print("Received signal {0}\n".format(signum))
    # Look the globals up defensively: a signal may arrive before the
    # subprocess has been started, in which case the names do not exist yet
    # and the original code raised NameError inside the handler.
    proc = globals().get("xapp_subprocess")
    pid = globals().get("xapp_pid")
    if proc is None or pid is None:
        print("No xapp running. Quiting without sending signal to xapp\n", flush=True)
    else:
        print("Sending signal {0} to xapp ...".format(signum), flush=True)
        proc.send_signal(signum)
def parseConfigJson(config):
    """Run every registered section parser over *config*.

    Returns False as soon as a known section fails to parse; unknown
    sections are ignored.
    """
    for section in config:
        handler = ParseSection.get(section)
        if handler is None:
            continue
        if not handler(config):
            return False
    return True
def getMessagingInfo(config):
    """Extract the 'rmr-data' listen port from the config's messaging section.

    On success stores the port in the module-global ``lport``, exports it as
    the HW_PORT environment variable and returns True; returns False when
    no valid port is present.
    """
    global lport
    # Reset before searching so a port found by a previous call cannot mask
    # a missing port in *this* config (the original kept stale state and
    # wrongly returned True on a second call with a port-less config).
    lport = 0
    if 'messaging' in config.keys() and 'ports' in config['messaging'].keys():
        port_list = config['messaging']['ports']
        for portdesc in port_list:
            if 'port' in portdesc.keys() and 'name' in portdesc.keys() and portdesc['name'] == 'rmr-data':
                lport = portdesc['port']
                # Publish the port for the xApp process to pick up.
                os.environ["HW_PORT"] = str(lport)
                return True
    if lport == 0:
        print("Error! No valid listening port", flush=True)
        return False
    return True
def getXappName(config):
    """Read the xApp name from *config*, export it as XAPP_NAME and return
    True; return False when the key is absent."""
    key = "xapp_name"
    if key not in config:
        print(("Error ! No information found for {0} in config\n".format(key)), flush=True)
        return False
    name = config[key]
    print("Xapp Name is: " + name)
    os.environ["XAPP_NAME"] = name
    return True
# Dispatch table mapping top-level config keys to their parser functions;
# parseConfigJson() consults this to decide which sections it understands.
ParseSection = dict()
ParseSection["xapp_name"] = getXappName
ParseSection["messaging"] = getMessagingInfo
# ================================================================
if __name__ == "__main__":
    import subprocess

    # Command that launches the actual xApp process.
    cmd = ["/usr/local/bin/run-hw-python.py"]

    # The deployment injects the config map path via CONFIG_FILE.
    config_file = os.getenv("CONFIG_FILE", None)
    if config_file is None:
        print("Error! No configuration file specified\n", flush=True)
        sys.exit(1)

    with open(config_file, 'r') as f:
        try:
            config = json.load(f)
        except Exception as e:
            print(("Error loading json file from {0}. Reason = {1}\n".format(config_file, e)), flush=True)
            sys.exit(1)

    # Validate/apply the config (sets HW_PORT, XAPP_NAME, ...) before launch.
    result = parseConfigJson(config)
    if not result:
        print("Error parsing config json. Not executing xAPP", flush=True)
        sys.exit(1)
    else:
        print("Config read successfully", flush=True)

    # Register signal handlers so SIGINT/SIGTERM are forwarded to the xApp.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Start the xAPP
    print("Executing xAPP ....", flush=True)
    xapp_subprocess = subprocess.Popen(cmd, shell=False, stdin=None, stdout=None, stderr=None)
    xapp_pid = xapp_subprocess.pid

    # Periodically poll the process every 5 seconds to check if still alive
    while 1:
        xapp_status = xapp_subprocess.poll()
        if xapp_status is None:
            time.sleep(5)
        else:
            # poll() returns -N when the child was killed by signal N.
            print("XaPP terminated via signal {0}\n".format(-1 * xapp_status), flush=True)
            break
"allan@dee.ufrn.br"
] | allan@dee.ufrn.br |
bf82932d59f491d72707d0acf4ac20398b0d17e0 | 5d3feb4b10ba3c9fa7f552d548d301f041c7e62b | /resumes/migrations/0017_auto_20181208_0024.py | 87034d6acd3018589f4952dda2d86d5eb11cc330 | [] | no_license | datge/EuropeanCV-Italian-version- | ac1cf2db7c38102d31ead4971c671f7fe340f277 | 1257b6e497995a554d1cf2d6b4b5dc52f86649e0 | refs/heads/master | 2020-04-10T10:38:18.937371 | 2018-12-08T21:23:53 | 2018-12-08T21:23:53 | 160,971,858 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Generated by Django 2.1.1 on 2018-12-07 23:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django: relax Altre_Comp.id_resume so the form
    field may be left blank (blank=True); the FK target and CASCADE delete
    behaviour are unchanged."""

    dependencies = [
        ('resumes', '0016_auto_20181007_1957'),
    ]

    operations = [
        migrations.AlterField(
            model_name='altre_comp',
            name='id_resume',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='resumes.Informazioni_Personali'),
        ),
    ]
"dav-23@hotmail.it"
] | dav-23@hotmail.it |
89644be09475e2beafed536a5fca0858bfe57df0 | fd175bc06a5efb74de66e8272b18d4703aeb69f1 | /app_models/migrations/0006_auto_20171225_1221.py | c5aa01306ea581c3e24251986c4cb177fc698ab0 | [] | no_license | Ulan9304/mediasabak | 05ccb9bff41009ee3ea9a69caf1f2c7799073ec8 | 7df869c0e11e14c78e307165f1d72d9dd13c6b04 | refs/heads/master | 2021-09-01T13:54:28.184004 | 2017-12-27T09:01:26 | 2017-12-27T09:01:26 | 114,859,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-25 12:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django: make Media.link2 an optional 255-char
    CharField (blank and null allowed)."""

    dependencies = [
        ('app_models', '0005_auto_20171225_1220'),
    ]

    operations = [
        migrations.AlterField(
            model_name='media',
            name='link2',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Ссылка на медиа 2'),
        ),
    ]
"miyavi931@gmail.com"
] | miyavi931@gmail.com |
ac67a27db60a7525ff8ef0fa48f51b212d10446b | 51a6c543009d082b500d0a03e2c02e17dbdae2bc | /Day3/AH-ws/build/AUC-Robotics-Summer-Camp/Day6/auc_ws/src/auc_robot/catkin_generated/pkg.installspace.context.pc.py | 6b24713f8a918a6f7c33e3b26b32089a8af66710 | [] | no_license | Abdelrahman-Hanafy/AUCRobo-ROS | fca038a6231795187826bdb38edfaff66ad68d3f | 48055fc065ccdd82b9e45322fb6ddea2621a68c0 | refs/heads/master | 2022-12-13T16:12:20.054454 | 2020-09-04T15:27:16 | 2020-09-04T15:27:16 | 287,110,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "auc_robot"
PROJECT_SPACE_DIR = "/home/abdelrahman/AucRobotics/Day3/AH-ws/install"
PROJECT_VERSION = "0.0.0"
| [
"Abdelrahman.m.moustafa@gmail.com"
] | Abdelrahman.m.moustafa@gmail.com |
a9b02e6a52125369e3d3148193e40afc5ddcf206 | fc7ff6160626dea852009c2d97fbdf451608020f | /2ºAno/2ºSemestre/LA2/progDinamica/cubolandia.py | a7e81468a7b3f4dfcb052a6cd6ac5ec26449bfcd | [] | no_license | JorgeDPSilva/LCC | 5b55a254a7346a5f9d4b3ea056988f83ad96c5ac | 2f79b623a19bfe57e3607e2c751e6619a38d0de2 | refs/heads/main | 2023-08-19T07:17:32.521939 | 2021-09-27T14:20:38 | 2021-09-27T14:20:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | import sys
import math
def maneiras(lista_denom, size, inp):
if inp ==0: return 1
if inp < 0: return 0
if size <=0 and inp >=1: return 0
return maneiras(lista_denom, size-1, inp) + maneiras(lista_denom, size, inp-lista_denom[size-1])
def main():
    """Read a target amount from stdin and print how many ways it can be
    built from the cube denominations 1**3 .. 21**3."""
    denominations = [math.pow(base, 3) for base in range(1, 22)]
    target = int(sys.stdin.readline().strip('\n'))
    print(maneiras(denominations, len(denominations), target))


main()
"ricardocruz244@gmail.com"
] | ricardocruz244@gmail.com |
52bdb3230d140629357a95d7ffcbd591a87ff755 | b931f42289ab59ae835e10cbf903f311ecd34dc6 | /Python/Saurav_Saurav-Navdhare/Snake Game/Snake Game.py | 9ce5ddfbe14a818eba282ac7389ff323731188be | [
"MIT"
] | permissive | codersalman/Technocrats-HacktoberFest | dfb148a248af29562b5debffdfbe48e6956fe67e | 8ff0e13c0e24d23a5f5f042b0a0d05d4e2f2389e | refs/heads/main | 2023-09-03T10:11:22.854067 | 2021-10-23T17:27:32 | 2021-10-23T17:27:32 | 414,861,328 | 1 | 0 | MIT | 2021-10-08T05:46:51 | 2021-10-08T05:46:51 | null | UTF-8 | Python | false | false | 3,544 | py | import pygame
import random
# initializing pygame
pygame.init()
# Colors
white = (255, 255, 255) # rgb format
red = (255, 0, 0)
black = (0, 0, 0)
# Creating window
screen_width = 900
screen_height = 600
gameWindow = pygame.display.set_mode((screen_width, screen_height))
# Game Title
pygame.display.set_caption("Coders Home")
pygame.display.update()
clock = pygame.time.Clock()
font = pygame.font.SysFont(None, 55)
def text_screen(text, color, x, y):
    """Render *text* in *color* onto the game window, top-left at (x, y)."""
    rendered = font.render(text, True, color)
    gameWindow.blit(rendered, [x, y])
def plot_snake(gameWindow, color, snk_list, snake_size):
    """Draw every snake segment as a snake_size x snake_size square."""
    for seg_x, seg_y in snk_list:
        pygame.draw.rect(gameWindow, color, [seg_x, seg_y, snake_size, snake_size])
# Game Loop
def gameloop():
    """Run the main game loop: handle input, move the snake, detect food
    pickup, self-collision and wall collision, and draw every frame until
    the window is closed."""
    exit_game = False
    game_over = False
    snake_x = 45
    snake_y = 55
    velocity_x = 0
    velocity_y = 0
    snk_list = []      # list of [x, y] segments, tail first, head last
    snk_length = 1
    food_x = random.randint(20, screen_width-20)
    food_y = random.randint(60, screen_height -20)
    score = 0
    init_velocity = 4  # pixels moved per frame
    snake_size = 30
    fps = 60 # fps = frames per second

    while not exit_game:
        if game_over:
            gameWindow.fill(white)
            text_screen("Game Over! Press Enter To Continue", red, 100, 250)

            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    exit_game = True

                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_RETURN:
                        # NOTE(review): restarting via recursion grows the call
                        # stack on every restart; a loop-based restart would
                        # avoid that.
                        gameloop()
        else:
            # Arrow keys set the velocity; movement continues until changed.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    exit_game = True
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_RIGHT:
                        velocity_x = init_velocity
                        velocity_y = 0
                    if event.key == pygame.K_LEFT:
                        velocity_x = - init_velocity
                        velocity_y = 0
                    if event.key == pygame.K_UP:
                        velocity_y = - init_velocity
                        velocity_x = 0
                    if event.key == pygame.K_DOWN:
                        velocity_y = init_velocity
                        velocity_x = 0

            snake_x = snake_x + velocity_x
            snake_y = snake_y + velocity_y

            # Food pickup: within 10px in both axes counts as eaten.
            if abs(snake_x - food_x)<10 and abs(snake_y - food_y)<10:
                score +=1
                food_x = random.randint(20, screen_width - 30)
                food_y = random.randint(60, screen_height - 30)
                snk_length +=5

            gameWindow.fill(white)
            text_screen("Score: " + str(score * 10), red, 5, 5)
            pygame.draw.rect(gameWindow, red, [food_x, food_y, snake_size, snake_size])
            # Separator line under the score bar.
            pygame.draw.line(gameWindow, red, (0,40), (900,40),5)

            head = []
            head.append(snake_x)
            head.append(snake_y)
            snk_list.append(head)

            # Trim the tail once the list exceeds the current snake length.
            if len(snk_list)>snk_length:
                del snk_list[0]

            # Self collision: the head position already exists in the body.
            if head in snk_list[:-1]:
                game_over = True

            # Wall collision (the top 50px strip is the score bar).
            if snake_x<0 or snake_x>screen_width-20 or snake_y<50 or snake_y>screen_height-20:
                game_over = True

            plot_snake(gameWindow, black, snk_list, snake_size)
        pygame.display.update()
        clock.tick(fps)

    pygame.quit()
    quit()
quit()
gameloop()
| [
"noreply@github.com"
] | noreply@github.com |
ad6cc0a08e8ba3d2ad47ab45d0395df6b071594b | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/zope.app-Zope-3.2.1/zope.app/i18n/filters.py | 2807d5ca114aaa6b7749be72ef8b4ab16fdbd8fe | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,811 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Translation Domain Message Export and Import Filters
$Id: filters.py 38178 2005-08-30 21:50:19Z mj $
"""
__docformat__ = 'restructuredtext'
import time, re
from types import StringTypes
from zope.interface import implements
from zope.i18n.interfaces import IMessageExportFilter, IMessageImportFilter
from zope.app.i18n.interfaces import ILocalTranslationDomain
class ParseError(Exception):
    """Signals a failure while parsing gettext input.

    ``state`` is the parser state-machine state at the failure and
    ``lineno`` the 1-based input line number.
    """

    def __init__(self, state, lineno):
        super(ParseError, self).__init__(state, lineno)
        self.state = state
        self.lineno = lineno

    def __str__(self):
        return "state %s, line %s" % (self.state, self.lineno)
class GettextExportFilter(object):
    """Export the messages of a local translation domain as a gettext PO
    file (one language per export)."""

    implements(IMessageExportFilter)
    __used_for__ = ILocalTranslationDomain


    def __init__(self, domain):
        self.domain = domain

    def exportMessages(self, languages):
        'See IMessageExportFilter'
        domain = self.domain.domain

        # Gettext PO files hold a single language, so accept either a bare
        # language string or a one-element sequence.
        if isinstance(languages, StringTypes):
            language = languages
        elif len(languages) == 1:
            language = languages[0]
        else:
            raise TypeError(
                'Only one language at a time is supported for gettext export.')

        # Timestamp for the PO-Revision-Date header.
        dt = time.time()
        dt = time.localtime(dt)
        dt = time.strftime('%Y/%m/%d %H:%M', dt)
        output = _file_header %(dt, language.encode('UTF-8'),
                                domain.encode('UTF-8'))

        # One msgid/msgstr pair per message, UTF-8 encoded (Python 2 code:
        # message ids/translations are unicode objects).
        for msgid in self.domain.getMessageIds():
            msgstr = self.domain.translate(msgid, target_language=language)
            msgstr = msgstr.encode('UTF-8')
            msgid = msgid.encode('UTF-8')
            output += _msg_template %(msgid, msgstr)

        return output
class GettextImportFilter(object):
    """Import messages from a gettext PO file into a local translation
    domain (one language per import)."""

    implements(IMessageImportFilter)
    __used_for__ = ILocalTranslationDomain


    def __init__(self, domain):
        self.domain = domain

    def importMessages(self, languages, file):
        'See IMessageImportFilter'

        # PO files hold a single language; accept a string or a
        # one-element sequence.
        if isinstance(languages, StringTypes):
            language = languages
        elif len(languages) == 1:
            language = languages[0]
        else:
            raise TypeError(
                'Only one language at a time is supported for gettext export.')

        # parseGetText()[3] is the {msgid-tuple: (comments, msgstr-parts)}
        # mapping; the empty msgid ('',) carries the PO header block.
        result = parseGetText(file.readlines())[3]
        headers = parserHeaders(''.join(result[('',)][1]))
        del result[('',)]
        charset = extractCharset(headers['content-type'])

        # Decode each message with the declared charset and unescape the
        # literal '\n' sequences PO strings use for newlines (Python 2:
        # unicode() builtin).
        for msg in result.items():
            msgid = unicode(''.join(msg[0]), charset)
            msgid = msgid.replace('\\n', '\n')
            msgstr = unicode(''.join(msg[1][1]), charset)
            msgstr = msgstr.replace('\\n', '\n')
            self.domain.addMessage(msgid, msgstr, language)
def extractCharset(header):
    """Return the charset named in a Content-Type header value, lower-cased.

    When no 'charset=' marker is present the whole header is returned
    lower-cased, matching the original split(...)[-1] behaviour.
    """
    _, _, charset = header.rpartition('charset=')
    return charset.lower()
def parserHeaders(headers_text):
    """Parse a gettext PO header block into a {lower-cased-name: value} dict.

    *headers_text* is the raw msgstr of the empty msgid, in which header
    lines are separated by the literal two-character escape '\\n'.
    """
    headers = {}
    for line in headers_text.split('\\n'):
        # partition() keeps any further colons (e.g. the time in
        # 'PO-Revision-Date: 2005/08/30 21:50') inside the value; the
        # original ''.join(line.split(':')[1:]) silently dropped them.
        name, _, value = line.partition(':')
        headers[name.lower()] = value
    return headers
def parseGetText(content):
    """Parse PO-file lines into (COM, MSGID, MSGSTR, trans).

    *content* is a list of raw lines.  ``trans`` maps a tuple of msgid
    string parts to ``(comment_lines, msgstr_parts)``; COM/MSGID/MSGSTR are
    the accumulators of the last entry processed.  Raises ParseError with
    the state number and 1-based line on malformed input.

    State machine: 0 = between entries, 1 = reading comments, 2 = reading a
    (possibly multi-line) msgid, 3 = reading a (possibly multi-line) msgstr.
    """
    # The regular expressions
    com = re.compile('^#.*')
    msgid = re.compile(r'^ *msgid *"(.*?[^\\]*)"')
    msgstr = re.compile(r'^ *msgstr *"(.*?[^\\]*)"')
    re_str = re.compile(r'^ *"(.*?[^\\])"')
    blank = re.compile(r'^\s*$')

    trans = {}
    pointer = 0
    state = 0
    COM, MSGID, MSGSTR = [], [], []
    while pointer < len(content):
        line = content[pointer]
        #print 'STATE:', state
        #print 'LINE:', line, content[pointer].strip()
        if state == 0:
            # Start of a new entry: reset the accumulators.
            COM, MSGID, MSGSTR = [], [], []
            if com.match(line):
                COM.append(line.strip())
                state = 1
                pointer = pointer + 1
            elif msgid.match(line):
                MSGID.append(msgid.match(line).group(1))
                state = 2
                pointer = pointer + 1
            elif blank.match(line):
                pointer = pointer + 1
            else:
                raise ParseError(0, pointer + 1)
        elif state == 1:
            # Collecting leading comments until the msgid appears.
            if com.match(line):
                COM.append(line.strip())
                state = 1
                pointer = pointer + 1
            elif msgid.match(line):
                MSGID.append(msgid.match(line).group(1))
                state = 2
                pointer = pointer + 1
            elif blank.match(line):
                pointer = pointer + 1
            else:
                raise ParseError(1, pointer + 1)
        elif state == 2:
            # Collecting msgid continuation strings until msgstr appears.
            if com.match(line):
                COM.append(line.strip())
                state = 2
                pointer = pointer + 1
            elif re_str.match(line):
                MSGID.append(re_str.match(line).group(1))
                state = 2
                pointer = pointer + 1
            elif msgstr.match(line):
                MSGSTR.append(msgstr.match(line).group(1))
                state = 3
                pointer = pointer + 1
            elif blank.match(line):
                pointer = pointer + 1
            else:
                raise ParseError(2, pointer + 1)
        elif state == 3:
            # Collecting msgstr continuations; a comment or a new msgid
            # terminates the entry (note: pointer is NOT advanced then, so
            # the same line is re-examined in state 0).
            if com.match(line) or msgid.match(line):
                # print "\nEn", language, "detected", MSGID
                trans[tuple(MSGID)] = (COM, MSGSTR)
                state = 0
            elif re_str.match(line):
                MSGSTR.append(re_str.match(line).group(1))
                state = 3
                pointer = pointer + 1
            elif blank.match(line):
                pointer = pointer + 1
            else:
                raise ParseError(3, pointer + 1)

    # the last also goes in
    if tuple(MSGID):
        trans[tuple(MSGID)] = (COM, MSGSTR)

    return COM, MSGID, MSGSTR, trans
# PO-file boilerplate written at the top of every export; the three %s
# slots are (revision date, language, domain), all UTF-8 encoded.
_file_header = '''
msgid ""
msgstr ""
"Project-Id-Version: Zope 3\\n"
"PO-Revision-Date: %s\\n"
"Last-Translator: Zope 3 Gettext Export Filter\\n"
"Zope-Language: %s\\n"
"Zope-Domain: %s\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
'''

# Template for one exported message pair: (msgid, msgstr).
_msg_template = '''
msgid "%s"
msgstr "%s"
'''
| [
"chris@thegermanfriday.com"
] | chris@thegermanfriday.com |
b9eb103c12f4f8dbd794624d3cde08e4f5778d08 | ed346f01861dc28a4c5ccb065ae1e1ea6131fa4d | /file_samples/test1.py | 75bc3bee2e57c83f87b9f691cb0074bac667fe0c | [] | no_license | lucabianco78/QCBsciprolab | 69122534138ad32466c674b298b22421566f9817 | 9afc46563559acf5b75f3a8e48ad90e1b921bc1a | refs/heads/master | 2022-07-27T23:30:05.964541 | 2020-09-11T18:43:33 | 2020-09-11T18:43:33 | 148,633,966 | 0 | 0 | null | 2022-07-06T19:53:17 | 2018-09-13T12:25:39 | Jupyter Notebook | UTF-8 | Python | false | false | 1,781 | py | """exercise2.py"""
from Bio import SeqIO
import matplotlib.pyplot as plt
def trimRead(read, minQ):
    """Return *read* with the 3' tail of bases whose quality is < *minQ* removed.

    Scans from the 3' end towards 5' and keeps everything up to AND
    including the last base whose Phred quality is >= minQ; when no base
    qualifies an empty read is returned.
    """
    values = read.letter_annotations["phred_quality"]
    ind = len(read) - 1
    while ind >= 0:
        if values[ind] >= minQ:
            break
        ind -= 1
    # ind now points at the last base passing the threshold (or -1 if none
    # does).  Slice inclusively: the original 0:ind wrongly dropped that
    # last good base as well.
    return read[0:ind + 1]
def cleanFile(in_file,out_file,minQ, minLen):
    """Quality-trim every read in the FASTQ *in_file* with trimRead(minQ)
    and write the reads longer than *minLen* to *out_file* (FASTQ).
    Prints the input and output read counts."""
    records = []
    cnt = 0
    for seq_record in SeqIO.parse(in_file, "fastq"):
        cnt += 1
        s=trimRead(seq_record,minQ)
        # Keep only reads that are still long enough after trimming.
        if(len(s) > minLen):
            records.append(s)
    print("Total number of reads in input: {}".format(cnt))
    print("Reads written to output file: {}".format(len(records)))
    SeqIO.write(records,out_file, "fastq")
def plotStats(in_file):
    """Plot the mean Phred quality per base position over all reads in the
    FASTQ *in_file* (matplotlib window)."""
    # dataDict: position -> list of quality values observed at that position.
    dataDict = dict()
    for seq_record in SeqIO.parse(in_file, "fastq"):
        quals = seq_record.letter_annotations["phred_quality"]
        for i in range(0,len(quals)):
            if(i not in dataDict):
                dataDict[i] = [quals[i]]
            else:
                dataDict[i].append(quals[i])
    # Mean quality per position; keys are the contiguous range 0..max_len-1.
    vals = []
    for el in range(len(dataDict)):
        vals.append(sum(dataDict[el])/len(dataDict[el]))

    plt.plot(vals)
    plt.xlabel("Position")
    plt.ylabel("Quality value")
    plt.show()
    plt.close()
# Trim the sample file (min quality 32, min length 50) and compare the
# per-position quality profiles before and after.
myfile = "test_reads_75k.fastq"
outfile = "filtered_reads_75k.fastq"
cleanFile(myfile, outfile, 32,50)
print("Original file plot:")
plotStats(myfile)
print("Trimmed file plot:")
plotStats(outfile)
| [
"luca.bianco@fmach.it"
] | luca.bianco@fmach.it |
94e3ba329a0abb5efbeeda76902d599c27d72984 | c53920dc6c16603a40a85c7dd1de97c4da57c440 | /utils/redis-tester.py | 27f5d4a14e310c6609193938ef42437c793293de | [] | no_license | GESkunkworks/fhid | 7ddf3a1dc7e6c7da42ff9b93180c719aac120000 | ee4b6b5ce7b7453033bfea5e15c876f0acc8c235 | refs/heads/master | 2021-04-03T01:51:08.650236 | 2018-03-12T03:00:01 | 2018-03-12T03:00:01 | 124,823,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import redis
import sys
# Redis host to query is the first CLI argument (Python 2 script).
rhost = sys.argv[1]

r = redis.StrictRedis(host=rhost, port=6379, db=0)
#for key in r.scan_iter():
#    print key
# Fetch and print the value stored under the key given as the second argument.
print r.get(sys.argv[2])
| [
"rendicott@gmail.com"
] | rendicott@gmail.com |
6ae1a48e9a7af5f5440306d9c52f22ef4f875517 | b4181f6a5b222c5cb3591f3a3ed20be9900265f2 | /blog/models.py | c5e9c73b859fe8f573843e91a15f9a7e9ae8bf11 | [] | no_license | emilia-f/my-first-blog | e6d865abf3aa40d4c9553625879b78a74cbe01e4 | 3af1505e4d9849d98980f89b7f3f77a21476e70c | refs/heads/master | 2021-09-01T01:09:06.203354 | 2017-12-24T01:19:57 | 2017-12-24T01:19:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post belonging to a Django auth user.

    ``created_date`` defaults to the creation time; ``published_date``
    stays empty until publish() is called.
    """
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default = timezone.now)
    published_date = models.DateTimeField(blank = True, null = True)

    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
| [
"emfrelek@gmail.com"
] | emfrelek@gmail.com |
2de938b9417aa1093845669a1869b97f19c4571b | 126498605720cca0ec70cc2f20aec531687d9fde | /train.py | 6efaf2615913c87740138ff2f556cb071058743a | [] | no_license | laket/deconv_cifar10 | 9b7fc120b51effdd6dac3c546da5e41b4bda47d7 | 46b46d52e2f387784952c4e9c67d5c7105ddefb4 | refs/heads/master | 2020-07-02T07:24:38.618057 | 2016-09-11T02:26:02 | 2016-09-11T02:26:02 | 67,860,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | #!/usr/bin/python
import os
import numpy as np
import tensorflow as tf
import mnist_input
import model
# Command-line flags: where to write TensorBoard event logs and checkpoints.
tf.app.flags.DEFINE_string('dir_log', './log',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_string('dir_parameter', './parameter',
                           """Directory where to write parameters""")

FLAGS = tf.app.flags.FLAGS
def get_opt(loss, global_step):
    """Build the training op for *loss*; return (lr_tensor, train_op)."""
    # Exponentially decayed schedule: 0.1 multiplied by 0.1 every 5000 steps.
    lr = tf.train.exponential_decay(0.1,
                                    global_step,
                                    5000,
                                    0.1,
                                    staircase=True)

    #opt = tf.train.AdamOptimizer(0.01)
    # NOTE(review): the decayed `lr` above is only summarised and NOT fed to
    # the optimizer, which uses a fixed 0.01 -- the logged learning rate does
    # not match the one actually applied.  Confirm whether this is intended.
    opt = tf.train.MomentumOptimizer(0.01, momentum=0.95)
    opt_op = opt.minimize(loss, global_step=global_step)

    tf.scalar_summary("lr", lr)

    return lr, opt_op
def train():
    """Build the model graph and run the training loop, periodically
    writing summaries and checkpoints (legacy TF 0.x API, Python 2)."""
    global_step = tf.Variable(0, trainable=False)
    image, label = mnist_input.train_input()
    network = model.Network()
    logits = network.inference(image, is_train=True)

    # Histogram summaries for every trainable variable.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    entropy, loss = model.get_loss(label, logits)
    lr, opt = get_opt(loss, global_step)

    saver = tf.train.Saver(tf.trainable_variables())
    summary_op = tf.merge_all_summaries()

    # Grow GPU memory on demand instead of grabbing it all at once.
    gpu_options = tf.GPUOptions(allow_growth=True)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        init = tf.initialize_all_variables()
        sess.run(init)

        summary_writer = tf.train.SummaryWriter("log", sess.graph)

        # Input pipeline threads (queue-based input of the TF 0.x era).
        tf.train.start_queue_runners(sess=sess)

        for num_iter in range(1,1000000):
            value_entropy, value_loss, value_lr, _ = sess.run([entropy, loss, lr, opt])

            # Log + summaries every 100 steps; checkpoint every 1000 steps.
            if num_iter % 100 == 0:
                print "lr = {} entropy = {} loss = {}".format(value_lr, value_entropy, value_loss)
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, num_iter)
            if num_iter % 1000 == 0:
                checkpoint_path = os.path.join(FLAGS.dir_parameter, 'model.ckpt')
                saver.save(sess, checkpoint_path,global_step=num_iter)
saver.save(sess, checkpoint_path,global_step=num_iter)
def main(argv=None):  # pylint: disable=unused-argument
    """Reset the output directories, prepare the dataset and start training."""
    # Wipe any previous run's outputs so logs and checkpoints start fresh.
    # (Previously the same four lines were duplicated per directory.)
    for directory in (FLAGS.dir_log, FLAGS.dir_parameter):
        if tf.gfile.Exists(directory):
            tf.gfile.DeleteRecursively(directory)
        tf.gfile.MakeDirs(directory)
    mnist_input.init()
    train()
if __name__ == '__main__':
tf.app.run()
| [
"laket72@gmail.com"
] | laket72@gmail.com |
6009fe56a5b567eb3751301b21273398f872f28d | b13c57843cb8886c6f5d630ca099ad9130b26f25 | /python/장고/first.py | 94cc29b0619ffd718f3cfc6ee9a510900562b741 | [] | no_license | schw240/07.27-12.1_CLOUD | 6b563318f7208b843a13634a1cf46206197d6dfc | 8b4dc2d31e5d2ba96bde143116aba3ba0dad7a49 | refs/heads/master | 2023-03-25T15:44:03.555567 | 2021-03-30T02:09:32 | 2021-03-30T02:09:32 | 282,791,349 | 4 | 0 | null | 2021-03-19T15:00:00 | 2020-07-27T04:10:56 | Jupyter Notebook | UTF-8 | Python | false | false | 1,297 | py | from http.server import BaseHTTPRequestHandler, HTTPServer
import datetime
class HelloHandler(BaseHTTPRequestHandler):
    """Tiny demo HTTP handler: a home page plus two fixed sub-pages."""

    def _reply(self, status, body, content_type=None):
        # Shared response helper: status line, optional content-type header,
        # then the UTF-8 encoded body.
        self.send_response(status)
        if content_type is not None:
            self.send_header("content-type", content_type)
        self.end_headers()
        self.wfile.write(body.encode('utf-8'))

    def do_GET(self):
        """Dispatch GET requests by exact path; unknown paths get a 404."""
        print(self.path)
        if self.path == '/':
            html = f"""
            <html>
            <head>
            <title>나의홈페이지</title>
            </head>
            <body>
            <h1>안녕하세요~ 저의 웹사이트에 오신걸 환영합니다.</h1>
            <h2>{datetime.datetime.now()}</h2>
            </body>
            </html>
            """
            self._reply(200, html, "text/html; charset=UTF-8")
        elif self.path == '/my':
            self._reply(200, "MyPage!")
        elif self.path == '/portfolio':
            self._reply(200, "Portfolio!")
        else:
            self._reply(404, "404")
if __name__ == '__main__':
    # Bind to all interfaces on port 8888 and serve until interrupted.
    httpd = HTTPServer(('', 8888), HelloHandler)
    print("Start Server")
    httpd.serve_forever()
"schw240@gmail.com"
] | schw240@gmail.com |
0457fcf2f48044c2c99b0c8138cc9dbf7eefd484 | a7e4627576f578213ef7801d621b80ee27d4a937 | /~django/project/lib/python3.5/shutil.py | d3692eda1cafac818019c310ab5bdca617c54c9b | [] | no_license | sejutisarkar/django-basics | 9ee4aa94b63ea5477b6333cc35213efbc1ae8d4b | 0905a5c4ac645551aaf1577713f417929130ac81 | refs/heads/master | 2021-05-01T07:51:52.075798 | 2018-02-11T20:57:51 | 2018-02-11T20:57:51 | 121,166,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py | //usr/lib/python3.5/shutil.py | [
"sejuti.dancinggirl8@gmail.com"
] | sejuti.dancinggirl8@gmail.com |
666734fbab3cb22c4ae127e2c5eb2dfdc12998ad | e8f99a162207cba82d4e0f969d7bcdb2b9d8b522 | /bilibili/__init__.py | 8575f7bf75a23b0ea81613856cde28d78cc161cf | [] | no_license | TesterCC/Python3Scripts | edb5446278ebf13edb64336001081941ca27d67d | 58be67e1ffc74ef50289a885aa4ad05f58e2c383 | refs/heads/master | 2023-08-30T21:16:38.328045 | 2023-08-17T11:23:08 | 2023-08-17T11:23:08 | 93,401,996 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '2020-04-22 06:50' | [
"testerlyx@foxmail.com"
] | testerlyx@foxmail.com |
c193d93a056d78602d6b3690c786a643b00a4770 | 9d5fd8a897ae597425e2182760103eee2bdba514 | /macroForPy.py | d9c1654a3d9cc32b0a0e6fb2d19d4df4c58109ae | [] | no_license | AlexandrDragunkin/ARLN_PROTO | 29bbdbafa127e65e1de62b382eda0023bf573a4c | e1b0296f91eb6534402691cbd2bfeb784e642620 | refs/heads/master | 2021-08-30T13:59:56.140407 | 2017-12-18T07:09:25 | 2017-12-18T07:09:25 | 114,336,746 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 1,788 | py | # -*- coding: cp1251 -*-
#-------------------------------------------------------------------------------
# Name: macroForPy
# Purpose: Вызов макросов и все что с ним связано для python 3.3
#
# Author: Aleksandr Dragunkin
#
# Created: 15.01.2014
# Copyright: (c) GEOS 2013 http://k3info.ru/
# Licence: FREE
#-------------------------------------------------------------------------------
import k3
def _ProtoPath():
    '''Return the current value of the K3 global variable ``ProtoPath``.'''
    proto_path_var = k3.GlobalVar('ProtoPath')
    return proto_path_var.value
def _k3files():
    '''Return the expanded value of the K3 system variable ``k3files``.'''
    expanded = k3.mpathexpand("<k3files>")
    return expanded
def setk3Var(vt=[]):
    '''Convert a list of plain values into a list of K3 variables, each
    paired with the ``k3.k_byref`` flag.

    NOTE(review): the mutable default ``vt=[]`` is only read here, never
    mutated, but remains a Python pitfall worth cleaning up.
    '''
    def _lset(v):
        # Wrap a single value according to its runtime type.
        t = k3.Var()
#        print(type(v))
        if type(v) in [str, int, float]:
            # Plain scalars are copied into a fresh k3.Var.
            t.value = v
            return (k3.k_byref, t)
        elif type(v) == k3.VarArray:
            # Arrays are passed through by reference, not copied.
            t = v
#            return t
            return (k3.k_byref, t)
        elif isinstance(v,k3.K3Obj):
            t.value = v
            return (k3.k_byref, t)
        elif type(v) == k3.Var:
            # NOTE(review): if k3.Var subclasses k3.K3Obj this branch is
            # unreachable (the isinstance check above matches first) --
            # verify the ordering against the k3 API.
            t = v
            return (k3.k_byref, t)
        else:
            # Unsupported types yield None entries in the result list.
            return None
    return [_lset(x) for x in vt]
def runMacro(Name, v=None, path=None):
    '''Run the K3 macro *Name* from directory *path* with parameters *v*.

    Defaults are now resolved at call time: *v* falls back to an empty
    list and *path* to the current prototype directory.  This fixes two
    pitfalls of the previous signature: a mutable default argument, and
    ``path=_ProtoPath()`` being evaluated once at import time (so later
    changes to the ``ProtoPath`` global were ignored).
    '''
    if v is None:
        v = []
    if path is None:
        path = _ProtoPath()
    return k3.macro(path + Name, setk3Var(v), k3.k_done)
"alexandr69@gmail.com"
] | alexandr69@gmail.com |
6669fed033f6c0a877c6ea0aa6ed3f4b5bfd00f7 | 46f56f91f34ce0ab802e84adee57a89648f95497 | /apps/common/__init__.py | f47efb6dd517928297e440f36f2a4e57d331a21b | [] | no_license | chengh0/python-web | d9dd82df6986962a5be7541b990f194d1533a6cd | 06e7e6ae03f9c4f2da50a452d88187521ee39354 | refs/heads/master | 2020-04-04T16:12:38.879314 | 2018-11-04T10:54:43 | 2018-11-04T10:54:43 | 156,068,832 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from .views import bp | [
"chengchen6520@gmail.com"
] | chengchen6520@gmail.com |
e01d4438984b3f186ab5451f73d01832eba665d9 | 09fa79a4848fb76f25365b0750986d2893bc1b44 | /mispell_dict.py | 0b4914646f1c210f1402cb24f1e41002e863484a | [] | no_license | WillDng/qa_project | a6938e609fce85ed20efbc89de1b2ae01fa3e602 | 79f90c8c3f6cc0833f5e0b789536bcb31fb69b77 | refs/heads/master | 2022-12-06T00:39:29.148118 | 2020-05-03T17:20:29 | 2020-05-03T17:20:29 | 243,523,425 | 0 | 1 | null | 2022-11-22T05:30:14 | 2020-02-27T13:16:11 | Jupyter Notebook | UTF-8 | Python | false | false | 2,513 | py | mispell_dict = {"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"couldnt" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"doesnt" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"havent" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"shouldnt" : "should not",
"that's" : "that is",
"thats" : "that is",
"there's" : "there is",
"theres" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"theyre": "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not",
"tryin'":"trying"} | [
"46746073+WillDng@users.noreply.github.com"
] | 46746073+WillDng@users.noreply.github.com |
601e9c252ead9f43b5eb6a69bc6c756d5beda0e7 | 8912dca6ceb0e7bba5fde7df56a17632a736a0a4 | /calibration/pivot/pivot.py | 259a22087a8ae6822d1e5a2a5b21f58c54d484bf | [] | no_license | maximvarenov/catkin_ws | 57bdec503a5f0fdffd6ba4b4fd56203d7e6c6d5d | 282914721b4a5495aac9b1519ebabfe7874dd685 | refs/heads/main | 2023-09-01T05:30:13.958797 | 2021-10-01T10:59:41 | 2021-10-01T10:59:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | import os
import sys
import numpy as np
import csv
def pivot_calibration(transforms):
    """Pivot calibration of a tracked pointer from a list of 4x4 poses.

    For each pose (R_i, t_i) of the tool marker, the fixed tip satisfies
    R_i @ p_tip + t_i = p_pivot.  Stacking these as [R_i | -I] x = t_i and
    solving in the least-squares sense yields x = -[p_tip; p_pivot].

    Parameters
    ----------
    transforms : iterable of (4, 4) ndarray
        Homogeneous marker poses recorded while pivoting the pointer.

    Returns
    -------
    p_t : (3,) ndarray
        Tip offset expressed in the tool (marker) frame.
    T : (4, 4) ndarray
        Identity rotation with ``p_t`` as translation (tip calibration
        matrix).
    x : tuple
        Raw ``numpy.linalg.lstsq`` result; ``-x[0][3:6]`` is the pivot
        point in tracker coordinates.

    Notes
    -----
    Removes dead code from the original (an unused loop counter and a
    discarded ``p_t`` pre-allocation) and builds the system with block
    operations instead of row-by-row appends; the solved system and all
    returned shapes are unchanged.
    """
    A_blocks = []
    b_blocks = []
    for pose in transforms:
        R = pose[:3, :3]
        t = pose[:3, 3]
        # One 3x6 block [R | -I] per pose, with t as the right-hand side.
        A_blocks.append(np.hstack([R, -np.eye(3)]))
        b_blocks.append(t)
    A = np.vstack(A_blocks)
    # Column vector, matching the (3N, 1) shape of the original system so
    # the returned lstsq result has the same layout for callers.
    b = np.concatenate(b_blocks).reshape(-1, 1)
    x = np.linalg.lstsq(A, b, rcond=None)
    p_t = -x[0][:3].ravel()
    T = np.eye(4)
    T[:3, 3] = p_t
    return p_t, T, x
if __name__ == "__main__":
    np.set_printoptions(suppress=True)
    # Each line of the input file holds a comma-separated, flattened
    # 3x4 pose matrix; promote each to a homogeneous 4x4 transform.
    transforms = []
    with open('./marker_3_pointer_pivot.txt', 'r') as csvfile:
        for row in csv.reader(csvfile):
            pose = np.eye(4)
            pose[:3, :4] = np.loadtxt(row, delimiter=',').reshape((3, 4))
            transforms.append(pose)
    p_t, T, x = pivot_calibration(transforms)
    print(x)
    print('Calibtration matrix T')
    print(T)
    print('position of tip')
    print(-x[0][3], -x[0][4], -x[0][5])
"noreply@github.com"
] | noreply@github.com |
018a186845663f611cb74d50a4a07985de62ac46 | 1a093cb59320db7327d665910d8ed36ea5bba95b | /banana/analysis/mri/dwi.py | 06c6f5ca8cf63784e5d819b09cb611a69248c58d | [
"Apache-2.0"
] | permissive | MonashBI/banana | 23bfa1aff12fe0ded709c99679a1c0d9687e4ffa | 37364243b520ab14ac1243005dbd465f824542b4 | refs/heads/master | 2022-07-23T08:13:33.191149 | 2022-06-15T02:17:18 | 2022-06-15T02:17:18 | 134,526,663 | 3 | 3 | Apache-2.0 | 2019-11-13T23:00:15 | 2018-05-23T06:53:38 | Python | UTF-8 | Python | false | false | 49,248 | py | import os
from logging import getLogger
import tempfile
import subprocess as sp
from nipype.interfaces.utility import Merge, IdentityInterface
from nipype.interfaces.fsl import (
TOPUP, ApplyTOPUP, BET, FUGUE, Merge as FslMerge)
from nipype.interfaces.utility import Merge as merge_lists
from nipype.interfaces.fsl.epi import PrepareFieldmap, EddyQuad # , EddySquad
from nipype.interfaces.mrtrix3 import ResponseSD, Tractography
from nipype.interfaces.mrtrix3.utils import BrainMask, TensorMetrics
from nipype.interfaces.mrtrix3.reconst import (
FitTensor, ConstrainedSphericalDeconvolution)
# from nipype.workflows.dwi.fsl.tbss import create_tbss_all
# from banana.interfaces.noddi import (
# CreateROI, BatchNODDIFitting, SaveParamsAsNIfTI)
from nipype.interfaces import fsl, mrtrix3, utility
from arcana.utils.interfaces import MergeTuple, Chain
from arcana.data import FilesetSpec, InputFilesetSpec
from arcana.utils.interfaces import SelectSession
from arcana.analysis import ParamSpec, SwitchSpec
from arcana.exceptions import ArcanaMissingDataException, ArcanaNameError
from arcana.data.file_format import pdf_format
from banana.interfaces.motion_correction import GenTopupConfigFiles
from banana.interfaces.mrtrix import (
DWIPreproc, MRCat, ExtractDWIorB0, MRMath, DWIBiasCorrect, DWIDenoise,
MRCalc, DWIIntensityNorm, AverageResponse, DWI2Mask, MergeFslGrads)
from banana.requirement import (
fsl_req, mrtrix_req, ants_req)
from banana.interfaces.mrtrix import MRConvert, ExtractFSLGradients
from banana.analysis import AnalysisMetaClass
from banana.interfaces.motion_correction import (
PrepareDWI, AffineMatrixGeneration)
from banana.interfaces.dwi import TransformGradients, SelectShell
from banana.interfaces.utility import AppendPath
from banana.analysis.base import Analysis
from banana.bids_ import BidsInputs, BidsAssocInputs
from banana.exceptions import BananaUsageError
from banana.citation import (
mrtrix_cite, fsl_cite, eddy_cite, topup_cite, distort_correct_cite,
n4_cite, dwidenoise_cites, eddy_repol_cite)
from banana.file_format import (
mrtrix_image_format, nifti_gz_format, nifti_gz_x_format, fsl_bvecs_format,
fsl_bvals_format, text_format, dicom_format, eddy_par_format,
mrtrix_track_format, motion_mats_format, text_matrix_format,
directory_format, csv_format, zip_format, STD_IMAGE_FORMATS, json_format)
from .base import MriAnalysis
from .epi import EpiSeriesAnalysis, EpiAnalysis
logger = getLogger('banana')
class DwiAnalysis(EpiSeriesAnalysis, metaclass=AnalysisMetaClass):
desc = "Diffusion-weighted MRI contrast"
add_data_specs = [
InputFilesetSpec('anat_5tt', mrtrix_image_format,
desc=("A co-registered segmentation image taken from "
"freesurfer output and simplified into 5 tissue"
" types. Used in ACT streamlines tractography"),
optional=True),
InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,
desc=("Co-registered freesurfer recon-all output. "
"Used in building the connectome")),
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),
FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,
'series_coreg_pipeline',
desc=("The gradient directions coregistered to the "
"orientation of the coreg reference")),
FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',
desc=("Parameters used by Eddy preprocessing tool")),
FilesetSpec('eddy_qc', zip_format, 'preprocess_pipeline',
desc=("QC output generated by Eddy preprocessing tool")),
FilesetSpec('eddy_qc_summary', json_format, 'eddy_qc_summary_pipeline',
desc=("Study-wise database containing quality metrics and "
"data info."), frequency='per_visit'),
FilesetSpec('eddy_qc_report', pdf_format, 'eddy_qc_summary_pipeline',
desc=("Study-wise database containing quality metrics and "
"data info."), frequency='per_visit'),
FilesetSpec('noise_residual', mrtrix_image_format,
'preprocess_pipeline',
desc=("")),
FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',
desc=("")),
FilesetSpec('tensor_residual', nifti_gz_format,
'residual_pipeline',
desc=("The residual signal after the tensor has been "
"fit to the signal")),
FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('wm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('gm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('csf_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('avg_response', text_format, 'average_response_pipeline',
desc=("")),
FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('norm_intensity', mrtrix_image_format,
'intensity_normalisation_pipeline',
desc=("")),
FilesetSpec('norm_intens_fa_template', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_dataset',
desc=("")),
FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_dataset',
desc=("")),
FilesetSpec('global_tracks', mrtrix_track_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('wm_mask', mrtrix_image_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('connectome', csv_format, 'connectome_pipeline',
desc=(""))]
add_param_specs = [
ParamSpec('pe_dir', None, dtype=str,
desc=("Phase-encoding direction of DW series")),
ParamSpec('intra_moco_parts', 0, dtype=int,
desc=("Number of partitions within a volume to motion "
"correct w.r.t the volume. If == 0, intra-volume MoCo "
"is disabled. Intra-volume MoCo requires slice timings"
" to be found in 'series' header")),
SwitchSpec('eddy_moco_by_suscep', False,
desc="Use susceptibility to determine motion correction"),
SwitchSpec('force_shelled', False,
desc=("Force eddy to treat gradient encoding scheme as "
"being shelled")),
ParamSpec('eddy_model', 'none',
choices=('none', 'linear', 'quadratic'),
desc=("Model for how diffusion gradients generate eddy "
"currents.")),
ParamSpec('tbss_skel_thresh', 0.2,
desc=("")),
ParamSpec('fsl_mask_f', 0.25,
desc=("")),
ParamSpec('bet_robust', True,
desc=("")),
ParamSpec('bet_f_threshold', 0.2,
desc=("")),
ParamSpec('bet_reduce_bias', False,
desc=("")),
ParamSpec('num_global_tracks', int(1e9),
desc=("")),
ParamSpec('global_tracks_cutoff', 0.05,
desc=("")),
SwitchSpec('preproc_denoise', False,
desc=("")),
SwitchSpec('response_algorithm', 'tax',
('tax', 'dhollander', 'msmt_5tt'),
desc=("")),
ParamSpec('num_shells', None, desc=('Number of b-value shells')),
MriAnalysis.param_spec('bet_method').with_new_choices('mrtrix'),
SwitchSpec('reorient2std', False,
desc=(""))]
primary_bids_input = BidsInputs(
spec_name='series', type='dwi',
valid_formats=(nifti_gz_x_format, nifti_gz_format))
default_bids_inputs = [primary_bids_input,
BidsAssocInputs(
spec_name='bvalues',
primary=primary_bids_input,
association='grads',
type='bval',
format=fsl_bvals_format),
BidsAssocInputs(
spec_name='grad_dirs',
primary=primary_bids_input,
association='grads',
type='bvec',
format=fsl_bvecs_format),
BidsAssocInputs(
spec_name='reverse_phase',
primary=primary_bids_input,
association='epi',
format=nifti_gz_format,
drop_if_missing=True)]
RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5
primary_scan_name = 'series'
    def b_shells(self):
        """Return the sorted, unique b-values (rounded to the nearest 10)
        across all sessions of the study.

        Reads them from the 'bvalues' input if provided, otherwise extracts
        them from the 'series' headers by shelling out to MRtrix's
        ``mrconvert``; raises BananaUsageError if neither input exists.
        """
        bpaths = []
        if 'bvalues' in self.input_names:
            bpaths = [f.path for f in self.spec('bvalues').slice]
        elif 'series' in self.input_names:
            mrtrix_ver = self.environment.satisfy(mrtrix_req.v(3.0))
            # NOTE(review): this temporary directory is never removed.
            tmp_dir = tempfile.mkdtemp()
            self.environment.load(mrtrix_ver)
            try:
                bpaths = []
                for fileset in self.spec('series').slice:
                    bpath = os.path.join(
                        tmp_dir, '{}__{}'.format(fileset.subject_id,
                                                 fileset.visit_id))
                    try:
                        # NOTE(review): shell=True with interpolated paths --
                        # fine for trusted repositories, but fragile for
                        # paths containing spaces/shell metacharacters.
                        sp.check_call(
                            ('mrconvert {} {} -export_grad_fsl {} {}'
                             .format(fileset.path,
                                     bpath + '.mif', bpath + '.bvec',
                                     bpath + '.bval')), shell=True)
                    except sp.CalledProcessError as e:
                        logger.error(
                            ("Could not extract bvalues from series file "
                             "'%s'"), fileset.path)
                        raise e
                    bpaths.append(bpath + '.bval')
            finally:
                # Always unload the MRtrix module, even on failure.
                self.environment.unload(mrtrix_ver)
        else:
            raise BananaUsageError(
                "b-values not provided to study, required to determine "
                "number of shells")
        # Round to the nearest 10 so near-identical b-values (e.g. 990 vs
        # 1000) collapse onto a single shell.
        bvalues = set()
        for bpath in bpaths:
            with open(bpath) as f:
                bvalues.update(round(float(b), -1)
                               for b in f.read().split())
        return sorted(bvalues)
@property
def multi_tissue(self):
return self.branch('response_algorithm',
('msmt_5tt', 'dhollander'))
@property
def fod_algorithm(self):
if self.parameter('response_algorithm') == 'msmt_5tt':
algorithm = 'msmt_csd'
else:
algorithm = 'csd'
return algorithm
    def fsl_grads(self, pipeline, coregistered=True):
        """Add (or fetch the already-added) node that merges the FSL-format
        gradient directions and b-values, returning the ``(node, port)``
        pair to connect downstream.

        Note: the original "docstring" was two adjacent string literals on
        separate statements, so only its first half was actually the
        docstring -- merged here.
        """
        # EAFP: reuse the node if an earlier call already added it to this
        # pipeline; ArcanaNameError means it doesn't exist yet.
        try:
            grad_fsl = pipeline.node('grad_fsl')
        except ArcanaNameError:
            # Use the coregistered gradients when the study has a coreg
            # reference and the caller did not opt out.
            if self.is_coregistered and coregistered:
                grad_dirs = 'grad_dirs_coreg'
            else:
                grad_dirs = 'grad_dirs'
            # Gradient merge node
            grad_fsl = pipeline.add(
                "grad_fsl",
                MergeFslGrads(),
                inputs={
                    'grad_dirs': (grad_dirs, fsl_bvecs_format),
                    'bvals': ('bvalues', fsl_bvals_format)})
        return (grad_fsl, 'out')
    def extract_magnitude_pipeline(self, **name_maps):
        """Pipeline producing the 'magnitude' image: the first b==0 volume
        extracted from the (uncorrected) DW series.
        """
        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts the first b==0 volume from the series",
            citations=[],
            name_maps=name_maps)

        # Keep only the b==0 volumes of the series.
        dwiextract = pipeline.add(
            'dwiextract',
            ExtractDWIorB0(
                bzero=True,
                out_ext='.nii.gz'),
            inputs={
                'in_file': ('series', nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Take the first volume along the 4th (volume) axis.
        pipeline.add(
            "extract_first_vol",
            MRConvert(
                coord=(3, 0),
                nthreads=(self.processor.cpus_per_task
                          if self.processor.cpus_per_task else 0)),
            inputs={
                'in_file': (dwiextract, 'out_file')},
            outputs={
                'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
    def preprocess_pipeline(self, **name_maps):
        """
        Preprocess the DW series: optional MP-PCA denoising, MRtrix
        dwipreproc (FSL topup/eddy, with reverse phase-encode distortion
        correction when a 'reverse_phase' input is provided), brain
        masking, ANTs-based bias-field correction, and extraction of the
        motion-corrected gradient table.

        Parameters
        ----------
        name_maps : dict
            Standard Arcana pipeline name-mapping keyword arguments
        """

        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite, eddy_repol_cite,
                      distort_correct_cite, n4_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)

        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            citations=references)

        dw_series = ('series', mrtrix_image_format)

        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(
                    nthreads=(self.processor.cpus_per_task
                              if self.processor.cpus_per_task else 0)),
                inputs={
                    'in_file': dw_series},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Calculate residual noise (original minus denoised-noise map)
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': dw_series,
                    'in2': (denoise, 'noise')})

            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract',
                    nthreads=(self.processor.cpus_per_task
                              if self.processor.cpus_per_task else 0)),
                inputs={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'noise_residual': ('out_file', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            denoised = (denoise, 'out_file')
        else:
            denoised = dw_series
        # Preproc kwargs
        preproc_inputs = {'in_file': denoised}
        preproc_kwargs = {}

        if self.provided('grad_dirs') and self.provided('bvalues'):
            # Gradient merge node
            grad_fsl = pipeline.add(
                "grad_fsl",
                MergeFslGrads(),
                inputs={
                    'grad_dirs': ('grad_dirs', fsl_bvecs_format),
                    'bvals': ('bvalues', fsl_bvals_format)})
            preproc_inputs['grad_fsl'] = (grad_fsl, 'out')
        elif self.spec('series').format not in (dicom_format,
                                                mrtrix_image_format):
            # Formats other than DICOM/MIF cannot carry the gradient table
            # in their header, so explicit inputs are mandatory.
            raise BananaUsageError(
                "Either input 'series' image needs to gradient directions and "
                "b-values in its header or they need to be explicitly "
                "provided to 'grad_dirs' and 'bvalues' {}".format(self))

        if self.provided('reverse_phase'):

            if self.provided('magnitude', default_okay=False):
                dwi_reference = ('magnitude', mrtrix_image_format)
            else:
                # Extract b=0 volumes
                dwiextract = pipeline.add(
                    'dwiextract',
                    ExtractDWIorB0(
                        bzero=True,
                        out_ext='.mif'),
                    inputs=preproc_inputs,
                    requirements=[mrtrix_req.v('3.0rc3')])

                # Get first b=0 from dwi b=0 volumes
                extract_first_b0 = pipeline.add(
                    "extract_first_vol",
                    MRConvert(
                        coord=(3, 0),
                        nthreads=(self.processor.cpus_per_task
                                  if self.processor.cpus_per_task else 0)),
                    inputs={
                        'in_file': (dwiextract, 'out_file')},
                    requirements=[mrtrix_req.v('3.0rc3')])
                dwi_reference = (extract_first_b0, 'out_file')

            merge_rphase = pipeline.add(
                'merge_rphase',
                Merge(2),
                inputs={
                    'in1': dwi_reference,
                    'in2': ('reverse_phase', mrtrix_image_format)})

            # Concatenate extracted forward rpe with reverse rpe
            combined_images = pipeline.add(
                'combined_images',
                MRCat(
                    nthreads=(self.processor.cpus_per_task
                              if self.processor.cpus_per_task else 0),
                    axis=3),
                inputs={
                    'input_files': (merge_rphase, 'out')},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Create node to extract the phase-encoding direction
            # prep_dwi = pipeline.add(
            #     'prepare_dwi',
            #     PrepareDWI(),
            #     inputs={
            #         'pe_dir': ('ped', float),
            #         'ped_polarity': ('pe_angle', float)})

            preproc_kwargs['rpe_pair'] = True

            # distortion_correction = True
            preproc_inputs['se_epi'] = (combined_images, 'out_file')
        else:
            # distortion_correction = False
            preproc_kwargs['rpe_none'] = True

        if self.parameter('pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('pe_dir')

        # Assemble the extra command-line options passed through to FSL's
        # eddy (outlier replacement, CNR maps, 2nd-level model, optional
        # intra-volume MoCo and susceptibility-driven MoCo).
        eddy_parameters = '--repol --cnr_maps --slm={}'.format(
            self.parameter('eddy_model'))
        if self.parameter('intra_moco_parts') > 0:
            eddy_parameters += ' --mporder={}'.format(
                self.parameter('intra_moco_parts'))
        if self.branch('eddy_moco_by_suscep'):
            eddy_parameters += ' --estimate_move_by_susceptibility'
        if self.branch('force_shelled'):
            eddy_parameters += ' --data_is_shelled '

        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.mif',
                eddy_parameters=eddy_parameters,
                eddyqc_all='qc-all',
                nthreads=(self.processor.cpus_per_task
                          if self.processor.cpus_per_task else 0),
                **preproc_kwargs),
            inputs=preproc_inputs,
            outputs={
                'eddy_par': ('eddy_parameters', eddy_par_format),
                'eddy_qc': ('eddyqc_all', directory_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('6.0.1')],
            wall_time=60)
        # if distortion_correction:
        #     pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')

        # Brain mask used only to constrain the bias-field estimation.
        mask = pipeline.add(
            'dwi2mask',
            BrainMask(
                out_file='brainmask.mif',
                nthreads=(self.processor.cpus_per_task
                          if self.processor.cpus_per_task else 0)),
            inputs={
                'in_file': (preproc, 'out_file')},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Create bias correct node
        bias_correct = pipeline.add(
            "bias_correct",
            DWIBiasCorrect(
                algorithm='ants'),
            inputs={
                'in_file': (preproc, 'out_file'),
                'mask': (mask, 'out_file')},
            outputs={
                'series_preproc': ('out_file', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])

        # Extract gradient directions that have been motion-corrected
        # by dwipreproc
        pipeline.add(
            "extract_moco_grad",
            ExtractFSLGradients(),
            inputs={
                'in_file': (bias_correct, 'out_file')},
            outputs={
                'grad_dirs': ('bvecs_file', fsl_bvecs_format),
                'bvalues': ('bvals_file', fsl_bvals_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Create node to reorient preproc out_file
        if self.branch('reorient2std'):
            raise NotImplementedError(
                "Reorientation to standard isn't handle at this stage because "
                "gradients would also need to be rotated accordingly by a "
                "bespoke interface")
            # reorient = pipeline.add(
            #     'fslreorient2std',
            #     fsl.utils.Reorient2Std(
            #         output_type='NIFTI_GZ'),
            #     inputs={
            #         'in_file': ('series', nifti_gz_format)},
            #     requirements=[fsl_req.v('5.0.9')])
            # reoriented = (reorient, 'out_file')
        else:
            pass
            # reoriented = ('series', nifti_gz_format)

        return pipeline
    def eddy_qc_summary_pipeline(self, **name_maps):
        """Per-visit pipeline joining each subject's Eddy QC output into a
        group-wise summary database and PDF report (FSL eddy_squad).

        NOTE(review): ``EddySquad`` is commented out of the nipype import at
        the top of this file, so running this pipeline will raise a
        NameError until that import is restored (it requires a nipype
        version that provides the interface).
        """
        pipeline = self.new_pipeline(
            name='eddy_qc_summary',
            name_maps=name_maps,
            desc=("Run group-wise analysis of Eddy QC output"))

        # Join the per-session QC directories across subjects (one summary
        # per visit, matching the spec's 'per_visit' frequency).
        pipeline.add(
            'eddy_squad',
            EddySquad(),
            inputs={
                'quad_dirs': ('eddy_qc', directory_format)},
            outputs={
                'eddy_qc_summary': ('group_db_json', json_format),
                'eddy_qc_report': ('group_qc_pdf', pdf_format)},
            requirements=[fsl_req.v('5.0.11')],
            joinfield=['quad_dirs'],
            joinsource=self.SUBJECT_ID)

        return pipeline
    def brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole-brain mask (and masked brain image) from the b0
        images using MRtrix's 'dwi2mask' when the 'bet_method' switch is
        'mrtrix'; otherwise defers to the superclass implementation.
        """
        if self.branch('bet_method', 'mrtrix'):
            pipeline = self.new_pipeline(
                'brain_extraction',
                desc="Generate brain mask from b0 images",
                citations=[mrtrix_cite],
                name_maps=name_maps)

            # Use the coregistered series when a coreg reference was given.
            if self.provided('coreg_ref'):
                series = 'series_coreg'
            else:
                series = 'series_preproc'

            # Create mask node
            # NOTE(review): gradients are fetched with coregistered=False
            # even when 'series_coreg' is selected above -- confirm the
            # non-rotated bvecs are intended here.
            masker = pipeline.add(
                'dwi2mask',
                BrainMask(
                    out_file='brain_mask.nii.gz',
                    nthreads=(self.processor.cpus_per_task
                              if self.processor.cpus_per_task else 0)),
                inputs={
                    'in_file': (series, nifti_gz_format),
                    'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
                outputs={
                    'brain_mask': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Multiply the magnitude image by the mask to get the masked
            # 'brain' output.
            merge = pipeline.add(
                'merge_operands',
                Merge(2),
                inputs={
                    'in1': ('mag_preproc', nifti_gz_format),
                    'in2': (masker, 'out_file')})

            pipeline.add(
                'apply_mask',
                MRCalc(
                    operation='multiply',
                    nthreads=(self.processor.cpus_per_task
                              if self.processor.cpus_per_task else 0)),
                inputs={
                    'operands': (merge, 'out')},
                outputs={
                    'brain': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
        else:
            pipeline = super().brain_extraction_pipeline(**name_maps)
        return pipeline
    def series_coreg_pipeline(self, **name_maps):
        """Extend the inherited series-coregistration pipeline so that the
        gradient directions are rotated by the same FSL transform as the
        image volumes, producing 'grad_dirs_coreg'.
        """
        pipeline = super().series_coreg_pipeline(**name_maps)

        # Apply coregistration transform to gradients
        pipeline.add(
            'transform_grads',
            TransformGradients(),
            inputs={
                'gradients': ('grad_dirs', fsl_bvecs_format),
                'transform': ('coreg_fsl_mat', text_matrix_format)},
            outputs={
                'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})

        return pipeline
    def intensity_normalisation_pipeline(self, **name_maps):
        """Normalise DWI intensities across the whole study using MRtrix's
        dwiintensitynorm, which requires joining every session's image and
        brain mask into a single group-wise run and then selecting this
        session's normalised output back out.

        Raises ArcanaMissingDataException for single-session studies and
        warns when fewer than RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM
        sessions are available.
        """
        if self.num_sessions < 2:
            raise ArcanaMissingDataException(
                "Cannot normalise intensities of DWI images as analysis only "
                "contains a single session")
        elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
            logger.warning(
                "The number of sessions in the analysis ({}) is less than the "
                "recommended number for intensity normalisation ({}). The "
                "results may be unreliable".format(
                    self.num_sessions,
                    self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))
        # NOTE(review): a requirement object is passed as a citation here
        # (other pipelines pass cite objects such as mrtrix_cite) -- confirm.
        pipeline = self.new_pipeline(
            name='intensity_normalization',
            desc="Corrects for B1 field inhomogeneity",
            citations=[mrtrix_req.v('3.0rc3')],
            name_maps=name_maps)

        # Convert to MIF with the gradient table embedded, as required by
        # dwiintensitynorm.
        mrconvert = pipeline.add(
            'mrconvert',
            MRConvert(
                out_ext='.mif',
                nthreads=(self.processor.cpus_per_task
                          if self.processor.cpus_per_task else 0)),
            inputs={
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'grad_fsl': self.fsl_grads(pipeline)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Pair subject and visit ids together, expanding so they can be
        # joined and chained together
        session_ids = pipeline.add(
            'session_ids',
            utility.IdentityInterface(
                ['subject_id', 'visit_id']),
            inputs={
                'subject_id': (Analysis.SUBJECT_ID, int),
                'visit_id': (Analysis.VISIT_ID, int)})

        # Set up join nodes
        join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
        join_over_subjects = pipeline.add(
            'join_over_subjects',
            utility.IdentityInterface(
                join_fields),
            inputs={
                'masks': (self.brain_mask_spec_name, nifti_gz_format),
                'dwis': (mrconvert, 'out_file'),
                'subject_ids': (session_ids, 'subject_id'),
                'visit_ids': (session_ids, 'visit_id')},
            joinsource=self.SUBJECT_ID,
            joinfield=join_fields)

        join_over_visits = pipeline.add(
            'join_over_visits',
            Chain(
                join_fields),
            inputs={
                'dwis': (join_over_subjects, 'dwis'),
                'masks': (join_over_subjects, 'masks'),
                'subject_ids': (join_over_subjects, 'subject_ids'),
                'visit_ids': (join_over_subjects, 'visit_ids')},
            joinsource=self.VISIT_ID,
            joinfield=join_fields)

        # Intensity normalization
        intensity_norm = pipeline.add(
            'dwiintensitynorm',
            DWIIntensityNorm(
                nthreads=(self.processor.cpus_per_task
                          if self.processor.cpus_per_task else 0)),
            inputs={
                'in_files': (join_over_visits, 'dwis'),
                'masks': (join_over_visits, 'masks')},
            outputs={
                'norm_intens_fa_template': ('fa_template',
                                            mrtrix_image_format),
                'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Set up expand nodes: pick this session's normalised image out of
        # the group-wise output list using the recorded subject/visit ids.
        pipeline.add(
            'expand', SelectSession(),
            inputs={
                'subject_ids': (join_over_visits, 'subject_ids'),
                'visit_ids': (join_over_visits, 'visit_ids'),
                'inlist': (intensity_norm, 'out_files'),
                'subject_id': (Analysis.SUBJECT_ID, int),
                'visit_id': (Analysis.VISIT_ID, int)},
            outputs={
                'norm_intensity': ('item', mrtrix_image_format)})

        # Connect inputs
        return pipeline
    def tensor_pipeline(self, **name_maps):
        """
        Fits the apparent diffusion tensor (DT) to each voxel of the image
        (MRtrix dwi2tensor), producing the 'tensor' derivative.
        """
        pipeline = self.new_pipeline(
            name='tensor',
            desc=("Estimates the apparent diffusion tensor in each "
                  "voxel"),
            citations=[],
            name_maps=name_maps)

        # Create tensor fit node
        # NOTE(review): predicted_signal is requested from the fit but not
        # exposed as a pipeline output here (cf. residual_pipeline).
        pipeline.add(
            'dwi2tensor',
            FitTensor(
                out_file='dti.nii.gz',
                nthreads=(self.processor.cpus_per_task
                          if self.processor.cpus_per_task else 0),
                predicted_signal='predicted.mif'),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'tensor': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])

        return pipeline
def residual_pipeline(self, **name_maps):
"""
Fits the apparrent diffusion tensor (DT) to each voxel of the image
"""
pipeline = self.new_pipeline(
name='residuals',
desc=("Calculates the residuals after fitting tensor to each "
"shell"),
citations=[],
name_maps=name_maps)
b_shells = set(self.b_shells())
b_shells.remove(0.0)
iterate_shells = pipeline.add(
'iterate_shells',
IdentityInterface(
fields=['b']))
iterate_shells.iterables = ('b', b_shells)
select_shell = pipeline.add(
'select_shell',
SelectShell(
tol=5.0),
inputs={
'target': (iterate_shells, 'b'),
'bvals': ('bvalues', fsl_bvals_format)})
merge0 = pipeline.add(
'merge_axis_n_indices',
MergeTuple(2),
inputs={
'in2': (select_shell, 'indices')})
merge0.inputs.in1 = 3
split_shells = pipeline.add(
'split_shells',
MRConvert(
out_ext='.mif'),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline),
'coord': (merge0, 'out')},
requirements=[mrtrix_req.v('3.0')])
# Create tensor fit node
tensor = pipeline.add(
'dwi2tensor',
FitTensor(
out_file='dti.nii.gz',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
predicted_signal='predicted.mif'),
inputs={
'in_file': (split_shells, 'out_file'),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0')])
merge1 = pipeline.add(
'merge_tensor_predicted',
Merge(2),
inputs={
'in1': (split_shells, 'out_file'),
'in2': (tensor, 'predicted_signal')})
residual = pipeline.add(
'residual',
MRCalc(
operation='subtract'),
inputs={
'operands': (merge1, 'out')})
max_residual = pipeline.add(
'max_residual',
MRMath(
operation='max',
axis=3),
inputs={
'in_files': (residual, 'out_file')})
merge3 = pipeline.add(
'merge_operands3',
Merge(2),
inputs={
'in1': (max_residual, 'out_file'),
'in2': (self.brain_mask_spec_name, nifti_gz_format)})
mask = pipeline.add(
'apply_mask',
MRCalc(
operation='multiply',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'operands': (merge3, 'out')},
requirements=[mrtrix_req.v('3.0rc3')])
merge_shells = pipeline.add(
'merge_shells',
MRCat(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
axis=3),
inputs={
'input_scans': (mask, 'out_file')},
outputs={
'tensor_residual': ('out_file', mrtrix_image_format)},
joinsource='iterate_shells',
joinfield=['input_scans'],
requirements=[mrtrix_req.v('3.0rc3')])
# mean = pipeline.add(
# 'mean',
# MRMath(
# operation='mean'),
# inputs={
# 'input_files': (merge_shells, 'out_file')})
# stddev = pipeline.add(
# 'stddev',
# MRMath(
# operation='std'),
# inputs={
# 'input_files': (merge_shells, 'out_file')})
return pipeline
def tensor_metrics_pipeline(self, **name_maps):
"""
Fits the apparrent diffusion tensor (DT) to each voxel of the image
"""
pipeline = self.new_pipeline(
name='fa',
desc=("Calculates the FA and ADC from a tensor image"),
citations=[],
name_maps=name_maps)
# Create tensor fit node
pipeline.add(
'metrics',
TensorMetrics(
out_fa='fa.nii.gz',
out_adc='adc.nii.gz'),
inputs={
'in_file': ('tensor', nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'fa': ('out_fa', nifti_gz_format),
'adc': ('out_adc', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def response_pipeline(self, **name_maps):
"""
Estimates the fibre orientation distribution (FOD) using constrained
spherical deconvolution
Parameters
----------
response_algorithm : str
Algorithm used to estimate the response
"""
pipeline = self.new_pipeline(
name='response',
desc=("Estimates the fibre response function"),
citations=[mrtrix_cite],
name_maps=name_maps)
# Create fod fit node
response = pipeline.add(
'response',
ResponseSD(
algorithm=self.parameter('response_algorithm'),
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'wm_response': ('wm_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Connect to outputs
if self.multi_tissue:
response.inputs.gm_file = 'gm.txt',
response.inputs.csf_file = 'csf.txt',
pipeline.connect_output('gm_response', response, 'gm_file',
text_format)
pipeline.connect_output('csf_response', response, 'csf_file',
text_format)
return pipeline
def average_response_pipeline(self, **name_maps):
"""
Averages the estimate response function over all subjects in the
project
"""
pipeline = self.new_pipeline(
name='average_response',
desc=(
"Averages the fibre response function over the project"),
citations=[mrtrix_cite],
name_maps=name_maps)
join_subjects = pipeline.add(
'join_subjects',
utility.IdentityInterface(['responses']),
inputs={
'responses': ('wm_response', text_format)},
outputs={},
joinsource=self.SUBJECT_ID,
joinfield=['responses'])
join_visits = pipeline.add(
'join_visits',
Chain(['responses']),
inputs={
'responses': (join_subjects, 'responses')},
joinsource=self.VISIT_ID,
joinfield=['responses'])
pipeline.add(
'avg_response',
AverageResponse(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_files': (join_visits, 'responses')},
outputs={
'avg_response': ('out_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def fod_pipeline(self, **name_maps):
"""
Estimates the fibre orientation distribution (FOD) using constrained
spherical deconvolution
Parameters
----------
"""
pipeline = self.new_pipeline(
name='fod',
desc=("Estimates the fibre orientation distribution in each"
" voxel"),
citations=[mrtrix_cite],
name_maps=name_maps)
# Create fod fit node
dwi2fod = pipeline.add(
'dwi2fod',
ConstrainedSphericalDeconvolution(
algorithm=self.fod_algorithm,
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
predicted_signal='predicted.mif'),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'wm_txt': ('wm_response', text_format),
'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
outputs={
'wm_odf': ('wm_odf', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.multi_tissue:
dwi2fod.inputs.gm_odf = 'gm.mif',
dwi2fod.inputs.csf_odf = 'csf.mif',
pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
text_format),
pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
text_format),
pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
nifti_gz_format),
pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
nifti_gz_format),
# Check inputs/output are connected
return pipeline
def extract_b0_pipeline(self, **name_maps):
"""
Extracts the b0 images from a DWI analysis and takes their mean
"""
pipeline = self.new_pipeline(
name='extract_b0',
desc="Extract b0 image from a DWI analysis",
citations=[mrtrix_cite],
name_maps=name_maps)
# Extraction node
extract_b0s = pipeline.add(
'extract_b0s',
ExtractDWIorB0(
bzero=True,
quiet=True),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# FIXME: Need a registration step before the mean
# Mean calculation node
mean = pipeline.add(
"mean",
MRMath(
axis=3,
operation='mean',
quiet=True,
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_files': (extract_b0s, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
# Convert to Nifti
pipeline.add(
"output_conversion",
MRConvert(
out_ext='.nii.gz',
quiet=True),
inputs={
'in_file': (mean, 'out_file')},
outputs={
'b0': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def global_tracking_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='global_tracking',
desc="Extract b0 image from a DWI analysis",
citations=[mrtrix_cite],
name_maps=name_maps)
mask = pipeline.add(
'mask',
DWI2Mask(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
tracking = pipeline.add(
'tracking',
Tractography(
select=self.parameter('num_global_tracks'),
cutoff=self.parameter('global_tracks_cutoff'),
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'seed_image': (mask, 'out_file'),
'in_file': ('wm_odf', mrtrix_image_format)},
outputs={
'global_tracks': ('out_file', mrtrix_track_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.provided('anat_5tt'):
pipeline.connect_input('anat_5tt', tracking, 'act_file',
mrtrix_image_format)
return pipeline
def intrascan_alignment_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='affine_mat_generation',
desc=("Generation of the affine matrices for the main dwi "
"sequence starting from eddy motion parameters"),
citations=[fsl_cite],
name_maps=name_maps)
pipeline.add(
'gen_aff_mats',
AffineMatrixGeneration(),
inputs={
'reference_image': ('mag_preproc', nifti_gz_format),
'motion_parameters': ('eddy_par', eddy_par_format)},
outputs={
'align_mats': ('affine_matrices', motion_mats_format)})
return pipeline
def connectome_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='connectome',
desc=("Generate a connectome from whole brain connectivity"),
citations=[],
name_maps=name_maps)
aseg_path = pipeline.add(
'aseg_path',
AppendPath(
sub_paths=['mri', 'aparc+aseg.mgz']),
inputs={
'base_path': ('anat_fs_recon_all', directory_format)})
pipeline.add(
'connectome',
mrtrix3.BuildConnectome(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_file': ('global_tracks', mrtrix_track_format),
'in_parc': (aseg_path, 'out_path')},
outputs={
'connectome': ('out_file', csv_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
class DwiRefAnalysis(EpiAnalysis, metaclass=AnalysisMetaClass):
add_data_specs = [
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True)
]
desc = ("A special analysis used in the MR-PET motion correction algorithm to"
" perform distortion correction on the reverse-phase/reference b0 "
"scans by flipping it around and using the DWI series as the "
"reference")
def preprocess_pipeline(self, **name_maps):
if self.provided('reverse_phase'):
return self._topup_pipeline(**name_maps)
else:
return super().preprocess_pipeline(**name_maps)
def _topup_pipeline(self, **name_maps):
"""
Implementation of separate topup pipeline, moved from EPI analysis as it
is only really relevant for spin-echo DWI. Need to work out what to do
with it
"""
pipeline = self.new_pipeline(
name='preprocess_pipeline',
desc=("Topup distortion correction pipeline"),
citations=[fsl_cite],
name_maps=name_maps)
reorient_epi_in = pipeline.add(
'reorient_epi_in',
fsl.utils.Reorient2Std(),
inputs={
'in_file': ('magnitude', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
reorient_epi_opposite = pipeline.add(
'reorient_epi_opposite',
fsl.utils.Reorient2Std(),
inputs={
'in_file': ('reverse_phase', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
prep_dwi = pipeline.add(
'prepare_dwi',
PrepareDWI(
topup=True),
inputs={
'pe_dir': ('ped', str),
'ped_polarity': ('pe_angle', str),
'dwi': (reorient_epi_in, 'out_file'),
'dwi1': (reorient_epi_opposite, 'out_file')})
ped = pipeline.add(
'gen_config',
GenTopupConfigFiles(),
inputs={
'ped': (prep_dwi, 'pe')})
merge_outputs = pipeline.add(
'merge_files',
merge_lists(2),
inputs={
'in1': (prep_dwi, 'main'),
'in2': (prep_dwi, 'secondary')})
merge = pipeline.add(
'FslMerge',
FslMerge(
dimension='t',
output_type='NIFTI_GZ'),
inputs={
'in_files': (merge_outputs, 'out')},
requirements=[fsl_req.v('5.0.9')])
topup = pipeline.add(
'topup',
TOPUP(
output_type='NIFTI_GZ'),
inputs={
'in_file': (merge, 'merged_file'),
'encoding_file': (ped, 'config_file')},
requirements=[fsl_req.v('5.0.9')])
in_apply_tp = pipeline.add(
'in_apply_tp',
merge_lists(1),
inputs={
'in1': (reorient_epi_in, 'out_file')})
pipeline.add(
'applytopup',
ApplyTOPUP(
method='jac',
in_index=[1],
output_type='NIFTI_GZ'),
inputs={
'in_files': (in_apply_tp, 'out'),
'encoding_file': (ped, 'apply_topup_config'),
'in_topup_movpar': (topup, 'out_movpar'),
'in_topup_fieldcoef': (topup, 'out_fieldcoef')},
outputs={
'mag_preproc': ('out_corrected', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
return pipeline
| [
"tom.g.close@gmail.com"
] | tom.g.close@gmail.com |
ad6c121ebb464764546b7cd6feb7222e79490296 | 0cfcacf8228d8ddbbf4f2b01faeaa674eb6c2829 | /trainer/models/model_simple_bn.py | ef5ba249ae4849d9edccf946216cd46cf3990ea3 | [] | no_license | FGhavamian/hydranet_battery | a7d84e6a2143be35ae60da1eb8e57841cbed5e96 | 3e1918574063513888f911a9ff6cffb8c434addc | refs/heads/master | 2023-03-20T20:33:07.166708 | 2021-03-10T06:21:40 | 2021-03-10T06:21:40 | 175,961,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py | import tensorflow as tf
from trainer.names import GRID_DIM
class ModelSimpleBN:
def __init__(self, feature_dim, target_dim_dict, filters, kernels):
self.feature_dim = feature_dim
self.target_dim_dict = target_dim_dict
self.filters = filters
self.kernels = kernels
def build(self):
feature, mask = self._hydra_input()
z = self._encoder(feature)
heads = self._heads(z)
output = self._hydra_output(heads, mask)
return tf.keras.models.Model(inputs=(feature, mask), outputs=output)
def _heads(self, z):
heads = []
for p_name, p_dim in self.target_dim_dict.items():
head = self._decoder(z, p_name)
head = tf.keras.layers.Conv2D(p_dim, 7, 1, 'same', activation='linear', name=p_name + '_head')(head)
heads.append(head)
return heads
def _encoder(self, x):
x = tf.keras.layers.Conv2D(self.filters[0], self.kernels[0], 2, 'same', activation='relu', name="encoder0")(x)
for i, (filter, kernel) in enumerate(zip(self.filters[1:], self.kernels[1:])):
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(filter, kernel, 2, 'same', activation='relu', name="encoder" + str(i+1))(x)
return x
def _decoder(self, x, mode):
x = tf.keras.layers.Conv2D(self.filters[0], self.kernels[0], 2, 'same', activation='relu', name="encoder0")(x)
for i, (filter, kernel) in enumerate(zip(self.filters[1:], self.kernels[1:])):
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2DTranspose(filter, kernel, 2, 'same', activation='relu',
name=mode + "decoder" + str(i))(x)
return x
def _hydra_input(self):
feature = tf.keras.layers.Input(shape=(GRID_DIM.y, GRID_DIM.x, self.feature_dim), name='feature')
mask = tf.keras.layers.Input(shape=(GRID_DIM.y, GRID_DIM.x, 1), name='mask')
return feature, mask
def _hydra_output(self, heads, mask):
o = tf.keras.layers.Concatenate(axis=-1)(heads)
return tf.keras.layers.Multiply(name="prediction")([o, mask])
if __name__ == '__main__':
from trainer.names import PHYSICAL_DIMS, PHYSICAL_DIMS_SCALAR
model = ModelSimpleBN(1, PHYSICAL_DIMS, [32, 16], [5, 5]).build()
model.summary()
| [
"fari.ghavamian@gmail.com"
] | fari.ghavamian@gmail.com |
1f59f228ff0e99901be7116e146557c93e7e6de9 | 8957f0b42ba945399a2eeb71f796c11c9eb35b06 | /lib/shutil.py | 9fbc746cde5e9927a18068b6eab8284c354c82f8 | [] | no_license | notro/tmp_CircuitPython_stdlib | 4de177cbb45b2209f07171c27f844c7d377dffc9 | 641727294039a9441c35ba1a1d22de403664b710 | refs/heads/master | 2020-03-27T18:26:33.544047 | 2019-02-15T20:49:34 | 2019-02-15T20:49:34 | 146,922,496 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 41,703 | py | #"""Utility functions for copying and archiving files and directory trees.
#
#XXX The functions here don't copy the resource fork or other metadata on Mac.
#
#"""
#
import os
import sys
import stat
from os.path import abspath
import fnmatch
#import collections
#import errno
#import tarfile
#
#try:
# import bz2
# del bz2
# _BZ2_SUPPORTED = True
#except ImportError:
# _BZ2_SUPPORTED = False
#
#try:
# from pwd import getpwnam
#except ImportError:
# getpwnam = None
#
#try:
# from grp import getgrnam
#except ImportError:
# getgrnam = None
#
#__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
# "copytree", "move", "rmtree", "Error", "SpecialFileError",
# "ExecError", "make_archive", "get_archive_formats",
# "register_archive_format", "unregister_archive_format",
# "get_unpack_formats", "register_unpack_format",
# "unregister_unpack_format", "unpack_archive",
# "ignore_patterns", "chown", "which", "get_terminal_size",
# "SameFileError"]
# # disk_usage is added later, if available on the platform
#
class Error(OSError):
    """Base exception for this module's high-level operations; copytree()
    raises it with a list of (src, dst, reason) tuples as its argument."""
    pass
class SameFileError(Error):
    """Raised when source and destination name the same file."""
class SpecialFileError(OSError):
    """Raised when an operation (e.g. copying) is attempted on a special
    file, such as a named pipe, which is not supported."""
class ExecError(OSError):
    """Raised when a command fails to execute."""
class ReadError(OSError):
    """Raised when an archive cannot be read."""
class RegistryError(Exception):
    """Raised when an operation on the archiving/unpacking format
    registries fails."""
#def copyfileobj(fsrc, fdst, length=16*1024):
def copyfileobj(fsrc, fdst, length=128): ###
    """Copy data from file-like object *fsrc* to file-like object *fdst*.

    Data is transferred in chunks of *length* bytes; the default chunk
    size is reduced from CPython's 16 KiB to suit small-memory targets.
    """
    while True:
        chunk = fsrc.read(length)
        if not chunk:
            return
        fdst.write(chunk)
def _samefile(src, dst):
# # Macintosh, Unix.
# if hasattr(os.path, 'samefile'):
# try:
# return os.path.samefile(src, dst)
# except OSError:
# return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
    """Copy the contents of the file *src* to *dst* and return *dst*.

    Raises SameFileError if *src* and *dst* name the same file.

    *follow_symlinks* is accepted for API compatibility with CPython
    but has no effect, as this port does not support symbolic links.
    """
    if _samefile(src, dst):
        raise SameFileError("{!r} and {!r} are the same file".format(src, dst))
    # Probe both paths.  CPython used the stat result to reject special
    # files (named pipes); that check is unavailable here, so a missing
    # file is simply ignored and surfaces later when open() fails.
    for fn in [src, dst]:
        try:
            os.stat(fn)
        except OSError:
            # File most likely does not exist
            pass
    with open(src, 'rb') as fsrc:
        with open(dst, 'wb') as fdst:
            copyfileobj(fsrc, fdst)
    return dst
#def copymode(src, dst, *, follow_symlinks=True):
# """Copy mode bits from src to dst.
#
# If follow_symlinks is not set, symlinks aren't followed if and only
# if both `src` and `dst` are symlinks. If `lchmod` isn't available
# (e.g. Linux) this method does nothing.
#
# """
# if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
# if hasattr(os, 'lchmod'):
# stat_func, chmod_func = os.lstat, os.lchmod
# else:
# return
# elif hasattr(os, 'chmod'):
# stat_func, chmod_func = os.stat, os.chmod
# else:
# return
#
# st = stat_func(src)
# chmod_func(dst, stat.S_IMODE(st.st_mode))
#
#if hasattr(os, 'listxattr'):
# def _copyxattr(src, dst, *, follow_symlinks=True):
# """Copy extended filesystem attributes from `src` to `dst`.
#
# Overwrite existing attributes.
#
# If `follow_symlinks` is false, symlinks won't be followed.
#
# """
#
# try:
# names = os.listxattr(src, follow_symlinks=follow_symlinks)
# except OSError as e:
# if e.errno not in (errno.ENOTSUP, errno.ENODATA):
# raise
# return
# for name in names:
# try:
# value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
# os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
# except OSError as e:
# if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
# raise
#else:
# def _copyxattr(*args, **kwargs):
# pass
#
#def copystat(src, dst, *, follow_symlinks=True):
# """Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
#
# If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
# only if both `src` and `dst` are symlinks.
#
# """
# def _nop(*args, ns=None, follow_symlinks=None):
# pass
#
# # follow symlinks (aka don't not follow symlinks)
# follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
# if follow:
# # use the real function if it exists
# def lookup(name):
# return getattr(os, name, _nop)
# else:
# # use the real function only if it exists
# # *and* it supports follow_symlinks
# def lookup(name):
# fn = getattr(os, name, _nop)
# if fn in os.supports_follow_symlinks:
# return fn
# return _nop
#
# st = lookup("stat")(src, follow_symlinks=follow)
# mode = stat.S_IMODE(st.st_mode)
# lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
# follow_symlinks=follow)
# try:
# lookup("chmod")(dst, mode, follow_symlinks=follow)
# except NotImplementedError:
# # if we got a NotImplementedError, it's because
# # * follow_symlinks=False,
# # * lchown() is unavailable, and
# # * either
# # * fchownat() is unavailable or
# # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW.
# # (it returned ENOSUP.)
# # therefore we're out of options--we simply cannot chown the
# # symlink. give up, suppress the error.
# # (which is what shutil always did in this circumstance.)
# pass
# if hasattr(st, 'st_flags'):
# try:
# lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
# except OSError as why:
# for err in 'EOPNOTSUPP', 'ENOTSUP':
# if hasattr(errno, err) and why.errno == getattr(errno, err):
# break
# else:
# raise
# _copyxattr(src, dst, follow_symlinks=follow)
#
def copy(src, dst, *, follow_symlinks=True):
    """Copy file data ("cp src dst") and return the destination path.

    If *dst* is a directory, the file is copied into it under the same
    basename.  Raises SameFileError if source and destination are the
    same file.  *follow_symlinks* is accepted for API compatibility but
    unused; mode bits are not copied in this port (no copymode()).
    """
    target = dst
    if os.path.isdir(target):
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target, follow_symlinks=follow_symlinks)
    return target
def copy2(src, dst, *, follow_symlinks=True):
    """Copy file data ("cp -p src dst") and return the destination path.

    In CPython this also copies all stat info via copystat(); this port
    copies only the data, making copy2() equivalent to copy().
    """
    target = dst
    if os.path.isdir(target):
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target, follow_symlinks=follow_symlinks)
    return target
def ignore_patterns(*patterns):
    """Return a callable usable as the copytree() *ignore* argument.

    The callable, given a directory path and its name list, returns the
    set of names matching any of the glob-style *patterns*.
    """
    def _ignore_patterns(path, names):
        ignored = set()
        for pattern in patterns:
            ignored.update(fnmatch.filter(names, pattern))
        return ignored
    return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
             ignore_dangling_symlinks=False):
    """Recursively copy the directory tree rooted at *src* to *dst*.

    *dst* must not already exist; it is created by this call.  Returns
    *dst*.

    *ignore*, if given, is a callable receiving the directory being
    visited and its name list (as from os.listdir()) and returning the
    names to skip; see ignore_patterns().  *copy_function* is called
    with (srcname, dstname) for each regular file and defaults to
    copy2().

    *symlinks* and *ignore_dangling_symlinks* are accepted for API
    compatibility with CPython but have no effect, as this port does
    not support symbolic links.  Directory metadata is not preserved
    (copystat() is unavailable).

    If any entry fails to copy, the walk continues and a single Error
    is raised at the end whose argument is the list of
    (src, dst, reason) tuples collected along the way.
    """
    entries = os.listdir(src)
    skip = ignore(src, entries) if ignore is not None else set()
    os.makedirs(dst)
    problems = []
    for entry in entries:
        if entry in skip:
            continue
        src_entry = os.path.join(src, entry)
        dst_entry = os.path.join(dst, entry)
        try:
            if os.path.isdir(src_entry):
                copytree(src_entry, dst_entry, symlinks, ignore,
                         copy_function)
            else:
                # copy_function raises SpecialFileError for unsupported
                # file types.
                copy_function(src_entry, dst_entry)
        except Error as err:
            # A recursive copytree() failure carries a list of reasons;
            # fold them in and keep going with the remaining entries.
            problems.extend(err.args[0])
        except OSError as why:
            problems.append((src_entry, dst_entry, str(why)))
    if problems:
        raise Error(problems)
    return dst
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
# try:
# if os.path.islink(path):
# # symlinks to directories are forbidden, see bug #1669
# raise OSError("Cannot call rmtree on a symbolic link")
# except OSError:
# onerror(os.path.islink, path, sys.exc_info())
# # can't continue even if onerror hook returns
# return
names = []
try:
names = os.listdir(path)
except OSError:
if onerror is None: ###
raise ###
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except OSError:
if onerror is None: ###
raise ###
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except OSError:
if onerror is None: ###
raise ###
onerror(os.rmdir, path, sys.exc_info())
## Version using fd-based APIs to protect against races
#def _rmtree_safe_fd(topfd, path, onerror):
# names = []
# try:
# names = os.listdir(topfd)
# except OSError as err:
# err.filename = path
# onerror(os.listdir, path, sys.exc_info())
# for name in names:
# fullname = os.path.join(path, name)
# try:
# orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
# mode = orig_st.st_mode
# except OSError:
# mode = 0
# if stat.S_ISDIR(mode):
# try:
# dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
# except OSError:
# onerror(os.open, fullname, sys.exc_info())
# else:
# try:
# if os.path.samestat(orig_st, os.fstat(dirfd)):
# _rmtree_safe_fd(dirfd, fullname, onerror)
# try:
# os.rmdir(name, dir_fd=topfd)
# except OSError:
# onerror(os.rmdir, fullname, sys.exc_info())
# else:
# try:
# # This can only happen if someone replaces
# # a directory with a symlink after the call to
# # stat.S_ISDIR above.
# raise OSError("Cannot call rmtree on a symbolic "
# "link")
# except OSError:
# onerror(os.path.islink, fullname, sys.exc_info())
# finally:
# os.close(dirfd)
# else:
# try:
# os.unlink(name, dir_fd=topfd)
# except OSError:
# onerror(os.unlink, fullname, sys.exc_info())
#
#_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
# os.supports_dir_fd and
# os.listdir in os.supports_fd and
# os.stat in os.supports_follow_symlinks)
#
def rmtree(path, ignore_errors=False, onerror=None):
    """Recursively delete a directory tree.

    If *ignore_errors* is true, failures are silently skipped.
    Otherwise, if *onerror* is set it is called for each failure with
    arguments (func, path, exc_info), where func is the os function
    that failed, path its argument, and exc_info the sys.exc_info()
    tuple.  If *ignore_errors* is false and *onerror* is None, the
    first error is raised.

    CPython's fd-based implementation (which hardens against symlink
    races) is omitted in this port; the path-based walk is always used.
    """
    if ignore_errors:
        def onerror(*args):
            pass
    return _rmtree_unsafe(path, onerror)
## Allow introspection of whether or not the hardening against symlink
## attacks is supported on the current platform
#rmtree.avoids_symlink_attacks = _use_fd_functions
#
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
sep = os.path.sep + (os.path.altsep or '')
return os.path.basename(path.rstrip(sep))
def move(src, dst):
    """Recursively move a file or directory to another location, like
    the Unix "mv" command, and return the final destination path.

    If *dst* is an existing directory, *src* is moved inside it and
    that inner destination path must not already exist.  If *dst*
    already exists but is not a directory, it may be overwritten
    depending on os.rename() semantics.

    os.rename() is tried first; if it fails (e.g. a cross-filesystem
    move), the source is copied to the destination and then removed.
    Symlink handling from CPython is omitted in this port.

    Note: when *src* and *dst* name the same file inside an existing
    directory, the rename is still performed (useful on
    case-insensitive filesystems) and None is returned, matching
    CPython's behaviour.
    """
    real_dst = dst
    if os.path.isdir(dst):
        if _samefile(src, dst):
            # We might be on a case insensitive filesystem,
            # perform the rename anyway.
            os.rename(src, dst)
            return
        real_dst = os.path.join(dst, _basename(src))
        # Bug fix: this check belongs inside the isdir branch (as in
        # CPython).  At function level it also rejected a plain
        # existing-file destination, contradicting the documented
        # os.rename() overwrite semantics above.
        if os.path.exists(real_dst):
            raise Error("Destination path '%s' already exists" % real_dst)
    try:
        os.rename(src, real_dst)
    except OSError:
        if os.path.isdir(src):
            if _destinsrc(src, dst):
                raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
            copytree(src, real_dst, symlinks=True)
            rmtree(src)
        else:
            copy2(src, real_dst)
            os.unlink(src)
    return real_dst
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
#def _get_gid(name):
# """Returns a gid, given a group name."""
# if getgrnam is None or name is None:
# return None
# try:
# result = getgrnam(name)
# except KeyError:
# result = None
# if result is not None:
# return result[2]
# return None
#
#def _get_uid(name):
# """Returns an uid, given a user name."""
# if getpwnam is None or name is None:
# return None
# try:
# result = getpwnam(name)
# except KeyError:
# result = None
# if result is not None:
# return result[2]
# return None
#
#def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
# owner=None, group=None, logger=None):
# """Create a (possibly compressed) tar file from all the files under
# 'base_dir'.
#
# 'compress' must be "gzip" (the default), "bzip2", or None.
#
# 'owner' and 'group' can be used to define an owner and a group for the
# archive that is being built. If not provided, the current owner and group
# will be used.
#
# The output tar file will be named 'base_name' + ".tar", possibly plus
# the appropriate compression extension (".gz", or ".bz2").
#
# Returns the output filename.
# """
# tar_compression = {'gzip': 'gz', None: ''}
# compress_ext = {'gzip': '.gz'}
#
# if _BZ2_SUPPORTED:
# tar_compression['bzip2'] = 'bz2'
# compress_ext['bzip2'] = '.bz2'
#
# # flags for compression program, each element of list will be an argument
# if compress is not None and compress not in compress_ext:
# raise ValueError("bad value for 'compress', or compression format not "
# "supported : {0}".format(compress))
#
# archive_name = base_name + '.tar' + compress_ext.get(compress, '')
# archive_dir = os.path.dirname(archive_name)
#
# if archive_dir and not os.path.exists(archive_dir):
# if logger is not None:
# logger.info("creating %s", archive_dir)
# if not dry_run:
# os.makedirs(archive_dir)
#
# # creating the tarball
# if logger is not None:
# logger.info('Creating tar archive')
#
# uid = _get_uid(owner)
# gid = _get_gid(group)
#
# def _set_uid_gid(tarinfo):
# if gid is not None:
# tarinfo.gid = gid
# tarinfo.gname = group
# if uid is not None:
# tarinfo.uid = uid
# tarinfo.uname = owner
# return tarinfo
#
# if not dry_run:
# tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
# try:
# tar.add(base_dir, filter=_set_uid_gid)
# finally:
# tar.close()
#
# return archive_name
#
#def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# # XXX see if we want to keep an external call here
# if verbose:
# zipoptions = "-r"
# else:
# zipoptions = "-rq"
# from distutils.errors import DistutilsExecError
# from distutils.spawn import spawn
# try:
# spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
# except DistutilsExecError:
# # XXX really should distinguish between "couldn't find
# # external 'zip' command" and "zip failed".
# raise ExecError("unable to create zip file '%s': "
# "could neither import the 'zipfile' module nor "
# "find a standalone zip utility") % zip_filename
#
#def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
# """Create a zip file from all the files under 'base_dir'.
#
# The output zip file will be named 'base_name' + ".zip". Uses either the
# "zipfile" Python module (if available) or the InfoZIP "zip" utility
# (if installed and found on the default search path). If neither tool is
# available, raises ExecError. Returns the name of the output zip
# file.
# """
# zip_filename = base_name + ".zip"
# archive_dir = os.path.dirname(base_name)
#
# if archive_dir and not os.path.exists(archive_dir):
# if logger is not None:
# logger.info("creating %s", archive_dir)
# if not dry_run:
# os.makedirs(archive_dir)
#
# # If zipfile module is not available, try spawning an external 'zip'
# # command.
# try:
# import zipfile
# except ImportError:
# zipfile = None
#
# if zipfile is None:
# _call_external_zip(base_dir, zip_filename, verbose, dry_run)
# else:
# if logger is not None:
# logger.info("creating '%s' and adding '%s' to it",
# zip_filename, base_dir)
#
# if not dry_run:
# with zipfile.ZipFile(zip_filename, "w",
# compression=zipfile.ZIP_DEFLATED) as zf:
# path = os.path.normpath(base_dir)
# zf.write(path, path)
# if logger is not None:
# logger.info("adding '%s'", path)
# for dirpath, dirnames, filenames in os.walk(base_dir):
# for name in sorted(dirnames):
# path = os.path.normpath(os.path.join(dirpath, name))
# zf.write(path, path)
# if logger is not None:
# logger.info("adding '%s'", path)
# for name in filenames:
# path = os.path.normpath(os.path.join(dirpath, name))
# if os.path.isfile(path):
# zf.write(path, path)
# if logger is not None:
# logger.info("adding '%s'", path)
#
# return zip_filename
#
#_ARCHIVE_FORMATS = {
# 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
# 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
# 'zip': (_make_zipfile, [], "ZIP file")
# }
#
#if _BZ2_SUPPORTED:
# _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
# "bzip2'ed tar-file")
#
#def get_archive_formats():
# """Returns a list of supported formats for archiving and unarchiving.
#
# Each element of the returned sequence is a tuple (name, description)
# """
# formats = [(name, registry[2]) for name, registry in
# _ARCHIVE_FORMATS.items()]
# formats.sort()
# return formats
#
#def register_archive_format(name, function, extra_args=None, description=''):
# """Registers an archive format.
#
# name is the name of the format. function is the callable that will be
# used to create archives. If provided, extra_args is a sequence of
# (name, value) tuples that will be passed as arguments to the callable.
# description can be provided to describe the format, and will be returned
# by the get_archive_formats() function.
# """
# if extra_args is None:
# extra_args = []
# if not callable(function):
# raise TypeError('The %s object is not callable' % function)
# if not isinstance(extra_args, (tuple, list)):
# raise TypeError('extra_args needs to be a sequence')
# for element in extra_args:
# if not isinstance(element, (tuple, list)) or len(element) !=2:
# raise TypeError('extra_args elements are : (arg_name, value)')
#
# _ARCHIVE_FORMATS[name] = (function, extra_args, description)
#
#def unregister_archive_format(name):
# del _ARCHIVE_FORMATS[name]
#
#def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
# dry_run=0, owner=None, group=None, logger=None):
# """Create an archive file (eg. zip or tar).
#
# 'base_name' is the name of the file to create, minus any format-specific
# extension; 'format' is the archive format: one of "zip", "tar", "bztar"
# or "gztar".
#
# 'root_dir' is a directory that will be the root directory of the
# archive; ie. we typically chdir into 'root_dir' before creating the
# archive. 'base_dir' is the directory where we start archiving from;
# ie. 'base_dir' will be the common prefix of all files and
# directories in the archive. 'root_dir' and 'base_dir' both default
# to the current directory. Returns the name of the archive file.
#
# 'owner' and 'group' are used when creating a tar archive. By default,
# uses the current owner and group.
# """
# save_cwd = os.getcwd()
# if root_dir is not None:
# if logger is not None:
# logger.debug("changing into '%s'", root_dir)
# base_name = os.path.abspath(base_name)
# if not dry_run:
# os.chdir(root_dir)
#
# if base_dir is None:
# base_dir = os.curdir
#
# kwargs = {'dry_run': dry_run, 'logger': logger}
#
# try:
# format_info = _ARCHIVE_FORMATS[format]
# except KeyError:
# raise ValueError("unknown archive format '%s'" % format)
#
# func = format_info[0]
# for arg, val in format_info[1]:
# kwargs[arg] = val
#
# if format != 'zip':
# kwargs['owner'] = owner
# kwargs['group'] = group
#
# try:
# filename = func(base_name, base_dir, **kwargs)
# finally:
# if root_dir is not None:
# if logger is not None:
# logger.debug("changing back to '%s'", save_cwd)
# os.chdir(save_cwd)
#
# return filename
#
#
#def get_unpack_formats():
# """Returns a list of supported formats for unpacking.
#
# Each element of the returned sequence is a tuple
# (name, extensions, description)
# """
# formats = [(name, info[0], info[3]) for name, info in
# _UNPACK_FORMATS.items()]
# formats.sort()
# return formats
#
#def _check_unpack_options(extensions, function, extra_args):
# """Checks what gets registered as an unpacker."""
# # first make sure no other unpacker is registered for this extension
# existing_extensions = {}
# for name, info in _UNPACK_FORMATS.items():
# for ext in info[0]:
# existing_extensions[ext] = name
#
# for extension in extensions:
# if extension in existing_extensions:
# msg = '%s is already registered for "%s"'
# raise RegistryError(msg % (extension,
# existing_extensions[extension]))
#
# if not callable(function):
# raise TypeError('The registered function must be a callable')
#
#
#def register_unpack_format(name, extensions, function, extra_args=None,
# description=''):
# """Registers an unpack format.
#
# `name` is the name of the format. `extensions` is a list of extensions
# corresponding to the format.
#
# `function` is the callable that will be
# used to unpack archives. The callable will receive archives to unpack.
# If it's unable to handle an archive, it needs to raise a ReadError
# exception.
#
# If provided, `extra_args` is a sequence of
# (name, value) tuples that will be passed as arguments to the callable.
# description can be provided to describe the format, and will be returned
# by the get_unpack_formats() function.
# """
# if extra_args is None:
# extra_args = []
# _check_unpack_options(extensions, function, extra_args)
# _UNPACK_FORMATS[name] = extensions, function, extra_args, description
#
#def unregister_unpack_format(name):
# """Removes the pack format from the registery."""
# del _UNPACK_FORMATS[name]
#
#def _ensure_directory(path):
# """Ensure that the parent directory of `path` exists"""
# dirname = os.path.dirname(path)
# if not os.path.isdir(dirname):
# os.makedirs(dirname)
#
#def _unpack_zipfile(filename, extract_dir):
# """Unpack zip `filename` to `extract_dir`
# """
# try:
# import zipfile
# except ImportError:
# raise ReadError('zlib not supported, cannot unpack this archive.')
#
# if not zipfile.is_zipfile(filename):
# raise ReadError("%s is not a zip file" % filename)
#
# zip = zipfile.ZipFile(filename)
# try:
# for info in zip.infolist():
# name = info.filename
#
# # don't extract absolute paths or ones with .. in them
# if name.startswith('/') or '..' in name:
# continue
#
# target = os.path.join(extract_dir, *name.split('/'))
# if not target:
# continue
#
# _ensure_directory(target)
# if not name.endswith('/'):
# # file
# data = zip.read(info.filename)
# f = open(target, 'wb')
# try:
# f.write(data)
# finally:
# f.close()
# del data
# finally:
# zip.close()
#
#def _unpack_tarfile(filename, extract_dir):
# """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
# """
# try:
# tarobj = tarfile.open(filename)
# except tarfile.TarError:
# raise ReadError(
# "%s is not a compressed or uncompressed tar file" % filename)
# try:
# tarobj.extractall(extract_dir)
# finally:
# tarobj.close()
#
#_UNPACK_FORMATS = {
# 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
# 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
# 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
# }
#
#if _BZ2_SUPPORTED:
# _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
# "bzip2'ed tar-file")
#
#def _find_unpack_format(filename):
# for name, info in _UNPACK_FORMATS.items():
# for extension in info[0]:
# if filename.endswith(extension):
# return name
# return None
#
#def unpack_archive(filename, extract_dir=None, format=None):
# """Unpack an archive.
#
# `filename` is the name of the archive.
#
# `extract_dir` is the name of the target directory, where the archive
# is unpacked. If not provided, the current working directory is used.
#
# `format` is the archive format: one of "zip", "tar", or "gztar". Or any
# other registered format. If not provided, unpack_archive will use the
# filename extension and see if an unpacker was registered for that
# extension.
#
# In case none is found, a ValueError is raised.
# """
# if extract_dir is None:
# extract_dir = os.getcwd()
#
# if format is not None:
# try:
# format_info = _UNPACK_FORMATS[format]
# except KeyError:
# raise ValueError("Unknown unpack format '{0}'".format(format))
#
# func = format_info[1]
# func(filename, extract_dir, **dict(format_info[2]))
# else:
# # we need to look at the registered unpackers supported extensions
# format = _find_unpack_format(filename)
# if format is None:
# raise ReadError("Unknown archive format '{0}'".format(filename))
#
# func = _UNPACK_FORMATS[format][1]
# kwargs = dict(_UNPACK_FORMATS[format][2])
# func(filename, extract_dir, **kwargs)
#
#
#if hasattr(os, 'statvfs'):
#
# __all__.append('disk_usage')
# _ntuple_diskusage = collections.namedtuple('usage', 'total used free')
#
# def disk_usage(path):
# """Return disk usage statistics about the given path.
#
# Returned value is a named tuple with attributes 'total', 'used' and
# 'free', which are the amount of total, used and free space, in bytes.
# """
# st = os.statvfs(path)
# free = st.f_bavail * st.f_frsize
# total = st.f_blocks * st.f_frsize
# used = (st.f_blocks - st.f_bfree) * st.f_frsize
# return _ntuple_diskusage(total, used, free)
#
#elif os.name == 'nt':
#
# import nt
# __all__.append('disk_usage')
# _ntuple_diskusage = collections.namedtuple('usage', 'total used free')
#
# def disk_usage(path):
# """Return disk usage statistics about the given path.
#
# Returned values is a named tuple with attributes 'total', 'used' and
# 'free', which are the amount of total, used and free space, in bytes.
# """
# total, free = nt._getdiskusage(path)
# used = total - free
# return _ntuple_diskusage(total, used, free)
#
#
#def chown(path, user=None, group=None):
# """Change owner user and group of the given path.
#
# user and group can be the uid/gid or the user/group names, and in that case,
# they are converted to their respective uid/gid.
# """
#
# if user is None and group is None:
# raise ValueError("user and/or group must be set")
#
# _user = user
# _group = group
#
# # -1 means don't change it
# if user is None:
# _user = -1
# # user can either be an int (the uid) or a string (the system username)
# elif isinstance(user, str):
# _user = _get_uid(user)
# if _user is None:
# raise LookupError("no such user: {!r}".format(user))
#
# if group is None:
# _group = -1
# elif not isinstance(group, int):
# _group = _get_gid(group)
# if _group is None:
# raise LookupError("no such group: {!r}".format(group))
#
# os.chown(path, _user, _group)
#
#def get_terminal_size(fallback=(80, 24)):
# """Get the size of the terminal window.
#
# For each of the two dimensions, the environment variable, COLUMNS
# and LINES respectively, is checked. If the variable is defined and
# the value is a positive integer, it is used.
#
# When COLUMNS or LINES is not defined, which is the common case,
# the terminal connected to sys.__stdout__ is queried
# by invoking os.get_terminal_size.
#
# If the terminal size cannot be successfully queried, either because
# the system doesn't support querying, or because we are not
# connected to a terminal, the value given in fallback parameter
# is used. Fallback defaults to (80, 24) which is the default
# size used by many terminal emulators.
#
# The value returned is a named tuple of type os.terminal_size.
# """
# # columns, lines are the working values
# try:
# columns = int(os.environ['COLUMNS'])
# except (KeyError, ValueError):
# columns = 0
#
# try:
# lines = int(os.environ['LINES'])
# except (KeyError, ValueError):
# lines = 0
#
# # only query if necessary
# if columns <= 0 or lines <= 0:
# try:
# size = os.get_terminal_size(sys.__stdout__.fileno())
# except (NameError, OSError):
# size = os.terminal_size(fallback)
# if columns <= 0:
# columns = size.columns
# if lines <= 0:
# lines = size.lines
#
# return os.terminal_size((columns, lines))
#
#def which(cmd, mode=os.F_OK | os.X_OK, path=None):
# """Given a command, mode, and a PATH string, return the path which
# conforms to the given mode on the PATH, or None if there is no such
# file.
#
# `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
# of os.environ.get("PATH"), or can be overridden with a custom search
# path.
#
# """
# # Check that a given file can be accessed with the correct mode.
# # Additionally check that `file` is not a directory, as on Windows
# # directories pass the os.access check.
# def _access_check(fn, mode):
# return (os.path.exists(fn) and os.access(fn, mode)
# and not os.path.isdir(fn))
#
# # If we're given a path with a directory part, look it up directly rather
# # than referring to PATH directories. This includes checking relative to the
# # current directory, e.g. ./script
# if os.path.dirname(cmd):
# if _access_check(cmd, mode):
# return cmd
# return None
#
# if path is None:
# path = os.environ.get("PATH", os.defpath)
# if not path:
# return None
# path = path.split(os.pathsep)
#
# if sys.platform == "win32":
# # The current directory takes precedence on Windows.
# if not os.curdir in path:
# path.insert(0, os.curdir)
#
# # PATHEXT is necessary to check on Windows.
# pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# # See if the given file matches any of the expected path extensions.
# # This will allow us to short circuit when given "python.exe".
# # If it does match, only test that one, otherwise we have to try
# # others.
# if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
# files = [cmd]
# else:
# files = [cmd + ext for ext in pathext]
# else:
# # On other platforms you don't have things like PATHEXT to tell you
# # what file suffixes are executable, so just pass on cmd as-is.
# files = [cmd]
#
# seen = set()
# for dir in path:
# normdir = os.path.normcase(dir)
# if not normdir in seen:
# seen.add(normdir)
# for thefile in files:
# name = os.path.join(dir, thefile)
# if _access_check(name, mode):
# return name
# return None
| [
"noralf@tronnes.org"
] | noralf@tronnes.org |
81baef8090682ce775be599e4786806b1672e33f | 8a7abed7c441600a66bf2ef9135ff3a367ac0eb2 | /website/goals/migrations/0001_initial.py | 00da811bea795906390ec6595dd4df58f5432e91 | [] | no_license | mrooney/mikesgoals | 094d30160817879243b7539df5a3759d19583edc | dd0b0aee7ce20d43852cf694bc1ecb5af23dde94 | refs/heads/master | 2023-04-09T16:10:16.008923 | 2022-07-07T17:33:00 | 2022-07-07T17:33:00 | 4,474,379 | 2 | 0 | null | 2023-03-31T14:38:43 | 2012-05-28T20:30:43 | Python | UTF-8 | Python | false | false | 1,019 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates/drops the ``goals_goal`` table."""

    def forwards(self, orm):
        # Create the table backing the 'Goal' model.
        db.create_table('goals_goal', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.TextField')()),
            ('frequency', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('goals', ['Goal'])

    def backwards(self, orm):
        # Reverse of forwards(): drop the 'Goal' model table.
        db.delete_table('goals_goal')

    # Frozen model state South uses to build the orm argument.
    models = {
        'goals.goal': {
            'Meta': {'object_name': 'Goal'},
            'frequency': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['goals']
"mrooney.github@rowk.com"
] | mrooney.github@rowk.com |
393d712064d56ab0df11650c6d6f49b01aafb3b7 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-9296.py | 9ce70cba78da7c66126b2610ec5adcf9316b16a0 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,754 | py | # A resizable list of integers
class Vector(object):
    # A growable array of integers backed by a fixed-size list.
    items: [int] = None
    size: int = 0

    def __init__(self:"Vector"):
        self.items = [0]

    # Current number of allocated slots.
    def capacity(self:"Vector") -> int:
        return len(self.items)

    # Grow the backing storage by one slot; returns the new capacity.
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Store one value after the last used slot, growing if full.
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Append every value from new_items, in order.
    def append_all(self:"Vector", new_items: [int]) -> object:
        val:int = 0
        for val in new_items:
            self.append(val)

    # Delete the element at idx, shifting later elements left.
    # Negative indices are ignored.
    def remove_at(self:"Vector", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Value stored at position idx.
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]

    # Number of elements currently stored.
    def length(self:"Vector") -> int:
        return self.size
# A resizable list of integers
class Vector2(object):
    # A growable array of integers. Each operation also has a numbered
    # duplicate variant whose extra arguments are ignored; the *2 fields
    # are declared but never written by these methods.
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0

    def __init__(self:"Vector2"):
        self.items = [0]

    # Current number of allocated slots.
    def capacity(self:"Vector2") -> int:
        return len(self.items)

    def capacity2(self:"Vector2") -> int:
        return len(self.items)

    # Grow the backing storage by one slot; returns the new capacity.
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Store one value after the last used slot, growing if full.
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Same as append(); item2 is ignored.
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Append every value from new_items, in order.
    def append_all(self:"Vector2", new_items: [int]) -> object:
        val:int = 0
        for val in new_items:
            self.append(val)

    # Same as append_all(); new_items2 is ignored.
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        val:int = 0
        val2:int = 0
        for val in new_items:
            self.append(val)

    # Delete the element at idx, shifting later elements left.
    # Negative indices are ignored.
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Same as remove_at(); idx2 is ignored.
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Value stored at position idx.
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]

    # Same as get(); idx2 is ignored.
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Number of elements currently stored.
    def length(self:"Vector2") -> int:
        return self.size

    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers
class Vector3(object):
    # A growable array of integers. Each operation also has numbered
    # duplicate variants whose extra arguments are ignored; the *2/*3
    # fields are declared but never written by these methods.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0

    def __init__(self:"Vector3"):
        self.items = [0]

    # Current number of allocated slots.
    def capacity(self:"Vector3") -> int:
        return len(self.items)

    def capacity2(self:"Vector3") -> int:
        return len(self.items)

    def capacity3(self:"Vector3") -> int:
        return len(self.items)

    # Grow the backing storage by one slot; returns the new capacity.
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Store one value after the last used slot, growing if full.
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Same as append(); extra arguments are ignored.
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Append every value from new_items, in order.
    def append_all(self:"Vector3", new_items: [int]) -> object:
        val:int = 0
        for val in new_items:
            self.append(val)

    # Same as append_all(); extra list arguments are ignored.
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        val:int = 0
        val2:int = 0
        for val in new_items:
            self.append(val)

    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        val:int = 0
        val2:int = 0
        val3:int = 0
        for val in new_items:
            self.append(val)

    # Delete the element at idx, shifting later elements left.
    # Negative indices are ignored.
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Same as remove_at(); extra arguments are ignored.
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Value stored at position idx; extra arguments are ignored.
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]

    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]

    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Number of elements currently stored.
    def length(self:"Vector3") -> int:
        return self.size

    def length2(self:"Vector3") -> int:
        return self.size

    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
class Vector4(object):
    # A growable array of integers. Each operation also has numbered
    # duplicate variants whose extra arguments are ignored; the *2/*3/*4
    # fields are declared but never written by these methods.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0

    def __init__(self:"Vector4"):
        self.items = [0]

    # Current number of allocated slots.
    def capacity(self:"Vector4") -> int:
        return len(self.items)

    def capacity2(self:"Vector4") -> int:
        return len(self.items)

    def capacity3(self:"Vector4") -> int:
        return len(self.items)

    def capacity4(self:"Vector4") -> int:
        return len(self.items)

    # Grow the backing storage by one slot; returns the new capacity.
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Store one value after the last used slot, growing if full.
    def append(self:"Vector4", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Same as append(); extra arguments are ignored.
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Append every value from new_items, in order.
    def append_all(self:"Vector4", new_items: [int]) -> object:
        val:int = 0
        for val in new_items:
            self.append(val)

    # Same as append_all(); extra list arguments are ignored.
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        val:int = 0
        val2:int = 0
        for val in new_items:
            self.append(val)

    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        val:int = 0
        val2:int = 0
        val3:int = 0
        for val in new_items:
            self.append(val)

    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        val:int = 0
        val2:int = 0
        val3:int = 0
        val4:int = 0
        for val in new_items:
            self.append(val)

    # Delete the element at idx, shifting later elements left.
    # Negative indices are ignored.
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Same as remove_at(); extra arguments are ignored.
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Value stored at position idx; extra arguments are ignored.
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]

    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]

    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    # Number of elements currently stored.
    def length(self:"Vector4") -> int:
        return self.size

    def length2(self:"Vector4") -> int:
        return self.size

    def length3(self:"Vector4") -> int:
        return self.size

    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers
class Vector5(object):
    """Growable integer vector.

    The numbered members (items2..items5, size2..size5, capacity2..5, ...)
    are benchmark padding: each mirrors the base operation and ignores its
    extra arguments. Only `items` and `size` hold real state.
    """
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0

    def __init__(self:"Vector5"):
        # Start life with a single zeroed storage slot.
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity2(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity3(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity4(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity5(self:"Vector5") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        slot = self.size
        self.items[slot] = item
        self.size = slot + 1

    # Appends one item to end of vector (item2 unused)
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        slot = self.size
        self.items[slot] = item
        self.size = slot + 1

    # Appends one item to end of vector (item2..item3 unused)
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        slot = self.size
        self.items[slot] = item
        self.size = slot + 1

    # Appends one item to end of vector (item2..item4 unused)
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        slot = self.size
        self.items[slot] = item
        self.size = slot + 1

    # Appends one item to end of vector (item2..item5 unused)
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        slot = self.size
        self.items[slot] = item
        self.size = slot + 1

    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Appends many items to end of vector (new_items2 unused)
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Appends many items to end of vector (new_items2..3 unused)
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Appends many items to end of vector (new_items2..4 unused)
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Appends many items to end of vector (new_items2..5 unused)
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 unused)
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..3 unused)
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..4 unused)
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..5 unused)
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 unused)
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..3 unused)
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..4 unused)
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..5 unused)
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    """Vector that doubles its backing store until doubling_limit is
    reached, then falls back to growing one slot at a time."""
    doubling_limit:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Doubling would overshoot the limit: grow by a single slot.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
    """DoublingVector with one extra padded field/method pair
    (benchmark duplication); doubling_limit2 is never read."""
    doubling_limit:int = 1000
    doubling_limit2:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Past the doubling limit: grow by one slot only.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector2") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    """DoublingVector with padded duplicates (benchmark duplication);
    doubling_limit2/3 are never read."""
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Past the doubling limit: grow by one slot only.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    """DoublingVector with padded duplicates (benchmark duplication);
    doubling_limit2..4 are never read."""
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Past the doubling limit: grow by one slot only.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    """DoublingVector with padded duplicates (benchmark duplication);
    doubling_limit2..5 are never read."""
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Past the doubling limit: grow by one slot only.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    """Return a DoublingVector holding i, i+1, ..., j-1 (empty if j <= i)."""
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    """Same as vrange; i2/j2 are unused benchmark padding."""
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    """Same as vrange; i2..j3 are unused benchmark padding."""
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    """Same as vrange; i2..j4 are unused benchmark padding."""
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    """Same as vrange; i2..j5 are unused benchmark padding."""
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    """Remove, in place, every element divisible by an earlier element of v."""
    pos:int = 0
    while pos < v.length():
        divisor = v.get(pos)
        scan = pos + 1
        # v.length() shrinks as multiples are removed, so re-check each pass.
        while scan < v.length():
            if v.get(scan) % divisor != 0:
                scan = scan + 1
            else:
                v.remove_at(scan)
        pos = pos + 1
def sieve2(v:Vector, v2:Vector) -> object:
    """Same as sieve; v2 is unused benchmark padding."""
    pos:int = 0
    while pos < v.length():
        divisor = v.get(pos)
        scan = pos + 1
        while scan < v.length():
            if v.get(scan) % divisor != 0:
                scan = scan + 1
            else:
                v.remove_at(scan)
        pos = pos + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    """Same as sieve; v2/v3 are unused benchmark padding."""
    pos:int = 0
    while pos < v.length():
        divisor = v.get(pos)
        scan = pos + 1
        while scan < v.length():
            if v.get(scan) % divisor != 0:
                scan = scan + 1
            else:
                v.remove_at(scan)
        pos = pos + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    """Remove, in place, every element of v divisible by an earlier element.

    v2..v4 are unused benchmark padding. Fix: the original contained the
    unexpanded template placeholder `j = $Exp` (a syntax error); the inner
    scan must start just past position i (`j = i + 1`), matching
    sieve/sieve2/sieve3/sieve5.
    """
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        # v.length() shrinks as multiples are removed, so re-check each pass.
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    """Same as sieve; v2..v5 are unused benchmark padding."""
    pos:int = 0
    while pos < v.length():
        divisor = v.get(pos)
        scan = pos + 1
        while scan < v.length():
            if v.get(scan) % divisor != 0:
                scan = scan + 1
            else:
                v.remove_at(scan)
        pos = pos + 1
# Input parameter
# Sieve upper bound (exclusive); n2..n5 and the other numbered twins below
# are benchmark padding and never affect the printed output.
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
# Build vectors of the integers [2, n); only v is actually sieved below.
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
# Emit the surviving (prime) values, one per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
4f1bb8c4ae785963280e2fe29443763f3ef26a4a | 420f7b02595663869df60fe76a865c744e1e878b | /ch1/pseudonyms.py | 926dd0617e5f4c3854f378aa05e86d35fa763512 | [] | no_license | marcin-kopanski/impractical_python_projects | 79a0f78be5af0a61e8240c22fb6d8eea1f9f3bc4 | 1eeeb1244140369c42771424fbeacd251c2d87d5 | refs/heads/master | 2022-12-21T06:48:37.728560 | 2020-09-14T21:16:14 | 2020-09-14T21:16:14 | 295,348,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,474 | py | import random
import sys
print("Welcome to the Psych 'Sidekick Name Picker.'\n")
print("A name just like Sean would pick for Gus:\n\n")
first = ('Baby Oil', 'Bad News', 'Big Burps', "Bill 'Beenie-Weenie'",
"Bob 'Stinkbug'", 'Bowel Noises', 'Boxelder', "Bud 'Lite' ",
'Butterbean', 'Buttermilk', 'Buttocks', 'Chad', 'Chesterfield',
'Chewy', 'Chigger', 'Cinnabuns', 'Cleet', 'Cornbread', 'Crab Meat',
'Crapps', 'Dark Skies', 'Dennis Clawhammer', 'Dicman', 'Elphonso',
'Fancypants', 'Figgs', 'Foncy', 'Gootsy', 'Greasy Jim', 'Huckleberry',
'Huggy', 'Ignatious', 'Jimbo', "Joe 'Pottin Soil'", 'Johnny',
'Lemongrass', 'Lil Debil', 'Longbranch', '"Lunch Money"', 'Mergatroid',
'"Mr Peabody"', 'Oil-Can', 'Oinks', 'Old Scratch', 'Ovaltine',
'Pennywhistle', 'Pitchfork Ben', 'Potato Bug', 'Pushmeet',
'Rock Candy', 'Schlomo', 'Scratchensniff', 'Scut',
"Sid 'The Squirts'", 'Skidmark', 'Slaps', 'Snakes', 'Snoobs',
'Snorki', 'Soupcan Sam', 'Spitzitout', 'Squids', 'Stinky',
'Storyboard', 'Sweet Tea', 'TeeTee', 'Wheezy Joe',
"Winston 'Jazz Hands'", 'Worms')
last = ('Appleyard', 'Bigmeat', 'Bloominshine', 'Boogerbottom',
'Breedslovetrout', 'Butterbaugh', 'Clovenhoof', 'Clutterbuck',
'Cocktoasten', 'Endicott', 'Fewhairs', 'Gooberdapple', 'Goodensmith',
'Goodpasture', 'Guster', 'Henderson', 'Hooperbag', 'Hoosenater',
'Hootkins', 'Jefferson', 'Jenkins', 'Jingley-Schmidt', 'Johnson',
'Kingfish', 'Listenbee', "M'Bembo", 'McFadden', 'Moonshine', 'Nettles',
'Noseworthy', 'Olivetti', 'Outerbridge', 'Overpeck', 'Overturf',
'Oxhandler', 'Pealike', 'Pennywhistle', 'Peterson', 'Pieplow',
'Pinkerton', 'Porkins', 'Putney', 'Quakenbush', 'Rainwater',
'Rosenthal', 'Rubbins', 'Sackrider', 'Snuggleshine', 'Splern',
'Stevens', 'Stroganoff', 'Sugar-Gold', 'Swackhamer', 'Tippins',
'Turnipseed', 'Vinaigrette', 'Walkingstick', 'Wallbanger', 'Weewax',
'Weiners', 'Whipkey', 'Wigglesworth', 'Wimplesnatch', 'Winterkorn',
'Woolysocks')
while True:
    # Draw one random first/last name pair from the tuples above.
    firstName = random.choice(first)
    lastName = random.choice(last)
    print("\n\n")
    # NOTE(review): printed to stderr — presumably so IDEs (e.g. IDLE)
    # display the name in a different color; confirm that's intentional.
    print(firstName, lastName, file=sys.stderr)
    print("\n\n")
    # Any input other than "n"/"N" repeats the loop.
    try_again = input("\n\nTry again? (Press Enter else n to quit)\n ")
    if try_again.lower() == "n":
        break
input("\nPress Enter to exit.")
| [
"mk@pwrs.pl"
] | mk@pwrs.pl |
4dd39ca63c397b2d471502d25c1b147cfafefc90 | bea7c1fdcbd2477015ea14b2da92e4f736c89484 | /examples/mnist-simple.py | 2194ad85662be44ebf22b7be8644197ab0e3e152 | [
"Apache-2.0"
] | permissive | mzj14/mesh | e0a9d5d51e117a7232dfe186657d1ef4d4680a27 | bf04d24e7a9c54733dea014b82e5985a039da67c | refs/heads/master | 2020-04-10T15:24:37.193416 | 2018-12-18T21:00:22 | 2018-12-18T21:00:22 | 161,108,592 | 0 | 0 | Apache-2.0 | 2018-12-10T03:04:05 | 2018-12-10T03:04:04 | null | UTF-8 | Python | false | false | 7,960 | py | # coding=utf-8
# Copyright 2018 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST using Mesh TensorFlow and TF Estimator.
This is an illustration, not a good model.
# python mnist-simple.py --log_steps=10 --mesh_shape="b1:4" --layout="batch:b1"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import mnist_dataset as dataset # local file import
import tensorflow as tf
# Command-line flags: data/model locations, training schedule, and the
# mesh-TensorFlow sharding configuration (mesh_shape + layout rules).
tf.flags.DEFINE_string("data_dir", "data-source",
                       "Path to directory containing the MNIST dataset")
tf.flags.DEFINE_string("model_dir", "model", "Estimator model_dir")
tf.flags.DEFINE_integer("batch_size", 200,
                        "Mini-batch size for the training. Note that this "
                        "is the global batch size and not the per-shard batch.")
tf.flags.DEFINE_integer("hidden_size", 512, "Size of each hidden layer.")
tf.flags.DEFINE_integer("train_epochs", 1, "Total number of training epochs.")
tf.flags.DEFINE_integer("epochs_between_evals", 1,
                        "# of epochs between evaluations.")
tf.flags.DEFINE_integer("log_steps", 10, "Number of log steps as a logging unit")
tf.flags.DEFINE_integer("eval_steps", 0,
                        "Total number of evaluation steps. If `0`, evaluation "
                        "after training is skipped.")
tf.flags.DEFINE_string("mesh_shape", "b1:4", "mesh shape")
tf.flags.DEFINE_string("layout", "rows:b1",
                       "layout rules")
FLAGS = tf.flags.FLAGS
def mnist_model(image, labels, mesh):
  """The model.

  Args:
    image: tf.Tensor with shape [batch, 28*28]
    labels: a tf.Tensor with shape [batch] and dtype tf.int32, or None
    mesh: a mtf.Mesh

  Returns:
    logits: a tf.Tensor with shape [batch, 10]
    loss: a mtf.Tensor with shape [] (None when labels is None)
  """
  # Named mesh-TF dimensions; "batch" is the one the layout flag splits.
  batch_dim = mtf.Dimension("batch", FLAGS.batch_size)
  rows_dim = mtf.Dimension("rows", 28)
  cols_dim = mtf.Dimension("cols", 28)
  classes_dim = mtf.Dimension("classes", 10)
  # Reshape the flat image to (batch, 28, 28) and wrap as mtf tensors.
  x = mtf.import_tf_tensor(mesh, tf.reshape(image, [FLAGS.batch_size, 28, 28]), [batch_dim, rows_dim, cols_dim])
  y = mtf.import_tf_tensor(mesh, tf.reshape(labels, [FLAGS.batch_size]), [batch_dim])
  # Single dense layer: contract rows x cols against a (28, 28, 10) weight.
  w1 = mtf.get_variable(mesh, "w1", [rows_dim, cols_dim, classes_dim])
  b1 = mtf.get_variable(mesh, "b1", [classes_dim])
  # NOTE(review): relu is applied to the final layer output before softmax
  # cross-entropy — unusual for "logits"; confirm this is intended.
  logits = mtf.relu(mtf.einsum([x, w1], [batch_dim, classes_dim]) + b1)
  if labels is None:
    loss = None
  else:
    loss = mtf.layers.softmax_cross_entropy_with_logits(
        logits, mtf.one_hot(y, classes_dim), classes_dim)
    loss = mtf.reduce_mean(loss)
  return logits, loss
def model_fn(features, labels, mode, params):
  """The model_fn argument for creating an Estimator.

  Builds the mtf graph, lowers it to plain TensorFlow on a placement
  mesh, and returns the EstimatorSpec for the requested mode
  (TRAIN / EVAL / PREDICT).

  Args:
    features: batch of flattened images, shape [batch, 28*28].
    labels: int32 class labels, shape [batch] (None for prediction).
    mode: a tf.estimator.ModeKeys value.
    params: Estimator params dict (unused here beyond logging).

  Returns:
    a tf.estimator.EstimatorSpec.
  """
  tf.logging.info("features = %s labels = %s mode = %s params=%s" %
                  (features, labels, mode, params))
  global_step = tf.train.get_global_step()
  graph = mtf.Graph()
  # wrapped graph named "my_mesh"
  mesh = mtf.Mesh(graph, "my_mesh")
  logits, loss = mnist_model(features, labels, mesh)
  # Parse the sharding flags and place one logical device per mesh slot
  # (empty device strings = default placement).
  mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
  layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
  mesh_size = mesh_shape.size
  print("mesh_shape.size = ", mesh_shape.size)
  mesh_devices = [""] * mesh_size
  mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
      mesh_shape, layout_rules, mesh_devices)
  if mode == tf.estimator.ModeKeys.TRAIN:
    # Symbolic mtf gradients and Adafactor update ops, built pre-lowering.
    var_grads = mtf.gradients(
        [loss], [v.outputs[0] for v in graph.trainable_variables])
    optimizer = mtf.optimize.AdafactorOptimizer()
    update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
  # Lower the mtf graph to ordinary TensorFlow ops on the chosen mesh.
  lowering = mtf.Lowering(graph, {mesh: mesh_impl})
  restore_hook = mtf.MtfRestoreHook(lowering)
  tf_logits = lowering.export_to_tf_tensor(logits)
  if mode != tf.estimator.ModeKeys.PREDICT:
    tf_loss = lowering.export_to_tf_tensor(loss)
    tf.summary.scalar("loss", tf_loss)
  if mode == tf.estimator.ModeKeys.TRAIN:
    tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
    tf_update_ops.append(tf.assign_add(global_step, 1))
    train_op = tf.group(tf_update_ops)
    saver = tf.train.Saver(
        tf.global_variables(),
        sharded=True,
        max_to_keep=10,
        keep_checkpoint_every_n_hours=2,
        defer_build=False, save_relative_paths=True)
    tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
    # The listener syncs lowered variable values with the mtf variables
    # around each checkpoint.
    saver_listener = mtf.MtfCheckpointSaverListener(lowering)
    saver_hook = tf.train.CheckpointSaverHook(
        FLAGS.model_dir,
        save_steps=1000,
        saver=saver,
        listeners=[saver_listener])
    accuracy = tf.metrics.accuracy(
        labels=labels, predictions=tf.argmax(tf_logits, axis=1))
    # Name tensors to be logged with LoggingTensorHook.
    tf.identity(tf_loss, "cross_entropy")
    tf.identity(accuracy[1], name="train_accuracy")
    # Save accuracy scalar to Tensorboard output.
    tf.summary.scalar("train_accuracy", accuracy[1])
    # restore_hook must come before saver_hook
    return tf.estimator.EstimatorSpec(
        tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,
        training_chief_hooks=[restore_hook, saver_hook])
  if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        "classes": tf.argmax(tf_logits, axis=1),
        "probabilities": tf.nn.softmax(tf_logits),
    }
    return tf.estimator.EstimatorSpec(
        mode=tf.estimator.ModeKeys.PREDICT,
        predictions=predictions,
        prediction_hooks=[restore_hook],
        export_outputs={
            "classify": tf.estimator.export.PredictOutput(predictions)
        })
  if mode == tf.estimator.ModeKeys.EVAL:
    return tf.estimator.EstimatorSpec(
        mode=tf.estimator.ModeKeys.EVAL,
        loss=tf_loss,
        evaluation_hooks=[restore_hook],
        eval_metric_ops={
            "accuracy":
            tf.metrics.accuracy(
                labels=labels, predictions=tf.argmax(tf_logits, axis=1)),
        })
def run_mnist():
  """Run MNIST training and eval loop.

  Trains for FLAGS.train_epochs epochs total, evaluating every
  FLAGS.epochs_between_evals epochs.
  """
  mnist_classifier = tf.estimator.Estimator(
      model_fn=model_fn,
      model_dir=FLAGS.model_dir,
      config=tf.estimator.RunConfig(log_step_count_steps=FLAGS.log_steps))
  # Set up training and evaluation input functions.
  def train_input_fn():
    """Prepare data for training."""
    # When choosing shuffle buffer sizes, larger sizes result in better
    # randomness, while smaller sizes use less memory. MNIST is a small
    # enough dataset that we can easily shuffle the full epoch.
    ds = dataset.train(FLAGS.data_dir)
    # ds_batched = ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size)
    ds_batched = ds.cache().batch(FLAGS.batch_size)
    # Iterate through the dataset a set number (`epochs_between_evals`) of times
    # during each training session.
    ds = ds_batched.repeat(FLAGS.epochs_between_evals)
    return ds
  def eval_input_fn():
    # One pass over the test split, batched like training.
    return dataset.test(FLAGS.data_dir).batch(
        FLAGS.batch_size).make_one_shot_iterator().get_next()
  # Train and evaluate model.
  for _ in range(FLAGS.train_epochs // FLAGS.epochs_between_evals):
    mnist_classifier.train(input_fn=train_input_fn, hooks=None)
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print("\nEvaluation results:\n\t%s\n" % eval_results)
print("\nEvaluation results:\n\t%s\n" % eval_results)
def main(_):
  """Entry point for tf.app.run(); the parsed-argv argument is unused."""
  run_mnist()
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run()
| [
"mazijun2017@outlook.com"
] | mazijun2017@outlook.com |
e4b32b9e6339263746a7889bc6d5cedd7cda3845 | edad2e75198bcfdf75965e132e268b4ebbbd70e2 | /old/other_scripts_notneeded/main_scripts_for_yuva/python_utility3_remote_seed_yuva.py | f79d87f760065f05716ccbed7e8ca8662d20bc47 | [] | no_license | nandithaec/python-utility | d50a6e802bf3a9afadbaf3f5207efdba875c7e70 | b8b7377b87630375ff804c7204d37a1c7ecee826 | refs/heads/master | 2021-01-10T07:00:18.820227 | 2015-04-16T06:01:44 | 2015-04-16T06:01:44 | 36,727,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,448 | py |
#!/usr/bin/env python
#IMPORTANT: It is assumed that we are running parallel ngspice simulations on a remote 48-core cluster at 10.107.105.201. If this is not the case, you will need to modify this script to run it on this machine, by commenting out the scp and ssh commands.
#Example usage: python /home/external/iitb/nanditha/simulations/c499_ecat_yuva/python_utility3_remote_seed_yuva.py -m c499_clk -p /home/external/iitb/nanditha/simulations/c499_ecat_yuva -d c499_ecat_yuva -t 180 -n 10 --group 10 --clk 125 --std_lib osu018_stdcells_correct_vdd_gnd.sp
import optparse
import re,os
import glob,shutil,csv
import random
import subprocess, time
import random,sys
#import python_compare_remote
from optparse import OptionParser
parser = OptionParser('This script reads in the template spice file and the inputs to the script are listed as arguments below, which are all necessary arguments.\nAfter a previous script has copied the current working directory to a remote cluster, this script invokes several scripts inturn:\n1.perl_calculate_gates_clk.pl\n2.perl_calculate_drain.pl\n3.deckgen_remote_seed.pl\n4.python_GNUparallel_ngspice_remote.py\n5.python_compare_remote_seed.py\n6.python_count_flips_remote_seed.py\n\nThe tasks of these scripts will be described in the help section of the respective scripts. The current script needs pnr/reports/5.postRouteOpt_mult/mult_postRoute.slk as an input. The current script will calculate the number of gates in the design(spice) file, pick a random gate, calculate the number of distinct drains for this gate and pick a drain to introduce glitch it.The location of the glitch is calculated based on the timing/slack information from the SoC encounter output: (pnr/reports/5.postRouteOpt_mult/mult_postRoute.slk) for the particular design, so that we introduce glitch only after the input has changed in the clk period, and before the next rising edge of the clk (when the latch is open). It then invokes deckgen.pl to modify the template spice file to introduce the glitched version of the gate in the spice file. The deckgen creates multiple spice files which will contain different input conditions since they are generated at different clk cycles.\nThe python_GNUparallel_ngspice_remote.py will then distribute these spice files across the different machines in the cluster and simulate these decks using ngspice. The results are csv files which contain output node values after spice simulation.\nThe results are then concatenated into one file and compared against the expected reference outputs that were obtained by the RTL simulation. If the results match, then it means that there was no bit-flip, so a 0 is reported, else a 1 is reported for a bit-flip. 
The number of flips in a single simulation is counted. Finally, if there are multiple flips given atleast one flip, it is reported as a percentage.\nAuthor:Nanditha Rao(nanditha@ee.iitb.ac.in)\n')
# Command-line options; despite optparse's "option" terminology the parser
# description above treats all of these as required arguments.
parser.add_option("-m", "--mod",dest='module', help='Enter the entity name(vhdl) or module name (verilog)')
parser.add_option("-n", "--num",dest='num', help='Enter the number of spice decks to be generated and simulated')
parser.add_option("-p", "--path", dest="path",help="Enter the ENTIRE path to your design folder (your working dir)- either this machine or remote machine. IF remote machine, enter ~/simulations/<design_folder_name>")
parser.add_option("-d", "--design", dest="design_folder",help="Enter the name of your design folder")
parser.add_option("-t", "--tech",dest='tech', help='Enter the technology node-for eg., For 180nm, enter 180')
parser.add_option("--group",dest='group', help='Enter the number of spice decks to be simulated at a time. For eg., if -n option is 10000, and say we want to run 100 at a time, then enter 100')
#parser.add_option("--backup",dest='backup', help='Enter the number of spice decks you want to backup/save per run. For ef., if you entered -n 1000 and --group 100, and if you want to save 2 decks per 100, enter 2 ')
#parser.add_option("-s", "--seed",dest='seed', help='Enter the random seed')
parser.add_option("-c", "--clk",dest='clk', help='Enter the clk freq in MHz')
parser.add_option("-l", "--std_lib",dest='std_lib', help='Enter the file name of the standard cell library (sp file)')
(options, args) = parser.parse_args()
# Unpack the parsed command-line options into module-level names.
module=options.module
num=options.num
path=options.path
design_folder=options.design_folder
tech=options.tech
num_at_a_time=options.group
#backup_per_run=options.backup
#seed=int(options.seed)
clk=(options.clk)
std_lib = options.std_lib
# Derived timing: clk is in MHz, so clk_period = 1/(clk * 1e6) is in seconds.
clk_period = (1.0/float(clk))*(0.000001)
half_clk_period = clk_period/2.0
# Inputs are taken to settle one third of a half-period after the edge;
# end_PWL marks the end of the piecewise-linear input window.
change_time= half_clk_period/3.0
end_PWL= half_clk_period + change_time #in ns generally
#To determine when the glitch needs to be introduced, depends on the slack information
with open("%s/pnr/reports/5.postRouteOpt_%s/%s_postRoute.slk" %(path,module,module),"r") as f:
words=map(str.split, f)
line1=words[1] #2nd line after header
slack_read=line1[2]
print "\nSlack is: %s" %slack_read
slack_string=slack_read.replace("*/","")
slack_time=float(slack_string)
print "\nSlack is: %f ns" %slack_time
reqdtime_read=line1[1]
print "\nReqd time is: %s" %reqdtime_read
reqdtime_string=reqdtime_read.replace("*/","")
reqd_time=float(reqdtime_string)
print "\nReqd time is: %f ns" %reqd_time
arrival_time = reqd_time - slack_time
arrival_time_ns = arrival_time *(0.000000001)
print "\nArrival time is: %e " %arrival_time_ns
#What fraction of the clk period is the arrival time?
arrival_clk_part = arrival_time_ns / clk_period
print "\nArrival time is: %f clk periods" %arrival_clk_part
#Whatever number of decks to be simulated- is assumed to be more than or equal to 1000.
#At a time, only 1000 are generated and run- to save disk space. After collecting results, they are deleted
num_of_loops=(int(num)/int(num_at_a_time))
if os.path.exists('%s/spice_results' %path):
os.chdir('%s/spice_results' %path)
for f in glob.glob("count*.csv"):
os.remove(f)
if os.path.exists('%s/spice_results' %path):
os.chdir('%s/spice_results' %path)
for f in glob.glob("spice_rtl_*.csv"):
os.remove(f)
if os.path.exists('%s/spice_results' %path):
os.chdir('%s/spice_results' %path)
for f in glob.glob("final_results_spice_outputs_*.csv"):
os.remove(f)
if os.path.isfile('%s/spice_results/result_summary_flipcount.csv' %(path)):
os.remove('%s/spice_results/result_summary_flipcount.csv' %(path))
#Clear Back up directory
backup_dir = '%s/backup_spice_decks' %(path)
if os.path.exists(backup_dir):
shutil.rmtree(backup_dir)
if not os.path.exists(backup_dir):
os.mkdir(backup_dir)
print "Deleting the existing spice decks before creating new ones!\n"
os.system('rm -rf %s/spice_decks_*' %path)
start_loop=1
frand = open('%s/random_number_histogram.txt' %(path), 'w')
seed = random.randint(0, sys.maxint)
print "seed is: ", seed
frand.write("Seed:%d\n" %seed)
random.seed(seed) #Seeding the random number generator
clk_period = (1.0/float(clk))*(0.000001)
print "\nclk is ",clk
print "\nClk_period: ", clk_period
os.system('cat $PBS_NODEFILE > %s/nodes.txt' %path)
print "PBS NODEFILE contents....written to nodes.txt\n"
time.sleep(3)
os.system('python %s/python_ssh_addr_yuva.py -p %s' %(path,path))
os.system('cat %s/sshmachines.txt' %path)
print "Check contents of sshmachines.txt file....\n"
time.sleep(10)
#Uncomment this for future designs. For decoder example, decoder folder has already been created on desktop
#os.system('ssh nanditha@10.107.90.52 mkdir /home/nanditha/simulations/%s' %(design_folder))
###########################################Comment this out if not using desktop to run##################################
"""
print "\nCopying a python script to desktop machine!\n"
os.system('scp %s/python_desktop_copy.py %s/glitch_%s.sp %s/tsmc018.m nanditha@10.107.90.52:/home/nanditha/simulations/%s/' %(path,path,std_lib,path,design_folder))
"""
######################################################################################################
#perl perl_calculate_gates_clk.pl -s reference_spice.sp -l glitch_osu018_stdcells_correct_vdd_gnd.sp -r decoder_behav_pnr_reference_out/tool_reference_out.txt -m decoder_behav_pnr -f /home/user1/simulations/decoder
os.system('perl %s/perl_calculate_gates_clk.pl -s %s/reference_spice.sp -l %s/glitch_%s.sp -r %s/%s_reference_out/tool_reference_out.txt -m %s -f %s ' %(path,path,path,std_lib,path,module,module,path))
fg = open('%s/tmp_random.txt' %(path), 'r')
gate_clk_data = [line.strip() for line in fg]
num_of_gates=int(gate_clk_data[0])
print "\nnum of gates is %d" %num_of_gates
num_of_clks=int(gate_clk_data[1])
print "\nnum of clocks is %d" %num_of_clks
fg.close()
#Fresh simulation
for loop in range(start_loop, (num_of_loops+1)):
#time.sleep(2)
#os.system('cd /home/user1/simulations/decoder ; ls; pwd;ls | wc -l' )
#time.sleep(5)
print "Now, creating multiple spice decks in spice_decks folder in current directory on the remote machine\n"
#os.system('python %s/python_repeat_deckgen_remote_seed.py -m %s -n %s -f %s -o %s -s %d' %(path,module,num_at_a_time,path,loop,seed_new))
#########################################repeat_deckgen copied starting from here#######################################
if os.path.isfile("%s/%s_reference_out/RTL.csv" %(path,module)):
print "****Removing the existing RTL.csv file in folder %s_reference_out ****\n" %(module)
os.remove("%s/%s_reference_out/RTL.csv" %(path,module))
#Now, we need the header in RTL.csv, so we create an RTL.csv and copy the headers from the RTL_backup.csv that we had saved from Netlstfrmt.pl
fout = open('%s/%s_reference_out/RTL.csv' %(path,module), 'w')
fin = open('%s/%s_reference_out/RTL_backup.csv' %(path,module), 'r')
in_data=fin.read()
fout.write(in_data)
fout.close()
fin.close()
if not os.path.exists('%s/spice_decks_%s' %(path,loop)):
os.mkdir('%s/spice_decks_%s' %(path,loop))
start= ((loop-1)*int(num_at_a_time)) + 1 # ((1-1)*10) +1 =1 , ((2-1)*10) +1 =11
end = (int(num_at_a_time))*loop #(10*1) = 10, (10*2)=20
print "***Inside repeat_deckgen. Executing deckgen to create decks and RTL.csv reference file\n***"
for loop_var in range(start, end+1):
rand_gate= int(random.randrange(num_of_gates)) #A random gate picked
#print "Random gate is: ",rand_gate
rand_clk= int(random.randrange(num_of_clks)) #A random clk picked
#print "Random clock cycle is: ",rand_clk
#perl perl_calculate_drain.pl -s reference_spice.sp -l glitch_osu018_stdcells_correct_vdd_gnd.sp -r decoder_behav_pnr_reference_out/tool_reference_out.txt -m decoder_behav_pnr -f /home/user1/simulations/decoder -g 27
os.system('perl %s/perl_calculate_drain.pl -s %s/reference_spice.sp -l %s/glitch_%s -r %s/%s_reference_out/tool_reference_out.txt -m %s -f %s -g %d ' %(path,path,path,std_lib,path,module,module,path,rand_gate))
fg = open('%s/tmp_random.txt' %(path), 'r')
drain_data = [line.strip() for line in fg]
num_of_drains=int(drain_data[0])
print "\nnum of drains is %d" %num_of_drains
fg.close()
#If num of drains is 2, randrange(2) returns 0 or 1,where as we want drain number 1 or drain number 2. so, doing +1
rand_drain= int(random.randrange(num_of_drains))+1 #A random drain picked.
#Arrival_time_part + initial_clk_part should add up to 1.5 clk periods
#The clk starts from low to high and then low, before the 2nd rising edge starts. The input is changed in the high period and the glitch is expected to arrrive later on, and before the next rising edge (when the latch will open)
#In every iteration, a different random number needs to be picked. Hence, this is inside the for loop
initial_clk_part = 1.5 - arrival_clk_part
initial_clk_part_abs = initial_clk_part * clk_period
#This means, glitch "can" occur before the input changes in the clk period as well. So, force the glitch to start only after input has changed
if (initial_clk_part_abs < end_PWL) :
initial_clk_part = end_PWL/clk_period
#unif=random.uniform(0,arrival_clk_part*clk_period)
#srand_glitch= (initial_clk_part*clk_period) + unif #A random glitch picked
unif=random.uniform(0,0.05*clk_period)
rand_glitch= (1.45*clk_period) + unif #arrival_clk + initial_clk should add up to 1.5
print "\nglitch within clk cycle= ",unif
print "\nRandom gate: %d\nRandom drain: %d\nRandom clock cycle:%d\nRandom glitch location:%e\n " %(rand_gate,rand_drain,rand_clk,rand_glitch)
frand.write("%d, %d, %d,%e\n" %(rand_gate,rand_drain,rand_clk,rand_glitch))
#perl deckgen_remote_seed.pl -s reference_spice.sp -l glitch_osu018_stdcells_correct_vdd_gnd.sp -r decoder_behav_pnr_reference_out/tool_reference_out.txt -n 1 -m decoder_behav_pnr -f /home/user1/simulations/decoder -g 27 -d 2 -c 10 -i 1.42061344093991e-09 -o 1
#deckgen.pl will need to be remotely executed through python_repeat_deckgen.py multiple number of times
os.system('perl %s/deckgen_remote_seed.pl -s %s/reference_spice.sp -l %s/glitch_%s -r %s/%s_reference_out/tool_reference_out.txt -n %d -m %s -f %s -o %s -g %s -d %s -c %s -i %s' %(path,path,path,std_lib,path,module,loop_var,module,path,loop,rand_gate,rand_drain,rand_clk,rand_glitch))
##################Script repeat_deckgen copied ends here####################################
##################################Comment this out if not using desktop to run##################################
#delete existing files on desktop machine and copy new files for simulation
#os.system('ssh nanditha@10.107.90.52 python /home/nanditha/simulations/%s/python_desktop_copy.py -p %s -d %s -l %d' %(design_folder,path,design_folder,loop))
################################################################################################################
#print "\nmaster machine.. listing the files and pausing\n"
#os.system('cd /home/user1/simulations/decoder/spice_decks_%d ; ls; pwd;ls | wc -l' %loop)
#time.sleep(1)
#print "\nssh to slave.. listing the files and pausing\n"
#os.system('ssh user1@192.168.1.8 pwd; cd /home/user1/simulations/decoder/spice_decks_%d; pwd;ls;pwd;ls | wc -l' %loop)
#time.sleep(3)
print "Running GNU Parallel and ngspice on the created decks\n"
os.system('python %s/python_GNUparallel_ngspice_remote_yuva.py -n %s -d %s -o %s -p %s' %(path,num_at_a_time,design_folder,loop,path))
seed_new= int(random.randrange(100000)*random.random()) #Used by compare script to backup random decks
#seed_new=seed*loop
print "New seed every outer loop is ", seed_new
#python_results_compare.py will then need to be remotely executed
#Might need to execute these last 3 in a loop till the results are acceptable
print "Comparing the RTL and spice outputs\n"
os.system('python %s/python_compare_remote_seed.py -m %s -f %s -n %s -t %s -l %d' %(path,module,path,num_at_a_time,tech,loop))
##########################################################
spice_dir = '%s/spice_decks_%s' %(path,loop)
if os.path.exists(spice_dir):
shutil.rmtree(spice_dir)
########################################End of loop########################################################
#For validation of backup spice files
shutil.copy('%s/glitch_%s' %(path,std_lib), '%s/backup_spice_decks' %path )
shutil.copy('%s/tsmc018.m' %path, '%s/backup_spice_decks' %path )
print "Combining all rtl diff files\n"
os.system('python %s/python_count_flips_remote_seed.py -f %s -n %s --group %s -s %s' %(path,path,num,num_at_a_time,seed)) #To save the seed to results file
| [
"nanditha.ec@gmail.com"
] | nanditha.ec@gmail.com |
843bf8e56df11ab1f797e49a66da18aedb1658a5 | d7a4a15a2ae27520febc0970aecf7440251f28be | /PE_29.py | 8cd067bae7ac1a49f2638599508b6aafbb1ca5d0 | [] | no_license | ITanmayee/Project_Euler | 9bfb2ca7f452ab29eb5591e3e593199d158ae21c | 1410e64226ef52fdd054883700004c69d23fe58b | refs/heads/main | 2023-05-13T00:50:37.243734 | 2021-06-04T15:30:25 | 2021-06-04T15:30:25 | 359,804,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # Consider all integer combinations of a^b for 2 <= a <= 5 and 2 <= b <= 5:
# How many distinct terms are in the sequence generated by a^b for 2 <= a <= 100 and 2 <= b <= 100?
def get_exp() :
terms = [a ** b for a in range(2,101) for b in range(2,101)]
distinct_terms = set(terms)
return len(distinct_terms)
print(get_exp())
| [
"noreply@github.com"
] | noreply@github.com |
d1c92b0e6a3ebd8bc736f57211f57c0e7ab3bf44 | 23166e339dec012b8621f8e13bdb96f4cc84bc6e | /spamer.py | 43bb94d2a1ef6ee5dc0380a3dd5e0dff17d82bcc | [] | no_license | antonperechnev/code_sample | c48c63ff398126bced08d25133d4b34fe58c6a56 | 60305b0a193a8ba8f7ffab02ce9470b0553c4d8c | refs/heads/master | 2020-04-05T03:18:24.737042 | 2019-11-21T12:32:41 | 2019-11-21T12:32:41 | 156,509,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,996 | py | import pika
from pika.exceptions import ConnectionClosed
import backoff
import configparser
config = configparser.ConfigParser()
config.read('../Config/project_settings.ini')
# TODO get from config
class Sender:
USER = config['RabbitMQ']['user']
PASSWORD = config['RabbitMQ']['password']
HOST = config['RabbitMQ']['host']
PORT = config['RabbitMQ']['port']
def __init__(self, queue_name):
self.queue = queue_name
self.key = queue_name
self._connect()
def _connect(self):
self.credentials = pika.PlainCredentials(self.USER, self.PASSWORD)
self.parameters = pika.ConnectionParameters(host=self.HOST, port=self.PORT) # , credentials=self.credentials)
self.connection = pika.BlockingConnection(parameters=self.parameters)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.queue, durable=True)
@backoff.on_exception(backoff.expo, ConnectionClosed, max_tries=5)
def send(self, body):
try:
self.channel.basic_publish(
exchange='', routing_key=self.key, body=body,
properties=pika.BasicProperties(delivery_mode=2, )
)
print('message sent')
self.connection.close()
except ConnectionClosed:
self._connect()
raise ConnectionClosed
class Receiver(Sender):
def __init__(self, queue_name):
super().__init__(queue_name)
def consume(self, callback):
self.channel.basic_qos(prefetch_count=1)
self.channel.basic_consume(queue=self.queue, on_message_callback=callback)
try:
self.channel.start_consuming()
except KeyboardInterrupt:
self.channel.stop_consuming()
self.connection.close()
except (pika.exceptions.AMQPChannelError, pika.exceptions.ConnectionClosed) as e:
print(e)
exit(1)
| [
"noreply@github.com"
] | noreply@github.com |
a98b4f69bc5aa2c9685136e7a5cb05abbdec9f19 | 8a5bf9c17fc46897e612e9a378ef928ab93c232a | /server.py | 6a7c790e8e6888c4e3aa451c3e21cd1c1efabb6a | [] | no_license | m7jay/ChatApp | b259924fd6b512324004fc7c8732b52be4fb2cf3 | 50a03b8bbdb660467af0e45420f6fd918a4d1c1f | refs/heads/master | 2022-07-29T07:15:42.353183 | 2020-05-18T13:48:45 | 2020-05-18T13:48:45 | 264,642,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py | """
### A server running on the local system listening to new connections, receive new messages and broadcast the message
### uses a header for sending and receiving data
### UTF-8 encoding is used for messages
"""
import socket
import select
HEADER_LENGTH = 10 #length of the message can be 10 digits
IP_Address = "127.0.0.1"
Port = 1234
"""
### create server
### AF_INET specifies the IPv4
### SOCK_STREAM specifies the TCP
"""
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #to set the socket option to reuse the address
server_socket.bind((IP_Address, Port)) #binding the IP and the port
server_socket.listen() #listen to new connections
socket_list = [server_socket] #list of sockets
clients = {} #socket to user name mapping
print(f"Started the server on {IP_Address}:{Port}...")
"""
### function to receive the messages in format,
### | header | data |
"""
def receive_message(client_socket):
try:
#get the msg header
message_header = client_socket.recv(HEADER_LENGTH)
#if header not received, client connection closed
if not len(message_header):
return False
#get the message length
message_length = int(message_header.decode("utf-8").strip())
return {'header':message_header, 'data':client_socket.recv(message_length)}
except:
return False
while True:
#get the list of sockets to be read and the list of sockets with exceptions
read_sockets, _, exception_sockets = select.select(socket_list, [], socket_list)
for s in read_sockets:
#if the socket is the server, then a new connection has come
if s == server_socket:
client_socket, client_addr = server_socket.accept()
user = receive_message(client_socket) #receive the user name
if user is False:
continue
socket_list.append(client_socket) #add the new socket to the list
clients[client_socket] = user #create a mapping of socket to user
print("New connection established from {}:{}, username: {}".format(*client_addr, user['data'].decode("utf-8")))
#new message arrived
else:
msg = receive_message(s)
#if msg not received, something went wrong and connection lost
if msg is False:
print("Failed to receive the msg from: {}".format(clients[s]["data"].decode("utf-8")))
socket_list.remove(s)
del clients[s]
continue
user = clients[s]
print(f'Received msg from {user["data"].decode("utf-8")}: {msg["data"].decode("utf-8")}')
#send to message to other sockets
for client in clients:
if client != s:
client.send(user["header"] + user["data"] + msg["header"] + msg["data"])
#if there is an exception socket, something went wrong, close it
for s in exception_sockets:
socket_list.remove(s)
del clients[s] | [
"noreply@github.com"
] | noreply@github.com |
781475d82188e60d5cdf6c3c1abdd4222dee893d | 2bdbbf4fe09b87873d854f828886124873fe0cbb | /src/products/models.py | 52f85ae3eda71be6085e5716af9acd39de2f6215 | [] | no_license | Skylighty/ecommence-project | 2fbb488c09e419939eceda20133d67871e2a7d03 | 32e903681cc6c9c79457f634fdb9471a950c5ec8 | refs/heads/master | 2023-02-27T03:14:59.021485 | 2021-02-07T13:48:16 | 2021-02-07T13:48:16 | 336,797,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from django.db import models
# Create your models here.
class Product(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=15, decimal_places=2)
onSale = models.BooleanField(default=False)
gallery = models.ImageField(upload_to=None, height_field=None, width_field=None, max_length=100, default='https://redzonekickboxing.com/wp-content/uploads/2017/04/default-image-620x600.jpg') | [
"pawgasiewski@gmail.com"
] | pawgasiewski@gmail.com |
14e8951c52da671d4febde1f050c47f3b07b1924 | b625c3879d2ac00e1788ac5260793cf906c58338 | /main.py | e0fbeb1ffe31482ff30daf03074fc466ae255d19 | [
"MIT"
] | permissive | luc1991/CyclicVoltammetry | c8746ecc2e3cdadc9101a98fffb66e346eb5d364 | fa72169ef66a3c6b1011ad16ca18436b55c6e4f7 | refs/heads/master | 2020-04-25T11:27:29.927716 | 2019-02-25T23:38:12 | 2019-02-25T23:38:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | py | import math
import numpy
import matplotlib.pyplot as plt
n = 100
Ox = numpy.zeros((n, 1))
Red = numpy.zeros((n, 1))
S = numpy.zeros((n, 1))
P = numpy.zeros((n, 1))
k1 = 1
k2 = 1
k3 = 1
for i in range(0, n):
Ox[i] = 100
S[i] = 100
E = 3
dt = 0.001
D = 0.001
E0 = 0
iR = 0.01
lk = 0
for i in range(0, 8000):
lk = lk + 1
if (lk < 500):
E = E - (6. / 500)
else:
E = E + (6. / 500)
if (lk >= 1000):
lk = 0
print(lk)
#print(E)
new_ox = numpy.array(Ox)
new_red = numpy.array(Red)
new_s = numpy.array(S)
new_p = numpy.array(P)
p = numpy.exp(E - E0)
q = Ox[0] + Red[0]
new_ox[0] = q - (q / (p + 1))
new_red[0] = q / (p+1)
I = Red[0] - new_red[0]
# Then contribution from iR drop. V = IR, I = V/R
I = I + E * iR
# And now change other things
#
# Diffusion;
for p in range(0, n-2):
diff_ox = D * (Ox[p] - Ox[p+1]) * Ox[p]
diff_red = D * (Red[p] - Red[p+1]) * Red[p]
diff_s = D * (S[p] - S[p+1]) * S[p]
diff_p = D * (P[p] - P[p+1]) * P[p]
new_ox[p] = new_ox[p] - diff_ox
new_ox[p+1] = new_ox[p+1] + diff_ox
new_red[p] = new_red[p] - diff_red
new_red[p+1] = new_red[p+1] + diff_red
new_s[p] = new_s[p] - diff_s
new_s[p+1] = new_s[p+1] + diff_s
new_p[p] = new_p[p] - diff_p
new_p[p+1] = new_p[p+1] + diff_p
if (new_p[p] < 0):
new_p[p] = 0
if (new_s[p] < 0):
new_s[p] = 0
# Reaction;
for p in range(0, n-1):
if (Red[p] > 0 and new_s[p] > 0):
dP = dt * k3 * Red[p] * new_s[p]
if (dP < 0):
exit()
if (dP > 1):
dP = 1
new_ox[p] = new_ox[p] + dP
new_red[p] = new_red[p] - dP
new_s[p] = new_s[p] - dP
new_p[p] = new_p[p] + dP
#print(new_p[p])
if (numpy.isnan(I)):
exit()
# Calculate current for each position
currents = numpy.zeros((n, 1))
for p in range(0, n - 1):
currents[p] = Red[p] - new_red[p]
Ox = numpy.array(new_ox)
Red = numpy.array(new_red)
S = numpy.array(new_s)
P = numpy.array(new_p)
print("%f, %f, %f, %f, %f, %f" % (I, E, Ox[0], Red[0], S[0], P[0]))
if (i % 100 == 0):
t = numpy.arange(0, n )
plt.plot(t, Ox, 'r', t, Red, 'b')
plt.ylim((0, 100))
plt.savefig(str(i) + ".conc.png")
plt.clf()
plt.plot(t, currents, 'b')
plt.ylim((-0.1, 0.1))
plt.savefig(str(i) + ".current.png")
plt.clf()
plt.plot(t, S, 'r', t, P, 'b')
plt.ylim((0, 110))
plt.savefig(str(i) + ".substrate.png")
plt.clf()
# with open(str(i)+".file", "w") as f:
# for l in range(0, n - 1):
# f.write("%d, %f, %f, %f, %f, %f\n" % (l, Ox[l], Red[l], S[l], P[l], currents[l]))
#print(Ox[1])
| [
"ben@tatmans.co.uk"
] | ben@tatmans.co.uk |
204725189c391dd3e806d873cbfe517466784569 | 82387646b7c503ede5bc6caa6b89ad64bc7151b6 | /Práctica_pandas_1/ej6.py | 6b3352d0215794fc39bbcee6bdbd35f2491c0e00 | [] | no_license | nicoaizen/Fundamentos_de_informatica_Aizen | 4fbc55cd92c4ad73b2a7e05feeffef5bf913ea74 | 3e2567d399c9952ce522f88f2057cf788091000a | refs/heads/main | 2023-06-08T05:30:46.090587 | 2021-07-05T18:40:23 | 2021-07-05T18:40:23 | 358,275,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # Ejercicio 6
# Escribí un programa que muestre un resumen de la información básica de un DataFrame y sus datos.
import pandas as pd
datos_ejemplo = {"nombre": ["Agustina", "Diana", "Karen", "Julián", "Emilio", "Miguel", "Mateo", "Laura", "Jorge", "Lucas"], "puntaje": [12.5, 9, 16.5, 13, 9, 20, 14.5, 10, 8, 19], "intentos": [1, 3, 2, 3, 2, 3, 1, 1, 2, 1], "califica": [1, 0, 1, 0, 0, 1, 1, 0, 0, 1]}
labels = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
df = pd.DataFrame(datos_ejemplo, index=labels)
print(df.describe) | [
"nicoaizen@gmail.com"
] | nicoaizen@gmail.com |
cc60234f67b5a45a081d2732d2061bff78736d09 | a40a9dd318cc214ecbaac76f2149008880b306b7 | /LeetCode/convertToTitle.py | f08ea6eb69966a971d084c07a6547d9e75d5d004 | [] | no_license | nexusme/leetcode_try | fa75962239794f0ce905c7dff8eb24fd923082dd | cc83a8200beaccc5016db5bebdaa8ae6213c2c3b | refs/heads/master | 2022-12-15T05:24:28.056331 | 2020-09-03T09:39:25 | 2020-09-03T09:39:25 | 292,527,281 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | #
# 给定一个正整数,返回它在 Excel 表中相对应的列名称。
# 例如,
#
# 1 -> A
# 2 -> B
# 3 -> C
# ...
# 26 -> Z
# 27 -> AA
# 28 -> AB
def convertToTitle(n):
"""
:type n: int
:rtype: str
"""
res = ''
while n:
n, y = divmod(n, 26)
print(n, y)
if y == 0:
n -= 1
y = 26
res = chr(y + 64) + res
print(res)
return res
convertToTitle(703)
| [
"you@nexus_me@hotmail.com"
] | you@nexus_me@hotmail.com |
79bc6849f6a2ce9dc29373de34993f7f25a284b5 | e0d23cdf422a1697766f068bc4f2263d70b27ced | /app/admin/views.py | 5b46e2f6083f18c7adba63babb5fcce3c4eb9864 | [] | no_license | Kittyqm/movie | 41e3c140591579458087f05d04cb6e4f06d42f8a | edeb493f149b9e731112cf804088bec2ccca1abd | refs/heads/master | 2021-08-27T22:12:55.802224 | 2017-12-10T13:56:08 | 2017-12-10T13:56:08 | 113,752,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,729 | py | # coding=utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from . import admin
from flask import Flask, render_template, flash, redirect, url_for, session, request, abort
from app.admin.forms import LoginForm, TagFrom, MovieForm, PrevieForm, PwdForm, AuthForm, RoleForm, AdminForm
from app.models import Admin, Tag, Movie, Preview, User, Comment, Moviecol, Oplog, Adminlog, Userlog, Auth, Role
from functools import wraps
from app import db, app
from werkzeug.utils import secure_filename # 将filename改成一个安全的名称
import os
import uuid
import datetime
# 上下问处理器---获取当前时间
@admin.context_processor
def tpl_extra():
    """Context processor: expose ``online_time`` (current server time,
    formatted ``YYYY-MM-DD HH:MM:SS``) to every admin template."""
    now = datetime.datetime.now()
    return dict(online_time=now.strftime('%Y-%m-%d %H:%M:%S'))
# 登陆装饰器
def admin_login_req(f):
    """Decorator: require a logged-in admin session.

    Redirects anonymous requests to the login page, preserving the
    originally requested URL in the ``next`` query parameter.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Missing key and an explicit None are treated the same way.
        if session.get('admin') is None:
            return redirect(url_for('admin.login', next=request.url))
        return f(*args, **kwargs)
    return decorated_function
# 权限控制装饰器
def admin_auth(f):
    """Decorator: enforce role-based access control on admin views.

    Loads the current admin together with its role, parses the role's
    comma-separated auth id list, resolves those ids to registered
    ``Auth.url`` values and aborts with 404 when the requested URL rule
    is not among them.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        admin = Admin.query.join(
            Role
        ).filter(
            Role.id == Admin.role_id,
            Admin.id == session["admin_id"]
        ).first()
        auths = admin.role.auths
        # BUGFIX: split(',') yields strings while Auth.id is an integer,
        # so the original `val == v.id` comparison never matched and every
        # permission check failed.  Cast to int, skipping empty/garbage
        # fragments (e.g. a trailing comma).
        auths = [int(v) for v in auths.split(',') if v.strip().isdigit()]
        auth_list = Auth.query.all()
        urls = [v.url for v in auth_list for val in auths if val == v.id]
        rule = request.url_rule
        if str(rule) not in urls:
            abort(404)
        return f(*args, **kwargs)
    return decorated_function
# 修改文件名称
def change_filename(filename):
    """Build a unique server-side filename, keeping the original extension.

    The result is ``<YYYYmmddHHMMSS><uuid4 hex><ext>`` so uploads never
    collide and unsafe client-supplied names are discarded.
    """
    ext = os.path.splitext(filename)[-1]
    stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    return stamp + str(uuid.uuid4().hex) + ext
@admin.route("/")
@admin_login_req
def index():
""" 后台首页 """
return render_template('admin/index.html')
@admin.route("/login/", methods=["GET", "POST"])
def login():
""" 后台登陆 """
form = LoginForm()
# 表示表单提交的时候要进行验证
if form.validate_on_submit():
data = form.data
admin = Admin.query.filter_by(name=data["account"]).first()
if not admin.check_pwd(data['pwd']):
flash('密码错误!', 'err')
return redirect(url_for('admin.login'))
session['admin'] = data['account']
session['admin_id'] = admin.id
adminlog = Adminlog(
admin_id=admin.id,
ip=request.remote_addr
)
db.session.add(adminlog)
db.session.commit()
return redirect(request.args.get('next') or url_for('admin.index'))
return render_template('admin/login.html', form=form)
@admin.route("/logout/")
@admin_login_req
def logout():
""" 后台退出 """
session.pop('admin', None)
session.pop('admin_id', None)
return render_template('admin/login.html')
@admin.route("/pwd/", methods=['GET', 'POST'])
@admin_login_req
def pwd():
"""后台密码修改"""
form = PwdForm()
if form.validate_on_submit():
data = form.data
admin = Admin.query.filter_by(name=session['admin']).first()
from werkzeug.security import generate_password_hash
admin.pwd = generate_password_hash(data['new_pwd'])
db.session.add(admin)
db.session.commit()
flash('修改密码成功!', 'ok')
redirect(url_for('admin.logout'))
return render_template('admin/pwd.html', form=form)
@admin.route("/tag/add/", methods=['GET', 'POST'])
@admin_login_req
def tag_add():
"""后台添加标签页"""
form = TagFrom()
if form.validate_on_submit():
data = form.data
tag = Tag.query.filter_by(name=data['name']).count()
if tag == 1:
flash(u'名称已经纯在!', 'err')
return redirect(url_for('admin.tag_add'))
tag = Tag(name=data['name'])
db.session.add(tag)
db.session.commit()
flash(u'添加标签成功', 'ok')
oplog = Oplog(
admin_id=session['admin_id'],
ip=request.remote_addr,
reason='添加标签%s' % data['name']
)
db.session.add(oplog)
db.session.commit()
return redirect(url_for('admin.tag_add'))
return render_template('admin/tag_add.html', form=form)
@admin.route("/tag/list/<int:page>/", methods=['GET'])
@admin_login_req
def tag_list(page=None):
"""后台标签列表"""
if page is None:
page = 1
page_data = Tag.query.order_by(Tag.addtime.desc()).paginate(page=page, per_page=10) # 当此处填写1的时候遍历部分会出错
return render_template('admin/tag_list.html', page_data=page_data)
@admin.route('/tag/del/<int:id>/', methods=['GET'])
@admin_login_req
def tag_del(id=None):
    """Delete the tag with the given id (404 when it does not exist)."""
    victim = Tag.query.filter_by(id=id).first_or_404()
    db.session.delete(victim)
    db.session.commit()
    flash(u'删除标签成功', 'ok')
    return redirect(url_for('admin.tag_list', page=1))
@admin.route('/tag/edit/<int:id>/', methods=['GET', 'POST'])
@admin_login_req
def tag_edit(id=None):
    """Edit a tag's name; refuses to rename onto an existing tag."""
    form = TagFrom()
    tag = Tag.query.filter_by(id=id).first_or_404()
    if form.validate_on_submit():
        data = form.data
        tag_count = Tag.query.filter_by(name=data['name']).count()
        # BUGFIX: the duplicate check was inverted -- it fired when the
        # name was *unchanged* and let a rename collide with an existing
        # tag.  Flag a conflict only when the name actually changed to one
        # already taken (same pattern as movie_edit below).
        if tag_count == 1 and tag.name != data['name']:
            flash(u'名称已经存在!', 'err')
            return redirect(url_for('admin.tag_edit', id=id))
        tag.name = data['name']
        db.session.add(tag)
        db.session.commit()
        flash(u'修改成功!', 'ok')
        return redirect(url_for('admin.tag_edit', id=id))
    return render_template('admin/tag_edit.html', form=form, tag=tag)
@admin.route("/movie/add/", methods=['GET', 'POST'])
@admin_login_req
def movie_add():
"""后台电影添加"""
form = MovieForm()
if form.validate_on_submit():
data = form.data
file_url = secure_filename(form.url.data.filename)
file_logo = secure_filename(form.logo.data.filename)
if not os.path.exists(app.config['UP_DIR']):
os.makedirs(app.config['UP_DIR'])
print (os.makedirs(app.config['UP_DIR']))
os.chmod(app.config['UP_DIR'], 'rw')
url = change_filename(file_url)
logo = change_filename(file_logo)
form.url.data.save(app.config['UP_DIR'] + url)
form.logo.data.save(app.config['UP_DIR'] + logo)
movie = Movie(
title=data['title'],
url=url,
info=data['info'],
logo=logo,
star=data['star'],
# playnum=data['playnum'],
playnum=0,
# commentnum=data['commentnum'],
commentnum=0,
tag_id=data['tag_id'],
area=data['area'],
release_time=data['release_time'],
length=data['length']
)
db.session.add(movie)
db.session.commit()
flash('添加电影成功!', 'ok')
return redirect(url_for('admin.movie_add'))
return render_template('admin/movie_add.html', form=form)
@admin.route("/movie/list/<int:page>", methods=['GET'])
@admin_login_req
def movie_list(page=None):
"""后台电影列表"""
if page is None:
page = 1
page_data = Movie.query.join(Tag).filter(Tag.id == Movie.tag_id).order_by(Movie.addtime.desc()).paginate(page=page,
per_page=10) # 当此处填写1的时候遍历部分会出错
return render_template('admin/movie_list.html', page_data=page_data)
@admin.route('/movie/del/<int:id>/', methods=['GET', 'POST'])
@admin_login_req
def movie_del(id):
    """Delete the movie with the given id (404 when it does not exist)."""
    victim = Movie.query.filter_by(id=id).first_or_404()
    db.session.delete(victim)
    db.session.commit()
    flash(u'删除电影成功', 'ok')
    return redirect(url_for('admin.movie_list', page=1))
@admin.route('/movie/edit/<int:id>', methods=['GET', 'POST'])
@admin_login_req
def movie_edit(id=None):
    """Edit an existing movie; file uploads are optional when editing."""
    form = MovieForm()
    # Files are optional on edit: drop the required-file validators.
    form.url.validators = []
    form.logo.validators = []
    movie = Movie.query.get_or_404(int(id))
    if request.method == 'GET':
        # Pre-fill widgets that cannot be set from the template.
        form.info.data = movie.info
        form.tag_id.data = movie.tag_id
        form.star.data = movie.star
    if form.validate_on_submit():
        data = form.data
        movie_count = Movie.query.filter_by(title=data['title']).count()
        if movie_count == 1 and movie.title != data['title']:
            flash('片名已经存在!', 'err')
            return redirect(url_for('admin.movie_edit', id=id))
        if not os.path.exists(app.config['UP_DIR']):
            # BUGFIX: dropped the duplicated makedirs call (it raised
            # OSError on the second invocation) and gave chmod a numeric
            # mode instead of the invalid string 'rw'.
            os.makedirs(app.config['UP_DIR'])
            os.chmod(app.config['UP_DIR'], 0o755)
        if form.url.data.filename != '':
            file_url = secure_filename(form.url.data.filename)
            movie.url = change_filename(file_url)
            form.url.data.save(app.config['UP_DIR'] + movie.url)
        if form.logo.data.filename != '':
            # BUGFIX: the original read the non-existent attribute
            # `movie.file_logo`, crashing whenever a new cover was uploaded.
            file_logo = secure_filename(form.logo.data.filename)
            movie.logo = change_filename(file_logo)
            form.logo.data.save(app.config['UP_DIR'] + movie.logo)
        movie.star = data['star']
        movie.tag_id = data['tag_id']
        movie.info = data['info']
        movie.title = data['title']
        movie.area = data['area']
        movie.length = data['length']
        movie.release_time = data['release_time']
        db.session.add(movie)
        db.session.commit()
        flash('修改电影成功!', 'ok')
        return redirect(url_for('admin.movie_edit', id=movie.id))
    return render_template('admin/movie_edit.html', form=form, movie=movie)
@admin.route("/preview/add/", methods=['GET', 'POST'])
@admin_login_req
def preview_add():
"""后台电影预告"""
form = PrevieForm()
if form.validate_on_submit():
data = form.data
file_logo = secure_filename(form.logo.data.filename)
if not os.path.exists(app.config['UP_DIR']):
os.makedirs(app.config['UP_DIR'])
print (os.makedirs(app.config['UP_DIR']))
os.chmod(app.config['UP_DIR'], 'rw')
logo = change_filename(file_logo)
form.logo.data.save(app.config['UP_DIR'] + logo)
preview = Preview(
title=data['title'],
logo=logo
)
db.session.add(preview)
db.session.commit()
flash('添加电影预告成功!', 'ok')
return redirect(url_for('admin.preview_add'))
return render_template('admin/preview_add.html', form=form)
@admin.route("/preview/list/<int:page>", methods=['GET'])
@admin_login_req
def preview_list(page=None):
"""后台预告列表"""
if page is None:
page = 1
page_data = Preview.query.order_by(Preview.addtime.desc()).paginate(page=page, per_page=10) # 当此处填写1的时候遍历部分会出错
return render_template('admin/preview_list.html', page_data=page_data)
@admin.route("/preview/edit/<int:id>", methods=['GET', 'POST'])
@admin_login_req
def preview_edit(id):
"""后台预告编辑"""
form = PrevieForm()
preview = Preview.query.get_or_404(int(id))
if request.method == 'GET':
form.logo.data = preview.logo
if form.is_submitted():
data = form.data
preview_count = preview.query.filter_by(title=data['title']).count()
if preview_count == 1 and preview.title != data['title']:
print '111'
flash('预告名已经存在!', 'err')
return redirect(url_for('admin.preview_edit', id=id))
if not os.path.exists(app.config['UP_DIR']):
os.makedirs(app.config['UP_DIR'])
print (os.makedirs(app.config['UP_DIR']))
os.chmod(app.config['UP_DIR'], 'rw')
if form.logo.data.filename != '':
preview.file_logo = secure_filename(form.logo.data.filename)
preview.logo = change_filename(preview.file_logo)
form.logo.data.save(app.config['UP_DIR'] + preview.logo)
preview.title = data['title']
db.session.add(preview)
db.session.commit()
flash('修改预告成功!', 'ok')
return redirect(url_for('admin.preview_edit', id=preview.id))
return render_template('admin/preview_edit.html', id=id, form=form, preview=preview)
@admin.route("/preview/del/<int:id>", methods=['GET'])
@admin_login_req
def preview_del(id=None):
"""后台预告删除"""
preview = Preview.query.filter_by(id=id).first_or_404()
db.session.delete(preview)
db.session.commit()
flash(u'删除预告成功', 'ok')
return redirect(url_for('admin.preview_list', page=1))
@admin.route("/user/list/<int:page>", methods=['GET', 'POST'])
@admin_login_req
def user_list(page):
"""后台会员列表"""
if page is None:
page = 1
page_data = User.query.order_by(User.addtime.desc()).paginate(page=page, per_page=10) # 当此处填写1的时候遍历部分会出错
return render_template('admin/user_list.html', page_data=page_data)
@admin.route("/user/view/<int:id>")
@admin_login_req
def user_view(id=None):
"""后台会员详情"""
user_data = User.query.filter_by(id=id).first()
return render_template('admin/user_view.html', user_data=user_data)
@admin.route("/user/del/<int:id>")
@admin_login_req
def user_del(id=None):
"""后台会员删除"""
user_count = User.query.filter_by(id=id).first_or_404()
db.session.delete(user_count)
db.session.commit()
flash(u'删除会员成功', 'ok')
return redirect(url_for('admin.user_list', page=1))
@admin.route("/comment/list/<int:page>", methods=['GET', 'POST'])
@admin_login_req
def comment_list(page=None):
"""后台评论详情"""
if page is None:
page = 1
page_data = Comment.query.join(
Movie
).join(
User
).filter(
Movie.id == Comment.movie_id,
User.id == Comment.user_id
).order_by(Comment.addtime.desc()).paginate(page=page, per_page=10) # 当此处填写1的时候遍历部分会出错
return render_template('admin/comment_list.html', comment=page_data)
@admin.route("/comment/del/<int:id>")
@admin_login_req
def comment_del(id=None):
"""后台评论删除"""
comment_count = Comment.query.filter_by(id=id).first_or_404()
db.session.delete(comment_count)
db.session.commit()
flash(u'删除评论成功', 'ok')
return redirect(url_for('admin.comment_list', page=1))
@admin.route("/moviecol/list/<int:page>", methods=['GET'])
@admin_login_req
def moviecol_list(page=None):
"""后台电影收藏"""
if page is None:
page = 1
page_data = Moviecol.query.join(
Movie
).join(User).filter(
Movie.id == Moviecol.movie_id,
User.id == Moviecol.user_id
).order_by(
Moviecol.addtime.desc()
).paginate(page=page, per_page=10)
return render_template('admin/moviecol_list.html', page_data=page_data)
@admin.route("/moviecol/del/<int:id>", methods=['GET'])
@admin_login_req
def moviecol_del(id=None):
"""后台电影收藏删除"""
moviecol_count = Moviecol.query.filter_by(id=id).first_or_404()
db.session.delete(moviecol_count)
db.session.commit()
flash(u'删除收藏成功', 'ok')
return redirect(url_for('admin.moviecol_list', page=1))
@admin.route("/oplog/list/<int:page>", methods=['GET'])
@admin_login_req
def oplog_list(page=None):
"""后台操作日志"""
if page is None:
page = 1
page_data = Oplog.query.join(
Admin
).filter(
Admin.id == Oplog.admin_id
).order_by(
Oplog.addtime.desc()
).paginate(page=page, per_page=10)
return render_template('admin/oplog_list.html', page_data=page_data)
@admin.route("/adminloginlog/list/<int:page>", methods=['GET'])
@admin_login_req
def adminloginlog_list(page=None):
"""后台管理员页面"""
if page is None:
page = 1
page_data = Adminlog.query.join(
Admin
).filter(
Admin.id == Adminlog.admin_id
).order_by(
Adminlog.addtime.desc()
).paginate(page=page, per_page=10)
print page_data
return render_template('admin/adminloginlog_list.html', page_data=page_data)
@admin.route("/userloginlog/list/<int:page>")
@admin_login_req
def userloginlog_list(page=None):
"""后台会员管理员页面"""
if page is None:
page = 1
page_data = Userlog.query.join(
User
).filter(
User.id == Userlog.user_id
).order_by(
Userlog.addtime.desc()
).paginate(page=page, per_page=10)
return render_template('admin/userloginlog_list.html', page_data=page_data)
@admin.route("/role/add/", methods=['GET', 'POST'])
@admin_login_req
def role_add():
"""后台添加角色"""
form = RoleForm()
if form.validate_on_submit():
data = form.data
role = Role(
name=data['name'],
auths=','.join(map(lambda v: str(v), data['auths']))
)
db.session.add(role)
db.session.commit()
flash('添加角色成功!', 'ok')
return render_template('admin/role_add.html', form=form)
@admin.route("/role/list/<int:page>")
@admin_login_req
def role_list(page=None):
"""后台角色列表"""
if page is None:
page = 1
page_data = Role.query.order_by(
Role.addtime.desc()
).paginate(page=page, per_page=10)
return render_template('admin/role_list.html', page_data=page_data)
@admin.route('/role/del/<int:id>', methods=['GET'])
@admin_login_req
def role_del(id=None):
"""后台权限删除"""
role = Role.query.filter_by(id=id).first_or_404()
db.session.delete(role)
db.session.commit()
flash('删除角色成功!', 'ok')
return redirect(url_for('admin.role_list', page=1))
@admin.route('/role/edit/<int:id>', methods=['GET', 'POST'])
@admin_login_req
def role_edit(id=None):
form = RoleForm()
role = Role.query.get_or_404(id)
if request.method == 'GET':
auths = role.auths
form.auths.data = list(map(lambda v: int(v), auths.split(',')))
if form.validate_on_submit():
data = form.data
role.auths = ','.join(map(lambda v: str(v), data['auths']))
role.name = data['name']
db.session.add(role)
db.session.commit()
flash('修改角色成功!', 'ok')
redirect(url_for('admin.role_edit', id=id))
return render_template('admin/role_edit.html', form=form, role=role)
@admin.route("/auth/add/", methods=['GET', 'POST'])
@admin_login_req
def auth_add():
"""后台权限添加"""
form = AuthForm()
if form.validate_on_submit():
data = form.data
auth = Auth(
name=data['name'],
url=data['url']
)
db.session.add(auth)
db.session.commit()
flash('添加权限成功!', 'ok')
return render_template('admin/auth_add.html', form=form)
@admin.route("/auth/list/<int:page>", methods=['GET'])
@admin_login_req
def auth_list(page=None):
"""后台权限列表"""
if page is None:
page = 1
page_data = Auth.query.order_by(
Auth.addtime.desc()
).paginate(page=page, per_page=10)
return render_template('admin/auth_list.html', page_data=page_data)
@admin.route('/auth/del/<int:id>', methods=['GET'])
@admin_login_req
def auth_del(id=None):
"""后台权限删除"""
auth = Auth.query.filter_by(id=id).first_or_404()
db.session.delete(auth)
db.session.commit()
flash('删除权限成功!', 'ok')
return redirect(url_for('admin.auth_list', page=1))
@admin.route('/auth/edit/<int:id>', methods=['GET', 'POST'])
@admin_login_req
def auth_edit(id=None):
form = AuthForm()
auth = Auth.query.get_or_404(id)
if form.validate_on_submit():
data = form.data
auth.url = data['url']
auth.name = data['name']
db.session.add(auth)
db.session.commit()
flash('修改权限成功!', 'ok')
redirect(url_for('admin.auth_edit', id=id))
return render_template('admin/auth_edit.html', form=form, auth=auth)
@admin.route("/admin/add/", methods=['GET', 'POST'])
@admin_login_req
def admin_add():
"""后台添加管理员"""
form = AdminForm()
from werkzeug.security import generate_password_hash
if form.validate_on_submit():
data = form.data
admin_count = Admin.query.filter_by(name=data['name']).count()
if admin_count == 1 and Admin.name != data['name']:
flash('管理员名已经存在!', 'err')
return redirect(url_for('admin.admin_add', id=id))
admin = Admin(
name=data['name'],
pwd=generate_password_hash(data["pwd"]),
role_id=data['role_id'],
is_super=1
)
db.session.add(admin)
db.session.commit()
flash('添加管理员成功!', 'ok')
return render_template('admin/admin_add.html', form=form)
@admin.route("/admin/list/<int:page>", methods=['GET', 'POST'])
@admin_login_req
def admin_list(page=None):
"""后台管理员列表"""
if page is None:
page = 1
page_data = Admin.query.join(
Role
).filter(
Role.id == Admin.role_id
).order_by(
Admin.addtime.desc()
).paginate(page=page, per_page=10)
return render_template('admin/admin_list.html', page_data=page_data)
| [
"aa931912343@qq.com"
] | aa931912343@qq.com |
792469a250c93b0821f6ef2bae12966bca7d73cb | ce1eb4545eda86a68dedf82c1d793caa84d082f4 | /bin/src/server.py | 5be3df4df35ddf64f8f68ed42d667a1a2dabc2f8 | [] | no_license | mmalyutin/car | 4acc1b0673a718e3316e694f82796707bec27527 | 5b80cca70410b3a6a2cd43704edbe1a67ddd1d8a | refs/heads/master | 2020-07-02T08:17:08.561182 | 2019-07-30T10:53:24 | 2019-07-30T10:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,346 | py | #!/usr/bin/python3
#-*- coding: utf-8 -*-
# Server is stay in GAZ-66.
import time
import wiringpi
import sys, time
import socket
import json
from threading import Thread
from socketserver import ThreadingMixIn
import RPi.GPIO as GPIO
import PWM
import os
import sys
sys.path.append('../../conf')
import conf
from HardwareSetting import HardwareSetting
from CarStatus import *
'''
class CarStatus:
def __init__(self):
self.status = {}
self.status['light'] = False
self.status['move'] = 0
self.status['turn'] = 0
def __del__(self):
return
'''
class ServerThread(Thread, conf.conf):
tcpServer = None
threads = []
def __init__(self):
Thread.__init__(self)
def __del__(self):
pass
def run(self):
TCP_IP = conf.conf.ServerIP
TCP_PORT = conf.conf.controlServerPort
BUFFER_SIZE = conf.conf.ServerBufferSize
self.tcpServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcpServer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.tcpServer.bind((TCP_IP, TCP_PORT))
threads = []
# Максимальное колличество подключений в очереди.
self.tcpServer.listen(1)
while True:
print("Car server up : Waiting for connections from TCP clients...")
(conn, (ip, port)) = self.tcpServer.accept()
newthread = ClientThread(conn, ip, port)
newthread.start()
self.threads.append(newthread)
def wait(self):
for t in self.threads:
t.join()
# Класс отвечает за обработку команд пульта управления.
class ClientThread(Thread, conf.conf, HardwareSetting):
def __init__(self, conn, ip, port):
Thread.__init__(self)
self.conn = conn
self.ip = ip
self.port = port
print("[+] New server socket thread started for " + ip + ":" + str(port))
# Класс состояния машинки.
#self.CarStatus = CarStatus()
GPIO.cleanup()
# Инициализация пинов
GPIO.setmode(GPIO.BCM)
self.statusLight = False
self.gpioLight = 17
#wiringpi.pinMode(self.gpioLight, wiringpi.GPIO.PWM_OUTPUT)
GPIO.setup(self.gpioLight, GPIO.OUT)
GPIO.output(self.gpioLight, GPIO.LOW)
# Управление сервоприводом поворота колес.
self.SERVO = 7
self.pwm_servo = PWM.PWM_Servo(self.SERVO)
self.pwm_servo.setFreq()
# Управление L298, мотор движения машинки.
self.L298_ENA = 10
self.L298_IN1 = 12
self.L298_IN2 = 13
self.L298_IN3 = 14
self.L298_IN4 = 15
self.L298_ENB = 11
self.pwm_motor = PWM.PWM_L298N_Motor(self.L298_ENA, self.L298_IN1, self.L298_IN2, self.L298_IN3, self.L298_IN4, self.L298_ENB)
self.pwm_motor.setFreq()
def __del__(self):
GPIO.output(self.gpioLight, GPIO.LOW)
self.moveStop()
self.turnCenter()
GPIO.cleanup()
def moveStop(self):
self.pwm_motor.stop()
#self.CarStatus.status['move'] = 0
CarStatus.statusCar['car']['speed'] = 0
def moveForward(self, speed):
#print('val', val)
self.pwm_motor.forward(speed)
#self.CarStatus.status['move'] = speed
CarStatus.statusCar['car']['speed'] = speed
def moveBack(self, speed):
self.pwm_motor.back(speed)
#self.CarStatus.status['move'] = speed
CarStatus.statusCar['car']['speed'] = -1 * speed
def turnCenter(self):
val = int(HardwareSetting._turnCenter)
#print('turnCenter {}', val)
self.pwm_servo.set(val)
#self.CarStatus.status['turn'] = val
CarStatus.statusCar['car']['turn'] = val
def turnLeft(self, turn):
#print('turnLeft {}', turn)
val = int(HardwareSetting._turnCenter + (-1 * turn * HardwareSetting._turnDelta / HardwareSetting.yZero))
#print('turnLeft {}', val)
self.pwm_servo.set(val)
#self.CarStatus.status['turn'] = val
CarStatus.statusCar['car']['turn'] = val
def turnRight(self, turn):
#print('turnRight {}', turn)
val = int(HardwareSetting._turnCenter + (-1 * turn * HardwareSetting._turnDelta / HardwareSetting.yZero))
#print('turnRight {}', val)
self.pwm_servo.set(val)
#self.CarStatus.status['turn'] = val
CarStatus.statusCar['car']['turn'] = val
def run(self):
while True :
data = self.conn.recv(2048)
data = data.decode()
if data == '' :
break
# Обработка полученных команд.
#print(data)
data = data.replace('}{', '}\n\n{')
data = data.split('\n\n')
#for i in reversed(data):
for i in data:
try:
cmd = json.loads(i)
except:
continue
#print(cmd)
answer = {}
answer['type'] = 'car'
answer['cmd'] = cmd['cmd']
# Свет.
if cmd['cmd'] == 'Start':
print(cmd)
if cmd['status'] == True :
if self.statusLight == False :
# Включить свет.
GPIO.output(self.gpioLight, GPIO.HIGH)
else :
# Выключить свет.
GPIO.output(self.gpioLight, GPIO.LOW)
self.statusLight = not self.statusLight
#self.CarStatus.status['light'] = self.statusLight
#answer['status'] = self.statusLight
CarStatus.statusCar['car']['light'] = self.statusLight
# Движение вперед. Полное 1
elif cmd['cmd'] == 'X':
print(cmd)
if cmd['status'] == True :
self.moveForward(cmd['val'])
else :
self.moveStop()
# Движение вперед. Частичное 0.5
elif cmd['cmd'] == 'Y':
print(cmd)
if cmd['status'] == True :
self.moveForward(cmd['val'])
else :
self.moveStop()
# Движение вперед. Частичное 0.75
elif cmd['cmd'] == 'A':
print(cmd)
if cmd['status'] == True :
self.moveForward(cmd['val'])
else :
self.moveStop()
# Движение назад. Частичное 0.66
elif cmd['cmd'] == 'B':
print(cmd)
if cmd['status'] == True :
self.moveBack(cmd['val'])
else :
self.moveStop()
elif cmd['cmd'] == 'turn':
turn = cmd['y']
if turn == 0 :
self.turnCenter()
elif turn > 0 : # Право
self.turnRight(turn)
elif turn < 0 : # Лево
self.turnLeft(turn)
answer['state'] = CarStatus.statusCar['car']
self.conn.send(json.dumps(answer, ensure_ascii=False).encode())
def handler(self):
pass
if __name__ == '__main__':
signal.signal(signal.SIGTERM, service_shutdown)
signal.signal(signal.SIGINT, service_shutdown)
serverThread = ServerThread()
serverThread.start() | [
"djvu@inbox.ru"
] | djvu@inbox.ru |
2499c4b8a81732b60499196ca4086515080ac887 | 2d2550c0be7ac2cfa397c0f3cbd52dfcb557e4aa | /evaluation.py | 0b6773c815565169d5d0f197799e6f5810333b21 | [] | no_license | tasx0823/VSRNet | 91a620948aad98462ce99d4673345f9c1c0c5d7c | 13b14d8b468c0e34d610633c79f04b21607c92d7 | refs/heads/master | 2022-10-09T16:40:23.041651 | 2020-06-13T15:08:26 | 2020-06-13T15:08:26 | 271,972,052 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,154 | py | from __future__ import print_function
import os
import pickle
import numpy
import time
import numpy as np
import torch
from torch.autograd import Variable
from basic.metric import getScorer
from basic.util import AverageMeter, LogCollector
def l2norm(X):
"""L2-normalize columns of X
"""
norm = np.linalg.norm(X, axis=1, keepdims=True)
return 1.0 * X / norm
def cal_error(videos, captions, measure='cosine'):
videos = np.squeeze(videos)
if measure == 'cosine':
captions = l2norm(captions)
videos = l2norm(videos)
errors = -1 * numpy.dot(captions, videos.T)
return errors
def cal_error_gpu(videos, captions, measure='cosine'):
def l2norm_gpu(x):
norm = torch.norm(x, p=2, dim=1, keepdim=True)
return 1.0 * x / norm
captions = l2norm_gpu(captions)
videos = l2norm_gpu(videos)
errors = -1 * torch.dot(captions, videos.T)
return errors
def cal_error__(videos, captions, measure='cosine'):
if measure == 'cosine':
# captions = l2norm(captions)
# videos = l2norm(videos)
# errors = -1*numpy.dot(captions, videos.T)
captions = l2norm(captions)
videos = l2norm(videos)
errors = numpy.dot(videos, captions.T)
errors = np.reshape(errors, (-1, 7, np.shape(captions)[0]))
errors = np.max(errors, axis=1, keepdims=False)
errors = -1 * errors.T
return errors
def cal_error_(videos, captions, measure='cosine'):
if measure == 'cosine':
# captions = l2norm(captions)
# videos = l2norm(videos)
# errors = -1*numpy.dot(captions, videos.T)
captions = l2norm(captions)
videos = l2norm(videos)
errors = numpy.dot(videos, captions.T)
errors = np.reshape(errors, (-1, 7, np.shape(captions)[0]))
max_idxs = np.argmax(errors, axis=1)
errors = np.max(errors, axis=1, keepdims=False)
errors = -1 * errors.T
return errors, max_idxs.T
def encode_data(model, data_loader, log_step=10, logging=print, return_ids=True):
"""Encode all videos and captions loadable by `data_loader`
"""
batch_time = AverageMeter()
val_logger = LogCollector()
# switch to evaluate mode
model.val_start()
end = time.time()
# numpy array to keep all the embeddings
video_embs = None
cap_embs = None
atten_scores = None
video_ids = [''] * len(data_loader.dataset)
caption_ids = [''] * len(data_loader.dataset)
for i, (videos, captions, idxs, cap_ids, vid_ids, seg_ids, timestamp, duration, gt_attention_scores) in enumerate(
data_loader):
# make sure val logger is used
model.logger = val_logger
# compute the embeddings
vid_emb, cap_emb, atten_score, vid_emb_with_text, loss_matrix, loss_matrix2 = model.forward_emb(videos,
captions, True)
# initialize the numpy arrays given the size of the embeddings
if video_embs is None:
video_embs = np.zeros((len(data_loader.dataset), vid_emb.size(1)))
atten_scores = np.zeros((len(data_loader.dataset), 128))
if cap_embs is None:
cap_embs = np.zeros((len(data_loader.dataset), cap_emb.size(1)))
timestamps = np.zeros((len(data_loader.dataset), 2))
durations = np.zeros((len(data_loader.dataset)))
video_embs[idxs] = vid_emb.data.cpu().numpy().copy()
cap_embs[idxs] = cap_emb.data.cpu().numpy().copy()
atten_scores[idxs] = atten_score.data.cpu().numpy().copy()
timestamps[idxs] = timestamp
durations[idxs] = duration
for j, idx in enumerate(idxs):
caption_ids[idx] = cap_ids[j]
video_ids[idx] = vid_ids[j]
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % log_step == 0:
logging('Test: [{0:2d}/{1:2d}]\t'
'{e_log}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
.format(i, len(data_loader), batch_time=batch_time,
e_log=str(model.logger)))
del videos, captions
# video_embs = np.reshape(video_embs,(-1,2048))
if return_ids == True:
return video_embs, cap_embs, video_ids, caption_ids, timestamps, durations, atten_scores
else:
return video_embs, cap_embs
def cal_iou_using_id(pre_seg_id, timestamp, duration):
segs = [[0, 1], [0, 0.5], [0.5, 1], [0, 0.25], [0.25, 0.5], [0.5, 0.75], [0.75, 1]]
seg = segs[pre_seg_id]
timestamp = 1.0 * np.array(timestamp) / duration
union = (min(seg[0], timestamp[0]), max(seg[1], timestamp[1]))
inter = (max(seg[0], timestamp[0]), min(seg[1], timestamp[1]))
iou = 1.0 * (inter[1] - inter[0]) / (union[1] - union[0])
return iou
def cal_iou(pre_time, gt_time):
union = (min(pre_time[0], gt_time[0]), max(pre_time[1], gt_time[1]))
inter = (max(pre_time[0], gt_time[0]), min(pre_time[1], gt_time[1]))
iou = 1.0 * (inter[1] - inter[0]) / (union[1] - union[0])
return iou
def cal_grounding_accuracy_0_dot_7(file):
f1 = open(file, 'r')
lines = f1.readlines()
iou_cnt = 0.0
cnt = 0.0
iou_sum = 0.0
for line in lines:
line = line.strip()
line = line.split(' ', 3)
# video_id = line[0]
start = float(line[0])
end = float(line[1])
duration = float(line[2])
attn_scores = line[3].split(' ')
attn_scores = np.array(attn_scores).astype(np.float)
max_score = np.max(attn_scores)
th = 0.33 * max_score
# th = 0.3
pre_s = 0
while (pre_s < 127 and attn_scores[pre_s] < th):
pre_s += 1
pre_e = 127
while (pre_e > pre_s and attn_scores[pre_e] < th):
pre_e -= 1
pre_time = 1.0 * np.array((pre_s, pre_e)) / 127
gt_time = 1.0 * np.array((start, end)) / duration
# print(gt_time)
iou = cal_iou(pre_time, gt_time)
if iou < 0:
iou = 0
if iou >= 0.7:
iou_cnt += 1
cnt += 1
iou_sum += iou
return iou_cnt / cnt
def cal_grounding_accuracy(file):
f1 = open(file, 'r')
lines = f1.readlines()
iou_cnt = 0.0
cnt = 0.0
iou_sum = 0.0
for line in lines:
line = line.strip()
line = line.split(' ', 3)
# video_id = line[0]
start = float(line[0])
end = float(line[1])
duration = float(line[2])
attn_scores = line[3].split(' ')
attn_scores = np.array(attn_scores).astype(np.float)
max_score = np.max(attn_scores)
th = 0.33 * max_score
# th = 0.3
pre_s = 0
while (pre_s < 127 and attn_scores[pre_s] < th):
pre_s += 1
pre_e = 127
while (pre_e > pre_s and attn_scores[pre_e] < th):
pre_e -= 1
pre_time = 1.0 * np.array((pre_s, pre_e)) / 127
gt_time = 1.0 * np.array((start, end)) / duration
# print(gt_time)
iou = cal_iou(pre_time, gt_time)
if iou < 0:
iou = 0
if iou >= 0.5:
iou_cnt += 1
cnt += 1
iou_sum += iou
return iou_cnt / cnt
def is_retrieved_segment_correct(start, end, duration, attn_scores):
# For temporal localization task, we set the threshold gamma to 0.3
gamma = 0.33
pre_s = 0
max_score = np.max(attn_scores)
th = max_score * gamma
while (pre_s < 127 and attn_scores[pre_s] < th):
pre_s += 1
pre_e = 127
while (pre_e > pre_s and attn_scores[pre_e] < th):
pre_e -= 1
pre_time = 1.0 * np.array((pre_s, pre_e)) / 127
gt_time = 1.0 * np.array((start, end)) / duration
iou = cal_iou(pre_time, gt_time)
if iou > 0.5:
return True
return False
def is_retrieved_segment_correct_0_7(start, end, duration, attn_scores):
th = 0.33
pre_s = 0
while (pre_s < 127 and attn_scores[pre_s] < th):
pre_s += 1
pre_e = 127
while (pre_e > pre_s and attn_scores[pre_e] < th):
pre_e -= 1
pre_time = 1.0 * np.array((pre_s, pre_e)) / 127
gt_time = 1.0 * np.array((start, end)) / duration
iou = cal_iou(pre_time, gt_time)
if iou > 0.7:
return True
return False
# recall@k, Med r, Mean r for Text-to-Video Retrieval
def t2i(c2i, timestamps, durations, atten_scores, opt, vis_details=False, n_caption=5, topK=30, ):
"""
Text->Videos (Text-to-Video Retrieval)
c2i: (5N, N) matrix of caption to video errors
vis_details: if true, return a dictionary for ROC visualization purposes
"""
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
assert c2i.shape[0] == timestamps.shape[0]
ranks = np.zeros(c2i.shape[0])
top1_cnt = 0
iou_sum = 0.0
iou_cnt_0_dot_3 = 0
iou_cnt_0_dot_5 = 0
iou_cnt_0_dot_7 = 0
iou_cnt_0_dot_9 = 0
topK_candidates = np.zeros((c2i.shape[0], topK)).astype(np.int)
f1 = open(os.path.join(opt.logger_name, 'rank1_s_t_and_duration1.txt'), 'w')
f10 = open(os.path.join(opt.logger_name, 'rank1_s_t_and_duration10.txt'), 'w')
f100 = open(os.path.join(opt.logger_name, 'rank1_s_t_and_duration100.txt'), 'w')
f_attention_scores = open(os.path.join(opt.logger_name, 'attention_scores.txt'), 'w')
acc_at_0_dot_5_1 = 0
acc_at_0_dot_5_10 = 0
acc_at_0_dot_5_100 = 0
acc_at_0_dot_5_1_total = 0
acc_at_0_dot_5_10_total = 0
acc_at_0_dot_5_100_total = 0
acc_0_5_fenzi = 0.0
acc_0_7_fenzi = 0.0
for i in range(len(ranks)):
# -----------------------------------------------------------------------------#
# Calculate acc@0.5 for every video
if is_retrieved_segment_correct(timestamps[i][0], timestamps[i][1], durations[i], atten_scores[i]):
acc_0_5_fenzi += 1
if is_retrieved_segment_correct_0_7(timestamps[i][0], timestamps[i][1], durations[i], atten_scores[i]):
acc_0_7_fenzi += 1
# -----------------------------------------------------------------------------#
d_i = c2i[i]
inds = np.argsort(d_i)
topK_candidates[i] = inds[:topK]
rank = np.where(inds == i / n_caption)[0][0]
f_attention_scores.write('{:f} {:f} {:f} '.format(timestamps[i][0], timestamps[i][1], durations[i]))
f_attention_scores.write(str(list(atten_scores[i])).replace('[', '').replace(']', '').replace(',', ''))
f_attention_scores.write('\n')
ranks[i] = rank
if (rank == 0):
top1_cnt += 1
# pre_seg_id = max_idx[i/n_caption]
# pre_seg_id = np.random.randint(7)
pre_seg_id = 0
iou = cal_iou_using_id(pre_seg_id, timestamps[i], durations[i])
f1.write('{:f} {:f} {:f} '.format(timestamps[i][0], timestamps[i][1], durations[i]))
f1.write(str(list(atten_scores[i])).replace('[', '').replace(']', '').replace(',', ''))
f1.write('\n')
if is_retrieved_segment_correct(timestamps[i][0], timestamps[i][1], durations[i], atten_scores[i]):
acc_at_0_dot_5_1 += 1
acc_at_0_dot_5_1_total += 1
iou_sum += iou
if iou >= 0.3:
iou_cnt_0_dot_3 += 1
if iou >= 0.5:
iou_cnt_0_dot_5 += 1
if iou >= 0.7:
iou_cnt_0_dot_7 += 1
if iou >= 0.9:
iou_cnt_0_dot_9 += 1
if (rank < 10):
f10.write('{:f} {:f} {:f} '.format(timestamps[i][0], timestamps[i][1], durations[i]))
f10.write(str(list(atten_scores[i])).replace('[', '').replace(']', '').replace(',', ''))
f10.write('\n')
if is_retrieved_segment_correct(timestamps[i][0], timestamps[i][1], durations[i], atten_scores[i]):
acc_at_0_dot_5_10 += 1
acc_at_0_dot_5_10_total += 1
if (rank < 100):
f100.write('{:f} {:f} {:f} '.format(timestamps[i][0], timestamps[i][1], durations[i]))
f100.write(str(list(atten_scores[i])).replace('[', '').replace(']', '').replace(',', ''))
f100.write('\n')
if is_retrieved_segment_correct(timestamps[i][0], timestamps[i][1], durations[i], atten_scores[i]):
acc_at_0_dot_5_100 += 1
acc_at_0_dot_5_100_total += 1
f1.close()
f10.close()
f100.close()
f_attention_scores.close()
acc_at_0_dot_5_1 = cal_grounding_accuracy(os.path.join(opt.logger_name, 'rank1_s_t_and_duration1.txt'))
acc_at_0_dot_5_10 = cal_grounding_accuracy(os.path.join(opt.logger_name, 'rank1_s_t_and_duration10.txt'))
acc_at_0_dot_5_100 = cal_grounding_accuracy(os.path.join(opt.logger_name, 'rank1_s_t_and_duration100.txt'))
acc_at_0_dot_7_1 = cal_grounding_accuracy_0_dot_7(os.path.join(opt.logger_name, 'rank1_s_t_and_duration1.txt'))
acc_at_0_dot_7_10 = cal_grounding_accuracy_0_dot_7(os.path.join(opt.logger_name, 'rank1_s_t_and_duration10.txt'))
acc_at_0_dot_7_100 = cal_grounding_accuracy_0_dot_7(os.path.join(opt.logger_name, 'rank1_s_t_and_duration100.txt'))
print(os.path.join(opt.logger_name, 'rank1_s_t_and_duration1.txt'))
# acc_at_0_dot_5_1 = 100.0 * acc_at_0_dot_5_1 / acc_at_0_dot_5_1_total
# acc_at_0_dot_5_10 = 100.0 * acc_at_0_dot_5_10 / acc_at_0_dot_5_10_total
# acc_at_0_dot_5_100 = 100.0 * acc_at_0_dot_5_100 / acc_at_0_dot_5_100_total
acc_at_0_dot_5_7 = [acc_at_0_dot_5_1, acc_at_0_dot_5_10, acc_at_0_dot_5_100, acc_at_0_dot_7_1, acc_at_0_dot_7_10,
acc_at_0_dot_7_100]
mean_iou = iou_sum / top1_cnt
acc_0_dot_3 = 1.0 * iou_cnt_0_dot_3 / top1_cnt
acc_0_dot_5 = 1.0 * iou_cnt_0_dot_5 / top1_cnt
acc_0_dot_7 = 1.0 * iou_cnt_0_dot_7 / top1_cnt
acc_0_dot_9 = 1.0 * iou_cnt_0_dot_9 / top1_cnt
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
r100 = 100.0 * len(np.where(ranks < 100)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
return map(float, [r1, r10, r100, medr, meanr]), mean_iou, [acc_0_dot_3, acc_0_dot_5, acc_0_dot_7,
acc_0_dot_9], topK_candidates, acc_at_0_dot_5_7
# recall@k, Med r, Mean r for Text-to-Video Retrieval
def t2i_(c2i, vis_details=False, n_caption=5):
"""
Text->Videos (Text-to-Video Retrieval)
c2i: (5N, N) matrix of caption to video errors
vis_details: if true, return a dictionary for ROC visualization purposes
"""
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
ranks = np.zeros(c2i.shape[0])
for i in range(len(ranks)):
d_i = c2i[i]
inds = np.argsort(d_i)
rank = np.where(inds == i / n_caption)[0][0]
ranks[i] = rank
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
return map(float, [r1, r5, r10, medr, meanr])
# recall@k, Med r, Mean r for Video-to-Text Retrieval
def i2t(c2i, n_caption=5):
"""
Videos->Text (Video-to-Text Retrieval)
c2i: (5N, N) matrix of caption to video errors
"""
# remove duplicate videos
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
ranks = np.zeros(c2i.shape[1])
for i in range(len(ranks)):
d_i = c2i[:, i]
inds = np.argsort(d_i)
rank = np.where(inds / n_caption == i)[0][0]
ranks[i] = rank
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
return map(float, [r1, r5, r10, medr, meanr])
# mAP for Text-to-Video Retrieval
def t2i_map(c2i, n_caption=5):
"""
Text->Videos (Text-to-Video Retrieval)
c2i: (5N, N) matrix of caption to video errors
"""
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
scorer = getScorer('AP')
perf_list = []
for i in range(c2i.shape[0]):
d_i = c2i[i, :]
labels = [0] * len(d_i)
labels[i / n_caption] = 1
sorted_labels = [labels[x] for x in np.argsort(d_i)]
current_score = scorer.score(sorted_labels)
perf_list.append(current_score)
return np.mean(perf_list)
# mAP for Video-to-Text Retrieval
def i2t_map(c2i, n_caption=5):
"""
Videos->Text (Video-to-Text Retrieval)
c2i: (5N, N) matrix of caption to video errors
"""
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
scorer = getScorer('AP')
perf_list = []
for i in range(c2i.shape[1]):
d_i = c2i[:, i]
labels = [0] * len(d_i)
labels[i * n_caption:(i + 1) * n_caption] = [1] * n_caption
sorted_labels = [labels[x] for x in np.argsort(d_i)]
current_score = scorer.score(sorted_labels)
perf_list.append(current_score)
return np.mean(perf_list)
def t2i_inv_rank(c2i, n_caption=1):
"""
Text->Videos (Text-to-Video Retrieval)
c2i: (5N, N) matrix of caption to video errors
n_caption: number of captions of each image/video
"""
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
inv_ranks = np.zeros(c2i.shape[0])
for i in range(len(inv_ranks)):
d_i = c2i[i, :]
inds = np.argsort(d_i)
rank = np.where(inds == i / n_caption)[0]
inv_ranks[i] = sum(1.0 / (rank + 1))
return np.mean(inv_ranks)
def i2t_inv_rank(c2i, n_caption=1):
"""
Videos->Text (Video-to-Text Retrieval)
c2i: (5N, N) matrix of caption to video errors
n_caption: number of captions of each image/video
"""
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
inv_ranks = np.zeros(c2i.shape[1])
for i in range(len(inv_ranks)):
d_i = c2i[:, i]
inds = np.argsort(d_i)
rank = np.where(inds / n_caption == i)[0]
inv_ranks[i] = sum(1.0 / (rank + 1))
return np.mean(inv_ranks)
def i2t_inv_rank_multi(c2i, n_caption=2):
"""
Text->videos (Image Search)
c2i: (5N, N) matrix of caption to image errors
n_caption: number of captions of each image/video
"""
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
inv_ranks = np.zeros(c2i.shape[1])
result = []
for i in range(n_caption):
idx = range(i, c2i.shape[0], n_caption)
sub_c2i = c2i[idx, :]
score = i2t_inv_rank(sub_c2i, n_caption=1)
result.append(score)
return result
| [
"532474454@qq.com"
] | 532474454@qq.com |
94bddabbc05ff1e6648d2572e2e87a47d4f9b64d | 1b5cc301d02cadb5977a94854122adb9db85f003 | /vowpalwabbitpreparation.py | 8e76402eb4d63d2e78f5ec8fbb537a532775b8ff | [] | no_license | malikabr/-Naive | 02f2bcb0b5459849fc6c162e94148997204abfc3 | 46b3035a927f744ea5605736a09443d0566f8f4c | refs/heads/master | 2020-03-14T21:43:18.042495 | 2018-06-11T05:37:10 | 2018-06-11T05:37:10 | 131,803,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | #this is for vowpal wabbit
from hazm import sent_tokenize
from hazm import Normalizer
def readData(fileName):
file = open(fileName, 'r')
data = file.read().split(u".")
sample_set = []
for sample in data:
if sample.__len__() > 1:
sample_set.append(sample)
return sample_set
file=open("result.txt","w")
#reading data
file1 = 'book1.txt'
file2 = "book2.txt"
sample_set1 = readData(file1)
sample_set2=readData(file2)
#Normalizind data
n = Normalizer()
for sample in sample_set1:
sample = n.normalize(sample)
n = Normalizer()
for sample2 in sample_set2:
sample2 = n.normalize(sample2)
#SENTENCE TOKENIZATION
all_sentences1 = []
for sample in sample_set1:
sentences1 = sent_tokenize(sample)
all_sentences1.extend(sentences1)
#print(all_sentences)
all_sentences2 = []
for sample in sample_set2:
sentences2 = sent_tokenize(sample)
all_sentences2.extend(sentences2)
size2=all_sentences2.__len__()
size1=all_sentences1.__len__()
if(size1> size2):
num = size2
elif(size1< size2):
num = size1
all_words = {}
cnt1 =1
cnt2 = 0
print(num)
for s in all_sentences1:
all_words[cnt1] = s
cnt1 += 2
if (cnt1 > 2*num):
break
for s in all_sentences2:
all_words[cnt2] = s
cnt2 += 2
if (cnt2 > 2*num):
break
for k , v in all_words.items():
m = divmod(k,2)
if m[1]==0:
file.write("-1" + '|' + v + "\n")
else:
file.write(str(m[1]) + '|' + v + "\n")
file.close()
| [
"noreply@github.com"
] | noreply@github.com |
269fda9caa8453ff95f35d91935e1d174fcbffc9 | 4bb93e693ffb9f808cb7751cbe9d490410680260 | /order/admin.py | 211e7e79514539685dc102c788b9612099ca5a83 | [] | no_license | Mamadaliyev/e-commerce | b2b5da23b7c7fdd346f7934a7ea5a97663eaaf4f | 07bfa7520874b8e68ea5473203ff7c280908d145 | refs/heads/main | 2023-04-11T19:59:36.683677 | 2021-05-06T20:29:37 | 2021-05-06T20:29:37 | 365,028,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | from django.contrib import admin
from .models import Order, OrderItem
admin.site.register(Order)
| [
"shohruh@3i.ai"
] | shohruh@3i.ai |
3f6386e888c7ccdc34ad987e3f9354bf124474bd | 1bb3cdd898bb14dd5c58f7cd97efae38e635941e | /hw3/hw3-resources/Final/src/pagerank.py | 2ebc553a6bf4a3d4a1bc62fec4c3ffddb29fdcf3 | [] | no_license | lrmneves/mltextmining | 7704148105920240d3798c24e669f97bc3c81748 | 3e6d18bf404fd393744c77f52643be822930e87d | refs/heads/master | 2021-01-18T01:27:06.516957 | 2016-08-13T00:03:53 | 2016-08-13T00:03:53 | 51,187,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,817 | py | import os
import sys
from sklearn.preprocessing import normalize
from scipy.sparse import csc_matrix
from scipy import sparse
import numpy as np
import time
import warnings
import math
def get_topic_dict(path):
'''reads file on path and returns dict of dicts, with main key user, secondary key query and value = topic prob'''
topic_dict = {}
with open(path) as t_file:
lines = t_file.readlines()
for line in lines:
current = line.split(" ")
if not int(current[0]) in topic_dict:
topic_dict[int(current[0])] = {}
data = []
for i in range(2,len(current)):
data.append(float(current[i].split(":")[1]))
topic_dict[int(current[0])][int(current[1])] = np.array(data)
return topic_dict
def pagerank(r, M, p, alpha, beta, gamma, p0):
    """Run 10 power-iteration steps of topic-sensitive PageRank.

    Each step computes ``r' = alpha * M^T r + beta * p + gamma * p0`` where
    M is the (row-normalized) transition matrix, p the topic teleport vector
    and p0 the uniform teleport vector.
    """
    damped = alpha * M.T  # hoisted: the damped transition matrix never changes
    for _ in range(10):
        r = damped.dot(r) + beta * p + gamma * p0
    return r
def main():
    """Topic-sensitive PageRank retrieval experiment (Python 2 script).

    Reads transition.txt / doc_topics.txt / indri-lists/ from the working
    directory, computes one PageRank vector per topic, blends them per
    query using the query-topic distribution, optionally combines with the
    Indri relevance score (method "WS" or "CM"), and appends TREC-format
    rankings to trec.txt.
    """
    # Defaults; overridden positionally from the command line below.
    path = ""
    method = ""
    alpha= 0.8
    beta_factor = 0.2
    beta = (1-alpha)*beta_factor
    warnings.filterwarnings("ignore")
    pr_weight = 1000
    relevance_weight = 0.5
    # argv: [1]=method ("WS"/"CM"), [2]=topic-distro path, [3]=alpha,
    # [4]=beta_factor, [5]=pagerank weight, [6]=relevance weight.
    if len(sys.argv) > 1:
        method = sys.argv[1]
    if len(sys.argv) > 2:
        path = sys.argv[2]
    if len(sys.argv) > 3:
        alpha = float(sys.argv[3])
        beta_factor = float(sys.argv[4])
    if len(sys.argv) > 5:
        pr_weight = float(sys.argv[5])
        relevance_weight = float(sys.argv[6])
    if path == "":
        beta_factor = 0.0
    #initialize matrix M
    # transition.txt lines: "src dst [weight]" with 1-based node ids;
    # self-loops are skipped here and added uniformly as the diagonal below.
    with open("transition.txt","r") as transitions:
        lines = transitions.readlines()
        data = []
        columns = []
        rows = []
        size = 0
        for line in lines:
            t = line.split(" ")
            if len(t) > 1:
                if int(t[0])-1 != int(t[1])-1:
                    rows.append(int(t[0])-1)
                    columns.append(int(t[1])-1)
                    if len(t) > 2:
                        data.append(int(t[2]))
                    else:
                        data.append(1)
                # Track the largest node id seen to size the matrix.
                if size < max(int(t[0]),int(t[1])):
                    size = max(int(t[0]),int(t[1]))
    #adds diagonal
    rows += range(size)
    columns += range(size)
    data += np.ones(size).tolist()
    M = csc_matrix((data, (rows, columns)), shape=(size, size), dtype="float64")
    # Row-normalize so each row is an outgoing probability distribution.
    M = normalize(M, norm='l1', axis=1)
    # doc_topics.txt lines: "doc topic"; invert into topic -> [doc indices].
    with open("doc_topics.txt") as topics:
        lines = topics.readlines()
        topics_dict = {}
        for line in lines:
            current = line.split(" ")
            if not int(current[1]) in topics_dict:
                topics_dict[int(current[1])] = []
            topics_dict[int(current[1])].append(int(current[0])-1)
    # One sparse teleport column vector per topic, uniform over its docs.
    tspr_dict = {}
    for t in topics_dict:
        rows = topics_dict[t]
        columns = np.zeros(len(rows))
        data = np.ones(len(rows))/len(rows)
        tspr_dict[t] = csc_matrix((data, (rows, columns)), shape=(size,1))
    #initialize variables
    gamma = (1-alpha)*(1-beta_factor)
    p0 = (1.0/size)*np.ones((size,1))
    r_init = (1.0/size)*np.ones((size,1))
    if path == "":
        t_dict = get_topic_dict("query-topic-distro.txt")
    else:
        t_dict = get_topic_dict(path)
    #this part assumes indri-lists folder is unzipped on this directory
    #Gets the documents for each query and their relevance scores.
    query_relevance_dict = {}
    for root, dirs, files in os.walk("indri-lists"):
        for f in files:
            if not f.endswith(".results.txt"):
                continue
            query = f.split(".")[0]
            query_relevance_dict[query] = {}
            with open(os.path.join(root, f)) as current_file:
                for line in current_file.readlines():
                    # TREC result line: columns 2 and 4 are doc id and score.
                    doc = line.split(" ")
                    query_relevance_dict[query][int(doc[2])] = float(doc[4])
    # Precompute one PageRank vector per topic (the expensive part).
    s = time.time()
    r_cache = {}
    for t in topics_dict:
        r_cache[t] = pagerank(r_init.copy(),M,tspr_dict[t],alpha,beta,gamma,p0.copy())
    e = time.time()
    base_time = e-s
    num_queries = 0
    # Start the output file fresh each run.
    if os.path.isfile("trec.txt"):
        os.remove("trec.txt")
    for u in t_dict:
        for q in t_dict[u]:
            s = time.time()
            query = str(u) + "-" + str(q)
            # Blend the cached per-topic vectors weighted by this query's
            # topic distribution (topic ids are 1-based, hence t-1).
            final_r = np.zeros((size,1))
            for t in topics_dict:
                term = (r_cache[t]*t_dict[u][q][t-1])
                final_r = final_r + term
            result = final_r.tolist()
            if method != "":
                # Combine PageRank and relevance: WS = weighted sum,
                # CM = weighted harmonic combination of log scores.
                for d in query_relevance_dict[query]:
                    if method == "WS":
                        result[d -1] = [pr_weight * result[d-1][0] + relevance_weight*query_relevance_dict[query][d]]
                    elif method == "CM":
                        result[d -1] = [-(pr_weight + relevance_weight)/(pr_weight/math.log(result[d-1][0]) \
                            +relevance_weight/(math.log(abs(query_relevance_dict[query][d]))))]
                    else:
                        break
            # Rank all documents by combined score, best first.
            ranking = sorted(range(len(result)),key=lambda x:result[x],reverse = True)
            e = time.time()
            base_time +=e-s
            num_queries +=1
            exp = "mltxt"
            rank_value = 1
            # Emit only documents that Indri retrieved for this query,
            # in TREC format: "<query> Q0 <doc> <rank> <score> <run-id>".
            with open("trec.txt","a+") as trec:
                for i in range(len(ranking)):
                    if ranking[i]+1 in query_relevance_dict[query]:
                        trec.write(str(query) +" Q0 " + str(ranking[i]+1) + " " + str(rank_value) + " " + \
                            '{:0.8f}'.format(result[ranking[i]][0]) + " " + exp +"\n")
                        rank_value+=1
    # Python 2 print statement -- this script targets Python 2.
    print "Computation took " +str(base_time/num_queries) + " per query"
if __name__ == "__main__":
main()
| [
"lrmneves@poli.ufrj.br"
] | lrmneves@poli.ufrj.br |
6930cf4720bfea18dfb8f1d19af51e5c51bfd733 | ebefdbcd77452dbc9e27205972ca03040163673b | /laba/models.py | 6b67951afb7abc686e81ff1079fe3952eb07b008 | [] | no_license | vitalik-ez/cyber_laba | 8ce508b151a642c3c0877f21dd7aa5a9f75a45d3 | 3c544a9543ffb101399b48a7e8f1772ae27293c6 | refs/heads/master | 2023-01-19T06:53:17.704890 | 2020-12-01T08:24:41 | 2020-12-01T08:24:41 | 294,933,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | from django.db import models
from django.core.validators import MinValueValidator
from django.core.validators import MaxValueValidator
from django.utils.safestring import mark_safe
class electricalAppliances(models.Model):
    # A household electrical appliance whose energy use the app models.
    # verbose_name strings are Ukrainian UI labels and are part of the
    # rendered forms/admin -- do not translate them in code.
    name = models.CharField(max_length=30, verbose_name = "Назва")
    # Power draw in watts; must be at least 1.
    power = models.IntegerField(validators=[MinValueValidator(1)], verbose_name = "Потужність (Вт)")
    # Running time in minutes; must be at least 1, defaults to 50.
    duration = models.IntegerField(validators=[MinValueValidator(1)], default=50, verbose_name = "Тривалість роботи (хв)")
def validate_file_extension(value):
    """Django field validator: accept only Excel files (.xlsx / .xls)."""
    import os
    from django.core.exceptions import ValidationError
    _, extension = os.path.splitext(value.name)
    if extension.lower() not in ('.xlsx', '.xls'):
        raise ValidationError('Unsupported file extension.')
class Windmills(models.Model):
    # A wind turbine (ВЕУ) model: its energy characteristic and base price.
    name = models.CharField(max_length=30, verbose_name = "Назва")
    # Excel upload holding the turbine's energy characteristic curve;
    # extension is enforced by validate_file_extension above.
    energy_character = models.FileField(upload_to='energy_characteristic/', null=True, verbose_name = "Енергетична характеристика ВЕУ (добавте файл у форматі xls)", validators=[validate_file_extension])
    #height = models.IntegerField(validators=[MinValueValidator(1)], verbose_name = "Варіанти поставки ВЕУ з різними висотами башти")
    # Price of the turbine itself, excluding the tower (towers are modeled
    # separately in Tower below).
    price_without_bashta = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(1000000)], verbose_name = "Вартість ВЕУ (без башти)")
    #price_bashta = models.IntegerField(validators=[MinValueValidator(1)], verbose_name = "Вартість башти")
#price_bashta = models.IntegerField(validators=[MinValueValidator(1)], verbose_name = "Вартість башти")
class Tower(models.Model):
    # A tower option (height/price) that can be combined with a turbine.
    height = models.IntegerField(verbose_name = "Висота башти", validators=[MinValueValidator(50), MaxValueValidator(300)])
    price = models.IntegerField(verbose_name = "Вартість башти", validators=[MinValueValidator(1), MaxValueValidator(1000000)])
    #windmills = models.ForeignKey(Windmills, on_delete=models.CASCADE)
    # NOTE(review): a bare integer stands in for the commented-out ForeignKey
    # above, so referential integrity to Windmills is not enforced -- confirm
    # this is intentional before relying on it.
    windmills = models.IntegerField()
class TowerNew(models.Model):
    # NOTE(review): near-duplicate of Tower (no validators); purpose unclear
    # from this file -- presumably an experimental/replacement table.
    height = models.IntegerField(verbose_name = "Висота new башти")
    price = models.IntegerField(verbose_name = "Вартість new башти")
    #windmills = models.ForeignKey(Windmills, on_delete=models.CASCADE)
    # Plain integer reference to a Windmills row (not a ForeignKey).
    windmills = models.IntegerField(verbose_name = "Який вітряк")
| [
"62476663+vitalik-ez@users.noreply.github.com"
] | 62476663+vitalik-ez@users.noreply.github.com |
2316953bd0d2aea006a49fd2d06ef8f20ef2c43a | dd8789c05f95e791d57574f43de71be56b6a6a70 | /assignments/cpu_simulation/components/fifo.py | 6843afab14b68b00e6b4038734c6e671b7cba9da | [] | no_license | saikiranreddy-nagulapally/5143-201-OpSys-Nagulapally | e5635ffc872f170837b425c1d962300c4cf76777 | c7b7bb1ef20ea64dd74a34850bf1c15d1a6ab7f6 | refs/heads/master | 2021-09-10T03:00:19.839131 | 2018-03-20T20:36:10 | 2018-03-20T20:36:10 | 79,628,285 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py | #!/usr/bin/env python3
from sim_components import *
# === Class: FIFO===
class Fifo(list):
    """A first-in-first-out queue of Process objects.

    NOTE(review): subclasses list for historical reasons, but all storage
    goes through ``self.Q`` rather than the inherited list machinery.
    """
    def __init__(self,init=()):
        """***init***: Constructor for FIFO class.
        - **Args:**
            - init (iterable): Optional processes used to seed the queue.
        - **Returns:**
            - None
        """
        # Default is an immutable tuple: the original mutable [] default is
        # the classic shared-default-argument pitfall.
        self.Q = []
        # Seed through add() so the Process type check covers every item;
        # iterating an empty collection is a no-op, so no length guard needed.
        for proc in init:
            self.add(proc)
    def add(self,proc):
        """***add***: Add process to rear of queue.
        - **Args:**
            - proc (Process): Process to be added to queue
        - **Returns:**
            - None
        """
        if not isinstance(proc,Process):
            raise Exception("Queue requires items added to be of type 'Process'")
        self.Q.append(proc)
    def remove(self):
        """***remove***: Removes and returns the first item in the queue.
        Raises IndexError when the queue is empty.
        - **Args:**
            - None
        - **Returns:**
            - (Process): Process removed from queue
        """
        return self.Q.pop(0)
    def empty(self):
        """***empty***: Boolean test if queue empty.
        - **Args:**
            - None
        - **Returns:**
            - (Bool): True if queue empty False otherwise.
        """
        # An empty list is falsy -- no need to compare len() with 0.
        return not self.Q
    def first(self,key=None):
        """***first***: Copy of first item in the queue.
        Returns a reference to the first process in the queue without
        altering the queue. If a key is present, returns the value of
        that key instead.
        - **Args:**
            - key (string) : Key of value to be returned
        - **Returns:**
            - (mixed,Process): The first process in the queue, or a value
              from the first process in the queue.
        """
        if key is None:
            return self.Q[0]
        return self.Q[0][key]
    def last(self,key=None):
        """***last***: Copy of last item in the queue.
        Returns a reference to the last process in the queue without
        altering the queue. If a key is present, returns the value of
        that key instead.
        - **Args:**
            - key (string) : Key of value to be returned
        - **Returns:**
            - (mixed,Process): The last process in the queue, or a value
              from the last process in the queue.
        """
        if key is None:
            return self.Q[-1]
        return self.Q[-1][key]
    def __str__(self):
        """***str***: Visual dump of class state (delegates to sim_components.my_str).
        - **Args:**
            - None
        - **Returns:**
            - (str): Rendered state.
        """
        return my_str(self)
    def __iter__(self):
        """***iter***: Iterate over queued processes in FIFO order.
        - **Yields:**
            - Each queued Process, front to rear.
        """
        yield from self.Q
if __name__=='__main__':
pass
| [
"saikiranreddy791@outlook.com"
] | saikiranreddy791@outlook.com |
ad58850b4e20e701d45ca9a0fef263371fb773a1 | 5dd44e9229d617d7f9ce82de3336e4b6ffcb08db | /2019/Day 07/Intcode.py | 7ccb3ef20b65b3fec5040451ba536a805baa4d24 | [] | no_license | roadsidegravel/advent-of-code | f69a6791132c710247789790d5ecde0a25e26209 | 4a7bc99adc4a352e6ac6b32100329a770de9d593 | refs/heads/main | 2023-05-05T00:21:13.460612 | 2021-06-03T10:50:01 | 2021-06-03T10:50:01 | 309,677,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,625 | py | class computer:
def __init__(self,intcode,automaticMode = False):
self.automaticMode = automaticMode
self.intcode = []
self.running = False
self.position = 0
self.inputs = []
self.outputs = []
self.log = []
for i in range(0,len(intcode)):
self.intcode.append(intcode[i])
#https://stackoverflow.com/questions/15189245/assigning-class-variable-as-default-value-to-class-method-argument
    class modes:
        # Record of one instruction's three parameter modes (0 = position,
        # 1 = immediate). Digit C applies to the first parameter, B to the
        # second, A to the third (per the ABCDE instruction layout).
        def __init__(self, A,B,C):
            self.modeFirstParam = C
            self.modeSecondParam = B
            self.modeThirdParam = A
class parametersClass:
def __init__(self,oneIntcode):
self.A = 0 #third parameter
self.B = 0 #second parameter
self.C = 0 #first parameter
self.D = 0 #opcode first digit
self.E = 0 #opcode second digit
arrayParam = self.buildParamArray(str(oneIntcode))
self.A = self.checkZeroOrOne(arrayParam[0], 'Parameter A')
self.B = self.checkZeroOrOne(arrayParam[1], 'Parameter B')
self.C = self.checkZeroOrOne(arrayParam[2], 'Parameter C')
self.D = arrayParam[3]
self.E = arrayParam[4]
def buildParamArray(self,intcodeString):
result = [0,0,0,0,0]
for i in range(1, len(intcodeString) + 1):
result[-i] = int(intcodeString[-i])
return result.copy()
def checkZeroOrOne(self,value,valueNameForErrorMessage):
if value == 0 or value == 1:
return value
else:
raise ValueError(f'{valueNameForErrorMessage} should be 1 or 0, not {value}')
def returnModes(self):
result = computer.modes(self.A,self.B,self.C)
return result
    def run(self):
        """Execute instructions until the machine halts, errors, or pauses
        for input (automatic mode); each step's message is appended to log."""
        self.running = True
        while self.running:
            self.log.append(self.takeStep())
def takeStep(self, position = None):
if position is None:
position = self.position
#bounds limit check
if position < 0:
return self.exitMessage(f'position too low, {position}')
if position > len(self.intcode)-1:
return self.exitMessage(f'position too high, {position}')
currentParameters = self.parametersClass(self.intcode[position])
currentModes = currentParameters.returnModes()
opcode = int(currentParameters.D*10+currentParameters.E)
if opcode is 1:
return self.opcode1(position,currentModes)
elif opcode is 2:
return self.opcode2(position,currentModes)
elif opcode is 3:
return self.opcode3(position,currentModes)
elif opcode is 4:
return self.opcode4(position,currentModes)
elif opcode is 5:
return self.opcode5(position,currentModes)
elif opcode is 6:
return self.opcode6(position,currentModes)
elif opcode is 7:
return self.opcode7(position,currentModes)
elif opcode is 8:
return self.opcode8(position,currentModes)
elif opcode is 99:
return self.exitMessage('opcode 99')
else:
print(f'unknown opcode {opcode} encountered at position {position}')
return self.exitMessage(f'Uknown opcode encountered, {opcode} at position {position}')
def exitMessage(self,string):
self.running = False
result = string+', exiting'
return result
def getValueFromIntcodes(self,position,param):
if param == 0:
actualPosition = self.intcode[position]
result = self.intcode[actualPosition]
elif param == 1:
result = self.intcode[position]
else:
raise ValueError('parameter should be 0 or 1 and this should be caught earlier')
return result
def writeResultToIntcodes(self,position,param,result):
if param == 0:
actualPosition = self.intcode[position]
self.intcode[actualPosition] = result
elif param == 1:
raise ValueError('Parameters that an instruction writes to will never be in immediate mode.')
else:
raise ValueError('parameter should be 0 or 1 and this should be caught earlier')
    def retrieveFirst(self,position, modes):
        # Value of the instruction's first parameter (memory offset +1).
        return self.getValueFromIntcodes(position + 1, modes.modeFirstParam)
    def retrieveSecond(self,position, modes):
        # Value of the instruction's second parameter (memory offset +2).
        return self.getValueFromIntcodes(position + 2, modes.modeSecondParam)
def retrieveFirstAndSecond(self,position,modes):
first = self.retrieveFirst(position,modes)
second = self.retrieveSecond(position,modes)
return first, second
    def writeToFirst(self,position, modes, result):
        # Store *result* through the instruction's first parameter (offset +1).
        self.writeResultToIntcodes(position+1,modes.modeFirstParam,result)
    def writeToThird(self,position, modes,result):
        # Store *result* through the instruction's third parameter (offset +3).
        self.writeResultToIntcodes(position + 3, modes.modeThirdParam, result)
def opcode1(self,position,modes):
#adds
first, second = self.retrieveFirstAndSecond(position,modes)
result = first+second
self.writeToThird(position,modes,result)
self.position += 4
return f'opcode 1 at position {position} processed'
def opcode2(self,position,modes):
#multiplies
first, second = self.retrieveFirstAndSecond(position,modes)
result = first * second
self.writeToThird(position,modes,result)
self.position += 4
return f'opcode 2 at position {position} processed'
    def opcode3(self,position,modes):
        # Input instruction: consume a queued value, pause (automatic mode),
        # or prompt interactively on stdin, then store the int via parameter 1.
        if len(self.inputs) > 0:
            # Consume the oldest queued value (FIFO).
            inputReceived = self.inputs[0]
            self.inputs = self.inputs[1:]
        else:
            if self.automaticMode:
                # wait for input
                # Pause without advancing the program counter, so execution
                # re-enters this same instruction when input arrives.
                self.running = False
                return f'pausing for input at position {position}'
            else:
                inputReceived = input('Enter an integer: ')
        try:
            intInputReceived = int(inputReceived)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.
            # before re-raising as ValueError.
            raise ValueError('I said, an integer.')
        self.writeToFirst(position, modes, intInputReceived)
        self.position += 2
        return f'opcode 3 at position {position} processed'
    def automaticModeTakeInputAndUnpauze(self,singleInput):
        # Queue one input value and resume execution from a paused state.
        self.inputs.append(singleInput)
        self.running = True  # redundant with run(), which sets it again
        self.run()
def opcode4(self,position,modes):
#output
result = self.retrieveFirst(position,modes)
if not self.automaticMode:
print(result)
self.outputs.append(result)
self.position +=2
return f'opcode 4 at position {position} processed'
def opcode5(self, position, modes):
#jump if true
ZeroOrOther = self.retrieveFirst(position,modes)
if ZeroOrOther == 0:
self.position += 3
else:
distance = self.retrieveSecond(position,modes)
self.position = distance
return f'opcode 5 at position {position} processed'
def opcode6(self, position, modes):
#jump if false
ZeroOrOther = self.retrieveFirst(position,modes)
if ZeroOrOther == 0:
distance = self.retrieveSecond(position, modes)
self.position = distance
else:
self.position += 3
return f'opcode 6 at position {position} processed'
def opcode7(self,position, modes):
#less than
first, second = self.retrieveFirstAndSecond(position,modes)
if first < second:
result = 1
else:
result = 0
self.writeToThird(position,modes,result)
self.position +=4
return f'opcode 7 at position {position} processed'
def opcode8(self,position, modes):
#equals
first, second = self.retrieveFirstAndSecond(position,modes)
if first == second:
result = 1
else:
result = 0
self.writeToThird(position,modes,result)
self.position +=4
return f'opcode 8 at position {position} processed'
class amplifier:
    """One amplifier stage: an automatic-mode Intcode computer primed with
    its phase setting as the program's first input."""
    def __init__(self,intcode,phaseSetting):
        # Automatic mode makes opcode 3 pause instead of prompting on stdin.
        automaticMode = True
        self.computer = computer(intcode,automaticMode)
        # NOTE(review): phaseSetting arrives as a one-character string from
        # the series classes -- opcode 3 converts it with int(), so this works.
        self.takeInput(phaseSetting)
    def takeInput(self,value):
        # Queue one input and run until the next output/pause/halt.
        self.computer.automaticModeTakeInputAndUnpauze(value)
    def takeOutput(self):
        # Most recent value the program emitted (opcode 4).
        return self.computer.outputs[-1]
class amplifiersInSeries:
    """A one-pass chain of amplifiers: each stage's output feeds the next
    stage's input; the first stage is seeded with input 0."""
    def __init__(self,intcode,phaseSettings):
        # One amplifier per character of the settings string, e.g. "01234".
        stringPhaseSettings = str(phaseSettings)
        self.ampList = []
        for i in range(0,len(stringPhaseSettings)):
            self.ampList.append(amplifier(intcode,stringPhaseSettings[i]))
        # Seed the first amplifier, then pipe outputs down the chain once.
        self.ampList[0].takeInput(0)
        for i in range(1,len(stringPhaseSettings)):
            self.ampList[i].takeInput(self.ampList[i-1].takeOutput())
    def giveFinalOutput(self):
        # Thruster signal: the last amplifier's most recent output.
        return self.ampList[-1].takeOutput()
def constructAmplifiersInSeriesFromFile(path, phaseSettings):
    """Build an amplifiersInSeries from a file whose first line is a
    comma-separated Intcode program."""
    with open(path) as source:
        firstLine = source.readlines()[0]
    program = [int(token) for token in firstLine.split(',')]
    return amplifiersInSeries(program, phaseSettings)
class FindMaxThrusterSignalFromPath:
    """Search every phase-setting permutation of the digits 0-4 for the
    amplifier chain that yields the highest thruster signal.

    After construction, the winning signal is in self.highestThrusterSignal.
    """
    def __init__(self,path):
        self.path = path
        self.amplifierSettingsList = self.returnListOfValidPhaseSettings()
        self.highestThrusterSignal = 0
        self.findHighestThrusterSignal()
    def returnListOfValidPhaseSettings(self):
        """All 120 orderings of '01234' as 5-character strings.

        itertools.permutations yields them in the same (lexicographic)
        order the original five-level nested loop produced.
        """
        from itertools import permutations
        return [''.join(setting) for setting in permutations('01234')]
    def findHighestThrusterSignal(self):
        """Run the amplifier chain for every setting, keeping the best output."""
        for ampSetting in self.amplifierSettingsList:
            ampBank = constructAmplifiersInSeriesFromFile(self.path,ampSetting)
            result = ampBank.giveFinalOutput()
            if result > self.highestThrusterSignal:
                self.highestThrusterSignal = result
class amplifiersInSeriesWithFeedbackLoop:
    """Amplifier chain whose last output is fed back into the first
    amplifier, looping until the final amplifier's program halts."""
    def __init__(self,intcode,phaseSettings):
        # One amplifier per character of the settings string, e.g. "56789".
        stringPhaseSettings = str(phaseSettings)
        self.ampList = []
        for i in range(0,len(stringPhaseSettings)):
            self.ampList.append(amplifier(intcode,stringPhaseSettings[i]))
        self.goThroughAmpList(0)
    def goThroughAmpList(self,firstInput):
        # One pass over the chain; recurses for the next feedback pass until
        # the last machine logged a halt.
        self.ampList[0].takeInput(firstInput)
        for i in range(1,len(self.ampList)):
            self.ampList[i].takeInput(self.ampList[i-1].takeOutput())
        # exitMessage('opcode 99') produces exactly this log line on halt.
        lastLog = self.ampList[-1].computer.log[-1]
        if lastLog != 'opcode 99, exiting':
            # NOTE(review): recursion depth grows with the number of feedback
            # passes -- long-running programs could hit the recursion limit.
            self.goThroughAmpList(self.ampList[-1].takeOutput())
    def giveFinalOutput(self):
        # Thruster signal: last value emitted by the final amplifier.
        return self.ampList[-1].takeOutput()
def constructAmplifiersInSeriesWithFeedbackLoopFromFile(path, phaseSettings):
    """Build an amplifiersInSeriesWithFeedbackLoop from a file whose first
    line is a comma-separated Intcode program."""
    with open(path) as source:
        firstLine = source.readlines()[0]
    program = [int(token) for token in firstLine.split(',')]
    return amplifiersInSeriesWithFeedbackLoop(program, phaseSettings)
class FindMaxThrusterFeedbackLoopSignalFromPath:
    """Search every phase-setting permutation of the digits 5-9 for the
    feedback-loop amplifier chain with the highest thruster signal.

    After construction, the winning signal is in self.highestThrusterSignal.
    """
    def __init__(self,path):
        self.path = path
        self.amplifierSettingsList = self.returnListOfValidPhaseSettings()
        self.highestThrusterSignal = 0
        self.findHighestThrusterSignal()
    def returnListOfValidPhaseSettings(self):
        """All 120 orderings of '56789' as 5-character strings.

        itertools.permutations yields them in the same (lexicographic)
        order the original five-level nested loop produced.
        """
        from itertools import permutations
        return [''.join(setting) for setting in permutations('56789')]
    def findHighestThrusterSignal(self):
        """Run the feedback chain for every setting, keeping the best output."""
        for ampSetting in self.amplifierSettingsList:
            ampBank = constructAmplifiersInSeriesWithFeedbackLoopFromFile(self.path,ampSetting)
            result = ampBank.giveFinalOutput()
            if result > self.highestThrusterSignal:
                self.highestThrusterSignal = result
| [
"github@roadsidegravel.com"
] | github@roadsidegravel.com |
4c1fac6ffc39bfa3667bc5a2ef3b71ca0e4f0283 | bee2af5228232ce94f418b61810cecd93af62615 | /movies/tests.py | a6adfa1859a67fd17757470bea1d839c9c970cc3 | [] | no_license | thuitafaith/djangoapp | b64c2e1a05c67b1135d4d9dd7975c17522238a69 | e06280b34a7b1ec012d0baab6f0fb153875a39b4 | refs/heads/master | 2022-12-11T19:06:08.540528 | 2019-08-29T12:36:45 | 2019-08-29T12:36:45 | 203,321,071 | 0 | 0 | null | 2022-11-22T04:13:07 | 2019-08-20T07:15:28 | Python | UTF-8 | Python | false | false | 1,600 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from .models import Editor,Article,tags
import datetime as dt
# Create your tests here.
class EditorTestClass(TestCase):
    """Unit tests for the Editor model (instantiation and save_editor)."""
    # Build a fresh Editor before each test.
    def setUp(self):
        self.faith=Editor(first_name='faith',last_name='thuita',email='faith.thuita@moringaschool.com')
    # Editor() should produce an Editor instance.
    def test_instance(self):
        self.assertTrue(isinstance(self.faith,Editor))
    # save_editor() should persist the row to the database.
    def test_save_method(self):
        self.faith.save_editor()
        editors = Editor.objects.all()
        self.assertTrue(len(editors)>0)
class ArticleTestClass(TestCase):
    """Unit tests for the Article model and its date-based queries."""
    def setUp(self):
        # creating a new editor and saving it
        self.faith= Editor(first_name='faith',last_name='thuita',email='faith.thuita@moringaschool.com')
        self.faith.save_editor()
        # creating a new tag saving it
        self.new_tag = tags(name='testing')
        self.new_tag.save()
        # Article linked to the editor, then tagged (M2M requires a saved row).
        self.new_article = Article(title='Test Article',post= 'this is a random test post',editor=self.faith)
        self.new_article.save()
        self.new_article.tags.add(self.new_tag)
    def tearDown(self):
        # Wipe all fixture rows between tests.
        Editor.objects.all().delete()
        tags.objects.all().delete()
        Article.objects.all().delete()
    def test_get_news_today(self):
        # The article saved in setUp should appear in today's news.
        today_news = Article.todays_news()
        self.assertTrue(len(today_news) > 0)
    def test_get_news_by_date(self):
        # NOTE(review): this test parses a date but never queries or asserts
        # anything -- the body appears unfinished.
        test_date = '2017-03-17'
        date = dt.datetime.strptime(test_date, '%Y-%m-%d').date()
"thuitamuthoni15@gmail.com"
] | thuitamuthoni15@gmail.com |
31cf540d38df645d0ccd8cb4b0a7c2089941a714 | 77abb27ac8e690d12b4c22e0f4da4c1cb3906042 | /MPIAA1/timer.py | a852f984c8d682515e03669db40d735f3370a959 | [] | no_license | zdarovagoshan/PythonLabs | 1e633f1195e91e7e0c2bd8c4e58e4228ccd2d9ea | 548dc99014e8c3b9a27638a9df3a376387470fbf | refs/heads/master | 2020-12-25T19:03:26.734717 | 2017-06-11T18:15:13 | 2017-06-11T18:15:13 | 94,021,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | import timeit
from DZ import read_file
def make_header(func_names):
    """Build the timing-table header: 'N' plus each function name,
    left-justified in 12-character columns joined by single spaces."""
    cells = ["{0:<12}".format(cell) for cell in ["N"] + func_names]
    return " ".join(cells)
def make_line(n, times):
    """Build one timing-table row: n, then each time with five decimal
    places, all left-justified in 12-character columns."""
    cells = ["{0:<12}".format(n)]
    cells.extend("{0:<12.5f}".format(t) for t in times)
    return " ".join(cells)
def time_us(functions, ns, generator, repeats=int(1e6)):
    """Prints time table for given functions and inputs.
    functions - dictionary of {func name: func(input)} - functions to time,
    ns - list of n selecting which "records_1e{n}.txt" file to load,
    generator - callable invoked once with no arguments before timing
                (NOTE(review): the original doc said func(n), but it is
                called without arguments -- presumably it creates the
                records files; confirm),
    repeats - number of times to call functions for each given input."""
    generator()
    # Sort names so columns are in a stable, alphabetical order.
    keys = sorted(list(functions.keys()))
    print(make_header(keys))
    for n in ns:
        # Input is read from disk, not produced by generator().
        data = read_file("records_1e{0}.txt".format(n))
        times = []
        for key in keys:
            # The lambda re-binds nothing lazily here: it is timed within
            # the same iteration that defines 'key' and 'data'.
            timer = timeit.Timer(lambda: functions[key](data))
            times.append(timer.timeit(repeats))
        print(make_line(n, times))
def time_me(func_name, function, ns, generator, repeats=int(1e6)):
    """Print a timing table for a single function: a thin wrapper that
    delegates to time_us with a one-entry function dictionary.

    func_name - column label for the function,
    function - func(input) - function to time,
    ns - list of n for which input files are read,
    generator - callable run once before timing,
    repeats - number of calls per input."""
    time_us({func_name: function}, ns=ns, generator=generator, repeats=repeats)
| [
"pm53.tyabin@gmail.com"
] | pm53.tyabin@gmail.com |
b6e2709e65f321665681d0977c916336fbd04565 | 4daee30fadd422c84c66cf52d9d41a792bf5df3b | /utils/email_util.py | 6ea54504d5cdaa654140a19bd363a13cbac645b9 | [] | no_license | lin14543/myspiders | d8d4789c9884be1ad925527d290672b6d42d32ce | 7ddb7536b07b75486b7bc4032c04b9a160eda11d | refs/heads/master | 2020-03-22T14:18:01.394577 | 2018-07-08T13:03:08 | 2018-07-08T13:03:08 | 140,168,011 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from smtplib import SMTP_SSL
from email.mime.text import MIMEText
from email.header import Header
from myspiders import settings
import logging
import traceback
import time, random
def send_email(subject, content, receivers):
    """Send an HTML email via SSL SMTP, picking a random sender account.

    subject   -- message subject (UTF-8 encoded header)
    content   -- HTML body
    receivers -- iterable of recipient addresses
    Credentials and host come from the project settings module; failures
    are logged and followed by a 5-second sleep (no retry, no raise).
    """
    message = MIMEText(content, 'html')
    message['From'] = 'robot@lamudatech.com'
    message['Subject'] = Header(subject, 'utf-8')
    # Rotate across the configured sender accounts at random.
    index = random.randint(0, len(settings.SENDER)-1)
    try:
        smtp = SMTP_SSL(settings.HOST_SERVER)
        smtp.ehlo(settings.HOST_SERVER)
        smtp.login(settings.SENDER[index], settings.PWD[index])
        smtp.sendmail(settings.SENDER[index], list(receivers), message.as_string())
        smtp.quit()
        logging.info('success')
    except:
        # NOTE(review): bare except swallows everything, and
        # traceback.print_exc() returns None, so logging.error logs "None"
        # while the traceback goes to stderr -- consider logging.exception.
        logging.error(traceback.print_exc())
        time.sleep(5)
| [
"lincc@lamudatech.com"
] | lincc@lamudatech.com |
f287244a91e88664b5d41777c7749b04894158ea | f4b16d247195621a5413aab56919b4e623b604b8 | /src/faimes/urban/dataimport/opinionmakers/settings.py | ed4e0587d66bb5c2e6bf895523ee08c2b6023e75 | [] | no_license | IMIO/faimes.urban.dataimport | cc1a7e3050538f409c29e3031a175e8d1a96c7db | 67fcaa14a5951df7cbaf64b59794aab0a2b88f7f | refs/heads/master | 2021-01-10T17:52:51.975421 | 2017-03-16T13:27:27 | 2017-03-16T13:27:27 | 52,949,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # -*- coding: utf-8 -*-
from faimes.urban.dataimport.opinionmakers.importer import OpinionMakersImporter
from imio.urban.dataimport.access.settings import AccessImporterFromImportSettings
class OpinionMakersImporterFromImportSettings(AccessImporterFromImportSettings):
    """Import settings bound to OpinionMakersImporter: reads opinion makers
    from the CONSUL table of the 'Tab_Urba 97.mdb' Access database."""
    def __init__(self, settings_form, importer_class=OpinionMakersImporter):
        """Forward the settings form to the base class, defaulting the
        importer to OpinionMakersImporter.
        """
        super(OpinionMakersImporterFromImportSettings, self).__init__(settings_form, importer_class)
    def get_importer_settings(self):
        """
        Return the base importer settings extended with the Access database
        file, table and key column to read.
        """
        settings = super(OpinionMakersImporterFromImportSettings, self).get_importer_settings()
        access_settings = {
            'db_name': 'Tab_Urba 97.mdb',
            'table_name': 'CONSUL',
            'key_column': 'Sigle',
        }
        settings.update(access_settings)
        return settings
| [
"delcourt.simon@gmail.com"
] | delcourt.simon@gmail.com |
4d6dcd13140db07ae3bc200ca0a1ffe77c9a4cf5 | d247ae6374c46c6f5062fd7d2d89d7d3f851ac10 | /drf/treehouse-courses/ed_reviews/settings.py | cd80aac5ae401473e7af77fb0a7b6b8e490cf5ef | [] | no_license | hachtman/python | 99f221e40abb7df712d726617c31422274e77321 | a6b90c0effe972f4ba66e67a456ec36314c4d0ac | refs/heads/master | 2021-01-22T18:14:26.964396 | 2017-06-05T12:35:17 | 2017-06-05T12:35:17 | 85,068,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,472 | py | """
Django settings for ed_reviews project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hr!k)elc@nc+fi@*i7n=sacsazvv)x)g^_y_1^$78t)vzg6ad('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'courses',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ed_reviews.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ed_reviews.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# REST Framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES:': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
)
}
| [
"jackfllr1@googlemail.com"
] | jackfllr1@googlemail.com |
98c6aa7cdec4df7ab3d07d6bf68c3d49a7a7d59e | 57d411725b99581c4bcb3be582a95ac152c38794 | /frontend/models.py | 866c404cdd1c0e2eae23495c907bff180dfceea9 | [] | no_license | contail/moca_init | ea03e82121c90ae3272441bb93564f29cdaa69d5 | 94715473c9e7ef7124d6752f6ae36b461b9dcfaf | refs/heads/master | 2020-03-27T02:34:58.351896 | 2018-10-06T06:52:22 | 2018-10-06T06:52:22 | 145,801,542 | 0 | 0 | null | 2018-08-23T04:45:37 | 2018-08-23T04:45:36 | null | UTF-8 | Python | false | false | 293 | py | from django.db import models
# Create your models here.
class Settings(models.Model):
    # Runtime configuration record for the frontend.
    port = models.IntegerField(default=8000)        # server port to connect/listen on
    batch_size = models.IntegerField(default=5)     # items per batch (exact meaning set by the consumer -- confirm)
    fps = models.IntegerField(default=10)           # frames per second
    ip_address = models.CharField(default='127.0.0.1', max_length=120)  # host address
| [
"qkrtkdwls2121@gmail.com"
] | qkrtkdwls2121@gmail.com |
a535b3a240630adc3c7d8e08c3eb6d4ae7617dca | a1eb63e7cc5152833b61bc1f3976d1f0e9916f17 | /tokens/migrations/0001_initial.py | 64cab2948077363e68e3a34d9e0f51b740402875 | [] | no_license | life-iitk/Lifeiitkbackend | c9c08e926858d6b6841a6878d127cb134e17a435 | c8cec38d06b7207bf67c716a0eb8981d7e97ac48 | refs/heads/master | 2021-07-10T20:54:02.140467 | 2020-08-24T13:33:02 | 2020-08-24T16:35:53 | 189,474,089 | 5 | 50 | null | 2020-08-24T16:35:55 | 2019-05-30T19:57:02 | Python | UTF-8 | Python | false | false | 531 | py | # Generated by Django 2.2.3 on 2019-07-28 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Token table with a
    # CharField primary key and a many-to-many link to users.User.
    # Generated code -- prefer creating a new migration over editing this one.
    initial = True
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Token',
            fields=[
                ('token', models.CharField(max_length=100, primary_key=True, serialize=False)),
                ('user', models.ManyToManyField(related_name='tokens', to='users.User')),
            ],
        ),
    ]
| [
"vikrantmalik051@gmail.com"
] | vikrantmalik051@gmail.com |
cd5c129f362552fb538d4924dff585e79d647985 | 64bc42781f9267da0c2d1042f4b3672809493941 | /tweet.py | 458e53845e9eb0e4ae4b249d1b57c8a6ca00ae03 | [] | no_license | katieamazing/pussybot | a5d47043d2a1087b2d8cd68dc9812673f18b7f53 | 6dfe06b2f83870976d09894cd524b18f85bb140d | refs/heads/master | 2021-01-16T18:32:22.630005 | 2017-08-13T04:23:12 | 2017-08-13T04:23:12 | 100,086,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | import imgs
import os, sys, codecs, yaml, random, requests
import nltk.data
from bs4 import BeautifulSoup
from io import BytesIO
from twython import Twython
def generate_tweet_text():
sys.stdout = codecs.getwriter("utf8")(sys.stdout)
sys.stderr = codecs.getwriter("utf8")(sys.stderr)
with open("C:\PussyBot\PUSSY AND HER LANGUAGE (Marvin R. Clark, 1895).html") as f:
body = f.read()
soup = BeautifulSoup(body, "lxml")
text = soup.get_text()
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
sentences = nltk.sent_tokenize(text)
possible = []
keywords = ("cat", "pussy", "kitten", "meow", "milk", "mouse", "play", "purr", "fish", "eyes", "sleep", "eat", "animal", "feline", "claw")
for s in sentences:
if len(s) > 50 and len(s) < 140 and any(keyword in s.lower() for keyword in keywords):
possible.append(s)
return random.choice(possible).replace(" ", " ")
tweet_image = random.choice(imgs.imgs)
print(tweet_image)
tweet_text = generate_tweet_text()
print(tweet_text)
with open("C:\PussyBot\config.yaml") as f:
config = yaml.load(f)
twitter_app_key = config["twitter_app_key"]
twitter_app_secret = config["twitter_app_secret"]
twitter_oauth_token = config["twitter_oauth_token"]
twitter_oauth_token_secret = config["twitter_oauth_token_secret"]
twitter = Twython(twitter_app_key, twitter_app_secret, twitter_oauth_token, twitter_oauth_token_secret)
res = requests.get(tweet_image)
image_io = BytesIO(res.content)
# Twitter upload, tweet
image_io.seek(0)
response = twitter.upload_media(media=image_io)
twitter.update_status(status=tweet_text, media_ids=[response['media_id']])
| [
"k.allen@live.com"
] | k.allen@live.com |
1360102d7dc0af95012af9cf8fd406fd35c2e3ff | e706bba03c86cee6d92ec16681037af6e748165f | /archive/bsoup-scrape-prod-name.py | 2f4103c28003787bc3ac64bd084aaff5587ce58f | [] | no_license | geekidharsh/tilt-python | f9642d846bc64ae452922035a1ac6051499e3e6c | 1dc00d37345b35dfa1441c9045f6fb2c36bf460b | refs/heads/master | 2023-04-28T11:57:04.802794 | 2023-04-26T00:45:07 | 2023-04-26T00:45:07 | 72,809,839 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | from bs4 import BeautifulSoup as bsoup
import requests as rq
import re
spring_2015 = ""
r = rq.get(spring_2015)
soup = bsoup(r.text)
classes_url_list = [c["href"] for c in soup.find_all("a", href=re.compile(r".*courses.cfm\?campId=1&termId=201501&subjId=.*"))]
print classes_url_list
with open("results.txt","wb") as acct:
for class_url in classes_url_list:
base_url = "http://my.gwu.edu/mod/pws/{}".format(class_url)
r = rq.get(base_url)
soup = bsoup(r.text)
# Use regex to isolate only the links of the page numbers, the one you click on.
page_count_links = soup.find_all("a",href=re.compile(r".*javascript:goToPage.*"))
try:
num_pages = int(page_count_links[-1].get_text())
except IndexError:
num_pages = 1
# Add 1 because Python range.
url_list = ["{}&pageNum={}".format(base_url, str(page)) for page in range(1, num_pages + 1)]
# Open the text file. Use with to save self from grief.
for url_ in url_list:
print "Processing {}...".format(url_)
r_new = rq.get(url_)
soup_new = bsoup(r_new.text)
for tr in soup_new.find_all('tr', align='center'):
stack = []
for td in tr.findAll('td'):
stack.append(td.text.replace('\n', '').replace('\t', '').strip())
acct.write(", ".join(stack) + '\n') | [
"harshvardhanpandey@hotmail.com"
] | harshvardhanpandey@hotmail.com |
3553a6b286d2ebd18a5c9155f8c68220b2a441e1 | 415de1c2d4599ee83519af334ee6f7238a099b7a | /devel/lib/python2.7/dist-packages/rbx1_nav/__init__.py | 5415f392c38a7ba6aa99300eb1e66e7817c72293 | [] | no_license | Mrsongpei/My-ros-Slam | 9ec93368edbcc0688955c539eb4bb54c00828728 | 3fc5de88f8bf617e9a295fe468594b155cb9b94b | refs/heads/master | 2020-06-30T06:12:43.756934 | 2018-07-25T09:50:30 | 2018-07-25T09:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/root/catkin_ws/src/rbx1/rbx1_nav/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
| [
"871452489@qq.com"
] | 871452489@qq.com |
9e20dbe06af05a8d7d2965f3a10c0d4cddb65dd4 | 6a928ba05fb0f0ff1c5fc2b16792299b8b0944d6 | /_Attic/btleclassifier_orig.py | 119176d090171d7c677fcde43c9a509d4e41efe9 | [
"MIT"
] | permissive | simsong/python-corebluetooth | 22b933c31f9aaaa614e76eafe6bfca679a50d626 | 29ef9e8d3e5ab8fd838fd254cd079449d8064441 | refs/heads/master | 2021-08-07T13:22:13.170086 | 2020-07-06T17:47:31 | 2020-07-06T17:47:31 | 197,768,612 | 3 | 3 | null | 2019-07-20T05:32:35 | 2019-07-19T12:25:17 | Python | UTF-8 | Python | false | false | 15,356 | py | # File: btleclassifier.py
# Author: Johannes K Becker <jkbecker@bu.edu>
# Date: 2019-01-29
# Last Modified Date: 2019-07-18
# Last Modified By: Johannes K Becker <jkbecker@bu.edu>
# Advertising Data Type (AD Type) Definitions here:
# https://www.bluetooth.com/specifications/assigned-numbers/generic-access-profile
#
# Data Type Value Data Type Name Reference for Definition
# 0x01 "Flags" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.3 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.3 and 18.1 (v4.0)Core Specification Supplement, Part A, section 1.3
# 0x02 "Incomplete List of 16-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.1 and 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x03 "Complete List of 16-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.1 and 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x04 "Incomplete List of 32-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, section 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x05 "Complete List of 32-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, section 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x06 "Incomplete List of 128-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.1 and 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x07 "Complete List of 128-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.1 and 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x08 "Shortened Local Name" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.2 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.2 and 18.4 (v4.0)Core Specification Supplement, Part A, section 1.2
# 0x09 "Complete Local Name" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.2 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.2 and 18.4 (v4.0)Core Specification Supplement, Part A, section 1.2
# 0x0A "Tx Power Level" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.5 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.5 and 18.3 (v4.0)Core Specification Supplement, Part A, section 1.5
# 0x0D "Class of Device" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.6 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.5 and 18.5 (v4.0)Core Specification Supplement, Part A, section 1.6
# 0x0E "Simple Pairing Hash C" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.6 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.5 and 18.5 (v4.0)
# 0x0E "Simple Pairing Hash C-192" Core Specification Supplement, Part A, section 1.6
# 0x0F "Simple Pairing Randomizer R" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.6 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.5 and 18.5 (v4.0)
# 0x0F "Simple Pairing Randomizer R-192" Core Specification Supplement, Part A, section 1.6
# 0x10 "Device ID" Device ID Profile v1.3 or later
# 0x10 "Security Manager TK Value" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.7 and 18.6 (v4.0)Core Specification Supplement, Part A, section 1.8
# 0x11 "Security Manager Out of Band Flags" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.6 and 18.7 (v4.0)Core Specification Supplement, Part A, section 1.7
# 0x12 "Slave Connection Interval Range" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.8 and 18.8 (v4.0)Core Specification Supplement, Part A, section 1.9
# 0x14 "List of 16-bit Service Solicitation UUIDs" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.9 and 18.9 (v4.0)Core Specification Supplement, Part A, section 1.10
# 0x15 "List of 128-bit Service Solicitation UUIDs" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.9 and 18.9 (v4.0)Core Specification Supplement, Part A, section 1.10
# 0x16 "Service Data" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.10 and 18.10 (v4.0)
# 0x16 "Service Data - 16-bit UUID" Core Specification Supplement, Part A, section 1.11
# 0x17 "Public Target Address" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.13
# 0x18 "Random Target Address" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.14
# 0x19 "Appearance" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.12
# 0x1A "Advertising Interval" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.15
# 0x1B "LE Bluetooth Device Address" Core Specification Supplement, Part A, section 1.16
# 0x1C "LE Role" Core Specification Supplement, Part A, section 1.17
# 0x1D "Simple Pairing Hash C-256" Core Specification Supplement, Part A, section 1.6
# 0x1E "Simple Pairing Randomizer R-256" Core Specification Supplement, Part A, section 1.6
# 0x1F "List of 32-bit Service Solicitation UUIDs" Core Specification Supplement, Part A, section 1.10
# 0x20 "Service Data - 32-bit UUID" Core Specification Supplement, Part A, section 1.11
# 0x21 "Service Data - 128-bit UUID" Core Specification Supplement, Part A, section 1.11
# 0x22 "LE Secure Connections Confirmation Value" Core Specification Supplement Part A, Section 1.6
# 0x23 "LE Secure Connections Random Value" Core Specification Supplement Part A, Section 1.6
# 0x24 "URI" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.18
# 0x25 "Indoor Positioning" Indoor Posiioning Service v1.0 or later
# 0x26 "Transport Discovery Data" Transport Discovery Service v1.0 or later
# 0x27 "LE Supported Features" Core Specification Supplement, Part A, Section 1.19
# 0x28 "Channel Map Update Indication" Core Specification Supplement, Part A, Section 1.20
# 0x29 "PB-ADV" Mesh Profile Specification Section 5.2.1
# 0x2A "Mesh Message" Mesh Profile Specification Section 3.3.1
# 0x2B "Mesh Beacon" Mesh Profile Specification Section 3.9
# 0x3D "3D Information Data" 3D Synchronization Profile, v1.0 or later
# 0xFF "Manufacturer Specific Data" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.4 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.4 and 18.11 (v4.0)Core Specification Supplement, Part A, section 1.4
class BTLEAdvIDToken(object):
def __init__(self, token_key):
self.type = str(token_key)
self.parser = BTLEAdvIDToken.tokens[self.type]['parser']
self.pattern = BTLEAdvIDToken.tokens[self.type]['pattern']
self.tokens = BTLEAdvIDToken.tokens[self.type]['tokens']
@classmethod
def get_matched_tokens(cls, data):
for vendor in BTLEAdvIDToken.tokens.keys():
token = BTLEAdvIDToken(vendor)
if token.pattern in data['raw']:
return token
return None
tokens = {
'Apple': { 'parser': 'parse_token_apple', 'pattern': "ff4c00", 'tokens': ["handoff", "nearby"] },
'Microsoft': { 'parser': 'parse_token_microsoft', 'pattern': "ff0600", 'tokens': ["msdata"] }
}
# @classmethod
# def parse_token_apple(cls, data):
# result = {}
# id_tokens = ['handoff', 'nearby']
# if 'manufacturer-specific' in data.keys() \
# and isinstance(data['manufacturer-specific'], dict):
# for t in id_tokens:
# if t in data['manufacturer-specific'].keys() \
# and isinstance(data['manufacturer-specific'][t], str):
# result[t] = data['manufacturer-specific'][t]
# else:
# result[t] = None
# return result
# @classmethod
# def parse_token_microsoft(cls, data):
# print "Parsing Microsoft", data
# return []
# @classmethod
# def get_token_type(cls, data):
# return
class BTLEAdvClassifier(object):
@classmethod
def parse_data(cls, adv_data):
d = {}
d["raw"] = adv_data
while adv_data:
ad_len = int(adv_data[:2], 16)
ad_str = adv_data[2:2+2*ad_len]
d = cls.parse_ad_structure(d, ad_str)
adv_data = adv_data[2+2*ad_len:]
return d
@classmethod
def parse_ad_structure(cls, d, ad_str):
try:
ad_type = int(ad_str[:2], 16)
ad_data = ad_str[2:]
if ad_type == 0x01:
d["flags"] = cls.parse_ad_type_0x01(ad_data)
elif ad_type == 0x11:
d["sec-mg-oob-flags"] = cls.parse_ad_type_0x11(ad_data)
elif ad_type == 0x16:
d["service-data"] = cls.parse_ad_type_0x16(ad_data)
elif ad_type == 0xff:
d["manufacturer-specific"] = cls.parse_ad_type_0xff(ad_data)
else:
d["unknown"] = (ad_type, ad_data)
except ValueError:
return d
return d
@classmethod
def parse_ad_type_0x01(cls, data):
""" Implementation of Bluetooth Specification Version 4.0 [Vol 3] Table 18.1: Flags
"""
ad_data = int(data, 16)
ad_flags = []
if ad_data & 0x01<<0:
ad_flags.append("'LE Limited Discoverable Mode'")
if ad_data & 0x01<<1:
ad_flags.append("'LE General Discoverable Mode'")
if ad_data & 0x01<<2:
ad_flags.append("'BR/EDR Not Supported (i.e. bit 37 of LMP Extended Feature bits Page 0)'")
if ad_data & 0x01<<3:
ad_flags.append("'Simultaneous LE and BR/EDR to Same Device Capable (Controller) (i.e. bit 49 of LMP Extended Feature bits Page 0)'")
if ad_data & 0x01<<4:
ad_flags.append("'Simultaneous LE and BR/EDR to Same Device Capable (Host) (i.e. bit 66 of LMP Extended Feature bits Page 1)'")
return ad_flags
@classmethod
def parse_ad_type_0x11(cls, data):
""" Implementation of Bluetooth Specification Version 4.0 [Vol 3] Table 18.7: Security Manager OOB Flags
"""
ad_data = int(data, 16)
ad_flags = []
if ad_data & 0x01<<0:
ad_flags.append("'OOB data present'")
else:
ad_flags.append("'OOB data not present'")
if ad_data & 0x01<<1:
ad_flags.append("'LE supported (Host) (i.e. bit 65 of LMP Extended Feature bits Page 1'")
if ad_data & 0x01<<2:
ad_flags.append("'Simultaneous LE and BR/EDR to Same Device Capable (Host) (i.e. bit 66 of LMP Extended Fea- ture bits Page 1)'")
if ad_data & 0x01<<3:
ad_flags.append("'Address Type: Random Address'")
else:
ad_flags.append("'Address Type: Public Address'")
return ad_flags
@classmethod
def parse_ad_type_0x16(cls, data):
""" Implementation of Bluetooth Specification Version 4.0 [Vol 3] Table 18.10: Service Data
and GATT Services list https://www.bluetooth.com/specifications/gatt/services
"""
service_uuid = int(data[2:4]+data[:2], 16) # First 2 octets contain the 16 bit service UUID, flip bytes around
service_data = data[4:] # additional service data
return (service_uuid, service_data)
apple_data_types = {
'02': 'ibeacon',
'05': 'airdrop',
'07': 'airpods',
'08': '(unknown)',
'09': 'airplay_dest',
'0a': 'airplay_src',
'0c': 'handoff',
'10': 'nearby',
}
@classmethod
def parse_ad_type_0xff(cls, data):
""" Implementation of Bluetooth Specification Version 4.0 [Vol 3] Table 18.11: Manufacturer Specific Data
and Company Identifier List: https://www.bluetooth.com/specifications/assigned-numbers/company-identifiers
"""
company_id = int(data[2:4]+data[:2], 16) # First 2 octets contain the 16 bit service UUID, flip bytes around
man_specific_data = data[4:] # additional service data
d = {}
d["company_id"] = company_id
d["raw"] = man_specific_data
if company_id == 0x0006:
d["company_name"] = "Microsoft"
elif company_id == 0x004c:
d["company_name"] = "Apple"
# iBeacon: see format @ https://support.kontakt.io/hc/en-gb/articles/201492492-iBeacon-advertising-packet-structure
d["ibeacon"] = (0x1502 == int(man_specific_data[2:4]+man_specific_data[:2], 16))
while man_specific_data:
if man_specific_data[:2] in cls.apple_data_types:
apple_type = cls.apple_data_types[man_specific_data[:2]]
else:
apple_type = '(unknown)'
apple_len = int(man_specific_data[2:4], 16)
apple_data = man_specific_data[4:4+2*apple_len]
d[apple_type] = apple_data
man_specific_data = man_specific_data[4+2*apple_len:]
#print "###", data, apple_type, apple_len, apple_data, man_specific_data
return d
if __name__ == "__main__":
def print_r(d, level=0):
for k,v in d.items():
if isinstance(v, dict):
print(level*"\t" + k + ":")
print_r(v,level+1)
else:
print(level*"\t" + "%s: %s" % (k, v) )
example_data = ["02011a1aff4c000c0e00750f812422021c3e213d190f3310050b1c6d9072",
"02011a0aff4c0010050b1c6d9072"
]
print("Hi, this is just a demo:")
for data in example_data:
print("Parsing %s" % data)
print_r(BTLEAdvClassifier.parse_data(data), 1)
| [
"simsong@acm.org"
] | simsong@acm.org |
3dc81a6a9e61dc7e240f23f7ae0703a15b0455b2 | d2b6fe6f482e9eb829d8875a326667195e2db6df | /144. Binary Tree Preorder Traversal.py | 0315f22da880535683dcb2f98518aa7984ee2602 | [] | no_license | liuilin610/Leetcode-everyday | 74d09d990de6ed093375bbc5228d0f3975567260 | ad1e627a280f8aa00ba121cb7dd7a8c867949419 | refs/heads/main | 2023-09-03T14:29:08.492533 | 2021-11-09T17:32:33 | 2021-11-09T17:32:33 | 346,599,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def preorder(self, root: TreeNode) -> List[int]:
# III
if not root: return []
res, stack = [], []
cur = root
while cur or stack:
while cur:
res.append(cur.val)
stack.append(cur)
cur = cur.left
tmp = stack.pop()
cur = tmp.right
return res
# II dfs
def dfs(node):
if not node:
return []
res.append(node.val)
dfs(node.left)
dfs(node.right)
res =[]
dfs(root)
return res
"""
'''
# I general
if not root:
return []
return [root.val] + self.preorder(root.left) + self.preorder(root.right)
''' | [
"cuteflouer610@gmail.com"
] | cuteflouer610@gmail.com |
e2e3ca90948d895866c56a11adbffc570ac7234b | d165d6101255bfe064e8340cf49b70583a822d17 | /scripts/ZionNationalPark_PythonGeneratedMap_PartA.py | fc08e24302cb1488ca0f171f259bbdb158b88a60 | [] | no_license | bouchardgis/bouchardgis.github.io | 570250cab1f7906e6ff3188ee53b14b371b3d301 | fc0bdc1359b9c7efbb70b91834641d50c6572af8 | refs/heads/master | 2020-03-21T16:41:47.026772 | 2019-09-18T18:46:02 | 2019-09-18T18:46:02 | 138,787,278 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,499 | py | # Copyright: (c) David Enterprises Inc.
# ArcGIS Version: 10.3
# Python Version: 2.7
#
# Course: GEOS 455 - Assignment 3: Advanced Python
#
# Purpose: This script takes input features that may be impacted by impacting features (input),
# and outputs a feature class including the input features that would be impacted within
# a buffer of the impacting features.
#
#
# References: Python 2.7 Documentation (https://docs.python.org/2/)
# ArcPy Documentation (http://desktop.arcgis.com/en/arcmap/10.3/analyze/arcpy/what-is-arcpy-.htm)
# ArcPy Template for Geoprocessing (https://blogs.esri.com/esri/arcgis/2011/08/04/pythontemplate/)
# Extra help with Update Cursor use from:
# http://gis.stackexchange.com/questions/130588/python-script-to-add-fields-to-feature-classes
#--------------------------------------------------------------------------------------------------------------
import os
import sys
import arcpy
def do_partA(workspace, roads, research_areas, buffer_distance, output_feature):
"""This function takes input roads, buffers an area around them, selects the intersecting research areas and outputs them, and their area in acres"""
try:
# =- INPUTS ------------------------------------------------------------
#Get user parameters - these are retrieved as inputs to the function:
#Workspace - to save a bunch of issues; scratch will be set here as well
#Roads feature class (impacting features)
#Research areas feature class (features of concern)
#Output feature class (impacted features)
#Buffer distance (default 200m)
# = Inputs should be retrieved
# =- INITIALIZE LOCAL VARIABLES & ENV SETTINGS ----------------------------------------
# - Local Vars
output_buffer = workspace + os.sep + 'output_buffer'
# - Environment Settings
arcpy.env.workspace = workspace
arcpy.env.scratchworkspace = workspace
# =- Execute Buffer (200m)
arcpy.Buffer_analysis (roads, output_buffer, buffer_distance, 'FULL', 'ROUND', 'ALL', "", 'PLANAR')
arcpy.AddMessage('Road Buffer created')
# =- Create Feature Layer of buffer
# - Initialize Output Layers
buffer_layer = output_buffer + '_layer'
research_areas_layer = research_areas + '_layer'
# - Make Feature layer for roads buffer
arcpy.MakeFeatureLayer_management (output_buffer, buffer_layer)
# - Make Feature layer for research areas
arcpy.MakeFeatureLayer_management (research_areas, research_areas_layer)
arcpy.AddMessage('Layers Created')
# =-Select by location: whatever features fall within buffer
# - Initialize parameters
overlap_type = 'INTERSECT'
select_features = buffer_layer
search_distance = ''
selection_type = 'NEW_SELECTION'
invert_spatial_relationship = 'NOT_INVERT'
# - Execute Select Layer By Location
arcpy.SelectLayerByLocation_management (research_areas_layer, overlap_type, select_features, search_distance, selection_type, invert_spatial_relationship)
# =- Output feature class from selected features
arcpy.CopyFeatures_management (research_areas_layer, output_feature)
# =- Use an Update Cursor to determine the AREA of IMPACTED FEATURES (convert to acres, or just use AREA field)
arcpy.AddField_management(output_feature, "Area_Acres", "DOUBLE")
with arcpy.da.UpdateCursor(output_feature, ["Area_Acres", "AREA"]) as cursor:
for row in cursor:
row[0] = row[1]/4046.86
cursor.updateRow(row)
# =- Cleanup
del output_buffer
pass
except arcpy.ExecuteError:
print arcpy.GetMessages(2)
except Exception as e:
print e.args[0]
# End do_analysis function
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
# Arguments are optional
argv = tuple(arcpy.GetParameterAsText(i)
for i in range(arcpy.GetArgumentCount()))
do_partA(*argv)
| [
"noreply@github.com"
] | noreply@github.com |
b06ae35fd72d776cb2929e6e2b86c58112bae2f2 | 937c3ee407884edde0cdf550c8d35f75d17448d5 | /module767.py | 6c619df90d7fb5d4496e7fcb225528cdbff292e2 | [] | no_license | Sudha2/PYTHON_PROGRAMMING | f3e8bb3929a73dedaa545245c9062b5b810cf27e | 2440ac5391856418e5b84b70cd4ac5bfcbd8fb4a | refs/heads/master | 2021-05-08T16:29:37.523314 | 2018-04-18T16:09:29 | 2018-04-18T16:09:29 | 120,160,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #-------------------------------------------------------------------------------
# Name: module7
# Purpose:
#
# Author: ELCOT
#
# Created: 12-02-2018
# Copyright: (c) ELCOT 2018
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
mynam=6
n1=1
n2=2
count=0
if mynam<=0:
print("plse enter a positive no")
elif mynam==1:
print("Fibbanoci series upto",mynam)
print(n1)
else:
print("Fibbanoci series upto",mynam)
while count < mynam:
print(n1,end=' ')
nth = n1 + n2
n1 = n2
n2 = nth
count += 1
| [
"noreply@github.com"
] | noreply@github.com |
f80ecc94c57fd9432be06df60dcd3b9146ef817b | f879fdba21b882c447a9f0311f9e00b4264ab1d4 | /hack.py | 922f474568993ac4f6f66b4ab846e549f89f909e | [] | no_license | kimstars/python | 0047a1da9f86c10b1c0892aeee66ac24251c34f1 | b031fe577b4dd8ae274f72c17aa8a1063f8bccea | refs/heads/master | 2023-03-10T01:28:23.874620 | 2021-02-20T08:23:11 | 2021-02-20T08:23:11 | 340,601,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.by import By
import re
path = "C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(path)
url = "https://10fastfingers.com/typing-test/english"
driver.get(url)
price = driver.find_element_by_xpath("//div[@id='wordlist']")
price_content = price.get_attribute('innerHTML')
listsend = price_content.split("|")
print(listsend)
for ele in listsend:
notice=driver.find_element_by_id("inputfield")
notice.send_keys(ele)
notice.send_keys(" ")
time.sleep(0.01)
print("done")
# cuối cùng thì t cũng xong rùi ^^ | [
"buikika9@gmail.com"
] | buikika9@gmail.com |
5eb6a1f21d9fc32d24281c689b6ff9a9054d8bef | db6fb7d752e1f6c502797eaa1ffcde15fd433a8b | /sibi/xmlrpc_server.py | ea6f880cf30cef625e8f423a1e08d8b99eee10ef | [] | no_license | manuelfedele/sibi | 9a4233acb1f1c3a8da6888ecb58e6f7ece2920f7 | 2c8f6f1b62a69cefca3fe596c2d80469a7fd0a7b | refs/heads/master | 2023-04-06T19:02:03.988897 | 2023-03-28T08:08:20 | 2023-03-28T08:08:20 | 319,912,697 | 1 | 0 | null | 2023-03-28T08:07:50 | 2020-12-09T09:55:30 | Python | UTF-8 | Python | false | false | 7,110 | py | import sys
from typing import List
from twisted.internet import reactor
from twisted.web import xmlrpc
from twisted.web.client import HTTPConnectionPool, Agent
from sibi.ibapi.common import BarData
from sibi.ibapi.contract import Contract, ComboLeg
from sibi.ibapi.order import Order
class XMLRPCServer(xmlrpc.XMLRPC):
def __init__(self, factory):
super(XMLRPCServer, self).__init__()
pool = HTTPConnectionPool(reactor, persistent=True)
pool.maxPersistentPerHost = 10
self.agent = Agent(reactor, pool=pool)
self.factory = factory
xmlrpc.addIntrospection(self)
def xmlrpc_reqContractDetails(
self,
symbol: str = "",
secType: str = "STK",
currency: str = "USD",
exchange: str = "SMART",
lastTradeDateOrContractMonth: str = "",
strike: str = "",
right: str = "",
):
"""This returns information about a contract's conID, symbol, local symbol, currency, etc.
reqContractDetails takes as an argument a Contract object which may uniquely match one contract,
and unlike other API functions it can also take a Contract object which matches multiple contracts
in IB's database. When there are multiple matches, a list is returned
Args:
symbol (str): IB symbol (eg. SPY, AAPL, DAX)
secType (str): The type of security (eg. IND, STK, OPT)
currency (str): The currency of the security (eg. EUR, USD)
exchange (str): The exchange (eg SMART, CBOE)
lastTradeDateOrContractMonth (str): This is a date for OPTIONS (eg. 20210104) or a month for FUTURES (eg. 202103)
strike (str): Strike price for options
right (str): Right for options (eg. C or CALL, P or PUT)
"""
contract = Contract()
contract.symbol = symbol
contract.secType = secType
contract.currency = currency
contract.exchange = exchange
contract.lastTradeDateOrContractMonth = lastTradeDateOrContractMonth
contract.strike = strike
contract.right = right
contract.includeExpired = False
result = self.factory.reqContractDetails(contract)
return result
def xmlrpc_reqHistoricalData(
self,
symbol: str = "",
secType: str = "",
currency: str = "",
exchange: str = "",
whatToShow: str = "MIDPOINT",
durationStr: str = "1 M",
barSizeSetting: str = "1 day",
endDateTime: str = "",
useRTH: int = 1,
formatDate: int = 1,
keepUpToDate: bool = False,
chartOptions=None,
) -> List[BarData]:
if chartOptions is None:
chartOptions = []
contract = Contract()
contract.symbol = symbol
contract.secType = secType
contract.currency = currency
contract.exchange = exchange
result = self.factory.reqHistoricalData(
contract,
endDateTime,
durationStr,
barSizeSetting,
whatToShow,
useRTH,
formatDate,
keepUpToDate,
chartOptions,
)
return result
def xmlrpc_reqMktData(
self,
conId: str = 0,
symbol: str = "",
secType: str = "",
currency: str = "",
exchange: str = "",
lastTradeDateOrContractMonth: str = "",
strike: float = 0.0,
right: str = "",
):
contract = Contract()
contract.conId = conId
contract.symbol = symbol
contract.secType = secType
contract.currency = currency
contract.exchange = exchange
contract.lastTradeDateOrContractMonth = lastTradeDateOrContractMonth
contract.strike = strike
contract.right = right
contract.includeExpired = False
result = self.factory.reqMktData(contract)
return result
def xmlrpc_cancelMktData(self, tickerId: int = -1):
"""Cancels market data subscriptions.
Args:
tickerId (int): The ticker ID which was specified in original market data request.
Cancelling a subscription allows the user to make a subscription to a different contract and remain
within the level 1 market data lines allowance.
"""
result = self.factory.cancelMktData(tickerId)
return result
def xmlrpc_placeOrder(
self,
symbol: str = "",
secType: str = "STK",
currency: str = "USD",
exchange: str = "SMART",
lastTradeDateOrContractMonth: str = "",
strike: str = "",
right: str = "",
orderType: str = "MKT",
limitPrice: float = sys.float_info.max,
totalQuantity: int = 1,
action: str = "BUY",
comboLegs=None,
allOrNone: bool = True,
):
"""This procedure places an Order if a valid contract and a valid order are provided
Args:
symbol (str): IB symbol (eg. SPY, AAPL, DAX)
secType (str): The type of security (eg. IND, STK, OPT)
currency (str): The currency of the security (eg. EUR, USD)
exchange (str): The exchange (eg SMART, CBOE)
lastTradeDateOrContractMonth (str): This is a date for OPTIONS (eg. 20210104) or a month for FUTURES (eg. 202103)
strike (str): Strike price for options
right (str): Right for options (eg. C or CALL, P or PUT)
orderType (str): Order's typlogy (eg. MKT, LMT)
limitPrice (float): A limit price provided if LMT order
totalQuantity (int): Quantity to buy
action (int): Order's action (BUY/SELL)
comboLegs (list): If provided, indentifies this order as a Combo order
allOrNone (bool): Indicates whether or not all the order has to be filled on a single execution.
"""
contract = Contract()
contract.symbol = symbol
contract.secType = secType
contract.currency = currency
contract.exchange = exchange
if lastTradeDateOrContractMonth:
contract.lastTradeDateOrContractMonth = lastTradeDateOrContractMonth
if strike:
contract.strike = strike
if right:
contract.right = right
contract.comboLegs = []
if comboLegs and secType == "BAG":
for comboLeg in comboLegs:
leg = ComboLeg()
for key, value in comboLeg.items():
setattr(leg, key, value)
contract.comboLegs.append(leg)
order = Order()
order.action = action
order.orderType = orderType
order.totalQuantity = totalQuantity
order.lmtPrice = float(limitPrice)
order.allOrNone = allOrNone
order.smartComboRoutingParams = []
result = self.factory.placeOrder(contract, order)
return result
def xmlrpc_cancelOrder(self, orderId: int):
result = self.factory.cancelOrder(orderId)
return result
| [
"manuelfedele@gmail.com"
] | manuelfedele@gmail.com |
5cd856ff87fdf738a92a6989085967db6be3856d | 11327a3f091476dea9807cba12cf0954a06adbd2 | /Raspberry Pi/predictor.py | d3fb59a3f46ea7a1488ebd893355500fa93339d4 | [] | no_license | maxned/TrailPi | 5e654c5533314192451af482f4741cea1ed803aa | e86bfe88c3e60c55da4c9e007c250b9f709a234f | refs/heads/master | 2022-12-26T11:57:58.175038 | 2019-06-27T18:05:05 | 2019-06-27T18:15:03 | 172,150,794 | 1 | 0 | null | 2022-12-08T05:16:31 | 2019-02-23T00:03:07 | Python | UTF-8 | Python | false | false | 425 | py | import sys
import os
from PIL import Image
from classifier.classifier import Classifier
cnn = Classifier(json_file = 'classifier/model.json', weights_file = 'classifier/model.h5')
def predict_animal(image_path):
    """Return the classifier's confidence that *image_path* shows an animal.

    The CNN reports an (is_animal, accuracy) pair; when the picture is
    judged not to be an animal, the complementary probability is returned.
    """
    assert os.path.exists(image_path), "Image not found at: " + str(image_path)
    animal, accuracy = cnn.predict_animal(image_path)
    return accuracy if animal else 1.0 - accuracy
"jaleman@ucdavis.edu"
] | jaleman@ucdavis.edu |
20ea9a92f7b5fe7c268b0e7ca25ecb734ff78d94 | fa5e6de7ee7e6c4dbe255021e77f4acc771fa648 | /code/src/plan2scene/utils/tile_util.py | fa50e32b996ee3f80fd4ce0cf655ef692d07f1f6 | [
"MIT"
] | permissive | blurgyy/plan2scene | 85d54ed3c0c10be4dfb2a811ea2335319ead4347 | 3470e1ed3ed24bfeddb71c58fa233d40662fa777 | refs/heads/main | 2023-05-30T19:06:21.284884 | 2021-06-18T01:40:36 | 2021-06-18T01:40:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | import shutil
import subprocess
import os
import os.path as osp
import uuid
from PIL import Image
import tempfile
import subprocess
from plan2scene.utils.io import load_image
def tile_image(image: Image.Image, embark_texture_synthesis_path: str, seam_mask_path: str):
    """
    Correct seams of a texture so it can be tiled.

    :param image: Texture to be seam corrected
    :param embark_texture_synthesis_path: Path to the texture-synthesis executable.
    :param seam_mask_path: Path to texture-synthesis/imgs/masks/1_tile.jpg
    :return: Seam corrected image
    :raises AssertionError: if the input is not a PIL image or the external
        tool exits with a non-zero status.
    """
    assert isinstance(image, Image.Image)
    temp_location = None  # guard: if mkdtemp() fails, finally must not hit a NameError
    try:
        temp_location = tempfile.mkdtemp()
        prefix = uuid.uuid4().hex
        script_path = osp.abspath(embark_texture_synthesis_path)
        input_path = osp.join(temp_location, prefix + "_to_tile.png")
        output_path = osp.join(temp_location, prefix + "_tiled.png")
        image.save(input_path)
        # Build an argv list and avoid shell=True so paths containing spaces
        # or shell metacharacters cannot break (or inject into) the command.
        command = [
            script_path,
            "--inpaint", osp.abspath(seam_mask_path),
            "--out-size", str(image.width),
            "--tiling",
            "-o", output_path,
            "generate", input_path,
        ]
        assert subprocess.call(command) == 0
        return load_image(output_path)
    finally:
        if temp_location is not None:
            shutil.rmtree(temp_location)
"mvidanap@sfu.ca"
] | mvidanap@sfu.ca |
6e99e2b0f6f8111ffd361e92f4944fcb6aba5698 | 5e79fbddf94e91041bc33012d12c36087486a15c | /website-django-css-emb/hyperspacecss/hyperspacecss/wsgi.py | 67ff6dac4ad7a13ce016c241cefbf704692b1cc2 | [] | no_license | Benni1908/DjaDr | cab26caf6fe46d83c3c9041a84ca681020df6037 | a965153f939d71ca64f21d995314706c97b8628c | refs/heads/master | 2020-03-17T14:23:47.778856 | 2018-07-22T18:01:21 | 2018-07-22T18:01:21 | 133,670,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for hyperspacecss project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hyperspacecss.settings")
application = get_wsgi_application()
| [
"promotionmts@gmail.com"
] | promotionmts@gmail.com |
33fb168c8e8a9206598e9a287663cb9dc7e661e6 | f975be6fd6edafeb9f71d7f0c3e0ce6b114b0edc | /media.py | 325bb5c2e426458be812c1da277be485db4c3b23 | [
"MIT"
] | permissive | arushidoshi/Movie_trailer_website | 4a3a297040a68e9dc95987c9ae2d10a2e5d77b65 | 2bbb4c0625eaf8218475b115ec71f947e8e2cb16 | refs/heads/master | 2021-01-19T05:33:18.959052 | 2016-08-04T07:00:25 | 2016-08-04T07:00:25 | 64,906,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import webbrowser
class Movie():
    """Container for the metadata shown on a movie tile."""

    def __init__(self, movie_title, movie_summary, movie_poster, movie_trailer):
        # Stash the supplied metadata on the instance.
        self.title = movie_title
        self.storyline = movie_summary
        self.poster = movie_poster
        self.youtube_trailer = movie_trailer

    def show_trailer(self):
        """Open the trailer URL in a web browser window."""
        webbrowser.open(self.youtube_trailer)
| [
"Jasmin@Jasmins-MacBook-Pro.local"
] | Jasmin@Jasmins-MacBook-Pro.local |
ae0fe073b6e2e089c816b4165d5b733b816e64e9 | 435e08e159e08219138922b5059aa7955cbf28b6 | /Sýnidæmi/Synidaemi5.py | a664fd178dabf806cd0af41aea1076aa498a3409 | [] | no_license | haflidijr/All-assignments | 8d244470a05accffda7872ba31b725c17552e1da | 4072734463f88e3d9b86497e40fa8df739b5980c | refs/heads/master | 2022-12-11T02:50:05.751151 | 2020-09-10T13:59:09 | 2020-09-10T13:59:09 | 293,494,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py |
a = int(input("Sladu inn tolu "))
# try entering 1, 2 or 3
print("Hun verdur borin saman vid breytuna b sem hefur gildid 2.")
b = 2

# The interpreter checks each condition in turn (a < b, then a == b) and
# runs the block of the first condition that is true. If no condition
# matches, the block following the else keyword is chosen.
if a < b:
    print("a er minni en b")
elif a == b:
    print("a er jafnstor og b")
else:
    print("a er staerri en b")
| [
"haflidih20@ru.is"
] | haflidih20@ru.is |
df77e03b76e88dc1ac42b30692eea87c5c6da7f5 | 6eb670743e3fffa8ad967550e23603626c84a4f6 | /projects/__init__.py | 1754665870ef8cfd4f42edc3b5f11af37c2e388c | [
"MIT"
] | permissive | johnopana/App-Repo | 53c27cb4ce271564d4e557fc257603c5e64f4fa1 | 03bad2ba4d74b1bfcc01940bac2b270270f12b7e | refs/heads/master | 2021-09-10T04:51:30.730363 | 2020-02-21T08:34:11 | 2020-02-21T08:34:11 | 240,487,597 | 0 | 0 | MIT | 2021-09-08T01:40:43 | 2020-02-14T10:53:25 | Python | UTF-8 | Python | false | false | 1,677 | py | from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('image', models.ImageField(default='', upload_to='images/')),
('description', models.CharField(max_length=200)),
('date_posted', models.DateTimeField(auto_now=True)),
('link', models.URLField(max_length=250)),
('country', models.CharField(max_length=50)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_picture', models.ImageField(blank=True, default='profiles/a.jpg', upload_to='profiles/')),
('bio', models.CharField(default='Welcome to you bio', max_length=100)),
('contact', models.CharField(max_length=80)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"johnopana2016@gmail.com"
] | johnopana2016@gmail.com |
06d163a2fe5ead35d5e572263a70fde2496f201a | 745197407e81606718c4cdbedb6a81b5e8edf50b | /tests/texttest/TestSelf/TestData/GUI/CopyTestPermission/TargetApp/printpermissions.py | 982669999d14a181bf22034492a6efd8f0066ec8 | [] | no_license | dineshkummarc/texttest-3.22 | 5b986c4f6cc11fd553dab173c7f2e90590e7fcf0 | 85c3d3627082cdc5860d9a8468687acb499a7293 | refs/heads/master | 2021-01-23T20:44:35.653866 | 2012-06-25T07:52:13 | 2012-06-25T07:52:13 | 4,779,248 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | #!/usr/bin/env python
import os
if os.name == "posix":
os.system("fake_executable.py 2> /dev/null")
else:
os.system("fake_executable.py 2> nul")
| [
"dineshkummarc@gmail.com"
] | dineshkummarc@gmail.com |
c429802a9089f13c1454fc1561fb824738bee9ed | 35a2c7e6a01dc7f75116519e4521880416f2a9f2 | /tag/migrations/0002_value.py | 2824ada9ec86e752a45838963178796c90938761 | [] | no_license | engrogerio/edr-rest | ae977857d86aab3ef5b40e4d2be2e24abda97cb9 | a1115a1cd80c0531a85545681b0d3a70b97c529e | refs/heads/master | 2021-01-12T06:35:47.059448 | 2016-12-26T16:20:14 | 2016-12-26T16:20:14 | 77,392,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-07 15:42
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('form', '0002_auto_20160907_1542'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tag', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Value',
fields=[
('created_when', models.DateTimeField(default=datetime.datetime.now)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('numeric', models.DecimalField(decimal_places=10, max_digits=20)),
('text', models.CharField(max_length=1000)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('inspection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='form.Inspection')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tag.Tag')),
],
options={
'abstract': False,
},
),
]
| [
"eng.rogerio@gmail.com"
] | eng.rogerio@gmail.com |
f573772161ac4cbca9f9e470290039cbabc79bb9 | 71a75d3ee8c19a3f0f7c781d5fc4a0b374914d28 | /__moven.py | a3f1c60b93480ee896a4b536b8d083c25e5dd464 | [] | no_license | calciumdave/DMV | 36f95e3eea30aca12b33645b45c9e151cec082fd | 666b0199bd602b0e898e1135486cec9dba4fab15 | refs/heads/master | 2016-08-11T18:52:43.477871 | 2016-01-06T04:39:39 | 2016-01-06T04:39:39 | 44,949,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,375 | py | import datetime
from decimal import Decimal
from operator import add
def str2date(s):
    '''Parse a u'YYYY-MM-DD' string (e.g. u'2014-04-10') into a datetime.date.'''
    year, month, day = (int(part) for part in s.split('-'))
    return datetime.date(year, month, day)
def datefields(d):
    '''Return [year, month, day, ISO weekday (Mon=1..Sun=7)] for date *d*.'''
    fields = [d.year, d.month, d.day]
    fields.append(d.weekday() + 1)
    return fields
def spendMeter(rdd, day0=None, day1=None, category=None, subcategory=None, period='Day'):
    ''' Calculate the current moven spend meter from an rdd (spark database).

    Each rdd element has the form
    [u'2014-04-10', u'Account Transfers', u'Transfer', u'1.74', description, u'Credit'].
    category / subcategory: optional filters; None means "all", while the
    empty string is a legitimate field value and is matched explicitly.
    day0 / day1: optional first/last dates (datetime.date) of the window.
    period: 'year', 'month' (default fallback), 'day' or 'week' -- the
    date field the spending is grouped and averaged on.
    Returns a list of (field_value, cumulative average spending) tuples
    sorted by field value.
    '''
    if category or category == '':  # empty field exists
        rdd = rdd.filter(lambda x: x[1] == category)
    if subcategory or subcategory == '':  # empty subcategory field, not None
        rdd = rdd.filter(lambda x: x[2] == subcategory)
    # key every transaction by its date
    rdd = rdd.map(lambda x: (str2date(x[0]), Decimal(x[3])))
    if day0:
        rdd = rdd.filter(lambda x: x[0] >= day0)
    if day1:
        rdd = rdd.filter(lambda x: x[0] <= day1)
    D = {'year': 0, 'month': 1, 'day': 2, 'week': 3}
    Nfield = D.get(period.lower(), 1)
    # convert to month unit if asked by user
    if Nfield == 1:  # monthly spending: collapse each date onto the 1st of its month
        # bug fix: elements are (date, amount) tuples, so the date is x[0]
        # (`x.year` raised AttributeError)
        rdd = rdd.map(lambda x: (datetime.date(x[0].year, x[0].month, 1), x[1]))
    # sum up spending per every day or per every month
    rdd = rdd.reduceByKey(add)
    # average per day, per weekday, or per month over the period
    rdd1 = rdd.map(lambda x: (datefields(x[0])[Nfield], (1, x[1])))
    # bug fix: a closing parenthesis was missing after the map() call
    result = rdd1.reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1])) \
                 .map(lambda x: (x[0], x[1][1] / x[1][0])).collect()
    # sort by day, weekday, or month (Monday = 0)
    s_result = sorted(result, key=lambda x: x[0])
    # calculate accumulative spending in a month, week, or year
    for i in range(1, len(s_result)):
        s_result[i] = (s_result[i][0], s_result[i][1] + s_result[i - 1][1])
    return s_result
| [
"dahu0002@hotmail.com"
] | dahu0002@hotmail.com |
41966c4c82d82d656d5fa42250f7a8267dfc0855 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_solomon_wzs_coin_jam2.py | dd210cf970f87c5ec2c5810a2df187cfd1dd819d | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 2,771 | py | #!/usr/bin/python2
import math
prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 113,
193, 241, 257, 337, 353, 401, 433, 449, 577, 593, 641,
673, 769, 881, 929, 977, 1009, 1153, 1201, 1217, 1249,
1297, 1361, 1409, 1489, 1553, 1601, 1697, 1777, 1873,
1889, 2017, 2081, 2113, 2129, 2161, 2273, 2417, 2593,
2609, 2657, 2689, 2753, 2801, 2833, 2897, 3041, 3089,
3121, 3137, 3169, 3217, 3313, 3329, 3361, 3457, 3617,
3697, 3761, 3793, 3889, 4001, 4049, 4129, 4177, 4241,
4273, 4289, 4337, 4481, 4513, 4561, 4657, 4673, 4721,
4801, 4817, 4993, 5009, 5153, 5233, 5281, 5297, 5393,
5441, 5521, 5569, 5857, 5953, 6113, 6257, 6337, 6353,
6449, 6481, 6529, 6577, 6673, 6689, 6737, 6833, 6961,
6977, 7057, 7121, 7297, 7393, 7457, 7489, 7537, 7649,
7681, 7793, 7841, 7873, 7937, 8017, 8081, 8161, 8209,
8273, 8353, 8369, 8513, 8609, 8641, 8689, 8737, 8753,
8849, 8929, 9041, 9137, 9281, 9377, 9473, 9521, 9601,
9649, 9697, 9857]
def montgomery(n, p, m):
    """Modular exponentiation: return (n ** p) % m by square-and-multiply.

    Equivalent to pow(n, p, m) for integer p >= 1.
    """
    r = n % m
    k = 1
    while p > 1:
        if p & 1 != 0:
            k = (k * r) % m
        r = (r * r) % m
        # bug fix: was `p /= 2`, which is true (float) division on Python 3
        # and breaks the `p & 1` bit test above; integer halving is what the
        # square-and-multiply loop requires (identical on Python 2)
        p >>= 1
    return (r * k) % m
def is_prime(n):
    """Probable-prime test: trial-divide by the cached small primes, then
    run a Fermat base test (via montgomery) with each of them.

    NOTE(review): Fermat tests can be fooled by Carmichael-style
    composites; presumably adequate for the coin-jam search only.
    """
    if n < 2:
        return False
    return not any(
        n % p == 0 or montgomery(p, n - 1, n) != 1
        for p in prime_list
    )
def f(n, j):
res = ""
for x in xrange(int("1%s1" % ("0" * (n - 2)), 2),
int("1%s1" % ("1" * (n - 2)), 2) + 1,
2):
s = bin(x)[2:]
ok = True
for i in xrange(2, 11, 1):
n = int(s, i)
if is_prime(n):
ok = False
break
if ok:
l = [0] * 9
for i in xrange(2, 11, 1):
n = int(s, i)
ok = False
for k in xrange(2, min(int(math.sqrt(n)), 1000000)):
if n % k == 0:
ok = True
l[i - 2] = str(k)
break
if not ok:
break
if ok:
res += "%s %s\n" % (s, " ".join(l))
j -= 1
if j == 0:
return res[0:len(res)-1]
import sys
fd = open(sys.argv[1], "rb")
t = int(fd.readline().strip())
for i in xrange(1, t + 1):
line = fd.readline().strip()
arr = line.split(" ")
n = int(arr[0])
j = int(arr[1])
res = f(n, j)
print "Case #%d:\n%s" % (i, res)
fd.close()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
4a18de0d5e899b7c2bf6a7a4ae5221920142a355 | 13d6eae7251e043754495b2462ea7f38327f608d | /py3.4/lib/python3.4/site-packages/bit_array/bit_array.py | 3e5aa24657a0f141840abd76d06cd619d2df279e | [] | no_license | sumitbsn/impala-connect | 09972170164deba843165f987d3145c37581d596 | 4adba01184beae609e173bc68ac761cf1f451622 | refs/heads/master | 2020-12-13T11:15:51.378126 | 2020-01-16T19:59:33 | 2020-01-16T19:59:33 | 234,400,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | '''
Copyright (c) 2016 Bryant Moscon - bmoscon@gmail.com
See LICENSE file for the terms and conditions
associated with this software.
'''
import array
from math import ceil
class BitArray(object):
    ''' Fixed-size bit vector packed into an array of unsigned longs.

    Construct either from a bit count (all bits cleared) or from a list of
    0/1 values. Supports len(), indexing, slicing and item assignment.
    '''

    def __init__(self, bits):
        ''' bits: an int bit count, or a list of 0/1 bit values '''
        # L = 4 bytes minimum, but could be 8 bytes. No need to waste all
        # that space, so look up the actual size of L on this machine.
        self.elem_size = array.array('L').itemsize * 8
        self.elem_mask = self.elem_size - 1
        if isinstance(bits, list):
            data = list(bits)
            bits = len(data)
        else:
            data = None
        size = int(ceil(float(bits) / float(self.elem_size)))
        self.size = bits
        self.bits = array.array('L', (0,) * size)
        if data is not None:
            index = 0
            # pack elem_size source bits per word; the joined string is
            # reversed so data[0] lands in bit 0 of the first word
            # (bug fix: xrange -> range, this file ships for Python 3)
            for i in range(0, len(data), self.elem_size):
                self.bits[index] = int("".join(str(x) for x in data[i:i + self.elem_size])[::-1], 2)
                index += 1

    def __len__(self):
        return self.size

    def __str__(self):
        # NOTE(review): zfill(size % elem_size) pads every word by the same
        # partial-word width, which looks questionable -- kept as-is.
        pad = self.size % self.elem_size
        arr_len = len(self.bits)
        return '0b' + ''.join([bin(self.bits[i])[2:].zfill(pad)[::-1] for i in range(arr_len)])

    def __repr__(self):
        return self.__str__()

    def __setitem__(self, index, value):
        """Set bit *index* to 1 if *value* is truthy, else clear it."""
        # bug fix: was `index > self.size`, silently corrupting a wrong bit
        # for index == size; now consistent with __getitem__
        if index >= self.size:
            raise IndexError
        # bug fix: was `index >> self.elem_size`, i.e. shifting by the word
        # WIDTH (32/64), which addressed word 0 for every realistic index
        array_index = index // self.elem_size
        bit_position = index & self.elem_mask
        mask = 1 << bit_position
        if value:
            self.bits[array_index] |= mask
        else:
            mask = ~mask
            self.bits[array_index] &= mask

    def __getitem__(self, index):
        """Return bit *index* (0 or 1); slices return a new BitArray."""
        if isinstance(index, slice):
            return BitArray([self[x] for x in range(*index.indices(len(self)))])
        else:
            if index >= self.size:
                raise IndexError
            # bug fix: was `index >> self.elem_size` (see __setitem__)
            array_index = index // self.elem_size
            bit_position = index & self.elem_mask
            mask = 1 << bit_position
            return 1 if self.bits[array_index] & mask else 0
"sumit@edge1.valhalla.phdata.io"
] | sumit@edge1.valhalla.phdata.io |
fd7686f5c303dc6016ae5eca1939a310a1c8e67d | 643872cfa5fbbab4ff80de7030d1ffa308292a74 | /pyddcurves/utils.py | 255d27302d618fca57d85175bb50e81d87154129 | [
"BSD-2-Clause"
] | permissive | mrayson/pyddcurves | e34cf7729a2f98bd2095829a035f9fbc43e052ef | 38e42491dbac7d84d83c1598d78b411e458bd4a9 | refs/heads/master | 2023-07-09T11:45:28.220236 | 2021-08-10T09:25:57 | 2021-08-10T09:25:57 | 352,568,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,156 | py | import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from datetime import datetime
import h5py
from scipy import stats
from . import models
# Output function
# Convert the trace and observed data to a hdf5 object
def bhm_6_to_h5(outfile, trace, obsdata, use_bhm=True, nparams=6):
nsamples = len(trace)
nt = obsdata['n_times']
# Convert the trace to a numpy array
beta_samples = np.zeros((nparams,nt,nsamples))
mu_beta_samples = np.zeros((nparams, nsamples))
params = ['beta_0','beta_1','beta_2','beta_3','beta_4','beta_5']
for ii in range(nparams):
beta_samples[ii,...] = trace[params[ii]][0:nsamples,:].T
#mu_beta_samples[ii,...] = trace['mu_beta_%d'%ii][0:nsamples].T
if use_bhm:
# Convert the mean samples
mu_beta_samples[0,...] = trace['mu_beta_0'][0:nsamples].T
mu_beta_samples[1,...] = trace['mu_beta_1'][0:nsamples].T
mu_beta_samples[3,...] = trace['mu_beta_3'][0:nsamples].T
mu_beta_samples[5,...] = trace['mu_beta_5'][0:nsamples].T
mu_beta_samples[2,...] = trace['mu_beta_mid'][0:nsamples,0].T
mu_beta_samples[4,...] = trace['mu_beta_mid'][0:nsamples,1].T
###
# Save to hdf5
f = h5py.File(outfile,'w')
f['beta_samples'] = beta_samples
if use_bhm:
f['mu_beta_samples'] = mu_beta_samples
# Save all of the observed data into its own group
g = f.create_group('data')
for kk in obsdata.keys():
g[kk] = obsdata[kk]
print('Saved to %s with contents:'%outfile)
print(f.name)
for name in f:
print('\t',name)
print(g.name)
for name in g:
print('\t',name)
f.close()
def bhm_harmonic_to_h5(outfile, trace, obsdata, omega):
nparams=6
nsamples = len(trace)
nt = obsdata['n_times']
# Convert the trace to a numpy array
beta_samples = np.zeros((nparams,nt,nsamples))
aa_samples = np.zeros((nparams, nsamples))
# Convert the samples
beta_samples[0,...] = trace['beta_0'][0:nsamples].T
beta_samples[1,...] = trace['beta_1'][0:nsamples].T
beta_samples[3,...] = trace['beta_3'][0:nsamples].T
beta_samples[5,...] = trace['beta_5'][0:nsamples].T
beta_samples[2,...] = trace['beta_mid'][0:nsamples,:,0].T
beta_samples[4,...] = trace['beta_mid'][0:nsamples,:,1].T
# Order the mean of the harmonics
aa_samples[0,...] = trace['aa'][0:nsamples,0].T
aa_samples[1,...] = trace['aa'][0:nsamples,1].T
aa_samples[2,...] = trace['aa_mid'][0:nsamples,0].T
aa_samples[3,...] = trace['aa'][0:nsamples,2].T
aa_samples[4,...] = trace['aa_mid'][0:nsamples,1].T
aa_samples[5,...] = trace['aa'][0:nsamples,3].T
###
# Save to hdf5
f = h5py.File(outfile,'w')
f['beta_samples'] = beta_samples
# Save the other variables
f['omega'] = np.array(omega)
f['aa'] = aa_samples
f['Aa'] = trace['Aa'][0:nsamples,...]
f['Ba'] = trace['Ba'][0:nsamples,...]
f['sigma_beta'] = trace['sigma_beta']
f['sigma_curve'] = trace['sigma_curve']
# Save all of the observed data into its own group
g = f.create_group('data')
for kk in obsdata.keys():
g[kk] = obsdata[kk]
print('Saved to %s with contents:'%outfile)
print(f.name)
for name in f:
print('\t',name)
print(g.name)
for name in g:
print('\t',name)
f.close()
#############
# Beta prediction utilities
#############
def harmonic_beta_np(aa, Aa, Ba, omega, tdays):
    """Evaluate a harmonic series: aa + sum_k Aa_k cos(w_k t) + Ba_k sin(w_k t).

    aa: per-sample mean level; Aa, Ba: harmonic amplitudes whose last axis
    indexes the frequencies in *omega*; tdays: times in days. The result is
    broadcast over tdays[..., None] -- typically shaped (nt, nsamples).
    """
    t = tdays[..., None]
    amp = np.ones_like(tdays)[..., None] * aa[None, ...]
    for k, w in enumerate(omega):
        amp = amp + Aa[..., k] * np.cos(w * t) + Ba[..., k] * np.sin(w * t)
    return amp
def truncnorm(lower, upper, mu, sigma):
    """Frozen normal distribution with mean *mu* and scale *sigma*,
    truncated to the interval [lower, upper]."""
    # scipy expects the truncation bounds in standard-normal units
    a = (lower - mu) / sigma
    b = (upper - mu) / sigma
    return stats.truncnorm(a, b, loc=mu, scale=sigma)
def beta_prediction(infile, time, outfile=None, scaled=False):
"""
Create a prediction of beta's from harmonic MCMC hdf5 file
Set outfile to save as hdf5
Returns: beta_samples
"""
# Read in the harmonic h5 file
f = h5py.File(infile,'r')
# Load the harmonic variables and time data variables
omega = f['omega'][:]
aa = f['aa'][:]
Aa = f['Aa'][:]
Ba = f['Ba'][:]
sigma_beta = f['sigma_beta'][:]
# Save all of the observed data into its own group
data = f['/data']
z_std = np.array(data['z_std'])
rho_std = np.array(data['rho_std'])
rho_mu = np.array(data['rho_mu'])
print(z_std,np.array(z_std))
f.close()
# Convert the time to days since 2000-1-1
dt = time- np.datetime64('2000-01-01')
tdays = dt.view('int')*1e-9/86400
# print(aa.shape, Ba.shape)
nsamples = aa.shape[-1]
nt = tdays.shape[0]
# Do a prediction on each beta parameter
mean_samples = np.zeros((6,nt,nsamples))
beta_samples = np.zeros((6,nt,nsamples))
#noise_betas = np.random.normal(scale=sigma_beta[0:nsamples,:], size=(nt,nsamples,6))#.swapaxes(0,1)
for ii in range(6):
mean_samples[ii,...] = harmonic_beta_np(aa[ii,0:nsamples], Aa[0:nsamples,:,ii], Ba[0:nsamples,:,ii], omega, tdays)
for nn in range(nsamples):
if ii in [0,1]:
lower = -1e5
else:
lower = 0
beta_samples[ii,:,nn] = truncnorm(lower,1e5, mean_samples[ii,:,nn], sigma_beta[nn,ii]*np.ones((nt,)) ).rvs(nt)
# Scale the results
if scaled:
beta_samples[0,...] *= rho_std
beta_samples[0,...] += rho_mu
beta_samples[1,...] *= rho_std
beta_samples[2::,...] *= z_std
z_std = 1.
rho_std = 1.
rho_mu = 0.
if outfile is not None:
# Save the results
fout = h5py.File(outfile,'w')
fout['beta_samples'] = beta_samples
# Save all of the observed data into its own group
g = fout.create_group('data')
g['z_std'] = z_std
g['rho_std'] = rho_std
g['rho_mu'] = rho_mu
g['time'] = time.view(int)
fout.close()
print(outfile)
return beta_samples
# Input conversion function
def density_to_obsdict(rho, z2d, time64, ntimeavg, z_std, rho_mu, rho_std):
    """
    Convert the density/depth/time data to the observation dictionary
    consumed by the samplers.

    rho, z2d: (nt, nz) arrays of density and depth (NaN entries dropped);
    time64: datetime64[ns] time axis; ntimeavg: number of consecutive
    profiles that share one time index; z_std / rho_mu / rho_std:
    standardisation constants applied to depth and density.
    """
    n_time, _ = rho.shape
    valid = ~np.isnan(rho)
    # one (fractional) time index per sample; profiles inside the same
    # averaging window map to the same integer index after the floor
    tidx2d = np.arange(0, n_time)[:, np.newaxis] * np.ones_like(rho)
    tidx2d /= ntimeavg
    tvec = np.floor(tidx2d[valid]).astype(int)
    obs = {
        'N': valid.sum(),
        'n_times': tvec.max() + 1,
        'rho': (rho[valid] - rho_mu) / rho_std,
        'z': z2d[valid] / z_std,
        'timeidx': tvec,
        'rho_std': rho_std,
        'rho_mu': rho_mu,
        'z_std': z_std,
        'time': time64[::ntimeavg].view('<i8'),  # datetime64 to integer
    }
    # also expose the time in days since 2000-01-01
    elapsed = obs['time'].view('<M8[ns]') - np.datetime64('2000-01-01')
    obs['tdays'] = elapsed.view('int') * 1e-9 / 86400
    return obs
def merge_obs_dicts(d1, d2):
    """
    Concatenate two observed-data dictionaries into a new one.

    Scalar counters are summed, per-sample arrays are concatenated, and
    d2's time indices are shifted so they follow on from d1's blocks.
    """
    merged = dict(d1)
    merged['N'] = d1['N'] + d2['N']
    merged['n_times'] = d1['n_times'] + d2['n_times']
    for field in ('rho', 'z', 'time', 'tdays'):
        merged[field] = np.hstack([d1[field], d2[field]])
    # Important!! d2's time index continues after the last index of d1
    merged['timeidx'] = np.hstack([d1['timeidx'], d2['timeidx'] + d1['n_times']])
    return merged
def plot_density_h5_step(h5file, tstep, samples = None, zmin=None):
"""
Plot a single time step from a hdf5 file
"""
with h5py.File(h5file,'r') as f:
data = f['/data']
z_std = data['z_std']
rho_std = data['rho_std']
rho_mu = data['rho_mu']
if zmin is None:
zmin = data['z'][:].min()*z_std
zout = np.linspace(zmin,0,100)
rhomean = np.zeros_like(zout).astype(float)
nparams, nt, nsamples = f['beta_samples'].shape
if samples is None:
samples = nsamples
plt.figure(figsize=(5,8))
beta = f['beta_samples'][:]
for rand_loc in np.random.randint(0, nsamples, samples):
rhotmp = models.double_tanh([beta[ii,tstep,rand_loc] for ii in range(6)], zout/z_std)
plt.plot(rhotmp*rho_std+rho_mu, zout, '0.5', lw=0.2, alpha=0.5)
rhomean+=rhotmp*rho_std+rho_mu
idx = data['timeidx'][:]==tstep
plt.plot(data['rho'][idx]*rho_std+rho_mu, data['z'][idx]*z_std,'b.', alpha=0.1)
rhomean /= samples
#plt.plot(rhomean, zout, 'k--',) # Mean fit
#plt.xlim(1020,1027)
plt.ylim(zmin,0 )
plt.ylabel('Depth [m]')
plt.xlabel(r'$\rho$ [kg m$^{-3}$]')
plt.title(data['time'][tstep].astype('<M8[ns]'))
# CSV parsing functions
def convert_time(tt):
    """Parse a timestamp string in either '%Y-%m-%dT%H:%M:%S' or
    '%Y-%m-%d %H:%M' form into a datetime.

    Raises ValueError when neither format matches.
    """
    try:
        dt = datetime.strptime(tt, '%Y-%m-%dT%H:%M:%S')
    except ValueError:
        # bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only a parse failure should
        # trigger the fallback format
        dt = datetime.strptime(tt, '%Y-%m-%d %H:%M')
    return dt
def read_density_csv(csvfile):
    """Read a density time/depth table from *csvfile* into an xarray
    DataArray with dims ('time', 'depth'); NaN densities are replaced by
    a constant fill value."""
    # columns are depths, the index is the (parsed) profile time
    df = pd.read_csv(csvfile, index_col=0, sep=', ', parse_dates=['Time'], date_parser=convert_time)
    depths = np.array([float(col) for col in df.columns.values])
    time = df.index[:]
    rho_obs_2d = df[:].values.astype(float)[:, :]
    # Remove some nan
    fill_value = 1024.
    rho_obs_2d[np.isnan(rho_obs_2d)] = fill_value
    return xr.DataArray(rho_obs_2d, dims=('time', 'depth'),
                        coords={'time': time.values, 'depth': depths})
| [
"matt.rayson@gmail.com"
] | matt.rayson@gmail.com |
6ee5960e1dc64c010d0e2ed95cb5fcd348eccd3e | 1679a86a7d5b2056b57635ffc79e2cbe4ca3f8b4 | /icdb/storage/storage.py | 6bb0baaab448245cc16e5f3843dd7bf67a33337d | [] | no_license | icoz/icdb | 18ad7e32c13afec410d7f37122231eb503ffbdfe | e513515c20f4c23cebd95bd22effd5bdc2ae02bc | refs/heads/master | 2020-05-17T12:12:46.203286 | 2013-12-14T22:21:34 | 2013-12-14T22:21:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,742 | py | # -------------------------------#
# Written by icoz, 2013 #
# email: icoz.vt at gmail.com #
# License: GPL v3 #
# -------------------------------#
'''
File contains records (variable size) one by one.
Inspired by Haystack
While saving bytes(bytes.decode) is used by default.
Record:
magic number, 8 bytes
hash(md5), 16 bytes
flags, 2 bytes (so big for 8 byte alignment), 0 - ok, 1 - deleted
key_size, 2 bytes
value_size, 4 bytes
key, <key_size> bytes
value, <value_size> bytes
'''
from hashlib import md5
import struct
from os import rename, remove
def hash_md5(info):
    ''' Return the 16-byte MD5 digest of str(info). '''
    digest = md5()
    digest.update(str(info).encode())
    return digest.digest()


class Storage(object):
    ''' Append-only key/value store addressed by MD5 hash.

    Record layout (32-byte header followed by the payload):
        magic number (8) | md5 of key (16) | flags (2) | key_size (2) |
        value_size (4) | key bytes | value bytes
    flags: 0 = live, 1 = deleted (tombstoned in place).
    '''

    MAGIC_NUMBER = b'\x59\x0d\x1f\x70\xf9\x52\x55\xad'

    def __init__(self, fname):
        ''' Open (or create) the backing file *fname* in append mode. '''
        self.filename = fname
        self.fout = open(fname, 'ab')

    def __del__(self):
        ''' Safely close the backing file before the object dies. '''
        self.fout.close()

    def __enter__(self):
        ''' Context-manager entry: snapshot the whole file into memory. '''
        with open(self.filename, 'rb') as fin:
            self.binary_cache = fin.read()  # read all file
        return self

    def __exit__(self, type, value, traceback):
        ''' Context-manager exit: flush pending appends. '''
        self.fout.flush()

    def set_unsafe(self, key, value):
        """ Append a record without checking for an existing *key*.
        NOTE! may create duplicates if the key is already present. """
        if type(key) is not str:
            key = str(key)
        if type(value) is not str:
            value = str(value)
        self.__set_by_hash__(hash_md5(key), key, value)

    def set(self, key, value):
        """ Create or update *key*: tombstone any old record, then append. """
        self.__delete_by_hash__(hash_md5(key))
        self.set_unsafe(key, value)

    def records(self):
        """ internal. Yield (offset, md5, flags, key_size, value_size) for
        every live (flags == 0) record found in the cached file image. """
        if 'binary_cache' not in self.__dict__:
            with open(self.filename, 'rb') as fin:
                self.binary_cache = fin.read()  # read all file
        blob = self.binary_cache
        if len(blob) > 32:
            offset = 0
            while True:
                offset = blob.find(self.MAGIC_NUMBER, offset)
                if offset == -1:
                    break
                digest = blob[offset + 8:offset + 24]
                flags, key_size, val_size = struct.unpack_from('hhi', blob, offset + 24)
                if flags == 0:
                    yield (offset, digest, flags, key_size, val_size)
                offset += 1

    def get_list(self):
        """ Return every live (key, value) pair as a list of tuples. """
        pairs = []
        with open(self.filename, 'rb') as fin:
            blob = fin.read()
        for (offset, digest, flags, key_size, val_size) in self.records():
            start = offset + 24 + 2 + 2 + 4
            key = blob[start:start + key_size].decode()
            value = blob[start + key_size:start + key_size + val_size].decode()
            pairs.append((key, value))
        return pairs

    def get_dict(self):
        ''' Return every live pair as a {key: value} dict. '''
        pairs = dict()
        with open(self.filename, 'rb') as fin:
            blob = fin.read()
        for (offset, digest, flags, key_size, val_size) in self.records():
            start = offset + 24 + 2 + 2 + 4
            key = blob[start:start + key_size].decode()
            pairs[key] = blob[start + key_size:start + key_size + val_size].decode()
        return pairs

    def compress(self):
        ''' Recreate the db-file by copying records into a fresh file.

        NOTE(review): records are copied regardless of their deleted flag,
        so tombstoned entries survive compression -- kept as in the
        original; confirm intent.
        '''
        self.fout.close()
        rename(self.filename, self.filename + '.old')
        with open(self.filename + '.old', 'rb') as fin:
            blob = fin.read()
        self.fout = open(self.filename, 'ab')
        offset = 0
        while True:
            offset = blob.find(self.MAGIC_NUMBER, offset)
            if offset == -1:
                break
            flags, key_size, val_size = struct.unpack_from('hhi', blob, offset + 24)
            self.fout.write(blob[offset:offset + 32 + key_size + val_size])
            offset += 1
        remove(self.filename + '.old')
        with open(self.filename, 'rb') as fin:
            self.binary_cache = fin.read()

    def get(self, key):
        ''' Return the value stored for *key*, or None if it does not exist. '''
        if type(key) is not str:
            key = str(key)
        return self.__get_by_hash__(hash_md5(key))

    def __get_by_hash__(self, hash):
        ''' Return the decoded value for *hash*. On fail returns None. '''
        for (offset, digest, flags, key_size, val_size) in self.records():
            if digest == hash:
                blob = self.binary_cache
                start = offset + 24 + 2 + 2 + 4
                return blob[start + key_size:start + key_size + val_size].decode()
        return None

    def __set_by_hash__(self, hash, key, value):
        ''' internal. Append one record (header + key + value). '''
        header = struct.pack('hhi', 0, len(key), len(value))  # flags = 0
        self.fout.write(self.MAGIC_NUMBER)
        self.fout.write(hash)
        self.fout.write(header)
        self.fout.write(key.encode())
        self.fout.write(value.encode())

    def delete(self, key):
        ''' Mark the record for *key* as deleted. '''
        self.__delete_by_hash__(hash_md5(key))

    def __delete_by_hash__(self, hash):
        ''' internal. Set the deleted flag on live records matching *hash*. '''
        for (offset, digest, *rest) in self.records():
            if digest == hash:
                # flip the flags field in place on disk
                with open(self.filename, 'r+b') as f:
                    f.seek(offset + 24)
                    f.write(b'\x01')
| [
"icoz.vt@gmail.com"
] | icoz.vt@gmail.com |
8f490f12947ce2901727b7b62e038095a64c6c37 | 0c11622e157e97fd6bf0c42d20875e590d7f77e2 | /mysite/recipes/migrations/0007_auto_20161119_0509.py | 2540fe6d76fbbeea4b521e6e46d714f9131336d6 | [
"BSD-3-Clause"
] | permissive | Patrick-Lam-Apps/iCanCook | 957b71796d708f78aec74041ff41b5e4874cc000 | 1285e704e8ee079783a8f85bc07b6f3742e0a7e9 | refs/heads/master | 2020-05-23T10:11:36.863728 | 2017-01-30T07:07:15 | 2017-01-30T07:07:15 | 80,399,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-19 05:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: make Recipe.rid an auto-incrementing
    primary key. (Indentation restored — the flattened source was invalid
    Python.)"""

    # must be applied after the previous recipes migration
    dependencies = [
        ('recipes', '0006_auto_20161118_0843'),
    ]

    operations = [
        migrations.AlterField(
            model_name='recipe',
            name='rid',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
    ]
| [
"patricklam92117@gmail.com"
] | patricklam92117@gmail.com |
6269b0e357f1e9f7e52331c7114d3d02ab9a5c37 | ba177c8eeb0f98fbc566ef779cd045e19c678074 | /Fig1F_ZscoreHist_0100.py | 270aaab28b71ea8337c7abd12e0a1c6e7ad8f4e4 | [] | no_license | MattNolanLab/Tennantetal2018 | f0a821be1bc5553ab33535c0b42217572b2d13c5 | ea5babb874006cff6b6da070830f5722ff02a82f | refs/heads/master | 2021-05-10T08:42:38.661282 | 2018-09-18T15:03:09 | 2018-09-18T15:03:09 | 118,899,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,994 | py | # -*- coding: utf-8 -*-
"""
### Calculates Z-scores for each location bin along the track
- Location bins are 10 cm
- Z-scores calculated for each mouse in last training week then averaged over mice
"""
# import packages and functions
from Functions_Core_0100 import extractstops, filterstops, create_srdata, makebinarray, shuffle_analysis_pertrial3, z_score1, adjust_spines,readhdfdata,maketrialarray
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy import stats
import math
from scipy.stats import uniform
#--------------------------------------------------------------------------------------------------------------#
# First half of script gets data for first training week
#--------------------------------------------------------------------------------------------------------------#
# Load raw data: specify the HDF5 file to read data from
filename = 'Data_Input/Behaviour_DataFiles/Task13_0300.h5' # raw data files
# specify mouse/mice and day/s to analyse
days = ['Day' + str(int(x)) for x in np.arange(1,5.1)] # days 1-5 (first training week)
mice = ['M' + str(int(x)) for x in np.arange(1,9.1)] # mice M1-M9 (experiment 1)
bins = np.arange(0,19+1e-6,1) # size of location bins
# empty arrays for storing data
# shape: (day, mouse, location bin); pre-filled with NaN so missing
# sessions are ignored by the nanmean/nanstd aggregation later
firststopstorebeac = np.zeros((len(days), len(mice), 20));firststopstorenbeac = np.zeros((len(days), len(mice), 20));firststopstoreprobe = np.zeros((len(days), len(mice), 20))
firststopstorebeac[:,:,:] = np.nan;firststopstorenbeac[:,:,:] = np.nan; firststopstoreprobe[:,:,:] = np.nan
#loop days and mice to collect data
# Collect per-bin stop z-scores for experiment 1 (Task13), first training week.
# NOTE(review): indentation reconstructed from a whitespace-stripped source;
# the non-beaconed/probe branches are assumed nested inside the
# mouse-selection check — confirm against the original repository.
for mcount, mouse in enumerate(mice):
    for dcount, day in enumerate(days):
        try:
            saraharray = readhdfdata(filename, day, mouse, 'raw_data')  # get raw datafile for mouse and day
        except KeyError:
            print('Error, no file')
            continue
        # make array of trial number per row of data in dataset
        trialarray = maketrialarray(saraharray)  # array of trial numbers, same length as saraharray
        saraharray[:, 9] = trialarray[:, 0]  # replace trial number because of increment error (see README.py)
        # split data by trial type (column 8: 0 = beaconed, 10 = non-beaconed, 20 = probe)
        dailymouse_b = np.delete(saraharray, np.where(saraharray[:, 8] > 0), 0)
        dailymouse_nb = np.delete(saraharray, np.where(saraharray[:, 8] != 10), 0)
        dailymouse_p = np.delete(saraharray, np.where(saraharray[:, 8] != 20), 0)
        # extract then filter stops for each trial type
        stopsdata_b = filterstops(extractstops(dailymouse_b))
        stopsdata_nb = filterstops(extractstops(dailymouse_nb))
        stopsdata_p = filterstops(extractstops(dailymouse_p))
        # shuffle stop data & store z-scores for the analysed mice only
        if mcount == 3 or mcount == 5 or mcount == 6 or mcount == 7 or mcount == 8:
            trialids_b = np.unique(stopsdata_b[:, 2])  # unique trial numbers (beaconed)
            srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_b, trialids_b)
            zscore_b = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
            firststopstorebeac[dcount, mcount, :] = zscore_b
            if stopsdata_nb.size > 0:  # if there is non-beaconed data
                trialids_nb = np.unique(stopsdata_nb[:, 2])
                srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_nb, trialids_nb)
                zscore_nb = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
                firststopstorenbeac[dcount, mcount, :] = zscore_nb
            if stopsdata_p.size > 0:  # if there is probe data
                trialids_p = np.unique(stopsdata_p[:, 2])
                srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_p, trialids_p)
                zscore_p = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
                firststopstoreprobe[dcount, mcount, :] = zscore_p
        print('##...', mcount, day, '...##')
    # the original ended the loop with `mcount += 1`; it is redundant
    # (dead) because enumerate() reassigns mcount on every iteration
# Load raw data: specify the HDF5 file to read data from
filename = 'Data_Input/Behaviour_DataFiles/Task12_0600.h5'
# specify mouse/mice and day/s to analyse
days = ['Day' + str(int(x)) for x in np.arange(1,5.1)] # days 1-5 (first training week)
mice = ['M' + str(int(x)) for x in np.arange(1,8.1)]# choose specific day/s
# empty arrays for storing data
# shape: (day, mouse, location bin); NaN marks sessions with no data
firststopstorebeac2 = np.zeros((len(days), len(mice), 20));firststopstorenbeac2= np.zeros((len(days), len(mice), 20));firststopstoreprobe2= np.zeros((len(days), len(mice), 20))
firststopstorebeac2[:,:,:] = np.nan;firststopstorenbeac2[:,:,:] = np.nan;firststopstoreprobe2[:,:,:] = np.nan
#loop days and mice to collect data
# Collect per-bin stop z-scores for experiment 2 (Task12), first training week.
# FIX: the original had a second, unreachable `continue` directly after the
# except-block `continue`; removed as dead code.
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
for mcount, mouse in enumerate(mice):
    for dcount, day in enumerate(days):
        try:
            saraharray = readhdfdata(filename, day, mouse, 'raw_data')
        except KeyError:
            print('Error, no file')
            continue
        # make array of trial number per row of data in dataset
        trialarray = maketrialarray(saraharray)
        saraharray[:, 9] = trialarray[:, 0]  # replace trial number because of increment error (see README.py)
        # split data by trial type (column 8: 0 = beaconed, 10 = non-beaconed, 20 = probe)
        dailymouse_b = np.delete(saraharray, np.where(saraharray[:, 8] > 0), 0)
        dailymouse_nb = np.delete(saraharray, np.where(saraharray[:, 8] != 10), 0)
        dailymouse_p = np.delete(saraharray, np.where(saraharray[:, 8] != 20), 0)
        # extract then filter stops for each trial type
        stopsdata_b = filterstops(extractstops(dailymouse_b))
        stopsdata_nb = filterstops(extractstops(dailymouse_nb))
        stopsdata_p = filterstops(extractstops(dailymouse_p))
        # shuffle data and store z-scores for the control mice only
        if mcount == 5 or mcount == 6 or mcount == 7:
            trialids_b = np.unique(stopsdata_b[:, 2])  # unique trial numbers (beaconed)
            srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_b, trialids_b)
            zscore_b = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
            firststopstorebeac2[dcount, mcount, :] = zscore_b
            if stopsdata_nb.size > 0:  # if there is non-beaconed data
                trialids_nb = np.unique(stopsdata_nb[:, 2])
                srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_nb, trialids_nb)
                zscore_nb = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
                firststopstorenbeac2[dcount, mcount, :] = zscore_nb
            if stopsdata_p.size > 0:  # if there is probe data
                trialids_p = np.unique(stopsdata_p[:, 2])
                srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_p, trialids_p)
                zscore_p = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
                firststopstoreprobe2[dcount, mcount, :] = zscore_p
        print('##...', mcount, day, '...##')
    # redundant `mcount += 1` from the original removed: enumerate()
    # reassigns mcount on every iteration
# AVERAGE DATA FOR PLOTS
# stack experiments then average over days then mice
# (np.hstack joins the two experiments along the mouse axis: 9 + 8 = 17 columns)
con_b1 = np.nanmean(np.nanmean(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0), axis = 0)
con_nb1 = np.nanmean(np.nanmean(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0), axis = 0)
con_p1 = np.nanmean(np.nanmean(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0), axis = 0)
# SEM over the 8 analysed mice
sdcon_b1 = np.nanstd(np.nanmean(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0), axis = 0)/math.sqrt(8)
sdcon_nb1 = np.nanstd(np.nanmean(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0), axis = 0)/math.sqrt(8)
sdcon_p1 = np.nanstd(np.nanmean(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0), axis = 0)/math.sqrt(8)
# WRITE DATA TO .CSV FOR R
# stack experiments then average over days
con_beac1 = np.nanmean(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0)
con_nbeac1 = np.nanmean(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0)
con_probe1 = np.nanmean(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0)
sdcon_beac1 = np.nanstd(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0)/math.sqrt(8)
sdcon_nbeac1 = np.nanstd(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0)/math.sqrt(8)
sdcon_probe1 = np.nanstd(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0)/math.sqrt(8)
# rows 3,5,6,7,8 are the analysed mice of experiment 1; rows 14-16 are
# experiment-2 mice 5-7 after the hstack (offset by the 9 exp-1 columns)
x = np.vstack((con_beac1[3,:],con_beac1[5,:],con_beac1[6,:],con_beac1[7,:],con_beac1[8,:],con_beac1[14,:],con_beac1[15,:],con_beac1[16,:])) # stack mice
x1 = np.vstack((con_nbeac1[3,:],con_nbeac1[5,:],con_nbeac1[6,:],con_nbeac1[7,:],con_nbeac1[8,:],con_nbeac1[14,:],con_nbeac1[15,:],con_nbeac1[16,:]))
x2 = np.vstack((con_probe1[3,:],con_probe1[5,:],con_probe1[6,:],con_probe1[7,:],con_probe1[8,:],con_probe1[14,:],con_probe1[15,:],con_probe1[16,:],))
# NOTE: this rebinds `mice` (previously the list of mouse names); the list
# is re-created before the next data-collection loop
mice = np.array([1,2,3,4,5,6,7,8]); mouse = np.hstack((mice, mice, mice)) # array of mouse number
trialb = np.array([1,1,1,1,1,1,1,1]); trialnb = np.array([2,2,2,2,2,2,2,2]); trialp = np.array([3,3,3,3,3,3,3,3]); trials = np.hstack((trialb, trialnb, trialp)) # array for trial type
x = np.vstack((x,x1,x2)) # stack beaconed, nonbeaconed and probe
data = np.vstack((mouse, trials)); data=np.transpose(data) # stack mouse & trial arrays
data = np.hstack((data,x))# stack data and mouse, trial arrays
np.savetxt('Data_Output/Figure1/Figure1_F_Week1_0100.csv', data,fmt = '%i,%i,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f', delimiter = '\t', header = 'Mouse, Trial, 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20') # save data
# WRITE DATA TO .CSV FOR R
# stack experiments then average over days
# NOTE(review): the *11 variables below duplicate the *1 variables above and
# are not referenced later in this view — candidates for removal; confirm first
con_beac11 = np.nanmean(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0)
con_nbeac11 = np.nanmean(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0)
con_probe11 = np.nanmean(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0)
sdcon_beac11 = np.nanstd(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0)/math.sqrt(8)
sdcon_nbeac11 = np.nanstd(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0)/math.sqrt(8)
sdcon_probe11 = np.nanstd(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0)/math.sqrt(8)
#--------------------------------------------------------------------------------------------------------------#
# Next half of script gets data for last training week
#--------------------------------------------------------------------------------------------------------------#
# Load raw data: specify the HDF5 file to read data from
filename = 'Data_Input/Behaviour_DataFiles/Task13_0300.h5' # raw data files
# specify mouse/mice and day/s to analyse
days = ['Day' + str(int(x)) for x in np.arange(15,18.1)] # days 15-18 (last training week)
mice = ['M' + str(int(x)) for x in np.arange(1,9.1)] # mice M1-M9 (experiment 1)
# empty arrays for storing data: shape (day, mouse, location bin), NaN = missing session
firststopstorebeac = np.zeros((len(days), len(mice), 20));firststopstorenbeac = np.zeros((len(days), len(mice), 20));firststopstoreprobe = np.zeros((len(days), len(mice), 20)); firststopstorebeac[:,:,:] = np.nan;firststopstorenbeac[:,:,:] = np.nan; firststopstoreprobe[:,:,:] = np.nan
# loop days and mice to collect data
# Collect per-bin stop z-scores for experiment 1 (Task13), last training week.
# NOTE(review): indentation reconstructed from a whitespace-stripped source;
# the non-beaconed/probe branches are assumed nested inside the
# mouse-selection check — confirm against the original repository.
for mcount, mouse in enumerate(mice):
    for dcount, day in enumerate(days):
        try:
            saraharray = readhdfdata(filename, day, mouse, 'raw_data')  # get raw datafile for mouse and day
        except KeyError:
            print('Error, no file')
            continue
        # make array of trial number per row of data in dataset
        trialarray = maketrialarray(saraharray)
        saraharray[:, 9] = trialarray[:, 0]  # replace trial number because of increment error (see README.py)
        # split data by trial type (column 8: 0 = beaconed, 10 = non-beaconed, 20 = probe)
        dailymouse_b = np.delete(saraharray, np.where(saraharray[:, 8] > 0), 0)
        dailymouse_nb = np.delete(saraharray, np.where(saraharray[:, 8] != 10), 0)
        dailymouse_p = np.delete(saraharray, np.where(saraharray[:, 8] != 20), 0)
        # extract then filter stops for each trial type
        stopsdata_b = filterstops(extractstops(dailymouse_b))
        stopsdata_nb = filterstops(extractstops(dailymouse_nb))
        stopsdata_p = filterstops(extractstops(dailymouse_p))
        # shuffle stops data & store z-scores for the analysed mice only
        if mcount == 3 or mcount == 5 or mcount == 6 or mcount == 7 or mcount == 8:
            trialids_b = np.unique(stopsdata_b[:, 2])
            srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_b, trialids_b)
            zscore_b = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
            firststopstorebeac[dcount, mcount, :] = zscore_b
            if stopsdata_nb.size > 0:
                trialids_nb = np.unique(stopsdata_nb[:, 2])
                srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_nb, trialids_nb)
                zscore_nb = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
                firststopstorenbeac[dcount, mcount, :] = zscore_nb
            if stopsdata_p.size > 0:
                trialids_p = np.unique(stopsdata_p[:, 2])
                srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_p, trialids_p)
                zscore_p = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
                firststopstoreprobe[dcount, mcount, :] = zscore_p
        print('##...', mcount, day, '...##')
    # redundant `mcount += 1` from the original removed: enumerate()
    # reassigns mcount on every iteration
# Load raw data: specify the HDF5 file to read data from
# FIX: path was 'DataFiles/Task12_0600.h5', inconsistent with the week-1
# load of the very same file ('Data_Input/Behaviour_DataFiles/Task12_0600.h5')
filename = 'Data_Input/Behaviour_DataFiles/Task12_0600.h5'
# specify mouse/mice and day/s to analyse (last training week)
days = ['Day' + str(int(x)) for x in np.arange(15, 18.1)]
mice = ['M' + str(int(x)) for x in np.arange(1, 8.1)]
# empty (day, mouse, location-bin) arrays; NaN marks missing sessions so the
# later nanmean/nanstd aggregation ignores them
firststopstorebeac2 = np.full((len(days), len(mice), 20), np.nan)
firststopstorenbeac2 = np.full((len(days), len(mice), 20), np.nan)
firststopstoreprobe2 = np.full((len(days), len(mice), 20), np.nan)
# loop days and mice to collect data
# Collect per-bin stop z-scores for experiment 2 (Task12), last training week.
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
for mcount, mouse in enumerate(mice):
    for dcount, day in enumerate(days):
        try:
            saraharray = readhdfdata(filename, day, mouse, 'raw_data')
        except KeyError:
            print('Error, no file')
            continue
        # make array of trial number per row of data in dataset
        trialarray = maketrialarray(saraharray)
        saraharray[:, 9] = trialarray[:, 0]  # replace trial number because of increment error (see README.py)
        # split data by trial type (column 8: 0 = beaconed, 10 = non-beaconed, 20 = probe)
        dailymouse_b = np.delete(saraharray, np.where(saraharray[:, 8] > 0), 0)
        dailymouse_nb = np.delete(saraharray, np.where(saraharray[:, 8] != 10), 0)
        dailymouse_p = np.delete(saraharray, np.where(saraharray[:, 8] != 20), 0)
        # extract then filter stops for each trial type
        stopsdata_b = filterstops(extractstops(dailymouse_b))
        stopsdata_nb = filterstops(extractstops(dailymouse_nb))
        stopsdata_p = filterstops(extractstops(dailymouse_p))
        # shuffle stops data & store z-scores for the control mice only
        if mcount == 5 or mcount == 6 or mcount == 7:
            trialids_b = np.unique(stopsdata_b[:, 2])
            srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_b, trialids_b)
            zscore_b = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
            firststopstorebeac2[dcount, mcount, :] = zscore_b
            if stopsdata_nb.size > 0:
                trialids_nb = np.unique(stopsdata_nb[:, 2])
                srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_nb, trialids_nb)
                zscore_nb = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
                firststopstorenbeac2[dcount, mcount, :] = zscore_nb
            if stopsdata_p.size > 0:
                trialids_p = np.unique(stopsdata_p[:, 2])
                srbin_mean, srbin_std, shuffled_mean, shuffled_std = shuffle_analysis_pertrial3(stopsdata_p, trialids_p)
                zscore_p = z_score1(srbin_mean, srbin_std, shuffled_mean, shuffled_std)
                firststopstoreprobe2[dcount, mcount, :] = zscore_p
        print('##...', mcount, day, '...##')
    # redundant `mcount += 1` from the original removed: enumerate()
    # reassigns mcount on every iteration
# AVERAGE DATA FOR PLOTS
# stack experiments then average over days then mice
con_b = np.nanmean(np.nanmean(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0), axis = 0)
con_nb = np.nanmean(np.nanmean(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0), axis = 0)
con_p = np.nanmean(np.nanmean(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0), axis = 0)
# NOTE(review): SEM divisor is sqrt(6) here but sqrt(8) in the week-1 block,
# although 8 mice are exported in both — confirm which n is intended
sdcon_b = np.nanstd(np.nanmean(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0), axis = 0)/math.sqrt(6)
sdcon_nb = np.nanstd(np.nanmean(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0), axis = 0)/math.sqrt(6)
sdcon_p = np.nanstd(np.nanmean(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0), axis = 0)/math.sqrt(6)
# WRITE DATA TO .CSV FOR R
# stack experiments then average over days
con_beac = np.nanmean(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0)
con_nbeac = np.nanmean(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0)
con_probe = np.nanmean(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0)
sd_con_beac = np.nanstd(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0)/math.sqrt(6)
sd_con_nbeac = np.nanstd(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0)/math.sqrt(6)
sd_con_probe = np.nanstd(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0)/math.sqrt(6)
# stack experiments then average over days
# (the *22 variables feed the single-mouse example plot at the end of the file)
con_beac22 = np.nanmean(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0)
con_nbeac22 = np.nanmean(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0)
con_probe22 = np.nanmean(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0)
sd_con_beac22 = np.nanstd(np.hstack((firststopstorebeac[:,:,:],firststopstorebeac2[:,:,:])), axis = 0)/math.sqrt(6)
sd_con_nbeac22 = np.nanstd(np.hstack((firststopstorenbeac[:,:,:],firststopstorenbeac2[:,:,:])), axis =0)/math.sqrt(6)
sd_con_probe22 = np.nanstd(np.hstack((firststopstoreprobe[:,:,:],firststopstoreprobe2[:,:,:])), axis = 0)/math.sqrt(6)
# rows 3,5,6,7,8 = analysed exp-1 mice; rows 14-16 = exp-2 mice 5-7 after hstack
x = np.vstack((con_beac[3,:],con_beac[5,:],con_beac[6,:],con_beac[7,:],con_beac[8,:],con_beac[14,:],con_beac[15,:],con_beac[16,:]))
x1 = np.vstack((con_nbeac[3,:],con_nbeac[5,:],con_nbeac[6,:],con_nbeac[7,:],con_nbeac[8,:],con_nbeac[14,:],con_nbeac[15,:],con_nbeac[16,:]))
x2 = np.vstack((con_probe[3,:],con_probe[5,:],con_probe[6,:],con_probe[7,:],con_probe[8,:],con_probe[14,:],con_probe[15,:],con_probe[16,:],))
mice = np.array([1,2,3,4,5,6,7,8]); mouse = np.hstack((mice, mice, mice))
trialb = np.array([1,1,1,1,1,1,1,1]); trialnb = np.array([2,2,2,2,2,2,2,2]); trialp = np.array([3,3,3,3,3,3,3,3]); trials = np.hstack((trialb, trialnb, trialp))
x = np.vstack((x,x1,x2))
data = np.vstack((mouse, trials)); data=np.transpose(data)
data = np.hstack((data,x))
np.savetxt('Data_Output/Figure1/Figure1_F_Week4_0100.csv', data,fmt = '%i,%i,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f,%10.3f', delimiter = '\t', header = 'Mouse, Trial, 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20')
# PLOT GRAPHS
# Figure: average z-scored stops vs track location, one panel per trial type
# (beaconed / non-beaconed / probe); red = week 4, blue = week 1
bins = np.arange(0.5,19.5+1e-6,1) # track bins
fig = plt.figure(figsize = (12,3))
ax = fig.add_subplot(1,3,1)
ax.axvspan(44, 44+12, facecolor='g', alpha=0.25, hatch = '/', linewidth =0) # green box spanning the rewardzone - to mark reward zone
ax.axvspan(0, 15, facecolor='k', alpha=0.15, hatch = '/', linewidth =0) # black box
ax.axvspan(100-15, 100, facecolor='k', alpha=0.15, hatch = '/', linewidth =0)# black box
ax.axvline(0, linewidth = 3, color = 'black') # bold line on the y axis
ax.axhline(-10, linewidth = 3, color = 'black') # bold line on the x axis
ax.axhline(0, linewidth = 1,ls='--', color = 'black') # dashed zero line
ax.plot(bins*5,con_b,color = 'red',label = 'AAV-fl-GFP', linewidth = 2) # plot beaconed trials, week 4
ax.fill_between(bins*5,con_b-sdcon_b,con_b+sdcon_b, facecolor = 'red', alpha = 0.3)
ax.plot(bins*5,con_b1,'',color = 'blue',label = 'AAV-fl-GFP', linewidth = 2) # plot beaconed trials, week 1
ax.fill_between(bins*5,con_b1-sdcon_b1,con_b1+sdcon_b1, facecolor = 'blue', alpha = 0.3)
ax.tick_params(axis='x', pad = 10, top='off', right = 'off', direction = 'out',width = 2, length = 7,labelsize =16)
ax.tick_params(axis='y', pad = 10, top='off', right = 'off', direction = 'out',width = 2, length = 7,labelsize =16)
ax.set_xlim(0,100)
ax.set_ylim(-10,10)
adjust_spines(ax, ['left','bottom']) # removes top and right spines
ax.locator_params(axis = 'x', nbins=3) # set number of ticks on x axis
ax.locator_params(axis = 'y', nbins=4) # set number of ticks on y axis
ax.set_xticklabels(['0', '100', '200'])
# NOTE(review): the y axis shows z-scores but is labelled 'Location (cm)' —
# probably a copy-paste slip; confirm intended label before changing
ax = plt.ylabel('Location (cm)', fontsize=16, labelpad = 18)
ax = fig.add_subplot(1,3,2) # non-beaconed trials
ax.axvspan(44, 44+12, facecolor='g', alpha=0.25, hatch = '/', linewidth =0) # green box spanning the rewardzone - to mark reward zone
ax.axvspan(0, 15, facecolor='k', alpha=0.15, hatch = '/', linewidth =0) # black box
ax.axvspan(100-15, 100, facecolor='k', alpha=0.15, hatch = '/', linewidth =0)# black box
ax.axvline(0, linewidth = 3, color = 'black') # bold line on the y axis
ax.axhline(-10, linewidth = 3, color = 'black') # bold line on the x axis
ax.axhline(0, linewidth = 1,ls='--', color = 'black') # dashed zero line
ax.plot(bins*5,con_nb,color = 'red', linewidth = 2) # plot non-beaconed trials, week 4
ax.fill_between(bins*5,con_nb-sdcon_nb,con_nb+sdcon_nb, facecolor = 'red', alpha = 0.3)
ax.plot(bins*5,con_nb1,color = 'blue', linewidth = 2) # plot non-beaconed trials, week 1
ax.fill_between(bins*5,con_nb1-sdcon_nb1,con_nb1+sdcon_nb1, facecolor = 'blue', alpha = 0.3)
ax.tick_params(axis='x', pad = 10, top='off', right = 'off', direction = 'out',width = 2, length = 7,labelsize =16)
ax.tick_params(axis='y', pad = 10, top='off', right = 'off', direction = 'out',width = 2, length = 7,labelsize =16)
ax.set_xlim(0,100)
ax.set_ylim(-10,10)
adjust_spines(ax, ['left','bottom']) # removes top and right spines
ax.locator_params(axis = 'x', nbins=3) # set number of ticks on x axis
ax.locator_params(axis = 'y', nbins=4) # set number of ticks on y axis
ax.set_xticklabels(['0', '100', '200'])
ax = plt.xlabel('Location (cm)', fontsize=16, labelpad = 18)
ax = fig.add_subplot(1,3,3) # probe trials
ax.axvspan(44, 44+12, facecolor='g', alpha=0.25, hatch = '/', linewidth =0) # green box spanning the rewardzone - to mark reward zone
ax.axvspan(0, 15, facecolor='k', alpha=0.15, hatch = '/', linewidth =0) # black box
ax.axvspan(100-15, 100, facecolor='k', alpha=0.15, hatch = '/', linewidth =0)# black box
ax.axhline(0, linewidth = 1,ls='--', color = 'black') # dashed zero line
ax.axvline(0, linewidth = 3, color = 'black') # bold line on the y axis
ax.axhline(-10, linewidth = 3, color = 'black') # bold line on the x axis
ax.plot(bins*5,con_p,color = 'red', label = 'Beaconed', linewidth = 2) # plot probe trials, week 4
ax.fill_between(bins*5,con_p-sdcon_p,con_p+sdcon_p, facecolor = 'red', alpha = 0.3)
ax.plot(bins*5,con_p1,color = 'blue', label = 'Beaconed', linewidth = 2) # plot probe trials, week 1
ax.fill_between(bins*5,con_p1-sdcon_p1,con_p1+sdcon_p1, facecolor = 'blue', alpha = 0.3)
ax.tick_params(axis='x', pad = 10, top='off', right = 'off', direction = 'out',width = 2, length = 7,labelsize =16)
ax.tick_params(axis='y', pad = 10, top='off', right = 'off', direction = 'out',width = 2, length = 7,labelsize =16)
ax.set_xlim(0,100)
ax.set_ylim(-10,10)
adjust_spines(ax, ['left','bottom']) # removes top and right spines
ax.locator_params(axis = 'x', nbins=3) # set number of ticks on x axis
ax.locator_params(axis = 'y', nbins=4) # set number of ticks on y axis
ax.set_xticklabels(['0', '100', '200'])
plt.subplots_adjust(hspace = .35, wspace = .35, bottom = 0.15, left = 0.07, right = 0.82, top = 0.92)
fig.savefig('Plots/Figure1/Task13_ZscoreHist_0100'+'.png', dpi =200) # path to save file
plt.close()
# PLOT GRAPHS
# Example single-mouse figure (row 5 of the stacked arrays = exp-1 mouse M6);
# subplots 2 and 3 are created but intentionally left empty as placeholders
bins = np.arange(0.5,19.5+1e-6,1) # track bins
fig = plt.figure(figsize = (12,3))
ax = fig.add_subplot(1,3,1)
ax.axvspan(44, 44+12, facecolor='g', alpha=0.25, hatch = '/', linewidth =0) # green box spanning the rewardzone - to mark reward zone
ax.axvspan(0, 15, facecolor='k', alpha=0.15, hatch = '/', linewidth =0) # black box
ax.axvspan(100-15, 100, facecolor='k', alpha=0.15, hatch = '/', linewidth =0)# black box
ax.axvline(0, linewidth = 3, color = 'black') # bold line on the y axis
ax.axhline(-10, linewidth = 3, color = 'black') # bold line on the x axis
ax.axhline(0, linewidth = 1,ls='--', color = 'black') # dashed zero line
ax.plot(bins*5,con_beac22[5,:],color = 'red',label = 'AAV-fl-GFP', linewidth = 2) # plot beaconed trials
ax.fill_between(bins*5,con_beac22[5,:]-sd_con_beac22[5,:],con_beac22[5,:]+sd_con_beac22[5,:], facecolor = 'red', alpha = 0.3)
ax.tick_params(axis='x', pad = 10, top='off', right = 'off', direction = 'out',width = 2, length = 7,labelsize =16)
ax.tick_params(axis='y', pad = 10, top='off', right = 'off', direction = 'out',width = 2, length = 7,labelsize =16)
ax.set_xlim(0,100)
ax.set_ylim(-10,11)
adjust_spines(ax, ['left','bottom']) # removes top and right spines
ax.locator_params(axis = 'x', nbins=3) # set number of ticks on x axis
ax.locator_params(axis = 'y', nbins=4) # set number of ticks on y axis
ax.set_xticklabels(['0', '100', '200'])
# NOTE(review): y axis shows z-scores but is labelled 'Location (cm)' —
# likely a copy-paste slip; confirm before changing
ax = plt.ylabel('Location (cm)', fontsize=16, labelpad = 18)
ax = fig.add_subplot(1,3,2) # placeholder (kept for layout consistency)
ax = fig.add_subplot(1,3,3) # placeholder (kept for layout consistency)
plt.subplots_adjust(hspace = .35, wspace = .35, bottom = 0.15, left = 0.07, right = 0.82, top = 0.92)
fig.savefig('Plots/Figure1/Task13_ZscoreHist_M5_0100'+'.png', dpi =200) # path to save file
plt.close()
| [
"sarahtennant@c020051.wlan.net.ed.ac.uk"
] | sarahtennant@c020051.wlan.net.ed.ac.uk |
7b5b48e4194c24d7b61994637942273bab2714f5 | 7c33d6e59287e3133a6875e67024955c09c07f1e | /TensorFlow practise/Chapter5/example3.1.1.py | 6d9dd341e4b743e1f4d167950e658af3d99a2f16 | [] | no_license | karljiang118440/Machine-Deep-Learning | 135dff1c9676c92736a02f308cbac89cbdbdf00b | 06ee40c140ca6f19e49197ae73bc50d8a5abbbb9 | refs/heads/master | 2020-05-17T22:17:45.316222 | 2020-01-16T03:54:23 | 2020-01-16T03:54:23 | 183,995,986 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py |
import tensorflow as tf
g1=tf.Graph()
with g1.as_default():
v=tf.get_varibale(
"v",shape=[1],initializer=tf.zeros_initializer
)
g2=tf.Graph()
with g2.as_default():
v=tf.get_variable(
"v",shape=[1],initializer=tf.ones_initializer
)
with tf.Session(Graph=g1) as sess:
tf.global_variables_initializer().run()
with tf.variable_scope("",reuse=True):
print(sess.run(tf.get_variable("v")))
with tf.Session(Graph=g2) as sess:
tf.global_variables_initializer().run()
with tf.variable_scope("",reuse=True):
print(sess.run(tf.get_variable("v")))
| [
"jiang_chaoqing@126.com"
] | jiang_chaoqing@126.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.