blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c96667e76a4d649fc180fffd2ee6abb688e027cb | d4fdbd68c42d6b9babe347cb3b65535e4d782172 | /tensorflow_datasets/image/voc_test.py | 1bbb9140e84808b1f66441b6ba103c2e8483ec03 | [
"Apache-2.0"
] | permissive | thanhkaist/datasets | 2809260c5e95e96d136059bea042d1ed969a6fcf | 02da35c558ec8ea704e744a2008c5cecb2e7a0a1 | refs/heads/master | 2020-06-04T16:13:14.603449 | 2019-06-14T22:01:33 | 2019-06-14T22:02:54 | 192,097,735 | 2 | 0 | Apache-2.0 | 2019-06-15T16:02:18 | 2019-06-15T16:02:18 | null | UTF-8 | Python | false | false | 1,060 | py | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PASCAL VOC image data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.image import voc
class Voc2007Test(testing.DatasetBuilderTestCase):
  """Smoke test for the PASCAL VOC 2007 dataset builder.

  SPLITS maps each split name to the number of fake examples the TFDS
  test harness expects to find in the checked-in test data.
  """
  DATASET_CLASS = voc.Voc2007
  SPLITS = {
      'train': 1,
      'validation': 2,
      'test': 3,
  }
if __name__ == '__main__':
  # Delegate to the TFDS test runner.
  testing.test_main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
abddbc07c422c2ef39b94b3f3c841f01e40010b6 | 4f1154f708e15a4056eb5ad34a1d8ee8229e3ac9 | /common/db.py | 5b9e9e65401a455c5c70735e7a53f3dd6219d76b | [] | no_license | jackross00/portfoliomgr | 708688376643e0f95a45967e5cf0fb5140d145ac | 906407847072e74662dd9835186242a4016ec3d2 | refs/heads/main | 2023-08-09T21:48:30.343221 | 2021-08-29T15:54:41 | 2021-08-29T15:54:41 | 377,010,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | import sqlalchemy
def open_conn(schema='webapp'):
    """Create the module-wide SQLAlchemy engine for *schema*.

    Rebinds the global ``engine`` used by the rest of the module.

    SECURITY NOTE(review): the database password is hard-coded in the
    connection URL; it should be loaded from the environment or a
    secrets store instead of living in source control.
    """
    global engine
    engine = sqlalchemy.create_engine('postgresql+psycopg2://postgres:pretzel123@localhost/{0}'.format(schema), pool_recycle=3600, connect_args={'options': '-csearch_path=intmed,datapit'})


# Eagerly build the default engine as an import-time side effect.
open_conn()
def exec_sql(cmd=None,schema=None):
engine = sqlalchemy.create_engine('postgresql+psycopg2://postgres:pretzel123@localhost/{0}'.format(schema), pool_recycle=3600, connect_args={'options': '-csearch_path=intmed,datapit'})
engine.execute('{0}'.format(cmd)) | [
"jacksross96@gmail.com"
] | jacksross96@gmail.com |
daec12433149d0fa4a8fe97be29bea0af0818e98 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /tunas/schema_test.py | c27def77e798c010f8c380efefd021e51bc209c9 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 8,923 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for schema."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
from tunas import schema
class SchemaTest(tf.test.TestCase):
  """Unit tests for schema.OneOf equality/repr and the map_oneofs*
  tree-traversal helpers."""

  def test_oneof_equality_simple(self):
    # A structurally identical namedtuple must NOT compare equal to a OneOf.
    not_one_of = collections.namedtuple(
        'NotOneOf', ['choices', 'tag', 'mask'])
    tensor1 = tf.constant([3.0])
    tensor2 = tf.constant([4.0])
    self.assertEqual(
        schema.OneOf([1, 2], 'foo'),
        schema.OneOf([1, 2], 'foo'))
    self.assertEqual(
        schema.OneOf([1, 2], 'foo', tensor1),
        schema.OneOf([1, 2], 'foo', tensor1))
    self.assertNotEqual(
        schema.OneOf([1, 2], 'foo'),
        schema.OneOf([1], 'foo'))
    self.assertNotEqual(
        schema.OneOf([1, 2], 'foo'),
        schema.OneOf([1, 2], 'bar'))
    self.assertNotEqual(
        schema.OneOf([1, 2], 'foo', tensor1),
        schema.OneOf([1, 2], 'foo', None))
    self.assertNotEqual(
        schema.OneOf([1, 2], 'foo', tensor1),
        schema.OneOf([1, 2], 'foo', tensor2))
    self.assertNotEqual(
        schema.OneOf([1, 2], 'foo', tensor1),
        not_one_of([1, 2], 'foo', tensor1))
    # Comparisons against unrelated types are unequal in both directions.
    self.assertNotEqual(
        schema.OneOf([1, 2], 'foo'),
        {})
    self.assertNotEqual(
        {},
        schema.OneOf([1, 2], 'foo'))

  def test_oneof_equality_nested(self):
    # Equality must recurse into OneOf values used as choices.
    self.assertEqual(
        schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'),
        schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'))
    self.assertNotEqual(
        schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'),
        schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([4], 'b')], 'c'))
    self.assertNotEqual(
        schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'),
        schema.OneOf([schema.OneOf([1, 5], 'a'), schema.OneOf([3], 'b')], 'c'))
    self.assertNotEqual(
        schema.OneOf([schema.OneOf([1, 2], 'a'), schema.OneOf([3], 'b')], 'c'),
        'Goooooooooooooooooooooooooooooooooooooooooooooogle')

  def test_oneof_repr(self):
    # The mask is only included in the repr when present.
    self.assertEqual(
        repr(schema.OneOf([1, 2], 'foo')),
        'OneOf(choices=[1, 2], tag=\'foo\')')
    self.assertStartsWith(
        repr(schema.OneOf([1, 2], 'foo', tf.constant([3.0]))),
        'OneOf(choices=[1, 2], tag=\'foo\', mask=')

  def test_map_oenofs_with_tuple_paths_trivial(self):
    # A bare OneOf (not wrapped in any container) is visited with the
    # empty tuple as its path.
    structure = schema.OneOf([1, 2], 'tag')
    all_paths = []
    all_oneofs = []
    def visit(path, oneof):
      all_paths.append(path)
      all_oneofs.append(oneof)
      return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
    self.assertEqual(schema.map_oneofs_with_tuple_paths(visit, structure),
                     schema.OneOf([10, 20], 'tag'))
    self.assertEqual(all_paths, [()])
    self.assertEqual(all_oneofs, [schema.OneOf([1, 2], 'tag')])

  def test_map_oneofs_with_tuple_paths_simple(self):
    structure = [
        schema.OneOf([1, 2], 'tag1'),
        schema.OneOf([3, 4, 5], 'tag2'),
    ]
    all_paths = []
    all_oneofs = []
    def visit(path, oneof):
      all_paths.append(path)
      all_oneofs.append(oneof)
      return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
    self.assertEqual(schema.map_oneofs_with_tuple_paths(visit, structure), [
        schema.OneOf([10, 20], 'tag1'),
        schema.OneOf([30, 40, 50], 'tag2'),
    ])
    self.assertEqual(all_paths, [
        (0,),
        (1,)
    ])
    self.assertEqual(all_oneofs, [
        schema.OneOf([1, 2], 'tag1'),
        schema.OneOf([3, 4, 5], 'tag2'),
    ])

  def test_map_oneofs_with_tuple_paths_containing_arrays_and_dicts(self):
    # Dict keys and list indices both become path components.
    structure = {
        'foo': [
            schema.OneOf([1, 2], 'tag1'),
            schema.OneOf([3, 4, 5], 'tag2'),
        ]}
    all_paths = []
    all_oneofs = []
    def visit(path, oneof):
      all_paths.append(path)
      all_oneofs.append(oneof)
      return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
    self.assertEqual(schema.map_oneofs_with_tuple_paths(visit, structure), {
        'foo': [
            schema.OneOf([10, 20], 'tag1'),
            schema.OneOf([30, 40, 50], 'tag2'),
        ]})
    self.assertEqual(all_paths, [
        ('foo', 0),
        ('foo', 1),
    ])
    self.assertEqual(all_oneofs, [
        schema.OneOf([1, 2], 'tag1'),
        schema.OneOf([3, 4, 5], 'tag2'),
    ])

  def test_map_oneofs_with_tuple_paths_containing_nested_oneofs(self):
    # OneOfs nested inside other OneOfs' choices are visited bottom-up,
    # with 'choices' appearing as an explicit path component.
    structure = {
        'root': schema.OneOf([
            schema.OneOf([
                {'leaf': schema.OneOf([1, 10], 'level2')},
                {'leaf': schema.OneOf([2, 20], 'level2')},
            ], 'level1'),
            schema.OneOf([
                {'leaf': schema.OneOf([3, 30], 'level2')},
                {'leaf': schema.OneOf([4, 40], 'level2')},
                {'leaf': schema.OneOf([5, 50], 'level2')},
            ], 'level1')
        ], 'level0')
    }
    all_paths = []
    all_oneofs = []
    def visit(path, oneof):
      all_paths.append(path)
      all_oneofs.append(oneof)
      return schema.OneOf([oneof.choices[0]], oneof.tag)
    self.assertEqual(
        schema.map_oneofs_with_tuple_paths(visit, structure),
        {
            'root': schema.OneOf([
                schema.OneOf([
                    {'leaf': schema.OneOf([1], 'level2')},
                ], 'level1'),
            ], 'level0')
        })
    self.assertEqual(all_paths, [
        ('root', 'choices', 0, 'choices', 0, 'leaf'),
        ('root', 'choices', 0, 'choices', 1, 'leaf'),
        ('root', 'choices', 0),
        ('root', 'choices', 1, 'choices', 0, 'leaf'),
        ('root', 'choices', 1, 'choices', 1, 'leaf'),
        ('root', 'choices', 1, 'choices', 2, 'leaf'),
        ('root', 'choices', 1),
        ('root',),
    ])
    # A OneOf node's children should already be updated by the time we visit it.
    self.assertEqual(all_oneofs, [
        schema.OneOf([1, 10], 'level2'),
        schema.OneOf([2, 20], 'level2'),
        schema.OneOf(
            [
                {'leaf': schema.OneOf([1], 'level2')},
                {'leaf': schema.OneOf([2], 'level2')},
            ], 'level1'),
        schema.OneOf([3, 30], 'level2'),
        schema.OneOf([4, 40], 'level2'),
        schema.OneOf([5, 50], 'level2'),
        schema.OneOf(
            [
                {'leaf': schema.OneOf([3], 'level2')},
                {'leaf': schema.OneOf([4], 'level2')},
                {'leaf': schema.OneOf([5], 'level2')},
            ], 'level1'),
        schema.OneOf(
            [
                schema.OneOf([
                    {'leaf': schema.OneOf([1], 'level2')},
                ], 'level1'),
                schema.OneOf([
                    {'leaf': schema.OneOf([3], 'level2')},
                ], 'level1')
            ], 'level0'),
    ])

  def test_map_oneofs_with_paths(self):
    # Same traversal as the tuple-path variant, but paths are
    # slash-joined strings.
    structure = {
        'foo': [
            schema.OneOf([1, 2], 'tag1'),
            schema.OneOf([3, 4, 5], 'tag2'),
        ]}
    all_paths = []
    all_oneofs = []
    def visit(path, oneof):
      all_paths.append(path)
      all_oneofs.append(oneof)
      return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
    self.assertEqual(schema.map_oneofs_with_paths(visit, structure), {
        'foo': [
            schema.OneOf([10, 20], 'tag1'),
            schema.OneOf([30, 40, 50], 'tag2'),
        ]})
    self.assertEqual(all_paths, [
        'foo/0',
        'foo/1',
    ])
    self.assertEqual(all_oneofs, [
        schema.OneOf([1, 2], 'tag1'),
        schema.OneOf([3, 4, 5], 'tag2'),
    ])

  def test_map_oneofs(self):
    # Path-free variant: the visitor receives only the OneOf itself.
    structure = {
        'foo': [
            schema.OneOf([1, 2], 'tag1'),
            schema.OneOf([3, 4, 5], 'tag2'),
        ]}
    all_oneofs = []
    def visit(oneof):
      all_oneofs.append(oneof)
      return schema.OneOf([x*10 for x in oneof.choices], oneof.tag)
    self.assertEqual(schema.map_oneofs(visit, structure), {
        'foo': [
            schema.OneOf([10, 20], 'tag1'),
            schema.OneOf([30, 40, 50], 'tag2'),
        ]})
    self.assertEqual(all_oneofs, [
        schema.OneOf([1, 2], 'tag1'),
        schema.OneOf([3, 4, 5], 'tag2'),
    ])
if __name__ == '__main__':
  # These tests rely on TF1-style graph-mode behavior.
  tf.disable_v2_behavior()
  tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
3dd5147ae9f30926a48cab65fa22cd06522b47c6 | 52a669eaba3ffcfdce3e4e069b3a718ecae99f5e | /raiden/tests/benchmark/_codespeed.py | 9d1374369d596807930b544aaaa62f07a9cb2bf4 | [
"MIT"
] | permissive | ulope/raiden | 8eec274797c4c8b7e365be3af00605a6365ab1a2 | 3f6b1f8fe23e34bbaed842362af7f38f68e6e5af | refs/heads/master | 2022-07-19T13:49:30.121352 | 2021-06-08T08:49:30 | 2021-06-08T08:49:30 | 94,885,749 | 0 | 0 | MIT | 2022-07-06T23:21:47 | 2017-06-20T11:49:29 | Python | UTF-8 | Python | false | false | 876 | py | import json
import os
import requests
# Credentials come from the environment so they never live in the repo.
try:
    _CODESPEED_USER = os.environ["CODESPEED_USER"]
    _CODESPEED_PASSWORD = os.environ["CODESPEED_PASSWORD"]
    _BENCHMARK_HOST = os.environ["BENCHMARK_HOST"]
except KeyError:
    # Deliberate best-effort: importing this module without the env vars
    # is allowed; post_result() will then fail with a NameError.
    print("Codespeed environment variables not available, posting results would fail.")
def post_result(codespeed_url, commit_id, branch, bench_name, value):
    """Upload a single benchmark result to a codespeed instance.

    Uses the module-level credentials read from the environment at
    import time; raises requests.HTTPError if the server rejects the
    submission.
    """
    result_entry = {
        "commitid": commit_id,
        "project": "raiden",
        "branch": branch,
        "executable": "raiden",
        "benchmark": bench_name,
        "environment": _BENCHMARK_HOST,
        "result_value": value,
    }
    # The codespeed JSON endpoint expects a form field named "json"
    # containing a serialized *list* of result dicts.
    payload = {"json": json.dumps([result_entry])}
    endpoint = codespeed_url + "/result/add/json/"
    response = requests.post(
        endpoint, data=payload, auth=(_CODESPEED_USER, _CODESPEED_PASSWORD))
    response.raise_for_status()
| [
"istankovic@posteo.net"
] | istankovic@posteo.net |
2725d1c03afc9f15527b05e3895cc8a6aab7c275 | ccd4d37469bc1b60cdaca39bad12e6dd0cd208fe | /project3/orders/migrations/0002_auto_20181129_2116.py | 27b48e710460188c3ca12805deec63c8382eab48 | [] | no_license | AlessandroMozzato/cs50w | 98d02574aed5a0c69fa2e3a8e8e751e283aa200e | 9c36d6487e4734f1608f43da1e84d2026433eaf9 | refs/heads/master | 2021-06-17T17:47:13.890214 | 2019-06-06T19:17:25 | 2019-06-06T19:17:25 | 145,459,230 | 0 | 2 | null | 2021-06-10T20:52:39 | 2018-08-20T19:04:20 | HTML | UTF-8 | Python | false | false | 372 | py | # Generated by Django 2.0.3 on 2018-11-29 21:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: change Menu.size to a
    BooleanField defaulting to True.  Generated migrations should not
    normally be edited by hand."""

    dependencies = [
        ('orders', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='menu',
            name='size',
            field=models.BooleanField(default=True),
        ),
    ]
| [
"mozzatoale@gmail.com"
] | mozzatoale@gmail.com |
cf472624e90417b7b9dd44a827fca5cdad1d7083 | 333352a8f17441336d8d4401ae756d16a6af64ff | /ML-101 Modules/Module 03/Lesson 03/Practice 2/vehicles_helper.py | 3a2d4b9c63b568c608000610ef2d12ac04480740 | [
"MIT"
] | permissive | peargrape/data-science | d29b6873e5e83f66476d7ea7a381e80508656389 | cb59cb856859c4c6df05fded37f441b073e6903c | refs/heads/main | 2023-08-29T06:41:55.243147 | 2021-10-27T07:37:17 | 2021-10-27T07:37:17 | 408,247,972 | 0 | 0 | MIT | 2021-09-19T22:10:07 | 2021-09-19T22:10:06 | null | UTF-8 | Python | false | false | 2,392 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set()
import sklearn.metrics as metrics
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.svm import SVC
import warnings
warnings.filterwarnings('ignore')
def attributes_counts(dataset):
    """ Basic descriptions of the target attribute.

    This module displays 'Class' attribute value counts
    and visualisation plot of the dataset.

    Parameters:
    dataset: pandas DataFrame containing a 'Class' column.
    """
    print("'Class' Value Counts: "+" \n", dataset['Class'].value_counts())
    # Note: the plot object is passed to print(), so the bar chart is
    # rendered as a side effect and its repr is printed alongside.
    print("\n Visualisation plot: "+" \n", dataset['Class'].value_counts().plot(x = dataset['Class'], kind='bar'))
def all_attributes_visual(dataset, one, two, three):
    """ Visual presentation of all attributes in the dataset.

    This module shows all attributes divided into 3 parts
    for better visualization.

    Parameters:
    dataset: pandas DataFrame containing a 'Class' column
    one: selected attributes for part one
    two: selected attributes for part two
    three: selected attributes for part three
    """
    # The original body repeated the same header/pairplot/show sequence
    # three times; a single loop removes the duplication while printing
    # exactly the same output (including the trailing blank line after
    # parts one and two but not after part three).
    parts = [("one", one), ("two", two), ("three", three)]
    for index, (label, columns) in enumerate(parts):
        print("Part {0}:".format(label) + "\n")
        subset = dataset[columns]
        sns.pairplot(subset, kind="scatter", hue="Class",
                     plot_kws=dict(s=80, edgecolor="white", linewidth=2.5))
        plt.show()
        if index < len(parts) - 1:
            print("\n")
def corr_plot_list(dataset):
    """ This module presents correlation plot and list of each attribute"""
    print("'Correlation list of each attribute: ")
    corr = dataset.corr()
    corr_abs = corr.abs()
    # NOTE(review): len(dataset) is the ROW count, not the number of
    # columns.  nlargest simply caps at the rows available in the
    # correlation matrix, so the output is unaffected, but
    # len(dataset.columns) would express the intent better -- confirm.
    num_cols = len(dataset)
    num_corr = corr_abs.nlargest(num_cols, 'Class')['Class']
    print(num_corr)
    print("\n")
    print("'Correlation plot of each attribute: "+"\n", dataset.corr()['Class'].sort_values().plot(kind='bar', figsize=(18, 6)))
| [
"noreply@github.com"
] | noreply@github.com |
085844914eefe2ebb6fb48b6e56ff09e865d3a4a | 50b270528c1e267e5bac6bcd023b45df8393bea3 | /django/project/forum/views.py | 6e684fa1009191d460fcd4759d94fe2a76866265 | [] | no_license | KarolPawlukowicz/Django-Mailowa-grupa-dyskusyjna | 87e0da9f982abcc64416ac15d2d0102f3e1319be | 3925828c8aa6953cac101808bd68b018e339fa56 | refs/heads/master | 2020-12-23T23:15:43.860372 | 2020-01-30T21:10:24 | 2020-01-30T21:10:24 | 237,304,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | import poplib
from email.parser import Parser
from django.shortcuts import render
from .models import Group
from django.views.generic import ListView, DetailView, CreateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
def groups(request):
    """Function-based view: render the list of all discussion groups."""
    return render(
        request, 'forum/groups.html', {'groups': Group.objects.all()})
class GroupListView(ListView):
    """Class-based equivalent of the groups() view above."""
    model = Group
    template_name = 'forum/groups.html'
    context_object_name = 'groups'
class GroupDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a group (login required); redirect to the group list."""
    model = Group
    success_url = '/groups'
class GroupDetailView(DetailView):
    """Detail page for a single Group, augmented with messages fetched
    live from a POP3 mailbox.

    SECURITY NOTE(review): the POP3 server, username and password are
    hard-coded below ('email' / 'Haslo' look like placeholders); move
    them to settings/environment variables.  Also note the view
    downloads *every* message in the mailbox on each page view, which
    does not scale.
    """
    model = Group

    def get_context_data(self, **kwargs):
        context = super(GroupDetailView, self).get_context_data(**kwargs)
        SERVER = 'pop3.poczta.onet.pl'
        username = 'email'
        password = 'Haslo'
        pop3server = poplib.POP3(SERVER)
        # print(pop3server.getwelcome())
        pop3server.user(username)
        pop3server.pass_(password)
        # Retrieve all messages, join their raw lines, and parse them
        # into email.message.Message objects.
        messages = [pop3server.retr(i) for i in range(1, len(pop3server.list()[1]) + 1)]
        messages = [b"\n".join(m[1]) for m in messages]
        messages = [Parser().parsestr(m.decode()) for m in messages]
        froms = []
        subjects = []
        contents = []
        for message in messages:
            email_from = message.get('From')
            email_subject = message.get('Subject')
            # print('From ' + email_from)
            # print('To ' + email_to)
            # print('Subject ' + email_subject)
            # NOTE(review): a multipart message with several text/plain
            # parts appends multiple contents for one From/Subject pair,
            # so the zip below can misalign -- confirm intended behavior.
            for part in message.walk():
                if part.get_content_type() == 'text/plain':
                    body = part.get_payload(decode=True)
                    print('tresc: ')
                    print(str(body))
                    contents.append(body)
            froms.append(email_from)
            subjects.append(email_subject)
        mylist = zip(froms, subjects, contents)
        context['myList'] = mylist
        # context['From'] = froms
        # context['Subject'] = subjects
        # context['Messages'] = contents
        return context
class GroupCreateView(LoginRequiredMixin, CreateView):
    """Create a new group (login required) from name and code fields."""
    model = Group
    fields = ['group_name', 'group_code']
def home(request):
    """Render the static landing page."""
    context = {'title': 'Home'}
    return render(request, 'forum/home.html', context)
| [
"karpawlukowicz@gmail.com"
] | karpawlukowicz@gmail.com |
ab7b5850c8a4219d95bc177a69194213af9cd6ad | 16ba9862e6b85d0fab61ad1b26800b2eff4c4cd4 | /middleware/tests/test_config.py | 2038a27e1ead031fa8bedeb2a66994fde54823d1 | [] | no_license | alaxa27/Way-Connect_Box | 724d51926b7c0a5bc32277684de2a274323ba5b8 | c26572f47541fe56ee81c8bf7169b18eda426ebb | refs/heads/master | 2022-12-08T06:08:24.234111 | 2019-01-17T15:42:11 | 2019-01-17T15:42:11 | 138,509,499 | 0 | 0 | null | 2022-12-08T02:14:25 | 2018-06-24T19:25:58 | C | UTF-8 | Python | false | false | 4,537 | py | from unittest import TestCase
from unittest.mock import patch, call
import config
class TestConfig(TestCase):
    """Unit tests for the config module: daemon reload, service-restart
    selection logic, config-list parsing and occurrence replacement."""

    def test_write_config(self):
        # Placeholder: write_config is not yet covered.
        pass

    @patch('subprocess.call')
    def test_reload_daemons(self, callMock):
        config.reload_daemons()
        callMock.assert_called_with('/bin/systemctl daemon-reload', shell=True)

    @patch('config.restart_services')
    def test_restart_updated_services(self, restartMock):
        """Only updated config's services should be restarted."""
        old = {
            'A': 0,
            'B': 3
        }
        new = {
            'B': 3,
            'A': 1
        }
        services = {
            'A': 'serviceA',
            'B': 'serviceB'
        }
        config.restart_updated_services(new, old, services)
        restartMock.assert_called_once_with('serviceA')

    @patch('config.restart_services')
    def test_restart_new_config_service(self, restartMock):
        """New config key's service should be restarted."""
        old = {
            'A': 0
        }
        new = {
            'A': 0,
            'B': 1
        }
        services = {
            'A': 'serviceA',
            'B': 'serviceB'
        }
        config.restart_updated_services(new, old, services)
        restartMock.assert_called_once_with('serviceB')

    @patch('config.restart_services')
    def test_restart_config_service_not_in_info(self, restartMock):
        """If a service needs to be restarted but is not present in the infos\
 nothing should happen."""
        old = {
            'A': 0
        }
        new = {
            'A': 1,
            'B': 3
        }
        services = {
        }
        try:
            config.restart_updated_services(new, old, services)
        except config.RestartUpdatedServicesError:
            self.fail('It should not raise RestartUpdateServicesError')
        restartMock.assert_not_called()

    def test_restart_remote_key_not_exist(self):
        """If a service is present in the configInfo but not on the remote, it\
 should raise a RestartUpdateServicesError"""
        old = {}
        new = {}
        services = {
            'A': 'serviceA'
        }
        with self.assertRaises(config.RestartUpdatedServicesError):
            config.restart_updated_services(new, old, services)

    @patch('subprocess.call')
    @patch('utils.reboot')
    def test_restart_services_monit_reboot(self, rebootMock, subCallMock):
        """If monit then reboot needs to be reload, monit reload and systemctl
        restart monit then reboot"""
        config.restart_services(['monit', 'reboot'])
        subCallMock.assert_has_calls([
            call(['monit', 'reload']),
            call(['systemctl', 'restart', 'monit'])
        ])
        self.assertEqual(rebootMock.call_count, 1)

    def test_get_config_files(self):
        """Should retrieve the right paths associated with the right key."""
        remoteConfig = {
            'A': 1,
            'FILES_A': 'a;b;c',
            'SERVICES_A': 'd;e;f',
            'FILES_B': 'r;t;y'
        }
        configFiles = config.get_list_from_config(remoteConfig, 'FILES')
        expectedResult = {
            'A': ['a', 'b', 'c'],
            'B': ['r', 't', 'y']
        }
        self.assertEqual(configFiles, expectedResult)

    def test_get_config_services(self):
        """Should retrieve the right services associated with the right key."""
        remoteConfig = {
            'A': 1,
            'FILES_A': 'a;b;c',
            'SERVICES_A': 'd;e;f',
            'FILES_B': 'r;t;y',
            'SERVICES_C': 'p;o;g'
        }
        configFiles = config.get_list_from_config(remoteConfig, 'SERVICES')
        expectedResult = {
            'A': ['d', 'e', 'f'],
            'C': ['p', 'o', 'g']
        }
        self.assertEqual(configFiles, expectedResult)

    def test_replace_occurence(self):
        """Should replace vars in config value. no matter what is the key."""
        remoteConfig = {
            'A': 'RTY',
            'B': 'AZE',
            'C': 'tres'
        }
        string1 = 'ZERTYGOIJFEPKA'
        expectedResult1 = string1
        newString1 = config.replace_occurence(string1, remoteConfig)
        string2 = 'YIUBOWC_BIHU_FNEKWC_POZ'
        expectedResult2 = 'YIUBOAZEIHU_FNEKWC_POZ'
        newString2 = config.replace_occurence(string2, remoteConfig)
        self.assertEqual(newString1, expectedResult1)
        self.assertEqual(newString2, expectedResult2)
| [
"alaxa27@gmail.com"
] | alaxa27@gmail.com |
63c7c30de11e644228be90bc3edc923154acc744 | 6970714f3d7c6a18cb8e9da90f365ce59afdbb3d | /Data Structures/htmlchecker.py | 83673df5f9cd4821b927635849cb8309e17243ea | [] | no_license | christopherhui/Data-Structures-and-Algorithms | d8e30e59d7cd8cb47f9ed888351656ca1f48abac | ac01e938addcb57faca2166b326fba5e4eac9acb | refs/heads/master | 2020-03-25T22:56:25.301930 | 2018-08-10T07:33:38 | 2018-08-10T07:33:38 | 144,251,637 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | from pythonds.basic.stack import Stack
def balancedHTMLTags(document):
tagStack = Stack() # Stack for "<" and ">"
wordStack = Stack() # Stack for retaining words within tags
for i in range(0, len(document)):
if document[i] == '<':
close = False
start = i + 1
tagStack.push(document[i])
if document[i + 1] == '/':
close = True # for closing Tags
elif document[i] == '>' and not tagStack.isEmpty():
end = i - 1
tagStack.pop()
if close == False:
wordStack.push(document[start:end])
elif close == True and not wordStack.isEmpty():
if wordStack.peek() == document[start + 1:end]:
wordStack.pop()
else:
return False
else:
return False
if tagStack.isEmpty() and wordStack.isEmpty():
return True
else:
return False
# Smoke checks -- expected output: True, True, False, False, True.
print(balancedHTMLTags("<div>hi</div>"))
print(balancedHTMLTags("<html> <head> <title>Example</title> </head> <body> <h1>Hello, world</h1> </body> </html>"))
print(balancedHTMLTags("</div>test-for-false<div>"))
print(balancedHTMLTags("<h1> uhhhh </div> <div>wait wut</h1>"))
print(balancedHTMLTags("<h1>ok <div> </div> </h1>"))
| [
"christophergkhui@gmail.com"
] | christophergkhui@gmail.com |
8aa0eb7e2f218416241e350136d8c8c47ec3bd63 | fcb5e9c148c1e2f67bd79bd2aeb11cd152deff88 | /main.py | b193f9bf20705a8ed599769bd222fd4533333c9e | [] | no_license | Firas-Ben-Hassan/Graph_Embeddings | 2612932cf0084f5a2c80add26e11e735987f2962 | 4ff959a4b6d57966632c0ba946250200d6d07fea | refs/heads/master | 2021-06-16T23:32:02.845551 | 2017-05-31T11:50:39 | 2017-05-31T11:50:39 | 92,940,738 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,924 | py | import networkx as nx
from gensim.models import Word2Vec
import node2vec
import numpy.random as npr
def discard_edges(nx_graph, p):
    """Remove every p-th edge (after an in-place shuffle) from nx_graph.

    p == 0 means "keep everything".  The (mutated) graph is returned in
    both cases so the call can be chained.
    """
    if p == 0:
        return nx_graph
    edge_list = nx_graph.edges()
    npr.shuffle(edge_list)
    # edge_list[::p] selects exactly the entries that indexing with
    # range(0, len(edge_list), p) would visit.
    nx_graph.remove_edges_from(edge_list[::p])
    return nx_graph
def read_graph():
    """Interactively load a graph from an edge-list file.

    Prompts for the filename and whether the graph is unweighted and/or
    undirected, and returns a networkx graph with a 'weight' attribute
    on every edge (unit weights for unweighted input).
    """
    edgelist_file = input("Enter graph edgelist filename: ")
    is_unweighted = input("Unweighted graph (Y/N): ")
    is_undirected = input("Undirected graph (Y/N): ")
    if is_unweighted == "Y":
        nx_graph = nx.read_edgelist(edgelist_file, create_using=nx.DiGraph())
        # Give every edge unit weight so downstream code can always
        # assume a 'weight' attribute exists.
        for edge in nx_graph.edges():
            nx_graph[edge[0]][edge[1]]['weight'] = 1
    else:
        nx_graph = nx.read_edgelist(edgelist_file, data=(('weight', float), ), create_using=nx.DiGraph())
    if is_undirected == "Y":
        nx_graph = nx_graph.to_undirected()
    # P = int(input("Enter fraction of edges to discard (0-9): "))
    # return discard_edges(nx_graph, P)
    return nx_graph
def learn_node_features(walks, dim, window, epoch, output):
    """Train skip-gram Word2Vec embeddings on the walks and write them
    to *output* in word2vec text format.

    Delegates the (previously duplicated) training step to
    learn_node_features_2 so the hyper-parameters live in one place.
    """
    node_model = learn_node_features_2(walks, dim, window, epoch)
    node_model.wv.save_word2vec_format(output)
def learn_node_features_2(walks, dim, window, epoch):
    """Train skip-gram Word2Vec embeddings on the walks and return the
    trained model (nodes are converted to strings for gensim)."""
    stringified_walks = [[str(node) for node in walk] for walk in walks]
    return Word2Vec(
        stringified_walks, size=dim, window=window, min_count=0, sg=1,
        workers=4, iter=epoch)
def save_node_features(nm1, nm2, nodes, dim, output):
    """Concatenate each node's vectors from two models and write them in
    word2vec text format.

    The file starts with a "<node count> <dim>" header line, followed by
    one "<node> <v1> <v2> ..." line per node (nm1's components first).
    """
    header = "{0} {1}\n".format(len(nodes), dim)
    rows = []
    for node in nodes:
        combined = list(nm1[str(node)]) + list(nm2[str(node)])
        fields = [str(node)] + [str(component) for component in combined]
        rows.append(" ".join(fields) + "\n")
    with open(output, 'w') as out:
        out.writelines([header] + rows)
if __name__ == '__main__':
    # Interactive driver: load a graph, pick an embedding algorithm,
    # collect hyper-parameters from stdin, then train and save.
    nx_graph = read_graph()
    print("Select Algorithm to train")
    print("1) Node2vec")
    print("2) DeepWalk")
    print("3) LINE")
    select = input("Enter option: ")
    if select == "1":
        # node2vec: user-chosen in-out (p) and return (q) parameters.
        print("Based on previous experiments the best in-out and return hyperparameters are {0.25, 0.50, 1, 2, 4}")
        P = float(input("Enter in-out parameter: "))
        Q = float(input("Enter return parameter: "))
        graph = node2vec.Graph(nx_graph, is_directed=nx.is_directed(nx_graph), p=P, q=Q)
        graph.preprocess_transition_probs()
        num_walks = int(input("Enter no. of walks to sample for each node: "))
        walk_length = int(input("Enter length of each walk: "))
        walks = graph.simulate_walks(num_walks=num_walks, walk_length=walk_length)
        D = int(input("Enter dimensionality of the feature vectors: "))
        W = int(input("Enter window size: "))
        epoch = int(input("Enter number of iterations: "))
        output = input("Enter output file: ")
        learn_node_features(walks=walks, dim=D, window=W, epoch=epoch, output=output)
    elif select == "2":
        # DeepWalk is node2vec with p = q = 1 (uniform random walks).
        P = 1
        Q = 1
        graph = node2vec.Graph(nx_graph, is_directed=nx.is_directed(nx_graph), p=P, q=Q)
        graph.preprocess_transition_probs()
        num_walks = int(input("Enter no. of walks to sample for each node: "))
        walk_length = int(input("Enter length of each walk: "))
        walks = graph.simulate_walks(num_walks=num_walks, walk_length=walk_length)
        D = int(input("Enter dimensionality of the feature vectors: "))
        W = int(input("Enter window size: "))
        epoch = int(input("Enter number of iterations: "))
        output = input("Enter output file: ")
        learn_node_features(walks=walks, dim=D, window=W, epoch=epoch, output=output)
    elif select == "3":
        # LINE approximation: train two half-dimension models with
        # extreme (p, q) settings and concatenate their vectors.
        num_walks = int(input("Enter no. of walks to sample for each node: "))
        walk_length = int(input("Enter length of each walk: "))
        D = int(input("Enter dimensionality of the feature vectors: "))
        W = int(input("Enter window size: "))
        epoch = int(input("Enter number of iterations: "))
        output = input("Enter output file: ")
        P = 0.001
        Q = 1
        graph = node2vec.Graph(nx_graph, is_directed=nx.is_directed(nx_graph), p=P, q=Q)
        graph.preprocess_transition_probs()
        walks = graph.simulate_walks(num_walks=num_walks, walk_length=walk_length)
        node_model1 = learn_node_features_2(walks=walks, dim=D/2, window=W, epoch=epoch)
        P = 1
        Q = 0.001
        graph = node2vec.Graph(nx_graph, is_directed=nx.is_directed(nx_graph), p=P, q=Q)
        graph.preprocess_transition_probs()
        walks = graph.simulate_walks(num_walks=num_walks, walk_length=walk_length)
        node_model2 = learn_node_features_2(walks=walks, dim=D/2, window=W, epoch=epoch)
        save_node_features(nm1=node_model1, nm2=node_model2, nodes=nx.nodes(nx_graph), dim=D, output=output)
"firas.benhassan@supcom.tn"
] | firas.benhassan@supcom.tn |
be2dbb2850c37601b90706e3d19bfac3c370606c | e0cc36b60f2ac3e4d8fa8184f67b8586d54a2941 | /venv/Scripts/pip3.5-script.py | 3fa035374c30913d6fef7bc2a7ab2e4400f85483 | [] | no_license | 112517249/test_requests_unittest | a5a0632e81800902ddbe817a6f02d70912301c8e | e059515b556e32cb637e620c72d70ba22681434a | refs/heads/master | 2022-10-12T03:04:35.261536 | 2020-06-09T08:24:16 | 2020-06-09T08:24:16 | 270,944,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | #!E:\PycharmProjects\test\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.5'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script shim -- do not edit by hand.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.5')()
    )
| [
"zz@email.com"
] | zz@email.com |
2fc48de98fbc2450366953e3be1285d20c36401a | ac8ffabf4d7339c5466e53dafc3f7e87697f08eb | /python_solutions/1080.insufficient-nodes-in-root-to-leaf-paths.py | 4ba1ede95bb6688d9b4c3e860ddfe8e1d3dd646d | [] | no_license | h4hany/leetcode | 4cbf23ea7c5b5ecfd26aef61bfc109741f881591 | 9e4f6f1a2830bd9aab1bba374c98f0464825d435 | refs/heads/master | 2023-01-09T17:39:06.212421 | 2020-11-12T07:26:39 | 2020-11-12T07:26:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | from collections import Counter, defaultdict, OrderedDict, deque
from bisect import bisect_left, bisect_right
from functools import reduce, lru_cache
from typing import List
import itertools
import math
import heapq
import string
true = True
false = False
MIN, MAX, MOD = -0x3f3f3f3f, 0x3f3f3f3f, 1000000007
#
# @lc app=leetcode id=1080 lang=python3
#
# [1080] Insufficient Nodes in Root to Leaf Paths
#
# https://leetcode.com/problems/insufficient-nodes-in-root-to-leaf-paths/description/
#
# algorithms
# Medium (49.43%)
# Total Accepted: 14.4K
# Total Submissions: 29K
# Testcase Example: '[1,2,3,4,-99,-99,7,8,9,-99,-99,12,13,-99,14]\n1'
#
# Given the root of a binary tree, consider all root to leaf paths: paths from
# the root to any leaf. (A leaf is a node with no children.)
#
# A node is insufficient if every such root to leaf path intersecting this node
# has sum strictly less than limit.
#
# Delete all insufficient nodes simultaneously, and return the root of the
# resulting binary tree.
#
#
#
# Example 1:
#
#
#
# Input: root = [1,2,3,4,-99,-99,7,8,9,-99,-99,12,13,-99,14], limit = 1
#
# Output: [1,2,3,4,null,null,7,8,9,null,14]
#
#
#
# Example 2:
#
#
#
# Input: root = [5,4,8,11,null,17,4,7,1,null,null,5,3], limit = 22
#
# Output: [5,4,8,11,null,17,4,7,null,null,null,5]
#
#
#
# Example 3:
#
#
#
# Input: root = [1,2,-3,-5,null,4,null], limit = -1
#
# Output: [1,null,-3,4]
#
#
#
#
# Note:
#
#
# The given tree will have between 1 and 5000 nodes.
# -10^5 <= node.val <= 10^5
# -10^9 <= limit <= 10^9
#
#
#
#
#
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def sufficientSubset(self, root: TreeNode, limit: int) -> TreeNode:
        """Delete all insufficient nodes and return the pruned tree's root.

        A node is insufficient when every root-to-leaf path through it has
        a sum strictly less than `limit`.
        """
        if root is None:
            return None
        if root.left is None and root.right is None:
            # Leaf: keep it only when its single path reaches the limit.
            return root if root.val >= limit else None
        remaining = limit - root.val
        root.left = self.sufficientSubset(root.left, remaining)
        root.right = self.sufficientSubset(root.right, remaining)
        # An interior node survives only while at least one child survived.
        return root if (root.left or root.right) else None
| [
"ssruoz@gmail.com"
] | ssruoz@gmail.com |
2a38f9bdee2aedb06b848420931f4339af7a1815 | b1b74e9217449548bb3d40cf1da7ceab335c3ef8 | /qubits/softBakeTest_041119/resistance_analysis.py | 4671defc1cf4ed72c793f00d0e7ef1febe817f9e | [] | no_license | jmonroe2/dataProcessing | a5cd6566fc0ac88f3f464112a65163ae5841200a | 37dda1884567ceecabb9d06cfc6c75812658ba12 | refs/heads/master | 2021-06-24T14:00:17.383050 | 2020-10-24T15:27:39 | 2020-10-24T15:27:39 | 153,939,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %(username)s
This script exists to analyze probing data of Softbake test 04/11/19
"""
import sys, time, os
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Read probe resistances from data.txt, report stats, plot a histogram.

    Only lines containing "Ohms" are parsed; readings outside the
    (100, 15e3) Ohm window are discarded as probe errors.
    """
    data_file = os.getcwd() + "/data.txt"
    lower = 100
    upper = 15E3
    # Pull every line of the probing log.
    with open(data_file) as fh:
        lines = fh.readlines()
    # Keep only in-range resistance readings.
    readings = []
    for entry in lines:
        if "Ohms" not in entry:
            continue
        # NOTE(review): this parses the first five characters of the line as
        # the numeric value — assumes a fixed-width log format; confirm.
        value = float(entry[:len("Ohms")+1])
        if not (lower < value < upper):
            continue
        readings.append(value)
        if value > 1000:
            print(value)
    # Summary statistics, rounded for display.
    mean = np.round(np.mean(readings), -1)
    std = np.round(np.std(readings), 1)
    print(mean, std)
    # Histogram over the 0-1000 Ohm window.
    bin_edges = np.linspace(0, 1000, 20)
    plt.hist(readings, bins=bin_edges)
    plt.xlabel("Resistance [$\Omega$]")
    plt.ylabel("Count")
    plt.show()
##END main
if __name__ == '__main__':
    # Run the analysis when executed as a script (no-op on import).
    main()
| [
"jonathan0monroe@gmail.com"
] | jonathan0monroe@gmail.com |
224d95222155e0b7f599b3d1ee3f2fe0b45333e5 | 135c112b57ae964bd8311cfd4e770850aec66cd4 | /plot_analyzed_data.py | df274b5012deeda1ad30e6f79466d4444a40dbd9 | [] | no_license | KDilch/P2_LAB | 016ccebd7659505f04045c80760e9ccbe0d1290b | fea2d44e441ec3a0675e1c90625326b04fee1fa9 | refs/heads/master | 2020-06-02T05:12:58.683783 | 2019-09-25T21:52:21 | 2019-09-25T21:52:21 | 191,049,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,811 | py | import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
import os
import scipy.optimize as opt
# Unpack every analysed column from the tab-separated results file:
# drift times t_ij, velocities v_ij (each with its standard deviation),
# the averaged velocity, and diffusion coefficients with their errors.
(voltages, reduced_E_fields, t12s, stdDevT12s, t13s, stdDevT13s, t23s, stdDevT23s, v12s, stdDevV12s, v13s, stdDevV13s, v23s, stdDevV23s, v_av, std_v_av, diff_12, err_diff12, diff23, err_diff23, diff13, err_diff13) = np.loadtxt("results.tsv", delimiter="\t", unpack=True)
palette = plt.get_cmap('Set1')
#================================Curve fits=============================================================================
def time_func(x, t0, a):
    """Drift-time model: constant offset plus an inverse-field term, t0 + a/x."""
    inverse_term = a / x
    return t0 + inverse_term
def vel_func(x, a, b, c):
    """Drift-velocity model: quadratic polynomial a + b*x + c*x**2."""
    linear = b * x
    quadratic = c * x ** 2
    return a + linear + quadratic
optimizedParameters_t12, pcov_t12 = opt.curve_fit(time_func, reduced_E_fields, t12s, sigma=stdDevT12s, p0=[3, 30])
print(f"optimizedParameters_t12: t0:'{optimizedParameters_t12[0]}', a:'{optimizedParameters_t12[1]}', \npcov: {pcov_t12}")
optimizedParameters_t13, pcov_t13 = opt.curve_fit(time_func, reduced_E_fields, t13s, sigma=stdDevT13s, p0=[4, 60])
print(f"optimizedParameters_t13: t0:'{optimizedParameters_t13[0]}', a:'{optimizedParameters_t13[1]}', \npcov: {pcov_t13}")
optimizedParameters_t23, pcov_t23 = opt.curve_fit(time_func, reduced_E_fields, t23s, sigma=stdDevT23s, p0=[2, 30])
print(f"optimizedParameters_t23: t0:'{optimizedParameters_t23[0]}', a:'{optimizedParameters_t23[1]}', \npcov: {pcov_t23}")
data_time = np.array([['t_12', optimizedParameters_t12[0], np.sqrt(pcov_t12[0][0]), optimizedParameters_t12[1], np.sqrt(pcov_t12[1][1])],
['t_13', optimizedParameters_t13[0], np.sqrt(pcov_t13[0][0]), optimizedParameters_t13[1],
np.sqrt(pcov_t13[1][1])],
['t_23', optimizedParameters_t23[0], np.sqrt(pcov_t23[0][0]), optimizedParameters_t23[1],
np.sqrt(pcov_t23[1][1])]])
optimizedParameters_v12, pcov_v12 = opt.curve_fit(vel_func, reduced_E_fields, v12s, sigma=stdDevV12s, p0=[2.5, 30, 0.005])
print(f"optimizedParameters_v12: a:'{optimizedParameters_v12[0]}', b:'{optimizedParameters_v12[1]}', c:'{optimizedParameters_v12[2]}', \npcov: {pcov_v12}")
optimizedParameters_v13, pcov_v13 = opt.curve_fit(vel_func, reduced_E_fields, v13s, sigma=stdDevV13s, p0=[2.5, 30, 0.005])
print(f"optimizedParameters_v13: a:'{optimizedParameters_v13[0]}', b:'{optimizedParameters_v13[1]}', c:'{optimizedParameters_v13[2]}', \npcov: {pcov_v13}")
optimizedParameters_v23, pcov_v23 = opt.curve_fit(vel_func, reduced_E_fields, v23s, sigma=stdDevV23s, p0=[2.5, 30, 0.005])
print(f"optimizedParameters_v23: a:'{optimizedParameters_v23[0]}', b:'{optimizedParameters_v23[1]}', c:'{optimizedParameters_v23[2]}', \npcov: {pcov_v23}")
data_velocity = np.array([['v_12', optimizedParameters_v12[0], np.sqrt(pcov_v12[0][0]), optimizedParameters_v12[1], np.sqrt(pcov_v12[1][1]), optimizedParameters_v12[2], np.sqrt(pcov_v12[2][2])],
['v_13', optimizedParameters_v13[0], np.sqrt(pcov_v13[0][0]), optimizedParameters_v13[1],
np.sqrt(pcov_v13[1][1]), optimizedParameters_v13[2], np.sqrt(pcov_v13[2][2])],
['v_23', optimizedParameters_v23[0], np.sqrt(pcov_v23[0][0]), optimizedParameters_v23[1],
np.sqrt(pcov_v23[1][1]), optimizedParameters_v23[2], np.sqrt(pcov_v23[2][2])]])
# ================================Plots=================================================================================
plt.plot(reduced_E_fields, time_func(reduced_E_fields, *optimizedParameters_t12), color=palette(4), label="fit T_12")
plt.errorbar(reduced_E_fields, t12s, stdDevT12s, color=palette(4), fmt='.', label="T_12")
plt.plot(reduced_E_fields, time_func(reduced_E_fields, *optimizedParameters_t13), color=palette(1), label="fit T_13")
plt.errorbar(reduced_E_fields, t13s, stdDevT13s, color=palette(1), fmt='.', label="T_13")
plt.plot(reduced_E_fields, time_func(reduced_E_fields, *optimizedParameters_t23), color=palette(2), label="fit T_23")
plt.errorbar(reduced_E_fields, t23s, stdDevT23s, color=palette(2), fmt='.', label="T_23")
plt.legend()
plt.xlabel("E/p (V/Pa)")
plt.ylabel("Time (µs)")
plt.grid(True)
plt.savefig("results_plot_time_after_fixes.png")
plt.clf()
plt.errorbar(reduced_E_fields, v12s, stdDevT12s, color=palette(4), fmt='.', label="V_12")
plt.plot(reduced_E_fields, vel_func(reduced_E_fields, *optimizedParameters_v12), color=palette(4), label="fit V_12")
plt.errorbar(reduced_E_fields, v13s, stdDevV13s, color=palette(1), fmt='.', label="V_13")
plt.plot(reduced_E_fields, vel_func(reduced_E_fields, *optimizedParameters_v13), color=palette(1), label="fit V_13")
plt.errorbar(reduced_E_fields, v23s, stdDevV23s, color=palette(2), fmt='.', label="V_23")
plt.plot(reduced_E_fields, vel_func(reduced_E_fields, *optimizedParameters_v23), color=palette(2), label="fit V_23")
plt.legend()
plt.xlabel("E/p (V/Pa)")
plt.ylabel("Velocity (cm/µs)")
plt.grid(True)
plt.savefig("results_plot_vel_after_fixes.png")
plt.clf()
plt.errorbar(reduced_E_fields, diff13, err_diff13, color=palette(4), fmt='.', label="D_13")
plt.errorbar(reduced_E_fields, diff_12, err_diff12, color=palette(1), fmt='.', label="D_12")
plt.errorbar(reduced_E_fields, diff23, err_diff23, color=palette(2), fmt='.', label="D_23")
plt.legend()
plt.xlabel("E/p (V/Pa)")
plt.ylabel("D (mm^2/µs)")
plt.grid(True)
plt.savefig("diffusion.png")
plt.clf()
f = open(os.path.join(f"_tex_time_fit.tex"), 'w+')
f.write(tabulate(data_time,
headers=["Przypadek", "t0 (us)", "\Delta t_0 (us)", "a", "\Delta a"],
tablefmt='latex'))
f.close()
f1 = open(os.path.join(f"_tex_vel_fit.tex"), 'w+')
f1.write(tabulate(data_velocity,
headers=["Przypadek", "a (us)", "Delta a (us)", "b", "Delta b", "c", "Delta c"],
tablefmt='latex'))
f1.close() | [
"klaudia.dilcher@gmail.com"
] | klaudia.dilcher@gmail.com |
d2362fa08761676b441e809f4b9a29568e0a9bdd | 9c97b3e4af65daae6dd584d3c47750e56325af68 | /Character.py | 60c6f93990d5c5e9d597b39bcc54dd660871e3f1 | [] | no_license | tongsumate/MongoDB | a5f22002c30886b90d51a618bd1df1e3bf1b9a0e | 575fdac568cc0aabef11c34546c928fe17636702 | refs/heads/main | 2023-03-09T12:22:12.222005 | 2021-02-23T05:58:42 | 2021-02-23T05:58:42 | 340,862,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,013 | py | import pymongo
from bson import json_util
from flask import Flask,jsonify,render_template,request
from flask_pymongo import PyMongo
app = Flask(__name__)
# NOTE(review): database credentials are hard-coded in the connection URI
# below (and in the commented-out alternative) — consider loading them from
# the environment or a config file instead of committing them to source.
client = pymongo.MongoClient("mongodb://admin:AAGzez02811@10.100.2.126:27017")
#client = pymongo.MongoClient("mongodb://admin:AAGzez02811@node9149-advweb-11.app.ruk-com.cloud:11154")
db = client["MMORPG"]
####### index ###############
@app.route("/")
def index():
    """Landing page: returns a fixed plain-text greeting."""
    welcome_text = "Hello World , Welcome to MongoDB"
    return welcome_text
########## GET ALL #################
@app.route("/Character", methods=['GET'])
def get_allcharacter():
    """List every character and weapon document as a single JSON array.

    The array starts with the label "Character : " followed by one dict per
    character, then the label "Weapon : " followed by one dict per weapon.
    """
    char = db.Character
    weapon = db.Weapon
    output = []
    output.append("Character : ")
    for x in char.find():
        # Bug fix: documents created by this app store bson.ObjectId in _id,
        # which Flask's jsonify cannot serialize (TypeError) — stringify it.
        output.append({'_id': str(x['_id']),
                       'name': x['name'],
                       'level': x['level'],
                       'class': x['class'],
                       'guild': x['guild'],
                       'server': x['server']})
    output.append("Weapon : ")
    for y in weapon.find():
        # char_id references a character _id, so it is also an ObjectId.
        output.append({'char_id': str(y['char_id']),
                       'weapon_name': y['weapon_name'],
                       'weapon_type': y['weapon_type'],
                       'weapon_amount': y['weapon_amount']})
    return jsonify(output)
############## JOIN COLLECTION ###############
@app.route("/Inventory", methods=['GET'])
def get_inventory():
    """Return all characters joined with their weapons via a $lookup stage."""
    pipeline = [
        {
            '$lookup': {
                'from': "Weapon",
                'localField': '_id',
                'foreignField': 'char_id',
                'as': "Weapon",
            }
        }
    ]
    joined = db.Character.aggregate(pipeline)
    # json_util handles BSON types (ObjectId, dates) that plain json cannot.
    return json_util.dumps(joined)
############## JOIN name,nameweapon ###############
@app.route("/InventoryChar", methods=['GET'])
def get_inventoryjoin():
    """Return character-name / weapon-name pairs, one row per weapon."""
    pipeline = [
        # Join each character with the weapons that reference its _id.
        {'$lookup': {'from': "Weapon",
                     'localField': '_id',
                     'foreignField': 'char_id',
                     'as': "Weapon"}},
        # Emit one document per weapon in the joined array.
        {'$unwind': '$Weapon'},
        # Keep only the character name and the weapon name.
        {'$project': {'name': 1,
                      'weapon_name': '$Weapon.weapon_name'}},
    ]
    rows = db.Character.aggregate(pipeline)
    return json_util.dumps(rows)
############## GET ONE ############################
@app.route("/Character/<name>", methods=['GET'])
def get_onecharacter(name):
    """Look up a single character by name; answer "No such name" if absent."""
    doc = db.Character.find_one({'name': name})
    if not doc:
        return jsonify("No such name")
    fields = ('name', 'level', 'class', 'guild', 'server')
    return jsonify({key: doc[key] for key in fields})
######################### INSERT ####################
@app.route('/Character', methods=['POST'])
def add_character():
    """Create a character from the JSON request body and echo it back."""
    chars = db.Character
    fields = ('name', 'level', 'class', 'guild', 'server')
    # Pull exactly the expected fields from the request payload.
    doc = {key: request.json[key] for key in fields}
    new_id = chars.insert(doc)
    # Re-read the stored document and echo its fields to the caller.
    saved = chars.find_one({'_id': new_id})
    return jsonify({key: saved[key] for key in fields})
##################### UPDATE ########################
@app.route('/Character/<name>', methods=['PUT'])
def update_character(name):
    """Overwrite the fields of the character identified by *name*.

    Expects a JSON body with name/level/class/guild/server. Returns the
    new field values, or "No such name" when no character matches.
    """
    char = db.Character
    existing = char.find_one({'name': name})
    if not existing:
        # Bug fix: previously this path fell through to `return jsonify(output)`
        # with `output` never assigned, raising UnboundLocalError (HTTP 500).
        return jsonify("No such name")
    new_values = {'name': request.json['name'],
                  'level': request.json['level'],
                  'class': request.json['class'],
                  'guild': request.json['guild'],
                  'server': request.json['server']}
    # Match by _id so the update targets exactly the document we found.
    char.update_one({'_id': existing['_id']}, {"$set": new_values})
    return jsonify(new_values)
##################### DELETE ############################
@app.route('/Character/<name>', methods=['DELETE'])
def delete_character(name):
    """Delete the character identified by *name*.

    Returns "Deleted complete" on success, or "No such name" when no
    character matches. (Previously a missing name passed None to
    delete_one, which raises and answered with HTTP 500.)
    """
    char = db.Character
    x = char.find_one({'name': name})
    if not x:
        return jsonify("No such name")
    char.delete_one({'_id': x['_id']})
    return jsonify("Deleted complete")
if __name__ == "__main__":
    # Listen on all interfaces; port 80 typically requires elevated privileges.
    app.run(host='0.0.0.0',port = 80)
"sumet.srithong@gmail.com"
] | sumet.srithong@gmail.com |
75a63b080058ba26e1aa2ae9b422c95c519a403c | 3e93c3bbe35c24bf7f1a75c612ab300f37063621 | /C1/L1_18_mappingnameseq_namedtuple.py | f393d21fd2cf887c699056da4973e6a7725476db | [] | no_license | rengokantai/orpycok3ed | 5ac0195a48f02dcc5bbc720e812f637464215e8f | 50ce744265dc6af0d1a4724ea52348faeb47764d | refs/heads/master | 2021-01-10T05:05:53.477092 | 2016-03-12T20:04:45 | 2016-03-12T20:04:45 | 53,352,163 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | __author__ = 'Hernan Y.Ke'
from collections import namedtuple
# memorize this syntax
Me = namedtuple('Me',['first','last'])
me = Me(1,2)
print(me.first,me.last)
she=[Me(3,4),Me(5,6)]
#me = Me(first=1,last=2) # illegal!
me = me._replace(first=3)
print(me.first)
# get namedtuple
def get_num(tuplearr):
res=0
for param in tuplearr:
s = Me(*param) # iterate a array with namedtuple instance. param->All params of a instance
res+=s.first+s.last
return res
print(get_num(she))
#replace all params
def replace_params(tupleparams):
return me._replace(**tupleparams) # two stars. kwargs
newparams={'first':7,'last':8}
print(replace_params(newparams)) | [
"yuriqiao@gmail.com"
] | yuriqiao@gmail.com |
88a1fb94a80440856187d57d3df5d55a56f854f5 | 0dc8627205c1545b4a5d82d1b9e55bc64eedc0b8 | /transcripts/conf.py | 3f10dbffa1b2b69fda708f5fbb98846f344db5cd | [] | no_license | evildmp/DjangoConEuropeTranscripts | 508ee00c1c2bde803dd13aaac6171eb9fbcbb2db | d21e57780e1b4c497d8a700e5b99999bded9f303 | refs/heads/master | 2016-08-11T06:52:40.669453 | 2015-08-13T22:32:33 | 2015-08-13T22:32:33 | 36,973,906 | 12 | 9 | null | 2017-01-01T08:49:40 | 2015-06-06T09:01:29 | Python | UTF-8 | Python | false | false | 9,937 | py | # -*- coding: utf-8 -*-
#
# Speech-to-text reports from DjangoCon Europe 2015 build configuration file, created by
# sphinx-quickstart on Sat Jun 6 09:11:53 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Speech-to-text reports from DjangoCon Europe 2015'
copyright = u'2015, Hilary Maclean and Sheryll Holley'
author = u'Hilary Maclean and Sheryll Holley'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Speech-to-textreportsfromDjangoConEurope20155'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Speech-to-textreportsfromDjangoConEurope2015.tex', u'Speech-to-text reports from DjangoCon Europe 2015',
u'Hilary Maclean and Sheryll Holley', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Speech-to-textreportsfromDjangoConEurope2015', u'Speech-to-text reports from DjangoCon Europe 2015',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Speech-to-textreportsfromDjangoConEurope2015', u'Speech-to-text reports from DjangoCon Europe 2015',
author, 'Speech-to-textreportsfromDjangoConEurope2015', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"daniele@vurt.org"
] | daniele@vurt.org |
746d5b1978fde02f7eee9b5aa0e3c59284e76031 | 632027b71895bec28fbb126198fcdf659618b138 | /Lista_05/Lista_05_Ex_04.py | d54e4b4493c1f509ebbd0a19d8fe26138cfe4977 | [] | no_license | VTozo/Raciocinio-Algoritmico | 40f6d1ff536bc65e7b5473175f9dd28c3329554c | 65157087fd939b82d2d88bbc5fa85e783e6fab4a | refs/heads/master | 2020-03-13T09:38:51.877256 | 2018-04-25T22:21:03 | 2018-04-25T22:21:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py |
print("1- Dólar 2- Libra- 3 Euro")
moeda = int(input("Qual moeda você quer? "))
if moeda < 1 or 3 < moeda:
print("Valor incorreto!")
exit()
montante = float(input("Digite o valor desejado: "))
if montante < 0:
print("Valor incorreto!")
exit()
if moeda == 1:
preco = montante * 3.30152861
elif moeda == 2:
preco = montante * 4.65882003
elif moeda == 3:
preco = montante * 4.06302618
if montante < 1000:
preco *= 1.05
else:
preco *= 1.03
print("O preço final é: R$ %.2f" %preco)
| [
"noreply@github.com"
] | noreply@github.com |
68a929c1b07064d9de55642bbf4f31a2ea0f4e29 | 3469179384888faba953972c5ec507a59e4d3da3 | /NN.py | 48e4f1e70cadb290cefbfb971bd4181eee7346f9 | [] | no_license | pwalaw2/Neural_Network | 299482b48c02e6f4e5f5ae65cab90717a065f7a8 | a895cb8f07e221c0cee588490f5824be60141cd4 | refs/heads/master | 2021-04-15T03:30:54.279578 | 2018-03-22T02:37:33 | 2018-03-22T02:37:33 | 126,264,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,594 | py | # @Name: Pratik Walawalkar
# @UIN: 667624808
# importing libraries: random and matplotlib
# random: for generating random numbers, matplotlib: for plotting graphs
import random
import matplotlib.pyplot as plt
# Target weights w = [w0, w1, w2]: these define the "true" separating line
# used to label the random samples. w0 in [-0.25, 0.25]; w1, w2 in [-1, 1].
w0 = random.uniform(-0.25,0.25)
w1 = random.uniform(-1,1)
w2 = random.uniform(-1,1)
w = []
w.append(w0)
w.append(w1)
w.append(w2)
# Initial weights w' = [wi0, wi1, wi2] for the perceptron training
# algorithm, drawn uniformly from [-1, 1].
wi0 = random.uniform(-1,1)
wi1 = random.uniform(-1,1)
wi2 = random.uniform(-1,1)
wi = []
wi.append(wi0)
wi.append(wi1)
wi.append(wi2)
# Perceptron training algorithm (PTA) experiment driver.
def PTA(experiment):
    """Run the full perceptron experiment on `experiment` random samples.

    Draws samples uniformly from [-1, 1]^2, labels them with the target
    weights (module globals w/w0/w1/w2), plots the labelled data with the
    separating line, then trains a perceptron from the initial weights
    (globals wi/wi0/wi1/wi2) for each learning rate eta in (1, 0.1, 10),
    printing per-eta convergence info and plotting epoch-vs-error curves.

    :param experiment: number of random sample points to draw.
    """
    print("Optimal weights before PTA i.e. w: ",w)
    print("Updated weights for carrying out PTA i.e. w': ",wi)
    T = []
    s = []
    s0 = []
    s1 = []
    d = []
    # picking S = x1,...,xn vectors independently and uniformly at random in [-1,1]
    # creating [1 x1 x2] list (bias input prepended) for each sample
    for p in range(experiment):
        T.append([])
        T[p].append(1)
        T[p].append(random.uniform(-1,1))
        T[p].append(random.uniform(-1,1))
    # label each sample: dot([1 x1 x2], w) through the step activation u(.)
    # NOTE(review): the local name `sum` shadows the builtin sum() inside PTA.
    for i in range(experiment):
        sum= (w[0]*T[i][0])+(w[1]*T[i][1])+(w[2]*T[i][2])
        T[i].pop(0)
        s.append(T[i])
        if sum<0:
            d.append(0) # desired output 0
            s0.append(T[i]) # S0 subset of S (below the line)
        else:
            d.append(1) # desired output 1
            s1.append(T[i]) # S1 subset of S (on/above the line)
    # d is the collection of all desired outputs (zeros and ones)
    # print("S: ",s,"\r\nTotal vectors in S: ",len(s))
    # print("\r\nTotal vectors in S0: ",len(s0))
    # print("\r\nTotal vectors in S1: ",len(s1))
    # scatter-plot the S1 (green squares) and S0 (red circles) samples
    for x in s1:
        x1 = x[0]
        x2 = x[1]
        plt.plot(x1,x2,'gs')
    for x in s0:
        x1 = x[0]
        x2 = x[1]
        plt.plot(x1,x2,'ro')
    # plot the target line w0 + w1*x1 + w2*x2 = 0 through the plot edges
    m1 = (-w0-w2)/w1 # x1 value at x2 = 1
    m2 = (-w0+w2)/w1 # x1 value at x2 = -1
    xline = [m1,m2]
    yline = [1,-1]
    plt.title("Plot before PTA")
    plt.axis([-1, 1, -1, 1])
    plt.plot(xline,yline,'b-',lw=2)
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.show()
    # red circles denote S0 vectors (<0), green squares denote S1 vectors (>=0)
    print("\r\nIndex-> Red = Class S0 ; Green = Class S1\r\n")
    # rebuild the [1 x1 x2] training vectors (bias re-prepended)
    Ti = []
    for e in range(experiment):
        Ti.append([])
        Ti[e] = [1]+s[e]
    eta = [1,0.1,10] # learning rates to compare
    for n in range(3):
        misarray = []
        epocharray = []
        epoch = 0
        flag = 0
        sum = 0
        wii = []
        # fresh copy of the initial weights w' for this learning rate
        wu0 = wi0
        wu1 = wi1
        wu2 = wi2
        while(flag==0):
            mis = 0 # misclassification counter for this epoch
            # one epoch of the perceptron training algorithm
            for e in range(experiment):
                # dot product of [1 x1 x2] with the current weights wu
                sum = (wu0*Ti[e][0])+(wu1*Ti[e][1])+(wu2*Ti[e][2])
                if sum<0:
                    cal = 0 # step activation: output 0 for negative sums
                else:
                    cal = 1 # step activation: output 1 otherwise
                if (cal != d[e]):
                    mis = mis + 1 # count the misclassification
                    # perceptron update rule: w <- w + eta * x * (d - y)
                    wu0 = wu0 + (eta[n]*(Ti[e][0])*(d[e]-cal))
                    wu1 = wu1 + (eta[n]*(Ti[e][1])*(d[e]-cal))
                    wu2 = wu2 + (eta[n]*(Ti[e][2])*(d[e]-cal))
            wii.append([])
            wii[epoch].append(wu0)
            wii[epoch].append(wu1)
            wii[epoch].append(wu2)
            epoch = epoch+1 # one epoch = one full pass over the samples
            misarray.append(mis) # misclassifications per epoch (for the plot)
            epocharray.append(epoch) # epoch numbers (for the plot)
            if mis==0:
                flag = 1 # zero errors: converged, leave the while loop
            else:
                flag = 0
        print("Weights after first epoch i.e. w'' : ",wii[0]) # w'' after first epoch
        print("For eta = ",eta[n]," :")
        print("Total number of epochs required for convergence: ",epoch) # Total number of epochs
        print("Final weights: ",wii[epoch-1]) # weights at convergence
        # epoch vs misclassification curve for this eta and sample count
        plt.title("Epoch V/S Miss")
        plt.axis([0,epoch+1,0,100])
        plt.plot(epocharray,misarray,'o-')
        plt.show()
    return
print("------------Perceptron Training Algorithm with 100 samples-----------------\r\n")
# passing 100 samples into PTA function
PTA(100)
print("------------Perceptron Training Algorithm with 1000 samples-----------------\r\n")
# passing 1000 samples into PTA function
PTA(1000)
| [
"pwalaw2@uic.edu"
] | pwalaw2@uic.edu |
8d9a2d6979882bef832c416fb15445d8e897680d | 2de8dd72970042ca46277cdf09c3b293636e491e | /hw/hw01/hw01.py | f23b72f91e702c2881a23487e0ca6e501309b58e | [] | no_license | hironorinakauchi/cs61a | 3256d0885bbe28ed6a41097038d57255a3e1661c | 3238c663881f938d7d3dd79e5244522664998d21 | refs/heads/master | 2020-04-13T00:50:39.094451 | 2018-12-23T02:18:37 | 2018-12-23T02:18:37 | 162,855,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | py | def your_course_username():
"""Return your course username.
>>> username = your_course_username()
>>> username.startswith('cs61a-')
True
"""
return "cs61a-agf"
from operator import add, sub
def a_plus_abs_b(a, b):
    """Return a+abs(b), but without calling abs.

    >>> a_plus_abs_b(2, 3)
    5
    >>> a_plus_abs_b(2, -3)
    5
    """
    # Subtracting a negative b is the same as adding its absolute value.
    op = sub if b < 0 else add
    return op(a, b)
def two_of_three(a, b, c):
    """Return x*x + y*y, where x and y are the two largest members of the
    positive numbers a, b, and c.

    >>> two_of_three(1, 2, 3)
    13
    >>> two_of_three(5, 3, 1)
    34
    >>> two_of_three(10, 2, 8)
    164
    >>> two_of_three(5, 5, 5)
    50
    """
    # Sum all three squares, then drop the square of the smallest member.
    smallest = min(a, b, c)
    return a * a + b * b + c * c - smallest * smallest
def largest_factor(n):
    """Return the largest factor of n*n-1 that is smaller than n.

    >>> largest_factor(4) # n*n-1 is 15; factors are 1, 3, 5, 15
    3
    >>> largest_factor(9) # n*n-1 is 80; factors are 1, 2, 4, 5, 8, 10, ...
    8
    """
    x = n * n - 1
    # Scan candidates downward and return the first divisor found: only
    # values below n matter, so there is no need to test every i up to
    # n*n-1 as before (that was accidentally O(n^2) work).
    # Since x = (n-1)*(n+1), the loop always hits n-1 first for n >= 2.
    for i in range(n - 1, 0, -1):
        if x % i == 0:
            return i
def if_function(condition, true_result, false_result):
    """Return true_result if condition is a true value, and
    false_result otherwise.

    >>> if_function(True, 2, 3)
    2
    >>> if_function(False, 2, 3)
    3
    >>> if_function(3==2, 3+2, 3-2)
    1
    >>> if_function(3>2, 3+2, 3-2)
    5
    """
    return true_result if condition else false_result

def with_if_statement():
    """
    >>> with_if_statement()
    1
    """
    # An `if` statement evaluates only the branch it takes, so f() — which
    # would divide by zero — is never called here.
    if c():
        return t()
    return f()

def with_if_function():
    # A call expression evaluates all of its operands first, so f() runs
    # (and raises ZeroDivisionError) before if_function is ever entered.
    return if_function(c(), t(), f())

def c():
    """The condition: always a true value."""
    return 1 == 1

def t():
    """The taken branch's value."""
    return 1

def f():
    """The untaken branch: raises ZeroDivisionError when evaluated."""
    return 1 / 0
def hailstone(n):
    """Print the hailstone sequence starting at n and return its
    length.

    >>> a = hailstone(10)
    10
    5
    16
    8
    4
    2
    1
    >>> a
    7
    """
    length = 1
    print(n)
    while n != 1:
        # Halve even numbers; map odd numbers to 3n + 1.
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
        print(n)
    return length
challenge_question_program = """
"ILOVECS = "ILOVEEECS = chr(73) + chr(76) + chr(79) + chr(86) + chr(69) + chr(67) + chr(83) + chr(32) + chr(61) + chr(32) + chr(34); ILOVEEECS += ILOVECS; print(repr("ILOVEEECS = chr(73) + chr(76) + chr(79) + chr(86) + chr(69) + chr(67) + chr(83) + chr(32) + chr(61) + chr(32) + chr(34); ILOVEEECS += ILOVECS; print(ILOVEEECS) + chr(34); print(ILOVECS)")) + chr(34); print(ILOVECS)";ILOVEEECS = chr(73) + chr(76) + chr(79) + chr(86) + chr(69) + chr(67) + chr(83) + chr(32) + chr(61) + chr(32) + chr(34); ILOVEEECS += ILOVECS; print(ILOVEEECS + chr(34));print(repr("ILOVEEECS = chr(73) + chr(76) + chr(79) + chr(86) + chr(69) + chr(67) + chr(83) + chr(32) + chr(61) + chr(32) + chr(34); ILOVEEECS += ILOVECS; print(ILOVEEECS) + chr(34); print(ILOVECS)"))
"
"""
# c = [(x) for x in [67,83,61,65] if x > 61];print(list(map(chr,c)))
# c = [(x) for x in [67,83,61,65] if x > 61];print(list(map(chr,c)));
# print(list(map(chr, c = [(x) for x in [67, 83, 61, 65] if x > 61])))
# doesnt work since map() doesnt take keyword arguments
# c = [(x) for x in [67, 83, 61, 65]] if (list(map(chr, c)))[0] == eval(repr(c))
# ILOVECS = "ILOVEEECS = chr(73) + chr(76) + chr(79) + chr(86) + chr(69) + chr(67) + chr(83) + chr(32) + chr(61) + chr(32) + chr(34)"
# ILOVEEECS = chr(73) + chr(76) + chr(79) + chr(86) + chr(69) + chr(67) + chr(83) + chr(32) + chr(61) + chr(32) + chr(34); ILOVEEECS += ILOVECS; print(ILOVEEECS)
# which returns | [
"hironorisama@gmail.com"
] | hironorisama@gmail.com |
789bbf9f99878e115df5bf5cf4714f48a107da13 | 3d08c317ea23fc5c10511badb2b518803db3a0ba | /spekulatio/som/node.py | 43fe20dfc3e97228411b3314d9ae7bb78d6f76bd | [
"MIT"
] | permissive | iwilltry42/spekulatio | c6491731952e84ced895525c60cd7aa5553400c4 | 42d678b7d7fcc13284902be5a08fb0407d96ec4d | refs/heads/master | 2021-07-11T05:39:59.683218 | 2020-11-23T20:14:54 | 2020-11-23T20:14:54 | 211,082,189 | 0 | 0 | MIT | 2019-09-26T12:21:36 | 2019-09-26T12:21:35 | null | UTF-8 | Python | false | false | 2,532 | py |
from collections import OrderedDict
class Node:
    """Base class of all the nodes in a som."""

    def __init__(self, path, is_dir, title=None, data=None, toc=None, content=None):
        self.path = path
        self.is_dir = is_dir
        self.local_data = OrderedDict()
        self.global_data = OrderedDict()
        self.data = OrderedDict()

        # metadata
        self._title = title
        self.toc = toc or []
        self.content = content
        self.raw_data = data or {}

        # relationships
        self.parent = None
        self.next = None
        self.prev = None
        self.root = None
        self.children = []

    @property
    def skip(self):
        """True for a directory node with no children."""
        return self.is_dir and not self.children

    @property
    def title(self):
        """Title from the node's raw data, falling back to the constructor value."""
        return self.raw_data.get('title') or self._title

    @property
    def url(self):
        """Relative URL of the node's rendered HTML page."""
        # an explicitly configured URL always wins
        explicit_url = self.data.get('_url')
        if explicit_url:
            return explicit_url
        # otherwise derive it from the path relative to the tree root
        relative_path = self.path.relative_to(self.root.path)
        if self.is_dir:
            target = relative_path / 'index.html'
        else:
            target = relative_path.with_suffix('.html')
        return '/' + '/'.join(target.parts)

    @property
    def sections(self):
        """Return the second level entries in the toc."""
        if not self.toc:
            return []
        return self.toc[0]['children']

    def iter_nodes(self):
        """Depth-first traversal of the subtree below this node."""
        for child in self.children:
            yield child
            if child.is_dir:
                yield from child.iter_nodes()

    def iter_dir_nodes(self):
        """Yield only the children nodes that are directories."""
        yield from (child for child in self.children if child.is_dir)

    def add_child(self, node):
        """Set parent/child relationship between this and the passed node."""
        node.parent = self
        self.children.append(node)

    def same_branch(self, node):
        """Return True if the passed node is this node or one of its ancestors."""
        current = self
        while current is not None:
            if current == node:
                return True
            current = current.parent
        return False

    def __str__(self):
        return str(self.path)

    def __repr__(self):
        return '<{}:{}>'.format(self.__class__, self.path)
| [
"asopena@ehmm.org"
] | asopena@ehmm.org |
1604c99f9ae2007ff5743f2baa50fd62b161e9be | 996a45b73d950e1d4a094b8c17ffd29262cf82a7 | /00_College Courses/Foothill College/Python for Programmers 2/Assignment 01/HunterCarlisleLab1-1.py | 723b88adb9cd68df1d00f7a9cab2828b1b9a3297 | [] | no_license | huntingcarlisle/CS-Coursework | 3e4e848ff628b019d8de58cf1e5df3241145b27c | 592d8d75dc6d79d7dd3e53e29e499e43640e37cb | refs/heads/master | 2021-07-01T16:57:14.530850 | 2019-05-03T08:42:39 | 2019-05-03T08:42:39 | 132,169,792 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | #!/usr/bin/env python3
from datetime import date
"""
Hunter Carlisle | Foothill College Fall 2018 | Lab One
This program takes user input and prints various expressions to console.
Inputs: User Last Name and Student ID
Print Output: Expressions per Assignment Spec
"""
# Get and Validate User Input
# Loop until the surname is purely alphabetic and 2-14 characters long.
# NOTE(review): range(2, 15) excludes length 15, which matches the
# assignment transcript -- confirm that bound is intended.
while True:
    family_name = input("Enter your family name: ")
    if (family_name.isalpha() and (len(family_name) in range(2, 15))):
        break
# Loop until the ID is exactly 8 digits.
while True:
    student_id = input("Enter your student ID: ")
    if (student_id.isdigit() and (len(student_id) == 8)):
        break
# n_let: length of the surname; my_id: digit sum of the student ID.
# (student_id is already a str, so str() here is a no-op.)
n_let = len(family_name)
my_id = sum(int(digit) for digit in str(student_id))
# Evaluate and Store Expressions per Spec
# bool(1 or (my_id / 0)) relies on short-circuiting: the division by
# zero is never evaluated.
expressions = ['{0:.2f}'.format(my_id / 2), my_id % 2, sum(range(2, n_let)),
               my_id + n_let, abs(n_let - my_id),
               '{0:.2f}'.format(my_id / (n_let + 1100)),
               bool((n_let % n_let) and (my_id * my_id)),
               bool(1 or (my_id / 0)), '{0:.2f}'.format(round(3.15, 1))]
# Print to Console
print("my_id is: " + str(my_id))
print("n_let is: " + str(n_let))
# Number the expressions from 1 to match the assignment output format.
for index, value in enumerate(expressions, start = 1):
    print("expression {}: {}".format(str(index),
           str(value)))
print("Today's date is " + str(date.today()))
# Program Run
"""
/home/huntingcarlisle/coding/metaSlate-master/venvUbuntu/bin/python /home/huntingcarlisle/assignementOne/HunterCarlisleLab1.py
Enter your family name: Carlisle
Enter your student ID: 20343101
my_id is: 14
n_let is: 8
expression 1: 7.00
expression 2: 0
expression 3: 27
expression 4: 22
expression 5: 6
expression 6: 0.01
expression 7: False
expression 8: True
expression 9: 3.10
Today's date is 2018-10-02
Process finished with exit code 0
"""
| [
"huntingcarlisle@gmail.com"
] | huntingcarlisle@gmail.com |
bbfb3c710b2df0d470dc404bc76435144be5f9cc | 7b4cd67991b65dd5350f9ccde422d102ec1fb247 | /ml_project/tests/features/test_build_features.py | 321140ec143eed2b22b1ff1d91443b4c43a66299 | [] | no_license | made-ml-in-prod-2021/spin-to-win | ba6becbcc3410ad533a943fdb8cde440717aa10f | 6d71b18362d605945f1c6655c4b5d5aa4f143ae2 | refs/heads/main | 2023-06-06T14:24:41.560146 | 2021-06-28T10:14:45 | 2021-06-28T10:14:45 | 355,611,260 | 0 | 0 | null | 2021-06-28T10:14:46 | 2021-04-07T16:23:27 | HTML | UTF-8 | Python | false | false | 2,344 | py | from src.features.build_features import *
import pytest
# Pytest suite for RawDataPreprocessor. The fixtures train_data_sample,
# fake_data and expected_columns are injected by pytest -- presumably
# defined in a conftest.py elsewhere in the project (not visible here).
def test_can_check_raw_data(train_data_sample):
    # The sample must expose exactly the 14 expected heart-dataset columns.
    assert set(train_data_sample.columns) == {
        'age','ca','chol','cp','exang','fbs','oldpeak',
        'restecg','sex','slope','target','thal','thalach','trestbps'
    }
    RawDataPreprocessor.check_raw_data(train_data_sample)
    # Extra, unexpected columns are tolerated by the checker.
    train_data_sample['new_feature_that_should_not_raise'] = 123
    RawDataPreprocessor.check_raw_data(train_data_sample)
def test_raise_if_bad_columns(train_data_sample):
    # Renaming a required column must make validation fail.
    train_data_broken = train_data_sample.rename(columns={'age': 'birthdate'})
    with pytest.raises(Exception) as e_info:
        RawDataPreprocessor.check_raw_data(train_data_broken)
    # Dropping the target column must make validation fail too.
    train_data_broken = train_data_sample.drop(columns={'target'})
    with pytest.raises(Exception) as e_info:
        RawDataPreprocessor.check_raw_data(train_data_broken)
# The following tests exercise each preprocessing stage in pipeline
# order; each one only checks that the stage runs without raising.
def test_can_select_usefull_columns(train_data_sample):
    RawDataPreprocessor.select_needed_columns(train_data_sample)
def test_can_rename_columns(train_data_sample):
    preprocessed = RawDataPreprocessor.select_needed_columns(train_data_sample)
    RawDataPreprocessor.rename_columns(preprocessed)
def test_can_make_human_readable_categories(train_data_sample):
    preprocessed = RawDataPreprocessor.select_needed_columns(train_data_sample)
    preprocessed = RawDataPreprocessor.rename_columns(preprocessed)
    RawDataPreprocessor.make_human_readable_categories(preprocessed)
def test_can_process_categorical_features(train_data_sample):
    preprocessed = RawDataPreprocessor.select_needed_columns(train_data_sample)
    preprocessed = RawDataPreprocessor.rename_columns(preprocessed)
    preprocessed = RawDataPreprocessor.make_human_readable_categories(preprocessed)
    RawDataPreprocessor.process_categorical_features(preprocessed)
def test_can_preprocess_raw_data(train_data_sample, expected_columns):
    # End-to-end: fit_transform must produce exactly the expected columns.
    transformer = RawDataPreprocessor()
    train_data_transformed = transformer.fit_transform(train_data_sample)
    assert set(expected_columns) == set(train_data_transformed.columns)
def test_can_preprocess_fake_data(fake_data, expected_columns):
    # Fake data may yield extra columns; all expected ones must be present.
    transformer = RawDataPreprocessor()
    train_data_transformed = transformer.fit_transform(fake_data)
    assert all([i in train_data_transformed.columns for i in expected_columns])
| [
"d.svirchkov@qiwi.com"
] | d.svirchkov@qiwi.com |
e7264ccc6a71876dff805367e13c30b468a009de | 8c80f1220297b91707b42a0baee31365e69d2d1d | /build/lib/WORC/plotting/plotminmaxresponse.py | 3eef97f930d725f863784c0b8d43dbe6a91372e7 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Sikerdebaard/WORC | 4fca18330513ea0c500a90e770beb345b427d539 | 2c7a23c0a0c7480af378b9e093f06989b3304c8b | refs/heads/master | 2020-05-25T05:02:35.060113 | 2019-05-08T07:02:30 | 2019-05-08T07:02:30 | 187,640,566 | 0 | 0 | null | 2019-05-20T12:56:28 | 2019-05-20T12:56:27 | null | UTF-8 | Python | false | false | 11,356 | py | #!/usr/bin/env python
# Copyright 2016-2019 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import argparse
import WORC.labels.label_processing as lp
import os
import glob
from natsort import natsorted
import numpy as np
from PREDICT.plotting.getfeatureimages import getfeatureimages
import scipy
def main():
    """Command-line entry point.

    Reads per-patient radiomics feature files (HDF5), matches them with
    class labels, then for each feature saves images of the patients
    with the minimum and maximum feature response.

    FIX: the two min/max report lines were Python 2 print *statements*,
    which are a SyntaxError under Python 3 (the rest of this file uses
    print() calls); they are now function calls. The loop variable that
    shadowed the `label` list it iterates over has also been renamed.
    """
    parser = argparse.ArgumentParser(description='Radiomics results')
    parser.add_argument('-im', '--im', metavar='im',
                        nargs='+', dest='im', type=str, required=False,
                        help='List of patient image files (nii)')
    parser.add_argument('-seg', '--seg', metavar='seg',
                        nargs='+', dest='seg', type=str, required=False,
                        help='List of patient segmentation files (nii)')
    parser.add_argument('-imtest', '--imtest', metavar='imtest',
                        nargs='+', dest='imtest', type=str, required=False,
                        help='List of patient image files of test database (nii)')
    parser.add_argument('-segtest', '--segtest', metavar='segtest',
                        nargs='+', dest='segtest', type=str, required=False,
                        help='List of patient segmentation files of test database (nii)')
    parser.add_argument('-feat', '--feat', metavar='feat',
                        nargs='+', dest='feat', type=str, required=True,
                        help='List of patient feature files (HDF)')
    parser.add_argument('-class', '--class', metavar='class',
                        nargs='+', dest='classs', type=str, required=True,
                        help='Classification of patients (text)')
    parser.add_argument('-label_type', '--label_type', metavar='label_type',
                        nargs='+', dest='label_type', type=str, required=True,
                        help='Name of the label that was predicted')
    parser.add_argument('-out', '--out', metavar='out',
                        nargs='+', dest='out', type=str, required=False,
                        help='Output folder')
    args = parser.parse_args()

    # nargs='+' yields one-element lists for single values; collapse
    # those to plain strings.
    if type(args.classs) is list:
        args.classs = ''.join(args.classs)

    if type(args.label_type) is list:
        args.label_type = ''.join(args.label_type)

    if type(args.out) is list:
        args.out = ''.join(args.out)

    if type(args.feat) is list and len(args.feat) == 1:
        args.feat = ''.join(args.feat)

    # A single directory argument means "use every feature file inside".
    if os.path.isdir(args.feat):
        args.feat = glob.glob(args.feat + '/features_*.hdf5')
        args.feat = natsorted(args.feat)

    if type(args.im) is list:
        args.im = ''.join(args.im)

    if type(args.seg) is list:
        args.seg = ''.join(args.seg)

    if type(args.imtest) is list:
        args.imtest = ''.join(args.imtest)

    if type(args.segtest) is list:
        args.segtest = ''.join(args.segtest)

    # Read and stack the features: one {label: value} dict per patient.
    print("Reading features.")
    image_features_temp = list()
    for i_feat in range(len(args.feat)):
        feat_temp = pd.read_hdf(args.feat[i_feat])
        feat_values = feat_temp.feature_values
        feat_labels = feat_temp.feature_labels
        feat = {k: v for k, v in zip(feat_labels, feat_values)}
        image_features_temp.append(feat)

    # Get the labels and patient IDs
    print("Reading class labels.")
    label_type = args.label_type
    label_data, image_features = lp.findlabeldata(args.classs,
                                                  label_type,
                                                  args.feat,
                                                  image_features_temp)

    labels = image_features[0].keys()
    featvect = dict()
    flab = dict()
    for l in labels:
        featvect[l] = {"all": [], "1": [], "0": []}
        flab[l] = {"all": [], "1": [], "0": []}

    # Stack per feature type and class
    print("Stacking features.")
    # FIX: the loop variable used to shadow this list; renamed to keep
    # the list and the per-patient label distinct.
    label_list = label_data['label'].tolist()[0]
    patient_IDs = label_data['patient_IDs'].tolist()
    for imfeat, label, pid in zip(image_features, label_list, patient_IDs):
        for fl in labels:
            featvect[fl]['all'].append(imfeat[fl])
            flab[fl]['all'].append(pid)
            if label[0] == 0:
                featvect[fl]['0'].append(imfeat[fl])
                flab[fl]['0'].append(pid)
            else:
                featvect[fl]['1'].append(imfeat[fl])
                flab[fl]['1'].append(pid)

    # Save image of min and max response per feature
    image_type = 'CT'
    # imname = '/*/*/image.nii.gz'
    # segname = '/*/*/seg*.nii.gz'
    imname = '/*preop_Tumor.nii.gz'
    segname = '/*Tumor_mask.nii.gz'
    for fl in labels:
        if 'cf_' not in fl:
            features = featvect[fl]['all']
            maxind = np.argmax(features)
            minind = np.argmin(features)
            # FIX: these were Python 2 print statements (SyntaxError on
            # Python 3).
            print(fl, 'min', patient_IDs[minind])
            print(fl, 'max', patient_IDs[maxind])

            if args.im is not None:
                # Locate image/segmentation for the min/max patients,
                # falling back to the test database folders.
                im_min = glob.glob(os.path.join(args.im, patient_IDs[minind]) + imname)
                if len(im_min) == 0:
                    # Search in testing folder
                    im_min = glob.glob(os.path.join(args.imtest, patient_IDs[minind]) + imname)[0]
                else:
                    im_min = im_min[0]

                seg_min = glob.glob(os.path.join(args.seg, patient_IDs[minind]) + segname)
                if len(seg_min) == 0:
                    # Search in testing folder
                    seg_min = glob.glob(os.path.join(args.segtest, patient_IDs[minind]) + segname)[0]
                else:
                    seg_min = seg_min[0]

                im_max = glob.glob(os.path.join(args.im, patient_IDs[maxind]) + imname)
                if len(im_max) == 0:
                    # Search in testing folder
                    im_max = glob.glob(os.path.join(args.imtest, patient_IDs[maxind]) + imname)[0]
                else:
                    im_max = im_max[0]

                seg_max = glob.glob(os.path.join(args.seg, patient_IDs[maxind]) + segname)
                if len(seg_max) == 0:
                    # Search in testing folder
                    seg_max = glob.glob(os.path.join(args.segtest, patient_IDs[maxind]) + segname)[0]
                else:
                    seg_max = seg_max[0]

                # NOTE(review): scipy.misc.imsave was removed in
                # SciPy >= 1.2; consider imageio.imwrite -- confirm the
                # pinned SciPy version before upgrading.
                if 'LBP' in fl:
                    # Save LBP image
                    LBPim = getfeatureimages(im_min, seg_min,
                                             image_type=image_type,
                                             types=['LBP'])[0]
                    filename = fl + '_min_' + patient_IDs[minind] + '.png'
                    savename = os.path.join(args.out, filename)
                    scipy.misc.imsave(savename, np.fliplr(np.rot90(LBPim, 3)))

                    LBPim = getfeatureimages(im_max, seg_max,
                                             image_type=image_type,
                                             types=['LBP'])[0]
                    filename = fl + '_max_' + patient_IDs[maxind] + '.png'
                    savename = os.path.join(args.out, filename)
                    scipy.misc.imsave(savename, np.fliplr(np.rot90(LBPim, 3)))
                elif 'Gabor' in fl:
                    # Recover the Gabor frequency/angle from the feature
                    # label itself (format: ...GaborF<freq>A<angle>...).
                    Gind = fl.index('Gabor')
                    Aind = fl.index('A')

                    gabor_settings = dict()
                    gabor_settings['gabor_frequencies'] = [float(fl[Gind + 6:Aind])]
                    try:
                        gabor_settings['gabor_angles'] = [float(fl[Aind + 1:Aind +1 + 4])]
                    except ValueError:
                        # 0.0: two numbers
                        gabor_settings['gabor_angles'] = [float(fl[Aind + 1:Aind +1 + 3])]

                    Gaborim = getfeatureimages(im_min, seg_min,
                                               image_type=image_type,
                                               gabor_settings=gabor_settings,
                                               types=['Gabor'])[0]
                    filename = fl + '_min_' + patient_IDs[minind] + '.png'
                    savename = os.path.join(args.out, filename)
                    scipy.misc.imsave(savename, np.fliplr(np.rot90(Gaborim, 3)))

                    Gaborim = getfeatureimages(im_max, seg_max,
                                               image_type=image_type,
                                               gabor_settings=gabor_settings,
                                               types=['Gabor'])[0]
                    filename = fl + '_max_' + patient_IDs[maxind] + '.png'
                    savename = os.path.join(args.out, filename)
                    scipy.misc.imsave(savename, np.fliplr(np.rot90(Gaborim, 3)))
                elif 'sf_' in fl or 'hf_' in fl or 'tf_GL' in fl:
                    # Save segmentation
                    Shapeim = getfeatureimages(im_min, seg_min,
                                               image_type=image_type,
                                               types=['Shape'])[0]
                    filename = fl + '_min_' + patient_IDs[minind] + '_seg.png'
                    savename = os.path.join(args.out, filename)
                    scipy.misc.imsave(savename, np.fliplr(np.rot90(Shapeim, 3)))

                    Shapeim = getfeatureimages(im_max, seg_max,
                                               image_type=image_type,
                                               types=['Shape'])[0]
                    filename = fl + '_max_' + patient_IDs[maxind] + '_seg.png'
                    savename = os.path.join(args.out, filename)
                    scipy.misc.imsave(savename, np.fliplr(np.rot90(Shapeim, 3)))

                    # Save images (the -1000 background HU value is
                    # zeroed out before writing).
                    Histogramim = getfeatureimages(im_min, seg_min,
                                                   image_type=image_type,
                                                   types=['Histogram'])[0]
                    Histogramim[Histogramim == -1000] = 0
                    filename = fl + '_min_' + patient_IDs[minind] + '_im.png'
                    savename = os.path.join(args.out, filename)
                    scipy.misc.imsave(savename, np.fliplr(np.rot90(Histogramim, 3)))

                    Histogramim = getfeatureimages(im_max, seg_max,
                                                   image_type=image_type,
                                                   types=['Histogram'])[0]
                    Histogramim[Histogramim == -1000] = 0
                    filename = fl + '_max_' + patient_IDs[maxind] + '_im.png'
                    savename = os.path.join(args.out, filename)
                    scipy.misc.imsave(savename, np.fliplr(np.rot90(Histogramim, 3)))
if __name__ == '__main__':
main()
| [
"m.starmans@erasmusmc.nl"
] | m.starmans@erasmusmc.nl |
beaad99b8141fc377596ba1a4a4746e9549172c0 | 155f9c610ad06d76685874270352903b06892c65 | /todo_list/todo_app/models.py | 049d9e853f878d7260f052f6591938f491d0b2a4 | [] | no_license | phadkesharan/TodoList-Django | abdb6fd6a4f466230f87463034ea994196d63129 | a7413456d626d2d6355cfd362d7a71161a119669 | refs/heads/main | 2023-06-19T01:07:54.105892 | 2021-07-20T14:13:39 | 2021-07-20T14:13:39 | 387,801,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from datetime import datetime
from django.db import models
import datetime
# Create your models here.
class TodoItem(models.Model):
content = models.TextField()
| [
"phadkesharan@gmail.com"
] | phadkesharan@gmail.com |
19084bc467a1952d5e8747cd85c9dc97fedaca64 | b81ec2dcd90c54379f6289eea52c4bf4fb995f86 | /graficar.py | 41fb258f6955eb1e9482861436729e118d2ebb94 | [] | no_license | cghernandez/tims-civ | f0bf0eec1a024fe2f40d61da90b88aeb8210f2d5 | 5e6f0b2b5d4071bba2c7cc6da3bc7e6d4569d66c | refs/heads/master | 2020-03-21T04:43:04.411085 | 2018-06-30T08:11:36 | 2018-06-30T08:11:36 | 138,123,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | import matplotlib.pyplot as plt
import numpy as np
import os
# Collect per-run statistics from the numbered result directories and
# plot means and standard deviations against the number of cities.

def _read_stats(path, skip_lines=3):
    """Return (maxpop, fails, meanpop) floats read from a stats file.

    The first *skip_lines* lines of the file are headers and are skipped;
    the next three lines hold the values, one per line.
    """
    with open(path, "r") as handle:
        for _ in range(skip_lines):
            handle.readline()
        maxpop = float(handle.readline())
        fails = float(handle.readline())
        meanpop = float(handle.readline())
    return maxpop, fails, meanpop

# FIX: previously this removed only "graficar.py" from os.listdir() and
# crashed (int() ValueError) on any other non-numeric entry. Keep only
# the numbered run directories, sorted numerically ascending.
directorios = sorted((d for d in os.listdir() if d.isdigit()), key=int)

# Data series, filled one entry per run directory.
indx = []
m_maxpop, m_fails, m_meanpop = [], [], []
s_maxpop, s_fails, s_meanpop = [], [], []

for carpeta in directorios:
    # MEANS and STDEVS share the same layout: 3 header lines, then
    # max population, failures, mean population.
    maxpop, fails, meanpop = _read_stats(os.path.join(carpeta, "MEANS"))
    m_maxpop.append(maxpop)
    m_fails.append(fails)
    m_meanpop.append(meanpop)

    maxpop, fails, meanpop = _read_stats(os.path.join(carpeta, "STDEVS"))
    s_maxpop.append(maxpop)
    s_fails.append(fails)
    s_meanpop.append(meanpop)

    indx.append(carpeta)

# One scatter plot (red dots) per series, shown one after another,
# in the same order as the original script.
_PLOTS = [
    ("Mean final population vs number of cities",
     "Mean final population", m_maxpop),
    ("Mean city failures vs number of cities",
     "Mean city failures", m_fails),
    ("Mean population per city vs number of cities",
     "Mean population per city", m_meanpop),
    ("Standard deviation of final population vs number of cities",
     "Standard deviation of final population", s_maxpop),
    ("Standard deviation of city failures vs number of cities",
     "Standard deviation of city failures", s_fails),
    ("Standard deviation of population per city vs number of cities",
     "Standard deviation of population per city", s_meanpop),
]
for titulo, ylabel, series in _PLOTS:
    plt.title(titulo)
    plt.xlabel("Number of cities")
    plt.ylabel(ylabel)
    plt.plot(indx, series, "ro")
    plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
242e93a4e5f81e325e0aa153d7112fe5fa19e57a | d1c7db3c433281a884141e220a301f4cf32cdbd3 | /genetic-maze/genetic_maze.py | 6ac19793b0196f6149a5edda9394a4643ae5d7e9 | [] | no_license | pairing-with-matt-and-mike/pairing | ad00079eca229e20689bd77347908ca33a199ec5 | 77a90328c8c54f39b160211207f3556b70960113 | refs/heads/master | 2023-08-18T08:25:01.402201 | 2023-07-26T20:50:09 | 2023-07-26T20:50:09 | 157,622,499 | 0 | 0 | null | 2023-07-19T12:19:35 | 2018-11-14T23:01:21 | Python | UTF-8 | Python | false | false | 2,536 | py | import pytest
import random
def test_initialise_population():
    # A fresh population has the requested size and every genome is a
    # single valid WASD gene.
    generation = initialise_population(5)
    assert len(generation) == 5
    assert all([len(genome) == 1 for genome in generation])
    assert all([
        gene in ("w", "a", "s", "d")
        for genome in generation
        for gene in genome
    ])
# For each goal cell, a genome that steps toward it must score strictly
# better (lower fitness tuple) than one that steps away or stops short.
@pytest.mark.parametrize("end, good_genome, bad_genome", (
    (( 0, 1), "w", "s"),
    ((-1, 0), "a", "w"),
    (( 0, -1), "s", "w"),
    (( 1, 0), "d", "w"),
    (( 0, 2), "ww", "w"),
    (( 0, 2), "w", "")
))
def test_genomes_closer_to_goal_are_fitter(end, good_genome, bad_genome):
    maze = {
        "start": (0, 0),
        "end": end,
    }
    assert fitness(maze, good_genome) < fitness(maze, bad_genome)
def move(pos, gene):
    """Return the position reached by applying one WASD *gene* to *pos*."""
    offsets = {
        "w": ( 0, 1),
        "a": (-1, 0),
        "s": ( 0, -1),
        "d": ( 1, 0),
    }
    dx, dy = offsets[gene]
    x, y = pos
    return (x + dx, y + dy)
def distance(pos_a, pos_b):
    """Manhattan distance between two (x, y) positions."""
    (ax, ay), (bx, by) = pos_a, pos_b
    return abs(ax - bx) + abs(ay - by)
def fitness(maze, genome):
    """Score a genome as (final distance to goal, genome length); lower is fitter."""
    position = maze["start"]
    for step in genome:
        position = move(position, step)
    return (distance(maze["end"], position), len(genome))
def mutate(genome):
    """Return *genome* extended with one random WASD gene."""
    new_gene = random.choice("wasd")
    return f"{genome}{new_gene}"
def initialise_population(n):
    # Start from n empty genomes, each mutated once into a single gene.
    return [mutate("") for _ in range(n)]
def test_select():
    # "w" lands on the goal (distance 0) while "a" moves away, so a
    # selection of size 1 keeps only "w".
    maze = {
        "start": (0, 0),
        "end": (0, 1),
    }
    assert select(maze, ["a", "w"], 1) == ["w"]
def select(maze, generation, n):
    # Sort ascending by the fitness tuple (distance first, then genome
    # length) and keep the n fittest genomes.
    return sorted(generation, key = lambda genome: fitness(maze, genome))[:n]
def next_generation(maze, generation):
    # Keep the 5 fittest genomes as parents.
    parents = select(maze, generation, 5)
    random.shuffle(parents)
    # Each parent survives and contributes three mutated offspring;
    # frozenset removes duplicate genomes before the next round.
    x = list(frozenset([
        genome
        for parent in parents
        for genome in (parent, mutate(parent), mutate(parent), mutate(parent))
    ]))
    random.shuffle(x)
    return x
def main():
    # Evolve a population toward the (10, 10) goal for a fixed number of
    # generations, printing each generation sorted fittest-first.
    number_of_generations = 5
    generation = initialise_population(10)
    maze = {
        "start": (0, 0),
        "end": (10, 10),
    }
    for i in range(number_of_generations):
        generation = next_generation(maze, generation)
        print("=== Gen {} ===".format(i))
        # select with n == len(generation) is just a fitness sort.
        for genome in select(maze, generation, len(generation)):
            print(genome)
    # Report the 10 fittest genomes of the final generation.
    print("=== BEST ===")
    for genome in select(maze, generation, 10):
        print(genome)
# for a generation
# - run the fitness function, if great then done
# - pick best
# - mutate and breed
if __name__ == "__main__":
main()
| [
"thatismatt@gmail.com"
] | thatismatt@gmail.com |
fceb9153a4fcf2f6a900430aa71bd6c15f273e57 | f53aec5df3b285b5f92716852fc1f478c7f2dac6 | /workdesk/urls.py | 1deea242946b75c76b4f9c95428fa2db394d4776 | [] | no_license | vaishnavi-gupta18/WorkDesk-Backend | 46789f0880b90650267f84c0b725d58e2e632a31 | 3d76ad77e9fdf88475e3d160872e40d288bea6d8 | refs/heads/master | 2023-08-15T20:28:02.572189 | 2021-10-23T11:59:47 | 2021-10-23T11:59:47 | 400,776,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | from rest_framework_nested import routers
from django.urls import path
from .views import ShortProjectViewSet, ProjectViewSet, UserProjectViewSet, UserCardViewSet, ListViewSet, CardViewSet, MemberViewSet, UserViewSet, GroupViewSet, CommentViewSet
# DRF router wiring for the WorkDesk API.
# Top-level resources:
router = routers.SimpleRouter()
router.register('user',UserViewSet)
router.register('group',GroupViewSet)
router.register('member',MemberViewSet)
# 'home' lists abbreviated projects; basename is required because the
# viewset has no queryset attribute to infer it from.
router.register('home',ShortProjectViewSet,basename='projectlist')
router.register('project',ProjectViewSet)
router.register('userproject',UserProjectViewSet,basename='userproject')
router.register('usercard',UserCardViewSet,basename='usercard')
router.register('List',ListViewSet)
router.register('Card',CardViewSet)
# Nested routes: /project/<project_pk>/list/ and /List/<List_pk>/card/
list_router = routers.NestedSimpleRouter(router, 'project', lookup='project')
list_router.register('list',ListViewSet,basename='project-list')
card_router = routers.NestedSimpleRouter(router, 'List', lookup='List')
card_router.register('card',CardViewSet,basename='List-card')
| [
"vaishnavi_g@me.iitr.ac.in"
] | vaishnavi_g@me.iitr.ac.in |
0ab091f1bac3f6b3782abb3cf2f34ba686b858fc | 6dcd5f4bb4c39e2d887e5d557e188ba4c8a75081 | /src/UsersDB.py | 3d3f2264fceef218c5169ec87a6f6ca4b65d695f | [] | no_license | Pella86/HappyRateBot | 815653033593aedc22c779025d00bddec4614f46 | f23f786a3c9dc19f2378958470d82974d018bd64 | refs/heads/master | 2020-03-22T00:16:38.670215 | 2018-07-22T11:50:53 | 2018-07-22T11:50:53 | 139,234,809 | 1 | 1 | null | 2018-07-22T06:41:21 | 2018-06-30T09:01:21 | Python | UTF-8 | Python | false | false | 4,288 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 12:10:14 2018
@author: Mauro
"""
#==============================================================================
# Imports
#==============================================================================
# py imports
import os
import hashlib
import string
# my imports
import Databases
import UserProfile
import random
import Logging
#==============================================================================
# logging
#==============================================================================
# create logger
log = Logging.get_logger(__name__, "WARNING")
#==============================================================================
# Helpers
#==============================================================================
def get_hash_id(personid):
    # Derive a stable anonymous key (SHA-256 digest) for a Telegram user id.
    # NOTE(review): bytes(personid) on an int yields `personid` ZERO bytes,
    # so the digest encodes only the id's magnitude and the temporary
    # allocation grows linearly with the id (very large for real Telegram
    # ids). str(personid).encode() would be safer, but changing it would
    # invalidate every hash already persisted on disk -- confirm before
    # fixing.
    pid = hashlib.sha256()
    pid.update(bytes(personid))
    return pid.digest()
#==============================================================================
# User database
#==============================================================================
class UsersDB:
    """Persistent store of user profiles, keyed by hashed Telegram id."""

    def __init__(self):
        # Main user database, loaded from disk on startup.
        self.folder = "./databases/user_db"
        if not os.path.isdir(self.folder):
            os.mkdir(self.folder)

        self.database = Databases.Database(self.folder, "user_")
        self.database.loadDB()
        self.database.update_uid()

        log.info("loaded users database")

        # Banned-users database.
        # NOTE(review): this database is created but never loaded nor
        # written to anywhere in this class -- banUser() only deletes
        # the profile. Confirm whether bans were meant to be persisted.
        folder = "./databases/banned_user_db"
        if not os.path.isdir(folder):
            os.mkdir(folder)

        self.banned_database = Databases.Database(folder, "banned_user_")

    def getUsersList(self):
        """Return all stored UserProfile objects."""
        return self.database.getValues()

    def check_nickname(self, user, text):
        """Validate *text* as a new display nickname and apply it to *user*.

        Returns True on success, otherwise an error-description string:
        "too short" (< 3 chars), "too long" (>= 15 chars),
        "invalid character" (non-alphanumeric), or "already present".
        """
        error_message = None

        alphanumeric = string.ascii_letters + string.digits

        if len(text) < 3:
            error_message = "too short"
        elif len(text) >= 15:
            error_message = "too long"
        elif not all(c in alphanumeric for c in text):
            error_message = "invalid character"
        elif text in [u.display_id for u in self.database.getValues()]:
            error_message = "already present"

        if error_message is None:
            user.display_id = text
            self.database[user.hash_id].setData(user)
            return True
        else:
            return error_message

    def banUser(self, user):
        """Remove a banned user's profile from the database.

        FIX: the previous version fetched the user's record into an
        unused local variable before deleting; the no-op lookup was
        dropped (deleteUser performs the same lookup and raises the
        same error for a missing user).
        """
        self.deleteUser(user)

    def addUser(self, person, chatid):
        """Create and store a profile for *person* unless already known."""
        # hash the id
        hash_id = get_hash_id(person.id)

        if self.database.isNew(hash_id):
            log.info("added new user to database: {}".format(self.database.short_uid))

            # create a unique display id in [0x10000000, 0xFFFFFFFF]
            start_number = 0x10000000
            stop_number = 0xFFFFFFFF
            display_id = random.randint(start_number, stop_number)
            log.debug("display id {}".format(display_id))

            # re-draw until the display id is unique among existing users
            display_id_list = [user.display_id for user in self.database.getValues()]
            while display_id in display_id_list:
                display_id = random.randint(start_number, stop_number)
                log.debug("new display id {}".format(display_id))

            # language defaults to English when Telegram gives none
            lang_tag = person.language_code if person.language_code else "en"

            # user instance
            user = UserProfile.UserProfile(hash_id, display_id, chatid, lang_tag)
            data = Databases.Data(hash_id, user)
            self.database.addData(data)

    def deleteUser(self, user):
        """Remove *user*'s record from the database."""
        data = self.database[user.hash_id]
        self.database.deleteItem(data)

    def hGetUser(self, hash_id):
        """Look up a profile directly by its hash id."""
        return self.database[hash_id].getData()

    def getUser(self, person):
        """Look up the profile of a Telegram *person* (must already exist)."""
        log.debug("User already in database, got user")
        hash_id = get_hash_id(person.id)
        return self.database[hash_id].getData()

    def setUser(self, user):
        """Persist changes made to an existing *user* profile."""
        self.database[user.hash_id].setData(user)

    def update(self):
        """Flush the database to disk."""
        log.info("updating database...")
        self.database.updateDB()
| [
"pigmeo127@gmail.com"
] | pigmeo127@gmail.com |
108e02ca3f0e8e3ea1c4460e16956878f2407df1 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/virtual_network_tap.py | bcdbaf91e38252e9c1258aa4d97c8130368e7538 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,511 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualNetworkTapInitArgs', 'VirtualNetworkTap']
@pulumi.input_type
class VirtualNetworkTapInitArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input['FrontendIPConfigurationArgs']] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input['NetworkInterfaceIPConfigurationArgs']] = None,
destination_port: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VirtualNetworkTap resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['FrontendIPConfigurationArgs'] destination_load_balancer_front_end_ip_configuration: The reference to the private IP address on the internal Load Balancer that will receive the tap.
:param pulumi.Input['NetworkInterfaceIPConfigurationArgs'] destination_network_interface_ip_configuration: The reference to the private IP Address of the collector nic that will receive the tap.
:param pulumi.Input[int] destination_port: The VXLAN destination port that will receive the tapped traffic.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] tap_name: The name of the virtual network tap.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if destination_load_balancer_front_end_ip_configuration is not None:
pulumi.set(__self__, "destination_load_balancer_front_end_ip_configuration", destination_load_balancer_front_end_ip_configuration)
if destination_network_interface_ip_configuration is not None:
pulumi.set(__self__, "destination_network_interface_ip_configuration", destination_network_interface_ip_configuration)
if destination_port is not None:
pulumi.set(__self__, "destination_port", destination_port)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tap_name is not None:
pulumi.set(__self__, "tap_name", tap_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
def destination_load_balancer_front_end_ip_configuration(self) -> Optional[pulumi.Input['FrontendIPConfigurationArgs']]:
"""
The reference to the private IP address on the internal Load Balancer that will receive the tap.
"""
return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
@destination_load_balancer_front_end_ip_configuration.setter
def destination_load_balancer_front_end_ip_configuration(self, value: Optional[pulumi.Input['FrontendIPConfigurationArgs']]):
pulumi.set(self, "destination_load_balancer_front_end_ip_configuration", value)
@property
@pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
def destination_network_interface_ip_configuration(self) -> Optional[pulumi.Input['NetworkInterfaceIPConfigurationArgs']]:
"""
The reference to the private IP Address of the collector nic that will receive the tap.
"""
return pulumi.get(self, "destination_network_interface_ip_configuration")
@destination_network_interface_ip_configuration.setter
def destination_network_interface_ip_configuration(self, value: Optional[pulumi.Input['NetworkInterfaceIPConfigurationArgs']]):
pulumi.set(self, "destination_network_interface_ip_configuration", value)
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> Optional[pulumi.Input[int]]:
"""
The VXLAN destination port that will receive the tapped traffic.
"""
return pulumi.get(self, "destination_port")
@destination_port.setter
def destination_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "destination_port", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tapName")
def tap_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the virtual network tap.
"""
return pulumi.get(self, "tap_name")
@tap_name.setter
def tap_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tap_name", value)
class VirtualNetworkTap(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]] = None,
destination_port: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Virtual Network Tap resource.
API Version: 2020-11-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']] destination_load_balancer_front_end_ip_configuration: The reference to the private IP address on the internal Load Balancer that will receive the tap.
:param pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']] destination_network_interface_ip_configuration: The reference to the private IP Address of the collector nic that will receive the tap.
:param pulumi.Input[int] destination_port: The VXLAN destination port that will receive the tapped traffic.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] tap_name: The name of the virtual network tap.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VirtualNetworkTapInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Virtual Network Tap resource.
API Version: 2020-11-01.
:param str resource_name: The name of the resource.
:param VirtualNetworkTapInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VirtualNetworkTapInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]] = None,
destination_port: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VirtualNetworkTapInitArgs.__new__(VirtualNetworkTapInitArgs)
__props__.__dict__["destination_load_balancer_front_end_ip_configuration"] = destination_load_balancer_front_end_ip_configuration
__props__.__dict__["destination_network_interface_ip_configuration"] = destination_network_interface_ip_configuration
__props__.__dict__["destination_port"] = destination_port
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["tap_name"] = tap_name
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interface_tap_configurations"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network/v20180801:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20181001:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20181101:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20181201:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190201:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190401:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190601:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190701:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190801:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20190901:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20191101:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20191201:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200301:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200401:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200501:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200601:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200701:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20200801:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20201101:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20210201:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20210301:VirtualNetworkTap"), pulumi.Alias(type_="azure-native:network/v20210501:VirtualNetworkTap")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkTap, __self__).__init__(
'azure-native:network:VirtualNetworkTap',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkTap':
"""
Get an existing VirtualNetworkTap resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VirtualNetworkTapInitArgs.__new__(VirtualNetworkTapInitArgs)
__props__.__dict__["destination_load_balancer_front_end_ip_configuration"] = None
__props__.__dict__["destination_network_interface_ip_configuration"] = None
__props__.__dict__["destination_port"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interface_tap_configurations"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return VirtualNetworkTap(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
def destination_load_balancer_front_end_ip_configuration(self) -> pulumi.Output[Optional['outputs.FrontendIPConfigurationResponse']]:
"""
The reference to the private IP address on the internal Load Balancer that will receive the tap.
"""
return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
@property
@pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
def destination_network_interface_ip_configuration(self) -> pulumi.Output[Optional['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
The reference to the private IP Address of the collector nic that will receive the tap.
"""
return pulumi.get(self, "destination_network_interface_ip_configuration")
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> pulumi.Output[Optional[int]]:
"""
The VXLAN destination port that will receive the tapped traffic.
"""
return pulumi.get(self, "destination_port")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaceTapConfigurations")
def network_interface_tap_configurations(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']]:
"""
Specifies the list of resource IDs for the network interface IP configuration that needs to be tapped.
"""
return pulumi.get(self, "network_interface_tap_configurations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the virtual network tap resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resource GUID property of the virtual network tap resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | noreply@github.com |
f19684c5167e1475dcd6b88c9c8c5a3057fd4041 | 6ea52c93be4ede7cc58c22f6600570fe3fde716a | /tests/pyro_test_suite/model18.py | 1e965523b3994af11a2987c453b530ff4ef31e23 | [
"MIT"
] | permissive | wonyeol/static-analysis-for-support-match | 7158797d3766850df0fc96ba4e0dadc8428bf32a | 850fb58ec5ce2f5e82262c2a9bfc067b799297c1 | refs/heads/master | 2022-12-01T21:33:46.511474 | 2022-11-20T23:35:47 | 2022-11-20T23:35:47 | 216,757,903 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | # test_iplate_in_guide_not_model_error [subsample_size=None]
p = torch.tensor(0.5)
pyro.sample("x", Bernoulli(p))
| [
"xavier.rival@gmail.com"
] | xavier.rival@gmail.com |
edb029758ac8cbce4d3df897a65c1851278e95e8 | 7ac585c7e59aa8a2085d2f778b08de1c8aba74e7 | /mysite/marcador/models.py | 26d3cb30282815763e4788f22f5192d9009a28c3 | [] | no_license | IbraDirir/MarcadoreApp | 92ee1eaad29bfb604de6190032542d9556fbc372 | f6a0757e4da7af4c62ad237d3d26877fbe58d5b2 | refs/heads/master | 2021-01-10T17:51:47.535066 | 2015-10-23T13:44:16 | 2015-10-23T13:44:16 | 43,567,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=50, unique=True)
class Meta:
verbose_name = 'tag'
verbose_name_plural = 'tags'
ordering = ['name']
def __str__(self):
return self.name
class PublicBookmarkManager(models.Manager):
def get_queryset(self):
qs = super(PublicBookmarkManager, self).get_queryset()
return qs.filter(is_public=True)
@python_2_unicode_compatible
class Bookmark(models.Model):
url = models.URLField()
title = models.CharField('title', max_length=255)
description = models.TextField('description', blank=True)
is_public = models.BooleanField('public', default=True)
date_created = models.DateTimeField('date created')
date_updated = models.DateTimeField('date updated')
owner = models.ForeignKey(User, verbose_name='owner',
related_name='bookmarks')
tags = models.ManyToManyField(Tag, blank=True)
objects = models.Manager()
public = PublicBookmarkManager()
class Meta:
verbose_name = 'bookmark'
verbose_name_plural = 'bookmarks'
ordering = ['-date_created']
def __str__(self):
return '%s (%s)' % (self.title, self.url)
def save(self, *args, **kwargs):
if not self.id:
self.date_created = now()
self.date_updated = now()
super(Bookmark, self).save(*args, **kwargs)
# Create your models here.
| [
"khaliildirir@gmail.com"
] | khaliildirir@gmail.com |
f7406062c70e33b42b577dad9a4000dbac3b613b | 9a192289d99d68eef6b81c0f7d5eb54c5bfe8fce | /app01/models.py | 271ca6165f19c85942ead9689ad1860074a6757f | [] | no_license | ishexintong/myexcel | fedf5fb4d418be3da6bbc1e51cfeb2fd20ee1a49 | ab0cb4d9492a723192394688e541643f09188d5c | refs/heads/master | 2020-04-11T02:19:33.676702 | 2018-12-12T06:32:19 | 2018-12-12T06:32:19 | 161,441,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
class UserInfo(AbstractUser):
'''
用户信息表
'''
nid =models.AutoField(primary_key=True)
telephone=models.CharField(max_length=11,null=True,blank=True,unique=True,verbose_name='电话号码')
avatar=models.FileField(upload_to='static/imgs/',default='static/imgs/default.png',verbose_name='用户头像')
create_time=models.DateTimeField(auto_now_add=True,verbose_name='创建时间')
class Meta:
verbose_name_plural='用户信息表'
def __str__(self):
return self.username
class Student(models.Model):
'''
学生信息表
'''
nid=models.AutoField(primary_key=True)
std_num=models.CharField(max_length=11,verbose_name='学号')
name=models.CharField(max_length=64,verbose_name='姓名')
telephone=models.CharField(max_length=11,verbose_name='电话',blank=True,null=True)
qq=models.CharField(max_length=11,verbose_name='qq',null=True,blank=True)
std_grade=models.CharField(max_length=23,verbose_name='年级')
std_class=models.CharField(max_length=24,verbose_name='班级')
class Meta:
verbose_name_plural='学生信息表'
def __str__(self):
return self.name
class StdExcel(models.Model):
'''
学生excel文件
'''
nid=models.AutoField(primary_key=True)
stdfile=models.FileField(upload_to='static/stdfiles/',verbose_name='学生excel文件路径')
upload_time=models.DateTimeField(auto_now_add=True,verbose_name='上传文件时间') | [
"137841632@qq.com"
] | 137841632@qq.com |
ca2e60ef61a63bcc4473f3bb4ca159430fb5c13a | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/intentions/PyAnnotateTypesIntentionTest/methodAfterConstructorCall.py | 0cdc87e27827504a3baf5a3c8d4524a6604e3e8c | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 133 | py | class MyClass:
def __init__(self):
pass
def method(self, x):
pass
x = MyClass()
foo = x.met<caret>hod(42)
| [
"mikhail.golubev@jetbrains.com"
] | mikhail.golubev@jetbrains.com |
c15b33d0d47051e9aaec2b8beddb6d995bf4ea34 | 9ab29e9f548d6cf85e7bd073fa4217425a76a92b | /hand_oop.py | 0b40391db810988cb0636abab0b420e36fef4d4d | [] | no_license | Fabaladibbasey/MITX_6.00.1 | 888010ad10d0199dc677da810b77698d6799ae9e | 287bfe1ec47479eeb1d62707163b6edad276842c | refs/heads/main | 2023-03-25T00:09:52.543278 | 2021-03-23T19:43:55 | 2021-03-23T19:43:55 | 338,302,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py | import random
class Hand(object):
def __init__(self, n):
'''
Initialize a Hand.
n: integer, the size of the hand.
'''
assert type(n) == int
self.HAND_SIZE = n
self.VOWELS = 'aeiou'
self.CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
# Deal a new hand
self.dealNewHand()
def dealNewHand(self):
'''
Deals a new hand, and sets the hand attribute to the new hand.
'''
# Set self.hand to a new, empty dictionary
self.hand = {}
# Build the hand
numVowels = self.HAND_SIZE // 3
for i in range(numVowels):
x = self.VOWELS[random.randrange(0,len(self.VOWELS))]
self.hand[x] = self.hand.get(x, 0) + 1
for i in range(numVowels, self.HAND_SIZE):
x = self.CONSONANTS[random.randrange(0,len(self.CONSONANTS))]
self.hand[x] = self.hand.get(x, 0) + 1
def setDummyHand(self, handString):
'''
Allows you to set a dummy hand. Useful for testing your implementation.
handString: A string of letters you wish to be in the hand. Length of this
string must be equal to self.HAND_SIZE.
This method converts sets the hand attribute to a dictionary
containing the letters of handString.
'''
assert len(handString) == self.HAND_SIZE, "Length of handString ({0}) must equal length of HAND_SIZE ({1})".format(len(handString), self.HAND_SIZE)
self.hand = {}
for char in handString:
self.hand[char] = self.hand.get(char, 0) + 1
def calculateLen(self):
'''
Calculate the length of the hand.
'''
ans = 0
for k in self.hand:
ans += self.hand[k]
return ans
def __str__(self):
'''
Display a string representation of the hand.
'''
output = ''
hand_keys = sorted(self.hand.keys())
for letter in hand_keys:
for j in range(self.hand[letter]):
output += letter
return output
def update(self, word):
"""
Does not assume that self.hand has all the letters in word.
Updates the hand: if self.hand does have all the letters to make
the word, modifies self.hand by using up the letters in the given word.
Returns True if the word was able to be made with the letter in
the hand; False otherwise.
word: string
returns: Boolean (if the word was or was not made)
"""
# Your code here
newHand = {}
newHand = self.hand.copy()
for letter in word:
newHand[letter] = newHand.get(letter, 0) - 1
for key in newHand:
if newHand.get(key, 0) < 0:
return False
self.hand = newHand
return True
myHand = Hand(7)
print(myHand)
print(myHand.calculateLen())
myHand.setDummyHand('aazzmsp')
print(myHand)
print(myHand.calculateLen())
print(myHand.update('zaama'))
print(myHand)
| [
"fabaladibbasey27@gmail.com"
] | fabaladibbasey27@gmail.com |
449f604ea7700b07098af426afd1d4ba361050c4 | 9fef0d8935cb1c1775614b21c5a1dcc1d84686e0 | /assign3/qlearningAgents.py | 281c9f0c7fe2faf80635ac17b97182d575266a69 | [] | no_license | dcrozz/ai-and-machine-learn-in-hku | 54902e96d28fc61d99ceac604d32f269317a2bca | 29b2dca5aa982a03b43b721ad9f07b2e9a4e6456 | refs/heads/master | 2021-01-22T05:27:55.712336 | 2017-05-10T06:53:11 | 2017-05-10T06:53:11 | 81,663,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,486 | py | from game import *
from learningAgents import ReinforcementAgent
from featureExtractors import *
import random,util,math
class QLearningAgent(ReinforcementAgent):
"""
Q-Learning Agent
Functions you should fill in:
- computeValueFromQValues
- computeActionFromQValues
- getQValue
- getAction
- update
Instance variables you have access to
- self.epsilon (exploration prob)
- self.alpha (learning rate)
- self.discount (discount rate)
Functions you should use
- self.getLegalActions(state)
which returns legal actions for a state
"""
def __init__(self, **args):
"You can initialize Q-values here..."
ReinforcementAgent.__init__(self, **args)
"*** YOUR CODE HERE ***"
self.qvalues = {}
def getQValue(self, state, action):
"""
Returns Q(state,action)
Should return 0.0 if we have never seen a state
or the Q node value otherwise
"""
"*** YOUR CODE HERE ***"
if (state,action) not in self.qvalues:
return 0.0
else:
return self.qvalues[(state, action)]
def computeValueFromQValues(self, state):
"""
Returns max_action Q(state,action)
where the max is over legal actions. Note that if
there are no legal actions, which is the case at the
terminal state, you should return a value of 0.0.
"""
"*** YOUR CODE HERE ***"
possibleActions = self.getLegalActions(state)
if len(possibleActions) == 0:
return 0
value = None
result = None
for action in possibleActions:
temp = self.getQValue(state, action)
if value is None or temp > value:
value = temp
result = action
if value is None:
value = 0
return value
def computeActionFromQValues(self, state):
"""
Compute the best action to take in a state. Note that if there
are no legal actions, which is the case at the terminal state,
you should return None.
"""
"*** YOUR CODE HERE ***"
possibleActions = self.getLegalActions(state)
if len(possibleActions) == 0:
return None
result = max(possibleActions,
key=lambda x: self.computeQValueFromValues(state, x))
# value = None
# result = None
# for action in possibleActions:
# temp = self.getQValue(state, action)
# if value is None or temp > value:
# value = temp
# result = action
return result
def getAction(self, state):
"""
Compute the action to take in the current state. With
probability self.epsilon, we should take a random action and
take the best policy action otherwise. Note that if there are
no legal actions, which is the case at the terminal state, you
should choose None as the action.
HINT: You might want to use util.flipCoin(prob)
HINT: To pick randomly from a list, use random.choice(list)
"""
# Pick Action
legalActions = self.getLegalActions(state)
action = None
"*** YOUR CODE HERE ***"
if len(legalActions) == 0:
return None
if util.flipCoin(self.epsilon):
return random.choice(legalActions)
return self.computeActionFromQValues(state)
def update(self, state, action, nextState, reward):
"""
The parent class calls this to observe a
state = action => nextState and reward transition.
You should do your Q-Value update here
NOTE: You should never call this function,
it will be called on your behalf
"""
"*** YOUR CODE HERE ***"
if (state, action) not in self.qvalues:
self.qvalues[(state, action)] = 0.0
nextStateValue = self.computeValueFromQValues(nextState)
curStateValue = self.qvalues[(state, action)]
calculation = reward + (self.discount * nextStateValue) - curStateValue
self.qvalues[(state, action)] = curStateValue + (self.alpha * calculation)
def getPolicy(self, state):
return self.computeActionFromQValues(state)
def getValue(self, state):
return self.computeValueFromQValues(state)
class PacmanQAgent(QLearningAgent):
"Exactly the same as QLearningAgent, but with different default parameters"
def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):
"""
These default parameters can be changed from the pacman.py command line.
For example, to change the exploration rate, try:
python pacman.py -p PacmanQLearningAgent -a epsilon=0.1
alpha - learning rate
epsilon - exploration rate
gamma - discount factor
numTraining - number of training episodes, i.e. no learning after these many episodes
"""
args['epsilon'] = epsilon
args['gamma'] = gamma
args['alpha'] = alpha
args['numTraining'] = numTraining
self.index = 0 # This is always Pacman
QLearningAgent.__init__(self, **args)
def getAction(self, state):
"""
Simply calls the getAction method of QLearningAgent and then
informs parent of action for Pacman. Do not change or remove this
method.
"""
action = QLearningAgent.getAction(self,state)
self.doAction(state,action)
return action
class ApproximateQAgent(PacmanQAgent):
"""
ApproximateQLearningAgent
You should only have to overwrite getQValue
and update. All other QLearningAgent functions
should work as is.
"""
def __init__(self, extractor='IdentityExtractor', **args):
self.featExtractor = util.lookup(extractor, globals())()
PacmanQAgent.__init__(self, **args)
self.weights = util.Counter()
def getWeights(self):
return self.weights
def getQValue(self, state, action):
"""
Should return Q(state,action) = w * featureVector
where * is the dotProduct operator
"""
"*** YOUR CODE HERE ***"
features = self.featExtractor.getFeatures(state, action)
result = 0
for feature in features:
result += self.weights[feature] * features[feature]
return result
def update(self, state, action, nextState, reward):
"""
Should update your weights based on transition
"""
"*** YOUR CODE HERE ***"
features = self.featExtractor.getFeatures(state, action)
correction = reward + self.discount*self.getValue(nextState) - self.getQValue(state, action)
for feature in features:
self.weights[feature] += self.alpha * correction * features[feature]
def final(self, state):
"Called at the end of each game."
# call the super-class final method
PacmanQAgent.final(self, state)
# did we finish training?
if self.episodesSoFar == self.numTraining:
# you might want to print your weights here for debugging
"*** YOUR CODE HERE ***"
print("Final weights vector: ")
print(self.weights)
pass
| [
"ukeyim@gmail.com"
] | ukeyim@gmail.com |
4e0b3787572a1137eef7f4042ec28f2cc0c86c90 | 077364ec7bae9bb80a3e36f5939584c3cf6529b0 | /mooc/q1_print.py | 3116ac0e2fb8ef104acb7f6629975221028cdb83 | [] | no_license | fsi166771/mygit | 6d86a855255493796ff9ef6156601873aecfb376 | 4169228cdbbc2eb2834077d69df1caf6020cd336 | refs/heads/master | 2020-12-03T00:40:38.290730 | 2017-06-30T08:26:39 | 2017-06-30T08:26:39 | 96,060,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 字符串连接
str1 = raw_input('输入一个人的名字:')
str2 = raw_input('输入一个国家名字:')
print '世界这大,{}想去{}看看。'.format(str1, str2)
| [
"zjak1@163.com"
] | zjak1@163.com |
55c2841b5ae6ddfc0e8c0cb6f34e33306f5fca3a | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_pragma58.py | 8e84b65dd9e10c0774f2965011964ccb0cbd933f | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=11
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=8
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma58.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
b109044623ec7042559e3c7e07f763d3a3bc98a2 | 99c86d3a486e839e6df4b8ed57f631c51c4fd298 | /helpdesk/models.py | 65fdef5799407275b3a25ba4988030665c163b6e | [] | no_license | sgalichenko/helpdesk | 53114d9caab75bd3eb7e50d4cfe6a167d6d877d7 | 77a521e42faa2dd374e438844098d88fb368ce15 | refs/heads/master | 2021-05-28T11:50:53.388355 | 2015-01-18T08:56:51 | 2015-01-18T08:56:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | from django.db import models
from django.contrib.auth.models import User, Group
import datetime
import os
import random
import time
class Ticket(models.Model):
    """Help-desk ticket opened by a user and assigned to a handling group.

    ``publication_date`` and ``id`` are filled in automatically on the
    first save (see ``save`` below).
    """
    title = models.CharField(max_length=200)
    author = models.ForeignKey(User)  # user who opened the ticket
    publication_date = models.DateTimeField(blank=False, null=False)
    # Optional contact details for the person the ticket concerns.
    person = models.CharField(max_length=200, blank=True, null=True)
    email = models.EmailField(max_length=75, blank=True, null=True)
    phone = models.CharField(max_length=15, blank=True, null=True)
    text = models.TextField(max_length=10000)
    group = models.ForeignKey(Group)  # group responsible for handling it
    isopen = models.BooleanField(default=True, blank=False, null=False)
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        # First save only: stamp the creation time and derive a primary key
        # from the 10-digit Unix timestamp with its digit groups rearranged
        # (last 4 digits + middle 2 + first 4).
        # NOTE(review): two tickets created within the same second get the
        # same id -- collision risk; confirm this is acceptable.
        if not self.id:
            self.publication_date = datetime.datetime.now()
            #self.id = int(time.time())
            timestr = str(int(time.time()))
            self.id = int(timestr[6:]+timestr[4:6]+timestr[:4])
            #modulo = 999983 # prime
            #incrementor = 171803 # relative prime
            #self.id = 100003 # some start value
            #self.id = (self.id + incrementor) % modulo
        return super(Ticket, self).save(*args,**kwargs)
class TicketComment(models.Model):
    """A single comment attached to a :class:`Ticket`.

    ``publication_date`` is stamped automatically on first save.
    """
    ticket = models.ForeignKey(Ticket)
    text = models.TextField(max_length=10000)
    author = models.ForeignKey(User)
    publication_date = models.DateTimeField(blank=False, null=False)
    def __str__(self):
        return self.text
    def save(self, *args, **kwargs):
        # Stamp the creation time only when the comment is first persisted.
        if not self.id:
            self.publication_date = datetime.datetime.now()
        return super(TicketComment, self).save(*args,**kwargs)
#class UserGroup(models.Model):
# name = models.CharField(max_length=100)
# userlist = models.ForeignKey(User, related_name="users")
# creation_date = models.DateTimeField(blank=False, null=False)
# created_by = models.ForeignKey(User, related_name="createdby")
#
# def __str__(self):
# return self.name
#
# def save(self, *args, **kwargs):
# if not self.id:
# self.creation_date = datetime.datetime.now()
# return super(UserGroup, self).save(*args,**kwargs)
| [
"gyroflop@gmail.com"
] | gyroflop@gmail.com |
daa51845b3982cae1a63d5bc3308a044b627aa78 | 295e5ae4339015f36a2d49a9429012a4f1da21e6 | /project/network/mlp.py | 6e8a527ec6ad6d326b9ffbc6f4d70d6b0500d264 | [] | no_license | AlfioEmanueleFresta/cs-project | fb460850de68a47a55bf5d543d424b7279c8bac5 | 3f676b59a2d7ca870c5de2517c79b4396c05e38a | refs/heads/master | 2021-07-08T20:43:44.484197 | 2017-10-07T21:59:46 | 2017-10-07T21:59:46 | 68,594,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | import lasagne
import theano.tensor as T
from .generic import GenericNetwork
class MLPNetwork(GenericNetwork):
def defaults(self):
defaults = super(MLPNetwork, self).defaults()
defaults.update({
# Dense Layers
'dense_layers': 2,
'dense_layers_activation': lasagne.nonlinearities.sigmoid,
'dense_layers_w': lasagne.init.GlorotUniform,
'dense_layers_size': 500,
'dense_layers_dropout': 0.3,
'output_layer_activation': lasagne.nonlinearities.softmax,
# Training options
'train_objective': lasagne.objectives.categorical_crossentropy,
'train_max_epochs': 500,
'train_updates': lasagne.updates.nesterov_momentum,
'train_updates_learning_rate': 0.01,
'train_updates_momentum': 0.9,
# General configuration
'allow_input_downcast': True,
'verbose': True,
})
return defaults
def __init__(self,
max_words_per_sentence,
**kwargs):
kwargs.update({'max_words_per_sentence': max_words_per_sentence})
self.input_var = T.tensor3('inputs')
self.target_var = T.matrix('targets')
super(MLPNetwork, self).__init__(**kwargs)
def build_network(self):
# Building the neural network
#########################################################
# TODO Implement number of LSTM and Dense Layers options -- these are currently ignored.
# Input and mask layer
input_shape = (None, self.max_words_per_sentence, self.input_features_no)
l_in = lasagne.layers.InputLayer(shape=input_shape, input_var=self.input_var)
l_dense = l_in
for i in range(self.dense_layers):
l_dense = lasagne.layers.DenseLayer(l_dense, num_units=self.dense_layers_size,
nonlinearity=self.dense_layers_activation,
W=self.dense_layers_w())
l_dense = lasagne.layers.DropoutLayer(l_dense, p=self.dense_layers_dropout)
l_out = lasagne.layers.DenseLayer(l_dense, num_units=self.output_categories_no,
nonlinearity=self.output_layer_activation,
W=self.dense_layers_w())
#l_out = lasagne.layers.ReshapeLayer(l_out, shape=(-1, self.output_categories_no))
self.network = l_out
| [
"alfio.emanuele.f@gmail.com"
] | alfio.emanuele.f@gmail.com |
7de69c82b0fcb66f520c6acdeda2f813e828ddc1 | 23d36df4f9c4f764e6b2cb01e27274dea0d8afea | /manage.py | 6b9a8e5bfe2c020abf284a5890c3ae273fd89a00 | [] | no_license | PratikS18/bstheme | 09020275e98c353f4c535f197ab4a5e7c2c5008f | 5e1435c9682268b3e7ee886a7151b93bdd51d8df | refs/heads/master | 2021-01-14T19:44:30.384154 | 2020-08-07T13:32:42 | 2020-08-07T13:32:42 | 242,735,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: point Django at this project's settings and dispatch argv."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bootstraptheme.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"anikets@packt.com"
] | anikets@packt.com |
11eac3f2eddd6db881b96e11800b7eefbdc728d4 | 69651dbc04aaf55f04ccddb6158874b12a228afc | /Test.py | 5b1617ac4a4f024bb3823908a267e9a4d32b005d | [] | no_license | TMda/AlgoTrade-IB | 30ba8c8807bf9389a764bc7f262b7db4dfa82cfe | 90e67160e18e0c2dbd5989e52fb9e5217e5538e4 | refs/heads/master | 2016-09-12T11:08:49.326036 | 2016-06-01T02:42:42 | 2016-06-01T02:42:42 | 57,075,329 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,088 | py | from __future__ import print_function
import pandas as pd
import numpy as np
import ib.ext
from ib.ext import Contract
#from MyAlgoSystem.bar import LiveFeed
#from MyAlgoSystem.IbBroker import MyIbBroker
#from MyAlgoSystem.strategy import MyLiveStrategy
from barfeed import LiveFeed
from IbBroker import MyIbBroker
from strategy import MyLiveStrategy
from numpy import append
import time
def makeStkContrcat(m_symbol,m_secType = 'STK',m_exchange = 'SMART',m_currency = 'USD'):
    """Build an Interactive Brokers stock contract (defaults: STK on SMART, USD)."""
    from ib.ext.Contract import Contract
    contract = Contract()
    contract.m_symbol = m_symbol
    contract.m_secType = m_secType
    contract.m_exchange = m_exchange
    contract.m_currency = m_currency
    return contract
def makeForexContract(m_symbol,m_secType = 'CASH',m_exchange = 'IDEALPRO',m_currency = 'USD'):
    """Build an Interactive Brokers forex contract (defaults: CASH on IDEALPRO, USD)."""
    from ib.ext.Contract import Contract
    contract = Contract()
    contract.m_symbol = m_symbol
    contract.m_secType = m_secType
    contract.m_exchange = m_exchange
    contract.m_currency = m_currency
    return contract
class thomas(MyLiveStrategy):
    """SMA-crossover option strategy for Interactive Brokers live trading.

    On each bar it logs every broker position; for open option positions it
    exits on an SMA crossover or a return below -0.1%, and otherwise enters
    a new in-the-money call (long signal) or put (short signal).

    NOTE(review) -- apparent defects to confirm before using this class:
      * ``__init__`` is missing ``self``: ``LiveBarFeed`` plays the role of
        ``self`` and the name ``self`` inside it is undefined.
      * ``dt`` (``dt.datetime.now()``) and the ``now`` used inside the
        ``exit*Signal`` methods are not defined/imported in this file.
      * ``append(...)`` is numpy's ``append`` (imported at module top),
        which returns a NEW array -- the results are discarded, so
        ``self.sma``/``self.price`` never actually grow.
      * ``self.__count=+1`` assigns +1 instead of incrementing (``+= 1``).
      * ``self.i``, ``self.j``, ``self.__instrument`` and ``self.__entryWMA``
        are read but never initialized anywhere visible.
      * ``onBar`` writes ``self.price`` while the signal methods read the
        name-mangled ``self.__price``.
      * The exit branch runs when ``position['position'] == 0`` (flat) and
        the entry branch otherwise -- this looks inverted; confirm.
    """
    def __init__(LiveBarFeed,broker,shortSMA):
        self.sma=0
        self.__price=None
        self._shortSMA=shortSMA
        self.__count=0
    def onBar(self,bar):
        # Called once per live bar: log state, then manage exits/entries.
        now=dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        priceBar=bar.getClose()
        print('%s[Thomas onBars]********************************'%(now))
        #bar=self._events.get().bar
        # Seed the price/SMA arrays on the very first bar; append afterwards.
        if self.__count==0:
            self.price=np.array(priceBar,dtype=float)
            self.sma=np.array(priceBar,dtype=float)
        else:
            append(self.sma,priceBar)
            append(self.price,priceBar)
        # Wait for the SMA warm-up window before trading.
        if self.__count<self._shortSMA:
            self.__count=+1
            return
        #f1=open(self.logfile, 'w+')
        self.i +=1
        self.j +=1
        positions=self.getBroker().Positions()
        if positions:
            for position in positions:
                if position['position'] ==0:
                    #print('ON BAR: %d' %(self.i))
                    #print('STEP: %d' %(self.j ))
                    #print(' bar is: %s' %(priceBar))
                    # Dump the full position record for the trading log.
                    print('%s[Thomas onBars]ON BAR: %d' %(now,self.i))
                    print('%s[Thomas onBars]STEP: %d' %(now,self.j ))
                    print('%s[Thomas onBars]bar is: %s' %(now,priceBar))
                    print('%s[Thomas onBars]IB cash:%s '%(now,self.getBroker().getCash()))
                    print('%s[Thomas onBars]IB shares: %s'%(now,self.getBroker().getShares()))
                    print('%s[Thomas onBars]IB Broker positions ibContract.m_symbol: %s'%(now,position['ibContract.m_symbol']))
                    print('%s[Thomas onBars]IB Broker positions ibContract.m_secType: %s'%(now,position['ibContract.m_secType']))
                    print('%s[Thomas onBars]IB Broker positions ibContract.m_currency: %s'%(now,position['ibContract.m_currency']))
                    print('%s[Thomas onBars]IB Broker positions ibContract.m_exchange: %s'%(now,position['ibContract.m_exchange']))
                    print('%s[Thomas onBars]IB Broker positions ibContract.m_multiplier: %s'%(now,position['ibContract.m_multiplier']))
                    print('%s[Thomas onBars]IB Broker positions ibContract.m_expiry: %s'%(now,position['ibContract.m_expiry']))
                    print('%s[Thomas onBars]IB Broker positions ibContract.m_strike: %s'%(now,position['ibContract.m_strike']))
                    print('%s[Thomas onBars]IB Broker positions ibContract.m_right: %s'%(now,position['ibContract.m_right']))
                    print('%s[Thomas onBars]IB Broker positions position: %s'%(now,position['position']))
                    print('%s[Thomas onBars]IB Broker positions marketPrice: %s'%(now,position['marketPrice']))
                    print('%s[Thomas onBars]IB Broker positions marketValue: %s'%(now,position['marketValue']))
                    print('%s[Thomas onBars]IB Broker positions averageCost: %s'%(now,position['averageCost']))
                    print('%s[Thomas onBars]IB Broker positions unrealizedPNL: %s'%(now,position['unrealizedPNL']))
                    print('%s[Thomas onBars]IB Broker positions realizedPNL: %s'%(now,position['realizedPNL']))
                    # Position return in percent, from realized PnL over cost basis.
                    PL=position['realizedPNL']
                    Share=position['position']
                    marketPrice=position['marketPrice']
                    entryPrice=position['averageCost']
                    returnPosition=(PL/(entryPrice*Share))*100
                    print('%s[Thomas onBars]IB Broker positions return: %s'%(now,returnPosition))
                    print('%s[Thomas onBars]------------'%(now, ))
                    #Checking exit conditions for the positions
                    if position['ibContract.m_right']=='C' and\
                    self.exitLongSignal(entryPrice,marketPrice,PL,Share,returnPosition):
                        # Rebuild the held call contract and sell it at market.
                        ibContract=Contract()
                        ibContract.m_symbol = position['ibContract.m_symbol']
                        ibContract.m_secType = position['ibContract.m_secType']
                        ibContract.m_exchange = position['ibContract.m_exchange']
                        ibContract.m_multiplier = position['ibContract.m_multiplier']
                        ibContract.m_expiry = position['ibContract.m_expiry']
                        ibContract.m_strike = position['ibContract.m_strike']
                        ibContract.m_right = position['ibContract.m_right']
                        self.createMarketOrder(ibContract,'SELL', quantity=Share)
                        print('%s[Thomas onBars]EXITING POSITION >>EXIT SIGNAL TRUE on %s' %(now,self.__instrument))
                    elif position['ibContract.m_right']=='P' and\
                    self.exitShortSignal(entryPrice,marketPrice,PL,Share,returnPosition):
                        # Rebuild the held put contract and sell it at market.
                        ibContract=Contract()
                        ibContract.m_symbol = position['ibContract.m_symbol']
                        ibContract.m_secType = position['ibContract.m_secType']
                        ibContract.m_exchange = position['ibContract.m_exchange']
                        ibContract.m_multiplier = position['ibContract.m_multiplier']
                        ibContract.m_expiry = position['ibContract.m_expiry']
                        ibContract.m_strike = position['ibContract.m_strike']
                        ibContract.m_right = position['ibContract.m_right']
                        self.createMarketOrder(ibContract,'SELL', quantity=Share)
                        print('%s[Thomas onBars]EXITING POSITION >>EXIT SIGNAL TRUE on %s' %(now,self.__instrument))
                    else:
                        print('%s[Thomas onBars]NO EXITING POSITION >>EXIT SIGNAL False on %s' %(now,self.__instrument))
                else:
                    print('%s[Thomas onBars]No active Position '%(now, ))
                    # Size new entries at ~45% of cash (0.9 * 0.5) over a $200 unit.
                    if self.enterLongSignal(bar):
                        quantity =int(self.getBroker().getCash() * 0.9 *0.5/ (200))
                        #Build an option contract 3 strike price away in the money
                        ibContract=Contract()
                        ibContract.m_symbol = position['ibContract.m_symbol']
                        ibContract.m_secType = 'OPT'
                        ibContract.m_exchange = 'SMART'
                        ibContract.m_multiplier = 100
                        ibContract.m_expiry = '20160306'
                        ibContract.m_strike = float(priceBar-(3*0.5))
                        ibContract.m_right = 'C'
                        self.createMarketOrder(ibContract,'BUY', quantity)
                        print('%s[Thomas onBars]ENTERING LONG POSITION OPTION == CALL OPTION OF: %s' %(now,self.__instrument))
                    elif self.enterShortSignal(bar):
                        quantity =int(self.getBroker().getCash() * 0.9 *0.5/ (200))
                        #Build an option contract 3 strike price away in the money
                        ibContract=Contract()
                        ibContract.m_symbol = position['ibContract.m_symbol']
                        ibContract.m_secType = 'OPT'
                        ibContract.m_exchange = 'SMART'
                        ibContract.m_multiplier = 100
                        ibContract.m_expiry = '20160306'
                        ibContract.m_strike = float(priceBar+(3*0.5))
                        ibContract.m_right = 'P'
                        self.createMarketOrder(ibContract,'BUY', quantity)
                        print('%s[Thomas onBars]ENTERING SHORT POSITION == PUT OPTION OF: %s' %(now,self.__instrument))
        self.__count=+1
        print('[Thomas onBars]EXIT ====EXIT====EXIT========================')
    def enterLongSignal(self, bar):
        # Long when price drops below the entry WMA (mean-reversion style).
        return bar.getPrice() < self.__entryWMA[-1] #and bar.getDateTime().time() < datetime.time(11,30,00)
    def exitLongSignal(self,entryPrice,marketPrice,PL,Share,returnPosition):
        # Exit a call on a downward crossover or a return worse than -0.1%.
        print('[exitLongSignal]===========================================')
        crossi=self.__price[-1] < self.sma[-1]
        returnCondition=returnPosition <-0.1
        cond=(crossi or returnCondition)
        print('%s[exitLongSignal] position PNL: %s' %(now,PL ))
        print('%s[exitLongSignal] position Return: %s' %(now,returnPosition ))
        print('%s[exitLongSignal] condition Cross over condition: %s' %(now,crossi ))
        print('%s[exitLongSignal] condition Return: %s' %(now,returnCondition ))
        print('%s[exitLongSignal] Signal Condition: %s' %(now,cond))
        return cond
    def enterShortSignal(self, bar):
        # Short when price rises above the entry WMA.
        return bar.getPrice() > self.__entryWMA[-1] #and bar.getDateTime().time() < datetime.time(11,30,00)
    def exitShortSignal(self,entryPrice,marketPrice,PL,Share,returnPosition):
        # Exit a put on an upward crossover or a return worse than -0.1%.
        print('[exitShortSignal]===========================================')
        crossi=self.__price[-1] > self.sma[-1]
        returnCondition=returnPosition <-0.1
        cond=(crossi or returnCondition)
        print('%s[exitShortSignal] position PNL: %s' %(now,PL ))
        print('%s[exitShortSignal] position Return: %s' %(now,returnPosition ))
        print('%s[exitShortSignal] condition Cross over condition: %s' %(now,crossi ))
        print('%s[exitShortSignal] condition Return: %s' %(now,returnCondition ))
        print('%s[exitShortSignal] Signal Condition: %s' %(now,cond))
        return cond
#Contract to be used
# Wire-up: build the BAC stock contract, a 60-second live bar feed and the
# IB broker adapter, then run the strategy.
bac=makeStkContrcat('BAC')
#bac.m_symbol = 'BAC'
#bac.m_secType = 'STK'
#bac.m_exchange = 'SMART'
#bac.m_currency = 'USD'
eur=makeForexContract('EUR')  # NOTE(review): built but never used below
bacFeed = LiveFeed(contract=bac,frequency=60,debug=True)
IbBroker = MyIbBroker(debug=True)
# NOTE(review): this rebinds the name `thomas`, shadowing the strategy class
# defined above, and runs the base MyLiveStrategy instead of it -- confirm.
thomas = MyLiveStrategy(LiveBarFeed=bacFeed,broker=IbBroker,debug=True)
thomas.run()
| [
"TMda@users.noreply.github.com"
] | TMda@users.noreply.github.com |
8dd7a8369a2f7b352443bc0d36d23dd32bcc554e | bf576b059cbecb0cbb8a6c885dcfded5bd685399 | /4.Python course/3.Expand course/1.Small windmill/Small windmill.py | 18c56da878b1cb6a7ef0d38234ce809b1bea040f | [] | no_license | YahboomTechnology/Superbit-expansion-board | 0d3c2fd06c5df9280d230af429931af2c48dc6d5 | 4df7e03426d486d2b2f8f649359eee2d62851083 | refs/heads/master | 2023-04-07T03:16:15.786669 | 2023-03-29T01:12:57 | 2023-03-29T01:12:57 | 206,778,307 | 13 | 8 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | from microbit import *
import superbit
# Point the S1 servo to mid-range (135 deg) and start motor M1 at 255
# (second arg 0 -- presumably direction; confirm against superbit docs).
a = 135  # current servo angle for channel S1, kept within [0, 270]
display.show(Image.HEART)
superbit.servo270(superbit.S1, 135)
superbit.motor_control(superbit.M1, 255, 0)
# Poll the buttons forever: A nudges the servo angle down, B nudges it up,
# clamping at the servo's 0..270 degree limits.
while True:
    if button_a.is_pressed():
        a = a - 1
        if a < 0:
            a = 0
        superbit.servo270(superbit.S1, a)
    elif button_b.is_pressed():
        a = a + 1
        if a > 270:
            a = 270
        superbit.servo270(superbit.S1, a)
| [
"2448532184@qq.com"
] | 2448532184@qq.com |
9d110ceae0f2f319471a4112af223302cc51efd3 | c7f0fc9f66d9a364ce6d02dca566347b10852576 | /Additive_number.py | c84e8af2ddae4463c39f0d9c6c5b01e2753ef4e5 | [] | no_license | myfamurewa/Python-Practice | 027b727d2cd0eb09833b49efc38b4b6e3d53b646 | ac3321dabf2db11f2899ad985a15f4d319a4e2ec | refs/heads/master | 2023-03-10T01:12:07.522008 | 2021-02-24T19:05:32 | 2021-02-24T19:05:32 | 275,929,286 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | def check_valid(left_num: str, right_num: str, remainder: str) -> bool:
if len(right_num) > 1 and right_num[0] == "0" or (len(left_num) > 1 and left_num[0] == "0"):
return False
sumofstr = str(int(left_num) + int(right_num))
while remainder and len(remainder) >= len(sumofstr):
if remainder == sum:
return True
if remainder.startswith(sum):
remainder = remainder[len(sum):]
left_num = right_num
right_num = sumofstr
return False
return False
def isAdditiveNumber(num: str) -> bool:
    """Return True if ``num`` can be split into an additive sequence of at
    least three numbers; validation of each split is delegated to
    ``check_valid``."""
    # Fewer than three digits can never form three numbers.
    if not num or len(num) < 3:
        return False
    # The first two numbers together never need more than half the digits.
    limit = len(num) // 2 + 1
    for split_one in range(1, limit):
        for split_two in range(split_one + 1, limit + 1):
            first = num[:split_one]
            second = num[split_one:split_two]
            tail = num[split_two:]
            if check_valid(first, second, tail):
                return True
    return False
| [
"myfamurewa@gmail.com"
] | myfamurewa@gmail.com |
bc27f479ed72618b28215c6eefada4ba525c249d | 10e5ecf13d2fa4e9e3d866fd42d68a99258d28b3 | /tensor-note/tensor1.14/tensor_5_mnist_test.py | ce377a407a29bf314cbe183611514aefcb622a02 | [] | no_license | FictionDk/python-repo | 2ba20bece0e900040833be305eb81157704533cf | 41fa3a95d62f16d7cf632cfefb09226ec24f4e1a | refs/heads/master | 2023-04-27T18:39:53.998065 | 2023-04-18T10:33:23 | 2023-04-18T10:33:23 | 91,284,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | # -*- coding: utf-8 -*-
import tensorflow.compat.v1 as tf
import numpy as np
import os
from tensorflow.examples.tutorials.mnist import input_data
import tensor_5_mnist_forward as mnist_forward
import tensor_5_mnist_backward as mnist_backward
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
TEST_INTERVAL_SECS = 8
def test(mnist):
    """Repeatedly evaluate the latest saved checkpoint on the MNIST test set.

    Restores the exponential-moving-average shadow variables in place of the
    raw weights, prints test accuracy, then sleeps ``TEST_INTERVAL_SECS`` and
    polls for a newer checkpoint. Returns only when no checkpoint is found.
    """
    with tf.Graph().as_default() as g:
        print("type(g) is ",type(g))
        # Placeholder sized to the full test set, NHWC layout.
        x = tf.placeholder(tf.float32,[
            mnist.test.num_examples,
            mnist_forward.IMAGE_SIZE,
            mnist_forward.IMAGE_SIZE,
            mnist_forward.NUM_CHANNELS])
        y_ = tf.placeholder(tf.float32,[None,mnist_forward.OUTPUT_NODE])
        y = mnist_forward.forward(x,False,None)
        # Map each variable to its EMA shadow so evaluation uses the averages.
        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
        correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess,ckpt.model_checkpoint_path)
                    # The training step number is the suffix of the checkpoint file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    reshaped_x = np.reshape(mnist.test.images,(
                        mnist.test.num_examples,
                        mnist_forward.IMAGE_SIZE,
                        mnist_forward.IMAGE_SIZE,
                        mnist_forward.NUM_CHANNELS))
                    accuracy_score = sess.run(accuracy,feed_dict={x:reshaped_x,y_:mnist.test.labels})
                    print("After %s training step(s), test accuracy = %g." % (global_step,accuracy_score))
                else:
                    print("No checkpoint file found in path.")
                    return
            time.sleep(TEST_INTERVAL_SECS)
def main():
    """Load the MNIST dataset (one-hot labels) and run the evaluation loop."""
    mnist = input_data.read_data_sets("./data/",one_hot=True)
    test(mnist)
if __name__ == '__main__':
main()
| [
"ficito.d2k@gmail.com"
] | ficito.d2k@gmail.com |
d9c42d18f40fe8f7deea40e2ffa928c7cc15d4d7 | 29cd26a0f676c56fa0b605aa4fa28d4cf80bfdda | /Python/drillMove.py | 74c233fc87ad56344fa2c6d2bf4b215b7b233cd9 | [] | no_license | Najdhillon/The-Tech-Academy-Course-Work | 1f199945d2b665c0c8ed5547391f9b7e7c0d5a6e | 1fb0e7be725ba9b44d34ff7707eadd2361f534c7 | refs/heads/master | 2020-04-15T15:22:03.509245 | 2017-02-07T04:32:52 | 2017-02-07T04:32:52 | 54,001,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | import shutil, os
from datetime import datetime, timedelta
import time
os.chdir('C:\\Users\\navjot.dhillon\\Desktop\DailyFile\\')
srcf = ('C:\\Users\\navjot.dhillon\\Desktop\DailyFile\\')
destf = ('C:\\Users\\navjot.dhillon\\Desktop\MoveFile\\')
for f in os.listdir(srcf):
src = os.path.join(srcf,f)
dest = os.path.join(destf,f)
if f.endswith(".txt"):
# Last Mod time calculation
modtime = time.time() - (os.path.getmtime(src))
#modtimets = (datetime.fromtimestamp(modtime)) - this was not working(ND)
h24ago = time.time() - (24*60*60)
last24 = time.time() - h24ago
#check = modtime - timedelta(hours = 24)
if modtime < last24:
shutil.copy(src, dest)
print '{} has been copied to {}'.format(src,dest)
| [
"najdhil@yahoo.com"
] | najdhil@yahoo.com |
72500071812b0f6f8d51db9beaa2e8c7fc9d42df | 7c519894072f88b47b0249e2b90401c7414f4cfe | /Lists/Liststring.py | 27c6a373a24b624e21abacdd617534d9716af7d4 | [
"MIT"
] | permissive | yuvan1234/python-75-hackathon | db25db0f72c5000ff0d31066f4b4d6c51165a4e2 | 3ace446224f346c50343398b882d9fd133fe1bc9 | refs/heads/master | 2020-04-11T09:20:30.067112 | 2018-12-15T13:04:52 | 2018-12-15T13:04:52 | 161,674,241 | 0 | 2 | MIT | 2018-12-13T17:42:59 | 2018-12-13T17:42:59 | null | UTF-8 | Python | false | false | 38 | py | List=['Yuvan','Bajjurla']
print(List)
| [
"yuvan.bajjurla@gmail.com"
] | yuvan.bajjurla@gmail.com |
2ff67f71a4cd32162107246b30b23368bdbc91f9 | ac9e103799433c9271f7ce840bdddeee471d2b18 | /ciena/settings.py | 0973a71678e4ebd3044c0e1d0b9f2befd6241baa | [] | no_license | raheemmian/ciena-challenge | f265248a3723b78f140dfeb30b2342ba91c13793 | 7df10ea217c70831fac6b54c82564703199bf565 | refs/heads/main | 2023-05-26T09:00:05.634870 | 2021-06-03T01:11:14 | 2021-06-03T01:11:14 | 373,333,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,258 | py | """
Django settings for ciena project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-!@o^_vy!-j_h1#kd!d_a&x_&hp+da!iz%(t-yy@yyj+y4ln49o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'osa'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ciena.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ciena.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"raheemmian@hotmail.com"
] | raheemmian@hotmail.com |
e5dcefd74e755e164a7238304f594bb73321a27b | 14f470babc5dc0a4c67e230d7c1b19c83f561f93 | /idconn/connectivity/retrieval-ID-pipeline-regionwise.py | 40d52c7e25fc3c9c1b581bfdb328ed203890afc5 | [
"MIT"
] | permissive | NBCLab/IDConn | cee11156453c148d05d83c270dd8128bdf0ee219 | 0677e372c02fe35be28f70567e71e040e1d2a023 | refs/heads/master | 2023-06-01T17:33:58.764642 | 2020-10-01T19:59:36 | 2020-10-01T19:59:36 | 300,373,299 | 2 | 0 | MIT | 2021-06-17T17:04:02 | 2020-10-01T17:56:33 | Jupyter Notebook | UTF-8 | Python | false | false | 10,089 | py | from __future__ import division
from os.path import join, basename, exists
from os import makedirs
from glob import glob
from nilearn import input_data, datasets, plotting
from nilearn.image import concat_imgs
from nilearn.input_data import NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure
from scipy.stats import pearsonr
import bct
import json
import numpy as np
import pandas as pd
labels = [
"limbic",
"limbic",
"orbitofrontal",
"orbitofrontal",
"basal ganglia",
"salience",
"salience",
"salience",
"hunger",
"hunger",
"hunger",
"hunger",
"hunger",
"hunger",
"hunger",
"motor learning",
"frontoparietal",
"frontoparietal",
"frontoparietal",
"hand",
"hand",
"hand",
"motor execution",
"motor execution",
"higher order visual",
"higher order visual",
"lateral visual",
"lateral visual",
"medial visual",
"default mode",
"default mode",
"default mode",
"default mode",
"default mode",
" cerebellum",
"right central executive",
"right central executive",
"right central executive",
"right central executive",
"right central executive",
"auditory",
"auditory",
"mouth",
"mouth",
"left central executive",
"left central executive",
"left central executive",
]
# only want post subjects
subjects = [
"101",
"102",
"103",
"104",
"106",
"107",
"108",
"110",
"212",
"214",
"215",
"216",
"217",
"218",
"219",
"320",
"323",
"324",
"325",
"327",
"328",
"330",
"331",
"333",
"334",
"335",
"336",
"337",
"338",
"339",
"340",
"341",
"342",
"343",
"344",
"345",
"346",
"348",
"349",
"350",
"451",
"453",
"455",
"458",
"459",
"460",
"462",
"463",
"464",
"465",
"467",
"468",
"469",
"470",
"502",
"503",
"571",
"572",
"573",
"574",
"577",
"578",
"581",
"582",
"584",
"585",
"586",
"587",
"588",
"589",
"591",
"592",
"593",
"594",
"595",
"596",
"597",
"598",
"604",
"605",
"606",
"607",
"608",
"609",
"610",
"612",
"613",
"614",
"615",
"617",
"618",
"619",
"620",
"621",
"622",
"623",
"624",
"625",
"626",
"627",
"629",
"630",
"631",
"633",
"634",
]
# subjects = ['633', '634']
# all subjects 102 103 101 104 106 107 108 110 212 X213 214 215 216 217 218 219 320 321 X322 323 324 325
# 327 328 X329 330 331 X332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 451
# X452 453 455 X456 X457 458 459 460 462 463 464 465 467 468 469 470 502 503 571 572 573 574 X575 577 578
# X579 X580 581 582 584 585 586 587 588 589 X590 591 592 593 594 595 596 597 598 604 605 606 607 608 609
# 610 X611 612 613 614 615 X616 617 618 619 620 621 622 623 624 625 626 627 X628 629 630 631 633 634
# errors in fnirt-to-mni: 213, 322, 329, 332, 452, 456, 457, 575, 579, 580, 590, 611, 616, 628
# subjects without post-IQ measure: 452, 461, 501, 575, 576, 579, 583, 611, 616, 628, 105, 109, 211, 213, 322, 326, 329, 332
# subjects = ['101','103']
# something weird going on with the regionwise parcellation
# in subjects 321, 347 (run 0 has only 46 regions), 618 (run 1 only has 46 regions),
# 631 (run 1 has only 43 regions),
subjects = ["321", "347"]
# In[5]:
# data_dir = '/home/data/nbc/physics-learning/data/pre-processed'
data_dir = "/home/data/nbc/physics-learning/retrieval-graphtheory/output"
sink_dir = "/home/kbott006/physics-retrieval/output"
runs = [0, 1]
connectivity_metric = "correlation"
conditions = ["phy", "gen"]
thresh_range = np.arange(0.1, 1, 0.1)
highpass = 1 / 55.0
correlation_measure = ConnectivityMeasure(kind=connectivity_metric)
# In[ ]:
# gen_timing = np.genfromtxt('/home/data/nbc/physics-learning/physics-learning/RETRconditionGeneralSess1.txt',
# delimiter='\t')
gen_timing = np.genfromtxt(
"/home/data/nbc/physics-learning/retrieval-graphtheory/RETRconditionGeneralSess1.txt",
delimiter="\t",
dtype=int,
)
gen_timing = (gen_timing / 2) - 1
gen_timing = gen_timing[:, 0:2]
# phy_timing = np.genfromtxt('/home/data/nbc/physics-learning/physics-learning/RETRconditionPhysicsSess1.txt',
# delimiter='\t')
phy_timing = np.genfromtxt(
"/home/data/nbc/physics-learning/retrieval-graphtheory/RETRconditionPhysicsSess1.txt",
delimiter="\t",
)
phy_timing = (phy_timing / 2) - 1
phy_timing = phy_timing[:, 0:2]
timing = {}
timing["phy"] = phy_timing
timing["gen"] = gen_timing
# run preprocessing once per run per subject
for subject in subjects:
try:
print subject
ntwk_run_cond = {}
ntwk = {}
hipp = {}
hipp_run_cond = {}
corrmats = {}
for run in runs:
# xfm laird 2011 maps to subject's epi space & define masker
epi = join(
data_dir, subject, "{0}-{1}_retr-mcf.nii.gz".format(subject, run)
)
confounds = join(
data_dir, subject, "{0}-{1}_retr-confounds.txt".format(subject, run)
)
# icn = join(data_dir, subject,'{0}-{1}_18_icn_retr.nii.gz'.format(subject, run))
# icn_regions = connected_label_regions(icn, min_size=50., labels=labels)
icn_regions = join(
data_dir,
subject,
"{0}-{1}_18_icn-regions_retr.nii.gz".format(subject, run),
)
hippo = join(
data_dir, subject, "{0}-{1}_hippo_retr.nii.gz".format(subject, run)
)
regn_masker = NiftiLabelsMasker(
icn_regions, standardize=True, high_pass=highpass, t_r=2.0, verbose=1
)
hipp_masker = NiftiLabelsMasker(
hippo, standardize=True, high_pass=highpass, t_r=2.0, verbose=1
)
# extract the network-wise and hippocampus timeseries per run
# fmri = join(data_dir, subject, 'session-1', 'retr', 'mni', '{0}_filtered_func_data_{1}.nii.gz'.format(subject, run))
ntwk_ts = regn_masker.fit_transform(epi, confounds=confounds)
hipp_ts = hipp_masker.fit_transform(epi, confounds=confounds)
# ts = [ntwk_ts, hipp_ts]
# and then separate each run's timeseries into the different conditions
for condition in conditions:
ntwk_run_cond["{0} {1}".format(condition, run)] = np.vstack(
(
ntwk_ts[
timing[condition][0, 0]
.astype(int) : (
timing[condition][0, 0] + timing[condition][0, 1] + 1
)
.astype(int),
:,
],
ntwk_ts[
timing[condition][1, 0]
.astype(int) : (
timing[condition][1, 0] + timing[condition][1, 1] + 1
)
.astype(int),
:,
],
ntwk_ts[
timing[condition][2, 0]
.astype(int) : (
timing[condition][2, 0] + timing[condition][2, 1] + 1
)
.astype(int),
:,
],
)
)
print ntwk_run_cond["{0} {1}".format(condition, run)].shape
hipp_run_cond["{0} {1}".format(condition, run)] = np.vstack(
(
hipp_ts[
timing[condition][0, 0]
.astype(int) : (
timing[condition][0, 0] + timing[condition][0, 1] + 1
)
.astype(int)
],
hipp_ts[
timing[condition][1, 0]
.astype(int) : (
timing[condition][1, 0] + timing[condition][1, 1] + 1
)
.astype(int)
],
hipp_ts[
timing[condition][2, 0]
.astype(int) : (
timing[condition][2, 0] + timing[condition][2, 1] + 1
)
.astype(int)
],
)
)
for condition in conditions:
ntwk[condition] = np.vstack(
(
ntwk_run_cond["{0} 0".format(condition)],
ntwk_run_cond["{0} 1".format(condition)],
)
)
hipp[condition] = np.vstack(
(
hipp_run_cond["{0} 0".format(condition)],
hipp_run_cond["{0} 1".format(condition)],
)
)
corrmats[condition] = correlation_measure.fit_transform([ntwk[condition]])[
0
]
df = pd.DataFrame(corrmats[condition], index=labels, columns=labels)
df.to_csv(
join(
sink_dir,
"{0}-{1}-corrmat-regionwise.csv".format(subject, condition),
)
)
df.to_csv(
join(
data_dir,
subject,
"{0}-{1}-corrmat-regionwise.csv".format(subject, condition),
)
)
except Exception as e:
print e
| [
"kbott006@fiu.edu"
] | kbott006@fiu.edu |
76aa60f6f57e1566e5e2ccd335cd2e9d89b94e1e | 5b4222e63b4d5df97d2fbe098ea28df410ebc0bb | /ngraph/python/tests/test_onnx/test_additional_models.py | 316381360dfb16f01ed637513b864b398acca741 | [
"Apache-2.0"
] | permissive | groove-x/openvino | a4e3e9c5ab05ac404b7a7687f7c357baeac8fe3b | 58ac859c14f85046f0eb3d36627a8bf49ad778c7 | refs/heads/release/2021.1 | 2023-02-09T17:53:30.753296 | 2020-12-25T04:25:11 | 2020-12-25T04:25:11 | 240,161,117 | 0 | 0 | Apache-2.0 | 2020-12-23T10:02:44 | 2020-02-13T02:34:00 | C++ | UTF-8 | Python | false | false | 3,399 | py | # ******************************************************************************
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import tests
from operator import itemgetter
from pathlib import Path
import os
from tests.test_onnx.utils import OpenVinoOnnxBackend
from tests.test_onnx.utils.model_importer import ModelImportRunner
def _get_default_additional_models_dir():
onnx_home = os.path.expanduser(os.getenv("ONNX_HOME", os.path.join("~", ".onnx")))
return os.path.join(onnx_home, "additional_models")
MODELS_ROOT_DIR = tests.ADDITIONAL_MODELS_DIR
if len(MODELS_ROOT_DIR) == 0:
MODELS_ROOT_DIR = _get_default_additional_models_dir()
tolerance_map = {
"arcface_lresnet100e_opset8": {"atol": 0.001, "rtol": 0.001},
"fp16_inception_v1": {"atol": 0.001, "rtol": 0.001},
"mobilenet_opset7": {"atol": 0.001, "rtol": 0.001},
"resnet50_v2_opset7": {"atol": 0.001, "rtol": 0.001},
"test_mobilenetv2-1.0": {"atol": 0.001, "rtol": 0.001},
"test_resnet101v2": {"atol": 0.001, "rtol": 0.001},
"test_resnet18v2": {"atol": 0.001, "rtol": 0.001},
"test_resnet34v2": {"atol": 0.001, "rtol": 0.001},
"test_resnet50v2": {"atol": 0.001, "rtol": 0.001},
"mosaic": {"atol": 0.001, "rtol": 0.001},
"pointilism": {"atol": 0.001, "rtol": 0.001},
"rain_princess": {"atol": 0.001, "rtol": 0.001},
"udnie": {"atol": 0.001, "rtol": 0.001},
"candy": {"atol": 0.003, "rtol": 0.003},
}
zoo_models = []
# rglob doesn't work for symlinks, so models have to be physically somwhere inside "MODELS_ROOT_DIR"
for path in Path(MODELS_ROOT_DIR).rglob("*.onnx"):
mdir, file = os.path.split(str(path))
if not file.startswith("."):
mdir = str(mdir)
if mdir.endswith("/"):
mdir = mdir[:-1]
model = {"model_name": path, "model_file": file, "dir": mdir}
basedir = os.path.basename(mdir)
if basedir in tolerance_map:
# updated model looks now:
# {"model_name": path, "model_file": file, "dir": mdir, "atol": ..., "rtol": ...}
model.update(tolerance_map[basedir])
zoo_models.append(model)
if len(zoo_models) > 0:
sorted(zoo_models, key=itemgetter("model_name"))
# Set backend device name to be used instead of hardcoded by ONNX BackendTest class ones.
OpenVinoOnnxBackend.backend_name = tests.BACKEND_NAME
# import all test cases at global scope to make them visible to pytest
backend_test = ModelImportRunner(OpenVinoOnnxBackend, zoo_models, __name__)
test_cases = backend_test.test_cases["OnnxBackendValidationModelImportTest"]
del test_cases
test_cases = backend_test.test_cases["OnnxBackendValidationModelExecutionTest"]
del test_cases
globals().update(backend_test.enable_report().test_cases)
| [
"noreply@github.com"
] | noreply@github.com |
b63a69be0846d0417dad07d4d3fd03ac582d960d | 33253bb759aaa41f808f59fd7c3bfc16b0bca674 | /Enhanced TCN for Log Anomaly Detection on the BGL Dataset/test_BGL.py | 238834e8a4e692a8bba3c29fa696d44752ac2ddf | [
"MIT"
] | permissive | h10gforks/LightLog | e52aff4e0ecec53d93a9fc4c315846eb549a5bd6 | b9328763feba22a57775f82790bf799553ee73d7 | refs/heads/main | 2023-08-27T04:22:49.222029 | 2021-10-29T13:06:58 | 2021-10-29T13:06:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | import json
import pandas as pd
import numpy as np
import keras
from keras.models import load_model
import time
with open('./data/bgl_semantic_vec.json') as f:
# Step1-1 open file
gdp_list = json.load(f)
value = list(gdp_list.values())
# Step1-2 PCA: Dimensionality reduction to 20-dimensional data
from sklearn.decomposition import PCA
estimator = PCA(n_components=20)
pca_result = estimator.fit_transform(value)
# Step1-3 PPA: De-averaged
ppa_result = []
result = pca_result - np.mean(pca_result)
pca = PCA(n_components=20)
pca_result = pca.fit_transform(result)
U = pca.components_
for i, x in enumerate(result):
for u in U[0:7]:
x = x - np.dot(u.transpose(), x) * u
ppa_result.append(list(x))
ppa_result = np.array(ppa_result)
def read_test(split = 0.7):
logs_data = pd.read_csv('./data/bgl_data.csv')
logs_data = logs_data.values
label = pd.read_csv('./data/bgl_label.csv')
label = label.values
logs = []
for i in range(0,len(logs_data)):
padding = np.zeros((300,20))
data = logs_data[i]
for j in range(0,len(data)):
padding[j] = pca_result[int(data[j]-1)]
padding = list(padding)
logs.append(padding)
logs = np.array(logs)
split_boundary = int(logs.shape[0] * split)
valid_x = logs[split_boundary:]
test_y = label[split_boundary:]
valid_x = np.reshape(valid_x, (valid_x.shape[0],valid_x.shape[1],20))
valid_y = keras.utils.to_categorical(np.array(test_y))
return valid_x,valid_y,test_y
def test_model(test_x):
model = load_model('./model/E-TCN.h5')
y_pred = model.predict(test_x, batch_size=512)
return y_pred
test_x,valid_y,label = read_test()
start = time.clock()
y_pred = test_model(test_x)
end = time.clock()
print('The detection time is',end-start)
y_pred = np.argmax(y_pred, axis=1)
tp = 0
fp = 0
tn = 0
fn = 0
for j in range(0, len(y_pred)):
if label[j] == y_pred[j] and label[j] == 0:
tp = tp + 1
elif label[j] != y_pred[j] and label[j] == 0:
fp = fp + 1
elif label[j] == y_pred[j] and label[j] == 1:
tn = tn + 1
elif label[j] != y_pred[j] and label[j] == 1:
fn = fn + 1
print('TP,FP,TN,FN are: ',[tp,fp,tn,fn])
print('Precision, Recall, F1-measure are:',tn/(tn+fn),tn/(tn+fp),2*(tn/(tn+fn)*(tn/(tn+fp))/(tn/(tn+fn)+tn/(tn+fp))))
datas = pd.DataFrame(data=[tp,fp,tn,fn])
datas.to_csv('./result/result_BGL',index=False,header=False)
| [
"2435066195@qq.com"
] | 2435066195@qq.com |
3b5b830ece25ebde56182792dca6ff39cf0ef06f | e5f3be412055537a81476cfcb2a1101f48d6d951 | /invert.py | e1aa8f3feaf5ec6004ffa720811341aca24d559a | [] | no_license | LuciMoore/MRI_nonlinear_registration | aef6100b31edbd48013176ca3e82d04fd0574e07 | 5978d67c308dcdb297e0014b006b465c88e18cb7 | refs/heads/master | 2023-08-04T13:00:28.351993 | 2019-06-24T23:42:50 | 2019-06-24T23:42:50 | 190,086,121 | 0 | 0 | null | 2023-07-22T07:28:01 | 2019-06-03T21:39:42 | Python | UTF-8 | Python | false | false | 3,651 | py | #!/usr/bin/env python3
# standard lib
import argparse
import os
from glob import glob
# external libs
import nipype.pipeline.engine as pe
from nipype.interfaces import ants, utility
from nipype.interfaces.image import Rescale
def main():
parser = generate_parser()
args = parser.parse_args()
subject_T1w_folder = args.subject_T1w_folder
jlf_folder = args.joint_fusion_folder
njobs = args.njobs
pattern = os.path.join(jlf_folder, 'Template*')
template_list = glob(pattern)
atlas_images = []
for i in template_list:
atlas_images.append(os.path.join(i, "T1w_brain.nii.gz"))
atlas_segmentations = []
for i in template_list:
atlas_segmentations.append(os.path.join(i, "Segmentation.nii.gz"))
randint = random.randint(1,100)
warped_dir = os.path.join('./invert_dir', 'jlf{}'.format(randint))
#subject T1w brain image
subject_T1w = os.path.join(subject_T1w_folder, 'T1w_acpc_dc_restore_brain.nii.gz')
subject_T2w = os.path.join(subject_T1w_folder, 'T2w_acpc_dc_restore_brain.nii.gz')
#make list of subject T1w and T2w
subject_Tws = [subject_T1w, subject_T2w]
register(warped_dir, subject_Tws, atlas_images, atlas_segmentations, n_jobs=njobs)
def generate_parser():
parser = argparse.ArgumentParser(description='non-linear registration from Brown')
parser.add_argument('subject_T1w_folder', help='path to subject T1w restored brain')
parser.add_argument('joint_fusion_folder', help='path to joint label fusion atlas directory')
parser.add_argument('--njobs', default=1, type=int, help='number of cpus to utilize')
return parser
def register(warped_dir, subject_Tws, atlas_images, atlas_segmentations, n_jobs):
#create list for subject T1w and T2w because Nipype requires inputs to be in list format specifically fr JLF node
sub_T1w_list = []
sub_T1w_list.append(subject_Tws[0])
sub_T2w_list = []
sub_T2w_list.append(subject_Tws[1])
atlas_forinvert = atlas_images[0] #just use
def main():
# sub_T2w_inverted =
subject_T2w = '/home/exacloud/lustre1/fnl_lab/data/HCP/processed/BCP/BCP_NEO_ATROPOS_4/sub-375518/ses-1m/files/T1w/T1w_acpc_dc_restore_brain.nii.gz'
inv(subject_T2w)
def inv(subject_T2w):
fsl.maths.MathsCommand(in_file=subject_T2w, args="-recip", out_file="T1w_acpc_dc_restore_brain_inverse.nii.gz")
if __name__ == '__main__':
main()
input_spec = pe.Node(
utility.IdentityInterface(fields=['subject_Txw', 'subject_Txw_list', 'subject_dual_Tws', 'atlas_image', 'atlas_segmentation', 'atlas_forinvert']),
#iterables=[('atlas_image', atlas_images), ('atlas_segmentation', atlas_segmentations)],
#synchronize=True,
name='input_spec'
)
# set input_spec
input_spec.inputs.subject_Txw = subject_Tws[1] #using T2w here
input_spec.inputs.subject_Txw_list = sub_T2w_list
input_spec.inputs.subject_dual_Tws = subject_Tws
input_spec.inputs.atlas_forinvert = atlas_forinvert
invert = pe.Node(Rescale(invert=True,
percentile = 1.), name='invert')
wf = pe.Workflow(name='wf', base_dir=warped_dir)
wf.connect(input_spec, "subject_Txw", invert, "in_file")
wf.connect(input_spec, "atlas_forinvert", invert, "ref_file") #should I use the subject or atlas T1w here?
wf.config['execution']['parameterize_dirs'] = False
#create workflow graph
wf.write_graph()
#Nipype plugins specify how workflow should be executed
output = wf.run(plugin='MultiProc', plugin_args={'n_procs' : n_jobs})
if __name__ == '__main__':
main() | [
"lucille.a.mo@gmail.com"
] | lucille.a.mo@gmail.com |
4c7ea5619cc66f92ba1b9905cdc2da6911ed9e1f | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/throughput/final3/main.py | 7a486cdf102a8565fccfdac8cf8ebbcbaf5f011c | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,723 | py | import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
from sklearn import neighbors
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
with open('k80_time.json', 'r') as fp:
k80_time = json.load(fp)
with open('data/pwr.json', 'r') as fp:
pwr_dict = json.load(fp)
with open('data/util.json', 'r') as fp:
util_dict = json.load(fp)
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
for item in queue:
JCT[str(item)] = 0
completion = {}
for item in queue:
completion[str(item)] = 0
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
b_start = {} # initialize this to 0 as well
for item in queue:
b_start[str(item)] = 0
c_start = {} # initialize this to 0 as well
for item in queue:
c_start[str(item)] = 0
d_start = {} # initialize this to 0 as well
for item in queue:
d_start[str(item)] = 0
ovhd_a = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_a[str(item)] = []
ovhd_b = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_b[str(item)] = []
ovhd_c = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_c[str(item)] = []
ovhd_d = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_d[str(item)] = []
ovhd_total = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_total[str(item)] = []
k80_1st = {}
for item in queue:
k80_1st[str(item)] = []
v100_1st = {}
for item in queue:
v100_1st[str(item)] = []
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
V100_epoch_time = {}
for item in queue:
V100_epoch_time[str(item)] = 0
K80_epoch_time = {}
for item in queue:
K80_epoch_time[str(item)] = 0
K80_start_time = {}
for item in queue:
K80_start_time[str(item)] = 0
V100_start_time = {}
for item in queue:
V100_start_time[str(item)] = 0
promote_start_time = {}
for item in queue:
promote_start_time[str(item)] = 0
demote_list = []
K80_time = {}
for item in queue:
K80_time[str(item)] = 0
V100_time = {}
for item in queue:
V100_time[str(item)] = 0
gpu_usage_time = [] # don't initialize this
gpu_usage = []
gpu_usage_completion = []
speedup_dict = {}
for item in queue:
speedup_dict[str(item)] = 0
predict_dict = {}
for item in queue:
predict_dict[str(item)] = 0
index = 0
all_jobs_started = False
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(4):
V100_job[str(i)] = 'idle'
qualified_job = []
step1_job = []
step2_job = []
pc_job = []
K80_node = 'c2180'
V100_node = 'd1024'
host_node = 'c0168'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
######################### do a regression fit ########################
with open('x_data.json') as f:
x_train = json.load(f)
with open('y_data.json') as f:
y_train = json.load(f)
model = neighbors.KNeighborsRegressor(n_neighbors = 1, weights='distance')
model.fit(x_train, y_train)
####################################################################
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
def max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global speedup_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always force demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = demote_list
#if 'idle' in V100_qual:
# V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if num_promote <= V100_vacant: # promote all jobs as well
return promote_list, force_demote
else: # promote the top 4 jobs
pool_dict = {}
V100_avail = V100_vacant + len(V100_qual)
for job in V100_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_avail]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(demote_list).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
# lazy migration, for every V100 job from high speeup to low speedup and not in sorted_pool, compare it with
# K80 jobs in sorted_pool, from low speedup to high speedup. If difference within 0.2, replace the K80 job
# in sorted pool
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.1:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
# situations below won't happen
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
# less vacant K80s than demote jobs, select worst among force demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
if len(sorted_pool) > 0:
raise ValueError('Bug, demotion shouldnt happen because no practical complete')
return promote_list, sorted_pool
else:
raise ValueError('Bug with max speedup promotion, condition not considered')
def min_speedup_demotion(K80_job, demote_list):
num_demote = len(demote_list)
global speedup_dict
# selectively demote among active K80 jobs and demote list jobs
K80_qual = list(set(list(K80_job.values())))
if 'idle' in K80_qual:
K80_qual.remove('idle')
K80_pool = list(set(K80_qual).union(demote_list))
if len(K80_pool) <= 8: # demote all jobs, no promotion
return [], demote_list[:] # must return a copy, otherwise the output points to the same address as input
else: # promote the top 4 jobs
pool_dict = {}
for job in K80_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:8] # 8 least speedup jobs
demotion_list = list(set(demote_list).intersection(sorted_pool))
promotion_list = list(set(list(K80_job.values())).difference(sorted_pool))
if 'idle' in promotion_list:
promotion_list.remove('idle') # this includes force demotion
# lazy migration, for every V100 job from high speeup to low speedup and not in sorted_pool, compare it with
# K80 jobs in sorted_pool, from low speedup to high speedup. If difference within 0.2, replace the K80 job
# in sorted pool
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.1:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
global pid_dict
pid = pid_dict['job'+job]
send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000'
global ovhd_start
ovhd_start[job] = time.time()
time.sleep(3) # in case epoch_waste is communicate too frequently
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# function that checks the tensorboard log of currently running jobs and logs jobs that have finished the first epoch
# in a global list. Once it's done, it will be in a queue to be promoted to V100 for 3 more epochs.
def check_step1_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global V100_epoch_time
for job in job_list:
if job not in step1_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 0:
tc = dirs[0]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
V100_epoch_time[job] = wall_time[1] - wall_time[0]
step1_job.append(job)
print('job' + job + ' has reached step1 complete')
except Exception:
pass
def check_step2_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global step2_job
global V100_epoch_time
global K80_epoch_time
global speedup_dict
for job in job_list:
if job in step1_job and job not in step2_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 1:
tc = dirs[1]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
K80_epoch_time[job] = wall_time[1] - wall_time[0]
V100_time_step2 = V100_epoch_time[job]
K80_time_step2 = wall_time[1] - wall_time[0]
speedup = (K80_time_step2 - V100_time_step2) / K80_time_step2
speedup_dict[job] = speedup
step2_job.append(job)
print('job' + job + ' has reached step2 complete')
except Exception:
pass
############### first clear finish status of all jobs ####################
pid_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
pid_dict[job_name] = 0
checkpoint_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
checkpoint_dict[job_name] = 0
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
epoch_waste_dict[job_name] = 0
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
global K80_start_time
global V100_start_time, promote_start_time
global K80_job
global v100_job
global K80_time
global V100_time
global ovhd_a, ovhd_b, ovhd_c, ovhd_d, k80_1st, v100_1st, ovhd_start, overhead, ovhd_total
global b_start, c_start, d_start, completion
if 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
finish_dict[job_name] = 1
JCT[job] = int(time.time() - job_start[job])
if job in list(K80_job.values()):
K80_time[job] += int(time.time() - K80_start_time[job])
elif job in list(V100_job.values()):
V100_time[job] += int(time.time() - V100_start_time[job])
elif 'pid' in data_str:
global pid_dict
job_name = data_str.split(' ')[0]
pid = data_str.split(' ')[2]
pid_dict[job_name] = pid
elif 'checkpoint' in data_str: # can only be received after save signal is sent
global checkpoint_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
checkpoint_dict[job_name] = 1
ovhd_a[job].append(int(time.time() - ovhd_start[job]))
b_start[job] = time.time()
elif 'waste' in data_str:
global epoch_waste_dict
job_name = data_str.split(' ')[0]
epoch_waste_time = data_str.split(' ')[2]
epoch_waste_dict[job_name] += int(epoch_waste_time)
elif 'b_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_b[job].append(int(time.time() - b_start[job]))
c_start[job] = time.time()
elif 'c_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_c[job].append(int(time.time() - c_start[job]))
d_start[job] = time.time()
elif 'd_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_d[job].append(int(time.time() - d_start[job]))
ovhd_total[job].append(int(time.time() - ovhd_start[job]))
if ovhd_start[job] != 0:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
if job in list(K80_job.values()):
K80_start_time[job] = time.time()
elif job in list(V100_job.values()):
V100_start_time[job] = time.time()
promote_start_time[job] = time.time()
elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
epoch_time = int(data_str.split(' ')[2])
if job in list(K80_job.values()):
k80_1st[job].append(epoch_time)
elif job in list(V100_job.values()):
v100_1st[job].append(epoch_time)
elif 'completion' in data_str: # 'job50 completion 0.33'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
completion_portion = float(data_str.split(' ')[2])
completion[job] = completion_portion
if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
if job in demote_list:
demote_list.remove(job)
################ check step1 finished job of K80 jobs and step 2 of V100 #################
check_step1_complete(list(V100_job.values()))
check_step2_complete(list(K80_job.values()))
for job in list(V100_job.values()):
if job not in qualified_job and job != 'idle':
x2 = 3600 / k80_time[job]
x1 = pwr_dict[job]
x3 = util_dict[job]
if x1 > 0:
if job in step1_job:
qualified_job.append(job)
print('job' + job + ' has been qualified for demotion')
speedup_pred = model.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
if all_jobs_started:
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
else:
promote_list = []
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
# look at demote list
for gpu, job in V100_job.items():
if job != 'idle':
if job not in demote_list and job in step2_job and len(ovhd_total[job]) > 0:
job_speedup = speedup_dict[job] # 0.7
job_ovhd = np.mean(ovhd_total[job]) # 100
k80_1st_ovhd = np.mean(k80_1st[job]) - K80_epoch_time[job]
v100_1st_ovhd = np.mean(v100_1st[job]) - V100_epoch_time[job]
demote_qualify_time = (2 * job_ovhd + k80_1st_ovhd + v100_1st_ovhd) / job_speedup
if int(time.time() - promote_start_time[job]) > demote_qualify_time:
demote_list.append(job)
print('job' + job + 'qualified for demote for passing demote qualify time ' +
str(int(demote_qualify_time)))
elif job not in demote_list and job not in step2_job and job in qualified_job:
demote_list.append(job)
print('job' + job + 'qualified for demote for profiling')
if len(promote_list) > 0 or len(demote_list) > 0:
if all_jobs_started:
promoted, demoted = max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote)
else:
promoted, demoted = min_speedup_demotion(K80_job, demote_list)
if len(promoted) > 0:
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
checkpoint_finish_check = []
for gpu, job in K80_job.items():
if job in promoted:
save_job(K80_node, job)
if finish_dict['job'+job] != 1:
K80_time[job] += int(time.time() - K80_start_time[job])
checkpoint_finish_check.append(job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
# make sure demoted step1 job doesn't get promoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 0.01
save_job(V100_node, job)
if finish_dict['job'+job] != 1:
V100_time[job] += int(time.time() - V100_start_time[job])
checkpoint_finish_check.append(job)
V100_job[gpu] = 'idle'
V100_used -= 1
demote_list.remove(job)
# wait for all GPUs to be available
if len(checkpoint_finish_check) > 0:
while True:
time.sleep(5)
for job in checkpoint_finish_check[:]:
if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free
print(job + ' checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
checkpoint_finish_check.remove(job)
# also check if job already finished before sending checkpoint signal
elif finish_dict['job'+job] == 1:
print(job + ' finished before receiving checkpoint signal')
checkpoint_finish_check.remove(job)
if len(checkpoint_finish_check) == 0:
break
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
V100_job[gpu] = job_new
resume_job(V100_node, gpu, job_new)
num_mig[job_new] += 1
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
print('job'+job_new+' has finished before checkpointing')
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if not all_jobs_started:
if V100_used < V100_cap:
V100_free = V100_cap - V100_used
for i in range(V100_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in V100_job.items():
if job == 'idle': # schedule new job here if idle
start_job(V100_node, gpu, job_new)
V100_job[gpu] = job_new
job_start[job_new] = time.time()
V100_start_time[job_new] = time.time()
index += 1
V100_used += 1
time.sleep(5) # don't communicate too often
break
elif index >= len(queue):
all_jobs_started = True
############## monitor GPU usage ############
usage = K80_used + V100_used
time_stamp = int(time.time() - queue_timer)
gpu_usage_time.append(time_stamp)
gpu_usage.append(usage)
total_completion = np.sum(list(completion.values()))
gpu_usage_completion.append(total_completion)
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
K80_time_name = testcase + '_K80_time.json'
V100_time_name = testcase + '_V100_time.json'
gpu_usage_name = testcase + '_gpu_usage.csv'
ovhd_a_name = testcase + '_ovhd_a.json'
ovhd_b_name = testcase + '_ovhd_b.json'
ovhd_c_name = testcase + '_ovhd_c.json'
ovhd_d_name = testcase + '_ovhd_d.json'
ovhd_total_name = testcase + '_ovhd_total.json'
k80_1st_name = testcase + '_k80_1st.json'
v100_1st_name = testcase + '_v100_1st.json'
speedup_name = 'speedup.json'
predict_name = 'predict.json'
demote_list_name = 'demote_list.json'
completion_name = 'completion.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
with open(K80_time_name, 'w') as fp3:
json.dump(K80_time, fp3, sort_keys=True, indent=4)
with open(V100_time_name, 'w') as fp3:
json.dump(V100_time, fp3, sort_keys=True, indent=4)
with open(ovhd_a_name, 'w') as fp3:
json.dump(ovhd_a, fp3, sort_keys=True, indent=4)
with open(ovhd_b_name, 'w') as fp3:
json.dump(ovhd_b, fp3, sort_keys=True, indent=4)
with open(ovhd_c_name, 'w') as fp3:
json.dump(ovhd_c, fp3, sort_keys=True, indent=4)
with open(ovhd_d_name, 'w') as fp3:
json.dump(ovhd_d, fp3, sort_keys=True, indent=4)
with open(ovhd_total_name, 'w') as fp3:
json.dump(ovhd_total, fp3, sort_keys=True, indent=4)
with open(k80_1st_name, 'w') as fp3:
json.dump(k80_1st, fp3, sort_keys=True, indent=4)
with open(v100_1st_name, 'w') as fp3:
json.dump(v100_1st, fp3, sort_keys=True, indent=4)
with open(speedup_name, 'w') as fp1:
json.dump(speedup_dict, fp1, sort_keys=True, indent=4)
with open(predict_name, 'w') as fp1:
json.dump(predict_dict, fp1, sort_keys=True, indent=4)
with open(demote_list_name, 'w') as fp1:
json.dump(demote_list, fp1, sort_keys=True, indent=4)
with open(completion_name, 'w') as fp1:
json.dump(completion, fp1, sort_keys=True, indent=4)
gpu_usage_time = np.asarray(gpu_usage_time)
gpu_usage = np.asarray(gpu_usage)
gpu_usage_completion = np.asarray(gpu_usage_completion)
rows = zip(gpu_usage_time, gpu_usage, gpu_usage_completion)
with open(gpu_usage_name, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
| [
"baolin.li1994@gmail.com"
] | baolin.li1994@gmail.com |
e8e63a744f6ac41e11ed04e35bb8e6e7bf646d29 | 6e98c8f8b0018dc834958feb65da165c7936522c | /Model/din_model/din_model/trainer/build_dataset.py | db150c9b659503c64eaa046551c7abb07d69d6ec | [
"Apache-2.0"
] | permissive | helenyu18/blue-marlin | 5c43bed76e07fa0393fc24f2e73ff81f0dba16a3 | 668985fad1993a682808e271610c1cf2cec6a6f5 | refs/heads/master | 2021-05-27T05:26:24.737665 | 2020-09-10T22:00:29 | 2020-09-10T22:00:29 | 294,529,284 | 0 | 0 | Apache-2.0 | 2020-09-10T21:44:43 | 2020-09-10T21:44:43 | null | UTF-8 | Python | false | false | 1,269 | py | import random
import pickle
# Fixed seed so shuffling and negative sampling are reproducible.
random.seed(1234)
# remap.pkl is expected to hold, in this order: the remapped reviews
# dataframe, the item->category list, and the dataset counts.
with open('../raw_data/remap.pkl', 'rb') as f:
reviews_df = pickle.load(f)
cate_list = pickle.load(f)
user_count, item_count, cate_count, example_count = pickle.load(f)
train_set = []
test_set = []
# For each user: every prefix of the positive item sequence yields one
# positive and one sampled negative training example; the final item
# becomes the held-out test example.
for reviewerID, hist in reviews_df.groupby('reviewerID'):
pos_list = pist = hist['asin'].tolist() if False else hist['asin'].tolist()
def gen_neg():
# Rejection-sample an item id the user has not interacted with.
neg = pos_list[0]
while neg in pos_list:
neg = random.randint(0, item_count-1)
return neg
neg_list = [gen_neg() for i in range(len(pos_list))]
for i in range(1, len(pos_list)):
hist = pos_list[:i]
if i != len(pos_list) - 1:
train_set.append((reviewerID, hist, pos_list[i], 1))
train_set.append((reviewerID, hist, neg_list[i], 0))
else:
# Last interaction: keep the (positive, negative) pair as the label.
label = (pos_list[i], neg_list[i])
test_set.append((reviewerID, hist, label))
random.shuffle(train_set)
random.shuffle(test_set)
# Exactly one test example per user.
assert len(test_set) == user_count
# assert(len(test_set) + len(train_set) // 2 == reviews_df.shape[0])
with open('dataset.pkl', 'wb') as f:
pickle.dump(train_set, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(test_set, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(cate_list, f, pickle.HIGHEST_PROTOCOL)
pickle.dump((user_count, item_count, cate_count), f, pickle.HIGHEST_PROTOCOL)
| [
"rezaa77@gmail.com"
] | rezaa77@gmail.com |
49f026a79eb728b120146d42fa4fc8130a140696 | b2c3636960d14ad06181a547d66556174b385b70 | /testchild.py | 531ab76983ef2212464d5b8d5267f8838f5e2b27 | [] | no_license | sarahmoakler/courseralab | 56ae261d1fe687af062948254bd8e8c9a4e5dc18 | cd50a0108a7a93959912797bea2576c3301e83fe | refs/heads/master | 2022-11-26T15:42:16.614391 | 2020-08-03T02:25:06 | 2020-08-03T02:25:06 | 284,572,779 | 0 | 0 | null | 2020-08-03T01:51:10 | 2020-08-03T01:12:59 | Python | UTF-8 | Python | false | false | 81 | py | ## Adding a new file in child branch (aka branch1)
# Bug fix: Python's built-in is lowercase `print`; `Print` raises NameError.
print("Inside child branch")
| [
"noreply@github.com"
] | noreply@github.com |
ad8293ee5c8a2ce590ec5cbe06b4991ee10eed3c | c1abd1ec3e6e5c899ca3a3d2759aef23ace4805e | /node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi | 107e703b19e8875cb98f93a3f247733be47c3172 | [
"MIT"
] | permissive | Dipinti3/MyNewTicTacToeApp | e619d03f9023eaff46a4db406562f09c2e3b88b7 | 81193fcc6f195c16d291c35cdcd24acc0b5ff7e6 | refs/heads/main | 2023-07-13T11:40:00.464866 | 2021-08-22T22:46:09 | 2021-08-22T22:46:09 | 398,915,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,977 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/dipinti/Library/Caches/node-gyp/14.16.1",
"standalone_static_library": 1,
"metrics_registry": "https://registry.npmjs.org/",
"globalconfig": "/usr/local/etc/npmrc",
"userconfig": "/Users/dipinti/.npmrc",
"init_module": "/Users/dipinti/.npm-init.js",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"save_exact": "true",
"cache": "/Users/dipinti/.npm",
"user_agent": "npm/7.20.3 node/v14.16.1 darwin x64 workspaces/false",
"prefix": "/usr/local"
}
}
| [
"dipinti@zipcodes-MBP.fios-router.home"
] | dipinti@zipcodes-MBP.fios-router.home |
511983bd248c99533c39d7438846d9925561842e | ed5654284f4ff05dface2761525ba8889abe6239 | /vacinebot/__main__.py | 63ee940bc1a012d8bea3f22a06f50c7494475c01 | [] | no_license | mikaelhadler/vvvacinasbot | 372d9b1197821dd7794f6a0fac24161c3036887f | d1514e96e6199448bb9a7df85fdaf381dd8546cf | refs/heads/main | 2023-06-06T06:05:28.712476 | 2021-06-26T14:39:15 | 2021-06-26T14:39:15 | 376,333,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | from . import bot
# Package entry point (`python -m <package>`): delegate to the sibling
# `bot` module imported at the top of the file.
if __name__ == '__main__':
bot.start()
"mikaelhadler@gmail.com"
] | mikaelhadler@gmail.com |
7388c9b72535ef113b908151ecfbd89687afebe6 | 447f7846f81cb330d986b9274cb5337709b3d9fb | /mysite/blog/migrations/0001_initial.py | ccdff7c619f56546924893ea9d66bb3d5faa8bad | [] | no_license | mjghorbany/django_files | 4a354b4605fe2f29bfde5444fa8a0b8acc78265b | 4eb5b15f4b178c524b3e83c559c0df19230843e0 | refs/heads/master | 2021-01-01T17:40:02.504897 | 2018-09-23T21:09:00 | 2018-09-23T21:09:00 | 98,127,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-20 03:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('body', models.TextField()),
('date', models.DateTimeField()),
],
),
]
| [
"jay.ghorbani@accenture.com"
] | jay.ghorbani@accenture.com |
3b79ecee53bab652825699f9a829541d12808883 | 53d22468fb1c9e0f4b4710a31fb7ac638549b8a7 | /src/episode_stats.py | 8c746bbebadc8b8367d5a5f0ae15a6bda7162cea | [
"MIT"
] | permissive | binderwang/drivebot | 768bcfe224d94b931c45c41ced2a1b0067c6417d | a8fb86731c52b7594dd135e8759622c29172b557 | refs/heads/master | 2020-12-14T09:48:59.857490 | 2016-05-03T03:17:58 | 2016-05-03T03:17:58 | 58,269,730 | 1 | 0 | null | 2016-05-07T14:33:18 | 2016-05-07T14:33:17 | null | UTF-8 | Python | false | false | 261 | py | #!/usr/bin/env python
import json
import sys
episode_id = 0
for line in sys.stdin:
episode = json.loads(line)
rewards = [event['reward'] for event in episode]
print "\t".join(map(str, [episode_id, len(episode), sum(rewards)]))
episode_id += 1
| [
"matthew.kelcey@gmail.com"
] | matthew.kelcey@gmail.com |
241d5f507cb5715b138b4fbeeafe3afc277ad81f | 6585f143d0208ff07aaaf0c0833be6b4366c7dc3 | /BackEnd/services/reimbursement_services.py | 48b0b2a6f2a57eb954604a5c888ccf73a706fa03 | [] | no_license | robertjgoette/project1-Revature | e6a49f73d7dbe6b1363f597625653c4fe3afd746 | 8ae755b9f93d7bc42c530485ced0d2df3f577873 | refs/heads/main | 2023-06-14T04:25:03.494637 | 2021-07-14T14:57:04 | 2021-07-14T14:57:04 | 379,026,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | from abc import ABC, abstractmethod
from typing import List
from entities.reimbursements import Reimbursement
class ReimbursementServices(ABC):
    """Abstract service layer defining the CRUD contract for reimbursements.

    Concrete subclasses supply the actual persistence/lookup logic.
    """

    # --- CREATE ---------------------------------------------------------
    @abstractmethod
    def post_reimbursement(self, reimbursement: Reimbursement, employee_id: int) -> Reimbursement:
        """Record a new reimbursement request filed by an employee."""

    # --- READ -----------------------------------------------------------
    @abstractmethod
    def get_all_reimbursement_employee(self, employee_id: int) -> List[Reimbursement]:
        """Return every reimbursement belonging to the given employee."""

    @abstractmethod
    def get_all_reimbursement(self) -> List[Reimbursement]:
        """Return every reimbursement in the system."""

    @abstractmethod
    def get_reimbursement(self, reimbursement_id: int) -> Reimbursement:
        """Return a single reimbursement by its id."""

    # --- UPDATE ---------------------------------------------------------
    @abstractmethod
    def put_reimbursement(self, reimbursement: Reimbursement, reviewer_id: int) -> Reimbursement:
        """Update an existing reimbursement on behalf of a reviewer."""
| [
"54007387+robertjgoette@users.noreply.github.com"
] | 54007387+robertjgoette@users.noreply.github.com |
2f8c03f052351b799bfba46a92f2566cc993aedd | 5181d3b3ef8fe301ea2d6b095260e9d327c2fd79 | /scripts/dl/download_hrrr.py | dad9ed84e463252c8a1b7b4fff6d35e96c53d1d1 | [] | no_license | danhreitz/iem | 88113ef9c9c4a2918c9c2abdfd0510d5ca4ec819 | ed490dcd6c2a8359f88cb805ccee8f6707566f57 | refs/heads/master | 2021-01-18T15:27:28.607250 | 2015-08-10T21:33:54 | 2015-08-10T21:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | """
Since the NOAAPort feed of HRRR data does not have radiation, we should
download this manually from NCEP
Run at 40 AFTER for the previous hour
"""
import urllib2
import sys
import datetime
import os
def fetch(valid):
""" Fetch the radiation data for this timestamp
80:54371554:d=2014101002:ULWRF:top of atmosphere:anl:
81:56146124:d=2014101002:DSWRF:surface:anl:
"""
# Download the GRIB index (.idx) listing byte offsets for each field.
uri = valid.strftime(("http://www.ftp.ncep.noaa.gov/data/nccf/"
"nonoperational/com/hrrr/prod/hrrr.%Y%m%d/hrrr.t%Hz."
"wrfprsf00.grib2.idx"))
data = urllib2.urlopen(uri, timeout=30)
# Each wanted field becomes a [start, end] byte-range pair; `neednext`
# means the following index line supplies the end offset.
offsets = []
neednext = False
for line in data:
tokens = line.split(":")
if neednext:
offsets[-1].append(int(tokens[1]))
neednext = False
# Radiation fields: upward longwave (TOA) and downward shortwave (sfc),
# per the sample index lines in the docstring above.
if tokens[3] in ['ULWRF', 'DSWRF']:
offsets.append([int(tokens[1]), ])
neednext = True
# Save soil temp and water at surface, 10cm and 40cm
if tokens[3] in ['TSOIL', 'SOILW']:
if tokens[4] in ['0-0 m below ground',
'0.01-0.01 m below ground',
'0.04-0.04 m below ground']:
offsets.append([int(tokens[1]), ])
neednext = True
outfn = valid.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/"
"%H/hrrr.t%Hz.3kmf00.grib2"))
outdir = os.path.dirname(outfn)
if not os.path.isdir(outdir):
os.makedirs(outdir, mode=0775) # make sure LDM can then write to dir
# Append mode: the selected GRIB messages accumulate into one file.
output = open(outfn, 'ab', 0664)
req = urllib2.Request(uri[:-4])
# Expect 8 fields: ULWRF + DSWRF + three levels each of TSOIL and SOILW.
if len(offsets) != 8:
print("download_hrrr_rad warning, found %s gribs for %s" % (
len(offsets), valid))
for pr in offsets:
# HTTP Range request extracts just this field from the full GRIB file
# (uri[:-4] strips the ".idx" suffix).
req.headers['Range'] = 'bytes=%s-%s' % (pr[0], pr[1])
f = urllib2.urlopen(req, timeout=30)
output.write(f.read())
output.close()
def main():
""" Go Main Go"""
# Default to the previous hour (UTC); an explicit "YYYY MM DD HH" on
# the command line overrides it.
ts = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
if len(sys.argv) == 5:
ts = datetime.datetime(int(sys.argv[1]), int(sys.argv[2]),
int(sys.argv[3]), int(sys.argv[4]))
fetch(ts)
if __name__ == '__main__':
# Group-writable output so other archive processes can manage the files.
os.umask(0002)
main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
708a1faf765b96d7f5a0505c9bf4c02d987ff8ba | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/highLimDly.py | bc4cf1c574d3d5599664567f174174735af07093 | [] | no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,294 | py | """highLimDly standard property type, originally defined in resource file set
standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:15.
import pylon.resources.datapoints.time_sec
from pylon.resources.standard import standard
class highLimDly(pylon.resources.datapoints.time_sec.time_sec):
    """highLimDly standard property type (SCPThighLimDly).

    High limit delay: the time limit during normal operation before the
    air-temperature high alarm is recognized.
    """

    def __init__(self):
        # Initialize the underlying time_sec datapoint first.
        super().__init__()
        # Two-byte zero default, the original resource-file name, and the
        # standard property scope/key for this property type.
        self._default_bytes = b'\x00\x00'
        self._original_name = 'SCPThighLimDly'
        self._property_scope = 0
        self._property_key = 124
        # Register with the standard resource set and keep the definition.
        self._definition = standard.add(self)
if __name__ == '__main__':
# unit test code.
# Smoke test: constructing the property also registers it via standard.add.
item = highLimDly()
pass
| [
"lcoppa@rocketmail.com"
] | lcoppa@rocketmail.com |
8e32b5ebb88f2444a7ee07ace862ba9c18be1ba7 | 1acd397ecc175e44c7f5c5314a5040dba06d76c4 | /libnmo.py | 8b4d7f657e05d0647a6d4233c8b451069e11f25c | [] | no_license | rolandshoemaker/nausicaa | affef176708a2c2c78866158694de971f9ae7511 | 6cbd759f15add49ea15acf6af4df51cfa4983d0d | refs/heads/master | 2021-01-10T23:27:51.789487 | 2015-02-25T10:18:09 | 2015-02-25T10:18:09 | 31,247,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,631 | py | # _
# (_)
# _ __ __ _ _ _ ___ _ ___ __ _ __ _
# | '_ \ / _` | | | / __| |/ __/ _` |/ _` |
# | | | | (_| | |_| \__ \ | (_| (_| | (_| |
# |_| |_|\__,_|\__,_|___/_|\___\__,_|\__,_|
#
# licensed under the MIT license <http://opensource.org/licenses/MIT>
import requests
import json
from urllib.parse import urlencode, quote
BASE_URL = "https://rest.nexmo.com"
RESP_TYPE = "json"
def send_request(url, method="post", url_args=None, json_obj=None):
    """Issue an HTTP GET or POST to *url* and return the decoded JSON body.

    `url_args` is sent as query parameters in both cases (`json_obj` is
    currently unused).  Raises ValueError on any non-200 status.  Any
    other `method` value returns None, matching the original behaviour.
    """
    if method == "get":
        response = requests.get(url, params=url_args)
    elif method == "post":
        # NOTE: the payload still travels as query parameters, not a body.
        response = requests.post(url, params=url_args)
    else:
        return None
    if response.status_code != 200:
        raise ValueError  # TODO: raise a more descriptive exception
    return response.json()
class Nexmo(object):
    """Thin client for the Nexmo REST API.

    Holds the account key/secret and caches the current balance and the
    list of owned numbers; both are refreshed at construction time.
    """

    key = None
    secret = None
    balance = None # in euros...
    numbers = None

    def __init__(self, key, secret):
        """Store credentials, then prime balance and numbers from the API."""
        if not key or not secret:
            raise ValueError # but actually something better...
        self.key = key
        self.secret = secret
        self.update_balance()
        self.update_numbers()

    def update_balance(self):
        """Refresh the cached account balance (in euros)."""
        resp = send_request("%s/account/get-balance" % (BASE_URL), method="get", url_args={"api_key": self.key, "api_secret": self.secret})
        self.last_balance = resp["value"]
        self.balance = resp["value"]

    def update_numbers(self, index=1, size=10, pattern=None, search_pattern=None):
        """Refresh the cached list of numbers owned by this account."""
        req_args = {
            "api_key": self.key,
            "api_secret": self.secret,
            "index": index,
            "size": size
        }
        # Bug fix: these optional filters were written to an undefined name
        # `req`, raising NameError whenever a pattern was supplied.
        if pattern:
            req_args["pattern"] = pattern
        if search_pattern:
            req_args["search_pattern"] = search_pattern
        resp = send_request("%s/account/numbers" % (BASE_URL), method="get", url_args=req_args)
        self.numbers = [NexmoNumber(n["msisdn"], n["type"], n["country"], n["features"]) for n in resp["numbers"]]

    def send_msg(self, message):
        """POST a prepared message dict (see NexmoMsg), refresh the balance,
        and return the API's per-part "messages" status list."""
        message["api_key"] = self.key
        message["api_secret"] = self.secret
        resp = send_request("%s/sms/%s" % (BASE_URL, RESP_TYPE), url_args=message)
        self.update_balance()
        return resp["messages"]
class NexmoNumber(object):
    """Plain record describing one phone number provisioned on the account."""

    msisdn = None
    n_type = None
    country = None
    features = None

    def __init__(self, msisdn, n_type, country, features):
        """Capture the number (msisdn), its type, country and feature list."""
        (self.msisdn, self.n_type,
         self.country, self.features) = (msisdn, n_type, country, features)
# msg = {
# "from": send_from,
# "to": send_to,
# "type": "text", # "unicode",
# "text": body, # body of the message if a text (not binary/wap)
# "status-report-req": 0, # DLR
# "client-ref": "libnmo", # who diddit
# "network-code": "", # specific network, MCCMNC
# "vcard": "", # vcard body
# "vcal": "", # vcal body
# "ttl": 0, # message 'life span'?
# "message-class": 0, # set to zero for FLASH msg
# # binary spec
# "body": 0, # hex encoded binary
# "udh": 0 # hex encoded udh (idk what that is...)
# }
class NexmoMsg(object):
    """Factory helpers that build Nexmo SMS request dictionaries."""

    @staticmethod
    def new_text(send_from, send_to, body, client_ref=None, status_report_req=False, flash_message=False):
        """Build the parameter dict for a text SMS.

        Raises ValueError when *body* exceeds 3200 characters.  The
        message type is "text" for pure-ASCII bodies, "unicode" otherwise.
        """
        if len(body) > 3200:
            raise ValueError  # TODO: raise a more descriptive exception
        # ASCII bodies go out as plain "text"; anything else needs the
        # "unicode" message type.
        try:
            body.encode("ascii")
        except UnicodeEncodeError:
            text_type = "unicode"
        else:
            text_type = "text"
        msg = {
            "from": send_from,
            "to": send_to,
            "type": text_type,
            "text": body,
            # Caller-supplied reference, falling back to this library's tag.
            "client-ref": client_ref if client_ref else "libnmo",
        }
        if status_report_req:
            msg["status-report-req"] = 1
        if flash_message:
            # message-class 0 requests a flash-style message.
            msg["message-class"] = 0
        return msg
| [
"rolandshoemaker@gmail.com"
] | rolandshoemaker@gmail.com |
840342402fb7db880e69b4c0da9de7e57a02dca5 | 1d1aec410c9cd56fd9df320a192caed6277e4487 | /props/migrations/0012_auto_20180317_1505.py | 7930012233d858ad99cec3b519b2ec2e6b665160 | [] | no_license | mephistophyles/gogetmylunch | f86bf1b695040ffa99fe429e12482a1a86d06e41 | c2bfef4b3b154fc3e4ae9cae8c8cccc3336bc428 | refs/heads/master | 2021-04-09T16:10:32.558868 | 2018-03-17T18:38:44 | 2018-03-17T18:38:44 | 125,658,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # Generated by Django 2.0.3 on 2018-03-17 15:05
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('props', '0011_auto_20180317_1503'),
]
operations = [
migrations.AlterField(
model_name='prop',
name='date_added',
# NOTE(review): the default is a timestamp frozen at makemigrations
# time — the model likely used default=datetime.now() (called) where
# a callable such as timezone.now was intended.
field=models.DateTimeField(default=datetime.datetime(2018, 3, 17, 15, 5, 15, 139601)),
),
]
| [
"mephistophyles@gmail.com"
] | mephistophyles@gmail.com |
c3af127904d957a29958033e8898da66cbee1238 | 70ed82598c7ae19dc3de4a3a8400e9767b8a74b0 | /Net/BaseNet/ResNet/fine_tuning_2.py | a5c28f115867e33b9eb23304dfaf71d8d7a0216b | [] | no_license | UpCoder/MedicalImage | f255922b988392cd4c3a90715fb945ee20edb3b4 | 34c11562658e6f362ee7eb53740ba96209a22d45 | refs/heads/master | 2021-01-19T16:59:13.251726 | 2017-12-04T14:55:32 | 2017-12-04T14:55:32 | 101,031,357 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,832 | py | # -*- coding: utf-8 -*-
# 使用patch训练好的模型,来对ROI进行微调
from resnet import inference_small, loss
import tensorflow as tf
from Config import Config as sub_Config
from Slice.MaxSlice.MaxSlice_Resize import MaxSlice_Resize
from tensorflow.examples.tutorials.mnist import input_data
from Tools import changed_shape, calculate_acc_error, acc_binary_acc
import numpy as np
from Patch.ValData import ValDataSet
from Patch.Config import Config as patch_config
def train(train_data_set, val_data_set, load_model_path, save_model_path):
"""Build the small ResNet graph and run the fine-tuning loop.

Feeds batches from ``train_data_set``, validates on ``val_data_set``
every 100 steps, logs TensorBoard summaries for both, and writes a
checkpoint every 100 steps when ``save_model_path`` is given.
``load_model_path`` (optional) restores weights before training.
Python 2 / TensorFlow 1.x graph-mode code.
"""
# Image batch placeholder: [batch, W, H, channels].
x = tf.placeholder(
tf.float32,
shape=[
None,
sub_Config.IMAGE_W,
sub_Config.IMAGE_H,
sub_Config.IMAGE_CHANNEL
],
name='input_x'
)
# Class labels, fed as float32 and cast to int where needed.
y_ = tf.placeholder(
tf.float32,
shape=[
None,
]
)
tf.summary.histogram(
'label',
y_
)
global_step = tf.Variable(0, trainable=False)
# variable_average = tf.train.ExponentialMovingAverage(
# sub_Config.MOVING_AVERAGE_DECAY,
# global_step
# )
# vaeriable_average_op = variable_average.apply(tf.trainable_variables())
# regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
is_training = tf.placeholder('bool', [], name='is_training')
# NOTE(review): flags are defined inside the function, so calling
# train() twice in one process would redefine them.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
'where to store the dataset')
tf.app.flags.DEFINE_boolean('use_bn', True, 'use batch normalization. otherwise use biases')
y = inference_small(x, is_training=is_training,
num_classes=sub_Config.OUTPUT_NODE,
use_bias=FLAGS.use_bn,
num_blocks=3)
tf.summary.histogram(
'logits',
tf.argmax(y, 1)
)
loss_ = loss(
logits=y,
labels=tf.cast(y_, np.int32)
)
tf.summary.scalar(
'loss',
loss_
)
# Plain SGD on the cross-entropy loss; global_step increments per step.
train_op = tf.train.GradientDescentOptimizer(
learning_rate=sub_Config.LEARNING_RATE
).minimize(
loss=loss_,
global_step=global_step
)
# with tf.control_dependencies([train_step, vaeriable_average_op]):
# train_op = tf.no_op(name='train')
with tf.variable_scope('accuracy'):
# Fraction of the batch whose argmax prediction matches the label.
accuracy_tensor = tf.reduce_mean(
tf.cast(
tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
tf.float32
)
)
tf.summary.scalar(
'accuracy',
accuracy_tensor
)
saver = tf.train.Saver()
merge_op = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if load_model_path:
saver.restore(sess, load_model_path)
writer = tf.summary.FileWriter('./log/fine_tuning/train', tf.get_default_graph())
val_writer = tf.summary.FileWriter('./log/fine_tuning/val', tf.get_default_graph())
for i in range(sub_Config.ITERATOE_NUMBER):
images, labels = train_data_set.get_next_batch(sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
# Reshape the raw batch to the placeholder's 4-D layout.
images = changed_shape(images, [
len(images),
sub_Config.IMAGE_W,
sub_Config.IMAGE_W,
sub_Config.IMAGE_CHANNEL
])
_, loss_value, accuracy_value, summary, global_step_value = sess.run(
[train_op, loss_, accuracy_tensor, merge_op, global_step],
feed_dict={
x: images,
y_: labels
}
)
writer.add_summary(
summary=summary,
global_step=global_step_value
)
if i % 100 == 0 and i != 0 and save_model_path is not None:
# Save the model (original note said: for the five-class task,
# checkpoint every 500 steps; here the condition is every 100).
import os
save_path = os.path.join(save_model_path, str(global_step_value))
if not os.path.exists(save_path):
os.mkdir(save_path)
save_path += '/model.ckpt'
print 'mode saved path is ', save_path
saver.save(sess, save_path)
if i % 100 == 0:
# Periodic validation pass (get_next_batch() with its defaults).
validation_images, validation_labels = val_data_set.get_next_batch()
# NOTE(review): training reshape uses IMAGE_CHANNEL but validation
# hard-codes a single channel — confirm this asymmetry is intended.
validation_images = changed_shape(
validation_images,
[
len(validation_images),
sub_Config.IMAGE_W,
sub_Config.IMAGE_W,
1
]
)
validation_accuracy, validation_loss, summary, logits = sess.run(
[accuracy_tensor, loss_, merge_op, y],
feed_dict={
x: validation_images,
y_: validation_labels
}
)
calculate_acc_error(
logits=np.argmax(logits, 1),
label=validation_labels,
show=True
)
binary_acc = acc_binary_acc(
logits=np.argmax(logits, 1),
label=validation_labels,
)
val_writer.add_summary(summary, global_step_value)
print 'step is %d,training loss value is %g, accuracy is %g ' \
'validation loss value is %g, accuracy is %g, binary_acc is %g' % \
(global_step_value, loss_value, accuracy_value, validation_loss, validation_accuracy, binary_acc)
writer.close()
val_writer.close()
if __name__ == '__main__':
# Fine-tune on the ART phase with binary (2-class) ROI data.
phase_name = 'ART'
state = ''
traindatapath = '/home/give/Documents/dataset/MedicalImage/MedicalImage/ROI/train'
valdatapath = '/home/give/Documents/dataset/MedicalImage/MedicalImage/ROI/val'
# NOTE(review): ValDataSet is used for both splits; only data_path differs.
val_dataset = ValDataSet(new_size=[sub_Config.IMAGE_W, sub_Config.IMAGE_H],
phase=phase_name,
category_number=2,
shuffle=True,
data_path=valdatapath
)
train_dataset = ValDataSet(new_size=[sub_Config.IMAGE_W, sub_Config.IMAGE_H],
phase=phase_name,
category_number=2,
data_path=traindatapath,
shuffle=True,
)
# Train from scratch (no checkpoint restored) and save under 2-128.
train(
train_dataset,
val_dataset,
load_model_path=None,
save_model_path='/home/give/PycharmProjects/MedicalImage/Net/BaseNet/ResNet/models/fine_tuning/2-128/'
)
"546043882@qq.com"
] | 546043882@qq.com |
d2a1e7580caf6d8657185392e0de2ecb545ef47b | bbe4d72283a60757c071f309d6bad2395ba27b3e | /PA_stage_test.py | 8d33f46c20263dbcc0c46b544a1ad8de971f642a | [] | no_license | subham2pgr/PythonRestApiGet-Put-Post | f4a6e1f7d6e621ecc9539f00369aba9fd0240e10 | c9501856e831377e2170857e3f91a02c99f8c2a3 | refs/heads/master | 2020-11-24T20:03:37.260050 | 2019-12-16T07:06:28 | 2019-12-16T07:06:28 | 228,323,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,141 | py | import subprocess
import shlex
import app_list as wb
from pathlib import Path
import datetime
import difflib
import sys
from itertools import izip
import glob
import os
import time
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import yaml
import wget
import tarfile
app_id_dict={
"206":"2.2.2.2",
"1111":"2.2.2.3",
"1143":"2.2.2.4"
}
#Function to test the PA stage PUT,POST and GET and dig cmd using changed IP
def PA_stage_test():
"""Exercise the PA stage aggregator for each app id: PUT the changed IP,
POST a pattern deploy, GET the patterns back, then verify end to end
with a `dig` lookup against the staging DNS.  All four command outputs
are written to a timestamped log file.  (Python 2 — note iteritems.)
"""
date_time=datetime.datetime.now()
# Log file named after the current timestamp.
date_time="/appctrl/t/test/log/PAlog/"+str(date_time)+".log"
Path(date_time).touch()
my_file=open(date_time,"w")
for k,v in app_id_dict.iteritems():
# NOTE(review): the API key is hard-coded below — consider moving it
# to configuration/secret storage.
postcmd='curl -s -H "X-API-Key: mPXWeT7Q2S9rD7Fo9gZwL8xBB0M66UMO970zQe7g" https://stage-aggregator.rvbd-staging.cloudns.cc/patterns -X POST'
getcmd= 'curl -s https://stage-aggregator.rvbd-staging.cloudns.cc/patterns'
api="\"X-API-Key: mPXWeT7Q2S9rD7Fo9gZwL8xBB0M66UMO970zQe7g\""
url=" https://stage-aggregator.rvbd-staging.cloudns.cc/applications/"+k+" -H "
content="\'Content-Type: application/json\'"
key="\"ipv4s\""
value="\""+v+"\""
# JSON payload: {"ipv4s": ["<ip>"]}
data="\'{"+key+": ["+value+"]}\'"
putcmd="curl -s -H "+api+url+content+" -X PUT --data "+data
# PUT the new IP for this application id.
putproc=subprocess.Popen(shlex.split(putcmd),stdout=subprocess.PIPE)
putout,puterr=putproc.communicate()
# POST triggers a pattern deploy.
postproc=subprocess.Popen(shlex.split(postcmd),stdout=subprocess.PIPE)
postout,posterr=postproc.communicate()
# GET reads the deployed patterns back.
getproc=subprocess.Popen(shlex.split(getcmd),stdout=subprocess.PIPE)
getout,geterr=getproc.communicate()
# End-to-end check: dig a synthetic AAAA record embedding the new IP.
s2=v
s1="dig www.abc.com"
s3=".80.6."+s2+".v0.appcs.x.riverbed.cc AAAA @pastage.appcs.x.riverbed.cc +short"
cmd=s1+s3
proc=subprocess.Popen(shlex.split(cmd),stdout=subprocess.PIPE)
out,err=proc.communicate()
# Record all four command outputs in the log.
my_file.write("PA stage testing for app_id"+str(k)+"\n")
my_file.write("PUT\n")
s=putout+"\n"
my_file.write(s)
my_file.write("Deploy\n")
s=postout+"\n"
my_file.write(s)
my_file.write("get patteren\n")
s=getout+"\n"
my_file.write(s)
my_file.write("end_to_end\n")
s=out+"\n"
my_file.write(s)
my_file.close()
| [
"noreply@github.com"
] | noreply@github.com |
8e716bc61206410842d3270ed425d9b7d359e9bc | a4cf5f3604524e69fba5bee878821e614381346a | /learning_log/learning_log/urls.py | 453163c8fcbf9349a280b7b4c47cb731ab1a5832 | [] | no_license | pray-lee/learn_python | 8b21c1abefce52f2a20117f132f6e32b06281386 | e9e23bc84dd3f12ece09413429d711fc0b764b51 | refs/heads/master | 2020-03-29T06:21:57.061295 | 2019-09-11T08:49:20 | 2019-09-11T08:49:20 | 149,621,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | """learning_log URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Project URL routing: the Django admin, plus the learning_logs app
# mounted at the site root under the "learning_logs" namespace.
urlpatterns = [
path('admin/', admin.site.urls),
path(r'', include('learning_logs.urls', namespace='learning_logs'))
]
| [
"praycis@gmail.com"
] | praycis@gmail.com |
b753599b025bf0d84f2a9458efebc481af45b145 | 4d9a17d0cbff98971f4ec6a1ace3a8fb64b7676a | /mysite/home/migrations/0003_homepage_body.py | d3227f2be2141fa7db66a1bec71f443b3d22cb89 | [] | no_license | fukiko2020/Wagtail-Practice | 982701bb69cb8c80318ed3b48fedfa2bc87c3b50 | ca163a005d129f26c2a0639e46fc4bd50e5f74c1 | refs/heads/main | 2023-03-09T12:30:28.724399 | 2021-02-13T12:17:34 | 2021-02-13T12:17:34 | 337,383,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # Generated by Django 3.1.6 on 2021-02-11 06:41
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0002_create_homepage'),
]
operations = [
# Adds the optional rich-text `body` field to the homepage model.
migrations.AddField(
model_name='homepage',
name='body',
field=wagtail.core.fields.RichTextField(blank=True),
),
]
| [
"kinako.fukiko.0822@gmail.com"
] | kinako.fukiko.0822@gmail.com |
704f23bbe8f0762782de27bee7242837f7c788fb | 46dece1fe6206de56bf631f5885fe196801f19ce | /venv/bin/flask | b98ab7db6d759ef705431f69f582bf2e3244f0ab | [] | no_license | michaelusim/Sentiment_analyzer | c9dca592216048395d512d0f8a4823d3d3d81375 | 3b1e9deb9bf5e7afca6338d0e879d8280d2b957d | refs/heads/master | 2020-04-19T01:55:24.903326 | 2019-01-27T15:49:34 | 2019-01-27T15:49:34 | 167,886,079 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/Users/monyeije16/PycharmProjects/NewWebBasedGui/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
# Console-script shim generated for the `flask` command.
if __name__ == '__main__':
# Strip the "-script.pyw"/".exe" wrapper suffix so argv[0] reads "flask".
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"monyeije16@saintpeters.edu"
] | monyeije16@saintpeters.edu | |
f6b0c0ebfcfea1688b03ec725be8faebb3cbbbee | 2598f255696842f043372dd68fe4d5fd48d1a41c | /Ofelia/expedient/src/python/expedient/clearinghouse/users/views.py | 5bd342d561ba8106b5c71655fbdfedc0cbb0a6c3 | [
"BSD-3-Clause"
] | permissive | zanetworker/C-BAS | 8e5442df83626e95d9562497278869ee3c4fad51 | 695c6f72490a02bbb308d44526631dbf426ab900 | refs/heads/master | 2021-01-01T06:55:39.085086 | 2014-08-11T09:37:42 | 2014-08-11T09:37:42 | 22,351,372 | 1 | 0 | null | 2014-08-08T16:15:54 | 2014-07-28T17:28:44 | Python | UTF-8 | Python | false | false | 7,668 | py | '''
Created on Dec 3, 2009
@author: jnaous
'''
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, HttpResponseNotAllowed
from django.core.urlresolvers import reverse
from expedient.clearinghouse import users
from django.views.generic import create_update, simple
from django.contrib import auth
from expedient.common.permissions.shortcuts import must_have_permission,\
give_permission_to
from registration import views as registration_views
from expedient.clearinghouse.users.forms import FullRegistrationForm
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.views import password_reset
from expedient.clearinghouse.users.forms import LDAPPasswordResetForm
def home(request):
    '''show list of users and form for adding users

    GET renders the page; POST validates the three bound forms
    (password, user info, profile), creates the user, and redirects to
    the "users_saved" view.  Any other HTTP method gets a 405 response.
    '''
    must_have_permission(request.user, User, "can_manage_users")
    user_list = auth.models.User.objects.all().order_by('username')
    if request.method == "GET":
        pwd_form = auth.forms.UserCreationForm()
        user_form = users.forms.UserForm()
        userprofile_form = users.forms.UserProfileForm()
    elif request.method == "POST":
        pwd_form = auth.forms.UserCreationForm(request.POST)
        user_form = users.forms.UserForm(request.POST)
        userprofile_form = users.forms.UserProfileForm(request.POST)
        # check that all data is valid
        if pwd_form.is_valid() and user_form.is_valid() and userprofile_form.is_valid():
            # create the user first
            user = pwd_form.save()
            # use the user to save the user info
            user_form = users.forms.UserForm(request.POST, instance=user)
            user = user_form.save()
            # now store the user profile
            up = users.models.UserProfile(user=user)
            userprofile_form = users.forms.UserProfileForm(request.POST, instance=up)
            userprofile_form.save()
            return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
    else:
        # Bug fix: HttpResponseNotAllowed takes a *list* of permitted
        # methods (used to build the Allow header).  The original passed
        # two positional strings, so "GET" was iterated char-by-char and
        # "POST" became the response body.
        return HttpResponseNotAllowed(["GET", "POST"])
    return simple.direct_to_template(
        request,
        template='expedient/clearinghouse/users/home.html',
        extra_context={
            'user_list': user_list,
            'pwd_form': pwd_form,
            'user_form': user_form,
            'userprofile_form': userprofile_form,
            'breadcrumbs': (
                ("Home", reverse("home")),
                ("Manage users", request.path),
            )
        },
    )
def detail(request, user_id=None):
if user_id == None:
user = request.user
else:
user = get_object_or_404(auth.models.User, pk=user_id)
must_have_permission(request.user, user, "can_edit_user")
profile = users.models.UserProfile.get_or_create_profile(user)
if request.method == "GET":
if user_id == None:
pwd_form = users.forms.PasswordChangeFormDisabled(user)
else:
pwd_form = users.forms.AdminPasswordChangeFormDisabled(user)
user_form = users.forms.UserForm(instance=user)
userprofile_form = users.forms.UserProfileForm(instance=profile)
elif request.method == "POST":
if request.POST.get("change_pwd", False):
data = request.POST
else:
data = None
if user_id == None:
pwd_form = users.forms.PasswordChangeFormDisabled(user, data)
else:
pwd_form = users.forms.AdminPasswordChangeFormDisabled(user, data)
user_form = users.forms.UserForm(request.POST, instance=user)
userprofile_form = users.forms.UserProfileForm(request.POST, instance=profile)
if user_form.is_valid() and userprofile_form.is_valid():
user = user_form.save()
userprofile_form = users.forms.UserProfileForm(request.POST, instance=profile)
userprofile_form.save()
if request.POST.get("change_pwd", False) and pwd_form.is_valid():
pwd_form.save()
return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
elif "change_pwd" not in request.POST:
return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
else:
return HttpResponseNotAllowed("GET", "POST")
try:
slice_set = user.slice_set.all()
except AttributeError:
slice_set = ()
return simple.direct_to_template(
request,
template='expedient/clearinghouse/users/detail.html',
extra_context={
'curr_user': user,
'slices': slice_set,
'pwd_form': pwd_form,
'user_form': user_form,
'show_owner': True,
'userprofile_form': userprofile_form,
'breadcrumbs': (
("Home", reverse("home")),
("Account for %s" % user.username, reverse("users_detail", args=[user.id])),
)
},
)
def saved(request, user_id):
user = get_object_or_404(auth.models.User, pk=user_id)
print user.id
return simple.direct_to_template(
request,
template='expedient/clearinghouse/users/saved.html',
extra_context={
'curr_user': user,
},
)
def delete(request, user_id):
user = get_object_or_404(auth.models.User, pk=user_id)
must_have_permission(request.user, user, "can_edit_user")
return create_update.delete_object(
request,
auth.models.User,
reverse("users_home"),
user_id,
template_name="expedient/clearinghouse/users/confirm_delete.html",
)
def register(request):
try:
return registration_views.register(
request,
form_class=FullRegistrationForm)
except Exception as e:
print "[ERROR] Exception at 'expedient.clearinghouse.users.views': user '%s' (%s) could not fully register. RegistrationForm module returned: %s" % (request.POST['username'], request.POST['email'], str(e))
return simple.direct_to_template(
request,
template='registration/registration_incomplete.html',
extra_context={
'exception': e,
'root_email': settings.ROOT_EMAIL,
'failed_username': request.POST['username'],
'failed_email': request.POST['email'],
},
)
def activate(request, activation_key):
template_name = 'registration/activate.html'
activation_key = activation_key.lower() # Normalize before trying anything with it.
# Import only here to avoid every time warning 'DeprecationWarning:
# the sha module is deprecated; use the hashlib module instead'
from registration.models import RegistrationProfile
account = RegistrationProfile.objects.activate_user(activation_key)
if account:
give_permission_to(
"can_edit_user", account, account, can_delegate=True)
return simple.direct_to_template(
request,
template=template_name,
extra_context={
'account': account,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
},
)
def my_password_reset(request):
if request.method == 'GET':
return password_reset(request)
else:
email = request.POST['email']
users = User.objects.filter(email = email)
if len(users) == 1 and users[0].password == '!':
return HttpResponseRedirect(settings.OFREG_URL+settings.OFREG_RESET_PATH)
else:
return password_reset(request, password_reset_form=LDAPPasswordResetForm)
| [
"umar.toseef@eict.de"
] | umar.toseef@eict.de |
b64bbcb9cad8e4e00bda92e555abe40da2d52fc3 | 12f6f701f4d7f2dccde9c72763d5614d2987d68f | /QAGAN-NIQE/STL10_NIQE_GP.py | 52c5ab9aee07143be8cf9b0cb54fc31186a9f492 | [] | no_license | parimala1175/QAGANS-Quality-Aware-Generative-Adversarial-Networks. | 6ce383b9d74b13e5ab7a1499d4106799fd4bab34 | dd1a1ddf13c16689e1cd1b5b954b9bd3e9d5b910 | refs/heads/master | 2022-04-16T13:28:59.125626 | 2020-04-15T06:50:51 | 2020-04-15T06:50:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,834 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import division, print_function, absolute_import
import cPickle as pickle
import os
import urllib
from glob import glob
import numpy as np
import tarfile
import pickle
from tensorflow.python.platform import gfile
import tensorflow as tf
from tensordata.augmentation import random_flip
import adler.tensorflow as atf
import sys
from NIQE_penalty import *
slim = tf.contrib.slim
import tensorflow as tf
import numpy as np
import tensordata
import functools
############ Hyper parameters #################################################################
MAX_ITERS = 1000000
SUMMARY_FREQ = 10
BATCH_SIZE = 64
RESET = True
sav_freq = 10
size = 48
lambda_1 = 1.0
lambda_2 = 0.1
stability_regularizer_factor = 1e-5
############################## creating the folders for storing logs and check points ##########
name = './STL_NIQE_checkpoints'
log_dir = './STL_NIQE_logs'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(name):
os.makedirs(name)
############################ Interactive session intialization #################################
np.random.seed(0)
tf.set_random_seed(0)
sess = tf.InteractiveSession()
input_fname_pattern = '*.png'
############## input data path ###################################################################
data_x = glob(os.path.join("/home/parimala/Aa_current/Current_GAN_work/SSIM_Expicit_regularization/WGAN_GP_SSIM_STL_architecture_96_/img_unlabeled_48/", input_fname_pattern))
def read_input(batch_size):
with tf.device('/cpu:0'):
reader = tf.WholeFileReader()
filename_queue = tf.train.string_input_producer(data_x)
data_num = len(data_x)
key, value = reader.read(filename_queue)
image = tf.image.decode_jpeg(value, channels=3, name="dataset_image")
image = tf.image.resize_images(image, [48, 48], method=tf.image.ResizeMethod.BICUBIC)
img_batch = tf.train.batch([image],
batch_size=batch_size
)
img_batch = (tf.cast(img_batch, tf.float32) / 256.0)
return img_batch, data_num
################### reading the dataset into x_train ############################################
with tf.name_scope('placeholders'):
x_train_ph, _ = read_input(batch_size=BATCH_SIZE)
with tf.name_scope('pre_process'):
x_train = (x_train_ph - 0.5) * 2.0
x_true = x_train
##################### All the required building blocks for the architecture ######################
### convolution operation #############################################################
def apply_conv(x, filters=32, kernel_size=3, he_init=True):
if he_init:
initializer = tf.contrib.layers.variance_scaling_initializer(uniform=True)
else:
initializer = tf.contrib.layers.xavier_initializer(uniform=True)
return tf.layers.conv2d(x, filters=filters, kernel_size=kernel_size,
padding='SAME', kernel_initializer=initializer)
################# activation function ############################################################
def activation(x):
with tf.name_scope('activation'):
return tf.nn.relu(x)
#################################### batch normalization #########################################
def bn(x):
return tf.contrib.layers.batch_norm(x,
decay=0.9,
center=True,
scale=True,
epsilon=1e-5,
zero_debias_moving_mean=True,
is_training=True)
############################## stable normalization #############################################
def stable_norm(x, ord):
x = tf.contrib.layers.flatten(x)
alpha = tf.reduce_max(tf.abs(x) + 1e-5, axis=1)
result = alpha * tf.norm(x / alpha[:, None], ord=ord, axis=1)
return result
############################## down sampling the layer ##########################################
def downsample(x):
with tf.name_scope('downsample'):
x = tf.identity(x)
return tf.add_n([x[:,::2,::2,:], x[:,1::2,::2,:],
x[:,::2,1::2,:], x[:,1::2,1::2,:]]) / 4.
############################ up sampling the layer ###############################################
def upsample(x):
with tf.name_scope('upsample'):
x = tf.identity(x)
x = tf.concat([x, x, x, x], axis=-1)
return tf.depth_to_space(x, 2)
############################# convolution mean pool ################################
def conv_meanpool(x, **kwargs):
return downsample(apply_conv(x, **kwargs))
def meanpool_conv(x, **kwargs):
return apply_conv(downsample(x), **kwargs)
def upsample_conv(x, **kwargs):
return apply_conv(upsample(x), **kwargs)
############################## res block ############################################
def resblock(x, filters, resample=None, normalize=False):
if normalize:
norm_fn = bn
else:
norm_fn = tf.identity
if resample == 'down':
conv_1 = functools.partial(apply_conv, filters=filters)
conv_2 = functools.partial(conv_meanpool, filters=filters)
conv_shortcut = functools.partial(conv_meanpool, filters=filters,
kernel_size=1, he_init=False)
elif resample == 'up':
conv_1 = functools.partial(upsample_conv, filters=filters)
conv_2 = functools.partial(apply_conv, filters=filters)
conv_shortcut = functools.partial(upsample_conv, filters=filters,
kernel_size=1, he_init=False)
elif resample == None:
conv_1 = functools.partial(apply_conv, filters=filters)
conv_2 = functools.partial(apply_conv, filters=filters)
conv_shortcut = tf.identity
with tf.name_scope('resblock'):
x = tf.identity(x)
update = conv_1(activation(norm_fn(x)))
update = conv_2(activation(norm_fn(update)))
skip = conv_shortcut(x)
return skip + update
################# Resblock optimized ##################################################################
def resblock_optimized(x, filters):
with tf.name_scope('resblock'):
x = tf.identity(x)
update = apply_conv(x, filters=filters)
update = conv_meanpool(activation(update), filters=filters)
skip = meanpool_conv(x, filters=128, kernel_size=1, he_init=False)
return skip + update
######################################## Generator architecture using the Resnet layers #################
def generator(z, reuse):
with tf.variable_scope('generator', reuse=reuse):
with tf.name_scope('pre_process'):
z = tf.layers.dense(z, 6 * 6 * 128)
x = tf.reshape(z, [-1, 6, 6, 128])
with tf.name_scope('x1'):
x = resblock(x, filters=128, resample='up', normalize=True) # 12
x = resblock(x, filters=128, resample='up', normalize=True) # 24
x = resblock(x, filters=128, resample='up', normalize=True) # 48
with tf.name_scope('post_process'):
x = activation(bn(x))
result = apply_conv(x, filters=3, he_init=False)
return tf.tanh(result)
########################## The discriminator architecture using the Resnet layers ##################
def discriminator(x, reuse):
with tf.variable_scope('discriminator', reuse=reuse):
with tf.name_scope('pre_process'):
x2 = resblock_optimized(x, filters=128)
with tf.name_scope('x1'):
x3 = resblock(x2, filters=128, resample='down')
x4 = resblock(x3, filters=128)
x5 = resblock(x4, filters=128)
with tf.name_scope('post_process'):
x6 = activation(x5)
x7 = tf.reduce_mean(x6, axis=[1, 2])
flat2 = tf.contrib.layers.flatten(x7)
flat = tf.layers.dense(flat2, 1)
return flat
######################################################################################################
with tf.name_scope('gan'):
z = tf.random_normal([tf.shape(x_true)[0], 128], name="z")
x_generated = generator(z, reuse=False)
d_true = discriminator(x_true, reuse=False)
d_generated = discriminator(x_generated, reuse=True)
z_gen = tf.random_normal([BATCH_SIZE * 2, 128], name="z")
d_generated_train = discriminator(generator(z_gen, reuse=True), reuse=True)
###########################################################################################################
with tf.name_scope('regularizer'):
epsilon = tf.random_uniform([tf.shape(x_true)[0], 1, 1, 1], 0.0, 1.0)
x_hat = epsilon * x_generated + (1 - epsilon) * x_true
d_hat = discriminator(x_hat, reuse=True)
############ WGAN-1-GP calculation ########################################################################
gradients = tf.gradients(d_hat, x_hat)[0]
C_xhat_grad_norm = tf.norm(slim.flatten(gradients), axis=1) # l2 norm
d_regularizer1 = tf.reduce_mean(tf.square(C_xhat_grad_norm - 1.))
############################## NIQE penalty term computation ################################################
niqe_score_grad = niqe(tf.image.rgb_to_grayscale(gradients),48)
#niqe_score_grad = tf.py_func(niqe_grad,[gradients], tf.float32)
niqe_score_mean_grad = tf.reduce_mean(niqe_score_grad)
############### regularizer equation final based on NIQE ###################################################################
added_regularizer = lambda_1*d_regularizer1 + lambda_2*niqe_score_mean_grad + stability_regularizer_factor * d_regularizer_mean_stability
############### loss functions for the GAN and training #####################################################
with tf.name_scope('loss_gan'):
wasserstein_scaled = (tf.reduce_mean(d_generated) - tf.reduce_mean(d_true))
wasserstein = wasserstein_scaled
g_loss = tf.reduce_mean(d_generated_train)
######### The proposed QAGAN -NIQE loss function based on NIQE ###############################################
d_loss = (-wasserstein + added_regularizer)
with tf.name_scope('optimizer'):
ema = atf.EMAHelper(decay=0.99)
global_step = tf.Variable(0, trainable=False, name='global_step')
decay = tf.maximum(0., 1.-(tf.cast(global_step, tf.float32)/MAX_ITERS))
learning_rate = 2e-4 * decay
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0., beta2=0.9)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='gan/generator')
g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
with tf.control_dependencies(update_ops):
g_train = optimizer.minimize(g_loss, var_list=g_vars,
global_step=global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='gan/discriminator')
d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
with tf.control_dependencies(update_ops):
d_train = optimizer.minimize(d_loss, var_list=d_vars)
############ summary writing ######################################################################################
with tf.name_scope('summaries'):
tf.summary.scalar('wasserstein_scaled', wasserstein_scaled)
tf.summary.scalar('wasserstein', wasserstein)
tf.summary.scalar('g_loss', g_loss)
tf.summary.scalar('d_loss', d_loss)
tf.summary.scalar('d_regularizer_niqe', niqe_score_mean_grad)
tf.summary.scalar('d_regularizer_gp', d_regularizer1)
tf.summary.scalar('learning_rate', learning_rate)
tf.summary.scalar('added_regularizer', added_regularizer)
tf.summary.scalar('learning_rate', learning_rate)
tf.summary.scalar('global_step', global_step)
atf.image_grid_summary('x_generated', x_generated)
merged_summary = tf.summary.merge_all()
################### intializing the variables and graph ########################
sess.run([tf.global_variables_initializer(),
tf.local_variables_initializer()])
############ The image files and coordinate the loading of image files #########
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
########### Add op to save and restore #########################################
saver = tf.train.Saver(max_to_keep=2)
######## i = 1000 uncomment and enter the model number for restoring the model #####
if not reset:
nn = name + "/model.ckpt-" + str(i)
saver.restore(sess,nn)
################ Standardized validation z ######################################################
train_summary_writer = tf.summary.FileWriter(log_dir)
while True:
i = sess.run(global_step)
if i >= MAX_ITERS:
break
################## discriminator training 5 times ##################################################
num_d_train = 5
for j in range(num_d_train):
_, d_loss_result = sess.run([d_train, d_loss])
############## generator training ##################################################################
_, g_loss_result, _ = sess.run([g_train, g_loss, ema.apply])
print(' i={}, d_loss={}, g_loss={}'.format(i,d_loss_result,g_loss_result))
######################### summary writing ###########################################################
if i % SUMMARY_FREQ == SUMMARY_FREQ - 1:
ema_dict = ema.average_dict()
merged_summary_result_train = sess.run(merged_summary)
train_summary_writer.add_summary(merged_summary_result_train, i)
############### check point writing ###################################################################
if i % save_freq == save_freq-1:
saver.save(sess,name + "/model.ckpt", global_step=i)
| [
"noreply@github.com"
] | noreply@github.com |
1ed8a791199fb00c45f4eb0ca5c4eb7b0da7e94c | 20db5a27f2a8b2d324085f5e1ec6c46ad7c1e8c3 | /manage.py | c330237e8173667cd9d46db5ee3ead0af6d478a2 | [] | no_license | mortadagzar/djangoMovie | dae326fc83a31e485792b1ee42fa89b7d681049d | e83904c0c1ecc45992eed7516cb483bd2c97590b | refs/heads/master | 2020-04-01T22:32:28.246877 | 2018-10-19T02:41:22 | 2018-10-19T02:41:22 | 153,713,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoMovie.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"mortadagzar@gmail.com"
] | mortadagzar@gmail.com |
aa68f988913d5356935838e1c55305c02c569057 | b1e8b238c258d31b9862aa36a591bb5c44ade131 | /my_test.py | 3ffd01a0899be4694643091330dc4e9b2a7e5dd4 | [] | no_license | Leonidas-I/python-powershell | 9a503fdaccbbbd8ce5297fda36640588bcf90f35 | 81520dcc06d9379f5fe7d9df25a3ebdddfb5c7b5 | refs/heads/master | 2022-12-03T05:47:47.385845 | 2020-07-18T03:52:14 | 2020-07-18T03:52:14 | 280,581,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | import random
class Card(object):
suit_name = ['Clubs', 'Diamonds', 'Hearts', 'Spades']
rank_name = [None, 'Ace', '2', '3', '4', '5', '6',
'7', '8', '9', '10', 'Jack', 'Queen', 'King']
def __init__(self, suit=0, rank=1):
self.suit = suit
self.rank = rank
def __str__(self):
return '%s of %s' % (Card.rank_name[self.rank], Card.suit_name[self.suit])
def __cmp__(self, other): #method compare 2 la bai c1, c2
c1 = self.suit, self.rank
c2 = other.suit, other.rank
return cmp(c1, c2)
class Deck(object):
def __init__(self):
self.cards = []
for suit in range(4):
for rank in range(1, 14):
card = Card(suit, rank)
self.cards.append(card)
def __str__(self):
res = []
for card in self.cards:
res.append(str(card))
return '\n'.join(res)
def add_card(self, card):
self.cards.append(card)
def remove_card(self, card):
self.cards.remove(card)
def pop_card(self, i = 0): #chia bai tu la tren top cua deck
return self.cards.pop(i)
def shuffle(self):
random.shuffle(self.cards)
def sort(self):
self.cards.sort()
def move_card(self, hand, num):
for i in range(num):
hand.add_card(self.pop_card())
class Hand(Deck):
def __init__(self, label=''):
self.cards = []
# self.label = label
def main():
c1 = Card(3, 12)
c2 = Card(3, 5)
print c1, c2, c1 > c2
deck = Deck()
deck.shuffle()
hand = Hand()
deck.move_card(hand, 5)
print deck
print ''
print hand
if __name__ == '__main__':
main() | [
"doombringer@disroot.org"
] | doombringer@disroot.org |
726904d1d091167b545718adf4e6beb1b170a92b | 879b484caa5ef9e462d0897434c5a4cc02137c37 | /util/test_bank.py | 9868b641809f7fefce529130e92646a5277a674c | [] | no_license | qqq1123/appnium- | 9c2540b750ea4cfa7633f3a24426b530655822dd | 8971b394cb783ec54f3cfbb03bee8d4bf22aac57 | refs/heads/main | 2023-04-05T08:47:25.042423 | 2021-04-17T03:57:01 | 2021-04-17T03:57:01 | 358,778,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | import requests
import json
import urllib3
num = 5846
url2 = 'http://p2pxin.rongtuojinrong.com/rongtuoxinsoc/user/paorxercijiangli'
url = "http://api.uat.rongtuoyunlian.cn/php/recommend/getRecommendIdentity"
#https://onlineuat.cupdata.com/openapi/api/gateway
# data = {
# "identity": "0x4fca57771d96d933d8801b4a49ef3117ba88865ae573e489289d4d63811ddcde",
# "amount": "10000",
# "sms_code": "965236",
# "sms_seq": "1",
# "appid": "pc",
# "user_id": "2592",
# "scbUserId": "14544",
# "token": "443fa0c05e2e80762283310a53b7989e"
# }
# for i in range(0,5):
# res = requests.post(url=url,data=data).json()
# print(res)
# data = {
#
# "rongxinIdentity": "0x44a0ac0de5450d58033aafe68c52f7cc7e14408726390ba08416ae41d295203d",
#
# }
# urllib3.disable_warnings()
# res = requests.post(url=url,data=data,verify=False).json()
# print(res)
data = {
"rongxinIdentity": "0xf3b256499718ae0731ae7ad4070ae1b91aafccfa8507db3a304f37d74f5fcb8d"
}
urllib3.disable_warnings()
res = requests.post(url=url,data=data).json()
print(res)
# for i in range(0,50):
# urllib3.disable_warnings()
# res = requests.post(url=url,data=data,verify=False)
# print(res)
# num = num + 1
#
'''
查询一二级合伙人接口
http://api.uat.rongtuoyunlian.cn/php/recommend/getRecommendIdentity'''
| [
"362484633@qq.com"
] | 362484633@qq.com |
d7695202a7eac262d72a336803baadd7fb91647b | 749ecf780d7e1e23d4bb4ea4c748f54efb33ad6e | /app.py | 75c382cb27cc44a92cba2d8fc46a477dcb32fdf2 | [] | no_license | arthuramsouza/playroom | 28df30b1ba1c2f188274559af93bd60c368e9bf9 | 242a8c9c239a0f0d9a0175b09660a1b28f3e9634 | refs/heads/master | 2023-05-11T12:35:16.987877 | 2020-03-23T06:09:37 | 2020-03-23T06:09:37 | 247,905,442 | 1 | 0 | null | 2023-05-02T18:43:38 | 2020-03-17T07:26:23 | Python | UTF-8 | Python | false | false | 191 | py | from flask import Flask
from flask_mysqldb import MySQL
app = Flask(__name__)
app.config.from_object('config')
db = MySQL(app)
from views import *
if __name__ == '__main__':
app.run()
| [
"arthuramsouza@gmail.com"
] | arthuramsouza@gmail.com |
d70f89891b0b806e60e5c729f691a486fcd710db | e98a912d1da048afda6972549eb52a92fddd766e | /make_result/fig_out.py | a96579a5e362d1b6ee3275de35d03084c4245fd1 | [] | no_license | rsaito831/ABW_sim | 5b4806f49998c8f2b704926b76c03a3f3609038c | a39ba7603388717524c0ff719bf6ba82b8978b1c | refs/heads/master | 2020-03-29T19:11:13.163381 | 2018-09-26T06:42:47 | 2018-09-26T06:42:47 | 150,251,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py |
import sys
import matplotlib.pyplot as plt
import numpy as np
args = sys.argv
data01_axis1, data01_value1 = np.loadtxt(args[1], unpack=True)
data02_axis2, data02_value2 = np.loadtxt(args[2], unpack=True)
data02_value2 = data02_value2 / 1000000
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(111)
ax.plot(data01_axis1, data01_value1, "-o", markersize=3, color="b", label="True value")
ax.plot(data02_axis2, data02_value2, "o", markersize=0.5, color="r", label="Spruce")
ax.set_xlim(0, 400)
ax.set_ylim(500, 700)
ax.set_xlabel("Time (s)")
ax.set_ylabel("Available bandwidth (Mbps)")
ax.legend(loc="upper right")
plt.show()
| [
"rsaito831@gmail.com"
] | rsaito831@gmail.com |
b9fc0ded63c3c6f0ff7857c261a68f18076d6d8e | 9dc8c299ee7d4a225002127cc03b4253c8a721fd | /libs/unittest/live_related_condition.py | 5604fdc9fc852993f3b40a2a692f9a1c3da1f49b | [] | no_license | namesuqi/strategy_corgi | 5df5d8c89bdf7a7c465c438048be20ef16120f4f | 557b8f8eabf034c2a57c25e6bc581858dd4f1b6e | refs/heads/master | 2020-03-07T04:00:18.313901 | 2018-03-29T07:50:50 | 2018-03-29T07:50:50 | 127,253,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,149 | py | # !/usr/bin/python
# coding=utf-8
# author: JinYiFan
from config import *
from libs.module.live_seeds import *
import time
def wait_for_second(wait_time):
time.sleep(wait_time)
def change_config(live_file_count, live_peer_count, rate_of_peer_and_file):
"""
修改config文件的参数配置
:param live_file_count: 文件总数
:param live_peer_count: 播放的节点总数
:param rate_of_peer_and_file: 单个文件对应的播放节点数
"""
orm.session.query(Configs).filter(Configs.role == "live_file_count").update(
{"content": live_file_count})
orm.session.query(Configs).filter(Configs.role == "live_peer_count").update(
{"content": live_peer_count})
orm.session.query(Configs).filter(Configs.role == "rate_of_peer_and_file").update(
{"content": rate_of_peer_and_file})
orm.session.commit()
orm.session.close()
def change_peer_flow_to_0():
"""
将peer的CDN和P2P流量设为0
"""
orm.session.query(Live_Peer).update({"cdn": 0, "p2p": 0})
orm.session.commit()
orm.session.close()
def change_LF_flow_to_0():
"""
将LF的CDN和P2P流量设为0
"""
orm.session.query(Live_Seed).update({"upload": 0, "download": 0})
orm.session.commit()
orm.session.close()
def add_player(play_num):
"""
增加播放节点
:param play_num: 增加的播放节点数
"""
peer_num_infos = orm.session.query(Live_Online).offset(200).limit(play_num).all()
file_id = orm.session.query(Live_Peer).first().file_id
num = 0
for num in range(play_num):
live_peer_sdk = Live_Peer(peer_id=peer_num_infos[num].peer_id, version=peer_num_infos[num].sdk_version,
country=peer_num_infos[num].country, province_id=peer_num_infos[num].province_id,
city_id=peer_num_infos[num].city_id, isp_id=peer_num_infos[num].isp_id,
file_id=file_id, chunk_id=get_random_chunk_id(), operation=OPERATION, cdn=CDN,
p2p=P2P, ssid=peer_num_infos[num].ssid, p2penable=P2PENABLE)
orm.session.add(live_peer_sdk)
num += 1
orm.session.commit()
orm.session.close()
def one_peer_multi_channel(channel_num):
"""
一个播放节点播放多个频道
:param channel_num: 一个播放节点播放的频道数
"""
peer_info = orm.session.query(Live_Peer).first()
file_info = orm.session.query(Live_File).offset(5).limit(channel_num).all()
for num in range(channel_num - 1):
live_peer_sdk = Live_Peer(peer_id=peer_info.peer_id, version=peer_info.version, country=peer_info.country,
province_id=peer_info.province_id, city_id=peer_info.city_id, isp_id=peer_info.isp_id,
file_id=file_info[num].file_id, chunk_id=get_random_chunk_id(), operation=OPERATION,
cdn=CDN, p2p=P2P, ssid=peer_info.ssid, p2penable=P2PENABLE)
orm.session.add(live_peer_sdk)
num += 1
orm.session.commit()
orm.session.close()
def del_player(del_num):
"""
删除播放节点
:param del_num: 删除的播放节点数
"""
peer_infos = orm.session.query(Live_Peer).all()
session_ids = list()
for peer_info in peer_infos:
session_ids.append(peer_info.ssid)
num = 0
for num in range(del_num):
orm.session.query(Live_Peer).filter_by(ssid=session_ids[num]).delete()
num += 1
orm.session.commit()
orm.session.close()
def del_seed(del_num):
"""
删除雷锋节点
:param del_num: 删除的雷锋节点数
"""
seed_infos = orm.session.query(Live_Seed).all()
session_ids = list()
for seed_info in seed_infos:
session_ids.append(seed_info.ssid)
num = 0
for num in range(del_num):
orm.session.query(Live_Seed).filter_by(ssid=session_ids[num]).delete()
num += 1
orm.session.commit()
orm.session.close()
if __name__ == "__main__":
del_seed(20)
# add_player(3)
# one_peer_multi_channel(3)
# del_player(2)
| [
"suqi_name@163.com"
] | suqi_name@163.com |
87b307a9a67c645c792fcca067a3cfb649de8aca | 4c2722604cf76e1073d45dabe1620e48cb05420d | /f3.1.py | ad45a495d4a9ba9152d7e5cc72a40b9343ec53ce | [] | no_license | Sultansharav/function | 18435e3f2d17addca9732eb50abce4f794dd308b | 5d59ce1e5799ae2ca257740a8d25d98d773cccc8 | refs/heads/master | 2020-08-03T13:21:36.808432 | 2019-09-30T03:18:28 | 2019-09-30T03:18:28 | 211,766,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | # Too anhny too esehiig shalgah
def huvaagchTooloh(p):
k=0
for i in range(1,p+1):
if p%i == 0: k+=1
return k
def anhnyToo(p):
if huvaagchTooloh(p) == 2:
return True
else:
return False
print(anhnyToo(int(input()))) | [
"asultan.0516@gmail.com"
] | asultan.0516@gmail.com |
8d637f9712aa8cd0fa725ea3c7b3285cb522f1da | be5a758c99f05c8ae8c224bf43335154114ee5f6 | /kombu/compat.py | 224f2e33e5d44865c3202047427a7e1c535ba30d | [
"BSD-3-Clause"
] | permissive | bradjasper/kombu | 160ed1b5651f91a87752df40791d01c91ca1fe16 | 4c9ac1436eb0468508f8b2cf1bda997535e1326d | refs/heads/master | 2021-01-16T00:23:17.928400 | 2010-07-28T17:25:32 | 2010-07-28T17:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,431 | py | from itertools import count
from kombu import entity
from kombu import messaging
def iterconsume(connection, consumer, no_ack=False, limit=None):
consumer.consume(no_ack=no_ack)
for iteration in count(0):
if limit and iteration >= limit:
raise StopIteration
yield connection.drain_events()
def entry_to_binding(queue, **options):
binding_key = options.get("binding_key") or options.get("routing_key")
e_durable = options.get("exchange_durable") or options.get("durable")
e_auto_delete = options.get("exchange_auto_delete") or \
options.get("auto_delete")
q_durable = options.get("queue_durable") or options.get("durable")
q_auto_delete = options.get("queue_auto_delete") or \
options.get("auto_delete")
e_arguments = options.get("exchange_arguments")
q_arguments = options.get("queue_arguments")
b_arguments = options.get("binding_arguments")
exchange = entity.Exchange(options.get("exchange"),
type=options.get("exchange_type"),
delivery_mode=options.get("delivery_mode"),
routing_key=options.get("routing_key"),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return entity.Binding(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get("exclusive"),
auto_delete=q_auto_delete,
queue_arguments=q_arguments,
binding_arguments=b_arguments)
class Publisher(messaging.Producer):
exchange = ""
exchange_type = "direct"
routing_key = ""
durable = True
auto_delete = False
_closed = False
def __init__(self, connection, exchange=None, routing_key=None,
exchange_type=None, durable=None, auto_delete=None, **kwargs):
self.connection = connection
self.backend = connection.channel()
self.exchange = exchange or self.exchange
self.exchange_type = exchange_type or self.exchange_type
self.routing_key = routing_key or self.routing_key
if auto_delete is not None:
self.auto_delete = auto_delete
if durable is not None:
self.durable = durable
if not isinstance(self.exchange, entity.Exchange):
self.exchange = entity.Exchange(name=self.exchange,
type=self.exchange_type,
routing_key=self.routing_key,
auto_delete=self.auto_delete,
durable=self.durable)
super(Publisher, self).__init__(self.backend, self.exchange,
**kwargs)
def send(self, *args, **kwargs):
return self.publish(*args, **kwargs)
def close(self):
self.backend.close()
self._closed = True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
class Consumer(messaging.Consumer):
    """carrot-compatible consumer built on top of messaging.Consumer.

    Declares a single queue bound to a direct exchange, taking the queue /
    exchange parameters either from class attributes or from constructor
    arguments, and exposes the legacy carrot helper API
    (fetch, wait, discard_all, iterconsume, iterqueue).
    """
    queue = ""
    exchange = ""
    routing_key = ""
    # NOTE: the original class listed ``exchange_type = "direct"`` twice;
    # the redundant duplicate has been removed (effective value unchanged).
    exchange_type = "direct"
    durable = True
    exclusive = False
    auto_delete = False
    # Set once close() has been called.
    _closed = False
    def __init__(self, connection, queue=None, exchange=None,
            routing_key=None, exchange_type=None, durable=None,
            exclusive=None, auto_delete=None, **kwargs):
        self.connection = connection
        self.backend = connection.channel()
        # Only override the class-level flags when explicitly given, so a
        # caller-supplied ``durable=False`` is honoured (a plain ``or``
        # would silently discard it).
        if durable is not None:
            self.durable = durable
        if exclusive is not None:
            self.exclusive = exclusive
        if auto_delete is not None:
            self.auto_delete = auto_delete
        self.queue = queue or self.queue
        self.exchange = exchange or self.exchange
        self.exchange_type = exchange_type or self.exchange_type
        self.routing_key = routing_key or self.routing_key
        exchange = entity.Exchange(self.exchange,
                                   type=self.exchange_type,
                                   routing_key=self.routing_key,
                                   auto_delete=self.auto_delete,
                                   durable=self.durable)
        binding = entity.Binding(self.queue,
                                 exchange=exchange,
                                 routing_key=self.routing_key,
                                 durable=self.durable,
                                 exclusive=self.exclusive,
                                 auto_delete=self.auto_delete)
        super(Consumer, self).__init__(self.backend, binding, **kwargs)
    def close(self):
        """Cancel the consumer and close the underlying channel."""
        self.cancel()
        self.backend.close()
        self._closed = True
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        self.close()
    def __iter__(self):
        """Iterate forever over incoming messages (basic.get polling loop)."""
        return self.iterqueue(infinite=True)
    def fetch(self, no_ack=None, enable_callbacks=False):
        """basic.get a single message from the queue.

        If *enable_callbacks* is true the registered receive callbacks are
        invoked for the message.  Returns the message, or None if the
        queue was empty.
        """
        if no_ack is None:
            no_ack = self.no_ack
        message = self.bindings[0].get(no_ack)
        if message:
            if enable_callbacks:
                self.receive(message.payload, message)
        return message
    def process_next(self):
        raise NotImplementedError("Use fetch(enable_callbacks=True)")
    def discard_all(self, filterfunc=None):
        """Purge all queued messages; filtering was never supported here."""
        if filterfunc is not None:
            raise NotImplementedError(
                    "discard_all does not implement filters")
        return self.purge()
    def iterconsume(self, limit=None, no_ack=None):
        # Delegate to the module-level iterconsume() helper.
        return iterconsume(self.connection, self, no_ack, limit)
    def wait(self, limit=None):
        """Consume up to *limit* messages and return them as a list."""
        it = self.iterconsume(limit)
        return list(it)
    def iterqueue(self, limit=None, infinite=False):
        """Generator over messages fetched with basic.get.

        Stops when the queue is empty (unless *infinite*) or after
        *limit* items have been produced.
        """
        for items_since_start in count():
            item = self.fetch()
            if (not infinite and item is None) or \
                    (limit and items_since_start >= limit):
                # BUGFIX: ``raise StopIteration`` inside a generator is a
                # RuntimeError under PEP 479 (Python 3.7+); a plain return
                # ends the generator identically on all versions.
                return
            yield item
class _CSet(messaging.Consumer):
    # carrot-compatible "consumer set": a messaging.Consumer that consumes
    # from several bindings at once and exposes the legacy helper API.
    def __init__(self, connection, *args, **kwargs):
        self.connection = connection
        self.backend = connection.channel()
        super(_CSet, self).__init__(self.backend, *args, **kwargs)
    def iterconsume(self, limit=None, no_ack=False):
        # Delegate to the module-level iterconsume() helper.
        return iterconsume(self.connection, self, no_ack, limit)
    def discard_all(self):
        # Drain (purge) all queued messages without processing them.
        return self.purge()
    def add_consumer_from_dict(self, queue, **options):
        # Build a Binding from a carrot-style options dict and register it.
        self.bindings.append(entry_to_binding(queue, **options))
    def add_consumer(self, consumer):
        # Absorb every binding of another consumer into this set.
        self.bindings.extend(consumer.bindings)
    def close(self):
        # NOTE(review): __init__ stores the channel as self.backend but
        # close() uses self.channel -- presumably the parent class sets
        # self.channel; confirm, otherwise this raises AttributeError.
        self.cancel()
        self.channel.close()
def ConsumerSet(connection, from_dict=None, consumers=None,
        callbacks=None, **kwargs):
    """Create a carrot-compatible consumer set (_CSet).

    Bindings are collected from the given *consumers* and/or built from the
    carrot-style *from_dict* queue-options mapping.  *callbacks* is accepted
    for API compatibility.
    """
    bindings = []
    if consumers:
        for consumer in consumers:
            # BUGFIX: the original used ``map(bindings.extend,
            # consumer.bindings)`` which is a no-op on Python 3 (``map`` is
            # lazy and its result was discarded); the intent is to collect
            # every binding of each consumer.
            bindings.extend(consumer.bindings)
    if from_dict:
        for queue_name, queue_options in from_dict.items():
            bindings.append(entry_to_binding(queue_name, **queue_options))
    return _CSet(connection, bindings, **kwargs)
| [
"askh@opera.com"
] | askh@opera.com |
58ab3e67d229116c62c0e48a9e0a77e9bcac2a78 | 69077e615b35127904128db364c4febaca9c5795 | /vilani_name.py | bea3d213b644b09f50186b1d3e59a2c6e87d1009 | [
"MIT"
] | permissive | markurbin/Vilani_Name | 793141093ec54b8ff84127bb824995ff0b6ee775 | 6e772ce7eadec7b5fb95226f23648f591c35f67d | refs/heads/master | 2020-04-01T07:36:31.747248 | 2018-10-14T17:47:02 | 2018-10-14T17:47:02 | 152,995,966 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,497 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 22 22:13:18 2016
@author: Mark Urbin
# "The Traveller game in all forms is owned by Far Future Enterprises.
# Copyright 1977 - 2018 Far Future Enterprises."
This program translates Terran names to Vilani.
This for the science fiction role playing game Traveller.
Names of length of 2 or 3 names can be entered as a command line option
examples: "Joe Generic" or "John Quincy Citizen"
In the case of no middle name, a common name will be used
If no command line input, a default name of "Walter Piracy Smith" will be used.
It is up to the user to break the resulting name in to first and last names.
For guidance see
http://www.freelancetraveller.com/features/culture/customs/vilnames.html
This is based on a 'Your name in Vilani' web page that I'm pretty sure does
not exist anymore.
"""
#Standard libraries
import random
import sys
def replace1(nWip):
    """Apply the Vilani consonant substitutions to *nWip*.

    The fixed substitutions run first, in a fixed order ('ch' must precede
    'c', and 'x' -> 'kash' must precede the 's' rule).  Then 'l', 's', 'u',
    'v' and 'w' are each replaced by one randomly drawn fragment (the same
    fragment for all occurrences of that letter).  Finally every 'h' that
    does not follow 's' or 'g' is hardened to 'kh'.
    """
    l_choices = ('ag', 'ir', 'uk', 'aag', 'iir')
    s_choices = ('shir', 'sir')
    u_choices = ('uu', 'u')
    # Deterministic, order-sensitive substitutions.
    for old, new in (('ch', 'sh'), ('c', 'k'), ('f', 'b'), ('j', 'ii'),
                     ('o', 'aa'), ('p', 'm'), ('q', 'k'), ('t', 'r'),
                     ('x', 'kash'), ('y', 'ii')):
        nWip = nWip.replace(old, new)
    # Randomised substitutions: one fragment is drawn per letter.
    nWip = nWip.replace('l', random.choice(l_choices))
    nWip = nWip.replace('s', random.choice(s_choices))
    for semivowel in ('u', 'v', 'w'):
        nWip = nWip.replace(semivowel, random.choice(u_choices))
    # Harden 'h' to 'kh' unless it begins the word or follows 's'/'g'.
    out = []
    for pos, ch in enumerate(nWip):
        if ch == 'h' and pos > 0 and nWip[pos - 1] not in ('s', 'g'):
            out.append('kh')
        else:
            out.append(ch)
    return ''.join(out)
def doubleCon(nWip):
    """Break up doubled consonants with a random Vilani vowel.

    Doubled vowels (e.g. 'aa') are left untouched; only a consonant that
    repeats the immediately preceding character gets a vowel inserted
    before its second occurrence.
    """
    vowel_set = 'aeiou'
    fillers = ('ii', 'i', 'aa', 'a')
    pieces = []
    for pos, ch in enumerate(nWip):
        if ch not in vowel_set and pos > 0 and ch == nWip[pos - 1]:
            pieces.append(random.choice(fillers))
        pieces.append(ch)
    return ''.join(pieces)
def addvowel(x):
    """Return *x* with one randomly chosen Vilani vowel prepended."""
    return random.choice(('a', 'e', 'i', 'ii', 'uu', 'aa')) + x
def sepCon(nWip):
    """Insert a random vowel between adjacent consonants.

    The legal Vilani clusters 'kh', 'sh' and 'rk' are kept intact; any
    other consonant pair gets a vowel inserted before its second member
    (via addvowel).
    """
    vowel_set = 'aeiou'
    legal_pairs = ('kh', 'sh', 'rk')
    pieces = []
    for pos, ch in enumerate(nWip):
        if (ch not in vowel_set and pos > 0
                and nWip[pos - 1] not in vowel_set
                and nWip[pos - 1] + ch not in legal_pairs):
            pieces.append(addvowel(ch))
        else:
            pieces.append(ch)
    return ''.join(pieces)
def dipReplace(nWip):
    """Replace the closing letter of certain Terran diphthongs with a random vowel.

    Scans left to right; whenever the current character together with the
    *original* previous character forms one of the listed diphthongs, the
    current character is replaced by a random Vilani vowel.
    """
    # 'ou' appeared twice in the original tuple; a frozenset removes the
    # redundancy and makes the membership test O(1).
    diphthongs = frozenset(('ow', 'ou', 'oi', 'oy', 'ea'))
    replacements = ('a', 'e', 'i', 'ii', 'uu', 'aa')
    out = []
    for pos, ch in enumerate(nWip):
        if pos > 0 and nWip[pos - 1] + ch in diphthongs:
            out.append(random.choice(replacements))
        else:
            out.append(ch)
    return ''.join(out)
def dipthong(nWip):
    """Collapse Terran vowel digraphs into their Vilani equivalents.

    Substitutions are applied sequentially in a fixed order (matching the
    original substitution table), so an earlier replacement can feed a
    later rule.
    """
    table = (
        ('ae', 'e'), ('ai', 'ii'), ('ao', 'uu'), ('au', 'aa'), ('ay', 'e'),
        ('ea', 'i'), ('ee', 'ii'), ('ei', 'e'), ('eo', 'i'), ('eu', 'u'),
        ('ey', 'ii'), ('ia', 'a'), ('ie', 'e'), ('io', 'o'), ('iu', 'u'),
        ('oa', 'ua'), ('oe', 'e'), ('oi', 'i'), ('oo', 'uu'), ('ou', 'uu'),
        ('oy', 'i'), ('ue', 'u'), ('ui', 'ii'), ('uo', 'ua'), ('uy', 'ii'),
    )
    for terran, vilani in table:
        nWip = nWip.replace(terran, vilani)
    return nWip
def vilaniName(nWip):
    """Run the full Terran-to-Vilani transformation pipeline on *nWip*.

    The stages run in the same order as the original implementation:
    diphthong collapse, consonant substitution, doubled-consonant and
    consonant-cluster separation, then diphthong residue replacement.
    """
    for stage in (dipthong, replace1, doubleCon, sepCon, dipReplace):
        nWip = stage(nWip)
    return nWip
def insertName():
    """Return a random common first name to use as a middle name.

    A coin flip selects the male or female name pool, then one name is
    drawn uniformly from that pool.
    """
    commonMale = ['john','james','michael','robert','mark','kevin','noah',
                  'mason','lucas','adam','carter','henry','alexander','jack',
                  'gabriel','muhammad','asher','clark','bruce','donald',
                  'scott','christopher', 'jerry','roger','alan','ian',
                  'mohammand', 'yue', 'yang', 'raj', 'edgar','steve',
                  'marc','jean','chad', 'gary', 'larry','neil','glenn',
                  'ace','christian','gabriel','raphael','azrael','chamuel',
                  'lincoln','samuel','hunter','fitzgerald','otto','gunther',
                  'karl','rudolf','benjamin','ernest','oscar','ralph',
                  'elmer','frederick','herbert','lee','spock','lawerance',
                  'ronald','william','billy']
    commonFemale = ['susan','mary','carolyn','judy', 'kim', 'lori','kathy',
                    'carol', 'laura', 'kimberly','olivia','sophia','charlotte',
                    'harper', 'evelyn', 'scarlett', 'emily', 'madison',
                    'elizabeth', 'victoria', 'grace', 'aubrey', 'aurora',
                    'skylar', 'hazel', 'nova', 'brooklyn', 'kaitlyn','sally',
                    'patty','patricia', 'linda','amy','yelena','joan',
                    'stephanie','ursula', 'taylor', 'jophiel','ariel','priya',
                    'li','jamie','josephine','carrie','sarah', 'helen',
                    'gracie','ann','alice','ada','sadie','doris','virginia',
                    'dakota','marilyn','catherine','ruby','beverly','reagan']
    # Coin flip: 1 -> male pool, anything else (2) -> female pool.
    pool = commonMale if random.randint(1, 2) == 1 else commonFemale
    return random.choice(pool)
#Start of main executable code
# If no Middle Name use a common name
# NOTE(review): as indented here the ``else`` pairs with the inner
# ``len(sys.argv) == 3`` test, so one or two command-line words fall into the
# else branch and index sys.argv[2]/sys.argv[3] -> IndexError before the
# default name below is reached, and four or more words assign nothing.
# Presumably the else was meant to pair with the outer ``< 4`` test -- confirm
# against the upstream repository (indentation may have been mangled).
if len(sys.argv) < 4:
    if len(sys.argv) == 3:
        name = sys.argv[1]
        name += ' '
        name += insertName()
        name += ' '
        name += sys.argv[2]
    else:
        name = sys.argv[1]
        name += ' '
        name += sys.argv[2]
        name += ' '
        name += sys.argv[3]
# Fall back to the documented default when no usable name was supplied.
if len(sys.argv) < 3:
    name = 'Walter Piracy Smith'
print 'Original Name: ', name
sName = name.split(' ')
# Reverse the middle name
# NOTE(review): sName[2] is the *last* of the three words, so this actually
# concatenates first + reversed(last) + middle -- verify against the intended
# Vilani naming scheme.
vNameWip = sName[0] + sName[2][::-1] + sName[1]
# Make everything lower Case
nWip = vNameWip.lower()
#nWip = villaniName(nWip)
#Printing 5 names to get multiple values due to random selection
for y in range(0,5):
    print vilaniName(nWip)
| [
"noreply@github.com"
] | noreply@github.com |
efc54871703ecce3f1cb626bd1351abbdff392ef | 34ef83114e02b173bd2d55eb53ad399e738a8e3c | /django/test_bootstrap/blog/models.py | b12d7ddd4b0a24411e62b4e99bf00bcafa60e565 | [] | no_license | vavilon/Python3 | e976a18eb301e4953696d1e3f4730ed890da015a | 8c79729747ce51d60ad685e6a2e58292954ed7eb | refs/heads/master | 2023-01-09T13:44:37.408601 | 2018-01-25T22:41:14 | 2018-01-25T22:41:14 | 100,892,055 | 0 | 1 | null | 2022-12-26T20:29:27 | 2017-08-20T22:23:06 | Python | UTF-8 | Python | false | false | 506 | py | from django.db import models
# Create your models here.
from django.utils import timezone
class Post(models.Model):
    # A simple blog post authored by a Django auth user.
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Stamped automatically when the row is created.
    created_date = models.DateTimeField(
        default=timezone.now)
    # Stays NULL until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True
    )
    def publish(self):
        # Mark the post as published now.  NOTE(review): the caller must
        # save() afterwards -- publish() itself does not persist the change.
        self.published_date = timezone.now()
    def __str__(self):
        # Human-readable representation (used e.g. by the Django admin).
        return self.title
| [
"overon4ek@gmail.com"
] | overon4ek@gmail.com |
3fac458c8f38d04e4724c1f19199c6c517b324b6 | 675b72eae65f8e258794decf9627e5fdf8b04559 | /plugin_tests/examples_test.py | aa8ae4a0e021a3a57aefdf2dd02021e68f45841a | [
"Apache-2.0"
] | permissive | jbeezley/large_image | 368f730ea6fe2c4b75a9c3412c08ce8f41be545a | ac4cbaff4ae2fbbde425d3cd1aee2ff03e6235c8 | refs/heads/master | 2021-01-11T06:15:48.687563 | 2016-10-24T17:09:08 | 2016-10-24T17:09:08 | 71,806,470 | 0 | 0 | null | 2016-10-24T16:04:04 | 2016-10-24T16:04:03 | null | UTF-8 | Python | false | false | 1,695 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import os
import subprocess
import unittest
class LargeImageExamplesTest(unittest.TestCase):
    """Smoke-test for the example scripts shipped in ../examples."""
    def testAverageColor(self):
        # Test running the program
        # Runs average_color.py on a sample pyramidal TIFF and checks both
        # the number of output lines and the final average colour values.
        testDir = os.path.dirname(os.path.realpath(__file__))
        examplesDir = os.path.join(testDir, '../examples')
        prog = 'average_color.py'
        # The test data directory is supplied via the LARGE_IMAGE_DATA
        # environment variable.
        imagePath = os.path.join(os.environ['LARGE_IMAGE_DATA'],
                                 'sample_image.ptif')
        process = subprocess.Popen(
            ['python', prog, imagePath, '-m', '1.25'],
            shell=False, stdout=subprocess.PIPE, cwd=examplesDir)
        results = process.stdout.readlines()
        # One progress line per processed level plus the final summary line.
        self.assertEqual(len(results), 19)
        # The summary line ends with the average R G B components.
        finalColor = [float(val) for val in results[-1].split()[-3:]]
        self.assertEqual(round(finalColor[0]), 245)
        self.assertEqual(round(finalColor[1]), 247)
        self.assertEqual(round(finalColor[2]), 247)
| [
"david.manthey@kitware.com"
] | david.manthey@kitware.com |
c07188d1ee79a950eae8de801a83fc7e33532916 | 593e287de8e95e1355a76fff8cd4302013325917 | /datasets/filter-uswebsites.py | 3377470c606f3fda90c835282cfc6658264fcf18 | [] | no_license | miamayixuan/Final-Project | 9efea72a385be9c3041c55f0c7c15a9aa0039cb5 | c2f350fb8c6275a6d4398be334c38e2c6e111b84 | refs/heads/master | 2020-12-08T08:02:44.259336 | 2020-01-20T01:18:20 | 2020-01-20T01:18:20 | 232,932,185 | 1 | 0 | null | 2020-01-19T00:55:17 | 2020-01-10T00:23:34 | HTML | UTF-8 | Python | false | false | 712 | py | #imports necessary packages
import csv
from pprint import pprint
from datetime import datetime
# reads in the all domains data set
with open('all-domains-30-days.csv', 'r') as f:
    reader = csv.DictReader(f)
    rows = list(reader)
site_visits = [dict(row) for row in rows]
# writes column headers we want
# NOTE(review): per the csv module docs the output file should be opened with
# newline='' to avoid blank rows on Windows.
with open('all_domains_output.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(['name', 'value'])
# creates a counter so that we can obtain only the top ten results
i=0
# loops through the file writing only the columns we want for only the top ten results:
# NOTE(review): as indented here the loop writes via ``writer`` after the
# ``with`` block above has already closed the output file, which raises
# "I/O operation on closed file" -- the loop presumably belongs inside the
# ``with`` block (indentation may have been lost).
for site in site_visits:
    i+=1
    if i > 10:
        break
    name = site["domain"]
    visits = site["visits"]
    writer.writerow([name, visits])
| [
"davidmstansbury@gmail.com"
] | davidmstansbury@gmail.com |
a058b693de47a2f87e3e8e4092a4deb46cc07dad | 9ccc0fc184a16d0f1ad0613e54e491cfa0cc3efe | /rebase/tests/unit/skills/importable_modules.py | db86063d8a4c64b7969f6fafe890520072aaca37 | [] | no_license | rebase-inc/api | 97c6ff512eef91ac83b894aafb09f068872a6681 | e1caf88099d06ac2f56d2e4caec3d5eee1925713 | refs/heads/master | 2021-03-27T14:41:00.101197 | 2017-02-18T03:37:17 | 2017-02-18T03:37:17 | 30,560,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | from pprint import pprint
from unittest import TestCase
from git import Repo
from rebase.skills.importable_modules import (
ImportableModules,
is_python_module,
path_without_extension,
python_modules,
start_dir,
to_namespace,
)
from .git import Tree, Blob
class ImportableModulesTest(TestCase):
    """Unit tests for the importable_modules helpers and ImportableModules.

    Uses the lightweight Tree/Blob fakes from the local .git test helpers;
    test_modules additionally reads a pinned commit of this repository.
    """
    def test_is_python_module(self):
        # Only .py blobs count as Python modules.
        foo_py = Blob('foo.py', 'x/y/z/foo.py')
        bar_js = Blob('bar.js', 'a/b/c/bar.js')
        self.assertTrue( is_python_module(foo_py) )
        self.assertFalse( is_python_module(bar_js) )
    def test_path_without_extension(self):
        foo_py = Blob('foo.py', 'a/b/c/foo.py')
        self.assertEqual( path_without_extension(foo_py), 'a/b/c/foo' )
    def test_python_modules(self):
        # Non-Python blobs are filtered out of the module set.
        foo_py = Blob('foo.py', 'a/b/c/foo.py')
        yo_py = Blob('yo.py', 'a/b/c/yo.py')
        bar_js = Blob('bar.js', 'a/b/c/bar.js')
        tree = Tree('c', 'a/b/c', blobs=[ foo_py, yo_py, bar_js ])
        self.assertEqual( python_modules(tree), { 'a/b/c/foo', 'a/b/c/yo' } )
    def test_to_namespace(self):
        # Paths become dotted namespaces relative to a start directory.
        self.assertEqual( to_namespace('a/b', 'a'), 'b' )
        self.assertEqual( to_namespace('a/b/c/foo', 'a/b'), 'c.foo' )
        self.assertEqual( to_namespace('a/b/c/foo', ''), 'a.b.c.foo' )
    def test_start_dir(self):
        # start_dir yields the parent directory of a path ('' at the root).
        self.assertEqual( start_dir('a'), '')
        self.assertEqual( start_dir('a/b'), 'a')
        self.assertEqual( start_dir('a/b/c'), 'a/b')
    def test_importable_modules_root_modules(self):
        # Top-level .py files are importable by bare module name.
        foo_py = Blob('foo.py', 'foo.py')
        yo_py = Blob('yo.py', 'yo.py')
        bar_js = Blob('bar.js', 'bar.js')
        tree = Tree('', '', blobs=[ foo_py, yo_py, bar_js ])
        importable_modules = ImportableModules(tree)
        self.assertEqual( importable_modules, { 'foo', 'yo' } )
    def test_importable_modules_one_sub_dir(self):
        # A plain sub-directory (no __init__.py) contributes bare names.
        foo_py = Blob('foo.py', 'a/foo.py')
        yo_py = Blob('yo.py', 'a/yo.py')
        bar_js = Blob('bar.js', 'a/bar.js')
        a = Tree('a', 'a', blobs=[foo_py, yo_py, bar_js])
        root_tree = Tree('', '', trees=[a])
        importable_modules = ImportableModules(root_tree)
        self.assertEqual( importable_modules, { 'foo', 'yo' } )
    def test_importable_modules_one_pkg(self):
        # Nested packages (with __init__.py) produce dotted module names.
        b = Tree('b', 'a/b', blobs=[
            Blob('__init__.py', 'a/b/__init__.py'),
            Blob('foo.py', 'a/b/foo.py'),
            Blob('yo.py', 'a/b/yo.py'),
            Blob('bar.js', 'a/b/bar.js'),
        ])
        a = Tree('a', 'a', blobs=[Blob('__init__.py', 'a/__init__.py')], trees=[b])
        root_tree = Tree('', '', trees=[a])
        importable_modules = ImportableModules(root_tree)
        self.assertEqual( importable_modules, { 'a', 'a.b', 'a.b.foo', 'a.b.yo' } )
    def test_modules(self):
        # Integration check against a pinned commit of this repository.
        backend = Repo('.')
        commit = backend.commit('d4819c15b3994dcd9e5faed9f180f1eaa71e3210')
        importable_modules = ImportableModules(commit.tree)
        root_modules = {'build', 'git-worker', 'parse_python2', 'rq-population', 'run-worker', 'scheduler', 'wsgi'}
        self.assertTrue(root_modules.issubset(importable_modules))
| [
"rapha.open.source@gmail.com"
] | rapha.open.source@gmail.com |
2727f055ba4fd1ad97e4e5a0f59365711f17792a | 2bbdab12cb53d156bda1b7e4f3ce129cc730d573 | /di_10n01/models.py | baf1cd96bec993178d5d3c3046132385fcd20da1 | [] | no_license | 000Justin000/surrogate | 4e31bf43240078bcaeb34910b6617b952b43f1df | 9f471251c3555725ab0505b40a93781792dc06a1 | refs/heads/master | 2021-05-11T19:34:27.512581 | 2018-01-17T15:34:22 | 2018-01-17T15:34:22 | 115,680,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | ../SOT/models.py | [
"juntengjia@hotmail.com"
] | juntengjia@hotmail.com |
434153e344fd51bbd477726190b6bffce6f42c4d | 3de3dae722829727edfdd6cc3b67443a69043475 | /edexOsgi/com.raytheon.uf.common.dataplugin.text/pythonPackages/dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/text/subscription/request/SubscriptionRequest.py | 237472774c674b7b8fb879656ce996c5d08db82a | [
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] | permissive | Unidata/awips2 | 9aee5b7ec42c2c0a2fa4d877cb7e0b399db74acb | d76c9f96e6bb06f7239c563203f226e6a6fffeef | refs/heads/unidata_18.2.1 | 2023-08-18T13:00:15.110785 | 2023-08-09T06:06:06 | 2023-08-09T06:06:06 | 19,332,079 | 161 | 75 | NOASSERTION | 2023-09-13T19:06:40 | 2014-05-01T00:59:04 | Java | UTF-8 | Python | false | false | 1,268 | py | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# File auto-generated against equivalent DynamicSerialize Java class
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Sep 05, 2014 bclement Generated
class SubscriptionRequest(object):
    # Auto-generated DynamicSerialize counterpart of the equivalent Java
    # class: a plain holder for a single subscription message payload.
    def __init__(self):
        self.message = None
    def getMessage(self):
        # Bean-style accessor kept to mirror the generated Java API.
        return self.message
    def setMessage(self, message):
        # Bean-style mutator kept to mirror the generated Java API.
        self.message = message
| [
"mjames@unidata.ucar.edu"
] | mjames@unidata.ucar.edu |
b8acc579b13a7bb35130f20698e3489073b14792 | 773deb7825ff84eec3e0cf6ae8266d07251df392 | /CHAPTER05/bw41.py | c7231b7bd8d7d2ba190f52df2a0fa74e6f62a961 | [] | no_license | kji0205/py | 3ca9c2a351af05ce62d7c7c3c261ed98a7e8290d | b45ffb3424b7c0da8192d431cb7ad7933c60ef81 | refs/heads/master | 2021-01-20T18:57:51.603386 | 2016-06-23T14:24:57 | 2016-06-23T14:24:57 | 61,639,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | # 진정한 병렬성을 실현하려면 concurrent.futures를 고려하자
import logging
from pprint import pprint
from sys import stdout as STDOUT
from time import time
def gcd(pair):
    """Return the greatest common divisor of a 2-tuple of positive ints.

    Deliberately brute-force (used as a CPU-bound benchmark workload):
    walks downward from min(a, b) and returns the first common divisor.
    Returns None when min(a, b) < 1, matching the original behaviour.
    """
    a, b = pair
    candidate = min(a, b)
    while candidate > 0:
        if a % candidate == 0 and b % candidate == 0:
            return candidate
        candidate -= 1
# CPU-bound workload used to compare serial, threaded, and multi-process
# execution of the same gcd computation.
numbers = [(1963309, 2265973), (2030677, 3814172),
           (1551645, 2229620), (2039045, 2020802)]
# Serial baseline.
start = time()
results = list(map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
#
# Threads: no speed-up expected for pure-Python CPU-bound work, because the
# GIL allows only one thread to execute bytecode at a time.
from concurrent.futures import ThreadPoolExecutor
start = time()
pool = ThreadPoolExecutor(max_workers=2)
results = list(pool.map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
#
# Processes: true parallelism across cores (each worker has its own
# interpreter and GIL), at the cost of pickling the inputs/outputs.
from concurrent.futures import ProcessPoolExecutor
start = time()
pool = ProcessPoolExecutor(max_workers=2)
results = list(pool.map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
| [
"kji0205@gmail.com"
] | kji0205@gmail.com |
32488fbd7fc3396021b71149e9fa5501f2d5f820 | bcdfeafe6276deee45b39268dadc4e79b001bce5 | /fmm/backend/__init__.py | c297273c4923c266411d8efab3a358f3d30ec346 | [
"BSD-3-Clause"
] | permissive | exafmm/pyexafmm | 178bf16f57e5d508d0a9bb77e556a2db30546686 | 5062803438cb9c1afe84c53e6a186e60254cf88d | refs/heads/master | 2023-07-19T17:44:25.849752 | 2022-02-21T11:46:30 | 2022-02-21T11:46:30 | 212,838,965 | 11 | 1 | BSD-3-Clause | 2023-07-06T22:32:37 | 2019-10-04T14:52:51 | Python | UTF-8 | Python | false | false | 35 | py | from fmm.backend.api import BACKEND | [
"noreply@github.com"
] | noreply@github.com |
b92db110450f2c14108bb1fe808f9ce2eb57621f | 2d1649a7a00d49b72ed7e53afa4abb3c9281ce03 | /.history/ParticleFilter/go_to_goal_20190422001746.py | b97cd844c688b0223ebd64a788ff07412f228001 | [] | no_license | joshzhang5/CS3630Lab6 | 9547dc6c89198e9bb4aebd8359d4feb974082d20 | 69e6df12829e18a211ae850236d74b4d728046ef | refs/heads/master | 2020-05-15T13:59:51.906195 | 2019-04-22T18:21:42 | 2019-04-22T18:21:42 | 182,317,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,111 | py | # Jiaxi Zhang
# George McAlear
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
import asyncio
import cozmo
from cozmo.util import distance_mm, degrees, speed_mmps, Pose
#particle filter functionality
class ParticleFilter:
    """Monte-Carlo localization filter over a CozGrid arena."""

    def __init__(self, grid):
        # Seed the filter with PARTICLE_COUNT uniformly random particles.
        self.grid = grid
        self.particles = Particle.create_random(PARTICLE_COUNT, grid)

    def update(self, odom, r_marker_list):
        """Advance the filter one step and return the mean-pose estimate.

        Returns a 4-tuple (x, y, heading, confident).
        """
        # Motion model: shift every particle by the measured odometry.
        self.particles = motion_update(self.particles, odom)
        # Sensor model: reweight/resample using the observed markers.
        self.particles = measurement_update(self.particles, r_marker_list, self.grid)
        # Best current estimate for display and decision making.
        return compute_mean_pose(self.particles)
class CozmoWarehouseWorker:
    """Drives a Cozmo robot that shuttles cubes between two arena poses.

    Keeps two pose estimates: ``current_arena_pose`` (grid/inch units,
    maintained via the particle filter and odometry) and the robot's own
    odometry frame (``current_robot_pose``, mm).  A background thread runs
    the particle-filter GUI.
    """
    def __init__(self, robot: cozmo.robot.Robot):
        # Arena pose is unknown until localize() runs.
        self.current_arena_pose = None
        self.current_robot_pose = robot.pose
        self.robot = robot
        # start streaming
        robot.camera.image_stream_enabled = True
        robot.camera.color_image_enabled = False
        robot.camera.enable_auto_exposure()
        # Obtain the camera intrinsics matrix
        fx, fy = robot.camera.config.focal_length.x_y
        cx, cy = robot.camera.config.center.x_y
        self.camera_settings = np.array([
            [fx, 0, cx],
            [ 0, fy, cy],
            [ 0, 0, 1]
        ], dtype=np.float)
        # Waypoints (arena/grid units): fixed pick-up and drop-off poses plus
        # intermediate waypoints to travel between them.
        self.pick_up_pose = Pose(x=4.5, y=12.75, z=0, angle_z=degrees(90))
        self.drop_off_pose = Pose(x=21.75, y=12.75, z=0, angle_z=degrees(90))
        self.drop_off_directions = [Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), self.drop_off_pose]
        self.pick_up_directions = [Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), self.pick_up_pose]
        self.drive_speed = speed_mmps(50)
        self.grid = CozGrid("map_arena.json")
        self.pf = ParticleFilter(self.grid)
        # GUI must run on its own thread while the robot loop runs here.
        threading.Thread(target=self.runGUI).start()
    def runGUI(self):
        # Build and run the particle-filter visualization (blocking loop).
        self.gui = GUIWindow(self.grid, show_camera=True)
        self.gui.show_particles(self.pf.particles)
        self.gui.show_mean(0, 0, 0)
        self.gui.start()
    async def drive_to(self, directions):
        """Drive to a single Pose or through a list of waypoint Poses."""
        print("-" * 20 + "DRIVING" + "-" * 20)
        if isinstance(directions, (list,)):
            for pose in directions:
                await self.__drive_to_pose(pose)
        else:
            await self.__drive_to_pose(directions)
    async def __drive_to_pose(self, pose):
        """Drive from the current arena pose to *pose* and update the estimate."""
        print("We are at ", self.current_arena_pose, " and we are driving to ", pose)
        # Relative displacement in arena coordinates.
        translation = (pose - self.current_arena_pose).position
        directions = Pose(x=translation.x, y=translation.y, z=0, angle_z=pose.rotation.angle_z)
        print("We will follow these directions: ", directions, "\n\n")
        await self.__execute_directions(directions)
        print("Directions followed!", "\n\n")
        self.update_current_arena_pose()
    async def __execute_directions(self, directions):
        """Execute a relative move as turn / drive-x / turn / drive-y / final turn."""
        print("Current arena pose is:", self.current_arena_pose, "\n\n")
        print("Current robot pose is:", self.robot.pose, "\n\n")
        # Rotate so the robot is aligned with the arena X axis.
        await self.robot.turn_in_place(angle=degrees(-self.current_arena_pose.rotation.angle_z.degrees)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose, "\n\n")
        # Distances are in grid units; scale converts to millimetres.
        await self.robot.drive_straight(distance=distance_mm(directions.position.x * self.grid.scale), speed=self.drive_speed).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose, "\n\n")
        await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose, "\n\n")
        await self.robot.drive_straight(distance=distance_mm(directions.position.y * self.grid.scale), speed=self.drive_speed).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose, "\n\n")
        print("ROBOT is TURNING ", diff_heading_deg(directions.rotation.angle_z.degrees, 90), "degrees.", "\n\n")
        # Final rotation from the +Y heading to the requested heading.
        await self.robot.turn_in_place(angle=degrees(diff_heading_deg(directions.rotation.angle_z.degrees, 90))).wait_for_completed()
        print("ROBOT is at AFTER FINAL TURN", self.robot.pose, "\n\n")
    def update_current_arena_pose(self):
        """Fold the odometry delta since the last waypoint into the arena pose."""
        print("-" * 20 + "UPDATING POSE" + "-" * 20)
        # Angle between the robot's odometry frame and the arena frame.
        coordinate_systems_diff = diff_heading_deg(self.current_robot_pose.rotation.angle_z.degrees, self.current_arena_pose.rotation.angle_z.degrees)
        print("robot pose before we moved was: ", self.current_robot_pose)
        print("My diff heading degree is: ")
        # Rotate both odometry positions into arena orientation (still mm).
        arena_initial_pose_mm = rotate_point(self.current_robot_pose.position.x, self.current_robot_pose.position.y, coordinate_systems_diff)
        arena_final_pose_mm = rotate_point(self.robot.pose.position.x, self.robot.pose.position.y, coordinate_systems_diff)
        d_x = arena_final_pose_mm[0] - arena_initial_pose_mm[0]
        d_y = arena_final_pose_mm[1] - arena_initial_pose_mm[1]
        d_heading = self.robot.pose.rotation.angle_z - self.current_robot_pose.rotation.angle_z
        difference_pose = convertPoseFromMmToInches(Pose(x=d_x, y=d_y, z=0, angle_z=d_heading))
        print("We think we moved ", difference_pose, "\n\n")
        self.current_arena_pose = self.current_arena_pose + difference_pose
        print("Current pose is now ", self.current_arena_pose, "\n\n")
    async def pick_up_cube(self, tries=5):
        """Wait for a visible light cube and try to pick it up."""
        print("-" * 20 + "GETTING CUBE" + "-" * 20)
        cube = await self.robot.world.wait_for_observed_light_cube(timeout=30)
        print("Found cube: %s" % cube)
        # NOTE(review): ``await x.wait_for_completed().obj`` awaits the ``.obj``
        # attribute of the wait_for_completed() result -- presumably
        # ``(await ...wait_for_completed()).obj`` was intended; confirm against
        # the cozmo SDK.  Also prefer ``is None`` over ``== None`` below, and
        # ``asyncio.sleep(5)`` is never awaited, so it does not actually pause.
        picked_up_cube = await self.robot.pickup_object(cube, num_retries=tries).wait_for_completed().obj
        if (picked_up_cube == None):
            print("Could not get the cube.")
            await self.robot.say_text("Help me!").wait_for_completed()
            asyncio.sleep(5)
        else:
            print("Picked up cube!")
    async def set_down_cube(self):
        """Lower the lift to release the cube and reset the head angle."""
        print("-" * 20 + "SETTING DOWN CUBE" + "-" * 20)
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()
    async def localize(self, turn_angle=20):
        """Spin in place, feeding odometry/markers to the particle filter until confident."""
        print("-" * 20 + "LOCALIZING" + "-" * 20)
        # reset our location estimates
        conf = False
        self.current_arena_pose = Pose(0,0,0,angle_z=degrees(0))
        self.pf = ParticleFilter(self.grid)
        # reset lift and head
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()
        while not conf:
            # move a little
            self.current_robot_pose = self.robot.pose
            await self.robot.turn_in_place(angle=degrees(turn_angle)).wait_for_completed()
            odometry = self.__compute_odometry()
            detected_markers, camera_image = await self.__marker_processing()
            # update, motion, and measurment with the odometry and marker data
            curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)
            # update gui
            self.gui.show_particles(self.pf.particles)
            self.gui.show_mean(curr_x, curr_y, curr_h)
            self.gui.show_camera_image(camera_image)
            self.gui.updated.set()
        self.current_arena_pose = Pose(curr_x , curr_y, 0, angle_z=degrees(curr_h))
        print("We localized to arena location ", self.current_arena_pose)
    def __compute_odometry(self, cvt_inch=True):
        '''
        Compute the odometry given the current pose of the robot (use robot.pose)
        Input:
            - curr_pose: a cozmo.robot.Pose representing the robot's current location
            - cvt_inch: converts the odometry into grid units
        Returns:
            - 3-tuple (dx, dy, dh) representing the odometry
        '''
        last_x, last_y, last_h = self.current_robot_pose.position.x, self.current_robot_pose.position.y, \
            self.current_robot_pose.rotation.angle_z.degrees
        curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
            self.robot.pose.rotation.angle_z.degrees
        # Express the displacement in the robot's previous heading frame.
        dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)
        if cvt_inch:
            dx, dy = dx / self.grid.scale, dy / self.grid.scale
        return (dx, dy, diff_heading_deg(curr_h, last_h))
    async def __marker_processing(self, show_diagnostic_image=False):
        '''
        Obtain the visible markers from the current frame from Cozmo's camera.
        Since this is an async function, it must be called using await, for example:
            markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)
        Input:
            - robot: cozmo.robot.Robot object
            - camera_settings: 3x3 matrix representing the camera calibration settings
            - show_diagnostic_image: if True, shows what the marker detector sees after processing
        Returns:
            - a list of detected markers, each being a 3-tuple (rx, ry, rh)
              (as expected by the particle filter's measurement update)
            - a PIL Image of what Cozmo's camera sees with marker annotations
        '''
        # Wait for the latest image from Cozmo
        image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)
        # Convert the image to grayscale
        image = np.array(image_event.image)
        image = color.rgb2gray(image)
        # Detect the markers
        markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)
        # Measured marker list for the particle filter, scaled by the grid scale
        marker_list = [marker['xyh'] for marker in markers]
        marker_list = [(x/self.grid.scale, y/self.grid.scale, h) for x,y,h in marker_list]
        # Annotate the camera image with the markers
        if not show_diagnostic_image:
            annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(annotated_image, markers, scale=2)
        else:
            diag_image = color.gray2rgb(diag['filtered_image'])
            diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(diag_image, markers, scale=2)
            annotated_image = diag_image
        return marker_list, annotated_image
async def run(robot: cozmo.robot.Robot):
    # Entry point: localize once, drive to the pick-up pose, then shuttle
    # cubes between pick-up and drop-off forever.
    cosimo = CozmoWarehouseWorker(robot)
    await cosimo.localize()
    await cosimo.drive_to(cosimo.pick_up_pose)
    while True:
        await cosimo.pick_up_cube(tries=5)
        await cosimo.drive_to(cosimo.drop_off_directions)
        await cosimo.set_down_cube()
        await cosimo.drive_to(cosimo.pick_up_directions)
class CozmoThread(threading.Thread):
    # Runs the cozmo program loop on its own (non-daemon) thread so the
    # particle-filter GUI can own the main thread.
    def __init__(self):
        threading.Thread.__init__(self, daemon=False)
    def run(self):
        cozmo.robot.Robot.drive_off_charger_on_connect = False  # Cozmo can stay on his charger
        cozmo.run_program(run, use_viewer=False)
if __name__ == '__main__':
    # cozmo thread
    # Start the robot control loop on a background thread; the GUI thread is
    # spawned from inside CozmoWarehouseWorker's constructor.
    cozmo_thread = CozmoThread()
    cozmo_thread.start()
| [
"josh@lawn-143-215-110-217.lawn.gatech.edu"
] | josh@lawn-143-215-110-217.lawn.gatech.edu |
310c9ef7c52f88fbf9e2564573d6762814477a4e | 9bddd25271ca710a7166307229e407b62e411a47 | /cobs/state_modifier/modifier.py | b2cc82fb67605f412d49f1ab1524923d75877dda | [
"MIT"
] | permissive | sustainable-computing/COBS-joint-control | 3ace2f49562f65294d04f73f13a2ccff4af8fac7 | 0bb9ba0ceaea329e9e1474674ece6e76143dce53 | refs/heads/master | 2023-08-08T20:33:45.072668 | 2023-07-26T18:40:36 | 2023-07-26T18:40:36 | 304,130,824 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | class StateModifier:
def __init__(self):
self.models = list()
def add_model(self, model):
self.models.append(model)
def get_update_states(self, true_state, environment):
for model in self.models:
model.step(true_state, environment)
def get_ignore_by_checkpoint(self):
ignore_set = set()
for model in self.models:
if hasattr(model, 'ignore_by_checkpoint'):
ignore_set = ignore_set.union(set(model.ignore_by_checkpoint()))
return ignore_set
| [
"tzhang6@blizzard.cs.ualberta.ca"
] | tzhang6@blizzard.cs.ualberta.ca |
d485028798e1c737f0af507daf3b21f679ec03ae | b55c368efdfe360123be1a2e7677cee53706d1f9 | /VectorTrans/Main.py | 7f33d97819742d7ae327669e60bb979628d2c4fb | [
"MIT"
] | permissive | ZzhiWang/DRImplicitVecXform | 207cd6ef6edf5bc90b2abb1242e2d7bb3b322f95 | 2ec0c64fb098e29ce74929f5e19bce90b2f5791c | refs/heads/master | 2023-03-17T14:51:34.755756 | 2020-08-01T09:26:35 | 2020-08-01T09:26:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | import numpy as np
from Tools import Preprocess
from VectorTrans.DRTrans import DRTrans
from VectorTrans.MDSTrans import MDSTrans
from VectorTrans.TSNETrans import TSNETrans
from VectorTrans.PCATrans import PCATrans
from VectorTrans.MDSTransPlus import MDSTransPlus
from VectorTrans.TSNETransPlus import TSNETransPlus
from VectorTrans.CreateJson import JsonFile
def load_data():
    """Load the sample matrix X and its labels from the Data directory.

    Returns:
        (X, label): X as a float ndarray, label as an int ndarray.
    """
    # np.float / np.int were deprecated in NumPy 1.20 and removed in 1.24;
    # the plain builtins are the supported spelling and behave identically.
    X = np.loadtxt("..\\Data\\data.csv", dtype=float, delimiter=",")
    label = np.loadtxt("..\\Data\\label.csv", dtype=int, delimiter=",")
    return X, label
def run_example():
    """End-to-end demo: load data, run the chosen DR method, save results.

    Writes the embedding (and, when small enough, the derivative matrix)
    as CSV files next to the input data, plus a JSON description.
    """
    dr_method = 'MDS'  # 'MDS' 't-SNE' 'PCA' 'MDSPlus' 't-SNEPlus'
    X, label = load_data()
    repeat = Preprocess.has_repeat(X)
    if repeat:
        # Duplicate points break the transforms downstream; refuse early.
        print("Please recheck the input data for duplicate points")
        return
    X = Preprocess.normalize(X)  # Optional
    (n, d) = X.shape
    trans = DRTrans()
    if dr_method == 'MDS':
        trans = MDSTrans(X, label=label, y_init=None, y_precomputed=False)
    elif dr_method == 't-SNE':
        trans = TSNETrans(X, label=label, y_init=None, perplexity=30.0)
    elif dr_method == 'PCA':
        trans = PCATrans(X, label=label)
    elif dr_method == "MDSPlus":
        trans = MDSTransPlus(X, label=label, y_init=None, y_precomputed=False)
    elif dr_method == "t-SNEPlus":
        trans = TSNETransPlus(X, label=label, y_init=None, perplexity=30.0)
    else:
        print("This method is not supported at this time: ", dr_method)
        return
    trans.transform(nbrs_k=20, MAX_EIGEN_COUNT=4, yita=0.1)
    np.savetxt("..\\Data\\"+str(dr_method)+"_Y.csv", trans.Y, fmt='%.18e', delimiter=",")
    # Only dump the derivative when it stays under ~0.5 GiB.
    if n*d < 1024 ** 3 / 2:
        np.savetxt("..\\Data\\"+str(dr_method)+"_derivative.csv", trans.derivative, fmt='%.18e', delimiter=",")
    json_file = JsonFile(path="..\\Data\\")
    json_file.create_file(trans)
if __name__ == '__main__':
    run_example()
| [
"sdu2014@126.com"
] | sdu2014@126.com |
f7363780a3282a89ab03770ad5780972929a1345 | 0b54fe39df677cfb4a08fb9d40c79e4a9af740f7 | /makeplots.py | 73a172ae8e0c52406f0e78a3d93ba9163db71a33 | [] | no_license | aatishb/fluent | c34a39b484837265a0dc056032d9094f40b44573 | 9d64a929df1b9b37c02c1be1e4323114483adb0d | refs/heads/master | 2016-09-05T14:40:43.099330 | 2012-08-30T20:40:12 | 2012-08-30T20:40:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,414 | py | import numpy as np
import flu
import matplotlib.pyplot as plt
AA = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']

def count_AA(seq):
    """Return the relative frequency of each of the 20 standard amino
    acids in *seq*, in the fixed order of AA.

    Characters outside the amino-acid alphabet are ignored; if *seq*
    contains no recognised amino acid, a list of zeros is returned.
    """
    # Dict membership is O(1) per character (the original scanned the AA list).
    counts = dict((aa, 0) for aa in AA)
    total = 0.0  # float so the division below is true division under Python 2
    for ch in seq:
        if ch in counts:
            counts[ch] += 1
            total += 1
    if total == 0:
        return [0 for _ in AA]
    return [counts[aa] / total for aa in AA]
def deltat(virarray):
    """Mean phylogenetic distance between consecutive seasons.

    For each adjacent pair of years, averages tree.distance() over all
    cross-season virus pairs.  Relies on module globals `years` and `tree`.
    """
    meandist = []
    print "Calculating mean deltaT"
    for year1 in years:
        for year2 in years:
            if years.index(year2) == years.index(year1) + 1:
                i = years.index(year1)
                dist = []
                for word1 in virarray[i]:
                    for word2 in virarray[i+1]:
                        # Tree tips are labelled place/year/id.
                        label1 = str(word1.place)+'/'+str(word1.year)+'/'+str(word1.id)
                        label2 = str(word2.place)+'/'+str(word2.year)+'/'+str(word2.id)
                        dist.append(tree.distance(label1,label2))
                meandist.append(np.mean(dist))
    print "done"
    return meandist
def corr(data):
    """Pairwise Pearson correlation matrix of the columns of *data*.

    *data* is an (observations x variables) array-like; transposing it
    makes np.corrcoef correlate variables, giving a square
    (variables x variables) matrix.
    """
    return np.corrcoef(np.transpose(data))
def plotstuff(virarray, time, relent, prob, sel, entropy):
    """Render all analysis figures to PNG files in the working directory.

    Args (all indexed per season unless noted):
        virarray: viruses per year.
        time:     branch-length time steps between consecutive years.
        relent:   per-site relative entropy arrays.
        prob:     per-site probability of the neutral model.
        sel:      per-site boolean selection flags.
        entropy:  per-site entropy time series (indexed [site][year]).

    NOTE(review): the sequence length 588 is hard-coded throughout, and the
    function also reads the module globals `years`, `Nlist` and `flu`.
    """
    print "\n"
    print "Generating figures"
    #Plots all sites under selection
    plt.clf()
    plt.subplots_adjust(bottom=0)
    plt.imshow(sel,interpolation='nearest', cmap=plt.cm.Reds,aspect='auto')
    plt.yticks(range(len(years[0:-1])), years[0:-1])
    plt.savefig("heatmap.png",bbox_inches='tight',dpi=100)
    sites = range(588)
    #Plots Probability of Neutral Model vs Position
    plt.clf()
    plt.subplots_adjust(bottom=-0.5)
    for i in range(len(years)-1):
        ax = plt.subplot(str(511+i))
        ax.set_yscale('log')
        ax.plot(sites, prob[i], label=years[i])
    plt.savefig("prob.png",bbox_inches='tight',dpi=100)
    #Plots Relative Entropy vs Position
    plt.clf()
    plt.subplots_adjust(bottom=-0.5)
    for i in range(len(years)-1):
        ax = plt.subplot(str(511+i))
        ax.set_yscale('linear')
        ax.plot(sites, relent[i], label=years[i])
        # Horizontal significance thresholds at p=0.01 and p=0.001.
        ax.plot(sites, (-np.log(0.01)/len(virarray[i+1]))*np.ones(588))
        ax.plot(sites, (-np.log(0.001)/len(virarray[i+1]))*np.ones(588))
    plt.savefig("relent.png",bbox_inches='tight',dpi=100)
    #Plots Average Relative Entropy vs Position
    minN = min(Nlist)
    mean_relent = [np.mean(word) for word in zip(*relent)]
    plt.clf()
    plt.subplots_adjust(bottom=0)
    ax = plt.subplot(111)
    ax.set_yscale('linear')
    # NOTE(review): `i` here is the leaked loop variable from the loop above
    # (last season) — probably unintended as a label for the mean curve.
    ax.plot(sites, mean_relent, label=years[i])
    ax.plot(sites, (-np.log(0.01)/minN)*np.ones(588))
    ax.plot(sites, (-np.log(0.001)/minN)*np.ones(588))
    plt.savefig("mean_relent.png",bbox_inches='tight',dpi=100)
    #Plots Average Probability of Neutral Model vs Position
    mean_prob = [np.mean(word) for word in zip(*prob)]
    plt.clf()
    plt.subplots_adjust(bottom=0)
    ax = plt.subplot(111)
    ax.set_yscale('log')
    ax.plot(sites, np.array(mean_prob,dtype=float), label=years[i])
    plt.savefig("mean_prob.png",bbox_inches='tight',dpi=100)
    #Build a correlation matrix
    corrmat = [[np.abs(word) for word in row] for row in corr(sel)]
    #Plots correlation among all sites
    plt.clf()
    plt.subplots_adjust(bottom=0)
    plt.imshow(corrmat, interpolation='nearest', cmap=plt.cm.Reds, aspect='auto')
    #plt.yticks(sites, sites)
    plt.savefig("corr_heatmap.png",bbox_inches='tight',dpi=200)
    #Make a list of sites that are selected at least four out of five seasons
    sel_list = []
    site = 0
    for sel_site in zip(*sel):
        count = 0
        for word in sel_site:
            if word == True:
                count += 1
        if count >= 4:
            sel_list.append(site)
        site += 1
    print sel_list
    #Plot Entropy over time for these sites
    plt.clf()
    plt.figure()
    for site in sel_list:
        plt.plot(range(len(years)), entropy[site], label=str(site))
    plt.legend(loc='best')
    plt.xlabel('years')
    plt.savefig("entropy for selected sites.png")
    #Divides sequence into chunks and makes a plot of entropy vs time
    plt.clf()
    plt.figure()
    step_size = 50
    site_chunks = [sites[i:i+step_size] for i in range(0, len(sites), step_size)]
    for site_chunk in site_chunks:
        entropy_mean = []
        entropy_std = []
        for year in range(len(years)):
            templist = []
            for site in site_chunk:
                #if not entropy[site][year] == 0:
                templist.append(entropy[site][year])
            entropy_mean.append(flu.np.mean(templist))
            entropy_std.append(flu.np.std(templist))
        # entropy_std (a list) divided by a numpy scalar yields an ndarray.
        plt.errorbar(range(len(years)), entropy_mean, yerr = entropy_std/flu.np.sqrt(len(site_chunk)) , label=str(site_chunk[0])+'-'+str(site_chunk[-1]))
    plt.legend(loc='best')
    plt.xlabel('years')
    plt.savefig("entropy versus time.png")
    #Split sequences into chunks of 100 amino acids and plot number of variant sites over time
    plt.clf()
    plt.figure()
    step_size = 100
    site_chunks = [sites[i:i+step_size] for i in range(0, len(sites), step_size)]
    for site_chunk in site_chunks:
        entropy_mean = []
        for year in range(len(years)):
            templist = []
            count = 0
            for site in site_chunk:
                # A site with nonzero entropy in this season is a variant site.
                if not entropy[site][year] == 0:
                    count += 1
            entropy_mean.append(count)
        plt.errorbar(range(len(years)), entropy_mean , label=str(site_chunk[0])+'-'+str(site_chunk[-1]))
    plt.legend(loc='best')
    plt.savefig("num variants sites.png")
    print "Done"
from Bio import Phylo
# Phylogenetic tree of the Egyptian H5N1 HA sequences (PhyML output).
tree = Phylo.read('egyptH5NHA.phy_phyml_tree.txt', 'newick')
nodes = tree.get_terminals()
clade1 = Phylo.read('clade1.txt', 'newick')
c1nodes = [word.name for word in clade1.get_terminals()]
c1ids = [word.name.split('/')[2] for word in clade1.get_terminals()]
clade2 = Phylo.read('clade2.txt', 'newick')
c2nodes = [word.name for word in clade2.get_terminals()]
c2ids = [word.name.split('/')[2] for word in clade2.get_terminals()]
years = ['2006','2007','2008','2009','2010','2011']
# Egyptian viruses grouped per season.
virarray = [[word for word in flu.seqlist if (word.place == "Egypt" and word.year == year)] for year in years]
# Branch-length time between consecutive seasons (all / clade1 / clade2).
time = [0.0143905347475, 0.039710268946, 0.0457343136228, 0.0451385507375, 0.0425961707733]
timec1 = [0.013992550923,0.040050920329,0.0492249534222,0.0624247846509,0.0557367576217]
timec2 = [0.0144531601812,0.0208246072785,0.0251183491289,0.0228981542124,0.0199896902198]
# Create array of frequencies at each site. freqarray : years, sites, freq of amino acids
freqarray = np.array([[count_AA(word) for word in zip(*[vir.seq for vir in virpool])] for virpool in virarray], dtype=float)
# Next, create and populate 3 arrays, for relative entropy, probability of fitting neutral model, and selection
prob = [[] for year in years[0:-1]]
relent = [[] for year in years[0:-1]]
sel = [[] for year in years[0:-1]]
# Per-site entropy time series (indexed [site][year]).
entropy = [[flu.entropy(season) for season in freq_position] for freq_position in zip(*freqarray)]
Nlist = []
for year1 in years:
    for year2 in years:
        if years.index(year2) == years.index(year1) + 1:
            i = years.index(year1)
            #calculate relative entropy pairs of sites across years
            xent = []
            for (site1,site2) in zip(freqarray[i+1],freqarray[i]):
                # Sites absent (all-zero frequencies) in either season contribute 0.
                if not (np.sum(site1)*np.sum(site2) == 0):
                    xent.append(flu.KLdivergence(site1,flu.expQ(site2,time[i])))
                else:
                    xent.append(0.0)
            N = len(virarray[i+1])
            Nlist.append(N)
            #print N
            #create arrays of relative entropy, probability, and binary variable indicating selection.
            #arrays iterate over amino acid position. i = year in which samples collected
            relent[i] = np.array(xent,dtype=float)
            prob[i] = np.exp(-1.0*N*np.array(xent,dtype=float))
            sel[i] = np.less_equal(prob[i],0.001)
#print deltat(virarray)
plotstuff(virarray, time, relent, prob, sel, entropy)
| [
"aatish@gmail.com"
] | aatish@gmail.com |
9a0e986d7a22643c61cfa7262cfcd710ef3b77b2 | 154a9c4255ec016154e5461cacb998bcfaec37c1 | /aitdt/urls.py | a81900cab2260f2aa69856cc2760167a1d1fa8e6 | [] | no_license | BetterTribe/aitdt | 1f5f66d3c3dcfea02340d6294362e2706b7dad8c | 6ebd5e3fb8c7bc53a9d76b8dc898bc1b7f0a4c88 | refs/heads/master | 2020-07-09T19:09:33.271767 | 2019-08-31T21:24:28 | 2019-08-31T21:24:28 | 204,056,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """aitdt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('search/', include('search.urls')),
path('admin/', admin.site.urls),
]
| [
"11donank22@gmail.com"
] | 11donank22@gmail.com |
d56e7846c82c52f70ed8995eae5b8812797b0c59 | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/blocks/parser_.py | f1a794e50470f6bace4dc005a1697707cf3acf70 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 93 | py | # proxy module
from __future__ import absolute_import
from codetools.blocks.parser_ import *
| [
"ischnell@enthought.com"
] | ischnell@enthought.com |
c9bbb18d49a220a9c5dba67e26b75ee3e9d1b3c3 | ad1ff82d1361f76b043faa304aa3b7be3652b303 | /tools/supervisor.py | 40f2f0a9c939658268aa3ae39a028bf539add829 | [] | no_license | jon--lee/aor | 3a0f92e345a88c347146acba4b9f7513a3a986cf | 4a4cd8800dfc209c382507740e68586b34178a1b | refs/heads/master | 2020-06-10T05:31:51.179020 | 2019-06-24T23:48:24 | 2019-06-24T23:48:24 | 193,597,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import numpy as np
from expert import tf_util
class Supervisor():
    """Wraps a batched policy callable invoked as act(batch, stochastic=...)."""

    def __init__(self, act):
        # `act` maps a batch of states to a batch of actions.
        self.act = act

    def sample_action(self, s):
        """Deterministic policy: sampling equals the intended action."""
        return self.intended_action(s)

    def intended_action(self, s):
        """Query the policy on a singleton batch and unwrap the single result."""
        batched = s[None]
        return self.act(batched, stochastic=False)[0]
class Supervisor2():
    """Supervisor backed by a TensorFlow policy function bound to a session."""
    def __init__(self, policy_fn, sess):
        self.policy_fn = policy_fn
        self.sess = sess
        # Initialize TF variables within the supplied session.
        with self.sess.as_default():
            tf_util.initialize()
    def sample_action(self, s):
        # Evaluate the policy on a singleton batch inside the session.
        with self.sess.as_default():
            intended_action = self.policy_fn(s[None,:])[0]
        return intended_action
    def intended_action(self, s):
        # Policy is deterministic here, so intended == sampled.
        return self.sample_action(s)
class Supervisor3():
    """Wraps a policy invoked positionally as act(stochastic, state)."""

    def __init__(self, act):
        self.act = act

    def sample_action(self, s):
        """Deterministic policy: sampling equals the intended action."""
        return self.intended_action(s)

    def intended_action(self, s):
        # First positional argument disables stochastic sampling.
        return self.act(False, s)[0]
| [
"123abcjonathanlee@gmail.com"
] | 123abcjonathanlee@gmail.com |
5e04932dfb27aeb12ad72a0908fe316451c52a4a | 46a6f325ae1ea6f107c220a50f7fb86dfd9522a1 | /swea/1861/solution.py | a4c8f682f23f68643f66c96cc0cabfa8572c0e09 | [] | no_license | jiyong1/problem-solving | 5cf6310461ad21be4ce91bdee8eef74cdb3b23db | fce67785eb5fd37e230efbfe571d7446ee78d305 | refs/heads/master | 2023-03-29T13:18:09.136215 | 2021-03-31T14:27:15 | 2021-03-31T14:27:15 | 331,573,598 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | import sys
sys.stdin = open('input.txt')
from collections import deque
T = int(input())
dy = [-1, 1, 0, 0]
dx = [0, 0, -1, 1]
def canIGo(cy, cx, num):
    """Return 1 if some in-bounds 4-neighbour of (cy, cx) holds num+1, else 0.

    Reads the module globals `N` (grid size) and `arr` (grid values).
    """
    for d in range(4):
        ny = cy + dy[d]
        nx = cx + dx[d]
        if ny < 0 or nx < 0 or ny >= N or nx >= N:
            continue
        if arr[ny][nx] == num + 1:
            return 1
    else:
        # for-else: the loop never hit `return 1`, so this cell is an end point.
        return 0
def bfs(q):
    """Walk descending chains from the queued end points.

    Each queue entry is [y, x, length-so-far].  Returns (start_room,
    max_length): the smallest room number from which the longest strictly
    descending chain begins.  Reads module globals `arr`, `N`, `dy`, `dx`.
    """
    result_d = 0
    result_s = 0
    while q:
        cy, cx, clen = q.popleft()
        cnum = arr[cy][cx]
        if clen > result_d:
            result_d = clen
            result_s = cnum
        elif clen == result_d and result_s > cnum:
            # Tie on length: prefer the smaller starting room number.
            result_s = cnum
        for d in range(4):
            ny = cy + dy[d]
            nx = cx + dx[d]
            if ny < 0 or nx < 0 or ny >= N or nx >= N:
                continue
            if arr[ny][nx] == cnum - 1:
                q.append([ny, nx, clen+1])
                # Room numbers are unique, so at most one neighbour matches.
                break
    return result_s, result_d
for tc in range(1, T+1):
    N = int(input())
    arr = [list(map(int, input().split())) for _ in range(N)]
    # An end point always exists: a cell whose neighbours never contain a
    # value exactly one larger.  Searching with BFS only from those end
    # points finds the longest chain; running a BFS/DFS from every cell
    # would also work but is far too slow.
    q = deque()
    for i in range(N):
        for j in range(N):
            if canIGo(i, j, arr[i][j]):
                continue
            q.append([i, j, 1])
    # BFS from the collected end points.
    s, d = bfs(q)
    print('#{} {} {}'.format(tc, s, d))
| [
"jyong9591@gmail.com"
] | jyong9591@gmail.com |
8fb4c6aa19a856c31c1bdc3e05f50f7c5ae8abb8 | ab210a199e94a3da207275813b5430373364376b | /tankgui/__init__.py | 2f49f90db714090cb2edee21bc9550aa3ad83fbf | [] | no_license | xmcp/botzone-mccts | b0f416c2ce4b74efd7f5ca9ace73ff7ee0bd99af | e074597507f65862330bc0018d7b1c2b289da99b | refs/heads/master | 2020-04-13T22:44:28.903849 | 2019-11-19T07:25:17 | 2019-11-19T07:25:17 | 163,487,243 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,092 | py | from enum import Enum, IntEnum
import json
import tkinter
import tkinter.messagebox
import threading
import os
cwd=os.path.split(__file__)[0]
S=9  # the battlefield is an S x S grid

class Terrain(Enum):
    """Static cell contents of the battlefield grid."""
    air=0
    brick=1
    steel=2
    water=3
    base0=4  # team 0's base
    base1=5  # team 1's base
    tank_backdrop=6  # cell currently occupied by at least one tank
    destroyed=7  # transient: brick destroyed this turn (cleared to air)
    killed=8  # transient: tank hit this turn (cleared to air)
class Action(IntEnum):
    """Per-turn tank orders: idle, four moves, four firing directions."""
    idle=-1
    up=0
    right=1
    down=2
    left=3
    fup=4
    fright=5
    fdown=6
    fleft=7

def shooting_each(a1, a2):
    """True when *a1* and *a2* are opposite firing directions.

    Used for the counter-shoot rule: two tanks firing straight at each
    other cancel their shells.  Accepts Action members or their raw int
    values (IntEnum compares and hashes like int).
    """
    ends = {a1, a2}
    return ends == {Action.fup, Action.fdown} or ends == {Action.fright, Action.fleft}

DY=[-1,0,1,0]*2 # *2 for move and fire
DX=[0,1,0,-1]*2
class InvalidAction(Exception):
    """Raised when a tank is ordered to do something the rules forbid."""

    def __init__(self, s):
        # Keep the human-readable reason for display.
        self.str = s

    def __str__(self):
        return '<Invalid Move: {}>'.format(self.str)

    __repr__ = __str__
class GameOver(Exception):
    """Signals the end of the game (a base destroyed or a team wiped out)."""

    def __str__(self):
        return '<Game Over>'

    __repr__ = __str__
class Tank:
    """A single tank: position, cooldown, and its per-turn action."""
    def __init__(self,world,y,x):
        self.y=y
        self.x=x
        self.world=world
        self.shoot_cd=0  # turns until the tank may fire again
        self.killed=False
        self.action_todo=None  # Action (or raw int) planned for this turn
        # Mark the spawn cell as occupied.
        self.world.terrain[self.y][self.x]=Terrain.tank_backdrop
    def check_move(self,action):
        """Return True if moving in *action*'s direction lands on empty air."""
        ny=self.y+DY[action]
        nx=self.x+DX[action]
        if not (0<=nx<S and 0<=ny<S):
            return False
        if self.world.terrain[ny][nx]!=Terrain.air:
            return False
        return True
    def do_action_move(self):
        """Apply the movement half of the planned action (fires are later)."""
        action=self.action_todo
        if self.killed:
            return
        if Action.idle<action<Action.fup: # move
            if not self.check_move(action):
                raise InvalidAction('bad move')
            assert self.world.terrain[self.y][self.x]==Terrain.tank_backdrop
            # Only clear the backdrop when no team-mate shares the cell.
            if len(self.world.get_tank_by_cord(self.y,self.x))==1: # last tank
                self.world.terrain[self.y][self.x]=Terrain.air
            self.y+=DY[action]
            self.x+=DX[action]
            #self.world.terrain[self.y][self.x]=Terrain.tank_backdrop # set terrain later
    def do_action_fire(self):
        """Apply the firing half of the planned action; tick the cooldown.

        Traces the shell cell by cell until it leaves the board, hits a
        tank, or hits non-traversable terrain.  Hitting a base raises
        GameOver; firing while on cooldown raises InvalidAction.
        """
        action=self.action_todo
        if self.killed:
            return
        if action>=Action.fup:
            if self.shoot_cd>0:
                raise InvalidAction('fire with cd')
            self.shoot_cd=2
            # Overlapping tanks forfeit the counter-shoot cancellation.
            self_overlapping=len(self.world.get_tank_by_cord(self.y,self.x))>1
            ny=self.y
            nx=self.x
            while True:
                ny+=DY[action]
                nx+=DX[action]
                if not (0<=nx<S and 0<=ny<S):
                    break
                terr=self.world.terrain[ny][nx]
                if terr==Terrain.tank_backdrop: # shooting tank
                    tanks=self.world.get_tank_by_cord(ny,nx)
                    assert tanks
                    # Shells fired straight at each other by two lone tanks cancel.
                    if self_overlapping or len(tanks)>1 or not shooting_each(self.action_todo,tanks[0].action_todo): # passed counter-shoot test
                        self.world.terrain[ny][nx]=Terrain.killed
                    break
                elif terr not in [Terrain.air,Terrain.water]: # shooting terrain
                    if terr in [Terrain.base0,Terrain.base1]:
                        raise GameOver()
                    elif terr!=Terrain.steel: # do shoot
                        self.world.terrain[ny][nx]=Terrain.destroyed
                    break
        # Cooldown ticks every turn, including the firing turn (2 -> 1),
        # so a tank can fire at most every other turn.
        if self.shoot_cd>0:
            self.shoot_cd-=1
class World:
    """Game state: the S x S terrain grid plus two teams of two tanks."""
    def __init__(self,terrain_desc):
        """Build the board; *terrain_desc* is the Botzone init dict with
        'brickfield'/'steelfield'/'waterfield' bitmask triples, or None
        for an empty board."""
        self.terrain=[[Terrain.air for _x in range(S)] for _y in range(S)]
        # Bases sit at the middle of the top and bottom rows.
        self.terrain[0][S//2]=Terrain.base0
        self.terrain[S-1][S//2]=Terrain.base1
        self.team=[
            [
                Tank(self,0,S//2-2),
                Tank(self,0,S//2+2),
            ],
            [
                Tank(self,S-1,S//2+2),
                Tank(self,S-1,S//2-2),
            ]
        ]
        if terrain_desc is not None:
            self.init_terrain(Terrain.brick,terrain_desc['brickfield'])
            self.init_terrain(Terrain.steel,terrain_desc['steelfield'])
            self.init_terrain(Terrain.water,terrain_desc['waterfield'])
    def init_terrain(self,terrain,desc):
        """Fill cells of type *terrain* from a 3-int bitmask (27 bits each,
        bit y*S+x set means cell (y, x) holds this terrain)."""
        block_size=S*3
        mask=desc[0]+desc[1]*(2**block_size)+desc[2]*(4**block_size)
        for y in range(S):
            for x in range(S):
                if mask&(1<<(y*S+x)):
                    self.terrain[y][x]=terrain
    def proc_turn(self,actions):
        """Advance one turn.  *actions* is [[a00, a01], [a10, a11]].

        Phase order matters: plan, move all, repaint tank backdrops,
        fire all, then clear transient cells and mark kills.  Raises
        GameOver when a base is hit or a whole team is dead.
        """
        for team,tank in [[0,0],[0,1],[1,0],[1,1]]: # plan action
            self.team[team][tank].action_todo=actions[team][tank]
        for team,tank in [[0,0],[0,1],[1,0],[1,1]]: # move
            self.team[team][tank].do_action_move()
        for team, tank in [[0,0],[0,1],[1,0],[1,1]]: # update tank backdrop
            t=self.team[team][tank]
            if not t.killed:
                self.terrain[t.y][t.x]=Terrain.tank_backdrop
        for team,tank in [[0,0],[0,1],[1,0],[1,1]]: # fire
            self.team[team][tank].do_action_fire()
        for y in range(S): # remove destroyed terrain and killed tank
            for x in range(S):
                if self.terrain[y][x]==Terrain.destroyed:
                    self.terrain[y][x]=Terrain.air
                elif self.terrain[y][x]==Terrain.killed:
                    self.terrain[y][x]=Terrain.air
                    for tank in self.get_tank_by_cord(y,x):
                        tank.killed=True
        for team in [0,1]: # check all tanks died
            if self.team[team][0].killed and self.team[team][1].killed:
                raise GameOver()
    def get_tank_by_cord(self,y,x):
        """All living tanks (either team) currently on cell (y, x)."""
        return [
            tank
            for team in self.team for tank in team
            if tank.y==y and tank.x==x and not tank.killed
        ]
D=48  # pixel size of one board cell

class GUI:
    """tkinter front-end: draws the board and collects the two tanks' orders.

    Thread model: the Tk object lives on the thread that called GUI();
    worker threads hand a World to process(), which blocks (serialised by
    self.lock) until the user presses Return.
    """
    PROMPT=-10  # pseudo-action shown while a tank still awaits input
    # Status-bar text per action (keys are Action values, hence plain ints work).
    action_txt={
        PROMPT: ' input ',
        Action.idle: ' ',
        Action.up: ' ↑ ',
        Action.down: ' ↓ ',
        Action.left: ' ← ',
        Action.right: ' → ',
        Action.fup: ' ↑ FIRE ',
        Action.fdown: ' ↓ FIRE ',
        Action.fleft: ' ← FIRE ',
        Action.fright: ' → FIRE ',
    }
    # Glyph drawn on the canvas over the tank for the pending action.
    hover_txt={
        PROMPT: '×',
        Action.idle: '',
        Action.up: '↑',
        Action.down: '↓',
        Action.left: '←',
        Action.right: '→',
        Action.fup: '╩',
        Action.fdown: '╦',
        Action.fleft: '╣',
        Action.fright: '╠',
    }
    def __init__(self):
        self.tk=tkinter.Tk()
        self.tk.title('tank gui')
        self.tk.resizable(False,False)
        # Set = "input complete"; process() waits on it.
        self.done_flag=threading.Event()
        self.done_flag.set()
        # Serialises concurrent process() callers.
        self.lock=threading.Lock()
        self.imgs={
            k:tkinter.PhotoImage(file=f'{cwd}/img/{k.name}.png')
            for k in [Terrain.base0,Terrain.base1,Terrain.brick,Terrain.steel,Terrain.water,Terrain.tank_backdrop]
        }
        # Keyed by (team, tank, on_cooldown).
        self.tank_imgs={
            (team,tank,cd):tkinter.PhotoImage(file=f'{cwd}/img/tank_{team}{tank}{"x" if cd else ""}.png')
            for team in [0,1] for tank in [0,1] for cd in [0,1]
        }
        self.world=World(None)
        self.myside=0
        self.action=[Action.idle, Action.idle]
        self.action_tank=None  # index of the tank currently taking input
        self.action_var=[tkinter.StringVar(self.tk),tkinter.StringVar(self.tk)]
        self.done_var=tkinter.StringVar(self.tk,value='.')
        self.cvs=tkinter.Canvas(self.tk,width=9*D,height=9*D,bg='black')
        self.cvs.grid(row=0,column=0)
        self.hover_id=[None,None]  # canvas text ids for the two hover glyphs
        bar=tkinter.Frame(self.tk)
        self.tank_label=[
            tkinter.Label(bar,image=self.tank_imgs[0,0,self.world.team[self.myside][0].shoot_cd]),
            tkinter.Label(bar,image=self.tank_imgs[0,1,self.world.team[self.myside][1].shoot_cd]),
        ]
        self.tank_label[0].grid(row=0,column=0)
        tkinter.Label(bar,textvariable=self.action_var[0],font='Consolas').grid(row=0,column=1)
        self.tank_label[1].grid(row=0,column=2)
        tkinter.Label(bar,textvariable=self.action_var[1],font='Consolas').grid(row=0,column=3)
        tkinter.Label(bar,textvariable=self.done_var,bg='green',fg='white',font='Consolas').grid(row=0,column=4)
        self.update_action_var()
        bar.grid(row=1,column=0,sticky='we',pady=3)
        # Keys 1/2 select a tank; arrows set its move (twice = fire); Return submits.
        self.tk.bind('<Key-1>',lambda *_: self.change_action_tank(0))
        self.tk.bind('<Key-2>',lambda *_: self.change_action_tank(1))
        self.tk.bind('<Up>',lambda *_: self.change_action(Action.up))
        self.tk.bind('<Down>',lambda *_: self.change_action(Action.down))
        self.tk.bind('<Left>',lambda *_: self.change_action(Action.left))
        self.tk.bind('<Right>',lambda *_: self.change_action(Action.right))
        self.tk.bind('<Return>',lambda *_: self.done())
    def new_mission(self,world:World,myside):
        """Load a new game state and reset the input widgets for it."""
        self.world=world
        self.myside=myside
        self.action=[Action.idle,Action.idle]
        self.action_tank=None
        self.done_flag.clear()
        self.done_var.set(' INPUT ')
        self.cvs.delete('all')
        self.draw_terrain()
        self.draw_tank()
        self.tank_label[0]['image']=self.tank_imgs[0,0,self.world.team[self.myside][0].shoot_cd]
        self.tank_label[1]['image']=self.tank_imgs[0,1,self.world.team[self.myside][1].shoot_cd]
        self.update_action_var()
    def update_action_var(self):
        """Refresh the status-bar text and hover glyph for both tanks."""
        for tank in [0,1]:
            act=self.action[tank]
            if act==Action.idle and tank==self.action_tank:
                act=self.PROMPT
            self.action_var[tank].set(self.action_txt[act])
            self.cvs.itemconfigure(self.hover_id[tank],text=self.hover_txt[act])
    def change_action_tank(self,ind):
        """Select tank *ind* for input and clear its pending action."""
        self.action_tank=ind
        self.action[ind]=Action.idle
        self.update_action_var()
    def change_action(self,act:Action):
        """Set the selected tank's action; pressing the same direction
        twice upgrades a move into a fire (if not on cooldown)."""
        if self.action_tank is None:
            return
        if self.action[self.action_tank]==act and self.world.team[self.myside][self.action_tank].shoot_cd==0:
            # Move -> fire in the same direction (offset between enum blocks).
            self.action[self.action_tank]+=(Action.fup-Action.up)
        else:
            self.action[self.action_tank]=act
        self.update_action_var()
    def done(self):
        """Validate both moves and release the waiting process() call."""
        for tank in [0,1]:
            if Action.up<=self.action[tank]<Action.fup and not self.world.team[self.myside][tank].check_move(self.action[tank]):
                tkinter.messagebox.showerror('tank gui',f'Tank {tank+1} Invalid Move')
                return
        self.done_var.set('.')
        self.done_flag.set()
    def draw_terrain(self):
        """Paint every terrain cell; sides are swapped so 'my' base is on top."""
        imgs=self.imgs.copy()
        if self.myside==1:
            imgs[Terrain.base0],imgs[Terrain.base1] = imgs[Terrain.base1],imgs[Terrain.base0]
        for y in range(S):
            for x in range(S):
                if self.world.terrain[y][x] in self.imgs.keys():
                    self.cvs.create_image(x*D,y*D,anchor='nw',image=imgs[self.world.terrain[y][x]])
    def draw_tank(self):
        """Draw all living tanks (offset so co-located tanks stay visible)
        and create hover-glyph slots for my own tanks."""
        tank_imgs=self.tank_imgs.copy()
        if self.myside==1:
            # Swap colours so my tanks always use team-0 sprites.
            for tank in [0,1]:
                for cd in[0,1]:
                    tank_imgs[0,tank,cd],tank_imgs[1,tank,cd] = tank_imgs[1,tank,cd],tank_imgs[0,tank,cd]
        for team in [0,1]:
            for tank in [0,1]:
                t=self.world.team[team][tank]
                y=t.y*D+team*D//2
                x=t.x*D+tank*D//2
                if not t.killed:
                    self.cvs.create_image(x, y, anchor='nw', image=tank_imgs[team, tank, t.shoot_cd])
                    if team==self.myside:
                        self.hover_id[tank]=self.cvs.create_text(x,y,anchor='nw',font='黑体 -24',fill='blue')
    def process(self,world:World,myside):
        """Show *world* from side *myside*, block until the user submits,
        and return the chosen [a0, a1] pair as a JSON string."""
        with self.lock:
            self.new_mission(world,myside)
            self.done_flag.wait()
            #self.cvs.delete('all')
            return json.dumps(self.action)
def main(inp_str):
    """Replay a Botzone-format match log and ask the GUI for the next move.

    *inp_str* is the judge's JSON: requests[0] is the field init dict,
    later requests/responses alternate the two sides' past moves.
    Returns the chosen actions as a JSON string.
    """
    inp=json.loads(inp_str)
    init,*first_moves=inp['requests']
    second_moves=inp['responses']
    if init['mySide']==0:
        # Reorder so index 0 of each pair is side 0's move regardless of
        # which side this log belongs to.
        first_moves,second_moves = second_moves,first_moves
    assert len(second_moves)==len(first_moves)
    world=World(init)
    for move in zip(first_moves,second_moves):
        world.proc_turn(move)
    res=gui.process(world,init['mySide'])
    return res
# Example Botzone log used by the self-test below.
sample_str='''{
    "requests": [
        {
            "brickfield": [
                14119466,
                54744598,
                44201816
            ],
            "mySide": 0,
            "steelfield": [
                16777217,
                43008,
                67108868
            ],
            "waterfield": [
                524288,
                8388616,
                128
            ]
        },
        [
            5,
            6
        ],
        [
            -1,
            -1
        ],
        [
            -1,
            5
        ],
        [
            6,
            3
        ],
        [
            0,
            3
        ]
    ],
    "responses": [
        [
            2,
            2
        ],
        [
            5,
            7
        ],
        [
            1,
            3
        ],
        [
            5,
            7
        ],
        [
            1,
            3
        ]
    ]
}'''
startup_done=threading.Event()
def startup():
    """Create the singleton GUI and run the Tk mainloop on this thread."""
    # tkinter must be driven from a single thread; this thread owns it.
    global gui
    gui=GUI()
    startup_done.set()
    tkinter.mainloop()
# Importing this module spins up the GUI thread and waits until it is ready.
threading.Thread(target=startup).start()
startup_done.wait()
if __name__=='__main__':
    # Self-test: three workers queue up the same sample match concurrently.
    threading.Thread(target=lambda:main(sample_str)).start()
    threading.Thread(target=lambda:main(sample_str)).start()
    threading.Thread(target=lambda:main(sample_str)).start()
"xiaoyuanan2000@sina.com"
] | xiaoyuanan2000@sina.com |
d396d4af003c59777155a72f329149800f2549f2 | b08b43d708717f4341589575bd5756100b708389 | /Python/一个复杂的参数传递例子.py | 7882290b270d5c5f05c08cac93555ed20559d211 | [] | no_license | LeAstrea/Openstack_python_project | 50fd1b7901fa6750efd76d58d2477efb8cbb7ae1 | c23aa96a8b3d2c6632a335d22c007039a396dd1b | refs/heads/master | 2023-03-15T20:52:53.451988 | 2018-10-30T09:09:32 | 2018-10-30T09:09:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py |
# Parameter passing demo: how positional, keyword, *args and **kwargs
# arguments are resolved when forwarded through a wrapper.
class test():
    def wrap(self, func, a, b=0, *args, **kwargs):
        """Consume a and b, forward everything left over to *func*."""
        print("a get a, b:", a, b)
        func(*args, **kwargs)
    def func(self, c, d=0):
        print("b get c, d:", c, d)
    def main(self, a, b, c):
        """Exercise wrap() with various calling conventions."""
        print("---------1---------")
        print("c get a, b, c:", a, b, c)
        self.wrap(self.func, a=1, b=b, c=c, d=(4, 5, 6, 7)) # ok
        self.wrap(self.func, a=1, b=b, c=c, d=4) # ok
        self.wrap(self.func, 1, b, c, 4) # ok
        self.wrap(self.func, 1, b, c) # ok
        #self.wrap(self.func, 1, b) # error: only two positional args cannot reach both a and c correctly
        self.wrap(self.func, a=1, c=c) # this form works: c travels via **kwargs
        print("---------2---------")
dome = test()
dome.main(1, 2, 3)
"""
1. func(arg1,arg2,...)
2. func(arg1,arg2=value2,...)
3. func(*arg1)
4. func(**arg1)
先1,后2,再3,最后4,也就是先把方式1中的arg解析,然后解析方式2中的arg=value,再解析方式3,即是把多出来的arg这种形式的实参组成个tuple传进去,最后把剩下的key=value这种形式的实参组成一个dictionary传给带俩个星号的形参,也就方式4。
"""
| [
"noreply@github.com"
] | noreply@github.com |
24084e5a12f50ce2ebc7332f32ffb6b573624685 | 9552e8024de5341f185cc2273e621a1c5278424f | /backend/reviews/models.py | 95afb3940f493ea101f3d16e70fa8b8815f0db9f | [] | no_license | Ya-kuku/PJT_PopcornAngle | 158e0911e47c7d85cead22a0aa237b2d9673f5ce | 5b9bf297efb7f1066cf5999c997aee90ac3dd9a7 | refs/heads/master | 2023-03-06T06:01:15.728711 | 2021-02-15T01:51:54 | 2021-02-15T01:51:54 | 316,192,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | from django.db import models
from django.conf import settings
from movies.models import Movie
# Create your models here.
class Review(models.Model):
    """A user-authored review of a movie."""
    writer=models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    # NOTE(review): despite the name this is a ForeignKey to Movie, so the
    # underlying DB column is movie_id_id — consider renaming to `movie`.
    movie_id = models.ForeignKey(Movie,on_delete=models.CASCADE)
    summary=models.CharField(max_length=500)
    content=models.TextField()
    # Presumably a spoiler flag — confirm against the frontend usage.
    spo=models.BooleanField()
    created_at=models.DateTimeField(auto_now_add=True,null=True)
    updated_at=models.DateTimeField(auto_now=True,null=True)
class Comment(models.Model):
    """A user comment attached to a Review."""
    writer=models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    review=models.ForeignKey(Review,on_delete=models.CASCADE)
    content=models.CharField(max_length=300)
    created_at=models.DateTimeField(auto_now_add=True)
    updated_at=models.DateTimeField(auto_now=True)
updated_at=models.DateTimeField(auto_now=True) | [
"wemanse@naver.com"
] | wemanse@naver.com |
be74c2eb7f22ddb1743955e8f99ebba5b1f8546c | 80d9de67db6e319723d012dd083838b140e16879 | /book_blog/urls.py | 70ad2fe10cf69a01cfbec2d345c5ee93623b9bea | [] | no_license | byfenixxx/final_project_makers | 8509231f57b613c2ef8b7c35ccfeed3ca795bc60 | 5e9c1c3ac779fd320a4c1cc19faa0f561cae02ee | refs/heads/master | 2023-06-09T21:46:49.151314 | 2021-07-02T09:12:10 | 2021-07-02T09:12:10 | 382,291,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | """book_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework.permissions import AllowAny
from rest_framework.routers import DefaultRouter
from main.views import GenreListView, BookViewSet, ImageView
router = DefaultRouter()
router.register("books", BookViewSet)
schema_view = get_schema_view(
openapi.Info(
title="My API",
default_version="v1",
description="My ecommerce API"
),
public=True,
permission_classes=[AllowAny],
)
urlpatterns = [
path('admin/', admin.site.urls),
path("api-auth/", include("rest_framework.urls")),
path("v1/api/", include(router.urls)),
path("v1/api/genres/", GenreListView.as_view()),
path("v1/api/account/", include("account.urls")),
path("v1/api/add-image/", ImageView.as_view()),
path("v1/api/docs/", schema_view.with_ui("swagger")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"sanzharkurmanbekov@Sanzhars-MacBook-Pro.local"
] | sanzharkurmanbekov@Sanzhars-MacBook-Pro.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.