blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a4334e650942a9aab5b5ecc302da8ac15b452505 | 56a28df7ecf17fc6176288c935cab26df6a87922 | /migrations/versions/fa91a70d3959_.py | 4d3952bb37e81b49db943f1b1a7ce13f4dfa8395 | [] | no_license | ehiber/To-Do-List-API | 6ee2fe0183cc04f719998d0518ab856442328f2d | a06e54ec8339e1109cd3db85ac586fc7cd17f2a5 | refs/heads/develop | 2021-07-05T07:05:35.691410 | 2020-01-29T21:44:04 | 2020-01-29T21:44:04 | 235,905,998 | 0 | 0 | null | 2021-05-06T20:06:00 | 2020-01-23T23:21:41 | Python | UTF-8 | Python | false | false | 827 | py | """empty message
Revision ID: fa91a70d3959
Revises: 584e5b9842d2
Create Date: 2020-01-29 16:08:10.771590
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'fa91a70d3959'
down_revision = '584e5b9842d2'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: relax ``task.done`` so it may hold NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        'task',
        'done',
        existing_type=mysql.TINYINT(display_width=1),
        nullable=True,
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: make ``task.done`` NOT NULL again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        'task',
        'done',
        existing_type=mysql.TINYINT(display_width=1),
        nullable=False,
    )
    # ### end Alembic commands ###
| [
"ehiber5@hotmail.com"
] | ehiber5@hotmail.com |
7d73eb374ffb9b3499f6562d0bd69e82c757e245 | 97ee1485dfe3b3eb2254ff85edd8f5fa34e21531 | /redisext/models/abc.py | a36089dd85385d9c46114c65205a53d25466f6da | [
"MIT"
] | permissive | mylokin/redisext | 7f1c02efc330978ba613d3a4438c495301d55aca | b54baf99c804736e2e7540187f0d6f9a3ca64e0b | refs/heads/master | 2023-08-17T14:02:07.115055 | 2023-08-03T13:42:54 | 2023-08-03T13:42:54 | 30,815,631 | 0 | 2 | MIT | 2023-08-03T13:42:55 | 2015-02-15T02:32:36 | Python | UTF-8 | Python | false | false | 1,917 | py | from __future__ import absolute_import
import redisext.backend.abc
import redisext.backend.redis as redis
import redisext.packages.dsnparse as dsnparse
class Model(object):
    """Base class for redis-backed models.

    Subclasses may override the class attributes:
      KEY              -- default key used when none is passed to __init__
      CONNECTION       -- either a DSN string or an IConnection subclass
      CONNECTION_REUSE -- when True, master/slave connections are cached
      SERIALIZER       -- optional codec exposing encode()/decode()
    """
    KEY = None
    CONNECTION = None
    CONNECTION_REUSE = True

    def __init__(self, key=None):
        # Fall back to the class-level KEY when no explicit key is given.
        self.key = key or getattr(self, 'KEY', None)
        self._connection = None
        self._master = None
        self._slave = None

    @classmethod
    def decode(cls, value):
        """Decode *value* with the class serializer; pass through otherwise."""
        codec = getattr(cls, 'SERIALIZER', None)
        # Falsy values (None, '') are returned untouched, as is everything
        # when no serializer is configured.
        if not (value and codec):
            return value
        return codec.decode(value)

    @classmethod
    def encode(cls, value):
        """Encode *value* with the class serializer; pass through otherwise."""
        codec = getattr(cls, 'SERIALIZER', None)
        if not (value and codec):
            return value
        return codec.encode(value)

    @property
    def connection(self):
        """Resolve CONNECTION into a connection class (computed once, cached)."""
        if self._connection:
            return self._connection
        if isinstance(self.CONNECTION, str):
            # A DSN string: parse it and build a one-off Connection subclass.
            dsn = dsnparse.parse(self.CONNECTION)

            class Connection(redis.Connection):
                MASTER = {'host': dsn.host, 'port': dsn.port, 'db': dsn.paths[0]}

            self._connection = Connection
        elif issubclass(self.CONNECTION, redisext.backend.abc.IConnection):
            # Already a usable connection class.
            self._connection = self.CONNECTION
        else:
            raise ValueError
        return self._connection

    def connect_to_master(self):
        """Return a master connection; cached when CONNECTION_REUSE is set."""
        if not self.CONNECTION_REUSE:
            return self.connection.connect_to_master()
        if not self._master:
            self._master = self.connection.connect_to_master()
        return self._master

    def connect_to_slave(self):
        """Return a slave connection; cached when CONNECTION_REUSE is set."""
        if not self.CONNECTION_REUSE:
            return self.connection.connect_to_slave()
        if not self._slave:
            self._slave = self.connection.connect_to_slave()
        return self._slave
| [
"mylokin@me.com"
] | mylokin@me.com |
8b5353bb413efa3cbabe1730e3767936265568a8 | 0d0efed91a1e320509a7625bd72ebea1b64fc95b | /numpy_learn/5_numpy_function.py | 0fc81c094aa3704f8098fde5e9b67f07f7783576 | [] | no_license | starryrbs/python_ai | ed74a3c2d53378b47b2be910d97255f2706fd25e | 80f8fd361d7b366ba0607417f0272bbaa3672e51 | refs/heads/master | 2020-04-24T03:48:52.260392 | 2019-02-20T13:56:42 | 2019-02-20T13:56:42 | 171,681,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,783 | py | import numpy as np
# numpy random-number functions
# numpy's random sub-library: np.random.*, mainly np.random.rand(), np.random.randn(), np.random.randint()
# rand(d0,d1,d2,...,dn): creates a random array of shape d0..dn, floats in [0, 1), uniformly distributed
a = np.random.rand(2, 3, 4)
print(a)
"""
[[[0.4506612 0.5296636 0.9747625 0.90105177]
[0.25850117 0.90704491 0.87144252 0.00418912]
[0.69423447 0.690204 0.4432447 0.37734196]]
[[0.41056822 0.4220897 0.80819521 0.99022746]
[0.61803924 0.93554027 0.3742707 0.94081985]
[0.15283965 0.09844152 0.25726209 0.24488101]]]
"""
# randn(d0,d1,d2,...,dn): creates a random array of shape d0..dn, standard normal distribution
a = np.random.randn(2, 3, 4)
print(a)
# randint(low, high, shape): creates a random integer array of the given shape, values in [low, high)
a = np.random.randint(5, 10, (2, 3, 4))
print(a)
"""
[[[5 6 5 7]
[7 5 9 5]
[6 7 6 5]]
[[8 6 7 5]
[6 8 5 6]
[8 8 7 9]]]
"""
# seed(s): seeds the random generator; s is the given seed value
# np.random.seed(5)
# a = np.random.randint(5, 10, (2, 3, 4))
# print(a)
"""
[[[8 5 6 5]
[9 8 5 5]
[9 6 5 8]]
[[9 8 6 9]
[7 6 6 7]
[6 6 6 7]]]
"""
# As shown above: with a fixed seed value the generated array does not change
# shuffle(a): randomly reorders array a along its first axis; a is modified in place
a = np.random.randint(5, 10, (3, 4))
print(a)
"""
[[[6 8 7 8]
[9 7 7 9]
[5 6 6 8]]
[[6 6 5 6]
[5 7 5 5]
[6 8 5 9]]]
"""
np.random.shuffle(a)
print(a)
"""
[[8 7 8 7]
[5 6 5 8]
[7 9 5 5]]
[[5 6 5 8]
[8 7 8 7]
[7 9 5 5]]
"""
# permutation(a): returns a randomly permuted copy along the first axis; a itself is NOT changed
a = np.random.randint(5, 10, (3, 4))
print(a)
"""
[[8 7 5 9]
[5 9 8 6]
[6 6 5 5]]
"""
b = np.random.permutation(a)
print(a)
"""
[[9 5 7 9]
[5 9 5 7]
[6 8 6 7]]
"""
print(b)
"""
[[5 9 5 7]
[6 8 6 7]
[9 5 7 9]]
"""
# choice(a, size, replace, p): draws elements from 1-D array a with probabilities p into an array of shape size; replace says whether elements may be reused (default True)
a = np.arange(6)
print(np.random.choice(a, 2, replace=False, p=a / np.sum(a)))
# replace only applies to one-dimensional arrays
"""
uniform(low, high, size) : 产生具有均匀分布的数组,low起始值,high结束值,size形状
normal(loc,scale,size) : 产生具有正态分布的数组,loc均值,scale标准差,size形状
poisson(lam,size) : 产生具有泊松分布的数组,lam随机事件发生率,size形状
"""
# numpy statistical functions:
# np.sum(a, axis=None): sums the elements of a along the given axis; axis is an int or a tuple.
a = np.arange(15).reshape((3, 5))
print(a)
"""
[[ 0 1 2 3 4]
[ 5 6 7 8 9]
[10 11 12 13 14]]
"""
print(np.sum(a, axis=0))
# [15 18 21 24 27]
print(np.sum(a, axis=1))
# [10 35 60]
"""
当axis=None时,np.sum(a)表示数组a的所有元素总和
当axis=0时,表示的是数组a各纵列元素之和
当axis=1时,表示的是数组a各横列元素之和
mean(a, axis=None) :根据给定轴axis计算数组a相关元素的期望,axis整数或元组
"""
# mean: computes the average
print(1, np.mean(a))
print(np.mean(a, axis=0))
# average(a,axis=None,weights=None): weighted average of a's elements along the given axis
print(np.average(a, axis=0, weights=[2, 3, 4]))
# [ 6.11111111 7.11111111 8.11111111 9.11111111 10.11111111]
# 6.111111111111111 is computed as: (0 * 2 + 5 * 3 + 4 * 10) / (2 + 3 + 4)
"""
std(a, axis=None) : 根据给定轴axis计算数组a相关元素的标准差
var(a, axis=None) : 根据给定轴axis计算数组a相关元素的方差
min(a) max(a) : 计算数组a中元素的最小值、最大值
argmin(a) argmax(a) : 计算数组a中元素最小值、最大值的降一维后下标
unravel_index(index, shape) : 根据shape将一维下标index转换成多维下标
ptp(a) : 计算数组a中元素最大值与最小值的差
median(a) : 计算数组a中元素的中位数(中值)
"""
print("----------梯度函数------------")
"""
np.gradient(a) :计算数组a中元素的梯度,当a为多维时,返回每个维度梯度
梯度:连续值之间的变化率,即斜率
XY坐标轴连续三个X坐标对应的Y轴值:a, b, c,其中,b的梯度是: (c‐a)/2
"""
a = np.random.randint(0, 20, (5))
print(a)
# [ 5 5 13 6 10]
print(np.gradient(a))
# [ 0. 4. 0.5 -1.5 4. ]
# 0 : (5-5)/1
# 4. : (10-6)/1
# 0.5: (6-5)/2
# 4. : (13-5)/2
# when a is a multi-dimensional array
a = np.arange(12).reshape(2,6)
print(a)
"""
[[ 0 1 2 3 4 5]
[ 6 7 8 9 10 11]]
"""
print(np.gradient(a))
"""
[array([[6., 6., 6., 6., 6., 6.],
[6., 6., 6., 6., 6., 6.]]), array([[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.]])]
"""
# 上侧表示最外层维度(axis=0)的梯度,下侧表示第二层维度(axis=1)的梯度。 | [
"1322096624@qq.com"
] | 1322096624@qq.com |
29d9a13fc428d7f9e1e4996ffc1477522c5baa82 | cf8146853f8eac1a2942849bc7564f94a96fe9cb | /pynetbox/models/circuits.py | 0e97cc707dae5cdc36c0cb2dce4b3f12913684df | [
"Apache-2.0"
] | permissive | markkuleinio/pynetbox | a07dbbad05f3e1beb919ce924a8e6335ddd6a5a9 | 0981320289b3a6ccb2fadab70e4047ec5d329ebd | refs/heads/master | 2023-09-03T21:04:20.813085 | 2023-08-29T04:04:42 | 2023-08-29T04:04:42 | 182,992,019 | 0 | 0 | Apache-2.0 | 2019-04-23T10:31:37 | 2019-04-23T10:31:37 | null | UTF-8 | Python | false | false | 760 | py | """
(c) 2017 DigitalOcean
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pynetbox.core.response import Record
class Circuits(Record):
    """A circuit record; its string form is the circuit ID (cid)."""

    def __str__(self):
        circuit_id = self.cid
        return circuit_id
class CircuitTerminations(Record):
    """A circuit termination; renders as the parent circuit's cid."""

    def __str__(self):
        parent_circuit = self.circuit
        return parent_circuit.cid
| [
"zmoody@digitalocean.com"
] | zmoody@digitalocean.com |
bb18ab3f331dbd440d23d2e0fb922bcf8c0bbcd9 | e2f7ede165cc5f35d33b413b1cbab84bce582e0c | /agence_sncf/urls.py | d184f74d4d7ceef4a5c3e14a8b8be8042d71b11c | [] | no_license | julienbronner/Projet_SBD | 98029d433c4c5253754ca16054162775e6aeab2f | 6b12b6be6ba0497ee7d3453fd57fe820f7663797 | refs/heads/main | 2023-01-27T16:46:30.217850 | 2020-12-04T22:42:53 | 2020-12-04T22:42:53 | 318,264,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | """agence_sncf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include, url
from agence import views
# URL route table — order matters: Django uses the first matching pattern.
urlpatterns = [
# Site root -> landing view.
url(r'^$', views.index),
# Everything under /agence/ is delegated to the app's own URLconf.
url(r'^agence/', include('agence.urls', namespace = 'agence')),
# Built-in Django admin site.
url(r'^admin/', admin.site.urls)
] | [
"julbronner@free.fr"
] | julbronner@free.fr |
6aec87a1fbe7be776d760cf637c53614801b725b | 35286efd76814a1f3bc05da07f2968d05737c238 | /esim/test.py | b013aa69540a306acd2cfacf63915c5ba49b3226 | [
"Apache-2.0"
] | permissive | jiniaoxu/text_matching | ac41c7de8f66f61a6958a35dfd4584539cd97c51 | 154de91000e8677703192cf5eae49fc6c3c09eea | refs/heads/master | 2020-06-04T05:45:09.320991 | 2019-06-13T02:50:54 | 2019-06-13T02:50:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | import os
import sys
# Make the project root importable when running this script from esim/.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from esim.graph import Graph
import tensorflow as tf
from utils.load_data import load_data
# Work around duplicate OpenMP runtime loading (KMP guard).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Restrict TensorFlow to GPU #2.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# Evaluation script: load 1000 premise/hypothesis/label samples, restore
# the trained ESIM checkpoint and report test loss and accuracy.
p, h, y = load_data('ccb/test.csv', data_size=1000)
model = Graph()
saver = tf.train.Saver()
with tf.Session()as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, '../output/esim/esim_12.ckpt')
# keep_prob=1 disables dropout at evaluation time.
loss, acc = sess.run([model.loss, model.acc],
feed_dict={model.p: p,
model.h: h,
model.y: y,
model.keep_prob: 1})
print('loss: ', loss, ' acc:', acc)
| [
"zjiuzhou@gmail.com"
] | zjiuzhou@gmail.com |
0e8d365040a495051d58dcb06f5a3448469c7b24 | 13cab28ace93296d4c57ed2e30121e6af25a0595 | /python修改张铁海/p12.py | f2ae9607120d95d453e2dc320d9702d2f7f0dddb | [
"MIT"
] | permissive | zthdsb/robot | cedf5663ba3bcf9d0464c00aaeb40a32fd282f0a | b76dffdc63a50ea3955068b3d28194bd12c26719 | refs/heads/master | 2021-08-19T12:12:54.698698 | 2017-11-26T06:00:22 | 2017-11-26T06:00:22 | 112,051,816 | 0 | 0 | null | 2017-11-26T03:06:06 | 2017-11-26T03:06:05 | null | UTF-8 | Python | false | false | 254 | py |
# Bayes' rule for a screening test.
# Priors: P(cancer) and P(no cancer).
pCan=0.001
pNon=0.999
# Observed test result: 'positive' or 'negative'.
z='positive'
# z='negative'
# Test characteristics: P(positive | cancer) and P(positive | no cancer).
pPosCan=0.8
pPosNon=0.1
# Unnormalized posterior (joint) probabilities for [cancer, no cancer].
if z=='positive':
    p=[pPosCan*pCan, pPosNon*pNon]
else:
    p=[(1-pPosCan)*pCan,(1-pPosNon)*pNon]
# Normalize by the total evidence. BUGFIX: the original divided by sum(p)
# inside the loop, but sum(p) changes as entries are overwritten, so the
# posteriors did not sum to 1. Divide by the precomputed total S instead.
S=sum(p)
for i in range(len(p)):
    p[i]=p[i]/S
print(p[0])
print(p[1])
| [
"1278405208@qq.com"
] | 1278405208@qq.com |
daee6d647f89c7ef2312d24653337d9dd6c1b677 | 8a79d40fb610bd8f99526a2a6d9a5a097839c829 | /shangji/asgi.py | af7e29b2c31fd236762a3ae47ba5d0eb14373a42 | [] | no_license | fenglongyun/shangji1 | 0f254addc3037a34f0930f4b56a93639375b0dd9 | 8b0b751c331f21eedbdaefe4b879eb31dcd165b8 | refs/heads/master | 2023-02-14T15:37:32.191845 | 2021-01-14T14:38:22 | 2021-01-14T14:38:22 | 307,700,935 | 0 | 0 | null | 2020-12-19T07:36:58 | 2020-10-27T13:00:18 | Python | UTF-8 | Python | false | false | 391 | py | """
ASGI config for shangji project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Default the settings module before the application object is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shangji.settings')
# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"19121711185@163.com"
] | 19121711185@163.com |
a9f87c09db842dec84d7f43ae82487dea6c66bfb | 9c6e7ee7ae1a4f301b70a4ee79cb0d6857247c80 | /libros/migrations/0002_libromodelo_slug.py | f25cbe2106ae6d744edecffaca248a2214db6d0e | [] | no_license | DerianD/proyecto | 035994780b4f58bfec69a98b198a5c7f889e7fe6 | 4fcd44444cd7c845fc6cd2027b630410596430af | refs/heads/master | 2020-05-27T05:45:02.056344 | 2017-03-16T06:04:16 | 2017-03-16T06:04:16 | 82,528,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-12 03:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
# Auto-generated schema migration: adds an optional, indexed `slug`
# field to the LibroModelo model.
dependencies = [
# Must run after the app's initial migration.
('libros', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='libromodelo',
name='slug',
# SlugField is indexed by default; blank=True allows empty form values.
field=models.SlugField(blank=True),
),
]
| [
"derian.droid123@gmail.com"
] | derian.droid123@gmail.com |
48bbfcaa47206b217a1dc3a4c42db54c9c544888 | bb3af1c67988ab9bf0acb89942a23789743d0a79 | /Helpers/callbacks.py | 782513866558f9c498e49e89656ee0b8bc5c9674 | [] | no_license | TBomberman/MatrixScores | 288751ab73a4cc8b11d7018b6baa9499e57c877c | 0dbbe4c2a6bcf94f8b9670ff075ebf8c3d97b9c4 | refs/heads/master | 2023-01-28T11:40:24.315695 | 2020-12-15T06:24:03 | 2020-12-15T06:24:03 | 317,684,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from tensorflow.keras.callbacks import Callback
import datetime
class NEpochLogger(Callback):
    """Keras callback that prints train/validation loss every `display` epochs.

    Useful when training many epochs with verbose=0: only every
    `display`-th epoch is reported instead of all of them.
    """

    def __init__(self, display=100):
        '''
        display: Number of epochs to wait before outputting loss
        '''
        # `seen` is retained for backward compatibility; not used internally.
        self.seen = 0
        self.display = display

    def on_epoch_end(self, epoch, logs=None):
        """Print timestamp, epoch progress and losses on reporting epochs.

        `logs` is the metrics dict Keras passes at the end of each epoch;
        it must contain 'loss' and 'val_loss' (validation data required).
        The mutable default `logs={}` was replaced with the None sentinel
        to avoid the shared-mutable-default pitfall; Keras itself declares
        this hook as `on_epoch_end(self, epoch, logs=None)`.
        """
        logs = logs or {}
        if epoch % self.display == 0:
            print('{0}: {1}/{2} - Epoch Train Loss: {3:8.7f} Val Loss: {4:8.7f}'
                  .format(datetime.datetime.now(), epoch, self.params['epochs'], logs['loss'], logs['val_loss']))
| [
"godwinwoo@hotmail.com"
] | godwinwoo@hotmail.com |
16f0611933913ee87d8a374cb889efd828cd11ab | 985b89f24b1360d6519097e278421e760552bdea | /articles/migrations/0001_initial.py | 1a0eed574d8c69adfcbb7812d6669e59c9c334f9 | [] | no_license | macbotxxx/fintech | c1077e4e103104869e6a98c217a148aae81cca8a | 0f1f73ef07eec6a128cdc2a304abfb537f87c8ba | refs/heads/master | 2023-08-10T23:32:32.650774 | 2021-09-13T11:32:28 | 2021-09-13T11:32:28 | 401,274,806 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # Generated by Django 3.2.6 on 2021-08-27 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
# Initial migration for the articles app: creates the Blog table.
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
# Auto-incrementing 64-bit primary key.
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title1', models.CharField(max_length=200, null=True)),
('title2', models.CharField(help_text='Blog title for this particular blog', max_length=200, null=True, verbose_name='Blog Title')),
],
options={
'verbose_name': 'Blog',
'verbose_name_plural': 'Blog',
},
),
]
| [
"assanamamichael@gmail.com"
] | assanamamichael@gmail.com |
4a85160591a2d1391c3b158a203ad2f227a3bd0e | 3ad0cce6e17c0f5448e0ee5ce68a4813a31327d7 | /summarize.py | 9193ca43b3cfb0a4ca7fd31548252637af4596ce | [] | no_license | Mariarone/Multilingual-text-summarization | 27d112e59847ee48fd7658e15e415d95835a124f | 86acc2592ed336020b682e4910524942fe8ad9c7 | refs/heads/master | 2020-06-06T07:47:11.387044 | 2019-06-19T07:38:15 | 2019-06-19T07:38:15 | 192,682,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,545 | py | #Author: Parav Patel
# This program utilizes the basics of the nltk (NLP) library to create a text summarizer which outputs a summary for a given text
# (article, story, blog posts etc..).
from __future__ import print_function
import array
import string
import operator
#Natural Language Processing Libraries
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from flask import Flask, render_template, request #Used to render .html templates
#Webscrapping using BeautifulSoup, not yet implemented
import bs4 as bs #beautifulsource4
class summarize:
def get_summary(self, input, max_sentences):
sentences_original = sent_tokenize(input)
#Remove all tabs, and new lines
if (max_sentences > len(sentences_original)):
print ("Error, number of requested sentences exceeds number of sentences inputted")
#Should implement error schema to alert user.
s = input.strip('\t\n')
#Remove punctuation, tabs, new lines, and lowercase all words, then tokenize using words and sentences
words_chopped = word_tokenize(s.lower())
sentences_chopped = sent_tokenize(s.lower())
stop_words = set(stopwords.words("english"))
punc = set(string.punctuation)
#Remove all stop words and punctuation from word list.
filtered_words = []
for w in words_chopped:
if w not in stop_words and w not in punc:
filtered_words.append(w)
total_words = len(filtered_words)
#Determine the frequency of each filtered word and add the word and its frequency to a dictionary (key - word,value - frequency of that word)
word_frequency = {}
output_sentence = []
for w in filtered_words:
if w in word_frequency.keys():
word_frequency[w] += 1.0 #increment the value: frequency
else:
word_frequency[w] = 1.0 #add the word to dictionary
#Weighted frequency values - Assign weight to each word according to frequency and total words filtered from input:
for word in word_frequency:
word_frequency[word] = (word_frequency[word]/total_words)
#Keep a tracker for the most frequent words that appear in each sentence and add the sum of their weighted frequency values.
#Note: Each tracker index corresponds to each original sentence.
tracker = [0.0] * len(sentences_original)
for i in range(0, len(sentences_original)):
for j in word_frequency:
if j in sentences_original[i]:
tracker[i] += word_frequency[j]
#Get the highest weighted sentence and its index from the tracker. We take those and output the associated sentences.
for i in range(0, len(tracker)):
#Extract the index with the highest weighted frequency from tracker
index, value = max(enumerate(tracker), key = operator.itemgetter(1))
if (len(output_sentence)+1 <= max_sentences) and (sentences_original[index] not in output_sentence):
output_sentence.append(sentences_original[index])
if len(output_sentence) > max_sentences:
break
#Remove that sentence from the tracker, as we will take the next highest weighted freq in next iteration
tracker.remove(tracker[index])
sorted_output_sent = self.sort_sentences(sentences_original, output_sentence)
return (sorted_output_sent)
# @def sort_senteces:
# From the output sentences, sort them such that they appear in the order the input text was provided.
# Makes it flow more with the theme of the story/article etc..
def sort_sentences (self, original, output):
sorted_sent_arr = []
sorted_output = []
for i in range(0, len(output)):
if(output[i] in original):
sorted_sent_arr.append(original.index(output[i]))
sorted_sent_arr = sorted(sorted_sent_arr)
for i in range(0, len(sorted_sent_arr)):
sorted_output.append(original[sorted_sent_arr[i]])
print (sorted_sent_arr)
return sorted_output
#------------Flask Application---------------#
app = Flask(__name__)
# Handles the summarize form POST: reads the input text and the requested
# sentence count, runs the summarizer, and re-renders the page with results.
@app.route('/templates', methods=['POST'])
def original_text_form():
title = "Summarizer"
text = request.form['input_text'] #Get text from html
# NOTE(review): despite its name, max_value is the list of tokenized
# sentences (passed to the template as num_sentences) — confirm intent.
max_value = sent_tokenize(text)
num_sent = int(request.form['num_sentences']) #Get number of sentence required in summary
sum1 = summarize()
summary = sum1.get_summary(text, num_sent)
print (summary)
return render_template("index.html", title = title, original_text = text, output_summary = summary, num_sentences = max_value)
# Landing page with the empty input form.
@app.route('/')
def homepage():
title = "Text Summarizer"
return render_template("index.html", title = title)
# Run the Flask development server when executed directly.
if __name__ == "__main__":
app.debug = True
app.run()
| [
"mariarone1997@gmail.com"
] | mariarone1997@gmail.com |
561107764d55ee75983f3adc71f5cf85b27d5ea0 | 5a45981c89d0d9c0f2e9453abdefc333deb53e80 | /nanodet/model/fpn/fpn.py | b031c6c81b0d7eacf7b045c53975dc5b07aa5c94 | [
"Apache-2.0"
] | permissive | zhiqwang/nanodet | fd0b2e9c4badf492649aef7c3b397394c3110d1d | dd94177c0cb411ee21f4fc4ebc2ef01647e64823 | refs/heads/main | 2023-03-17T12:23:12.788037 | 2021-03-15T12:00:19 | 2021-03-15T12:00:19 | 348,642,567 | 2 | 0 | Apache-2.0 | 2021-03-17T09:01:43 | 2021-03-17T09:01:43 | null | UTF-8 | Python | false | false | 3,241 | py | # Modification 2020 RangiLyu
# Copyright 2018-2019 Open-MMLab.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch.nn.functional as F
from ..module.conv import ConvModule
from ..module.init_weights import xavier_init
class FPN(nn.Module):
"""Feature Pyramid Network neck.
Projects each selected backbone level to `out_channels` with a 1x1
lateral conv, then merges levels top-down by upsampling the coarser map
and adding it element-wise. No extra output convs are applied (the
fpn_convs stage is commented out in forward()).
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
conv_cfg=None,
norm_cfg=None,
activation=None
):
super(FPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.fp16_enabled = False
if end_level == -1:
# Use every backbone level from start_level upward.
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
# One 1x1 lateral conv per selected backbone level.
self.lateral_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=activation,
inplace=False)
self.lateral_convs.append(l_conv)
self.init_weights()
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
# Xavier-uniform init for every plain Conv2d in this module.
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, inputs):
# One feature map per backbone level is expected.
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
# Upsample the coarser level to the finer level's spatial size, then add.
laterals[i - 1] += F.interpolate(
laterals[i], size=prev_shape, mode='bilinear')
# build outputs
outs = [
# self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
laterals[i] for i in range(used_backbone_levels)
]
return tuple(outs)
# if __name__ == '__main__':
| [
"lyuchqi@gmail.com"
] | lyuchqi@gmail.com |
80500f93626cb2b34b36e5ecb087affbefd175a1 | f3b3739d4886f1cbb48a0615a70bf05919302aeb | /fortnite_apps/sistema/migrations/0013_auto_20180920_1726.py | 705474304964d548d4d15427f218c321aeaf3439 | [] | no_license | wahello/LigaArgentinaFortnite | 7a0b4a9b3fd33ea3216385e786e76cc65d27f173 | 508e39ac14dd98d34dc89400f695fd5edd815fd7 | refs/heads/master | 2020-04-12T13:24:06.532205 | 2018-12-14T03:38:35 | 2018-12-14T03:38:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # Generated by Django 2.1.1 on 2018-09-20 20:26
from django.db import migrations, models
class Migration(migrations.Migration):
# Adds per-player kill and win counters to the Perfil model.
dependencies = [
('sistema', '0012_perfil_puntos'),
]
operations = [
migrations.AddField(
model_name='perfil',
name='kills',
field=models.IntegerField(blank=True, default=0),
),
migrations.AddField(
model_name='perfil',
name='wins',
field=models.IntegerField(blank=True, default=0),
),
]
| [
"mmquiroga10@gmail.com"
] | mmquiroga10@gmail.com |
f06362183c2da8dbd66a5104d177d31c7bd29598 | 243a15fabf17551e6fb5ccfc93a912f460838b09 | /test/test_save_attributes_dialog.py | 3ef0e2ea2b68bc9d08ea87d448d6b34368eec302 | [] | no_license | nouhailaImr/PYQGIS | 0ac549eafeb67f7a237c7ad91d57e857a7be73d7 | 4502527a1e47351e5635495c600aa777ec068843 | refs/heads/main | 2023-05-15T14:55:11.366699 | 2021-06-15T22:29:22 | 2021-06-15T22:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | # coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'elimrani.nouhaila@gmail.com'
__date__ = '2021-06-13'
__copyright__ = 'Copyright 2021, Nouhaila'
import unittest
from qgis.PyQt.QtGui import QDialogButtonBox, QDialog
from save_attributes_dialog import SaveAttributesDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class SaveAttributesDialogTest(unittest.TestCase):
"""Test dialog works."""
def setUp(self):
"""Runs before each test."""
# Fresh dialog per test; parent=None keeps it a top-level widget.
self.dialog = SaveAttributesDialog(None)
def tearDown(self):
"""Runs after each test."""
self.dialog = None
def test_dialog_ok(self):
"""Test we can click OK."""
button = self.dialog.button_box.button(QDialogButtonBox.Ok)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Accepted)
def test_dialog_cancel(self):
"""Test we can click cancel."""
button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Rejected)
# Allow running this module directly, outside a test runner.
if __name__ == "__main__":
suite = unittest.makeSuite(SaveAttributesDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| [
"elimrani.nouhaila@gmail.com"
] | elimrani.nouhaila@gmail.com |
f1b6f23525382617a5501166f87ecca57e0d62c3 | 938a496fe78d5538af94017c78a11615a8498682 | /algorithms/401-500/442.find-all-duplicates-in-an-array.py | 6a7dfa1426ec528b0bb7cf1b4da44bb4ceb85ca5 | [] | no_license | huilizhou/Leetcode-pyhton | 261280044d15d0baeb227248ade675177efdb297 | 6ae85bf79c5a21735e3c245c0c256f29c1c60926 | refs/heads/master | 2020-03-28T15:57:52.762162 | 2019-11-26T06:14:13 | 2019-11-26T06:14:13 | 148,644,059 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | # 数组中重复的数据
class Solution(object):
    """LeetCode 442: find all elements that appear twice in an array.

    Every value satisfies 1 <= a[i] <= n (n = len(nums)), so each value
    maps to slot value-1. O(n) time, O(1) extra space.
    NOTE: nums is modified in place (visited slots get their sign flipped).
    """

    def findDuplicates(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        duplicates = []
        for value in nums:
            slot = abs(value) - 1
            if nums[slot] > 0:
                # First visit: mark the slot by flipping its sign.
                nums[slot] = -nums[slot]
            else:
                # Slot already negative -> second occurrence of this value.
                duplicates.append(abs(value))
        return duplicates
# Demo: prints [2, 3] for the sample input.
print(Solution().findDuplicates([4, 3, 2, 7, 8, 2, 3, 1]))
| [
"2540278344@qq.com"
] | 2540278344@qq.com |
4ef8e1d2e12b3523fcac998c34474ba0632e1e55 | 15a865beb4689453f76569ad6dce1cb12e17b207 | /dsub/lib/providers_util.py | f1beb687a8fc55d301bc5918f78ef00803ff5847 | [
"Apache-2.0"
] | permissive | calbach/dsub | e6bfd5ef5f351fc69fc045bf3717fbd782389dbb | f639da55bb33529e002cc62f3a85c4a6b4af5cd3 | refs/heads/master | 2021-07-09T17:41:00.361317 | 2017-10-03T18:17:13 | 2017-10-03T18:17:13 | 106,348,301 | 0 | 1 | null | 2017-10-10T00:13:40 | 2017-10-10T00:13:40 | null | UTF-8 | Python | false | false | 6,783 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for providers."""
import os
import textwrap
from . import param_util
# Maps a file-provider constant to the shell command used for recursive
# copies to/from that provider.
_LOCALIZE_COMMAND_MAP = {
param_util.P_GCS: 'gsutil -m rsync -r',
param_util.P_LOCAL: 'rsync -r',
}
def build_recursive_localize_env(destination, inputs):
  """Build shell `export` statements for recursive input parameters.

  Arguments:
    destination: folder where the data will be put, e.g. /mnt/data.
    inputs: a list of InputFileParam.

  Returns:
    A newline-separated string of `export NAME=<destination>/<docker_path>`
    statements, one per recursive input.
  """
  mount_root = destination.rstrip('/')
  exports = []
  for param in inputs:
    if not param.recursive:
      continue
    exports.append('export {0}={1}/{2}'.format(
        param.name, mount_root, param.docker_path.rstrip('/')))
  return '\n'.join(exports)
def build_recursive_localize_command(destination, inputs, file_filter):
"""Return a multi-line string with a shell script to copy recursively.
Arguments:
destination: Folder where to put the data.
For example /mnt/data
inputs: a list of InputFileParam
file_filter: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the inputs
recursively from GCS.
"""
command = _LOCALIZE_COMMAND_MAP[file_filter]
# Only recursive inputs whose provider matches the filter are localized.
filtered_inputs = [
var for var in inputs
if var.recursive and var.file_provider == file_filter
]
# For each input: create the mount directory, retry the copy up to 3
# times, then make the tree world-readable for the container user.
copy_input_dirs = '\n'.join([
textwrap.dedent("""
mkdir -p {data_mount}/{docker_path}
for ((i = 0; i < 3; i++)); do
if {command} {source_uri} {data_mount}/{docker_path}; then
break
elif ((i == 2)); then
2>&1 echo "Recursive localization failed."
exit 1
fi
done
chmod -R o+r {data_mount}/{docker_path}
""").format(
command=command,
source_uri=var.uri,
data_mount=destination.rstrip('/'),
docker_path=var.docker_path) for var in filtered_inputs
])
return copy_input_dirs
def build_recursive_gcs_delocalize_env(source, outputs):
  """Build shell `export` statements for recursive GCS output parameters.

  Arguments:
    source: folder with the data, e.g. /mnt/data.
    outputs: a list of OutputFileParam.

  Returns:
    A newline-separated string of `export NAME=<source>/<docker_path>`
    statements, one per recursive output whose file provider is GCS.
  """
  mount_root = source.rstrip('/')
  exports = []
  for param in outputs:
    if param.recursive and param.file_provider == param_util.P_GCS:
      exports.append('export {0}={1}/{2}'.format(
          param.name, mount_root, param.docker_path.rstrip('/')))
  return '\n'.join(exports)
def build_recursive_delocalize_command(source, outputs, file_filter):
  """Return a multi-line string with a shell script to copy recursively.

  Arguments:
    source: Folder with the data.
      For example /mnt/data
    outputs: a list of OutputFileParam.
    file_filter: file provider string used to filter the output params; the
      returned command will only apply outputs whose file provider
      matches this file filter.

  Returns:
    a multi-line string with a shell script that copies the inputs
    recursively to GCS.
  """
  # Look up the provider-specific copy command for this file filter.
  command = _LOCALIZE_COMMAND_MAP[file_filter]
  # Only recursive outputs served by the requested provider apply.
  filtered_outputs = [
      var for var in outputs
      if var.recursive and var.file_provider == file_filter
  ]
  # One shell stanza per output: retry the copy up to 3 times before failing.
  # NOTE(review): "2>&1 echo ..." does not send the message to stderr (that
  # would be "1>&2 echo ..."); confirm intent before changing.
  return '\n'.join([
      textwrap.dedent("""
      for ((i = 0; i < 3; i++)); do
        if {command} {data_mount}/{docker_path} {destination_uri}; then
          break
        elif ((i == 2)); then
          2>&1 echo "Recursive de-localization failed."
          exit 1
        fi
      done
      """).format(
          command=command,
          data_mount=source.rstrip('/'),
          docker_path=var.docker_path,
          destination_uri=var.uri) for var in filtered_outputs
  ])
def get_task_metadata(job_metadata, task_id):
  """Returns a dict combining job metadata with the task id."""
  merged = job_metadata.copy()
  merged['task-id'] = task_id
  return merged
def _format_task_uri(fmt, task_metadata):
"""Returns a URI with placeholders replaced by task metadata values."""
values = {
'job-id': None,
'task-id': 'task',
'job-name': None,
'user-id': None
}
for key in values:
values[key] = task_metadata.get(key) or values[key]
return fmt.format(**values)
def format_logging_uri(uri, task_metadata):
  """Inserts task metadata into the logging URI.

  The core behavior is inspired by the Google Pipelines API:
  (1) If the uri ends in ".log", then that is the logging path.
  (2) Otherwise, the uri is treated as a "directory" for logs and a filename
      is generated automatically.
  For (1), if the job is a --tasks job, then the {task-id} is inserted
  before ".log".
  For (2), the file name generated is {job-id}, or for --tasks jobs, it is
  {job-id}.{task-id}.
  In addition, full task metadata substitution is supported: the URI may
  include substitution strings such as "{job-id}", "{task-id}",
  "{job-name}", and "{user-id}".

  Args:
    uri: URI indicating where to write logs
    task_metadata: dictionary of task metadata values

  Returns:
    The logging_uri formatted as described above.
  """
  fmt = str(uri)
  # A user-supplied format string (contains curly braces) is used verbatim.
  if '{' in fmt:
    return _format_task_uri(fmt, task_metadata)
  has_task = task_metadata.get('task-id') is not None
  if uri.endswith('.log'):
    # Explicit log file: splice {task-id} in before the extension if needed.
    if has_task:
      root, _ = os.path.splitext(uri)
      fmt = '{0}.{{task-id}}.log'.format(root)
  else:
    # The path is a directory - generate the file name.
    name = '{job-id}.{task-id}.log' if has_task else '{job-id}.log'
    fmt = os.path.join(uri, name)
  return _format_task_uri(fmt, task_metadata)
| [
"mbookman@google.com"
] | mbookman@google.com |
a77bd4d31456f2ce3ce41c0cacca05640eb1c348 | ce5beb5e0e7abcfd8a5e90b8b4b5d5fb7cc1fd8a | /PYTHON/OS/Managing files and directories/intro.py | 269a5e9ee63252c6ca4b0b7ab37fbbddf84ffbf9 | [] | no_license | rashiraffi/Tutorials | f8b92dfe8388c4f617e277e3635c1c322708768c | 6fc6fb7a72e6774b7b785b2e37b5bcd853e31869 | refs/heads/master | 2021-06-09T05:45:01.558160 | 2021-05-29T16:16:04 | 2021-05-29T16:16:04 | 181,651,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | import os
# Demo of basic file management with the os module.

# Create file "demo.txt"
with open("demo.txt", "w") as demo_file:
    demo_file.write("This is a demo file...")
print("demo.txt file created")

# os.path.exists reports whether a path is present on disk (True/False).
print("demo.txt esist: ", os.path.exists("demo.txt"))

# Rename file using .rename function
os.rename("demo.txt", "demo_1.txt")
print("demo.txt esist: ", os.path.exists("demo.txt"))
print("demo_1.txt esist: ", os.path.exists("demo_1.txt"))

# delete file using .remove function
os.remove("demo_1.txt")
print("demo_1.txt Deleted...")
print("demo_1.txt esist: ",os.path.exists("demo_1.txt")) | [
"rashi1281@gmail.com"
] | rashi1281@gmail.com |
f2b3256c22467e1b32dda229247fffda1cde9b95 | e3bb63f93e36aab4a78356ba9d0e82f935325906 | /bitmovin/resources/models/manifests/hls/vtt_media.py | 78827f4ae4353e744ea3c2459772045c4d003fa8 | [
"Unlicense"
] | permissive | camberbridge/bitmovin-python | 1668367980df49f9088b93e4b6764563cbdb8bcf | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | refs/heads/master | 2020-04-09T17:51:46.786389 | 2018-11-30T14:46:34 | 2018-11-30T14:46:34 | 160,493,890 | 0 | 0 | Unlicense | 2018-12-05T09:31:18 | 2018-12-05T09:31:17 | null | UTF-8 | Python | false | false | 1,259 | py | from .abstract_media import AbstractMedia
class VttMedia(AbstractMedia):
    """HLS media entry that points at a WebVTT (subtitle) playlist URL."""

    def __init__(self, name, group_id, vtt_url, language=None, assoc_language=None, is_default=None, autoselect=None,
                 characteristics=None, id_=None):
        super().__init__(id_=id_, name=name, group_id=group_id, language=language, assoc_language=assoc_language,
                         is_default=is_default, autoselect=autoselect, characteristics=characteristics)
        # camelCase attribute name matches the REST API field.
        self.vttUrl = vtt_url

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build a VttMedia from an API JSON object."""
        # Let the base class pull out the shared media fields, then copy
        # them over together with the VTT-specific URL.
        base = super().parse_from_json_object(json_object=json_object)
        return VttMedia(id_=base.id,
                        name=base.name,
                        group_id=base.groupId,
                        language=base.language,
                        assoc_language=base.assocLanguage,
                        is_default=base.isDefault,
                        autoselect=base.autoselect,
                        characteristics=base.characteristics,
                        vtt_url=json_object.get('vttUrl'))
| [
"dominic.miglar@netunix.at"
] | dominic.miglar@netunix.at |
5e22ffac10fa94b272782b5fb37171ecb6f396a3 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /tracforgeplugin/branches/bewst/0.11-clearsilver/tracforge/subscriptions/admin.py | 969c6f76281620f148a1c266c27477e5f976eac7 | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | # TracForge subscription manager
from trac.core import *
from trac.admin import IAdminPanelProvider
from manager import SubscriptionManager
from util import open_env
class TracForgeSubscriptionAdmin(Component):
    """Admin GUI for subscriptions.

    Trac admin panel (Python-2-era plugin) that lists which Trac
    environments are subscribed to each subscribable type, and lets an
    administrator add a subscription via a POST form.
    """
    implements(IAdminPanelProvider)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        # Expose the panel only to users holding TRACFORGE_ADMIN.
        if req.perm.has_permission('TRACFORGE_ADMIN'):
            yield ('tracforge', 'Tracforge', 'subscriptions', 'Subscriptions')
    def process_admin_request(self, req, cat, page, path_info):
        # Collect the current subscriptions, keyed by subscribable type.
        # NOTE(review): the loop variable `type` shadows the builtin.
        mgr = SubscriptionManager(self.env)
        types = list(mgr.get_subscribables())
        subscriptions = {}
        for type in types:
            subscriptions[type] = list(mgr.get_subscriptions(type))
        if req.method == 'POST':
            if req.args.get('add'):
                env = req.args.get('env')
                type = req.args.get('type')
                assert type in types
                # Verify that this looks like an env
                try:
                    open_env(env)
                except IOError:
                    # Python 2 raise syntax; this module predates Python 3.
                    raise TracError, "'%s' is not a valid Trac environment"%env
                if env not in subscriptions[type]:
                    mgr.subscribe_to(env, type)
                # Redirect back to this panel after the POST.
                req.redirect(req.href.admin(cat,page))
        # Populate the ClearSilver template data and render.
        req.hdf['tracforge.types'] = types
        req.hdf['tracforge.subscriptions'] = subscriptions
        return 'tracforge_subscriptions_admin.cs', 'text/html'
| [
"bewst@7322e99d-02ea-0310-aa39-e9a107903beb"
] | bewst@7322e99d-02ea-0310-aa39-e9a107903beb |
1e10a988a22576889c2db6363a27333b37798c92 | 9813f92baae36effa035988f4ec402e3358500aa | /frozenset.py | 760ebecdfd52c0005f93b2bdf1994cd65e307db6 | [] | no_license | KripaSharma2016/advancepython | c483ac1b26527396b259e03dc70b0e69ef9b4f7a | 52961c6a5200c88f061512a7695d1cc3b2800b95 | refs/heads/master | 2021-11-23T23:37:56.603904 | 2017-05-22T04:49:21 | 2017-05-22T04:49:21 | 92,011,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | '''
Created on 13-May-2017
@author: ks016399
'''
# frozenset: an immutable (and therefore hashable) variant of set.
my_f_set = frozenset([1,2,3,4])
print(my_f_set)
# frozenset has no add(); the call below would raise AttributeError:
#my_f_set.add(78)
#print(my_f_set)
# A frozenset can be built from any iterable, e.g. a list:
my_list = [1,2,3,4]
print(frozenset(my_list))
# Elements must be hashable; the nested list below would raise TypeError:
#my_f_set = frozenset([1,2,3,[1,2,3],4,5,6])
#print(my_f_set)
# Building a frozenset from a dict keeps only its keys:
my_dict = {'product':'apple','cost':'120'}
print(frozenset(my_dict))
f_set_parking_allot = frozenset(['A_1','A_2','A_3'])
cars = ['bmw-x2','honda','maruti']
# Pair parking slots with cars. NOTE(review): frozenset iteration order is
# arbitrary, so the slot/car pairing is not deterministic. This file appears
# to target Python 2 (see the .next() call further down); under Python 3
# zip() is lazy and printing it would show an object repr, not the pairs.
my_list = zip(f_set_parking_allot,cars)
#alloted_space = {x:y for x,y in f_set_parking_allot,cars}
print(my_list)
def gen_primes():
    """Generate an infinite sequence of prime numbers.

    Incremental sieve: `composites` maps each known composite number to the
    list of primes that divide it; `q` advances one integer at a time.
    """
    composites = {}
    q = 2
    while True:
        divisors = composites.pop(q, None)
        if divisors is None:
            # q has no recorded divisors, so it is prime; its first
            # interesting multiple is q*q (smaller ones have smaller factors).
            yield q
            composites[q * q] = [q]
        else:
            # q is composite: push each of its prime divisors forward to
            # mark the next multiple.
            for p in divisors:
                composites.setdefault(p + q, []).append(p)
        q += 1
def get_prime():
    """Return the first 100 primes as a list.

    Fix: use the builtin next() instead of the Python-2-only generator
    method .next(), so the function works on both Python 2.6+ and Python 3.
    """
    gen_prime = gen_primes()
    return [next(gen_prime) for _ in range(100)]
# Membership tests on a frozenset are O(1), like a set.
my_f_prime_set = frozenset(get_prime())
print(my_f_prime_set)
# NOTE(review): this relies on Python 2 input(), which eval()s the typed
# text to an int; under Python 3 input() returns a str, so the membership
# test below would always be False.
num = input("enter any number to check weather it is prime of not:")
if num in my_f_prime_set:
    print("yess! it is prime number!")
else:
    print("Not a prime number!")
| [
"kripashankarsharma95@gmail.com"
] | kripashankarsharma95@gmail.com |
6dfb552f84c315549a67ba62eb906b19cda46631 | cea1ca6422eb46e60a6925dbfe6dbf67a6e73c21 | /Test/test.py | 5fefa2b6f3506f35e2014473e496b7e5418b2cac | [] | no_license | Dibu2002/Robosapies | 41bd0b236398cc90c6a867a955e119413b3b6feb | a12dc18ec0cb4c6f869d26d74cd5a320a3ed0be2 | refs/heads/master | 2023-08-01T03:49:03.595458 | 2021-09-14T05:45:56 | 2021-09-14T05:45:56 | 406,240,090 | 0 | 0 | null | 2021-09-14T05:45:13 | 2021-09-14T05:45:12 | null | UTF-8 | Python | false | false | 1,387 | py | import cv2
import numpy as np
import imutils
import json
# Track blue objects from the default camera and write a command flag to
# data.json depending on the object's vertical position.
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# Set capture width (property 3) and height (property 4).
cap.set(3, 1420)
cap.set(4, 800)
# Reference coordinates; py is the vertical threshold used below.
px = 725
py = 290
fx = 1156
while True:
    _, frame = cap.read()
    # Work in HSV: hue thresholds are more robust to lighting than BGR.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # HSV range for "blue" (hue 90-121) -- tuned empirically, presumably
    # for this camera/lighting; verify before reuse.
    lower_blue = np.array([90, 60, 0])
    upper_blue = np.array([121, 255, 255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    cnts = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    for c in cnts:
        area = cv2.contourArea(c)
        # Ignore tiny contours (noise).
        if area > 100:
            cv2.drawContours(frame, [c], -1, (0, 255, 0), 3)
            # Centroid of the contour from its image moments.
            M = cv2.moments(c)
            cx = int(M["m10"] / M["m00"])
            cy = int(M["m01"] / M["m00"])
            print(cx, " ", cy)
            cv2.circle(frame, (cx, cy), 7, (255, 255, 255), -1)
            cv2.putText(frame, "blue", (cx - 20, cy - 20), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 255, 255), 3)
            # Below the threshold line -> 'F', above it -> 'P'.
            if cy > py:
                dictionary = {'func': 'F'}
            else:
                dictionary = {'func': 'P'}
            json_object = json.dumps(dictionary, indent=4)
            print(json_object)
            # Rewrite the flag file every detection so a consumer can poll it.
            with open("data.json", "w") as file:
                file.write(json_object)
    cv2.imshow("result", frame)
    key = cv2.waitKey(1)
    # Quit on 'q'.
    if key == ord('q'):
        break
cv2.destroyAllWindows() | [
"sahajmistry005@gmail.com"
] | sahajmistry005@gmail.com |
8c0f06a4bce89ad0b1b33f09966d1c306e74fed4 | 2d367cd526ec7482f129a4660a0dab5fa7ae5df1 | /scripts/scripts_20180521/analyses/aDNA-ModernComparison/parseSuperfiles/parseBeagleSuperfile.ManyFilters.py | c6c375a3dc9cdaf1bbf80e9ad1b8db364bfde5a4 | [] | no_license | ab08028/OtterExomeProject | 1bad439e2c42162e854eab995c45e6c8391b2640 | 96c7758d424c44ae4d52dc951d89a59761b9a2c7 | refs/heads/master | 2021-06-01T20:23:02.554308 | 2020-08-13T17:01:22 | 2020-08-13T17:01:22 | 134,327,795 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,561 | py | """
Created on Fri May 10 12:14:23 2019
@author: annabelbeichman
In this script, we want to parse a angsd "superfile" that I generate that has concatenated bed-format (0based) coordinates in the first 12 columns, then the angsd maf output, then genotype probabilities or genotype lhoods (can be either), and finally count data per individual/per site
We can parse that file and count the number of callable sites per individual that pass posterior prob threshold, depth filter, and minimum number of individuals with data at that site (affects prior)
The script will calculate heterozygosity for all sites and just for transversions
It sums up heterozygosity posterior probability for each individual and divides by callable GTs (following methods of Fages et al. 2019 in Cell)
Explantion: the script sets up a dictionary for your list of individuals (order of list MUST match the input bam file list in ANGSD)
It then goes through the file site by site and for each individual it checks if the maximum posterior for the individual's three genotypes is >= some threshold (e.g. 0.5 or 0.95) .
If it is, it adds the heterozygosity posterior probabilty into that individual's dictionary entry (heterozygosity numerator), and tallies the GT as a callable Site for that individual (heterozygosity denominator)
# It will also calculate heterozygosity for transversions only
# and then reports the total sites passing the threshold per individual, the total het probabilities, and divides the two (hets/total sites) for all hets or just transversions
usage: python script.py inputFilepath sampleIDFile outputFile MaxProbCutoff PerIndividualDepthMinimum minIndsPerSite
"""
import gzip
import sys
# --- Command-line arguments -------------------------------------------------
filepath = sys.argv[1] #path to input superfile file, should contain transitions and transversions (a concatenation of angsd results in bed format, then mafs, then GPs or GLs, then counts -- generated from previous script)
sampleIDFile=sys.argv[2] # path to file with list of names in SAME ORDER as bamList you used for angsd
outname= sys.argv[3] # output file
MaxProbCutoff=float(sys.argv[4]) # if the max of the 3 genotype probs is below this, discard; keep if >= to the cutoff
PerIndividualDepthMinimum=float(sys.argv[5]) # per-individual minimum read depth for a genotype to count
minIndsPerSite=float(sys.argv[6]) # min number of individuals that have data at a site (note: won't be counted unless they also pass the PerIndividualDepthMinimum; so if you require 2 inds at 2 read depth (not recommended, just an example) then if you had an individual with 1 read, and an individual with 2 reads, it wouldn't pass.)
################# list of possible transversions ###############
# In beagle format, nucleotides are labeled as numbers
# Beagle codes: the allele codes as 0=A, 1=C, 2=G, 3=T
# so the 8 possible transversions are:
# 0-1 : A-C   1-0 : C-A
# 0-3 : A-T   3-0 : T-A
# 1-2 : C-G   2-1 : G-C
# 2-3 : G-T   3-2 : T-G
transversions=[('0','1'),('1','0'),('0','3'),('3','0'),('1','2'),('2','1'),('2','3'),('3','2')]
# (Hard-coded local test values for the six arguments used to live here;
# pass them on the command line instead.)
######################################################
# read samples into a list (strip \n from end of each one)
sampList = [line.rstrip('\n') for line in open(sampleIDFile)]
numInd=len(sampList)
# these are now in the proper order ### MAKE SURE IT'S ORDER YOUR BAMLIST WAS IN FOR ANGSD!!!!! OTHERWISE INDS WILL BE ASSIGNED INCORRECTLY
###### make empty dictionaries : ##############
#### counts of missing GTs (genotypes failing a filter):
missingDict=dict()
#### counts of called GTs (the heterozygosity denominator):
calledDict=dict()
#### sums of het/homAlt/homRef GPs or GLs passing filters
hetProbSumDict=dict()
homAltProbSumDict=dict()
homRefProbSumDict=dict()
#### transversions:
TransvOnly_HetProbSumDict=dict()
TransvOnly_HomAltProbSumDict=dict()
TransvOnly_HomRefProbSumDict=dict()
# populate all the dicts with sample IDs and 0s:
for sample in sampList:
    calledDict[sample]=0
    missingDict[sample]=0
    hetProbSumDict[sample]=0
    homAltProbSumDict[sample]=0
    homRefProbSumDict[sample]=0
    TransvOnly_HetProbSumDict[sample]=0
    TransvOnly_HomAltProbSumDict[sample]=0
    #TransvOnly_HomRefProbSumDict[sample]=0 # don't need HomRef transversions -- they are HomRef so aren't elevated by transversions
########### Open beagle GL posteriors file #############
# NOTE(review): the gzip file is opened in "rb" but its lines are compared
# against str literals below, which only works under Python 2; Python 3
# would need mode "rt". Confirm the target interpreter before changing.
superfile = gzip.open(filepath,"rb")
# get beagle header:
header=[]
for line in superfile:
    if "#" in line:
        header=line.strip().split('\t')
        # check that length of header is numInd*3 + 3 (marker allele1 allele2):
        #len(header)==(numInd*3) + 3 # should be TRUE
        break
# Locate the columns of interest. Header layout: bed-format columns first,
# then the mafs columns, then the beagle GPs/GLs, then the per-ind counts.
# note that .index("foo") will only return the FIRST entry and will throw an
# error if it isn't there. in this case that is okay, but just be aware.
allele1Index=header.index("allele1")
allele2Index=header.index("allele2")
beagleIndex=header.index("Ind0") # where beagle GPs or GLs start
countIndex=header.index("ind0TotDepth") # where per-ind read counts start
markerIndex=header.index("marker")
# reset file:
superfile.seek(0)
for line0 in superfile:
    # skip header and process things directly
    if line0.startswith("#"):
        continue
    # process beagle line, split by tabs:
    line=line0.strip().split('\t')
    # First check whether the site passes the minimum-individuals filter,
    # where an individual only counts if it also meets the depth minimum:
    counts=line[countIndex:]
    # number of individuals whose read depth meets the threshold at this site
    indsPassingThreshold = sum(float(i) >= float(PerIndividualDepthMinimum) for i in counts) # want to check if this value is >= the min number of individiduals at the site
    if float(indsPassingThreshold) < float(minIndsPerSite):
        continue
    # dont need to add this line to failed GTs dict, because it fails for all individuals. just don't include it in counts at all.
    # if the number of individuals with depth above the depth cutoff passes the minInd threshold, process the site;
    # note that an individual GT that still doesn't pass the depth cutoff won't be included in that individual's tallies.
    elif float(indsPassingThreshold) >= float(minIndsPerSite):
        marker=line[markerIndex] # marker name
        allele1=line[allele1Index] # allele 1 (reference)
        allele2=line[allele2Index] # allele 2 alternate
        # GT_Probs (or GLs) run from the first "Ind0" column for numInd*3 entries (3 genotypes per individual)
        GT_Probs=line[beagleIndex:(beagleIndex+(numInd*3))] # genotype posterior probabilities or Lhoods for all GTs/ inds
        # Split GTs into per-individual triples; the second [1] of each set is the het GT
        GTs_perInd = [GT_Probs[i:i+3] for i in range(0,len(GT_Probs),3)] # groups together every set of three GTs (3 per individual)
        # make a dictionary keyed by sample (zip preserves the bam-list order):
        GTs_perInd_Dict = dict(zip(sampList,GTs_perInd))
        # make a dictionary of the read counts per individual:
        counts_perInd_Dict = dict(zip(sampList,counts))
        for sample in sampList:
            # a genotype is "missing" when the individual fails the depth
            # filter OR its best genotype probability is below the cutoff
            GTs=GTs_perInd_Dict[sample]
            if float(counts_perInd_Dict[sample]) < float(PerIndividualDepthMinimum) or float(max(GTs)) < float(MaxProbCutoff):
                missingDict[sample]+=1 # add to the missing count
            # check if it passes both filters (read counts per individual and the maxProbCutoff); must pass both to keep
            elif float(counts_perInd_Dict[sample])>= float(PerIndividualDepthMinimum) and float(max(GTs)) >= float(MaxProbCutoff):
                # count the site as callable:
                calledDict[sample]+=1
                # accumulate genotype probabilities; order is homRef [0], het [1], homAlt [2]
                homRefProbSumDict[sample]+=float(GTs[0]) # add it to the total probability
                hetProbSumDict[sample]+=float(GTs[1]) # add it to the total probability
                homAltProbSumDict[sample]+=float(GTs[2]) # add it to the total probability
                # check if it's a transversion; if yes, add to the transversion-only sums
                if (allele1,allele2) in transversions:
                    # note transversions only for HomRef doesn't make sense, so not tracking.
                    #TransvOnly_HomRefProbSumDict[sample]+=float(GTs[0])
                    TransvOnly_HetProbSumDict[sample]+=float(GTs[1])
                    TransvOnly_HomAltProbSumDict[sample]+=float(GTs[2])
# Per-individual heterozygosity = summed het probability / callable sites.
# NOTE(review): raises ZeroDivisionError for a sample with zero callable sites.
heterozygosityDict=dict()
for sample in sampList:
    heterozygosityDict[sample] = hetProbSumDict[sample]/calledDict[sample]
TransvOnly_heterozygosityDict=dict()
for sample in sampList:
    TransvOnly_heterozygosityDict[sample] = TransvOnly_HetProbSumDict[sample]/calledDict[sample]
    # note that transv and regular het have same denominator (all callable sites)
# Write one tab-separated row per sample, plus a header naming each column.
outfile=open(outname, "w")
outheader="sample\tuncallableSites\tcallableSites\tsumHetGLsOrGPs\tsumHetGLsOrGPs_TransversionsOnly\tsumHomAltGLsOrGPs\tsumHomAltGLsOrGPs_TransversionsOnly\tsumHomRefGLsOrGPs\tHetPerSite\tHetPerSite_TransversionsOnly\tFilter_PerIndividualDepthMinimum\tFilter_minIndsPerSite\tFilter_ProbThresholdForCallableSite\n"
outfile.write(outheader)
for sample in sampList:
    out=[sample,str(missingDict[sample]),str(calledDict[sample]),str(hetProbSumDict[sample]),str(TransvOnly_HetProbSumDict[sample]),str(homAltProbSumDict[sample]),str(TransvOnly_HomAltProbSumDict[sample]),str(homRefProbSumDict[sample]),str(heterozygosityDict[sample]),str(TransvOnly_heterozygosityDict[sample]),str(PerIndividualDepthMinimum),str(minIndsPerSite),str(MaxProbCutoff)]
    outfile.write("\t".join(out))
    outfile.write("\n")
outfile.close()
superfile.close()
| [
"annabel.beichman@gmail.com"
] | annabel.beichman@gmail.com |
cdf5408e5aacacd9dccf1a02545c5e547bca71cf | e55449519ee59ef2f92a0ff33eca992b393a13de | /Python/Graph/DepthFirstSearch.py | 8680a53c566b8a1cbbe7fcfc5c3fff8d843f5910 | [] | no_license | vivekyadav6838/Data-Structures-and-Algorithms-for-Interviews | 6c23f0f94e006b50f1211b39c8f103718a080962 | 16c4b2ef55804260eed521e626a6375542a22275 | refs/heads/master | 2023-08-28T20:11:17.310870 | 2021-10-07T17:11:37 | 2021-10-07T17:11:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | #
# @author
# aakash.verma
#
# Output:
# 0 1 2 3 4 5
#
from collections import deque
class Graph:
    """Undirected graph on a fixed number of vertices, stored as adjacency lists."""

    def __init__(self, vertices):
        self.vertices = vertices
        self.adjacency_list = [[] for _ in range(vertices)]

    def add_edge(self, source, destination):
        """Connect source and destination (both directions: undirected)."""
        self.adjacency_list[source].append(destination)
        self.adjacency_list[destination].append(source) # for directed graph comment this line

    def dfs_util(self, node, visited, dfs):
        """Recursive helper: record `node` in preorder, then descend into unvisited neighbours."""
        dfs.append(node)
        visited[node] = True
        for neighbour in self.adjacency_list[node]:
            if not visited[neighbour]:
                self.dfs_util(neighbour, visited, dfs)

    def depth_first_search(self, source):
        """Return the preorder DFS traversal starting from `source`."""
        order = []
        self.dfs_util(source, [False] * self.vertices, order)
        return order
if __name__ == '__main__':
    # Build the demo graph from the slide example and print its DFS order.
    demo = Graph(6)
    for u, v in [(0, 1), (1, 2), (2, 3), (2, 4), (4, 5)]:
        demo.add_edge(u, v)
    # Expected output: 0 1 2 3 4 5
    for vertex in demo.depth_first_search(0):
        print(vertex, end=" ")
| [
"info.aakash11@gmail.com"
] | info.aakash11@gmail.com |
c8cc989854b6032c0ab5b892364d30a4f6210ac5 | 6ed541d5ae2203d2b2fe68863568e5dfce45f9c1 | /Decorator_20200412.py | b7dc40218149636a242ca65034543c37d4368e2a | [] | no_license | max2004boy/Hackerrank-Problem-Solving | c79763f2da7865ab8972f126fd02b23d302a101e | 4e9a4c562e79bb177f1c0fb72a14da48c0a2a434 | refs/heads/master | 2022-11-05T04:39:33.765297 | 2020-06-18T03:48:03 | 2020-06-18T03:48:03 | 257,767,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 12 23:19:59 2020
@author: max20
"""
def wrapper(f):
    """Decorator: reformat each 10+-digit number as '+91 XXXXX XXXXX' before calling f."""
    def fun(l):
        # Keep only the last 10 digits of each entry, split 5/5.
        formatted = ['+91 {} {}'.format(number[-10:-5], number[-5:]) for number in l]
        f(formatted)
    return fun
@wrapper
def sort_phone(l):
    # `l` arrives already normalised by the `wrapper` decorator as
    # '+91 XXXXX XXXXX' strings; print them sorted, one per line.
    print(*sorted(l), sep='\n')
if __name__ == '__main__':
    # Read a count from stdin, then that many phone numbers.
    l = [input() for _ in range(int(input()))]
    sort_phone(l)
| [
"noreply@github.com"
] | max2004boy.noreply@github.com |
048e6f01f7814357bed0a5d0c2a5c16732e92176 | b80cf47111616d37f0e9ee6ff31031c1036ad7e5 | /22082019/000920.py | a669012ae27319c596461774499cab53f0c526b6 | [] | no_license | nicolasilvac/MCOC-NivelacionPython | a62cc68688a85331c02f574927111d30e6a747c7 | 504e04f2287bf010053cd114e49d656122ecbec0 | refs/heads/master | 2020-07-06T15:05:23.214283 | 2019-08-31T00:39:53 | 2019-08-31T00:39:53 | 203,060,906 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | a = ['banana', 'apple', 'microsoft']
# NOTE: Python 2 syntax (print statements); the list `a` is defined just above.
for elemento in a:
    print elemento #prints each element of the list
    # expected output:
    #banana
    #apple
    #microsoft
b = [20, 10, 5]
total = 0
for e in b:
    total = total + e #each "e" takes the next element of the list (numbers, so they can be summed)
print total
#35 (the full sum)
c = list(range(1,5)) #builds the list 1..4; the stop value 5 is excluded
print c
#[1, 2, 3, 4]
total2 = 0
for i in range(1, 5):
    total2 += i #augmented assignment saves writing total2 = total2 + i
print total2
#10
print(list(range(1, 8)))
#[1, 2, 3, 4, 5, 6, 7]
total3 = 0
for i in range(1, 8):
    if i % 3 == 0:
        total3 += i #accumulate only the multiples of 3
print total3
#9
| [
"noreply@github.com"
] | nicolasilvac.noreply@github.com |
e92ce01a02475650c07ca66be99fa405c96d18b6 | 1bc0a826fecfae64d5704254a1d208fd32959aef | /lib/main_1.py | 29470ac5857add244075562358b1908de4e2d390 | [
"MIT"
] | permissive | hs2923/MLProject | 387e2a774c549f373511d0810e7406271b0b259b | d54604e7db95ff91d096b663ab229f709862701b | refs/heads/master | 2021-08-29T12:21:14.573535 | 2017-12-14T00:09:49 | 2017-12-14T00:09:49 | 111,725,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | ##
## main
##
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import libraries
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import script with auxiliar functions
import aux_functions as aux
def main(unused_argv):
    """Train the demo network on generated letter data and plot node weights."""
    ############################################
    ## Generate letters E,F,L
    ############################################
    train_data, train_labels, test_data, test_labels = aux.generateLetterData( 6000, 1200, True, 234 )
    print("Letter Data Dimensions")
    print("train_data")
    print(train_data.shape)
    print("train_labels")
    print(train_labels.shape)
    print("test")
    print(test_data.shape)
    print("test_labels")
    print(test_labels.shape)
    ############################################
    ## Run session
    ############################################
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # instantiate the network (TF1 style: graph ops built inside the session)
            net = aux.SlidesNetwork(sess, 64, 3, 2)
            # usual tf initialization
            sess.run(tf.global_variables_initializer())
            # make labels one-hot encoded (depth 3: one class per letter)
            onehot_labels_train = tf.one_hot( indices = tf.cast(train_labels, tf.int32), depth = 3 )
            # print error rate before training (fraction of mismatched labels)
            print('error rate BEFORE training is {}'.format((np.sum(net.compute(train_data)!=train_labels) / train_labels.size)))
            # train network once
            net.train( train_data, onehot_labels_train.eval() )
            # then train for 3000 further steps...
            for i in range(3000):
                net.train(train_data,onehot_labels_train.eval())
            # print error rate after training
            print('error rate AFTER training is {}'.format(( np.sum(net.compute(train_data)!=train_labels) / train_labels.size)))
            # get weights for each node
            node1, node2 = net.getWeights()
            # plot weights for node 1
            plt.imshow( node1.eval(), cmap = "gray")
            plt.show()
            # plot weights for node 2
            plt.imshow( node2.eval(), cmap = "gray")
            plt.show()
if __name__ == "__main__":
    # tf.app.run parses flags and then invokes main().
    tf.app.run()
| [
"31713176+joaquimlyrio@users.noreply.github.com"
] | 31713176+joaquimlyrio@users.noreply.github.com |
07c4c62ddb39072ba8e82ce7df75ff30d493b0d0 | 54bba666285f7b97fe101368f25556d9500c3c45 | /MCM.py | 1becfa66a3a3913ebc14acd746ea890fa29cd837 | [
"MIT"
] | permissive | dlara10/DanielLara_Ejercicio22 | 4e8510d90b5bb82ab71bc44f9ffb84fec9d3c257 | 203c7e1145a2675803ab0fb691039f5f2b85f1d1 | refs/heads/master | 2020-05-04T21:44:38.743980 | 2019-04-04T14:51:10 | 2019-04-04T14:51:10 | 179,486,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import os
import numpy as np
import matplotlib.pyplot as plt
# Compile the C++ Monte Carlo sampler, run it, and histogram its output.
os.system("g++ MCM.cpp -o MCM.x")
os.system("./MCM.x > datos.dat")

data = np.loadtxt("datos.dat")
# Bug fix: the original `x = data[:0]` is an empty slice, which produced an
# empty histogram. Use the whole 1-D array, or the first column when the
# sampler emits several columns.
x = data if data.ndim == 1 else data[:, 0]

plt.figure()
plt.hist(x)
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.title('Histogram')
plt.savefig("datos")
| [
"d.lara10@uniandes.edu.co"
] | d.lara10@uniandes.edu.co |
02ce163ea159df24d919148ce5efbf3b4dea820d | 1bdf0c2d6143a171a37380017e8c420bb124b967 | /eg/字典排序.py | cde54dc7cedc0964172d4ec6171b8a81c4610144 | [] | no_license | Xu-Angel/pyLearn | fb1277ab28536fc1a7236efb7e92a400759b689d | 73eea71f53c2616b48c99d07b35771005e304f5e | refs/heads/master | 2021-04-04T04:09:20.752575 | 2020-04-17T09:34:33 | 2020-04-17T09:34:33 | 248,423,590 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | # key
# def dictionairy():
# # 声明字典
# key_value ={}
# # 初始化
# key_value[2] = 56
# key_value[1] = 2
# key_value[5] = 12
# key_value[4] = 24
# key_value[6] = 18
# key_value[3] = 323
# print ("按键(key)排序:")
# # sorted(key_value) 返回一个迭代器
# # 字典按键排序
# for i in sorted (key_value) :
# print((i, key_value[i]), end=" ")
# value
def dictionairy():
    """Print the demo dict's (key, value) pairs sorted by value, then by key."""
    # Demo data: keys 1-6 with unordered values.
    pairs = {2: 56, 1: 2, 5: 12, 4: 24, 6: 18, 3: 323}
    print ("按值(value)排序:")
    # Sort items by (value, key); ties on value fall back to the key.
    print(sorted(pairs.items(), key=lambda kv: (kv[1], kv[0])))

dictionairy()
# Sorting a list of dicts with sorted() and a key function.
lis = [{ "name" : "Taobao", "age" : 100},
{ "name" : "Runoob", "age" : 7 },
{ "name" : "Google", "age" : 100 },
{ "name" : "Wiki" , "age" : 200 }]
# Sort ascending by 'age'.
print ("列表通过 age 升序排序: ")
print (sorted(lis, key = lambda i: i['age']) )
print ("\r")
# Sort by 'age' first, then by 'name' (tuple key breaks ties).
print ("列表通过 age 和 name 排序: ")
print (sorted(lis, key = lambda i: (i['age'], i['name'])) )
print ("\r")
# Sort descending by 'age'.
print ("列表通过 age 降序排序: ")
print (sorted(lis, key = lambda i: i['age'],reverse=True) ) | [
"1049547382@qq.com"
] | 1049547382@qq.com |
2f3980b046aa3fb23f3794d17df2df686e195394 | a3b81c72118a1a2cb5a00a9b56f82695f6273d30 | /datautils/representation.py | 75715225df7a0f4d93007a8ea677d5e9bbf4d20d | [] | no_license | thomlake/DataUtils | 4c4e94ebc2055dab38e3385b48bbb10947942d47 | c44958800bfa0d4e74ea9a4efe7b1a1a70c5c865 | refs/heads/master | 2020-05-05T03:38:28.871046 | 2012-07-18T19:21:22 | 2012-07-18T19:21:22 | 3,423,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,677 | py | #---------------------------------------#
# This file is part of DataUtils.
#
# DataUtils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BowNlPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DataUtils. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------#
# author:
# tllake
# email:
# <thomas.l.lake@wmich.edu>
# <thom.l.lake@gmail.com>
# date:
# 2012.02.12
# file:
# representation.py
# description:
# classes for storing vector representation of non vector things
#---------------------------------------#
import numpy as np
def binarray(dim, onesat):
    """Return a length-``dim`` float vector with ones at the indices in *onesat*."""
    indicator = np.zeros(dim)
    for idx in onesat:
        indicator[idx] = 1
    return indicator
def onehotarray(dim, oneat):
    """Return a length-``dim`` one-hot float vector with the 1 at index *oneat*."""
    vec = np.zeros(dim)
    np.put(vec, oneat, 1)
    return vec
def onehotoffsetarray(offsetdim, onehotdim, indices):
    """Return a two-hot vector: one 1 in the offset half, one in the one-hot half.

    *indices* is an ``(offset, onehot)`` pair; the result has length
    ``offsetdim + onehotdim``.
    """
    offset_idx, onehot_idx = indices
    vec = np.zeros(offsetdim + onehotdim)
    vec[[offset_idx, offsetdim + onehot_idx]] = 1
    return vec
def wheregt(x, t):
    """Return a tuple of the indices where ``x`` exceeds threshold ``t``."""
    return tuple(np.nonzero(x > t)[0])
class IntRep(object):
    """Map hashable items to small consecutive integer ids, with counts.

    Index 0 is reserved for the ``notfound`` sentinel; unknown items map to 0.
    """

    def __init__(self, vocab = None, notfound = '<UNK?>'):
        self.notfound = notfound
        self.item_to_idx = {notfound: 0}
        self.idx_to_item = {0: notfound}
        self.idx_ctr = {0: 0}  # idx -> number of occurrences seen
        self.dim = 1  # next free index == vocabulary size incl. sentinel
        if vocab:
            for word in vocab:
                # reuse add() instead of duplicating its logic (the original
                # carried an identical copy of this try/except in-line)
                self.add(word)

    def __getitem__(self, word):
        """Return the index of *word*, or 0 (the sentinel) if unseen."""
        try:
            return self.item_to_idx[word]
        except KeyError:
            return 0

    def inv(self, val):
        """Inverse lookup: index -> item; the sentinel for unknown indices.

        The original used a bare ``except:``; narrowed to the two errors a
        dict lookup can raise (missing key, unhashable key).
        """
        try:
            return self.idx_to_item[val]
        except (KeyError, TypeError):
            return self.notfound

    def topk(self, k = None, get_item = False):
        """Return the *k* most frequent ``(idx, count)`` pairs.

        With ``get_item=True`` the indices are mapped back to items.
        ``iteritems()`` (Python-2-only) was replaced by ``items()``, and the
        redundant second ``[:k]`` slice was dropped.
        """
        ranked = sorted(self.idx_ctr.items(), key = lambda kv: kv[1], reverse = True)[:k]
        if get_item:
            return [(self.inv(idx), count) for idx, count in ranked]
        return ranked

    def add(self, word):
        """Record one occurrence of *word*, assigning a fresh index if new."""
        try:
            i = self.item_to_idx[word]
            self.idx_ctr[i] += 1
        except KeyError:
            self.item_to_idx[word] = self.dim
            self.idx_to_item[self.dim] = word
            self.idx_ctr[self.dim] = 1
            self.dim += 1
class OneHotRep(object):
    """One-hot vector representations for a vocabulary of hashable items.

    Index 0 is reserved for the ``notfound`` sentinel so that unknown items
    always have a representation.
    """

    def __init__(self, vocab = None, notfound = '<UNK?>'):
        self.item_to_idx = {notfound: 0}
        self.idx_to_item = {0: notfound}
        self.item_to_rep = {}  # lazily filled cache of one-hot arrays
        self.dim = 1
        self.notfound = notfound
        if vocab:
            # BUG FIX: the original rebuilt the index dicts from scratch here,
            # dropping the sentinel entry (so unknown lookups raised KeyError)
            # while still counting it in ``dim``.  Keep the sentinel at index 0
            # and append the vocabulary after it.
            for item in vocab:
                self.add(item)

    def __getitem__(self, x):
        """Return (and cache) the one-hot vector of *x* (sentinel if unknown)."""
        try:
            return self.item_to_rep[x]
        except KeyError:
            try:
                idx = self.item_to_idx[x]
            except KeyError:
                idx = self.item_to_idx[self.notfound]
            rep = np.zeros(self.dim)
            rep[idx] = 1
            self.item_to_rep[x] = rep
            return rep

    def add(self, item):
        """Assign the next free index to *item* (no duplicate check, as before)."""
        self.item_to_idx[item] = self.dim
        self.idx_to_item[self.dim] = item
        self.dim += 1

    def bagrep(self, items):
        """Return a multi-hot (bag) vector over *items*.

        BUG FIX: the original referenced nonexistent ``self.n`` and indexed
        with the arrays returned by ``self[item]``; use ``self.dim`` and the
        integer indices (0, the sentinel, for unknown items).
        """
        z = np.zeros(self.dim)
        z[[self.item_to_idx.get(item, 0) for item in items]] = 1
        return z

    def bagfrom(self, rep, thresh = 0.):
        """Decode a bag vector back to the list of items above *thresh*."""
        return [self.idx_to_item[idx] for idx in np.nonzero(rep > thresh)[0]]

    def itemfrom(self, rep):
        """Decode a one-hot vector to its item via argmax."""
        return self.idx_to_item[rep.argmax()]
class OneHotOffsetRep(object):
    """Two-part codes: an "offset" one-hot concatenated with an inner one-hot.

    Each item gets an index pair ``(offset, onehot)``; its vector has length
    ``offsetdim + onehotdim`` and exactly two ones.  The pair ``(0, 0)`` is
    reserved for the ``notfound`` sentinel.
    """
    def __init__(self, offsetdim, onehotdim, vocab = None, notfound = '<UNK?>'):
        self.onehotdim = onehotdim
        self.offsetdim = offsetdim
        self.notfound = notfound
        # Cursor of the next free (offset, onehot) slot; (0, 0) is taken by
        # the sentinel, so the inner cursor starts at 1.
        self.curroffset = 0
        self.curronehot = 1
        notfoundidx = (0, 0)
        self.item_to_idx = {notfound: notfoundidx}
        self.idx_to_item = {notfoundidx: notfound}
        self.item_to_rep = {notfound: onehotoffsetarray(self.offsetdim, self.onehotdim, notfoundidx)}
        if vocab:
            for item in vocab:
                self.add(item)
    def __getitem__(self, x):
        # Return (and lazily cache) the vector for x.  Unknown items fall
        # back to the sentinel's vector, deliberately NOT cached under x.
        try:
            return self.item_to_rep[x]
        except KeyError:
            try:
                idx = self.item_to_idx[x]
            except KeyError:
                return self.item_to_rep[self.notfound]
            z = onehotoffsetarray(self.offsetdim, self.onehotdim, idx)
            self.item_to_rep[x] = z
            return z
    def __newidx(self):
        # Advance the cursor: hand out inner slots 1..onehotdim-1 for the
        # current offset, then move to the next offset and restart at 0.
        if self.curronehot % self.onehotdim == 0:
            self.curronehot = 1
            self.curroffset += 1
            return (self.curroffset, 0)
        else:
            idx = (self.curroffset, self.curronehot)
            self.curronehot += 1
            return idx
    def add(self, item):
        # Register item with a fresh index pair unless it already has one.
        try:
            self.item_to_idx[item]
        except KeyError:
            idx = self.__newidx()
            self.item_to_idx[item] = idx
            self.idx_to_item[idx] = item
    def itemfrom(self, rep, thresh = 0.):
        """Decode a vector back to its item (argmax of each half)."""
        try:
            offsetidx = rep[:self.offsetdim].argmax()
            onehotidx = rep[self.offsetdim:].argmax()
            return self.idx_to_item[(offsetidx, onehotidx)]
        except KeyError:
            pass
        return '__ITEM_NOT_FOUND__'
class RandBinRep(object):
    """Sparse random binary representations.

    Each item is assigned a random index set where every position is included
    with probability ``p``; its vector has ones at exactly those positions.
    """

    def __init__(self, dim, vocab = None, p = 0.1, notfound = '<UNK?>'):
        self.dim = dim
        self.q = 1 - p  # threshold: a unit fires when a uniform draw exceeds q
        self.notfound = notfound
        # The sentinel's index is drawn without a uniqueness check (nothing
        # exists to collide with yet).
        notfoundidx = self.__randidx()
        self.item_to_idx = {notfound: notfoundidx}
        self.idx_to_item = {notfoundidx: notfound}
        self.item_to_rep = {notfound: self.__asarray(notfoundidx)}
        if vocab:
            for item in vocab:
                # reuse add() so duplicate vocab entries are registered once
                self.add(item)

    def __randidx(self):
        """Draw a random sparse index tuple (each position kept with prob. p)."""
        return tuple(np.where(np.random.random(self.dim) > self.q)[0])

    def __asarray(self, idx):
        """Materialize an index tuple as a 0/1 vector of length ``dim``."""
        rep = np.zeros(self.dim)
        rep[list(idx)] = 1
        return rep

    def __getitem__(self, x):
        """Return (and cache) the vector of *x* (sentinel vector if unknown)."""
        try:
            return self.item_to_rep[x]
        except KeyError:
            try:
                idx = self.item_to_idx[x]
            except KeyError:
                return self.item_to_rep[self.notfound]
            rep = self.__asarray(idx)
            self.item_to_rep[x] = rep
            return rep

    def __newidx(self):
        """Draw an index tuple not yet in use; ``()`` if patience runs out.

        BUG FIX: the original checked ``patience < 0`` after a loop that stops
        at 0, so the failure branch was unreachable and a duplicate index
        could be returned.  Check for the actual duplicate instead.
        """
        idx = self.__randidx()
        patience = 1000
        while idx in self.idx_to_item and patience > 0:
            idx = self.__randidx()
            patience -= 1
        if idx in self.idx_to_item:
            return ()
        return idx

    def add(self, item):
        """Register *item* with a fresh random index unless already present."""
        try:
            self.item_to_idx[item]
        except KeyError:
            idx = self.__newidx()
            self.item_to_idx[item] = idx
            self.idx_to_item[idx] = item

    def itemfrom(self, rep, thresh = 0.):
        """Decode a vector back to its item by its above-threshold index set."""
        try:
            return self.idx_to_item[tuple(np.where(rep > thresh)[0])]
        except KeyError:
            pass
        return '__ITEM_NOT_FOUND__'
class BinaryTreeSoftmaxRep(object):
    """Stub for a binary-tree (hierarchical) softmax representation.

    Only stores the vocabulary size; the implementation appears unfinished
    at this point in the file.
    """
    def __init__(self, n):
        # n: number of items the representation would cover.
        self.n = n
| [
"thom.l.lake@gmail.com"
] | thom.l.lake@gmail.com |
3c32af0c8c3dd971d0aaa4bddbac2f32bc78ea47 | 93d361d1cfaf5065aada52ff53833b67302c2b1c | /project/urls.py | 9cef54d5038b91c04d21c889fda0d9087dcbd3ed | [] | no_license | append-knowledge/restapi-with-jwt-token | 0fe573cd45633829645544447f66e6d6b43458ad | fbd276fb38cbd687253176b1dd96f07e16707dfd | refs/heads/master | 2023-08-27T02:55:20.826945 | 2021-10-09T18:33:52 | 2021-10-09T18:33:52 | 415,391,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from django.urls import path
from project import views
urlpatterns=[
path('accounts/signup',views.SignUpview.as_view(),name='signup'),
path('accounts/signin',views.SignInView.as_view(),name='signin'),
path('accounts/signout',views.SignOutView.as_view(),name='logout'),
path('accounts/home',views.HomeView.as_view(),name='home'),
path('accounts/change/<int:id>',views.ChangeDetailsView.as_view(),name='editdetails'),
path('accounts/remove/<int:id>',views.delete,name='removeitem')
] | [
"lijojose95@gmail.com"
] | lijojose95@gmail.com |
ff867b19969fb12f7c9a4f8cd4865f82f49e6c70 | 9c8857d980cc53bc4f69eee3355226fcd0b42746 | /app/main.py | 0712ce2ce6088a1f2105ac717532b0ac3b048a3f | [] | no_license | moh-hosseini98/FastAPI-crud-tortoise-orm-and-postgres | bbaf9a305504d45e0db549edf11fe00b625404f9 | 75f2c5d4eeee38113f5bb0d19956f721c0836db1 | refs/heads/main | 2023-08-17T22:16:54.912308 | 2021-10-20T14:41:08 | 2021-10-20T14:41:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | import uvicorn
if __name__=="__main__":
uvicorn.run("server.app:app",host="0.0.0.0",port=8000,reload=True) | [
"mamadhss@yahoo.com"
] | mamadhss@yahoo.com |
f11683ed6c463a91f9f3e196b22c1208be494e30 | c4fd93ce21086262a9cd0a94cd4052321981e655 | /sentiment_decision_tree.py | e9913d398c92251d73078edf21ffd41cc32d40ad | [] | no_license | jmarshallDev/MLProject | d69a7c0391b4996ce7722da68d809f138a3baea6 | 4c37ee49d5d050d6927977bc58be203521fa3c14 | refs/heads/main | 2023-01-30T23:41:12.133032 | 2020-12-14T04:51:34 | 2020-12-14T04:51:34 | 311,744,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | # decisiontree.py
"""Predict Sentiment of a given sentence using a decision tree."""
import matplotlib.pyplot as plt
import numpy as np
import os
import pdb
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.metrics import confusion_matrix
from preprocessing import gimme
ROOT = os.path.dirname(os.path.abspath(__file__)) # root directory of this code
def main():
# Loading and Splitting Data
training_data, validation_data, testing_data, training_labels, validation_labels, testing_labels = gimme()
# Train a decision tree via information gain on the training data
clf = DecisionTreeClassifier(
criterion="entropy",
splitter="best",
max_depth=None, # class_weight="balanced",
random_state=0)
clf.fit(training_data, training_labels)
# Test the decision tree
pred = clf.predict(testing_data)
# Compare training and test accuracy
print("train accuracy =", np.mean(training_labels == clf.predict(training_data)))
print("test accuracy =", np.mean(testing_labels == pred))
# Show the confusion matrix for test data
cm = confusion_matrix(testing_labels, pred)
print("Confusion matrix:")
print('\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in cm]))
# Visualize the tree using matplotlib and plot_tree
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(11, 5), dpi=150)
plot_tree(clf, class_names=["positive", "negative"], filled=True, rounded=True, fontsize=6)
plt.show()
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | jmarshallDev.noreply@github.com |
94792a1bda13eac1d3f97a44481616c63e24d376 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1642.py | c31871167442ed89cb0e8fb17031677d335e0e83 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Nishant
#
# Created: 12-04-2014
# Copyright: (c) Nishant 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
input_file = "E:\Dropbox\CodeBase\Python\GoogleCodeJam_2014\A-small-attempt0.in"
output_file = "E:\Dropbox\CodeBase\Python\GoogleCodeJam_2014\A-output.txt"
f = open(input_file, 'r')
o = open(output_file, 'w')
cases = int(f.readline())
lst = list(f)
i = 0
j = 1
while i < (cases * 10):
first = int(lst[i])
# print (first)
arr1 = [lst[i+1], lst[i+2], lst[i+3], lst[i+4]][first-1]
# print (arr1)
i += 5
sec = int(lst[i])
# print (sec)
arr2 = [lst[i+1], lst[i+2], lst[i+3], lst[i+4]][sec-1]
# print (arr2)
i += 5
set1 = set(arr1.split())
set2 = set(arr2.split())
# print (set1)
# print (set2)
res = set1 & set2
if len(res) == 0:
o.write ("Case #%s: Volunteer cheated!\n" %(j))
elif len(res) > 1:
o.write ("Case #%s: Bad magician!\n" %(j))
else:
o.write ("Case #%s: %s\n" %(j, next(iter(res))))
j += 1
f.close()
o.close()
if __name__ == '__main__':
main() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
b05399a6ff94cb5efa0799a162c6431e21c5440a | e68a40e90c782edae9d8f89b827038cdc69933c4 | /res_bw/scripts/common/lib/plat-mac/carbon/carbonevt.py | a403e280d89920be14e1e2e9b2990efb37dd6195 | [] | no_license | webiumsk/WOT-0.9.16 | 2486f8b632206b992232b59d1a50c770c137ad7d | 71813222818d33e73e414e66daa743bd7701492e | refs/heads/master | 2021-01-10T23:12:33.539240 | 2016-10-11T21:00:57 | 2016-10-11T21:00:57 | 70,634,922 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 372 | py | # 2016.10.11 22:21:54 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-mac/Carbon/CarbonEvt.py
from _CarbonEvt import *
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-mac\carbon\carbonevt.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.10.11 22:21:54 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
22d50b0a0e87deb6771fa95b2869f89989d637c4 | 958863c5c364575f7475a0b00bd8cce4ee0ef419 | /mysite/mysite/settings.py | ddc9db0b946a28830f0886a40ecf6bc38abde2b3 | [] | no_license | marcioaleson/Django | 2216530107db26c1688a4f1037f453a21184f797 | 37387c718891833cbeae30ef91da8ba3be74b606 | refs/heads/master | 2021-01-13T08:23:24.893170 | 2016-12-07T00:53:37 | 2016-12-07T00:53:37 | 71,876,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '!g4)t2$r@4-fye_3!i$yk4yx!wbgek0wi-=(^5a&aoz649xq@x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Development default: file-backed SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
"marcioaleson@gmail.com"
] | marcioaleson@gmail.com |
53db77bbc9e84818439f3a441def7bce0eea206d | 96583055700c7e589fa5d59d4ff39c016a33c697 | /db.py | 1e178568eb96b5d4fbda8343eee45cad6169d179 | [] | no_license | gospel306/webproject | 6354d9e233e2c97b0f1d09dc5bc3d3c23312cee7 | 256f44cb6ed5476fc0b484ac92fbe35b6fbf7317 | refs/heads/develop | 2021-06-16T20:28:11.396913 | 2021-06-01T08:36:15 | 2021-06-01T08:36:15 | 206,063,544 | 1 | 3 | null | 2020-08-04T07:44:34 | 2019-09-03T11:41:42 | TSQL | UTF-8 | Python | false | false | 24,445 | py | import pprint
import pymysql
import requests
import webbrowser
from bs4 import BeautifulSoup as bs
from selenium import webdriver
tabledict = {12 :'tourplace', 14 : 'culture' , 15 :'festival', 25 : 'course', 28 : 'leports' , 32 : 'lodgment', 38 : 'shopping', 39 : 'restaurant'}
URL = 'http://api.visitkorea.or.kr/openapi/service/rest/KorService/areaBasedList?ServiceKey=gAgIdPkL9wh55d59ACpjljEduRtTrvBooU9ZLJJmtESF%2Fyfe3i1F65XlDslHoWTjN10OxyuoRJXavvQpnJRLZw%3D%3D&contentTypeId=&areaCode=&sigunguCode=&cat1=&cat2=&cat3=&listYN=Y&MobileOS=ETC&MobileApp=TourAPI3.0_Guide&arrange=A&numOfRows=30000&pageNo=1&_type=json'
res = requests.get(URL)
respon = res.json()
for i in range(25344):
print('i = {}'.format(i))
temp = respon['response']['body']['items']['item'][i+25344]
if "addr1" in temp:
add = temp['addr1']
if "addr2" in temp:
address = add + ' ' + str(temp['addr2'])
else :
address = add
else:
address= ''
if "areacode" in temp : areacode = temp['areacode']
else : areacode = 0
contentid = temp['contentid']
contenttypeid = temp['contenttypeid']
# 공통정보에서 overview 가지고 오기 ( 웹 크롤링)
commonurl = 'http://api.visitkorea.or.kr/guide/tourDetail.do?contentId={}&langtype=KOR&typeid={}&oper=area&burl=&contentTypeId=&areaCode=&sigunguCode=&cat1=&cat2=&cat3=&listYN=Y&MobileOS=ETC&MobileApp=TourAPI3.0_Guide&arrange=A&numOfRows=12&pageNo=1'.format(contentid, contenttypeid)
path = "C:/chromedriver"
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('headless')
driver = webdriver.Chrome(path, chrome_options=chrome_options)
driver.get(commonurl)
html = driver.page_source
soup = bs(html,'html.parser')
table2 = soup.find('div',class_="summary")
overview = str(table2.find("p").get_text()).strip()
tempdict = {}
table2 = soup.find("div",class_="txtData none").find("ul").select('li')
conn = pymysql.connect(host='localhost', port=3306, user='root', password='123123',
db='mydb', charset='utf8')
image,mapx,mapy,title,tel,zipcode,sigungu = '', 0.0, 0.0, '','','',0
if "firstimage" in temp : image = temp['firstimage']
if "mapx" in temp : mapx = temp['mapx']
if "mapy" in temp : mapy = temp['mapy']
if "title" in temp : title = temp['title']
title = "'{}'".format(title)
if "tel" in temp : tel = str(temp['tel'])
if "zipcode" in temp : zipcode = temp['zipcode']
if "sigungucode" in temp : sigungu = temp['sigungucode']
sql = "INSERT INTO place (contentid,contenttypeid,mapx,mapy,tel,title,zipcode,areacode,sigunguCode,address,image,overview) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
val = (int(contentid),int(contenttypeid),float(mapx),float(mapy),str(tel),str(title),str(zipcode),int(areacode),int(sigungu),str(address),str(image),str(overview))
curs = conn.cursor()
curs.execute(sql,val)
print("finish place")
infocenter, accomcount, chkbabycarriage, chkcreditcard, chkpet,parking,restdate = '','','','','','',''
for element in table2:
tempstr = str(element)
catag = str(element.find("strong").get_text().rstrip(' :'))
print(catag)
if catag == '예약 안내':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
infocenter = cutting
if catag == '문의 및 안내':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
infocenter = cutting
if catag == '수용인원':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
accomcount = cutting
if catag == '유모차 대여 여부':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
chkbabycarriage = cutting
if catag == '신용카드 가능 여부':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
chkcreditcard = cutting
if catag == '애완동물 동반 가능 여부':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
chkpet = cutting
if catag == '주차시설':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
parking = cutting
if catag == '쉬는날':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
restdate = cutting
if catag == '쉬는 날':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
restdate = cutting
commonsql = "INSERT INTO samething (accomcount,chkbabycarriage,chkcreditcard,chkpet,infocenter,parking,restdate,place_contentid) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)"
commonvalue = (
str(accomcount), str(chkbabycarriage), str(chkcreditcard), str(chkpet), str(infocenter), str(parking),str(restdate), int(contentid))
commoncurs = conn.cursor().execute(commonsql, commonvalue)
print("finish common")
imagetable = soup.find('div',class_="imgGallery")
if imagetable :
visited = []
tmp = imagetable.find('div', class_ = "bx-wrapper").find_all('li')
length = len(tmp)
for z in tmp :
if (len(visited) != 0 ) :
if (visited[0] == z.find('img').get('alt')) :
break
else :
visited.append(z.find('img').get('alt'))
else :
visited.append(z.find('img').get('alt'))
imagesql = "INSERT INTO image (contentid, originimgurl) VALUES (%s,%s)"
imageval = (int(contentid), str(z.find('img').get('src')))
curs = conn.cursor()
curs.execute(imagesql, imageval)
print("finish image")
expagerange, expguide, usetime = '', '', ''
parkingfee, usefee, usetimeculture = '', '', ''
agelimit, bookingplace, discountinfofestival, eventenddate, eventplace, eventstartdate, placeinfo, playtime, program, sponsor1, sponsor2, sponsor1tel, sponsor2tel, subevent, usetimefestival, spendtimefestival = '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''
distance, taketime = '', ''
openperiod, parkingfeeleports, reservation, usefeeleports, usetimeleports = '', '', '', '', ''
barbecue, campfire, checkintime, checkouttime, chkcooking, foodplace, pickup, publicbath, reservationlodgin, reservationurl, roomcount, roomtype, subfacility = '', '', '', '', '', '', '', '', '', '', '', '', ''
opentime, saleitem, shopguide = '', '', ''
firstmenu, kidsfacility, opentimefood, packing, reservationfood, smoking, treatmenu = '', '', '', '', '', '', ''
print(contenttypeid)
print(contentid)
if contenttypeid == 12:
for element in table2:
catag = str(element.find("strong").get_text().rstrip(' :')).strip()
tempstr = str(element)
if catag == '체험가능연령' :
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
expagerange = cutting
continue
if catag == '체험안내':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
expguide = cutting
continue
if catag == '이용시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
usetime = cutting
continue
toursql = "INSERT INTO tourplace (contentid, expagerange, expguide ,usetime) VALUES (%s,%s,%s,%s)"
tourval = (int(contentid), str(expagerange),str(expguide),str(usetime))
curs = conn.cursor()
curs.execute(toursql, tourval)
if contenttypeid == 14 :
print(catag)
for element in table2:
catag = str(element.find("strong").get_text().rstrip(' :')).strip()
tempstr = str(element)
if catag == '주차요금':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
parkingfee = cutting
continue
if catag == '관람료':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
usefee = cutting
continue
if catag == '이용요금':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
usefee = cutting
continue
if catag == '이용시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
usetimeculture = cutting
continue
culturesql = "INSERT INTO cultural (contentid, parkingfee, usefee, usetimeculture) VALUES (%s,%s,%s,%s)"
cultureval = (int(contentid), str(parkingfee),str(usefee),str(usetimeculture))
curs = conn.cursor()
curs.execute(culturesql, cultureval)
if contenttypeid == 15 :
for element in table2:
catag = str(element.find("strong").get_text().rstrip(' :')).strip()
tempstr = str(element)
if catag == '관람가능연령':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
agelimit = cutting
continue
if catag == '예매처':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
bookingplace = cutting
continue
if catag == '할인정보':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
discountinfofestival = cutting
continue
if catag == '행사종료일':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
eventenddate = cutting
continue
if catag == '행사장소':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
eventplace = cutting
continue
if catag == '행사시작일':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
eventstartdate = cutting
continue
if catag == '행사장 위치안내':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
placeinfo = cutting
continue
if catag == '공연시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
playtime = cutting
continue
if catag == '프로그램':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
program = cutting
continue
if catag == '주최자 정보':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
sponsor1 = cutting
continue
if catag == '주관자 정보':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
sponsor2 = cutting
continue
if catag == '주최자 연락처':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
sponsor1tel = cutting
continue
if catag == '주관자 연락처':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
sponsor2tel = cutting
continue
if catag == '부대행사':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
subevent = cutting
continue
if catag == '이용요금':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
usetimefestival = cutting
continue
if catag == '관람소요시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
spendtimefestival = cutting
continue
festivalsql = "INSERT INTO festival (contentid, agelimit, bookingplace, discountinfofestival, eventenddate, eventplace, eventstartdate, placeinfo, playtime, program, sponsor1, sponsor2, sponsor1tel, sponsor2tel, subevent, usetimefestival, spendtimefestival) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
festivalval = (int(contentid), str(agelimit),str(bookingplace),str(discountinfofestival),str(eventenddate),str(eventplace),str(eventstartdate),str(placeinfo),str(playtime),str(program),str(sponsor1),str(sponsor2),str(sponsor1tel),str(sponsor2tel),str(subevent),str(usetimefestival),str(spendtimefestival))
curs = conn.cursor()
curs.execute(festivalsql, festivalval)
if contenttypeid == 25:
for element in table2:
catag = str(element.find("strong").get_text().rstrip(' :')).strip()
tempstr = str(element)
if catag == '총 거리':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
distance = cutting
continue
if catag == '관람소요시간' or '소요시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
taketime = cutting
continue
coursesql = "INSERT INTO course (contentid, distance, taketime) VALUES (%s,%s,%s)"
courseval = (int(contentid), str(distance),str(taketime))
curs = conn.cursor()
curs.execute(coursesql, courseval)
if contenttypeid == 28 :
for element in table2:
catag = str(element.find("strong").get_text().rstrip(' :')).strip()
tempstr = str(element)
if catag == '개장시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
openperiod = cutting
continue
if catag == '주차요금':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
parkingfeeleports = cutting
continue
if catag == '예약안내':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
reservation = cutting
continue
if catag == '이용요금':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
usefeeleports = cutting
continue
if catag == '이용시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
usetimeleports = cutting
continue
leportssql = "INSERT INTO leports (contentid, openperiod, parkingfeeleports, reservation, usefeeleports, usetimeleports) VALUES (%s,%s,%s,%s,%s,%s)"
leportsval = (int(contentid), str(openperiod), str(parkingfeeleports), str(reservation), str(usefeeleports), str(usetimeleports))
curs = conn.cursor()
curs.execute(leportssql, leportsval)
if contenttypeid == 32 :
for element in table2:
catag = str(element.find("strong").get_text().rstrip(' :')).strip()
tempstr = str(element)
if catag == '바베큐장':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
barbecue = cutting
continue
if catag == '캠프파이어':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
campfire = cutting
continue
if catag == '체크인':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
checkintime = cutting
continue
if catag == '체크아웃':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
checkouttime = cutting
continue
if catag == '조리 가능':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
chkcooking = cutting
continue
if catag == '식음료장':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
foodplace = cutting
continue
if catag == '픽업서비스':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
pickup = cutting
continue
if catag == '공용샤워실':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
publicbath = cutting
continue
if catag == '예약 안내':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
reservationlodgin = cutting
continue
if catag == '예약안내 홈페이지':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
reservationurl = cutting
continue
if catag == '객실 수':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
roomcount = cutting
continue
if catag == '객실 유형':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
roomtype = cutting
continue
if catag == '부대 시설':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
subfacility = cutting
continue
lodgesql= "INSERT INTO lodgment (contentid,barbecue, campfire, checkintime, checkouttime, chkcooking, foodplace, pickup, publicbath, reservationlodgin, reservationurl, roomcount, roomtype, subfacility) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
lodgeval = (int(contentid), str(barbecue), str(campfire), str(checkintime), str(checkouttime), str(chkcooking), str(foodplace), str(pickup), str(publicbath), str(reservationlodgin), str(reservationurl), str(roomcount), str(roomtype), str(subfacility))
curs = conn.cursor()
curs.execute(lodgesql, lodgeval)
if contenttypeid == 38 :
for element in table2:
catag = str(element.find("strong").get_text().rstrip(' :')).strip()
tempstr = str(element)
if catag == '영업시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
opentime = cutting
continue
if catag == '판매품목':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
saleitem = cutting
continue
if catag == '매장안내':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
shopguide = cutting
continue
shoppingsql = "INSERT INTO shopping (contentid ,opentime, saleitem, shopguide) VALUES (%s,%s,%s,%s)"
shoppingval = (int(contentid), str(opentime), str(saleitem), str(shopguide))
curs = conn.cursor()
curs.execute(shoppingsql, shoppingval)
if contenttypeid == 39 :
for element in table2:
catag = str(element.find("strong").get_text().rstrip(' :')).strip()
tempstr = str(element)
if catag == '대표 메뉴':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
firstmenu = cutting
continue
if catag == '어린이놀이방':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
kidsfacility = cutting
continue
if catag == '영업 시간':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
opentimefood = cutting
continue
if catag == '포장 가능':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
packing = cutting
continue
if catag == '예약안내':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
reservationfood = cutting
continue
if catag == '금연/흡연':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
smoking = cutting
continue
if catag == '취급 메뉴':
leftcut = tempstr.lstrip("<li><strong>{} : </strong>".format(catag))
cutting = leftcut.rstrip('</li>')
treatmenu = cutting
continue
restaurantsql = "INSERT INTO restaurant (contentid ,firstmenu, kidsfacility, opentimefood, packing, reservationfood, smoking, treatmenu) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)"
restaurantval = (int(contentid), str(firstmenu), str(kidsfacility), str(opentimefood), str(packing), str(reservationfood), str(smoking), str(treatmenu))
curs = conn.cursor()
curs.execute(restaurantsql, restaurantval)
conn.commit()
conn.close() | [
"powermiso@naver.com"
] | powermiso@naver.com |
3335164fecee55b52a9dcb67d791282b6f310d3e | 0e8090146b8e269afc826ca9310b8a517fe6c38b | /user/models.py | f3fd58a8c0a8671139218cc5646a00d8f310adad | [] | no_license | wxy72/1708project | 56f6cf6fcd397f3a851458156ef128fd284554b9 | 356c0876eeb5ef70046a463907d928b8089f991a | refs/heads/master | 2020-03-11T13:27:08.105569 | 2018-04-21T06:00:55 | 2018-04-21T06:00:55 | 130,025,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | from django.db import models
# Create your models here.
class User(models.Model):
    """Application user account (nickname/password based, separate from django.contrib.auth)."""
    # DB value -> human-readable label pairs for `sex`
    # (labels are Chinese: male / female / undisclosed).
    SEX = (
        ('M', '男'),
        ('F', '女'),
        ('U', '保密'),
    )
    # Unique display/login name.
    nickname = models.CharField(max_length=64, unique=True, null=False, blank=False)
    # NOTE(review): stored as a plain CharField — confirm callers hash the
    # password (e.g. django.contrib.auth.hashers) before saving.
    password = models.CharField(max_length=128, null=False, blank=False)
    # NOTE(review): ImageField() without upload_to — verify MEDIA_ROOT/MEDIA_URL
    # configuration covers avatar storage.
    icon = models.ImageField()
    age = models.IntegerField()
    # One of the SEX choices above ('M'/'F'/'U').
    sex = models.CharField(max_length=8, choices=SEX) | [
"18227109996@163.com"
] | 18227109996@163.com |
7331db6bbc26b8c2088cca46dffdc7622db5ffc5 | aa15002c5316b4c7e0a9563a40826057729e0b13 | /tensorflow/python/keras/layers/preprocessing/table_utils.py | f5397da1f3eb482547e40b4ab293d3051753f429 | [
"Apache-2.0"
] | permissive | kkimdev/tensorflow | 8238c5594ae44f084725ddf9b34d6d41645d4072 | 2fb75db6ad4f4a7f01ef4755b96b49f8eb6108db | refs/heads/master | 2020-07-07T18:09:40.662883 | 2020-05-14T18:59:11 | 2020-05-14T19:05:05 | 203,429,154 | 0 | 0 | Apache-2.0 | 2019-08-20T18:07:46 | 2019-08-20T18:07:46 | null | UTF-8 | Python | false | false | 7,427 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for working with tf.lookup tables in Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import gfile
class TableHandler(object):
  """Wrapper object that holds a lookup table and provides accessors."""
  def __init__(self, table, oov_tokens=None, use_v1_apis=False):
    # `table` is a tf.lookup-style mutable hash table; `oov_tokens` (if given)
    # are the bucket values substituted for out-of-vocabulary lookups.
    self.table = table
    self.use_v1_apis = use_v1_apis
    if oov_tokens is None:
      self.oov_tokens = oov_tokens
    else:
      # Normalize a scalar OOV token into a one-element list before casting
      # to the table's value dtype.
      if not isinstance(oov_tokens, (list, tuple, np.ndarray)):
        oov_tokens = [oov_tokens]
      self.oov_tokens = math_ops.cast(oov_tokens, table._value_dtype)  # pylint: disable=protected-access
  def data(self):
    """Return the table contents as an evaluated (keys, values) pair."""
    keys, values = self.table.export()
    return (self._eval(keys), self._eval(values))
  def vocab_size(self):
    """Return the number of entries currently stored in the table."""
    return self._eval(self.table.size())
  def clear(self):
    """Remove every key currently stored in the table."""
    keys, _ = self.table.export()
    self._run(self.table.remove(keys))
  def insert(self, keys, values):
    """Insert key->value pairs; raises RuntimeError on length mismatch."""
    if len(values) != len(keys):
      raise RuntimeError("Size mismatch between values and key arrays. "
                         "Keys had size %s, values had size %s." %
                         (len(keys), len(values)))
    self._run(self.table.insert(keys, values))
  def _replace_oov_buckets(self, inputs, lookups):
    """Replace the default OOV value with one of the OOV bucket values."""
    if self.oov_tokens is None:
      return lookups
    num_oov_elements = self.oov_tokens.shape.num_elements()
    # Map each input to a deterministic OOV bucket: modulo for integer ids,
    # a fast string hash otherwise.
    if inputs.dtype.is_integer:
      oov_indices = math_ops.floormod(inputs, num_oov_elements)
    else:
      oov_indices = string_ops.string_to_hash_bucket_fast(
          inputs, num_buckets=num_oov_elements)
    oov_values = array_ops.gather(self.oov_tokens, oov_indices)
    # Positions where the table returned its default value are the misses.
    oov_locations = math_ops.equal(lookups, self.table._default_value)  # pylint: disable=protected-access
    return array_ops.where(oov_locations, oov_values, lookups)
  def _ragged_lookup(self, inputs):
    """Perform a table lookup on a ragged tensor."""
    # The table lookup ops don't natively support ragged tensors, so if we have
    # a RT we need to use map_flat_values to look up every element.
    indexed_data = ragged_functional_ops.map_flat_values(
        self.table.lookup, inputs)
    indexed_data = ragged_functional_ops.map_flat_values(
        self._replace_oov_buckets, inputs, indexed_data)
    # Composite tensors can pass tensor values through, which will cause
    # errors if all operations in the TF graph do so. We can break this chain
    # with an identity here.
    return array_ops.identity(indexed_data)
  def _sparse_lookup(self, inputs):
    """Perform a table lookup on a sparse tensor."""
    values = self.table.lookup(inputs.values)
    values = self._replace_oov_buckets(inputs.values, values)
    indexed_data = sparse_tensor.SparseTensor(inputs.indices, values,
                                              inputs.dense_shape)
    # Composite tensors can pass tensor values through, which will cause
    # errors if all operations in the TF graph do so. We can break this chain
    # with an identity here.
    return array_ops.identity(indexed_data)
  def _tensor_lookup(self, inputs):
    """Perform a table lookup on a tf.tensor."""
    values = self.table.lookup(inputs)
    indexed_data = self._replace_oov_buckets(inputs, values)
    # (b/149446477): output does not preserve input shape.
    indexed_data.set_shape(inputs.shape)
    return indexed_data
  def lookup(self, inputs):
    """Perform a table lookup."""
    # Sparse tensors don't play nicely with tensor conversion, so we handle
    # them before attempting to convert lists or arrays to tensors.
    if isinstance(
        inputs, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return self._sparse_lookup(inputs)
    # Try to convert lists/arrays to tensors or RaggedTensors.
    inputs = ragged_tensor.convert_to_tensor_or_ragged_tensor(inputs)
    # Run the lookup operation on the converted tensor.
    if ragged_tensor.is_ragged(inputs):
      return self._ragged_lookup(inputs)
    else:
      return self._tensor_lookup(inputs)
  def _eval(self, tensor):
    # v1 graph mode needs an explicit session run; eager tensors expose .numpy().
    if self.use_v1_apis:
      return K.get_session().run(tensor)
    else:
      return tensor.numpy()
  def _run(self, op):
    # In eager mode the op has already executed; only v1 needs a session run.
    if self.use_v1_apis:
      K.get_session().run(op)
def get_vocabulary_from_file(vocabulary_path, encoding="utf-8"):
  """Read a vocabulary file and return its tokens, one per line, stripped."""
  vocab = []
  with gfile.GFile(vocabulary_path, "r") as reader:
    # readline() returns a falsy value once the file is exhausted; read
    # ahead of the loop so the termination test stays a simple truthiness check.
    text = reader.readline()
    while text:
      # Normalize to unicode (some readers may hand back raw bytes), then
      # drop surrounding whitespace including the trailing newline.
      token = text.decode(encoding, "ignore") if isinstance(text, bytes) else text
      vocab.append(token.strip())
      text = reader.readline()
  return vocab
def validate_vocabulary_is_unique(vocabulary):
  """Validate that a vocabulary contains no repeated tokens."""
  # Count every token once; anything seen more than once is a duplicate.
  counts = collections.Counter(vocabulary)
  repeated_items = [token for token, freq in counts.items() if freq > 1]
  if repeated_items:
    raise ValueError("The passed vocabulary has at least one repeated "
                     "term. Please uniquify your dataset. The repeated terms "
                     "are %s" % repeated_items)
def assert_same_type(expected_type, values, value_name):
  """Assert that 'values' is of type 'expected_type'."""
  expected_dtype = dtypes.as_dtype(expected_type)
  actual_dtype = dtypes.as_dtype(values.dtype)
  if expected_dtype != actual_dtype:
    raise RuntimeError("Expected %s type %s, got %s" %
                       (value_name, expected_type, values.dtype))
def convert_to_ndarray(x, dtype=None):
  """Convert 'x' to a numpy array, optionally casting safely to `dtype`.

  Lists/tuples are materialized with np.array(); other inputs (e.g. an
  existing ndarray) are passed through unchanged unless a cast applies.
  """
  array = np.array(x) if isinstance(x, (list, tuple)) else x
  # Short-circuit on the `is None` identity test first: the previous
  # `dtype not in (None, dtypes.string)` compared against None with `==`
  # and always evaluated `dtypes.string`, even when no dtype was requested.
  if dtype is not None and dtype != dtypes.string:
    # If the dtype is an integer, we do permissive casting. This allows
    # users to examine int32 data if the dtype is int64 without trouble.
    np_dtype = dtypes.as_dtype(dtype).as_numpy_dtype
    if np.can_cast(array.dtype, np_dtype):
      array = array.astype(np_dtype, casting="safe")
  return array
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
21df5fa635e3215f57ad25b52e1a15bb1b98609a | 57ac922518e3ad98308daea8cff02b72983de700 | /simulator_fast/simulator/SFM/force_helbing.py | fc81cbd6c6d49d2f610e1d821508354e71300658 | [] | no_license | montapo/ma_simulator | 3117c4b7bbe9e0c023ce8695bb90852cd51bff49 | 70ddc5f2dfb3ba4c8525fb336d2b3881d3e7568c | refs/heads/master | 2020-12-06T08:58:12.421857 | 2016-09-05T10:44:40 | 2016-09-05T10:44:40 | 67,412,315 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,219 | py | import numpy as np
from global_config import gc
def update_bucket(peds):
    """
    speed up simulation by bucket algorithm
    """
    # Spatial hash: one list ("bucket") per grid cell, xmesh columns wide.
    xmesh = gc["xmesh"]
    ymesh = gc["ymesh"]
    cell_count = xmesh * ymesh
    bucket = [[] for _ in range(cell_count)]
    for ped in peds:
        # Grid cell of this pedestrian (truncating division by the cell size).
        col = int(ped.x[0] / gc["bucket_scale"])
        row = int(ped.x[1] / gc["bucket_scale"])
        idx = col + row * xmesh
        # Pedestrians outside the grid are simply not bucketed.
        if idx < cell_count:
            bucket[idx].append(ped)
    return bucket
class Forces(object):
    """Social-Force-Model (Helbing) force terms acting on one pedestrian."""
    def __init__(self,props,ped):
        self.f = np.zeros(2)   # last computed total force (2D vector)
        self.props = props     # model parameters (tau, V_*, b_*)
        self.ped = ped         # the pedestrian this instance serves
        self.order = self.ped.destorder
    def f_destination(self):
        """
        force to destination
        """
        now = self.ped.destnow
        r = self.ped.dests[self.order[now]].center - self.ped.x
        r_norm = np.linalg.norm(r)
        # NOTE(review): r_norm == 0 (pedestrian exactly at the destination
        # center) would divide by zero here.
        e = r/r_norm
        # Relax the current velocity towards desired_v along `e` with time
        # constant tau (classic driving term).
        return (self.ped.desired_v*e - self.ped.v)/self.props.tau
    def f_rep(self,peds):
        """
        repulsive force between pedestrians
        """
        xmesh = gc["xmesh"]
        ymesh = gc["ymesh"]
        f_r = np.array([0.0, 0.0])
        # update_bucket
        bucket = update_bucket(peds)
        # get own bucket positions
        x = int(self.ped.x[0] / gc["bucket_scale"])
        y = int(self.ped.x[1] / gc["bucket_scale"])
        # list of neighbor bucket
        neighbors = [(x - 1 + (y - 1) * xmesh), (x + (y - 1) * xmesh), (x + 1 + (y - 1) * xmesh),
                     (x - 1 + y * xmesh), (x + y * xmesh), (x + 1 + y * xmesh),
                     (x - 1 + (y + 1) * xmesh), (x + (y + 1) * xmesh), (x + 1 + (y + 1) * xmesh)]
        for neighbor in neighbors:
            # neighbor bucket
            if 0 <= neighbor < xmesh * ymesh:
                # bucket[neighbor] has information of peds.
                for ped_other in bucket[neighbor]:
                    if (self.ped != ped_other and ped_other.isvalid):
                        r = self.ped.x - ped_other.x
                        r_norm = np.linalg.norm(r)
                        # Exponentially decaying repulsion magnitude.
                        _f =self.props.V_to_ped * \
                            np.exp(-r_norm / self.props.b_to_ped)
                        # TODO set view angle
                        v_norm = np.linalg.norm(self.ped.v)
                        if v_norm == 0 or (np.dot(r, self.ped.v)) / (r_norm * v_norm) > 0:
                            f_r += (_f / r_norm) * r
                        # NOTE(review): the line below also runs for every
                        # neighbour, so neighbours satisfying the condition
                        # above are counted twice. Confirm whether this 2x
                        # in-view weighting is intentional or a duplicate line.
                        f_r += (_f / r_norm) * r
        return f_r
    def f_wall(self):
        """
        repulsive force from wall
        """
        f_w = np.zeros(2)
        for wall in gc["walls"]:
            vertices = wall.vertices
            invalid_side = wall.invalid_side
            # Only walls whose polygon contains the pedestrian contribute.
            if self.ped.is_inside(vertices,self.ped.x):
                for i in range(len(vertices)):
                    # Skip wall edges explicitly marked as non-repelling.
                    if len(invalid_side)!=0 and \
                        (tuple(vertices[i-1]),tuple(vertices[i])) in invalid_side:
                        continue
                    line = vertices[i]-vertices[i-1]
                    vec = self.ped.x - vertices[i-1]
                    # distance between point and line
                    distance = abs(np.cross(line,vec)/np.linalg.norm(line))
                    n_tmp = np.array([line[1],-line[0]])
                    # normal vector
                    n = n_tmp/np.linalg.norm(n_tmp)
                    f_w += n*self.props.V_to_obst * np.exp(-distance / self.props.b_to_obst)
        return f_w
    def f_obstacle(self):
        """
        repulsive force from obstacles
        """
        f_o = np.zeros(2)
        obstacles = gc["obstacles"]
        for obs in obstacles:
            # Radial push away from each circular obstacle's surface.
            vec = self.ped.x - obs.center
            vec_normal = vec/np.linalg.norm(vec)
            distance = np.abs(np.linalg.norm(vec)-obs.r)
            f_o += vec_normal * \
                   obs.V * np.exp(-distance / obs.b)
        return f_o
    def f_sum(self,peds):
        """Total force: destination + pedestrian/wall/obstacle repulsion + noise."""
        f_dest = self.f_destination()
        f_rep = self.f_rep(peds)
        f_wall = self.f_wall()
        f_obst = self.f_obstacle()
        # NOTE(review): additive uniform noise in [0, 0.5) per axis makes the
        # simulation non-deterministic by design.
        f_rand = np.random.rand(2)*0.5 # heuristic
        self.f = f_obst + f_wall + f_dest + f_rep + f_rand
        return self.f
| [
"imai.takeaki@lab.ntt.co.jp"
] | imai.takeaki@lab.ntt.co.jp |
62cb4ccb18c022df619c3504d361faf82bf1a3c3 | c01ff5df4e04a39e2da9bd418278e1e1af9a65d0 | /Learn Python the Hard Way/python 2.x/ex02.py | 8bd66c1d52529d9d944d1e4f4f3dad7ca61d6a26 | [] | no_license | JustinTLee/Textbook-solutions | 1d6bce668f88f5d308bc5483ff9c2b9aa6c4e5cc | 5477c44228d1b9896661f7bf987af7073392b9b7 | refs/heads/master | 2021-01-22T07:58:09.890716 | 2019-01-14T09:13:57 | 2019-01-14T09:13:57 | 102,320,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # A comment, this is so you can read your program later
# Anything after the # is ignored by python
print "I could have code like this." # and the comment after is ignored
# You can also use a comment to "disable" or comment out of a piece of code:
# print "This won't run."
# NOTE: Python 2 print-statement syntax; under Python 3 this would need print(...).
print "This will run."
| [
"justinlee.gwu@gmail.com"
] | justinlee.gwu@gmail.com |
ae2f3de1b7eacdc7cfaca05fea27de5ee8f08410 | da1d21bb8d0760bfba61cd5d9800400f928868aa | /misc/scripts/category_transformation_001.py | 0226537d27ec40ac6726d5b97eb9d427f608ba0e | [] | no_license | biznixcn/WR | 28e6a5d10f53a0bfe70abc3a081c0bf5a5457596 | 5650fbe59f8dfef836503b8092080f06dd214c2c | refs/heads/master | 2021-01-20T23:53:52.887225 | 2014-05-13T02:00:33 | 2014-05-13T02:00:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | # -*- coding: utf-8 -*-
from circuits.models import Circuit
from circuits.utils import CircuitCategory
"""
Transformaciones
Literary + Culture + Music + Art + Academic & Education => Arts & Culture
3 4 8 10 19 => 4
Lifestyle + Green + Fashion + Design + Technology + Business + Geek + Spiritual + Entertainment => Lifestyle
18 7 6 11 16 14 17 21 25 18
"""
# Fix: the module imports `Circuit` (singular); `Circuits` raised a NameError.
# Merge Literary(3) / Music(8) / Art(10) / Academic & Education(19) into
# Arts & Culture (4). Per-instance save() keeps any model signals firing.
for circuit in Circuit.objects.filter(category__in=[3, 8, 10, 19]):
    circuit.category = 4
    circuit.save()
# Merge Green(7) / Fashion(6) / Design(11) / Technology(16) / Business(14) /
# Geek(17) / Spiritual(21) / Entertainment(25) into Lifestyle (18).
for circuit in Circuit.objects.filter(category__in=[7, 6, 11, 16, 14, 17, 21, 25]):
    circuit.category = 18
    circuit.save()
| [
"mbc@Mathiass-MacBook-Pro.local"
] | mbc@Mathiass-MacBook-Pro.local |
82d19a665695af1ec71114ea90b529002297f702 | 90fae96be4f5237f3a33d3df6f217fdf994454d3 | /Proyecto3/settings.py | d252b3e6a0e77dd7e4d676f09b8d9a42d262e00e | [] | no_license | gabrez1985/proyectoheroku0320200610 | ce4c45c865af5a13dfc31a794d7a4b58eb0e6b67 | c5613ea09c3066dce7920e154f5d5b68b9b1eeb2 | refs/heads/master | 2023-08-16T07:17:10.224428 | 2020-06-11T03:56:48 | 2020-06-11T03:56:48 | 271,442,491 | 0 | 0 | null | 2021-09-22T19:12:25 | 2020-06-11T03:27:23 | Python | UTF-8 | Python | false | false | 3,695 | py | """
Django settings for Proyecto3 project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this secret key is committed to source control; rotate it and
# load it from the environment (e.g. decouple's config()) instead.
SECRET_KEY = '1cu$33tq#vg%s@vvq^6%8+xj)9b&fy)rb!=rb_38ld@w8s15lk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): '*' accepts any Host header; list the real domain names in
# production to prevent Host-header attacks.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'Proyecto3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Proyecto3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Third-party helpers: dj-database-url parses a DATABASE_URL connection string,
# python-decouple reads it from the environment / .env file.
import dj_database_url
from decouple import config
# re-enable if deploying to Heroku
DATABASES = {
    'default': dj_database_url.config(
        default=config('DATABASE_URL')
    )
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| [
"noreply@github.com"
] | gabrez1985.noreply@github.com |
8c5db1946658ab443d7300038473c82702e1de90 | 04125b74273ad8b648343691565ab0cd6e25fa50 | /image_comp_test.py | b32b911b4decb827e8360a480808f031846c8d3a | [] | no_license | alpha0080/spineToolAdv | 32918fa10b47ec9f19586b8878b243afd9dae945 | c394e382502c11fb2b19f86f1e6352dee76444b5 | refs/heads/master | 2021-07-25T09:10:38.883564 | 2018-12-11T00:56:12 | 2018-12-11T00:56:12 | 142,319,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | import os, math,time
try:
sys.path.append("C:/Program Files/Pixar/RenderManProServer-22.1/lib/python2.7/Libs/ite-packages")
#sys.path.append("C:/Program Files/Pixar/RenderManProServer-21.7/lib/python2.7/Lib/site-packages")
import ice
except:
pass
import ice
max_frames_row = 10.0
frames = []
tile_width = 0
tile_height = 0
spritesheet_width = 0
spritesheet_height = 0
folder = "C:/Temp/testImage/1"
files = os.listdir(folder)
files.sort()
#print(files)
for i in files:
filename = folder +'/' +i
image = ice.Load(filename)
imageMetaData = image.GetMetaData()
frames.append(imageMetaData)
print frames
# imageSize = imageMetaData['Original Size']
#imageWidth = int(imageMetaData['Original Size'].split(" ")[0].split("(")[1])
#imageHeight = int(imageMetaData['Original Size'].split(" ")[1].split(")")[0])
###ref https://minzkraut.com/2016/11/23/making-a-simple-spritesheet-generator-in-python/ | [
"alpha@mail.chungyo.net"
] | alpha@mail.chungyo.net |
d028c95e014ada7805a874b0f2bb713f1fcb053e | 119d44cfa7ff995a055c72c16be80b907bc1737c | /palindrome.py | 79eabdd020b7aabbd3ceae6ec4d489fc3218ce74 | [] | no_license | soundarsurya/python-code-set2 | cdedd81fe8ec06d52b071144682ec8eabb2fbe46 | 7f7b974188d2c717bd803aa2bc238838417ce628 | refs/heads/master | 2021-05-13T22:39:56.141195 | 2018-09-28T08:29:12 | 2018-09-28T08:29:12 | 116,493,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | num = input('Enter any number : ')
# A string is a palindrome exactly when it equals its own reverse.
print('yes' if num == num[::-1] else 'No')
| [
"noreply@github.com"
] | soundarsurya.noreply@github.com |
b23308cd96ae9100d23c3da9bc98fb4f625f57ba | 35c4cac7cd28997411b76a8db6724a4cae271b5d | /views.py | 04a14dca3990169fe00cb2d47605a9d022c4f691 | [] | no_license | ws7854579/apiTest | c3e8572b04038a273245633689959e8fbe4a78a4 | 8f16fabae0ed770c922dafd7bb2f54dc07ce5f34 | refs/heads/master | 2020-03-22T02:54:51.577373 | 2019-01-15T03:30:56 | 2019-01-15T03:30:56 | 139,401,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,375 | py | # _*_ coding: utf-8 _*_
import MySQLdb
import method
from logger import Logger
from flask import request,jsonify,session,abort,flash,url_for,redirect,g,render_template
import json,time,os,math
mylogger = Logger('views').getlog()
#添加接口到数据库
def add_entry():
    """Insert a new API definition (from the submitted form) into test.api_list."""
    mylogger.info('进入add_entry()方法')
    if not session.get('logged_in'):
        abort(401)
    # NOTE(review): logs the 'url' field under the 'url_name' label.
    mylogger.info('url_name:%s'%request.form['url'])
    cur = g.db.cursor()
    # Parameterized query: the previous string-formatted INSERT interpolated
    # raw form fields into the SQL text (SQL injection risk).
    cur.execute(
        "insert into test.api_list (url_name,url,method,params_from_sql,type,"
        "md5_params,url_params,cache_table) values(%s,%s,%s,%s,%s,%s,%s,%s)",
        (request.form['url_name'], request.form['url'], request.form['method'],
         request.form['params_from_sql'], request.form['type'],
         request.form['md5_params'], request.form['url_params'],
         request.form['cache_table']))
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('start_test'))
#添加接口页面
def add_mock_list():
    """Insert a new mock definition (form-driven) into test.mock_list."""
    mylogger.info('进入add_mock_list')
    if not session.get('logged_in'):
        abort(401)
    date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    cur = g.db.cursor()
    d_json = request.form['res_data']
    # Parameterized query: the old %-formatted SQL plus manual escape_string
    # was injection-prone; the driver now handles all quoting/escaping.
    sql = ("insert into test.mock_list "
           "(date,url_name,url,req_form,method,req_data,req_blob) "
           "values(%s,%s,%s,%s,%s,%s,%s)")
    params = (date, request.form['url_name'], request.form['url_path'],
              request.form['req_form'], request.form['req_method'],
              request.form['req_data'], d_json)
    mylogger.info(sql)
    cur.execute(sql, params)
    g.db.commit()
    return redirect(url_for('mock_test'))
#update mock详情
def update_mock_list():
    """Update the mock row previously selected in mock_test() (g_list_id)."""
    mylogger.info('进入update_mock_list方法')
    # NOTE(review): unlike the other mutating views, this one performs no
    # logged-in check — confirm whether that is intentional.
    cur = g.db.cursor()
    ud_json = request.form['update_res_data']
    # Parameterized UPDATE instead of %-formatting user input into the SQL
    # string (SQL injection risk).
    sql = ("update test.mock_list set url_name=%s,url=%s,req_form=%s,"
           "req_data=%s,req_blob=%s where id=%s")
    params = (request.form['update_url_name'], request.form['update_url_path'],
              request.form['update_req_form'], request.form['update_req_data'],
              ud_json, g_list_id)
    mylogger.info(sql)
    cur.execute(sql, params)
    g.db.commit()
    # NOTE(review): g_list_id is a module-level global set by mock_test(); this
    # breaks with concurrent users — pass the row id in the form instead.
    return redirect(url_for('mock_test'))
#登录页面
def login():
    """Show the login form; on POST validate credentials and start a session."""
    # NOTE(review): `app` is not among this module's visible imports — confirm
    # where the Flask application object comes from.
    error = None
    if request.method != 'POST':
        return render_template('login.html', error=error)
    if request.form['username'] != app.config['USERNAME']:
        error = 'Invalid username'
    elif request.form['password'] != app.config['PASSWORD']:
        error = 'Invalid password'
    else:
        session['logged_in'] = True
        flash('You were logged in')
        return redirect(url_for('main'))
    return render_template('login.html', error=error)
#主页面
def main():
    """Landing page: show the full test history for logged-in users."""
    mylogger.info('来到了主界面===========================')
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    cur = g.db.cursor()
    cur.execute('select id,date,status,report from test_history order by id asc')
    entries = []
    for row in cur.fetchall():
        entries.append({'id': row[0], 'date': row[1], 'status': row[2], 'report': row[3]})
    return render_template('main.html',entries=entries)
#测试历史记录
def test_history():
    """Paginated view of past test runs (10 rows per page)."""
    mylogger.info('来到了测试历史记录页面===================')
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    p = request.args.get('p', '1')
    limit_start = (int(p) - 1) * 10
    history = method.get_history_list(limit_start)
    testList = []
    for a, row in enumerate(history, start=1):
        testList.append(dict(date=row[0], status=row[1], pass_num=row[2],
                             fail_num=row[3], report=row[4], history_id=a))
    history_list = len(history)
    # Fixed page-count math: the old int(math.modf(n/10)[1]) + 1 produced an
    # extra empty page whenever n was an exact multiple of 10.
    pageNum = max(1, int(math.ceil(history_list / 10.0)))
    # NOTE(review): `history` is already limited to one page by
    # method.get_history_list(), so len(history) <= 10 here; the page count
    # should really come from a total-row count.
    page_dic = list(range(1, pageNum + 1))
    mylogger.info(testList)
    return render_template('test_history.html',testList=testList,page_num=page_dic,p=int(p))
#开始测试页面
def start_test():
    """Paginated list of registered APIs (10 per page) for launching tests."""
    mylogger.info('来到了start_test页面=========================')
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    p = request.args.get('p','1')
    select_value = request.args.get('select_value')
    mylogger.info('获取到的select_value为:%s'%select_value)
    mylogger.info('当前展示第%s页数据'%p)
    apiNum = method.get_page()
    mylogger.info("总共有%s个api"%apiNum)
    # Fixed page-count math: ceil(total/10) instead of int(math.modf(...)) + 1,
    # which added a spurious empty page when apiNum was an exact multiple of 10.
    pageNum = max(1, int(math.ceil(apiNum / 10.0)))
    mylogger.info("================当前的页数为%s========="%p)
    mylogger.info("===============应分为%s页============="%pageNum)
    limit_start = (int(p)-1)*10
    page_dic = list(range(1,pageNum+1))
    cur = g.db.cursor()
    # LIMIT offset is derived from the page number; the query text is static.
    sql = 'select id,url_name,url,cache_table from api_list limit {0},10'.format(limit_start)
    cur.execute(sql)
    listApis = [dict(id=row[0],url_name=row[1],url=row[2],cache_table=row[3]) for row in cur.fetchall()]
    mylogger.info(listApis)
    return render_template('start_test.html',listApis=listApis,page_dic=page_dic,p=int(p))
#添加新接口
def add_api():
    """Render the "add new API" form (login required)."""
    mylogger.info('来到了添加新接口界面=========================')
    if session.get('logged_in'):
        return render_template('add_api.html')
    return redirect(url_for('login'))
#logout界面
def logout():
    """Clear the login flag, queue a flash message, return to the login page."""
    flash('You were logged out')
    session.pop('logged_in', None)
    return redirect(url_for('login'))
#mock测试界面
def mock_test():
    """Mock-API page: paginated list, or JSON detail when ?listId= is given."""
    mylogger.info('来到了Mock测试界面=========================')
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    # Pagination + DB fetch.
    p = request.args.get('p', '1')
    list_id_web = request.args.get('listId',None)
    mylogger.info('listId:%s'%list_id_web)
    limit_start = (int(p) - 1) * 10
    mockList_bf = method.get_mock_data(limit_start)
    testList = []
    for a, row in enumerate(mockList_bf, start=1):
        testList.append(dict(url_name=row[0], url_path=row[1], req_blob=str(row[2]),
                             list_id=int(row[3]), method=row[4], req_form=row[5],
                             req_data=row[6], history_id=a))
    history_list = len(mockList_bf)
    # Fixed page-count math: ceil instead of int(math.modf(...)) + 1, which
    # over-counted by one page on exact multiples of 10.
    pageNum = max(1, int(math.ceil(history_list / 10.0)))
    page_dic = list(range(1, pageNum + 1))
    mylogger.info(testList)
    if list_id_web is not None:  # idiom fix: was "!= None"
        mylogger.info('method==get,返回另一个界面')
        for mockApi in testList:
            mylogger.info(mockApi)
            mylogger.info('此次的list_id:%s'%mockApi['list_id'])
            if str(list_id_web) == str(mockApi['list_id']):
                global g_list_id
                g_list_id = mockApi['list_id']
                mylogger.info('找到了相同的list_id')
                # (dropped a dead `json.dumps(mockApi)` whose result was unused)
                return jsonify(mockApi)
        # NOTE(review): when no row matches listId this falls through and the
        # view returns None (Flask 500) — decide on a 404 / default response.
    else:
        return render_template('mock_test.html',mockList=testList,page_num=page_dic,p=int(p),list_id=list_id_web)
| [
"sunsuwei@ccx.cn"
] | sunsuwei@ccx.cn |
2d51f24f75bb3b6b21fb1210c3409e1c3063acde | f7778bf3b8173915c97193f51ff8a1ac2260a68a | /Section 3 code files/Code/webdirbuster.py | 8645d7b32c1044ce087596ec1ac46444c4785168 | [
"MIT"
] | permissive | PacktPublishing/Python-for-Automating-Information-Security | 35f5ab480c430788e881017ec8c919be1524cc30 | d6d1eaa053c3a5f5b103e17fefe8b4d9b33c0858 | refs/heads/master | 2023-05-25T12:34:43.912975 | 2023-01-30T09:16:51 | 2023-01-30T09:16:51 | 245,961,846 | 26 | 24 | MIT | 2023-05-22T22:44:20 | 2020-03-09T06:39:43 | Python | UTF-8 | Python | false | false | 3,798 | py | import argparse
import json
import re
import requests
from typing import List
from urllib3.exceptions import InsecureRequestWarning
quiet = False
def print_message(message: str):
    """
    Print `message` to STDOUT unless the module-level quiet flag is set.

    :param message: message to print
    :return: None
    """
    global quiet
    if quiet:
        return
    print(message)
def enumerate(base_url: str, dirs_file: str, recurse=False) -> List:
    """
    Enumerate valid directories reachable via HTTP/HTTPS.

    NOTE: intentionally shadows the builtin enumerate() within this module.

    :param base_url: base URL to search
    :param dirs_file: file containing names of commonly hosted directories
    :param recurse: whether or not to recursively enumerate discovered directories
    :return: list containing valid, reachable URLs
    """
    # suppress insecure HTTPS warning
    requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
    valid_urls = []
    with open(dirs_file, 'r') as f:
        for raw_line in f:
            name = raw_line.strip()
            # An empty line probes the base URL itself; otherwise join the
            # directory name, avoiding a doubled slash.
            if name == '':
                test_url = base_url
            elif re.search(r'/$', base_url):
                test_url = '{}{}'.format(base_url, name)
            else:
                test_url = '{}/{}'.format(base_url, name)
            print_message('Checking {}'.format(test_url))
            result = requests.get(test_url, verify=False)
            if result.status_code == 200:
                found = result.url
                print_message('Found URL: {}'.format(found))
                valid_urls.append(found)
                # Recurse into the discovered directory (but never on the
                # empty-name probe of the base URL itself).
                if recurse and name != '':
                    valid_urls.extend(enumerate(found, dirs_file, recurse))
    return valid_urls
def main():
    """
    CLI entry point: parse arguments, enumerate directories, emit results.

    :return: None
    """
    global quiet
    parser = argparse.ArgumentParser(description='A smart-ish web directory enumeration tool.')
    parser.add_argument('url', help='Base URL to search (must start with http:// or https://)')
    parser.add_argument('dirs_file', help='File containing directory names to enumerate')
    parser.add_argument('-r', '--recurse', help='Recursively enumerate subdirectories of discovered directories',
                        action='store_true')
    parser.add_argument('-o', '--output', help='Output file to write to')
    parser.add_argument('-f', '--format', help='Output format (default is json)', default='json',
                        choices=['json', 'plain'])
    parser.add_argument('-q', '--quiet', help='Do not print informative messages', action='store_true')
    args = parser.parse_args()

    base_url = args.url
    if not re.search(r'^https?://', base_url):
        print('Error, url parameter must begin with either http:// or https://')
        return
    output = args.output
    output_format = args.format
    quiet = args.quiet

    print_message('Enumerating web directories.')
    valid_urls = list(set(enumerate(base_url, args.dirs_file, args.recurse)))

    # Emit results to the chosen sink in the chosen format.
    if output:
        print_message('Writing output to {}.'.format(output))
        with open(output, 'w') as of:
            if output_format == 'json':
                json.dump(valid_urls, of, indent=2)
            else:
                for line in valid_urls:
                    of.write('{}\n'.format(line))
        return
    print_message('Writing output to STDOUT.')
    if output_format == 'json':
        print(json.dumps(valid_urls, indent=2))
    else:
        for line in valid_urls:
            print(line)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | PacktPublishing.noreply@github.com |
34fc5d7be9fdfc130eb473c15b4b7bdb80a10ee2 | 463c053bcf3f4a7337b634890720ea9467f14c87 | /python/ray/workflow/tests/test_lifetime.py | ece91c8445d32ee09fe290f16ece2e35641d73c0 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 1,596 | py | import os
import ray
import time
import pytest
from ray._private.test_utils import (
run_string_as_driver_nonblocking,
run_string_as_driver,
)
from ray.tests.conftest import * # noqa
from ray import workflow
from unittest.mock import patch
driver_script = """
import time
import ray
from ray import workflow
@ray.remote
def foo(x):
time.sleep(1)
if x < 20:
return workflow.continuation(foo.bind(x + 1))
else:
return 20
if __name__ == "__main__":
ray.init(storage="{}")
output = workflow.create(foo.bind(0)).run_async(workflow_id="driver_terminated")
time.sleep({})
"""
def test_workflow_lifetime_1(workflow_start_cluster):
    """Case 1: the driver exits normally; its workflow output stays readable."""
    address, storage_uri = workflow_start_cluster
    with patch.dict(os.environ, {"RAY_ADDRESS": address}):
        ray.init(storage=storage_uri)
        # The driver sleeps 5s after submitting and finishes cleanly on its own.
        run_string_as_driver(driver_script.format(storage_uri, 5))
        result_ref = workflow.get_output("driver_terminated")
        assert ray.get(result_ref) == 20
def test_workflow_lifetime_2(workflow_start_cluster):
    """Case 2: the driver is killed mid-run; the workflow still completes."""
    address, storage_uri = workflow_start_cluster
    with patch.dict(os.environ, {"RAY_ADDRESS": address}):
        ray.init(storage=storage_uri)
        # The driver would sleep 100s; kill it 10s in, while the workflow runs.
        proc = run_string_as_driver_nonblocking(driver_script.format(storage_uri, 100))
        time.sleep(10)
        proc.kill()
        time.sleep(1)
        result_ref = workflow.get_output("driver_terminated")
        assert ray.get(result_ref) == 20
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| [
"noreply@github.com"
] | pdames.noreply@github.com |
508a0a7679e3689a1426c29f3e7813d7b463ade0 | 800af47c01db36b902b290e7a25be86b636ac2c2 | /finn/components/text/InputBox.py | e3b2d00a77d280082198e2b71e3f10c4d200bfc3 | [] | no_license | bearhockey/finn | 6a4b85115ff41106ce1031ac3a8a62222c857469 | 7747c4d8a0877016d09ccd7b1fe73381690952f5 | refs/heads/master | 2021-01-12T17:56:39.050603 | 2018-06-26T16:36:16 | 2018-06-26T16:36:16 | 71,306,397 | 0 | 0 | null | 2018-06-26T16:36:17 | 2016-10-19T01:23:17 | Python | UTF-8 | Python | false | false | 1,500 | py | import pygame
from finn.components.text.TextBox import TextBox
from finn.components.text.TextCursor import TextCursor
class InputBox(TextBox):
    """A TextBox that accepts keyboard input while active.

    Typed characters are appended to ``message`` (capped at ``text_limit``
    characters when a limit is given), optionally filtered through
    ``allowed_characters``.
    """

    def __init__(self, rect, box_color=None, border_color=None, highlight_color=None, active_color=None, message='',
                 text_color=None, font=None, text_limit=None, allowed_characters=None):
        TextBox.__init__(self, rect, box_color=box_color, border_color=border_color, highlight_color=highlight_color,
                         active_color=active_color, message=message, text_color=text_color, font=font)
        # Whether this box currently receives keystrokes.
        self.active = False
        # Maximum message length; None means unlimited.
        self.text_limit = text_limit
        # Iterable of permitted key codes; None permits every key.
        self.allowed_characters = allowed_characters

    def update(self, key, mouse, offset=(0, 0)):
        """Feed key input to poll(), then delegate to TextBox.update."""
        if key[0] or key[1]:
            self.poll(key)
        return TextBox.update(self, key, mouse, offset)

    def draw(self, screen):
        """Draw the box and, when active, a text cursor after the message."""
        TextBox.draw(self, screen)
        if self.active:
            TextCursor.draw(screen, (self.rect.left + self.font.size(self.message)[0], self.rect.top), self.font)

    def poll(self, in_key):
        """Apply one (key code, unicode char) pair.

        Backspace deletes the last character; RETURN returns the current
        message; any other key is appended subject to the limit/filter.
        Returns None unless RETURN was pressed.
        """
        if self.active:
            if in_key[0] == pygame.K_BACKSPACE:
                self.message = self.message[0:-1]
            elif in_key[0] == pygame.K_RETURN:
                return self.message
            elif self.text_limit is None or len(self.message) < self.text_limit:
                # BUGFIX: text_limit defaults to None, and comparing
                # len(...) < None raised TypeError on Python 3 (and silently
                # rejected all input on Python 2).  None now means unlimited.
                if not self.allowed_characters or in_key[0] in self.allowed_characters:
                    self.message += in_key[1]
        return None
| [
"bobmanworld@yahoo.com"
] | bobmanworld@yahoo.com |
0432d7381f05e80df54e99bec5376c80ce9d8516 | 76ede31afcbdb3dce371e9b1d224b72a1ed1aadd | /demo.py | b7bb764ec0180b9d9bf38bceb49c162c19e8f2e5 | [] | no_license | marinarecuta/bibles | 8d885933f7bc351b892099cff319a75c7b85d099 | 5b4200961b5e85862cf0df327618da1408bfb4c1 | refs/heads/main | 2023-01-27T15:58:08.085458 | 2020-12-15T19:06:58 | 2020-12-15T19:06:58 | 321,762,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import requests
BASE_URL = "https://api.scripture.api.bible/v1"
ENDPOINT = "/swagger.json"
def get_info_from_api():
    """Fetch the API's swagger description and return the raw HTTP response."""
    return requests.get(BASE_URL + ENDPOINT)
# Exercise the endpoint once at import time and dump debugging information.
# NOTE(review): this issues two live HTTP requests whenever the module is
# imported — presumably intentional for a demo script.
INFO = get_info_from_api()
STATUS_CODE = INFO.status_code
BODY = INFO.json()
print(STATUS_CODE, INFO.ok)
print(get_info_from_api())
print(dir(INFO))
print(BODY)
print(type(BODY))
"noreply@github.com"
] | marinarecuta.noreply@github.com |
56da124f05b01e70233a87435baf0156aca9e476 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /own_year/new_thing.py | 49f9081472a90473e35618be695869cba50090c3 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
#! /usr/bin/env python
def way_or_great_work(str_arg):
    """Print *str_arg* (via same_way_or_group), then print 'year'."""
    same_way_or_group(str_arg)
    print('year')
def same_way_or_group(str_arg):
    """Print *str_arg* to stdout."""
    print(str_arg)
if __name__ == '__main__':
    # Demo entry point: prints the argument followed by 'year'.
    way_or_great_work('few_point_or_different_thing')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
6b7f77fe2120e75b0a3c0d682447587076ab6d0d | f93f03dac8e7340f35ddb8ac75e9fdbb19b935a8 | /toontown/golf/DistributedGolfHole.py | 0b98a83f3d638d695be462c2a35497eeb5967066 | [] | no_license | toontown-classic/toontown-otp-ai | 5d07f26658ca23e52c65254f23a70cbc5936ae6d | b0be971b4689f811f6abacb7af33242b06d8f9be | refs/heads/develop | 2022-03-10T10:42:37.203938 | 2022-03-08T05:56:52 | 2022-03-08T05:56:52 | 158,168,681 | 5 | 5 | null | 2022-02-26T19:32:31 | 2018-11-19T05:50:36 | Python | UTF-8 | Python | false | false | 71,952 | py | import math
import random
import time
from pandac.PandaModules import TextNode, BitMask32, Point3, Vec3, Vec4, deg2Rad, Mat3, NodePath, VBase4, OdeTriMeshData, OdeTriMeshGeom, OdeRayGeom, CollisionTraverser, CollisionSegment, CollisionNode, CollisionHandlerQueue
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPGlobals
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownTimer
from direct.gui.DirectGui import DirectWaitBar, DGG, DirectLabel
from direct.task import Task
from direct.fsm.FSM import FSM
from toontown.minigame import ArrowKeys
from direct.showbase import PythonUtil
from toontown.golf import BuildGeometry
from toontown.golf import DistributedPhysicsWorld
from toontown.golf import GolfGlobals
from direct.interval.IntervalGlobal import Sequence, Parallel, LerpScaleInterval, LerpFunctionInterval, Func, Wait, SoundInterval, ParallelEndTogether, LerpPosInterval, ActorInterval, LerpPosHprInterval, LerpColorScaleInterval, WaitInterval
from direct.actor import Actor
from toontown.golf import GolfHoleBase
from toontown.distributed import DelayDelete
class DistributedGolfHole(DistributedPhysicsWorld.DistributedPhysicsWorld, FSM, GolfHoleBase.GolfHoleBase):
defaultTransitions = {'Off': ['Cleanup', 'ChooseTee', 'WatchTee'],
'ChooseTee': ['Aim', 'Cleanup'],
'WatchTee': ['WatchAim',
'Cleanup',
'WatchTee',
'ChooseTee',
'Aim'],
'Wait': ['Aim',
'WatchAim',
'Playback',
'Cleanup',
'ChooseTee',
'WatchTee'],
'Aim': ['Shoot',
'Playback',
'Cleanup',
'Aim',
'WatchAim'],
'WatchAim': ['WatchAim',
'WatchShoot',
'Playback',
'Cleanup',
'Aim',
'ChooseTee',
'WatchTee'],
'Playback': ['Wait',
'Aim',
'WatchAim',
'Cleanup',
'ChooseTee',
'WatchTee'],
'Cleanup': ['Off']}
id = 0
notify = directNotify.newCategory('DistributedGolfHole')
unlimitedAimTime = base.config.GetBool('unlimited-aim-time', 0)
unlimitedTeeTime = base.config.GetBool('unlimited-tee-time', 0)
golfPowerSpeed = base.config.GetDouble('golf-power-speed', 3)
golfPowerExponent = base.config.GetDouble('golf-power-exponent', 0.75)
DefaultCamP = -16
MaxCamP = -90
def __init__(self, cr):
self.notify.debug('Hole Init')
DistributedPhysicsWorld.DistributedPhysicsWorld.__init__(self, base.cr)
GolfHoleBase.GolfHoleBase.__init__(self, 1)
FSM.__init__(self, 'Golf_%s_FSM' % self.id)
self.currentGolfer = 0
self.ballDict = {}
self.ballShadowDict = {}
self.holeNodes = []
self.golfCourse = None
self.golfCourseRequest = None
self.holePositions = []
self.timer = None
self.teeTimer = None
self.aimStart = None
self.titleLabel = None
self.teeInstructions = None
self.aimInstructions = None
self.powerReminder = None
self.lastTimeHeadingSent = 0
self.lastTempHeadingSent = 0
self.holdCycleTime = 0.0
self.inPlayBack = 0
self.swingInterval = None
self.sfxInterval = None
self.isLookingAtPutt = False
self.clubs = {}
self.camInterval = None
self.flyOverInterval = None
self.needToDoFlyOver = True
self.translucentLastFrame = []
self.translucentCurFrame = []
self.localMissedSwings = 0
self.localToonHitControl = False
self.warningInterval = None
self.playBackDelayDelete = None
self.aimMomentum = 0.0
self.lastBumpSfxPos = Point3(0, 0, 0)
self.__textGen = TextNode('golfHoleText')
self.__textGen.setFont(ToontownGlobals.getSignFont())
self.__textGen.setAlign(TextNode.ACenter)
if TTLocalizer.getLanguage() in ['castillian',
'japanese',
'german',
'portuguese',
'french']:
self.__textGen.setGlyphScale(0.7)
self.avIdList = []
self.enterAimStart = 0
return
def generate(self):
self.notify.debug('Hole Generate')
DistributedPhysicsWorld.DistributedPhysicsWorld.generate(self)
self.golfPowerTaskName = self.uniqueName('updateGolfPower')
def announceGenerate(self):
DistributedPhysicsWorld.DistributedPhysicsWorld.announceGenerate(self)
self.setup()
self.sendReady()
self.request('Off')
index = 1
for avId in self.avIdList:
self.createBall(avId, index)
self.createClub(avId)
index += 1
if self.avIdList:
avId = self.avIdList[0]
self.currentGolfer = avId
self.currentGolferActive = False
def delete(self):
self.removePlayBackDelayDelete()
self.request('Cleanup')
taskMgr.remove(self.golfPowerTaskName)
DistributedPhysicsWorld.DistributedPhysicsWorld.delete(self)
GolfHoleBase.GolfHoleBase.delete(self)
if hasattr(self, 'perfectIval'):
self.perfectIval.pause()
del self.perfectIval
self.golfCourse = None
if self.teeInstructions:
self.teeInstructions.destroy()
self.teeInstructions = None
if self.aimInstructions:
self.aimInstructions.destory()
self.aimInstructions = None
if self.powerReminder:
self.powerReminder.destroy()
self.powerReminder = None
if self.swingInterval:
self.swingInterval.pause()
self.swingInterval = None
if self.sfxInterval:
self.sfxInterval.pause()
self.sfxInterval = None
if self.camInterval:
self.camInterval.pause()
self.camInterval = None
for club in self.clubs:
self.clubs[club].removeNode()
del self.clubs
if hasattr(self, 'scoreBoard'):
if hasattr(self.scoreBoard, 'maximizeB'):
if self.scoreBoard.maximizeB:
self.scoreBoard.maximizeB.hide()
if not self.titleLabel == None:
self.titleLabel.destroy()
self.notify.debug('Deleted title label')
self.notify.debug('Delete function')
if self.flyOverInterval:
self.flyOverInterval.pause()
self.flyOverInterval = None
for key in self.ballShadowDict:
self.ballShadowDict[key].removeNode()
self.dropShadowModel.removeNode()
return
    def sendReady(self):
        """Tell the AI server that this client has finished loading the hole."""
        self.sendUpdate('setAvatarReadyHole', [])
def createClub(self, avId):
club = NodePath('club-%s' % avId)
clubModel = loader.loadModel('phase_6/models/golf/putter')
clubModel.reparentTo(club)
clubModel.setR(clubModel, 45)
self.clubs[avId] = club
    def attachClub(self, avId, pointToBall = False):
        """Parent avId's putter to the toon's left hand, cancelling the toon's
        net scale so the club renders at a constant world size; optionally
        aim it at the ball (clubLookatSpot)."""
        club = self.clubs[avId]
        if club:
            av = base.cr.doId2do.get(avId)
            if av:
                av.useLOD(1000)
                lHand = av.getLeftHands()[0]
                club.setPos(0, 0, 0)
                club.reparentTo(lHand)
                # Net Y scale accumulated from the hand up to render.
                netScale = club.getNetTransform().getScale()[1]
                counterActToonScale = lHand.find('**/counteractToonScale')
                if counterActToonScale.isEmpty():
                    # First attach for this hand: insert an inverse-scale node
                    # so the club is unaffected by the toon's size.
                    counterActToonScale = lHand.attachNewNode('counteractToonScale')
                    counterActToonScale.setScale(1 / netScale)
                    self.notify.debug('creating counterActToonScale for %s' % av.getName())
                club.reparentTo(counterActToonScale)
                club.setX(-0.25 * netScale)
                if pointToBall:
                    club.lookAt(self.clubLookatSpot)
    def createToonRay(self):
        """Create a downward-pointing ODE ray used for toon placement checks
        (it is positioned above the current golfer in fixCurrentGolferFeet)."""
        self.toonRay = OdeRayGeom(self.space, 10.0)
        self.toonRay.setCollideBits(BitMask32(16777215))
        self.toonRay.setCategoryBits(BitMask32(0))
        # This rotation flips the ray so it points straight down (-Z).
        self.toonRay.setRotation(Mat3(1, 0, 0, 0, -1, 0, 0, 0, -1))
        self.space.setCollideId(self.toonRay, GolfGlobals.TOON_RAY_COLLIDE_ID)
        self.rayList.append(self.toonRay)
    def createSkyRay(self):
        """Create a long downward ODE ray colliding only with bits 0xF0.

        NOTE(review): the collide id 78 is a magic number with no named
        constant in view — confirm its meaning against the collision tables.
        """
        self.skyRay = OdeRayGeom(self.space, 100.0)
        self.skyRay.setCollideBits(BitMask32(240))
        self.skyRay.setCategoryBits(BitMask32(0))
        # Point the ray straight down (-Z).
        self.skyRay.setRotation(Mat3(1, 0, 0, 0, -1, 0, 0, 0, -1))
        self.space.setCollideId(self.skyRay, 78)
        self.rayList.append(self.skyRay)
    def createCameraRay(self):
        """Create the ODE ray used for camera collision tests and a node path
        under the terrain model to orient it from."""
        self.cameraRay = OdeRayGeom(self.space, 30.0)
        self.cameraRay.setCollideBits(BitMask32(8388608))
        self.cameraRay.setCategoryBits(BitMask32(0))
        self.space.setCollideId(self.cameraRay, GolfGlobals.CAMERA_RAY_COLLIDE_ID)
        self.cameraRayNodePath = self.terrainModel.attachNewNode('cameraRayNodePath')
        self.rayList.append(self.cameraRay)
def loadLevel(self):
GolfHoleBase.GolfHoleBase.loadLevel(self)
self.teeNodePath = self.terrainModel.find('**/tee0')
if self.teeNodePath.isEmpty():
teePos = Vec3(0, 0, 10)
else:
teePos = self.teeNodePath.getPos()
teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS)
self.notify.debug('teeNodePath heading = %s' % self.teeNodePath.getH())
self.teePositions = [teePos]
teeIndex = 1
teeNode = self.terrainModel.find('**/tee%d' % teeIndex)
while not teeNode.isEmpty():
teePos = teeNode.getPos()
teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS)
self.teePositions.append(teePos)
self.notify.debug('teeNodeP heading = %s' % teeNode.getH())
teeIndex += 1
teeNode = self.terrainModel.find('**/tee%d' % teeIndex)
self.holeBottomNodePath = self.terrainModel.find('**/holebottom0')
if self.holeBottomNodePath.isEmpty():
self.holeBottomPos = Vec3(*self.holeInfo['holePos'][0])
else:
self.holeBottomPos = self.holeBottomNodePath.getPos()
self.holePositions.append(self.holeBottomPos)
minHard = Point3(0, 0, 0)
maxHard = Point3(0, 0, 0)
self.hardSurfaceNodePath.calcTightBounds(minHard, maxHard)
centerX = (minHard[0] + maxHard[0]) / 2.0
centerY = (minHard[1] + maxHard[1]) / 2.0
heightX = (centerX - minHard[0]) / math.tan(deg2Rad(23))
heightY = (centerY - minHard[1]) / math.tan(deg2Rad(18))
height = max(heightX, heightY)
self.camTopViewPos = Point3(centerX, centerY, height)
self.camTopViewHpr = Point3(0, -90, 0)
self.createRays()
self.createToonRay()
self.createCameraRay()
def createLocatorDict(self):
self.locDict = {}
locatorNum = 1
curNodePath = self.hardSurfaceNodePath.find('**/locator%d' % locatorNum)
while not curNodePath.isEmpty():
self.locDict[locatorNum] = curNodePath
locatorNum += 1
curNodePath = self.hardSurfaceNodePath.find('**/locator%d' % locatorNum)
def loadBlockers(self):
loadAll = base.config.GetBool('golf-all-blockers', 0)
self.createLocatorDict()
self.blockerNums = self.holeInfo['blockers']
for locatorNum in self.locDict:
if locatorNum in self.blockerNums or loadAll:
locator = self.locDict[locatorNum]
locatorParent = locator.getParent()
locator.getChildren().wrtReparentTo(locatorParent)
else:
self.locDict[locatorNum].removeNode()
self.hardSurfaceNodePath.flattenStrong()
def loadSounds(self):
self.hitBallSfx = loader.loadSfx('phase_6/audio/sfx/Golf_Hit_Ball.ogg')
self.holeInOneSfx = loader.loadSfx('phase_6/audio/sfx/Golf_Hole_In_One.ogg')
self.holeInTwoPlusSfx = loader.loadSfx('phase_4/audio/sfx/MG_sfx_vine_game_fall.ogg')
self.ballGoesInStartSfx = loader.loadSfx('phase_6/audio/sfx/Golf_Ball_Goes_In_Start.ogg')
self.ballGoesInLoopSfx = loader.loadSfx('phase_6/audio/sfx/Golf_Ball_Goes_In_Loop.ogg')
self.ballGoesToRestSfx = loader.loadSfx('phase_6/audio/sfx/Golf_Ball_Rest_In_Cup.ogg')
self.kickedOutSfx = loader.loadSfx('phase_6/audio/sfx/Golf_Sad_Noise_Kicked_Off_Hole.ogg')
self.crowdBuildupSfx = []
self.crowdApplauseSfx = []
self.crowdMissSfx = []
for i in xrange(4):
self.crowdBuildupSfx.append(loader.loadSfx('phase_6/audio/sfx/Golf_Crowd_Buildup.ogg'))
self.crowdApplauseSfx.append(loader.loadSfx('phase_6/audio/sfx/Golf_Crowd_Applause.ogg'))
self.crowdMissSfx.append(loader.loadSfx('phase_6/audio/sfx/Golf_Crowd_Miss.ogg'))
self.bumpHardSfx = loader.loadSfx('phase_6/audio/sfx/Golf_Hit_Barrier_3.ogg')
self.bumpMoverSfx = loader.loadSfx('phase_4/audio/sfx/Golf_Hit_Barrier_2.ogg')
self.bumpWindmillSfx = loader.loadSfx('phase_4/audio/sfx/Golf_Hit_Barrier_1.ogg')
def setup(self):
self.notify.debug('setup golf hole')
self.loadLevel()
self.loadSounds()
self.camMove = 0
self.arrowKeys = ArrowKeys.ArrowKeys()
self.arrowKeys.setPressHandlers([None,
None,
self.__leftArrowPressed,
self.__rightArrowPressed,
self.__beginTossGolf])
self.arrowKeys.setReleaseHandlers([None,
None,
None,
None,
self.__endTossGolf])
self.targets = render.attachNewNode('targetGameTargets')
self.ballFollow = render.attachNewNode('nodeAtBall')
self.startingTeeHeading = self.teeNodePath.getH()
self.ballFollow.setH(self.startingTeeHeading)
self.ballFollowToonSpot = self.ballFollow.attachNewNode('toonAimSpot')
self.ballFollowToonSpot.setX(-2.0)
self.ballFollowToonSpot.setY(0)
self.ballFollowToonSpot.setH(-90)
self.clubLookatSpot = self.ballFollow.attachNewNode('clubLookat')
self.clubLookatSpot.setY(-(GolfGlobals.GOLF_BALL_RADIUS + 0.1))
camera.reparentTo(self.ballFollow)
self.camPosBallFollow = Point3(0.0, -23.0, 12.0)
self.camHprBallFollow = Point3(0, -16.0, 0)
camera.setPos(self.camPosBallFollow)
camera.setHpr(self.camHprBallFollow)
if self.holeBottomNodePath.isEmpty():
holePositions = self.holePositions
for index in xrange(len(holePositions)):
holePos = holePositions[index]
targetNodePathGeom, t1, t2 = BuildGeometry.addCircleGeom(self.targets, 16, 1)
targetNodePathGeom.setPos(holePos)
targetNodePathGeom.setBin('ground', 0)
targetNodePathGeom.setDepthWrite(False)
targetNodePathGeom.setDepthTest(False)
targetNodePathGeom.setTransparency(TransparencyAttrib.MAlpha)
targetNodePathGeom.setColorScale(0.0, 0.0, 0.0, 1.0)
self.holeNodes.append(targetNodePathGeom)
holeSphere = CollisionSphere(0, 0, 0, 1)
holeSphere.setTangible(1)
holeCNode = CollisionNode('Hole')
holeCNode.addSolid(holeSphere)
holeC = targetNodePathGeom.attachNewNode(holeCNode)
holeC.show()
holeC.setCollideMask(ToontownGlobals.PieBitmask)
toon = base.localAvatar
toon.setPos(0.0, 0.0, -100.0)
toon.b_setAnimState('neutral', 1.0)
self.pollingCtrl = 0
self.timeLastCtrl = 0.0
self.powerBar = DirectWaitBar(guiId='launch power bar', pos=(0.0, 0, -0.65), relief=DGG.SUNKEN, frameSize=(-2.0,
2.0,
-0.2,
0.2), borderWidth=(0.02, 0.02), scale=0.25, range=100, sortOrder=50, frameColor=(0.5, 0.5, 0.5, 0.5), barColor=(1.0, 0.0, 0.0, 1.0), text='', text_scale=0.26, text_fg=(1, 1, 1, 1), text_align=TextNode.ACenter, text_pos=(0, -0.05))
self.power = 0
self.powerBar['value'] = self.power
self.powerBar.hide()
self.accept('tab', self.tabKeyPressed)
self.putAwayAllToons()
base.transitions.irisOut(t=0)
self.dropShadowModel = loader.loadModel('phase_3/models/props/drop_shadow')
self.dropShadowModel.setColor(0, 0, 0, 0.5)
self.dropShadowModel.flattenMedium()
self.dropShadowModel.hide()
return
def switchToAnimState(self, animStateName, forced = False):
curAnimState = base.localAvatar.animFSM.getCurrentState()
curAnimStateName = ''
if curAnimState:
curAnimStateName = curAnimState.getName()
if curAnimStateName != animStateName or forced:
base.localAvatar.b_setAnimState(animStateName)
def __aimTask(self, task):
self.attachClub(self.currentGolfer, True)
x = -math.sin(self.ballFollow.getH() * 0.0174532925)
y = math.cos(self.ballFollow.getH() * 0.0174532925)
dt = globalClock.getDt()
b = self.curGolfBall()
forceMove = 500
forceMoveDt = forceMove * dt
posUpdate = False
momentumChange = dt * 60.0
if (self.arrowKeys.upPressed() or self.arrowKeys.downPressed()) and not self.golfCourse.canDrive(self.currentGolfer):
posUpdate = True
self.aimMomentum = 0.0
self.ballFollow.headsUp(self.holeBottomNodePath)
elif self.arrowKeys.rightPressed() and not self.arrowKeys.leftPressed():
self.aimMomentum -= momentumChange
if self.aimMomentum > 0:
self.aimMomentum = 0.0
elif self.aimMomentum < -30.0:
self.aimMomentum = -30.0
posUpdate = True
self.switchToAnimState('GolfRotateLeft')
self.scoreBoard.hide()
elif self.arrowKeys.leftPressed() and not self.arrowKeys.rightPressed():
self.aimMomentum += momentumChange
if self.aimMomentum < 0.0:
self.aimMomentum = 0.0
elif self.aimMomentum > 30.0:
self.aimMomentum = 30.0
posUpdate = True
self.switchToAnimState('GolfRotateRight')
self.scoreBoard.hide()
else:
self.aimMomentum = 0.0
self.switchToAnimState('GolfPuttLoop')
self.ballFollow.setH(self.ballFollow.getH() + self.aimMomentum * dt)
if self.arrowKeys.upPressed() and self.golfCourse.canDrive(self.currentGolfer):
b.enable()
b.addForce(Vec3(x * forceMoveDt, y * forceMoveDt, 0))
if self.arrowKeys.downPressed() and self.golfCourse.canDrive(self.currentGolfer):
b.enable()
b.addForce(Vec3(-x * forceMoveDt, -y * forceMoveDt, 0))
if self.arrowKeys.leftPressed() and self.arrowKeys.rightPressed() and self.golfCourse.canDrive(self.currentGolfer):
b.enable()
b.addForce(Vec3(0, 0, 3000 * dt))
if posUpdate:
if globalClock.getFrameTime() - self.lastTimeHeadingSent > 0.2:
self.sendUpdate('setTempAimHeading', [localAvatar.doId, self.ballFollow.getH()])
self.lastTimeHeadingSent = globalClock.getFrameTime()
self.lastTempHeadingSent = self.ballFollow.getH()
elif self.lastTempHeadingSent != self.ballFollow.getH():
self.sendUpdate('setTempAimHeading', [localAvatar.doId, self.ballFollow.getH()])
self.lastTimeHeadingSent = globalClock.getFrameTime()
self.lastTempHeadingSent = self.ballFollow.getH()
self.setCamera2Ball()
self.fixCurrentGolferFeet()
self.adjustClub()
self.orientCameraRay()
return task.cont
def fixCurrentGolferFeet(self):
golfer = base.cr.doId2do.get(self.currentGolfer)
if not golfer:
return
golferPos = golfer.getPos(render)
newPos = Vec3(golferPos[0], golferPos[1], golferPos[2] + 5)
self.toonRay.setPosition(newPos)
def adjustClub(self):
club = self.clubs[self.currentGolfer]
if club:
distance = club.getDistance(self.clubLookatSpot)
scaleFactor = distance / 2.058
club.setScale(1, scaleFactor, 1)
def resetPowerBar(self):
self.power = 0
self.powerBar['value'] = self.power
self.powerBar['text'] = ''
def sendSwingInfo(self):
kickHimOut = self.updateWarning()
if kickHimOut:
return
curAimTime = globalClock.getRealTime() - self.enterAimStart
if curAimTime < 0:
curAimTime = 0
if curAimTime > GolfGlobals.AIM_DURATION:
curAimTime = GolfGlobals.AIM_DURATION
self.notify.debug('curAimTime = %f' % curAimTime)
x = -math.sin(self.ballFollow.getH() * 0.0174532925)
y = math.cos(self.ballFollow.getH() * 0.0174532925)
b = self.curGolfBall()
if hasattr(base, 'golfPower') and base.golfPower != None:
self.power = float(base.golfPower)
if not self.swingInfoSent:
self.sendUpdate('postSwingState', [self.getCycleTime(),
self.power,
b.getPosition()[0],
b.getPosition()[1],
b.getPosition()[2],
x,
y,
curAimTime,
self.getCommonObjectData()])
self.swingInfoSent = True
if self.power < 15 and self.golfCourse.scores[localAvatar.doId][self.golfCourse.curHoleIndex] == 0:
self.powerReminder = DirectLabel(text=TTLocalizer.GolfPowerReminder, text_shadow=(0, 0, 0, 1), text_fg=VBase4(1, 1, 0.0, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.8), scale=0.12)
return
    def updateWarning(self):
        """Track consecutive swings where the local toon never hit a control.

        Shows a fading on-screen warning one swing before the kick-out
        threshold and boots the toon (handleFallingAsleepGolf) once the
        threshold is reached.  Returns True when the toon was kicked out."""
        retval = False
        if not self.localToonHitControl:
            self.localMissedSwings += 1
        else:
            self.localMissedSwings = 0
        if self.localMissedSwings == GolfGlobals.KICKOUT_SWINGS - 1:
            # Last chance: display the warning label and fade it over 10s.
            self.warningLabel = DirectLabel(parent=aspect2d, relief=None, pos=(0, 0, 0), text_align=TextNode.ACenter, text=TTLocalizer.GolfWarningMustSwing, text_scale=0.12, text_font=ToontownGlobals.getSignFont(), text_fg=(1, 0.1, 0.1, 1), text_wordwrap=20)
            self.warningInterval = Sequence(LerpColorScaleInterval(self.warningLabel, 10, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'), Func(self.warningLabel.destroy))
            self.warningInterval.start()
        elif self.localMissedSwings >= GolfGlobals.KICKOUT_SWINGS:
            self.golfCourse.handleFallingAsleepGolf(None)
            retval = True
        return retval
def assignRecordSwing(self, avId, cycleTime, power, x, y, z, dirX, dirY, commonObjectData):
ball = self.ballDict[avId]['golfBall']
holdBallPos = ball.getPosition()
self.useCommonObjectData(commonObjectData)
self.trackRecordBodyFlight(ball, cycleTime, power, Vec3(x, y, z), dirX, dirY)
ball.setPosition(holdBallPos)
self.sendUpdate('ballMovie2AI', [cycleTime,
avId,
self.recording,
self.aVRecording,
self.ballInHoleFrame,
self.ballTouchedHoleFrame,
self.ballFirstTouchedHoleFrame,
commonObjectData])
self.ballMovie2Client(cycleTime, avId, self.recording, self.aVRecording, self.ballInHoleFrame, self.ballTouchedHoleFrame, self.ballFirstTouchedHoleFrame, commonObjectData)
def __watchAimTask(self, task):
self.setCamera2Ball()
self.attachClub(self.currentGolfer, True)
self.adjustClub()
self.fixCurrentGolferFeet()
self.orientCameraRay()
return task.cont
def __watchTeeTask(self, task):
self.setCamera2Ball()
return task.cont
def curGolfBall(self):
return self.ballDict[self.currentGolfer]['golfBall']
def curGolfBallGeom(self):
return self.ballDict[self.currentGolfer]['golfBallGeom']
def curBallShadow(self):
return self.ballShadowDict[self.currentGolfer]
    def cleanupGeom(self):
        """Remove hole targets, the terrain model, and the power bar GUI."""
        self.targets.remove()
        self.terrainModel.remove()
        self.powerBar.destroy()

    def cleanupPowerBar(self):
        """Hide (but do not destroy) the power bar."""
        self.powerBar.hide()

    def cleanupPhysics(self):
        # Physics teardown is handled by DistributedPhysicsWorld.delete.
        pass
def curBall(self):
return self.ballDict[self.currentGolfer]['ball']
def curBallANP(self):
return self.ballDict[self.currentGolfer]['ballActorNodePath']
def curBallActor(self):
return self.ballDict[self.currentGolfer]['ballActor']
def enterAim(self):
self.notify.debug('Aim')
self.notify.debug('currentGolfer = %s' % self.currentGolfer)
self.switchToAnimState('GolfPuttLoop', forced=True)
self.swingInfoSent = False
self.lastState = self.state
self.aimMomentum = 0.0
self.enterAimStart = globalClock.getRealTime()
taskMgr.add(self.__aimTask, 'Aim Task')
self.showOnlyCurGolfer()
strokes = self.golfCourse.getStrokesForCurHole(self.currentGolfer)
self.camPivot = self.ballFollow.attachNewNode('golf-camPivot')
self.targetCamPivot = self.ballFollow.attachNewNode('golf-targetCamPivot')
self.targetCamPivot.setP(self.DefaultCamP)
self.curCamPivot = self.ballFollow.attachNewNode('golf-curCamPivot')
self.curCamPivot.setP(self.DefaultCamP)
self.ccTrav = CollisionTraverser('golf.ccTrav')
self.ccLine = CollisionSegment(0.0, 0.0, 0.0, 1.0, 0.0, 0.0)
self.ccLineNode = CollisionNode('golf.ccLineNode')
self.ccLineNode.addSolid(self.ccLine)
self.ccLineNodePath = self.camPivot.attachNewNode(self.ccLineNode)
self.ccLineBitMask = BitMask32(1048576)
self.ccLineNode.setFromCollideMask(self.ccLineBitMask)
self.ccLineNode.setIntoCollideMask(BitMask32.allOff())
self.camCollisionQueue = CollisionHandlerQueue()
self.ccTrav.addCollider(self.ccLineNodePath, self.camCollisionQueue)
if strokes:
self.ballFollow.headsUp(self.holeBottomNodePath)
self.camPivot.setP(self.DefaultCamP)
self._golfBarrierCollection = self.terrainModel.findAllMatches('**/collision?')
self._camAdjust = ScratchPad()
self._camAdjust.iters = 0
self._camAdjust.lower = self.DefaultCamP
self._camAdjust.upper = self.MaxCamP
base.camera.setPos(self.camPosBallFollow)
base.camera.setHpr(self.camHprBallFollow)
self.camPivot.setP(self.DefaultCamP)
base.camera.wrtReparentTo(self.camPivot)
A = Point3(0, 0, 0)
B = base.camera.getPos()
AtoB = B - A
AtoBnorm = Point3(AtoB)
AtoBnorm.normalize()
A += AtoBnorm * 0.4
self.ccLine.setPointA(A)
self.ccLine.setPointB(B)
self.camPivot.setP(self.DefaultCamP)
self._camAdjust.task = taskMgr.add(self._adjustCamera, 'adjustCamera')
self.resetPowerBar()
self.powerBar.show()
self.aimDuration = GolfGlobals.AIM_DURATION
if not self.unlimitedAimTime:
self.timer = ToontownTimer.ToontownTimer()
self.timer.posInTopRightCorner()
self.timer.setTime(self.aimDuration)
self.timer.countdown(self.aimDuration, self.timerExpired)
self.aimInstructions = DirectLabel(text=TTLocalizer.GolfAimInstructions, text_shadow=(0, 0, 0, 1), text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, -0.8), scale=TTLocalizer.DGHaimInstructions)
self.skyContact = 1
self.localToonHitControl = False
self._adjustCamera()
return
def exitAim(self):
localAvatar.wrtReparentTo(render)
taskMgr.remove(self._camAdjust.task)
taskMgr.remove('Aim Task')
taskMgr.remove(self.golfPowerTaskName)
if self.timer:
self.timer.stop()
self.timer.destroy()
self.timer = None
self.powerBar.hide()
self.ccLineNodePath.detachNode()
self.targetCamPivot.detachNode()
self.curCamPivot.detachNode()
self.camPivot.detachNode()
if self.aimInstructions:
self.aimInstructions.destroy()
self.aimInstructions = None
return
    def timerExpired(self):
        """Aim-phase countdown hit zero: force the swing with current settings."""
        taskMgr.remove(self.golfPowerTaskName)
        self.aimStart = None
        self.sendSwingInfo()
        self.resetPowerBar()
        return
def _adjustCamera(self, task=None, first=True):
if task is None and first:
while 1:
self._adjustCamera(first=False)
if self._camAdjust.iters == 0:
return Task.cont
MaxIters = 5
finalP = self._camAdjust.lower
localAvatar.stash()
for barrier in self._golfBarrierCollection:
barrier.stash()
self.ccTrav.traverse(render)
for barrier in self._golfBarrierCollection:
barrier.unstash()
localAvatar.unstash()
midP = (self._camAdjust.lower + self._camAdjust.upper)/2
if self.camCollisionQueue.getNumEntries() > 0:
self.camCollisionQueue.sortEntries()
entry = self.camCollisionQueue.getEntry(0)
sPoint = entry.getSurfacePoint(self.camPivot)
self._camAdjust.lower = self.camPivot.getP()
finalP = midP
self.camPivot.setP(finalP)
else:
self._camAdjust.upper = self.camPivot.getP()
finalP = self._camAdjust.upper
self.camPivot.setP(midP)
if abs(self._camAdjust.lower - self._camAdjust.upper) < 1.0:
self._camAdjust.iters = MaxIters
self._camAdjust.iters += 1
if self._camAdjust.iters >= MaxIters:
self.targetCamPivot.setP(self._camAdjust.upper)
if task is None:
self.curCamPivot.setP(finalP)
self._camAdjust.iters = 0
self._camAdjust.lower = self.DefaultCamP
self._camAdjust.upper = self.MaxCamP
self.camPivot.setP(self.DefaultCamP)
if task is not None:
self.curCamPivot.setP(self.curCamPivot,
self.targetCamPivot.getP(self.curCamPivot)*min(1.0, 1.0*globalClock.getDt()))
curP = self.curCamPivot.getP()
self.curCamPivot.setP(self.DefaultCamP)
base.camera.reparentTo(self.ballFollow)
base.camera.setPos(self.camPosBallFollow)
base.camera.setHpr(self.camHprBallFollow)
base.camera.wrtReparentTo(self.curCamPivot)
self.curCamPivot.setP(curP)
base.camera.wrtReparentTo(self.ballFollow)
return Task.cont
def enterChooseTee(self):
self.notify.debug('ChooseTee')
self.curGolfBallGeom().show()
self.curBallShadow().show()
self.lastState = self.state
taskMgr.add(self.__chooseTeeTask, 'ChooseTee Task')
self.ballFollow.setH(self.startingTeeHeading)
self.localAvatarChosenTee = False
self.localTempTee = 0
if len(self.teePositions) > 1:
self.localTempTee = 1
self.chooseTeeDuration = GolfGlobals.TEE_DURATION
if not self.unlimitedTeeTime:
self.teeTimer = ToontownTimer.ToontownTimer()
self.teeTimer.posInTopRightCorner()
self.teeTimer.setTime(self.chooseTeeDuration)
self.teeTimer.countdown(self.chooseTeeDuration, self.teeTimerExpired)
self.teeInstructions = DirectLabel(text=TTLocalizer.GolfChooseTeeInstructions, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, text_shadow=(0, 0, 0, 1), relief=None, pos=(0, 0, -0.75), scale=TTLocalizer.DGHteeInstructions)
self.powerBar.hide()
return
def exitChooseTee(self):
localAvatar.wrtReparentTo(render)
if hasattr(self, 'teeInstructions') and self.teeInstructions:
self.teeInstructions.destroy()
self.teeInstructions = None
taskMgr.remove('ChooseTee Task')
taskMgr.remove(self.golfPowerTaskName)
if self.teeTimer:
self.teeTimer.stop()
self.teeTimer.destroy()
self.teeTimer = None
self.powerBar.show()
return
    def sendTeeInfo(self):
        """Commit the locally selected tee to the AI and lock the choice."""
        self.sendUpdate('setAvatarTee', [self.localTempTee])
        self.localAvatarChosenTee = True
    def __chooseTeeTask(self, task):
        """Per-frame task: wait for the local player to confirm a tee with jump."""
        if self.localAvatarChosenTee:
            return task.done
        if self.arrowKeys.jumpPressed():
            # Ignore the jump key while the hole fly-over is still playing.
            if self.flyOverInterval and self.flyOverInterval.isPlaying():
                pass
            else:
                self.sendTeeInfo()
        return task.cont
def changeTee(self, newTee):
ball = self.curGolfBall()
ball.setPosition(self.teePositions[newTee])
self.setCamera2Ball()
self.fixCurrentGolferFeet()
self.adjustClub()
    def changeLocalTee(self, newTee):
        """Apply a local tee change and broadcast it to the other players."""
        self.changeTee(newTee)
        self.sendUpdate('setAvatarTempTee', [localAvatar.doId, newTee])
        self.fixCurrentGolferFeet()
        self.adjustClub()
def __leftArrowPressed(self):
if self.state != 'ChooseTee':
return
self.localTempTee -= 1
if self.localTempTee < 0:
self.localTempTee = len(self.teePositions) - 1
self.changeLocalTee(self.localTempTee)
def __rightArrowPressed(self):
if self.state != 'ChooseTee':
return
self.localTempTee += 1
self.localTempTee %= len(self.teePositions)
self.changeLocalTee(self.localTempTee)
    def teeTimerExpired(self):
        """Tee-choice countdown hit zero: commit the currently selected tee."""
        self.sendTeeInfo()
def enterWatchAim(self):
self.notify.debug('Watch Aim')
self.notify.debugStateCall(self)
self.notify.debug('currentGolfer = %s' % self.currentGolfer)
strokes = self.golfCourse.getStrokesForCurHole(self.currentGolfer)
if strokes:
self.ballFollow.lookAt(self.holeBottomNodePath)
self.ballFollow.setP(0)
self.showOnlyCurGolfer()
taskMgr.add(self.__watchAimTask, 'Watch Aim Task')
def exitWatchAim(self):
self.notify.debugStateCall(self)
av = base.cr.doId2do.get(self.currentGolfer)
if av:
heading = av.getH(render)
toonPos = av.getPos(render)
av.reparentTo(render)
av.setH(heading)
av.setPos(toonPos)
self.notify.debug('av %s now at position %s' % (av.getName(), av.getPos()))
else:
self.notify.debug('could not get avId %d' % self.currentGolfer)
taskMgr.remove('Watch Aim Task')
def enterWatchTee(self):
self.notify.debug('Watch Tee')
self.notify.debugStateCall(self)
self.curGolfBallGeom().show()
self.ballFollow.setH(self.startingTeeHeading)
self.ballShadowDict[self.currentGolfer].show()
def exitWatchTee(self):
self.notify.debugStateCall(self)
av = base.cr.doId2do.get(self.currentGolfer)
taskMgr.remove('Watch Tee Task')
    def enterWait(self):
        # Idle FSM state between turns; nothing to do beyond logging.
        self.notify.debug('Wait')
        self.notify.debugStateCall(self)

    def exitWait(self):
        self.notify.debugStateCall(self)
def removePlayBackDelayDelete(self):
if self.playBackDelayDelete:
self.playBackDelayDelete.destroy()
self.playBackDelayDelete = None
return
def enterPlayback(self):
    """FSM state: play back the recorded swing/ball movie on this client.

    Builds three parallel pieces:
      * self.swingInterval - the golfer's putt animation, padded with look/loop
        anims so the reaction (good-putt / bad-putt) lands when the ball drops
        or lips out.
      * sfxInterval - club-contact sound plus (if the ball sinks) the rattle
        of the ball in the cup, time-shifted to match the recorded movie.
      * crowd build-up + applause/miss stingers around the moment the ball
        first touches the hole.

    All "adjusted" times shift the recorded frame times forward by
    BALL_CONTACT_TIME, the point in the putt animation where the club meets
    the ball.
    """
    def shiftClubToRightHand():
        # Called mid-animation so the club follows the hand for the follow-through.
        club = self.clubs[self.currentGolfer]
        av = base.cr.doId2do.get(self.currentGolfer)
        if av and club:
            club.wrtReparentTo(av.getRightHands()[0])

    av = base.cr.doId2do.get(self.currentGolfer)
    if not av:
        return
    else:
        # Pin the avatar alive for the whole playback.
        self.removePlayBackDelayDelete()
        self.playBackDelayDelete = DelayDelete.DelayDelete(av, 'GolfHole.enterPlayback')
        self.accept('clientCleanup', self._handleClientCleanup)
        self.inPlayBack = 1
        self.setLookingAtPutt(False)
        # Swing up to ball contact, launch the recorded ball movie, then finish the swing.
        self.swingInterval = Sequence(ActorInterval(av, 'swing-putt', startFrame=0, endFrame=GolfGlobals.BALL_CONTACT_FRAME), Func(self.startBallPlayback), ActorInterval(av, 'swing-putt', startFrame=GolfGlobals.BALL_CONTACT_FRAME, endFrame=23), Func(shiftClubToRightHand), Func(self.setLookingAtPutt, True), Func(self.removePlayBackDelayDelete))
        adjustedBallTouchedHoleTime = self.ballTouchedHoleTime + GolfGlobals.BALL_CONTACT_TIME
        adjustedBallFirstTouchedHoleTime = self.ballFirstTouchedHoleTime + GolfGlobals.BALL_CONTACT_TIME
        adjustedBallDropTime = self.ballDropTime + GolfGlobals.BALL_CONTACT_TIME
        adjustedPlaybackEndTime = self.playbackMovieDuration + GolfGlobals.BALL_CONTACT_TIME
        self.notify.debug('adjustedTimes ballTouched=%.2f ballFirstTouched=%.2f ballDrop=%.2f playbaybackEnd=%.2f' % (adjustedBallTouchedHoleTime,
            adjustedBallFirstTouchedHoleTime,
            adjustedBallDropTime,
            adjustedPlaybackEndTime))
        if self.ballWillGoInHole:
            # Pad with look anims so 'good-putt' starts right as the ball drops.
            curDuration = self.swingInterval.getDuration()
            lookPuttInterval = ActorInterval(av, 'look-putt')
            if curDuration < adjustedBallDropTime:
                self.swingInterval.append(lookPuttInterval)
                curDuration = self.swingInterval.getDuration()
            diffTime = adjustedBallDropTime - curDuration
            if diffTime > 0:
                self.swingInterval.append(ActorInterval(av, 'lookloop-putt', endTime=diffTime))
            self.swingInterval.append(ActorInterval(av, 'good-putt', endTime=self.playbackMovieDuration, loop=1))
        elif self.ballTouchedHoleTime:
            # Ball lips out: sync 'bad-putt' with the moment it touches the hole.
            self.notify.debug('doing self.ballTouchedHoleTime')
            curDuration = self.swingInterval.getDuration()
            lookPuttInterval = ActorInterval(av, 'look-putt')
            if curDuration < adjustedBallTouchedHoleTime:
                self.swingInterval.append(lookPuttInterval)
                curDuration = self.swingInterval.getDuration()
            diffTime = adjustedBallTouchedHoleTime - curDuration
            if diffTime > 0:
                self.swingInterval.append(ActorInterval(av, 'lookloop-putt', endTime=diffTime))
            self.swingInterval.append(ActorInterval(av, 'bad-putt', endFrame=32))
            self.swingInterval.append(ActorInterval(av, 'badloop-putt', endTime=self.playbackMovieDuration, loop=1))
        else:
            # Ball never reaches the hole: just watch it roll.
            self.swingInterval.append(ActorInterval(av, 'look-putt'))
            self.swingInterval.append(ActorInterval(av, 'lookloop-putt', endTime=self.playbackMovieDuration, loop=1))
        sfxInterval = Parallel()
        ballHitInterval = Sequence(Wait(GolfGlobals.BALL_CONTACT_TIME), SoundInterval(self.hitBallSfx))
        sfxInterval.append(ballHitInterval)
        if self.ballWillGoInHole:
            # Cup-rattle sequence: start sfx, optional loop, then come-to-rest sfx,
            # trimmed so the rest sfx ends exactly at playback end.
            ballRattle = Sequence()
            timeToPlayBallRest = adjustedPlaybackEndTime - self.ballGoesToRestSfx.length()
            if adjustedBallFirstTouchedHoleTime < timeToPlayBallRest:
                diffTime = timeToPlayBallRest - adjustedBallFirstTouchedHoleTime
                if self.ballGoesInStartSfx.length() < diffTime:
                    ballRattle.append(Wait(adjustedBallFirstTouchedHoleTime))
                    ballRattle.append(SoundInterval(self.ballGoesInStartSfx))
                    timeToPlayLoop = adjustedBallFirstTouchedHoleTime + self.ballGoesInStartSfx.length()
                    loopTime = timeToPlayBallRest - timeToPlayLoop
                    if self.ballGoesInLoopSfx.length() == 0.0:
                        numLoops = 0
                    else:
                        numLoops = int(loopTime / self.ballGoesInLoopSfx.length())
                    self.notify.debug('numLoops=%d loopTime=%f' % (numLoops, loopTime))
                    if loopTime > 0:
                        ballRattle.append(SoundInterval(self.ballGoesInLoopSfx, loop=1, duration=loopTime, seamlessLoop=True))
                    ballRattle.append(SoundInterval(self.ballGoesToRestSfx))
                    self.notify.debug('playing full rattling')
                else:
                    # Not enough time for the full start sfx: skip into it.
                    self.notify.debug('playing abbreviated rattling')
                    timeToPlayBallGoesIn = adjustedBallFirstTouchedHoleTime
                    ballRattle.append(Wait(timeToPlayBallGoesIn))
                    startTime = self.ballGoesInStartSfx.length() - diffTime
                    self.notify.debug('adjustedBallDropTime=%s diffTime=%s starTime=%s' % (adjustedBallDropTime, diffTime, startTime))
                    ballRattle.append(SoundInterval(self.ballGoesInStartSfx, startTime=startTime))
                    ballRattle.append(SoundInterval(self.ballGoesToRestSfx))
            else:
                # Ball drops very late: only the tail of the rest sfx fits.
                self.notify.debug('playing abbreviated ball goes to rest')
                ballRattle.append(Wait(adjustedBallFirstTouchedHoleTime))
                diffTime = adjustedPlaybackEndTime - adjustedBallFirstTouchedHoleTime
                startTime = self.ballGoesToRestSfx.length() - diffTime
                self.notify.debug('adjustedBallDropTime=%s diffTime=%s starTime=%s' % (adjustedBallDropTime, diffTime, startTime))
                ballRattle.append(SoundInterval(self.ballGoesToRestSfx, startTime=startTime))
            sfxInterval.append(ballRattle)
        # Per-golfer crowd sfx (indexed by seat position).
        crowdBuildupSfx = self.crowdBuildupSfx[self.avIdList.index(self.currentGolfer)]
        crowdApplauseSfx = self.crowdApplauseSfx[self.avIdList.index(self.currentGolfer)]
        crowdMissSfx = self.crowdMissSfx[self.avIdList.index(self.currentGolfer)]
        if self.ballWillGoInHole:
            # Build-up timed to peak as the ball reaches the hole, then applause.
            crowdIval = Sequence()
            buildupLength = crowdBuildupSfx.length()
            self.notify.debug('buildupLength=%s' % buildupLength)
            diffTime = adjustedBallFirstTouchedHoleTime - buildupLength
            if diffTime > 0:
                crowdIval.append(Wait(diffTime))
                crowdIval.append(SoundInterval(crowdBuildupSfx))
                crowdIval.append(SoundInterval(crowdApplauseSfx))
            else:
                startTime = buildupLength - adjustedBallFirstTouchedHoleTime
                self.notify.debug('playing abbreviated crowd build and applause diffTime=%s startTime=%s' % (diffTime, startTime))
                crowdIval.append(SoundInterval(crowdBuildupSfx, startTime=startTime))
                crowdIval.append(SoundInterval(crowdApplauseSfx))
            sfxInterval.append(crowdIval)
        elif self.ballFirstTouchedHoleTime:
            # Ball touched but missed: build-up followed by the "aww" miss sfx.
            crowdIval = Sequence()
            buildupLength = crowdBuildupSfx.length()
            self.notify.debug('touched but not going in buildupLength=%s' % buildupLength)
            diffTime = adjustedBallFirstTouchedHoleTime - buildupLength
            if diffTime > 0:
                self.notify.debug('waiting %.2f to play crowd buildup' % diffTime)
                crowdIval.append(Wait(diffTime))
                crowdIval.append(SoundInterval(crowdBuildupSfx))
                crowdIval.append(SoundInterval(crowdMissSfx))
            else:
                startTime = buildupLength - adjustedBallFirstTouchedHoleTime
                self.notify.debug('playing abbreviated crowd build and miss diffTime=%s startTime=%s' % (diffTime, startTime))
                crowdIval.append(SoundInterval(crowdBuildupSfx, startTime=startTime))
                crowdIval.append(SoundInterval(crowdMissSfx))
            sfxInterval.append(crowdIval)
        if self.sfxInterval:
            self.sfxInterval.finish()
        self.sfxInterval = sfxInterval
        self.sfxInterval.start()
        self.swingInterval.start()
def exitPlayback(self):
    """Leave Playback: settle the golfer into a reaction/neutral anim and disable the ball.

    The local avatar broadcasts its anim state (b_setAnimState) so other
    clients see the same reaction.
    """
    self.notify.debug('Exiting Playback')
    if self.swingInterval:
        self.swingInterval.pause()
    av = base.cr.doId2do.get(self.currentGolfer)
    if av:
        if self.ballWillGoInHole:
            av.loop('good-putt', restart=0)
        elif self.ballTouchedHoleTime:
            # Lip-out: leave whatever bad-putt anim playback queued running.
            pass
        else:
            av.loop('neutral')
        self.setLookingAtPutt(False)
        if av == base.localAvatar:
            if self.ballWillGoInHole:
                av.b_setAnimState('GolfGoodPutt')
            elif self.ballTouchedHoleTime:
                av.b_setAnimState('GolfBadPutt')
            else:
                av.b_setAnimState('neutral')
    taskMgr.remove('playback task')
    self.curGolfBall().disable()
    self.readyCurrentGolfer(None)
    self.inPlayBack = 0
    if self.powerReminder:
        self.powerReminder.destroy()
        self.powerReminder = None
    return
def setLookingAtPutt(self, newVal):
    """Set whether the current golfer's head should track the rolling ball."""
    self.isLookingAtPutt = newVal
def getLookingAtPutt(self):
    """Return True while the golfer's head should track the rolling ball."""
    return self.isLookingAtPutt
def startBallPlayback(self):
    """Reset the movie cursors to frame 0 and arm per-physics-step playback.

    Position frames come from self.recording, angular-velocity frames from
    self.aVRecording; inPlayBack == 2 makes postStep() advance the movie.
    """
    self.playbackFrameNum = 0
    self.sourceFrame = self.recording[0]
    self.destFrameNum = 1
    self.destFrame = self.recording[self.destFrameNum]
    self.aVSourceFrame = self.aVRecording[0]
    self.aVDestFrameNum = 1
    self.aVDestFrame = self.aVRecording[self.aVDestFrameNum]
    self.inPlayBack = 2
def isCurBallInHole(self):
    """Return True if the current ball lies within DistanceToBeInHole of any cup."""
    retval = False
    ball = self.curGolfBall()
    ballPos = ball.getPosition()
    for holePos in self.holePositions:
        displacement = ballPos - holePos
        length = displacement.length()
        self.notify.debug('hole %s length=%s' % (holePos, length))
        if length <= GolfGlobals.DistanceToBeInHole:
            retval = True
            break
    return retval
def handleBallGoingInHole(self):
    """End-of-ball handling: hide the ball, show the score text, and play stingers.

    Shows a floating score string ("+2", "-1", hole-in-one text, or a shot
    description like "Birdie"), updates the scoreboard, then tells the server
    the turn is done once the text fades.

    Fixes vs. original:
      * non-positive diffs produced `textStr = diff` (an int), while every
        other branch builds a string - now str(diff) so downstream text
        generation always receives a string.
      * `self.hasCurGolferReachedMaxSwing` was tested without calling it; a
        bound method is always truthy, so the kicked-out sfx played for every
        missed ball. Now the method is actually invoked.
    """
    par = GolfGlobals.HoleInfo[self.holeId]['par']
    unlimitedSwing = False
    av = base.cr.doId2do.get(self.currentGolfer)
    if av:
        unlimitedSwing = av.getUnlimitedSwing()
    if not unlimitedSwing:
        # Park the ball far underground and hide its shadow.
        self.curGolfBall().setPosition(0, 0, -100)
        self.ballShadowDict[self.currentGolfer].setPos(0, 0, -100)
        self.ballShadowDict[self.currentGolfer].hide()
    strokes = 3
    if self.golfCourse:
        strokes = self.golfCourse.getStrokesForCurHole(self.currentGolfer)
    else:
        self.notify.warning('self.golfCourse is None')
    diff = strokes - par
    if diff > 0:
        textStr = '+' + str(diff)
    else:
        textStr = str(diff)  # was `diff` (int) - keep the type consistent
    if strokes == 1:
        textStr = TTLocalizer.GolfHoleInOne
    elif diff in TTLocalizer.GolfShotDesc:
        if self.ballWillGoInHole:
            textStr = TTLocalizer.GolfShotDesc[diff]
    perfectTextSubnode = hidden.attachNewNode(self.__genText(textStr))
    perfectText = hidden.attachNewNode('perfectText')
    perfectTextSubnode.reparentTo(perfectText)
    frame = self.__textGen.getCardActual()
    # Center the text card vertically under the pivot.
    offsetY = -abs(frame[2] + frame[3]) / 2.0 - 1.35
    perfectTextSubnode.setPos(0, 0, offsetY)
    perfectText.setColor(1, 0.1, 0.1, 1)

    def fadeFunc(t, text = perfectText):
        text.setColorScale(1, 1, 1, t)

    def destroyText(text = perfectText):
        text.removeNode()

    animTrack = Sequence()
    av = base.cr.doId2do.get(self.currentGolfer)
    animTrack.append(Func(self.golfCourse.updateScoreBoard))
    # Scale/fade the text in, hold, fade out, then signal turnDone to the server.
    textTrack = Sequence(Func(perfectText.reparentTo, aspect2d), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=0.3, startScale=0.0), LerpFunctionInterval(fadeFunc, fromData=0.0, toData=1.0, duration=0.5)), Wait(2.0), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=1.0), LerpFunctionInterval(fadeFunc, fromData=1.0, toData=0.0, duration=0.5, blendType='easeIn')), Func(destroyText), WaitInterval(0.5), Func(self.sendUpdate, 'turnDone', []))
    soundTrack = Sequence()
    if strokes == 1:
        soundTrack.append(SoundInterval(self.holeInOneSfx))
    elif self.hasCurGolferReachedMaxSwing() and not self.ballWillGoInHole:
        # was a bare attribute reference (always truthy) - must be called
        soundTrack.append(SoundInterval(self.kickedOutSfx))
    self.perfectIval = Parallel(textTrack, soundTrack, animTrack)
    self.perfectIval.start()
def __playbackTask(self, task):
    """taskMgr hook that advances one playback frame.

    NOTE(review): playBackFrame() takes no arguments, but `task` is passed
    here - this would raise TypeError if the task ever ran; confirm whether
    this path is dead (postStep drives playBackFrame directly).
    """
    return self.playBackFrame(task)
def toonRayCollisionCallback(self, x, y, z):
    """Ground-ray hit: keep the golfer's feet on the terrain under the ball pivot.

    (x, y, z) is the world-space intersection point; the toon's Z is set in
    ballFollowToonSpot space, clamped so it never sinks below ball level.
    """
    if self.state not in ('Aim', 'WatchAim', 'ChooseTee', 'WatchTee'):
        return
    # Temp node converts the world hit point into ballFollowToonSpot space.
    tempPath = render.attachNewNode('temp')
    tempPath.setPos(x, y, z)
    relPos = tempPath.getPos(self.ballFollowToonSpot)
    av = base.cr.doId2do.get(self.currentGolfer)
    if av:
        zToUse = relPos[2]
        if zToUse < 0 - GolfGlobals.GOLF_BALL_RADIUS:
            zToUse = 0 - GolfGlobals.GOLF_BALL_RADIUS
        av.setPos(0, 0, zToUse)
    tempPath.removeNode()
def preStep(self):
    """Physics pre-step hook; only runs the shared logic while a golfer is active."""
    if self.currentGolferActive:
        GolfHoleBase.GolfHoleBase.preStep(self)
def postStep(self):
    """Physics post-step hook: advance movie playback and camera-occlusion fades.

    inPlayBack == 2 means the ball movie is running; when playback has ended
    (inPlayBack back to 0) while the FSM is still in Playback, fall back to Wait.
    """
    if self.currentGolferActive:
        GolfHoleBase.GolfHoleBase.postStep(self)
    DistributedPhysicsWorld.DistributedPhysicsWorld.postStep(self)
    if self.inPlayBack == 2:
        self.playBackFrame()
        self.makeCurGolferLookAtBall()
    elif self.state == 'Playback' and self.inPlayBack == 0:
        self.request('Wait')
    self.updateTranslucentObjects()
def updateTranslucentObjects(self):
    """Fade scenery that started occluding the camera this frame; restore what stopped."""
    previous = self.translucentLastFrame
    current = self.translucentCurFrame
    # No longer between camera and ball: back to fully opaque.
    for node in previous:
        if node not in current:
            node.setColorScale(1, 1, 1, 1)
    # Newly occluding: make it see-through.
    for node in current:
        if node in previous:
            continue
        self.notify.debug('making translucent %s' % node)
        node.setColorScale(1, 1, 1, 0.25)
        node.setTransparency(1)
def makeCurGolferLookAtBall(self):
    """Turn the golfer's head/body toward the rolling ball during playback."""
    if self.getLookingAtPutt():
        av = base.cr.doId2do.get(self.currentGolfer)
        if av:
            ballPos = self.curGolfBall().getPosition()
            av.headsUp(ballPos[0], ballPos[1], ballPos[2])
            # headsUp faces forward; toon stands side-on to the ball.
            av.setH(av.getH() - 90)
def playBackFrame(self):
    """Advance the recorded ball movie by one frame, interpolating between keyframes.

    Each recording entry is (frameNum, x, y, z); self.aVRecording holds the
    matching angular velocities. Position and spin are linearly interpolated
    between the bracketing source/dest keyframes. When the movie runs out,
    either the ball went in (or the golfer is out of swings) and
    handleBallGoingInHole fires, or the turn simply ends with turnDone.
    """
    doPrint = 0
    doAVPrint = 0
    lastFrame = self.recording[len(self.recording) - 1][0]
    if self.playbackFrameNum >= self.destFrame[0]:
        # Crossed a keyframe boundary: shift the interpolation window forward.
        self.sourceFrame = self.destFrame
        self.destFrameNum += 1
        doPrint = 1
        if self.destFrameNum < len(self.recording):
            self.destFrame = self.recording[self.destFrameNum]
        else:
            # Movie exhausted - resolve the end of the turn.
            self.notify.debug('recording length %s' % len(self.recording))
            if self.isCurBallInHole() or self.hasCurGolferReachedMaxSwing():
                self.handleBallGoingInHole()
                self.request('Wait')
            else:
                self.golfCourse.updateScoreBoard()
                self.request('Wait')
                self.sendUpdate('turnDone', [])
            return
    # Linear interpolation of position between source and dest keyframes.
    self.projLength = self.destFrame[0] - self.sourceFrame[0]
    self.projPen = self.destFrame[0] - self.playbackFrameNum
    propSource = float(self.projPen) / float(self.projLength)
    propDest = 1.0 - propSource
    projX = self.sourceFrame[1] * propSource + self.destFrame[1] * propDest
    projY = self.sourceFrame[2] * propSource + self.destFrame[2] * propDest
    projZ = self.sourceFrame[3] * propSource + self.destFrame[3] * propDest
    newPos = Vec3(projX, projY, projZ)
    ball = self.curGolfBall()
    ball.setPosition(newPos)
    if self.playbackFrameNum >= self.aVDestFrame[0]:
        # Same keyframe-advance logic for the angular-velocity track.
        self.aVSourceFrame = self.aVDestFrame
        self.aVDestFrameNum += 1
        doAVPrint = 1
        if self.aVDestFrameNum < len(self.aVRecording):
            self.aVDestFrame = self.aVRecording[self.aVDestFrameNum]
    newAV = Vec3(self.aVSourceFrame[1], self.aVSourceFrame[2], self.aVSourceFrame[3])
    self.projLength = self.aVDestFrame[0] - self.aVSourceFrame[0]
    self.projPen = self.aVDestFrame[0] - self.playbackFrameNum
    propSource = float(self.projPen) / float(self.projLength)
    propDest = 1.0 - propSource
    projX = self.aVSourceFrame[1] * propSource + self.aVDestFrame[1] * propDest
    projY = self.aVSourceFrame[2] * propSource + self.aVDestFrame[2] * propDest
    projZ = self.aVSourceFrame[3] * propSource + self.aVDestFrame[3] * propDest
    newAV = Vec3(projX, projY, projZ)
    ball = self.curGolfBall()
    ball.setAngularVel(newAV)
    # Keep the physics body enabled (for visible spin) until the final frame.
    if self.playbackFrameNum < lastFrame - 1:
        ball.enable()
    else:
        ball.disable()
    self.setCamera2Ball()
    self.placeBodies()
    if doAVPrint:
        pass
    if doPrint:
        self.notify.debug('. %s %s %s %s %s' % (self.playbackFrameNum,
            self.sourceFrame[0],
            self.destFrame[0],
            self.destFrameNum,
            newPos))
    self.playbackFrameNum += 1
def enterCleanup(self):
    """FSM state: tear down tasks, input, running intervals, and hole geometry."""
    taskMgr.remove('update task')
    if hasattr(self, 'arrowKeys'):
        self.arrowKeys.destroy()
        self.arrowKeys = None
    self.ignoreAll()
    if self.swingInterval:
        self.swingInterval.pause()
        self.swingInterval = None
    if self.sfxInterval:
        self.sfxInterval.pause()
        self.sfxInterval = None
    self.cleanupGeom()
    return
def exitCleanup(self):
    """Nothing to undo after cleanup."""
    pass
def setCamera2Ball(self):
    """Snap the camera-follow pivot onto the current ball's physics position."""
    b = self.curGolfBall()
    ballPos = Point3(b.getPosition()[0], b.getPosition()[1], b.getPosition()[2])
    self.ballFollow.setPos(ballPos)
def hitBall(self, ball, power, x, y):
    """Perform a swing on `ball` with the given power and aim direction (x, y).

    Fix: the original called self.performSwing(self, ball, power, x, y),
    passing `self` twice - performSwing is a bound method, so `self` is
    supplied automatically and the extra argument shifted every parameter
    (ball received self, power received ball, ...).
    """
    self.performSwing(ball, power, x, y)
def ballMovie2Client(self, cycleTime, avId, movie, spinMovie, ballInFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame, commonObjectData):
    """Receive the server-simulated swing movie and kick off local playback.

    `movie` is a list of (frame, x, y, z) ball positions, `spinMovie` the
    matching angular velocities; the *Frame args mark when the ball drops,
    touches, and first touches the hole (converted to seconds via DTAStep).
    """
    self.notify.debug('received Movie, number of frames %s %s ballInFrame=%d ballTouchedHoleFrame=%d ballFirstTouchedHoleFrame=%d' % (len(movie),
        len(spinMovie),
        ballInFrame,
        ballTouchedHoleFrame,
        ballFirstTouchedHoleFrame))
    if self.state == 'Playback':
        # A movie arrived while one is still playing; drop it.
        self.notify.debug('SMASHED PLAYBACK')
        return
    self.ballShadowDict[avId].show()
    self.holdCycleTime = cycleTime
    self.holdCommonObjectData = commonObjectData
    # Sync moving obstacles (windmills etc.) to the server's simulation state.
    self.useCommonObjectData(self.holdCommonObjectData)
    self.recording = movie
    self.aVRecording = spinMovie
    endingBallPos = Vec3(movie[-1][1], movie[-1][2], movie[-1][3])
    endingFrame = movie[-1][0]
    self.playbackMovieDuration = endingFrame * self.DTAStep
    self.notify.debug('playback movie duration=%s' % self.playbackMovieDuration)
    # Decide up-front whether this movie ends with the ball in the cup.
    displacement = self.holePositions[0] - endingBallPos
    self.ballWillGoInHole = False
    if displacement.length() <= GolfGlobals.DistanceToBeInHole:
        self.ballWillGoInHole = True
    self.notify.debug('endingBallPos=%s, distanceToHole=%s, ballWillGoInHole=%s' % (endingBallPos, displacement.length(), self.ballWillGoInHole))
    self.ballDropTime = ballInFrame * self.DTAStep
    self.ballTouchedHoleTime = ballTouchedHoleFrame * self.DTAStep
    self.ballFirstTouchedHoleTime = ballFirstTouchedHoleFrame * self.DTAStep
    if self.state == 'WatchTee':
        self.request('WatchAim')
    self.request('Playback')
def golfersTurn(self, avId):
    """Begin avId's turn: activate their ball, snap the camera, enter Aim or WatchAim."""
    self.readyCurrentGolfer(avId)
    self.setCamera2Ball()
    # Local player aims; everyone else spectates.
    if avId == localAvatar.doId:
        self.request('Aim')
    else:
        self.request('WatchAim')
def readyCurrentGolfer(self, avId):
    """Make avId's ball the only active, collidable one; pass None to deactivate all.

    Every ball's collision bits are cleared and its body disabled, then the
    chosen golfer's ball gets its collide/category masks restored.
    """
    for index in self.ballDict:
        self.ballDict[index]['golfBallOdeGeom'].setCollideBits(BitMask32(0))
        self.ballDict[index]['golfBallOdeGeom'].setCategoryBits(BitMask32(0))
        self.ballDict[index]['golfBall'].disable()
    if avId:
        self.currentGolfer = avId
        self.currentGolferActive = True
        if avId in self.ballDict:
            self.ballDict[avId]['golfBallOdeGeom'].setCollideBits(BitMask32(16777215))
            self.ballDict[avId]['golfBallOdeGeom'].setCategoryBits(BitMask32(4278190080L))
    else:
        self.currentGolferActive = False
def setGolferIds(self, avIds):
    """Record the participating golfers; mark every tee choice as unchosen (-1)."""
    self.avIdList = avIds
    self.numPlayers = len(avIds)
    self.teeChosen = dict.fromkeys(avIds, -1)
def setHoleId(self, holeId):
    """Store the hole id and cache its static info (par, maxSwing, model paths)."""
    self.holeId = holeId
    self.holeInfo = GolfGlobals.HoleInfo[holeId]
def createBall(self, avId, index = None):
    """Create avId's physics golf ball (hidden, disabled) plus its drop shadow.

    The ball starts at a tee position; both ball geom and shadow stay hidden
    until the golfer's turn activates them.
    """
    golfBallGeom, golfBall, odeGeom = self.createSphere(self.world, self.space, GolfGlobals.GOLF_BALL_DENSITY, GolfGlobals.GOLF_BALL_RADIUS, index)
    startPos = self.teePositions[0]
    if len(self.teePositions) > 1:
        # Prefer the second tee when the hole defines more than one.
        startPos = self.teePositions[1]
    golfBall.setPosition(startPos)
    golfBallGeom.hide()
    if self.notify.getDebug():
        self.notify.debug('golf ball body id')
        golfBall.write()
        self.notify.debug(' -')
    golfBallGeom.setName('golfBallGeom%s' % avId)
    self.ballDict[avId] = {'golfBall': golfBall,
        'golfBallGeom': golfBallGeom,
        'golfBallOdeGeom': odeGeom}
    golfBall.disable()
    shadow = self.dropShadowModel.copyTo(render)
    shadow.setBin('shadow', 100)
    shadow.setScale(0.09)
    shadow.setDepthWrite(False)
    shadow.setDepthTest(True)
    self.ballShadowDict[avId] = shadow
    shadow.hide()
def setGolfCourseDoId(self, golfCourseDoId):
    """Link this hole to its parent course distributed object.

    If the course object hasn't been generated on this client yet, register
    a relatedObjectMgr callback; otherwise grab its scoreboard immediately.
    """
    self.golfCourseDoId = golfCourseDoId
    self.golfCourse = base.cr.doId2do.get(self.golfCourseDoId)
    if not self.golfCourse:
        self.cr.relatedObjectMgr.abortRequest(self.golfCourseRequest)
        self.golfCourseRequest = self.cr.relatedObjectMgr.requestObjects([self.golfCourseDoId], eachCallback=self.__gotGolfCourse)
    else:
        self.scoreBoard = self.golfCourse.scoreBoard
        self.scoreBoard.hide()
def __gotGolfCourse(self, golfCourse):
    """relatedObjectMgr callback: the course object finally arrived."""
    self.golfCourseRequest = None
    self.golfCourse = golfCourse
    return
def __genText(self, text):
    """Render `text` through the shared TextNode and return the generated geometry."""
    self.__textGen.setText(text)
    return self.__textGen.generate()
def sendBox(self, pos0, pos1, pos2, quat0, quat1, quat2, quat3, anV0, anV1, anV2, lnV0, lnV1, lnV2):
    """Apply a received full rigid-body state (pos, quat, angular/linear vel) to the swing box."""
    self.swingBox.setPosition(pos0, pos1, pos2)
    self.swingBox.setQuaternion(Quat(quat0, quat1, quat2, quat3))
    self.swingBox.setAngularVel(anV0, anV1, anV2)
    self.swingBox.setLinearVel(lnV0, lnV1, lnV2)
def hasCurGolferReachedMaxSwing(self):
    """Return True once the active golfer has used up this hole's swing allowance."""
    strokesUsed = self.golfCourse.getStrokesForCurHole(self.currentGolfer)
    return strokesUsed >= self.holeInfo['maxSwing']
def __getGolfPower(self, time):
    """Map elapsed charge time to a 0-100 power value.

    The curve is shaped by golfPowerExponent, then folded into a triangle
    wave (0 -> 100 -> 0 -> ...) so holding the button oscillates the meter.
    """
    elapsed = max(time - self.aimStart, 0.0)
    t = elapsed / self.golfPowerSpeed
    t = math.pow(t, self.golfPowerExponent)
    power = int(t * 100) % 200
    if power > 100:
        # Descending half of the triangle wave.
        power = 200 - power
    return power
def __beginTossGolf(self):
    """Button pressed: start charging the swing power meter (Aim state only)."""
    if self.aimStart != None:
        return
    if not self.state == 'Aim':
        return
    if self.swingInfoSent:
        return
    self.localToonHitControl = True
    time = globalClock.getFrameTime()
    self.aimStart = time
    messenger.send('wakeup')
    self.scoreBoard.hide()
    taskMgr.add(self.__updateGolfPower, self.golfPowerTaskName)
    return
def __endTossGolf(self):
    """Button released: lock in the charged power and send the swing to the server."""
    if self.aimStart == None:
        return
    if not self.state == 'Aim':
        return
    messenger.send('wakeup')
    taskMgr.remove(self.golfPowerTaskName)
    self.aimStart = None
    self.sendSwingInfo()
    self.resetPowerBar()
    return
def __updateGolfPower(self, task):
    """Per-frame task while charging: refresh the power bar value and label."""
    if not self.powerBar:
        print '### no power bar!!!'
        return Task.done
    newPower = self.__getGolfPower(globalClock.getFrameTime())
    self.power = newPower
    self.powerBar['value'] = newPower
    self.powerBar['text'] = TTLocalizer.GolfPowerBarText % {'power': newPower}
    return Task.cont
def golferChooseTee(self, avId):
    """Server says avId must pick a tee; optionally run the hole fly-over first.

    If the fly-over movie plays, it requests ChooseTee/WatchTee itself when
    it finishes; otherwise we transition (and place the toon) immediately.
    """
    self.readyCurrentGolfer(avId)
    self.putAwayAllToons()
    if self.needToDoFlyOver and self.doFlyOverMovie(avId):
        pass
    else:
        if avId == localAvatar.doId:
            self.setCamera2Ball()
            if not self.state == 'ChooseTee':
                self.request('ChooseTee')
        else:
            self.setCamera2Ball()
            self.request('WatchTee')
        self.takeOutToon(self.currentGolfer)
def setAvatarTempTee(self, avId, tempTee):
    """Remote golfer previewed a tee; mirror it locally while we spectate."""
    if self.state != 'WatchTee':
        return
    if avId != self.currentGolfer:
        self.notify.warning('setAvatarTempTee avId=%s not equal to self.currentGolfer=%s' % (avId, self.currentGolfer))
        return
    self.changeTee(tempTee)
def setAvatarFinalTee(self, avId, finalTee):
    """Remote golfer committed a tee choice; apply it locally."""
    if avId != self.currentGolfer:
        self.notify.warning('setAvatarTempTee avId=%s not equal to self.currentGolfer=%s' % (avId, self.currentGolfer))
        return
    self.changeTee(finalTee)
def setTempAimHeading(self, avId, heading):
    """Mirror a remote golfer's aim heading onto our spectator camera pivot."""
    if avId != self.currentGolfer:
        self.notify.warning('setAvatarTempTee avId=%s not equal to self.currentGolfer=%s' % (avId, self.currentGolfer))
        return
    if self.state != 'WatchAim':
        return
    if avId != localAvatar.doId:
        self.ballFollow.setH(heading)
def stickToonToBall(self, avId):
    """Parent avId's toon to the spot beside the ball-follow pivot, zeroed out."""
    av = base.cr.doId2do.get(avId)
    if av:
        av.reparentTo(self.ballFollowToonSpot)
        av.setPos(0, 0, 0)
        av.setH(0)
def putAwayToon(self, avId):
    """Stash avId's toon far below the scene so it is out of sight."""
    av = base.cr.doId2do.get(avId)
    if av:
        av.reparentTo(render)
        av.setPos(0, 0, -1000)
        av.setH(0)
def putAwayAllToons(self):
    """Stash every participating toon off-screen."""
    for avId in self.avIdList:
        self.putAwayToon(avId)
def takeOutToon(self, avId):
    """Bring avId's toon to the ball, plant its feet, and hand it a club."""
    self.stickToonToBall(avId)
    self.fixCurrentGolferFeet()
    self.attachClub(avId)
def showOnlyCurGolfer(self):
    """Place the active golfer at the ball and stash every other toon."""
    self.notify.debug('curGolfer = %s' % self.currentGolfer)
    self.stickToonToBall(self.currentGolfer)
    self.fixCurrentGolferFeet()
    self.attachClub(self.currentGolfer)
    for avId in self.avIdList:
        if avId != self.currentGolfer:
            self.putAwayToon(avId)
def tabKeyPressed(self):
    """Toggle between the ball-follow camera and the fixed top-down view.

    `doInterval` selects a smooth 2-second lerp versus an instant snap
    (always True here; the snap branches are kept as fallbacks).
    """
    doInterval = True
    self.notify.debug('tab key pressed')
    if not hasattr(self, 'ballFollow'):
        return
    if self.flyOverInterval and self.flyOverInterval.isPlaying():
        # Don't fight the fly-over camera.
        return
    if self.camInterval and self.camInterval.isPlaying():
        self.camInterval.pause()
    if base.camera.getParent() == self.ballFollow:
        # Currently following the ball -> go to top view.
        if doInterval:
            curHpr = camera.getHpr(render)
            angle = PythonUtil.closestDestAngle2(curHpr[0], 0)
            self.camInterval = Sequence(Func(base.camera.wrtReparentTo, render), LerpPosHprInterval(base.camera, 2, self.camTopViewPos, self.camTopViewHpr))
            self.camInterval.start()
        else:
            base.camera.reparentTo(render)
            base.camera.setPos(self.camTopViewPos)
            base.camera.setHpr(self.camTopViewHpr)
    elif doInterval:
        # Currently in top view -> lerp back behind the ball.
        curHpr = camera.getHpr(self.ballFollow)
        angle = PythonUtil.closestDestAngle2(curHpr[0], 0)
        self.camInterval = Sequence(Func(base.camera.wrtReparentTo, self.ballFollow), LerpPosHprInterval(base.camera, 2, self.camPosBallFollow, self.camHprBallFollow))
        self.camInterval.start()
    else:
        base.camera.reparentTo(self.ballFollow)
        base.camera.setPos(self.camPosBallFollow)
        base.camera.setHpr(self.camHprBallFollow)
def doFlyOverMovie(self, avId):
    """Play the pre-baked camera fly-over of the hole; return True if it started.

    The camera animation lives in <terrain>_cammodel.bam / <terrain>_camera.bam
    next to the hole's terrain model. On finish the interval hands control to
    ChooseTee (local golfer) or WatchTee (spectators) and places the toon.
    Returns False if the fly-over assets are missing.
    """
    title = GolfGlobals.getCourseName(self.golfCourse.courseId) + ' :\n ' + GolfGlobals.getHoleName(self.holeId) + '\n' + TTLocalizer.GolfPar + ' : ' + '%s' % self.holeInfo['par']
    self.titleLabel = DirectLabel(parent=aspect2d, relief=None, pos=(0, 0, 0.8), text_align=TextNode.ACenter, text=title, text_scale=0.12, text_font=ToontownGlobals.getSignFont(), text_fg=(1, 0.8, 0.4, 1))
    self.titleLabel.setBin('opaque', 19)
    self.titleLabel.hide()
    self.needToDoFlyOver = False
    # Derive the camera-model/anim file names from the terrain model path.
    bamFile = self.holeInfo['terrainModel']
    fileName = bamFile.split('/')[-1]
    dotIndex = fileName.find('.')
    baseName = fileName[0:dotIndex]
    camModelName = baseName + '_cammodel.bam'
    cameraName = baseName + '_camera.bam'
    path = bamFile[0:bamFile.find(fileName)]
    camModelFullPath = path + camModelName
    cameraAnimFullPath = path + cameraName
    try:
        self.flyOverActor = Actor.Actor(camModelFullPath, {'camera': cameraAnimFullPath})
    except StandardError:
        self.notify.debug("Couldn't find flyover %s" % camModelFullPath)
        return False
    base.transitions.noIris()
    self.flyOverActor.reparentTo(render)
    self.flyOverActor.setBlend(frameBlend=True)
    flyOverJoint = self.flyOverActor.find('**/camera1')
    # Strip any geometry children so only the animated joint remains.
    children = flyOverJoint.getChildren()
    numChild = children.getNumPaths()
    for i in xrange(numChild):
        childNodePath = children.getPath(i)
        childNodePath.removeNode()
    self.flyOverJoint = flyOverJoint
    self.flyOverInterval = Sequence(Func(base.camera.reparentTo, flyOverJoint), Func(base.camera.clearTransform), Func(self.titleLabel.show), ActorInterval(self.flyOverActor, 'camera'), Func(base.camera.reparentTo, self.ballFollow), Func(base.camera.setPos, self.camPosBallFollow), Func(base.camera.setHpr, self.camHprBallFollow))
    if avId == localAvatar.doId:
        self.flyOverInterval.append(Func(self.setCamera2Ball))
        self.flyOverInterval.append(Func(self.safeRequestToState, 'ChooseTee'))
    else:
        self.flyOverInterval.append(Func(self.setCamera2Ball))
        self.flyOverInterval.append(Func(self.safeRequestToState, 'WatchTee'))
    self.flyOverInterval.append(Func(self.titleLabel.hide))
    self.flyOverInterval.append(Func(self.takeOutToon, avId))
    self.flyOverInterval.start()
    return True
def avExited(self, avId):
    """Hide the departing avatar's ball, unless its swing is still playing back."""
    stillPlayingBack = self.state == 'Playback' and self.currentGolfer == avId
    if not stillPlayingBack:
        self.ballDict[avId]['golfBallGeom'].hide()
def orientCameraRay(self):
    """Aim the occlusion-test ray from the camera toward the ball pivot."""
    pos = base.camera.getPos(self.terrainModel)
    self.cameraRayNodePath.setPos(pos)
    self.cameraRayNodePath.lookAt(self.ballFollow)
    # Sanity check: the ray node should sit exactly at the camera.
    renderPos = self.cameraRayNodePath.getPos(render)
    if renderPos != pos:
        self.notify.debug('orientCamerRay this should not happen')
    ballPos = self.ballFollow.getPos(self.terrainModel)
    dirCam = Vec3(ballPos - pos)
    dirCam.normalize()
    self.cameraRay.set(pos, dirCam)
def performSwing(self, ball, power, dirX, dirY):
    """Simulate a full swing locally, recording the ball's path frame by frame.

    Applies an impulse scaled by `power` along (dirX, dirY), steps the
    physics world until the ball comes to rest (capped at 2000 frames),
    post-processes the recordings, and feeds the result to ballMovie2Client.

    NOTE(review): `cycleTime` is referenced but never defined in this scope -
    as written this method raises NameError if executed; likewise `self.ball`
    overrides the `ball` parameter. This looks like code transplanted from
    the AI/server side - confirm whether this client path is ever exercised.
    """
    startTime = globalClock.getRealTime()
    avId = base.localAvatar.doId
    position = ball.getPosition()
    x = position[0]
    y = position[1]
    z = position[2]
    if avId not in self.golfCourse.drivingToons:
        # NOTE(review): branch re-reads the same values - effectively a no-op.
        x = position[0]
        y = position[1]
        z = position[2]
    self.swingTime = cycleTime
    lift = 0
    ball = self.ball
    forceMove = 2500
    if power > 50:
        lift = 0
    ball.enable()
    ball.setPosition(x, y, z)
    ball.setLinearVel(0.0, 0.0, 0.0)
    ball.setAngularVel(0.0, 0.0, 0.0)
    # Impulse proportional to the charged power.
    ball.addForce(Vec3(dirX * forceMove * power / 100.0, dirY * forceMove * power / 100.0, lift))
    self.initRecord()
    safety = 0
    self.llv = None
    self.record(ball)
    # Step the simulation until the ball disables itself or we hit the frame cap.
    while ball.isEnabled() and len(self.recording) < 2000:
        self.preStep()
        self.simulate()
        self.postStep()
        self.record(ball)
        safety += 1
    self.record(ball)
    midTime = globalClock.getRealTime()
    self.processRecording()
    self.processAVRecording()
    self.notify.debug('Recording End time %s cycle %s len %s avLen %s' % (self.timingSimTime,
        self.getSimCycleTime(),
        len(self.recording),
        len(self.aVRecording)))
    self.request('WaitPlayback')
    length = len(self.recording) - 1
    x = self.recording[length][1]
    y = self.recording[length][2]
    z = self.recording[length][3]
    self.ballPos[avId] = Vec3(x, y, z)
    endTime = globalClock.getRealTime()
    diffTime = endTime - startTime
    fpsTime = self.frame / diffTime
    self.notify.debug('Time Start %s Mid %s End %s Diff %s Fps %s frames %s' % (startTime,
        midTime,
        endTime,
        diffTime,
        fpsTime,
        self.frame))
    self.ballMovie2Client(cycleTime, avId, self.recording, self.aVRecording, self.ballInHoleFrame, self.ballTouchedHoleFrame, self.ballFirstTouchedHoleFrame)
    return
def handleBallHitNonGrass(self, c0, c1):
    """Collision callback during playback: play a bump sfx for hard surfaces.

    c0/c1 are the colliding surface ids. The last-position check throttles
    repeated sounds from the same contact point.
    """
    if not self.inPlayBack:
        return
    golfBallPos = self.curGolfBall().getPosition()
    if self.lastBumpSfxPos == golfBallPos:
        return
    if GolfGlobals.HARD_COLLIDE_ID in [c0, c1]:
        if not self.bumpHardSfx.status() == self.bumpHardSfx.PLAYING:
            # Require the ball to have moved a bit before re-triggering.
            distance = (golfBallPos - self.lastBumpSfxPos).length()
            if distance > 2.0:
                base.playSfx(self.bumpHardSfx)
                self.lastBumpSfxPos = golfBallPos
    elif GolfGlobals.MOVER_COLLIDE_ID in [c0, c1]:
        if not self.bumpMoverSfx.status() == self.bumpMoverSfx.PLAYING:
            base.playSfx(self.bumpMoverSfx)
            self.lastBumpSfxPos = golfBallPos
    elif GolfGlobals.WINDMILL_BASE_COLLIDE_ID in [c0, c1]:
        if not self.bumpWindmillSfx.status() == self.bumpWindmillSfx.PLAYING:
            base.playSfx(self.bumpWindmillSfx)
            self.lastBumpSfxPos = golfBallPos
def safeRequestToState(self, newState):
    """Request an FSM transition only if it is legal from the current state."""
    allowed = self.defaultTransitions.get(self.state, ())
    if newState in allowed:
        self.request(newState)
    else:
        self.notify.warning('ignoring transition from %s to %s' % (self.state, newState))
def doMagicWordHeading(self, heading):
    """Magic-word (cheat) hook: force the aim heading while in Aim state."""
    if self.state == 'Aim':
        self.aimMomentum = 0.0
        self.ballFollow.setH(float(heading))
def _handleClientCleanup(self):
    """Client shutdown during playback: release the avatar DelayDelete."""
    self.removePlayBackDelayDelete()
    self.ignore('clientCleanup')
| [
"anythingtechpro@gmail.com"
] | anythingtechpro@gmail.com |
cc08052dabe1fade19b113796f7f32584773ee51 | e72a7ced8b0fd4ef269117f26507b06d6e9df93c | /edi_835_parser/segments/location.py | a204ade901f3c3b4074d8698f1120444e42c6eec | [
"MIT"
] | permissive | javigoncompte/edi-835-parser | d211658e90c031676e7978677dc0f176840a909d | 0e44450adea0297bf6e0fbc74ad52cc01f709953 | refs/heads/main | 2023-06-27T11:10:29.760759 | 2021-08-05T00:04:10 | 2021-08-05T00:04:10 | 378,183,848 | 0 | 0 | MIT | 2021-06-18T14:55:39 | 2021-06-18T14:55:39 | null | UTF-8 | Python | false | false | 516 | py | from edi_835_parser.elements.identifier import Identifier
from edi_835_parser.segments.utilities import split_segment
class Location:
identification = 'N4'
identifier = Identifier()
def __init__(self, segment: str):
self.segment = segment
segment = split_segment(segment)
self.identifier = segment[0]
self.city = segment[1]
self.state = segment[2]
self.zip_code = segment[3]
def __repr__(self):
return '\n'.join(str(item) for item in self.__dict__.items())
if __name__ == '__main__':
pass
| [
"keiron.stoddart@gmail.com"
] | keiron.stoddart@gmail.com |
504420d710edfc66b1f1a47c0510b3db3d98bd57 | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/remove-invalid-parentheses.py | dcc6a2c8e851f973a63f3c1085274a85e39313e6 | [] | no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 2,367 | py | # Time: O(C(n, c)), try out all possible substrings with the minimum c deletion.
# Space: O(c), the depth is at most c, and it costs n at each depth
class Solution(object):
def removeInvalidParentheses(self, s):
"""
:type s: str
:rtype: List[str]
"""
# Calculate the minimum left and right parantheses to remove
def findMinRemove(s):
left_removed, right_removed = 0, 0
for c in s:
if c == '(':
left_removed += 1
elif c == ')':
if not left_removed:
right_removed += 1
else:
left_removed -= 1
return (left_removed, right_removed)
# Check whether s is valid or not.
def isValid(s):
sum = 0
for c in s:
if c == '(':
sum += 1
elif c == ')':
sum -= 1
if sum < 0:
return False
return sum == 0
def removeInvalidParenthesesHelper(start, left_removed, right_removed):
if left_removed == 0 and right_removed == 0:
tmp = ""
for i, c in enumerate(s):
if i not in removed:
tmp += c
if isValid(tmp):
res.append(tmp)
return
for i in xrange(start, len(s)):
if right_removed == 0 and left_removed > 0 and s[i] == '(':
if i == start or s[i] != s[i - 1]: # Skip duplicated.
removed[i] = True
removeInvalidParenthesesHelper(i + 1, left_removed - 1, right_removed)
del removed[i]
elif right_removed > 0 and s[i] == ')':
if i == start or s[i] != s[i - 1]: # Skip duplicated.
removed[i] = True
removeInvalidParenthesesHelper(i + 1, left_removed, right_removed - 1)
del removed[i]
res, removed = [], {}
(left_removed, right_removed) = findMinRemove(s)
removeInvalidParenthesesHelper(0, left_removed, right_removed)
return res
| [
"noreply@github.com"
] | black-shadows.noreply@github.com |
914d596eb2723382f5eb836d111c4fb2786c03eb | ed53cfbfd6cad873a16648cbe9791652bd964f3a | /evaluation.py | 39a37178f7370734f24c2663e2ef95e9f08d6f77 | [] | no_license | marbin2050/tfm_synbio | f82f55284c34fb3ddb0c1e970deef4b0234394e3 | dc60899b611a637b2d2d8eaf668797871df676b2 | refs/heads/master | 2022-12-02T16:54:45.341268 | 2020-07-31T07:46:40 | 2020-07-31T07:46:40 | 260,749,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,707 | py | __author__ = '{Alfonso Aguado Bustillo}'
from sklearn import metrics
from scipy.stats import spearmanr
import numpy as np
from sklearn.metrics import multilabel_confusion_matrix, f1_score, precision_score, recall_score
def evaluate_regressor(partitions, prediction, fitting_time):
    """Summarise regression quality: MAE, MSE, RMSE, Spearman rank
    correlation, the train/test split sizes and the fitting time."""
    y_true = partitions.y_test
    split_info = str(len(partitions.x_train)) + "/" + str(len(partitions.x_test))
    squared_error = metrics.mean_squared_error(y_true, prediction)
    return {
        'train_test_size': split_info,
        'mae': metrics.mean_absolute_error(y_true, prediction),
        'mse': squared_error,
        'rmse': np.sqrt(squared_error),
        'spearman': spearmanr(y_true, prediction),
        'time': fitting_time,
    }
def evaluate_classifier(partitions, prediction, fitting_time):
    """Summarise multi-label classification quality.

    For each label, reports the percentage of correctly classified samples
    (true negatives + true positives over the number of test sequences),
    plus macro-averaged precision/recall/F1 and the rounded fitting time.

    The original hard-coded 23 copy-pasted per-label lines; this version
    loops over however many labels the confusion matrices contain (still
    producing keys '1'..'23' for the original 23-label case).

    Args:
        partitions: object exposing x_train / x_test / y_test attributes.
        prediction: binary indicator matrix predicted for x_test.
        fitting_time: training wall-clock time in seconds.

    Returns:
        dict mapping metric names (and 1-based label indices) to strings.
    """
    # One 2x2 confusion matrix per label.
    confusion = multilabel_confusion_matrix(partitions.y_test, prediction)
    total = prediction.shape[0]  # total number of sequences
    train_test_size = str(len(partitions.x_train)) + "/" + str(len(partitions.x_test))

    results = {'train_test_size': train_test_size}
    for label_index, cm in enumerate(confusion, start=1):
        # cm[0][0] = true negatives, cm[1][1] = true positives.
        accuracy_pct = int((cm[0][0] + cm[1][1]) / total * 100)
        results[str(label_index)] = str(accuracy_pct) + '%'

    results['precision score'] = str(round(precision_score(partitions.y_test, prediction, average="macro"), 2))
    results['recall score'] = str(round(recall_score(partitions.y_test, prediction, average="macro"), 2))
    results['f1 score'] = str(round(f1_score(partitions.y_test, prediction, average="macro"), 2))
    results['time'] = round(fitting_time)
    return results
| [
"alfonso.aguado.bustillo@gmail.com"
] | alfonso.aguado.bustillo@gmail.com |
078ac216c363101e7b9e584cec70ad92886e456a | fa6e3b896657af844bdbc870b5c9e7f62c91a639 | /plot2--attractor.py | 8f11095c98fe739d32b7cad9f6e66f07c5755e64 | [] | no_license | omersayli/signals | ab6ac56f696caa0db4615f8b3e56049e5d1ecb89 | f4ad6213c23e915e5319588262140ca79e1591e9 | refs/heads/master | 2021-03-21T00:06:42.000935 | 2020-06-23T09:15:24 | 2020-06-23T09:15:24 | 247,245,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 17:29:39 2020
@author: omer
"""
# Inspired by
# https://danielredondo.com/posts/20190210_atractor/
from PyQt5 import QtWidgets
from pyqtgraph import PlotWidget, plot
import pyqtgraph as pg
import sys # We need sys so that we can pass argv to QApplication
import os
import time
class MainWindow(QtWidgets.QMainWindow):
    """Window that iterates a 2-D strange attractor and renders it with pyqtgraph."""
    def __init__(self, *args, **kwargs):
        import numpy as np
        # Number of attractor iterations; 1e7 points take minutes to compute.
        M = 10000000
        x = np.zeros(M)
        y = np.zeros(M)
        x[0], y[0] = 1 , 1
        # Attractor coefficients (from the referenced blog post).
        a1= -0.8; a2= 0.4; a3 = -1.1; a4 = 0.5; a5 = -0.6; a6= -0.1; a7 = -0.5;
        a8 = 0.8; a9 = 1.0; a10 = -0.3; a11 = -0.6; a12 = -0.3; a13 = -1.2; a14 = -0.3;
        t_0= time.time()
        print('Computing variables.. may take 5 minutes\n')
        # Matrix computation, gain?  (vectorised variant kept below for reference)
        for i in range(1, M):
            # A = np.array([ [a1, a2, a3, a4, a6, 0, 0],
            #     [a8, a9, a10, 0, 0, a11, a13]])
            # temp_x = np.array([1, x[i-1], y[i-1], np.power(np.abs(x[i-1]),a5) , np.power(np.abs(y[i-1]),a7),
            #     np.power(np.abs(x[i-1]), a12), np.power(np.abs(y[i-1]), a14)]).reshape(-1,1)
            # x[i] , y[i] = A @ temp_x
            # Each point is a nonlinear function of the previous point only.
            x[i] = a1 + a2 * x[i-1] + a3 * y[i-1] + a4 * np.power(np.abs(x[i-1]), a5) + a6 * np.power(np.abs(y[i-1]), a7 )
            y[i] = a8 + a9 * x[i-1] + a10 * y[i-1] + a11 * np.power(np.abs(x[i-1]), a12) + a13 * np.power(np.abs(y[i-1]), a14)
            #x[i] = a1 + a2 * x[i-1] + a3 * y[i-1] + a4 * np.power(np.abs(x[i-1]), a5) + a6 * np.power(np.abs(y[i-1]), a7 )
            #y[i] = a8 + a9 * x[i-1] + a10 * y[i-1] + a11 * np.power(np.abs(x[i-1]), a12) + a13 * np.power(np.abs(y[i-1]), a14)
        print(f'Variables computed, took {time.time() - t_0} seconds\n')
        print('Now plotting, may also take long time ')
        # Keep only points inside the 1%-99% (x) and 5%-95% (y) quantile box.
        mx = np.quantile(x, 0.01)
        mX = np.quantile(x, 0.99)
        my = np.quantile(y, 0.05)
        mY = np.quantile(y, 0.95)
        boolean_ = (x>mx) & (x<mX) & (y>my) & (y<mY)
        super(MainWindow, self).__init__(*args, **kwargs)
        # White background; each point drawn as a tiny blue dot, axes hidden.
        self.graphWidget = pg.PlotWidget()
        self.setCentralWidget(self.graphWidget)
        self.graphWidget.setBackground('w')
        pen = pg.mkPen(color=(255, 255, 255))
        self.graphWidget.plot(x[boolean_],y[boolean_], pen=pen, symbol='o',
                              symbolSize=0.001, symbolBrush=('b'))
        self.graphWidget.showGrid(x=False, y=False)
        self.graphWidget.getPlotItem().hideAxis('bottom')
        self.graphWidget.getPlotItem().hideAxis('left')
def main():
    """Create the Qt application, show the attractor window and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    # Renamed the local from `main` to `window`: it shadowed this function's
    # own name, which is confusing and blocks any recursive/late reference.
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | omersayli.noreply@github.com |
be329e9eedbf5fda027a8a474838442ce78f38ab | 8ba18238dacecd5792c34936f8b4634e600af270 | /data_handling/mover.py | 06a144564330b413dfbb4f5d3e9f91a9c91872db | [] | no_license | jsvay/seisClean | fd040da7762e55e062f9dd80463831f9ce34a446 | 94aca26a7028916bf9fea5242985762e43df37b0 | refs/heads/master | 2020-03-27T21:48:14.821695 | 2018-08-31T13:41:58 | 2018-08-31T13:41:58 | 147,176,756 | 2 | 4 | null | 2018-09-03T08:45:17 | 2018-09-03T08:45:16 | null | UTF-8 | Python | false | false | 828 | py | import os
import shutil
import numpy as np
import random
# Dataset split roots.
train = '/s0/SI/train/'
test = '/s0/SI/test/'
valid = '/s0/SI/valid/'
"""
ranger = np.linspace(0, 999, 1000)
ranger = ranger.astype(int)
ranger = ranger.tolist()
random.shuffle(ranger)
for x in range(150):
    item = ranger.pop()
    shutil.move(train + str(item) + '.npy', valid)
for x in range(50):
    item = ranger.pop()
    shutil.move(train + str(item) + '.npy', test)
"""
# Split the training files into two groups by filename prefix:
# names starting with 'n' vs everything else.
files = os.listdir(train)
y = []
n = []
for file in files:
    if file.startswith('n'):
        n.append(file)
    else:
        y.append(file)
# From each shuffled group move 150 random files to valid/ and 50 to test/.
random.shuffle(n)
random.shuffle(y)
a = [n, y]
for i in a:
    for x in range(150):
        item = i.pop()
        shutil.move(train + item, valid)
    for x in range(50):
        item = i.pop()
        shutil.move(train + item, test)
| [
"sigmunsl@student.matnat.uio.no"
] | sigmunsl@student.matnat.uio.no |
2a0c7b9841901436c823d4d5e7c6ff16f4b4e7cc | 38f765bc213d2c90e46f22922a7425cba28e6f00 | /fetchr/packages/amplify.py | 6632a0726d9f8c686a9b5f19a4ddb18a79ad98d3 | [] | no_license | yejianye/fetchr | 0d0572dc87beba93c7de3ece625682a4d753626e | 90f8922617d35fcf24d902f21af398009d80ded4 | refs/heads/master | 2021-01-10T02:48:41.737322 | 2013-01-19T09:10:12 | 2013-01-19T09:10:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | from fetchr.packages.base import package, SimplePackage
@package
class Amplify(SimplePackage):
    """Amplify.js: a tool set addressing request, store and pub/sub problems."""

    version = '1.1.0'

    @property
    def cdn_urls(self):
        # Single CDN location; $version is substituted by the base package.
        url = '//cdnjs.cloudflare.com/ajax/libs/amplifyjs/$version/amplify.min.js'
        return [url]
| [
"yejianye@gmail.com"
] | yejianye@gmail.com |
931156b973ae3e72b7742ef4fa3275c4739ad032 | ab076c98622fd08653a74377b8a2d8e13099c3c5 | /crawlab_mind/constants/list.py | f4b3a38fcab4d1f56568b8a41bafe489454a3680 | [
"BSD-3-Clause"
] | permissive | sumerzhang/crawlab-mind | 93a9fa9ed4b1c761773d3ed8d54d782927792d48 | c04f57c13da82f0b1b37f5bb0f751e1e923e4cbf | refs/heads/master | 2022-12-11T18:12:35.292757 | 2020-09-07T02:31:18 | 2020-09-07T02:31:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | class ListSelectMethod:
Custom = 'custom'
MeanMaxTextLength = 'mean_max_text_length'
MeanTextTagCount = 'mean_text_tag_count'
| [
"tikazyq@163.com"
] | tikazyq@163.com |
466df9883dd77db154963c24ceaca6e3b9317d43 | f77d436de9db3ec3d0b9cba8baf90895409f5b89 | /Term 1/EntropyCalculator/Misc.py | 39b727fe73f06496e26f612dae1e89355a733f6a | [] | no_license | markusmeyerhofer/SelfDrivingCarND | ecea9f13b018db0df8a718a6c0a8f52818ba04ea | 9421fc46b2a05fb9ca5c7aebd18ca4101d019d4c | refs/heads/master | 2020-07-04T06:10:21.617199 | 2019-08-13T16:17:43 | 2019-08-13T16:17:43 | 202,181,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,624 | py | def combine_and_normalize(self):
if len(self.car_features) > 0:
self.y = np.hstack((np.ones(len(self.car_features)), np.zeros(len(self.non_car_features))))
# Create an array stack of feature vectors
print
self.X = np.vstack((self.car_features, self.non_car_features)).astype(np.float64)
# Fit a per-column scaler
self.X_scaler = StandardScaler().fit(self.X)
# Apply the scaler to X
self.scaled_X = self.X_scaler.transform(self.X)
return
def save_classifier(self):
# save the classifier
# joblib.dump(self.SVC, 'SVC-Classifier.pkl')
print("Saving Classifier...")
svc_bin = {"svc": self.SVC, "scaler": self.X_scaler}
pickle.dump(svc_bin, open("svc_pickle.p", "wb"))
print("Classifier Saved.")
def load_classifier(self):
if os.path.isfile('svc_pickle.p'):
print("Loading Classifier...")
# load it again
# self.SVC = joblib.load('SVC-Classifier.pkl')
dist_pickle = pickle.load(open("svc_pickle.p", "rb"))
self.SVC = dist_pickle["svc"]
self.X_scaler = dist_pickle["scaler"]
print("Classifier Loaded.")
else:
print("No preprocessed classifier available yet. Preprocessing ...")
self.preprocess()
print("Preprocessing completed.")
    def train(self):
        """Fit a LinearSVC on the scaled features, persist it, and print
        timing / accuracy diagnostics.

        Reads self.scaled_X / self.y; sets self.SVC.
        NOTE(review): the printed 32/32/2 HOG parameters are literals here,
        not read from configuration - confirm they match the extractor.
        """
        # Split up data into randomized training and test sets
        rand_state = np.random.randint(0, 100)
        X_train, X_test, y_train, y_test = train_test_split(self.scaled_X, self.y, test_size=0.1, random_state=rand_state)
        print('Using:', 32, 'orientations', 32, 'pixels per cell and', 2, 'cells per block')
        print('Feature vector length:', len(X_train[0]))
        # Use a linear SVC
        self.SVC = LinearSVC()
        # Check the training time for the SVC
        import time
        t = time.time()
        self.SVC.fit(X_train, y_train)
        t2 = time.time()
        # Persist immediately so a crash later does not lose the model.
        self.save_classifier()
        print(round(t2 - t, 2), 'Seconds to train SVC...')
        # Check the score of the SVC
        print('Test Accuracy of SVC = ', round(self.SVC.score(X_test, y_test), 4))
        # Check the prediction time for a single sample
        t = time.time()
        n_predict = 100
        print('My SVC predicts: ', self.SVC.predict(X_test[0:n_predict]))
        print('For these', n_predict, 'labels: ', y_test[0:n_predict])
        t2 = time.time()
        print(round(t2 - t, 5), 'Seconds to predict', n_predict, 'labels with SVC')
    def preprocess(self):
        """Full training pipeline: extract features for both image sets,
        combine/normalize them, then train and persist the classifier."""
        self.car_features = self.extract_features(self.car_images)
        self.non_car_features = self.extract_features(self.non_car_images)
        self.combine_and_normalize()
        self.train()
    def predict_image(self, img):
        """Classify a single image patch with the trained SVC.

        The patch is resized to 64x64, turned into a feature vector and
        scaled with the fitted X_scaler before prediction.

        Returns:
            The SVC prediction, or None (implicit) when no scaler has been
            loaded/trained yet - a message is printed in that case.
        """
        if self.X_scaler is not None:
            img = cv2.resize(img, (64, 64), interpolation=cv2.INTER_NEAREST)
            features = self.extract_image_features(img)
            features = self.X_scaler.transform(np.array(features).reshape(1, -1))
            return self.SVC.predict(features)
        else:
            print("No SVC or X_scaler loaded/trained yet.")
def convert_color(self, img, conv='RGB2YCrCb'):
if conv == 'RGB2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
if conv == 'RGB2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
if conv == 'RGB2LUV':
return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
    def print_bboxes(self, img, bboxes, print=False):
        """Draw each bounding box on ``img``; optionally display the result.

        NOTE(review): the flag parameter named ``print`` shadows the builtin,
        and ``plt.hold`` was removed in modern matplotlib - consider renaming
        the flag and dropping hold() (left unchanged to keep the interface).
        """
        # Blue rectangles, 6 px thick, one per ((x1, y1), (x2, y2)) box.
        for box in bboxes:
            cv2.rectangle(img, (box[0][0], box[0][1]), (box[1][0], box[1][1]), (0, 0, 255), 6)
        if print == True:
            plt.figure(figsize=(30, 10))
            plt.subplot(1, 2, 1)
            plt.hold(True)
            plt.imshow(img)
    def find_cars(self, img, scale=1.5):
        """HOG-subsampling sliding-window search for cars in one image.

        HOG features are computed once for the whole ystart:ystop band and
        then subsampled per window, instead of recomputing them per patch.
        Windows whose combined spatial/histogram/HOG features the SVC
        classifies as positive are collected as bounding boxes (in original
        image coordinates) and also drawn via print_bboxes.

        Args:
            img: input frame (RGB assumed - convert_color uses RGB2YCrCb).
            scale: window scale factor relative to the 64x64 training size.

        Returns:
            list of ((x1, y1), (x2, y2)) boxes for positive detections.
        """
        bboxes = []
        draw_img = np.copy(img)
        # img = img.astype(np.float32) / 255
        img_tosearch = img[self.ystart:self.ystop, :, :]
        ctrans_tosearch = self.convert_color(img_tosearch, conv='RGB2YCrCb')
        if scale != 1:
            # Shrinking the search region is equivalent to growing the window.
            # NOTE(review): np.int was removed in NumPy 1.24 - use int().
            imshape = ctrans_tosearch.shape
            ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))
        ch1 = ctrans_tosearch[:, :, 0]
        ch2 = ctrans_tosearch[:, :, 1]
        ch3 = ctrans_tosearch[:, :, 2]
        # Define blocks and steps as above
        nxblocks = (ch1.shape[1] // self.pix_per_cell) - 1
        nyblocks = (ch1.shape[0] // self.pix_per_cell) - 1
        nfeat_per_block = self.orient * self.cell_per_block ** 2
        # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
        window = 64
        nblocks_per_window = (window // self.pix_per_cell) - 1
        cells_per_step = 2  # Instead of overlap, define how many cells to step
        nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
        nysteps = (nyblocks - nblocks_per_window) // cells_per_step
        # Compute individual channel HOG features for the entire image
        hog1 = self.get_hog_features(ch1, feature_vec=False)
        hog2 = self.get_hog_features(ch2, feature_vec=False)
        hog3 = self.get_hog_features(ch3, feature_vec=False)
        for xb in range(nxsteps):
            for yb in range(nysteps):
                ypos = yb * cells_per_step
                xpos = xb * cells_per_step
                # Extract HOG for this patch
                hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
                hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
                hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
                hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
                xleft = xpos * self.pix_per_cell
                ytop = ypos * self.pix_per_cell
                # Extract the image patch
                subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))
                # Get color features
                spatial_features = self.bin_spatial(subimg)
                hist_features = self.color_hist(subimg)
                # Scale features and make a prediction
                test_features = self.X_scaler.transform(
                    np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
                # test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
                test_prediction = self.SVC.predict(test_features)
                box = []
                if test_prediction == 1:
                    # Map the window back to full-resolution image coordinates.
                    xbox_left = np.int(xleft * scale)
                    ytop_draw = np.int(ytop * scale)
                    win_draw = np.int(window * scale)
                    box = ((xbox_left, ytop_draw + self.ystart), (xbox_left + win_draw, ytop_draw + win_draw + self.ystart))
                    bboxes.append(box)
        self.print_bboxes(draw_img, bboxes)
        return bboxes
"markus.meyerhofer@me.com"
] | markus.meyerhofer@me.com |
aed331d4d6ee44d53a4aaabf482cb87ccd2a77f6 | 376e365edc3953187329ce0a11a92a056a3cb897 | /FUCTION.py | 0c17f8df4d441c3087188d1ff22b36a2fe016d0c | [] | no_license | cordoba14/FOR | 6578465f252553733e869ef073c9faa04a8579eb | c55328fcdce8854884056d67f9b0bbc3e11dd8b0 | refs/heads/master | 2021-02-17T22:43:56.096789 | 2020-03-09T14:42:27 | 2020-03-09T14:42:27 | 245,133,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | # def myfunc():
# print(" python is" + x)
# myfunc()
# x = " awesome"
# def myfunc():
# x = "fantastic"
# print(" python is " + x)
# def greeting()
# print("hello, are you")
# print("my name is bogan")
# print(" its is my pleasure to be working with you")
# greeting()
# print ("what if i want to greet someone else?")
# greeting()
#def greeting(name, sex):
# if sex == "m":
# prefix = "Mr."
# elif sex == "f":
#prefix = "Mrs."
#else:
# prefix = ""
#print("hello {} {}, are you".format(prefix, name))
#print("my name is bogan")
#print(" its is my pleasure to be working with you")
#Person = input("who do you want to greet")
#prefix = input("sex")
#greeting(Person, prefix)
#print("what if i want to greet someone else?")
#greeting(Person, prefix)
# Count the distinct "words" in the file "ols".
# Bug fixed: the original did `fp.contents = fp.read()`, which raises
# AttributeError because file objects do not accept new attributes; the text
# now lives in a plain local and the file is closed deterministically.
with open("ols") as fp:
    contents = fp.read()
unique_words = []  # renamed from `list` to stop shadowing the builtin
cleaned = " "
punct = ",- , ."
# NOTE(review): punct contains a space character, so spaces are stripped
# *before* splitting - kept as in the original; confirm the intent.
for ch in contents:
    if ch not in punct:
        cleaned = cleaned + ch
words = cleaned.split()
for w in words:
    if w not in unique_words:
        unique_words.append(w)
print(" Harry poter has ", len(unique_words), " unique word")
#fp2.write(line.replace(p, " "))
#call the split method on line
#if x in list:
| [
"armandocordoba14@gmail.com"
] | armandocordoba14@gmail.com |
7ec50b205ebdd21727ac18bdc72f99240215aed0 | b8d08e940767d6e525c58da28a22c020e3ff29ce | /app.py | 5d85da3b22b533a438fe642b7b83278b4c232990 | [] | no_license | jakewmayfield/sqlalchemy_challenge | e5ba171771b9e5f0da5461b827d9f0f21b2d9f09 | 82c16ae234ebbec0bee85a0bdd9aac22e94055a6 | refs/heads/main | 2023-08-04T17:37:52.351380 | 2021-09-29T02:02:36 | 2021-09-29T02:02:36 | 410,353,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,639 | py | import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Reflect the SQLite schema and map the two tables queried below.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
# Flask application serving the climate API routes defined below.
app = Flask(__name__)
@app.route("/")
def welcome():
"""List all routes available."""
return (
f"Available Routes: <br/>"
f"Precipitation: /api/v1.0/precipitation<br/>"
f"List of Stations: /api/v1.0/stations<br/>"
f"Temperature for one year: /api/v1.0/tobs<br/>"
f"Temperature stat from the start date(yyyy-mm-dd): /api/v1.0/yyyy-mm-dd<br/>"
f"Temperature stat from start to end dates(yyyy-mm-dd): /api/v1.0/yyyy-mm-dd/yyyy-mm-dd"
)
@app.route('/api/v1.0/precipitation')
def precipitation():
    """Return every (date, precipitation) measurement as a JSON list of
    {"Date": ..., "Precipitation": ...} objects."""
    session = Session(engine)
    sel = [Measurement.date,Measurement.prcp]
    query_result = session.query(*sel).all()
    session.close()
    precipitation = []
    for date, prcp in query_result:
        prcp_dict = {}
        prcp_dict["Date"] = date
        prcp_dict["Precipitation"] = prcp
        precipitation.append(prcp_dict)
    return jsonify(precipitation)
@app.route('/api/v1.0/stations')
def stations():
    """Return every weather station as JSON: [{"Station": id, "Name": name}].

    Only the two columns actually serialised are selected now; the original
    also fetched latitude/longitude/elevation and then discarded them.
    """
    session = Session(engine)
    query_result = session.query(Station.station, Station.name).all()
    session.close()
    stations = [{"Station": station, "Name": name} for station, name in query_result]
    return jsonify(stations)
@app.route('/api/v1.0/tobs')
def tobs():
    """Return the final year of temperature observations as JSON.

    The window is the 12 months preceding the most recent date present in
    the measurement table.
    """
    session = Session(engine)
    recent_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]
    most_recent_date = dt.datetime.strptime(recent_date, '%Y-%m-%d')
    # One year before the newest measurement date.
    query_date = dt.date(most_recent_date.year -1, most_recent_date.month, most_recent_date.day)
    sel = [Measurement.date,Measurement.tobs]
    query_result = session.query(*sel).filter(Measurement.date >= query_date).all()
    session.close()
    tobs_all = []
    for date, tobs in query_result:
        tobs_dict = {}
        tobs_dict["Date"] = date
        tobs_dict["Tobs"] = tobs
        tobs_all.append(tobs_dict)
    return jsonify(tobs_all)
@app.route('/api/v1.0/<start>')
def get_start(start):
    """Return min/avg/max temperature from the `start` date (yyyy-mm-dd) on.

    NOTE(review): the loop variables min/avg/max shadow the builtins of the
    same names; left unchanged to keep this route's behaviour identical.
    """
    session = Session(engine)
    query_result = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).all()
    session.close()
    tobs_all = []
    for min,avg,max in query_result:
        tobs_dict = {}
        tobs_dict["Min"] = min
        tobs_dict["Average"] = avg
        tobs_dict["Max"] = max
        tobs_all.append(tobs_dict)
    return jsonify(tobs_all)
@app.route('/api/v1.0/<start>/<stop>')
def get_start_stop(start,stop):
    """Return min/avg/max temperature between `start` and `stop` (inclusive),
    both given as yyyy-mm-dd.

    NOTE(review): min/avg/max shadow the builtins; kept as in the original.
    """
    session = Session(engine)
    query_result = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).filter(Measurement.date <= stop).all()
    session.close()
    tobs_all = []
    for min,avg,max in query_result:
        tobs_dict = {}
        tobs_dict["Min"] = min
        tobs_dict["Average"] = avg
        tobs_dict["Max"] = max
        tobs_all.append(tobs_dict)
    return jsonify(tobs_all)
if __name__ == '__main__':
app.run(debug=True) | [
"jakewmayfield@gmail.com"
] | jakewmayfield@gmail.com |
0679de2a277237837b78807b1a26166544904efd | e33c2de2a932ed44302eac005dcdfb7aec751c2d | /Lesson3/diffequ18.py | 13d53cc7ee2f6a83a24b4fac80b29f4622497e6e | [] | no_license | thuythai/Udacity-Diff-Equ-in-Action | 8126144de57d460ed019ec0663747f1fb000b763 | a4fb758f5d09ae6c0070aa409d5dacf64870c952 | refs/heads/master | 2021-09-03T15:25:49.209998 | 2018-01-10T04:57:51 | 2018-01-10T04:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | # Udacity problem (from Problem Set 3)
# PROBLEM 1
#
# Include waning immunity in the SIR model, so
# that people can eventually become susceptible
# again. Define the time constant waning_time
# such that eventually there are twice as many
# recovered people as there are infected people.
import numpy
import matplotlib.pyplot
# Euler time-stepping parameters.
h = 0.5  # step size in days
end_time = 60.  # total simulated time in days
num_steps = int(end_time / h)
times = h * numpy.array(range(num_steps + 1))

def waning():
    """SIR model extended with waning immunity (recovered flow back to S).

    waning_time is set to twice infectious_time so that, eventually, the
    recovered pool is twice the size of the infected pool.

    Returns:
        (s, i, r): arrays of length num_steps + 1 holding the susceptible,
        infected and recovered counts at each Euler step.
    """
    transmission_coeff = 5e-9  # 1 / (day * person)
    infectious_time = 5.  # days spent infectious
    waning_time = 2 * infectious_time  # days until immunity wanes

    n_points = num_steps + 1
    s = numpy.zeros(n_points)
    i = numpy.zeros(n_points)
    r = numpy.zeros(n_points)
    # Initial state: 1e8 people total, 1e5 infected, 1e6 already recovered.
    s[0] = 1e8 - 1e6 - 1e5
    i[0] = 1e5
    r[0] = 1e6

    for k in range(num_steps):
        # Compartment flows over one step of length h.
        infections = h * transmission_coeff * s[k] * i[k]
        recoveries = h / infectious_time * i[k]
        waned = h / waning_time * r[k]
        s[k + 1] = s[k] - infections + waned
        i[k + 1] = i[k] + infections - recoveries
        r[k + 1] = r[k] + recoveries - waned
    return s, i, r

s, i, r = waning()
def plot_me():
    """Plot the S/I/R curves computed above against time (blocks on show())."""
    s_plot = matplotlib.pyplot.plot(times, s, label='S')
    i_plot = matplotlib.pyplot.plot(times, i, label='I')
    r_plot = matplotlib.pyplot.plot(times, r, label='R')
    matplotlib.pyplot.legend(('S', 'I', 'R'), loc='upper right')
    axes = matplotlib.pyplot.gca()
    axes.set_xlabel('Time in days')
    axes.set_ylabel('Number of persons')
    # NOTE(review): the xmin/ymin keywords were removed in newer matplotlib
    # (use left=/bottom=); kept for compatibility with the original setup.
    matplotlib.pyplot.xlim(xmin=0.)
    matplotlib.pyplot.ylim(ymin=0.)
    matplotlib.pyplot.show()
plot_me()
| [
"noreply@github.com"
] | thuythai.noreply@github.com |
e96a7477cee560e1e780cdf761cd950154211ed8 | 4911ef1024c44f9695203f26588ed67d1a96a87e | /joystick.py | 3cfee64ae5e0e058c9eede475c4207e7caaac39e | [] | no_license | jorgenmiller/SenseHat | 9342fcdff719a9f5502cf5cd3259579665d1638e | 187ce095fdfaf372c6b62264869795a46804c64c | refs/heads/master | 2020-04-17T12:28:30.915815 | 2019-06-07T21:37:35 | 2019-06-07T21:37:35 | 166,580,656 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from sense_hat import SenseHat
import time
# Initialise the Sense HAT and blank its 8x8 LED matrix.
sense = SenseHat()
sense.clear() #clear LED matrix
# Block forever, echoing every joystick event (press/release/hold) to stdout.
while True:
    print(sense.stick.wait_for_event()) #print each event triggered
| [
"jorgen.j.miller@gmail.com"
] | jorgen.j.miller@gmail.com |
afbcbf4ec0e6b37b4933d77a4afd25197f780bb3 | c80a0452d890dd6248df5dc7053d806f2d0f2a0c | /py/problem_crowler.py | 30ce3d9fb047c8725f74934566b2c9293d2cb405 | [] | no_license | emetko/project-euler | 78db5bd4cc634b3186352d85e5a1facbc1dbe062 | aa89f626aa2c4f39b92aea6863f53a75cfff78b6 | refs/heads/master | 2021-01-21T17:45:53.769449 | 2014-06-05T15:17:52 | 2014-06-05T15:17:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #!/usr/bin/env python3
from urllib import urlopen
from bs4 import BeautifulSoup
from string import Template
def get_problem_info(num):
    """Scrape title/description of Project Euler problem ``num``."""
    page = urlopen('http://projecteuler.net/problem=' + str(num)).read()
    content = BeautifulSoup(page).find(id='content')
    return {
        'title': content.h2.text,
        'description': content.find(class_='problem_content').text.strip('\r\n'),
        'problem_num': num
    }
def gen_problem(num):
    """Render euler_NNN.py from file_template.py for problem ``num``.

    Fixes: the Python 2 ``print info`` statement is replaced with the
    function form (valid on both 2 and 3, and the shebang requests
    python3), and both file handles are closed via context managers
    (the originals were leaked).

    NOTE(review): writing ``f.encode('UTF8')`` to a text-mode file only
    works under Python 2; under Python 3 open the file in 'wb' mode or
    drop the encode - confirm which interpreter this actually targets.
    """
    info = get_problem_info(num)
    print(info)
    with open('file_template.py') as template_file:
        t = Template(template_file.read())
    f = t.safe_substitute(info)
    filename = "euler_{0:03d}.py".format(num)
    with open(filename, 'w') as w:
        w.write(f.encode('UTF8'))
# Generate solution stubs for problems 77 through 80.
for i in range(77,81):
    gen_problem(i)
| [
"emetko@gmail.com"
] | emetko@gmail.com |
9651a4fc490d15703e32a4df5fb8fa9be4af3ba9 | 09eee2435b4a8b2d8b8acf9e6aeb13285778cbb4 | /script/part2_test1/Junacheng_Part2_Test1.py | db6caead7dc9cfa047dd5c7d7b863b6eebc3e36f | [] | no_license | wareenpower/AutoCheck | 85046a7b09fcf4f5c208c64bcee9d0035437828a | 8774208b4e51a279674c1a1a1d25cd1a843dbc90 | refs/heads/master | 2020-04-06T13:32:32.469483 | 2019-02-25T07:23:15 | 2019-02-25T07:23:15 | 157,504,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from collections import Counter
import re
class BStringObjectTest:
    """Answer for StringObjectTest (2018-11-01): character-frequency counting."""

    def __init__(self):
        self.__doc__ = "This is the answer for StringObjectTest in 2018-11-01 !"
        self.__author = "Junacheng"

    @staticmethod
    def compute_frequency(str_input):
        """Count the distinct characters in ``str_input``.

        Whitespace and a fixed set of ASCII/CJK punctuation characters are
        stripped first, then the remaining characters are tallied.

        Args:
            str_input: the string to analyse.

        Returns:
            (count, mapping): the number of distinct characters and a dict
            mapping each character to its frequency.

        Raises:
            TypeError: if ``str_input`` is not a str. (The original had a
            no-op ``pass`` here and let ``re.sub`` raise later instead.)

        Example:
            >>> BStringObjectTest.compute_frequency("wo are you")[0]
            7
        """
        if not isinstance(str_input, str):
            raise TypeError("str_input must be a str, got %r" % type(str_input))
        final_str = re.sub(r"[\s+.;!/,$&%^*()<>\[\]\"\'?@|:`~{}#]+|[—!\\,。=?、:“”‘’¥…()《》【】]", "", str_input)
        map_character_frequency = Counter(final_str)
        return len(map_character_frequency), dict(map_character_frequency)
# if __name__ == "__main__":
print BStringObjectTest.compute_frequency("We are fasdasdfasdfadf2q341234adf,./,/ipmissy1122—_.") | [
"33952428+wareenpower@users.noreply.github.com"
] | 33952428+wareenpower@users.noreply.github.com |
139b52a654e7e288a4b0a4ebaec109926cb274a6 | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.cloudwatch.EventPermission.organization-access-python/__main__.py | 96a5858ff755253665d97a47249d9fe835f082f2 | [] | no_license | ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | import pulumi
import pulumi_aws as aws
# Allow any principal in the AWS Organization (matched through the
# aws:PrincipalOrgID condition) to put events on the default event bus.
organization_access = aws.cloudwatch.EventPermission("organizationAccess",
    condition={
        "key": "aws:PrincipalOrgID",
        "type": "StringEquals",
        # NOTE(review): `aws_organizations_organization` is not defined in
        # this snippet - presumably injected by template conversion; confirm.
        "value": aws_organizations_organization["example"]["id"],
    },
    principal="*",
    statement_id="OrganizationAccess")
| [
"jvp@justinvp.com"
] | jvp@justinvp.com |
6d3d6d1292a7c1f4e7e3f6355ceaf2c1855422f4 | 928d4d6c09a9d9422e69d072f081f4182cc41a1c | /notebooks/mul.py | ef423387c5da8018aaae608b9ff00cdae61c4224 | [] | no_license | Ivandanilovich/opendemo | 679822aaceb10ff91ccc3b4201b9438b861eae91 | 272d10997973360611a0e24ae83fe6fd9b9041c4 | refs/heads/master | 2022-12-30T08:19:12.179985 | 2020-10-12T03:24:41 | 2020-10-12T03:24:41 | 300,537,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | import os
from multiprocessing import Pool
from alrosademo.ImageProcessor import ImageProcessor
from alrosademo.LandmarkDetector import LandmarkDetector
from alrosademo.SSDDetector import SSDDetector, SSDBox
from alrosademo.VideoProcessor import VideoProcessor
import time
if __name__ == '__main__':
    # Time the palm-detection + hand-landmark pipeline over one video's frames.
    frames = VideoProcessor.split_videoget_frames('f781d7cc10fc41a7a24d5dcfb38e1d07')
    len(frames)  # NOTE(review): result unused - probably leftover debugging.
    start = time.time()
    imageProcessor = ImageProcessor()
    ssd = SSDDetector('../models/palm_detection_builtin.tflite')
    landmark = LandmarkDetector('../models/hand_landmark.tflite')
    for image in frames:
        original_image, padded_image, norm_image, pad = imageProcessor.preprocess_image(image)
        # Per-frame accumulators for every detected palm box.
        stored_box = []
        stored_keys = []
        stored_handness = []
        stored_handflag = []
        for box in ssd.predict(norm_image):
            stored_box.append(box)
            ssdbox = SSDBox(box, pad, padded_image.shape)
            k = ssdbox.det
            # Rotate the crop so the hand is upright before landmark detection.
            angle = ssdbox.calc_angle()
            rotated_image = imageProcessor.rotate_image(
                original_image,
                angle,
                k['center'].copy())
            keys, handness, handflag = landmark.predict(rotated_image, ssdbox)
            stored_keys.append(keys)
            stored_handness.append(handness)
            stored_handflag.append(handflag)
    end = time.time()
    # Total wall-clock seconds for the whole pipeline.
    print(end - start)
| [
"ivandanilovich99@gmail.com"
] | ivandanilovich99@gmail.com |
7c475267b567ec4ef4bf1a2b5a74d63149e855eb | ae373712c4091dce7e00617b453728960b88f300 | /cryptodetector/methods/hello_world/hello_world_method.py | 438d8e02363ed84727c2f29af5eb901cfd704818 | [
"MIT"
] | permissive | mahmad2504/nscriptwallet-old | ddbfe66ad166667ac74f5960ed0b50d97da0c117 | 50968fddb703751d5cff35bfa40ef4543c045b03 | refs/heads/master | 2023-06-08T18:02:11.719274 | 2021-06-09T15:51:58 | 2021-06-09T15:51:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,532 | py | """
Copyright (c) 2017 Wind River Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, either express or implied.
"""
import re
from cryptodetector import Method
class HelloWorldScanner(Method):
    """Template scanner that reports every occurrence of "Hello, World"."""

    method_id = "hello_world"

    def supports_scanning_file(self, language):
        """This method supports scanning all text files.

        Args:
            language: (string) see langauges.py

        Returns:
            (bool) whether it supports scanning a file in the given language
        """
        return language.is_text

    def quick_search(self, content, language):
        """Cheap pre-check: does the content contain "Hello, World" anywhere?

        Args:
            content: (string) the file content in which to search
            language: (string) see langauges.py

        Returns:
            (bool) whether a match exists anywhere in the content
        """
        return content.find("Hello, World") != -1

    def search(self, content, language):
        """Find every occurrence of "Hello, World" in the content.

        Args:
            content: (string) the file content in which to search
            language: (string) see langauges.py

        Returns:
            (list) one dict per match with the output fields: evidence_type,
            matched_text and the begin/end offsets inside the file.
        """
        return [
            {
                "evidence_type": "generic",
                "matched_text": "Hello, World",
                "file_index_begin": hit.start(),
                "file_index_end": hit.end(),
            }
            for hit in re.finditer("Hello, World", content)
        ]
| [
"mumtazahmad2504@gmail.com"
] | mumtazahmad2504@gmail.com |
74361fda2476e8a8ecc212047504d3bb4c0708d7 | 245c98f9e117fe912b1d115f3b5607068010eced | /stardate/migrations/0005_auto_20160109_1343.py | aa96388d39b25b8c03f0c9132493089dfc566c6d | [] | permissive | blturner/django-stardate | 1d4e318fbb3d19245731dc84b457466ad78d7c3b | d3edf99aafd2233a13cd3b86bae3628803eb64ff | refs/heads/master | 2021-11-30T12:44:42.422125 | 2021-03-07T00:03:51 | 2021-03-07T00:03:51 | 10,006,257 | 0 | 0 | BSD-3-Clause | 2021-11-05T00:02:36 | 2013-05-11T22:39:27 | Python | UTF-8 | Python | false | false | 563 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-09 21:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stardate', '0004_auto_20160109_1321'),
]
operations = [
migrations.AlterField(
model_name='post',
name='slug',
field=models.SlugField(),
),
migrations.AlterUniqueTogether(
name='post',
unique_together=set([('blog', 'slug')]),
),
]
| [
"benturn@gmail.com"
] | benturn@gmail.com |
f65a438757bcaf65aa57641968314c0051f60a58 | 3c0832e0608deed84e03c1bf6982b5f6d826b8fd | /Codeforces/CF768-D2-A/Oath_of_the_Nights_Watch.py | 9b9454faf2757d4ba86184da3c91271030fd88ce | [] | no_license | HaoES/ProblemSolving | d7f179a5526de9df14b6ea832f03987f57c77ea2 | fa18864e4e28559239b6f803637298f41308dbf4 | refs/heads/main | 2023-03-07T00:04:42.641375 | 2021-02-22T11:03:01 | 2021-02-22T11:03:01 | 323,340,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | #!/usr/bin/env python3
import sys
input = sys.stdin.readline
############ ---- Input Functions, coutery of 'thekushalghosh' ---- ############
def inp():
return(int(input()))
def inlt():
return(list(map(int, input().split())))
def insr():
s = input()
return(list(s[:len(s) - 1]))
def invr():
return(map(int, input().split()))
if __name__ == "__main__":
n = inp()
stewards = inlt()
count = 0
maximum = max(stewards)
minimum = min(stewards)
for steward in stewards:
if steward < maximum and steward > minimum:
count += 1
print(count)
| [
"hamza.essamaali@gmail.com"
] | hamza.essamaali@gmail.com |
0413dd82bd1d093951ad7e81be7ed7194ca76528 | c5f926f853eaca8d42d8cb7405426698f40e3bb9 | /MySineWave.py | aa14eb92e8ab577142855e327628e08d0d12f6b2 | [] | no_license | Astronomer-Mohit/firstrepo | 538bd11b0cf813617bbeb8d003e1aa30e8d6452d | 786c6ebf54a2ed3d0c283992d491bedcb3d008c0 | refs/heads/master | 2021-06-03T18:33:38.455137 | 2016-08-12T16:27:18 | 2016-08-12T16:27:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | #pi = 3.14159
def MySineWave(x):
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(-np.pi*2, np.pi*2, 0.1)
plt.plot(t, np.cos(2*np.pi*(t/x)))
plt.show()
| [
"f2012651@pilani.bits-pilani.ac.in"
] | f2012651@pilani.bits-pilani.ac.in |
43debcbdd0da1d6b371925257656e247f95fd2b8 | 028b24c8a3772c788819e2cd820fee73cade681a | /GameClient/Framework/BuildExcel/Windows/py3-Tools/BuildConfig.py | 570d51fd1ca4f7b561b6446ad8d6608e0d0c2723 | [] | no_license | zyxyuanxiao/GameCompanyTechnicalCode | 382cba24523cdbfe09932f540bca0a1ca2dd7ab6 | eec60e4941dd15adf4948af44bd423d65239aafa | refs/heads/main | 2023-02-25T08:25:00.402931 | 2021-01-29T13:52:26 | 2021-01-29T13:52:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,786 | py | from ExcelParser import Sheet
from ExcelParser.Env import ParsingEnv, ConfigMgr
import traceback
'''
TODO:目前有一些自动生成的文件是废弃的()
'''
import csv
import os
import copy
import subprocess
from Misc.Util import *
from tkinter import messagebox
class BuildConfig:
def __init__(self, absolutePath, region, CSharpLuaFlag, isBuildAll):
self.region = region
self.CSharpLuaFlag = str(CSharpLuaFlag).split(",") # 是否打 Lua 或者 CS
self.absolutePath = absolutePath
self.isBuildAll = isBuildAll
# 遍历当前文件夹,并取出所有的 csv 文件
all_files = list(set(GetDirFiles(absolutePath)))
all_csv_files = [x for x in all_files if x.split('.')[-1] == 'csv']
self.absolute_paths = []
for csv_file_path in all_csv_files:
if "$" in csv_file_path:
continue
if "~" in csv_file_path:
continue
if "@" in csv_file_path:
continue
if "#" in csv_file_path:
continue
# 记录所有的csv路径 self.absolute_paths
self.absolute_paths.append(csv_file_path)
def Build(self):
if self.isBuildAll:
path = os.path.dirname(os.getcwd()) + "/Assets/lua/protobuf_conf_parser/config.lua"
with open(path, "w+", encoding="utf8") as lua_file:
pass
path = os.path.dirname(
os.getcwd()) + "/Assets/Extensions/Protobuf/protobuf_config/pbconf_res/gdata_bytes_list.bytes"
with open(path, 'w+') as bytes_list_file:
pass
for path in self.absolute_paths:
absolute_path = path.replace('\\', '/')
absolute_path = absolute_path.replace('//', '/')
config_name = absolute_path.split('/')[-1].split('.')[0]
# 如果是翻译表变化了,只能通过强制打表打,潜规则
if ParsingEnv.GetLocalization().HasLocaliztionInfo(config_name) \
or ParsingEnv.GetLocalization_value().HasLocaliztionInfo(config_name):
ParsingEnv.GetLocalization().PrepareForParseConfig(config_name, self.region)
try:
self.__DoBuild(config_name, path)
print("打表:{} 成功!".format(config_name))
except ValueError as Argument:
print("打表:{} 失败!, 错误为: {}".format(config_name, Argument))
print(traceback.format_exc())
messagebox.showerror(config_name,
"如果修复不好,请找程序处理:\n" + "打表:{} 失败!, 错误为: {}\n".format(config_name,
Argument) + "请先检查数据是否为空\n" + str(
traceback.format_exc()))
raise
except Exception as e:
if str(e) == 'Server Is Need':
# 如果报错为不需要打表,则跳出打表
print("服务器使用,客户端无需打这张表:" + config_name)
pass
else:
print("打表:{} 失败!, 错误为: {}".format(config_name, e))
print(traceback.format_exc())
messagebox.showerror(config_name,
"如果修复不好,请找程序处理:\n" + "打表:{} 失败!, 错误为: {}".format(config_name, e) + str(
traceback.format_exc()))
raise
finally:
pass
def __DoBuild(self, config_name, full_path):
sheet = Sheet.Sheet()
sheet.Init(config_name, full_path)
# print(self.CSharpLuaFlag, config_name, full_path)
#有服务器使用的数据
strLine = str(sheet.first_line)
#有客户端使用的数据
if "C" in strLine:
for flag in self.CSharpLuaFlag:
if flag == "Lua":
self.__Build_lua(sheet)
if flag == "CSharp":
self.__Build_cs(sheet)
# 不管怎样,打 lua或者 打 cs 都会打二进制文件
self.__Build_Client_Binary(sheet)
# 不管怎样,都会打这个gdata_bytes_list
self.__Build_GenBytesList(sheet)
if "S" in strLine:
# 不管怎样,都会打服务器的proto文件
self.__Build_Server_Proto(sheet)
# 不管怎样,都会打服务器的二进制文件
self.__Build_Server_Bin(sheet)
def __Build_cs(self, sheet):
path = os.path.dirname(os.getcwd()) + "/Assets/Extensions/Protobuf/protobuf_config/ProtoGen/"
file_path = path + sheet.config_name.lower() + ".cs"
# 如果不存在则创建
if not os.path.exists(file_path):
with open(file_path, 'w+') as csharp_file:
pass
with open(file_path, 'r+', encoding='utf-8') as csharp_file:
str_new = sheet.GenCSharpStr()
str_old = csharp_file.read()
if str_new != str_old:
try:
csharp_file.seek(0)
csharp_file.truncate()
csharp_file.write(sheet.GenCSharpStr())
except:
csharp_file.close()
os.remove(file_path)
raise
def __Build_lua(self, sheet):
path = os.path.dirname(os.getcwd()) + "/Assets/lua/protobuf_conf_parser/config.lua"
with open(path, "r+", encoding="utf8") as lua_file:
delimiter = "---------" + sheet.config_name.lower() + "---------"
allContent = lua_file.read()
if allContent.find(delimiter) > 0:
allListContent = str(allContent).split(delimiter)
# 旧文件里面的数据和当前打出的数据不一致时,重新给其赋值
if allListContent[1] != sheet.GenLuaStr().strip():
allListContent[1] = delimiter + "\n" + sheet.GenLuaStr().strip() + "\n" + delimiter + "\n" + "\n"
allContent = "".join(allListContent)
allContent = allContent.replace("\n\n\n\n", "\n\n")
lua_file.seek(0)
lua_file.truncate()
lua_file.write(allContent)
else:
# 没有找到当前打表的 lua 数据,就需要向里面写入
lua_file.write(delimiter + "\n")
lua_file.write(sheet.GenLuaStr().strip() + "\n")
lua_file.write(delimiter + "\n" + "\n")
def __Build_Client_Binary(self, sheet):
path = os.path.dirname(os.getcwd()) + "/Assets/Extensions/Protobuf/protobuf_config/pbconf_res/"
file_path = path + 'dataconfig_{}.bytes'.format(sheet.config_name.lower())
with open(file_path, 'wb+', ) as bin_file:
try:
sheet.Marshal(bin_file, self.region)
except:
bin_file.close()
os.remove(file_path)
raise
def __Build_GenBytesList(self, sheet):
path = os.path.dirname(
os.getcwd()) + "/Assets/Extensions/Protobuf/protobuf_config/pbconf_res/gdata_bytes_list.bytes"
name = 'dataconfig_{}.bytes'.format(sheet.config_name.lower())
with open(path, 'r+') as bytes_list_file:
str_bytes = bytes_list_file.read()
if "False" not in str_bytes:
str_bytes = "False" + "\n" + str_bytes
if name not in str_bytes:
str_bytes = str_bytes + "\n" + name
bytes_list_file.seek(0)
bytes_list_file.truncate()
bytes_list_file.write(str_bytes)
# 打服务器的proto文件
def __Build_Server_Proto(self, sheet):
path = os.path.dirname(os.getcwd()) + "/BuildDataConfig/Data/Server/server_proto/"
file_path = path + 'dataconfig_{}.proto'.format(sheet.config_name.lower())
with open(file_path, 'w+', encoding='utf-8') as proto_file:
proto_file.write(sheet.GenProto())
# 打服务器的二进制文件
def __Build_Server_Bin(self, sheet):
path = os.path.dirname(os.getcwd()) + "/BuildDataConfig/Data/Server/server_bytes/" + self.region + "/"
bin_file_path = path + 'dataconfig_{}.bytes'.format(sheet.config_name.lower())
with open(bin_file_path, 'wb+', ) as bin_file:
try:
sheet.MarshalPB(bin_file, self.region)
except:
bin_file.close()
os.remove(bin_file_path)
raise | [
"1487842110@qq.cin"
] | 1487842110@qq.cin |
1fc2f47ad8904ffc5836d4fc5bec9a5ec58ff618 | be9cf12ec824b3cb9f57446adde579bb6f5ac43a | /todoList/urls.py | dc36902d49a619b706298095eb85e3a2248fecd8 | [] | no_license | qwslmq/TodoList | c84b16558076bf615b886eee7311b970bc0eb2c9 | 493410d3c769e519fa521bea9f166c160a6e853e | refs/heads/master | 2020-03-14T16:14:45.429355 | 2018-07-18T02:48:11 | 2018-07-18T02:48:11 | 131,694,234 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""todoList URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from todoApp import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^index/', views.index),
url(r'^login/', views.login),
url(r'^register/', views.register),
url(r'^logout/', views.logout),
url(r'^todoItems/',views.todoItems)
]
| [
"qwslmq@126.com"
] | qwslmq@126.com |
9c2d9a898db1b9765259f287859f7910b04c5de5 | c2092dbf89e74e1484f0468d21badfda2eafe19d | /backend/users/migrations/0002_auto_20201113_0020.py | 2383bc55cf5cafb959552769c381e49d82d28c70 | [] | no_license | crowdbotics-apps/jd-searcher-22572 | 1a47422d2c4f393562fc4389422b0906fed594b8 | a194536595837400bf0e1fe0c1be5bbd262ff6b2 | refs/heads/master | 2023-01-06T21:14:55.102493 | 2020-11-13T00:20:57 | 2020-11-13T00:20:57 | 312,428,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # Generated by Django 2.2.17 on 2020-11-13 00:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='user',
name='timestamp_created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
41d7d2427377bac87ea25552fed71f5f62f8a20a | ca2a722448cef975a7b5aa4aa6c952c75aaffb68 | /python_sokusyu/modules1.py | f9698db28b27deb1074afdac3280df1f015c7167 | [] | no_license | ChibaYuki347/machin_learning | 53552a26079dac8668edaf6eb735f9ee98f559fa | 507e67227165a90799b0148d99042770921e04e6 | refs/heads/master | 2020-07-14T16:43:25.916712 | 2019-08-31T01:45:12 | 2019-08-31T01:45:12 | 205,355,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | # modules1.py
def mul(a,b):
return a * b
def div(a,b):
return a // b | [
"y.chiba208@gmail.com"
] | y.chiba208@gmail.com |
7ffac13b474c6809ed7bf4cc8c0cea2d8bc3ac77 | 6da62d86cfcc4552655d4eeb33ec0453743e2c9f | /cut_picture/ImageProcess.py | c5b0379a2130f0199773b774f92c2638a33638b7 | [] | no_license | FrankSun96/FYP_GANs | 826fa0460674501a3d148d4402d48653add2d7a6 | 6ea3ca70247ed5c4389ecec8300853fe1c571714 | refs/heads/master | 2020-04-18T02:37:07.970317 | 2019-04-01T13:04:07 | 2019-04-01T13:04:07 | 167,169,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py |
# coding: utf-8
# In[ ]:
import os
import sys
from PIL import Image
from ImageCut import Graphics
def list_img_file(directory):
"""列出目录下所有文件,并筛选出图片文件列表返回"""
old_list = os.listdir(directory)
# print old_list
new_list = []
for filename in old_list:
name, fileformat = filename.split(".")
if fileformat.lower() == "jpg" or fileformat.lower() == "png" or fileformat.lower() == "gif":
new_list.append(filename)
# print new_list
return new_list
def make_directory(directory):
"""创建目录"""
os.makedirs(directory)
def directory_exists(directory):
"""判断目录是否存在"""
if os.path.exists(directory):
return True
else:
return False
def cut_photo():
"""裁剪算法
参数:None
------
调用Graphics类中的裁剪算法,将src_dir目录下的文件进行裁剪(裁剪成正方形)
"""
src_dir = os.getcwd() + "/photos/"
dst_dir = os.getcwd() + "/cropped_photos/"
if directory_exists(src_dir):
if not directory_exists(src_dir):
make_directory(src_dir)
# business logic
file_list = list_img_file(src_dir)
# print file_list
if file_list:
# print_help()
for infile in file_list:
img = Image.open(src_dir + infile)
#Graphics(infile=src_dir + infile, outfile=src_dir + infile).cut_by_ratio() #原地替换
Graphics(infile=src_dir + infile, outfile=dst_dir + infile).cut_by_ratio() #图片转储
else:
pass
else:
print("source directory not exist!")
if __name__ == "__main__":
cut_photo()
| [
"franksun1996@gmail.com"
] | franksun1996@gmail.com |
3ec6fd0605e0331c6507d84973d381ea376a8eeb | 6b27bc09fc97a4d413cf5073fa724b9821301463 | /Odd_or_Even.py | 711ce8fd60dedacba5786b444c797ef26ce6cd26 | [] | no_license | jusKutz/ControlFlow_and_LogicalOperators | c7e8993479f3d0fc741ba7e4fa880333caf8a71d | dc396e1c20e1b174c0c82dc211b576997ce69b55 | refs/heads/master | 2023-02-09T02:16:48.052386 | 2021-01-05T14:24:44 | 2021-01-05T14:24:44 | 326,247,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | # 🚨 Don't change the code below 👇
number = int(input("Which number do you want to check? "))
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
result = number % 2
if result == 0 :
print ("This is an even number.")
else:
print ("This is an odd number.")
| [
"heartsniper0403@gmail.com"
] | heartsniper0403@gmail.com |
e0360292e7da702547ad0ee01da3fe76921d065b | 28dff6f1707ebb62213d89fae42ae087add5ba9b | /Converters.py | 52c5047c74148c816adbc9a5e753639df0c00510 | [] | no_license | theprofi/Trump-tweet-pediction | 3d30fd722b95592a6c8bb31d1e79c08a8dbd8e16 | c2374ac3c71457b0339631397fde5b0e3c3b2b89 | refs/heads/master | 2020-03-23T03:25:43.955875 | 2018-07-26T13:31:33 | 2018-07-26T13:31:33 | 141,030,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,304 | py | import string
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import decomposition
from ParseOrganize import BEFORE, AFTER
class TweetToLetvec:
def __init__(self,):
self.len_before = 0
self.len_after = 0
self.train_x = []
def init(self, tweets_before, tweets_after):
for tweet in tweets_before + tweets_after:
self.train_x.append(self.convert(tweet))
self.len_before = len(tweets_before)
self.len_after = len(tweets_after)
def get_train_xy(self, ):
train_y = [BEFORE] * self.len_before + [AFTER] * self.len_after
return self.train_x, train_y
def convert(self, tweet):
# The vector is of the size of the english alphabet + 1 for space char
letvec = np.zeros(27)
for letter in tweet:
if letter.lower() in 'abcdefghijklmnopqrstuvwxyz':
letvec[string.ascii_lowercase.index(letter.lower())] += 1
elif letter.lower() == " ":
letvec[26] += 1
return letvec
class TweetToBow:
def __init__(self):
self.len_before = 0
self.len_after = 0
self.train_x = []
self.vectorizer = None
def init(self, tweets_before, tweets_after):
self.vectorizer = CountVectorizer()
self.train_x = self.vectorizer.fit_transform(tweets_before + tweets_after).toarray()
self.len_before = len(tweets_before)
self.len_after = len(tweets_after)
return self
def get_train_xy(self, ):
train_y = [BEFORE] * self.len_before + [AFTER] * self.len_after
return self.train_x, train_y
def convert(self, tweet):
return self.vectorizer.transform([tweet]).toarray()[0]
class TweetToBowWithPca:
def __init__(self, new_dim=900):
self.len_before = 0
self.len_after = 0
self.train_x = []
self.train_y = []
self.new_dim = new_dim
self.pca = None
self.ttb = None
def init(self, tweets_before, tweets_after):
self.ttb = TweetToBow().init(tweets_before, tweets_after)
self.train_x, self.train_y = self.ttb.get_train_xy()
self.pca = decomposition.PCA(n_components=self.new_dim)
self.train_x = self.pca.fit_transform(self.train_x)
self.len_before = len(tweets_before)
self.len_after = len(tweets_after)
return self
def get_train_xy(self, ):
train_y = [BEFORE] * self.len_before + [AFTER] * self.len_after
return self.train_x, train_y
def convert(self, tweet):
vec = self.ttb.convert(tweet)
return self.pca.transform([vec])[0]
class TweetToTweet:
def __init__(self):
self.len_before = 0
self.len_after = 0
self.train_x = []
def init(self, tweets_before, tweets_after):
self.train_x = tweets_before + tweets_after
self.len_before = len(tweets_before)
self.len_after = len(tweets_after)
return self
def get_train_xy(self, ):
train_y = [BEFORE] * self.len_before + [AFTER] * self.len_after
return self.train_x, train_y
def convert(self, tweet):
return tweet | [
"noreply@github.com"
] | theprofi.noreply@github.com |
4c340b330451ba30962d6b42b55c433d5c586d23 | 704976ea552111c6a5af9cd7cb62b9d9abaf3996 | /dotviewer/drawgraph.py | e247c773bafd2411cfdb7998286c36a81e749422 | [
"BSD-3-Clause"
] | permissive | mesalock-linux/mesapy | 4f02c5819ce7f2f6e249d34840f1aa097577645d | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | refs/heads/mesapy2.7 | 2023-08-16T21:33:02.239581 | 2019-08-13T10:29:43 | 2019-08-13T18:06:45 | 136,080,721 | 396 | 33 | NOASSERTION | 2020-04-01T03:05:18 | 2018-06-04T20:45:17 | Python | UTF-8 | Python | false | false | 44,450 | py | """
A custom graphic renderer for the '.plain' files produced by dot.
"""
from __future__ import generators
import re, os, math
import pygame
from pygame.locals import *
from strunicode import forceunicode
# Absolute directory of this module, used to locate the bundled font files
# so the viewer works regardless of the current working directory.
this_dir = os.path.dirname(os.path.abspath(__file__))
# Proportional font used for general node/edge label text.
FONT = os.path.join(this_dir, 'font', 'DroidSans.ttf')
# Monospaced font used where fixed-width rendering is required.
FIXEDFONT = os.path.join(this_dir, 'font', 'DroidSansMono.ttf')
COLOR = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'antiquewhite1': (255, 239, 219),
'antiquewhite2': (238, 223, 204),
'antiquewhite3': (205, 192, 176),
'antiquewhite4': (139, 131, 120),
'aquamarine': (127, 255, 212),
'aquamarine1': (127, 255, 212),
'aquamarine2': (118, 238, 198),
'aquamarine3': (102, 205, 170),
'aquamarine4': (69, 139, 116),
'azure': (240, 255, 255),
'azure1': (240, 255, 255),
'azure2': (224, 238, 238),
'azure3': (193, 205, 205),
'azure4': (131, 139, 139),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'bisque1': (255, 228, 196),
'bisque2': (238, 213, 183),
'bisque3': (205, 183, 158),
'bisque4': (139, 125, 107),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blue1': (0, 0, 255),
'blue2': (0, 0, 238),
'blue3': (0, 0, 205),
'blue4': (0, 0, 139),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'brown1': (255, 64, 64),
'brown2': (238, 59, 59),
'brown3': (205, 51, 51),
'brown4': (139, 35, 35),
'burlywood': (222, 184, 135),
'burlywood1': (255, 211, 155),
'burlywood2': (238, 197, 145),
'burlywood3': (205, 170, 125),
'burlywood4': (139, 115, 85),
'cadetblue': (95, 158, 160),
'cadetblue1': (152, 245, 255),
'cadetblue2': (142, 229, 238),
'cadetblue3': (122, 197, 205),
'cadetblue4': (83, 134, 139),
'chartreuse': (127, 255, 0),
'chartreuse1': (127, 255, 0),
'chartreuse2': (118, 238, 0),
'chartreuse3': (102, 205, 0),
'chartreuse4': (69, 139, 0),
'chocolate': (210, 105, 30),
'chocolate1': (255, 127, 36),
'chocolate2': (238, 118, 33),
'chocolate3': (205, 102, 29),
'chocolate4': (139, 69, 19),
'coral': (255, 127, 80),
'coral1': (255, 114, 86),
'coral2': (238, 106, 80),
'coral3': (205, 91, 69),
'coral4': (139, 62, 47),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'cornsilk1': (255, 248, 220),
'cornsilk2': (238, 232, 205),
'cornsilk3': (205, 200, 177),
'cornsilk4': (139, 136, 120),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'cyan1': (0, 255, 255),
'cyan2': (0, 238, 238),
'cyan3': (0, 205, 205),
'cyan4': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgoldenrod1': (255, 185, 15),
'darkgoldenrod2': (238, 173, 14),
'darkgoldenrod3': (205, 149, 12),
'darkgoldenrod4': (139, 101, 8),
'darkgreen': (0, 100, 0),
'darkkhaki': (189, 183, 107),
'darkolivegreen': (85, 107, 47),
'darkolivegreen1': (202, 255, 112),
'darkolivegreen2': (188, 238, 104),
'darkolivegreen3': (162, 205, 90),
'darkolivegreen4': (110, 139, 61),
'darkorange': (255, 140, 0),
'darkorange1': (255, 127, 0),
'darkorange2': (238, 118, 0),
'darkorange3': (205, 102, 0),
'darkorange4': (139, 69, 0),
'darkorchid': (153, 50, 204),
'darkorchid1': (191, 62, 255),
'darkorchid2': (178, 58, 238),
'darkorchid3': (154, 50, 205),
'darkorchid4': (104, 34, 139),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkseagreen1': (193, 255, 193),
'darkseagreen2': (180, 238, 180),
'darkseagreen3': (155, 205, 155),
'darkseagreen4': (105, 139, 105),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategray1': (151, 255, 255),
'darkslategray2': (141, 238, 238),
'darkslategray3': (121, 205, 205),
'darkslategray4': (82, 139, 139),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deeppink1': (255, 20, 147),
'deeppink2': (238, 18, 137),
'deeppink3': (205, 16, 118),
'deeppink4': (139, 10, 80),
'deepskyblue': (0, 191, 255),
'deepskyblue1': (0, 191, 255),
'deepskyblue2': (0, 178, 238),
'deepskyblue3': (0, 154, 205),
'deepskyblue4': (0, 104, 139),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'dodgerblue1': (30, 144, 255),
'dodgerblue2': (28, 134, 238),
'dodgerblue3': (24, 116, 205),
'dodgerblue4': (16, 78, 139),
'firebrick': (178, 34, 34),
'firebrick1': (255, 48, 48),
'firebrick2': (238, 44, 44),
'firebrick3': (205, 38, 38),
'firebrick4': (139, 26, 26),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'gold1': (255, 215, 0),
'gold2': (238, 201, 0),
'gold3': (205, 173, 0),
'gold4': (139, 117, 0),
'goldenrod': (218, 165, 32),
'goldenrod1': (255, 193, 37),
'goldenrod2': (238, 180, 34),
'goldenrod3': (205, 155, 29),
'goldenrod4': (139, 105, 20),
'gray': (192, 192, 192),
'gray0': (0, 0, 0),
'gray1': (3, 3, 3),
'gray10': (26, 26, 26),
'gray100': (255, 255, 255),
'gray11': (28, 28, 28),
'gray12': (31, 31, 31),
'gray13': (33, 33, 33),
'gray14': (36, 36, 36),
'gray15': (38, 38, 38),
'gray16': (41, 41, 41),
'gray17': (43, 43, 43),
'gray18': (46, 46, 46),
'gray19': (48, 48, 48),
'gray2': (5, 5, 5),
'gray20': (51, 51, 51),
'gray21': (54, 54, 54),
'gray22': (56, 56, 56),
'gray23': (59, 59, 59),
'gray24': (61, 61, 61),
'gray25': (64, 64, 64),
'gray26': (66, 66, 66),
'gray27': (69, 69, 69),
'gray28': (71, 71, 71),
'gray29': (74, 74, 74),
'gray3': (8, 8, 8),
'gray30': (77, 77, 77),
'gray31': (79, 79, 79),
'gray32': (82, 82, 82),
'gray33': (84, 84, 84),
'gray34': (87, 87, 87),
'gray35': (89, 89, 89),
'gray36': (92, 92, 92),
'gray37': (94, 94, 94),
'gray38': (97, 97, 97),
'gray39': (99, 99, 99),
'gray4': (10, 10, 10),
'gray40': (102, 102, 102),
'gray41': (105, 105, 105),
'gray42': (107, 107, 107),
'gray43': (110, 110, 110),
'gray44': (112, 112, 112),
'gray45': (115, 115, 115),
'gray46': (117, 117, 117),
'gray47': (120, 120, 120),
'gray48': (122, 122, 122),
'gray49': (125, 125, 125),
'gray5': (13, 13, 13),
'gray50': (127, 127, 127),
'gray51': (130, 130, 130),
'gray52': (133, 133, 133),
'gray53': (135, 135, 135),
'gray54': (138, 138, 138),
'gray55': (140, 140, 140),
'gray56': (143, 143, 143),
'gray57': (145, 145, 145),
'gray58': (148, 148, 148),
'gray59': (150, 150, 150),
'gray6': (15, 15, 15),
'gray60': (153, 153, 153),
'gray61': (156, 156, 156),
'gray62': (158, 158, 158),
'gray63': (161, 161, 161),
'gray64': (163, 163, 163),
'gray65': (166, 166, 166),
'gray66': (168, 168, 168),
'gray67': (171, 171, 171),
'gray68': (173, 173, 173),
'gray69': (176, 176, 176),
'gray7': (18, 18, 18),
'gray70': (179, 179, 179),
'gray71': (181, 181, 181),
'gray72': (184, 184, 184),
'gray73': (186, 186, 186),
'gray74': (189, 189, 189),
'gray75': (191, 191, 191),
'gray76': (194, 194, 194),
'gray77': (196, 196, 196),
'gray78': (199, 199, 199),
'gray79': (201, 201, 201),
'gray8': (20, 20, 20),
'gray80': (204, 204, 204),
'gray81': (207, 207, 207),
'gray82': (209, 209, 209),
'gray83': (212, 212, 212),
'gray84': (214, 214, 214),
'gray85': (217, 217, 217),
'gray86': (219, 219, 219),
'gray87': (222, 222, 222),
'gray88': (224, 224, 224),
'gray89': (227, 227, 227),
'gray9': (23, 23, 23),
'gray90': (229, 229, 229),
'gray91': (232, 232, 232),
'gray92': (235, 235, 235),
'gray93': (237, 237, 237),
'gray94': (240, 240, 240),
'gray95': (242, 242, 242),
'gray96': (245, 245, 245),
'gray97': (247, 247, 247),
'gray98': (250, 250, 250),
'gray99': (252, 252, 252),
'green': (0, 255, 0),
'green1': (0, 255, 0),
'green2': (0, 238, 0),
'green3': (0, 205, 0),
'green4': (0, 139, 0),
'greenyellow': (173, 255, 47),
'grey': (192, 192, 192),
'grey0': (0, 0, 0),
'grey1': (3, 3, 3),
'grey10': (26, 26, 26),
'grey100': (255, 255, 255),
'grey11': (28, 28, 28),
'grey12': (31, 31, 31),
'grey13': (33, 33, 33),
'grey14': (36, 36, 36),
'grey15': (38, 38, 38),
'grey16': (41, 41, 41),
'grey17': (43, 43, 43),
'grey18': (46, 46, 46),
'grey19': (48, 48, 48),
'grey2': (5, 5, 5),
'grey20': (51, 51, 51),
'grey21': (54, 54, 54),
'grey22': (56, 56, 56),
'grey23': (59, 59, 59),
'grey24': (61, 61, 61),
'grey25': (64, 64, 64),
'grey26': (66, 66, 66),
'grey27': (69, 69, 69),
'grey28': (71, 71, 71),
'grey29': (74, 74, 74),
'grey3': (8, 8, 8),
'grey30': (77, 77, 77),
'grey31': (79, 79, 79),
'grey32': (82, 82, 82),
'grey33': (84, 84, 84),
'grey34': (87, 87, 87),
'grey35': (89, 89, 89),
'grey36': (92, 92, 92),
'grey37': (94, 94, 94),
'grey38': (97, 97, 97),
'grey39': (99, 99, 99),
'grey4': (10, 10, 10),
'grey40': (102, 102, 102),
'grey41': (105, 105, 105),
'grey42': (107, 107, 107),
'grey43': (110, 110, 110),
'grey44': (112, 112, 112),
'grey45': (115, 115, 115),
'grey46': (117, 117, 117),
'grey47': (120, 120, 120),
'grey48': (122, 122, 122),
'grey49': (125, 125, 125),
'grey5': (13, 13, 13),
'grey50': (127, 127, 127),
'grey51': (130, 130, 130),
'grey52': (133, 133, 133),
'grey53': (135, 135, 135),
'grey54': (138, 138, 138),
'grey55': (140, 140, 140),
'grey56': (143, 143, 143),
'grey57': (145, 145, 145),
'grey58': (148, 148, 148),
'grey59': (150, 150, 150),
'grey6': (15, 15, 15),
'grey60': (153, 153, 153),
'grey61': (156, 156, 156),
'grey62': (158, 158, 158),
'grey63': (161, 161, 161),
'grey64': (163, 163, 163),
'grey65': (166, 166, 166),
'grey66': (168, 168, 168),
'grey67': (171, 171, 171),
'grey68': (173, 173, 173),
'grey69': (176, 176, 176),
'grey7': (18, 18, 18),
'grey70': (179, 179, 179),
'grey71': (181, 181, 181),
'grey72': (184, 184, 184),
'grey73': (186, 186, 186),
'grey74': (189, 189, 189),
'grey75': (191, 191, 191),
'grey76': (194, 194, 194),
'grey77': (196, 196, 196),
'grey78': (199, 199, 199),
'grey79': (201, 201, 201),
'grey8': (20, 20, 20),
'grey80': (204, 204, 204),
'grey81': (207, 207, 207),
'grey82': (209, 209, 209),
'grey83': (212, 212, 212),
'grey84': (214, 214, 214),
'grey85': (217, 217, 217),
'grey86': (219, 219, 219),
'grey87': (222, 222, 222),
'grey88': (224, 224, 224),
'grey89': (227, 227, 227),
'grey9': (23, 23, 23),
'grey90': (229, 229, 229),
'grey91': (232, 232, 232),
'grey92': (235, 235, 235),
'grey93': (237, 237, 237),
'grey94': (240, 240, 240),
'grey95': (242, 242, 242),
'grey96': (245, 245, 245),
'grey97': (247, 247, 247),
'grey98': (250, 250, 250),
'grey99': (252, 252, 252),
'honeydew': (240, 255, 240),
'honeydew1': (240, 255, 240),
'honeydew2': (224, 238, 224),
'honeydew3': (193, 205, 193),
'honeydew4': (131, 139, 131),
'hotpink': (255, 105, 180),
'hotpink1': (255, 110, 180),
'hotpink2': (238, 106, 167),
'hotpink3': (205, 96, 144),
'hotpink4': (139, 58, 98),
'indianred': (205, 92, 92),
'indianred1': (255, 106, 106),
'indianred2': (238, 99, 99),
'indianred3': (205, 85, 85),
'indianred4': (139, 58, 58),
'indigo': (75, 0, 130),
'invis': (255, 255, 254),
'ivory': (255, 255, 240),
'ivory1': (255, 255, 240),
'ivory2': (238, 238, 224),
'ivory3': (205, 205, 193),
'ivory4': (139, 139, 131),
'khaki': (240, 230, 140),
'khaki1': (255, 246, 143),
'khaki2': (238, 230, 133),
'khaki3': (205, 198, 115),
'khaki4': (139, 134, 78),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lavenderblush1': (255, 240, 245),
'lavenderblush2': (238, 224, 229),
'lavenderblush3': (205, 193, 197),
'lavenderblush4': (139, 131, 134),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lemonchiffon1': (255, 250, 205),
'lemonchiffon2': (238, 233, 191),
'lemonchiffon3': (205, 201, 165),
'lemonchiffon4': (139, 137, 112),
'lightblue': (173, 216, 230),
'lightblue1': (191, 239, 255),
'lightblue2': (178, 223, 238),
'lightblue3': (154, 192, 205),
'lightblue4': (104, 131, 139),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightcyan1': (224, 255, 255),
'lightcyan2': (209, 238, 238),
'lightcyan3': (180, 205, 205),
'lightcyan4': (122, 139, 139),
'lightgoldenrod': (238, 221, 130),
'lightgoldenrod1': (255, 236, 139),
'lightgoldenrod2': (238, 220, 130),
'lightgoldenrod3': (205, 190, 112),
'lightgoldenrod4': (139, 129, 76),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightpink1': (255, 174, 185),
'lightpink2': (238, 162, 173),
'lightpink3': (205, 140, 149),
'lightpink4': (139, 95, 101),
'lightsalmon': (255, 160, 122),
'lightsalmon1': (255, 160, 122),
'lightsalmon2': (238, 149, 114),
'lightsalmon3': (205, 129, 98),
'lightsalmon4': (139, 87, 66),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightskyblue1': (176, 226, 255),
'lightskyblue2': (164, 211, 238),
'lightskyblue3': (141, 182, 205),
'lightskyblue4': (96, 123, 139),
'lightslateblue': (132, 112, 255),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightsteelblue1': (202, 225, 255),
'lightsteelblue2': (188, 210, 238),
'lightsteelblue3': (162, 181, 205),
'lightsteelblue4': (110, 123, 139),
'lightyellow': (255, 255, 224),
'lightyellow1': (255, 255, 224),
'lightyellow2': (238, 238, 209),
'lightyellow3': (205, 205, 180),
'lightyellow4': (139, 139, 122),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'magenta1': (255, 0, 255),
'magenta2': (238, 0, 238),
'magenta3': (205, 0, 205),
'magenta4': (139, 0, 139),
'maroon': (176, 48, 96),
'maroon1': (255, 52, 179),
'maroon2': (238, 48, 167),
'maroon3': (205, 41, 144),
'maroon4': (139, 28, 98),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumorchid1': (224, 102, 255),
'mediumorchid2': (209, 95, 238),
'mediumorchid3': (180, 82, 205),
'mediumorchid4': (122, 55, 139),
'mediumpurple': (147, 112, 219),
'mediumpurple1': (171, 130, 255),
'mediumpurple2': (159, 121, 238),
'mediumpurple3': (137, 104, 205),
'mediumpurple4': (93, 71, 139),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'mistyrose1': (255, 228, 225),
'mistyrose2': (238, 213, 210),
'mistyrose3': (205, 183, 181),
'mistyrose4': (139, 125, 123),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navajowhite1': (255, 222, 173),
'navajowhite2': (238, 207, 161),
'navajowhite3': (205, 179, 139),
'navajowhite4': (139, 121, 94),
'navy': (0, 0, 128),
'navyblue': (0, 0, 128),
'none': (255, 255, 254),
'oldlace': (253, 245, 230),
'olivedrab': (107, 142, 35),
'olivedrab1': (192, 255, 62),
'olivedrab2': (179, 238, 58),
'olivedrab3': (154, 205, 50),
'olivedrab4': (105, 139, 34),
'orange': (255, 165, 0),
'orange1': (255, 165, 0),
'orange2': (238, 154, 0),
'orange3': (205, 133, 0),
'orange4': (139, 90, 0),
'orangered': (255, 69, 0),
'orangered1': (255, 69, 0),
'orangered2': (238, 64, 0),
'orangered3': (205, 55, 0),
'orangered4': (139, 37, 0),
'orchid': (218, 112, 214),
'orchid1': (255, 131, 250),
'orchid2': (238, 122, 233),
'orchid3': (205, 105, 201),
'orchid4': (139, 71, 137),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'palegreen1': (154, 255, 154),
'palegreen2': (144, 238, 144),
'palegreen3': (124, 205, 124),
'palegreen4': (84, 139, 84),
'paleturquoise': (175, 238, 238),
'paleturquoise1': (187, 255, 255),
'paleturquoise2': (174, 238, 238),
'paleturquoise3': (150, 205, 205),
'paleturquoise4': (102, 139, 139),
'palevioletred': (219, 112, 147),
'palevioletred1': (255, 130, 171),
'palevioletred2': (238, 121, 159),
'palevioletred3': (205, 104, 137),
'palevioletred4': (139, 71, 93),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peachpuff1': (255, 218, 185),
'peachpuff2': (238, 203, 173),
'peachpuff3': (205, 175, 149),
'peachpuff4': (139, 119, 101),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'pink1': (255, 181, 197),
'pink2': (238, 169, 184),
'pink3': (205, 145, 158),
'pink4': (139, 99, 108),
'plum': (221, 160, 221),
'plum1': (255, 187, 255),
'plum2': (238, 174, 238),
'plum3': (205, 150, 205),
'plum4': (139, 102, 139),
'powderblue': (176, 224, 230),
'purple': (160, 32, 240),
'purple1': (155, 48, 255),
'purple2': (145, 44, 238),
'purple3': (125, 38, 205),
'purple4': (85, 26, 139),
'red': (255, 0, 0),
'red1': (255, 0, 0),
'red2': (238, 0, 0),
'red3': (205, 0, 0),
'red4': (139, 0, 0),
'rosybrown': (188, 143, 143),
'rosybrown1': (255, 193, 193),
'rosybrown2': (238, 180, 180),
'rosybrown3': (205, 155, 155),
'rosybrown4': (139, 105, 105),
'royalblue': (65, 105, 225),
'royalblue1': (72, 118, 255),
'royalblue2': (67, 110, 238),
'royalblue3': (58, 95, 205),
'royalblue4': (39, 64, 139),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'salmon1': (255, 140, 105),
'salmon2': (238, 130, 98),
'salmon3': (205, 112, 84),
'salmon4': (139, 76, 57),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seagreen1': (84, 255, 159),
'seagreen2': (78, 238, 148),
'seagreen3': (67, 205, 128),
'seagreen4': (46, 139, 87),
'seashell': (255, 245, 238),
'seashell1': (255, 245, 238),
'seashell2': (238, 229, 222),
'seashell3': (205, 197, 191),
'seashell4': (139, 134, 130),
'sienna': (160, 82, 45),
'sienna1': (255, 130, 71),
'sienna2': (238, 121, 66),
'sienna3': (205, 104, 57),
'sienna4': (139, 71, 38),
'skyblue': (135, 206, 235),
'skyblue1': (135, 206, 255),
'skyblue2': (126, 192, 238),
'skyblue3': (108, 166, 205),
'skyblue4': (74, 112, 139),
'slateblue': (106, 90, 205),
'slateblue1': (131, 111, 255),
'slateblue2': (122, 103, 238),
'slateblue3': (105, 89, 205),
'slateblue4': (71, 60, 139),
'slategray': (112, 128, 144),
'slategray1': (198, 226, 255),
'slategray2': (185, 211, 238),
'slategray3': (159, 182, 205),
'slategray4': (108, 123, 139),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'snow1': (255, 250, 250),
'snow2': (238, 233, 233),
'snow3': (205, 201, 201),
'snow4': (139, 137, 137),
'springgreen': (0, 255, 127),
'springgreen1': (0, 255, 127),
'springgreen2': (0, 238, 118),
'springgreen3': (0, 205, 102),
'springgreen4': (0, 139, 69),
'steelblue': (70, 130, 180),
'steelblue1': (99, 184, 255),
'steelblue2': (92, 172, 238),
'steelblue3': (79, 148, 205),
'steelblue4': (54, 100, 139),
'tan': (210, 180, 140),
'tan1': (255, 165, 79),
'tan2': (238, 154, 73),
'tan3': (205, 133, 63),
'tan4': (139, 90, 43),
'thistle': (216, 191, 216),
'thistle1': (255, 225, 255),
'thistle2': (238, 210, 238),
'thistle3': (205, 181, 205),
'thistle4': (139, 123, 139),
'tomato': (255, 99, 71),
'tomato1': (255, 99, 71),
'tomato2': (238, 92, 66),
'tomato3': (205, 79, 57),
'tomato4': (139, 54, 38),
'transparent': (255, 255, 254),
'turquoise': (64, 224, 208),
'turquoise1': (0, 245, 255),
'turquoise2': (0, 229, 238),
'turquoise3': (0, 197, 205),
'turquoise4': (0, 134, 139),
'violet': (238, 130, 238),
'violetred': (208, 32, 144),
'violetred1': (255, 62, 150),
'violetred2': (238, 58, 140),
'violetred3': (205, 50, 120),
'violetred4': (139, 34, 82),
'wheat': (245, 222, 179),
'wheat1': (255, 231, 186),
'wheat2': (238, 216, 174),
'wheat3': (205, 186, 150),
'wheat4': (139, 126, 102),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellow1': (255, 255, 0),
'yellow2': (238, 238, 0),
'yellow3': (205, 205, 0),
'yellow4': (139, 139, 0),
'yellowgreen': (154, 205, 50),
}
# Splits text into alternating word / separator chunks; the capturing group
# keeps the separators in the result (used by TextSnippet for per-word
# highlighting).  Words are runs of [0-9a-zA-Z_.].
re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)')
# Matches a graphviz "setlinewidth(W)" style prefix, capturing the width W
# (integer or decimal).  Used by Edge to extract the line width.
re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)')
def combine(color1, color2, alpha):
    """Alpha-blend two RGB triples: alpha*color1 + (1-alpha)*color2.

    Each channel is truncated to an int, so the result is again a valid
    (r, g, b) tuple of ints in 0..255 for valid inputs.
    """
    beta = 1.0 - alpha
    return tuple(int(c1 * alpha + c2 * beta)
                 for c1, c2 in zip(color1, color2))
def highlight_color(color):
    """Return a visibly-highlighted variant of an RGB color.

    Black and white get fixed replacements (magenta / yellow); any other
    color is slightly tinted - towards dark green if it is bright,
    towards yellow if it is dark.
    """
    if color == (0, 0, 0):      # black becomes magenta
        return (255, 0, 255)
    if color == (255, 255, 255):   # white becomes yellow
        return (255, 255, 0)
    is_bright = sum(color) > 191 * 3
    tint = (128, 192, 0) if is_bright else (255, 255, 0)
    return combine(color, tint, 0.2)
def getcolor(name, default):
    """Resolve a color name or '#rrggbb' spec to an (r, g, b) tuple.

    Named colors come from the module-level COLOR table; hex specs are
    parsed on first use and cached back into COLOR.  Anything else
    returns 'default' unchanged.
    """
    try:
        return COLOR[name]
    except KeyError:
        pass
    if name.startswith('#') and len(name) == 7:
        rgb = tuple(int(name[i:i + 2], 16) for i in (1, 3, 5))
        COLOR[name] = rgb      # cache for the next lookup
        return rgb
    return default
class GraphLayout:
    """A laid-out graph: bounding box plus positioned Node and Edge objects.

    Built incrementally via add_node()/add_edge(), typically from graphviz
    "plain" output (scale, width, height come from its header line).
    """
    # when true, GraphRenderer uses the fixed-width font for node labels
    fixedfont = False
    def __init__(self, scale, width, height):
        self.scale = scale
        self.boundingbox = width, height   # size in layout units
        self.nodes = {}    # node name -> Node
        self.edges = []    # list of Edge
        self.links = {}    # word -> link info, used for text highlighting
    def add_node(self, *args):
        """Create a Node from graphviz 'node' fields and register it by name."""
        n = Node(*args)
        self.nodes[n.name] = n
    def add_edge(self, *args):
        """Create an Edge from graphviz 'edge' fields (tail/head looked up in self.nodes)."""
        self.edges.append(Edge(self.nodes, *args))
    def get_display(self):
        """Build (but do not run) an interactive GraphDisplay for this layout."""
        from graphdisplay import GraphDisplay
        return GraphDisplay(self)
    def display(self):
        """Open an interactive window showing this graph (blocks until closed)."""
        self.get_display().run()
    def reload(self):
        # subclasses may recompute the layout; the base class is static
        return self
# async interaction helpers: these post events into the pygame queue so
# that another thread can drive the display loop.
def display_async_quit():
    """Ask the display loop (possibly in another thread) to quit."""
    pygame.event.post(pygame.event.Event(QUIT))
def display_async_cmd(**kwds):
    """Post a USEREVENT carrying arbitrary attributes to the display loop."""
    pygame.event.post(pygame.event.Event(USEREVENT, **kwds))
# buffer of pygame events that have been fetched but not yet consumed
EventQueue = []
def wait_for_events():
    """Block until EventQueue contains at least one pygame event.

    Waits for one event, then drains any further pending events in one go.
    """
    if not EventQueue:
        EventQueue.append(pygame.event.wait())
        EventQueue.extend(pygame.event.get())
def wait_for_async_cmd():
    """Block until a USEREVENT or QUIT event arrives, then re-queue it.

    All other event types received in the meantime are discarded.
    """
    # wait until another thread pushes a USEREVENT in the queue
    while True:
        wait_for_events()
        e = EventQueue.pop(0)
        if e.type in (USEREVENT, QUIT):  # discard all other events
            break
    EventQueue.insert(0, e)  # re-insert the event for further processing
class Node:
    """A graph node parsed from one 'node' line of graphviz plain output."""
    def __init__(self, name, x, y, w, h, label, style, shape, color, fillcolor):
        # (x, y) is the node's center and (w, h) its size, in layout units
        self.name = forceunicode(name)
        self.x = float(x)
        self.y = float(y)
        self.w = float(w)
        self.h = float(h)
        self.label = forceunicode(label)
        self.style = style
        self.shape = shape          # drawn shapes: 'box', 'ellipse', 'octagon'
        self.color = color          # outline color name / '#rrggbb'
        self.fillcolor = fillcolor  # background color name / '#rrggbb'
        self.highlight = False
    def sethighlight(self, which):
        """Turn highlighting of this node on or off."""
        self.highlight = bool(which)
class Edge:
    """A graph edge parsed from one 'edge' line of graphviz plain output."""
    label = None   # stays None when the edge carries no label
    def __init__(self, nodes, tail, head, cnt, *rest):
        # 'nodes' maps node names to Node objects; 'cnt' is the number of
        # bezier control points whose x/y coordinates follow in 'rest',
        # optionally followed by (label, xl, yl), then style and color.
        self.tail = nodes[forceunicode(tail)]
        self.head = nodes[forceunicode(head)]
        cnt = int(cnt)
        self.points = [(float(rest[i]), float(rest[i+1]))
                       for i in range(0, cnt*2, 2)]
        rest = rest[cnt*2:]
        if len(rest) > 2:
            self.label, xl, yl = rest[:3]
            self.xl = float(xl)
            self.yl = float(yl)
            rest = rest[3:]
        self.style, self.color = rest
        # a 'setlinewidth(W)' prefix in the style string encodes the width
        linematch = re_linewidth.match(self.style)
        if linematch:
            num = linematch.group(1)
            self.linewidth = int(round(float(num)))
            self.style = self.style[linematch.end(0):]
        else:
            self.linewidth = 1
        self.highlight = False
        # lazily-computed caches, filled in by the methods below
        self.cachedbezierpoints = None
        self.cachedarrowhead = None
        self.cachedlimits = None
    def sethighlight(self, which):
        """Turn highlighting of this edge on or off."""
        self.highlight = bool(which)
    def limits(self):
        """Return (min x, max y, max x, min y) of the sampled curve (cached)."""
        result = self.cachedlimits
        if result is None:
            points = self.bezierpoints()
            xs = [point[0] for point in points]
            ys = [point[1] for point in points]
            self.cachedlimits = result = (min(xs), max(ys), max(xs), min(ys))
        return result
    def bezierpoints(self):
        """Return the polyline sampled from the control points (cached).

        Each consecutive group of four control points (overlapping by one)
        defines one cubic bezier segment.
        """
        result = self.cachedbezierpoints
        if result is None:
            result = []
            pts = self.points
            for i in range(0, len(pts)-3, 3):
                result += beziercurve(pts[i], pts[i+1], pts[i+2], pts[i+3])
            self.cachedbezierpoints = result
        return result
    def arrowhead(self):
        """Return the arrow-head triangle as a list of 3 points (cached).

        Returns [] when no direction can be determined (all points
        coincide).  The point list's orientation is guessed by comparing
        end-point distances to the head and tail nodes.
        """
        result = self.cachedarrowhead
        if result is None:
            # we don't know if the list of points is in the right order
            # or not :-( try to guess...
            def dist(node, pt):
                # cheap L1 distance from a node center to a curve point
                return abs(node.x - pt[0]) + abs(node.y - pt[1])
            error_if_direct = (dist(self.head, self.points[-1]) +
                               dist(self.tail, self.points[0]))
            error_if_reversed = (dist(self.tail, self.points[-1]) +
                                 dist(self.head, self.points[0]))
            if error_if_direct > error_if_reversed:   # reversed edge
                head = 0
                dir = 1
            else:
                head = -1
                dir = -1
            # walk away from the head end until we find a point far enough
            # from it to define a direction for the arrow
            n = 1
            while True:
                try:
                    x0, y0 = self.points[head]
                    x1, y1 = self.points[head+n*dir]
                except IndexError:
                    result = []
                    break
                vx = x0-x1
                vy = y0-y1
                try:
                    # normalize (vx, vy) to length 0.12 in layout units
                    f = 0.12 / math.sqrt(vx*vx + vy*vy)
                    vx *= f
                    vy *= f
                    # tip slightly before the end point, plus two base corners
                    result = [(x0 + 0.9*vx, y0 + 0.9*vy),
                              (x0 + 0.4*vy, y0 - 0.4*vx),
                              (x0 - 0.4*vy, y0 + 0.4*vx)]
                    break
                except (ZeroDivisionError, ValueError):
                    # coincident points: try the next one further away
                    n += 1
            self.cachedarrowhead = result
        return result
def beziercurve(p0, p1, p2, p3, resolution=8):
    """Sample a cubic bezier curve defined by four (x, y) control points.

    Returns a list of 'resolution' (x, y) points from p0 (t=0) to p3 (t=1).

    Note: the historical signature 'def beziercurve((x0,y0), ...)' used
    Python 2 tuple parameters, removed by PEP 3113; callers still pass the
    same four point tuples positionally, so this change is compatible.
    """
    x0, y0 = p0
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = p3
    result = []
    f = 1.0/(resolution-1)
    append = result.append
    for i in range(resolution):
        t = f*i
        # Bernstein basis polynomials of degree 3
        t0 = (1-t)*(1-t)*(1-t)
        t1 = t *(1-t)*(1-t) * 3.0
        t2 = t * t *(1-t) * 3.0
        t3 = t * t * t
        append((x0*t0 + x1*t1 + x2*t2 + x3*t3,
                y0*t0 + y1*t1 + y2*t2 + y3*t3))
    return result
def segmentdistance(p0, p1, p):
    """Distance between the point p=(x, y) and the segment p0-p1.

    Falls back to the distance to p0 when the segment is degenerate
    (p0 == p1).  The historical signature used Python 2 tuple parameters
    (removed by PEP 3113); callers still pass three point tuples.
    """
    x0, y0 = p0
    x1, y1 = p1
    x, y = p
    vx = x1-x0
    vy = y1-y0
    try:
        l = math.hypot(vx, vy)
        vx /= l
        vy /= l
        # signed length of the projection of (p - p0) onto the segment
        dlong = vx*(x-x0) + vy*(y-y0)
    except (ZeroDivisionError, ValueError):
        # zero-length segment: force the "before p0" branch below
        dlong = -1
    if dlong < 0.0:
        return math.hypot(x-x0, y-y0)
    elif dlong > l:
        return math.hypot(x-x1, y-y1)
    else:
        # perpendicular distance to the (infinite) line through p0-p1
        return abs(vy*(x-x0) - vx*(y-y0))
class GraphRenderer:
MARGIN = 0.6
SCALEMIN = 3
SCALEMAX = 100
FONTCACHE = {}
def __init__(self, screen, graphlayout, scale=75):
self.graphlayout = graphlayout
self.setscale(scale)
self.setoffset(0, 0)
self.screen = screen
self.textzones = []
self.highlightwords = graphlayout.links
self.highlight_word = None
self.visiblenodes = []
self.visibleedges = []
def wordcolor(self, word):
info = self.highlightwords[word]
if isinstance(info, tuple) and len(info) >= 2:
color = info[1]
else:
color = None
if color is None:
color = (128,0,0)
if word == self.highlight_word:
return ((255,255,80), color)
else:
return (color, None)
def setscale(self, scale):
scale = max(min(scale, self.SCALEMAX), self.SCALEMIN)
self.scale = float(scale)
w, h = self.graphlayout.boundingbox
self.margin = int(self.MARGIN * scale)
self.width = int(w * scale) + (2 * self.margin)
self.height = int(h * scale) + (2 * self.margin)
self.bboxh = h
size = int(15 * (scale-10) / 75)
self.font = self.getfont(size)
def getfont(self, size):
if size in self.FONTCACHE:
return self.FONTCACHE[size]
elif size < 5:
self.FONTCACHE[size] = None
return None
else:
if self.graphlayout.fixedfont:
filename = FIXEDFONT
else:
filename = FONT
font = self.FONTCACHE[size] = pygame.font.Font(filename, size)
return font
def setoffset(self, offsetx, offsety):
"Set the (x,y) origin of the rectangle where the graph will be rendered."
self.ofsx = offsetx - self.margin
self.ofsy = offsety - self.margin
def shiftoffset(self, dx, dy):
self.ofsx += dx
self.ofsy += dy
def getcenter(self):
w, h = self.screen.get_size()
return self.revmap(w//2, h//2)
def setcenter(self, x, y):
w, h = self.screen.get_size()
x, y = self.map(x, y)
self.shiftoffset(x-w//2, y-h//2)
def shiftscale(self, factor, fix=None):
if fix is None:
fixx, fixy = self.screen.get_size()
fixx //= 2
fixy //= 2
else:
fixx, fixy = fix
x, y = self.revmap(fixx, fixy)
self.setscale(self.scale * factor)
newx, newy = self.map(x, y)
self.shiftoffset(newx - fixx, newy - fixy)
def reoffset(self, swidth, sheight):
offsetx = noffsetx = self.ofsx
offsety = noffsety = self.ofsy
width = self.width
height = self.height
# if it fits, center it, otherwise clamp
if width <= swidth:
noffsetx = (width - swidth) // 2
else:
noffsetx = min(max(0, offsetx), width - swidth)
if height <= sheight:
noffsety = (height - sheight) // 2
else:
noffsety = min(max(0, offsety), height - sheight)
self.ofsx = noffsetx
self.ofsy = noffsety
def getboundingbox(self):
"Get the rectangle where the graph will be rendered."
return (-self.ofsx, -self.ofsy, self.width, self.height)
def visible(self, x1, y1, x2, y2):
"""Is any part of the box visible (i.e. within the bounding box)?
We have to perform clipping ourselves because with big graphs the
coordinates may sometimes become longs and cause OverflowErrors
within pygame.
"""
w, h = self.screen.get_size()
return x1 < w and x2 > 0 and y1 < h and y2 > 0
def computevisible(self):
del self.visiblenodes[:]
del self.visibleedges[:]
w, h = self.screen.get_size()
for node in self.graphlayout.nodes.values():
x, y = self.map(node.x, node.y)
nw2 = int(node.w * self.scale)//2
nh2 = int(node.h * self.scale)//2
if x-nw2 < w and x+nw2 > 0 and y-nh2 < h and y+nh2 > 0:
self.visiblenodes.append(node)
for edge in self.graphlayout.edges:
x1, y1, x2, y2 = edge.limits()
x1, y1 = self.map(x1, y1)
if x1 < w and y1 < h:
x2, y2 = self.map(x2, y2)
if x2 > 0 and y2 > 0:
self.visibleedges.append(edge)
def map(self, x, y):
return (int(x*self.scale) - (self.ofsx - self.margin),
int((self.bboxh-y)*self.scale) - (self.ofsy - self.margin))
def revmap(self, px, py):
return ((px + (self.ofsx - self.margin)) / self.scale,
self.bboxh - (py + (self.ofsy - self.margin)) / self.scale)
def draw_node_commands(self, node):
xcenter, ycenter = self.map(node.x, node.y)
boxwidth = int(node.w * self.scale)
boxheight = int(node.h * self.scale)
fgcolor = getcolor(node.color, (0,0,0))
bgcolor = getcolor(node.fillcolor, (255,255,255))
if node.highlight:
fgcolor = highlight_color(fgcolor)
bgcolor = highlight_color(bgcolor)
text = node.label
lines = text.replace('\\l','\\l\n').replace('\r','\r\n').split('\n')
# ignore a final newline
if not lines[-1]:
del lines[-1]
wmax = 0
hmax = 0
commands = []
bkgndcommands = []
if self.font is None:
if lines:
raw_line = lines[0].replace('\\l','').replace('\r','')
if raw_line:
for size in (12, 10, 8, 6, 4):
font = self.getfont(size)
img = TextSnippet(self, raw_line, (0, 0, 0), bgcolor, font=font)
w, h = img.get_size()
if (w >= boxwidth or h >= boxheight):
continue
else:
if w>wmax: wmax = w
def cmd(img=img, y=hmax, w=w):
img.draw(xcenter-w//2, ytop+y)
commands.append(cmd)
hmax += h
break
else:
for line in lines:
raw_line = line.replace('\\l','').replace('\r','') or ' '
if '\f' in raw_line: # grayed out parts of the line
imgs = []
graytext = True
h = 16
w_total = 0
for linepart in raw_line.split('\f'):
graytext = not graytext
if not linepart.strip():
continue
if graytext:
fgcolor = (128, 160, 160)
else:
fgcolor = (0, 0, 0)
img = TextSnippet(self, linepart, fgcolor, bgcolor)
imgs.append((w_total, img))
w, h = img.get_size()
w_total += w
if w_total > wmax: wmax = w_total
def cmd(imgs=imgs, y=hmax):
for x, img in imgs:
img.draw(xleft+x, ytop+y)
commands.append(cmd)
else:
img = TextSnippet(self, raw_line, (0, 0, 0), bgcolor)
w, h = img.get_size()
if w>wmax: wmax = w
if raw_line.strip():
if line.endswith('\\l'):
def cmd(img=img, y=hmax):
img.draw(xleft, ytop+y)
elif line.endswith('\r'):
def cmd(img=img, y=hmax, w=w):
img.draw(xright-w, ytop+y)
else:
def cmd(img=img, y=hmax, w=w):
img.draw(xcenter-w//2, ytop+y)
commands.append(cmd)
hmax += h
#hmax += 8
# we know the bounding box only now; setting these variables will
# have an effect on the values seen inside the cmd() functions above
xleft = xcenter - wmax//2
xright = xcenter + wmax//2
ytop = ycenter - hmax//2
x = xcenter-boxwidth//2
y = ycenter-boxheight//2
if node.shape == 'box':
rect = (x-1, y-1, boxwidth+2, boxheight+2)
def cmd():
self.screen.fill(bgcolor, rect)
bkgndcommands.append(cmd)
def cmd():
pygame.draw.rect(self.screen, fgcolor, rect, 1)
commands.append(cmd)
elif node.shape == 'ellipse':
rect = (x-1, y-1, boxwidth+2, boxheight+2)
def cmd():
pygame.draw.ellipse(self.screen, bgcolor, rect, 0)
bkgndcommands.append(cmd)
def cmd():
pygame.draw.ellipse(self.screen, fgcolor, rect, 1)
commands.append(cmd)
elif node.shape == 'octagon':
step = 1-math.sqrt(2)/2
points = [(int(x+boxwidth*fx), int(y+boxheight*fy))
for fx, fy in [(step,0), (1-step,0),
(1,step), (1,1-step),
(1-step,1), (step,1),
(0,1-step), (0,step)]]
def cmd():
pygame.draw.polygon(self.screen, bgcolor, points, 0)
bkgndcommands.append(cmd)
def cmd():
pygame.draw.polygon(self.screen, fgcolor, points, 1)
commands.append(cmd)
return bkgndcommands, commands
def draw_commands(self):
nodebkgndcmd = []
nodecmd = []
for node in self.visiblenodes:
cmd1, cmd2 = self.draw_node_commands(node)
nodebkgndcmd += cmd1
nodecmd += cmd2
edgebodycmd = []
edgeheadcmd = []
for edge in self.visibleedges:
fgcolor = getcolor(edge.color, (0,0,0))
if edge.highlight:
fgcolor = highlight_color(fgcolor)
points = [self.map(*xy) for xy in edge.bezierpoints()]
def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth):
pygame.draw.lines(self.screen, fgcolor, False, points, width)
edgebodycmd.append(drawedgebody)
points = [self.map(*xy) for xy in edge.arrowhead()]
if points:
def drawedgehead(points=points, fgcolor=fgcolor):
pygame.draw.polygon(self.screen, fgcolor, points, 0)
edgeheadcmd.append(drawedgehead)
if edge.label:
x, y = self.map(edge.xl, edge.yl)
img = TextSnippet(self, edge.label, (0, 0, 0))
w, h = img.get_size()
if self.visible(x-w//2, y-h//2, x+w//2, y+h//2):
def drawedgelabel(img=img, x1=x-w//2, y1=y-h//2):
img.draw(x1, y1)
edgeheadcmd.append(drawedgelabel)
return edgebodycmd + nodebkgndcmd + edgeheadcmd + nodecmd
def render(self):
self.computevisible()
bbox = self.getboundingbox()
ox, oy, width, height = bbox
dpy_width, dpy_height = self.screen.get_size()
# some versions of the SDL misinterpret widely out-of-range values,
# so clamp them
if ox < 0:
width += ox
ox = 0
if oy < 0:
height += oy
oy = 0
if width > dpy_width:
width = dpy_width
if height > dpy_height:
height = dpy_height
self.screen.fill((224, 255, 224), (ox, oy, width, height))
# gray off-bkgnd areas
gray = (128, 128, 128)
if ox > 0:
self.screen.fill(gray, (0, 0, ox, dpy_height))
if oy > 0:
self.screen.fill(gray, (0, 0, dpy_width, oy))
w = dpy_width - (ox + width)
if w > 0:
self.screen.fill(gray, (dpy_width-w, 0, w, dpy_height))
h = dpy_height - (oy + height)
if h > 0:
self.screen.fill(gray, (0, dpy_height-h, dpy_width, h))
# draw the graph and record the position of texts
del self.textzones[:]
for cmd in self.draw_commands():
cmd()
def findall(self, searchstr):
"""Return an iterator for all nodes and edges that contain a searchstr.
"""
for item in self.graphlayout.nodes.itervalues():
if item.label and searchstr in item.label:
yield item
for item in self.graphlayout.edges:
if item.label and searchstr in item.label:
yield item
def at_position(self, (x, y)):
"""Figure out the word under the cursor."""
for rx, ry, rw, rh, word in self.textzones:
if rx <= x < rx+rw and ry <= y < ry+rh:
return word
return None
def node_at_position(self, (x, y)):
"""Return the Node under the cursor."""
x, y = self.revmap(x, y)
for node in self.visiblenodes:
if 2.0*abs(x-node.x) <= node.w and 2.0*abs(y-node.y) <= node.h:
return node
return None
def edge_at_position(self, (x, y), distmax=14):
"""Return the Edge near the cursor."""
# XXX this function is very CPU-intensive and makes the display kinda sluggish
distmax /= self.scale
xy = self.revmap(x, y)
closest_edge = None
for edge in self.visibleedges:
pts = edge.bezierpoints()
for i in range(1, len(pts)):
d = segmentdistance(pts[i-1], pts[i], xy)
if d < distmax:
distmax = d
closest_edge = edge
return closest_edge
class TextSnippet:
    """One line of text pre-rendered as a sequence of pygame images.

    The text is split into words (re_nonword) so that words present in
    renderer.highlightwords can be drawn in their own colors; adjacent
    words with identical colors are merged back into a single image.
    """
    def __init__(self, renderer, text, fgcolor, bgcolor=None, font=None):
        self.renderer = renderer
        self.imgs = []      # rendered images, one per entry of self.parts
        self.parts = []     # (word, fgcolor) or (word, fgcolor, bgcolor) tuples
        if font is None:
            font = renderer.font
        if font is None:
            # no usable font at this zoom level: render nothing
            return
        parts = self.parts
        for word in re_nonword.split(text):
            if not word:
                continue
            if word in renderer.highlightwords:
                fg, bg = renderer.wordcolor(word)
                bg = bg or bgcolor
            else:
                fg, bg = fgcolor, bgcolor
            parts.append((word, fg, bg))
        # consolidate sequences of words with the same color
        for i in range(len(parts)-2, -1, -1):
            if parts[i][1:] == parts[i+1][1:]:
                word, fg, bg = parts[i]
                parts[i] = word + parts[i+1][0], fg, bg
                del parts[i+1]
        # delete None backgrounds
        for i in range(len(parts)):
            if parts[i][2] is None:
                parts[i] = parts[i][:2]
        # render parts
        i = 0
        while i < len(parts):
            part = parts[i]
            word = part[0]
            try:
                img = font.render(word, True, *part[1:])
            except pygame.error:
                del parts[i]  # Text has zero width
            else:
                self.imgs.append(img)
                i += 1
    def get_size(self):
        """Total (width, height) in pixels: widths summed, heights maxed."""
        if self.imgs:
            sizes = [img.get_size() for img in self.imgs]
            return sum([w for w,h in sizes]), max([h for w,h in sizes])
        else:
            return 0, 0
    def draw(self, x, y):
        """Blit the snippet at (x, y) and record each word's screen rectangle
        in renderer.textzones (used for cursor hit-testing)."""
        for part, img in zip(self.parts, self.imgs):
            word = part[0]
            self.renderer.screen.blit(img, (x, y))
            w, h = img.get_size()
            self.renderer.textzones.append((x, y, w, h, word))
            x += w
| [
"mssun@mesalock-linux.org"
] | mssun@mesalock-linux.org |
250f5e5ede5cb22913d1d025af9938648ffb870c | 046f84183a4f942813230a4ef59f0f7d4e3ba9c1 | /Grade 10/Unit 1/Bp_Hello_World/Bp_Hello_World_V2.py | ab647eb26f3b07baedbfb0bed8f228f82aeb0d2b | [] | no_license | thebazman1998/School-Projects | 388a53cfb564fdaa38daf8b501fb1cbf4297f4ca | 6cc187c670641bb6f7664b003a056dbca6c2a4cb | refs/heads/master | 2020-12-31T00:18:26.751597 | 2015-05-27T19:24:18 | 2015-05-27T19:24:18 | 36,387,871 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | '''
Created on <04/02/14>
@author: Basil_Pocklington
This program is my first Python program.
'''
print "Hello World!"
print "This is my first program\n\n."
print 1,000,000
print "n=7"
print 7+5
print 5.2, "this is less than", 7 - 1,"Which is less than", 9
print 6*(1-2)+3 | [
"bg.88888@hotmail.com"
] | bg.88888@hotmail.com |
c7e6440558b93f5fc476fcac8a70f196cd06f1d2 | 3d44f08daca843bc5231e5a8cf1a9b6ad7b02039 | /contact_prediction/coupling_prior/check_derivatives.py | 4e48dfe1ea657a47332dc9e2af15c56e5e10f78e | [] | no_license | susannvorberg/contact_prediction | c9b128b503ee9dd0a28893113fa0dd7ef65696be | d65c0799ebeee9854d4df828f29d585b0aed9bf3 | refs/heads/master | 2021-01-16T20:45:52.662839 | 2018-06-29T13:06:43 | 2018-06-29T13:06:43 | 61,809,573 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,121 | py | #!/usr/bin/env python
#===============================================================================
### This scripts compares analytical gradients to numerical gradients
#===============================================================================
import argparse
import os
from coupling_prior.likelihood import LikelihoodFct
from coupling_prior.parameters import Parameters
def parse_args():
    """Parse command-line options for the coupling-prior gradient check.

    Returns the argparse namespace with two post-processing steps applied:
    filter_gap_columns is derived from max_gap_percentage (overriding the
    --filter_gap_columns flag), and fixed_parameters is split from a
    comma-separated string into a list.
    """
    parser = argparse.ArgumentParser(description="Infer parameters for coupling prior", add_help=False)
    flags = parser.add_argument_group('flag arguments')
    flags.add_argument('-h', '--help', action='help')
    flags.add_argument("-p", dest="parameter_dir", default=None, help="Save parameters in parameter_dir", required = True)
    flags.add_argument("-o", dest="plot_dir", default=None, help="Save optimization log plot in plot_dir", required = True)
    flags.add_argument("-b", dest="braw_dir", default=None, help="Path to directory with CCMPred binary raw files.", required = True)
    flags.add_argument("-q", dest="qij_dir", default=None, help="Path to directory with CCMPred binary qij files.", required = True)
    flags.add_argument("-a", dest="alignment_dir", default=None, help="Path to directory with alignment files (PSICOV format).", required = True)
    flags.add_argument("-s", dest="structure_dir", default=None, help="Path to directory with PDB files.", required = True)
    grp_data = parser.add_argument_group("dataset settings")
    grp_data.add_argument("--contact_thr", dest="contact_thr", default=8, type=int, help="Set threshold for definition of contact. [default Cb distance < %(default)s]")
    grp_data.add_argument("--non_contact_thr", dest="non_contact_thr", default=25, type=int, help="Set threshold for definition of not in contact. [default Cb distance > %(default)s]")
    grp_data.add_argument("--sequence_separation", dest="sequence_separation", default=8, type=int, help="Ignore residue pairs that are close in sequence. [default |i-j| > %(default)s positions]")
    grp_data.add_argument("--max_gap_percentage", dest="max_gap_percentage", default=0.5, type=float, help="Ignore residue pairs that have > X percent gaps. [default %(default)s]")
    grp_data.add_argument("--filter_gap_columns", dest="filter_gap_columns", default=False, action="store_true", help="Filter out alignment columns that contain > X% gaps. [default %(default)s]")
    grp_data.add_argument("--filter_pairs_by_Nij", dest="filter_pairs_by_Nij", default=False, action="store_true", help="Ignore residue pairs that have incorrect qij values of Nij < 1. [default %(default)s]")
    grp_data.add_argument("--filter_best_pairs", dest="filter_best_pairs", default=False, action="store_true", help="Ignore residue pairs that have APC corrected score < 0. [default %(default)s]")
    grp_data.add_argument("--maxcontacts_per_protein", dest="maxcontacts_per_protein", default=250, type=int, help="Choose at max this many contacts from one protein. [default %(default)s]")
    grp_data.add_argument("--maxnoncontacts_per_protein", dest="maxnoncontacts_per_protein", default=500, type=int, help="Choose at max this many non-contacts from one protein. [default %(default)s]")
    grp_data.add_argument("--diversity_thr", dest="diversity_thr", default=0.3, type=float, help="Use only proteins with alignment diversity > d. [default %(default)s ]")
    grp_data.add_argument("--balance", dest="balance", default=1, type=int, help="Specify proportion of non-contacts vs contact (#non-contacts = balance * nr_training_pairs [default %(default)s ]")
    grp_data.add_argument("--nr_crossval_pairs", dest="nr_crossval_pairs", default=1000, type=int, help="Specify number of residue pairs per class (contact/non-contact) in cross validation set. [default %(default)s ]")
    grp_data.add_argument("--nr_training_pairs", dest="nr_training_pairs", default=10000, type=int, help="Specify number of residue pairs per class (contact/non-contact) for training. [default %(default)s ]")
    grp_data.add_argument("--seed", dest="seed", default=123, type=int, help="Set seed. [default %(default)s ]")
    grp_lik = parser.add_argument_group("likelihood function settings")
    grp_lik.add_argument("--nr_threads", dest="nr_threads", default=1, type=int, help="Set the number of threads for OMP parallelization (parallelized over proteins). [default %(default)s ]")
    grp_lik.add_argument("--nr_components", dest="nr_components", default=3, type=int, help="Set number of components for Gaussian mixture prior. [default %(default)s ]")
    grp_lik.add_argument("--reg_coeff_mu", dest="reg_coeff_mu", default=0.0, type=float, help="Set regularization coefficient for L2 regularizer on MU. [default %(default)s == no regularization]")
    grp_lik.add_argument("--reg_coeff_diagPrec", dest="reg_coeff_diagPrec", default=0.0, type=float, help="Set regularization coefficient for L2 regularizer on diagonal elements of precMat. [default %(default)s == no regularization]")
    grp_lik.add_argument("--sigma", dest="sigma", default="diagonal", type=str, help="Set type of precision Matrix. One of ['diagonal', 'isotrope', 'full']. [default %(const)s]")
    grp_lik.add_argument("--fixed_parameters", dest="fixed_parameters", default='weight_bg_0,weight_contact_0', type=str, help="Parameters that will not be optimized. Weights of first component are by default fixed due to softmax overparametrization. [default %(default)s]")
    grp_opt = parser.add_argument_group("Optimization settings:")
    grp_opt.add_argument("--method", dest="method", default="L-BFGS-B", type=str, help="Set the optimization method. One of ['L-BFGS-B', 'CG'] [default %(default)s ]")
    grp_opt.add_argument("--maxit", dest="maxit", default=1000, type=int, help="Set maximum number of iterations for optimization. [default %(default)s ]")
    parser.add_argument("--debug_mode", dest="debug_mode", default=0, type=int, help="Set level of verbosity. [default %(default)s ]")
    args = parser.parse_args()
    # NOTE(review): this overrides whatever --filter_gap_columns said on the
    # command line; the flag is effectively derived from max_gap_percentage.
    if(args.max_gap_percentage < 1.0):
        args.filter_gap_columns =True
    else:
        args.filter_gap_columns =False
    # turn the comma-separated --fixed_parameters string into a list
    fixed_parameters = []
    for p in args.fixed_parameters.split(","):
        fixed_parameters.append(p)
    args.fixed_parameters = fixed_parameters
    return args
def main():
    """Compare analytical vs numerical gradients of the coupling-prior likelihood.

    NOTE(review): this looks like a debug script - everything read from the
    command line below is immediately overwritten by hard-coded settings.
    """
    opt = parse_args()
    parameter_dir = opt.parameter_dir
    plot_dir = opt.plot_dir
    braw_dir = opt.braw_dir
    qijab_dir = opt.qij_dir
    psicov_dir = opt.alignment_dir
    pdb_dir = opt.structure_dir
    sigma = opt.sigma
    nr_components = opt.nr_components
    reg_coeff_mu = opt.reg_coeff_mu
    reg_coeff_diagPrec = opt.reg_coeff_diagPrec
    # NOTE(review): hard-coded debug overrides - all the opt.* values above
    # are shadowed from here on.
    parameter_dir = "/home/vorberg/"
    plot_dir = "/home/vorberg/"
    data_dir = os.environ['DATA']
    braw_dir = data_dir + "/benchmarkset_cathV4.1/contact_prediction/ccmpredpy_cd_gd/braw/"
    qijab_dir = data_dir + "/benchmarkset_cathV4.1/contact_prediction/ccmpredpy_cd_gd/qij/"
    pdb_dir = data_dir + "/benchmarkset_cathV4.1/pdb_renum_combs/"
    psicov_dir = data_dir + "/benchmarkset_cathV4.1/psicov/"
    sigma = 'diagonal'
    nr_components = 3
    reg_coeff_mu = 0
    reg_coeff_diagPrec = 1000
    nr_crossval_pairs = 10
    nr_training_pairs = 100
    maxcontacts_per_protein = 100
    maxnoncontacts_per_protein = 100
    debug_mode = 0
    diversity_thr = 0.3
    fixed_parameters = ['weight_bg_0', 'weight_contact_0']
    seed = 234
    contact_thr = 8
    non_contact_thr = 25
    sequence_separation = 8
    max_gap_percentage = 0.5
    filter_gap_columns = True
    filter_pairs_by_Nij = True
    filter_best_pairs = False
    prec_wrt_L = False
    balance = 1
    nr_threads = 1
    python_parallel = True
    # create dataset
    # NOTE(review): CouplingData is not imported in this module, so this
    # raises NameError as written; presumably an import along the lines of
    # 'from coupling_prior.coupling_data import CouplingData' is missing -
    # TODO confirm against the package layout.
    data = CouplingData()
    data.specify_paths_to_data(braw_dir, qijab_dir, psicov_dir, pdb_dir)
    data.set_seed(seed)
    data.set_nr_residue_pairs_for_crossval(nr_crossval_pairs)
    data.set_nr_residue_pairs_for_training(nr_training_pairs)
    data.set_balance(balance)
    data.set_contact_thr(contact_thr)
    data.set_non_contact_thr(non_contact_thr)
    data.set_seqsep(sequence_separation)
    data.set_filter_gap_columns(filter_gap_columns)
    data.set_max_gap_percentage(max_gap_percentage)
    data.set_filter_pairs_by_Nij(filter_pairs_by_Nij)
    data.set_filter_best_pairs(filter_best_pairs)
    data.set_maxcontacts_per_protein(maxcontacts_per_protein)
    data.set_maxnoncontacts_per_protein(maxnoncontacts_per_protein)
    data.set_diversity_thr(diversity_thr)
    data.initialise(protein_set=[])
    data.print_dataset_info()
    # initialize the parameters of the Gaussian mixture prior
    parameters = Parameters(parameter_dir)
    parameters.initialise_parameters(nr_components, sigma, prec_wrt_L, fixed_parameters)
    # set up the likelihood function around the parameters and the dataset
    likelihood = LikelihoodFct(plot_dir)
    likelihood.set_debug_mode(debug_mode, python_parallel)
    likelihood.set_nr_threads_per_protein(nr_threads)
    likelihood.set_parameters(parameters)
    likelihood.set_regularizer(reg_coeff_mu, reg_coeff_diagPrec)
    likelihood.set_data(data)
    # check numerical gradients
    #python version
    likelihood.numerical_gradient(check_weights=True, check_mu=True, check_prec=True, use_py=True)
    #cpp version
    likelihood.numerical_gradient(check_weights=True, check_mu=True, check_prec=True, use_py=False)
| [
"susann.vorberg@gmail.com"
] | susann.vorberg@gmail.com |
76ea48dc0afba38b6fa6d34656e0022f07777599 | a724baa4d3cbf1a8926c4ae8f0b33b249c526a24 | /src/utils/net.py | 9ccd04584b50b5c820ff9129bc8fc1d45a7e90d5 | [
"MIT"
] | permissive | ykumards/dl-stencil | 308534b4d2112a5c5129fc32342d615b46332dda | 08df28a43c19a9cb5f60a9d71823b5562f5ea78d | refs/heads/master | 2022-07-04T21:17:01.084975 | 2020-05-17T18:19:49 | 2020-05-17T18:19:49 | 241,896,069 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,890 | py | """Functions for manipulating networks."""
import itertools
import math
import torch
import torch.nn as nn
from core.config import cfg
def init_weights(m):
    """Initialize the weights of a single module, dispatching on its type."""
    if isinstance(m, nn.Conv2d):
        # He-style init; convs carry no bias because a BN layer follows.
        receptive = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / receptive))
    elif isinstance(m, nn.BatchNorm2d):
        # Optionally zero-init gamma of the block-final BN (cfg-controlled).
        is_final = hasattr(m, "final_bn") and m.final_bn
        gamma_zero = is_final and cfg.BN.ZERO_INIT_FINAL_GAMMA
        m.weight.data.fill_(0.0 if gamma_zero else 1.0)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(mean=0.0, std=0.01)
        m.bias.data.zero_()
@torch.no_grad()
def compute_precise_bn_stats(model, loader):
    """Recompute precise BatchNorm running stats from training data.

    Feeds up to ``cfg.BN.NUM_SAMPLES_PRECISE`` samples through *model* and
    replaces every BatchNorm2d layer's running mean/var with the average of
    the per-minibatch batch statistics, which is more accurate than the EMA
    accumulated during training.  Momentum values are restored afterwards.
    """
    # Compute the number of minibatches to use
    num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))
    # Retrieve the BN layers
    bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]
    # Initialize stats storage
    mus = [torch.zeros_like(bn.running_mean) for bn in bns]
    sqs = [torch.zeros_like(bn.running_var) for bn in bns]
    # Remember momentum values
    moms = [bn.momentum for bn in bns]
    # Disable momentum: with momentum == 1.0 the running stats after each
    # forward pass equal the statistics of the current minibatch alone.
    for bn in bns:
        bn.momentum = 1.0
    # Accumulate the stats across the data samples
    for batch in itertools.islice(loader, num_iter):
        model(batch[0].to(cfg.DEVICE))
        # Accumulate E[x] and E[x^2] for each BN layer
        for i, bn in enumerate(bns):
            m, v = bn.running_mean, bn.running_var
            sqs[i] += (v + m * m) / num_iter
            mus[i] += m / num_iter
    # Set the stats (var = E[x^2] - E[x]^2) and restore momentum values
    for i, bn in enumerate(bns):
        bn.running_var = sqs[i] - mus[i] * mus[i]
        bn.running_mean = mus[i]
        bn.momentum = moms[i]
def reset_bn_stats(model):
    """Reset the running mean/var of every BatchNorm2d layer in *model*."""
    bn_layers = (m for m in model.modules()
                 if isinstance(m, torch.nn.BatchNorm2d))
    for bn in bn_layers:
        bn.reset_running_stats()
def drop_connect(x, drop_ratio):
    """Randomly zero whole examples of *x* in-place and rescale survivors.

    Adapted from DARTS: each sample in the batch survives with probability
    ``1 - drop_ratio`` and surviving samples are scaled up to preserve the
    expectation.  The tensor is modified in-place and also returned.
    """
    keep_prob = 1.0 - drop_ratio
    keep_mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype,
                            device=x.device)
    keep_mask.bernoulli_(keep_prob)
    x.div_(keep_prob).mul_(keep_mask)
    return x
def get_flat_weights(model):
    """Concatenate every parameter of *model* into one flat column vector."""
    columns = [param.data.view(-1, 1) for param in model.parameters()]
    return torch.cat(columns, 0)
def set_flat_weights(model, flat_weights):
    """Write *flat_weights* (a single flat vector) back into the model."""
    offset = 0
    for param in model.parameters():
        count = param.data.numel()
        chunk = flat_weights[offset:(offset + count)]
        param.data.copy_(chunk.view_as(param.data))
        offset += count
    # Every element of the flat vector must have been consumed.
    assert offset == flat_weights.numel()
| [
"ykumar@nyu.edu"
] | ykumar@nyu.edu |
85847224dd923697aabf30f3dbcf78a54f67e6ee | f157332bb970b6a9dbbf60399ccb71596ede20a5 | /gpkit/gpkit/constraints/relax.py | 2639ffb47f3a7ce4153c00b4a345066a44301f21 | [
"MIT"
] | permissive | UCLA-StarAI/LearnFairNB | e57b854f8af70e4f3c92baba36b850bcc7c47381 | f922d885399955737bd9f16a104f700004cd3846 | refs/heads/master | 2020-06-02T10:34:22.040504 | 2019-07-02T01:15:16 | 2019-07-02T01:15:16 | 191,128,598 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,822 | py | """Models for assessing primal feasibility"""
from .set import ConstraintSet
from ..nomials import Variable, VectorVariable, parse_subs, NomialArray
from ..keydict import KeyDict
from .. import NamedVariables, MODELNUM_LOOKUP
from gpkit import SignomialsEnabled
class ConstraintsRelaxedEqually(ConstraintSet):
    """Relax all constraints by one shared factor, Eqn. 10 of [Boyd2007].

    Arguments
    ---------
    constraints : iterable
        Constraints to be relaxed (made easier).

    Attributes
    ----------
    relaxvar : Variable
        Controls the relaxation.  A solved value of 1 means no relaxation
        was needed; e.g. 1.5 means every constraint was 50 percent easier
        in the final solution than in the original problem.

    [Boyd2007] : "A tutorial on geometric programming", Optim Eng 8:67-122
    """

    def __init__(self, constraints):
        if not isinstance(constraints, ConstraintSet):
            constraints = ConstraintSet(constraints)
        subs = dict(constraints.substitutions)
        # Single shared relaxation variable, namespaced under "Relax".
        with NamedVariables("Relax"):
            self.relaxvar = Variable("C")
        with SignomialsEnabled():
            self.origconstrs = list(constraints.flat(constraintsets=False))
            relaxed = [constr.relaxed(self.relaxvar)
                       for constr in self.origconstrs]
        ConstraintSet.__init__(self, [relaxed, self.relaxvar >= 1], subs)
class ConstraintsRelaxed(ConstraintSet):
    """Relax each constraint individually, Eqn. 11 of [Boyd2007].

    Arguments
    ---------
    constraints : iterable
        Constraints to be relaxed (made easier).

    Attributes
    ----------
    relaxvars : Variable
        Vector of per-constraint relaxation variables.  A solved value of 1
        means that constraint needed no relaxation; e.g. 1.5 means it was
        made 50 percent easier in the final solution.

    [Boyd2007] : "A tutorial on geometric programming", Optim Eng 8:67-122
    """

    def __init__(self, constraints):
        if not isinstance(constraints, ConstraintSet):
            constraints = ConstraintSet(constraints)
        subs = dict(constraints.substitutions)
        # One relaxation variable per constraint, namespaced under "Relax".
        with NamedVariables("Relax"):
            self.relaxvars = VectorVariable(len(constraints), "C")
        with SignomialsEnabled():
            self.origconstrs = list(constraints.flat(constraintsets=False))
            relaxed = [constr.relaxed(self.relaxvars[idx])
                       for idx, constr in enumerate(self.origconstrs)]
        ConstraintSet.__init__(self, [relaxed, self.relaxvars >= 1], subs)
class ConstantsRelaxed(ConstraintSet):
    """Relax constants in a constraintset.

    Arguments
    ---------
    constraints : iterable
        Constraints which will be relaxed (made easier).
    include_only : set
        if declared, variable names must be on this list to be relaxed
    exclude : set
        if declared, variable names on this list will never be relaxed

    Attributes
    ----------
    relaxvars : Variable
        The variables controlling the relaxation. A solved value of 1 means no
        relaxation was necessary or optimal for a particular constant.
        Higher values indicate the amount by which that constant has been
        made easier: e.g., a value of 1.5 means it was made 50 percent easier
        in the final solution than in the original problem. Of course, this
        can also be determined by looking at the constant's new value directly.
    """
    def __init__(self, constraints, include_only=None, exclude=None):  # pylint:disable=too-many-locals
        if not isinstance(constraints, ConstraintSet):
            constraints = ConstraintSet(constraints)
        exclude = frozenset(exclude) if exclude else frozenset()
        include_only = frozenset(include_only) if include_only else frozenset()
        substitutions = KeyDict(constraints.substitutions)
        constants, _, linked = parse_subs(constraints.varkeys, substitutions)
        constrained_varkeys = constraints.constrained_varkeys()
        if linked:
            # Evaluate linked (function-valued) substitutions against the
            # plain constants, keeping only keys the constraints actually use.
            kdc = KeyDict(constants)
            combined = {k: f(kdc) for k, f in linked.items()
                        if k in constrained_varkeys}
            combined.update({k: v for k, v in constants.items()
                             if k in constrained_varkeys})
        else:
            combined = constants
        self.constants = KeyDict(combined)
        relaxvars, relaxation_constraints = [], []
        self.origvars = []
        # Claim a unique model number for this relaxation, then bump the
        # global counter so later relaxations don't collide.
        self.num = MODELNUM_LOOKUP["Relax"]
        self._unrelaxmap = {}
        MODELNUM_LOOKUP["Relax"] += 1
        for key, value in combined.items():
            # Zero-valued constants cannot be relaxed multiplicatively.
            if value == 0:
                continue
            elif include_only and key.name not in include_only:
                continue
            elif key.name in exclude:
                continue
            # Build a descr for the new variables under the "Relax" model.
            descr = key.descr.copy()
            descr.pop("value", None)
            descr.pop("veckey", None)
            descr["models"] = descr.pop("models", [])+["Relax"]
            descr["modelnums"] = descr.pop("modelnums", []) + [self.num]
            relaxvardescr = descr.copy()
            relaxvardescr["unitrepr"] = "-"
            relaxvar = Variable(**relaxvardescr)
            relaxvars.append(relaxvar)
            # The constant becomes a free variable; its former value is held
            # by a "_{before}" variable that stays substituted.
            del substitutions[key]
            var = Variable(**key.descr)
            self.origvars.append(var)
            unrelaxeddescr = descr.copy()
            unrelaxeddescr["name"] += "_{before}"
            unrelaxed = Variable(**unrelaxeddescr)
            # Remember the mapping so sensitivities can be re-attributed in
            # process_result.
            self._unrelaxmap[unrelaxed.key] = key
            substitutions[unrelaxed] = value
            # Constrain the freed variable to within a relaxvar factor of
            # its original value, with relaxvar >= 1.
            relaxation_constraints.append([relaxvar >= 1,
                                           unrelaxed/relaxvar <= var,
                                           var <= unrelaxed*relaxvar])
        self.relaxvars = NomialArray(relaxvars)
        ConstraintSet.__init__(self, [constraints, relaxation_constraints])
        self.substitutions = substitutions

    def process_result(self, result):
        """Reattach sensitivities of the "_{before}" constants to the
        original constant keys, so callers see familiar names."""
        ConstraintSet.process_result(self, result)
        csenss = result["sensitivities"]["constants"]
        for const, origvar in self._unrelaxmap.items():
            csenss[origvar] = csenss[const]
            del csenss[const]
| [
"yjchoi0122@gmail.com"
] | yjchoi0122@gmail.com |
731c8a798fd64aede0e5496616a069e3fb2b473a | 0b8145658325831d964a67d86e1f282c5f604a7e | /riiid-test-answer-prediction/features_util.py | 14197b5ea0a5eb1a4cdadc62912b21f434869608 | [] | no_license | tarun-ssharma/ml | 16797bfc4a63c281184ac0b1b26b76c088de8fd1 | c547fdde432f5eaea631157b74abbbe35e521bdc | refs/heads/main | 2023-03-22T16:33:47.979164 | 2021-02-28T17:53:51 | 2021-02-28T17:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,315 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
import os
import random
from copy import deepcopy
import _pickle as pickle
import gc
from multiprocess import Pool
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from tensorflow.keras.preprocessing.text import Tokenizer
from sklearn.preprocessing import KBinsDiscretizer
from tensorflow.keras.optimizers import Adam, SGD
def save(file, name, folder=""):
    """Pickle *file* to '<folder>/<name>.pickle' (or '<name>.pickle' in cwd).

    Bug fix: the original ended with ``outfile.close`` (no parentheses), so
    the handle was never explicitly closed; a ``with`` block now guarantees
    the file is flushed and closed even if pickling raises.
    """
    if folder != "":
        path = './' + folder + '/' + name + '.pickle'
    else:
        path = name + '.pickle'
    with open(path, 'wb') as outfile:
        pickle.dump(file, outfile, protocol=4)
def load(name, folder=""):
    """Unpickle and return the object stored by :func:`save`.

    Bug fix: the original ended with ``outfile.close`` (no parentheses), so
    the handle leaked; a ``with`` block now closes it deterministically.
    """
    if folder != "":
        path = './' + folder + '/' + name + '.pickle'
    else:
        path = name + '.pickle'
    with open(path, 'rb') as infile:
        return pickle.load(infile)
class Discretiser:
    """Quantile-based discretiser mapping values onto integer bin levels."""

    def __init__(self, nbins):
        # One fewer internal quantile edge than the requested bin count.
        self.nbins = nbins - 1
        self.map_to = np.arange(self.nbins) / self.nbins

    def fit(self, X):
        """Learn the empirical quantiles of the 1-D array *X*."""
        self.map_from = np.quantile(X, self.map_to)

    def transform(self, X):
        """Map *X* onto integer bin indices in ``[0, self.nbins]``."""
        scaled = np.interp(X, self.map_from, self.map_to,
                           left=0, right=1, period=None)
        return (scaled * self.nbins).astype(int)
from tf_transformers2 import *
from tensorflow.keras.layers import Input, Dense, Dropout, TimeDistributed, LSTM
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
# Artifacts produced offline and loaded from pickles in the working
# directory: the tokenizer mapping content ids to integers, and lookup
# tables (dicts keyed by exercise id) for tags, parts, per-question mean
# correctness and question clusters.
tokenizer = load('tokenizer')
dico_utags, dico_gtags, dico_parts, dico_tags = load('dico_tags')
dico_question = load('dico_questions_mean')
dico_cluster = load('dico_cluster')
#timestamp_enc, elapsed_enc,lag_time_enc, qmean_enc = load('discrete_encoders')
#w2v = load('w2v_ids_embeddings')
# History windows for aggregating past interactions: everything, the
# first/last N interactions ('pos_*'), or the first/last N hours ('tim_*').
time_windows = ['all']
time_windows += ['pos_first_' + str(i) for i in [10,20]]
time_windows += ['pos_last_' + str(i) for i in [10,20]]
time_windows += ['tim_first_' + str(i) for i in [1,6,24,168]]
time_windows += ['tim_last_' + str(i) for i in [1,6,24,168]]
# Content-similarity filters applied relative to the current interaction.
same_contents = ['all', 'content', 'parts', 'cluster']
all_contents = [] #['parts_' + str(i) for i in range(1,8)]# + ['cluster_' + str(i) for i in range(20)]
content_types = ['question', 'lectures']
# Per content type: which past columns to aggregate, with which metrics
# (see get_kpis for metric semantics; -10 is the "undefined" sentinel).
kpis = {
    'question' : {
        'correctness' : ['mean', 'std', 'slope'],
        'timestamp_hour' : ['mean_diff', 'max_diff', 'mean_diff'],
        'prior_question_had_explanation' : ['mean', 'std'],
#        'position' : ['mean_diff', 'max_diff', 'mean_diff'],
        'question_mean' : ['mean', 'std', 'slope'],
        'elapsed_time_seconds' : ['max', 'mean', 'slope'],
    },
    'lectures' : {
        'timestamp_hour' : ['max_diff', 'min_diff', 'mean_diff'],
        'position' : ['mean'],
    }
}
def map_dict(ids, dico):
    """Vectorised dictionary lookup: map each id to ``dico[id]``, 0 if absent.

    Bug fix: the original used a bare ``except``, which would also swallow
    unrelated errors (KeyboardInterrupt, SystemExit, typos inside the
    lookup).  Only the failures a lookup can legitimately produce —
    missing key or unhashable key — are caught now.
    """
    def replace_dico(x):
        try:
            return dico[x]
        except (KeyError, TypeError):
            return 0
    return np.array(list(map(replace_dico, ids)))
def map_question_type(ids):
    """Encode content ids: 'l...' (lecture) -> 0, 'q...' (question) -> 1, else -1.

    Bug fix: the original indexed ``x[0]``, which raises IndexError on an
    empty id string; ``str.startswith`` yields -1 for empty ids instead.
    """
    def mapping(x):
        if x.startswith('l'):
            return 0
        if x.startswith('q'):
            return 1
        return -1
    return np.array(list(map(mapping, ids)))
def apply_tokenizer(ids):
    """Turn each content id into its integer token via the global tokenizer."""
    sequences = tokenizer.texts_to_sequences(ids)
    return np.array(sequences).reshape(len(ids))
def safe_divide(a, b):
    """Element-wise a / b where zeros in *b* are replaced by 1e9 (result ~ 0).

    *b* is never modified; a fresh denominator array is built instead.
    """
    denom = np.where(b == 0, 1e9, b)
    return a / denom
def prepare(user_dico):
    """Return an enriched deep copy of a user record with derived columns.

    Adds tokenised ids, content type, part/cluster/question-mean lookups,
    interaction position, and timestamps converted to several units.
    """
    user = deepcopy(user_dico)
    ids = user['exercise_id']
    user['exercise_id_num'] = apply_tokenizer(ids)
    user['exercise_type'] = map_question_type(ids)
    user['parts'] = map_dict(ids, dico_parts)
    user['cluster'] = map_dict(ids, dico_cluster)
    # user['tags'] = map_dict(ids, dico_tags)
    user['position'] = np.arange(len(user['exercise_id_num']))
    # Raw timestamps are in milliseconds; derive the other units.
    user['timestamp_seconds'] = user['timestamp'] / 1000
    user['timestamp_minutes'] = user['timestamp'] / (1000 * 60)
    user['timestamp_hour'] = user['timestamp'] / (1000 * 3600)
    user['timestamp_days'] = user['timestamp'] / (1000 * 3600 * 24)
    user['question_mean'] = map_dict(ids, dico_question)
    user['elapsed_time_seconds'] = user['elapsed_time'] / 1000
    return user
def get_current_past(user, i):
    """Split a user history at row *i*: the row itself and all rows before it.

    Returns (current, past): *current* holds the scalar values of row *i*
    for the snapshot columns; *past* holds every column truncated to [:i].
    """
    snapshot_keys = ('exercise_id_num', 'exercise_type', 'position', 'parts',
                     'cluster', 'timestamp_hour', 'timestamp_seconds',
                     'question_mean', 'correctness')
    current = {key: user[key][i] for key in snapshot_keys}
    past = {key: values[:i] for key, values in user.items()}
    return current, past
def get_time_windows_mask(current, past, strat):
    """Boolean mask over past rows selected by a window spec.

    *strat* is 'all' or '<pos|tim>_<first|last>_<n>': the first/last *n*
    positions ('pos'), or the first/last *n* hours ('tim') relative to the
    current row.
    """
    if strat == 'all':
        return past['position'] >= 0
    axis, side, span = strat.split('_')
    span = float(span)
    if axis == 'pos':
        values, now = past['position'], current['position']
    else:
        values, now = past['timestamp_hour'], current['timestamp_hour']
    if side == 'first':
        lo, hi = 0, min(span, now)
    else:
        lo, hi = now - span, now
    return (values >= lo) & (values < hi)
def apply_mask(past, mask):
    """Restrict every column of *past* to the rows where *mask* is True."""
    return {name: column[mask] for name, column in past.items()}
def get_same_content_mask(current, past, strat):
    """Mask of past rows sharing the current row's content, part or cluster.

    Bug fix: callers pass the plural 'parts' (see ``same_contents``), but
    the original only matched the singular 'part' and silently fell through
    to the cluster comparison, so all "same part" features were actually
    "same cluster" features.  Both spellings now select the part mask.
    """
    if strat == 'content':
        return past['exercise_id_num'] == current['exercise_id_num']
    elif strat in ('part', 'parts'):
        return past['parts'] == current['parts']
    elif strat == 'all':
        return past['position'] >= 0
    else:
        return past['cluster'] == current['cluster']
def get_content_mask(past, strat):
    """Mask of past rows in a fixed content group, e.g. 'parts_3' or 'cluster_7'."""
    field, number = strat.split('_')
    target = int(number)
    column = past['parts'] if field == 'parts' else past['cluster']
    return column == target
def get_content_type_mask(past, strat):
    """Mask selecting questions (exercise_type == 1) or lectures (== 0)."""
    wanted = 1 if strat == 'question' else 0
    return past['exercise_type'] == wanted
def get_kpis(current, past, key, metric):
    """Aggregate one past column with one metric; -10 is the 'undefined' sentinel.

    Supported metrics: sum/mean/min/max/std over ``past[key]``; the *_diff
    metrics compare against the current row's value; 'slope' is the mean of
    the second half minus the mean of the first half of the history.

    Bug fix: the original only bound the reference value ``c`` for some
    keys, so a *_diff metric on e.g. 'correctness' raised
    UnboundLocalError.  ``current.get`` makes it always defined (None when
    the key is absent from the current row).
    """
    c = current.get(key)
    array = past[key]
    if len(array) == 0:
        return -10
    if metric == 'sum':
        return array.sum()
    if metric == 'mean':
        return array.mean()
    if metric == 'min':
        return array.min()
    if metric == 'max':
        return array.max()
    if metric == 'std':
        return array.std()
    if metric == 'max_diff':
        return (c - array).max()
    if metric == 'min_diff':
        return (c - array).min()
    if metric == 'mean_diff':
        return (c - array).mean()
    if metric == 'slope' and len(array) >= 2:
        half = len(array) // 2
        return array[-half:].mean() - array[:half].mean()
    return -10
def hmean(a, b):
    """Harmonic mean of *a* and *b*; returns the -10 sentinel when a + b == 0."""
    total = a + b
    return -10 if total == 0 else 2 * a * b / total
def get_features(current, past):
    """Build the full feature dict for one interaction.

    Starts from *current* and, for every (content filter x time window x
    content type) combination, appends aggregated KPIs of the matching past
    interactions (driven by the module-level ``same_contents``,
    ``time_windows``, ``all_contents``, ``content_types`` and ``kpis``).
    Feature names encode the combination as
    '<content>-<window>-<type>-<field>-<metric>'.  Finally, harmonic-mean
    crosses between every correctness-mean feature and the question / user /
    user-on-content means are appended.
    """
    final = deepcopy(current)
    # Filters relative to the current row (same content / part / cluster).
    for st_cont in same_contents:
        m_c = get_same_content_mask(current, past, st_cont)
        past_c = apply_mask(past, m_c)
        for st_time in time_windows:
            m_t = get_time_windows_mask(current, past_c, st_time)
            past_t = apply_mask(past_c, m_t)
            for st_type in content_types:
                m_ty = get_content_type_mask(past_t, st_type)
                past_ty = apply_mask(past_t, m_ty)
                for field in kpis[st_type]:
                    for metric in kpis[st_type][field]:
                        final[st_cont+'-'+st_time+'-'+st_type+'-'+field+'-'+metric] = get_kpis(current, past_ty, field, metric)
    # Absolute content filters (fixed part/cluster ids); empty by default.
    for st_cont in all_contents:
        m_c = get_content_mask(past, st_cont)
        past_c = apply_mask(past, m_c)
        for st_time in time_windows:
            m_t = get_time_windows_mask(current, past_c, st_time)
            past_t = apply_mask(past_c, m_t)
            for st_type in content_types:
                m_ty = get_content_type_mask(past_t, st_type)
                past_ty = apply_mask(past_t, m_ty)
                for field in kpis[st_type]:
                    for metric in kpis[st_type][field]:
                        final[st_cont+'-'+st_time+'-'+st_type+'-'+field+'-'+metric] = get_kpis(current, past_ty, field, metric)
    qm = final['question_mean'] ## question Mean
    um = final['all-all-question-correctness-mean'] # user mean
    cm = final['content-all-question-correctness-mean'] # user mean on content
    keys = list(final.keys())
    # Harmonic-mean crosses for every correctness-mean feature built above.
    for elt in keys:
        if 'correctness' in elt:
            if 'mean' in elt:
                final[elt+'-question_hmean'] = hmean(qm, final[elt])
                final[elt+'-user_hmean'] = hmean(um, final[elt])
                final[elt+'-user_content_hmean'] = hmean(cm, final[elt])
return final | [
"gabriel.olympie@student.ecp.fr"
] | gabriel.olympie@student.ecp.fr |
5ea079a246153c51efaee0a478cb091326370e2d | 82c50f82f9e743b93dcb581d7ec95dd5e3a44380 | /browseapp/browse_main.py | 3dabd002cc8ff02326dfbfaac5cdff7b3bd4642e | [] | no_license | ivanerill/collecTF | 4f1c7981e616989d1044db9dd308377b3e3450c6 | 541f8f3852fdc740798f4f892d8ff1ef8b2225df | refs/heads/master | 2021-01-18T12:34:56.368336 | 2013-07-15T18:07:35 | 2013-07-15T18:07:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from browse_site import browse_by_site
from browse_curation import browse_curation
| [
"sefakilic@gmail.com"
] | sefakilic@gmail.com |
345cbf32fb3ab848ad9139be9f0d92ac5203427f | d6b6b534e057266fd4c1911281f1f969ad9abb2b | /TaskApp/migrations/0001_initial.py | 025243d7c1886e2569c45e08eec1631c660d700d | [] | no_license | lobanovskysvet/App | 35289f5a42246c28a7a302e1b4da462d52c7949c | 3db27cfd60fb1b8bfb468406bfb39082a6211bf1 | refs/heads/master | 2021-05-03T14:27:58.901205 | 2018-02-06T13:06:42 | 2018-02-06T13:06:42 | 120,456,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # Generated by Django 2.0.1 on 2018-01-27 18:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(blank=True, max_length=70)),
('first', models.CharField(blank=True, max_length=25)),
('last', models.CharField(blank=True, max_length=25)),
],
),
]
| [
"lobanovskysvet@gmail.com"
] | lobanovskysvet@gmail.com |
a37919e7a146f13a19e835358e5621879447a92f | ac193a836d7ce8b4f27959013795868d4ddc189a | /myfirstapp/migrations/0001_initial.py | 8269cc62bfa7b5fe5567749882cc0b430466135b | [] | no_license | ArienM444/MyPO | 86e404b1e8fbe392b1e3a6399b3afd5fb8fd7789 | 051a2b941b9c3a61062723d3a06b03b491d629e9 | refs/heads/master | 2023-08-23T10:35:21.755245 | 2021-10-20T16:00:23 | 2021-10-20T16:00:23 | 419,395,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # Generated by Django 3.2.7 on 2021-10-20 15:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=11)),
('surname', models.CharField(max_length=11)),
('date_of_birth', models.IntegerField()),
('gender', models.CharField(choices=[('F', 'Female'), ('M', 'Male')], max_length=5)),
],
),
]
| [
"arenmamyan3644@gmail.com"
] | arenmamyan3644@gmail.com |
a25b93a52f067702d23fb15074ccf12559fff3d1 | bf977912c9fdd6a6b515fa14fc082a7312669305 | /Introdução à programação com Python/Cap 3/3.7.py | c565d395755f53c4ef45b58db6c63b15858ac665 | [] | no_license | thsimoes/Codes | a5f0d999fa3204d535d394520d5e1f9265f52156 | 5227d6f27cb513d0e5808bc7657f21a0920975e9 | refs/heads/main | 2023-09-06T03:54:07.754689 | 2021-11-24T18:33:38 | 2021-11-24T18:33:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | a = int(input("Qual é seu primeiro número? "))
b = int(input("Qual é seu segundo número? "))
c = a + b
print(f"A soma é {c}") | [
"simoesthsa@gmail.com"
] | simoesthsa@gmail.com |
16f2a5e757149cc4dd5347724b6410fe4a4aa806 | af0d9d91942a66f1db2962997471e9a693e39a9b | /CardImpls/Hinterlands/__init__.py | 393b2929c8ac278bbd217058f83295771d57869d | [] | no_license | boudrejp/domsim | 0c955a37a0960c872a4b74433edba2a8062fc904 | 98d4c687ed4e046d768e03987a0fcbb134458876 | refs/heads/master | 2021-05-16T01:29:36.242233 | 2017-02-27T18:41:14 | 2017-02-27T18:41:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | __author__ = 'breppert'
import JackOfAllTrades
import TradingPost
import BorderVillage
import SilkRoad
import Oasis
import Tunnel
import Cartographer
import Haggler | [
"breppert@appnexus.com"
] | breppert@appnexus.com |
0c4be6c2b828f8b331175d09a11526e92c08bb41 | b5687632d220f7228954c43b181aac376782c9c9 | /google/detailplace.py | 74efaf812d5ca34485700ff3bbfb147935ee1544 | [] | no_license | CasparChou/SightBus-RealtimeServer | 856e6f0048e8a7f8b7bdb1b82a54d0f8ec7088f1 | a871438b1ee0f357dfb1970fefc5e5b0960018fe | refs/heads/master | 2021-01-19T11:53:25.654903 | 2017-04-12T02:48:34 | 2017-04-12T02:48:34 | 88,000,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-.
# This is a program for fetching real bus stop position via google map api
import sys, time, datetime, requests
import re
from include.util import *
from math import ceil
from random import randint
# Script start time as stringified epoch seconds.
# NOTE(review): this rebinding shadows the imported ``time`` module.
time = str(int(time.time()))
# Sample Nearby Search request; main() builds its own Place Details URLs.
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=25.009599,121.457963&radius=300&types=bus_station&sensor=true&key=AIzaSyDveM91dVZcXP3wwYLcZN-VwLsGJVMMM8g"
def intoDB(data):
    """Bulk-insert pre-formatted (place_id, name) VALUES rows into GoogleRoutes.

    *data* is an already-assembled SQL fragment such as
    " ('id1', 'route1'), ('id2', 'route2')" built by main().
    """
    # Nothing to insert.
    if len(data) == 0:
        return
    db, cursor = db_connect()
    # NOTE(review): the statement is assembled by string concatenation, so
    # any quote character inside *data* breaks the query / allows SQL
    # injection; a parameterised executemany() would be safer.
    sql = "INSERT IGNORE INTO GoogleRoutes( place_id, name ) VALUES "
    db_commit(db, cursor, sql+ data);
    db.close()
def demo(data):
    """Debug aid: print the "SubRoutes" entries of the first five rows of *data*."""
    for i, row in enumerate(data):
        # Only the first five rows are shown.
        if i < 5:
            for cel in row:
                if cel == "SubRoutes" :
                    for a in row[cel]:
                        print a
                #print cel.encode('utf-8') + ": "+ str(d[cel]).encode('utf-8')
            print " "
def main():
    """Fetch Google Place details for stops lacking routes; store the routes.

    For every stop in GoogleStops with no rows in GoogleRoutes yet, queries
    the Place Details API (rotating through a pool of API keys, through an
    HTTP proxy), scrapes bus route names out of the returned Google Maps
    page, and inserts them via intoDB().  Python 2 script.
    """
    db, cursor = db_connect()
    # Stops that do not yet have any route rows.
    cursor.execute("SELECT place_id, name from GoogleStops \
            where place_id not in (select distinct place_id from GoogleRoutes) \
            ORDER BY place_id ASC")
    # Pool of API keys; one is picked at random per request to spread quota.
    key= []
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    key+= ["&key=API_KEY_HAS_BEEN_REMOVED"]
    privous = 0
    keyExpect = []
    errorTimes = 0
    errorcount = 0
    bar = progress(total=cursor.rowcount)
    fetch = cursor.fetchall()
    for i in range(len(fetch)):
        placeid, name = fetch[i]
        print str(name) + ": "+ str(placeid)
        # Retry until a response other than OVER_QUERY_LIMIT comes back.
        while True:
            url = "https://maps.googleapis.com/maps/api/place/details/json?"
            url+= "placeid=%s"%(placeid)
            # NOTE(review): the '®' below looks like a mangled '&region=' --
            # verify against the original source.
            url+= "&language=zh-TW®ion=TW"
            url+= key[ randint(0, len(key)-1) ]
            print url
            print
            #data = readURL(url)
            response = requests.get(url, proxies={"http":"http://USER_NAME_HAS_BEEN_REMOVED:PASSWORD_HAS_BEEN_REMOVED@authproxy.fju.edu.tw:3128"})
            data = json.loads(response.text)
            if errorTimes == len(key):
                quit()
            if "status" not in data:
                quit()
            if data["status"] != "OVER_QUERY_LIMIT":
                break
            #errorTimes += 1
            #print "OVER_QUERY_LIMIT"
        print data["result"]["url"]
        # Fetch the human-facing Google Maps page for this place.
        #get = readContent(data["result"]["url"]+"&hl=zh_TW")
        response = requests.get(data["result"]["url"]+"&hl=zh_TW", proxies={"https":"https://USER_NAME_HAS_BEEN_REMOVED:PASSWORD_HAS_BEEN_REMOVED@authproxy.fju.edu.tw:3128"})
        get = response.text.encode("utf-8")
        print get
        # Google serves a "sorry" page when it detects scraping.
        if get.find("sorry") > 0:
            quit()
        results = ""
        # Route names appear as '"<name>",<n>,"#ffffff"' tuples in the page.
        for route in re.findall('"(\w+)",\w+,"#ffffff"', get.decode("utf-8"), re.UNICODE):
            print route.encode("utf-8")
            results += ( ", " if len( results ) > 0 else " ")
            results += " ('%s', '%s') "%(placeid, route)
        intoDB(results)
        bar.getStep()
        print "Curr:::" + str(i)
        print "Curr:::" + str(i)
        print "Curr:::" + str(i)
        print "Curr:::" + str(i)
        bar.next()
    #demo()
if __name__ == "__main__":
main()
| [
"starcaspar@gmail.com"
] | starcaspar@gmail.com |
1a95d366947058c89f9419baffce0086c13280a6 | 36978086cf5f34e16ceac7c2649b49ccb4c5ac90 | /config/munin/mongodb_replset_lag | 0c2f3ed4bdbabde170d68abc6b6e9b74d14b19de | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | aragilar/NewsBlur | 04e754093cd52bc2d9957ea767747d6d604dfbba | 64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9 | refs/heads/master | 2021-08-28T17:39:50.734396 | 2013-06-06T01:52:20 | 2013-06-06T01:52:37 | 10,520,281 | 0 | 0 | MIT | 2021-08-13T05:35:33 | 2013-06-06T06:26:24 | Objective-C | UTF-8 | Python | false | false | 1,790 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from munin.mongodb import MuninMongoDBPlugin
PRIMARY_STATE = 1
SECONDARY_STATE = 2
class MongoReplicaSetLag(MuninMongoDBPlugin):
    """Munin plugin graphing replica-set lag and the primary's oplog length.

    Graph fields:
      optimeLag   -- seconds between the primary's optime and that of the
                     most-lagged secondary.
      oplogLength -- seconds of history covered by the primary's oplog.

    Bug fix: _get_max_replication_lag stored ``optime.time`` (a plain
    number) but then compared ``optime.time < oldest_secondary_optime.time``,
    so any replica set with two or more secondaries raised AttributeError.
    The comparison now uses the stored number directly.
    """
    vlabel = "seconds"
    title = "MongoDB Replica Set Lag"
    fields = [("optimeLag", {'label': "Oldest secondary lag"}), ("oplogLength", {"label": "Primary oplog length" })]

    def _get_oplog_length(self):
        """Seconds between the first and last entry of the local oplog."""
        oplog = self.connection['local'].oplog.rs
        last_op = oplog.find({}, {'ts': 1}).sort([('$natural', -1)]).limit(1)[0]['ts'].time
        first_op = oplog.find({}, {'ts': 1}).sort([('$natural', 1)]).limit(1)[0]['ts'].time
        return last_op - first_op

    def _get_max_replication_lag(self):
        """Primary optime minus the most-lagged secondary's optime, in seconds.

        Raises if no primary or no secondary is visible in replSetGetStatus.
        """
        status = self.connection.admin.command('replSetGetStatus')
        primary_optime = None
        oldest_secondary_optime = None
        for member in status['members']:
            optime = member['optime']
            if member['state'] == PRIMARY_STATE:
                primary_optime = optime.time
            elif member['state'] == SECONDARY_STATE:
                # oldest_secondary_optime already holds a timestamp number.
                if oldest_secondary_optime is None or optime.time < oldest_secondary_optime:
                    oldest_secondary_optime = optime.time
        if not primary_optime or not oldest_secondary_optime:
            raise Exception("Replica set is not healthy")
        return primary_optime - oldest_secondary_optime

    def execute(self):
        """Collect both graph values for munin."""
        oplog_length = self._get_oplog_length()
        replication_lag = self._get_max_replication_lag()
        return {
            "optimeLag": replication_lag,
            "oplogLength": oplog_length
        }
if __name__ == "__main__":
MongoReplicaSetLag().run()
| [
"samuel@ofbrooklyn.com"
] | samuel@ofbrooklyn.com | |
99906cf43c54aaafdbb5841dea05182db0a4e2d1 | a362695a27c85d4091f8adc707614d3c67ce3e85 | /story/migrations/0006_event_details.py | d048c54faac4ee1741b58e77549c6cafea358da7 | [] | no_license | altheasmith/thailand2010 | 97e3804903f8d3cdf41720f0c2e5244f6446b6b1 | e0a05fe3683cd65a4cffcf3006bd741dc72d5d38 | refs/heads/master | 2022-11-29T23:38:53.057048 | 2020-08-11T16:01:41 | 2020-08-11T16:01:41 | 285,666,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # Generated by Django 3.1 on 2020-08-06 22:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('story', '0005_institutiondetail'),
]
operations = [
migrations.AddField(
model_name='event',
name='details',
field=models.TextField(default=''),
preserve_default=False,
),
]
| [
"althea.o.smith@gmail.com"
] | althea.o.smith@gmail.com |
2c5b7a5a2b3360627bc165c89f46250a2e5ed9d5 | 9146155242cd65d96de3734c2078244af68c2da0 | /EstruturaDeDecisao25.py | c9a41656eed0c77438d03173ebc39a0a5ed8610f | [] | no_license | fabricio24530/ListaPythonBrasil | 364961a5c7970831017a8aa4846c746724c66d84 | 5062f830380ad459ce39b643bacf4104fd8e4327 | refs/heads/master | 2020-12-15T13:21:32.657388 | 2020-03-22T13:10:27 | 2020-03-22T13:10:27 | 235,116,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | '''Faça um programa que faça 5 perguntas para uma pessoa sobre um crime. As perguntas são:
"Telefonou para a vítima?"
"Esteve no local do crime?"
"Mora perto da vítima?"
"Devia para a vítima?"
"Já trabalhou com a vítima?" O programa deve no final emitir uma classificação sobre a participação da pessoa no crime.
Se a pessoa responder positivamente a 2 questões ela deve ser classificada como "Suspeita", entre 3 e 4 como "Cúmplice"
e 5 como "Assassino". Caso contrário, ele será classificado como "Inocente".'''
# Ask the five yes/no questions in order, keeping each normalised answer.
perguntas = (
    'Telefonou para a vítima? [S] - Sim [N] - Não ',
    'Esteve no local do crime? [S] - Sim [N] - Não ',
    'Mora perto da vítima? [S] - Sim [N] - Não ',
    'Devia para a vítima? [S] - Sim [N] - Não ',
    'Já trabalhou com a vítima? [S] - Sim [N] - Não ',
)
respostas = [input(pergunta).strip().lower() for pergunta in perguntas]
print()
print()
# Classify by the number of 'yes' answers, per the exercise statement:
# 2 -> suspect, 3-4 -> accomplice, 5 -> murderer, otherwise innocent.
# Bug fix: the original printed the misspelled 'Supeita!' and the wrong
# verdicts 'Cumplice!'/'Culpada!' instead of the specified
# 'Suspeita'/'Cúmplice'/'Assassino'.
total_sim = respostas.count('s')
if total_sim == 2:
    print('Suspeita!')
elif total_sim in (3, 4):
    print('Cúmplice!')
elif total_sim == 5:
    print('Assassino!')
else:
    print('Inocente!')
| [
"noreply@github.com"
] | fabricio24530.noreply@github.com |
2050ebc33bd9b78f63a5dc635ed83c17658e2d4a | b4983a1245c141acca097f12b726720c480f903a | /luther_09-10/cs151/tree.py | 0014c2015309b8fcc58b79758a1514d80578306a | [] | no_license | gerisc01/school_work | eb890ef2d836f67e371358f7dcf98812109a71de | 7488632f26b63d9f950157a97f5cf6fb72b1b255 | refs/heads/master | 2021-01-10T20:21:26.165662 | 2013-06-27T18:41:57 | 2013-06-27T18:41:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | from cTurtle import *
import random
def drawTree(t, length):
if length >= 1:
t.forward(length)
numbranches = random.randint(3,5)
if numbranches == 3:
n = random.randint(25,35)
t.right(n)
t.left(n-30)
drawTree(t,length/random.randrange(2,5))
m = random.randint(25,35)
t.left(m)
t.right(m-30)
drawTree(t,length/random.randrange(2,5))
p = random.randint(25,35)
t.left(p)
t.right(p-30)
drawTree(t,length/random.randrange(2,5))
q = random.randint(25,35)
t.right(q)
t.left(q-30)
t.backward(length)
if numbranches == 4:
n = random.randint(13,23)
t.right(n)
t.left(n-18)
drawTree(t,length/random.randrange(2,5))
m = random.randint(13,23)
t.left(m)
t.right(m-18)
drawTree(t,length/random.randrange(2,5))
p = random.randint(13,23)
t.left(p)
t.right(p-18)
drawTree(t,length/random.randrange(2,5))
q = random.randint(13,23)
t.left(q)
t.right(q-18)
r = random.randint(13,23)
t.right(r)
t.left(r-18)
t.backward(length)
def main():
    """Set up a turtle at the bottom centre facing up, then draw one tree."""
    pen = Turtle()
    # Move to the bottom centre without drawing, then face upward.
    pen.up()
    pen.right(90)
    pen.forward(200)
    pen.left(180)
    pen.down()
    # Batch screen updates so the recursion renders quickly.
    pen.tracer(10)
    drawTree(pen, 200)
    pen.exitOnClick()
main()
| [
"gerisc01@luther.edu"
] | gerisc01@luther.edu |
9cb1272f31e583f835c09420936429f9ccd9acac | c1938ec2593fd50a6cb19700ffe812a8d827d3eb | /cleanedup/neurips_experiments/experiment_flat.py | c3ea4237aab1d2aac98dc0aa739afff9152b4dc9 | [] | no_license | Kohsin/power_law | 577922130bbf739a47dca2b2d8fcce66dd59c9f0 | 4032f67b4205f9d1d7be0c1f05dacb25a0a43efe | refs/heads/master | 2023-03-21T04:16:02.434196 | 2020-12-05T17:46:49 | 2020-12-05T17:46:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,494 | py | import sys
from tqdm import tqdm
import fire
from itertools import product
sys.path.append('..')
from ModelDefs.models import ModelFactory
from DataDefs.data import get_data
import torch
import numpy as np
import copy
import os
from joblib import Parallel, delayed
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def bad_boy(tau=10, activation='tanh', cuda=False, num_epochs=100, vanilla=False, dataset='CIFAR10', arch='cnn_flat',
realizations=3):
lr = 1e-4
if arch == 'flat':
dims = [(28 * 28, 1_000), (1_000, 1_000), (1_000, 1_000), (1_000, 10)]
batch_size = 1500
dataset = 'MNIST'
elif arch == 'cnn_flat':
lr = 1e-4
if dataset == 'MNIST':
dims = [2, (1, 16), (16, 32), (800, 1000), (1000, 10)]
batch_size = 6000
else:
dims = [2, (3, 16), (16, 32), (1152, 1000), (1000, 10)]
batch_size = 6000
else:
print("Doesnt exist!!")
train_loader, _, full_loader = get_data(dataset=dataset, batch_size=batch_size, _seed=0,
validate=False, data_dir='data/')
if vanilla:
train_loader, _, full_loader = get_data(dataset=dataset, batch_size=batch_size, _seed=0,
validate=False, data_dir='data/')
kwargs = {"dims": dims,
"activation": activation,
"architecture": arch,
"trainer": "vanilla",
"regularizer": "no",
'alpha_jacob': 1e-4,
'bn': False,
'alpha_spectra': 1,
'optimizer': 'adam',
'lr': lr,
'weight_decay': 0,
'cuda': cuda,
'eps': 0.3,
'only_last': True,
'gradSteps': 40,
'noRestarts': 1,
'lr_pgd': 1e-2,
'training_type': 'FGSM',
'slope': [1.00],
'eig_start': tau,
'demean': True}
models = [ModelFactory(**kwargs) for j in range(realizations)]
for j in range(realizations):
for epoch in tqdm(range(num_epochs)):
models[j].train_epoch(train_loader)
model_params = []
for idx in range(len(models)):
model_params.append((kwargs, models[idx].state_dict()))
torch.save(model_params,
'experiment_2/' + dataset + '/flat_vanilla_arch=' + arch + '_activation=' + activation + '_epochs=' + str(num_epochs))
else:
regularizers_strengths = [5., 2., 1.]
# In[]
"Load in data loader"
X_full, _ = next(iter(full_loader)) # load in full training set for eigenvectors
# In[]
kwargs = {"dims": dims,
"activation": activation,
"architecture": arch,
"trainer": "vanilla",
"regularizer": "eig",
'alpha_jacob': 1e-4,
'bn': False,
'alpha_spectra': 1.0,
'optimizer': 'adam',
'lr': lr,
'weight_decay': 0,
'cuda': cuda,
'eps': 0.3,
'only_last': True,
'gradSteps': 40,
'noRestarts': 1,
'lr_pgd': 1e-2,
'training_type': 'FGSM',
'slope': 1.00,
'eig_start': tau,
'demean': True}
counter = 0
for reg_strength in regularizers_strengths:
kwargs['alpha_spectra'] = reg_strength
models = [ModelFactory(**kwargs) for j in range(realizations)]
print('no vibes')
for j in range(realizations):
for epoch in tqdm(range(num_epochs)):
models[j].train_epoch(train_loader, X_full)
model_params = []
for idx in range(len(models)):
model_params.append((kwargs, models[idx].state_dict()))
torch.save(model_params, 'experiment_2/' + dataset + '/flat_tau=' + str(tau) + '_arch=' + arch + '_activation=' + activation + '_epochs=' + str(
num_epochs) + '_alpha=' + str(1) + '_beta=' + str(reg_strength))
counter += 1
print(str(len(regularizers_strengths) - counter) + " combos left")
if __name__ == '__main__':
fire.Fire(bad_boy)
| [
"piotr.sokol@stonybrook.edu"
] | piotr.sokol@stonybrook.edu |
602a2e0ad279811b45e061bfc044f21c53fd8a57 | 690c502e514842e346b1f0a272072c2a659c1b9d | /quiz/admin.py | 21a39ab93fe017d6e814fd25a600da6eba30635e | [] | no_license | hsrambo07/Django_QuizApp | 5f79137ab06c1f3a7009e3a40498eb202f135d51 | d50f89061fa7193769169a679600973a04f17815 | refs/heads/main | 2023-01-09T10:56:53.754263 | 2020-11-17T16:59:59 | 2020-11-17T16:59:59 | 313,567,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(Quiz)
admin.site.register(Question)
admin.site.register(Choice)
| [
"hs2002singhal@gmail.com"
] | hs2002singhal@gmail.com |
4680394e14442b9e016dc3834172a4f40eede73b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/239/32114/submittedfiles/swamee.py | aac7bfcabaa9fa811d2304d01539530e593c0d46 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
f = float(input("Digite aqui o valor de f: "))
l = float(input("Digite aqui o valor de l: "))
q = float(input("Digite aqui o valor de q: "))
DH = float(input("Digite aqui o valor de Delta H: "))
v = float(input("Digite aqui o valor de v: "))
g = 9.81
e = 0.000002
pi = 3.14
D = sqrt(8*f*l*q**q)/(pi*pi*g*DH)
print("D=%.4f"%D)
Rey = (4*q)/(pi*D*v)
print("Rey=%.4f"%Rey)
k = 0.25/(math.log10((e)/(3.7*D))+(5.74)/((Rey**0.9))**2)
print("k=%.4f"%k)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
dbf52a834f34fa4f3c3318bcab831ea4e23f15a0 | 2748d523c4ced916b61e8f2a0ebd6c7237705f69 | /core/forms.py | 8d56e99d1aec9c278839b44ba66ef4cdee9daa37 | [] | no_license | Titowisk/meubaz | 52d5101bc107081c7175f27bb538efc6fecf5b24 | 1af586195123ffd13818695cff8cc286018a1c7b | refs/heads/master | 2021-08-23T16:47:54.960522 | 2017-12-05T19:02:09 | 2017-12-05T19:02:09 | 106,558,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | from django import forms
from django.conf import settings
from django.core.mail import send_mail
class ContactForm(forms.Form):
name = forms.CharField(label="Nome", max_length=100)
email = forms.EmailField(label="E-mail")
message = forms.CharField(label="Mensagem", widget=forms.Textarea)
def send_mail(self):
name = self.cleaned_data['name']
email = self.cleaned_data['email']
message = self.cleaned_data['message']
message = "Nome: {0}\nEmail: {1}\nMensagem: {2}".format(name, email, message)
send_mail(
subject='Contato do MeuBaz',
message=message,
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[settings.DEFAULT_FROM_EMAIL]
)
"""
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs['class'] = 'form-control'
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['message'].widget.attrs['class'] = 'form-control'
self.fields['message'].widget.attrs['rows'] = '4'
"""
# self.fields['name'] acessa o campo
#<input type="text" name="name" maxlength="100" required="" id="id_name">
# .widget.attrs[''] = '' pode criar argumentos dentro do campo | [
"rabelo51@gmail.com"
] | rabelo51@gmail.com |
0e27c1cde1975d5d82729fd78224ec5fb7625502 | 8f36a6aacb7ee9cf0bd8726941903da69350b76a | /pipeline_source/cifarlibs/hypertune/testhypertune.py | 410d8da459a8c0ec1d516cda610c573134d5056c | [] | no_license | tanle2694/cifar10_kubeflow | 02466f9219116d91100308ed5443fbe6ca578eb7 | 97e588470938eb3622bb019a2dbe1d27a02b4be1 | refs/heads/master | 2023-06-03T14:07:37.383016 | 2021-06-21T04:41:22 | 2021-06-21T04:41:22 | 370,740,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | from cifarlibs.hypertune.hypertunner import HyperTunner
objective_spec = {
"type": "minimize",
"goal": 0.001,
"objective_metric_name": "loss",
"additional_metric_names": ["accuracy"]
}
parameters_spec = [
{
"name": "lr",
"parameter_type": "double",
"feasible_space": { "min": "0.01",
"max": "0.06",
"list": None,
"step": None},
"description": "learning rate"
},
{
"name": "momentum",
"parameter_type": "double",
"feasible_space": {"min": "0.5",
"max": "0.9",
"list": None,
"step": None},
"description": "momentum"
},
]
container = {
"name": "pytorch",
"image": "pytorch-mnist:latest",
"command": ["python", "/opt/pytorch-mnist/mnist.py", "--epochs=1",
"--lr=${trialParameters.lr}",
"--momentum=${trialParameters.momentum}"],
"envs": [{
"name": "PYTHONPATH",
"value": "/opt/pytorch-mnist/"
}]
}
pvcs = [
{
"claimName": "cifar-git-src" ,
"mountPath": "/tmp/gitcifarsrc"
}
]
katib_tunner = HyperTunner(
namespace="kubeflow-user-example-com",
experiment_name="testkatib",
algorithm_name="random",
objective_spec=objective_spec,
parameters_spec=parameters_spec,
container=container,
pvcs=pvcs,
max_trial_count=3,
max_failed_trial_count=22,
parallel_trial_count=1
)
# print(katib_tunner._experiment)
katib_tunner.start_experiments()
katib_tunner.wait_for_completion()
print(katib_tunner.get_optimal_hyperparameters()) | [
"tan.le2@ntq-solution.com.vn"
] | tan.le2@ntq-solution.com.vn |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.