blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aec5cebc7c02dfa2d6a9bd26431eef3f3eb82c51 | 9870d2c6880fd3fa558c46e3bf160aae20c74157 | /permuteUnique.py | f104cb1cf5024240cfeb1b15ac8dd83327f3196d | [] | no_license | Yigang0622/LeetCode | e7f7f115c6e730c486296ef2f1a3dd1a3fdca526 | c873cd1ee70a2bdb54571bdd50733db9f6475e9e | refs/heads/master | 2023-03-03T14:32:25.498633 | 2021-02-15T13:59:00 | 2021-02-15T13:59:00 | 281,423,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # LeetCode
# permuteUnique
# Created by Yigang Zhou on 2020/9/18.
# Copyright © 2020 Yigang Zhou. All rights reserved.
# 47. 全排列 II
# https://leetcode-cn.com/problems/permutations-ii/
from typing import List
class Solution:
    """Backtracking solution for LeetCode 47 (Permutations II)."""

    def permuteUnique(self, nums: List[int]) -> List[List[int]]:
        """Return all distinct permutations of ``nums`` (which may contain duplicates)."""
        ans: List[List[int]] = []
        nums.sort()  # sorting groups equal values so duplicates can be skipped in order
        self.dfs([], [0] * len(nums), 0, nums, ans)
        return ans

    def dfs(self, current, visited, i, nums, ans):
        """Extend ``current`` by one element; record it once it is a full permutation."""
        if i == len(nums):
            ans.append(list(current))
            return
        for j in range(len(nums)):
            if visited[j]:
                continue
            # only the first unused copy of a run of equal values may be placed here,
            # which prevents duplicate permutations
            if j and nums[j] == nums[j - 1] and not visited[j - 1]:
                continue
            visited[j] = 1
            current.append(nums[j])
            self.dfs(current, visited, i + 1, nums, ans)
            current.pop()
            visited[j] = 0
nums = [1,1,2]
r = Solution().permuteUnique(nums)
print(r) | [
"zhou@zygmail.com"
] | zhou@zygmail.com |
b74676e45149ad9bbe55f3f25d2e2048b5786119 | 930c207e245c320b108e9699bbbb036260a36d6a | /BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/AHU_Heating_Demand_Setpoint.py | ec0c93fb165063c910beab5029a9309ddd5da42c | [] | no_license | InnovationSE/BRICK-Generated-By-OLGA | 24d278f543471e1ce622f5f45d9e305790181fff | 7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2 | refs/heads/master | 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Heating_Demand_Setpoint import Heating_Demand_Setpoint
class AHU_Heating_Demand_Setpoint(Heating_Demand_Setpoint):
	"""RDFAlchemy mapping for the Brick 1.0.2 ``AHU_Heating_Demand_Setpoint`` concept."""
	# rdf_type binds this Python class to the corresponding Brick ontology URI.
	rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').AHU_Heating_Demand_Setpoint
| [
"Andre.Ponnouradjane@non.schneider-electric.com"
] | Andre.Ponnouradjane@non.schneider-electric.com |
ef044ae299db9f398d052e194ee8cd25f4f925be | 71cc41abe5aa896e4e09faa004b7b3e67004946e | /programming-with-guis/ex-02-06b.py | 94fbf509813f6223b20c8b516912a5e0187f7241 | [
"Apache-2.0"
] | permissive | psiborg/FutureLearn | 0f1872c0e194dab2b3e311d8122838816196c9f0 | f6ca371d740a1054d3a3fcbb3502b44fe9f73f59 | refs/heads/main | 2023-04-25T02:34:29.236519 | 2021-05-06T04:27:52 | 2021-05-06T04:27:52 | 352,233,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | #!/usr/bin/env python3
# programming-with-guis
# Ex. 2.6b
from guizero import App, Text
# this time we have not set the layout to auto as it is default
app = App(title="align")
# create a series of widgets each with a different alignment
top_text_1 = Text(app, text="top_text_1", align="top", bg="#330000", color="#FFFFFF", width="fill", height="fill")
top_text_2 = Text(app, text="top_text_2", align="top", bg="#990000", color="#FFFFFF")
top_text_3 = Text(app, text="top_text_3", align="top", bg="#FF0000", color="#FFFFFF")
bottom_text_1 = Text(app, text="bottom_text_1", align="bottom", bg="#003333", color="#FFFFFF", width="fill")
bottom_text_2 = Text(app, text="bottom_text_2", align="bottom", bg="#009999", color="#FFFFFF")
bottom_text_3 = Text(app, text="bottom_text_3", align="bottom", bg="#00FFFF", color="#000000")
left_text_1 = Text(app, text="left_text_1", align="left", bg="#003300", color="#FFFFFF")
left_text_2 = Text(app, text="left_text_2", align="left", bg="#009900", color="#FFFFFF", height=5)
left_text_3 = Text(app, text="left_text_3", align="left", bg="#00FF00", color="#000000", width="fill", height=10)
right_text_1 = Text(app, text="right_text_1", align="right", bg="#000033", color="#FFFFFF", height="fill")
right_text_2 = Text(app, text="right_text_2", align="right", bg="#000099", color="#FFFFFF")
right_text_3 = Text(app, text="right_text_3", align="right", bg="#0000FF", color="#FFFFFF")
app.display()
| [
"jim.ing@gmail.com"
] | jim.ing@gmail.com |
a1ac6616d7ceb4489edaba41e6502f59f0a9cb80 | 4ab99b7fb72f95f76a11dbeaf179120ab57de53f | /RN2.py | c25ff80d02f7c26dbe8d3626bee05b27881fca0f | [] | no_license | sepideh06/ProcessingDataAnalysisCommunicationBehavior | 04c7f01ae450f11d97c3fbe67c19bfa895924e64 | 285464ec13f1bf14f72eda76b5d6a36b90f5349f | refs/heads/master | 2021-07-09T15:01:37.732509 | 2020-08-01T14:27:11 | 2020-08-01T14:27:11 | 177,779,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
import tensorflow as tf
# Just disables the warning, doesn't enable AVX/FMA
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Load the training series (second CSV column only) and scale it to [0, 1].
apple_training_processed = pd.read_csv('UpStateFunction(1).csv', usecols=[1])
scaler = MinMaxScaler(feature_range = (0, 1))
apple_training_scaled = scaler.fit_transform(apple_training_processed)
# Build sliding windows: each sample is the previous 10 values and its label
# is the next value.  NOTE(review): 5022 is presumably the length of the
# training file — confirm it matches the CSV.
features_set = []
labels = []
for i in range(10, 5022):
    features_set.append(apple_training_scaled[i-10:i, 0])
    labels.append(apple_training_scaled[i, 0])
features_set, labels = np.array(features_set), np.array(labels)
# LSTM layers expect input shaped (samples, timesteps, features).
features_set = np.reshape(features_set, (features_set.shape[0], features_set.shape[1], 1))
# Stacked 4-layer LSTM with dropout, ending in a single regression output.
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(features_set.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units = 1))
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
model.fit(features_set, labels, epochs = 20, batch_size = 32)
# Prepare test windows: prepend the last 10 training values so the first test
# sample has a full history, and reuse the scaler fitted on the training data.
apple_testing_processed = pd.read_csv('upstatefunction_testing.csv',usecols=[1])
apple_total = pd.concat((apple_training_processed['Up-State-Value'], apple_testing_processed['Up-State-Value']), axis=0)
test_inputs = apple_total[len(apple_total) - len(apple_testing_processed) - 10:].values
test_inputs = test_inputs.reshape(-1,1)
test_inputs = scaler.transform(test_inputs)
test_features = []
# NOTE(review): 2476 is presumably len(test) + 10 — confirm against the CSV.
for i in range(10, 2476):
    test_features.append(test_inputs[i-10:i, 0])
test_features = np.array(test_features)
test_features = np.reshape(test_features, (test_features.shape[0], test_features.shape[1], 1))
# Predict and map the scaled predictions back to the original units.
predictions = model.predict(test_features)
predictions = scaler.inverse_transform(predictions)
# Plot actual vs. predicted values.
plt.figure(figsize=(10,6))
plt.plot(apple_testing_processed, color='blue', label='Actual Up-State Function values')
plt.plot(predictions , color='red', label='Predicted Up-State Function values')
plt.title('Up-State Function Prediction')
plt.xlabel('TimeInterval')
plt.ylabel('Up-State Function values')
plt.legend()
plt.show()
| [
"noreply@github.com"
] | sepideh06.noreply@github.com |
24e062b0a347efb144c793470090a11891eac1ea | cae6097057099a88324f5ecaf2417ff85cf46ad4 | /CodigoML/Regresion/regresion.py | 75c3ad8958b0b71fa55384a58b90135291d6e90c | [] | no_license | mayralina/python-code | ec5adaf8d0638558ad249efdf1606b1eda530997 | 244c0714878ab6cdeca5934491550fa543470139 | refs/heads/master | 2020-06-18T22:08:06.764226 | 2019-07-11T21:43:36 | 2019-07-11T21:43:36 | 196,468,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | #import
import matplotlib.pyplot as plt #Para hacer graficas
import numpy as np #Para preparae mis dat
#importing from scikit-learn (librerias de Machine Learning)
from sklearn import datasets, linear_model
# Load dataset: toy house prices vs. house sizes (10 hard-coded samples)
house_price = [245,321,279,308,199,219,405,324,319,255]
size = [1400,1600,1700,1875,1100,1550,2350,2450,1425,1700]
print(len(house_price))
print(len(size))
# Reshape the input to the (n_samples, n_features) shape sklearn expects
size2 = np.array(size).reshape((-1,1))
print(size2)
# Fit an ordinary least-squares regression line to the data
regr = linear_model.LinearRegression()
regr.fit(size2, house_price)
print('Coeficients: {0}'.format(regr.coef_))
print('intercept: {0}'.format(regr.intercept_))
# Formula obtained for the trained model
def graph(formula, x_range):
    # Evaluate ``formula`` (a Python expression in terms of ``x``) over
    # ``x_range`` and plot it.  SECURITY NOTE: eval() executes arbitrary code —
    # only acceptable here because the formula string is hard-coded below.
    x = np.array(x_range)
    y = eval(formula)
    plt.plot(x,y)
# Plot the fitted prediction line over the raw data scatter
graph('regr.coef_*x + regr.intercept_', range(500,3000))
plt.scatter(size,house_price, color='black')
plt.ylabel('house price')
plt.xlabel('size of house')
plt.savefig("mayra.pdf")
| [
"root@nn2.innovalabsnet.net"
] | root@nn2.innovalabsnet.net |
3d0acf4566d6c149de4d7e08be4584349f50dc07 | 9f98a10049e2c1417ca08d8b03f8bdbd7f228fa4 | /py_test/algorithm/等差数列.py | fcfa382b780475ce40abcddc5cf4af29d4eee511 | [] | no_license | gallofb/Python_linux | 73870ac6e3cd1aa66c89fdacd1fc6fefb27c69cf | ec330d2bbbb7c05367ed55f8801614ae48444d7c | refs/heads/master | 2020-04-13T23:52:10.724678 | 2019-05-09T13:13:25 | 2019-05-09T13:13:25 | 163,517,895 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | # 题目描述
# 如果一个数列S满足对于所有的合法的i,都有S[i + 1] = S[i] + d, 这里的d也可以是负数和零,我们就称数列S为等差数列。
# 小易现在有一个长度为n的数列x,小易想把x变为一个等差数列。小易允许在数列上做交换任意两个位置的数值的操作,并且交换操作允许交换多次。但是有些数列通过交换还是不能变成等差数列,小易需要判别一个数列是否能通过交换操作变成等差数列
# 输入描述:
#
# 输入包括两行,第一行包含整数n(2 ≤ n ≤ 50),即数列的长度。
# 第二行n个元素x[i](0 ≤ x[i] ≤ 1000),即数列中的每个整数。
#
# 输出描述:
#
# 如果可以变成等差数列输出"Possible",否则输出"Impossible"。
#
# 示例1
# 输入
#
# 3
# 3 1 2
#
# 输出
#
# Possible
#
| [
"854591086@qq.com"
] | 854591086@qq.com |
742775c2a1a92a8b26038369364d0226d91ec913 | 23579a08f36a6c5e627e489458f85edd44400710 | /mosaic_constants.py | 63034fcbd7447f052002019053bfe0f6b82c6433 | [] | no_license | just-jason/qd_mosaic | 42aeac053a878972a7fa6f1a3075e3224fd77dd3 | 27e6a2d05c03467c5a9356579a21198f530c402e | refs/heads/master | 2022-12-05T09:44:27.769635 | 2020-08-25T19:14:52 | 2020-08-25T19:14:52 | 290,292,356 | 0 | 0 | null | 2020-08-25T18:27:06 | 2020-08-25T18:27:06 | null | UTF-8 | Python | false | false | 232 | py | render_path = "renders"
json_path = "jdata"
set_path = "thumbs"
thumbs_path = "thumbs"
bdata_path = "bdata"
# this is intended to simulate a medium pen stroke if the 256x256 thumbnail represents a quarter inch
thumb_pen_width = 15
| [
"jim.bumgardner@disney.com"
] | jim.bumgardner@disney.com |
bba14463d406a243a6adeccd39f8417cc67a5a1f | 5e8c431bd8cf9717e62c3205f59520c21b872026 | /model/attention.py | 5ea1f43c9b575640d4bc77036e0e390a23a70c18 | [
"MIT"
] | permissive | helang818/MA-Net | 3543fc13559b797200b7da49d265c322c478f274 | c90cfd2e05046945a1aefe1c9c998202c298c224 | refs/heads/main | 2023-08-24T04:59:41.359213 | 2021-11-08T02:26:25 | 2021-11-08T02:26:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,163 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class Flatten(nn.Module):
    """Flatten every dimension except the leading batch dimension."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class ChannelGate(nn.Module):
    """Channel attention: rescale each channel by a sigmoid weight derived from pooled statistics."""

    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        # shared bottleneck MLP applied to every pooled channel descriptor
        self.mlp = nn.Sequential(Flatten(),
                                 nn.Linear(gate_channels, gate_channels // reduction_ratio),
                                 nn.ReLU(),
                                 nn.Linear(gate_channels // reduction_ratio, gate_channels))
        self.pool_types = pool_types

    def forward(self, x):
        spatial = (x.size(2), x.size(3))
        channel_att_sum = None
        for pool_type in self.pool_types:
            # global pooling collapses each feature map to one value per channel
            if pool_type == 'avg':
                descriptor = F.avg_pool2d(x, spatial, stride=spatial)
                channel_att_raw = self.mlp(descriptor)
            elif pool_type == 'max':
                descriptor = F.max_pool2d(x, spatial, stride=spatial)
                channel_att_raw = self.mlp(descriptor)
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
        # broadcast the per-channel gate over the spatial dimensions
        gate = torch.sigmoid(channel_att_sum)
        scale = gate.unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale
class ChannelPool(nn.Module):
    """Compress the channel axis to two maps: per-pixel channel max and channel mean."""

    def forward(self, x):
        max_map = torch.max(x, 1)[0].unsqueeze(1)
        mean_map = torch.mean(x, 1).unsqueeze(1)
        return torch.cat((max_map, mean_map), dim=1)
class SpatialGate(nn.Module):
    """Spatial attention: weight each location by a sigmoid map built from channel statistics."""

    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        # 2 -> 1 conv over the (max, mean) maps; relu=False so the sigmoid sees raw logits
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)

    def forward(self, x):
        compressed = self.compress(x)
        logits = self.spatial(compressed)
        scale = torch.sigmoid(logits)
        return x * scale
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention followed by spatial attention."""

    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        super(CBAM, self).__init__()
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.SpatialGate = SpatialGate()

    def forward(self, x):
        refined = self.ChannelGate(x)
        refined = self.SpatialGate(refined)
        return refined
| [
"noreply@github.com"
] | helang818.noreply@github.com |
76503fd44f477611362807b50923b0b3951640ba | 277d0a33645f2f821216d15f1b4b1be6aa6b5fa7 | /oneflow/python/test/ops/test_TestSourceMultiGpuFixedOutNum.py | 9432f0170af3b51cfae13dc814bfc53f941a8e8e | [
"Apache-2.0"
] | permissive | lenran/oneflow | 504a255af0ffae8e24f6515311859524650e110c | 7c34e35ad6505bc12b48ce0fafa20322d280b2b1 | refs/heads/master | 2023-01-24T17:52:33.857444 | 2020-11-20T08:50:07 | 2020-11-20T08:50:07 | 283,967,294 | 0 | 0 | Apache-2.0 | 2020-11-20T08:49:39 | 2020-07-31T07:05:59 | null | UTF-8 | Python | false | false | 1,811 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
def my_test_source(name, out_num):
    # Build, infer and run a "TestSourceMultiGpuFixedOutNum" user op named
    # ``name`` with ``out_num`` total output elements, returning its single
    # "out" blob (the first remote blob of the op).
    return (
        flow.user_op_builder(name)
        .Op("TestSourceMultiGpuFixedOutNum")
        .Output("out")
        .Attr("out_num", out_num)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@flow.unittest.skip_unless_1n1d()
class Test_TestSourceMultiGpuFixedOutNum(flow.unittest.TestCase):
    # Runs the source op on two CPU devices ("0:0-1") with out_num=10; the
    # assertion below shows the expected gathered result is arange(5) repeated
    # twice, i.e. each device presumably produces 5 values.
    def test_testsource_2_gpu(test_case):
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def TestSourceJob():
            with flow.scope.placement("cpu", "0:0-1"):
                ret = my_test_source("my_cc_test_source_op", 10)
                # print("cons_test_source_batch_axis", ret.batch_axis)
                # the source blob must be batch-split along axis 0
                test_case.assertTrue(ret.batch_axis is not None and ret.batch_axis == 0)
            return ret
        y = TestSourceJob().get().numpy()
        test_case.assertTrue(
            np.array_equal(y, np.append(np.arange(5.0), np.arange(5.0)))
        )
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | lenran.noreply@github.com |
3a1d924c60e44b082e8e80f48a9fc1f1261a5e56 | fd099ad76c29e5dd740d920127ef11bd16a45c97 | /server/jobeditor/jsonencoder/__init__.py | 5cd6b57b38365695c45a876b4ef99899f2b400b9 | [
"MIT"
] | permissive | zhangyanwei/job-chain-editor | 5de614ca49640c9fc17d9e74618c313c3f625d73 | 56a2b2b10718abb3dc7e4e313a0ec11c10f5a82a | refs/heads/master | 2023-01-13T02:19:35.264405 | 2019-11-13T16:02:23 | 2019-11-13T16:02:23 | 221,485,814 | 0 | 0 | MIT | 2023-01-04T13:10:34 | 2019-11-13T15:05:38 | Vue | UTF-8 | Python | false | false | 892 | py | import importlib
import os
from flask.json import JSONEncoder
class AutoJSONEncoder(JSONEncoder):
registered_encoders = dict()
def default(self, obj):
try:
encoder = AutoJSONEncoder.registered_encoders.get(type(obj), None)
if encoder:
return encoder(obj)
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
def register(app):
app.json_encoder = AutoJSONEncoder
for file in os.listdir(os.path.dirname(__file__)):
if not file.startswith('__'):
mod_name = file[:-3] # strip .py at the end
m = importlib.import_module('.' + mod_name, package=__package__)
r = getattr(m, 'register', None)
if r:
r(AutoJSONEncoder.registered_encoders)
| [
"verdigris@163.com"
] | verdigris@163.com |
3eee96b13023b5a6cebfa82b4a4612ea73ac4b80 | bf304def5b7993bc940cdd79321557401934747e | /backend/accounts/migrations/0012_remove_user_name.py | f6dd64ddf6ea00107d384251f2cd42d5c9f74345 | [] | no_license | simaogoncalves10/projeto_vce | c325c412ade1809e9f29600c7de294149e131eee | dfdbe8d2e2398052c3b6f64a428a7ed0fa424275 | refs/heads/master | 2023-04-16T07:04:04.103378 | 2021-04-11T13:32:02 | 2021-04-11T13:32:02 | 352,689,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | # Generated by Django 3.1.7 on 2021-04-05 21:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0011_user_name'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='name',
),
]
| [
"49321479+HFaria10@users.noreply.github.com"
] | 49321479+HFaria10@users.noreply.github.com |
a2ac6bbdfff0323c8f4fe83e2d2d49e05e23d939 | 06f0e1477b41c543f9a37e46582038d9f9a78c35 | /sysin.py | 8178a3570331feadf1c925cbd3c359a3f9f93f54 | [] | no_license | imtiyaz86/firstrepo | ab0cb93c78d65deef30951758e8679334d0c00d3 | 5a42a129dc3e556815ce27d29dd17ebd1e65d1ce | refs/heads/master | 2020-05-09T23:59:53.708525 | 2019-05-14T21:03:17 | 2019-05-14T21:03:17 | 181,518,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | #!/usr/local/bin/python2.7
from pysysinfo import disk_func
import subprocess
def tmp_space():
tmp_usage = "du"
tmp_arg = "-h"
path = "/tmp"
print "Space used in /tmp directory"
subprocess.call([tmp_usage, tmp_arg, path])
def main():
disk_func()
tmp_space()
if __name__ == "__main__":
main()
| [
"imtiyaz@linux2.(none)"
] | imtiyaz@linux2.(none) |
570db6accc88fe50729a6579d12fd3b3c150c75c | 65c3e7139829829dd1410228e17f85c285ab0706 | /Aniyom Ebenezer/Phase 2/STRINGS/Day_29_Challenge_Solution/Question 8 Solution.py | d443f353ceb9d57814ceb49adf93228d5ddd05d5 | [
"MIT"
] | permissive | eaniyom/python-challenge-solutions | 167e9d897d0a72f1e264ff2fed0e4cc5541b0164 | 21f91e06421afe06b472d391429ee2138c918c38 | refs/heads/master | 2022-11-24T02:57:39.920755 | 2020-08-05T09:23:04 | 2020-08-05T09:23:04 | 277,686,791 | 1 | 0 | MIT | 2020-07-07T01:31:00 | 2020-07-07T01:30:59 | null | UTF-8 | Python | false | false | 308 | py | """
Write a Python program that takes a list of words and returns the longest one.
"""
def longest_words(word_list):
    """Return the longest word in ``word_list``.

    Ties on length are resolved in favor of the lexicographically larger word
    (a consequence of sorting on the (length, word) pair).
    """
    ranked = sorted((len(word), word) for word in word_list)
    return ranked[-1][1]
print(longest_words(["PHP", "Python", "Backend"])) | [
"eaniyom@gmail.com"
] | eaniyom@gmail.com |
3bc8b0b394a8af2e20d9c16af406ae9f47ba8965 | 7951f2a1453ac33f53e49e465d61ebd74f3a0b86 | /Test03.py | 9dd851bda1ba466d18bb1931d889a3a12adb9318 | [
"Apache-2.0"
] | permissive | lebronjames/python1 | 363845885b44f8be473aa19d26e954a942e30ed4 | 06bfb3d85ea4415217d65ab37ca22b10e217a74a | refs/heads/master | 2021-05-14T04:13:15.070058 | 2018-01-12T08:54:02 | 2018-01-12T08:54:02 | 116,638,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | a = 21
b = 10
c = 0
if (a == b):
print("1 - a 等于 b")
else:
print("1 - a 不等于 b")
if (a != b):
print("2 - a 不等于 b")
else:
print("2 - a 等于 b")
if (a < b):
print("3 - a 小于 b")
else:
print("3 - a 大于等于 b")
if (a > b):
print("4 - a 大于 b")
else:
print("4 - a 小于等于 b")
# 修改变量 a 和 b 的值
a = 5;
b = 20;
if (a <= b):
print("5 - a 小于等于 b")
else:
print("5 - a 大于 b")
if (b >= a):
print("6 - b 大于等于 a")
else:
print("6 - b 小于 a")
c = a + b
print("1 - c 的值为:", c)
c += a
print("2 - c 的值为:", c)
c *= a
print("3 - c 的值为:", c)
c /= a
print("4 - c 的值为:", c)
c = 2
c %= a
print("5 - c 的值为:", c)
c **= a
print("6 - c 的值为:", c)
c //= a
print("7 - c 的值为:", c)
a = 60 # 60 = 0011 1100
b = 13 # 13 = 0000 1101
c = 0
c = a & b; # 12 = 0000 1100
print("1 - c 的值为:", c)
c = a | b; # 61 = 0011 1101
print("2 - c 的值为:", c)
c = a ^ b; # 49 = 0011 0001
print("3 - c 的值为:", c)
c = ~a; # -61 = 1100 0011
print("4 - c 的值为:", c)
c = a << 2; # 240 = 1111 0000
print("5 - c 的值为:", c)
c = a >> 2; # 15 = 0000 1111
print("6 - c 的值为:", c)
a = 10
b = 20
if (a and b):
print("1 - 变量 a 和 b 都为 true")
else:
print("1 - 变量 a 和 b 有一个不为 true")
if (a or b):
print("2 - 变量 a 和 b 都为 true,或其中一个变量为 true")
else:
print("2 - 变量 a 和 b 都不为 true")
# 修改变量 a 的值
a = 0
if (a and b):
print("3 - 变量 a 和 b 都为 true")
else:
print("3 - 变量 a 和 b 有一个不为 true")
if (a or b):
print("4 - 变量 a 和 b 都为 true,或其中一个变量为 true")
else:
print("4 - 变量 a 和 b 都不为 true")
if not (a and b):
print("5 - 变量 a 和 b 都为 false,或其中一个变量为 false")
else:
print("5 - 变量 a 和 b 都为 true") | [
"798258843@qq.com"
] | 798258843@qq.com |
a6c7e17f4c7f3b8822cda3e26700ed7e9cc210f9 | 6bd31df3ea78052d451cf6d81a27f28192f51bad | /app/__init__.py | 7e413f39fa982200733a2b980f4e5c4dbbd6c2b6 | [] | no_license | hasanmehmood/KeyValue_pair_increment | 94e2efc4b355abcd4446573fba02a3bb497b3a69 | e23ca7f42ec70a16cfc7a304040b2a1388c1c84c | refs/heads/master | 2023-05-12T12:48:51.562072 | 2020-04-08T18:18:38 | 2020-04-08T18:18:38 | 39,795,268 | 2 | 1 | null | 2023-05-01T20:14:46 | 2015-07-27T20:04:17 | Python | UTF-8 | Python | false | false | 226 | py | from flask import Flask
# Loading all configurations to flask app
app = Flask(__name__)
app.config.from_object('app.settings')
from app.models import KeyValue
from app.resources import keyvalue
from app import routes, views
| [
"hasanmehmood123@yahoo.com"
] | hasanmehmood123@yahoo.com |
aeb7c078d7b9c8346f32a9a6fabb972f2683eadf | 9bce3862ef1c6235212a49cf49d300e0532be92c | /politico/Exceptions/handler.py | 6e8b2d397fc6c7909a4eefba84197c2cfe36c130 | [] | no_license | erycoking/Politico_API | d7f5458267dee0736ba154e602869d75514abde2 | 004727b03bfb77bd8d717bcf59b3fa3af5d699a0 | refs/heads/develop | 2022-12-09T08:24:37.589001 | 2019-03-06T01:40:29 | 2019-03-06T01:40:29 | 169,312,409 | 1 | 1 | null | 2022-12-08T01:37:36 | 2019-02-05T21:03:40 | Python | UTF-8 | Python | false | false | 511 | py | from flask import Flask, Blueprint, make_response, jsonify
from werkzeug.exceptions import default_exceptions
error = Blueprint('error_handler', __name__)
@error.app_errorhandler(Exception)
def handle_error(err):
message = str(err)
err_list = message.split(' ')
if str(err_list[0]).isdigit():
status_code = int(err_list[0])
else:
status_code = 500
return make_response(jsonify({
'status': status_code,
'error': message
}), status_code)
| [
"erycoking360@gmail.com"
] | erycoking360@gmail.com |
b41a85c4442578767973735198a8e87f0a30bf9f | 404fa161db6c3e2a2ef5b657d64f38804774fd51 | /pyzoo/test/zoo/orca/learn/ray/tf/test_tf_ray_estimator.py | 15ddd998adf3530403650602ad0654b03878630a | [
"Apache-2.0"
] | permissive | masir110/analytics-zoo | 6e6d409a40db0c98256f03cb73da1d0ab2bb236b | 6a8f90a8b4886026696843a12cf362861ee26a2f | refs/heads/master | 2022-12-11T14:27:22.891358 | 2020-08-26T01:27:29 | 2020-08-26T01:27:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,734 | py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import numpy as np
from zoo.orca.learn.tf2 import Estimator
from zoo.ray import RayContext
# Sizes of the synthetic train/test splits produced by linear_dataset().
NUM_TRAIN_SAMPLES = 1000
NUM_TEST_SAMPLES = 400
def linear_dataset(a=2, size=1000):
    """Generate a toy linear-regression dataset.

    Returns ``(x, y)`` as ``(size, 1)`` float arrays with ``x`` uniform in
    [0, 1) and ``y = x / a``.

    Previously the ``a`` parameter was accepted but ignored and the slope was
    hard-coded to 1/2; wiring it up with the default ``a=2`` keeps the old
    behavior for existing callers while making the slope configurable.
    """
    x = np.random.rand(size)
    y = x / a  # use the slope parameter instead of the hard-coded 2
    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))
    return x, y
def create_train_datasets(config):
    # Build the shuffled, batched tf.data training pipeline for one worker.
    # ``config`` must provide "batch_size".
    import tensorflow as tf
    batch_size = config["batch_size"]
    x_train, y_train = linear_dataset(size=NUM_TRAIN_SAMPLES)
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    # shuffle buffer covers the whole dataset, then batch
    train_dataset = train_dataset.shuffle(NUM_TRAIN_SAMPLES).batch(
        batch_size)
    return train_dataset
def create_test_dataset(config):
    # Build the batched (unshuffled) tf.data evaluation pipeline.
    # ``config`` must provide "batch_size".
    import tensorflow as tf
    batch_size = config["batch_size"]
    x_test, y_test = linear_dataset(size=NUM_TEST_SAMPLES)
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    test_dataset = test_dataset.batch(batch_size)
    return test_dataset
def simple_model(config):
    # Tiny 1-input -> 10 -> 1 dense regression model (``config`` is unused
    # here but required by the Estimator's model_creator signature).
    import tensorflow as tf
    model = tf.keras.models.Sequential([tf.keras.layers.Dense(10, input_shape=(1,)),
                                        tf.keras.layers.Dense(1)])
    return model
def compile_args(config):
    # Keyword arguments passed to model.compile(); MSE is both the loss and
    # the tracked metric (``config`` is unused but required by the creator API).
    import tensorflow as tf
    args = {
        "optimizer": tf.keras.optimizers.Adam(),
        "loss": "mean_squared_error",
        "metrics": ["mean_squared_error"]
    }
    return args
class TestTFRayEstimator(TestCase):
    def test_fit_and_evaluate(self):
        """End-to-end smoke test: validation loss/MSE must decrease after training."""
        import tensorflow as tf
        ray_ctx = RayContext.get()
        batch_size = 32
        # effective batch = per-worker batch size times number of Ray nodes
        global_batch_size = batch_size * ray_ctx.num_ray_nodes
        config = {
            "batch_size": batch_size
        }
        trainer = Estimator(
            model_creator=simple_model,
            compile_args_creator=compile_args,
            verbose=True,
            config=config)
        # model baseline performance
        start_stats = trainer.evaluate(create_test_dataset,
                                       steps=NUM_TEST_SAMPLES // global_batch_size)
        print(start_stats)
        # constant LR for the first two epochs, exponential decay afterwards
        def scheduler(epoch):
            if epoch < 2:
                return 0.001
            else:
                return 0.001 * tf.math.exp(0.1 * (2 - epoch))
        scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
        # train: two fit() calls of 2 epochs each
        trainer.fit(create_train_datasets, epochs=2, callbacks=[scheduler])
        trainer.fit(create_train_datasets, epochs=2, callbacks=[scheduler])
        # model performance after training (should improve)
        end_stats = trainer.evaluate(create_test_dataset,
                                     steps=NUM_TEST_SAMPLES // global_batch_size)
        print(end_stats)
        # sanity check that training worked
        dloss = end_stats["validation_loss"] - start_stats["validation_loss"]
        dmse = (end_stats["validation_mean_squared_error"] -
                start_stats["validation_mean_squared_error"])
        print(f"dLoss: {dloss}, dMSE: {dmse}")
        assert dloss < 0 and dmse < 0, "training sanity check failed. loss increased!"
"noreply@github.com"
] | masir110.noreply@github.com |
28d5e3dae132663d27b2d5c4430019896f8b3eef | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/sympy/combinatorics/free_groups.py | 2150e670e4015d91706a458585d33802adc1eba1 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 40,158 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence, as_int, string_types
from sympy.core.expr import Expr
from sympy.core.symbol import Symbol, symbols as _symbols
from sympy.core.sympify import CantSympify
from sympy.core import S
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.iterables import flatten
from sympy.utilities.magic import pollute
from sympy import sign
@public
def free_group(symbols):
    """Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1))``.

    Parameters
    ----------
    symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)

    Examples
    ========

    >>> from sympy.combinatorics.free_groups import free_group
    >>> F, x, y, z = free_group("x, y, z")
    >>> F
    <free group on the generators (x, y, z)>
    >>> x**2*y**-1
    x**2*y**-1
    >>> type(_)
    <class 'sympy.combinatorics.free_groups.FreeGroupElement'>

    """
    group = FreeGroup(symbols)
    # the group itself first, then each generator as its own tuple element
    return (group,) + tuple(group.generators)
@public
def xfree_group(symbols):
    """Construct a free group returning ``(FreeGroup, (f_0, f_1, ..., f_(n-1)))``.

    Parameters
    ----------
    symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)

    Examples
    ========

    >>> from sympy.combinatorics.free_groups import xfree_group
    >>> F, (x, y, z) = xfree_group("x, y, z")
    >>> F
    <free group on the generators (x, y, z)>
    >>> y**2*x**-2*z**-1
    y**2*x**-2*z**-1
    >>> type(_)
    <class 'sympy.combinatorics.free_groups.FreeGroupElement'>

    """
    group = FreeGroup(symbols)
    # unlike free_group(), the generators stay bundled in one tuple
    return (group, group.generators)
@public
def vfree_group(symbols):
    """Construct a free group and inject ``f_0, f_1, ..., f_(n-1)`` as symbols
    into the global namespace.

    Parameters
    ----------
    symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (may be empty)

    Examples
    ========

    >>> from sympy.combinatorics.free_groups import vfree_group
    >>> vfree_group("x, y, z")
    <free group on the generators (x, y, z)>
    >>> x**2*y**-2*z
    x**2*y**-2*z
    >>> type(_)
    <class 'sympy.combinatorics.free_groups.FreeGroupElement'>

    """
    group = FreeGroup(symbols)
    # bind each generator to its symbol name in the caller-visible namespace
    names = [sym.name for sym in group.symbols]
    pollute(names, group.generators)
    return group
def _parse_symbols(symbols):
    """Normalize a generator specification to a tuple of symbols.

    Accepts an empty value, a string of comma/whitespace separated names, a
    single ``Expr``/``FreeGroupElement``, or a sequence of strings or
    ``Expr``s.  Raises ``ValueError`` for anything else.
    """
    if not symbols:
        return tuple()
    if isinstance(symbols, string_types):
        return _symbols(symbols, seq=True)
    elif isinstance(symbols, (Expr, FreeGroupElement)):
        # BUGFIX: ``isinstance(symbols, Expr or FreeGroupElement)`` evaluated
        # the boolean expression first, reducing it to just ``Expr`` and
        # silently never matching a lone FreeGroupElement.
        return (symbols,)
    elif is_sequence(symbols):
        if all(isinstance(s, string_types) for s in symbols):
            return _symbols(symbols)
        elif all(isinstance(s, Expr) for s in symbols):
            return symbols
    raise ValueError("The type of `symbols` must be one of the following: "
                     "a str, Symbol/Expr or a sequence of "
                     "one of these types")
##############################################################################
# FREE GROUP #
##############################################################################
# Interning cache of FreeGroup instances, keyed by
# hash((cls.__name__, symbols, rank)) as computed in FreeGroup.__new__.
_free_group_cache = {}
class FreeGroup(DefaultPrinting):
    """
    Free group with finite or infinite number of generators. Its input API
    is that of a str, Symbol/Expr or a sequence of one of
    these types (which may be empty)
    References
    ==========
    [1] http://www.gap-system.org/Manuals/doc/ref/chap37.html
    [2] https://en.wikipedia.org/wiki/Free_group
    See Also
    ========
    sympy.polys.rings.PolyRing
    """
    # Structural flags that client code can inspect to classify this group.
    is_associative = True
    is_group = True
    is_FreeGroup = True
    is_PermutationGroup = False
    # A free group has no defining relators.
    relators = tuple()
    def __new__(cls, symbols):
        # Instances are interned in _free_group_cache, so building a group
        # with the same symbols returns the existing object.
        symbols = tuple(_parse_symbols(symbols))
        rank = len(symbols)
        _hash = hash((cls.__name__, symbols, rank))
        obj = _free_group_cache.get(_hash)
        if obj is None:
            obj = object.__new__(cls)
            obj._hash = _hash
            obj._rank = rank
            # dtype method is used to create new instances of FreeGroupElement
            obj.dtype = type("FreeGroupElement", (FreeGroupElement,), {"group": obj})
            obj.symbols = symbols
            obj.generators = obj._generators()
            obj._gens_set = set(obj.generators)
            # NOTE(review): the generator is bound as an attribute only when
            # the name *already* exists on obj -- this guard looks inverted;
            # confirm the intent.
            for symbol, generator in zip(obj.symbols, obj.generators):
                if isinstance(symbol, Symbol):
                    name = symbol.name
                    if hasattr(obj, name):
                        setattr(obj, name, generator)
            _free_group_cache[_hash] = obj
        return obj
    def _generators(group):
        """Returns the generators of the FreeGroup.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y, z = free_group("x, y, z")
        >>> F.generators
        (x, y, z)
        """
        gens = []
        for sym in group.symbols:
            # Each generator is the one-syllable word (sym, 1).
            elm = ((sym, 1),)
            gens.append(group.dtype(elm))
        return tuple(gens)
    def clone(self, symbols=None):
        # Return the (interned) group on ``symbols``, defaulting to this
        # group's own symbols when none are given.
        return self.__class__(symbols or self.symbols)
    def __contains__(self, i):
        """Return True if ``i`` is contained in FreeGroup."""
        if not isinstance(i, FreeGroupElement):
            return False
        group = i.group
        return self == group
    def __hash__(self):
        # Hash precomputed in __new__ from (class name, symbols, rank).
        return self._hash
    def __len__(self):
        return self.rank
    def __str__(self):
        if self.rank > 30:
            str_form = "<free group with %s generators>" % self.rank
        else:
            str_form = "<free group on the generators "
            gens = self.generators
            str_form += str(gens) + ">"
        return str_form
    __repr__ = __str__
    def __getitem__(self, index):
        # Slicing a group yields the (interned) group on the sliced symbols.
        symbols = self.symbols[index]
        return self.clone(symbols=symbols)
    def __eq__(self, other):
        """No ``FreeGroup`` is equal to any "other" ``FreeGroup``.
        """
        # Groups are interned (see __new__), so identity is equality.
        return self is other
    def index(self, gen):
        """Return the index of the generator `gen` from ``(f_0, ..., f_(n-1))``.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> F.index(y)
        1
        >>> F.index(x)
        0
        """
        if isinstance(gen, self.dtype):
            return self.generators.index(gen)
        else:
            raise ValueError("expected a generator of Free Group %s, got %s" % (self, gen))
    def order(self):
        """Return the order of the free group.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> F.order()
        oo
        >>> free_group("")[0].order()
        1
        """
        if self.rank == 0:
            return 1
        else:
            return S.Infinity
    @property
    def elements(self):
        """
        Return the elements of the free group.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> (z,) = free_group("")
        >>> z.elements
        {<identity>}
        """
        if self.rank == 0:
            # A set containing Identity element of `FreeGroup` self is returned
            return {self.identity}
        else:
            raise ValueError("Group contains infinitely many elements"
                            ", hence can't be represented")
    @property
    def rank(self):
        r"""
        In group theory, the `rank` of a group `G`, denoted `G.rank`,
        can refer to the smallest cardinality of a generating set
        for G, that is
        \operatorname{rank}(G)=\min\{ |X|: X\subseteq G, \langle X\rangle =G\}.
        """
        return self._rank
    @property
    def is_abelian(self):
        """Returns if the group is Abelian.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, x, y, z = free_group("x y z")
        >>> f.is_abelian
        False
        """
        # Only the trivial group and the rank-1 free group are abelian.
        if self.rank == 0 or self.rank == 1:
            return True
        else:
            return False
    @property
    def identity(self):
        """Returns the identity element of free group."""
        return self.dtype()
    def contains(self, g):
        """Tests if Free Group element ``g`` belong to self, ``G``.
        In mathematical terms any linear combination of generators
        of a Free Group is contained in it.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, x, y, z = free_group("x y z")
        >>> f.contains(x**3*y**2)
        True
        """
        if not isinstance(g, FreeGroupElement):
            return False
        elif self != g.group:
            return False
        else:
            return True
    def center(self):
        """Returns the center of the free group `self`."""
        # The center of a free group is trivial (for rank >= 2).
        return {self.identity}
############################################################################
# FreeGroupElement #
############################################################################
class FreeGroupElement(CantSympify, DefaultPrinting, tuple):
    """Used to create elements of FreeGroup. It can not be used directly to
    create a free group element. It is called by the `dtype` method of the
    `FreeGroup` class.

    An element (an "associative word") is stored as a tuple of
    ``(symbol, exponent)`` pairs -- the "array form" -- e.g.
    ``x**2*y**-1`` is ``((x, 2), (y, -1))``.
    """
    is_assoc_word = True
    def new(self, init):
        # Build another word of the same bound group type.
        return self.__class__(init)
    # Lazily-computed hash cache, filled by __hash__.
    _hash = None
    def __hash__(self):
        _hash = self._hash
        if _hash is None:
            self._hash = _hash = hash((self.group, frozenset(tuple(self))))
        return _hash
    def copy(self):
        return self.new(self)
    @property
    def is_identity(self):
        # The identity is the empty word.
        if self.array_form == tuple():
            return True
        else:
            return False
    @property
    def array_form(self):
        """
        SymPy provides two different internal kinds of representation
        of associative words. The first one is called the `array_form`
        which is a tuple containing `tuples` as its elements, where the
        size of each tuple is two. At the first position the tuple
        contains the `symbol-generator`, while at the second position
        of tuple contains the exponent of that generator at the position.
        Since elements (i.e. words) don't commute, the indexing of tuple
        makes that property to stay.
        The structure in ``array_form`` of ``FreeGroupElement`` is of form:
        ``( ( symbol_of_gen , exponent ), ( , ), ... ( , ) )``
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, x, y, z = free_group("x y z")
        >>> (x*z).array_form
        ((x, 1), (z, 1))
        >>> (x**2*z*y*x**2).array_form
        ((x, 2), (z, 1), (y, 1), (x, 2))
        See Also
        ========
        letter_repr
        """
        return tuple(self)
    @property
    def letter_form(self):
        """
        The letter representation of a ``FreeGroupElement`` is a tuple
        of generator symbols, with each entry corresponding to a group
        generator. Inverses of the generators are represented by
        negative generator symbols.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, a, b, c, d = free_group("a b c d")
        >>> (a**3).letter_form
        (a, a, a)
        >>> (a**2*d**-2*a*b**-4).letter_form
        (a, a, -d, -d, a, -b, -b, -b, -b)
        >>> (a**-2*b**3*d).letter_form
        (-a, -a, b, b, b, d)
        See Also
        ========
        array_form
        """
        return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)
                    for i, j in self.array_form]))
    def __getitem__(self, i):
        # Return the i-th letter of the word as a one-letter word.
        group = self.group
        r = self.letter_form[i]
        if r.is_Symbol:
            return group.dtype(((r, 1),))
        else:
            return group.dtype(((-r, -1),))
    def index(self, gen):
        # Index of a one-letter word ``gen`` within this word's letter form.
        if len(gen) != 1:
            raise ValueError()
        return (self.letter_form).index(gen.letter_form[0])
    @property
    def letter_form_elm(self):
        """
        Return the letters of the word as a list of one-letter words.
        """
        group = self.group
        r = self.letter_form
        return [group.dtype(((elm,1),)) if elm.is_Symbol \
                else group.dtype(((-elm,-1),)) for elm in r]
    @property
    def ext_rep(self):
        """This is called the External Representation of ``FreeGroupElement``
        """
        # Flattened array form: (sym, exp, sym, exp, ...).
        return tuple(flatten(self.array_form))
    def __contains__(self, gen):
        # True if gen's leading generator symbol occurs in any syllable.
        return gen.array_form[0][0] in tuple([r[0] for r in self.array_form])
    def __str__(self):
        if self.is_identity:
            return "<identity>"
        symbols = self.group.symbols
        str_form = ""
        array_form = self.array_form
        for i in range(len(array_form)):
            if i == len(array_form) - 1:
                if array_form[i][1] == 1:
                    str_form += str(array_form[i][0])
                else:
                    str_form += str(array_form[i][0]) + \
                                    "**" + str(array_form[i][1])
            else:
                if array_form[i][1] == 1:
                    str_form += str(array_form[i][0]) + "*"
                else:
                    str_form += str(array_form[i][0]) + \
                                    "**" + str(array_form[i][1]) + "*"
        return str_form
    __repr__ = __str__
    def __pow__(self, n):
        n = as_int(n)
        group = self.group
        if n == 0:
            return group.identity
        if n < 0:
            n = -n
            return (self.inverse())**n
        result = self
        for i in range(n - 1):
            result = result*self
        # this method can be improved instead of just returning the
        # multiplication of elements
        return result
    def __mul__(self, other):
        """Returns the product of elements belonging to the same ``FreeGroup``.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, x, y, z = free_group("x y z")
        >>> x*y**2*y**-4
        x*y**-2
        >>> z*y**-2
        z*y**-2
        >>> x**2*y*y**-1*x**-2
        <identity>
        """
        group = self.group
        if not isinstance(other, group.dtype):
            raise TypeError("only FreeGroup elements of same FreeGroup can "
                        "be multiplied")
        if self.is_identity:
            return other
        if other.is_identity:
            return self
        # Concatenate array forms, then cancel at the junction point.
        r = list(self.array_form + other.array_form)
        zero_mul_simp(r, len(self.array_form) - 1)
        return group.dtype(tuple(r))
    def __div__(self, other):
        # Division is multiplication by the inverse on the right.
        group = self.group
        if not isinstance(other, group.dtype):
            raise TypeError("only FreeGroup elements of same FreeGroup can "
                        "be multiplied")
        return self*(other.inverse())
    def __rdiv__(self, other):
        group = self.group
        if not isinstance(other, group.dtype):
            raise TypeError("only FreeGroup elements of same FreeGroup can "
                        "be multiplied")
        return other*(self.inverse())
    __truediv__ = __div__
    __rtruediv__ = __rdiv__
    def __add__(self, other):
        return NotImplemented
    def inverse(self):
        """
        Returns the inverse of a ``FreeGroupElement`` element
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, x, y, z = free_group("x y z")
        >>> x.inverse()
        x**-1
        >>> (x*y).inverse()
        y**-1*x**-1
        """
        group = self.group
        # Reverse the syllables and negate every exponent.
        r = tuple([(i, -j) for i, j in self.array_form[::-1]])
        return group.dtype(r)
    def order(self):
        """Find the order of a ``FreeGroupElement``.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, x, y = free_group("x y")
        >>> (x**2*y*y**-1*x**-2).order()
        1
        """
        # Every non-identity element of a free group has infinite order.
        if self.is_identity:
            return 1
        else:
            return S.Infinity
    def commutator(self, other):
        """
        Return the commutator of `self` and `x`: ``~x*~self*x*self``
        """
        group = self.group
        if not isinstance(other, group.dtype):
            raise ValueError("commutator of only FreeGroupElement of the same "
                    "FreeGroup exists")
        else:
            return self.inverse()*other.inverse()*self*other
    def eliminate_words(self, words, _all=False, inverse=True):
        '''
        Replace each subword from the dictionary `words` by words[subword].
        If words is a list, replace the words by the identity.
        '''
        # Iterate until a full pass makes no change.
        again = True
        new = self
        if isinstance(words, dict):
            while again:
                again = False
                for sub in words:
                    prev = new
                    new = new.eliminate_word(sub, words[sub], _all=_all, inverse=inverse)
                    if new != prev:
                        again = True
        else:
            while again:
                again = False
                for sub in words:
                    prev = new
                    new = new.eliminate_word(sub, _all=_all, inverse=inverse)
                    if new != prev:
                        again = True
        return new
    def eliminate_word(self, gen, by=None, _all=False, inverse=True):
        """
        For an associative word `self`, a subword `gen`, and an associative
        word `by` (identity by default), return the associative word obtained by
        replacing each occurrence of `gen` in `self` by `by`. If `_all = True`,
        the occurrences of `gen` that may appear after the first substitution will
        also be replaced and so on until no occurrences are found. This might not
        always terminate (e.g. `(x).eliminate_word(x, x**2, _all=True)`).
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, x, y = free_group("x y")
        >>> w = x**5*y*x**2*y**-4*x
        >>> w.eliminate_word( x, x**2 )
        x**10*y*x**4*y**-4*x**2
        >>> w.eliminate_word( x, y**-1 )
        y**-11
        >>> w.eliminate_word(x**5)
        y*x**2*y**-4*x
        >>> w.eliminate_word(x*y, y)
        x**4*y*x**2*y**-4*x
        See Also
        ========
        substituted_word
        """
        if by == None:
            by = self.group.identity
        if self.is_independent(gen) or gen == by:
            return self
        if gen == self:
            return by
        if gen**-1 == by:
            # Repeatedly replacing gen by its own inverse would never
            # terminate, so restrict to a single pass.
            _all = False
        word = self
        l = len(gen)
        try:
            i = word.subword_index(gen)
            k = 1
        except ValueError:
            if not inverse:
                return word
            try:
                i = word.subword_index(gen**-1)
                k = -1
            except ValueError:
                return word
        word = word.subword(0, i)*by**k*word.subword(i+l, len(word)).eliminate_word(gen, by)
        if _all:
            return word.eliminate_word(gen, by, _all=True, inverse=inverse)
        else:
            return word
    def __len__(self):
        """
        For an associative word `self`, returns the number of letters in it.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, a, b = free_group("a b")
        >>> w = a**5*b*a**2*b**-4*a
        >>> len(w)
        13
        >>> len(a**17)
        17
        >>> len(w**0)
        0
        """
        return sum(abs(j) for (i, j) in self)
    def __eq__(self, other):
        """
        Two associative words are equal if they are words over the
        same alphabet and if they are sequences of the same letters.
        This is equivalent to saying that the external representations
        of the words are equal.
        There is no "universal" empty word, every alphabet has its own
        empty word.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1")
        >>> f
        <free group on the generators (swapnil0, swapnil1)>
        >>> g, swap0, swap1 = free_group("swap0 swap1")
        >>> g
        <free group on the generators (swap0, swap1)>
        >>> swapnil0 == swapnil1
        False
        >>> swapnil0*swapnil1 == swapnil1/swapnil1*swapnil0*swapnil1
        True
        >>> swapnil0*swapnil1 == swapnil1*swapnil0
        False
        >>> swapnil1**0 == swap0**0
        False
        """
        group = self.group
        if not isinstance(other, group.dtype):
            return False
        return tuple.__eq__(self, other)
    def __lt__(self, other):
        """
        The ordering of associative words is defined by length and
        lexicography (this ordering is called short-lex ordering), that
        is, shorter words are smaller than longer words, and words of the
        same length are compared w.r.t. the lexicographical ordering induced
        by the ordering of generators. Generators are sorted according
        to the order in which they were created. If the generators are
        invertible then each generator `g` is larger than its inverse `g^{-1}`,
        and `g^{-1}` is larger than every generator that is smaller than `g`.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, a, b = free_group("a b")
        >>> b < a
        False
        >>> a < a.inverse()
        False
        """
        group = self.group
        if not isinstance(other, group.dtype):
            raise TypeError("only FreeGroup elements of same FreeGroup can "
                            "be compared")
        l = len(self)
        m = len(other)
        # implement lenlex order
        if l < m:
            return True
        elif l > m:
            return False
        for i in range(l):
            a = self[i].array_form[0]
            b = other[i].array_form[0]
            p = group.symbols.index(a[0])
            q = group.symbols.index(b[0])
            if p < q:
                return True
            elif p > q:
                return False
            elif a[1] < b[1]:
                return True
            elif a[1] > b[1]:
                return False
        return False
    def __le__(self, other):
        return (self == other or self < other)
    def __gt__(self, other):
        """
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, x, y, z = free_group("x y z")
        >>> y**2 > x**2
        True
        >>> y*z > z*y
        False
        >>> x > x.inverse()
        True
        """
        group = self.group
        if not isinstance(other, group.dtype):
            raise TypeError("only FreeGroup elements of same FreeGroup can "
                            "be compared")
        return not self <= other
    def __ge__(self, other):
        return not self < other
    def exponent_sum(self, gen):
        """
        For an associative word `self` and a generator or inverse of generator
        `gen`, ``exponent_sum`` returns the number of times `gen` appears in
        `self` minus the number of times its inverse appears in `self`. If
        neither `gen` nor its inverse occur in `self` then 0 is returned.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> w = x**2*y**3
        >>> w.exponent_sum(x)
        2
        >>> w.exponent_sum(x**-1)
        -2
        >>> w = x**2*y**4*x**-3
        >>> w.exponent_sum(x)
        -1
        See Also
        ========
        generator_count
        """
        if len(gen) != 1:
            raise ValueError("gen must be a generator or inverse of a generator")
        s = gen.array_form[0]
        return s[1]*sum([i[1] for i in self.array_form if i[0] == s[0]])
    def generator_count(self, gen):
        """
        For an associative word `self` and a generator `gen`,
        ``generator_count`` returns the multiplicity of generator
        `gen` in `self`.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> w = x**2*y**3
        >>> w.generator_count(x)
        2
        >>> w = x**2*y**4*x**-3
        >>> w.generator_count(x)
        5
        See Also
        ========
        exponent_sum
        """
        if len(gen) != 1 or gen.array_form[0][1] < 0:
            raise ValueError("gen must be a generator")
        s = gen.array_form[0]
        return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])
    def subword(self, from_i, to_j, strict=True):
        """
        For an associative word `self` and two positive integers `from_i` and
        `to_j`, `subword` returns the subword of `self` that begins at position
        `from_i` and ends at `to_j - 1`, indexing is done with origin 0.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, a, b = free_group("a b")
        >>> w = a**5*b*a**2*b**-4*a
        >>> w.subword(2, 6)
        a**3*b
        """
        group = self.group
        if not strict:
            # Clamp out-of-range bounds instead of raising.
            from_i = max(from_i, 0)
            to_j = min(len(self), to_j)
        if from_i < 0 or to_j > len(self):
            raise ValueError("`from_i`, `to_j` must be positive and no greater than "
                    "the length of associative word")
        if to_j <= from_i:
            return group.identity
        else:
            letter_form = self.letter_form[from_i: to_j]
            array_form = letter_form_to_array_form(letter_form, group)
            return group.dtype(array_form)
    def subword_index(self, word, start = 0):
        '''
        Find the index of `word` in `self`.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, a, b = free_group("a b")
        >>> w = a**2*b*a*b**3
        >>> w.subword_index(a*b*a*b)
        1
        '''
        # Naive substring search on the letter forms.
        l = len(word)
        self_lf = self.letter_form
        word_lf = word.letter_form
        index = None
        for i in range(start,len(self_lf)-l+1):
            if self_lf[i:i+l] == word_lf:
                index = i
                break
        if index is not None:
            return index
        else:
            raise ValueError("The given word is not a subword of self")
    def is_dependent(self, word):
        """
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> (x**4*y**-3).is_dependent(x**4*y**-2)
        True
        >>> (x**2*y**-1).is_dependent(x*y)
        False
        >>> (x*y**2*x*y**2).is_dependent(x*y**2)
        True
        >>> (x**12).is_dependent(x**-4)
        True
        See Also
        ========
        is_independent
        """
        # Dependent means `word` or its inverse occurs as a subword.
        try:
            return self.subword_index(word) != None
        except ValueError:
            pass
        try:
            return self.subword_index(word**-1) != None
        except ValueError:
            return False
    def is_independent(self, word):
        """
        See Also
        ========
        is_dependent
        """
        return not self.is_dependent(word)
    def contains_generators(self):
        """
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y, z = free_group("x, y, z")
        >>> (x**2*y**-1).contains_generators()
        {x, y}
        >>> (x**3*z).contains_generators()
        {x, z}
        """
        group = self.group
        gens = set()
        for syllable in self.array_form:
            gens.add(group.dtype(((syllable[0], 1),)))
        return set(gens)
    def cyclic_subword(self, from_i, to_j):
        # Subword of the cyclic reading of the word; the indices may run
        # past len(self) and are reduced modulo the word length.
        group = self.group
        l = len(self)
        letter_form = self.letter_form
        period1 = int(from_i/l)
        if from_i >= l:
            from_i -= l*period1
            to_j -= l*period1
        diff = to_j - from_i
        word = letter_form[from_i: to_j]
        period2 = int(to_j/l) - 1
        word += letter_form*period2 + letter_form[:diff-l+from_i-l*period2]
        word = letter_form_to_array_form(word, group)
        return group.dtype(word)
    def cyclic_conjugates(self):
        """Returns a words which are cyclic to the word `self`.
        References
        ==========
        http://planetmath.org/cyclicpermutation
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> w = x*y*x*y*x
        >>> w.cyclic_conjugates()
        {x*y*x**2*y, x**2*y*x*y, y*x*y*x**2, y*x**2*y*x, x*y*x*y*x}
        >>> s = x*y*x**2*y*x
        >>> s.cyclic_conjugates()
        {x**2*y*x**2*y, y*x**2*y*x**2, x*y*x**2*y*x}
        """
        return {self.cyclic_subword(i, i+len(self)) for i in range(len(self))}
    def is_cyclic_conjugate(self, w):
        """
        Checks whether words ``self``, ``w`` are cyclic conjugates.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> w1 = x**2*y**5
        >>> w2 = x*y**5*x
        >>> w1.is_cyclic_conjugate(w2)
        True
        >>> w3 = x**-1*y**5*x**-1
        >>> w3.is_cyclic_conjugate(w2)
        False
        """
        l1 = len(self)
        l2 = len(w)
        if l1 != l2:
            return False
        w1 = self.identity_cyclic_reduction()
        w2 = w.identity_cyclic_reduction()
        letter1 = w1.letter_form
        letter2 = w2.letter_form
        str1 = ' '.join(map(str, letter1))
        str2 = ' '.join(map(str, letter2))
        if len(str1) != len(str2):
            return False
        # Classic rotation test: str1 is a rotation of str2 iff it occurs
        # in str2 concatenated with itself.
        return str1 in str2 + ' ' + str2
    def number_syllables(self):
        """Returns the number of syllables of the associative word `self`.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, swapnil0, swapnil1 = free_group("swapnil0 swapnil1")
        >>> (swapnil1**3*swapnil0*swapnil1**-1).number_syllables()
        3
        """
        return len(self.array_form)
    def exponent_syllable(self, i):
        """
        Returns the exponent of the `i`-th syllable of the associative word
        `self`.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, a, b = free_group("a b")
        >>> w = a**5*b*a**2*b**-4*a
        >>> w.exponent_syllable( 2 )
        2
        """
        return self.array_form[i][1]
    def generator_syllable(self, i):
        """
        Returns the symbol of the generator that is involved in the
        i-th syllable of the associative word `self`.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, a, b = free_group("a b")
        >>> w = a**5*b*a**2*b**-4*a
        >>> w.generator_syllable( 3 )
        b
        """
        return self.array_form[i][0]
    def sub_syllables(self, from_i, to_j):
        """
        `sub_syllables` returns the subword of the associative word `self` that
        consists of syllables from positions `from_to` to `to_j`, where
        `from_to` and `to_j` must be positive integers and indexing is done
        with origin 0.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> f, a, b = free_group("a, b")
        >>> w = a**5*b*a**2*b**-4*a
        >>> w.sub_syllables(1, 2)
        b
        >>> w.sub_syllables(3, 3)
        <identity>
        """
        if not isinstance(from_i, int) or not isinstance(to_j, int):
            raise ValueError("both arguments should be integers")
        group = self.group
        if to_j <= from_i:
            return group.identity
        else:
            r = tuple(self.array_form[from_i: to_j])
            return group.dtype(r)
    def substituted_word(self, from_i, to_j, by):
        """
        Returns the associative word obtained by replacing the subword of
        `self` that begins at position `from_i` and ends at position `to_j - 1`
        by the associative word `by`. `from_i` and `to_j` must be positive
        integers, indexing is done with origin 0. In other words,
        `w.substituted_word(w, from_i, to_j, by)` is the product of the three
        words: `w.subword(0, from_i)`, `by`, and
        `w.subword(to_j, len(w))`.
        See Also
        ========
        eliminate_word
        """
        lw = len(self)
        if from_i >= to_j or from_i > lw or to_j > lw:
            raise ValueError("values should be within bounds")
        # otherwise there are four possibilities
        # first if from=1 and to=lw then
        if from_i == 0 and to_j == lw:
            return by
        elif from_i == 0: # second if from_i=1 (and to_j < lw) then
            return by*self.subword(to_j, lw)
        elif to_j == lw:   # third if to_j=1 (and from_i > 1) then
            return self.subword(0, from_i)*by
        else:              # finally
            return self.subword(0, from_i)*by*self.subword(to_j, lw)
    def is_cyclically_reduced(self):
        r"""Returns whether the word is cyclically reduced or not.
        A word is cyclically reduced if by forming the cycle of the
        word, the word is not reduced, i.e a word w = `a_1 ... a_n`
        is called cyclically reduced if `a_1 \ne a_n^{−1}`.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> (x**2*y**-1*x**-1).is_cyclically_reduced()
        False
        >>> (y*x**2*y**2).is_cyclically_reduced()
        True
        """
        if not self:
            return True
        return self[0] != self[-1]**-1
    def identity_cyclic_reduction(self):
        """Return a unique cyclically reduced version of the word.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> (x**2*y**2*x**-1).identity_cyclic_reduction()
        x*y**2
        >>> (x**-3*y**-1*x**5).identity_cyclic_reduction()
        x**2*y**-1
        References
        ==========
        http://planetmath.org/cyclicallyreduced
        """
        word = self.copy()
        group = self.group
        # Repeatedly merge (or cancel) the first and last syllables until
        # the word is cyclically reduced.
        while not word.is_cyclically_reduced():
            exp1 = word.exponent_syllable(0)
            exp2 = word.exponent_syllable(-1)
            r = exp1 + exp2
            if r == 0:
                rep = word.array_form[1: word.number_syllables() - 1]
            else:
                rep = ((word.generator_syllable(0), exp1 + exp2),) + \
                        word.array_form[1: word.number_syllables() - 1]
            word = group.dtype(rep)
        return word
    def cyclic_reduction(self, removed=False):
        """Return a cyclically reduced version of the word. Unlike
        `identity_cyclic_reduction`, this will not cyclically permute
        the reduced word - just remove the "unreduced" bits on either
        side of it. Compare the examples with those of
        `identity_cyclic_reduction`.
        When `removed` is `True`, return a tuple `(word, r)` where
        ``r`` is such that before the reduction the word was
        `r*word*r**-1`.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> (x**2*y**2*x**-1).cyclic_reduction()
        x*y**2
        >>> (x**-3*y**-1*x**5).cyclic_reduction()
        y**-1*x**2
        >>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True)
        (y**-1*x**2, x**-3)
        """
        word = self.copy()
        group = self.group
        g = self.group.identity
        # Strip matching prefix/suffix pieces, accumulating them in g.
        while not word.is_cyclically_reduced():
            exp1 = abs(word.exponent_syllable(0))
            exp2 = abs(word.exponent_syllable(-1))
            exp = min(exp1, exp2)
            start = word[0]**abs(exp)
            end = word[-1]**abs(exp)
            word = start**-1*word*end**-1
            g = g*start
        if removed:
            return word, g
        return word
    def power_of(self, other):
        '''
        Check if `self == other**n` for some integer n.
        Examples
        ========
        >>> from sympy.combinatorics.free_groups import free_group
        >>> F, x, y = free_group("x, y")
        >>> ((x*y)**2).power_of(x*y)
        True
        >>> (x**-3*y**-2*x**3).power_of(x**-3*y*x**3)
        True
        '''
        if self.is_identity:
            return True
        l = len(other)
        if l == 1:
            # self has to be a power of one generator
            gens = self.contains_generators()
            s = other in gens or other**-1 in gens
            return len(gens) == 1 and s
        # if self is not cyclically reduced and it is a power of other,
        # other isn't cyclically reduced and the parts removed during
        # their reduction must be equal
        reduced, r1 = self.cyclic_reduction(removed=True)
        if not r1.is_identity:
            other, r2 = other.cyclic_reduction(removed=True)
            if r1 == r2:
                return reduced.power_of(other)
            return False
        if len(self) < l or len(self) % l:
            return False
        prefix = self.subword(0, l)
        if prefix == other or prefix**-1 == other:
            rest = self.subword(l, len(self))
            return rest.power_of(other)
        return False
def letter_form_to_array_form(array_form, group):
    """
    Convert a letter-form sequence (one entry per letter; inverse letters
    are negated generator symbols) into array form: a list of
    ``(symbol, exponent)`` pairs where runs of consecutive equal letters
    are collapsed and inverse letters get negative exponents.
    """
    a = list(array_form[:])
    new_array = []
    n = 1
    symbols = group.symbols
    for i in range(len(a)):
        if i == len(a) - 1:
            # Last letter: flush the current run.  When len(a) == 1,
            # a[i - 1] is a[-1] == a[0], so the "same as previous" branch
            # correctly emits a single (letter, 1) syllable.
            if a[i] == a[i - 1]:
                if (-a[i]) in symbols:
                    new_array.append((-a[i], -n))
                else:
                    new_array.append((a[i], n))
            else:
                if (-a[i]) in symbols:
                    new_array.append((-a[i], -1))
                else:
                    new_array.append((a[i], 1))
            return new_array
        elif a[i] == a[i + 1]:
            n += 1
        else:
            # Run ended: emit the syllable and restart the counter.
            if (-a[i]) in symbols:
                new_array.append((-a[i], -n))
            else:
                new_array.append((a[i], n))
            n = 1
    # BUG FIX: an empty letter form previously fell off the loop and
    # implicitly returned None; return the (empty) array form instead.
    return new_array
def zero_mul_simp(l, index):
    """Used to combine two reduced words.

    Merge, in place, adjacent syllables of ``l`` with the same base
    starting at ``index``; a pair whose exponents cancel to zero is
    removed entirely, after which merging continues one position to
    the left.
    """
    while 0 <= index < len(l) - 1:
        base, exp = l[index]
        next_base, next_exp = l[index + 1]
        if base != next_base:
            break
        del l[index + 1]
        total = exp + next_exp
        if total == 0:
            # Full cancellation: drop the syllable and retry to the left.
            del l[index]
            index -= 1
        else:
            l[index] = (base, total)
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
bf948013cefb5f8b525ed00d1b23ecdad0ebc9c0 | d1c5176a1cb370addfb3ef0dabb303fefb9c6f87 | /struktury_danych/gra 10x10.py | 624d921c63a93bfdc1f98db4bc2d321dc6623dea | [] | no_license | fzubowicz/Python_bootcamp | 72d58f49f744ae5711f65446433b49be26e4351c | 12d6624f0070038a8a0f28a2d09325a2100f0941 | refs/heads/master | 2020-03-22T12:58:41.499223 | 2018-07-08T14:58:27 | 2018-07-08T14:58:27 | 140,074,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | import random
# Treasure-hunt game on a 10x10 board: the player starts at a random cell
# and walks one step at a time until they find the treasure or step off
# the board.  "Warmer"/"colder" feedback is based on Manhattan distance.
gracz_x = random.randrange(1, 11)
gracz_y = random.randrange(1, 11)
# BUG FIX: the treasure used randrange(1, 10) (i.e. 1..9) while the board
# and the player span 1..10, so it could never spawn in row/column 10.
skarb_x = random.randrange(1, 11)
skarb_y = random.randrange(1, 11)
print(f'Debug: gracz x: {gracz_x}, y: {gracz_y}')
print(f'Debug: skarb x: {skarb_x}, y: {skarb_y}')
# print() returns None, so the old ``krok = print(...)`` assignment was
# meaningless; just print the instructions.
print(f'Podaj w którą stronę idziesz:\n'
      f'[l]ewo\n'
      f'[p]rawo\n'
      f'[g]óra\n'
      f'[d]ół\n'
      f'Wyjdziesz za planszę to zginiesz')
while True:
    print(f'Debug: gracz x: {gracz_x}, y: {gracz_y}')
    print(f'Debug: skarb x: {skarb_x}, y: {skarb_y}')
    krok = input('Gdzie idziesz?')
    # Remember the previous position so we can tell warmer from colder.
    p_gracz_x = gracz_x
    p_gracz_y = gracz_y
    if krok == 'l':
        gracz_x -= 1
    if krok == 'p':
        gracz_x += 1
    if krok == 'g':
        gracz_y -= 1
    if krok == 'd':
        gracz_y += 1
    # Walking off the 10x10 board ends the game immediately.
    if not (0 < gracz_x < 11 and 0 < gracz_y < 11):
        exit('Wyszedłeś poza planszę')
    if gracz_x == skarb_x and gracz_y == skarb_y:
        break
    # Compare Manhattan distances before and after the move.
    if abs(gracz_x - skarb_x) + abs(gracz_y - skarb_y) < abs(p_gracz_x - skarb_x) + abs(p_gracz_y - skarb_y):
        print('Ciepło')
    else:
        print('Zimno')
    print(f'Twoja aktualna pozycja to: x: {gracz_x}, y: {gracz_y} ')
print('Brawo! Znalazłeś skarb')
| [
"fzubowicz@gmail.com"
] | fzubowicz@gmail.com |
d50ee5614b6458f9b37e671f7f4b5bf5761a9488 | ca082797e89e12138e7daac2d5fda41d072ac3bf | /constants.py | 14fe1728db33e6fd09f69ae6097d6f32f74a6e8f | [] | no_license | SmileyJoe/pygame_tetris | 18a05df529afcc58c1a031076932d65e15d1ad7a | bcff26fc5f29a2a14af2a72de8be487cacea4d05 | refs/heads/master | 2021-08-18T04:41:03.495808 | 2020-08-08T21:08:49 | 2020-08-08T21:08:49 | 212,444,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | BLOCK_WIDTH = 20
| [
"noreply@github.com"
] | SmileyJoe.noreply@github.com |
2a814317557e930c6e5fe3fcb7968d5290cec338 | 548659eb5d932a338a4399ff920b4efe91461d64 | /day-07/part-2/remi.py | 7f327b14abbf2a737fda1c3cbace12a21bee63f6 | [
"MIT"
] | permissive | lypnol/adventofcode-2019 | 7b3535d3e17b3283db237c86055b8531704ba080 | ca244804d4bcb08404a0ba011c05d9b8ca322a98 | refs/heads/master | 2022-02-06T00:03:02.821888 | 2022-01-23T14:27:38 | 2022-01-23T14:27:38 | 223,915,089 | 9 | 6 | MIT | 2022-01-23T14:27:38 | 2019-11-25T09:53:40 | Python | UTF-8 | Python | false | false | 3,637 | py | from tool.runners.python import SubmissionPy
from itertools import permutations
class RemiSubmission(SubmissionPy):
    def run(self, s):
        """Advent of Code 2019 day 7 part 2: maximize the thruster signal
        over all phase permutations of 5..9, running the five amplifiers
        in a feedback loop until the last one halts."""
        program = [int(token) for token in s.split(",")]
        best = 0
        for phases in permutations([5, 6, 7, 8, 9]):
            amplifiers = [IntCode(list(program), phase) for phase in phases]
            signal = 0
            # Keep circulating the signal until the last amplifier exits.
            while not amplifiers[-1].exited:
                for amplifier in amplifiers:
                    amplifier.p_input.append(signal)
                    amplifier.execute()
                    signal = amplifier.p_output.pop()
                best = max(best, signal)
        return best
class IntCode:
    """A pausable Intcode interpreter (Advent of Code 2019).

    Supports opcodes 1 (add), 2 (multiply), 3 (input), 4 (output),
    5/6 (conditional jumps), 7 (less-than), 8 (equals) and 99 (halt),
    with position (0) and immediate (1) parameter modes.

    Inputs are queued in ``p_input``; outputs are appended to
    ``p_output``.  ``execute`` runs until the program halts (``exited``
    becomes True) or until it needs an input that has not arrived yet,
    in which case it pauses and can be resumed by appending to
    ``p_input`` and calling ``execute`` again.
    """
    def __init__(self, p, p_input):
        self.p = p                 # program memory (mutated in place)
        self.pc = 0                # program counter
        self.p_input = [p_input]   # pending inputs; first is the phase setting
        self.p_output = []         # values emitted by opcode 4
        self.exited = False        # True once opcode 99 executed
    def get_param(self, p, opcode, index, param):
        """Resolve parameter number ``index`` of ``opcode``.

        Mode 0 (position) dereferences ``param``; mode 1 (immediate)
        returns it as-is.  Unknown modes raise instead of silently
        returning None as before.
        """
        modes = opcode // 100
        for _ in range(index):
            modes //= 10
        mode = modes % 10
        if mode == 0:
            return p[param]
        elif mode == 1:
            return param
        raise ValueError("unknown parameter mode: %d" % mode)
    def execute(self):
        """Run the program until it halts or needs more input."""
        if self.exited:
            return
        while True:
            opcode = self.p[self.pc]
            op = opcode % 100
            if op == 1:  # add
                a = self.get_param(self.p, opcode, 0, self.p[self.pc + 1])
                b = self.get_param(self.p, opcode, 1, self.p[self.pc + 2])
                self.p[self.p[self.pc + 3]] = a + b
                self.pc += 4
            elif op == 2:  # multiply
                a = self.get_param(self.p, opcode, 0, self.p[self.pc + 1])
                b = self.get_param(self.p, opcode, 1, self.p[self.pc + 2])
                self.p[self.p[self.pc + 3]] = a * b
                self.pc += 4
            elif op == 3:  # input (pause when none is available)
                # BUG FIX: the original wrapped this in a bare ``except:``
                # that swallowed *any* error, not just an empty input
                # queue; test the condition explicitly instead.
                if not self.p_input:
                    return
                self.p[self.p[self.pc + 1]] = self.p_input.pop(0)
                self.pc += 2
            elif op == 4:  # output
                self.p_output.append(
                    self.get_param(self.p, opcode, 0, self.p[self.pc + 1])
                )
                self.pc += 2
            elif op == 5:  # jump-if-true
                a = self.get_param(self.p, opcode, 0, self.p[self.pc + 1])
                b = self.get_param(self.p, opcode, 1, self.p[self.pc + 2])
                if a != 0:
                    self.pc = b
                else:
                    self.pc += 3
            elif op == 6:  # jump-if-false
                a = self.get_param(self.p, opcode, 0, self.p[self.pc + 1])
                b = self.get_param(self.p, opcode, 1, self.p[self.pc + 2])
                if a == 0:
                    self.pc = b
                else:
                    self.pc += 3
            elif op == 7:  # less-than
                a = self.get_param(self.p, opcode, 0, self.p[self.pc + 1])
                b = self.get_param(self.p, opcode, 1, self.p[self.pc + 2])
                self.p[self.p[self.pc + 3]] = 1 if a < b else 0
                self.pc += 4
            elif op == 8:  # equals
                a = self.get_param(self.p, opcode, 0, self.p[self.pc + 1])
                b = self.get_param(self.p, opcode, 1, self.p[self.pc + 2])
                self.p[self.p[self.pc + 3]] = 1 if a == b else 0
                self.pc += 4
            elif op == 99:  # halt
                self.exited = True
                return
            else:
                # BUG FIX: an unknown opcode previously matched no branch
                # and spun forever in this loop; fail loudly instead.
                raise ValueError("unknown opcode: %d" % opcode)
| [
"remicalixte.rmc@gmail.com"
] | remicalixte.rmc@gmail.com |
6bf1ec396da74c8fa242c13abb1eb5677f823eb5 | 8936c124497e3fbd3108f77ca90c69b0a71403d2 | /Project 3 - Graph plan/search (1).py | 6cc31ff36f6bc9c6c6f3b3090dc5ad92df506a46 | [] | no_license | shaharec/into-to-AI | 752f426867f639775eb2919047f2a46045978f66 | 8e1006789b047972f8b6f72d2938e11de6870453 | refs/heads/main | 2023-05-08T11:37:25.160133 | 2021-05-30T19:18:15 | 2021-05-30T19:18:15 | 348,135,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,577 | py | # search.py
# ---------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
In search.py, you will implement generic search algorithms which are called
by Pacman agents (in searchAgents.py).
"""
import util
from util import Stack;
from util import Queue;
from util import PriorityQueue;
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).

    Concrete problems (e.g. PositionSearchProblem) implement every method;
    calling one of the stubs below aborts via util.raiseNotDefined().

    You do not need to change anything in this class, ever.
    """

    def getStartState(self):
        """
        Returns the start state for the search problem
        """
        util.raiseNotDefined()

    def isGoalState(self, state):
        """
        state: Search state

        Returns True if and only if the state is a valid goal state
        """
        util.raiseNotDefined()

    def getSuccessors(self, state):
        """
        state: Search state

        For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        util.raiseNotDefined()

    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take

        This method returns the total cost of a particular sequence of actions. The sequence must
        be composed of legal moves
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """Return the fixed action sequence that solves tinyMaze.

    Only valid for the tinyMaze layout; on any other maze this hard-coded
    sequence of moves will be incorrect (possibly illegal).
    """
    from game import Directions
    south, west = Directions.SOUTH, Directions.WEST
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """Graph-search DFS: always expand the deepest frontier node first.

    Each tree node stores a (position, action) pair, where `action` is the
    move taken from the parent to reach it.  Returns the list of actions
    from the start state to a goal state, or [] when no goal is found.
    """
    frontier = Stack()
    frontier.push(Node((problem.getStartState(), None), None))
    explored = set()
    goal_node = None
    while not frontier.isEmpty():
        node = frontier.pop()
        position = node.getValue()[0]
        # Goal test happens on pop, before the explored check.
        if problem.isGoalState(position):
            goal_node = node
            break
        if position in explored:
            continue
        explored.add(position)
        for next_pos, action, _cost in problem.getSuccessors(position):
            frontier.push(Node((next_pos, action), node))
    if goal_node is None:
        return []
    # Walk parent links back to the root, collecting actions in order.
    actions = []
    while goal_node.getParent() is not None:
        actions.insert(0, goal_node.getValue()[1])
        goal_node = goal_node.getParent()
    return actions
def breadthFirstSearch(problem):
    """Graph-search BFS: always expand the shallowest frontier node first.

    Identical to depthFirstSearch except that the frontier is a FIFO Queue.
    Returns the action list from start to goal, or [] when no goal is found.
    """
    frontier = Queue()
    frontier.push(Node((problem.getStartState(), None), None))
    explored = set()
    goal_node = None
    while not frontier.isEmpty():
        node = frontier.pop()
        position = node.getValue()[0]
        if problem.isGoalState(position):
            goal_node = node
            break
        if position in explored:
            continue
        explored.add(position)
        for next_pos, action, _cost in problem.getSuccessors(position):
            frontier.push(Node((next_pos, action), node))
    if goal_node is None:
        return []
    # Reconstruct the path by following parent links back to the root.
    actions = []
    while goal_node.getParent() is not None:
        actions.insert(0, goal_node.getValue()[1])
        goal_node = goal_node.getParent()
    return actions
def uniformCostSearch(problem):
    """Graph-search UCS: expand the node with the lowest path cost first.

    Node values are (position, action, cost-from-start) triples; the
    PriorityQueue is keyed by that accumulated cost.  Returns the action
    list from start to goal, or [] when no goal is found.
    """
    frontier = PriorityQueue()
    frontier.push(Node((problem.getStartState(), None, 0), None), 0)
    explored = set()
    goal_node = None
    while not frontier.isEmpty():
        node = frontier.pop()
        position, _, cost_so_far = node.getValue()
        if problem.isGoalState(position):
            goal_node = node
            break
        if position in explored:
            continue
        explored.add(position)
        for next_pos, action, step_cost in problem.getSuccessors(position):
            new_cost = cost_so_far + step_cost
            frontier.push(Node((next_pos, action, new_cost), node), new_cost)
    if goal_node is None:
        return []
    # Reconstruct the path by following parent links back to the root.
    actions = []
    while goal_node.getParent() is not None:
        actions.insert(0, goal_node.getValue()[1])
        goal_node = goal_node.getParent()
    return actions
def nullHeuristic(state, problem=None):
    """
    A heuristic function estimates the cost from the current state to the nearest
    goal in the provided SearchProblem.  This heuristic is trivial.

    It always returns 0, so A* with this heuristic behaves exactly like
    uniform cost search.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """Graph-search A*: expand the node with the lowest f = g + h first.

    g is the accumulated path cost and h the heuristic estimate.  Node
    values are (position, action, cost-from-start) triples.  Returns the
    action list from start to goal, or [] when no goal is found.

    Fix over the original: `explored` is a set (the original used a list),
    matching the other search functions and making the membership test O(1)
    instead of an O(n) scan per expansion.
    """
    frontier = PriorityQueue()
    start = problem.getStartState()
    frontier.push(Node((start, None, 0), None), heuristic(start, problem))
    explored = set()
    goal_node = None
    while not frontier.isEmpty():
        node = frontier.pop()
        position, _, cost_so_far = node.getValue()
        if problem.isGoalState(position):
            goal_node = node
            break
        if position in explored:
            continue
        explored.add(position)
        for next_pos, action, step_cost in problem.getSuccessors(position):
            new_cost = cost_so_far + step_cost
            # Priority is g + h; the node itself only carries g.
            frontier.push(Node((next_pos, action, new_cost), node),
                          new_cost + heuristic(next_pos, problem))
    if goal_node is None:
        return []
    # Reconstruct the path by following parent links back to the root.
    actions = []
    while goal_node.getParent() is not None:
        actions.insert(0, goal_node.getValue()[1])
        goal_node = goal_node.getParent()
    return actions
# Abbreviations
# Short aliases so callers (e.g. the command line / autograder) can select
# a search algorithm by its usual initials.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
class Node:
    """A search-tree node: a value plus the parent Node whose expansion
    discovered it (None for the root)."""

    def __init__(self, val, p):
        self.value = val
        self.parent = p

    def getValue(self):
        """Return the stored value."""
        return self.value

    def getParent(self):
        """Return the parent Node, or None for the root."""
        return self.parent
| [
"noreply@github.com"
] | shaharec.noreply@github.com |
0f95d1f0afd35793d831794f083971ba12078c33 | 335747796db247660fae03be38d39dc4030520c3 | /tests/repositories/test_service_repository.py | 5cb8f47174b8d660c079eb718c0863ff48d26569 | [
"Apache-2.0"
] | permissive | Neoklosch/Motey | 633f550c90815c523265afa5ab2307a3c5aa2df3 | d3f07d9d161d97ec2c19f66167dfb26eb9c6e616 | refs/heads/master | 2022-08-25T09:04:35.480653 | 2019-06-27T20:16:18 | 2019-06-27T20:16:18 | 90,272,879 | 0 | 1 | Apache-2.0 | 2022-08-06T05:22:14 | 2017-05-04T14:28:01 | Python | UTF-8 | Python | false | false | 2,546 | py | import unittest
import uuid
from unittest import mock
from tinydb import TinyDB, Query
from motey.repositories import service_repository
class TestServiceRepository(unittest.TestCase):
    """Unit tests for service_repository.ServiceRepository with mocked TinyDB."""

    # NOTE(review): decorating setUp with @classmethod is unusual — unittest
    # invokes setUp on the instance, so here the fixture state lands on the
    # class object instead; setUpClass may have been intended. Confirm.
    @classmethod
    def setUp(self):
        # Fresh random id per setup ("text_service_id" looks like a typo
        # for "test_service_id").
        self.text_service_id = uuid.uuid4().hex
        self.test_service = {'id': self.text_service_id, 'service_name': 'test service name', 'images': ['test image']}
        # Point the module under test at a scratch DB path and replace its
        # collaborators with mocks so no real TinyDB file is touched.
        service_repository.config = {'DATABASE': {'path': '/tmp/testpath'}}
        service_repository.BaseRepository = mock.Mock(service_repository.BaseRepository)
        service_repository.TinyDB = mock.Mock(TinyDB)
        service_repository.Query = mock.Mock(Query)
        self.test_service_repository = service_repository.ServiceRepository()

    def test_construction(self):
        # The repository must create its db handle on construction.
        self.assertIsNotNone(self.test_service_repository.db)

    def test_add_service_does_not_exist(self):
        # Unknown service id -> add() performs an insert.
        self.test_service_repository.has = mock.MagicMock(return_value=False)
        self.test_service_repository.db.insert = mock.MagicMock(return_value='123')
        self.test_service_repository.add(service=self.test_service)
        self.assertTrue(self.test_service_repository.db.insert.called)

    def test_add_servie_exist(self):
        # Already-known service id -> add() skips the insert.
        # ("servie" is a typo for "service"; the test is still discovered
        # by unittest because of the test_ prefix.)
        self.test_service_repository.has = mock.MagicMock(return_value=True)
        self.test_service_repository.db.insert = mock.MagicMock(return_value='123')
        self.test_service_repository.add(service=self.test_service)
        self.assertFalse(self.test_service_repository.db.insert.called)

    def test_udpate(self):
        # update() must delegate to db.update.  ("udpate" is a typo.)
        self.test_service_repository.update(service=self.test_service)
        self.assertTrue(self.test_service_repository.db.update.called)

    def test_remove(self):
        # remove() must delegate to db.remove.
        self.test_service_repository.remove(service_id=self.test_service['id'])
        self.assertTrue(self.test_service_repository.db.remove.called)

    def test_has_entry(self):
        # Non-empty search result -> has() returns True.
        self.test_service_repository.db.search = mock.MagicMock(return_value=[1, 2])
        result = self.test_service_repository.has(service_id=self.test_service['id'])
        self.assertTrue(self.test_service_repository.db.search.called)
        self.assertTrue(result)

    def test_has_no_entry(self):
        # Empty search result -> has() returns False.
        self.test_service_repository.db.search = mock.MagicMock(return_value=[])
        result = self.test_service_repository.has(service_id=self.test_service['id'])
        self.assertTrue(self.test_service_repository.db.search.called)
        self.assertFalse(result)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"markus.paeschke@gmail.com"
] | markus.paeschke@gmail.com |
e4b140bd4a3681c4aff2b85b0c7660c38588549f | 9c0eebdeb427db1ea1ce33987947e22b2c897440 | /map.py | 1925fc08dc7034e1f11acc9e148e356e5ec8fb80 | [] | no_license | dkotenko/npuzz | 6d52c2ca9d733c8d59450af65f89c8bbac938134 | 461a864659893ec8276fafe3e58f73d853d1e42c | refs/heads/main | 2023-06-22T06:30:10.979771 | 2021-07-13T23:54:21 | 2021-07-13T23:54:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,075 | py | from Printer import Printer
import sys
def parse_int(s):
    """Convert `s` to an int; on failure report a map error and terminate
    via Printer.print_error_exit."""
    try:
        return int(s)
    except ValueError:
        # Show something readable even when the token is pure whitespace.
        shown = s.strip() if s.strip() else '{empty value}'
        Printer.print_error_exit(f"map error: string {shown} is not an integer")
    return 0
def validate_map(b):
    """Validate a '/'-separated board string: every number must appear
    exactly once and lie in the range 0..len-1.  Reports all offending
    values via Printer and exits with status 1 on failure.

    Fixes over the original:
    - iterating `dict_count` yielded only keys, so `for key, val in
      dict_count` raised TypeError; it now iterates `.items()`.
    - the range error message referenced `math.sqrt(nums)` (un-imported
      `math`, and sqrt of a list) — it now states the actual valid range.
    - the lower-bound check was inconsistent (`< 0` vs `< 1`); both now
      use `< 0`, matching the pre-filter.
    """
    nums = [parse_int(s) for s in b.split("/")]
    dict_count = {i: nums.count(i) for i in nums}
    if max(dict_count.values()) > 1:
        for key, val in dict_count.items():
            if val > 1:
                Printer.print_error(f'map error: duplicated number {key}')
        sys.exit(1)
    if any(x >= len(nums) or x < 0 for x in nums):
        for n in nums:
            if n >= len(nums) or n < 0:
                Printer.print_error(f'map error: invalid number {n}: must be in range 0:{len(nums) - 1}')
        sys.exit(1)
def parse_map(file_name):
    """Read an n-puzzle map file and return the board as a '/'-joined string.

    Lines may carry '#' comments; blank or comment-only lines are skipped.
    The first meaningful value is the board size N, followed by N rows of
    N numbers.  Errors are reported through Printer and stop the program.
    """
    # Existence check only; Printer.print_error_exit presumably terminates
    # the process here (the handle `f` is never used or closed — the file
    # is reopened below).
    try:
        f = open(file_name)
    except FileNotFoundError:
        Printer.print_error_exit(f"there is no file {file_name}")
    with open(file_name, "r") as file:
        bb = ''  # accumulates all board values, '/'-separated
        line = file.readline()
        l_p = line.partition('#')[0]  # strip trailing comment
        # Skip leading blank/comment lines until the size line is found.
        while not l_p:
            line = file.readline()
            l_p = line.partition("#")[0]
        size_matr = parse_int(l_p)  # board dimension N
        line = file.readline()
        n_str = 1  # 1-based row counter
        while line:
            line = line.partition('#')[0]
            # Skip blank/comment-only lines inside the board block.
            while not line:
                line = file.readline()
                line = line.partition("#")[0]
            plus = '/'.join(line.split())  # current row, '/'-joined
            bb += '/'.join(line.split())
            bb += '/'  # nothing to replace at the end of the line, so append
            line = file.readline()
            # Each row must contain exactly N values.
            if (len(plus.split('/'))) != size_matr:
                Printer.print_error_exit(f"invalid map: invalid values number at row {n_str}")
                exit(0)  # NOTE(review): likely unreachable if print_error_exit exits
            n_str += 1
        bb = bb[0: -1]  # drop the trailing '/'
        # The number of rows read must also equal N.
        if (n_str - 1) != size_matr:
            Printer.print_error_exit(f'invalid map: invalid rows number = {n_str - 1}')
        return bb
"you@example.com"
] | you@example.com |
0a5e75d64b3e21d510ec0712d34ca4a20c0fb542 | 90c3318ee2beec4c11cd5d4b3afd35036b5539bb | /users/migrations/0001_initial.py | 0bc92427506d348f68138bbb47d34378429f7ea3 | [] | no_license | 17100262/Django-Blog | c81ec0714643081a604ad260f086fcd34d413e6b | acce7013c4da7d0a50d5f42ec334bf396829b8e1 | refs/heads/master | 2022-12-18T09:54:15.250520 | 2020-09-11T16:35:27 | 2020-09-11T16:35:27 | 294,742,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # Generated by Django 3.1 on 2020-08-30 18:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Profile model with a
    one-to-one link to the project's (swappable) user model."""

    # First migration of this app.
    initial = True

    dependencies = [
        # Profile references the user model, which may be swapped out.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Avatar image; falls back to a bundled default picture.
                ('image', models.ImageField(default='default.jpg', upload_to='')),
                # Deleting the user deletes the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"maueezahmed@gmail.com"
] | maueezahmed@gmail.com |
77ab769c1b8aec8167006b5f2add5683816dfb8c | 39f6aac24796f9cec335f7c5ad1e41fc9eb82bd3 | /scratch/method_1_poly.py | 33afd259e2beebb9eb847d1941eba4b44e8a08b2 | [] | no_license | saqibnizami/DSI-Project-2 | 578dba10c2da2273c82d5b006315fbfd87c8f20d | 03a76f10297a699e0a613d660a199237e29cce48 | refs/heads/master | 2020-07-17T07:34:07.493021 | 2018-07-17T12:10:26 | 2018-07-17T12:10:26 | 205,975,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,069 | py | # Get data
# --- Load raw train/test data --------------------------------------------
t = pd.read_csv("train.csv")
tt = pd.read_csv("test.csv")
# NOTE(review): bare expressions like the two below are notebook-style
# inspection lines; they have no effect when run as a plain script.
t.columns,tt.columns
len(yt)  # NOTE(review): `yt` is not defined anywhere above — confirm.
t.drop(['Id',], axis=1, inplace=True)
tt.drop('Id', axis=1, inplace=True)
t.shape,tt.shape
# Stack train + test so feature engineering is applied to both consistently.
train = pd.concat((t,tt)).reset_index(drop=True)
train['SalePrice'].isnull().sum()
y = pd.DataFrame(train['SalePrice'])
y.shape
# Test rows have no SalePrice, so the target column is mean-imputed.
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
y= imp.fit_transform(y)
y.shape
# NOTE(review): `obj_ranker`, `X`, `rankcol` and `over50p` are defined
# elsewhere (this looks like an extracted notebook fragment).
obj_ranker(X)
X[rankcol]
# --- Hand-crafted interaction / aggregate features -----------------------
X["Overall"] = X["Overall Qual"] * X["Overall Cond"]
X["External"] = X["Exter Qual"] * X["Exter Cond"]
X["Garage"] = X["Garage Qual"] * X["Garage Cond"]
X["Garage Area Qual"] = 2*(X["Garage Area"] + X["Garage Qual"])
X["Kitchen"] = X["Kitchen AbvGr"] * X["Kitchen Qual"]
X["Fireplace"] = X["Fireplaces"] * X["Fireplace Qu"]
X["Pool"] = X["Pool Area"] * X["Pool QC"]
X["Total SqFt"] = X["Gr Liv Area"] + X["Total Bsmt SF"]
X["Abv Grade SqFt"] = X["1st Flr SF"] + X["2nd Flr SF"]
X["Porch SqFt"] = (X["Open Porch SF"] + X["Enclosed Porch"] +
X["3Ssn Porch"] + X["Screen Porch"])
# Half baths count as 0.5 of a bathroom.
X["Total Bath Num"] = (X["Bsmt Full Bath"] + (0.5 * X["Bsmt Half Bath"]) +
X["Full Bath"] + (0.5 * X["Half Bath"]))
# X[over50p].isnull().sum()
# X[over50p] = X[over50p].fillna(method='median')
# Median-impute the selected columns, then expand them with degree-3
# interaction-only polynomial features.
imp = Imputer(strategy='median', axis=0)
X[over50p] = imp.fit_transform(X[over50p])
forpoly = X[over50p]
rest_of_X = X.drop(columns=over50p)
poly = PolynomialFeatures(degree=3, include_bias=False, interaction_only=True)
polyx = pd.DataFrame(poly.fit_transform(forpoly), columns=poly.get_feature_names(forpoly.columns))
X = polyx.join(rest_of_X, how='left')
# --- Split numeric vs categorical handling -------------------------------
catfeats = X.select_dtypes(include = ["object"]).columns
numfeats = X.select_dtypes(exclude = ["object"]).columns
numfeats = numfeats.drop("SalePrice")
print("Numerical features : " + str(len(numfeats)))
print("Categorical features : " + str(len(catfeats)))
Xnum = X[numfeats]
Xcat = X[catfeats]
print("NAs for numerical features in X : " + str(Xnum.isnull().values.sum()))
Xnum = Xnum.fillna(Xnum.median())
print("Remaining NAs for numerical features in X : " + str(Xnum.isnull().values.sum()))
# Log-transform numeric features whose skewness exceeds 0.5 in magnitude.
skewness = Xnum.apply(lambda x: skew(x))
skewness = skewness[abs(skewness) > 0.5]
print(str(skewness.shape[0]) + " skewed numerical features to log transform")
skewed = skewness.index
Xnum[skewed] = np.log1p(Xnum[skewed])
print("NAs for categorical features in X : " + str(Xcat.isnull().values.sum()))
# One-hot encode the categoricals, then recombine.
Xcat = pd.get_dummies(Xcat)
print("Remaining NAs for categorical features in X : " + str(Xcat.isnull().values.sum()))
X = pd.concat([Xnum, Xcat], axis = 1)
print("New number of features : " + str(X.shape[1]))
# 80/20 split; stratify on y (NOTE(review): stratifying on a continuous
# target is unusual — confirm).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, stratify=y,random_state = 19)
print("X_train : " + str(X_train.shape))
print("X_test : " + str(X_test.shape))
print("y_train : " + str(y_train.shape))
print("y_test : " + str(y_test.shape))
# Standardize numeric columns: fit on train only, apply to both splits.
ss = StandardScaler()
X_train.loc[:, numfeats] = ss.fit_transform(X_train.loc[:, numfeats])
X_test.loc[:, numfeats] = ss.transform(X_test.loc[:, numfeats])
"github@generalassemb.ly"
] | github@generalassemb.ly |
fab03a8abd843c2bbb6cdfa193cea2dbb5d74fd0 | baea4ae8aac7f57cbd374e4c1fe7b21206e9b380 | /forecast/urls.py | 709f1bdb1a9a444d43c6fb1925fbd5c8dc52c71a | [] | no_license | IHautaI/missions-forecast | ac2deb3ab24a5505e1261dc42d2b701996a696f1 | 9ebe4d5b9a9a38d355383fa6f96bf6a93bd821c8 | refs/heads/master | 2021-01-10T18:41:03.489170 | 2015-06-15T14:12:37 | 2015-06-15T14:12:37 | 37,431,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from django.conf.urls import include, url
from . import views
# URL routes for the forecast app; each named pattern maps to a view below.
urlpatterns = [
    # /specialist/<numeric id>/
    url(r'^specialist/(?P<specialist_id>[0-9]+)/$', views.specialist, name='specialist'),
    # /country/<name with optional spaces>/
    url(r'^country/(?P<country_name>(?:\w+\s?)+)/$', views.country, name='country'),
    url(r'^country/(?P<country_name>(?:\w+\s?)+)/relevant/$', \
        views.country_relevant, name='country-relevant'),
    # /code/<NAICS code, allowing dots and dashes>/
    url(r'^code/(?P<code>[\w\.-]+)/$', views.NAICS, name='NAICS'),
    url(r'^project/(?P<project_id>\w+)/$', views.project, name='project')
]
| [
"greshjs@gmail.com"
] | greshjs@gmail.com |
d58c5d69ac4d4936a7aeabe6f33219107db46479 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467519/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_7/run_cfg.py | ecd7e088550ddaf95adcd0944c067d9074645308 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | import FWCore.ParameterSet.Config as cms
import os,sys
# Make the per-job directory importable so base_cfg below can be found.
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467519/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
# Override the input source for this job: three cmgTuple ROOT files.
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_26_1_qK2.root',
        '/store/cmst3/group/cmgtools/CMG/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_27_1_vSH.root',
        '/store/cmst3/group/cmgtools/CMG/SUSYBBHToTauTau_M-1000_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_28_1_O6M.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
ae706498d56f2fd63d7b4d2949855d2574c6d9f6 | 81588eafe6a247bc5f12daf9ad2090f68526c45d | /knn.py | bda1cc2396725665e4916506204f23a5ce4c289f | [] | no_license | Unishshah/K-nearest-neighbor | 169e5f32436cbc8cc1f079b2c50b1097046ed97f | 6565516b313776b291fad0902f4a25043480a76e | refs/heads/master | 2021-08-15T12:18:06.934037 | 2017-11-17T20:35:50 | 2017-11-17T20:35:50 | 111,146,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import sklearn.metrics
# Load the dataset from a fixed local path.
df = pd.read_csv(r'''C:\Users\unish\Desktop\datat.csv''')
# Encode binary categorical columns as 0/1 integers.
df['sex'] = df['sex'].map({'M': 0, 'F': 1})
df['address'] = df['address'].map({'U': 0, 'R': 1})
df['guardian'] = df['guardian'].map({'mother': 0, 'father': 1})
#df['internet'] = df['internet'].map({'no': 0, 'yes': 1})
# First 11 columns are features; column 12 is the class label.
predictors = df.values[:, 0:11]
targets = df.values[:,12]
# 75/25 train/test split.
pred_train, pred_test, tar_train, tar_test = train_test_split(predictors, targets, test_size= 0.25)
print(pred_train.shape)
print(pred_test.shape)
print(tar_train.shape)
print(tar_test.shape)
# 1-nearest-neighbour classifier with uniform weighting.
neigh = KNeighborsClassifier(n_neighbors = 1, weights='uniform', algorithm='auto')
neigh.fit(pred_train, tar_train)
y_pred = neigh.predict(pred_test)
#accuracy
print("Accuracy is ", accuracy_score(tar_test, y_pred, normalize = True))
#classification error
print("Classification error is",1- accuracy_score(tar_test, y_pred, normalize = True))
#sensitivity
# NOTE(review): with average='micro', recall_score equals overall accuracy,
# so the two lines below mirror the accuracy/error printed above rather
# than computing per-class sensitivity/specificity — confirm intent.
print("sensitivity is", sklearn.metrics.recall_score(tar_test, y_pred, labels=None, average = 'micro', sample_weight=None))
#specificity
print("specificity is", 1 - sklearn.metrics.recall_score(tar_test, y_pred,labels=None, average = 'micro', sample_weight=None))
"unishshah@gmail.com"
] | unishshah@gmail.com |
d8ee1a504c9bd9c6cc1553f20e8b4ea2c99322c3 | b2df4e2e84f8d82bafa2d248fbdf77e24f6e2c27 | /config.py | d5391f7936579e947ba978b277d29bcbd0a0d316 | [] | no_license | oracletosun/WHATEVER1 | 79efefa6718d255f52d9ac2155f622e25f71abdd | 71e7ee1c3f6196f8c0b4a6157eecc8d52abe01a0 | refs/heads/master | 2020-03-24T20:30:13.173127 | 2018-07-31T07:57:41 | 2018-07-31T07:57:41 | 142,981,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | import random
# Random value between 6 and 10 (randint is inclusive on both ends),
# chosen once at import time.
TIME = random.randint(6,10)
| [
"noreply@github.com"
] | oracletosun.noreply@github.com |
aaf256ba2bdd21c457ade542d5729057eeced03f | 6e65ad467e8a873fede9f5ce63aff87ad64e4b9b | /usure/preprocessing/application/__init__.py | 71575516b235c9466dafa9ade02ac350efa58976 | [
"MIT"
] | permissive | PaNuMo/usure | 019b103ab345c6a9746dcbced2c0b8a05bb434c3 | ca3633ecafbab01239b074e152ded9eccddc8809 | refs/heads/master | 2020-06-18T21:19:59.515361 | 2019-07-10T05:32:18 | 2019-07-10T05:32:18 | 196,452,616 | 0 | 0 | null | 2019-07-11T19:18:49 | 2019-07-11T19:18:49 | null | UTF-8 | Python | false | false | 45 | py |
__name__="application"
from .etl import ETL | [
"cristian_ang@live.com"
] | cristian_ang@live.com |
34775d8e2f5c422962edb8210a4931541eb496d0 | 08d61da8cff80519bc3290b38e2008d480b257e2 | /whimpy/startup.py | 11fdcca802aab014198cbc62cd1d1567e9b15617 | [
"MIT"
] | permissive | miraculixx/whereismypackage | 6a7f57922f1579d597aec6d19d1e16accaaf5e7a | d6b42b825fa800fb435b70d4301b031d642b63be | refs/heads/master | 2021-06-17T14:40:11.502572 | 2020-05-28T23:49:20 | 2020-05-28T23:49:20 | 27,425,031 | 1 | 0 | MIT | 2021-06-10T19:46:34 | 2014-12-02T09:29:46 | JavaScript | UTF-8 | Python | false | false | 306 | py | #!/usr/bin/env python
import os
# Host/port come from Cloud Foundry's VCAP_* environment, with local defaults.
config = {
    'PORT' : os.environ.get('VCAP_APP_PORT', 80),
    'IP' : os.environ.get('VCAP_APP_HOST', '0.0.0.0')
}

# Restart the dev server up to 10 times if it crashes.
# (`while True and c < 10` in the original simplifies to `while c < 10`.)
c = 0
while c < 10:
    c = c + 1
    try:
        os.system('python manage.py runserver {IP}:{PORT}'.format(**config))
    except Exception:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, making the retry loop impossible
        # to stop with Ctrl-C.  Only ordinary errors are ignored now.
        pass
| [
"ps@novapp.ch"
] | ps@novapp.ch |
4210864d8351f209dc4661f80b9a92c07bf86f3d | 17b6caab6e0047ff7fffd258da8297436e7615d2 | /src/tests/tankFill/constants.py | 7fdefbd4d2b39cd67e7880ea59c6c6d9bdbf6592 | [] | no_license | eweilow/SF2567-hybrid-rocket-simulation-project | bf75644a73546754d5a1e9b1897058f3e8741fa4 | 3a60b20ebf15b4856b0425c0cdb9631cfeb3be14 | refs/heads/main | 2023-01-21T23:51:27.780956 | 2020-12-01T18:27:05 | 2020-12-01T18:27:05 | 310,228,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | class Lengths:
m = 1
dm = m/10
cm = dm/10
mm = cm/10
class Volume:
    # Volume units expressed in cubic metres (m3 == 1 is the SI base unit).
    m3 = 1
    dm3 = m3/(10**3)
    cm3 = dm3/(10**3)
    liter = dm3  # 1 litre == 1 dm^3
class Pressure:
    # Pressure units expressed in pascals.
    kPa = 1e3
    MPa = 1e6
    bar = 100 * kPa  # 1 bar == 100 kPa == 1e5 Pa
class Area:
    # Area units expressed in square metres (built from the length units above).
    cm2 = Lengths.cm * Lengths.cm
"eweilow@digitalkoppar.se"
] | eweilow@digitalkoppar.se |
5d39ce404f49ee9a6d244e769bf4dc0993630cc7 | d2b7f8aa21782e61dd645bdbb291e835827baf16 | /tictactoe_env/__init__.py | 877250048cbe4a96480735badd5639b6d510c514 | [] | no_license | lucadivit/Adversarial_RL_TicTacToe | d27d837b1ac84236994f87906438ae5574222321 | 470c53b4b5ba854f075ea43e1e9246fccb94734d | refs/heads/master | 2020-08-08T18:35:34.104088 | 2019-10-09T12:59:43 | 2019-10-09T12:59:43 | 213,889,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from gym.envs.registration import register
# Register the custom environment with Gym so it can be created via
# gym.make('tictactoe-v0').
register(
    id='tictactoe-v0',
    entry_point='tictactoe_env.envs:TicTacToeEnv')
"noreply@github.com"
] | lucadivit.noreply@github.com |
ecc3e6b8d119081e510084e3005d631f9d895d53 | 23c4f6d8a2a6b97077628c2a012b2b402c816d91 | /LeetCode算法题/0190_颠倒二进制位/颠倒二进制.py | a253597ca1dc577aa84d9985492621b0937a38bc | [] | no_license | exueyuanAlgorithm/AlgorithmDemo | 7ef6ff8104e8da5a81037795184115fb0ac8ca9a | d34d4b592d05e9e0e724d8834eaf9587a64c5034 | refs/heads/master | 2023-07-16T19:00:05.664780 | 2021-09-04T11:31:07 | 2021-09-04T11:31:07 | 277,327,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | class Solution:
def reverseBits(self, n: int) -> int:
result_num = 0
for i in range(31):
if n % 2 == 1:
result_num = result_num + 1 << 1
else:
result_num = result_num << 1
n = n >> 1
if n % 2 == 1:
result_num += 1
return result_num
# Quick manual smoke test: prints the 32-bit reversal of 0b111 (3758096384).
solution = Solution()
print(solution.reverseBits(0b111))
"1079240024@qq.com"
] | 1079240024@qq.com |
7c3cbb509670d4a23d7afd068652fab8eda4ab8b | f56662c97e1ce4e723b5dfb18cdbef2c7de5684e | /commands/reactions.py | aae7d8ddbbfa4a76adabb732f8e037e55c05fb8f | [] | no_license | NeGaiGEN7/RubyRoseBot | feb2b81e1a9dc2268db0c839ffce5508cde0be4b | e1485290c37bb96333d1cb42415a0f532e4c8942 | refs/heads/master | 2021-01-18T23:32:01.440165 | 2017-03-18T18:59:49 | 2017-03-18T18:59:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,236 | py | from discord.ext import commands
class Reactions():
    """Discord cog whose commands post canned reaction images or links.

    Refactor: every image command repeated the same typing-indicator +
    file-upload pair; that duplication is now in _send_image.  Command
    names, decorators, docstrings (shown as help text) and image paths
    are unchanged.
    """

    # Directory that holds all reaction images.
    IMG_DIR = "assets/imgs/reactions/"

    def __init__(self, bot):
        self.bot = bot

    async def _send_image(self, ctx, filename):
        """Show a typing indicator, then upload the named reaction image."""
        await self.bot.send_typing(ctx.message.channel)
        await self.bot.send_file(ctx.message.channel, self.IMG_DIR + filename)

    @commands.command()
    async def ropestore(self):
        """Kill yourselve."""
        await self.bot.say("http://ropestore.org")

    @commands.command(pass_context=True)
    async def rekt(self, ctx):
        """#REKT"""
        await self._send_image(ctx, "rekt.gif")

    @commands.command(pass_context=True)
    async def roasted(self, ctx):
        """MY NIGGA YOU JUST GOT ROASTED!"""
        await self._send_image(ctx, "roasted.gif")

    # I hope this unicode doesn't break
    @commands.command(pass_context=True)
    async def tableflip(self, ctx):
        """(╯°□°)╯︵ ┻━┻"""
        await self._send_image(ctx, "tableflip.gif")

    # I hope this unicode doesn't break
    @commands.command(pass_context=True)
    async def unflip(self, ctx):
        """┬─┬ ノ( ゜-゜ノ)"""
        await self._send_image(ctx, "unflip.gif")

    @commands.command(pass_context=True)
    async def triggered(self, ctx):
        """DID YOU JUST ASSUME MY GENDER? *TRIGGERED*"""
        await self._send_image(ctx, "triggered.gif")

    @commands.command(pass_context=True)
    async def delet(self, ctx):
        """Delet this"""
        await self._send_image(ctx, "delet_this.jpg")

    @commands.command(pass_context=True)
    async def what(self, ctx):
        """what?"""
        await self._send_image(ctx, "what.gif")

    @commands.command(pass_context=True)
    async def weirdshit(self, ctx):
        """WHY ARE YOU POSTING WEIRD SHIT?!?!?!"""
        await self._send_image(ctx, "weirdshit.jpg")

    @commands.command(pass_context=True)
    async def filth(self, ctx):
        """THIS IS ABSOLUTELY FILTHY!"""
        await self._send_image(ctx, "filth.gif")

    @commands.command(pass_context=True)
    async def heckoff(self, ctx):
        """heck off fools"""
        await self._send_image(ctx, "heckoff.png")

    @commands.command(pass_context=True)
    async def lewd(self, ctx):
        """WOAH THERE THAT'S LEWD!"""
        await self._send_image(ctx, "lewd.gif")
def setup(bot):
    # discord.py extension entry point: attach the Reactions cog to the bot.
    bot.add_cog(Reactions(bot))
| [
"creeperseth@mail.com"
] | creeperseth@mail.com |
919946fed5be2a8d467bc2f1db784427042a3c56 | b49420c84905523ee14fe9d724742163daa4f1ef | /ikea_ros_pkg/script/train.py | 5828f3241b20d5a27c2a95ed2ace1ed624e848ff | [] | no_license | himlen1990/ikea_assembly | de138e277c6aba4158fe87e2d8634e05f669a6af | d5a0dff451a073ffb92069737b9ce084e9dd7396 | refs/heads/master | 2020-03-29T21:43:11.052964 | 2018-09-28T03:03:53 | 2018-09-28T03:03:53 | 95,186,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | from read_data import dataReader
import numpy as np
import tensorflow as tf
from model import regression_model
import skimage.io as io
import cv2
def train():
sess = tf.Session()
model = regression_model(sess)
dr = dataReader('./dataset')
iteration = 15000
saver = tf.train.Saver()
test_image_batch, test_label_batch = dr.get_test_data(10)
#test_image_batch = test_image_batch - 144
for i in range(iteration):
train_image_batch, train_label_batch = dr.sample_batch(5)
#train_image_batch = train_image_batch-144
model.learn(train_image_batch, train_label_batch)
if i % 20 == 0:
print "iteration--- ",i
print "train loss"
model.eval(train_image_batch, train_label_batch)
print "test loss"
model.eval(test_image_batch, test_label_batch)
saver.save(sess, './params')
#test_image = image_batch[-1,:,:,:]
#result = model.predict(test_image)
#print result
#a = test_image.astype(np.uint8)
#io.imshow(a)
#io.show()
if __name__=='__main__':
    # Script entry point: run training when executed directly.
    train()
| [
"himlen1990@gmail.com"
] | himlen1990@gmail.com |
c394f10b52fe8b7d449085b3dfe3eb4347b47a19 | 2ac12bae4d53a03dbdce865f674243cd45ae5abf | /analysis_curve/select_example.py | d90fd58cfade2431b8de556c1b94a156497fd1b4 | [] | no_license | lidashuai123/SRCC_classification-and-stratification | 9bee42526d2f992c45be6baa15a532cd20550987 | b6eae7186fac8f9db669cf793408ec4059812678 | refs/heads/master | 2022-12-23T14:31:34.159424 | 2020-09-21T11:45:44 | 2020-09-21T11:50:44 | 297,318,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | ####印戒细胞癌课题,将PVP的最大层面移动到一个文件夹中方便选出合适的肿瘤
import os
import shutil
import numpy as np
path = r'G:\West China Hospotal-Gastric Cancer SRCC\Max_ROI_save\PVP'
target_path = r'G:\West China Hospotal-Gastric Cancer SRCC\Max_ROI_save\select_example'
for folder in os.listdir(path):
for item in os.listdir(os.path.join(path, folder)):
shutil.copy(os.path.join(path, folder, item), target_path)
| [
"2714241070@qq.com"
] | 2714241070@qq.com |
bcd1c2f1c3fdc0f2088fe69ccbcb0cb8fb88b0de | 960dd60c263cea329e27584b03bb430b025fe05a | /venv/lib/python3.6/site-packages/bigquery/client.py | eedafc23b7799a04e4b141860937b508bc7d12ac | [] | no_license | RuchiBhardwaj/covid_pipeline | 18b3c0ae5836487b150ad112d86e312544d19f9d | f21a98593383caed532b9e7178e70172984cd635 | refs/heads/master | 2022-12-04T09:02:47.076901 | 2020-06-08T14:12:18 | 2020-06-08T14:12:18 | 268,835,744 | 0 | 2 | null | 2022-11-27T19:32:17 | 2020-06-02T15:17:20 | Python | UTF-8 | Python | false | false | 70,802 | py | import calendar
import json
from logging import getLogger, NullHandler
from collections import defaultdict
from datetime import datetime, timedelta
from hashlib import sha256
from io import StringIO
from time import sleep, time
from functools import reduce
import six
from bigquery.errors import (BigQueryTimeoutException, JobExecutingException,
JobInsertException, UnfinishedQueryException)
from googleapiclient.discovery import build, DISCOVERY_URI
from googleapiclient.errors import HttpError
from httplib2 import Http
# OAuth 2.0 scopes for full and read-only BigQuery access.
BIGQUERY_SCOPE = [
'https://www.googleapis.com/auth/bigquery'
]
BIGQUERY_SCOPE_READ_ONLY = [
'https://www.googleapis.com/auth/bigquery.readonly'
]
# How long cached table listings are considered fresh.
CACHE_TIMEOUT = timedelta(seconds=30)
# Job createDisposition values.
JOB_CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
JOB_CREATE_NEVER = 'CREATE_NEVER'
# Job writeDisposition values.
JOB_WRITE_TRUNCATE = 'WRITE_TRUNCATE'
JOB_WRITE_APPEND = 'WRITE_APPEND'
JOB_WRITE_EMPTY = 'WRITE_EMPTY'
# Source-data character encodings.
JOB_ENCODING_UTF_8 = 'UTF-8'
JOB_ENCODING_ISO_8859_1 = 'ISO-8859-1'
# Query priorities.
JOB_PRIORITY_INTERACTIVE = 'INTERACTIVE'
JOB_PRIORITY_BATCH = 'BATCH'
# Export compression options.
JOB_COMPRESSION_NONE = 'NONE'
JOB_COMPRESSION_GZIP = 'GZIP'
# Data formats for load/export jobs.
JOB_FORMAT_CSV = 'CSV'
JOB_FORMAT_NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
JOB_SOURCE_FORMAT_DATASTORE_BACKUP = 'DATASTORE_BACKUP'
JOB_SOURCE_FORMAT_NEWLINE_DELIMITED_JSON = JOB_FORMAT_NEWLINE_DELIMITED_JSON
JOB_SOURCE_FORMAT_CSV = JOB_FORMAT_CSV
JOB_DESTINATION_FORMAT_AVRO = 'AVRO'
JOB_DESTINATION_FORMAT_NEWLINE_DELIMITED_JSON = \
JOB_FORMAT_NEWLINE_DELIMITED_JSON
JOB_DESTINATION_FORMAT_CSV = JOB_FORMAT_CSV
# Library logger; NullHandler so importing apps control output.
logger = getLogger(__name__)
logger.addHandler(NullHandler())
def get_client(project_id=None, credentials=None,
               service_url=None, service_account=None,
               private_key=None, private_key_file=None,
               json_key=None, json_key_file=None,
               readonly=True, swallow_results=True,
               num_retries=0):
    """Return a BigQueryClient.

    Either `credentials`, or a service account plus a P12/PEM key
    (`private_key` / `private_key_file`), or a JSON key
    (`json_key` / `json_key_file`) must be supplied.

    Parameters
    ----------
    project_id : str, optional
        BigQuery project id; required unless a JSON key (which contains
        the project id) is provided.
    credentials : oauth2client credentials, optional
        Pre-built credentials; skips all key handling below.
    service_url : str, optional
        Discovery URI template; defaults to googleapiclient's default.
    service_account : str, optional
        Service account name, used with P12/PEM keys.
    private_key, private_key_file : str, optional
        P12/PEM key material or path to it.
    json_key, json_key_file : dict or str, optional
        JSON key dict or path to a JSON key file.
    readonly : bool
        Request the read-only BigQuery scope. Default True.
    swallow_results : bool
        If False, mutating client calls return raw responses. Default True.
    num_retries : int
        Number of request retries. Default 0.

    Returns
    -------
    BigQueryClient
    """
    if not credentials:
        assert (service_account and (private_key or private_key_file)) or (
            json_key or json_key_file), \
            'Must provide AssertionCredentials or service account and P12 key\
            or JSON key'

    if not project_id:
        assert json_key or json_key_file, \
            'Must provide project_id unless json_key or json_key_file is\
            provided'

    if service_url is None:
        service_url = DISCOVERY_URI

    scope = BIGQUERY_SCOPE_READ_ONLY if readonly else BIGQUERY_SCOPE

    if private_key_file:
        credentials = _credentials().from_p12_keyfile(service_account,
                                                      private_key_file,
                                                      scopes=scope)

    if private_key:
        try:
            if isinstance(private_key, basestring):
                private_key = private_key.decode('utf-8')
        except NameError:
            # python3 -- private_key is already unicode
            pass
        credentials = _credentials().from_p12_keyfile_buffer(
            service_account,
            StringIO(private_key),
            scopes=scope)

    if json_key_file:
        with open(json_key_file, 'r') as key_file:
            json_key = json.load(key_file)

    if json_key:
        credentials = _credentials().from_json_keyfile_dict(json_key,
                                                            scopes=scope)
        if not project_id:
            # JSON keys embed the project id, so it may be omitted above.
            project_id = json_key['project_id']

    bq_service = _get_bq_service(credentials=credentials,
                                 service_url=service_url)

    return BigQueryClient(bq_service, project_id, swallow_results,
                          num_retries)
def get_projects(bq_service):
    """Given the BigQuery service, return data about all projects.

    Parameters
    ----------
    bq_service : googleapiclient service
        Authorized BigQuery service object.

    Returns
    -------
    list of dict
        One ``{'id': ..., 'name': ...}`` per project (name comes from the
        API's ``friendlyName`` field). Empty list if there are none.
    """
    projects_request = bq_service.projects().list().execute()

    projects = []
    for project in projects_request.get('projects', []):
        project_data = {
            'id': project['id'],
            'name': project['friendlyName']
        }
        projects.append(project_data)

    return projects
def _get_bq_service(credentials=None, service_url=None):
    """Construct an authorized BigQuery service object.

    Raises AssertionError if credentials are missing.
    """
    assert credentials, 'Must provide ServiceAccountCredentials'

    http = credentials.authorize(Http())
    service = build(
        'bigquery',
        'v2',
        http=http,
        discoveryServiceUrl=service_url,
        # Discovery caching is disabled to avoid oauth2client cache warnings.
        cache_discovery=False
    )

    return service
def _credentials():
    """Import lazily and return the ServiceAccountCredentials class.

    Deferred import keeps oauth2client optional until credentials are needed.
    """
    from oauth2client.service_account import ServiceAccountCredentials

    return ServiceAccountCredentials
class BigQueryClient(object):
def __init__(self, bq_service, project_id, swallow_results=True,
             num_retries=0):
    """Wrap an authorized BigQuery service object.

    Parameters
    ----------
    bq_service : googleapiclient service
        Authorized BigQuery service object.
    project_id : str
        Default project for all requests.
    swallow_results : bool
        If True, mutating calls return booleans instead of raw responses.
    num_retries : int
        Number of times failed API calls are retried.
    """
    self.bigquery = bq_service
    self.project_id = project_id
    self.swallow_results = swallow_results
    self.num_retries = num_retries
    # Request cache -- presumably populated by table-listing helpers
    # elsewhere in this class (not visible here); verify before relying on it.
    self.cache = {}
def _get_project_id(self, project_id=None):
    """Resolve an effective project id.

    Lets callers authenticate against one project but run jobs in another.

    Parameters
    ----------
    project_id : str, optional
        Explicit project id; falls back to the client's default when None.

    Returns
    -------
    str
        The project id to use.
    """
    if project_id is None:
        project_id = self.project_id
    return project_id
def _submit_query_job(self, query_data):
    """Submit a query job to BigQuery via jobs().query().

    Parameters
    ----------
    query_data : dict
        Query object as per "configuration.query" in
        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query

    Returns
    -------
    tuple
        (job id, rows) for a completed query. For a dry run, job id is
        None on an invalid query (with the error response as the second
        element), else [cacheHit, totalBytesProcessed].

    Raises
    ------
    BigQueryTimeoutException
        When a timeoutMs was requested and the job did not finish in time.
    """
    logger.debug('Submitting query job: %s' % query_data)

    job_collection = self.bigquery.jobs()

    try:
        query_reply = job_collection.query(
            projectId=self.project_id, body=query_data).execute(
            num_retries=self.num_retries)
    except HttpError as e:
        # An invalid dry-run query surfaces as an HttpError; return the
        # decoded error body instead of raising so callers can inspect it.
        if query_data.get("dryRun", False):
            return None, json.loads(e.content.decode('utf8'))
        raise

    job_id = query_reply['jobReference'].get('jobId')
    schema = query_reply.get('schema', {'fields': None})['fields']
    rows = query_reply.get('rows', [])
    job_complete = query_reply.get('jobComplete', False)
    cache_hit = query_reply['cacheHit']
    total_bytes_processed = query_reply['totalBytesProcessed']

    # Raise if a synchronous (timed) query did not complete in time.
    if not job_complete and query_data.get("timeoutMs", False):
        logger.error('BigQuery job %s timeout' % job_id)
        raise BigQueryTimeoutException()

    # NOTE: default changed from True to False for consistency with the
    # dry-run check above; query() always sets 'dryRun' explicitly, so
    # behavior is unchanged for in-file callers.
    if query_data.get("dryRun", False):
        return job_id, [cache_hit, total_bytes_processed]

    return job_id, [self._transform_row(row, schema) for row in rows]
def _get_job_reference(self, job_id):
    """Build a jobReference dict for `job_id` in the client's project.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#resource

    Parameters
    ----------
    job_id : str
        Id of the job.

    Returns
    -------
    dict
        ``{'projectId': ..., 'jobId': ...}``
    """
    job_reference = {
        "projectId": self.project_id,
        "jobId": job_id
    }
    return job_reference
def _insert_job(self, body_object):
    """Submit a job to BigQuery via jobs().insert().

    Direct proxy to the official client's insert(); able to submit load,
    link, query, copy, or extract jobs.

    Parameters
    ----------
    body_object : dict
        Body object passed to bigquery.jobs().insert().

    Returns
    -------
    dict
        Response of the bigquery.jobs().insert().execute() call.
    """
    logger.debug('Submitting job: %s' % body_object)

    job_collection = self.bigquery.jobs()

    return job_collection.insert(
        projectId=self.project_id,
        body=body_object
    ).execute(num_retries=self.num_retries)
def query(self, query, max_results=None, timeout=0, dry_run=False,
          use_legacy_sql=None, external_udf_uris=None):
    """Submit a query to BigQuery.

    Parameters
    ----------
    query : str
        BigQuery query string.
    max_results : int, optional
        Maximum rows per page of results.
    timeout : float, optional
        Seconds to wait for completion before timing out.
    dry_run : bool, optional
        If True, the query isn't run; a valid query returns cache hit and
        total bytes processed, an invalid one returns its error response.
    use_legacy_sql : bool, optional
        If False, use BigQuery standard SQL.
    external_udf_uris : list, optional
        External UDF URIs (Google Cloud Storage, .js extensions).

    Returns
    -------
    tuple
        (job id, query results); see _submit_query_job for dry-run shape.

    Raises
    ------
    BigQueryTimeoutException
        On timeout.
    """
    logger.debug('Executing query: %s' % query)

    query_data = {
        'query': query,
        'timeoutMs': timeout * 1000,
        'dryRun': dry_run,
        'maxResults': max_results
    }

    if use_legacy_sql is not None:
        query_data['useLegacySql'] = use_legacy_sql

    if external_udf_uris:
        query_data['userDefinedFunctionResources'] = \
            [{'resourceUri': u} for u in external_udf_uris]

    return self._submit_query_job(query_data)
def get_query_schema(self, job_id):
    """Retrieve the schema of a query by job id.

    Parameters
    ----------
    job_id : str
        The job_id that references a BigQuery query.

    Returns
    -------
    list of dict
        The result schema's fields.

    Raises
    ------
    UnfinishedQueryException
        If the job has not completed yet.
    """
    query_reply = self.get_query_results(job_id, offset=0, limit=0)

    if not query_reply['jobComplete']:
        logger.warning('BigQuery job %s not complete' % job_id)
        raise UnfinishedQueryException()

    return query_reply['schema']['fields']
def get_table_schema(self, dataset, table, project_id=None):
    """Return the table schema.

    Parameters
    ----------
    dataset : str
        The dataset containing the `table`.
    table : str
        The table to get the schema for.
    project_id : str, optional
        The project of the dataset.

    Returns
    -------
    list of dict or None
        The table schema's fields, or None if the table doesn't exist.
    """
    project_id = self._get_project_id(project_id)

    try:
        result = self.bigquery.tables().get(
            projectId=project_id,
            tableId=table,
            datasetId=dataset).execute(num_retries=self.num_retries)
    except HttpError as e:
        # 404 means "no such table" -- report as None rather than raising.
        if int(e.resp['status']) == 404:
            # logger.warn is a deprecated alias; use warning().
            logger.warning('Table %s.%s does not exist', dataset, table)
            return None
        raise

    return result['schema']['fields']
def check_job(self, job_id):
    """Return the state and number of results of a query by job id.

    Parameters
    ----------
    job_id : str
        The job id of the query to check.

    Returns
    -------
    tuple
        (bool, int): whether the query has completed, and the total number
        of rows in its result (0 if not complete).
    """
    query_reply = self.get_query_results(job_id, offset=0, limit=0)

    return (query_reply.get('jobComplete', False),
            int(query_reply.get('totalRows', 0)))
def get_query_rows(self, job_id, offset=None, limit=None, timeout=0):
    """Retrieve a list of rows from a query table by job id.

    Appends results from multiple pages together. For manual paging use
    `get_query_results` directly.

    Parameters
    ----------
    job_id : str
        The job id that references a BigQuery query.
    offset : int, optional
        The offset of the rows to pull from BigQuery.
    limit : int, optional
        The number of rows to retrieve.
    timeout : float, optional
        Timeout in seconds.

    Returns
    -------
    list of dict
        Table rows.

    Raises
    ------
    UnfinishedQueryException
        If the job has not completed yet.
    """
    query_reply = self.get_query_results(job_id, offset=offset,
                                         limit=limit, timeout=timeout)

    if not query_reply['jobComplete']:
        logger.warning('BigQuery job %s not complete' % job_id)
        raise UnfinishedQueryException()

    schema = query_reply["schema"]["fields"]
    rows = query_reply.get('rows', [])
    page_token = query_reply.get("pageToken")
    records = [self._transform_row(row, schema) for row in rows]

    # Keep fetching pages until the token runs out or `limit` is reached.
    while page_token and (not limit or len(records) < limit):
        query_reply = self.get_query_results(
            job_id, offset=offset, limit=limit, page_token=page_token,
            timeout=timeout)
        page_token = query_reply.get("pageToken")
        rows = query_reply.get('rows', [])
        records += [self._transform_row(row, schema) for row in rows]

    return records[:limit] if limit else records
def check_dataset(self, dataset_id, project_id=None):
    """Check to see if a dataset exists.

    Parameters
    ----------
    dataset_id : str
        Dataset unique id.
    project_id : str, optional
        The project the dataset is in.

    Returns
    -------
    bool
        True if the dataset exists, else False.
    """
    dataset = self.get_dataset(dataset_id, project_id)
    return bool(dataset)
def get_dataset(self, dataset_id, project_id=None):
    """Retrieve a dataset if it exists, otherwise return an empty dict.

    Parameters
    ----------
    dataset_id : str
        Dataset unique id.
    project_id : str, optional
        The project the dataset is in.

    Returns
    -------
    dict
        The dataset resource, or empty if the lookup failed.
    """
    project_id = self._get_project_id(project_id)

    try:
        dataset = self.bigquery.datasets().get(
            projectId=project_id, datasetId=dataset_id).execute(
            num_retries=self.num_retries)
    except HttpError:
        # Any API failure (including 404) maps to "no dataset".
        dataset = {}

    return dataset
def check_table(self, dataset, table, project_id=None):
    """Check to see if a table exists.

    Parameters
    ----------
    dataset : str
        The dataset to check.
    table : str
        The name of the table.
    project_id : str, optional
        The project the table is in.

    Returns
    -------
    bool
        True if the table exists, else False.
    """
    table = self.get_table(dataset, table, project_id)
    return bool(table)
def get_table(self, dataset, table, project_id=None):
    """Retrieve a table if it exists, otherwise return an empty dict.

    Parameters
    ----------
    dataset : str
        The dataset that the table is in.
    table : str
        The name of the table.
    project_id : str, optional
        The project that the table is in.

    Returns
    -------
    dict
        The table resource, or empty if the lookup failed.
    """
    project_id = self._get_project_id(project_id)

    try:
        table = self.bigquery.tables().get(
            projectId=project_id, datasetId=dataset,
            tableId=table).execute(num_retries=self.num_retries)
    except HttpError:
        # Any API failure (including 404) maps to "no table".
        table = {}

    return table
def create_table(self, dataset, table, schema,
                 expiration_time=None, time_partitioning=False,
                 project_id=None):
    """Create a new table in the dataset.

    Parameters
    ----------
    dataset : str
        The dataset to create the table in.
    table : str
        The name of the table to create.
    schema : dict
        The table schema.
    expiration_time : int or double, optional
        The expiry time in milliseconds since the epoch.
    time_partitioning : bool, optional
        Create a day-based time partitioning.
    project_id : str, optional
        The project to create the table in.

    Returns
    -------
    Union[bool, dict]
        True/False on success/failure, or the raw BigQuery response
        (success) / empty dict (failure) if swallow_results is False.
    """
    project_id = self._get_project_id(project_id)

    body = {
        'schema': {'fields': schema},
        'tableReference': {
            'tableId': table,
            'projectId': project_id,
            'datasetId': dataset
        }
    }

    if expiration_time is not None:
        body['expirationTime'] = expiration_time

    if time_partitioning:
        body['timePartitioning'] = {'type': 'DAY'}

    try:
        table = self.bigquery.tables().insert(
            projectId=project_id,
            datasetId=dataset,
            body=body
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return table

    except HttpError as e:
        logger.error(('Cannot create table {0}.{1}.{2}\n'
                      'Http Error: {3}').format(project_id, dataset,
                                                table, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
def update_table(self, dataset, table, schema, project_id=None):
    """Update an existing table in the dataset.

    Parameters
    ----------
    dataset : str
        The dataset to update the table in.
    table : str
        The name of the table to update.
    schema : dict
        Table schema.
    project_id : str, optional
        The project to update the table in.

    Returns
    -------
    Union[bool, dict]
        True/False on success/failure, or the raw BigQuery response
        (success) / empty dict (failure) if swallow_results is False.
    """
    project_id = self._get_project_id(project_id)

    body = {
        'schema': {'fields': schema},
        'tableReference': {
            'tableId': table,
            'projectId': project_id,
            'datasetId': dataset
        }
    }

    try:
        result = self.bigquery.tables().update(
            projectId=project_id,
            tableId=table,
            datasetId=dataset,
            body=body
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return result

    except HttpError as e:
        logger.error(('Cannot update table {0}.{1}.{2}\n'
                      'Http Error: {3}').format(project_id, dataset,
                                                table, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
def patch_table(self, dataset, table, schema, project_id=None):
    """Patch an existing table in the dataset.

    Parameters
    ----------
    dataset : str
        The dataset to patch the table in.
    table : str
        The name of the table to patch.
    schema : dict
        The table schema.
    project_id : str, optional
        The project to patch the table in.

    Returns
    -------
    Union[bool, dict]
        True/False on success/failure, or the raw BigQuery response
        (success) / empty dict (failure) if swallow_results is False.
    """
    project_id = self._get_project_id(project_id)

    body = {
        'schema': {'fields': schema},
    }

    try:
        result = self.bigquery.tables().patch(
            projectId=project_id,
            datasetId=dataset,
            tableId=table,
            body=body
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return result

    except HttpError as e:
        logger.error(('Cannot patch table {0}.{1}.{2}\n'
                      'Http Error: {3}').format(project_id, dataset,
                                                table, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
def create_view(self, dataset, view, query, use_legacy_sql=None,
                project_id=None):
    """Create a new view in the dataset.

    Parameters
    ----------
    dataset : str
        The dataset to create the view in.
    view : str
        The name of the view to create.
    query : dict
        A query that BigQuery executes when the view is referenced.
    use_legacy_sql : bool, optional
        If False, the view query uses BigQuery standard SQL.
    project_id : str, optional
        The project to create the view in.

    Returns
    -------
    Union[bool, dict]
        True/False on success/failure, or the raw BigQuery response
        (success) / empty dict (failure) if swallow_results is False.
    """
    project_id = self._get_project_id(project_id)

    body = {
        'tableReference': {
            'tableId': view,
            'projectId': project_id,
            'datasetId': dataset
        },
        'view': {
            'query': query
        }
    }

    if use_legacy_sql is not None:
        body['view']['useLegacySql'] = use_legacy_sql

    try:
        view = self.bigquery.tables().insert(
            projectId=project_id,
            datasetId=dataset,
            body=body
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return view

    except HttpError as e:
        logger.error(('Cannot create view {0}.{1}\n'
                      'Http Error: {2}').format(dataset, view, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
def delete_table(self, dataset, table, project_id=None):
    """Delete a table from the dataset.

    Parameters
    ----------
    dataset : str
        The dataset to delete the table from.
    table : str
        The name of the table to delete.
    project_id : str, optional
        String id of the project.

    Returns
    -------
    Union[bool, dict]
        True/False on success/failure, or the raw BigQuery response
        (success) / empty dict (failure) if swallow_results is False.
    """
    project_id = self._get_project_id(project_id)

    try:
        response = self.bigquery.tables().delete(
            projectId=project_id,
            datasetId=dataset,
            tableId=table
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return response

    except HttpError as e:
        logger.error(('Cannot delete table {0}.{1}\n'
                      'Http Error: {2}').format(dataset, table, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
def get_tables(self, dataset_id, app_id, start_time, end_time,
               project_id=None):
    """Retrieve tables for the given app id within a time range.

    Parameters
    ----------
    dataset_id : str
        The BigQuery dataset id to consider.
    app_id : str
        The appspot name.
    start_time : Union[datetime, int]
        The datetime or unix time after which records will be fetched.
    end_time : Union[datetime, int]
        The datetime or unix time up to which records will be fetched.
    project_id : str, optional
        String id of the project.

    Returns
    -------
    list
        Table names.
    """
    # Normalize datetimes to unix timestamps (UTC).
    if isinstance(start_time, datetime):
        start_time = calendar.timegm(start_time.utctimetuple())

    if isinstance(end_time, datetime):
        end_time = calendar.timegm(end_time.utctimetuple())

    every_table = self._get_all_tables(dataset_id, project_id)
    app_tables = every_table.get(app_id, {})

    return self._filter_tables_by_time(app_tables, start_time, end_time)
def import_data_from_uris(
        self,
        source_uris,
        dataset,
        table,
        schema=None,
        job=None,
        source_format=None,
        create_disposition=None,
        write_disposition=None,
        encoding=None,
        ignore_unknown_values=None,
        max_bad_records=None,
        allow_jagged_rows=None,
        allow_quoted_newlines=None,
        field_delimiter=None,
        quote=None,
        skip_leading_rows=None,
        project_id=None,
):
    """Import data into a BigQuery table from cloud storage.

    Optional arguments that are omitted are defaulted by BigQuery; see
    https://developers.google.com/bigquery/docs/reference/v2/jobs

    Parameters
    ----------
    source_uris : Union[str, list]
        Cloud storage URI(s) of the form gs://bucket/filename.
    dataset : str
        String id of the dataset.
    table : str
        String id of the table.
    schema : list, optional
        Represents the BigQuery schema.
    job : str, optional
        Job identifier (auto-generated from dataset/table/URI digest if
        not provided).
    source_format : str, optional
        One of the JOB_SOURCE_FORMAT_* constants.
    create_disposition : str, optional
        One of the JOB_CREATE_* constants.
    write_disposition : str, optional
        One of the JOB_WRITE_* constants.
    encoding : str, optional
        One of the JOB_ENCODING_* constants.
    ignore_unknown_values : bool, optional
        Whether or not to ignore unknown values.
    max_bad_records : int, optional
        Maximum number of bad records.
    allow_jagged_rows, allow_quoted_newlines : bool, optional
        CSV only.
    field_delimiter, quote : str, optional
        CSV only.
    skip_leading_rows : int, optional
        CSV only.
    project_id : str, optional
        String id of the project.

    Returns
    -------
    dict
        A BigQuery job response.

    Raises
    ------
    JobInsertException
        On http/auth failures or error in result.
    Exception
        If a CSV-only parameter is given without JOB_SOURCE_FORMAT_CSV.
    """
    source_uris = source_uris if isinstance(source_uris, list) \
        else [source_uris]
    project_id = self._get_project_id(project_id)

    configuration = {
        "destinationTable": {
            "projectId": project_id,
            "tableId": table,
            "datasetId": dataset
        },
        "sourceUris": source_uris,
    }

    if max_bad_records:
        configuration['maxBadRecords'] = max_bad_records

    if ignore_unknown_values:
        configuration['ignoreUnknownValues'] = ignore_unknown_values

    if create_disposition:
        configuration['createDisposition'] = create_disposition

    if write_disposition:
        configuration['writeDisposition'] = write_disposition

    if encoding:
        configuration['encoding'] = encoding

    if schema:
        configuration['schema'] = {'fields': schema}

    if source_format:
        configuration['sourceFormat'] = source_format

    if not job:
        # Derive a stable job id from the source URIs.
        hex = self._generate_hex_for_uris(source_uris)
        job = "{dataset}-{table}-{digest}".format(
            dataset=dataset,
            table=table,
            digest=hex
        )

    if source_format == JOB_SOURCE_FORMAT_CSV:
        if field_delimiter:
            configuration['fieldDelimiter'] = field_delimiter

        if allow_jagged_rows:
            configuration['allowJaggedRows'] = allow_jagged_rows

        if allow_quoted_newlines:
            configuration['allowQuotedNewlines'] = allow_quoted_newlines

        if quote:
            configuration['quote'] = quote

        if skip_leading_rows:
            configuration['skipLeadingRows'] = skip_leading_rows

    elif field_delimiter or allow_jagged_rows \
            or allow_quoted_newlines or quote or skip_leading_rows:
        # Reject CSV-only options for non-CSV loads, reporting which were set.
        all_values = dict(field_delimiter=field_delimiter,
                          allow_jagged_rows=allow_jagged_rows,
                          allow_quoted_newlines=allow_quoted_newlines,
                          skip_leading_rows=skip_leading_rows,
                          quote=quote)
        non_null_values = dict((k, v) for k, v
                               in list(all_values.items())
                               if v)
        raise Exception("Parameters field_delimiter, allow_jagged_rows, "
                        "allow_quoted_newlines, quote and "
                        "skip_leading_rows are only allowed when "
                        "source_format=JOB_SOURCE_FORMAT_CSV: %s"
                        % non_null_values)

    body = {
        "configuration": {
            'load': configuration
        },
        "jobReference": self._get_job_reference(job)
    }

    logger.debug("Creating load job %s" % body)
    job_resource = self._insert_job(body)
    self._raise_insert_exception_if_error(job_resource)
    return job_resource
def export_data_to_uris(
        self,
        destination_uris,
        dataset,
        table,
        job=None,
        compression=None,
        destination_format=None,
        print_header=None,
        field_delimiter=None,
        project_id=None,
):
    """Export data from a BigQuery table to cloud storage.

    Optional arguments that are omitted are defaulted by BigQuery; see
    https://developers.google.com/bigquery/docs/reference/v2/jobs

    Parameters
    ----------
    destination_uris : Union[str, list]
        Cloud storage URI(s) of the form gs://bucket/filename.
    dataset : str
        String id of the dataset.
    table : str
        String id of the table.
    job : str, optional
        Job identifier (auto-generated from dataset/table/URI digest if
        not provided).
    compression : str, optional
        One of the JOB_COMPRESSION_* constants.
    destination_format : str, optional
        One of the JOB_DESTINATION_FORMAT_* constants.
    print_header : bool, optional
        Whether or not to print the header.
    field_delimiter : str, optional
        Character separating fields in the delimited file.
    project_id : str, optional
        String id of the project.

    Returns
    -------
    dict
        A BigQuery job resource.

    Raises
    ------
    JobInsertException
        On http/auth failures or error in result.
    """
    destination_uris = destination_uris \
        if isinstance(destination_uris, list) else [destination_uris]
    project_id = self._get_project_id(project_id)

    configuration = {
        "sourceTable": {
            "projectId": project_id,
            "tableId": table,
            "datasetId": dataset
        },
        "destinationUris": destination_uris,
    }

    if compression:
        configuration['compression'] = compression

    if destination_format:
        configuration['destinationFormat'] = destination_format

    if print_header is not None:
        configuration['printHeader'] = print_header

    if field_delimiter:
        configuration['fieldDelimiter'] = field_delimiter

    if not job:
        # Derive a stable job id from the destination URIs.
        hex = self._generate_hex_for_uris(destination_uris)
        job = "{dataset}-{table}-{digest}".format(
            dataset=dataset,
            table=table,
            digest=hex
        )

    body = {
        "configuration": {
            'extract': configuration
        },
        "jobReference": self._get_job_reference(job)
    }

    logger.info("Creating export job %s" % body)
    job_resource = self._insert_job(body)
    self._raise_insert_exception_if_error(job_resource)
    return job_resource
def write_to_table(
self,
query,
dataset=None,
table=None,
external_udf_uris=None,
allow_large_results=None,
use_query_cache=None,
priority=None,
create_disposition=None,
write_disposition=None,
use_legacy_sql=None,
maximum_billing_tier=None,
flatten=None,
project_id=None,
):
"""
Write query result to table. If dataset or table is not provided,
Bigquery will write the result to temporary table. Optional arguments
that are not specified are determined by BigQuery as described:
https://developers.google.com/bigquery/docs/reference/v2/jobs
Parameters
----------
query : str
BigQuery query string
dataset : str, optional
String id of the dataset
table : str, optional
String id of the table
external_udf_uris : list, optional
Contains external UDF URIs. If given, URIs must be Google Cloud
Storage and have .js extensions.
allow_large_results : bool, optional
Whether or not to allow large results
use_query_cache : bool, optional
Whether or not to use query cache
priority : str, optional
One of the JOB_PRIORITY_* constants
create_disposition : str, optional
One of the JOB_CREATE_* constants
write_disposition : str, optional
One of the JOB_WRITE_* constants
use_legacy_sql: bool, optional
If False, the query will use BigQuery's standard SQL
(https://cloud.google.com/bigquery/sql-reference/)
maximum_billing_tier : integer, optional
Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If
unspecified, this will be set to your project default. For more
information,
see https://cloud.google.com/bigquery/pricing#high-compute
flatten : bool, optional
Whether or not to flatten nested and repeated fields
in query results
project_id: str, optional
String id of the project
Returns
-------
dict
A BigQuery job resource
Raises
------
JobInsertException
On http/auth failures or error in result
"""
configuration = {
"query": query,
}
project_id = self._get_project_id(project_id)
if dataset and table:
configuration['destinationTable'] = {
"projectId": project_id,
"tableId": table,
"datasetId": dataset
}
if allow_large_results is not None:
configuration['allowLargeResults'] = allow_large_results
if flatten is not None:
configuration['flattenResults'] = flatten
if maximum_billing_tier is not None:
configuration['maximumBillingTier'] = maximum_billing_tier
if use_query_cache is not None:
configuration['useQueryCache'] = use_query_cache
if use_legacy_sql is not None:
configuration['useLegacySql'] = use_legacy_sql
if priority:
configuration['priority'] = priority
if create_disposition:
configuration['createDisposition'] = create_disposition
if write_disposition:
configuration['writeDisposition'] = write_disposition
if external_udf_uris:
configuration['userDefinedFunctionResources'] = \
[ {'resourceUri': u} for u in external_udf_uris ]
body = {
"configuration": {
'query': configuration
}
}
logger.info("Creating write to table job %s" % body)
job_resource = self._insert_job(body)
self._raise_insert_exception_if_error(job_resource)
return job_resource
def wait_for_job(self, job, interval=5, timeout=60):
"""
Waits until the job indicated by job_resource is done or has failed
Parameters
----------
job : Union[dict, str]
``dict`` representing a BigQuery job resource, or a ``str``
representing the BigQuery job id
interval : float, optional
Polling interval in seconds, default = 5
timeout : float, optional
Timeout in seconds, default = 60
Returns
-------
dict
Final state of the job resouce, as described here:
https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#get
Raises
------
Union[JobExecutingException, BigQueryTimeoutException]
On http/auth failures or timeout
"""
complete = False
job_id = str(job if isinstance(job,
(six.binary_type, six.text_type, int))
else job['jobReference']['jobId'])
job_resource = None
start_time = time()
elapsed_time = 0
while not (complete or elapsed_time > timeout):
sleep(interval)
request = self.bigquery.jobs().get(projectId=self.project_id,
jobId=job_id)
job_resource = request.execute(num_retries=self.num_retries)
self._raise_executing_exception_if_error(job_resource)
complete = job_resource.get('status').get('state') == u'DONE'
elapsed_time = time() - start_time
# raise exceptions if timeout
if not complete:
logger.error('BigQuery job %s timeout' % job_id)
raise BigQueryTimeoutException()
return job_resource
    def push_rows(self, dataset, table, rows, insert_id_key=None,
                  skip_invalid_rows=None, ignore_unknown_values=None,
                  template_suffix=None, project_id=None):
        """Upload rows to BigQuery table via the streaming insertAll API.
        Parameters
        ----------
        dataset : str
            The dataset to upload to
        table : str
            The name of the table to insert rows into
        rows : list
            A ``list`` of rows (``dict`` objects) to add to the table
        insert_id_key : str, optional
            Key for insertId in row.
            You can use dot separated key for nested column.
        skip_invalid_rows : bool, optional
            Insert all valid rows of a request, even if invalid rows exist.
        ignore_unknown_values : bool, optional
            Accept rows that contain values that do not match the schema.
        template_suffix : str, optional
            Inserts the rows into an {table}{template_suffix}.
            If table {table}{template_suffix} doesn't exist, create from {table}.
        project_id: str, optional
            The project to upload to
        Returns
        -------
        Union[bool, dict]
            bool indicating if insert succeeded or not, or response
            from BigQuery if swallow_results is set for False.
        """
        project_id = self._get_project_id(project_id)
        table_data = self.bigquery.tabledata()
        rows_data = []
        for row in rows:
            each_row = {}
            each_row["json"] = row
            if insert_id_key is not None:
                # Walk the dotted key path into the (possibly nested) row;
                # any missing intermediate level yields None.
                keys = insert_id_key.split('.')
                val = reduce(lambda d, key: d.get(key) if d else None, keys, row)
                if val is not None:
                    # insertId lets BigQuery de-duplicate retried inserts.
                    each_row["insertId"] = val
            rows_data.append(each_row)
        data = {
            "kind": "bigquery#tableDataInsertAllRequest",
            "rows": rows_data
        }
        # Optional flags: only send them when explicitly set, so BigQuery
        # applies its own defaults otherwise.
        if skip_invalid_rows is not None:
            data['skipInvalidRows'] = skip_invalid_rows
        if ignore_unknown_values is not None:
            data['ignoreUnknownValues'] = ignore_unknown_values
        if template_suffix is not None:
            data['templateSuffix'] = template_suffix
        try:
            response = table_data.insertAll(
                projectId=project_id,
                datasetId=dataset,
                tableId=table,
                body=data
            ).execute(num_retries=self.num_retries)
            # A 200 response may still carry per-row failures.
            if response.get('insertErrors'):
                logger.error('BigQuery insert errors: %s' % response)
                if self.swallow_results:
                    return False
                else:
                    return response
            if self.swallow_results:
                return True
            else:
                return response
        except HttpError as e:
            logger.exception('Problem with BigQuery insertAll')
            if self.swallow_results:
                return False
            else:
                # Mimic the API's insertErrors shape so callers can handle
                # transport failures uniformly.
                # NOTE(review): 'message' is the HttpError object itself, not
                # a string — confirm callers expect that.
                return {
                    'insertErrors': [{
                        'errors': [{
                            'reason': 'httperror',
                            'message': e
                        }]
                    }]
                }
def get_all_tables(self, dataset_id, project_id=None):
"""Retrieve a list of tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table data for.
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
A ``list`` with all table names
"""
tables_data = self._get_all_tables_for_dataset(dataset_id, project_id)
tables = []
for table in tables_data.get('tables', []):
table_name = table.get('tableReference', {}).get('tableId')
if table_name:
tables.append(table_name)
return tables
def _get_all_tables(self, dataset_id, cache=False, project_id=None):
"""Retrieve the list of tables for dataset, that respect the formats:
* appid_YYYY_MM
* YYYY_MM_appid
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
cache : bool, optional
To use cached value or not (default False). Timeout value equals
CACHE_TIMEOUT.
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
dict
A ``dict`` of app ids mapped to their table names
"""
do_fetch = True
if cache and self.cache.get(dataset_id):
time, result = self.cache.get(dataset_id)
if datetime.now() - time < CACHE_TIMEOUT:
do_fetch = False
if do_fetch:
result = self._get_all_tables_for_dataset(dataset_id, project_id)
self.cache[dataset_id] = (datetime.now(), result)
return self._parse_table_list_response(result)
    def _get_all_tables_for_dataset(self, dataset_id, project_id=None):
        """Retrieve a list of all tables for the dataset, following pagination.
        Parameters
        ----------
        dataset_id : str
            The dataset to retrieve table names for
        project_id: str
            Unique ``str`` identifying the BigQuery project contains the dataset
        Returns
        -------
        dict
            A ``dict`` containing tables key with all tables
        """
        project_id = self._get_project_id(project_id)
        result = self.bigquery.tables().list(
            projectId=project_id,
            datasetId=dataset_id).execute(num_retries=self.num_retries)
        # Follow nextPageToken until exhausted, accumulating every page's
        # tables into the first response so callers see a single dict.
        page_token = result.get('nextPageToken')
        while page_token:
            res = self.bigquery.tables().list(
                projectId=project_id,
                datasetId=dataset_id,
                pageToken=page_token
            ).execute(num_retries=self.num_retries)
            page_token = res.get('nextPageToken')
            result['tables'] += res.get('tables', [])
        return result
def _parse_table_list_response(self, list_response):
"""Parse the response received from calling list on tables.
Parameters
----------
list_response
The response found by calling list on a BigQuery table object.
Returns
-------
dict
Dates referenced by table names
"""
tables = defaultdict(dict)
for table in list_response.get('tables', []):
table_ref = table.get('tableReference')
if not table_ref:
continue
table_id = table_ref.get('tableId', '')
year_month, app_id = self._parse_table_name(table_id)
if not year_month:
continue
table_date = datetime.strptime(year_month, '%Y-%m')
unix_seconds = calendar.timegm(table_date.timetuple())
tables[app_id].update({table_id: unix_seconds})
# Turn off defualting
tables.default_factory = None
return tables
def _parse_table_name(self, table_id):
"""Parse a table name in the form of appid_YYYY_MM or
YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
Returns (None, None) in the event of a name like <desc>_YYYYMMDD_<int>
Parameters
----------
table_id : str
The table id as listed by BigQuery
Returns
-------
tuple
(year/month, app id), or (None, None) if the table id cannot be
parsed.
"""
# Prefix date
attributes = table_id.split('_')
year_month = "-".join(attributes[:2])
app_id = "-".join(attributes[2:])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]):
return year_month, app_id
# Postfix date
attributes = table_id.split('_')
year_month = "-".join(attributes[-2:])
app_id = "-".join(attributes[:-2])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]) and len(year_month) == 7:
return year_month, app_id
return None, None
def _filter_tables_by_time(self, tables, start_time, end_time):
"""Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
"""
return [table_name for (table_name, unix_seconds) in tables.items()
if self._in_range(start_time, end_time, unix_seconds)]
def _in_range(self, start_time, end_time, time):
"""Indicate if the given time falls inside of the given range.
Parameters
----------
start_time : int
The unix time for the start of the range
end_time : int
The unix time for the end of the range
time : int
The unix time to check
Returns
-------
bool
True if the time falls within the range, False otherwise.
"""
ONE_MONTH = 2764800 # 32 days
return start_time <= time <= end_time or \
time <= start_time <= time + ONE_MONTH or \
time <= end_time <= time + ONE_MONTH
    def get_query_results(self, job_id, offset=None, limit=None,
                          page_token=None, timeout=0):
        """Fetch result rows of the query job indicated by the given job id.
        This is a direct mapping to the BigQuery API:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults
        Parameters
        ----------
        job_id : str
            The job id of the query to check
        offset : optional
            The index the result set should start at.
        limit : int, optional
            The maximum number of results to retrieve.
        page_token : optional
            Page token, returned by previous call, to request the next page of
            results.
        timeout : float, optional
            Timeout in seconds
        Returns
        -------
        out
            The query reply
        """
        job_collection = self.bigquery.jobs()
        # offset/limit map to the API's startIndex/maxResults; the API
        # expects the timeout in milliseconds.
        return job_collection.getQueryResults(
            projectId=self.project_id,
            jobId=job_id,
            startIndex=offset,
            maxResults=limit,
            pageToken=page_token,
            timeoutMs=timeout * 1000).execute(num_retries=self.num_retries)
def _transform_row(self, row, schema):
"""Apply the given schema to the given BigQuery data row.
Parameters
----------
row
A single BigQuery row to transform
schema : list
The BigQuery table schema to apply to the row, specifically
the list of field dicts.
Returns
-------
dict
Mapping schema to row
"""
log = {}
# Match each schema column with its associated row value
for index, col_dict in enumerate(schema):
col_name = col_dict['name']
row_value = row['f'][index]['v']
if row_value is None:
log[col_name] = None
continue
# Recurse on nested records
if col_dict['type'] == 'RECORD':
row_value = self._recurse_on_row(col_dict, row_value)
# Otherwise just cast the value
elif col_dict['type'] == 'INTEGER':
row_value = int(row_value)
elif col_dict['type'] == 'FLOAT':
row_value = float(row_value)
elif col_dict['type'] == 'BOOLEAN':
row_value = row_value in ('True', 'true', 'TRUE')
elif col_dict['type'] == 'TIMESTAMP':
row_value = float(row_value)
log[col_name] = row_value
return log
def _recurse_on_row(self, col_dict, nested_value):
"""Apply the schema specified by the given dict to the nested value by
recursing on it.
Parameters
----------
col_dict : dict
The schema to apply to the nested value.
nested_value : A value nested in a BigQuery row.
Returns
-------
Union[dict, list]
``dict`` or ``list`` of ``dict`` objects from applied schema.
"""
row_value = None
# Multiple nested records
if col_dict['mode'] == 'REPEATED' and isinstance(nested_value, list):
row_value = [self._transform_row(record['v'], col_dict['fields'])
for record in nested_value]
# A single nested record
else:
row_value = self._transform_row(nested_value, col_dict['fields'])
return row_value
def _generate_hex_for_uris(self, uris):
"""Given uris, generate and return hex version of it
Parameters
----------
uris : list
Containing all uris
Returns
-------
str
Hexed uris
"""
return sha256((":".join(uris) + str(time())).encode()).hexdigest()
def _raise_insert_exception_if_error(self, job):
error_http = job.get('error')
if error_http:
raise JobInsertException(
"Error in export job API request: {0}".format(error_http))
# handle errorResult - API request is successful but error in result
error_result = job.get('status').get('errorResult')
if error_result:
raise JobInsertException(
"Reason:{reason}. Message:{message}".format(**error_result))
def _raise_executing_exception_if_error(self, job):
error_http = job.get('error')
if error_http:
raise JobExecutingException(
"Error in export job API request: {0}".format(error_http))
# handle errorResult - API request is successful but error in result
error_result = job.get('status').get('errorResult')
if error_result:
raise JobExecutingException(
"Reason:{reason}. Message:{message}".format(**error_result))
#
# DataSet manipulation methods
#
    def create_dataset(self, dataset_id, friendly_name=None, description=None,
                       access=None, location=None, project_id=None):
        """Create a new BigQuery dataset.
        Parameters
        ----------
        dataset_id : str
            Unique ``str`` identifying the dataset with the project (the
            referenceID of the dataset, not the integer id of the dataset)
        friendly_name: str, optional
            A human readable name
        description: str, optional
            Longer string providing a description
        access : list, optional
            Indicating access permissions (see
            https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
        location : str, optional
            Indicating where dataset should be stored: EU or US (see
            https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
        project_id: str
            Unique ``str`` identifying the BigQuery project contains the dataset
        Returns
        -------
        Union[bool, dict]
            ``bool`` indicating if dataset was created or not, or response
            from BigQuery if swallow_results is set for False
        """
        project_id = self._get_project_id(project_id)
        try:
            datasets = self.bigquery.datasets()
            # Build the dataset resource body from the supplied metadata.
            dataset_data = self.dataset_resource(dataset_id,
                                                 project_id=project_id,
                                                 friendly_name=friendly_name,
                                                 description=description,
                                                 access=access,
                                                 location=location
                                                 )
            response = datasets.insert(projectId=project_id,
                                       body=dataset_data).execute(
                num_retries=self.num_retries)
            if self.swallow_results:
                return True
            else:
                return response
        except HttpError as e:
            # API failure: log and report per the swallow_results contract.
            logger.error(
                'Cannot create dataset {0}, {1}'.format(dataset_id, e))
            if self.swallow_results:
                return False
            else:
                return {}
def get_datasets(self, project_id=None):
"""List all datasets in the project.
Parameters
----------
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
list
Dataset resources
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
request = datasets.list(projectId=project_id)
result = request.execute(num_retries=self.num_retries)
return result.get('datasets', [])
except HttpError as e:
logger.error("Cannot list datasets: {0}".format(e))
return None
    def delete_dataset(self, dataset_id, delete_contents=False, project_id=None):
        """Delete a BigQuery dataset.
        Parameters
        ----------
        dataset_id : str
            Unique ``str`` identifying the dataset with the project (the
            referenceId of the dataset)
        delete_contents : bool, optional
            If True, forces the deletion of the dataset even when the dataset
            contains data (Default = False)
        project_id: str, optional
            Unique ``str`` identifying the BigQuery project contains the dataset
        Returns
        -------
        Union[bool, dict]
            bool indicating if the delete was successful or not, or response
            from BigQuery if swallow_results is set for False
        Raises
        -------
        HttpError
            404 when dataset with dataset_id does not exist
        """
        project_id = self._get_project_id(project_id)
        try:
            datasets = self.bigquery.datasets()
            request = datasets.delete(projectId=project_id,
                                      datasetId=dataset_id,
                                      deleteContents=delete_contents)
            response = request.execute(num_retries=self.num_retries)
            if self.swallow_results:
                return True
            else:
                return response
        except HttpError as e:
            # API failure: log and report per the swallow_results contract.
            logger.error(
                'Cannot delete dataset {0}: {1}'.format(dataset_id, e))
            if self.swallow_results:
                return False
            else:
                return {}
def update_dataset(self, dataset_id, friendly_name=None, description=None,
access=None, project_id=None):
"""Updates information in an existing dataset. The update method
replaces the entire dataset resource, whereas the patch method only
replaces fields that are provided in the submitted dataset resource.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referencedId of the dataset)
friendly_name : str, optional
An optional descriptive name for the dataset.
description : str, optional
An optional description of the dataset.
access : list, optional
Indicating access permissions
project_id: str, optional
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the update was successful or not, or
response from BigQuery if swallow_results is set for False.
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
body = self.dataset_resource(dataset_id,
friendly_name=friendly_name,
description=description,
access=access,
project_id=project_id)
request = datasets.update(projectId=project_id,
datasetId=dataset_id,
body=body)
response = request.execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot update dataset {0}: {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
    def patch_dataset(self, dataset_id, friendly_name=None, description=None,
                      access=None, project_id=None):
        """Patch information in an existing dataset. The update method
        replaces the entire dataset resource, whereas the patch method only
        replaces fields that are provided in the submitted dataset resource.
        Parameters
        ----------
        dataset_id : str
            Unique string identifying the dataset with the project (the
            referenceId of the dataset)
        friendly_name : str, optional
            An optional descriptive name for the dataset.
        description : str, optional
            An optional description of the dataset.
        access : list, optional
            Indicating access permissions.
        project_id: str, optional
            Unique ``str`` identifying the BigQuery project contains the dataset
        Returns
        -------
        Union[bool, dict]
            ``bool`` indicating if the patch was successful or not, or response
            from BigQuery if swallow_results is set for False.
        """
        project_id = self._get_project_id(project_id)
        try:
            datasets = self.bigquery.datasets()
            # Build the (partial) dataset resource to submit.
            body = self.dataset_resource(dataset_id,
                                         friendly_name=friendly_name,
                                         description=description,
                                         access=access,
                                         project_id=project_id)
            request = datasets.patch(projectId=project_id,
                                     datasetId=dataset_id, body=body)
            response = request.execute(num_retries=self.num_retries)
            if self.swallow_results:
                return True
            else:
                return response
        except HttpError as e:
            # API failure: log and report per the swallow_results contract.
            logger.error('Cannot patch dataset {0}: {1}'.format(dataset_id, e))
            if self.swallow_results:
                return False
            else:
                return {}
def dataset_resource(self, ref_id, friendly_name=None, description=None,
access=None, location=None, project_id=None):
"""See
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource
Parameters
----------
ref_id : str
Dataset id (the reference id, not the integer id)
friendly_name : str, optional
An optional descriptive name for the dataset
description : str, optional
An optional description for the dataset
access : list, optional
Indicating access permissions
location: str, optional, 'EU' or 'US'
An optional geographical location for the dataset(EU or US)
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
dict
Representing BigQuery dataset resource
"""
project_id = self._get_project_id(project_id)
data = {
"datasetReference": {
"datasetId": ref_id,
"projectId": project_id
}
}
if friendly_name:
data["friendlyName"] = friendly_name
if description:
data["description"] = description
if access:
data["access"] = access
if location:
data["location"] = location
return data
    @classmethod
    def schema_from_record(cls, record):
        """Given a dict representing a record instance to be inserted into
        BigQuery, calculate the schema.
        Parameters
        ----------
        record : dict
            representing a record to be inserted into big query,
            where all keys are ``str`` objects (representing column names in
            the record) and all values are of type ``int``, ``str``,
            ``unicode``, ``float``, ``bool``, ``datetime``, or ``dict``. A
            ``dict`` value represents a record, and must conform to the same
            restrictions as record.
        Returns
        -------
        list
            BigQuery schema
        Notes
        -----
        Results are undefined if a different value type is provided for a
        repeated field: E.g.
        >>> { rfield: [ { x: 1}, {x: "a string"} ] } # undefined!
        """
        # Imported lazily at call time — presumably to avoid a circular
        # import at module load; TODO confirm.
        from bigquery.schema_builder import schema_from_record
        return schema_from_record(record)
| [
"ruchi.bhardwaj@nineleaps.com"
] | ruchi.bhardwaj@nineleaps.com |
10b5605b4bccd6d1f948a4c6810b3e573adb67ae | a961aa04d7c7d18fd2ac7da8a8016bacfabc6e1b | /elevennote/src/notes/migrations/0007_auto_20200509_1450.py | 38a6a80a43cd9fce7abbf51b8a93bfb99cfc98ae | [] | no_license | EgorovM/cs102 | a4f6423f3e96064c68a9015118cd141a8a7eea14 | 0f72f9027dbcda510c67f815348a8ce58f76d857 | refs/heads/master | 2021-06-21T16:21:10.880523 | 2020-06-06T08:34:28 | 2020-06-06T08:34:28 | 214,231,423 | 0 | 1 | null | 2021-06-10T22:52:37 | 2019-10-10T16:24:08 | JavaScript | UTF-8 | Python | false | false | 440 | py | # Generated by Django 2.0.1 on 2020-05-09 14:50
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Note.shared so the many-to-many
    # field may be left empty in forms (blank=True); the related model
    # stays settings.AUTH_USER_MODEL.
    dependencies = [
        ('notes', '0006_note_shared'),
    ]
    operations = [
        migrations.AlterField(
            model_name='note',
            name='shared',
            field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"EgorovM@bitbucket.org"
] | EgorovM@bitbucket.org |
cde9c5c591a0868fda460d5f45c15e0897cb2d77 | 89c4a43a505df8fdf1f0d7386988c4896c2e631b | /google/ads/googleads/v6/services/services/gender_view_service/transports/base.py | c2715ba063f55d3fe0da66e820e30cd4ad4a3ba0 | [
"Apache-2.0"
] | permissive | hurricanelennane/google-ads-python | a0a1fed690776a8bb2e81f637eb7eae10fb4992f | 310a488b6fdad9d5beea8fa4b166edce779a2511 | refs/heads/master | 2023-07-04T03:07:53.344466 | 2021-07-16T19:06:36 | 2021-07-16T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,582 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import gender_view
from google.ads.googleads.v6.services.types import gender_view_service
# Report the installed google-ads version in the client user-agent; fall
# back to an empty ClientInfo when the distribution metadata cannot be
# found (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-ads",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GenderViewServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for GenderViewService."""

    # OAuth scope required for all Google Ads API calls.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)

    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # NOTE(review): the `credentials` parameter shadows the imported
        # `credentials` module inside this method body.
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        # If no credentials are provided, then determine the appropriate
        # defaults from the environment (ADC).
        if credentials is None:
            credentials, _ = auth.default(scopes=self.AUTH_SCOPES)

        # Save the credentials.
        self._credentials = credentials

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Precomputed wrapped methods: each RPC is wrapped once so retries,
        # timeouts and client-info headers are applied uniformly.
        self._wrapped_methods = {
            self.get_gender_view: gapic_v1.method.wrap_method(
                self.get_gender_view,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    @property
    def get_gender_view(
        self,
    ) -> typing.Callable[
        [gender_view_service.GetGenderViewRequest], gender_view.GenderView
    ]:
        # Abstract RPC hook: concrete transports (e.g. gRPC) must override.
        raise NotImplementedError

# Public API of this module.
__all__ = ("GenderViewServiceTransport",)
| [
"noreply@github.com"
] | hurricanelennane.noreply@github.com |
8f648624a6f6672263fb311609c38a968b7e26d2 | 04d780d240580b60410ccd2f853f7aa528ae1710 | /polls/models.py | b5b5df0ff545dbd944e2bd407cbca6264901602e | [] | no_license | LocNguyenPV/student-management-django | b142c14a9336d7f6fba01b582d205a4dfb3cc21c | e33e5d801a81e0cf1d87422f8247854875ee1f4c | refs/heads/main | 2023-04-13T21:40:21.257053 | 2021-04-25T16:28:18 | 2021-04-25T16:28:18 | 361,262,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | from django.db import models
from datetime import datetime
# Create your models here.
class Student(models.Model):
    """Student record keyed by its registration number."""
    registration_number = models.CharField(max_length=11,
                                           primary_key=True,
                                           verbose_name="Registration Number"
                                           )
    name = models.CharField(max_length=200, verbose_name="Name")
    email = models.EmailField(max_length=200, verbose_name="Email")
    home_town = models.CharField(max_length=200, verbose_name="Home town")
    score = models.IntegerField(default=0, verbose_name="Score")
    date_of_birth = models.DateField(verbose_name="Date of Birth")
    # Pass the callable, NOT its result: `default=datetime.now()` would be
    # evaluated once at import time, stamping every new row with the time
    # the server process started instead of the actual creation time.
    register_date = models.DateTimeField(default=datetime.now)
    # Set by application code on edits; NULL until the first modification.
    modify_date = models.DateTimeField(null=True)
    # Soft-delete flag; rows are kept but marked deleted.
    is_delete = models.BooleanField(default=False)

    def __str__(self):
        return self.name
| [
"noreply@github.com"
] | LocNguyenPV.noreply@github.com |
0a86e75c70dcb21815b1a3f7ca3483db5fd939cc | 707c6a7f3b3213c8a996967ede905aeb18a8c6d9 | /solutions/Insert-Interval.py | d680a3144665d7fbb6a2e681c4be95c980267521 | [] | no_license | Ziyilan/Pyleetcode | d35b9c2ae6c890dfd42804264b139bfddb8db563 | 81a9d98607b4ce554507d16763ee82f7dad49edd | refs/heads/master | 2020-12-11T02:11:38.470153 | 2015-10-27T18:46:47 | 2015-10-27T18:46:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | """
Author: Jing (https://github.com/gnijuohz)
Insert Interval: https://oj.leetcode.com/problems/insert-interval
Given a set of non-overlapping intervals, insert a new interval into the intervals (merge if necessary).
You may assume that the intervals were initially sorted according to their start times.
Example 1:
Given intervals [1,3],[6,9], insert and merge [2,5] in as [1,5],[6,9].
Example 2:
Given [1,2],[3,5],[6,7],[8,10],[12,16], insert and merge [4,9] in as [1,2],[3,10],[12,16].
This is because the new interval [4,9] overlaps with [3,5],[6,7],[8,10].
Tags
Array, Sort, Show Similar Problems, (H) Merge Intervals
"""
# Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
    # @param intervals, a list of Intervals
    # @param newInterval, a Interval
    # @return a list of Interval
    def insert(self, intervals, newInterval):
        """Insert newInterval into intervals and merge any overlaps.

        Note: the input list is mutated (newInterval is appended before
        merging), matching the original behavior.
        """
        intervals.append(newInterval)
        return self.merge(intervals)

    def merge(self, intervals):
        """Merge overlapping intervals; the input need not be sorted."""
        if not intervals or len(intervals) == 1:
            return intervals
        # Sort by start using a plain key function. The original relied on
        # operator.attrgetter without importing operator (NameError).
        intervals = sorted(intervals, key=lambda interval: interval.start)
        merged = [intervals[0]]
        for interval in intervals[1:]:
            if interval.start <= merged[-1].end:
                # Overlaps (or touches) the last merged interval: extend it.
                merged[-1].end = max(merged[-1].end, interval.end)
            else:
                merged.append(interval)
        return merged
"gnijuohz@gmail.com"
] | gnijuohz@gmail.com |
101d77dd19df613d208d4d097e808037d9d2a2f5 | 6be49bcd473a7098cd0f6ea52475cd865bc4c158 | /creating_reference_geometries_for_eu_nuts.py | 65bf5366db2286103651c8a9997abaefc51f1daa | [
"BSD-2-Clause"
] | permissive | xarabiburacaramba/olu_v2 | 6c9520c4a2bf6e99ac3c7e6a6ba5af5ffe9a7808 | bd1cf9d91e90d704115fe1bc20eb88f29e0bb956 | refs/heads/main | 2023-08-30T17:12:47.068449 | 2021-11-05T11:50:11 | 2021-11-05T11:50:11 | 310,257,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,831 | py | from tridy import apply_function
import json
import sys
from osgeo import ogr
import psycopg2
#from credentials import connectionstring_localhost
import math
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
def sequence_generator(starting_number):
    """Yield consecutive integers starting at starting_number + 1.

    Stops once the counter passes 10**13 (the original hard bound).
    """
    upper_bound = 10000000000000
    while starting_number <= upper_bound:
        starting_number += 1
        yield starting_number
def find_intersecting_features(geom, lyr):
    """Return all features of OGR layer *lyr* whose geometry intersects *geom*.

    Reads the layer sequentially with GetNextFeature(), so the layer's read
    cursor is assumed to be at the start.
    Fixes over the original: the unused loop index and the dead `else: pass`
    branch are removed.
    """
    features = []
    for _ in range(lyr.GetFeatureCount()):
        feature_in_lyr = lyr.GetNextFeature()
        # keep the feature when its geometry intersects the probe geometry
        if geom.Intersects(feature_in_lyr.GetGeometryRef()):
            features.append(feature_in_lyr)
    return features
lpis_hilucs={'2':'111', '3':'111', '4':'111', '5':'111', '6':'110', '7':'110', '9':'110', '10':'110', '11':'110', '12':'631', '91':'120', '97':'142', '98':'120', '99':'120'}
ruian_zpv_hilucs={'1':'112', '2':'120', '3':'121', '4':'120', '5':'120', '6':'632', '7':'632', '8':'632', '9':'632', '10':'632', '11':'632', '12':'530', '13':'530', '14':'411', '15':'411', '16':'411', '17':'411', '18':'415', '19':'344', '20':'343', '21':'335', '22':'341', '23':'411', '24':'130', '25':'433', '26':'650', '27':'630', '28':'632', '29':'244','30':'630'}
ruian_dp_hilucs={'2':'111', '3':'111', '4':'111', '5':'500', '6':'110', '7':'110', '8':'110', '10':'120', '11':'632', '13':'510', '14':'660'}
corine_hilucs={'111':'500', '112':'500', '121':'660', '122':'410', '123':'414', '124':'413', '131':'130', '132':'433', '133':'660', '141':'340', '142':'340', '211':'111', '212':'111', '213':'111', '221':'111', '222':'111', '223':'111', '231':'111', '241':'111', '242':'111', '243':'111', '244':'120', '311':'120', '312':'120', '313':'120', '321':'631', '322':'631', '323':'631', '324':'631', '331':'631', '332':'631', '333':'631', '334':'620', '335':'631', '411':'631', '412':'631', '421':'631', '422':'631', '423':'631', '511':'414', '512':'660', '521':'660', '522':'414', '523':'660'}
ua_hilucs={'11100':'500', '11200':'500', '11210':'500', '11220':'500', '11230':'500', '11240':'500', '11300':'500', '12100':'300', '12200':'410', '12210':'411', '12220':'411', '12230':'412', '12300':'414', '12400':'413', '13100':'130', '13300':'600', '13400':'600', '14100':'344', '14200':'340', '21000':'110', '22000':'110', '23000':'110', '24000':'110', '25000':'110', '30000':'120', '31000':'120', '32000':'120', '33000':'660', '40000':'660', '50000':'660', '91000':'660', '92000':'660'}
lpis_clc={'2':'210','3':'220','4':'220','5':'220','6':'200','7':'231','9':'200','10':'200','11':'200','12':'240','91':'200','97':'500','98':'300','99':'300'}
ruian_zpv_clc={'1':'200','2':'200','3':'300','4':'300','5':'300','6':'510','7':'510','8':'510','9':'510','10':'510','11':'410','12':'100','13':'130','14':'122','15':'122','16':'122','17':'122','18':'122','19':'141','20':'142','21':'100','22':'100','23':'120','24':'131','25':'132','26':'100','27':'990','28':'510','29':'120','30':'300'}
ruian_dp_clc={'2':'210','3':'220','4':'220','5':'141','6':'200','7':'231','8':'231','10':'300','11':'510','13':'110','14':'990'}
ua_clc={'11100':'111','11200':'112','11210':'112','11220':'112','11230':'112','11240':'112','11300':'100','12100':'120','12210':'122','12200':'122','12220':'122','12230':'122','12300':'123','12400':'124','13100':'130','13300':'133','13400':'990','14100':'141','14200':'142','21000':'200','22000':'220','23000':'230','24000':'240','25000':'200','30000':'300','31000':'300','32000':'320','33000':'330','40000':'410','50000':'510','91000':'999','92000':'999'}
# psycopg2 connections for catalogue queries against the three databases
# (credentials are placeholders -- "port_number" is clearly meant to be filled in)
ua_conn=psycopg2.connect("dbname=urban_atlas host=localhost user=admin password=admin port=port_number")
corine_conn=psycopg2.connect("dbname=corine_land_cover host=localhost user=admin password=admin port=port_number")
olu_conn=psycopg2.connect("dbname=euoluv2 host=localhost user=admin password=admin port=port_number")
# GDAL/OGR connections used for spatial ExecuteSQL queries on the same data
conn_corine=ogr.Open("PG: host=localhost dbname=corine_land_cover user=admin password=admin")
conn_ua=ogr.Open("PG: host=localhost dbname=urban_atlas user=admin password=admin")
ua_cur=ua_conn.cursor()
corine_cur=corine_conn.cursor()
olu_cur=olu_conn.cursor()
# second cursor so the admin-unit result set below can stay open while
# olu_cur issues the per-unit DDL/inserts
olu_cur2=olu_conn.cursor()
# seed the id generators from the current sequence values
olu_cur.execute('SELECT last_value FROM olu2.olu_object_fid_seq')
olu_id_gen=sequence_generator(olu_cur.fetchone()[0])
olu_cur.execute('SELECT last_value FROM olu2.olu_attributes_fid_seq')
olu_atts_id_gen=sequence_generator(olu_cur.fetchone()[0])
# all level-3 administrative units, consumed one by one by the main loop
olu_cur2.execute("select st_asbinary(geom), fid from olu2.administrative_unit where level_code=3")
def ua_transformations(object,object_fid,attributes_fid,admunit_id,object_table_name,attributes_table_name,attributes2object_table_name,object2nuts_table_name):
    """Build the declarative mapping that turns one Urban Atlas OGR feature
    into four SQL INSERT statements (object, attribute set, and the two link
    tables).

    The mapping is evaluated lazily by apply_function(); requesting
    'four_insert_statements' yields [object, attributes, atts_to_object,
    object_to_admunit] INSERTs.  Urban Atlas rows use dataset_fid=3 and
    z_value=1000, with validity derived from the feature's prod_date.
    NOTE(review): values are spliced into the SQL with %-formatting, so a
    quote character inside the feature attributes would break (or inject
    into) the statement -- confirm the inputs are trusted, or switch to
    parameterised queries.
    """
    # Each key is either a constant ('object') or a lazy step ('function'
    # plus the names of previously computed 'parameters').
    o_dict=\
    {'feature':{'object':object},\
    'feature_json':{'function':(lambda y: json.loads(y.ExportToJson()) ),'parameters':['feature']},\
    'object_id':{'object':object_fid},\
    'attributes_id':{'object':attributes_fid},\
    'admunit_id':{'object':admunit_id},\
    'object_table_name':{'object':object_table_name},\
    'attributes_table_name':{'object':attributes_table_name},\
    'attributes2object_table_name':{'object':attributes2object_table_name},\
    'object2nuts_table_name':{'object':object2nuts_table_name},\
    'object_geom':{'function':(lambda y: y.GetGeometryRef().ExportToWkt() ),'parameters':['feature']},\
    'object_properties':{'function':(lambda x,y,z: {**{'fid':x},**{'dataset_id':3},**{'z_value':1000},**{'admunit_id':y},**{'valid_from':(z['properties']['prod_date']+'-01-01'),'valid_to':'2024-01-01'} }),'parameters':['object_id','admunit_id','feature_json']},
    'attributes_properties':{'function':(lambda x,y: {**{'fid':x},**{'hilucs_value':ua_hilucs[y['properties']['code2012']]},**{'clc_value':ua_clc[y['properties']['code2012']]},**{'atts':y['properties']},**{'dataset_id':3}}),'parameters':['attributes_id','feature_json']},
    'object_insert_statement':{'function':(lambda w,x,y,z: "INSERT INTO %s (fid,dataset_fid,z_value,geom,valid_from,valid_to) VALUES (%s,%s,%s,ST_SetSRID(ST_Multi(ST_GeomFromText('%s')),4326),'%s','%s')" % (w,x,z['dataset_id'],z['z_value'],y,z['valid_from'], z['valid_to'] ) ),'parameters':['object_table_name','object_id','object_geom','object_properties']},\
    'attributes_insert_statement':{'function':(lambda w,x,y,z: "INSERT INTO %s (fid,hilucs_id,atts,dataset_fid,clc_id) VALUES (%s,%s,('%s'::json),%s,%s)" % (w, x,z['hilucs_value'],json.dumps(y['properties']),z['dataset_id'],z['clc_value']) ),'parameters':['attributes_table_name','attributes_id','feature_json','attributes_properties']},\
    'attributes2object_insert_statement':{'function':(lambda x,y,z: "INSERT INTO %s (object_fid,atts_fid,atts_origin) VALUES (%s,%s,1)" % (z,x,y) ),'parameters':['object_id','attributes_id','attributes2object_table_name']},\
    'object2admunit_insert_statement':{'function':(lambda x,y,z: "INSERT INTO %s (object_fid,unit_fid) VALUES (%s,'%s')" % (z,x,y) ),'parameters':['object_id','admunit_id','object2nuts_table_name']},\
    'four_insert_statements':{'function':(lambda w,x,y,z: [w,x,y,z]),'parameters':['object_insert_statement','attributes_insert_statement','attributes2object_insert_statement','object2admunit_insert_statement']}
    }
    # apply_function() resolves whichever key the caller asks for
    return o_dict
def corine_transformations(object,object_fid,attributes_fid,admunit_id,object_table_name,attributes_table_name,attributes2object_table_name,object2nuts_table_name):
    """Build the declarative mapping that turns one Corine Land Cover OGR
    feature into four SQL INSERT statements (object, attribute set, and the
    two link tables).

    Same structure as ua_transformations(), but Corine rows use
    dataset_fid=4, z_value=100, a fixed 2018-01-01 validity start, and take
    the CLC code directly from the feature (HILUCS via corine_hilucs).
    NOTE(review): values are spliced into the SQL with %-formatting; a quote
    inside the feature attributes would break the statement -- confirm the
    inputs are trusted, or switch to parameterised queries.
    """
    # Each key is either a constant ('object') or a lazy step ('function'
    # plus the names of previously computed 'parameters').
    o_dict=\
    {'feature':{'object':object},\
    'feature_json':{'function':(lambda y: json.loads(y.ExportToJson()) ),'parameters':['feature']},\
    'object_id':{'object':object_fid},\
    'attributes_id':{'object':attributes_fid},\
    'admunit_id':{'object':admunit_id},\
    'object_table_name':{'object':object_table_name},\
    'attributes_table_name':{'object':attributes_table_name},\
    'attributes2object_table_name':{'object':attributes2object_table_name},\
    'object2nuts_table_name':{'object':object2nuts_table_name},\
    'object_geom':{'function':(lambda y: y.GetGeometryRef().ExportToWkt() ),'parameters':['feature']},\
    'object_properties':{'function':(lambda x,y: {**{'fid':x},**{'dataset_id':4},**{'z_value':100},**{'admunit_id':y},**{'valid_from':'2018-01-01','valid_to':'2024-01-01'} }),'parameters':['object_id','admunit_id']},
    'attributes_properties':{'function':(lambda x,y: {**{'fid':x},**{'hilucs_value':corine_hilucs[y['properties']['clc_code']]},**{'clc_value':y['properties']['clc_code']},**{'atts':y['properties']},**{'dataset_id':4}}),'parameters':['attributes_id','feature_json']},
    'object_insert_statement':{'function':(lambda w,x,y,z: "INSERT INTO %s (fid,dataset_fid,z_value,geom,valid_from,valid_to) VALUES (%s,%s,%s,ST_SetSRID(ST_Multi(ST_GeomFromText('%s')),4326),'%s','%s')" % (w,x,z['dataset_id'],z['z_value'],y,z['valid_from'], z['valid_to'] ) ),'parameters':['object_table_name','object_id','object_geom','object_properties']},\
    'attributes_insert_statement':{'function':(lambda w,x,y,z: "INSERT INTO %s (fid,hilucs_id,atts,dataset_fid,clc_id) VALUES (%s,%s,('%s'::json),%s,%s)" % (w, x,z['hilucs_value'],json.dumps(y['properties']),z['dataset_id'],z['clc_value']) ),'parameters':['attributes_table_name','attributes_id','feature_json','attributes_properties']},\
    'attributes2object_insert_statement':{'function':(lambda x,y,z: "INSERT INTO %s (object_fid,atts_fid,atts_origin) VALUES (%s,%s,1)" % (z,x,y) ),'parameters':['object_id','attributes_id','attributes2object_table_name']},\
    'object2admunit_insert_statement':{'function':(lambda x,y,z: "INSERT INTO %s (object_fid,unit_fid) VALUES (%s,'%s')" % (z,x,y) ),'parameters':['object_id','admunit_id','object2nuts_table_name']},\
    'four_insert_statements':{'function':(lambda w,x,y,z: [w,x,y,z]),'parameters':['object_insert_statement','attributes_insert_statement','attributes2object_insert_statement','object2admunit_insert_statement']}
    }
    # apply_function() resolves whichever key the caller asks for
    return o_dict
# Build per-administrative-unit OLU tables from the Corine and Urban Atlas
# country layers, for every level-3 unit fetched by olu_cur2 above.
while True:
    row = olu_cur2.fetchone()
    if row is None:
        # BUG FIX: the original unpacked fetchone() directly, so exhausting
        # the cursor crashed with a TypeError and the sequence resync below
        # never ran.  Exit the loop cleanly instead.
        break
    geom, nuts_id = row
    # drop and recreate the four per-unit child tables (inheritance partitioning)
    olu_cur.execute('drop table if exists olu_object.%s' % nuts_id.lower())
    olu_cur.execute('drop table if exists olu_attribute_set.%s' % nuts_id.lower())
    olu_cur.execute('drop table if exists atts_to_object.%s' % nuts_id.lower())
    olu_cur.execute('drop table if exists olu_object_to_admin_unit.%s' % nuts_id.lower())
    olu_cur.execute('create table if not exists olu_object.%s () inherits (olu2.olu_object)' % nuts_id.lower())
    olu_cur.execute('create table if not exists olu_attribute_set.%s () inherits (olu2.olu_attribute_set)' % nuts_id.lower())
    olu_cur.execute('create table if not exists atts_to_object.%s () inherits (olu2.atts_to_object)' % nuts_id.lower())
    olu_cur.execute('create table if not exists olu_object_to_admin_unit.%s () inherits (olu2.olu_object_to_admin_unit)' % nuts_id.lower())
    olu_conn.commit()
    olu_object_table='olu_object.%s' % nuts_id.lower()
    olu_attribute_set_table='olu_attribute_set.%s' % nuts_id.lower()
    olu_atts_to_object_table='atts_to_object.%s' % nuts_id.lower()
    olu_object_to_admin_unit_table='olu_object_to_admin_unit.%s' % nuts_id.lower()
    # Corine: country table named by the 2-letter prefix of the NUTS id
    corine_cur.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'data' and table_name = '%s'" % nuts_id[:2].lower())
    if corine_cur.fetchone() is not None:
        corine_layer=conn_corine.ExecuteSQL("select * from data.%s where st_intersects(geom,st_geomfromtext('%s',4326)) and year='2018'" % (nuts_id[:2].lower(),ogr.CreateGeometryFromWkb(geom).ExportToWkt() ) )
        parts=corine_layer.GetFeatureCount()
        number=100
        i=1
        count=0
        j=math.ceil(parts/number)
        # full batches of `number` features, committing once per batch
        while i<j:
            for k in range(number):
                olu_id=next(olu_id_gen)
                atts_id=next(olu_atts_id_gen)
                f=corine_layer.GetNextFeature()
                # plain loop instead of a side-effect list comprehension
                for stmt in apply_function(corine_transformations(f,olu_id,atts_id,nuts_id,olu_object_table,olu_attribute_set_table,olu_atts_to_object_table,olu_object_to_admin_unit_table),'four_insert_statements'):
                    olu_cur.execute(stmt)
                count+=1
            olu_conn.commit()
            i+=1
        # leftover features of the final partial batch
        for k in range(parts-count):
            olu_id=next(olu_id_gen)
            atts_id=next(olu_atts_id_gen)
            f=corine_layer.GetNextFeature()
            for stmt in apply_function(corine_transformations(f,olu_id,atts_id,nuts_id,olu_object_table,olu_attribute_set_table,olu_atts_to_object_table,olu_object_to_admin_unit_table),'four_insert_statements'):
                olu_cur.execute(stmt)
        olu_conn.commit()
        del(corine_layer)
    # Urban Atlas, excluding codes 12210/12220/12230
    ua_cur.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'data' and table_name = '%s'" % nuts_id[:2].lower())
    if ua_cur.fetchone() is not None:
        ua_layer=conn_ua.ExecuteSQL("select * from data.%s where st_intersects(geom,st_geomfromtext('%s',4326)) and code2012 not in ('12210','12220','12230')" % (nuts_id[:2].lower(),ogr.CreateGeometryFromWkb(geom).ExportToWkt() ) )
        parts=ua_layer.GetFeatureCount()
        number=100
        i=1
        count=0
        j=math.ceil(parts/number)
        while i<j:
            for k in range(number):
                olu_id=next(olu_id_gen)
                atts_id=next(olu_atts_id_gen)
                f=ua_layer.GetNextFeature()
                for stmt in apply_function(ua_transformations(f,olu_id,atts_id,nuts_id,olu_object_table,olu_attribute_set_table,olu_atts_to_object_table,olu_object_to_admin_unit_table),'four_insert_statements'):
                    olu_cur.execute(stmt)
                count+=1
            olu_conn.commit()
            i+=1
        for k in range(parts-count):
            olu_id=next(olu_id_gen)
            atts_id=next(olu_atts_id_gen)
            f=ua_layer.GetNextFeature()
            for stmt in apply_function(ua_transformations(f,olu_id,atts_id,nuts_id,olu_object_table,olu_attribute_set_table,olu_atts_to_object_table,olu_object_to_admin_unit_table),'four_insert_statements'):
                olu_cur.execute(stmt)
        olu_conn.commit()
        del(ua_layer)
# resynchronise the Postgres sequences with the highest ids actually inserted
olu_cur.execute('SELECT max(fid) FROM olu2.olu_object')
max_value=olu_cur.fetchone()[0]
olu_cur.execute("select setval('olu2.olu_object_fid_seq', %s)" % max_value)
# BUG FIX: the original statement was 'SELECT max(fid) olu2.olu_attribute_set'
# (missing FROM), which would raise a syntax error.
olu_cur.execute('SELECT max(fid) FROM olu2.olu_attribute_set')
max_value=olu_cur.fetchone()[0]
olu_cur.execute("select setval('olu2.olu_attributes_fid_seq', %s)" % max_value)
| [
"dmitrii@seznam.cz"
] | dmitrii@seznam.cz |
fe49918b93bc0175098d4277f76f2d191bfbce49 | 5a69cab2b5ed410f944b57f3ec586b9c624a735c | /lib/demo_test/multi_platform_demo | 0cd943b292114efcecb53e660e9883bb98850ba5 | [
"Apache-2.0"
] | permissive | T3kton/architect | 214a176dd5f9a9bc340d358d692e16a61f362ebe | 3368a66c0c1836eca12dbc7af97f01d5ba13984a | refs/heads/master | 2021-01-20T09:03:25.451300 | 2018-09-17T23:03:24 | 2018-09-17T23:03:24 | 90,217,916 | 0 | 2 | Apache-2.0 | 2018-09-17T23:03:24 | 2017-05-04T03:29:18 | Python | UTF-8 | Python | false | false | 2,211 | #!/usr/bin/env python3
import os
import django
# Django must be configured before the ORM models below can be imported.
os.environ.setdefault( 'DJANGO_SETTINGS_MODULE', 'architect.settings' )
django.setup()
from datetime import datetime, timezone, timedelta
from architect.Contractor.models import Complex, BluePrint
from architect.Plan.models import Plan, PlanComplex, PlanBluePrint, PlanTimeSeries
from architect.TimeSeries.models import CostTS, AvailabilityTS, ReliabilityTS, RawTimeSeries
# Demo fixture setup: name the blueprints, create the 'demo' Plan (only if it
# does not already exist), then give every Complex its time-series buckets.
print( 'Giving Blueprints their names...')
for blueprint in BluePrint.objects.filter( name__isnull=True ):
  blueprint.name = blueprint.contractor_id
  blueprint.full_clean()
  blueprint.save()
try:
  plan = Plan.objects.get( name='demo' )
except Plan.DoesNotExist:
  print( 'Creating the Plan...' )
  plan = Plan( name='demo', description='demo', enabled=True )
  plan.script = """
cut_off: 0
demo: weighted( *INDEX*, @count, ( 1 / *COST* ) )
#demo-web: above_inclusive( demo, cut_off )
#demo-ssh: below( demo, cut_off )
"""
  plan.config_values = {}
  plan.max_inflight = 10
  # back-date the last change so the plan is immediately eligible to run
  plan.last_change = datetime.now( timezone.utc ) - timedelta( days=1 )
  plan.can_build = True
  plan.can_destroy = True
  plan.full_clean()
  plan.save()
  # raw time series feeding the 'count' variable used by the plan script
  ts = RawTimeSeries( metric='data.count' )
  ts.full_clean()
  ts.save()
  pts = PlanTimeSeries( plan=plan, timeseries=ts, script_name='count' )
  pts.full_clean()
  pts.save()
  print( 'setting up blueprint link...' )
  blueprint = BluePrint.objects.get( name='demo-web' )
  pb = PlanBluePrint( plan=plan, blueprint=blueprint )
  pb.full_clean()
  pb.save()
  blueprint = BluePrint.objects.get( name='demo-ssh' )
  pb = PlanBluePrint( plan=plan, blueprint=blueprint )
  pb.full_clean()
  pb.save()
print( 'Giving Complexes their tsnames, and setting up buckets...')
for complex in Complex.objects.filter( tsname__isnull=True ):
  complex.tsname = complex.contractor_id
  complex.full_clean()
  complex.save()
  # one cost/availability/reliability series per complex, linked to the plan
  costts = CostTS( complex=complex )
  costts.save()
  availts = AvailabilityTS( complex=complex )
  availts.save()
  reliabts = ReliabilityTS( complex=complex )
  reliabts.save()
  pc = PlanComplex( plan=plan, complex=complex )
  pc.cost = costts
  pc.availability = availts
  pc.reliability = reliabts
  pc.full_clean()
  pc.save()
| [
"pnhowe@gmail.com"
] | pnhowe@gmail.com | |
0a6b7d03cc154ca43ef1f5ebee317ab1ffc0e58e | e9377cf065f226ba19abef0572c33d2f5b554f7a | /main.py | 82b099d9838c9fb55edb20c59aa60661f553eef1 | [] | no_license | gurup0907/wHACKIEST2021-ragequit | 86660a08447bef87fcfa76aff7e17d1f21124a36 | e7c5b540d9cbdbb09c043f60685702bb05601100 | refs/heads/main | 2023-03-29T17:51:25.698792 | 2021-04-03T18:59:10 | 2021-04-03T18:59:10 | 353,921,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,833 | py | import cv2
import time
import numpy as np
import os
import json
import base64
# Preload models at import time so each request/image does not pay the cost.
print("[INFO] loading YOLO...")
# Darknet network definition + trained weights for the posture model
net = cv2.dnn.readNetFromDarknet("./yolo_configs/yolov3-obj.cfg", "./yolo_configs/posture_yolov3.weights")
print("[INFO] loading labels...")
# one class label per line; trailing newline stripped before splitting
with open("./yolo_configs/posture.names", 'rt') as f:
    NAMES = f.read().rstrip('\n').split('\n')
# Assign colors for drawing bounding boxes
# BGR color per class id -- assumes at most 6 classes (TODO confirm)
COLORS = [
    [0, 200, 0], [20, 45, 144],
    [157, 224, 173], [0, 0, 232],
    [26, 147, 111], [64, 100, 44]
]
def rescale_image(input_img: np.ndarray) -> np.ndarray:
    """Shrink tall images; anything under 1000 px high passes through as-is.

    The shrink divisor is (leading decimal digit of the height) + 1, matching
    the original heuristic, so e.g. a 2500-px-tall image is divided by 3.
    """
    height, width = input_img.shape[:2]
    if height < 1000:
        return input_img
    factor = int(str(height)[0]) + 1
    return cv2.resize(input_img, (int(width / factor), int(height / factor)))
def predict_yolo(input_img: np.ndarray) -> list:
    """Run one timed forward pass of the module-level YOLO net on *input_img*."""
    layer_names = net.getLayerNames()
    output_layers = [layer_names[idx[0] - 1] for idx in net.getUnconnectedOutLayers()]
    # 256x256 input blob, pixel values scaled to [0, 1], B and R channels swapped
    blob = cv2.dnn.blobFromImage(input_img, 1 / 255.0, (256, 256), swapRB=True, crop=False)
    net.setInput(blob)
    started = time.time()
    layer_outputs = net.forward(output_layers)
    finished = time.time()
    print("[INFO] YOLO took {:.6f} seconds".format(finished - started))
    return layer_outputs
def draw_bound(input_img: np.ndarray, layer_outputs: list, confidence_level: float, threshold: float) -> [np.ndarray, list]:
    """Filter YOLO detections and draw the surviving boxes on *input_img*.

    input_img        -- image the boxes are drawn onto (modified in place)
    layer_outputs    -- raw output of predict_yolo()
    confidence_level -- minimum class score for a detection to survive
    threshold        -- overlap threshold for non-maxima suppression
    Returns [annotated image, [[label, confidence], ...]].
    """
    boxes = []
    confidences = []
    class_id = []  # best class index per kept box (despite the singular name)
    (H, W) = input_img.shape[:2]
    for output in layer_outputs:
        for detection in output:
            # first 4 values are the box; values from index 5 on are the
            # per-class scores (index 4 is unused here)
            scores = detection[5:]
            class_ids = np.argmax(scores)
            confidence = scores[class_ids]
            if confidence > confidence_level:
                # scale the relative box coordinates back to pixels
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # use the center (x, y)-coordinates to derive the top-left corner
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                class_id.append(class_ids)
    # non-maxima suppression drops overlapping candidates for the same object
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_level, threshold)
    results = []
    if len(idxs) > 0:
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[class_id[i]]]
            cv2.rectangle(input_img, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(NAMES[class_id[i]], confidences[i])
            cv2.putText(input_img, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
            results.append([NAMES[class_id[i]], confidences[i]])
    return [input_img, results]
def predict(filename) -> list:
    """Run detection on an uploaded image and return the predictions plus an
    annotated image as a base64 data URI.

    filename -- a file-like object supporting .read() (e.g. an uploaded file)
    Returns [{'prediction': [[label, confidence], ...], 'uri': 'data:...'}].
    """
    # np.frombuffer replaces the deprecated np.fromstring for raw bytes
    image = cv2.imdecode(np.frombuffer(filename.read(), np.uint8), cv2.IMREAD_COLOR)
    image = rescale_image(image)
    layer_outputs = predict_yolo(image)
    results = draw_bound(image, layer_outputs, 0.5, 0.4)
    ret, buffer = cv2.imencode('.png', results[0])
    encoded_im = base64.b64encode(buffer).decode()
    # BUG FIX: the buffer is PNG-encoded, so the data URI must declare
    # image/png (it previously claimed image/jpeg)
    mime = "image/png"
    #output = os.path.join("input/"+ filename+"")
    #cv2.imwrite(output, results[0])
    return [{
        "prediction": results[1],
        "uri": "data:%s;base64,%s" %(mime, encoded_im)
    }]
if __name__ == "__main__":
    # Batch mode: run detection over every file in ./input and show each
    # annotated image, waiting for a keypress between images.
    for f in os.listdir("./input"):
        image = cv2.imread("./input/" + f)
        image = rescale_image(image)
        layer_outputs = predict_yolo(image)
        results = draw_bound(image, layer_outputs, 0.5, 0.4)
        print(f, results[1])
        # show the output image
        cv2.imshow(f, results[0])
        print("")
        cv2.waitKey(0)
"krishnanyadu60@gmail.com"
] | krishnanyadu60@gmail.com |
ab19333820fd54ea2dcd676cff73a77ba47f222c | f0dcf2d670c0b472e234df501024dd7851b6aeed | /Game_Project/Sample.py | 9d12699ac78552ec203cf1b5a7af9ef4f91365dd | [] | no_license | CRTao/DCP3510_Introduction-to-Artificial-Intelligence | bfcb19a670f281b330fc68b55cb45e41d970c51c | 110060b16985920f8206d6bfb2728cf5b1028451 | refs/heads/master | 2022-11-20T10:23:52.978096 | 2020-07-14T10:33:12 | 2020-07-14T10:33:12 | 279,555,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py |
import STcpClient
'''
    It is this program's turn to move a piece.
    board : board state (list of list); board[i][j] = row i, column j (i, j zero-based)
            0 = empty, 1 = black, 2 = white
    is_black : True means this program plays black, False means it plays white
    return step
    step : list of list, step = [(r1, c1), (r2, c2) ...]
            r1, c1 is the (row, column) coordinate of the piece to move (zero-based)
            ri, ci (i>1) describe the path along which that piece moves
'''
def GetStep(board, is_black):
    """Return this program's move for *board* (see the format notes above).

    Stub: currently returns None; the move-search logic goes here.
    """
    # fill your program here
    pass
# Game loop: receive the board from the server, compute a move, send it back.
while(True):
    (stop_program, id_package, board, is_black) = STcpClient.GetBoard()
    if(stop_program):
        break
    listStep = GetStep(board, is_black)
    STcpClient.SendStep(id_package, listStep)
| [
"ms0599027@gmail.com"
] | ms0599027@gmail.com |
6cc98ec5242239b297c10d8179c3d873d574fb9b | 52b6cf6fda991e6f6a3de53f1b79cc3bfbc69575 | /backend/app/api/users/routes.py | a8d3193944914bfe7cfd5d5f04f871f4724974ff | [] | no_license | nikitakuznetsoff/financial-predictor | 94315e0aff16e6cd124a61095ba1f7668fb9b328 | 80ab84757ebe68d482bd50ff4d0571ce1e311e41 | refs/heads/main | 2023-06-25T17:45:40.859436 | 2021-07-21T21:15:59 | 2021-07-21T21:15:59 | 354,827,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,235 | py | from flask import Blueprint, request, make_response, jsonify
from app.models import User
from app.repository import users_repo as repo
from datetime import datetime
bp = Blueprint('users', __name__, url_prefix='/api/users')
@bp.route('', methods=['POST', 'GET', 'DELETE'])
def user():
    """Account collection endpoint.

    POST   -- register a new account; returns {'user': id, 'token': ...}.
    GET    -- return the authenticated user's own profile.
    DELETE -- delete the authenticated user's account.
    Authentication is an 'Authorization: Basic <token>' header carrying the
    token issued by User.generate_auth_token().
    """
    if request.method == 'POST':
        try:
            body = request.get_json()
            email = body.get('email')
            password = body.get('password')
            username = body.get('username')
        except:
            return "incorrect request body", 400
        # reject registration when either the email or the username is taken
        user = repo.get_user_by_email(email=email)
        if user:
            return "user exist", 409
        user = repo.get_user_by_username(username=username)
        if user:
            return "user exist", 409
        user = repo.create_user(
            email=email,
            password=password,
            username=username,
            reg_date=datetime.now()
        )
        # try:
        #     user = repo.create_user(
        #         email=email,
        #         password=password,
        #         username=username,
        #         reg_date=datetime.now()
        #     )
        # except:
        #     return "internal error", 500
        # fetch the persisted record before issuing the token
        user = repo.get_user_by_email(email=email)
        token = user.generate_auth_token().decode('utf-8')
        resp = make_response(jsonify({'user': user.id, 'token': token}), 200)
        return resp
    if request.method == 'GET':
        if 'Authorization' not in request.headers:
            return "unauthorized", 401
        try:
            method, token = request.headers['Authorization'].split(' ')
        except:
            return "incorrect auth method or token", 401
        if method != 'Basic':
            return 'incorrect auth method', 401
        user_id = User.verify_auth_token(token)
        if not user_id:
            return "error with validation access token", 401
        user = repo.get_user_by_id(user_id)
        if not user:
            return "incorrect user id", 400
        user_data = user.get_dict_repr()
        data = { 'user': user_data }
        return make_response(
            jsonify(data),200
        )
    if request.method == 'DELETE':
        if 'Authorization' not in request.headers:
            return "unauthorized", 401
        try:
            method, token = request.headers['Authorization'].split(' ')
            if method != 'Basic':
                raise Exception
        except:
            return "incorrect auth method or token", 401
        user_id = User.verify_auth_token(token)
        if not user_id:
            return "error with validation access token", 401
        user = repo.get_user_by_id(user_id)
        if not user:
            return "incorrect user id", 400
        try:
            repo.delete_user(user_id)
        except:
            return "interval error", 500
        return "success", 200
    # any other verb accepted by the route decorator
    return "incorrect method", 400
@bp.route('/auth', methods=['POST', 'GET'])
def auth_post():
    """Issue an auth token for valid email/password credentials (POST only)."""
    if request.method != 'POST':
        return "incorrect method", 400
    credentials = request.get_json()
    account = repo.get_user_by_email(credentials.get('email'))
    if not account:
        return "unregistered user", 404
    if not account.verify_password(credentials.get('password')):
        return "incorrect password", 400
    token = account.generate_auth_token().decode('utf-8')
    return make_response(jsonify({'user': account.id, 'token': token}), 200)
@bp.route('/subscriptions', methods=['GET'])
def get_user_subscriptions():
    """Return the subscription list of the user identified by the Basic token."""
    auth_header = request.headers.get('Authorization')
    if auth_header is None:
        return "unauthorized", 401
    try:
        scheme, token = auth_header.split(' ')
    except:
        return "incorrect auth method or token", 401
    if scheme != 'Basic':
        return 'incorrect auth method', 401
    uid = User.verify_auth_token(token)
    if not uid:
        return "error with validation access token", 401
    account = repo.get_user_by_id(uid)
    if not account:
        return "incorrect user id", 400
    payload = jsonify({'subscriptions': account.subscriptions})
    response = make_response(payload, 200)
    response.headers['Content-Type'] = 'application/json; charset=utf-8'
    return response
@bp.route('/subscribe', methods=['POST'])
def subscribe():
    """Subscribe the authenticated user to the security named by 'secid'.

    BUG FIX: the non-Basic scheme check used to evaluate the error tuple as a
    bare (no-op) expression instead of returning it, so requests with a wrong
    auth scheme fell straight through to token verification.
    """
    if 'Authorization' not in request.headers:
        return "unauthorized", 401
    try:
        method, token = request.headers['Authorization'].split(' ')
    except:
        return "incorrect auth method or token", 401
    if method != 'Basic':
        return 'incorrect auth method', 401
    user_id = User.verify_auth_token(token)
    if not user_id:
        return "error with validation access token", 401
    user = repo.get_user_by_id(user_id)
    if not user:
        return "incorrect user id", 400
    try:
        body = request.get_json()
        secid = body.get('secid')
    except:
        return "incorrect request body", 400
    repo.add_user_subscription(user.id, secid)
    return "subscription successful", 200
@bp.route('/unsubscribe', methods=['POST'])
def unsubscribe():
    """Remove the authenticated user's subscription to 'secid'.

    BUG FIX: the non-Basic scheme check used to evaluate the error tuple as a
    bare (no-op) expression instead of returning it, letting wrong auth
    schemes through to token verification.
    """
    if 'Authorization' not in request.headers:
        return "unauthorized", 401
    try:
        method, token = request.headers['Authorization'].split(' ')
    except:
        return "incorrect auth method or token", 401
    if method != 'Basic':
        return 'incorrect auth method', 401
    user_id = User.verify_auth_token(token)
    if not user_id:
        return "error with validation access token", 401
    user = repo.get_user_by_id(user_id)
    if not user:
        return "incorrect user id", 400
    body = request.get_json()
    try:
        secid = body.get('secid')
    except:
        return "incorrect request body", 400
    try:
        repo.remove_user_subscriptions(user.id, secid)
    except:
        return "server error", 500
    return "unsubscription successful", 200
@bp.route('/username', methods=['POST'])
def change_user_username():
    """Rename the authenticated user, rejecting usernames already in use."""
    header = request.headers.get('Authorization')
    if header is None:
        return "unauthorized", 401
    try:
        scheme, token = header.split(' ')
        if scheme != 'Basic':
            raise Exception
    except:
        return "incorrect auth method or token", 401
    uid = User.verify_auth_token(token)
    if not uid:
        return "error with validation access token", 401
    if not repo.get_user_by_id(uid):
        return "incorrect user id", 400
    try:
        new_name = request.get_json().get('username')
    except:
        return "incorrect request body", 400
    if repo.get_user_by_username(new_name):
        return "username already used", 409
    if not repo.change_username(uid, new_name):
        return "interval error", 500
    return "success", 200
@bp.route('/email', methods=['POST'])
def change_user_email():
    """Change the authenticated user's email, rejecting addresses in use."""
    header = request.headers.get('Authorization')
    if header is None:
        return "unauthorized", 401
    try:
        scheme, token = header.split(' ')
        if scheme != 'Basic':
            raise Exception
    except:
        return "incorrect auth method or token", 401
    uid = User.verify_auth_token(token)
    if not uid:
        return "error with validation access token", 401
    if not repo.get_user_by_id(uid):
        return "incorrect user id", 400
    try:
        new_email = request.get_json().get('email')
    except:
        return "incorrect request body", 400
    if repo.get_user_by_email(new_email):
        return "email already used", 409
    if not repo.change_email(uid, new_email):
        return "interval error", 500
    return "success", 200
@bp.route('/password', methods=['POST'])
def change_user_password():
    """Set a new password for the authenticated user."""
    header = request.headers.get('Authorization')
    if header is None:
        return "unauthorized", 401
    try:
        scheme, token = header.split(' ')
        if scheme != 'Basic':
            raise Exception
    except:
        return "incorrect auth method or token", 401
    uid = User.verify_auth_token(token)
    if not uid:
        return "error with validation access token", 401
    if not repo.get_user_by_id(uid):
        return "incorrect user id", 400
    try:
        new_password = request.get_json().get('password')
    except:
        return "incorrect request body", 400
    if not repo.change_password(uid, new_password):
        return "interval error", 500
    return "success", 200
@bp.route('<int:id>', methods=['GET'])
def get_user(id):
    """Public profile lookup by numeric user id."""
    account = repo.get_user_by_id(id)
    if not account:
        return "incorrect user id", 400
    response = make_response(jsonify({'data': account.get_dict_repr()}), 200)
    response.headers['Content-Type'] = 'application/json; charset=utf-8'
    return response
"devnkuznetsov@gmail.com"
] | devnkuznetsov@gmail.com |
6671a1befd491dd6f5399bc3c26c9e7b9427b1fa | 34ec03bef0371def736c89e3943a3b3f178d9615 | /Homeworks/03/dataset_downloader.py | 582ea2474b201eabd4a728297b37b18927041b2f | [] | no_license | Mikhail-off/DL-2019 | 3c4b936e41c75e2239e2db31fa31b3697ce17af1 | b7728179bbeb852c0c642af85b413a839466c61b | refs/heads/master | 2020-07-28T23:53:49.590405 | 2020-04-02T14:18:09 | 2020-04-02T14:18:09 | 209,586,350 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | import tarfile
import os
import urllib.request
# Base URL of the Berkeley pix2pix dataset mirror
DATASETS_REPO = 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/'
# Archives are gzipped tarballs, downloaded to a fixed temporary name first
ARC_EXT = '.tar.gz'
TEMP_ARC_FILENAME = 'temp' + ARC_EXT
class DatasetDownloader:
    """Downloads and extracts pix2pix datasets that are not already on disk."""

    def __init__(self, dataset_names, data_path):
        self.dataset_names = dataset_names  # dataset ids, e.g. ['facades']
        self.data_path = data_path          # directory the archives extract into

    def download(self):
        """Fetch and extract every dataset that is not yet present."""
        for dataset_name in self.dataset_names:
            if self._is_downloaded(dataset_name):
                print(dataset_name, 'dataset is already downloaded')
                continue
            print('Downloading', dataset_name)
            dataset_url = DATASETS_REPO + dataset_name + ARC_EXT
            arc_file = os.path.join(self.data_path, TEMP_ARC_FILENAME)
            urllib.request.urlretrieve(dataset_url, arc_file)
            try:
                with tarfile.open(arc_file, 'r:gz') as arc:
                    print('Extracting', dataset_name)
                    # NOTE(review): extractall trusts archive member paths; fine
                    # for this known source, unsafe for untrusted archives.
                    arc.extractall(self.data_path)
            finally:
                # FIX: remove the temp archive even if extraction raises
                # (it previously leaked on failure).
                os.remove(arc_file)

    def _is_downloaded(self, dataset_name):
        """True if <data_path>/<dataset_name> exists as a directory."""
        # isdir already implies existence, so the separate exists() check
        # of the original was redundant
        return os.path.isdir(os.path.join(self.data_path, dataset_name))
| [
"hukutoc46@gmail.com"
] | hukutoc46@gmail.com |
f4a5841f5d31f26e0da2530d937bbf5ce64db363 | ac1bbabc7c1b3149711c416dd8b5f5969a0dbd04 | /Programming Fundamentals/objects_and_classes/class.py | 7289e5275a0ea25652981f2f7b0b49c310acc71b | [] | no_license | AssiaHristova/SoftUni-Software-Engineering | 9e904221e50cad5b6c7953c81bc8b3b23c1e8d24 | d4910098ed5aa19770d30a7d9cdf49f9aeaea165 | refs/heads/main | 2023-07-04T04:47:00.524677 | 2021-08-08T23:31:51 | 2021-08-08T23:31:51 | 324,847,727 | 1 | 0 | null | 2021-08-08T23:31:52 | 2020-12-27T20:58:01 | Python | UTF-8 | Python | false | false | 701 | py | class Class:
    def __init__(self, name):
        """Create an empty school class (form) with the given name."""
        self.name = name        # e.g. "11B"
        self.students = []      # student names, parallel to self.grades
        self.grades = []        # grades, parallel to self.students
    # capacity cap for add_student; name-mangled to _Class__students_count
    __students_count = 22
def add_student(self, name, grade):
if len(self.students) < Class.__students_count:
self.students.append(name)
self.grades.append(grade)
def get_average_grade(self):
return sum(self.grades) / len(self.grades)
    def __repr__(self):
        # Human-readable roster plus the average grade to two decimals.
        # NOTE(review): calls get_average_grade, so printing a class with no
        # grades raises ZeroDivisionError.
        return f"The students in {self.name}: {', '.join(self.students)}. Average grade: {Class.get_average_grade(self):.2f}"
# Demo: build a class, enroll three students, print the summary line.
a_class = Class("11B")
a_class.add_student("Peter", 4.80)
a_class.add_student("George", 6.00)
a_class.add_student("Amy", 3.50)
print(a_class)
| [
"assiaphristova@gmail.com"
] | assiaphristova@gmail.com |
dda48464dce73f3af0af909f3571d348d3d0d84e | f8dd8d046100f1223713e047074f30c7ce5a59cd | /testing/epilogue/decorators.py | 35dbdffbffc9e1b88e69fb384d455179a4f387c3 | [] | no_license | dotslash227/98fitcortex | 57aed99270799eff68fdff62db0b8c1d9aabd4a2 | bd4002151e5def00c3dea1f5a1abfb06ba3e809a | refs/heads/master | 2022-12-17T00:51:20.302948 | 2019-02-27T13:54:22 | 2019-02-27T13:54:22 | 197,362,824 | 0 | 0 | null | 2022-12-08T00:02:42 | 2019-07-17T09:55:14 | HTML | UTF-8 | Python | false | false | 3,694 | py | import functools
import datetime
from django.db import models
def last_days(days = 6):
today = datetime.datetime.today().date()
while days >= 0:
val = today - datetime.timedelta(days = days)
days -= 1
yield val
def last_weeks(weeks = 6):
today = datetime.datetime.today().date()
current_year , current_week , current_day = today.isocalendar()
start_week = current_week
year = current_year
if start_week >= 6:
while weeks >= 0:
yield (year ,current_week)
current_week -= 1
weeks -= 1
else:
while weeks >= 0:
yield (year , current_week)
current_week -= 1
current_week = abs(52+current_week)%52
if current_week == 0:
current_week = 52
year -= 1
weeks -= 1
def add_today(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
kwargs['today'] = datetime.datetime.today().date()
return f(*args , **kwargs)
return wrapper
def add_empty_day_in_week(defaults , days_range = 6):
def decorator(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
vals = f(*args , **kwargs)
days = set(vals.values_list("date" , flat = True))
data = []
for e in last_days(days = days_range):
if e not in days:
d = {
"date" : e,
**defaults,
}
data.append(d)
return data + list(vals)
return wrapper
return decorator
def add_empty_weeks(defaults , sort = lambda x : (x['year'],x['week'])):
def decorator(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
weeks , data = f(*args , **kwargs)
for y,w in last_weeks():
if (y,w) not in weeks:
d = {
"week" : w,
"year" : y,
**defaults
}
data.append(d)
return sorted(data , key = sort)
return wrapper
return decorator
def sorter(key , reverse = False):
def decorator(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
vals = f(*args , **kwargs)
return sorted(vals , key = key , reverse = reverse)
return wrapper
return decorator
def scale_field(field,goal):
def decorator(fn):
@functools.wraps(fn)
def wrapper(*args , **kwargs):
returned_value = fn(*args , **kwargs)
field_values = (e.get(field) for e in returned_value)
scaling_factor = 100/(max(goal ,max(field_values)))
for e in returned_value:
e['plotting_value'] = e.get(field , 0) * scaling_factor
return returned_value
return wrapper
return decorator
def weekly_average(field):
def decorator(f):
@functools.wraps(f)
def wrapper(*args , **kwargs):
vals = f(*args , **kwargs)
weeks = set(vals.values_list("week" , flat = True) )
data = []
curr_week = datetime.datetime.now().isocalendar()[1]
for e in range(curr_week - 6 , curr_week +1):
if e not in weeks:
data.append({
"week" : e,
"avg" : 0
})
continue
avg = vals.filter(
week = e
).aggregate(
avg = models.Sum(field)
)
d = {
"week" : e,
"avg" : avg['avg']
}
data.append(d)
return data
return wrapper
return decorator
def monthly_average(field):
def decorator(f):
@functools.wraps(f)
def wrapper(self):
vals = f(self)
months = set(vals.values_list("month" , flat = True) )
data = []
for e in months:
avg = vals.filter(
month = e
).aggregate(
avg = models.Avg(field)
)
d = {
"month" : e,
"avg" : avg['avg']
}
data.append(d)
return data
return wrapper
return decorator
def map_transform_queryset(iterable , *fields):
def decorator(f):
@functools.wraps(f)
def mapper(*args , **kwargs):
l = map(lambda x : functools.partial(x , *fields) , iterable)
val = f(*args , **kwargs)
d = {}
for e in l:
d.update(**e(val))
return d
return mapper
return decorator
| [
"shikhar.chauhan@live.com"
] | shikhar.chauhan@live.com |
3ac83d2ac2af4145c059505d5214c148e2fa8ab9 | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-MailKit/PyObjCTest/test_memessagedecoder.py | 9ebaeafd6990e8c6d9d71d48d0c727eca4fb01ad | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 198 | py | from PyObjCTools.TestSupport import TestCase
import MailKit # noqa: F401
class TestMEMessageDecoder(TestCase):
def test_protocols(self):
self.assertProtocolExists("MEMessageDecoder")
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
0c631d542a5fcf84983bc34ab35fa12f1af594d9 | 6d548c4f4de49a1470113c80d2182e315e6c3811 | /LaboratorioDeAlgoritmos/exemplo_funcao_empacotamento.py | e4de180967bcf81c4dba2687d62112c3997c240f | [] | no_license | rodrigoksaito/anchieta | 646ca5a7d34e028b7a244d2ab9e57d3b114d36d6 | dd5221391e24603a9d1fe21a3c44a5277165d96b | refs/heads/master | 2021-07-08T14:36:04.029040 | 2018-11-02T00:56:15 | 2018-11-02T00:56:15 | 103,395,258 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | def somainteiros(* inteiros):
for valor in inteiros:
print(f'{valor} ', end='')
print('')
def containteiros (* inteiros):
tamanho = len(inteiros)
print(f'A quantidade de inteiros recebidos sao {tamanho}')
#chamada das funcoes
somainteiros(5, 4)
somainteiros(6, 2, 2)
somainteiros(5, 3, 2, 1)
containteiros(3, 1)
containteiros(3, 1, 5, 6, 8, 4)
#passando uma lista como parametro
def dobraLista(lista):
posicao = 0
while posicao < len(lista):
lista[posicao] *= 2
posicao += 1
listaValores = [1, 2, 3, 4, 5, 6]
dobraLista(listaValores)
print('Valores da lista dobrada: ',listaValores)
| [
"rodrigo.ksaito@gmail.com"
] | rodrigo.ksaito@gmail.com |
4291138fa9f83457d0086a492f9a410c62db6ac8 | 51131380cc9cbd114966c2d7b36980675917dd46 | /app/core/models.py | d52902f40ba92bcd404c6462bb1cf15ff85b8350 | [] | no_license | agasca94/recipe-api | e0c96ef5a7dcced44f5d9700a6682f49e666f44d | 3c22ae69fa07dcca665f606e71547cb2f71b58a9 | refs/heads/master | 2020-04-29T14:46:39.434231 | 2019-04-13T05:19:48 | 2019-04-13T05:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
from core import utils
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **kwargs):
"""Create and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **kwargs)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
return self.create_user(
email,
password,
is_staff=True,
is_superuser=True
)
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used for a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredient to be used in a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""Recipe object"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
image = models.ImageField(
null=True,
upload_to=utils.recipe_image_file_path
)
def __str__(self):
return self.title
| [
"agasca1994@gmail.com"
] | agasca1994@gmail.com |
9a46dba46851efdae40042afb4cf0b10eb576bb5 | 3c8ce930338597ad18d7435e4e4f60b0dc4d1a66 | /ex13.5.py | 681614865994029b65927a32e536491f78e0cf88 | [] | no_license | nimbinatus/LPHW | 1af22668b3ecf32fd62f6d99911d1e6800b608a9 | 6fc14aa5f7e2e248dfb7ea8c2d1e1daba949920a | refs/heads/master | 2021-01-20T05:58:07.209743 | 2015-06-12T21:21:13 | 2015-06-12T21:21:13 | 35,133,396 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from sys import argv
script, name, age = argv
print "My name is:", name
print "My age is:", age
hame = raw_input("Name? ")
age = raw_input("Age? ")
| [
"nimbinatus@users.noreply.github.com"
] | nimbinatus@users.noreply.github.com |
7bec6a1f34fb836517447489c802a213b5bb4928 | 38c8e4ef46cfd86f03a931330a6d86d270f459db | /tests/integration/src/handlers/IngestS3Event/test_function.py | b47d590ee5dc027ce9dad904265351c2de6816af | [
"BSD-2-Clause"
] | permissive | praneetap/PhotoOps | 821a18656add95d36f30c80eb24c0a270f810513 | 01799afe4f324a879ba3d85e2c2f8e1a07643d1b | refs/heads/master | 2023-09-01T07:32:36.123839 | 2021-10-22T00:28:46 | 2021-10-22T01:21:28 | 422,656,952 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | '''Test IngestS3Event'''
import json
import os
import boto3
import pytest
from aws_lambda_powertools.utilities.data_classes import S3Event
STACK_NAME = 'PhotoOps-tom'
FUNCTION_NAME = os.path.abspath(__file__).split(os.path.sep)[-2]
DATA_DIR = './data'
EVENT_DIR = os.path.join(DATA_DIR, 'events')
IMAGE_DIR = os.path.join(DATA_DIR, 'images')
MODEL_DIR = os.path.join(DATA_DIR, 'models')
## AWS
@pytest.fixture()
def session():
'''Return a boto3 session'''
return boto3.Session()
@pytest.fixture()
def cfn_client(session):
'''Return a CFN client'''
return session.client('cloudformation')
@pytest.fixture()
def lambda_client(session):
'''Return a Lambda client'''
return session.client('lambda')
### Events
@pytest.fixture()
def event():
'''Return a test event'''
with open(os.path.join(EVENT_DIR, 'IngestS3Event-event-sns.json')) as f:
return json.load(f)
@pytest.fixture(params=['IngestS3Event-data-put.json', 'IngestS3Event-data-delete.json'])
def s3_notification(request):
'''Return an S3 notification'''
with open(os.path.join(EVENT_DIR, request.param)) as f:
return json.load(f)
def test_handler(event, s3_notification, cfn_client, lambda_client):
'''Test handler'''
s3_notification_event = S3Event(s3_notification)
event['Records'][0]['Sns']['Message'] = json.dumps(s3_notification_event._data)
function_info = cfn_client.describe_stack_resource(
StackName=STACK_NAME,
LogicalResourceId=FUNCTION_NAME
)
function_name = function_info['StackResourceDetail']['PhysicalResourceId']
resp = lambda_client.invoke(
FunctionName=function_name,
LogType='Tail',
Payload=json.dumps(event).encode('utf-8')
)
resp_body = resp.pop('Payload').read().decode()
assert resp['StatusCode'] == 200
assert json.loads(resp_body) == s3_notification
| [
"tom@serverlessops.io"
] | tom@serverlessops.io |
34f550e6a8ce03365c67f497c90b8d70b792a727 | eb61956c2221b38741ae045b492c2f52cb0c4515 | /ex_10_02/ex_10_02.py | 247ef73adc5e5087b093530f5aec6c8f20c497ea | [
"MIT"
] | permissive | ColinTing/Python-Specialization | 9552a4c35a322dff80d3e14ea97e51feec0ede06 | 684bfaa29fac729dbb76776cec29426f7d9853b1 | refs/heads/master | 2020-04-29T07:29:22.288768 | 2019-06-17T17:00:31 | 2019-06-17T17:00:31 | 175,954,595 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | fname = input("Enter file name: ")
if len(fname) < 1 :
fname = "mbox-short.txt"
fh = open(fname)
counts = dict()
for line in fh:
line = line.rstrip()
lst = line.split()
if len(lst)>5 and lst[0] == "From":
hour = lst[5].split(':')[0]
counts[hour] = counts.get(hour,0)+1
for key, val in sorted(counts.items()):
print(key,val)
#print(sorted([(val,key) for key, val in counts.items()]))
| [
"honey.hao.2012@gmail.com"
] | honey.hao.2012@gmail.com |
3925b4ce5bdbda0b5ed45b195289fc7d8cdc6c07 | 0aebc9c25f0e0fcdd9d2b3541ced519fa7230627 | /Binary_oreder_preorder_traversal.py | 9877b1b5a553c578920d7d3943178f3561f01c1c | [] | no_license | Divyanshushandilyakiet/Leet_code_solutions | 5da21074a5f0ef7b95ad1dec44732b18ed3920a8 | c91170df24c1382a0b954918640ecea0de3811c6 | refs/heads/master | 2023-08-22T22:43:06.367132 | 2021-09-24T14:54:27 | 2021-09-24T14:54:27 | 382,666,643 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def preorderTraversal(self, root: TreeNode) -> List[int]:
result = []
self.traverse(root, result)
return result
def traverse(self, root, result):
if root == None:
return
result.append(root.val)
self.traverse(root.left, result)
self.traverse(root.right, result)
| [
"noreply@github.com"
] | Divyanshushandilyakiet.noreply@github.com |
13552287bb4df48a8d6a782b4cc05ba180beb7b7 | c6907805a1075e733ad0a176faa5936fc5657d5f | /testscript/asst1-03-cat.py | a81c4285fc54fe34e4185ac1aabb825c36a564f0 | [] | no_license | john-yan/OS161 | aaf5aa07b580c00f462ef0bbc705f5bb732aa897 | fa0b9676a0febc63cf8c4f5ee05e0aacb91b4693 | refs/heads/master | 2020-05-22T12:45:32.955071 | 2014-03-12T01:21:59 | 2014-03-12T01:21:59 | 17,651,018 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,869 | py | #!/usr/bin/python
import core
import sys
# TODO: should make the code work for arbitrary number of animals, bowls, etc.
def catmouse(test, cmd):
test.send_command(cmd)
bowls = [ -1, -1] # cat or mouse nr who is eating
mouse_cat = 0 # 1 when cats are eating, 2 when mouse are eating
nr_eating = 0 # 0, 1, 2: depending on how may bowls are in use
max_cats_eating = 0
max_mice_eating = 0
cat_iterations = [ -1 ] * 6 # current iteration nr for each cat
mouse_iterations = [ -1 ] * 2 # current iteration nr for each mouse
# look for 64 outputs =
# 8 (6 cats + 2 mouse) * 2 (start, end) * 4 iterations
for i in range(64):
out = test.look_for( \
['cat: (\d) starts eating: bowl (\d), iteration (\d)', \
'mouse: (\d) starts eating: bowl (\d), iteration (\d)', \
'cat: (\d) ends eating: bowl (\d), iteration (\d)', \
'mouse: (\d) ends eating: bowl (\d), iteration (\d)'])
if out < 0:
return -1 # no match failure
nr, bowl, iteration = test.kernel().match.groups()
nr = int(nr)
bowl = int(bowl)
iteration = int(iteration)
# sanity check
if bowl < 1 or bowl > 2:
print 'bowl nr should be 1 or 2'
return -1
if iteration < 0 or iteration > 3:
print 'iteration should be 0, 1, 2 or 3'
return -1
if out == 0 or out == 2:
if nr < 0 or nr > 6:
print 'cat nr should be 1-6'
return -1
else:
if nr < 0 or nr > 1:
print 'mouse nr should be 1-6'
return -1
# now check that the cat/mouse consistency is met
bowl = bowl - 1
if out == 0:
if mouse_cat == 2:
print 'mouse is already eating'
return -1
if bowls[bowl] != -1:
print 'bowl = ' + str(bowl) + 'is already in use'
return -1
if nr_eating >= 2:
print 'weird: too many cats eating' # shouldn't happen
return -1
if cat_iterations[nr] != iteration - 1:
print 'cat iteration ' + str(iteration) + 'is not correct'
return -1
mouse_cat = 1
bowls[bowl] = nr
nr_eating = nr_eating + 1
cat_iterations[nr] = iteration
max_cats_eating = max(max_cats_eating, nr_eating)
elif out == 1:
if mouse_cat == 1:
print 'cat is already eating'
return -1
if bowls[bowl] != -1:
print 'bowl = ' + str(bowl) + 'is already in use'
return -1
if nr_eating >= 2:
print 'weird: too many mouse eating' # shouldn't happen
return -1
if mouse_iterations[nr] != iteration - 1:
print 'mouse iteration ' + str(iteration) + 'is not correct'
return -1
mouse_cat = 2
bowls[bowl] = nr
nr_eating = nr_eating + 1
mouse_iterations[nr] = iteration
max_mice_eating = max(max_mice_eating, nr_eating)
elif out == 2:
if bowls[bowl] != nr:
print 'cat = ' + str(nr) + 'exiting without entering'
return -1
if nr_eating <= 0:
print 'weird: too few cats eating' # shouldn't happen
return -1
bowls[bowl] = -1
nr_eating = nr_eating - 1
if nr_eating == 0:
mouse_cat = 0
elif out == 3:
if bowls[bowl] != nr:
print 'mouse = ' + str(nr) + 'exiting without entering'
return -1
if nr_eating <= 0:
print 'weird: too few mouse eating' # shouldn't happen
return -1
bowls[bowl] = -1
nr_eating = nr_eating - 1
if nr_eating == 0:
mouse_cat = 0
if test.verbose():
if max_cats_eating < 2:
print 'Maximum number of cats eating at a time = ' \
+ str(max_cats_eating) + ' < 2'
if max_mice_eating < 2:
print 'Maximum number of mice eating at a time = ' \
+ str(max_mice_eating) + ' < 2'
return 0
def main():
global test
result = 0
# try cat with semaphores
kernel_name = str(sys.argv[1])
test = core.TestUnit(kernel_name, "Testing cat/mouse using semaphores")
result = catmouse(test, '1a')
if result < 0:
# try cat with locks
test = core.TestUnit(kernel_name, "Testing cat/mouse using locks")
result = catmouse(test, '1b')
test.print_result(result)
if __name__ == "__main__":
main()
| [
"john.yan@mail.utoronto.ca"
] | john.yan@mail.utoronto.ca |
0482cb5e7c5b58fb5081addda9ef1529b0337c5f | 91789995d08a71c8c9afaa04eba171efc842b94d | /DOCX_Creator.py | 9751ba3f6bf6cd84d1f9e83744d7055f17b959dd | [] | no_license | richardbing/VCRI | 7e3a9f9e7b8f5f5c754e8f0cdd45cf002b651273 | 9f6125c584d734d5d3b4591ef7b01f082bf5a673 | refs/heads/master | 2020-03-19T13:36:25.443427 | 2019-01-21T14:54:03 | 2019-01-21T14:54:03 | 136,587,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | from docx import Document
from docx.opc import exceptions
def DocxWrite(title, tableContents):
try:
document = Document()
except exceptions.PackageNotFoundError:
import os
import sys
dir_path = os.getcwd()
full_path = os.path.join(dir_path, 'docx\templates\default.docx')
print full_path
document = Document(full_path)
document.add_heading(title, 0)
table = document.add_table(rows=1, cols=2)
table.style = 'Table Grid'
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Requirement'
hdr_cells[1].text = 'Test Protocol Results Names'
#table = document.add_table()
try:
with open("page_number.txt", 'r') as f:
page_number_file = "page_number.txt"
addPageNum(tableContents, page_number_file)
except IOError:
pass
for key in sorted(tableContents):
row_cells = table.add_row().cells
row_cells[0].text = str(key)
if type(tableContents[key]) is list:
row_cells[1].text = ', '.join(tableContents[key])
else:
row_cells[1].text = tableContents[key]
document.save( title + '.docx')
def addPageNum(lst, file):
f = open(file, 'r')
lines = f.readlines()
f.close()
pageNum = []
for line in lines:
try:
if line[0] == '3' and line[3] == '.' or line[4] == '.':
pageNum.append(line[line.find('\t')+len('\t'):].replace('\t', ' ').rstrip())
except IndexError:
pass
for key in lst:
if type(lst[key]) is list:
for i in range(len(lst[key])):
for j in pageNum:
if lst[key][i] in j:
lst[key][i] = j
else:
for j in pageNum:
if lst[key] in j:
lst[key] = j
| [
"mabi@tcd.ie"
] | mabi@tcd.ie |
ba93f39d8f0eb7f2d6f9d915fa08222c70e5556f | a0557aca65760466806bae630b03d0637cfed482 | /Juego2 Movimiento comida.py | c4019db44ca30c9f1603661252b39ea446cb2355 | [] | no_license | IvanAlvarez15/Juego2 | 033faa3bbe1133f31a0aac2e2d8dbade13cac6dd | ee7722f57e995f648f13b1c1e5672496b1b81c5b | refs/heads/main | 2023-01-04T13:01:04.958383 | 2020-10-28T16:15:52 | 2020-10-28T16:15:52 | 307,773,934 | 0 | 1 | null | 2020-10-28T16:12:49 | 2020-10-27T17:13:13 | Python | UTF-8 | Python | false | false | 2,383 | py | # Juego2 comida mov..py
# Ivan Alvarez y Jesus Daniel
# Juego de la Víbora
from turtle import *
from random import randrange
from freegames import square, vector
import random
food = vector(0, 0)
snake = [vector(10, 0)]
aim = vector(0, -10)
colors = ["green","blue","orange","purple","black"] #Generamos una lista con 5 posibles colores
color_snake = random.choice(colors) #Seleccion aleatoria de un color de la lista mediante random.choice
colors.remove(color_snake) #Quitamos el color seleccionado para la serpiete para evitar que sea el mismo para la comida
color_food = random.choice(colors) #Seleccion aleatoria de un color de la lista mediante random.choice
def change(x, y):
"Change snake direction."
aim.x = x
aim.y = y
def inside(head):
"Return True if head inside boundaries."
return -200 < head.x < 190 and -200 < head.y < 190
def inside2(food):
"Return True if food inside boundaries."
return -200 < food.x < 190 and -200 < food.y < 190
def move():
"Move snake forward one segment."
head = snake[-1].copy()
head.move(aim)
if not inside(head) or head in snake: #Choca con sigo mismo
square(head.x, head.y, 9, 'red')
update()
return
snake.append(head)
if head == food: #Cuando se come las cosas
print('Snake:', len(snake))# Imprime el largo de la serpiente
food.x = randrange(-15, 15) * 10
food.y = randrange(-15, 15) * 10
else:
snake.pop(0)
clear()
for body in snake:
square(body.x, body.y, 9, color_snake)
square(food.x, food.y, 9, color_food)
update()
ontimer(move, 100)
def movefood(): #Se agrega una funcion que mueve la comida en 4 posibles direcciones
position=random.choice((1,2,3,4)) #Donde "1" es derecha, "2" es izquierda, "3" es arriba y "4" es abajo
if position==1:
food.x= food.x+10
elif position==2:
food.x= food.x-10
elif position==3:
food.y= food.x+10
elif position==4:
food.y= food.x-10
ontimer(movefood,200)
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
listen()
onkey(lambda: change(10, 0), 'Right')
onkey(lambda: change(-10, 0), 'Left')
onkey(lambda: change(0, 10), 'Up')
onkey(lambda: change(0, -10), 'Down')
move()
movefood()
done() | [
"noreply@github.com"
] | IvanAlvarez15.noreply@github.com |
f975c93923abd391b57c737a6a91d07e74706955 | d5552b232a74ab968571ece1fbdc08cae028ce6d | /Neural Networks/MLP_LSTM_ANN/MLP_model.py | c67f2484ef6efc296c9f42b5664fb27006859883 | [] | no_license | pneal1995/Python | 9d9d91105a42f8ddb74f19305b30402df24590d9 | 63f783fab220a55aa721b43d4e89ae51518aecf3 | refs/heads/master | 2020-05-18T09:51:43.398066 | 2019-05-28T20:05:17 | 2019-05-28T20:05:17 | 184,338,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | import get_prices as hist
import tensorflow as tf
from preprocessing import DataProcessing
# import pandas_datareader.data as pdr if using the single test below
import fix_yahoo_finance as fix
fix.pdr_override()
start = "2003-01-01"
end = "2018-01-01"
hist.get_stock_data("AAPL", start_date=start, end_date=end)
process = DataProcessing("stock_prices.csv", 0.9)
process.gen_test(10)
process.gen_train(10)
X_train = process.X_train / 200
Y_train = process.Y_train / 200
X_test = process.X_test / 200
Y_test = process.Y_test / 200
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, activation=tf.nn.relu))
model.compile(optimizer="adam", loss="mean_squared_error")
model.fit(X_train, Y_train, epochs=100)
print(model.evaluate(X_test, Y_test))
# If instead of a full backtest, you just want to see how accurate the model is for a particular prediction, run this:
# data = pdr.get_data_yahoo("AAPL", "2017-12-19", "2018-01-03")
# stock = data["Adj Close"]
# X_predict = np.array(stock).reshape((1, 10)) / 200
# print(model.predict(X_predict)*200)
| [
"noreply@github.com"
] | pneal1995.noreply@github.com |
89888a2b8d51fe177fcd8053f9abdf21227ff496 | d3fcd8a969d8204124e73fbb892ce051f6c0f0f2 | /app.py | 0a784a34a487b50ca4d32def44a51d43f4cf61b0 | [] | no_license | rahulyadav170923/stockulator | c10ded6422fea8ee2d4af51ab455049f1cb0cac6 | 9ad7050b946ca6a256faf9ddd7f1005c7ab0dcf6 | refs/heads/master | 2021-01-10T04:54:58.204322 | 2016-02-20T11:59:44 | 2016-02-20T11:59:44 | 52,151,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | from flask import Flask,render_template,request,url_for,jsonify,json
import unirest
import math
import numpy
app = Flask(__name__)
@app.route('/<company>/<selected_date>',methods=['GET'])
def get_stock_by_date(company,selected_date):
url='data/'+company+'.json'
with open (url) as f:
data = json.loads(f.read())
dict={}
dict['graph_plot']=plotgraph()
for i in data:
if i["Date"]==selected_date:
dict["stock_details"]=i
return jsonify(dict)
return "date related data not found"
@app.route('/<company>/<float:opening_price>',methods=['GET'])
def predict(company,opening_price):
estimate=predict_closing_price(company,opening_price)
#estimate=json.dumps(estimate)
return jsonify(estimate)
def predict_closing_price(company,opening_price):
url='data/'+company+'.json'
with open (url) as f:
data = json.loads(f.read())
n=[]
for i in range(0,len(data)-1):
p=math.log(float(data[i+1]['Open'])/float(data[i]['Open']))
n.append(p)
avg=numpy.mean(n)
variance=numpy.var(n)
delta=float(1.00000000000/(len(n)-1))
volatility_sigma=math.sqrt(variance/delta)
drift_mu=((avg+(variance/2))/delta)
estimate=opening_price*(math.exp(drift_mu*delta))
dict={}
dict['closing_price']=estimate
dict['percentage_error']=(((float(data[0]['Close'])-estimate)/float(data[0]['Close']))*100)
return dict
#@app.route('/plotgraph',methods=['GET'])
def plotgraph():
with open ('data/wipro.json') as f:
data = json.loads(f.read())
opening_prices=[]
for i in range(0,220):
dict={}
dict["open_price"]=data[i]["Open"]
dict["date"]=data[i]["Date"]
opening_prices.append(dict)
return opening_prices
@app.route('/plotgraph',methods=['GET'])
def plotgraph_():
with open ('data/wipro.json') as f:
data = json.loads(f.read())
opening_prices=[]
for i in range(0,220):
dict={}
dict["open_price"]=data[i]["Open"]
dict["date"]=data[i]["Date"]
opening_prices.append(dict)
opening_prices=json.dumps(opening_prices)
return opening_prices
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0')
| [
"rahulyadav170923@gmail.com"
] | rahulyadav170923@gmail.com |
ee62c946bacf7cf765e57fe18224aea84ff72185 | 2fcf361eb89f1f01fe4d677d4772ddaba89b06ad | /hydrus/HydrusGlobals.py | 06ab47dbf5399492ca0bfda15b4892944f211c47 | [
"WTFPL"
] | permissive | matjojo/hydrus | 9f13f35e817bfe7e170ec7be22e18b64e393cb01 | 8f87206ea6ef242bc38235d7053bb33b5a785e68 | refs/heads/master | 2021-05-17T03:26:19.183503 | 2020-03-27T23:32:58 | 2020-03-27T23:32:58 | 250,597,596 | 0 | 0 | NOASSERTION | 2020-03-27T17:18:53 | 2020-03-27T17:18:52 | null | UTF-8 | Python | false | false | 1,239 | py | import threading
controller = None
client_controller = None
server_controller = None
test_controller = None
view_shutdown = False
model_shutdown = False
no_daemons = False
no_wal = False
no_db_temp_files = False
db_memory_journaling = False
db_synchronous_override = None
import_folders_running = False
export_folders_running = False
callto_report_mode = False
db_report_mode = False
db_profile_mode = False
file_report_mode = False
media_load_report_mode = False
gui_report_mode = False
shortcut_report_mode = False
subprocess_report_mode = False
subscription_report_mode = False
hover_window_report_mode = False
file_import_report_mode = False
phash_generation_report_mode = False
menu_profile_mode = False
network_report_mode = False
pubsub_report_mode = False
pubsub_profile_mode = False
ui_timer_profile_mode = False
daemon_report_mode = False
force_idle_mode = False
no_page_limit_mode = False
thumbnail_debug_mode = False
currently_uploading_pending = False
shutting_down_due_to_already_running = False
do_idle_shutdown_work = False
program_is_shutting_down = False
shutdown_complete = False
restart = False
emergency_exit = False
twisted_is_broke = False
dirty_object_lock = threading.Lock()
server_busy = threading.Lock()
| [
"hydrus.admin@gmail.com"
] | hydrus.admin@gmail.com |
3f4e8b56e0d0f91975ff1694c039ef1492b922da | 6ad7ba4261054436ff99a64341676fe484f3eb66 | /day3/while_demo/ex9.py | bfb2eb456ab6c936a7698085ac4ee663474128aa | [] | no_license | Schrodingfa/PY3_TRAINING | e5b3eeb227cc48fe9f63159f1c9e2c97b922fd1c | 1277a1b1d9959becea0ffb6a959dd4cea29775d8 | refs/heads/master | 2021-07-13T07:38:13.001635 | 2020-07-08T09:31:05 | 2020-07-08T09:31:05 | 177,097,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | # Author:jxy
# 猜数字 1.0
# 规则:系统生成 1 -- 100 之间的随机数
# 让用户一直猜,直到猜对为止
# 提示:大了 小了 猜对了
# 猜数字 2.0
# 最多只能猜10次
import random
# 1.0
# goal = random.randint(1,100) #产生随机数
# while True:
# num = int(input("please input a number:"))
# flag += 1
# if num > goal:
# print("too bigger")
# elif num < goal:
# print("too small")
# else:
# print("bingo!")
# break
# 2.0
goal = random.randint(1,100) #产生随机数
flag = 0
while flag < 10: # 0 -- 9
num = int(input("please input a number:"))
flag += 1
if num > goal:
print("too bigger")
elif num < goal:
print("too small")
else:
print("bingo!")
break
# if flag == 10:
# print("you have try too many times.")
else:
# 只有while条件不符合才会执行else语句
# 从循环体内部break不会执行
print("you have try too many times.") | [
"82779721@qq.com"
] | 82779721@qq.com |
ac4d37c42caf015e05fe2241de6dd28a1002d777 | 613c532f982c6c9f994e848ef6cf0a268b7dd19f | /mysite/polls/views.py | 127f442d12ced9fdde1945ee557fd236930df299 | [
"MIT"
] | permissive | youwei1-sudo/CMPUT404_lab4 | 05125f50d087f2f90b03e32289bb027bfec91cb9 | 57ed0c5c0d2e127a1994f75bdf07861d68a1c121 | refs/heads/main | 2023-02-28T06:44:00.905383 | 2021-02-02T19:53:15 | 2021-02-02T19:53:15 | 335,120,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | # Create your views here.
# from django.http import HttpResponse
# from django.shortcuts import get_object_or_404, render
# from .models import Question
#part3
# # v1
# def index(request):
# return HttpResponse("Hello, world. You're at the polls index.")
#V2
# def index(request):
# # get 5 newest question
# # http://127.0.0.1:8000/polls/ we can get questions
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# output = ', '.join([q.question_text for q in latest_question_list])
# return HttpResponse(output)
# V3
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# context = {'latest_question_list': latest_question_list}
# return render(request, 'polls/index.html', context)
#part3
# def detail(request, question_id):
# return HttpResponse("You're looking at question %s." % question_id)
# V2
# def detail(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
# #part3
# def results(request, question_id):
# response = "You're looking at the results of question %s."
# return HttpResponse(response % question_id)
# part3
# def vote(request, question_id):
# return HttpResponse("You're voting on question %s." % question_id)
# #part4
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/results.html', {'question': question})
# #Part4
# def vote(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# try:
# selected_choice = question.choice_set.get(pk=request.POST['choice'])
# except (KeyError, Choice.DoesNotExist):
# # Redisplay the question voting form.
# return render(request, 'polls/detail.html', {
# 'question': question,
# 'error_message': "You didn't select a choice.",
# })
# else:
# selected_choice.votes += 1
# selected_choice.save()
# # Always return an HttpResponseRedirect after successfully dealing
# # with POST data. This prevents data from being posted twice if a
# # user hits the Back button.
# return HttpResponseRedirect(reverse('results', args=(question.id,))) # 'polls:results'
# v2
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions."""
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
#Part4
def vote(request, question_id):
    """POST handler: record a vote for one Choice of the given Question.

    Re-renders the detail page with an error message when no choice was
    submitted or the submitted pk does not belong to this question.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        # 'choice' is the pk of the selected radio button from the form.
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # NOTE(review): read-modify-write is racy under concurrent votes;
        # F('votes') + 1 would be atomic — confirm before changing.
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        # NOTE(review): if the URLconf is namespaced, this should be
        # 'polls:results' as the trailing comment suggests — confirm.
        return HttpResponseRedirect(reverse('results', args=(question.id,))) # 'polls:results'
"davidchenyouwei1999@gmail.com"
] | davidchenyouwei1999@gmail.com |
b7673eb6fe474f69b1874f855d4420fa94be6795 | fdbb5a57c16c0bbfdf990ce17dcdcbbf4a4b0489 | /khansole_academy.py | dcd93504ef957f34ed5ed6314b961bcaa4f28351 | [] | no_license | jackreacher80/khansoleacademy | 5151dd83d866f465473cc11b9d9c9644934ec9f5 | d8849fdae5a3cb268ea7763f066864d2f43c7c8c | refs/heads/master | 2022-09-03T00:57:34.264288 | 2020-06-01T11:23:33 | 2020-06-01T11:23:33 | 268,501,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,712 | py | """
File: khansole_academy.py
-------------------------
Add your comments here.
"""
import random
def main():
    """Run the four arithmetic drills in order.

    Each drill returns once the user has answered three problems in a row
    correctly; a congratulation message is printed after every topic.
    """
    drills = (
        (addition_console, 'addition'),
        (subtraction_console, 'subtraction'),
        (multiplication_console, 'multiplication'),
        (division_console, 'division'),
    )
    for drill, topic in drills:
        drill()
        print("Congratulations! You mastered " + topic)
"""This function asks the user an addition problem and gives him feedback on his results. The user must be correct 3 times for the system to move
move from addition to subtraction to multiplication to division"""
def addition_console():
    """Quiz two-digit additions until the user gets three right in a row."""
    streak = 0
    while streak != 3:
        # Two random two-digit operands.
        first = random.randint(10, 99)
        second = random.randint(10, 99)
        expected = first + second
        print("What is " + str(first) + "+ " + str(second) + "? ")
        answer = float(input("your answer: "))
        if answer == expected:
            streak = streak + 1
            print("You've gotten " + str(streak) + " correct in a row ")
        else:
            print("Incorrect. The expected answer is " + str(expected))
"""This function asks the user an subtraction problem and gives him feedback on his results. The user must be correct 3 times for the system to move
move from addition to subtraction to multiplication to division"""
def subtraction_console():
    """Quiz two-digit subtractions until the user gets three right in a row."""
    streak = 0
    while streak != 3:
        # Two random two-digit operands (the result may be negative).
        first = random.randint(10, 99)
        second = random.randint(10, 99)
        expected = first - second
        print("What is " + str(first) + "- " + str(second) + "? ")
        answer = float(input("your answer: "))
        if answer == expected:
            streak = streak + 1
            print("You've gotten " + str(streak) + " correct in a row ")
        else:
            print("Incorrect. The expected answer is " + str(expected))
"""This function asks the user a multiplication problem and gives him feedback on his results
The user must be correct 3 times for the system to move from addition to subtraction to multiplication to division
"""
def multiplication_console():
    """Quiz two-digit multiplications until the user gets three right in a row."""
    streak = 0
    while streak != 3:
        # Two random two-digit operands.
        first = random.randint(10, 99)
        second = random.randint(10, 99)
        expected = first * second
        print("What is " + str(first) + "*" + str(second) + "? ")
        answer = float(input("your answer: "))
        if answer == expected:
            streak = streak + 1
            print("You've gotten " + str(streak) + " correct in a row ")
        else:
            print("Incorrect. The expected answer is " + str(expected))
"""This function asks the user a division problem and gives him feedback on his results
The user must be correct 3 times for the system to move from addition to subtraction to multiplication to division
"""
def division_console():
    """Quiz two-digit divisions until the user gets three right in a row.

    The expected answer is a float, and the user's input is compared with
    exact equality — hence the prompt asking for 17 digits of precision.
    """
    streak = 0
    while streak != 3:
        # Two random two-digit operands; true division, so the answer is a float.
        first = random.randint(10, 99)
        second = random.randint(10, 99)
        expected = first / second
        print("What is " + str(first) + "/" + str(second) + "? ")
        print("Please enter a value with 17 digits after the decimal")
        answer = float(input("your answer: "))
        if answer == expected:
            streak = streak + 1
            print("You've gotten " + str(streak) + " correct in a row ")
        else:
            print("Incorrect. The expected answer is " + str(expected))
# This provided line is required at the end of a Python file
# to call the main() function.
if __name__ == '__main__':
main()
| [
"turisys.python@gmail.com"
] | turisys.python@gmail.com |
b935a12a246adc3ca19c8aa6d4847d63d250dfd0 | 6bb509842d31bf1cae22d6a2ad66d4ebb42ee235 | /p5_HOG_test.py | bc652ed41954a7236340ee9a8d0f12f75b9ee1ad | [] | no_license | yulongl/p5_VehicleDetectionAndTracking | 1386ddc5bae8ee39cca192cf5ebec372ac8b07b8 | 3cafb0241ea2c075fe6d1e0f66e930e25da0793e | refs/heads/master | 2020-03-17T04:16:00.340370 | 2018-09-01T00:55:47 | 2018-09-01T00:55:47 | 133,268,633 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,088 | py | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.preprocessing import StandardScaler
import glob
from skimage.feature import hog
from sklearn.model_selection import train_test_split
import time
from sklearn.svm import LinearSVC
import pickle
from scipy.ndimage.measurements import label
def convert_color(img, cspace='RGB'):
    """Convert a BGR image (OpenCV default layout) to the requested color space.

    Args:
        img: BGR image array as loaded by cv2.imread.
        cspace: one of 'HSV', 'LUV', 'HLS', 'YUV', 'RGB', 'YCrCb', or 'BGR'.

    Returns:
        The converted image, or an unmodified copy for 'BGR' (and any
        unrecognized value). The original left `cvt_image` unassigned in
        those cases and raised UnboundLocalError on return.
    """
    if cspace == 'HSV':
        return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    elif cspace == 'LUV':
        return cv2.cvtColor(img, cv2.COLOR_BGR2LUV)
    elif cspace == 'HLS':
        return cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    elif cspace == 'YUV':
        return cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    elif cspace == 'RGB':
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    elif cspace == 'YCrCb':
        return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    # 'BGR' or unknown: hand back a copy so callers may mutate it safely.
    return np.copy(img)
# Define a function to compute binned color features
def bin_spatial(img, size=(32, 32)):
    """Down-sample the image to `size` and flatten it into a 1-D feature vector."""
    resized = cv2.resize(img, size)
    return resized.ravel()
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
    """Concatenated per-channel color histograms as one 1-D feature vector.

    Returns an array of length 3 * nbins (channel 0, 1, 2 in order).
    """
    per_channel_counts = [
        np.histogram(img[:, :, channel], bins=nbins, range=bins_range)[0]
        for channel in range(3)
    ]
    return np.concatenate(per_channel_counts)
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                     vis=False, feature_vec=True):
    """HOG features for a single channel via skimage.feature.hog.

    Returns (features, hog_image) when `vis` is True, otherwise just the
    feature array (flattened when `feature_vec` is True).
    """
    hog_options = dict(
        orientations=orient,
        pixels_per_cell=(pix_per_cell, pix_per_cell),
        cells_per_block=(cell_per_block, cell_per_block),
        block_norm='L2-Hys',
        transform_sqrt=True,
        visualise=vis,
        feature_vector=feature_vec,
    )
    # hog() itself returns a tuple when visualise=True, so a single return works.
    return hog(img, **hog_options)
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def find_cars(img, ystart, ystop, scale, img_size, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins, box_list, colorspace='RGB'):
    """Slide a window over a horizontal band of `img` and collect car detections.

    HOG features are computed once for the whole band and sub-sampled per
    window (much faster than per-window HOG). Each window also gets spatial
    and color-histogram features; the combined vector is scaled and scored
    with the classifier's predict_proba.

    Returns (annotated image copy, box_list with detections appended).
    box_list is mutated in place; entries are ((x1, y1), (x2, y2)) in
    original-image coordinates.
    """
    draw_img = np.copy(img)
    #img = img.astype(np.float32) / 255
    #img = img.astype(np.float32)
    # Restrict the search to the ystart..ystop band (road area).
    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = convert_color(img_tosearch, cspace=colorspace)
    if scale != 1:
        # Shrinking the band by `scale` is equivalent to searching with a
        # window `scale` times larger in the original image.
        imshape = ctrans_tosearch.shape
        # NOTE(review): np.int was removed in NumPy 1.24; use int() — confirm
        # the pinned numpy version before upgrading.
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))
    ch1 = ctrans_tosearch[:, :, 0]
    ch2 = ctrans_tosearch[:, :, 1]
    ch3 = ctrans_tosearch[:, :, 2]
    # Define blocks and steps as above
    nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
    nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
    nfeat_per_block = orient * cell_per_block ** 2
    # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
    window = img_size[0]
    nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1
    # Compute individual channel HOG features for the entire image
    hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb * cells_per_step
            xpos = xb * cells_per_step
            # Extract HOG for this patch
            hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
            # Window's top-left corner in (scaled) pixel coordinates.
            xleft = xpos * pix_per_cell
            ytop = ypos * pix_per_cell
            # Extract the image patch
            subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], img_size)
            # Get color features
            spatial_features = bin_spatial(subimg, size=spatial_size)
            hist_features = color_hist(subimg, nbins=hist_bins)
            # Scale features and make a prediction
            # Feature order (spatial, hist, hog) must match the training pipeline.
            test_features = X_scaler.transform(
                np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
            #test_features = X_scaler.transform(hog_features.reshape(1, -1))
            # test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
            #test_prediction = svc.predict(test_features)
            # Very high probability cutoff to suppress false positives.
            class_probabilities = svc.predict_proba(test_features)
            if class_probabilities[0][1] > 0.99999999:
                test_prediction = 1
            else:
                test_prediction = 0
            #print(class_probabilities)
            #print(test_prediction)
            if test_prediction == 1:
                # Map window coordinates back to the original image scale.
                xbox_left = np.int(xleft * scale)
                ytop_draw = np.int(ytop * scale)
                win_draw = np.int(window * scale)
                cv2.rectangle(draw_img, (xbox_left, ytop_draw + ystart),
                              (xbox_left + win_draw, ytop_draw + win_draw + ystart), (0, 0, 255), 6)
                box_list.append(((xbox_left, ytop_draw + ystart), (xbox_left + win_draw, ytop_draw + win_draw + ystart)))
    return draw_img, box_list
def add_heat(heatmap, bbox_list):
    """Increment heatmap pixels covered by each box; mutates and returns heatmap.

    Each box takes the form ((x1, y1), (x2, y2)).
    """
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap
def apply_threshold(heatmap, threshold):
    """Zero every pixel at or below `threshold` (in place) and return the map."""
    too_cold = heatmap <= threshold
    heatmap[too_cold] = 0
    return heatmap
def draw_labeled_bboxes(img, labels):
    """Draw one blue box per labeled region; mutates and returns `img`.

    `labels` is the (label_map, n_regions) pair returned by
    scipy.ndimage.measurements.label.
    """
    label_map, n_cars = labels[0], labels[1]
    for car_number in range(1, n_cars + 1):
        # nonzero on a boolean mask yields (row indices, column indices).
        ys, xs = np.nonzero(label_map == car_number)
        top_left = (np.min(xs), np.min(ys))
        bottom_right = (np.max(xs), np.max(ys))
        cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 6)
    return img
################################################################################################################
# Smoke-test script: run the detector on one test image and display results.
test_img = cv2.imread('test_images/test1.jpg')
# NOTE(review): np.float was removed in NumPy 1.24; use float — confirm version.
heat = np.zeros_like(test_img[:, :, 0]).astype(np.float)
box_list = []
# get attributes of our svc object
dist_pickle = pickle.load(open("svc.pkl", "rb"))
svc = dist_pickle["svc"]
X_scaler = dist_pickle["X_scaler"]
colorspace = dist_pickle["colorspace"]
orient = dist_pickle["orient"]
pix_per_cell = dist_pickle["pix_per_cell"]
cell_per_block = dist_pickle["cell_per_block"]
hog_channel = dist_pickle["hog_channel"]
img_size = dist_pickle["img_size"]
# Vertical band of the image to search (road area).
ystart = 400
ystop = 656
# colorspace = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
# orient = 12
# pix_per_cell = 8
# cell_per_block = 2
# hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
spatial_size = (16, 16)
hist_bins = 32
scale = 1
draw_img, box_list = find_cars(test_img, ystart, ystop, scale, img_size, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins, box_list)
# scale = 1
# draw_img, box_list = find_cars(draw_img, ystart, ystop, scale, img_size, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins, box_list)
# scale = 1.5
# draw_img, box_list = find_cars(draw_img, ystart, ystop, scale, img_size, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins, box_list)
# Show the raw sliding-window detections (blocks until a key is pressed).
cv2.imshow('draw_img', draw_img)
cv2.waitKey(0)
# Add heat to each box in box list
heat = add_heat(heat, box_list)
# Apply threshold to help remove false positives
heat = apply_threshold(heat, 9)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)
# Find final boxes from heatmap using label function
labels = label(heatmap)
draw_img = draw_labeled_bboxes(np.copy(test_img), labels)
# Side-by-side figure: merged detections and the heat map.
fig = plt.figure()
plt.subplot(121)
plt.imshow(draw_img)
plt.title('Car Positions')
plt.subplot(122)
plt.imshow(heatmap, cmap='hot')
plt.title('Heat Map')
fig.tight_layout()
plt.show()
| [
"noreply@github.com"
] | yulongl.noreply@github.com |
59c88a20af7ad0adc735f39520fd890402bbf27e | cf2bb64be5bda0afee2e60e1acc0de2df66b681c | /assessment/models.py | 6b4103bb193e3fdfa2dd8a370aa2be205a83e113 | [] | no_license | Rahul86999/Assessment | adbbba28484a0d61492b40eb96b0e4fde1737a77 | f082a95676959912b4b84c4d0845855a7be7558d | refs/heads/master | 2022-12-06T12:56:58.462474 | 2020-08-20T09:05:34 | 2020-08-20T09:05:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,673 | py | from django.db import models
from django.contrib.auth import get_user_model
User=get_user_model()
# Create your models here.
class Standard(models.Model):
    """A school standard/grade that tests are organised under."""
    standard_name = models.CharField(max_length=20)
    def __str__(self):
        return self.standard_name
class Language(models.Model):
    """A language a test can be delivered in."""
    lang_name = models.CharField(max_length=30)
    def __str__(self):
        return self.lang_name
#class Subject(models.Model):
#    sub_name = models.CharField(max_length=40)
class Test(models.Model):
    """One scheduled assessment for a standard, subject and quarter."""
    test_year = models.IntegerField(default=2020)
    # NOTE(review): 'quater' is a typo for 'quarter'; renaming requires a migration.
    quater = models.CharField(max_length=50,default='Quater1')
    subject_name = models.CharField(max_length=90,default='Embedded')
    # presumably the duration in minutes — confirm with callers before documenting units
    test_duration = models.PositiveIntegerField()
    package = models.CharField(max_length=50,default='p1')
    test_date = models.DateTimeField()
    created_at = models.DateTimeField(null=True,auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    standard = models.ForeignKey(Standard,on_delete=models.SET_NULL,null=True)
    created_by = models.ForeignKey(User,related_name = 'created_by',on_delete=models.SET_NULL,null=True)
    updated_by = models.ForeignKey(User,on_delete=models.SET_NULL,related_name = 'updated_by',null=True)
class TestLanguage(models.Model):
    """A language variant offered for a particular Test."""
    test = models.ForeignKey(Test,on_delete=models.CASCADE)
    test_lang= models.CharField(max_length=50)
    created_at = models.DateTimeField(null=True,auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class QuestionCategory(models.Model):
    """A section of a Test: how many questions it delivers and its max score."""
    test = models.ForeignKey(Test,on_delete=models.CASCADE)
    category_name = models.CharField(max_length=50)
    question_to_deliver = models.IntegerField()
    max_score = models.IntegerField()
    created_at = models.DateTimeField(null=True,auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_by = models.ForeignKey(User,related_name = 'cat_created_by',on_delete=models.SET_NULL,null=True)
    updated_by = models.ForeignKey(User,related_name = 'cat_updated_by',on_delete=models.SET_NULL,null=True)
class Question(models.Model):
    """A single question belonging to a Test (and optionally a category)."""
    question_category = models.ForeignKey(QuestionCategory,on_delete=models.SET_NULL,null=True)
    test = models.ForeignKey(Test,on_delete=models.CASCADE)
    # (stored value, human-readable label) pairs for question_tags.
    question_type= (
        ('Single Correct','Single'),
        ('Multiple Correct','Multiple'),
        ('Passage Based','Passage')
    )
    language = models.ForeignKey(Language,on_delete=models.SET_NULL,null=True)
    question_title = models.TextField(default='')
    # NOTE(review): the stored choice values are 'Single Correct' etc., so the
    # default "Single" is not a valid choice value — confirm intended default.
    question_tags = models.CharField(max_length=60,choices=question_type,default="Single")
    level = models.CharField(max_length=50,default="Easy",choices=(('Easy','Easy'),('Medium','Medium'),('Difficult','Difficult')))
    created_at = models.DateTimeField(null=True,auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    updated_by = models.ForeignKey(User,on_delete=models.SET_NULL,null=True)
class Options(models.Model):
    """Answer options (up to four) plus the correct answer for one Question."""
    question = models.ForeignKey(Question,related_name='options',on_delete=models.CASCADE)
    answer = models.CharField(max_length=1000)
    option1 = models.CharField(max_length=500,null=True,blank=True)
    option2 = models.CharField(max_length=500,null=True,blank=True)
    option3 = models.CharField(max_length=500,null=True,blank=True)
    option4 = models.CharField(max_length=500,null=True,blank=True)
    is_valid = models.BooleanField()
    ans_point = models.FloatField(null=True, blank=True, default=0)
class QuestionHistory(models.Model):
    """Audit record of question changes for a Test."""
    test = models.ForeignKey(Test,on_delete=models.SET_NULL,null=True)
    updated_by = models.ForeignKey(User,on_delete=models.SET_NULL,null=True)
    created_at = models.DateTimeField(null=True,auto_now_add=True)
| [
"ganesharmy95@gmail.com"
] | ganesharmy95@gmail.com |
35c013a6e48a7fa35c0656fe8b663356438f02cb | e63c127ad19d0faa25cf2b0c25321e216d27556a | /sheets/utility.py | 97efae39a015689f17f9e9fd85b833dc1e10ecf3 | [] | no_license | lykius/iLectern | e4a04616b7c3fb5d9b42a1cd7b2f633d4be23d97 | dd4735674c5dcd5f048756ceffce1a71b53ed03b | refs/heads/master | 2021-01-13T16:15:28.118724 | 2019-03-02T08:18:29 | 2019-03-02T08:18:29 | 81,872,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | import sys
import os
from wand.image import Image
from pdfrw import PdfReader
PDF_EXT = '.pdf'
IMAGE_EXT = '.jpg'
RESOLUTION = 500
def convert_pdf_to_image(pdf_file, dest_dir):
    """Render every page of `pdf_file` into `dest_dir` as 0.jpg, 1.jpg, ...

    Silently does nothing when the file is missing or not a .pdf.
    Pages are rasterized at RESOLUTION dpi via ImageMagick (wand).
    """
    base, extension = os.path.splitext(pdf_file)
    if not (os.path.exists(pdf_file) and extension == PDF_EXT):
        return
    page_count = len(PdfReader(pdf_file, decompress=False).pages)
    for page_index in range(page_count):
        # 'file.pdf[3]' is ImageMagick's syntax for "page 3 of file.pdf".
        page_spec = pdf_file + '[' + str(page_index) + ']'
        with Image(filename=page_spec, resolution=RESOLUTION) as img:
            img.save(filename=os.path.join(dest_dir, str(page_index) + IMAGE_EXT))
def count_images_in_dir(dir):
    """Count regular files in `dir` whose names end with IMAGE_EXT.

    Returns 0 when the directory does not exist. (The original implicitly
    returned None in that case, which breaks callers doing arithmetic.)
    """
    if not os.path.exists(dir):
        return 0
    return len([f for f in os.listdir(dir)
                if os.path.isfile(os.path.join(dir, f))
                and f.endswith(IMAGE_EXT)])
| [
"lucadeluigi91@gmail.com"
] | lucadeluigi91@gmail.com |
f6ced2b4805a2ac25e3a6f5f5bc67b175ac0c922 | 69d3680f881833a0a4906ad708eac11401bc03c6 | /python3/515. 在每个树行中找最大值.py | 7f9663db2eb82e0576ad697414cd72b43c7432df | [] | no_license | menghuu/YALeetcode | 21df4b5ea6cb0a249263b0ce2df37e7580477ddd | 1959a884bb1cc9f2f1acb1ba6f413498ea0d1aca | refs/heads/master | 2023-08-18T03:55:41.470428 | 2021-09-11T12:39:02 | 2021-09-11T12:39:02 | 269,104,152 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 m <m@meng.hu>
#
# Distributed under terms of the MIT license.
"""
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def largestValues(self, root: TreeNode) -> List[int]:
        """Return the maximum node value on each level of the tree (BFS)."""
        if not root:
            return []
        maxima = []
        frontier = [root]
        while frontier:
            # Record this level's maximum, then gather the next level.
            maxima.append(max(node.val for node in frontier))
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        return maxima
| [
"m@meng.hu"
] | m@meng.hu |
468645e9619fb25182bf7c27b275edf40ec84218 | afa4ad9cefeb12f78fa7176d2c80d71cc5a76d1c | /clastic/tests/common.py | e1327a4596c8a3033fab40fdefe4c40417973191 | [
"BSD-3-Clause"
] | permissive | slaporte/clastic | 0d88fdc56570de578efcd221d1a5182be661ac97 | d7734040160ece0bf2dd6ef10770be838776056f | refs/heads/master | 2021-01-16T22:36:30.852244 | 2013-09-15T01:43:11 | 2013-09-15T01:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | # -*- coding: utf-8 -*-
import clastic
from clastic import Middleware
def hello_world(name=None):
    """Greeting wrapped in a clastic Response; defaults to 'world'."""
    who = 'world' if name is None else name
    return clastic.Response('Hello, %s!' % who)
def hello_world_str(name=None):
    """Plain-text greeting; defaults to 'world' when no name is given."""
    who = 'world' if name is None else name
    return 'Hello, %s!' % who
def hello_world_html(name=None):
    """Minimal HTML greeting page with the name in bold; defaults to 'world'."""
    who = 'world' if name is None else name
    return '<html><body><p>Hello, <b>%s</b>!</p></body></html>' % who
def hello_world_ctx(name=None):
    """Template-context dict with 'name' and 'greeting' keys."""
    who = 'world' if name is None else name
    return {'name': who, 'greeting': 'Hello, %s!' % who}
def session_hello_world(session, name=None):
    """Greet by name, remembering the last-used name in `session`.

    Falls back to the session's stored name, then to 'world'.
    """
    chosen = name
    if chosen is None:
        chosen = session.get('name') or 'world'
    session['name'] = chosen
    return 'Hello, %s!' % chosen
def complex_context(name=None, date=None):
    """Build a deliberately messy template context for render tests.

    Includes a datetime, a middleware class, a lambda, booleans, a set of
    bools, and a snapshot of this function's own locals() (minus 'ret').
    CAUTION: because locals() is captured, the local variable names here
    are part of the returned data — do not rename them.
    """
    from datetime import datetime
    ret = hello_world_ctx(name)
    if date is None:
        date = datetime.utcnow()
    ret['date'] = date
    ret['example_middleware'] = RequestProvidesName
    ret['a_lambda'] = lambda x: None
    ret['true'] = True
    ret['bool_vals'] = set([True, False])
    # Snapshot the frame's locals, then drop the self-referential 'ret' entry.
    ret['the_locals'] = locals()
    ret['the_locals'].pop('ret')
    return ret
class RequestProvidesName(Middleware):
    """Middleware that provides a 'name' argument from the request's query
    string, falling back to a configurable default."""
    provides = ('name',)

    def __init__(self, default_name=None):
        self.default_name = default_name

    def request(self, next, request):
        try:
            ret = next(request.args.get('name', self.default_name))
        except Exception as e:
            # print(e) works identically on Python 2 and 3; the original
            # bare `print e` statement is a SyntaxError under Python 3.
            print(e)
            raise
        return ret
class DummyMiddleware(Middleware):
    """No-op middleware used in tests; optionally logs request flow to stdout.

    NOTE(review): uses Python 2 print statements — this module is py2-only.
    """
    def __init__(self, verbose=False):
        self.verbose = verbose
    def request(self, next, request):
        # Unique tag so interleaved output from multiple instances is readable.
        name = '%s (%s)' % (self.__class__.__name__, id(self))
        if self.verbose:
            print name, '- handling', id(request)
        try:
            ret = next()
        except Exception as e:
            if self.verbose:
                print name, '- uhoh:', repr(e)
            raise
        if self.verbose:
            print name, '- hooray:', repr(ret)
        return ret
| [
"mahmoudrhashemi@gmail.com"
] | mahmoudrhashemi@gmail.com |
b4a0be1470e467285d734202a3faced0aa92de3a | 954ceac52dfe831ed7c2b302311a20bb92452727 | /python/tvm/relax/dpl/__init__.py | e0bbdaff05127fcefdd39e14799047773730be0e | [
"Apache-2.0",
"LLVM-exception",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Zlib",
"Unlicense",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | tqchen/tvm | a0e4aefe8b8dccbdbe6f760549bed6e9545ad4a1 | 678d01dd4a4e75ef6186ce356bb1a20e584a7b24 | refs/heads/main | 2023-08-10T02:21:48.092636 | 2023-02-25T18:22:10 | 2023-02-25T18:22:10 | 100,638,323 | 23 | 8 | Apache-2.0 | 2023-02-20T16:28:46 | 2017-08-17T19:30:37 | Python | UTF-8 | Python | false | false | 876 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The Relax Dataflow Pattern Language."""
from .pattern import *
from .context import *
| [
"tianqi.tchen@gmail.com"
] | tianqi.tchen@gmail.com |
f23343b409b67f1ed8b81fd6a541d2127152e76b | 988781c5bd40b19300a4146d7643120f46a51753 | /commands/default_cmdsets.py | 1ae72ff7b2a4a261562b77494687c6a596c0ec92 | [] | no_license | philnelson/chronopolis | 9cf1b10af4ce5b62da2e417861860926afa703f5 | 1b2be502cf81411d6c7c2c9a8bb1949de5437a5a | refs/heads/master | 2020-03-08T16:57:34.813159 | 2018-04-09T17:34:37 | 2018-04-09T17:34:37 | 128,255,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | """
Command sets
All commands in the game must be grouped in a cmdset. A given command
can be part of any number of cmdsets and cmdsets can be added/removed
and merged onto entities at runtime.
To create new commands to populate the cmdset, see
`commands/command.py`.
This module wraps the default command sets of Evennia; overloads them
to add/remove commands from the default lineup. You can create your
own cmdsets by inheriting from them or directly from `evennia.CmdSet`.
"""
from evennia import default_cmds
from evennia import CmdSet
from commands import command
from evennia.contrib.mail import CmdMail
from evennia.contrib import dice
class CharacterCmdSet(default_cmds.CharacterCmdSet):
    """
    The `CharacterCmdSet` contains general in-game commands like `look`,
    `get`, etc available on in-game Character objects. It is merged with
    the `AccountCmdSet` when an Account puppets a Character.
    """
    key = "DefaultCharacter"
    def at_cmdset_creation(self):
        """
        Populates the cmdset
        """
        super(CharacterCmdSet, self).at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
        # Game-specific additions: dice rolling plus custom combat commands.
        self.add(dice.CmdDice())
        self.add(command.CmdAttack())
        self.add(command.CmdEquip())
class AccountCmdSet(default_cmds.AccountCmdSet):
    """
    This is the cmdset available to the Account at all times. It is
    combined with the `CharacterCmdSet` when the Account puppets a
    Character. It holds game-account-specific commands, channel
    commands, etc.
    """
    key = "DefaultAccount"
    def at_cmdset_creation(self):
        """
        Populates the cmdset
        """
        super(AccountCmdSet, self).at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
        # NOTE(review): CmdMail is added as a class while the character set
        # adds instances — Evennia accepts both, but confirm this is intended.
        self.add(CmdMail)
class UnloggedinCmdSet(default_cmds.UnloggedinCmdSet):
    """
    Command set available to the Session before being logged in. This
    holds commands like creating a new account, logging in, etc.
    """
    key = "DefaultUnloggedin"
    def at_cmdset_creation(self):
        """
        Populates the cmdset
        """
        super(UnloggedinCmdSet, self).at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
class SessionCmdSet(default_cmds.SessionCmdSet):
    """
    This cmdset is made available on Session level once logged in. It
    is empty by default.
    """
    key = "DefaultSession"
    def at_cmdset_creation(self):
        """
        This is the only method defined in a cmdset, called during
        its creation. It should populate the set with command instances.
        As and example we just add the empty base `Command` object.
        It prints some info.
        """
        super(SessionCmdSet, self).at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
| [
"phil@extrafuture.com"
] | phil@extrafuture.com |
57ee62c2b454803a41896a4bf9ceef507af16a53 | fa06915cb1f1d49d636ee2137889cfd66c6e55af | /metodos_confinamentos/secante.py | 18e79f85bf7e3f67cb40920e9a009f43320520b7 | [] | no_license | DarknessRdg/mat-computacional | 7ed45dd333bec52b509128e6d106efaa4a205cea | 30fd0dd144a10a91f3a11055d20ebdab72be3620 | refs/heads/main | 2023-04-03T10:27:35.510285 | 2021-04-16T04:30:38 | 2021-04-16T04:30:38 | 329,485,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | import math
from utils import trunc
def secante(x1, x2, funcao, tolerancia):
    """Secant method: iterate from the pair (x1, x2) toward a root of `funcao`.

    Prints one diagnostic line per iteration (values truncated via `trunc`).
    Returns the last accepted iterate once |f(x1)| <= tolerancia.

    NOTE(review): the stopping test uses f evaluated at the *previous* x1,
    not at the newest iterate x3, so the loop runs one step behind the
    usual formulation — confirm this is intended before changing.
    """
    f_x1 = 0
    loop = 0
    # `loop == 0` forces at least one iteration before the tolerance test.
    while loop == 0 or abs(f_x1) > tolerancia:
        loop += 1
        f_x1 = funcao(x1)
        f_x2 = funcao(x2)
        # Secant update: intersection of the chord through (x1,f1),(x2,f2) with y=0.
        x3 = x2 - ((f_x2 * (x1 - x2)) / (f_x1 - f_x2))
        feedback = (
            'loop = {} '
            'x1 = {} '
            'x2 = {} '
            'f(x1) = {} '
            'f(x2) = {} '
            'x3 = {} '
            'f(x3) {} '
        )
        print(feedback.format(
            loop, *map(trunc, (x1, x2, f_x1, f_x2, x3, funcao(x3)))
        ))
        # Slide the window: (x1, x2) <- (x2, x3) for the next iteration.
        x1 = x2
        x2 = x3
    return x1
if __name__ == '__main__':
    # Demo: find the root of x^2 - 2 (i.e. sqrt(2)) starting from [1, 2].
    # The original passed an undefined name `f` here, which raised NameError.
    print(secante(
        x1=1,
        x2=2,
        funcao=lambda x: x ** 2 - 2,
        tolerancia=10 ** -3
    ))
| [
"luanrodrigues007@hotmail.com"
] | luanrodrigues007@hotmail.com |
5a4cf44e3bbc3f22c141b8f8ed919d023ecd9365 | 382c4f85ca26d2d6a6bb287f14e70e1b485f77b4 | /2018/task5/solution.py | cd3895040a1a56ae5c0c8eb445192f44e17811b7 | [] | no_license | sapkos/advent_of_code | c5b783d9e773f2ede8a350d68f20c21650dd182e | 8005a3f7879f6a350e082de4a80264387dfacb35 | refs/heads/master | 2020-04-09T02:02:59.515353 | 2019-12-09T21:41:26 | 2019-12-09T21:41:26 | 159,926,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | from string import ascii_lowercase
# Read the polymer string; f.read() keeps the trailing newline, which the
# `len(...) - 1` adjustments below compensate for.
with open('input.txt') as f:
    word = f.read()
stosik = []  # stack of surviving polymer units
def is_reacting(a, b):
    """True when `a` and `b` are the same letter in opposite cases ('a' vs 'A')."""
    lower_then_upper = a == b.lower() and a.upper() == b
    upper_then_lower = a.lower() == b and a == b.upper()
    return lower_then_upper or upper_then_lower
# Part 1: fully react the polymer with a stack — a unit cancels the unit
# below it when they are the same letter in opposite cases.
for char in word:
    if stosik and is_reacting(stosik[-1], char):
        stosik.pop()
    else:
        stosik.append(char)
# -1 compensates for the newline that survived from the input file.
print(len(stosik)-1)
# Part 2: remove every unit type (both cases) in turn, react the remainder,
# and report the shortest resulting polymer length.
minimum = len(word)
for x in ascii_lowercase:
    stosik = []
    for char in word.replace(x, '').replace(x.upper(), ''):
        if stosik and is_reacting(stosik[-1], char):
            stosik.pop()
        else:
            stosik.append(char)
    minimum = min(len(stosik)-1, minimum)
print(minimum)
| [
"szymon.sapkowski@samba.tv"
] | szymon.sapkowski@samba.tv |
0e3558e47561e850419df0c5701c93bfd1818048 | 2772f804bae2bf1dad1c9fcab435c98696465c65 | /二刷+题解/每日一题/minCameraCover.py | aba1966661634456e10dc88a4eea8520e49f8eee | [] | no_license | 1oser5/LeetCode | 75e15a2f7a1a7de1251fe5f785ad06a58b4b8889 | 40726506802d2d60028fdce206696b1df2f63ece | refs/heads/master | 2021-07-19T04:40:17.637575 | 2020-09-30T00:16:39 | 2020-09-30T00:16:39 | 211,662,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # -*- encoding: utf-8 -*-
'''
@File : minCameraCover.py
@Time : 2020/09/22 08:52:25
@Author : Xia
@Version : 1.0
@Contact : snoopy98@163.com
@License : (C)Copyright 2019-2020, HB.Company
@Desc : None
'''
# here put the import lib
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # Camera count for the current invocation; reset at the start of each call.
    res = 0

    def minCameraCover(self, root: 'TreeNode') -> int:
        """Minimum number of cameras needed so every node is monitored.

        Greedy post-order DP. dfs returns the node's state:
          0 = uncovered (parent must take a camera),
          1 = covered, no camera here,
          2 = has a camera.
        A missing child counts as covered (state 1).

        Fixes two defects of the original: `self.res` is now reset per call
        (previously repeated calls on one instance accumulated counts), and
        the TreeNode annotation is a quoted forward reference so importing
        this module no longer requires TreeNode to be defined first.
        """
        self.res = 0  # reset so repeated calls don't accumulate
        def dfs(node):
            if not node:
                return 1
            left, right = dfs(node.left), dfs(node.right)
            if left == 0 or right == 0:
                # An uncovered child forces a camera at this node.
                self.res += 1
                return 2
            if left == 1 and right == 1:
                # Both children covered without cameras: leave this node
                # uncovered and let the parent handle it.
                return 0
            if (left + right) >= 3:
                # At least one child holds a camera, so this node is covered.
                return 1
            return -1  # unreachable: all state combinations handled above
        if dfs(root) == 0:
            # The root ended up uncovered and has no parent: add one camera.
            self.res += 1
        return self.res
if __name__ == '__main__':
pass | [
"snoopy98@163.com"
] | snoopy98@163.com |
e21c4b5c7730ecd49513a9820a31aa059f4ceb1c | 6d9e4ab7e786617042d5e09173144d95b88b142d | /typeidea/custom_site.py | 556c9fb026ecb56ca0deeddeefe64c0d056d7187 | [] | no_license | Yzhanjiang/typeidea | 3fc44707e673a8769f582bf0a20ed838fd5bdae7 | 6b669f1d2112eb036af3be0bc0cbe788e6967dfa | refs/heads/master | 2021-01-23T10:34:38.542295 | 2018-10-03T02:52:14 | 2018-10-03T02:52:14 | 101,458,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | #!/usr/bin/env python
#coding:utf8
class custom_site():
pass | [
"251642766@qq.com"
] | 251642766@qq.com |
2c452f4e7e72702d7354e8fa50f100cf8bc38220 | 923ebf37cfd9311c1de0f0d7b96589fbe6e1b1d8 | /Functions/Internal Force Calculations/InterpolatePanelPressure.py | 25356ab420462379a0ef2f486c4d04a3ae71bf1f | [] | no_license | jsolvang/MUFP-Optimisation | f134f4792de372cf04a17e8fd8e09ea1039be89d | c05ad645638ca8ab032448b6c2a7506226d94cf7 | refs/heads/main | 2023-08-31T07:57:59.571608 | 2021-10-05T16:22:45 | 2021-10-05T16:22:45 | 413,427,395 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from scipy import interpolate
import numpy as np
# Interpolation function
def interpolate_panel_pressure(sim, column_pressure, f_rad):
panel_pressure_interp = np.zeros(shape=(2, len(f_rad), len(column_pressure[0, 0, :, 0]), 21))
for ii, _ in enumerate(column_pressure[:, 0, 0, 0]):
for jj, _ in enumerate(column_pressure[0, 0, :, 0]):
for kk, _ in enumerate(column_pressure[0, 0, 0, :]):
func_f = interpolate.interp1d(sim.wave_disc[:, 4], column_pressure[ii, :, jj, kk])
panel_pressure_interp[ii, :, jj, kk] = func_f(f_rad)
return panel_pressure_interp
| [
"67953495+jsolvang@users.noreply.github.com"
] | 67953495+jsolvang@users.noreply.github.com |
d9d6170dc9afb8e6db2dc891ea9ed441996d7528 | 280086eb5c09278a0b558c11b7b9b08883e9a65d | /funstuff/funstuff.py | 1879fffc627615eeb6861ec4ded98178bb7376d8 | [] | no_license | TechStar123/Kurvoid | fa7e139da6e83fc739c1fb12bdad482c2917a18d | e7b62f43b0e87607a086e4ed3f07e6160efa796f | refs/heads/main | 2023-02-12T13:02:57.069005 | 2021-01-12T01:04:31 | 2021-01-12T01:04:31 | 328,828,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,856 | py | from redbot.core import commands
from redbot.core.utils.chat_formatting import pagify
import random
from typing import Literal
from redbot.core import Config, bank, commands, checks
from redbot.core.utils import AsyncIter
from redbot.core.errors import BalanceTooHigh
import asyncio
import discord
from .animals import Animal, racers
from redbot.core import commands
import datetime
import time
from enum import Enum
from random import randint, choice
from typing import Final
import urllib.parse
import aiohttp
import discord
from redbot.core import commands
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
from redbot.core.utils.chat_formatting import (
bold,
escape,
italics,
humanize_number,
humanize_timedelta,
)
# (emoji, movement archetype) pairs used for "zoo" mode races.
# The archetype string selects a movement formula in Animal._calculate_movement:
# slow/fast/steady/abberant/predator/special.  NOTE: this module-level tuple
# shadows the `racers` imported from .animals at the top of the file.
racers = (
    (":rabbit2:", "fast"),
    (":monkey:", "fast"),
    (":cat2:", "fast"),
    (":mouse2:", "slow"),
    (":chipmunk:", "fast"),
    (":rat:", "fast"),
    (":dove:", "fast"),
    (":bird:", "fast"),
    (":dromedary_camel:", "steady"),
    (":camel:", "steady"),
    (":dog2:", "steady"),
    (":poodle:", "steady"),
    (":racehorse:", "steady"),
    (":ox:", "abberant"),
    (":cow2:", "abberant"),
    (":elephant:", "abberant"),
    (":water_buffalo:", "abberant"),
    (":ram:", "abberant"),
    (":goat:", "abberant"),
    (":sheep:", "abberant"),
    (":leopard:", "predator"),
    (":tiger2:", "predator"),
    (":dragon:", "special"),
    (":unicorn:", "special"),
    (":turtle:", "slow"),
    (":bug:", "slow"),
    (":rooster:", "slow"),
    (":snail:", "slow"),
    (":scorpion:", "slow"),
    (":crocodile:", "slow"),
    (":pig2:", "slow"),
    (":turkey:", "slow"),
    (":duck:", "slow"),
    (":baby_chick:", "slow"),
)
class Animal:
    """A single racer on the text track.

    ``_type`` selects a movement archetype (slow, fast, steady, abberant,
    predator, special).  The two "special" racers — dragon and unicorn —
    each have an individual pattern, distinguished by their emoji.
    """

    def __init__(self, emoji, _type):
        self.emoji = emoji
        self._type = _type
        self.track = "• " * 20  # the visual track the emoji travels along
        self.position = 80  # clamped down to the real track length on first move
        self.turn = 0
        self.current = self.track + self.emoji

    def move(self):
        """Advance the racer one turn and return the rendered track string."""
        self._update_postion()  # NOTE: historical typo in the name, kept for compatibility
        self.turn += 1
        return self.current

    def _update_postion(self):
        # Rebuild the track with the emoji shifted `distance` characters
        # toward index 0 (the finish line on the left).
        distance = self._calculate_movement()
        self.current = "".join(
            (
                self.track[: max(0, self.position - distance)],
                self.emoji,
                self.track[max(0, self.position - distance) :],
            )
        )
        self.position = self._get_position()

    def _get_position(self):
        return self.current.find(self.emoji)

    def _calculate_movement(self):
        """Return how many characters this racer advances this turn."""
        if self._type == "slow":
            return random.randint(1, 3) * 3
        elif self._type == "fast":
            return random.randint(0, 4) * 3
        elif self._type == "steady":
            return 2 * 3
        elif self._type == "abberant":
            # 11% chance of a big jump, otherwise a small shuffle.
            if random.randint(1, 100) >= 90:
                return 5 * 3
            else:
                return random.randint(0, 2) * 3
        elif self._type == "predator":
            # Rests on even turns, pounces on odd ones.
            if self.turn % 2 == 0:
                return 0
            else:
                return random.randint(2, 5) * 3
        elif self._type == "special" and self.emoji == ":unicorn:":
            # Bug fix: this branch previously compared ``_type`` against the
            # ":unicorn:" emoji, so it could never match (the unicorn's type
            # is "special") and the unicorn always moved like the dragon.
            if self.turn % 3:
                return random.choice([len("blue"), len("red"), len("green")]) * 3
            else:
                return 0
        else:
            # Dragon (and any unrecognised type): one huge burst on turn 1,
            # a rest on turn 2, then a slow crawl.
            if self.turn == 1:
                return 14 * 3
            elif self.turn == 2:
                return 0
            else:
                return random.randint(0, 2) * 3
# Libs
# Fallback so the cog loads on both old discord.py (no commands.Cog) and new.
BaseCog = getattr(commands, "Cog", object)
class Avatar(BaseCog):
    """Get user's avatar URL."""

    @commands.command()
    async def avatar(self, ctx, *, user: discord.Member=None):
        """Returns user avatar URL.
        User argument can be user mention, nickname, username, user ID.
        Default to yourself when no argument is supplied.
        """
        # Fall back to the command invoker when no member was supplied.
        target = user or ctx.author
        # Animated avatars get a GIF link, static ones a PNG link.
        if target.is_avatar_animated():
            url = target.avatar_url_as(format="gif")
        else:
            url = target.avatar_url_as(static_format="png")
        await ctx.send("{}'s Avatar URL : {}".format(target.name, url))
_ = T_ = Translator("General", __file__)
class RPS(Enum):
    """Emoji representations of the three Rock-Paper-Scissors hands."""

    rock = "\N{MOYAI}"
    paper = "\N{PAGE FACING UP}"
    scissors = "\N{BLACK SCISSORS}\N{VARIATION SELECTOR-16}"
class RPSParser:
    """Converter turning a user-supplied string into an ``RPS`` member.

    ``self.choice`` holds the matching member, or ``None`` when the input
    is not one of rock / paper / scissors.
    """

    def __init__(self, argument):
        lookup = {
            "rock": RPS.rock,
            "paper": RPS.paper,
            "scissors": RPS.scissors,
        }
        # dict.get returns None for unrecognised input, matching the
        # original if/elif/else chain.
        self.choice = lookup.get(argument.lower())
MAX_ROLL: Final[int] = 2 ** 64 - 1
@cog_i18n(_)
class General(commands.Cog):
    """General commands."""

    # Temporarily rebind `_` to an identity function so the answers below
    # are stored verbatim (marked for extraction by the translation
    # tooling); the real translator is restored right after the list.
    global _
    _ = lambda s: s
    # Classic Magic 8-Ball answers, translated lazily at send time.
    ball = [
        _("As I see it, yes"),
        _("It is certain"),
        _("It is decidedly so"),
        _("Most likely"),
        _("Outlook good"),
        _("Signs point to yes"),
        _("Without a doubt"),
        _("Yes"),
        _("Yes – definitely"),
        _("You may rely on it"),
        _("Reply hazy, try again"),
        _("Ask again later"),
        _("Better not tell you now"),
        _("Cannot predict now"),
        _("Concentrate and ask again"),
        _("Don't count on it"),
        _("My reply is no"),
        _("My sources say no"),
        _("Outlook not so good"),
        _("Very doubtful"),
    ]
    _ = T_

    def __init__(self):
        super().__init__()
        # user.id -> perf_counter() start time for the stopwatch command
        self.stopwatches = {}

    async def red_delete_data_for_user(self, **kwargs):
        """ Nothing to delete """
        return

    @commands.command()
    async def choose(self, ctx, *choices):
        """Choose between multiple options.
        To denote options which include whitespace, you should use
        double quotes.
        """
        choices = [escape(c, mass_mentions=True) for c in choices if c]
        if len(choices) < 2:
            await ctx.send(_("Not enough options to pick from."))
        else:
            await ctx.send(choice(choices))

    @commands.command()
    async def roll(self, ctx, number: int = 100):
        """Roll a random number.
        The result will be between 1 and `<number>`.
        `<number>` defaults to 100.
        """
        author = ctx.author
        if 1 < number <= MAX_ROLL:
            n = randint(1, number)
            await ctx.send(
                "{author.mention} :game_die: {n} :game_die:".format(
                    author=author, n=humanize_number(n)
                )
            )
        elif number <= 1:
            await ctx.send(_("{author.mention} Maybe higher than 1? ;P").format(author=author))
        else:
            await ctx.send(
                _("{author.mention} Max allowed number is {maxamount}.").format(
                    author=author, maxamount=humanize_number(MAX_ROLL)
                )
            )

    @commands.command()
    async def flip(self, ctx, user: discord.Member = None):
        """Flip a coin... or a user.
        Defaults to a coin.
        """
        if user is not None:
            msg = ""
            if user.id == ctx.bot.user.id:
                user = ctx.author
                msg = _("Nice try. You think this is funny?\n How about *this* instead:\n\n")
            # Map each letter to its upside-down Unicode lookalike, then
            # reverse the string so the name reads "flipped".
            char = "abcdefghijklmnopqrstuvwxyz"
            tran = "ɐqɔpǝɟƃɥᴉɾʞlɯuodbɹsʇnʌʍxʎz"
            table = str.maketrans(char, tran)
            name = user.display_name.translate(table)
            char = char.upper()
            tran = "∀qƆpƎℲפHIſʞ˥WNOԀQᴚS┴∩ΛMX⅄Z"
            table = str.maketrans(char, tran)
            name = name.translate(table)
            await ctx.send(msg + "(╯°□°)╯︵ " + name[::-1])
        else:
            await ctx.send(_("*flips a coin and... ") + choice([_("HEADS!*"), _("TAILS!*")]))

    @commands.command()
    async def rps(self, ctx, your_choice: RPSParser):
        """Play Rock Paper Scissors."""
        author = ctx.author
        player_choice = your_choice.choice
        if not player_choice:
            return await ctx.send(
                _("This isn't a valid option. Try {r}, {p}, or {s}.").format(
                    r="rock", p="paper", s="scissors"
                )
            )
        red_choice = choice((RPS.rock, RPS.paper, RPS.scissors))
        # (player, bot) -> True if the player wins; ties are absent and
        # handled separately below.
        cond = {
            (RPS.rock, RPS.paper): False,
            (RPS.rock, RPS.scissors): True,
            (RPS.paper, RPS.rock): True,
            (RPS.paper, RPS.scissors): False,
            (RPS.scissors, RPS.rock): False,
            (RPS.scissors, RPS.paper): True,
        }

        if red_choice == player_choice:
            outcome = None  # Tie
        else:
            outcome = cond[(player_choice, red_choice)]
        if outcome is True:
            await ctx.send(
                _("{choice} You win {author.mention}!").format(
                    choice=red_choice.value, author=author
                )
            )
        elif outcome is False:
            await ctx.send(
                _("{choice} You lose {author.mention}!").format(
                    choice=red_choice.value, author=author
                )
            )
        else:
            await ctx.send(
                _("{choice} We're square {author.mention}!").format(
                    choice=red_choice.value, author=author
                )
            )

    @commands.command(name="8", aliases=["8ball"])
    async def _8ball(self, ctx, *, question: str):
        """Ask 8 ball a question.
        Question must end with a question mark.
        """
        if question.endswith("?") and question != "?":
            await ctx.send("`" + T_(choice(self.ball)) + "`")
        else:
            await ctx.send(_("That doesn't look like a question."))

    @commands.command(aliases=["sw"])
    async def stopwatch(self, ctx):
        """Start or stop the stopwatch."""
        author = ctx.author
        # First invocation starts the watch; the second stops it and reports.
        if author.id not in self.stopwatches:
            self.stopwatches[author.id] = int(time.perf_counter())
            await ctx.send(author.mention + _(" Stopwatch started!"))
        else:
            tmp = abs(self.stopwatches[author.id] - int(time.perf_counter()))
            tmp = str(datetime.timedelta(seconds=tmp))
            await ctx.send(
                author.mention + _(" Stopwatch stopped! Time: **{seconds}**").format(seconds=tmp)
            )
            self.stopwatches.pop(author.id, None)

    @commands.command()
    async def lmgtfy(self, ctx, *, search_terms: str):
        """Create a lmgtfy link."""
        search_terms = escape(urllib.parse.quote_plus(search_terms), mass_mentions=True)
        await ctx.send("https://lmgtfy.com/?q={}".format(search_terms))

    @commands.command(hidden=True)
    @commands.guild_only()
    async def hug(self, ctx, user: discord.Member, intensity: int = 1):
        """Because everyone likes hugs!
        Up to 10 intensity levels.
        """
        name = italics(user.display_name)
        if intensity <= 0:
            msg = "(っ˘̩╭╮˘̩)っ" + name
        elif intensity <= 3:
            msg = "(っ´▽`)っ" + name
        elif intensity <= 6:
            msg = "╰(*´︶`*)╯" + name
        elif intensity <= 9:
            msg = "(つ≧▽≦)つ" + name
        elif intensity >= 10:
            msg = "(づ ̄ ³ ̄)づ{} ⊂(´・ω・`⊂)".format(name)
        else:
            # For the purposes of "msg might not be defined" linter errors
            raise RuntimeError
        await ctx.send(msg)

    @commands.command()
    @commands.guild_only()
    @commands.bot_has_permissions(embed_links=True)
    async def serverinfo(self, ctx, details: bool = False):
        """
        Show server information.
        `details`: Shows more information when set to `True`.
        Default to False.
        """
        guild = ctx.guild
        passed = (ctx.message.created_at - guild.created_at).days
        created_at = _("Created on {date}. That's over {num} days ago!").format(
            date=guild.created_at.strftime("%d %b %Y %H:%M"),
            num=humanize_number(passed),
        )
        online = humanize_number(
            len([m.status for m in guild.members if m.status != discord.Status.offline])
        )
        total_users = humanize_number(guild.member_count)
        text_channels = humanize_number(len(guild.text_channels))
        voice_channels = humanize_number(len(guild.voice_channels))
        if not details:
            # Compact summary embed.
            data = discord.Embed(description=created_at, colour=await ctx.embed_colour())
            data.add_field(name=_("Region"), value=str(guild.region))
            data.add_field(name=_("Users online"), value=f"{online}/{total_users}")
            data.add_field(name=_("Text Channels"), value=text_channels)
            data.add_field(name=_("Voice Channels"), value=voice_channels)
            data.add_field(name=_("Roles"), value=humanize_number(len(guild.roles)))
            data.add_field(name=_("Owner"), value=str(guild.owner))
            data.set_footer(
                text=_("Server ID: ")
                + str(guild.id)
                + _(" • Use {command} for more info on the server.").format(
                    command=f"{ctx.clean_prefix}serverinfo 1"
                )
            )
            if guild.icon_url:
                data.set_author(name=guild.name, url=guild.icon_url)
                data.set_thumbnail(url=guild.icon_url)
            else:
                data.set_author(name=guild.name)
        else:

            # Format a byte count using 1024-based units (file sizes).
            def _size(num: int):
                for unit in ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
                    if abs(num) < 1024.0:
                        return "{0:.1f}{1}".format(num, unit)
                    num /= 1024.0
                return "{0:.1f}{1}".format(num, "YB")

            # Format a bit count using 1000-based units (bitrates).
            def _bitsize(num: int):
                for unit in ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
                    if abs(num) < 1000.0:
                        return "{0:.1f}{1}".format(num, unit)
                    num /= 1000.0
                return "{0:.1f}{1}".format(num, "YB")

            shard_info = (
                _("\nShard ID: **{shard_id}/{shard_count}**").format(
                    shard_id=humanize_number(guild.shard_id + 1),
                    shard_count=humanize_number(ctx.bot.shard_count),
                )
                if ctx.bot.shard_count > 1
                else ""
            )
            # Logic from: https://github.com/TrustyJAID/Trusty-cogs/blob/master/serverstats/serverstats.py#L159
            # label/emoji -> predicate counting members in that category
            online_stats = {
                _("Humans: "): lambda x: not x.bot,
                _(" • Bots: "): lambda x: x.bot,
                "\N{LARGE GREEN CIRCLE}": lambda x: x.status is discord.Status.online,
                "\N{LARGE ORANGE CIRCLE}": lambda x: x.status is discord.Status.idle,
                "\N{LARGE RED CIRCLE}": lambda x: x.status is discord.Status.do_not_disturb,
                "\N{MEDIUM WHITE CIRCLE}\N{VARIATION SELECTOR-16}": lambda x: (
                    x.status is discord.Status.offline
                ),
                "\N{LARGE PURPLE CIRCLE}": lambda x: any(
                    a.type is discord.ActivityType.streaming for a in x.activities
                ),
                "\N{MOBILE PHONE}": lambda x: x.is_on_mobile(),
            }
            member_msg = _("Users online: **{online}/{total_users}**\n").format(
                online=online, total_users=total_users
            )
            count = 1
            for emoji, value in online_stats.items():
                try:
                    num = len([m for m in guild.members if value(m)])
                except Exception as error:
                    print(error)
                    continue
                else:
                    member_msg += f"{emoji} {bold(humanize_number(num))} " + (
                        "\n" if count % 2 == 0 else ""
                    )
                count += 1

            # Human-readable names (with flags) for Discord voice regions.
            vc_regions = {
                "vip-us-east": _("__VIP__ US East ") + "\U0001F1FA\U0001F1F8",
                "vip-us-west": _("__VIP__ US West ") + "\U0001F1FA\U0001F1F8",
                "vip-amsterdam": _("__VIP__ Amsterdam ") + "\U0001F1F3\U0001F1F1",
                "eu-west": _("EU West ") + "\U0001F1EA\U0001F1FA",
                "eu-central": _("EU Central ") + "\U0001F1EA\U0001F1FA",
                "europe": _("Europe ") + "\U0001F1EA\U0001F1FA",
                "london": _("London ") + "\U0001F1EC\U0001F1E7",
                "frankfurt": _("Frankfurt ") + "\U0001F1E9\U0001F1EA",
                "amsterdam": _("Amsterdam ") + "\U0001F1F3\U0001F1F1",
                "us-west": _("US West ") + "\U0001F1FA\U0001F1F8",
                "us-east": _("US East ") + "\U0001F1FA\U0001F1F8",
                "us-south": _("US South ") + "\U0001F1FA\U0001F1F8",
                "us-central": _("US Central ") + "\U0001F1FA\U0001F1F8",
                "singapore": _("Singapore ") + "\U0001F1F8\U0001F1EC",
                "sydney": _("Sydney ") + "\U0001F1E6\U0001F1FA",
                "brazil": _("Brazil ") + "\U0001F1E7\U0001F1F7",
                "hongkong": _("Hong Kong ") + "\U0001F1ED\U0001F1F0",
                "russia": _("Russia ") + "\U0001F1F7\U0001F1FA",
                "japan": _("Japan ") + "\U0001F1EF\U0001F1F5",
                "southafrica": _("South Africa ") + "\U0001F1FF\U0001F1E6",
                "india": _("India ") + "\U0001F1EE\U0001F1F3",
                "dubai": _("Dubai ") + "\U0001F1E6\U0001F1EA",
                "south-korea": _("South Korea ") + "\U0001f1f0\U0001f1f7",
            }
            verif = {
                "none": _("0 - None"),
                "low": _("1 - Low"),
                "medium": _("2 - Medium"),
                "high": _("3 - High"),
                "extreme": _("4 - Extreme"),
            }

            features = {
                "ANIMATED_ICON": _("Animated Icon"),
                "BANNER": _("Banner Image"),
                "COMMERCE": _("Commerce"),
                "COMMUNITY": _("Community"),
                "DISCOVERABLE": _("Server Discovery"),
                "FEATURABLE": _("Featurable"),
                "INVITE_SPLASH": _("Splash Invite"),
                "MEMBER_LIST_DISABLED": _("Member list disabled"),
                "MEMBER_VERIFICATION_GATE_ENABLED": _("Membership Screening enabled"),
                "MORE_EMOJI": _("More Emojis"),
                "NEWS": _("News Channels"),
                "PARTNERED": _("Partnered"),
                "PREVIEW_ENABLED": _("Preview enabled"),
                "PUBLIC_DISABLED": _("Public disabled"),
                "VANITY_URL": _("Vanity URL"),
                "VERIFIED": _("Verified"),
                "VIP_REGIONS": _("VIP Voice Servers"),
                "WELCOME_SCREEN_ENABLED": _("Welcome Screen enabled"),
            }
            guild_features_list = [
                f"\N{WHITE HEAVY CHECK MARK} {name}"
                for feature, name in features.items()
                if feature in guild.features
            ]

            joined_on = _(
                "{bot_name} joined this server on {bot_join}. That's over {since_join} days ago!"
            ).format(
                bot_name=ctx.bot.user.name,
                bot_join=guild.me.joined_at.strftime("%d %b %Y %H:%M:%S"),
                since_join=humanize_number((ctx.message.created_at - guild.me.joined_at).days),
            )

            data = discord.Embed(
                description=(f"{guild.description}\n\n" if guild.description else "") + created_at,
                colour=await ctx.embed_colour(),
            )
            data.set_author(
                name=guild.name,
                icon_url="https://cdn.discordapp.com/emojis/457879292152381443.png"
                if "VERIFIED" in guild.features
                else "https://cdn.discordapp.com/emojis/508929941610430464.png"
                if "PARTNERED" in guild.features
                else discord.Embed.Empty,
            )
            if guild.icon_url:
                data.set_thumbnail(url=guild.icon_url)
            data.add_field(name=_("Members:"), value=member_msg)
            data.add_field(
                name=_("Channels:"),
                value=_(
                    "\N{SPEECH BALLOON} Text: {text}\n"
                    "\N{SPEAKER WITH THREE SOUND WAVES} Voice: {voice}"
                ).format(text=bold(text_channels), voice=bold(voice_channels)),
            )
            data.add_field(
                name=_("Utility:"),
                value=_(
                    "Owner: {owner}\nVoice region: {region}\nVerif. level: {verif}\nServer ID: {id}{shard_info}"
                ).format(
                    owner=bold(str(guild.owner)),
                    region=f"**{vc_regions.get(str(guild.region)) or str(guild.region)}**",
                    verif=bold(verif[str(guild.verification_level)]),
                    id=bold(str(guild.id)),
                    shard_info=shard_info,
                ),
                inline=False,
            )
            data.add_field(
                name=_("Misc:"),
                value=_(
                    "AFK channel: {afk_chan}\nAFK timeout: {afk_timeout}\nCustom emojis: {emoji_count}\nRoles: {role_count}"
                ).format(
                    afk_chan=bold(str(guild.afk_channel))
                    if guild.afk_channel
                    else bold(_("Not set")),
                    afk_timeout=bold(humanize_timedelta(seconds=guild.afk_timeout)),
                    emoji_count=bold(humanize_number(len(guild.emojis))),
                    role_count=bold(humanize_number(len(guild.roles))),
                ),
                inline=False,
            )
            if guild_features_list:
                data.add_field(name=_("Server features:"), value="\n".join(guild_features_list))
            if guild.premium_tier != 0:
                nitro_boost = _(
                    "Tier {boostlevel} with {nitroboosters} boosts\n"
                    "File size limit: {filelimit}\n"
                    "Emoji limit: {emojis_limit}\n"
                    "VCs max bitrate: {bitrate}"
                ).format(
                    boostlevel=bold(str(guild.premium_tier)),
                    nitroboosters=bold(humanize_number(guild.premium_subscription_count)),
                    filelimit=bold(_size(guild.filesize_limit)),
                    emojis_limit=bold(str(guild.emoji_limit)),
                    bitrate=bold(_bitsize(guild.bitrate_limit)),
                )
                data.add_field(name=_("Nitro Boost:"), value=nitro_boost)
            if guild.splash:
                data.set_image(url=guild.splash_url_as(format="png"))
            data.set_footer(text=joined_on)

        await ctx.send(embed=data)

    @commands.command()
    async def urban(self, ctx, *, word):
        """Search the Urban Dictionary.
        This uses the unofficial Urban Dictionary API.
        """

        try:
            url = "https://api.urbandictionary.com/v0/define"

            params = {"term": str(word).lower()}

            headers = {"content-type": "application/json"}

            async with aiohttp.ClientSession() as session:
                async with session.get(url, headers=headers, params=params) as response:
                    data = await response.json()

        except aiohttp.ClientError:
            await ctx.send(
                _("No Urban Dictionary entries were found, or there was an error in the process.")
            )
            return

        if data.get("error") != 404:
            if not data.get("list"):
                return await ctx.send(_("No Urban Dictionary entries were found."))
            if await ctx.embed_requested():
                # a list of embeds
                embeds = []
                for ud in data["list"]:
                    embed = discord.Embed()
                    # Embed titles cap at 256 characters, descriptions at 2048.
                    title = _("{word} by {author}").format(
                        word=ud["word"].capitalize(), author=ud["author"]
                    )
                    if len(title) > 256:
                        title = "{}...".format(title[:253])
                    embed.title = title
                    embed.url = ud["permalink"]

                    description = _("{definition}\n\n**Example:** {example}").format(**ud)
                    if len(description) > 2048:
                        description = "{}...".format(description[:2045])
                    embed.description = description

                    embed.set_footer(
                        text=_(
                            "{thumbs_down} Down / {thumbs_up} Up, Powered by Urban Dictionary."
                        ).format(**ud)
                    )
                    embeds.append(embed)

                if embeds is not None and len(embeds) > 0:
                    await menu(
                        ctx,
                        pages=embeds,
                        controls=DEFAULT_CONTROLS,
                        message=None,
                        page=0,
                        timeout=30,
                    )
            else:
                # Plain-text fallback when embeds are not allowed.
                messages = []
                for ud in data["list"]:
                    ud.setdefault("example", "N/A")
                    message = _(
                        "<{permalink}>\n {word} by {author}\n\n{description}\n\n"
                        "{thumbs_down} Down / {thumbs_up} Up, Powered by Urban Dictionary."
                    ).format(word=ud.pop("word").capitalize(), description="{description}", **ud)
                    max_desc_len = 2000 - len(message)

                    description = _("{definition}\n\n**Example:** {example}").format(**ud)
                    if len(description) > max_desc_len:
                        description = "{}...".format(description[: max_desc_len - 3])

                    message = message.format(description=description)
                    messages.append(message)

                if messages is not None and len(messages) > 0:
                    await menu(
                        ctx,
                        pages=messages,
                        controls=DEFAULT_CONTROLS,
                        message=None,
                        page=0,
                        timeout=30,
                    )
        else:
            await ctx.send(
                _("No Urban Dictionary entries were found, or there was an error in the process.")
            )
class Penis(commands.Cog):
    """Penis related commands."""

    def __init__(self):
        pass

    @commands.command()
    async def penis(self, ctx, *users: discord.Member):
        """Detects user's penis length
        This is 100% accurate.
        Enter multiple users for an accurate comparison!"""
        # Seed the RNG with each user's ID so results are stable per user,
        # then restore the global RNG state so other cogs are unaffected.
        saved_state = random.getstate()
        sizes = {}
        for member in users:
            random.seed(member.id)
            sizes[member] = "8{}D".format("=" * random.randint(0, 30))
        random.setstate(saved_state)
        # Same ordering as before: lexicographic sort on the drawn string.
        ordered = sorted(sizes.items(), key=lambda pair: pair[1])
        output = "".join(
            "**{}'s size:**\n{}\n".format(member.display_name, dong)
            for member, dong in ordered
        )
        for page in pagify(output):
            await ctx.send(page)
# Metadata for the Race cog below.
__author__ = "Redjumpman"
__version__ = "2.0.15"
# Default per-guild Config values for the Race cog.
guild_defaults = {
    "Wait": 60,
    "Mode": "normal",
    "Prize": 100,
    "Pooling": False,
    "Payout_Min": 0,
    "Bet_Multiplier": 2,
    "Bet_Min": 10,
    "Bet_Max": 50,
    "Bet_Allowed": True,
    "Games_Played": 0,
}

# First, Second, and Third place wins
member_defaults = {"Wins": {"1": 0, "2": 0, "3": 0}, "Losses": 0}
class Race(commands.Cog):
"""Cog for racing animals"""
    def __init__(self):
        # Persistent per-guild/per-member storage.
        self.config = Config.get_conf(self, 5074395009, force_registration=True)
        self.config.register_guild(**guild_defaults)
        self.config.register_member(**member_defaults)
        # Volatile single-race state (reset by clear_local).  NOTE: this is
        # global across guilds, so only one race can run at a time bot-wide.
        self.active = False
        self.started = False
        self.winners = []
        self.players = []
        self.bets = {}
    async def red_delete_data_for_user(
        self, *, requester: Literal["discord", "owner", "user", "user_strict"], user_id: int
    ):
        """GDPR handler: wipe the member's stats in every guild they appear in."""
        all_members = await self.config.all_members()
        # AsyncIter yields control periodically so large member maps don't
        # block the event loop.
        async for guild_id, guild_data in AsyncIter(all_members.items(), steps=100):
            if user_id in guild_data:
                await self.config.member_from_ids(guild_id, user_id).clear()
@commands.group()
@commands.guild_only()
    async def race(self, ctx):
        """Race related commands."""
        # Group parent: subcommands carry the actual behavior.
        pass
@race.command()
    async def start(self, ctx):
        """Begins a new race.
        You cannot start a new race until the active on has ended.
        If you are the only player in the race, you will race against
        your bot.
        The user who started the race is automatically entered into the race.
        """
        # Only one race may run at a time (self.active is bot-wide state).
        if self.active:
            return await ctx.send("A race is already in progress! Type `[p]race enter` to enter!")
        self.active = True
        self.players.append(ctx.author)
        wait = await self.config.guild(ctx.guild).Wait()
        current = await self.config.guild(ctx.guild).Games_Played()
        await self.config.guild(ctx.guild).Games_Played.set(current + 1)
        await ctx.send(
            f"🚩 A race has begun! Type {ctx.prefix}race enter "
            f"to join the race! 🚩\nThe race will begin in "
            f"{wait} seconds!\n\n**{ctx.author.mention}** entered the race!"
        )
        # Entry window: other members join via [p]race enter during this sleep.
        await asyncio.sleep(wait)
        self.started = True
        await ctx.send("🏁 The race is now in progress. 🏁")

        # run_game/_build_end_screen are defined elsewhere in this cog;
        # presumably run_game populates self.winners — confirm in full file.
        await self.run_game(ctx)

        settings = await self.config.guild(ctx.guild).all()
        currency = await bank.get_currency_name(ctx.guild)
        color = await ctx.embed_colour()
        msg, embed = self._build_end_screen(settings, currency, color)
        await ctx.send(content=msg, embed=embed)
        # Pays out prizes/bets, records stats, then resets race state.
        await self._race_teardown(settings)
@race.command()
async def stats(self, ctx, user: discord.Member = None):
"""Display your race stats."""
if not user:
user = ctx.author
color = await ctx.embed_colour()
user_data = await self.config.member(user).all()
player_total = sum(user_data["Wins"].values()) + user_data["Losses"]
server_total = await self.config.guild(ctx.guild).Games_Played()
try:
percent = round((player_total / server_total) * 100, 1)
except ZeroDivisionError:
percent = 0
embed = discord.Embed(color=color, description="Race Stats")
embed.set_author(name=f"{user}", icon_url=user.avatar_url)
embed.add_field(
name="Wins",
value=(
f"1st: {user_data['Wins']['1']}\n2nd: {user_data['Wins']['2']}\n3rd: {user_data['Wins']['3']}"
),
)
embed.add_field(name="Losses", value=f'{user_data["Losses"]}')
embed.set_footer(
text=(
f"You have played in {player_total} ({percent}%) races out "
f"of {server_total} total races on the server."
)
)
await ctx.send(embed=embed)
@race.command()
async def bet(self, ctx, bet: int, user: discord.Member):
"""Bet on a user in the race."""
if await self.bet_conditions(ctx, bet, user):
self.bets[user] = {"Bets": [(ctx.author, bet)]}
currency = await bank.get_currency_name(ctx.guild)
await bank.withdraw_credits(ctx.author, bet)
await ctx.send(f"{ctx.author.mention} placed a {bet} {currency} bet on {str(user)}.")
@race.command()
async def enter(self, ctx):
"""Allows you to enter the race.
This command will return silently if a race has already started.
By not repeatedly telling the user that they can't enter the race, this
prevents spam.
"""
if self.started:
return await ctx.send(
"A race has already started. Please wait for the first one to finish before entering or starting a race."
)
elif not self.active:
return await ctx.send("A race must be started before you can enter.")
elif ctx.author in self.players:
return await ctx.send("You have already entered the race.")
elif len(self.players) >= 14:
return await ctx.send("The maximum number of players has been reached.")
else:
self.players.append(ctx.author)
await ctx.send(f"{ctx.author.mention} has joined the race.")
@race.command(hidden=True)
@checks.admin_or_permissions(administrator=True)
    async def clear(self, ctx):
        """ONLY USE THIS COMMAND FOR DEBUG PURPOSES
        You shouldn't use this command unless the race is stuck
        or you are debugging."""
        # Drops all volatile race state; stats and settings are untouched.
        self.clear_local()
        await ctx.send("Race cleared")
@race.command()
@checks.admin_or_permissions(administrator=True)
    async def wipe(self, ctx):
        """This command will wipe ALL race data.
        You are given a confirmation dialog when using this command.
        If you decide to wipe your data, all stats and settings will be deleted.
        """
        await ctx.send(
            f"You are about to clear all race data including stats and settings. "
            f"If you are sure you wish to proceed, type `{ctx.prefix}yes`."
        )
        # Only accept an exact "<prefix>yes" / "<prefix>no" from the invoker
        # in the same channel.
        choices = (f"{ctx.prefix}yes", f"{ctx.prefix}no")
        check = lambda m: (m.author == ctx.author and m.channel == ctx.channel and m.content in choices)

        try:
            choice = await ctx.bot.wait_for("message", timeout=20.0, check=check)
        except asyncio.TimeoutError:
            return await ctx.send("No response. Race wipe cancelled.")

        if choice.content.lower() == f"{ctx.prefix}yes":
            await self.config.guild(ctx.guild).clear()
            await self.config.clear_all_members(ctx.guild)
            return await ctx.send("Race data has been wiped.")
        else:
            return await ctx.send("Race wipe cancelled.")
@race.command()
async def version(self, ctx):
"""Displays the version of race"""
await ctx.send(f"You are running race version {__version__}.")
@commands.group()
@checks.admin_or_permissions(administrator=True)
    async def setrace(self, ctx):
        """Race settings commands"""
        # Group parent: subcommands carry the actual behavior.
        pass
@setrace.command()
async def wait(self, ctx, wait: int):
"""Changes the wait time before a race starts.
This only affects the period where race is still waiting
for more participants to join the race."""
if wait < 0:
return await ctx.send("Really? You're an idiot.")
await self.config.guild(ctx.guild).Wait.set(wait)
await ctx.send(f"Wait time before a race begins is now {wait} seconds.")
@setrace.group(name="bet")
    async def _bet(self, ctx):
        """Bet settings for race"""
        # Group parent: subcommands carry the actual behavior.
        pass
@_bet.command(name="min")
async def _min(self, ctx, amount: int):
"""Sets the betting minimum."""
if amount < 0:
return await ctx.send("Come on now. Let's be reasonable.")
maximum = await self.config.guild(ctx.guild).Bet_Max()
if amount > maximum:
return await ctx.send(f"Minimum must be lower than the set max of {maximum}.")
await self.config.guild(ctx.guild).Bet_Min.set(amount)
await ctx.send(f"Minimum bet amount set to {amount}.")
@_bet.command(name="max")
async def _max(self, ctx, amount: int):
"""Sets the betting maximum."""
if amount < 0:
return await ctx.send("Come on now. Let's be reasonable.")
minimum = await self.config.guild(ctx.guild).Bet_Min()
if amount < minimum:
return await ctx.send(f"Maximum must be higher than the set min of {minimum}.")
await self.config.guild(ctx.guild).Bet_Max.set(amount)
await ctx.send(f"Maximum bet amount set to {amount}.")
@_bet.command()
async def multiplier(self, ctx, multiplier: float):
"""Sets the betting multiplier."""
if multiplier < 0:
return await ctx.send("So... you want them to lose money...when they win. I'm not doing that.")
if multiplier == 0:
return await ctx.send("That means they win nothing. Just turn off betting.")
await self.config.guild(ctx.guild).Bet_Multiplier.set(multiplier)
await ctx.send(f"Betting multiplier set to {multiplier}.")
@_bet.command()
async def toggle(self, ctx):
"""Toggles betting on and off."""
current = await self.config.guild(ctx.guild).Bet_Allowed()
await self.config.guild(ctx.guild).Bet_Allowed.set(not current)
await ctx.send(f"Betting is now {'OFF' if current else 'ON'}.")
@setrace.command()
async def mode(self, ctx, mode: str):
"""Changes the race mode
Race can either be in normal mode or zoo mode.
Normal Mode:
All racers are turtles.
Zoo Mode:
Racers are randomly selected from a list of animals with
different attributes.
"""
if mode.lower() not in ("zoo", "normal"):
return await ctx.send("Must select either `zoo` or `normal` as a mode.")
await self.config.guild(ctx.guild).Mode.set(mode.lower())
await ctx.send(f"Mode changed to {mode.lower()}")
@setrace.command()
async def prize(self, ctx, prize: int):
"""Sets the prize pool for winners.
Set the prize to 0 if you do not wish any credits to be distributed.
When prize pooling is enabled (see `setrace pool`) the prize will be
distributed as follows:
1st place 60%
2nd place 30%
3rd place 10%.
Example:
100 results in 60, 30, 10
130 results in 78, 39, 13
When prize pooling is disabled, only first place will win, and they take
100% of the winnings.
"""
if prize < 0:
return await ctx.send("... that's not how prizes work buddy.")
if prize == 0:
return await ctx.send("No prizes will be awarded to the winners.")
else:
currency = await bank.get_currency_name(ctx.guild)
await self.config.guild(ctx.guild).Prize.set(prize)
await ctx.send(f"Prize set for {prize} {currency}.")
@setrace.command(name="togglepool")
async def _tooglepool(self, ctx):
"""Toggles on/off prize pooling.
Makes it so that prizes are pooled between 1st, 2nd, and 3rd.
It's a 60/30/10 split rounded to the nearest whole number.
There must be at least four human players, otherwise, only first
place wins.
"""
pool = await self.config.guild(ctx.guild).Pooling()
await self.config.guild(ctx.guild).Pooling.set(not pool)
await ctx.send(f"Prize pooling is now {'OFF' if pool else 'ON'}.")
@setrace.command()
async def payoutmin(self, ctx, players: int):
    """Sets the number of players needed to payout prizes and bets.
    This sets the required number of players needed to payout prizes.
    If the number of racers aren't met, then nothing is paid out.
    If you want race to always pay out, then set players to 0.
    """
    if players < 0:
        return await ctx.send("I don't have time for this shit.")
    await self.config.guild(ctx.guild).Payout_Min.set(players)
    # 0 means "no minimum" — races always pay out.
    if players:
        await ctx.send(f"Races will only payout if there are {players} human players.")
    else:
        await ctx.send("Races will now always payout.")
async def stats_update(self):
    """Record a win (keyed by finishing position) or a loss for every racer."""
    placed = [racer for racer, _ in self.winners]
    for racer in self.players:
        member_conf = self.config.member(racer)
        if racer in placed:
            # Wins are stored under the 1-based finishing position.
            slot = str(placed.index(racer) + 1)
            wins = await member_conf.Wins.get_raw(slot)
            await member_conf.Wins.set_raw(slot, value=wins + 1)
        else:
            losses = await member_conf.Losses()
            await member_conf.Losses.set(losses + 1)
async def _race_teardown(self, settings):
    """Finish a race: record stats, pay prizes and bets, then reset state.

    Order matters — stats_update/distribute_prizes/bet_payouts all read
    self.players / self.winners / self.bets, which clear_local() wipes last.
    """
    await self.stats_update()
    await self.distribute_prizes(settings)
    await self.bet_payouts(settings)
    self.clear_local()
def clear_local(self):
    """Reset all per-race state so a fresh race can be started."""
    for container in (self.players, self.winners, self.bets):
        container.clear()
    self.active = False
    self.started = False
async def distribute_prizes(self, settings):
    """Pay prize money to the winners.

    Nothing is paid when the prize is zero or fewer players raced than
    Payout_Min. With pooling and 4+ players the pot splits 60/30/10;
    otherwise first place takes everything. Bots never receive credits.
    """
    if settings["Prize"] == 0 or (settings["Payout_Min"] > len(self.players)):
        return
    if settings["Pooling"] and len(self.players) > 3:
        first, second, third = self.winners
        for player, share in zip((first[0], second[0], third[0]), (0.6, 0.3, 0.1)):
            if player.bot:
                continue
            # CONSISTENCY FIX: cap at the bank's max balance exactly like the
            # winner-takes-all branch below, instead of letting BalanceTooHigh
            # propagate out of the teardown.
            try:
                await bank.deposit_credits(player, int(settings["Prize"] * share))
            except BalanceTooHigh as e:
                await bank.set_balance(player, e.max_balance)
    else:
        if self.winners[0][0].bot:
            return
        try:
            await bank.deposit_credits(self.winners[0][0], settings["Prize"])
        except BalanceTooHigh as e:
            await bank.set_balance(self.winners[0][0], e.max_balance)
async def bet_payouts(self, settings):
    """Pay out every wager that backed the first-place racer."""
    if not self.bets or not settings["Bet_Allowed"]:
        return
    multiplier = settings["Bet_Multiplier"]
    # BUG FIX: self.winners holds (jockey, animal) tuples. The old code
    # compared `jockey == first` against the whole tuple, which can never
    # match (see _get_bet_winners, which correctly compares to first[0]),
    # so winning bets were never paid.
    winning_jockey = self.winners[0][0]
    for user, wagers in self.bets.items():
        for jockey, bet in wagers["Bets"]:
            if jockey == winning_jockey:
                # deposit_credits takes the member object; the old code passed
                # user.id — TODO confirm against the bank API in use.
                await bank.deposit_credits(user, bet * multiplier)
async def bet_conditions(self, ctx, bet, user):
    """Validate a bet request.

    Returns True when the bet may be placed; otherwise sends the reason
    to the channel (or the author) and returns False.
    """
    if not self.active:
        await ctx.send("There isn't a race right now.")
        return False
    elif self.started:
        await ctx.author.send("You can't place a bet after the race has started.")
        return False
    elif user not in self.players:
        await ctx.send("You can't bet on someone who isn't in the race.")
        return False
    elif ctx.author in self.bets:
        await ctx.send("You have already entered a bet for the race.")
        return False
    # Separated the logic such that calls to config only happen if the statements
    # above pass.
    data = await self.config.guild(ctx.guild).all()
    allowed = data["Bet_Allowed"]
    minimum = data["Bet_Min"]
    maximum = data["Bet_Max"]
    if not allowed:
        await ctx.send("Betting has been turned off.")
        return False
    elif not await bank.can_spend(ctx.author, bet):
        await ctx.send("You do not have enough money to cover the bet.")
        # BUG FIX: this branch previously fell through and implicitly
        # returned None; return False explicitly like every other
        # failure path.
        return False
    elif minimum <= bet <= maximum:
        return True
    else:
        await ctx.send(f"Bet must not be lower than {minimum} or higher than {maximum}.")
        return False
def _build_end_screen(self, settings, currency, color):
    """Build the race-results embed and the mention string for the winners.

    Returns a ``(mentions, embed)`` tuple. ``self.winners`` holds two or
    three ``(member, animal)`` tuples depending on how many racers placed.
    """
    if len(self.winners) == 3:
        first, second, third = self.winners
    else:
        # Only two racers placed; there is no third-place field.
        first, second, = self.winners
        third = None
    payout_msg = self._payout_msg(settings, currency)
    footer = self._get_bet_winners(first[0])
    race_config = (
        f"Prize: {settings['Prize']} {currency}\n"
        f"Prize Pooling: {'ON' if settings['Pooling'] else 'OFF'}\n"
        f"Players needed for payout: {settings['Payout_Min']}\n"
        f"Betting Allowed: {'YES' if settings['Bet_Allowed'] else 'NO'}"
    )
    embed = discord.Embed(colour=color, title="Race Results")
    embed.add_field(name=f"{first[0].name} 🥇", value=first[1].emoji)
    embed.add_field(name=f"{second[0].name} 🥈", value=second[1].emoji)
    if third:
        embed.add_field(name=f"{third[0].name} 🥉", value=third[1].emoji)
    # Horizontal-rule field to separate placings from payout/settings.
    embed.add_field(name="-" * 90, value="\u200b", inline=False)
    embed.add_field(name="Payouts", value=payout_msg)
    embed.add_field(name="Settings", value=race_config)
    embed.set_footer(text=f"Bet winners: {footer}")
    # Mention only human finishers; the separators are chosen so the string
    # never starts with ", " even when first place was a bot.
    mentions = "" if first[0].bot else f"{first[0].mention}"
    mentions += "" if second[0].bot else f", {second[0].mention}" if not first[0].bot else f"{second[0].mention}"
    mentions += "" if third is None or third[0].bot else f", {third[0].mention}"
    return mentions, embed
def _payout_msg(self, settings, currency):
if settings["Prize"] == 0:
return "No prize money was distributed."
elif settings["Payout_Min"] > len(self.players):
return "Not enough racers to give prizes."
elif not settings["Pooling"] or len(self.players) < 4:
if self.winners[0][0].bot:
return f"{self.winners[0][0]} is the winner!"
return f"{self.winners[0][0]} received {settings['Prize']} {currency}."
if settings["Pooling"]:
msg = ""
first, second, third = self.winners
for player, percentage in zip((first[0], second[0], third[0]), (0.6, 0.3, 0.1)):
if player.bot:
continue
msg += f'{player.name} received {int(settings["Prize"] * percentage)} {currency}. '
return msg
def _get_bet_winners(self, winner):
bet_winners = []
for better in self.bets:
for jockey, _ in self.bets[better]["Bets"]:
if jockey == winner:
bet_winners.append(better.name)
return ", ".join(bet_winners) if bet_winners else ""
async def _game_setup(self, ctx):
    """Create the (Animal, jockey) pairs for the upcoming race.

    In "zoo" mode each racer gets a random entry from ``racers``; otherwise
    everyone rides a slow turtle. A lone player is given the bot as an
    opponent so there are always at least two racers.
    """
    mode = await self.config.guild(ctx.guild).Mode()
    users = self.players
    if mode == "zoo":
        players = [(Animal(*random.choice(racers)), user) for user in users]
        if len(players) == 1:
            players.append((Animal(*random.choice(racers)), ctx.bot.user))
    else:
        players = [(Animal(":turtle:", "slow"), user) for user in users]
        if len(players) == 1:
            players.append((Animal(":turtle:", "slow"), ctx.bot.user))
    return players
async def run_game(self, ctx):
    """Animate the race by repeatedly editing one message until all finish.

    An animal with ``position == 0`` has crossed the line; the first three
    to do so are appended to ``self.winners`` in finishing order.
    """
    players = await self._game_setup(ctx)
    setup = "\u200b\n" + "\n".join(
        f":carrot: **{animal.current}** 🏁[{jockey.name}]" for animal, jockey in players
    )
    track = await ctx.send(setup)
    while not all(animal.position == 0 for animal, jockey in players):
        # Pause between animation frames before redrawing the track.
        await asyncio.sleep(2.0)
        fields = []
        for animal, jockey in players:
            if animal.position == 0:
                # Already finished; keep its final frame unchanged.
                fields.append(f":carrot: **{animal.current}** 🏁 [{jockey.name}]")
                continue
            animal.move()
            fields.append(f":carrot: **{animal.current}** 🏁 [{jockey.name}]")
            if animal.position == 0 and len(self.winners) < 3:
                self.winners.append((jockey, animal))
        t = "\u200b\n" + "\n".join(fields)
        await track.edit(content=t)
| [
"noreply@github.com"
] | TechStar123.noreply@github.com |
3181dad9678f15a10294fae0a6fb7334ab5f4cba | 14ca48dc9252a779c0b82a511d7687a763192b8f | /listoperators.py | 0adbde1a046f544be8f504a4591f49b89c6766a6 | [
"MIT"
] | permissive | Coder4OO/listoperators | 398663d1650781263a1c28126869f1d30b94ef0e | 8f660f4fbec65a9c547e1cae55fcdce575a3e178 | refs/heads/main | 2023-06-27T02:36:36.893182 | 2021-07-24T15:18:25 | 2021-07-24T15:18:25 | 389,129,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from functools import reduce
import operator
def list_multiply(l):
    """Left-fold multiplication of ``l`` (raises TypeError on an empty list)."""
    return reduce(lambda acc, value: acc * value, l)
def list_divide(l):
    """Left-fold division: l[0] / l[1] / ... (raises TypeError on empty input).

    BUG FIX: ``operator.div`` does not exist in Python 3 (it was the
    Python 2 name); ``operator.truediv`` is the correct fold function.
    """
    return reduce(operator.truediv, l)
def list_subtract(l):
    """Left-fold subtraction: l[0] - l[1] - ... (TypeError on empty input)."""
    return reduce(lambda acc, value: acc - value, l)
def list_sum(l):
    """Return the sum of the numeric elements of ``l`` (0 for an empty list)."""
    total = 0
    for value in l:
        total += value
    return total
| [
"noreply@github.com"
] | Coder4OO.noreply@github.com |
e88de8a6f97600d2db0bca15862523e3391bb291 | 821e27e52b077edf9ba9f30ee770394d38b46412 | /cxr_covid_predictor/urls.py | 97f6b3d367ea1511afcb85006699d8a791d02322 | [] | no_license | dimdejesus/cxr_covid_predictor | 7e520d892a6f1babf0b7a505d9c7b83175f1fc78 | 2a38c9ce8b9b007fbe417c1228b0fba8b1bc2587 | refs/heads/main | 2023-08-15T17:53:46.473746 | 2021-10-04T12:36:16 | 2021-10-04T12:36:16 | 413,134,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | """cxr_covid_predictor URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routing table: admin site, landing page, and the prediction endpoint.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index, name='home'),
    path('predict', views.predict, name='predict')
]
# only in development
# Serve uploaded media through Django itself; production should use the
# web server for this instead.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| [
"ddejesus0320@gmail.com"
] | ddejesus0320@gmail.com |
db6ba2fc2635e56052c35ca36a819d6348f32bd3 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/common/Lib/ctypes/macholib/dylib.py | 55b791f15df2416f3ae4ab32269899edf999a3d8 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,276 | py | # 2017.05.04 15:31:20 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/ctypes/macholib/dylib.py
"""
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
DYLIB_RE = re.compile('(?x)\n(?P<location>^.*)(?:^|/)\n(?P<name>\n (?P<shortname>\\w+?)\n (?:\\.(?P<version>[^._]+))?\n (?:_(?P<suffix>[^._]+))?\n \\.dylib$\n)\n')
def dylib_info(filename):
    """Parse a dylib path into its named components.

    A dylib name can take one of the following four forms:
        Location/Name.SomeVersion_Suffix.dylib
        Location/Name.SomeVersion.dylib
        Location/Name_Suffix.dylib
        Location/Name.dylib

    Returns None when *filename* does not match, otherwise a dict with
    keys ``location``, ``name``, ``shortname``, ``version`` and
    ``suffix`` (the last two may be None when absent).
    """
    parsed = DYLIB_RE.match(filename)
    if parsed is None:
        return None
    return parsed.groupdict()
def test_dylib_info():
    """Self-test for dylib_info().

    BUG FIX: the decompiled ``raise <expr> or AssertionError`` statements
    were a decompiler artifact of plain ``assert <expr>`` (raising the
    truthy result of ``<expr> or AssertionError`` would actually be a
    TypeError). Restored the asserts as in the original CPython source.
    """
    def d(location=None, name=None, shortname=None, version=None, suffix=None):
        # Shorthand for the expected result mapping.
        return dict(location=location, name=name, shortname=shortname,
                    version=version, suffix=suffix)
    assert dylib_info('completely/invalid') is None
    assert dylib_info('completely/invalide_debug') is None
    assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
    assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
    assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
    assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
    assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
if __name__ == '__main__':
    # Run the self-test when the module is executed directly.
    test_dylib_info()
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\ctypes\macholib\dylib.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:31:20 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
0eb91115050dd84862b4b5adc45e51414a098dc9 | 5faa3f139f30c0d290e327e04e3fd96d61e2aabb | /mininet-wifi/SIGCOMM-2016/hybridVirtualPhysical.py | a318efac432e79db502409c7800249359668848f | [] | no_license | hongyunnchen/reproducible-research | c6dfc3cd3c186b27ab4cf25949470b48d769325a | ed3a7a01b84ebc9bea96c5b02e0c97705cc2f7c6 | refs/heads/master | 2021-05-07T08:24:09.586976 | 2017-10-31T13:08:05 | 2017-10-31T13:08:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,853 | py | #!/usr/bin/python
"""Code created to be presented with the paper titled:
"Rich Experimentation through Hybrid Physical-Virtual Software-Defined Wireless Networking Emulation"
authors: Ramon dos Reis Fontes and Christian Esteve Rothenberg"""
"""Topology
(2)ap2(3)
/ \
(3) (2)
wlan1(2)phyap1 ap3(4)wlan0
(4) (3)
\ /
(3)ap4(2) """
from mininet.net import Mininet
from mininet.node import RemoteController, OVSKernelSwitch, UserAP, Controller
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.node import Node
from mininet.log import setLogLevel
import os
import time
def topology():
    "Create a network."
    # NOTE(review): Python 2 script (print statements); runs against a live
    # Mininet-WiFi install with real wireless hardware (USB dongle) — it has
    # root-level side effects and is not unit-testable.
    net = Mininet( controller=RemoteController, link=TCLink, accessPoint=UserAP )
    staList = []
    internetIface = 'eth0'
    usbDongleIface = 'wlan11'
    print "*** Creating nodes"
    # Ten dual-radio stations sta1..sta10 with sequential MACs/IPs.
    for n in range(10):
        staList.append(n)
        staList[n] = net.addStation( 'sta%s' % (n+1), wlans=2, mac='00:00:00:00:00:%s' % (n+1), ip='192.168.0.%s/24' % (n+1) )
    # Hybrid setup: one physical AP (backed by the USB wlan11 dongle) plus
    # three emulated APs arranged around it (see the diagram at file top).
    phyap1 = net.addPhysicalBaseStation( 'phyap1', protocols='OpenFlow13', ssid='Sigcomm-2016-Mininet-WiFi', mode= 'g', channel= '1', position='50,115,0', phywlan=usbDongleIface )
    ap2 = net.addAccessPoint( 'ap2', protocols='OpenFlow13', ssid='ap-ssid2', mode= 'g', channel= '11', position='100,175,0' )
    ap3 = net.addAccessPoint( 'ap3', protocols='OpenFlow13', ssid='ap-ssid3', mode= 'g', channel= '6', position='150,115,0' )
    ap4 = net.addAccessPoint( 'ap4', protocols='OpenFlow13', ssid='ap-ssid4', mode= 'g', channel= '11', position='100,55,0' )
    c5 = net.addController( 'c5', controller=RemoteController, port=6653 )
    sta11 = net.addStation( 'sta11', ip='10.0.0.111/8', position='60,100,0')
    h12 = net.addHost( 'h12', ip='10.0.0.109/8')
    # Root-namespace host used as the NAT gateway to the outside world.
    root = net.addHost( 'root', ip='10.0.0.254/8', inNamespace=False )
    print "*** Configuring wifi nodes"
    net.configureWifiNodes()
    print "*** Creating links"
    for sta in staList:
        net.addMesh(sta, ssid='meshNet')
    """uncomment to plot graph"""
    net.plotGraph(max_x=240, max_y=240)
    """Routing"""
    net.meshRouting('custom')
    """Seed"""
    net.seed(20)
    print "*** Associating and Creating links"
    net.addLink(phyap1, ap2)
    net.addLink(ap2, ap3)
    net.addLink(sta11, ap2)
    net.addLink(ap3, ap4)
    net.addLink(ap4, phyap1)
    net.addLink(root, ap3)
    net.addLink(phyap1, h12)
    print "*** Starting network"
    net.build()
    c5.start()
    phyap1.start( [c5] )
    ap2.start( [c5] )
    ap3.start( [c5] )
    ap4.start( [c5] )
    time.sleep(2)
    """output=all,flood"""
    # Static OpenFlow rules pushed via dpctl: rate-limit meter on ap3 and a
    # TCP port-80 redirect on phyap1 (10.0.0.109 -> the web server on sta11).
    ap3.cmd('dpctl unix:/tmp/ap3 meter-mod cmd=add,flags=1,meter=1 drop:rate=100')
    ap3.cmd('dpctl unix:/tmp/ap3 flow-mod table=0,cmd=add in_port=4,eth_type=0x800,ip_dst=10.0.0.100,meter:1 apply:output=flood')
    phyap1.cmd('dpctl unix:/tmp/phyap1 flow-mod table=0,cmd=add in_port=2,ip_dst=10.0.0.109,eth_type=0x800,ip_proto=6,tcp_dst=80 apply:set_field=tcp_dst:80,set_field=ip_dst:10.0.0.111,output=5')
    phyap1.cmd('dpctl unix:/tmp/phyap1 flow-mod table=0,cmd=add in_port=1,eth_type=0x800,ip_proto=6,tcp_src=80 apply:set_field=ip_src:10.0.0.109,output=2')
    fixNetworkManager( root, 'root-eth0' )
    startNAT(root, internetIface)
    sta11.cmd('ip route add default via 10.0.0.254')
    sta11.cmd('pushd /home/fontes; python3 -m http.server 80 &')
    # Give each mesh station a second IP on wlan1 and a default route via NAT.
    ip = 201
    for sta in staList:
        sta.setIP('10.0.0.%s/8' % ip, intf="%s-wlan1" % sta)
        sta.cmd('ip route add default via 10.0.0.254')
        ip+=1
    "*** Available models: RandomWalk, TruncatedLevyWalk, RandomDirection, RandomWayPoint, GaussMarkov, ReferencePoint, TimeVariantCommunity ***"
    net.startMobility(startTime=0, model='RandomWalk', max_x=200, max_y=200, min_v=0.1, max_v=0.2)
    print "*** Running CLI"
    CLI( net )
    print "*** Stopping network"
    net.stop()
def startNAT( root, inetIntf, subnet='10.0/8', localIntf = None ):
    """Start NAT/forwarding between Mininet and external network
    root: node to access iptables from
    inetIntf: interface for internet access
    subnet: Mininet subnet (default 10.0/8)"""
    # Identify the interface connecting to the mininet network
    if localIntf == None:
        localIntf = root.defaultIntf()
    # Flush any currently active rules
    root.cmd( 'iptables -F' )
    root.cmd( 'iptables -t nat -F' )
    # Create default entries for unmatched traffic
    root.cmd( 'iptables -P INPUT ACCEPT' )
    root.cmd( 'iptables -P OUTPUT ACCEPT' )
    root.cmd( 'iptables -P FORWARD DROP' )
    # Configure NAT
    # Drop hairpin traffic destined back into the subnet, allow the two
    # forwarding directions, then masquerade outbound packets.
    root.cmd( 'iptables -I FORWARD -i', localIntf, '-d', subnet, '-j DROP' )
    root.cmd( 'iptables -A FORWARD -i', localIntf, '-s', subnet, '-j ACCEPT' )
    root.cmd( 'iptables -A FORWARD -i', inetIntf, '-d', subnet, '-j ACCEPT' )
    root.cmd( 'iptables -t nat -A POSTROUTING -o ', inetIntf, '-j MASQUERADE' )
    # Instruct the kernel to perform forwarding
    root.cmd( 'sysctl net.ipv4.ip_forward=1' )
def fixNetworkManager( root, intf ):
    """Prevent network-manager from messing with our interface,
    by specifying manual configuration in /etc/network/interfaces
    root: a node in the root namespace (for running commands)
    intf: interface name"""
    cfile = '/etc/network/interfaces'
    line = '\niface %s inet manual\n' % intf
    config = open( cfile ).read()
    # Append the stanza only if it is not already present (idempotent).
    if ( line ) not in config:
        print '*** Adding', line.strip(), 'to', cfile
        with open( cfile, 'a' ) as f:
            f.write( line )
    # Probably need to restart network-manager to be safe -
    # hopefully this won't disconnect you
    root.cmd( 'service network-manager restart' )
if __name__ == '__main__':
    # Enable informational Mininet logging, then build and run the topology.
    setLogLevel( 'info' )
    topology()
| [
"ramonreisfontes@gmail.com"
] | ramonreisfontes@gmail.com |
3af9d4502ed80b048f1d39d29d825f2cbe48b44d | 374f5e3e2f65b2e9bf341beb76cafd37eff1fb3b | /Lib/site-packages/rails/response.py | 900ca6b69de38b9d481044cf07d19cf7e2a5bd05 | [
"BSD-3-Clause"
] | permissive | FredyJoelSis/herokuapi | 67300b8bae37eae023dfcb1b3fe2b3aa9b817da7 | e203e1c7ba70ff384d43db27f6fa8b1d56c73b76 | refs/heads/main | 2023-07-05T13:20:32.341841 | 2021-08-12T04:03:53 | 2021-08-12T04:03:53 | 395,184,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from webob import Response as WebobResponse
class Response(WebobResponse):
    """
    Extended response object.

    Thin subclass of :class:`webob.Response`; it currently adds no
    behaviour of its own and exists as the framework's response type.
    """
    pass
| [
"85387339+FredyJoelSis@users.noreply.github.com"
] | 85387339+FredyJoelSis@users.noreply.github.com |
d39dd211bfebe08a6e11256b0b84abca0233e94a | ec6b8a36bc3ae0ab9dff0117bffa2a4adff42681 | /venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/compat/v2/__init__.py | 7ae5e3e43d5d11c6f8bb55bb5de9d81ff99a29c6 | [] | no_license | niloofarhp/vehicle-detection-Recognition | ce523fbc9863c50d20491d5a54c18ce5a96aa6ef | d235269d87cf1c00d60d1882a1a9aafc982919c6 | refs/heads/main | 2023-07-07T06:50:39.364213 | 2021-08-01T17:47:12 | 2021-08-01T17:47:12 | 390,743,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,592 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import logging as _logging
import os as _os
import six as _six
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
from tensorflow.python.util.lazy_loader import LazyLoader as _LazyLoader
# pylint: disable=g-bad-import-order
from . import compat
from tensorflow._api.v2.compat.v2 import __internal__
from tensorflow._api.v2.compat.v2 import __operators__
from tensorflow._api.v2.compat.v2 import audio
from tensorflow._api.v2.compat.v2 import autodiff
from tensorflow._api.v2.compat.v2 import autograph
from tensorflow._api.v2.compat.v2 import bitwise
from tensorflow._api.v2.compat.v2 import config
from tensorflow._api.v2.compat.v2 import data
from tensorflow._api.v2.compat.v2 import debugging
from tensorflow._api.v2.compat.v2 import distribute
from tensorflow._api.v2.compat.v2 import dtypes
from tensorflow._api.v2.compat.v2 import errors
from tensorflow._api.v2.compat.v2 import experimental
from tensorflow._api.v2.compat.v2 import feature_column
from tensorflow._api.v2.compat.v2 import graph_util
from tensorflow._api.v2.compat.v2 import image
from tensorflow._api.v2.compat.v2 import io
from tensorflow._api.v2.compat.v2 import linalg
from tensorflow._api.v2.compat.v2 import lite
from tensorflow._api.v2.compat.v2 import lookup
from tensorflow._api.v2.compat.v2 import math
from tensorflow._api.v2.compat.v2 import mixed_precision
from tensorflow._api.v2.compat.v2 import mlir
from tensorflow._api.v2.compat.v2 import nest
from tensorflow._api.v2.compat.v2 import nn
from tensorflow._api.v2.compat.v2 import profiler
from tensorflow._api.v2.compat.v2 import quantization
from tensorflow._api.v2.compat.v2 import queue
from tensorflow._api.v2.compat.v2 import ragged
from tensorflow._api.v2.compat.v2 import random
from tensorflow._api.v2.compat.v2 import raw_ops
from tensorflow._api.v2.compat.v2 import saved_model
from tensorflow._api.v2.compat.v2 import sets
from tensorflow._api.v2.compat.v2 import signal
from tensorflow._api.v2.compat.v2 import sparse
from tensorflow._api.v2.compat.v2 import strings
from tensorflow._api.v2.compat.v2 import summary
from tensorflow._api.v2.compat.v2 import sysconfig
from tensorflow._api.v2.compat.v2 import test
from tensorflow._api.v2.compat.v2 import tpu
from tensorflow._api.v2.compat.v2 import train
from tensorflow._api.v2.compat.v2 import types
from tensorflow._api.v2.compat.v2 import version
from tensorflow._api.v2.compat.v2 import xla
from tensorflow.python.data.ops.optional_ops import OptionalSpec
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.eager.def_function import function
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.framework.device_spec import DeviceSpecV2 as DeviceSpec
from tensorflow.python.framework.dtypes import DType
from tensorflow.python.framework.dtypes import as_dtype
from tensorflow.python.framework.dtypes import bfloat16
from tensorflow.python.framework.dtypes import bool
from tensorflow.python.framework.dtypes import complex128
from tensorflow.python.framework.dtypes import complex64
from tensorflow.python.framework.dtypes import double
from tensorflow.python.framework.dtypes import float16
from tensorflow.python.framework.dtypes import float32
from tensorflow.python.framework.dtypes import float64
from tensorflow.python.framework.dtypes import half
from tensorflow.python.framework.dtypes import int16
from tensorflow.python.framework.dtypes import int32
from tensorflow.python.framework.dtypes import int64
from tensorflow.python.framework.dtypes import int8
from tensorflow.python.framework.dtypes import qint16
from tensorflow.python.framework.dtypes import qint32
from tensorflow.python.framework.dtypes import qint8
from tensorflow.python.framework.dtypes import quint16
from tensorflow.python.framework.dtypes import quint8
from tensorflow.python.framework.dtypes import resource
from tensorflow.python.framework.dtypes import string
from tensorflow.python.framework.dtypes import uint16
from tensorflow.python.framework.dtypes import uint32
from tensorflow.python.framework.dtypes import uint64
from tensorflow.python.framework.dtypes import uint8
from tensorflow.python.framework.dtypes import variant
from tensorflow.python.framework.importer import import_graph_def
from tensorflow.python.framework.indexed_slices import IndexedSlices
from tensorflow.python.framework.indexed_slices import IndexedSlicesSpec
from tensorflow.python.framework.load_library import load_library
from tensorflow.python.framework.load_library import load_op_library
from tensorflow.python.framework.ops import Graph
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import RegisterGradient
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.framework.ops import control_dependencies
from tensorflow.python.framework.ops import convert_to_tensor_v2_with_dispatch as convert_to_tensor
from tensorflow.python.framework.ops import device_v2 as device
from tensorflow.python.framework.ops import init_scope
from tensorflow.python.framework.ops import inside_function
from tensorflow.python.framework.ops import name_scope_v2 as name_scope
from tensorflow.python.framework.ops import no_gradient
from tensorflow.python.framework.sparse_tensor import SparseTensor
from tensorflow.python.framework.sparse_tensor import SparseTensorSpec
from tensorflow.python.framework.tensor_conversion_registry import register_tensor_conversion_function
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.framework.tensor_spec import TensorSpec
from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
from tensorflow.python.framework.tensor_util import constant_value as get_static_value
from tensorflow.python.framework.tensor_util import is_tf_type as is_tensor
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorflow.python.framework.type_spec import TypeSpec
from tensorflow.python.framework.type_spec import type_spec_from_value
from tensorflow.python.framework.versions import COMPILER_VERSION as __compiler_version__
from tensorflow.python.framework.versions import CXX11_ABI_FLAG as __cxx11_abi_flag__
from tensorflow.python.framework.versions import GIT_VERSION as __git_version__
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as __monolithic_build__
from tensorflow.python.framework.versions import VERSION as __version__
from tensorflow.python.module.module import Module
from tensorflow.python.ops.array_ops import batch_to_space_v2 as batch_to_space
from tensorflow.python.ops.array_ops import boolean_mask_v2 as boolean_mask
from tensorflow.python.ops.array_ops import broadcast_dynamic_shape
from tensorflow.python.ops.array_ops import broadcast_static_shape
from tensorflow.python.ops.array_ops import concat
from tensorflow.python.ops.array_ops import edit_distance
from tensorflow.python.ops.array_ops import expand_dims_v2 as expand_dims
from tensorflow.python.ops.array_ops import fill
from tensorflow.python.ops.array_ops import fingerprint
from tensorflow.python.ops.array_ops import gather_nd_v2 as gather_nd
from tensorflow.python.ops.array_ops import gather_v2 as gather
from tensorflow.python.ops.array_ops import identity
from tensorflow.python.ops.array_ops import meshgrid
from tensorflow.python.ops.array_ops import newaxis
from tensorflow.python.ops.array_ops import one_hot
from tensorflow.python.ops.array_ops import ones
from tensorflow.python.ops.array_ops import ones_like_v2 as ones_like
from tensorflow.python.ops.array_ops import pad_v2 as pad
from tensorflow.python.ops.array_ops import parallel_stack
from tensorflow.python.ops.array_ops import rank
from tensorflow.python.ops.array_ops import repeat
from tensorflow.python.ops.array_ops import required_space_to_batch_paddings
from tensorflow.python.ops.array_ops import reshape
from tensorflow.python.ops.array_ops import reverse_sequence_v2 as reverse_sequence
from tensorflow.python.ops.array_ops import searchsorted
from tensorflow.python.ops.array_ops import sequence_mask
from tensorflow.python.ops.array_ops import shape_n
from tensorflow.python.ops.array_ops import shape_v2 as shape
from tensorflow.python.ops.array_ops import size_v2 as size
from tensorflow.python.ops.array_ops import slice
from tensorflow.python.ops.array_ops import space_to_batch_v2 as space_to_batch
from tensorflow.python.ops.array_ops import split
from tensorflow.python.ops.array_ops import squeeze_v2 as squeeze
from tensorflow.python.ops.array_ops import stack
from tensorflow.python.ops.array_ops import strided_slice
from tensorflow.python.ops.array_ops import tensor_scatter_nd_update
from tensorflow.python.ops.array_ops import transpose_v2 as transpose
from tensorflow.python.ops.array_ops import unique
from tensorflow.python.ops.array_ops import unique_with_counts
from tensorflow.python.ops.array_ops import unstack
from tensorflow.python.ops.array_ops import where_v2 as where
from tensorflow.python.ops.array_ops import zeros
from tensorflow.python.ops.array_ops import zeros_like_v2 as zeros_like
from tensorflow.python.ops.batch_ops import batch_function as nondifferentiable_batch_function
from tensorflow.python.ops.check_ops import assert_equal_v2 as assert_equal
from tensorflow.python.ops.check_ops import assert_greater_v2 as assert_greater
from tensorflow.python.ops.check_ops import assert_less_v2 as assert_less
from tensorflow.python.ops.check_ops import assert_rank_v2 as assert_rank
from tensorflow.python.ops.check_ops import ensure_shape
from tensorflow.python.ops.clip_ops import clip_by_global_norm
from tensorflow.python.ops.clip_ops import clip_by_norm
from tensorflow.python.ops.clip_ops import clip_by_value
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case_v2 as case
from tensorflow.python.ops.control_flow_ops import cond_for_tf_v2 as cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import switch_case
from tensorflow.python.ops.control_flow_ops import tuple_v2 as tuple
from tensorflow.python.ops.control_flow_ops import while_loop_v2 as while_loop
from tensorflow.python.ops.critical_section_ops import CriticalSection
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.custom_gradient import grad_pass_through
from tensorflow.python.ops.custom_gradient import recompute_grad
from tensorflow.python.ops.functional_ops import foldl_v2 as foldl
from tensorflow.python.ops.functional_ops import foldr_v2 as foldr
from tensorflow.python.ops.functional_ops import scan_v2 as scan
from tensorflow.python.ops.gen_array_ops import bitcast
from tensorflow.python.ops.gen_array_ops import broadcast_to
from tensorflow.python.ops.gen_array_ops import extract_volume_patches
from tensorflow.python.ops.gen_array_ops import guarantee_const
from tensorflow.python.ops.gen_array_ops import identity_n
from tensorflow.python.ops.gen_array_ops import quantize_and_dequantize_v4
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse
from tensorflow.python.ops.gen_array_ops import scatter_nd
from tensorflow.python.ops.gen_array_ops import space_to_batch_nd
from tensorflow.python.ops.gen_array_ops import stop_gradient
from tensorflow.python.ops.gen_array_ops import tensor_scatter_add as tensor_scatter_nd_add
from tensorflow.python.ops.gen_array_ops import tensor_scatter_max as tensor_scatter_nd_max
from tensorflow.python.ops.gen_array_ops import tensor_scatter_min as tensor_scatter_nd_min
from tensorflow.python.ops.gen_array_ops import tensor_scatter_sub as tensor_scatter_nd_sub
from tensorflow.python.ops.gen_array_ops import tile
from tensorflow.python.ops.gen_array_ops import unravel_index
from tensorflow.python.ops.gen_control_flow_ops import no_op
from tensorflow.python.ops.gen_data_flow_ops import dynamic_partition
from tensorflow.python.ops.gen_data_flow_ops import dynamic_stitch
from tensorflow.python.ops.gen_linalg_ops import matrix_square_root
from tensorflow.python.ops.gen_logging_ops import timestamp
from tensorflow.python.ops.gen_math_ops import acosh
from tensorflow.python.ops.gen_math_ops import add
from tensorflow.python.ops.gen_math_ops import asin
from tensorflow.python.ops.gen_math_ops import asinh
from tensorflow.python.ops.gen_math_ops import atan
from tensorflow.python.ops.gen_math_ops import atan2
from tensorflow.python.ops.gen_math_ops import atanh
from tensorflow.python.ops.gen_math_ops import cos
from tensorflow.python.ops.gen_math_ops import cosh
from tensorflow.python.ops.gen_math_ops import greater
from tensorflow.python.ops.gen_math_ops import greater_equal
from tensorflow.python.ops.gen_math_ops import less
from tensorflow.python.ops.gen_math_ops import less_equal
from tensorflow.python.ops.gen_math_ops import logical_and
from tensorflow.python.ops.gen_math_ops import logical_not
from tensorflow.python.ops.gen_math_ops import logical_or
from tensorflow.python.ops.gen_math_ops import maximum
from tensorflow.python.ops.gen_math_ops import minimum
from tensorflow.python.ops.gen_math_ops import neg as negative
from tensorflow.python.ops.gen_math_ops import real_div as realdiv
from tensorflow.python.ops.gen_math_ops import sin
from tensorflow.python.ops.gen_math_ops import sinh
from tensorflow.python.ops.gen_math_ops import square
from tensorflow.python.ops.gen_math_ops import tan
from tensorflow.python.ops.gen_math_ops import tanh
from tensorflow.python.ops.gen_math_ops import truncate_div as truncatediv
from tensorflow.python.ops.gen_math_ops import truncate_mod as truncatemod
from tensorflow.python.ops.gen_string_ops import as_string
from tensorflow.python.ops.gradients_impl import HessiansV2 as hessians
from tensorflow.python.ops.gradients_impl import gradients_v2 as gradients
from tensorflow.python.ops.gradients_util import AggregationMethod
from tensorflow.python.ops.histogram_ops import histogram_fixed_width
from tensorflow.python.ops.histogram_ops import histogram_fixed_width_bins
from tensorflow.python.ops.init_ops_v2 import Constant as constant_initializer
from tensorflow.python.ops.init_ops_v2 import Ones as ones_initializer
from tensorflow.python.ops.init_ops_v2 import RandomNormal as random_normal_initializer
from tensorflow.python.ops.init_ops_v2 import RandomUniform as random_uniform_initializer
from tensorflow.python.ops.init_ops_v2 import Zeros as zeros_initializer
from tensorflow.python.ops.linalg_ops import eig
from tensorflow.python.ops.linalg_ops import eigvals
from tensorflow.python.ops.linalg_ops import eye
from tensorflow.python.ops.linalg_ops import norm_v2 as norm
from tensorflow.python.ops.logging_ops import print_v2 as print
from tensorflow.python.ops.manip_ops import roll
from tensorflow.python.ops.map_fn import map_fn_v2 as map_fn
from tensorflow.python.ops.math_ops import abs
from tensorflow.python.ops.math_ops import acos
from tensorflow.python.ops.math_ops import add_n
from tensorflow.python.ops.math_ops import argmax_v2 as argmax
from tensorflow.python.ops.math_ops import argmin_v2 as argmin
from tensorflow.python.ops.math_ops import cast
from tensorflow.python.ops.math_ops import complex
from tensorflow.python.ops.math_ops import cumsum
from tensorflow.python.ops.math_ops import divide
from tensorflow.python.ops.math_ops import equal
from tensorflow.python.ops.math_ops import exp
from tensorflow.python.ops.math_ops import floor
from tensorflow.python.ops.math_ops import linspace_nd as linspace
from tensorflow.python.ops.math_ops import matmul
from tensorflow.python.ops.math_ops import multiply
from tensorflow.python.ops.math_ops import not_equal
from tensorflow.python.ops.math_ops import pow
from tensorflow.python.ops.math_ops import range
from tensorflow.python.ops.math_ops import reduce_all
from tensorflow.python.ops.math_ops import reduce_any
from tensorflow.python.ops.math_ops import reduce_logsumexp
from tensorflow.python.ops.math_ops import reduce_max
from tensorflow.python.ops.math_ops import reduce_mean
from tensorflow.python.ops.math_ops import reduce_min
from tensorflow.python.ops.math_ops import reduce_prod
from tensorflow.python.ops.math_ops import reduce_sum
from tensorflow.python.ops.math_ops import round
from tensorflow.python.ops.math_ops import saturate_cast
from tensorflow.python.ops.math_ops import scalar_mul_v2 as scalar_mul
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import sign
from tensorflow.python.ops.math_ops import sqrt
from tensorflow.python.ops.math_ops import subtract
from tensorflow.python.ops.math_ops import tensordot
from tensorflow.python.ops.math_ops import truediv
from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensorSpec
from tensorflow.python.ops.script_ops import eager_py_func as py_function
from tensorflow.python.ops.script_ops import numpy_function
from tensorflow.python.ops.sort_ops import argsort
from tensorflow.python.ops.sort_ops import sort
from tensorflow.python.ops.special_math_ops import einsum
from tensorflow.python.ops.tensor_array_ops import TensorArray
from tensorflow.python.ops.tensor_array_ops import TensorArraySpec
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.ops.variable_scope import variable_creator_scope
from tensorflow.python.ops.variables import Variable
from tensorflow.python.ops.variables import VariableAggregationV2 as VariableAggregation
from tensorflow.python.ops.variables import VariableSynchronization
from tensorflow.python.platform.tf_logging import get_logger
# WRAPPER_PLACEHOLDER
# Hook external TensorFlow modules.
_current_module = _sys.modules[__name__]
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
setattr(_current_module, "summary", summary)
except ImportError:
_logging.warning(
"Limited tf.compat.v2.summary API due to missing TensorBoard "
"installation.")
# Lazy-load estimator.
_estimator_module = "tensorflow_estimator.python.estimator.api._v2.estimator"
estimator = _LazyLoader("estimator", globals(), _estimator_module)
_module_dir = _module_util.get_parent_dir_for_name(_estimator_module)
if _module_dir:
_current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "estimator", estimator)
if _os.environ.get("_PREFER_OSS_KERAS", False):
_keras_module = "keras.api._v2.keras"
keras = _LazyLoader("keras", globals(), _keras_module)
_module_dir = _module_util.get_parent_dir_for_name(_keras_module)
if _module_dir:
_current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "keras", keras)
else:
try:
from tensorflow.python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
# Explicitly import lazy-loaded modules to support autocompletion.
# pylint: disable=g-import-not-at-top
if not _six.PY2:
import typing as _typing
if _typing.TYPE_CHECKING:
from tensorflow_estimator.python.estimator.api._v2 import estimator
# pylint: enable=g-import-not-at-top
# We would like the following to work for fully enabling 2.0 in a 1.0 install:
#
# import tensorflow.compat.v2 as tf
# tf.enable_v2_behavior()
#
# This make this one symbol available directly.
from tensorflow.python.compat.v2_compat import enable_v2_behavior # pylint: disable=g-import-not-at-top
setattr(_current_module, "enable_v2_behavior", enable_v2_behavior)
# Add module aliases
if hasattr(_current_module, 'keras'):
# It is possible that keras is a lazily loaded module, which might break when
# actually trying to import it. Have a Try-Catch to make sure it doesn't break
# when it doing some very initial loading, like tf.compat.v2, etc.
if _os.environ.get("_PREFER_OSS_KERAS", False):
try:
_keras_package = "keras.api._v2.keras."
losses = _LazyLoader("losses", globals(), _keras_package + "losses")
metrics = _LazyLoader("metrics", globals(), _keras_package + "metrics")
optimizers = _LazyLoader(
"optimizers", globals(), _keras_package + "optimizers")
initializers = _LazyLoader(
"initializers", globals(), _keras_package + "initializers")
setattr(_current_module, "losses", losses)
setattr(_current_module, "metrics", metrics)
setattr(_current_module, "optimizers", optimizers)
setattr(_current_module, "initializers", initializers)
except ImportError:
pass
else:
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers
setattr(_current_module, "losses", losses)
setattr(_current_module, "metrics", metrics)
setattr(_current_module, "optimizers", optimizers)
setattr(_current_module, "initializers", initializers)
| [
"niloofar.hp@gmail.com"
] | niloofar.hp@gmail.com |
328378e9ec6da68b38cdd54ef721e443cd715fb1 | 801f29f66832f128ca0790c7b1b71739980d821b | /tensorflow-score/tf_load.py | 6694135eaca2dade06e98b97d9fd6180c67fb567 | [] | no_license | Tembryo/dota-analysis | b70bf06e2417110405ea03705a9f701ee452dc6f | ddae01ce8b1d438fb73a118e983012a43c76e9e2 | refs/heads/master | 2021-01-18T18:37:44.735093 | 2016-07-29T14:02:28 | 2016-07-29T14:02:28 | 44,391,794 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,628 | py | import csv
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import StandardScaler
import collections
import random
def importCSV(file):
reader = csv.DictReader(file)
result = []
for row in reader:
ok = True
for key in row:
if row[key] is None:
ok = False
break
if not ok:
continue
result.append(row)
return result
Dataset = collections.namedtuple('Dataset', ['labels','steamids', 'features', 'features_hero', 'feature_encoder', 'hero_encoder', 'feature_scaler', 'rowset'])
#fraction_test = 0.1
def generateData(rows, settings = None):
labels = [] #either MMRs for training or steamids for rating
heroes = []
own_team_heroes = [ [] for i in range(5) ]
enemy_team_heroes = [ [] for i in range(5) ]
parsed_features = []
for row in rows:
#print row
label = None
hero_dict = None
own_team = []
enemy_team = []
parsed_row = {}
for key in row:
if key == "MMR":
label = {"mmr": float(row[key])}
elif key == "steamid":
label = row[key]
elif key == "hero":
hero_dict = {"hero": row[key]}
elif key == "own_team":
own_team = row[key].split("#")
elif key == "enemy_team":
enemy_team = row[key].split("#")
else:
try:
parsed_row[key] = float(row[key])
except ValueError:
parsed_row[key] = row[key]
heroes.append(hero_dict)
for i in range(5):
if i < len(own_team):
own_team_heroes[i].append( {"hero": own_team[i]} )
else:
own_team_heroes[i].append( {} )
if i < len(enemy_team):
enemy_team_heroes[i].append( {"hero": enemy_team[i]} )
else:
enemy_team_heroes[i].append( {} )
labels.append(label)
parsed_features.append(parsed_row)
if settings is None:
hero_encoder = DictVectorizer(dtype=np.float32)
heroes_encoded = hero_encoder.fit_transform(heroes).todense()
else:
hero_encoder = settings["hero-encoder"]
heroes_encoded = hero_encoder.transform(heroes).todense()
own_team_encoded = np.zeros(heroes_encoded.shape, dtype=np.float32)
enemy_team_encoded = np.zeros(heroes_encoded.shape, dtype=np.float32)
for i in range(5):
own_team_encoded += hero_encoder.transform(own_team_heroes[i]).todense()
enemy_team_encoded += hero_encoder.transform(enemy_team_heroes[i]).todense()
collected_hero_features = np.concatenate([heroes_encoded, own_team_encoded, enemy_team_encoded], axis=1)
if settings is None:
feature_encoder = DictVectorizer(dtype=np.float32)
features_encoded = feature_encoder.fit_transform(parsed_features).todense()
scaler = StandardScaler().fit(features_encoded)
features_scaled = scaler.transform(features_encoded)
else:
feature_encoder = settings["feature-encoder"]
features_encoded = feature_encoder.transform(parsed_features).todense()
scaler = settings["feature-scaler"]
features_scaled = scaler.transform(features_encoded)
if settings is None or settings["load_labels"]:
labels_encoder = DictVectorizer(dtype=np.float32)
labels_encoded = labels_encoder.fit_transform(labels).todense()
steamids=[]
else:
#scoring, keep labels
labels_encoded = np.zeros((len(labels),1),dtype=np.float32)
steamids = labels
row_set = [i for i in range(len(labels))]
result = Dataset(
labels=labels_encoded,
steamids=steamids,
features=features_scaled,
features_hero=heroes_encoded,
feature_encoder=feature_encoder,
hero_encoder=hero_encoder,
feature_scaler = scaler,
rowset=row_set)
return result
batch_size = 1000
def get_batch(data, batched = True):
if batched:
indices = random.sample(data.rowset, batch_size)
batch = {
"features": data.features[indices],
"features_hero": data.features_hero[indices],
"labels": data.labels[indices]
}
else:
batch = {
"features": data.features,
"features_hero": data.features_hero,
"labels": data.labels
}
return batch | [
"fischerq.de@gmail.com"
] | fischerq.de@gmail.com |
d1ac0d2d109d40e4f9ba60558808dbce428e8e9c | 55d3f77fa214901b59a1dfd7e66c52369da1d948 | /hubi/models/wiz_create_productprice.py | 7d125efd73b71b5602ae7b67a2f67501d59ca782 | [] | no_license | arsenepoutsi/Projet_HUBI | 22f5fa87579dc801aa8f46f6ce420d67f43398f2 | a614cfae2535dba86442659a9b6baf49215ef2d4 | refs/heads/master | 2020-11-25T08:08:05.122700 | 2019-12-17T08:32:13 | 2019-12-17T08:32:13 | 228,568,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,411 | py | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from datetime import date, timedelta, datetime
from odoo.exceptions import ValidationError
class Wizard_productprice(models.TransientModel):
_name = "wiz.productprice"
_description = "Wizard creation of price"
category_id = fields.Many2one('product.category', 'Internal Category', required=True)
date_start = fields.Date('Start Date', help="Starting date for the pricelist item validation")
date_end = fields.Date('End Date', help="Ending valid for the pricelist item validation")
pricelist_ids = fields.Many2many("product.pricelist")
message = fields.Text(string="Information")
@api.multi
def create_price_item_old(self):
#pricelist = self.env['product.pricelist'].browse(context['active_ids'])[0]
pricelist_ids = self.env.context.get('active_ids', [])
for p in self.env['product.pricelist'].sudo().browse(pricelist_ids):
#for p in self.env.context["active_ids"]:
product_categ=self.env['product.template'].search([('categ_id','=', self.category_id.id)])
for categ in product_categ:
price_vals = {
'pricelist_id':p.id,
'product_tmpl_id': categ.id,
'applied_on':'1_product',
'min_quantity':'1',
'compute_price':'fixed',
'sequence':'5',
'base':'list_price',
'fixed_price':'0',
'price_discount':'0',
'price_max_margin':'0',
'percent_price':'0',
'price_surchage':'0',
'price_round':'0',
'price_min_margin':'0',
}
if self.env.context.get('tx_currency_id'):
price_vals['currency_id'] = self.env.context.get('tx_currency_id')
price = self.env['product.pricelist.item'].create(price_vals)
#price.post()
view_id = self.env["ir.model.data"].get_object_reference("hubi", "wiz_create_productprice_step2")
self.message = ("%s %s %s %s %s ") % ("Create Price OK"," / p.id= ",p.id, "/ self.category_id.id = ",self.category_id.id)
return {"type":"ir.actions.act_window",
"view_mode":"form",
"view_type":"form",
"views":[(view_id[1], "form")],
"res_id":self.id,
"target":"new",
"res_model":"wiz.productprice"
}
@api.multi
def create_price_item(self):
#'fixed_price':_('0'),
pricelist_ids = self.env.context.get('active_ids', [])
for p in self.env['product.pricelist'].sudo().browse(pricelist_ids):
product_count = 0
product_code = p.id
categ_code = self.category_id.id
company_code = p.company_id.id
query_args = {'product_code': product_code,'categ_code' : categ_code, 'company_id' : company_code}
query = """SELECT Product_template.id, Product_template.list_price, barcode,
case Product_template.weight when 0 then Product_template.list_price
else round(Product_template.list_price/Product_template.weight,3) end as price_w
FROM Product_template
inner join product_product on product_product.product_tmpl_id = Product_template.id
WHERE categ_id = %(categ_code)s
AND Product_template.type <> 'service'
AND Product_template.company_id = %(company_id)s
AND Product_template.id not in
(SELECT product_tmpl_id FROM product_pricelist_item
WHERE pricelist_id= %(product_code)s AND product_tmpl_id is not null)
AND product_product.id not in
(SELECT product_id FROM mrp_bom_line WHERE product_id is not null)
ORDER BY Product_template.id"""
self.env.cr.execute(query, query_args)
ids = [(r[0], r[1], r[2], r[3]) for r in self.env.cr.fetchall()]
for categ, price_total, barcode, price_weight in ids:
price_vals = {
'pricelist_id':p.id,
'product_tmpl_id': categ,
'applied_on':'1_product',
'min_quantity':'1',
'compute_price':'fixed',
'sequence':'5',
'base':'list_price',
'fixed_price': price_total,
'price_discount':'0',
'price_max_margin':'0',
'percent_price':'0',
'price_surchage':'0',
'price_round':'0',
'price_min_margin':'0',
'price_EAN13': barcode,
'date_start': self.date_start,
'date_end': self.date_end,
'price_weight': price_weight,
}
if self.env.context.get('tx_currency_id'):
price_vals['currency_id'] = self.env.context.get('tx_currency_id')
price = self.env['product.pricelist.item'].create(price_vals)
product_count = product_count + 1
view_id = self.env["ir.model.data"].get_object_reference("hubi", "wiz_create_productprice_step2")
self.message = ("%s %s %s %s %s %s %s") % ("Create Price OK for category = (",self.category_id.id, ") ", self.category_id.complete_name, " for ", product_count, " lines")
return {"type":"ir.actions.act_window",
"view_mode":"form",
"view_type":"form",
"views":[(view_id[1], "form")],
"res_id":self.id,
"target":"new",
"res_model":"wiz.productprice"
}
@api.model
def default_get(self, fields):
res = super(Wizard_productprice, self).default_get(fields)
res["pricelist_ids"] = self.env.context["active_ids"]
if not self.env.context["active_ids"]:
raise ValidationError("No select record")
return res
| [
"arsenepoutsi@gmail.com"
] | arsenepoutsi@gmail.com |
e1a0cc35139d2225a59c6a38f7203f7ca8deabee | e76683547c26a6f902e3c68ebfc207a1d9abe79f | /03_RequestsHandlingInDjango/board/board/middleware/entry_sleep_middleware.py | 94557765cc10f5aaaf8ae48a4ec622e23ac01426 | [] | no_license | antoshkoo/python_django | 03b19f3307b9aef1cc4c2bf2858b550334442ebe | ecb8b1c3edfe3e1af6a531cb237bb5d9f4e50601 | refs/heads/master | 2023-03-19T18:52:54.257903 | 2021-03-05T16:09:56 | 2021-03-05T16:09:56 | 339,375,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import time
class EntrySleepMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# time.sleep(1)
responce = self.get_response(request)
return responce
| [
"antoshkoo@gmail.com"
] | antoshkoo@gmail.com |
85acd8c33a4d86b37f68e76988f2f8be9b1fe762 | 9c3f071b8fe19850324444f9c06f7088de4778a6 | /Solar_Position_Determination/SolarPositionDetermination.py | 5e3da4ccb3dafc024b39a565c3f46a5738555e9d | [] | no_license | rayarka/EG3301R_Data_and_Programs | dfb20b7443ffb375b05b515a12ba7b9fe50b7efd | 13cd8bbed8c9addba313072d23b0fac0257f1da2 | refs/heads/main | 2023-02-16T18:20:14.180487 | 2021-01-19T03:34:33 | 2021-01-19T03:34:33 | 313,043,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | from pvlib import solarposition
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# CHANGE THE FOLLOWING PARAMETERS TO GET DIFFERENT RESULTS
# Timezone - Singapore's timezone taken from online tz database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)
timezone = 'Asia/Singapore'
# Latitude and Longitude
lat, lon = 1.4, 103.8
# Datetime Range
startdate = '2020-01-01 00:00:00'
enddate = '2021-01-01'
# Finding solar position for year 2020, in an hourly frequency. Date range can be changed if need be.
times = pd.date_range(startdate, enddate, closed='left', freq='H', tz=timezone)
# Use inbuilt solarposition function to derive a dataframe with information about solar position, such as zenith, elevation, azimuth, and equation of time.
solpos = solarposition.get_solarposition(times, lat, lon)
# Remove nighttime timings as they don't affect us. To do this, we remove values from solar position dataframe where the sun's elevation is below 0 i.e. Sun is below the horizon
solpos = solpos.loc[solpos['apparent_elevation'] > 0, :]
# The following lines are written so that we only calculate and retain the solar positions of the sun between 12 pm to 2 pm i.e. the time period around solar noon.
# Removing/commenting out the following 2 lines will show the whole range of solar positions while the sun is above the horizon.
solpos = solpos.loc[solpos.index.hour > 11, :]
solpos = solpos.loc[solpos.index.hour < 15, :]
# Given that we're trying to plot the position of the sun in the sky, a polar projection is the most appropriate graphical representation.
ax = plt.subplot(1, 1, 1, projection='polar')
# Plot the analemma loops by using a scatter diagram for each point of the year, and a colourbar to represent the distance of the plotted date from the start of the year
points = ax.scatter(np.radians(solpos.azimuth), solpos.apparent_zenith, s=2, label=None, c=solpos.index.dayofyear)
ax.figure.colorbar(points)
# For each hour in the solar position dataframe, get the azimuth and zenith positions. Then prepare to plot them for each hour's analemma loop.
for hour in np.unique(solpos.index.hour):
subset = solpos.loc[solpos.index.hour == hour, :]
r = subset.apparent_zenith
pos = solpos.loc[r.idxmin(), :]
ax.text(np.radians(pos['azimuth']), pos['apparent_zenith'], str(hour))
# Now that the hours are saved, highlight the key dates you want to show. For our case, because we're presenting this (originally) near Septermber 21, I've included that date.
for date in pd.to_datetime(['2020-06-21', '2020-09-21', '2020-12-21']):
times = pd.date_range(date, date+pd.Timedelta('24h'), freq='5min', tz=timezone)
solpos = solarposition.get_solarposition(times, lat, lon)
solpos = solpos.loc[solpos['apparent_elevation'] > 0, :]
label = date.strftime('%Y-%m-%d')
ax.plot(np.radians(solpos.azimuth), solpos.apparent_zenith, label=label)
# Add the legend of the dates
ax.figure.legend(loc='upper left')
# Orientate the polar diagram such that North is on top.
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
ax.set_rmax(90)
plt.show() | [
"noreply@github.com"
] | rayarka.noreply@github.com |
400e0cbc8ddf1af81c4576306a061e3557142533 | 94d1e542e44b508c88d76b00d709f9f6a1a16289 | /insert_data.py | 9dd4505adb238a5d9b3a3637539e657f97323054 | [] | no_license | Tom-Leee/Python_Chatbot | 8abbf4d37a9e2b08e8e48c1bce0149e2d01d39b5 | e64d23c4fd9c6fa763f639e32049b5a5ac0607ab | refs/heads/master | 2023-02-24T04:05:06.325229 | 2021-02-03T02:48:31 | 2021-02-03T02:48:31 | 334,850,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | import pymysql
db = None
try:
# pymysql.connect() 함수를 사용하면 DB 서버에 접속할 수 있습니다.
# connect() 함수의 인자는 다음과 같습니다.
# DB 호스트 정보에 맞게 입력
db = pymysql.connect(
# 데이터 베이스 서버가 존재하는 호스트 주소
host='localhost',
# 데이터베이스 로그인 유저
user='root',
# 데이터베이스 로그인 패스워드
passwd='joker77&',
# 데이터베이스 명
db='k_digital',
# 데이터베이스에서 사용할 charset 인코딩
charset='utf8'
)
print("DB 연결 성공 ")
# 테이블 삽입 sql 정의 -- ①
sql = '''
INSERT user(name, age, address) values('주뗌리', 30, 'Korea')
'''
# 테이블 생성 -- ②
# 앞서 생성한 user 테이블에 데이터를 삽입하기 위해 정의한 SQL 구문입니다.
with db.cursor() as cursor:
# cursor 객체의 execute() 함수로 SQL 구문을 실행합니다
cursor.execute(sql)
# DB호스트에 연결된 객체(db)에 commit()를 통해 수정된 내용을
# DB에 반영하여 줍니다.
db.commit()
except Exception as e:
# DB 연결 실패 시 오류 내용 출력
print(e)
finally:
# DB 가 연결된 경우에만 접속 닫기 시도
if db is not None:
# 데이터베이스 서버 닫기
db.close()
# print('table 생성 완료')
print('data 삽입 완료')
print("DB 연결 닫기 성공") | [
"joohyoung.t.lee@gmail.com"
] | joohyoung.t.lee@gmail.com |
9428c285bc4fc2aa86c8fd8e7488a758f45bfba7 | c7262c5612d6a0a100b854ec7b93ea6b87bd7357 | /tkgui/__init__.py | f8b69a0b0a1c5643c5da210236514b3d4a4f357a | [
"ISC"
] | permissive | Pidgeot/python-lnp | 71d8207d1b72d310b16e82a2d88f5ee9fa3127a2 | 6d1e9452155618367918945548a1cb41463a1e34 | refs/heads/master | 2023-03-17T00:40:48.893000 | 2023-03-10T14:52:52 | 2023-03-10T14:52:52 | 103,780,162 | 64 | 12 | ISC | 2023-09-05T16:52:23 | 2017-09-16T19:59:23 | Python | UTF-8 | Python | false | false | 28 | py | """TKinter UI for PyLNP."""
| [
"michael@birdiesoft.dk"
] | michael@birdiesoft.dk |
0f46f52ab83c0ecc174c3ed6da715bbc140ed3b1 | e48344cc54b1ad6e362b55a0aabf54b38738056c | /OpenCV_1_입출력과 GUI/Canny_with_GUI.py | e0da28be0bd83dbd5297a3a3955105e87450fa19 | [] | no_license | junsoofeb/Deep_Learning_for_CV | b458a4ca7baf7d2a6d3a47c536e054f2ba69076a | 47004821aeddbbe5a0e97b5da4b6a1f094ae7b04 | refs/heads/master | 2020-09-13T06:38:59.776614 | 2020-01-04T13:58:12 | 2020-01-04T13:58:12 | 222,683,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | import cv2 as cv
import numpy as np
import sys
fill_val = np.array([255, 255], np.uint8)
min = 50
max = 150
def trackbar_callback_min(min, im):
global img
img = cv.Canny(im, min, 100)
def trackbar_callback_max(max, im):
global img
img = cv.Canny(im, 100, max)
img = cv.imread("/home/junsoofeb/py_project/OpenCV/lena.jpg", 0) # grayscale
if img.size == 0:
print("img load err!!")
sys.exit()
cv.namedWindow("Lena and Canny edge!")
cv.createTrackbar("Min Val", "Lena and Canny edge!", 50, 255, lambda v: trackbar_callback_min(v, img))
cv.createTrackbar("Max Val", "Lena and Canny edge!", 150, 255, lambda v: trackbar_callback_max(v, img))
while True:
cv.imshow("Lena and Canny edge!", img)
key = cv.waitKey(3)
if key == 27:
break
cv.destroyAllWindows() | [
"noreply@github.com"
] | junsoofeb.noreply@github.com |
88648b48ace3d9b3b880559928464aa7fe7e236a | 4e992864069f301cefa7ab19d092a6484c0a2a2d | /test.py | 629cbf409f35a50a1be0d58fff87e6cb37a0ad4d | [] | no_license | HoanGnR/MCN_week3_prefixCodeImplementation | 9532e86c89fcb2c8b16b0f374cc7868b5d9d91d4 | 4facdabcfef816259953459e78cad5178986b78e | refs/heads/master | 2022-12-29T02:00:38.004734 | 2020-09-30T19:23:55 | 2020-09-30T19:23:55 | 300,035,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | from prefixcodetree import *
codebook = {
'x1': [0],
'x2': [1,0,0],
'x3': [1,0,1],
'x4': [1,1]
}
codeTree = PrefixCodeTree() # create a prefix code tree `codeTree`
# Initialize codeTree with codebook
for symbol in codebook:
codeTree.insert(codebook[symbol], symbol)
#print (n)
#print(str(codeTree.findNode(n,0)))
message = codeTree.decode(b'\xd2\x9f\x20', 21)
print (message)
# Below is explaination:
# data = b'\xd2\x9f\x20' --> 24 bit
# datalen = 21 --> use only 21 bit, 3 remaining bits are not used
#
# Decoded data should be :
#
# 11 0 100 10|1 0 0 11 11 1|00 100 000
# x4 x1 x2 x3 x1 x1 x4 x4 x2 x2
| [
"noreply@github.com"
] | HoanGnR.noreply@github.com |
88714f0674500d5b3ab3962a0d7892afb809344c | c277ce8e73ecfeaacd6e45c6302c0fd48655c1e6 | /QAC Python Challenges/Easy Challenges/'Near' Exercise.py | 97ce8a2319d4dec906a18dda6d175d05ce54be82 | [] | no_license | joshuahigginson1/Python_Study_QA | 32c50bb9e06d99d5a3baa5cc3584e790eebec84f | 2da4d3608091dbe05fda325f958e8ac24aa8d37d | refs/heads/master | 2022-11-30T21:54:14.579549 | 2020-08-13T13:54:10 | 2020-08-13T13:54:10 | 280,158,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | """
Task: When given two strings of letters,
determine whether the second can be made from the first by removing one letter.
The remaining letters must stay in the same order.
near ("reset", "rest") => true
near ("dragoon", "dragon") => true
near ("eave", "leave") => false
near ("sleet", "lets") => false
Dev Notes:
1. Convert each string to list.
delete each character index, and compare that that to the given second string.
for loop to do this.
If statement to return true or false.
"""
# Define Functions ---------------------------------------------------------------------------------------------------
def near_function(first, second):
first_string_list = list(first)
second_string_list = list(second)
first_string_list_destroy = first_string_list
for letters in range(len(first_string_list)):
first_string_list_destroy = list(first_string_list) # Without converting to a new list, python will share ID.
first_string_list_destroy.pop(letters) # Delete letter from that particular index.
if first_string_list_destroy == second_string_list:
return True
else:
continue
# Declare Variables --------------------------------------------------------------------------------------------------
first_string = "Bananas"
second_string = "Banana"
# Execute Code -------------------------------------------------------------------------------------------------------
if near_function(first_string, second_string):
print(f"The word: {second_string} fits into the word: {first_string}")
else:
print(f"The word: {second_string} does NOT fit into the word: {first_string}")
| [
"joshuahigginson1@gmail.com"
] | joshuahigginson1@gmail.com |
ae0a246b65ae31d255a3cc51cecfd9d393fe018a | 9c73dd3043f7db7c9ec76d560484e99ad134fdb6 | /students/Shirin_A/lesson08/assignment/tests/test_inventory.py | eac91903a6023b907b02159b4072e8250af9075c | [] | no_license | UWPCE-PythonCert-ClassRepos/py220-online-201904-V2 | 546b316025b680ca28d24b523663095398616b13 | ac12beeae8aa57135bbcd03ac7a4f977fa3bdb56 | refs/heads/master | 2022-12-10T03:14:25.514630 | 2019-06-11T02:14:17 | 2019-06-11T02:14:17 | 179,139,181 | 1 | 19 | null | 2022-12-08T01:43:38 | 2019-04-02T18:49:10 | Python | UTF-8 | Python | false | false | 920 | py | """
Autograde Lesson 8 assignment
"""
import pytest
import inventory as I
@pytest.fixture
def _add_furniture():
return ("../data/invoice_file.csv", "Elisa Miles", "LR04", "Leather Sofa", 25)
@pytest.fixture
def _single_customer():
return ([['LR01', 'Small lamp', 7.5],
['LR02', 'Television', 28.0],
['BR07', 'LED lamp', 5.5],
['KT08', 'Basic refrigerator', 40.0]])
def test_add_furniture(_add_furniture):
"""test add_furniture"""
response = I.add_furniture("../data/invoice_file.csv", "Elisa Miles",
"LR04", "Leather Sofa", 25)
assert response == _add_furniture
def test_single_customer(_single_customer):
"""test single_customer"""
create_invoice = I.single_customer("Susan Wong", "../data/invoice_file.csv")
response = create_invoice("../data/test_items.csv")
assert response == _single_customer
| [
"sirin_ruet@yahoo.com"
] | sirin_ruet@yahoo.com |
b280a34f704939651ee03930ae7366dd84668ae3 | a7b13784688d7ae5392ddd3332fde375419df8e8 | /setup.py | 272968036885bd430102f3c775818d1f1e4bf36e | [] | no_license | ArunSinghJ123/aws-python-boto3 | dcb0853e1497fcb6c43c0fe4704848b1f3891b0b | dd4b56d093aa2bc8ac1ac17fe5237a82fce8abae | refs/heads/master | 2021-04-18T18:50:01.039530 | 2020-04-07T05:00:07 | 2020-04-07T05:00:07 | 126,759,353 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import sys
from setuptools import setup
setup(
name="awshammer",
version="0.0.1",
author="Arun",
author_email="arunsinghkce@gmail.com",
description="Packaging boto3 aws scripts",
url="https://github.com/ArunSinghJ123/aws-python-boto3",
install_requires=['boto3', 'awscli'],
packages=['iam', 's3_bucketpolicy'],
classifiers=(
"Programming Language :: Python :: 2.7",
"Operating System :: Mac/Linux",
),
entry_points={
'console_scripts': ['iam = iam.roles:main',
'bucket = s3_bucketpolicy.s3_bucket_policy:main']}
)
| [
"arun@Arunsinghs-MBP.home"
] | arun@Arunsinghs-MBP.home |
21d3f37f0ebe4ec592d700a1d4acdf2080efe131 | c77a40408bc40dc88c466c99ab0f3522e6897b6a | /Programming_basics/Exercise_6/PasswordGenerator.py | d16198682725a9db48d9d7f698aaecd4211c4375 | [] | no_license | vbukovska/SoftUni | 3fe566d8e9959d390a61a4845381831929f7d6a3 | 9efd0101ae496290313a7d3b9773fd5111c5c9df | refs/heads/main | 2023-03-09T17:47:20.642393 | 2020-12-12T22:14:27 | 2021-02-16T22:14:37 | 328,805,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | num = int(input())
let = int(input())
for i_1 in range(1, num+1):
for i_2 in range(1, num+1):
for i_3 in range(97, 97+let):
for i_4 in range(97, 97+let):
for i_5 in range(max(i_1, i_2)+1, num+1):
print(f'{str(i_1)+str(i_2)+chr(i_3)+chr(i_4)+str(i_5)}', end=' ')
| [
"vbukovska@yahoo.com"
] | vbukovska@yahoo.com |
6d63bfb92c5a72e38f2a7c3c8ebbe32b7e9ad516 | 457e2f5b2a26877df739e314ec1560e8a3ecfb97 | /controllerMaker/controllerMaker.py | 80cee9d6f2d79933805ee707da01d112e70e8ee8 | [] | no_license | mappp7/tools | f6685d9a682bd540d59c1bff0cebb60f79fd6556 | c537e7648112c51ba4f44225418e773ee6b8be6c | refs/heads/master | 2021-01-14T16:40:44.450790 | 2020-10-30T05:30:27 | 2020-10-30T05:30:27 | 242,682,763 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,757 | py | #encoding=utf-8
#!/usr/bin/env python
#-------------------------------------------------------------------------------
#
# Dexter Rigging Team
#
# yunhyuk.jung
#
# 2017.11.21 ver.1.2.3
#-------------------------------------------------------------------------------
import os
import site
import maya.cmds as cmds
import maya.OpenMayaUI as omui
import maya.mel as mel
import json
# add sysPath
#site.addsitedir('/dexter/Cache_DATA/CRT/RiggingRnD/baseRig/')
import xml.etree.ElementTree as xml
from cStringIO import StringIO
from Qt import QtGui, QtCore, QtWidgets, load_ui
from Qt.QtGui import *
from Qt.QtCore import *
from Qt.QtWidgets import *
from baseRigUtil.undoCommand import undo
from baseRigUtil.homeNul import*
# ui Path Setting
basePath = os.path.abspath( __file__ + '/../' )
print basePath
uiFile = os.path.join( basePath, 'controllerMaker.ui' )
print uiFile
def maya_main_window():
    '''
    Return the Maya main window widget as a Python object
    '''
    # Bug fix: the OpenMayaUI helper class is ``MQtUtil``; the original code
    # referenced ``omui.MQtbaseRig.util`` (a botched search/replace of the
    # "baseRig" path insertion above) and raised AttributeError on every call.
    main_window_ptr = omui.MQtUtil.mainWindow()
    # NOTE(review): ``wrapInstance`` is not imported at the top of this file
    # (it normally comes from shiboken/shiboken2); presumably one of the
    # star-imports provides it -- confirm before relying on this helper.
    return wrapInstance(long(main_window_ptr), QtGui.QWidget)
class uiMainWindow( QtWidgets.QMainWindow ):
    def __init__(self, parent=None):
        """Load the .ui file, wire the buttons, and seed default state.

        ``temp_CON`` caches a template controller; ``mayaFolder`` starts as
        the current workspace directory and is shown in the export path field.
        """
        super(uiMainWindow, self).__init__(parent)
        self.ui = load_ui(uiFile)
        # Signal hookup and button icons must run after the UI is loaded.
        self.ButtonConnection()
        self.makerIcon()
        self.temp_CON = []
        self.mayaFolder = cmds.workspace(q=True, dir=True)
        if self.mayaFolder:
            self.ui.expConPath_LIE.setText( self.mayaFolder )
# buttonAction def Group
def ButtonConnection(self):
self.ui.Shape_Replace_BTN.clicked.connect(self.shapeReplace)
self.ui.Do_Resize_BTN.clicked.connect(self.controllerResize)
self.ui.FK_Control_Maker_BTN.clicked.connect(self.FKcontrolMaker)
self.ui.homeNull_BTN.clicked.connect(self.homeNull)
self.ui.mirrorCon_X_BTN.clicked.connect(self.mirrorControl)
self.ui.mirrorCon_Y_BTN.clicked.connect(self.mirrorControl)
self.ui.mirrorCon_Z_BTN.clicked.connect(self.mirrorControl)
self.ui.rotateCon_X_BTN.clicked.connect(self.rotateControl)
self.ui.rotateCon_Y_BTN.clicked.connect(self.rotateControl)
self.ui.rotateCon_Z_BTN.clicked.connect(self.rotateControl)
self.ui.curveClear_BTN.clicked.connect(self.curveClear)
self.ui.expConPath_BTN.clicked.connect(self.setMayaFolder)
self.ui.exportCON_BTN.clicked.connect(self.exportCON_JSON)
self.ui.importCON_BTN.clicked.connect(self.importCON_JSON)
self.ui.reset_BTN.clicked.connect(self.resetBTN_cmd)
self.ui.multiSel_BTN.clicked.connect(self.multiSelhierarchy)
self.ui.chainParent_BTN.clicked.connect(self.chainParents)
self.ui.crvConnect_BTN.clicked.connect(self.crvConnect)
for i in range(1,33):
eval( "self.ui.color_%s_BTN.clicked.connect(self.overrideColor)" %i )
for i in range(1,29):
self.con_Make = '%0*d' % ( 2, i )
eval( "self.ui.conMake_%s_BTN.clicked.connect(self.controllerMake)" %self.con_Make )
def makerIcon(self):
setIconPath = 'C:/tools/tools/controllerMaker/icon/'
self.ui.conMake_01_BTN.setIcon(QtGui.QIcon('%sbox.jpg' %setIconPath))
self.ui.conMake_02_BTN.setIcon(QtGui.QIcon('%scircle.jpg' %setIconPath))
self.ui.conMake_03_BTN.setIcon(QtGui.QIcon('%svolumeCircle.jpg' %setIconPath))
self.ui.conMake_04_BTN.setIcon(QtGui.QIcon('%scross.jpg' %setIconPath))
self.ui.conMake_05_BTN.setIcon(QtGui.QIcon('%sfatCross.jpg' %setIconPath))
self.ui.conMake_06_BTN.setIcon(QtGui.QIcon('%slocator.jpg' %setIconPath))
self.ui.conMake_07_BTN.setIcon(QtGui.QIcon('%ssphere.jpg' %setIconPath))
self.ui.conMake_08_BTN.setIcon(QtGui.QIcon('%soctagon.jpg' %setIconPath))
self.ui.conMake_09_BTN.setIcon(QtGui.QIcon('%svolumeOctagon.jpg' %setIconPath))
self.ui.conMake_10_BTN.setIcon(QtGui.QIcon('%srombus.jpg' %setIconPath))
self.ui.conMake_11_BTN.setIcon(QtGui.QIcon('%sroot.jpg' %setIconPath))
self.ui.conMake_12_BTN.setIcon(QtGui.QIcon('%shexagon.jpg' %setIconPath))
self.ui.conMake_13_BTN.setIcon(QtGui.QIcon('%ssquare.jpg' %setIconPath))
self.ui.conMake_14_BTN.setIcon(QtGui.QIcon('%spyramid.jpg' %setIconPath))
self.ui.conMake_15_BTN.setIcon(QtGui.QIcon('%sboxPin.jpg' %setIconPath))
self.ui.conMake_16_BTN.setIcon(QtGui.QIcon('%spin.jpg' %setIconPath))
self.ui.conMake_17_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_18_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_19_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_20_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_21_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_22_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_23_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_24_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_25_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_26_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_27_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
self.ui.conMake_28_BTN.setIcon(QtGui.QIcon('%snone.jpg' %setIconPath))
#self.ui.homeNull_BTN.setIcon(QtGui.QIcon('%shomeNull.jpg' %setIconPath))
    def overrideColor(self):
        """Apply the colour of the clicked swatch button to the selection.

        The colour index is recovered from the sender button's objectName
        ('color_<n>_BTN'); Maya override colours are 0-based, hence the -1.
        Viewport colouring targets the shape node when one exists, otherwise
        the transform itself; outliner colouring is delegated to outLinerCC.
        """
        buttonHandle = self.sender()
        btn = buttonHandle.objectName()
        btn_number = btn.split('_')
        sel_name = cmds.ls(sl=True)
        for i in sel_name:
            if self.ui.viewPort_CKB.isChecked() == True:
                try:
                    if cmds.listRelatives( i, s=True )[0] != None:
                        sel_Shape = cmds.listRelatives( i, s=True )[0]
                        cmds.setAttr("%s.overrideEnabled" %sel_Shape, 1)
                        cmds.setAttr("%s.overrideColor" %sel_Shape, int(btn_number[1])-1)
                # No shape node (listRelatives returned None) -> colour the
                # transform node directly instead.
                except:
                    cmds.setAttr("%s.overrideEnabled" %i, 1)
                    cmds.setAttr("%s.overrideColor" %i, int(btn_number[1])-1)
            if self.ui.outLiner_CKB.isChecked() == True:
                cmds.setAttr('%s.useOutlinerColor' %i, 1)
                self.outLinerCC(i,btn_number[1])
    def outLinerCC(self,obj,number):
        """Set *obj*'s outliner colour to the RGB matching swatch *number*.

        The table maps the 1-based swatch-button index to an approximate RGB
        equivalent of Maya's indexed override colours.
        """
        cc_list = {1:[0.3515, 0.3515, 0.3515],
                   2:[0,0,0],
                   3:[0.0497, 0.0497, 0.0497],
                   4:[0.2122, 0.2122, 0.2122],
                   5:[0.602, 0, 0.039],
                   6:[0, 0.005, 0.458],
                   7:[0, 0.284, 1.000],
                   8:[0, 0.447, 0.067],
                   9:[0.165, 0, 0.503],
                   10:[0.812, 0, 0.812],
                   11:[0.674, 0.170, 0.089],
                   12:[0.503, 0.166, 0.143],
                   13:[0.702, 0.041, 0],
                   14:[1.0000, 0, 0],
                   15:[0, 1.0000, 0],
                   16:[0, 0.198, 0.906],
                   17:[1.0000, 1.0000, 1.0000],
                   18:[1.0000, 1.0000, 0],
                   19:[0.1022, 0.7157, 1.0000],
                   20:[0.0561, 1.0000, 0.3613],
                   21:[1.0000, 0.4286, 0.4286],
                   22:[0.923, 0.495, 0.230],
                   23:[1.0000, 1.0000, 0.1221],
                   24:[0, 0.685, 0.186],
                   25:[0.715, 0.443, 0.0],
                   26:[0.677, 0.696, 0.058],
                   27:[0.274, 0.696, 0.058],
                   28:[0.047, 0.552, 0.171],
                   29:[0.060, 0.713, 0.713],
                   30:[0.074, 0.341, 0.884],
                   31:[0.477, 0.151, 0.851],
                   32:[0.895, 0.074, 0.355],
                   }
        cmds.setAttr('%s.outlinerColor' %obj ,cc_list[int(number)][0],cc_list[int(number)][1],cc_list[int(number)][2] )
    def resetBTN_cmd(self):
        """Remove the colour overrides applied by overrideColor.

        Mirrors overrideColor: disables the draw override on the shape (or
        the transform when no shape exists) and/or the outliner colour flag,
        depending on which checkboxes are ticked.
        """
        sel_name = cmds.ls(sl=True)
        for i in sel_name:
            if self.ui.viewPort_CKB.isChecked() == True:
                try:
                    sel_Shape = cmds.listRelatives( i, s=True )[0]
                    cmds.setAttr("%s.overrideEnabled" %sel_Shape, 0)
                # No shape node -> reset the override on the transform.
                except:
                    cmds.setAttr("%s.overrideEnabled" %i, 0)
            if self.ui.outLiner_CKB.isChecked() == True:
                cmds.setAttr('%s.useOutlinerColor' %i, 0)
    def shapeReplace(self):
        """Replace the second selected object's shape with the first's.

        Expects exactly two transforms selected: the first's shape node is
        re-parented under the second transform, the donor transform and the
        old shape are deleted, and the moved shape is renamed to match its
        new parent.
        """
        sel = cmds.ls(sl=True)
        sel0_shape = cmds.listRelatives( sel[0], s=True )[0]
        sel1_shape = cmds.listRelatives( sel[1], s=True )[0]
        # r=True keeps the shape's local data; s=True parents as a shape.
        cmds.parent( sel0_shape , sel[1], r=True, s=True)
        cmds.delete(sel[0])
        cmds.delete(sel1_shape)
        cmds.rename(sel0_shape , "%sShape" %sel[1])
    @undo
    def FKcontrolMaker(self):
        """Build an FK controller chain over the selected joints.

        For each selected joint a controller (a duplicate of the cached
        template when one exists, otherwise a default circle) plus a zeroed
        parent group are created and snapped to the joint.  The groups are
        then chained parent-to-child in selection order, and each controller
        parent-constrains its joint.  Wrapped in @undo so the whole build is
        a single undo step.
        """
        sel = cmds.ls(sl=True)
        fkController = list()
        fkNullGroup = list()
        for i in sel:
            jointName = i
            if len(self.temp_CON) == 1:
                Control = cmds.duplicate(self.temp_CON, n= jointName.replace(jointName.split('_')[-1],'ctl'))
            else:
                Control = cmds.circle(nr=(1,0,0),c=(0,0,0),r=1, n= jointName.replace(jointName.split('_')[-1],'ctl'))
                cmds.DeleteHistory(Control[0])
            cmds.setAttr("%sShape.overrideEnabled" %Control[0], 1)
            cmds.setAttr("%sShape.overrideColor" %Control[0], 17)
            cmds.DeleteHistory(Control[0])
            cmds.group( Control[0] )
            nullGroup = (cmds.rename(jointName.replace(jointName.split('_')[-1],'ctl_G')))
            fkController.append("%s" %Control[0])
            fkNullGroup.append("%s" %nullGroup)
            # Snap the group onto the joint, then drop the temp constraint.
            cmds.delete(cmds.parentConstraint(jointName,nullGroup, w=True))
        # Chain the groups back-to-front: each group under the previous ctl.
        for x in range(len(sel)-1):
            q = -1-x
            k = -2-x
            cmds.parent(fkNullGroup[q], fkController[k])
        for y in range(len(sel)):
            cmds.parentConstraint(fkController[y], sel[y], mo=1 , w=1)
def homeNull(self):
sel = cmds.ls(sl=True)
for i in sel:
if 'ctl' in i:
homeNul( i )
else:
homeNul( i , i+'_ctl_G' )
    def crvConnect(self):
        """Draw a 2-point degree-1 curve that live-tracks two selected nodes.

        Each CV is driven through a multMatrix (object worldMatrix composed
        with the curve's worldInverseMatrix, so the CV lands in the curve's
        local space) feeding a decomposeMatrix whose translate plugs into the
        curve's controlPoints.  Node creation order matters: shapes must be
        renamed before the controlPoints connections are made.
        """
        sel = cmds.ls(sl=True)
        startName = sel[0]
        endName = sel[1]
        curveName = cmds.curve( d=1, p=[(0,0,0),(0,0,0)], name = startName.replace( startName.split('_')[-1], 'CRV' ) )
        curveShape = cmds.listRelatives( curveName, s=True )[0]
        curveShapeName = cmds.rename( curveShape, '%sShape' % curveName )
        cvNum = cmds.getAttr( '%s.spans' % curveShapeName ) + cmds.getAttr( '%s.degree' % curveShapeName )
        MMX_list = []
        DCM_list = []
        for x in range( cvNum ):
            # Create Node
            MMX = cmds.createNode( 'multMatrix', n=startName.replace( '_%s' % startName.split('_')[-1], '%s_MMX' % (x+1) ) )
            MMX_list.append(MMX)
            DCM = cmds.createNode( 'decomposeMatrix', n=startName.replace( '_%s' % startName.split('_')[-1], '%s_DCM' % (x+1) ) )
            DCM_list.append(DCM)
        # Connection Node
        cmds.connectAttr( '%s.worldMatrix[0]' % startName, '%s.matrixIn[0]' % MMX_list[0] )
        cmds.connectAttr( '%s.matrixSum' % MMX_list[0], '%s.inputMatrix' % DCM_list[0] )
        cmds.connectAttr( '%s.worldInverseMatrix[0]' % curveName, '%s.matrixIn[1]' % MMX_list[0] )
        cmds.connectAttr( '%s.outputTranslate' % DCM_list[0], '%s.controlPoints[0]' % curveShapeName )
        cmds.connectAttr( '%s.worldMatrix[0]' % endName, '%s.matrixIn[0]' % MMX_list[1] )
        cmds.connectAttr( '%s.matrixSum' % MMX_list[1], '%s.inputMatrix' % DCM_list[1] )
        cmds.connectAttr( '%s.worldInverseMatrix[0]' % curveName, '%s.matrixIn[1]' % MMX_list[1] )
        cmds.connectAttr( '%s.outputTranslate' % DCM_list[1], '%s.controlPoints[1]' % curveShapeName )
def chainParents(self):
sel = cmds.ls(sl=True)
for x in range(len(sel)-1):
cmds.parent(sel[x], sel[x+1])
def multiSelhierarchy(self):
sel = cmds.ls(sl=True)
for i in sel:
cmds.select("%s" %i, hierarchy=True, add=True )
    @undo
    def controllerResize(self):
        """Scale each selected curve's CVs about its own rotate pivot.

        A temporary cluster over all CVs is scaled by the spinbox value so
        the transform itself is untouched.  Positive values scale directly;
        non-positive values are remapped to a shrink factor (1 + value/10).
        History is deleted at the end, removing the clusters.
        """
        number = float(self.ui.Do_Resize_DSB.text())
        XYZ = ["X","Y","Z"]
        sel = cmds.ls(sl=True)
        for x in sel:
            curveName = x
            curveShape = cmds.listRelatives( curveName, s=True )[0]
            cvNum = cmds.getAttr( '%s.spans' % curveShape ) + cmds.getAttr( '%s.degree' % curveShape )
            conPivot = cmds.xform("%s" %curveName, q=True, ws=True, rp=True)
            cmds.select( "%s.cv[0:%s]" %(curveName,cvNum))
            cluster = cmds.cluster()
            # Move the cluster's pivots onto the curve's pivot so scaling is
            # centred on the controller, not the origin.
            cmds.move( conPivot[0], conPivot[1], conPivot[2], "%s.scalePivot" %cluster[1] , "%s.rotatePivot" %cluster[1],absolute=True)
            if number > 0:
                for i in XYZ:
                    cmds.setAttr( "%s.scale%s" %(cluster[1],i), number)
            else:
                nagative_number = 1 - number*(-0.1)
                for i in XYZ:
                    cmds.setAttr( "%s.scale%s" %(cluster[1],i), nagative_number)
        cmds.DeleteHistory(cmds.select( sel ))
    @undo
    def mirrorControl(self):
        """Mirror the selected controllers across the axis of the clicked button.

        Each controller is parented under a temporary joint, the joint is
        mirrored with cmds.mirrorJoint (world-plane or behaviour mode per the
        radio buttons; names are L_/R_ swapped), and both temp joint sets are
        ungrouped, leaving the mirrored controllers in the scene.
        """
        self.mirrorJointList=[]
        self.mirrorJointList_add=[]
        self.parentGroupList=[]
        sel_re = []
        sel=cmds.ls(sl=True)
        for k in sel:
            tempParentGroupList = cmds.listRelatives( k, allParents=True )
            self.parentGroupList.append(tempParentGroupList)
        # Wrap each controller in a temporary joint so mirrorJoint can act on it.
        for i in sel:
            cmds.select(cl=True)
            cmds.select(i ,r=True)
            tempMirrorJoint = cmds.joint(n='%s_temp_JNT' %i )
            cmds.parent(tempMirrorJoint , w=True)
            cmds.parent(i , tempMirrorJoint)
            self.mirrorJointList.append(tempMirrorJoint)
        # Read the mirror axis and mirror method selected in the UI.
        buttonXYZ = self.sender()
        btn = buttonXYZ.objectName()
        btn_XYZ = btn.split('_')
        for i in self.mirrorJointList:
            if self.ui.mirrorCON_world_RDB.isChecked() == True:
                if btn_XYZ[1] == 'X':
                    tempMirrorJoint2 = cmds.mirrorJoint(i , mxy = True , mirrorBehavior=False , searchReplace=('L_', 'R_'))
                    self.mirrorJointList_add.append(tempMirrorJoint2[0])
                elif btn_XYZ[1] == 'Y':
                    tempMirrorJoint2 = cmds.mirrorJoint(i , myz = True , mirrorBehavior=False , searchReplace=('L_', 'R_'))
                    self.mirrorJointList_add.append(tempMirrorJoint2[0])
                elif btn_XYZ[1] == 'Z':
                    tempMirrorJoint2 = cmds.mirrorJoint(i , mxz = True , mirrorBehavior=False , searchReplace=('L_', 'R_'))
                    self.mirrorJointList_add.append(tempMirrorJoint2[0])
            if self.ui.mirrorCON_behavior_RDB.isChecked() == True:
                if btn_XYZ[1] == 'X':
                    tempMirrorJoint2 = cmds.mirrorJoint(i , mxy = True , mirrorBehavior=True , searchReplace=('L_', 'R_'))
                    self.mirrorJointList_add.append(tempMirrorJoint2[0])
                elif btn_XYZ[1] == 'Y':
                    tempMirrorJoint2 = cmds.mirrorJoint(i , myz = True , mirrorBehavior=True , searchReplace=('L_', 'R_'))
                    self.mirrorJointList_add.append(tempMirrorJoint2[0])
                elif btn_XYZ[1] == 'Z':
                    tempMirrorJoint2 = cmds.mirrorJoint(i , mxz = True , mirrorBehavior=True , searchReplace=('L_', 'R_'))
                    self.mirrorJointList_add.append(tempMirrorJoint2[0])
        # Drop the temporary joints, keeping their children in place.
        cmds.ungroup(self.mirrorJointList_add)
        cmds.ungroup(self.mirrorJointList)
    def rotateControl(self):
        """Rotate each selected curve's CVs 90 degrees about one object axis.

        The axis comes from the clicked button's objectName
        ('rotateCon_<axis>_BTN').  Only the CVs rotate; the transform is left
        untouched.  The original selection is restored afterwards.
        """
        buttonXYZ = self.sender()
        btn = buttonXYZ.objectName()
        btn_XYZ = btn.split('_')
        sel = cmds.ls(sl=True)
        for i in sel:
            curveName = i
            curveShape = cmds.listRelatives( curveName, s=True )[0]
            cvNum = cmds.getAttr( '%s.spans' % curveShape ) + cmds.getAttr( '%s.degree' % curveShape )
            cmds.select( "%s.cv[0:%s]" %(curveName,cvNum))
            if btn_XYZ[1] == 'X':
                cmds.rotate(90,0,0,r=True,os =True)
            elif btn_XYZ[1] == 'Y':
                cmds.rotate(0,90,0,r=True,os =True)
            elif btn_XYZ[1] == 'Z':
                cmds.rotate(0,0,90,r=True,os =True)
        cmds.select(cl=True)
        for i in sel:
            cmds.select(i,tgl=True)
    def controllerMake(self):
        """Create the controller shape matching the clicked conMake button.

        With objects selected: one controller per object, snapped to it via a
        throw-away parentConstraint, renamed '<object>_ctl' and wrapped in a
        home null.  With nothing selected: one controller at the origin.
        """
        temp_sel = cmds.ls(sl=True)
        buttonHandle = self.sender()
        btn = buttonHandle.objectName()
        btn_number = btn.split('_')
        # Zero-padded shape id ('01'..'28') from the button name.
        n = btn_number[1]
        if len(temp_sel) >= 1:
            for i in range(len(temp_sel)):
                self.chooseConShape(n)
                selMakeCON = cmds.ls(sl=True)
                cmds.delete(cmds.parentConstraint(temp_sel[i],selMakeCON,mo=0,w=1))
                cmds.rename('%s_ctl' %temp_sel[i])
                self.homeNull()
        else:
            self.chooseConShape(n)
            #selMakeCON = cmds.ls(sl=True)
            self.homeNull()
    def chooseConShape(self,number):
        """Create the NURBS curve for controller shape *number* ('01'..'16').

        Each branch evaluates a hard-coded MEL ``curve``/``circle`` command
        and records a readable shape name in ``self.CON_name``, which is then
        shown on the clear button.  Ids '17'..'28' are reserved; their
        branches remain commented out below.
        """
        # box
        n = number
        if n == '01':
            mel.eval('curve -d 1 -p 0.5 0.5 0.5 -p 0.5 0.5 -0.5 -p -0.5 0.5 -0.5 -p -0.5 -0.5 -0.5 -p 0.5 -0.5 -0.5 -p 0.5 0.5 -0.5 -p -0.5 0.5 -0.5 -p -0.5 0.5 0.5 -p 0.5 0.5 0.5 -p 0.5 -0.5 0.5 -p 0.5 -0.5 -0.5 -p -0.5 -0.5 -0.5 -p -0.5 -0.5 0.5 -p 0.5 -0.5 0.5 -p -0.5 -0.5 0.5 -p -0.5 0.5 0.5 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8 -k 9 -k 10 -k 11 -k 12 -k 13 -k 14 -k 15;')
            self.CON_name = 'box'
        # circle
        elif n == '02':
            mel.eval('circle -c 0 0 0 -nr 0 1 0 -sw 360 -r 0.5 -d 3 -ut 0 -tol 0.01 -s 8 -ch 0')
            self.CON_name = 'circle'
        # volume circle
        elif n == '03':
            mel.eval('curve -d 1 -p -0.504004 -0.0167178 -1.8995e-06 -p -0.486389 -0.0167178 0.130304 -p -0.436069 -0.0167178 0.251787 -p -0.356385 -0.0167178 0.356383 -p -0.251789 -0.0167178 0.436067 -p -0.130306 -0.0167178 0.486387 -p 0 -0.0167178 0.504002 -p 0.130306 -0.0167178 0.486387 -p 0.251789 -0.0167178 0.436067 -p 0.356385 -0.0167178 0.356383 -p 0.436069 -0.0167178 0.251787 -p 0.486389 -0.0167178 0.130304 -p 0.504004 -0.0167178 -1.8995e-06 -p 0.486389 -0.0167178 -0.130308 -p 0.436069 -0.0167178 -0.251791 -p 0.356385 -0.0167178 -0.356387 -p 0.251789 -0.0167178 -0.436071 -p 0.130306 -0.0167178 -0.486392 -p 0 -0.0167178 -0.504002 -p -0.130306 -0.0167178 -0.486392 -p -0.251789 -0.0167178 -0.436071 -p -0.356385 -0.0167178 -0.356387 -p -0.436069 -0.0167178 -0.251791 -p -0.486389 -0.0167178 -0.130308 -p -0.504004 -0.0167178 -1.8995e-06 -p -0.504004 0.0167178 -1.8995e-06 -p -0.486389 0.0167178 0.130304 -p -0.436069 0.0167178 0.251787 -p -0.356385 0.0167178 0.356383 -p -0.251789 0.0167178 0.436067 -p -0.130306 0.0167178 0.486387 -p 0 0.0167178 0.504002 -p 0 -0.0167178 0.504002 -p 0 0.0167178 0.504002 -p 0.130306 0.0167178 0.486387 -p 0.251789 0.0167178 0.436067 -p 0.356385 0.0167178 0.356383 -p 0.436069 0.0167178 0.251787 -p 0.486389 0.0167178 0.130304 -p 0.504004 0.0167178 -1.8995e-06 -p 0.504004 -0.0167178 -1.8995e-06 -p 0.504004 0.0167178 -1.8995e-06 -p 0.486389 0.0167178 -0.130308 -p 0.436069 0.0167178 -0.251791 -p 0.356385 0.0167178 -0.356387 -p 0.251789 0.0167178 -0.436071 -p 0.130306 0.0167178 -0.486392 -p 0 0.0167178 -0.504002 -p 0 -0.0167178 -0.504002 -p 0 0.0167178 -0.504002 -p -0.130306 0.0167178 -0.486392 -p -0.251789 0.0167178 -0.436071 -p -0.356385 0.0167178 -0.356387 -p -0.436069 0.0167178 -0.251791 -p -0.486389 0.0167178 -0.130308 -p -0.504004 0.0167178 -1.8995e-06 -p -0.504004 -0.0167178 -1.8995e-06 -p -0.504004 0.0167178 -1.8995e-06;')
            self.CON_name = 'volume circle '
        # cross
        elif n == '04':
            mel.eval('curve -d 1 -p 0.165 0 0.495 -p 0.165 0 0.165 -p 0.495 0 0.165 -p 0.495 0 -0.165 -p 0.165 0 -0.165 -p 0.165 0 -0.495 -p -0.165 0 -0.495 -p -0.165 0 -0.165 -p -0.495 0 -0.165 -p -0.495 0 0.165 -p -0.165 0 0.165 -p -0.165 0 0.495 -p 0.165 0 0.495;')
            self.CON_name = 'cross'
        # fat cross
        elif n == '05':
            mel.eval('curve -d 1 -p 0.25 0 0.5 -p 0.25 0 0.25 -p 0.5 0 0.25 -p 0.5 0 -0.25 -p 0.25 0 -0.25 -p 0.25 0 -0.5 -p -0.25 0 -0.5 -p -0.25 0 -0.25 -p -0.5 0 -0.25 -p -0.5 0 0.25 -p -0.25 0 0.25 -p -0.25 0 0.5 -p 0.25 0 0.5;')
            self.CON_name = 'fat cross'
        # locator
        elif n == '06':
            mel.eval('curve -d 1 -p 0 0 0 -p 0.5 0 0 -p -0.5 0 0 -p 0 0 0 -p 0 0 0.5 -p 0 0 -0.5 -p 0 0 0 -p 0 -0.5 0 -p 0 0.5 0 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8;')
            self.CON_name = 'locator'
        # sphere
        elif n == '07':
            mel.eval('curve -d 1 -p 0 0 0.5 -p 0.353554 0 0.353554 -p 0.5 0 0 -p 0.353554 0 -0.353554 -p 0 0 -0.5 -p -0.353554 0 -0.353554 -p -0.5 0 0 -p -0.353554 0 0.353554 -p 0 0 0.5 -p 0 0.25 0.433013 -p 0 0.433013 0.25 -p 0 0.5 0 -p 0 0.433013 -0.25 -p 0 0.25 -0.433013 -p 0 0 -0.5 -p 0 -0.25 -0.433013 -p 0 -0.433013 -0.25 -p 0 -0.5 0 -p 0 -0.433013 0.25 -p 0 -0.25 0.433013 -p 0 0 0.5 -p 0.353554 0 0.353554 -p 0.5 0 0 -p 0.433013 0.25 0 -p 0.25 0.433013 0 -p 0 0.5 0 -p -0.25 0.433013 0 -p -0.433013 0.25 0 -p -0.5 0 0 -p -0.433013 -0.25 0 -p -0.25 -0.433013 0 -p 0 -0.5 0 -p 0.25 -0.433013 0 -p 0.433013 -0.25 0 -p 0.5 0 0;')
            self.CON_name = 'sphere'
        # octagon
        elif n == '08':
            mel.eval('curve -d 1 -p 0.246168 0 0.492335 -p 0.492335 0 0.246168 -p 0.492335 0 -0.246168 -p 0.246168 0 -0.492335 -p -0.246168 0 -0.492335 -p -0.492335 0 -0.246168 -p -0.492335 0 0.246168 -p -0.246168 0 0.492335 -p 0.246168 0 0.492335;')
            self.CON_name ='octagon'
        # volume octagon
        elif n == '09':
            mel.eval('curve -d 1 -p 0.246503 -0.044 0.493007 -p 0.493007 -0.044 0.246503 -p 0.493007 -0.044 -0.246503 -p 0.246503 -0.044 -0.493007 -p -0.246503 -0.044 -0.493007 -p -0.493007 -0.044 -0.246503 -p -0.493007 -0.044 0.246503 -p -0.246503 -0.044 0.493007 -p 0.246503 -0.044 0.493007 -p 0.246503 0.044 0.493007 -p 0.493007 0.044 0.246503 -p 0.493007 -0.044 0.246503 -p 0.493007 0.044 0.246503 -p 0.493007 0.044 -0.246503 -p 0.493007 -0.044 -0.246503 -p 0.493007 0.044 -0.246503 -p 0.246503 0.044 -0.493007 -p 0.246503 -0.044 -0.493007 -p 0.246503 0.044 -0.493007 -p -0.246503 0.044 -0.493007 -p -0.246503 -0.044 -0.493007 -p -0.246503 0.044 -0.493007 -p -0.493007 0.044 -0.246503 -p -0.493007 -0.044 -0.246503 -p -0.493007 0.044 -0.246503 -p -0.493007 0.044 0.246503 -p -0.493007 -0.044 0.246503 -p -0.493007 0.044 0.246503 -p -0.246503 0.044 0.493007 -p -0.246503 -0.044 0.493007 -p -0.246503 0.044 0.493007 -p 0.246503 0.044 0.493007 -p 0.246503 -0.044 0.493007;')
            self.CON_name = 'volume octagon'
        # rombus
        elif n == '10':
            mel.eval('curve -d 1 -p 0 0.5 0 -p 0.5 0 0 -p 0 0 0.5 -p -0.5 0 0 -p 0 0 -0.5 -p 0 0.5 0 -p 0 0 0.5 -p 0 -0.5 0 -p 0 0 -0.5 -p 0.5 0 0 -p 0 0.5 0 -p -0.5 0 0 -p 0 -0.5 0 -p 0.5 0 0;')
            self.CON_name = 'rombus'
        # root
        elif n == '11':
            mel.eval('curve -d 3 -p 0 0 0.514016 -p 0.215045 0 0.43009 -p 0.215045 0 0.43009 -p 0.215045 0 0.43009 -p 0.107523 0 0.43009 -p 0.107523 0 0.43009 -p 0.107523 0 0.43009 -p 0.107523 0 0.43009 -p 0.107523 0 0.348839 -p 0.107523 0 0.348839 -p 0.107523 0 0.348839 -p 0.165418 0 0.3301 -p 0.269267 0 0.268173 -p 0.330916 0 0.164045 -p 0.348514 0 0.107561 -p 0.348514 0 0.107561 -p 0.348514 0 0.107561 -p 0.348514 0 0.107561 -p 0.43009 0 0.107523 -p 0.43009 0 0.107523 -p 0.43009 0 0.107523 -p 0.43009 0 0.215045 -p 0.43009 0 0.215045 -p 0.43009 0 0.215045 -p 0.514016 0 0 -p 0.514016 0 0 -p 0.514016 0 0 -p 0.43009 0 -0.215045 -p 0.43009 0 -0.215045 -p 0.43009 0 -0.215045 -p 0.43009 0 -0.215045 -p 0.43009 0 -0.107523 -p 0.43009 0 -0.107523 -p 0.43009 0 -0.107523 -p 0.34749 0 -0.107523 -p 0.34749 0 -0.107523 -p 0.34749 0 -0.107523 -p 0.330753 0 -0.16432 -p 0.268043 0 -0.270089 -p 0.161744 0 -0.33227 -p 0.103842 0 -0.349651 -p 0.103842 0 -0.349651 -p 0.103842 0 -0.349651 -p 0.107523 0 -0.43009 -p 0.107523 0 -0.43009 -p 0.107523 0 -0.43009 -p 0.215045 0 -0.43009 -p 0.215045 0 -0.43009 -p 0.215045 0 -0.43009 -p 0 0 -0.514016 -p 0 0 -0.514016 -p 0 0 -0.514016 -p -0.215045 0 -0.43009 -p -0.215045 0 -0.43009 -p -0.215045 0 -0.43009 -p -0.215045 0 -0.43009 -p -0.107523 0 -0.43009 -p -0.107523 0 -0.43009 -p -0.107523 0 -0.43009 -p -0.107523 0 -0.43009 -p -0.106926 0 -0.348711 -p -0.106926 0 -0.348711 -p -0.106926 0 -0.348711 -p -0.106926 0 -0.348711 -p -0.163653 0 -0.331148 -p -0.268767 0 -0.269043 -p -0.330943 0 -0.163999 -p -0.348078 0 -0.107523 -p -0.348078 0 -0.107523 -p -0.348078 0 -0.107523 -p -0.348078 0 -0.107523 -p -0.43009 0 -0.107523 -p -0.43009 0 -0.107523 -p -0.43009 0 -0.107523 -p -0.43009 0 -0.215045 -p -0.43009 0 -0.215045 -p -0.43009 0 -0.215045 -p -0.43009 0 -0.215045 -p -0.514016 0 0 -p -0.514016 0 0 -p -0.514016 0 0 -p -0.514016 0 0 -p -0.43009 0 0.215045 -p -0.43009 0 0.215045 -p -0.43009 0 0.215045 -p -0.43009 0 0.215045 -p -0.43009 0 0.107523 -p -0.43009 0 
0.107523 -p -0.43009 0 0.107523 -p -0.43009 0 0.107523 -p -0.347279 0 0.107523 -p -0.347279 0 0.107523 -p -0.347279 0 0.107523 -p -0.347279 0 0.107523 -p -0.331036 0 0.163843 -p -0.269226 0 0.268353 -p -0.164939 0 0.330385 -p -0.109006 0 0.348061 -p -0.109006 0 0.348061 -p -0.109006 0 0.348061 -p -0.109006 0 0.348061 -p -0.107523 0 0.43009 -p -0.107523 0 0.43009 -p -0.107523 0 0.43009 -p -0.107523 0 0.43009 -p -0.215045 0 0.43009 -p -0.215045 0 0.43009 -p -0.215045 0 0.43009 -p 0 0 0.514016 -p 0 0 0.514016 -p 0 0 0.514016 -p 0 0 0.514016;')
            self.CON_name = 'root'
        # hexagon
        elif n == '12':
            mel.eval('curve -d 1 -p -0.257187 0 0.445461 -p 0.257187 0 0.445461 -p 0.514375 0 2.51218e-07 -p 0.257187 0 -0.445461 -p -0.257187 0 -0.445461 -p -0.514375 0 1.69509e-07 -p -0.257187 0 0.445461;')
            self.CON_name = 'hexagon'
        # square
        elif n == '13':
            mel.eval('curve -d 1 -p -0.5 0 0.5 -p 0.5 0 0.5 -p 0.5 0 -0.5 -p -0.5 0 -0.5 -p -0.5 0 0.5;')
            self.CON_name = 'square'
        # pyramid
        elif n == '14':
            mel.eval('curve -d 1 -p 0 0.405941 -0.405941 -p 0 0.405941 0.405941 -p 0 -0.405941 0.405941 -p 2.029703 0 0 -p 0 0.405941 0.405941 -p 0 0.405941 -0.405941 -p 2.029703 0 0 -p 0 -0.405941 -0.405941 -p 0 0.405941 -0.405941 -p 0 0.405941 0.405941 -p 0 -0.405941 0.405941 -p 0 -0.405941 -0.405941 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8 -k 9 -k 10 -k 11 ;')
            self.CON_name = 'pyramid'
        # boxPin
        elif n == '15':
            mel.eval('curve -d 1 -p 0 0.521014 0 -p 0 0.370427 0.150588 -p 0 0.370427 0.301176 -p 0 -0.231927 0.301176 -p 0 -0.231927 -0.301176 -p 0 0.370427 -0.301176 -p 0 0.370427 -0.150588 -p 0 0.521014 0 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 ;')
            self.CON_name = 'boxPin'
        # pin
        elif n == '16':
            mel.eval('curve -d 1 -p 0 -0.402236 0 -p 0 0.330789 0 -p 0 0.351072 0.128059 -p 0 0.409934 0.243582 -p 0 0.501614 0.335262 -p 0 0.617137 0.394124 -p 0 0.745196 0.414406 -p 0 0.873254 0.394124 -p 0 0.988778 0.335262 -p 0 1.080458 0.243582 -p 0 1.13932 0.128059 -p 0 1.159602 0 -p 0 1.13932 -0.128059 -p 0 1.080458 -0.243582 -p 0 0.988778 -0.335262 -p 0 0.873255 -0.394124 -p 0 0.745196 -0.414406 -p 0 0.617137 -0.394124 -p 0 0.501614 -0.335262 -p 0 0.409934 -0.243582 -p 0 0.351072 -0.128059 -p 0 0.330789 0 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8 -k 9 -k 10 -k 11 -k 12 -k 13 -k 14 -k 15 -k 16 -k 17 -k 18 -k 19 -k 20 -k 21 ;')
            self.CON_name = 'pin'
        # Show the created shape's name on the clear button.
        self.ui.curveClear_BTN.setText(self.CON_name)
        # elif i == '17':
        #     cmds.circle(nr=(1,0,0), sw=360, r=1, d=3 , ut=0, s=8, ch=1)
        # elif i == '18':
        #     cmds.circle(nr=(1,0,0), sw=360, r=1, d=3 , ut=0, s=8, ch=1)
        # elif i == '19':
        #     cmds.circle(nr=(1,0,0), sw=360, r=1, d=3 , ut=0, s=8, ch=1)
        # elif i == '20':
        #     cmds.circle(nr=(1,0,0), sw=360, r=1, d=3 , ut=0, s=8, ch=1)
        # elif i == '21':
        #     cmds.circle(nr=(1,0,0), sw=360, r=1, d=3 , ut=0, s=8, ch=1)
        # elif i == '22':
        #     cmds.circle(nr=(1,0,0), sw=360, r=1, d=3 , ut=0, s=8, ch=1)
        # elif i == '23':
        #     cmds.circle(nr=(1,0,0), sw=360, r=1, d=3 , ut=0, s=8, ch=1)
        # pyramid
        # elif n == '24':
        #     cmds.curve( d=1 , p =[(5, 0, 0),( 0, 1, -1 ),( 0, 1, 1 ),( 5 ,0, 0 ),( 0 ,-1, 1 ),( 0, 1, 1 ),( 0, 1, -1 ),( 0, -1, -1 ),( 0, -1, 1 ),( 0, -1, -1 ),( 5, 0, 0 )], k = [0,1,2,3,4,5,6,7,8,9,10] )
        # # half arrow
        # elif n == '25':
        #     cmds.curve( d=1 , p =[(0, 0, 0 ),( 0, -1, 0 ),( 0.35, -0.4, 0 ),( 0, -0.4, 0 )], k = [0,1,2,3] )
        # # fat arrow
        # elif n == '26':
        #     cmds.curve( d=1 , p =[(1.5 ,0, 0),( 0.9, 0.3, 0),( 0.9 ,0.15, 0 ),( 0 ,0.15, 0 ),( 0 ,-0.15, 0 ),( 0.9 ,-0.15, 0 ),( 0.9, -0.3, 0 ),( 1.5, 0, 0)], k = [0,1,2,3,4,5,6,7] )
        # # circle2
        # elif n == '27':
        #     cmds.circle(nr=(1,0,0), sw=360, r=1, d=3 , ut=0, s=8, ch=1)
        # # box2
        # elif n == '28':
        #     cmds.curve( d=1, p =[(-1,-1,-1),(-1,-1,1),(1,-1,1),(1,-1,-1),(-1,-1,-1),(-1,1,-1),(-1,1,1),(-1,-1,1),(1,-1,1),(1,1,1),(-1,1,1),(-1,1,-1),(1,1,-1),(1,1,1),(1,-1,1),(1,-1,-1),(1,1,-1)], k =[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] )
def curveClear(self):
self.CON_name = 'Controller Maker'
del self.temp_CON[0:len(self.temp_CON)]
self.ui.curveClear_BTN.setText(self.CON_name)
    def setFilename(self):
        """Read the export file name from the UI, cache and return it."""
        self.JsonName = str(self.ui.fileName_LIE.text())
        return self.JsonName
    def setMayaFolder(self):
        """Ask the user for an export folder and show it in the path field.

        NOTE(review): ``cmds.fileDialog2`` returns None when the dialog is
        cancelled, so the ``[0]`` index would raise TypeError on cancel --
        confirm whether that is acceptable here.
        """
        # #cmds.workspace (dir=startFolder)
        self.mayaFolder = cmds.fileDialog2(fm=3, caption = "Set")[0]
        # print self.mayaFolder
        # # When you press cancel or close, mayaFolder would be None and not running this code.
        if self.mayaFolder:
            #setPath = self.browseForFolderCallback(mayaFolder[0])
            self.ui.expConPath_LIE.setText( self.mayaFolder )
def makeCurveDic(self):
self.curveDic = {}
selCurveShape = cmds.ls(sl=True, dag=True, ni=True, type='nurbsCurve')
cvCountList = []
for i in range(len(selCurveShape)):
if cmds.getAttr('%s.f' %selCurveShape[i])==2:
cvCount = cmds.getAttr( '%s.spans' %selCurveShape[i])
else:
cvCount = cmds.getAttr( '%s.spans' %selCurveShape[i]) + cmds.getAttr( '%s.degree' % selCurveShape[i] )
cvCountList.append( cvCount )
count = 0
while count < len( cvCountList ):
self.curveDic[selCurveShape[count]] = {}
for cvNum in range( cvCountList[count]):
cvPosition = cmds.pointPosition( '%s.cv[%s]' %( selCurveShape[count], cvNum ) ,l=True)
self.curveDic[selCurveShape[count]][cvNum] = cvPosition
count = count + 1
def exportCON_JSON(self):
self.setFilename()
self.makeCurveDic()
filePath = self.mayaFolder +'/'+ self.JsonName +'.json'
# write
F = open( filePath, 'w' )
F.write(json.dumps( self.curveDic, indent = 4 ))
F.close()
QtWidgets.QMessageBox.information(self, "Done", 'Your curves exported here. "%s"' %filePath,
QtWidgets.QMessageBox.Ok)
def importCON_JSON(self):
# self.mayaFolder2 = cmds.fileDialog2(fm=3, caption = "Set")[0]
# filePath = self.mayaFolder + self.JsonName +'.json'
basicFilter = "*.json"
filePath = cmds.fileDialog2(ff=basicFilter, dialogStyle=1,fm=1,rf=True)
# load
F = open( str(filePath[0]) )
self.loadedData = json.load( F )
F.close()
jsonKeys = self.loadedData.keys()
for i in jsonKeys:
curveShapeKeys = self.loadedData[i].keys()
for j in curveShapeKeys:
controlPoints = self.loadedData[i][j]
cmds.setAttr("%s.controlPoints[%s].xValue" %(i,j), controlPoints[0])
cmds.setAttr("%s.controlPoints[%s].yValue" %(i,j), controlPoints[1])
cmds.setAttr("%s.controlPoints[%s].zValue" %(i,j), controlPoints[2])
#------------------------------------------------------------------------------------------------------------------------
def OPEN():
    """(Re)open the tool window, closing any previous instance first."""
    global Window
    try:
        Window.close()
        Window.deleteLater()
    # First launch: ``Window`` is undefined (NameError) or already destroyed.
    # ``except Exception`` replaces the original bare ``except:`` so
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    except Exception:
        pass
    Window = uiMainWindow()
    Window.ui.show()
| [
"56536931+mappp7@users.noreply.github.com"
] | 56536931+mappp7@users.noreply.github.com |
ec3279a0d583a81c3f3babb1c9cf24cf74075378 | 2e4023d59718d87e1940b27ada9155a9a47a7668 | /tests/serialization/serializers_test.py | 78ee84a4e45f73535abd4bd8f1ecd15917121351 | [
"Apache-2.0"
] | permissive | olukas/hazelcast-python-client | c71038a22b73de894320d641dbf617509049c63d | 63bcbaaef0bf755e4e94e8e536d19d964e02144a | refs/heads/master | 2020-03-20T21:27:02.460282 | 2018-06-18T11:50:39 | 2018-06-19T12:02:37 | 137,741,377 | 0 | 0 | null | 2018-06-18T11:02:55 | 2018-06-18T11:02:55 | null | UTF-8 | Python | false | false | 3,526 | py | import binascii
from hzrc.ttypes import Lang
from hazelcast.config import SerializationConfig, INTEGER_TYPE
from hazelcast.serialization.data import Data
from hazelcast.serialization.serialization_const import CONSTANT_TYPE_DOUBLE
from hazelcast.serialization.service import SerializationServiceV1
from tests.base import SingleMemberTestCase
class SerializersTestCase(SingleMemberTestCase):
    """Round-trip tests for the hazelcast serialization service, comparing
    locally (de)serialised values against bytes produced by a real member via
    the remote-controller scripting API."""

    def setUp(self):
        # Fresh service per test, configured to serialise ints as BIG_INT.
        config = SerializationConfig()
        config.default_integer_type = INTEGER_TYPE.BIG_INT
        self.service = SerializationServiceV1(serialization_config=config)

    def tearDown(self):
        self.service.destroy()

    def test_none_serializer(self):
        # None serialises to no data; empty Data deserialises back to None.
        none = None
        data_n = self.service.to_data(none)
        self.assertIsNone(data_n)
        self.assertIsNone(self.service.to_object(Data()))

    def test_boolean_serializer(self):
        # Booleans must survive a to_data/to_object round trip.
        true = True
        false = False
        data_t = self.service.to_data(true)
        data_f = self.service.to_data(false)
        obj_t = self.service.to_object(data_t)
        obj_f = self.service.to_object(data_f)
        self.assertEqual(true, obj_t)
        self.assertEqual(false, obj_f)

    def test_char_type_serializer(self):
        # Hand-crafted CONSTANT_TYPE_CHAR payload for U+00E7 (Python 2 unichr).
        buff = bytearray(binascii.unhexlify("00000000fffffffb00e7"))
        data = Data(buff)
        obj = self.service.to_object(data)
        self.assertEqual(unichr(0x00e7), obj)

    def test_float(self):
        # Hand-crafted CONSTANT_TYPE_FLOAT payload for 0.0.
        buff = bytearray(binascii.unhexlify("00000000fffffff700000000"))
        data = Data(buff)
        obj = self.service.to_object(data)
        self.assertEqual(0.0, obj)

    def test_double(self):
        # Python floats serialise as the DOUBLE constant type.
        double = 1.0
        data = self.service.to_data(double)
        obj = self.service.to_object(data)
        self.assertEqual(data.get_type(), CONSTANT_TYPE_DOUBLE)
        self.assertEqual(double, obj)

    def test_datetime(self):
        # A java.util.Date built on the member must deserialise to a Python
        # datetime with matching fields (Calendar months are 0-based, hence
        # the "{}-1" in the script).
        year = 2000
        month = 11
        day = 15
        hour = 23
        minute = 59
        second = 49
        script = """
from java.util import Date, Calendar
cal = Calendar.getInstance()
cal.set({}, ({}-1), {}, {}, {}, {})
result=instance_0.getSerializationService().toBytes(cal.getTime())
""".format(year, month, day, hour, minute, second)
        response = self.rc.executeOnController(self.cluster.id, script, Lang.PYTHON)
        data = Data(response.result)
        val = self.service.to_object(data)
        self.assertEqual(year, val.year)
        self.assertEqual(month, val.month)
        self.assertEqual(day, val.day)
        self.assertEqual(hour, val.hour)
        self.assertEqual(minute, val.minute)
        self.assertEqual(second, val.second)

    def test_big_int_small(self):
        self._big_int_test(12)

    def test_big_int_small_neg(self):
        self._big_int_test(-13)

    def test_big_int(self):
        self._big_int_test(1234567890123456789012345678901234567890)

    def test_big_int_neg(self):
        self._big_int_test(-1234567890123456789012345678901234567890)

    def _big_int_test(self, big_int):
        # Serialise the same value as a java.math.BigInteger on the member
        # and locally; the byte payloads and the round-tripped value must match.
        script = """from java.math import BigInteger
result=instance_0.getSerializationService().toBytes(BigInteger("{}",10))""".format(big_int)
        response = self.rc.executeOnController(self.cluster.id, script, Lang.PYTHON)
        data = Data(response.result)
        val = self.service.to_object(data)
        data_local = self.service.to_data(big_int)
        self.assertEqual(binascii.hexlify(data._buffer), binascii.hexlify(data_local._buffer))
        self.assertEqual(big_int, val)
| [
"arslanasim@gmail.com"
] | arslanasim@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.