blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2
values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684
values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22
values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147
values | src_encoding stringclasses 25
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 128 12.7k | extension stringclasses 142
values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7cd7280a8fe0e76ea694356b8e664387c4b0dd8 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/edcps/apis/DescribeInstanceRaidRequest.py | ffe9791f476342b0cfcad38c975a790eaf25c446 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,552 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeInstanceRaidRequest(JDCloudRequest):
    """
    Query the installed RAID configuration of a single distributed cloud
    physical server, including system-disk RAID and data-disk RAID info.
    """

    def __init__(self, parameters, header=None, version="v1"):
        # Generated request wrapper: binds this call to its REST path/verb.
        super(DescribeInstanceRaidRequest, self).__init__(
            '/regions/{regionId}/instances/{instanceId}:describeInstanceRaid', 'GET', header, version)
        self.parameters = parameters
class DescribeInstanceRaidParameters(object):
    """Path parameters for DescribeInstanceRaidRequest."""

    def __init__(self, regionId, instanceId, ):
        """
        :param regionId: Region ID; call the describeEdCPSRegions API to list
            the regions supported by distributed cloud physical servers.
        :param instanceId: Distributed cloud physical server ID.
        """
        self.regionId = regionId
        self.instanceId = instanceId
| [
"tancong@jd.com"
] | tancong@jd.com |
b64a06b58deef6f2abcc090b1cd40042604dfc0f | e315715504e54e2c4af04abec0e179d233a003de | /example.py | 67dd869c62199db4639d2f4c85dd184b6b038086 | [
"MIT"
] | permissive | CaliDog/tachikoma | d80ce280c33a6bf39644fb7eb6bc3af0186b4db6 | 3955ff97c14ba9747c42324cb6a2955419c6e71e | refs/heads/master | 2021-09-22T09:49:54.030468 | 2018-09-07T16:38:03 | 2018-09-07T16:38:03 | 108,040,909 | 21 | 3 | null | 2018-09-07T16:10:15 | 2017-10-23T21:20:50 | Python | UTF-8 | Python | false | false | 511 | py | import tachikoma
from tachikoma import analyzers
from tachikoma import generators
from tachikoma import emitters
# Build the pipeline: generators collect raw data, analyzers diff/inspect it,
# and emitters report the findings.
pipeline = tachikoma.Pipeline(
    generators={
        "slack": generators.SlackGenerator(),
        # Fixed: the IAM and ACM generators were swapped relative to their
        # keys ("aws.iam" previously got AWSACMGenerator and vice versa).
        "aws.iam": generators.AWSIAMGenerator(),
        "aws.acm": generators.AWSACMGenerator(),
    },
    analyzers={
        "aws.*": analyzers.AllAWSAnalyzer(),
        "slack": analyzers.SlackAnalyzer()
    },
    emitters={
        "aws.*": emitters.SlackEmitter()
    },
)

pipeline.execute()
| [
"fitblip@gmail.com"
] | fitblip@gmail.com |
e6b701c4e85ace8f8f42cbd77905813b1d824f87 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/insights/v20191017preview/get_private_link_scoped_resource.py | 0294eb716232b389cedb14f651bdf588d455d348 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 3,981 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetPrivateLinkScopedResourceResult',
'AwaitableGetPrivateLinkScopedResourceResult',
'get_private_link_scoped_resource',
]
@pulumi.output_type
class GetPrivateLinkScopedResourceResult:
    """
    A private link scoped resource
    """
    def __init__(__self__, linked_resource_id=None, name=None, provisioning_state=None, type=None):
        # Generated code: each output property is type-checked (when present)
        # and registered with the Pulumi runtime via pulumi.set().
        if linked_resource_id and not isinstance(linked_resource_id, str):
            raise TypeError("Expected argument 'linked_resource_id' to be a str")
        pulumi.set(__self__, "linked_resource_id", linked_resource_id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="linkedResourceId")
    def linked_resource_id(self) -> Optional[str]:
        """
        The resource id of the scoped Azure monitor resource.
        """
        return pulumi.get(self, "linked_resource_id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Azure resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        State of the private endpoint connection.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Azure resource type
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateLinkScopedResourceResult(GetPrivateLinkScopedResourceResult):
    """Awaitable wrapper so the result can be used with ``await`` in Pulumi programs."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield marks this as a generator; awaiting returns
        # a plain copy of the already-resolved result.
        if False:
            yield self
        return GetPrivateLinkScopedResourceResult(
            linked_resource_id=self.linked_resource_id,
            name=self.name,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_link_scoped_resource(name: Optional[str] = None,
                                     resource_group_name: Optional[str] = None,
                                     scope_name: Optional[str] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkScopedResourceResult:
    """
    Use this data source to access information about an existing resource.

    :param str name: The name of the scoped resource object.
    :param str resource_group_name: The name of the resource group.
    :param str scope_name: The name of the Azure Monitor PrivateLinkScope resource.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['scopeName'] = scope_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:insights/v20191017preview:getPrivateLinkScopedResource', __args__, opts=opts, typ=GetPrivateLinkScopedResourceResult).value

    return AwaitableGetPrivateLinkScopedResourceResult(
        linked_resource_id=__ret__.linked_resource_id,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
832eeb22d69bb86c317491a4cdcc67e51034ff48 | f40e5c91a18fc5c7e0b4d96fe964a493f924e958 | /supervised_learning/0x00-binary_classification/15-neural_network.py | 8741dca8460da62b04755c3d6e890d729cf38caa | [] | no_license | jgadelugo/holbertonschool-machine_learning | ab46f71477998371ca5e3623455d61fe334ab221 | e20b284d5f1841952104d7d9a0274cff80eb304d | refs/heads/master | 2023-02-01T03:52:43.723569 | 2020-12-10T19:28:57 | 2020-12-10T19:28:57 | 256,043,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,674 | py | #!/usr/bin/env python3
"""
Class defines a single neuron performing binary classification
"""
import numpy as np
import matplotlib.pyplot as plt
class NeuralNetwork:
    """Neural network with one hidden layer performing binary classification."""

    def __init__(self, nx, nodes):
        """Initialize the network.

        :param nx: number of input features (positive int).
        :param nodes: number of nodes in the hidden layer (positive int).
        :raises TypeError/ValueError: on invalid arguments.
        """
        if not isinstance(nx, int):
            raise TypeError("nx must be an integer")
        elif nx < 1:
            raise ValueError("nx must be a positive integer")
        if not isinstance(nodes, int):
            raise TypeError("nodes must be an integer")
        elif nodes < 1:
            raise ValueError("nodes must be a positive integer")
        # Hidden layer parameters.
        self.__W1 = np.random.normal(size=(nodes, nx))
        self.__b1 = np.zeros((nodes, 1))
        self.__A1 = 0
        # Output neuron parameters.
        self.__W2 = np.random.normal(size=(1, nodes))
        self.__b2 = 0
        self.__A2 = 0

    @property
    def W1(self):
        """Weights vector for the hidden layer."""
        return self.__W1

    @property
    def b1(self):
        """Bias for the hidden layer."""
        return self.__b1

    @property
    def A1(self):
        """Activated output of the hidden layer."""
        return self.__A1

    @property
    def W2(self):
        """Weights vector for the output neuron."""
        return self.__W2

    @property
    def b2(self):
        """Bias for the output neuron."""
        return self.__b2

    @property
    def A2(self):
        """Activated output (prediction) of the output neuron."""
        return self.__A2

    def forward_prop(self, X):
        """Run forward propagation over input X of shape (nx, m).

        Returns (A1, A2), the sigmoid activations of both layers.
        """
        z1 = self.W1 @ X + self.b1
        self.__A1 = 1 / (1 + np.exp(-z1))
        z2 = self.W2 @ self.A1 + self.b2
        self.__A2 = 1 / (1 + np.exp(-z2))
        return (self.A1, self.A2)

    def cost(self, Y, A):
        """Logistic-regression cost of predictions A against labels Y."""
        m = Y.shape[1]
        # 1.0000001 avoids log(0) when an activation saturates at 1.
        ct = -(1 / m) * ((Y * (np.log(A))) + ((1 - Y) * np.log(1.0000001 - A)))
        return ct.sum()

    def evaluate(self, X, Y):
        """Return (predictions, cost); predictions are 1 where A2 > 0.5."""
        A1, A2 = self.forward_prop(X)
        prob = np.where(A2 <= 0.5, 0, 1)
        return (prob, self.cost(Y, A2))

    def gradient_descent(self, X, Y, A1, A2, alpha=0.05):
        """One pass of gradient descent, updating weights/biases in place."""
        m = X.shape[1]
        dZ2 = A2 - Y
        dW2 = (dZ2 @ A1.T) / m
        db2 = np.sum(dZ2, axis=1, keepdims=True) / m
        # A1 - A1**2 is the derivative of the sigmoid activation.
        dZ1 = (self.W2.T @ dZ2) * (A1 - (A1 ** 2))
        dW1 = (dZ1 @ X.T) / m
        db1 = np.sum(dZ1, axis=1, keepdims=True) / m
        self.__b1 = self.__b1 - alpha * db1
        self.__W1 = self.__W1 - alpha * dW1
        self.__b2 = self.__b2 - alpha * db2
        self.__W2 = self.__W2 - alpha * dW2

    def train(self, X, Y, iterations=5000, alpha=0.05, verbose=True,
              graph=True, step=100):
        """Train the network and return evaluate(X, Y) after training.

        Fixes over the previous version:
        - `iterations` was decremented twice per loop iteration, so only half
          of the requested updates actually ran.
        - the verbose/graph conditions (`count == step`) fired at most once;
          they now fire every `step` iterations and at the final iteration.
        - the cost curve is collected and plotted once at the end instead of
          calling plt.show() inside the training loop.
        """
        if isinstance(iterations, int) is False:
            raise TypeError("iterations must be an integer")
        if iterations <= 0:
            raise ValueError("iterations must be a positive integer")
        if isinstance(alpha, float) is False:
            raise TypeError("alpha must be a float")
        if alpha <= 0:
            raise ValueError("alpha must be positive")
        if verbose or graph:
            if isinstance(step, int) is False:
                raise TypeError("step must be an integer")
            if step < 1 or step > iterations:
                raise ValueError("step must be positive and <= iterations")
        costs = []
        marks = []
        for i in range(iterations + 1):
            A1, A2 = self.forward_prop(X)
            if i % step == 0 or i == iterations:
                cost = self.cost(Y, A2)
                if verbose:
                    print("Cost after {} iterations: {}".format(i, cost))
                if graph:
                    costs.append(cost)
                    marks.append(i)
            # Iteration `iterations` is evaluation-only; no extra update.
            if i < iterations:
                self.gradient_descent(X, Y, A1, A2, alpha)
        if graph:
            plt.plot(marks, costs, 'b')
            plt.xlabel('iteration')
            plt.ylabel('cost')
            plt.title('Training Cost')
            plt.show()
        return self.evaluate(X, Y)
| [
"alvarezdelugo.jose@gmail.com"
] | alvarezdelugo.jose@gmail.com |
fc39807e7990bb84b9dd55a1058e1b467a921a81 | 8b22963b0ac1581249552ed9f61e6730b0d1898f | /src/encoded/commands/dev_servers.py | e2a122a2d970812b135f6b3e7e7db1bfc68835d9 | [
"MIT"
] | permissive | brianleesc/encoded | a070e07f59c59a19220908fc7f8f71db015a1c73 | 0d2961f04cf542f78e6f29c9a08da1b2913782c5 | refs/heads/master | 2021-01-17T12:11:10.578814 | 2015-02-06T21:38:13 | 2015-02-06T21:38:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,334 | py | """\
Examples
For the development.ini you must supply the paster app name:
%(prog)s development.ini --app-name app --init --clear
"""
from pyramid.paster import get_app
import atexit
import logging
import os.path
import select
import shutil
import sys
EPILOG = __doc__
logger = logging.getLogger(__name__)
def main():
    """Parse args, optionally init/clear data, then run Postgres and
    Elasticsearch dev servers, relaying their stdout until interrupted."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Run development servers", epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--app-name', help="Pyramid app name in configfile")
    parser.add_argument('config_uri', help="path to configfile")
    parser.add_argument('--clear', action="store_true", help="Clear existing data")
    parser.add_argument('--init', action="store_true", help="Init database")
    parser.add_argument('--load', action="store_true", help="Load test set")
    parser.add_argument('--datadir', default='/tmp/encoded', help="path to datadir")
    args = parser.parse_args()
    logging.basicConfig()
    # Loading app will have configured from config file. Reconfigure here:
    logging.getLogger('encoded').setLevel(logging.DEBUG)
    # Imported lazily so arg parsing/--help works without the test deps.
    from encoded.tests import elasticsearch_fixture, postgresql_fixture
    from encoded.commands import create_mapping
    datadir = os.path.abspath(args.datadir)
    pgdata = os.path.join(datadir, 'pgdata')
    esdata = os.path.join(datadir, 'esdata')
    if args.clear:
        for dirname in [pgdata, esdata]:
            if os.path.exists(dirname):
                shutil.rmtree(dirname)
    if args.init:
        postgresql_fixture.initdb(pgdata, echo=True)
    postgres = postgresql_fixture.server_process(pgdata, echo=True)
    elasticsearch = elasticsearch_fixture.server_process(esdata, echo=True)
    processes = [postgres, elasticsearch]
    # Ensure both server subprocesses are terminated and drained on exit.
    @atexit.register
    def cleanup_process():
        for process in processes:
            if process.poll() is None:
                process.terminate()
        for process in processes:
            try:
                for line in process.stdout:
                    sys.stdout.write(line)
            except IOError:
                pass
            process.wait()
    if args.init:
        app = get_app(args.config_uri, args.app_name)
        create_mapping.run(app)
    # NOTE(review): `app` is only bound when --init is given; --load without
    # --init would raise NameError here — confirm intended usage.
    if args.load:
        from webtest import TestApp
        environ = {
            'HTTP_ACCEPT': 'application/json',
            'REMOTE_USER': 'TEST',
        }
        testapp = TestApp(app, environ)
        from encoded.loadxl import load_all
        from pkg_resources import resource_filename
        inserts = resource_filename('encoded', 'tests/data/inserts/')
        docsdir = [resource_filename('encoded', 'tests/data/documents/')]
        load_all(testapp, inserts, docsdir)
    print('Started. ^C to exit.')
    stdouts = [p.stdout for p in processes]
    # Ugly should probably use threads instead
    # Relay child stdout until a pipe errors out (5s select timeout per pass).
    while True:
        readable, writable, err = select.select(stdouts, [], stdouts, 5)
        for stdout in readable:
            for line in iter(stdout.readline, ''):
                sys.stdout.write(line)
        if err:
            for stdout in err:
                for line in iter(stdout.readline, ''):
                    sys.stdout.write(line)
            break
if __name__ == '__main__':
main()
| [
"laurence@lrowe.co.uk"
] | laurence@lrowe.co.uk |
eab02dfd11097c80f771656034ab7171b20fe987 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/196/usersdata/264/76878/submittedfiles/atividade.py | 3740bbe7d78d83dbfaf5e714b13d553895fe31d1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # -*- coding: utf-8 -*-
import math
n= int(input('Digite o valor de n: '))
a=0
s=0
i=1
numerador=1
if (n<0):
n= n*(-1)
else:
n=n
while (i<=n):
s= (numerador)/(n+a)+s
i=i+1
a=a-1
numerador= numerador + 1
print ('%.5f' %s) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
aaaa1b695c26d13e55cfc7fffabe7e8d8b5ce84f | 50f04c633f36e9d64c40c4f1b434ed0c24e447c7 | /tkinter-examples/left_tab.py | 76d9d1583711f1ae8196f13e7fddc2e109f2d34b | [] | no_license | sarahchou/python-practice | 883ba7dedd60b2cc18d5d73ef7d3cbb74f09dede | 2a3d10144b74460d8ec513e3c7d49bdb48107596 | refs/heads/master | 2022-11-11T10:06:12.944579 | 2018-06-11T22:14:06 | 2018-06-11T22:14:06 | 136,985,077 | 0 | 1 | null | 2022-10-20T08:48:36 | 2018-06-11T21:54:46 | Python | UTF-8 | Python | false | false | 1,620 | py | import tkinter as tk
from tkinter import ttk
class TFEnvSelectionScreen(tk.Frame):
"""
This is where we give the user the chance to select their Terraform environment.
Uses a drop down menu for environment selection.
"""
def __init__(self, parent, controller):
root = tk.Tk()
style = ttk.Style(root)
style.configure('lefttab.TNotebook', tabposition='wn')
notebook = ttk.Notebook(root, style='lefttab.TNotebook')
f1 = tk.Frame(notebook, bg="red", width=200, height=200)
f1.__init__(self, parent)
env_options = ['Dev', 'Stage', 'Prod']
select_type_label = tk.Label(f1, text='Select Terraform Environment:')
select_type_label.grid(row=0,sticky='w')
env_var = tk.StringVar(self)
env_menu = ttk.Combobox(f1,textvariable=env_var, values=env_options)
env_menu.grid(row=0, column=1)
env_menu.current(1)
def get_env():
print("Environment selected is: " + env_var.get())
continue_button = tk.Button(f1, text='Continue', command=get_env)
continue_button.grid(row=3, column=0, padx=10,sticky='w')
continue_button.config(width=10,fg='DodgerBlue3')
cancel_button = tk.Button(f1, text='Cancel', command=self.quit())
cancel_button.grid(row=3, column=1, padx=10, sticky='e')
cancel_button.config(width=10)
env_var.set('')
f2 = tk.Frame(notebook, bg="blue", width=200, height=200)
notebook.add(f1, text="Environment")
notebook.add(f2, text="Components")
notebook.pack()
root.mainloop() | [
"chou.s@husky.neu.edu"
] | chou.s@husky.neu.edu |
1591790717be37542d2bbec2c3978a7a11fec374 | 732c0303ecfe8e915548846144e2a257d0ba0bd0 | /prob139.py | dea51619317d90ea968cedd82d869736ac4078c6 | [] | no_license | mercurium/proj_euler | e2c041d833b80369f0e7b7aa493a9ff5c1e22d91 | a8326af80cac040fa515350cf9972dca6f116f82 | refs/heads/master | 2020-04-06T05:24:31.185785 | 2017-05-06T23:50:56 | 2017-05-06T23:50:56 | 9,540,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | import time
START = time.time()
SIZE = 10**8
pythagTriplet = set()
count = 0  # NOTE(review): unused; the answer is len(pythagTriplet).
# Generate Pythagorean triples via Euclid's formula:
#   a = m^2 - n^2, b = 2mn, c = m^2 + n^2  (m > n, opposite parity).
for m in xrange(1,SIZE):
    # Smallest perimeter for this m is 2m(m+1); past SIZE nothing qualifies.
    if 2*m*(m+1) > SIZE:
        break
    # Opposite parity of m and n keeps the triple primitive.
    diff = 2 if m%2 == 0 else 1
    nLim = SIZE/(2*m) - m
    for n in xrange(1,min(m,nLim+1),diff):
        d,f = m*m,n*n
        a,b,c = d-f,2*m*n,d+f
        if a+b+c >= SIZE:
            break
        if a > b:
            a,b = b,a
        # Tiling condition (see note below): (b-a) must divide c.
        if c %(b-a) == 0:
            # Count every multiple of the primitive triple under the limit.
            for k in xrange(1,SIZE/(2*(d+f))+1):
                pythagTriplet.add((a*k,b*k,c*k))
print len(pythagTriplet)
print "Time Taken:", time.time()-START
"""
~/Desktop/python_projects/proj_euler $python prob139.py
10057761
Time Taken: 253.662650108 (slow, naive method)
Time Taken: 26.9965119362 (reordered the loops)

Method of attack: a = m^2-n^2,b = 2mn, c = m^2 +n^2
So we know that since we can tile the square, we have (b-a)|c.
After this, we only need to check the k's when we have a valid equation... :x

"""
| [
"jerrychen434@gmail.com"
] | jerrychen434@gmail.com |
ca64a4a361b84710defb666ee771b4f6d6ebac6a | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/accesscontextmanager/v1alpha/resources.py | a27052377017c6cf020e34d96072355a779e411c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://accesscontextmanager.googleapis.com/v1alpha/'
DOCS_URL = 'https://cloud.google.com/access-context-manager/docs/reference/rest/'
class Collections(enum.Enum):
    """Collections for all supported apis.

    Each member is (collection_name, path, flat_paths, params):
      collection_name: dotted API collection identifier.
      path: URI template relative to BASE_URL.
      flat_paths: named flattened path templates ('' is the default).
      params: ordered list of path parameter names.
    """

    ACCESSPOLICIES = (
        'accessPolicies',
        '{+name}',
        {
            '':
                'accessPolicies/{accessPoliciesId}',
        },
        [u'name']
    )
    ACCESSPOLICIES_ACCESSLEVELS = (
        'accessPolicies.accessLevels',
        '{+name}',
        {
            '':
                'accessPolicies/{accessPoliciesId}/accessLevels/'
                '{accessLevelsId}',
        },
        [u'name']
    )
    ACCESSPOLICIES_ACCESSZONES = (
        'accessPolicies.accessZones',
        '{+name}',
        {
            '':
                'accessPolicies/{accessPoliciesId}/accessZones/{accessZonesId}',
        },
        [u'name']
    )
    OPERATIONS = (
        'operations',
        '{+name}',
        {
            '':
                'operations/{operationsId}',
        },
        [u'name']
    )

    def __init__(self, collection_name, path, flat_paths, params):
        """Expose the member tuple fields as named attributes."""
        self.collection_name = collection_name
        self.path = path
        self.flat_paths = flat_paths
        self.params = params
| [
"saneetk@packtpub.com"
] | saneetk@packtpub.com |
781b936bd6b7a6d6bca1a2cd172a259c68c9c05d | 368b89707805e8ac6d5baf26a11b280369995ca5 | /src/server/db/__init__.py | a67a6f3951ae05d2b8532be1e5c10e3a9612a4c5 | [] | no_license | cheng93/fof | 2940d739c1399c88db06b5c99e8075e918fbf060 | 0115a6d3f6775c5c1c8a30cfd7f6f7f9006e75fe | refs/heads/develop | 2020-03-19T06:55:20.274365 | 2018-07-25T22:14:11 | 2018-07-25T22:14:11 | 136,067,138 | 0 | 0 | null | 2018-07-25T22:14:12 | 2018-06-04T18:33:35 | Python | UTF-8 | Python | false | false | 489 | py | from db.conference import conference
from db.division import division
from db.draft import draft
from db.game import game
from db.player import player
from db.player_history import player_history
from db.position import position
from db.team import team
from db.staff import staff
from db.staff_group import staff_group
from db.staff_history import staff_history
from db.staff_role import staff_role
from db.stage import stage
from db.stage_type import stage_type
from db.year import year
| [
"derek.c@hotmail.co.uk"
] | derek.c@hotmail.co.uk |
c749d8f7dbf564892ee366d7c3c6f7047ba94386 | 03dad0fb0d76b61524ab172e342e3e4ec22614d7 | /blender/arm/assets.py | cad343ada758334d3d277a06a79765bf10c75dff | [
"GPL-2.0-only",
"Zlib"
] | permissive | ceostevenjrogers/armory | 1739f1ddec20d7c720baaa7fd4952d14872c375a | c50a086d244dc2acac102ba91cb33e4b47bf40be | refs/heads/master | 2020-04-09T11:17:06.347466 | 2018-12-02T15:45:43 | 2018-12-02T15:45:43 | 160,304,146 | 1 | 0 | Zlib | 2018-12-04T05:33:45 | 2018-12-04T05:33:44 | null | UTF-8 | Python | false | false | 5,194 | py | import shutil
import os
import stat
import bpy
import arm.utils
# Per-build asset state, cleared by reset() at the start of each build.
assets = []
# Path fragments that break C++ target builds when present in asset paths.
reserved_names = ['return.']
khafile_defs = []
khafile_defs_last = []  # previous build's defines, for change detection
embedded_data = []
shaders = []
shaders_last = []  # previous build's shader list, for change detection
shaders_external = []
shader_datas = []
shader_passes = []
# NOTE(review): not cleared by reset() — confirm that is intentional.
shader_passes_assets = {}
shader_cons = {}
def reset():
    """Clear all per-build asset state.

    The previous build's khafile defines and shader list are preserved in
    the *_last globals so the new build can be compared against them.
    """
    global assets
    global khafile_defs
    global khafile_defs_last
    global embedded_data
    global shaders
    global shaders_last
    global shaders_external
    global shader_datas
    global shader_passes
    global shader_cons
    assets = []
    # Keep last build's values for change detection before clearing.
    khafile_defs_last = khafile_defs
    khafile_defs = []
    embedded_data = []
    shaders_last = shaders
    shaders = []
    shaders_external = []
    shader_datas = []
    shader_passes = []
    # Shader context buckets, keyed by context type.
    shader_cons = {}
    shader_cons['mesh_vert'] = []
    shader_cons['depth_vert'] = []
    shader_cons['depth_frag'] = []
    shader_cons['voxel_vert'] = []
    shader_cons['voxel_frag'] = []
    shader_cons['voxel_geom'] = []
def add(file):
    """Register an asset path once, warning on duplicate basenames and on
    paths containing reserved keywords (which break C++ builds)."""
    global assets
    if file in assets:
        return
    base = os.path.basename(file)
    if any(existing.endswith(base) for existing in assets):
        print('Armory Warning: Asset name "{0}" already exists, skipping'.format(base))
        return
    assets.append(file)
    # Warn about reserved file-name fragments.
    for keyword in reserved_names:
        if keyword in file:
            print('Armory Warning: File "{0}" contains reserved keyword, this will break C++ builds!'.format(file))
def add_khafile_def(d):
    """Collect a khafile define, ignoring duplicates."""
    global khafile_defs
    if d in khafile_defs:
        return
    khafile_defs.append(d)
def add_embedded_data(file):
    """Track a file for embedding, ignoring duplicates."""
    global embedded_data
    if file in embedded_data:
        return
    embedded_data.append(file)
def add_shader(file):
    """Queue a shader file for compilation, ignoring duplicates."""
    # (The old `global shaders_last` declaration was unused here.)
    global shaders
    if file in shaders:
        return
    shaders.append(file)
def add_shader_data(file):
    """Register a shader data file, ignoring duplicates."""
    global shader_datas
    if file in shader_datas:
        return
    shader_datas.append(file)
def add_shader_pass(data_name):
    """Register a render-pass shader data name.

    All pass datas are written into the single shader_datas.arm file.
    """
    global shader_passes
    add_shader_data(arm.utils.get_fp_build() + '/compiled/Shaders/shader_datas.arm')
    if data_name in shader_passes:
        return
    shader_passes.append(data_name)
def add_shader_external(file):
    """Track an external shader and queue its compiled counterpart."""
    global shaders_external
    shaders_external.append(file)
    # Take the path tail, handling both '/' and '\\' separators.
    tail = file.split('/')[-1].split('\\')[-1]
    add_shader(arm.utils.get_fp_build() + '/compiled/Shaders/' + tail)
# Global switch: set to False during a build so property-update callbacks
# below become no-ops.
invalidate_enabled = True # Disable invalidating during build process

def remove_readonly(func, path, excinfo):
    """shutil.rmtree onerror hook: clear the read-only bit, then retry the
    failed operation *func* on *path*."""
    os.chmod(path, stat.S_IWRITE)
    func(path)
def invalidate_shader_cache(self, context):
    """Delete every compiled-shader/resource cache directory so all shaders
    are recompiled on the next build.

    Previously this repeated the same isdir/rmtree stanza seven times; the
    directories are now driven from a single tuple.
    """
    # compiled.inc changed, recompile all shaders next time
    global invalidate_enabled
    if invalidate_enabled == False:
        return
    fp = arm.utils.get_fp_build()
    for reldir in ('/compiled/Shaders',
                   '/debug/html5-resources',
                   '/krom-resources',
                   '/debug/krom-resources',
                   '/windows-resources',
                   '/linux-resources',
                   '/osx-resources'):
        if os.path.isdir(fp + reldir):
            shutil.rmtree(fp + reldir, onerror=remove_readonly)
def invalidate_compiled_data(self, context):
    """Remove the whole compiled-data directory so everything is re-exported."""
    global invalidate_enabled
    if invalidate_enabled == False:
        return
    compiled_dir = arm.utils.get_fp_build() + '/compiled'
    if os.path.isdir(compiled_dir):
        shutil.rmtree(compiled_dir, onerror=remove_readonly)
def invalidate_mesh_data(self, context):
    """Drop cached exported meshes; they are rebuilt on the next export."""
    meshes_dir = arm.utils.get_fp_build() + '/compiled/Assets/meshes'
    if os.path.isdir(meshes_dir):
        shutil.rmtree(meshes_dir, onerror=remove_readonly)
def invalidate_envmap_data(self, context):
    """Drop cached environment maps; they are rebuilt on the next export."""
    envmaps_dir = arm.utils.get_fp_build() + '/compiled/Assets/envmaps'
    if os.path.isdir(envmaps_dir):
        shutil.rmtree(envmaps_dir, onerror=remove_readonly)
def invalidate_unpacked_data(self, context):
    """Drop unpacked assets; they are re-extracted on the next export."""
    unpacked_dir = arm.utils.get_fp_build() + '/compiled/Assets/unpacked'
    if os.path.isdir(unpacked_dir):
        shutil.rmtree(unpacked_dir, onerror=remove_readonly)
def shader_equal(sh, ar, shtype):
    """Merge equal shaders: if an equal shader already exists in *ar*, reuse
    its compiled data for *sh* and mark *sh* as linked; otherwise record
    *sh* as a new unique shader."""
    match = next((e for e in ar if sh.is_equal(e)), None)
    if match is None:
        ar.append(sh)
        return
    sh.context.data[shtype] = match.context.data[shtype]
    sh.is_linked = True
def vs_equal(c, ar):
    """Merge context *c*'s vertex shader with an equal one in *ar*."""
    shader_equal(c.vert, ar, 'vertex_shader')

def fs_equal(c, ar):
    """Merge context *c*'s fragment shader with an equal one in *ar*."""
    shader_equal(c.frag, ar, 'fragment_shader')

def gs_equal(c, ar):
    """Merge context *c*'s geometry shader with an equal one in *ar*."""
    shader_equal(c.geom, ar, 'geometry_shader')

def tcs_equal(c, ar):
    """Merge context *c*'s tessellation-control shader with an equal one in *ar*."""
    shader_equal(c.tesc, ar, 'tesscontrol_shader')

def tes_equal(c, ar):
    """Merge context *c*'s tessellation-evaluation shader with an equal one in *ar*."""
    shader_equal(c.tese, ar, 'tesseval_shader')
| [
"lubos.lenco@gmail.com"
] | lubos.lenco@gmail.com |
b7461bb69fa37381ee883fe28d2995c0f9a596d9 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_check_spline4.py | 594c19bac098d7a9a6471e37e50d007ad70eca84 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | from xcp2k.inputsection import InputSection
from _each309 import _each309
class _check_spline4(InputSection):
    """Input section for CP2K's CHECK_SPLINE print key.

    Holds the section keywords plus the nested EACH subsection; _keywords
    and _subsections map attribute names to CP2K input names.
    """
    def __init__(self):
        InputSection.__init__(self)
        self.Section_parameters = None  # value placed after the section name
        self.Add_last = None
        self.Common_iteration_levels = None
        self.Filename = None
        self.Log_print_key = None
        self.EACH = _each309()
        self._name = "CHECK_SPLINE"
        self._keywords = {'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Log_print_key': 'LOG_PRINT_KEY', 'Add_last': 'ADD_LAST', 'Filename': 'FILENAME'}
        self._subsections = {'EACH': 'EACH'}
        self._attributes = ['Section_parameters']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
149c01d58071fd26c34db132b17aa214201a0652 | 3bcb4cba2caa77acf7e418455c29074d06553f7e | /scripts/files-touched-check.py | dc8077d357f74af375dae10523da758a621226c1 | [] | no_license | eleccoin/gitian.sigs | 7c02be84ee2dbf05334b1863c05e0f860ee5f0d8 | 056e34324642b28659d7b47832115bd3358b17fa | refs/heads/master | 2021-07-15T23:36:46.830658 | 2021-02-11T11:18:58 | 2021-02-11T11:18:58 | 237,335,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import sys
import subprocess
travis_commit_range = os.getenv('TRAVIS_COMMIT_RANGE')
if not travis_commit_range:
print("Travis commit range is empty, exiting...")
sys.exit(1)
try:
result = subprocess.check_output(['git', 'diff', '--no-commit-id', '--name-status', '-r', travis_commit_range])
except Exception as e:
print(e.output)
raise e
files_added = result.decode('utf-8').splitlines()
print(files_added)
subdir_name = ""
for file_added in files_added:
file_added = file_added.split(maxsplit=1)
# Exclude certain files from some checks
excluded_files = ['README.md', '.travis.yml', '.gitattributes', 'scripts/extract-sig.py', 'scripts/files-touched-check.py']
if file_added[1] in excluded_files:
print("Warning: modified non-gitian file", file_added[1])
continue
# Fail if file isn't a gitian file
if not file_added[1].endswith(".assert") and not file_added[1].endswith(".assert.sig"):
print("Error: file type is not valid:", file_added[1])
sys.exit(1)
# Check that files are only added, not modified or deleted
if file_added[0] != 'A':
print("Error: modified or removed existing file:", file_added[1])
sys.exit(1)
# Check that files added are only added to a single subdirectory name
if file_added[1].count('/') >= 1:
directories = file_added[1].split('/')
current_subdir = directories[1]
if not subdir_name:
subdir_name = current_subdir
if subdir_name != current_subdir:
print("Error: files added to multiple subdirectories. Already seen", subdir_name, "got", file_added[1])
sys.exit(1)
# Check if directory depth is accurate
if len(directories) != 3:
print("Error: Directory depth is not 3")
sys.exit(1)
# Check if directory structures match excepcted
if not directories[0].endswith(('-linux', '-osx-signed', '-osx-unsigned', '-win-signed', '-win-unsigned')):
print("Error: top directory name is not valid:", directories[0])
sys.exit(1)
else:
print("Error: unhandled file in pull request:", file_added[1])
sys.exit(1)
sys.exit(0)
| [
"unify@eleccoin.org"
] | unify@eleccoin.org |
5ea95e780de03641bc3fa55a4a4d96545eb0e332 | a1f6290c078b3d9bd004c777972ce4d5bc8af749 | /IVote/.history/app_20211026134219.py | 716bafce93fc77cff60084c9051fa852e2da6c98 | [] | no_license | CS699-IITB-Autumn-2021/project-alpha_team | 2803b99b49dcfe6f1acdcdf768791d58e0441d05 | d3a7105d6d0d702d4b31a80a331b3772a03f2428 | refs/heads/master | 2023-08-19T17:32:01.401161 | 2021-10-27T19:14:08 | 2021-10-27T19:14:08 | 413,135,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,419 | py | from posixpath import lexists
import sqlite3
import os.path
from flask import Flask, render_template, request,redirect,session
from flask.helpers import url_for
from datetime import date
from datetime import datetime
from pathlib import Path
from werkzeug.utils import redirect
from generateResult import generateResults
# Flask application object and one-time SQLite schema bootstrap.
app = Flask(__name__)
app.secret_key="ivote"  # NOTE(review): hard-coded secret key; load from config/env in production
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
# Core tables: registered voters, admins, elections, candidates and tallies.
# NOTE(review): Voters has 5 columns but signup() inserts only 4 values — confirm intent.
c.execute("CREATE TABLE IF NOT EXISTS Voters(name TEXT,email TEXT,cardno TEXT,password TEXT,voted TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS admin(email TEXT,password TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS election(electionid INTEGER,topic TEXT,startdate TEXT,enddate TEXT,numcand INTEGER,candidate TEXT,ended Text)")
c.execute("CREATE TABLE IF NOT EXISTS candidate(name TEXT,electionid INTEGER,candidateid TEXT,age INTEGER,mobno INTEGER,email TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS result(election_id Text,cand_id Text, noofvotes Number)")
# For every existing election: a per-election vote table plus an on-disk
# "blockchain" text file under static/blockchain/<id>.txt.
c.execute("SELECT electionid FROM election")
r = c.fetchall()
for i in r:
    fle = Path("static/blockchain/"+str(i[0])+".txt")
    c.execute("CREATE TABLE IF NOT EXISTS election"+str(i[0])+"(secret_code TEXT ,name_of_blockchain TEXT,voter_id TEXT,vote_given TEXT)")
    fle.touch(exist_ok=True)  # create the file if missing, keep it if present
    f = open(fle)  # NOTE(review): handle is never used or closed (fd leak)
conn.commit()
conn.close()
@app.route('/',methods=['GET','POST'])
def login():
    """Voter login: GET renders the form, POST validates credentials.

    On a match against the Voters table the user is redirected to the
    voter dashboard; otherwise the login page is rendered again.
    """
    if request.method == "POST":
        email = request.form["email"]
        password = request.form["password"]
        conn = sqlite3.connect("ivote.db")
        c = conn.cursor()
        # Parameterized query: the original concatenated raw form input into
        # the SQL text, which allowed SQL injection.
        c.execute("SELECT * FROM Voters WHERE email=? AND password=?",
                  (email, password))
        rows = c.fetchall()
        conn.close()  # the original never closed this connection
        if rows:
            # TODO(review): persist the logged-in voter in session[...] here;
            # the original left an unfinished "#session[]" placeholder.
            return redirect(url_for("voter"))
    return render_template('home.html')
@app.route('/signup.html',methods=['GET','POST'])
def signup():
    """Voter registration: GET renders the form, POST creates the account."""
    if request.method == "POST":
        name = request.form["name"]
        email = request.form["email"]
        cardno = request.form["id"]
        password = request.form["password"]
        confirm = request.form["confirm"]
        if password == confirm:
            conn = sqlite3.connect("ivote.db")
            c = conn.cursor()
            # BUG FIX: Voters has five columns (name, email, cardno, password,
            # voted) but the original inserted only four values, so sqlite3
            # raised OperationalError on every signup. A new account starts
            # not-voted ('N' assumed as the sentinel — TODO confirm against
            # the voting code). Parameterized to prevent SQL injection.
            c.execute("INSERT INTO Voters VALUES(?,?,?,?,?)",
                      (name, email, cardno, password, 'N'))
            conn.commit()
            conn.close()
            return render_template('login.html')
    return render_template('signup.html')
@app.route('/Login.html',methods=['GET','POST'])
def adminlogin():
    """Administrator login against the admin table."""
    if request.method == "POST":
        email = request.form["email"]
        password = request.form["password"]
        conn = sqlite3.connect("ivote.db")
        c = conn.cursor()
        # Parameterized to close the SQL-injection hole in the original.
        c.execute("SELECT * FROM admin WHERE email=? AND password=?",
                  (email, password))
        rows = c.fetchall()
        conn.close()  # the original leaked the connection
        if rows:
            return redirect(url_for("admin"))
    return render_template('Login.html')
@app.route('/forgotPassword.html',methods=['GET','POST'])
def forgot():
    """Render the forgot-password page (no reset logic implemented yet)."""
    return render_template('forgotPassword.html')
@app.route('/admin.html',methods = ['GET','POST'])
def admin():
    """Admin dashboard: POST creates a new election if the id is unused."""
    msg = None
    if request.method == "POST":
        id = request.form['id']
        topic = request.form['topic']
        start = request.form['startdate']
        end = request.form['enddate']
        numcand = request.form['numcand']
        select = request.form['select']
        conn = sqlite3.connect("ivote.db")
        c = conn.cursor()
        # Parameterized lookup (original concatenated raw form input).
        c.execute("SELECT * from election WHERE electionid = ?", (id,))
        r = c.fetchall()
        if len(r) >= 1:
            msg = "Election with this id already exist"
        else:
            # BUG FIX: the original hand-built VALUES string was malformed
            # (missing closing quote after the candidate list plus stray
            # quotes around 'T'), so the INSERT never succeeded. The election
            # table has 7 columns; 'T' fills the trailing "ended" flag, the
            # value the original string tried to supply.
            c.execute("INSERT INTO election VALUES(?,?,?,?,?,?,?)",
                      (id, topic, start, end, numcand, select, 'T'))
            conn.commit()
            msg = "Election created"
        conn.close()
    return render_template('admin.html',msg = msg)
@app.route("/addcandidate.html",methods = ['GET','POST'])
def add():
if request.method=="POST":
name = request.form['name1']
id = request.form['id']
candid = request.form['candid']
age = request.form['age']
mobile = request.form['mobile']
email = request.form['email']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("INSERT INTO candidate VALUES('"+name+"','"+id+"','"+candid+"','"+age+"','"+mobile+"','"+email+"')")
conn.commit()
conn.close()
return render_template('addcandidate.html')
@app.route("/results.html",methods=['GET','POST'])
def result():
msg = None
print("Working")
if request.method=="POST":
id = request.form['id']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * from election WHERE electionid = '"+id+"'")
r = c.fetchall()
if len(r) >= 1:
print("Working")
return redirect(url_for("viewresults",id = id))
else:
msg = "Please enter correct ID"
return render_template('results.html',msg = msg)
@app.route("/election",methods=['GET','POST'])
def election():
id = request.form.get("id",None)
return render_template('election.html')
@app.route("/voter.html",methods=['GET','POST'])
def voter():
if request.method=="POST":
id = request.form['id']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * from election WHERE electionid = '"+id+"'")
r = c.fetchall()
if len(r) >= 1:
return redirect(url_for("election",id = id))
return render_template('voter.html')
@app.route("/voterresult.html")
def results():
msg = None
print("Working")
if request.method=="POST":
id = request.form['id']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * from election WHERE electionid = '"+id+"'")
r = c.fetchall()
if len(r) >= 1:
print("Working")
return redirect(url_for("viewresults",id = id))
else:
msg = "Please enter correct ID"
return render_template("voterresult.html",msg=msg)
@app.route("/view",methods=["GET","POST"])
def viewresults():
id = request.form.get('id',None)
print(id)
return render_template("view.html")
@app.route("/logout")
def logout():
return redirect(url_for("login "))
@app.route("/genResult.html",methods=["GET","POST"])
def genresult():
msg=""
if request.method=="POST":
GR = generateResults()
id = request.form.get('id',None)
msg=GR.genResult(id)
print(msg)
return render_template("genResult.html",msg=msg)
@app.route("/viewblockchain.html")
def viewblockchain():
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT electionid FROM election")
r = c.fetchall()
allbc=[]
for i in r:
fle = Path("static/blockchain/"+str(i[0])+".txt")
allbc.append("static/blockchain/"+str(i[0])+".txt")
fle.touch(exist_ok=True)
f = open(fle)
conn.commit()
conn.close()
return render_template('viewblockchain.html',allbc=allbc,r=r)
if __name__=="__main__":
    # NOTE(review): debug=True enables the interactive Werkzeug debugger;
    # it must be disabled before deploying this voting app.
    app.run(debug=True)
| [
"ashwinpatidar609@gmail.com"
] | ashwinpatidar609@gmail.com |
626020df48e5f7b97d39391e1c4073a4c6431329 | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L2S/2S-2L_wat_20Abox/set_1ns_equi.py | 312c2ffe7eec84ca46369451358d2b944b6fa62c | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import os
# Set up and submit one 1 ns equilibration job per TI lambda window.
# For each lambda value a fresh working directory is created, templates
# are copied in with the placeholder XXX replaced by the lambda value,
# topology/restart files are staged, and the PBS job is queued.
# NOTE(review): ``dir`` shadows the builtin of the same name.
dir = '/mnt/scratch/songlin3/run/p38a/L2S/wat_20Abox/ti_one-step/2S_2L/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
# The 12 lambda windows of the transformation.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Start from a clean per-lambda directory named after the value.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    # Equilibration input: copy template and substitute the lambda value.
    eqin = workdir + "%6.5f_equi.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    # PBS submission script: same template substitution.
    pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # Stage the merged topology and the starting restart file.
    os.system("cp ../2S-2L_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    # Queue the job, then return to the run root for the next window.
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
a1c9ce48603c67ab62bd83e7a1b5276abec33b83 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03026/s122350460.py | 02e08d83c023e291595149871e806ce417d61dfc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | #D問題
import heapq
# Read a tree of N nodes (N-1 edges, 1-based in the input) and N values C.
# Assign the largest value to node 0, then spread the remaining values
# outward in decreasing order via a heap-driven traversal, so each newly
# reached node gets the next-largest unused value.
N = int(input())
AB = [[] for i in range(N)]  # adjacency lists
for i in range(N-1):
    a,b = map(int,input().split())
    a-=1
    b-=1
    AB[a].append(b)
    AB[b].append(a)
C = list(map(int,input().split()))
C.sort(reverse=True)  # hand values out largest-first
var = [0 for i in range(N)]  # value assigned to each node (0 = unassigned)
var[0] = C[0]
Q = []
heapq.heappush(Q,0)  # frontier of assigned nodes, smallest index first
ind = 1  # next value in C to hand out
for i in range(N-1):
    q = heapq.heappop(Q)
    for j in AB[q]:
        if var[j] == 0:
            var[j] = C[ind]
            ind+=1
            heapq.heappush(Q,j)
# Achieved score: every edge contributes its smaller endpoint's value,
# which this greedy makes total sum(C) minus the maximum C[0].
print(sum(C)-C[0])
for v in var:
    print(v,end=" ")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
910c11f006b429ae64433b34f69b78eb30c952c9 | cb0bde8ab641d5e411e91477728ade090836b729 | /sdk/python/pulumi_azure_nextgen/datashare/v20200901/list_share_subscription_source_share_synchronization_settings.py | fc3e394afb17f91b49ec35a8c117a7a0f398233d | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | rchamorro/pulumi-azure-nextgen | 7debd444063f0f9810ac0ee5fe11e7e8913b4886 | 09987cba1c466657730a23f5083aa62ec3dc8247 | refs/heads/master | 2023-03-03T09:32:59.634185 | 2021-02-10T16:13:24 | 2021-02-10T16:13:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,670 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListShareSubscriptionSourceShareSynchronizationSettingsResult',
'AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult',
'list_share_subscription_source_share_synchronization_settings',
]
# Generated by the Pulumi SDK Generator (see file header) — code left
# byte-identical; comments only.
@pulumi.output_type
class ListShareSubscriptionSourceShareSynchronizationSettingsResult:
    """
    List response for get source share Synchronization settings
    """
    def __init__(__self__, next_link=None, value=None):
        # Defensive type checks: falsy values (None/"") skip validation.
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> Optional[str]:
        """
        The Url of next result page.
        """
        return pulumi.get(self, "next_link")
    @property
    @pulumi.getter
    def value(self) -> Sequence['outputs.ScheduledSourceSynchronizationSettingResponseResult']:
        """
        Collection of items of type DataTransferObjects.
        """
        return pulumi.get(self, "value")
class AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult(ListShareSubscriptionSourceShareSynchronizationSettingsResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns __await__ into a generator, as the
        # awaitable protocol requires; the result itself is returned directly.
        if False:
            yield self
        return ListShareSubscriptionSourceShareSynchronizationSettingsResult(
            next_link=self.next_link,
            value=self.value)
def list_share_subscription_source_share_synchronization_settings(account_name: Optional[str] = None,
                                                                  resource_group_name: Optional[str] = None,
                                                                  share_subscription_name: Optional[str] = None,
                                                                  skip_token: Optional[str] = None,
                                                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult:
    """
    List the synchronization settings of the source share of a share
    subscription (Azure Data Share, API version 2020-09-01). Generated
    code: invokes the provider function and maps the raw result.

    :param str account_name: The name of the share account.
    :param str resource_group_name: The resource group name.
    :param str share_subscription_name: The name of the shareSubscription.
    :param str skip_token: Continuation token
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['shareSubscriptionName'] = share_subscription_name
    __args__['skipToken'] = skip_token
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller supplies none.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:datashare/v20200901:listShareSubscriptionSourceShareSynchronizationSettings', __args__, opts=opts, typ=ListShareSubscriptionSourceShareSynchronizationSettingsResult).value
    return AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult(
        next_link=__ret__.next_link,
        value=__ret__.value)
| [
"noreply@github.com"
] | rchamorro.noreply@github.com |
c4bf1aaa0079951e7f08ba9f8ba36f87a8665688 | 7b1a5db0a067766a9805fe04105f6c7f9ff131f3 | /pysal/lib/cg/ops/_accessors.py | fee7fd6a8618795c68306552da8fcaf9a7104661 | [] | permissive | ocefpaf/pysal | 2d25b9f3a8bd87a7be3f96b825995a185624e1d0 | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | refs/heads/master | 2020-06-26T17:13:06.016203 | 2019-07-31T19:54:35 | 2019-07-31T19:54:35 | 199,696,188 | 0 | 0 | BSD-3-Clause | 2019-07-30T17:17:19 | 2019-07-30T17:17:18 | null | UTF-8 | Python | false | false | 1,368 | py | import functools as _f
__all__ = [ 'area', 'bbox', 'bounding_box', 'centroid', 'holes', 'len',
'parts', 'perimeter', 'segments', 'vertices']
def get_attr(df, geom_col='geometry', inplace=False, attr=None):
    """Pull attribute ``attr`` off every geometry in ``df[geom_col]``.

    Parameters
    ----------
    df : pandas.DataFrame with a geometry column.
    geom_col : name of the geometry column.
    inplace : if True, store the result in a new ``shape_<attr>`` column
              on ``df`` and return None; otherwise return the Series.
    attr : name of the geometry attribute to extract.
    """
    outval = df[geom_col].apply(lambda shape: getattr(shape, attr))
    if inplace:
        # BUG FIX: the original formatted ``func.__name__`` here, but no name
        # ``func`` exists in this scope, so every inplace call raised
        # NameError. The derived column is named after the attribute.
        outcol = 'shape_{}'.format(attr)
        df[outcol] = outval
        return None
    return outval
# Shared docstring template applied to every generated accessor below;
# ``{n}`` is substituted with the attribute name. Typo fix: "contaning"
# -> "containing".
_doc_template =\
"""
Tabular accessor to grab a geometric object's {n} attribute
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
geom_col: string
the name of the column in df containing the geometry
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series containing the results of the computation. If
operating inplace, the derived column will be under 'shape_{n}'
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a series.
See Also
---------
For further documentation about the attributes of the object in question, refer
to shape classes in pysal.cg.shapes
"""
# Build one accessor function per attribute name in __all__ by partially
# applying get_attr, attach the shared docstring, then inject them into the
# module namespace so they are importable as plain functions.
# NOTE(review): this rebinds the name ``len`` at module level, shadowing the
# builtin for the rest of this module and for star-importers.
_accessors = dict()
for k in __all__:
    _accessors[k] = _f.partial(get_attr, attr=k)
    _accessors[k].__doc__ = _doc_template.format(n=k)
globals().update(_accessors)
| [
"sjsrey@gmail.com"
] | sjsrey@gmail.com |
1f2497cd5546b8b5809730d42af84ca132224a9b | daa5a7e8deaa5b2b1db76a907077a8c1bb3313b2 | /problem14/p14.py | f9e25df205ff783be0670d4bc40b485f8cc2bca3 | [] | no_license | janFrancoo/Project-Euler | 175933ca643ccca42cf1b7a27cc49694fe22da5c | 24f7d913939883786aaf68f485b31eda99f657b3 | refs/heads/master | 2020-06-19T19:11:35.681184 | 2019-11-23T06:54:21 | 2019-11-23T06:54:21 | 196,838,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | def chain_length(number, limit, chain):
length = 1
copyNum = number
while number != 1:
if number < limit:
if chain[number] > 0:
length += chain[number]
break
if number % 2 == 0:
number = number // 2
else:
number = (number * 3) + 1
length += 1
chain[copyNum] = length
return length
def find_longest_chain(limit):
    """Return the start number below ``limit`` with the longest Collatz chain.

    ``chain`` memoizes chain lengths for chain_length(), so each starting
    value only walks until it hits already-computed territory.
    """
    chain = [0] * limit
    best_length = 0  # renamed: the original shadowed the builtin ``max``
    best_start = 1   # initialized: the original raised UnboundLocalError for limit < 2
    for num in range(1, limit):
        length = chain_length(num, limit, chain)
        if length > best_length:
            best_length = length
            best_start = num
    return best_start
print(find_longest_chain(1000000))
| [
"noreply@github.com"
] | janFrancoo.noreply@github.com |
77c7244b91eb34417f48b52335e55d62f077c237 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03210/s465817633.py | de6f12b053fb956866a6a4cb9c9b6a6c7f24de52 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | import sys
from bisect import *
from heapq import *
from collections import *
from itertools import *
from functools import *
from math import *
from fractions import *
sys.setrecursionlimit(100000000)
input = lambda: sys.stdin.readline().rstrip()
def main():
    """Read one integer from stdin; print YES if it is 3, 5 or 7, else NO."""
    # Set literal instead of a list: clearer intent for fixed membership.
    print('YES' if int(input()) in {3, 5, 7} else 'NO')
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5ef6fb29abe779f4c919e528eceadcae6e5b6d6c | f0fefeafdc4836fa76c5ccd493a40470839a61de | /core/virtualNetworkFunction.py | 193ceb2a03572f715a42de65de2f5257881fcb2a | [] | no_license | wuyangzhang/vnf | bb94b43bc29e78f8e218b4c0da7b32f12682c1e9 | 26e524f4efa5f161dac071169448cb7bef810cdd | refs/heads/master | 2022-11-10T02:25:53.743598 | 2020-06-19T19:41:44 | 2020-06-19T19:41:44 | 271,430,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | import random
import uuid
class VirtualNetworkFunction:
    """A virtual network function (VNF) and its resource requirements.

    Units: CPU as a core count, memory in GB, throughput in Mbps. Eight
    preset VNF flavors are exposed as factory methods vnf1()..vnf8().
    """

    def __init__(self, name, cpu, mem, thr):
        self.id = uuid.uuid4()       # unique instance identifier
        self.name = name
        self.CPU = cpu               # count
        self.memory = mem            # GB
        self.throughput = thr        # Mbps
        self.attached_server = None  # set later via attach_server()

    def __str__(self):
        # NOTE(review): the "id:" label formats self.name, not self.id; the
        # string is kept byte-identical so existing output is unchanged.
        return 'id: {}, requested CPU num: {}, requested memory: {} GB, throughput: {} Mbps'.format(self.name, self.CPU, self.memory, self.throughput)

    def attach_server(self, server):
        """Record the server this VNF is placed on."""
        self.attached_server = server

    @staticmethod
    def get_random_vnf():
        """Return a fresh, uniformly random preset VNF.

        Picks the factory first and instantiates only the chosen preset;
        the original built all eight VNFs per call and discarded seven.
        """
        factory = random.choice([VirtualNetworkFunction.vnf1,
                                 VirtualNetworkFunction.vnf2,
                                 VirtualNetworkFunction.vnf3,
                                 VirtualNetworkFunction.vnf4,
                                 VirtualNetworkFunction.vnf5,
                                 VirtualNetworkFunction.vnf6,
                                 VirtualNetworkFunction.vnf7,
                                 VirtualNetworkFunction.vnf8])
        return factory()

    @staticmethod
    def vnf1():
        """Small firewall preset."""
        return VirtualNetworkFunction('firewall_small', 4, 2, 100)

    @staticmethod
    def vnf2():
        """Normal firewall preset."""
        return VirtualNetworkFunction('firewall_normal', 4, 8, 200)

    @staticmethod
    def vnf3():
        """Large firewall preset."""
        return VirtualNetworkFunction('firewall_large', 4, 8, 400)

    @staticmethod
    def vnf4():
        """Intrusion detection system preset."""
        return VirtualNetworkFunction('IDS', 4, 6, 80)

    @staticmethod
    def vnf5():
        """Normal IPSec gateway preset."""
        return VirtualNetworkFunction('IPSec_normal', 4, 4, 268)

    @staticmethod
    def vnf6():
        """Large IPSec gateway preset."""
        return VirtualNetworkFunction('IPSec_large', 4, 8, 580)

    @staticmethod
    def vnf7():
        """Normal WAN optimizer preset."""
        return VirtualNetworkFunction('wan_opt_normal', 2, 2, 10)

    @staticmethod
    def vnf8():
        """Large WAN optimizer preset."""
        return VirtualNetworkFunction('wan_opt_large', 2, 4, 50)
if __name__ == '__main__':
vnf = VirtualNetworkFunction.get_random_vnf()
print(vnf) | [
"you@example.com"
] | you@example.com |
1f4c242409eb31a5e3cf9e347891200845218a79 | c33496682b760deac61fedecba3e82ce4e41dfde | /scripts/e284.py | 12aa868641e9ef20219b57ffb0df9a540a6225c2 | [
"MIT"
] | permissive | ferasalsaab/neuralnilm_prototype | c5e9cde02d475ac499b15fea62143e76adff07d0 | 2119292e7d5c8a137797ad3c9abf9f37e7f749af | refs/heads/master | 2020-04-16T14:38:03.615279 | 2018-01-29T15:30:43 | 2018-01-29T15:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,751 | py | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
# max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2013-07-01"),
seq_length=512,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.7,
n_seq_per_batch=16,
# subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: mdn_nll(x, t).mean(),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
100: 5e-04,
500: 1e-04,
1000: 5e-05,
2000: 1e-05,
3000: 5e-06,
4000: 1e-06,
10000: 5e-07,
50000: 1e-07
},
plotter=MDNPlotter
)
def exp_a(name):
    """Build the network for experiment variant 'a'.

    Copies the module-level source/net config dicts, wires in a fresh
    RealApplianceSource, and stacks two tanh RNN layers feeding a
    single-component mixture density output.
    """
    global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    N = 50  # hidden units per recurrent layer
    net_dict_copy['layers_config'] = [
        {
            'type': RecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=1.),
            'nonlinearity': tanh
        },
        {
            'type': RecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            # Scale init by 1/sqrt(N) for the second layer's fan-in.
            'W_in_to_hid': Normal(std=1/sqrt(N)),
            'nonlinearity': tanh
        },
        {
            'type': MixtureDensityLayer,
            'num_units': source.n_outputs,
            'num_components': 1
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run each selected experiment variant, logging per-experiment."""
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    EXPERIMENTS = list('a')  # only variant 'a' is enabled here
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            # NOTE(review): init_experiment returns a string such as
            # "exp_a(...)" that is eval'd to build the net — trusted input
            # only, but fragile.
            net = eval(func_call)
            run_experiment(net, epochs=5000)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            raise
        finally:
            logging.shutdown()
if __name__ == "__main__":
    main()
| [
"jack-list@xlk.org.uk"
] | jack-list@xlk.org.uk |
5517778c89fa3a66841d7073b230af2f660c2348 | c652797f5303bb7102967fc6603e5704025afb36 | /gamelayer/boilerplates/scene/extension.py | 3fec054ba472343048494727bd1b360db95fb06e | [
"MIT"
] | permissive | Windspar/Gamelayer | fc1ce499cccb6530a4dcd446f9d86fd44026e564 | 65e1cf11548bc02bc49348eb265c209172c14844 | refs/heads/master | 2022-06-13T08:06:37.828771 | 2020-05-07T17:17:59 | 2020-05-07T17:17:59 | 258,047,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py |
class Extension:
def __init__(self):
self._logic = []
self._extension = []
def add(self, callback):
self._extension.append(callback)
def add_logic(self, callback):
self._logic.append(callback)
def process(self, manager):
for extension in self._extension:
extension(manager)
def process_logic(self, manager):
for extension in self._logic:
extension(manager)
| [
"kdrakemagi@gmail.com"
] | kdrakemagi@gmail.com |
12f9e2c63739664ce40db1a2bc6707196ddec657 | 2119953dd04916fa2adf3f42a487f3f9754d1f66 | /modules/google-earth-engine/docker/src/sepal/image_operation.py | 67fc4e0360c44bc318732e200a493abe58beb22a | [
"MIT"
] | permissive | sarahwertz/sepal | 91d12e3317cd07ad4c99469d5b6211d74013b330 | efbbc33ac99db332fc13f9dfd4c777a8d2c1b41e | refs/heads/master | 2020-06-11T07:42:08.835556 | 2019-05-27T14:21:28 | 2019-05-27T14:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py | import ee
import math
class ImageOperation(object):
def __init__(self, image):
super(ImageOperation, self).__init__()
self.image = image
self.input_band_names = image.bandNames()
def select(self, name):
return self.image.select(name)
def set(self, name, toAdd, args={}):
toAdd = self.toImage(toAdd, args)
self.image = self.image.addBands(toAdd.rename([name]), None, True)
def setIf(self, name, condition, trueValue, args={}):
self.setIfElse(name, condition, trueValue, name, args)
def setIfElse(self, name, condition, trueValue, falseValue, args={}):
self.set(name,
self.toImage(falseValue, args)
.where(self.toImage(condition, args), self.toImage(trueValue, args)))
def setAll(self, image):
# Replace bands in source image, to ensure all image properties are preserved
self.image = self.image.addBands(image, None, True)
def invertMask(self, mask):
return mask.multiply(-1).add(1)
def toImage(self, band, args={}):
if isinstance(band, basestring):
if band.find('.') > -1 or band.find(' ') > -1 or band.find('{') > -1:
band = self.image.expression(self.format(band, args), {'i': self.image})
else:
band = self.image.select(band)
return ee.Image(band)
def format(self, s, args={}):
if not args:
args = {}
allArgs = self.merge({'pi': math.pi}, args)
result = str(s).format(**allArgs)
if result.find('{') > -1:
return format(result, args)
return result
def isMasked(self, band):
return self.toImage(band).mask().reduce('min').eq(0)
def updateMask(self, condition):
self.image = self.image.updateMask(self.toImage(condition))
def merge(self, o1, o2):
return dict(list(o1.iteritems()) + list(o2.iteritems()))
| [
"daniel.wiell@fao.org"
] | daniel.wiell@fao.org |
5af9e7c4d040127cae9671591ab150e165fef3b5 | 354d28102374bf51bc6e74dd8d952dc036925356 | /user_guide/src/examples/pivot/lazy.py | 8ce4ef07d645efa05a0f472e162dd43319fe4fae | [] | no_license | simonw/polars-book | 8c30c3707716ea1134a5a92e938055bcffd84b36 | 841f86dcc0f7c338de5eb1b34efbc405922c74ef | refs/heads/master | 2023-08-30T10:05:35.360224 | 2021-11-14T07:06:36 | 2021-11-14T07:06:36 | 428,919,663 | 0 | 0 | null | 2021-11-17T05:27:07 | 2021-11-17T05:27:06 | null | UTF-8 | Python | false | false | 144 | py | from .dataset import df
# Lazy pivot example: group by "foo", pivot "bar" into columns taking the
# first "N" per cell, and execute the whole plan with collect().
q = df.lazy().map(lambda df: df.groupby("foo").pivot(pivot_column="bar", values_column="N").first())
out = q.collect()
| [
"ritchie46@gmail.com"
] | ritchie46@gmail.com |
84465ec3badfad506dd593f543fb4098424ac9eb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_organists.py | be4d1d8e2ad43f99e39a4fc06c52700fa9cc7b7c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.nouns._organist import _ORGANIST
# class header
class _ORGANISTS(_ORGANIST, ):
    """Plural noun entry 'organists'; inherits word data from _ORGANIST."""

    def __init__(self,):
        _ORGANIST.__init__(self)
        self.name = "ORGANISTS"
        self.specie = 'nouns'
        self.basic = "organist"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
61948e3b28ade065e37d16bf8131afe690b1ce63 | 5e381364c2ab31ff3618369085afffba6caa8edb | /recipes/fakeit/all/conanfile.py | 1cd82fc64805d459038f563de4e2470341513252 | [
"MIT"
] | permissive | CAMOBAP/conan-center-index | 16aea68a6d22da22831ba985773125e8eda08f00 | 67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1 | refs/heads/master | 2023-07-30T08:58:57.285571 | 2021-10-02T14:57:54 | 2021-10-02T14:57:54 | 323,262,699 | 1 | 0 | MIT | 2021-05-29T13:37:04 | 2020-12-21T07:30:02 | Python | UTF-8 | Python | false | false | 2,011 | py | from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
class FakeItConan(ConanFile):
    """Conan recipe for FakeIt, a header-only C++ mocking library.

    The single header shipped depends on the chosen test-framework
    integration (the ``integration`` option).
    """
    name = "fakeit"
    license = "MIT"
    homepage = "https://github.com/eranpeer/FakeIt"
    url = "https://github.com/conan-io/conan-center-index"
    description = "C++ mocking made easy. A simple yet very expressive, headers only library for c++ mocking."
    topics = ("mock", "fake", "spy")
    settings = "compiler"
    options = {
        "integration": ["boost", "catch", "cute", "gtest", "mettle", "nunit", "mstest", "qtest", "standalone", "tpunit"]
    }
    default_options = {"integration": "standalone"}
    no_copy_source = True
    @property
    def _source_subfolder(self):
        # Conventional unpack directory for the fetched sources.
        return "source_subfolder"
    def requirements(self):
        """Pull in the test framework matching the selected integration."""
        if self.options.integration == "boost":
            self.requires("boost/1.75.0")
        elif self.options.integration == "catch":
            self.requires("catch2/2.13.4")
        elif self.options.integration == "gtest":
            self.requires("gtest/cci.20210126")
        elif self.options.integration == "qtest":
            self.requires("qt/6.0.2")
        elif self.options.integration == "standalone":
            pass
        else:
            # cute/mettle/nunit/mstest/tpunit are valid options but their
            # frameworks are not packaged on conan-center yet.
            raise ConanInvalidConfiguration("%s is not (yet) available on cci" % self.options.integration)
    def configure(self):
        """FakeIt requires at least C++11 when a cppstd is declared."""
        minimal_cpp_standard = "11"
        if self.settings.compiler.cppstd:
            tools.check_min_cppstd(self, minimal_cpp_standard)
    def source(self):
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = "FakeIt-" + self.version
        os.rename(extracted_dir, self._source_subfolder)
    def package(self):
        # Ship only the single header for the chosen integration plus license.
        self.copy(pattern="fakeit.hpp", dst="include", src=os.path.join(self._source_subfolder, "single_header", str(self.options.integration)))
        self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
    def package_id(self):
        # Header-only: the compiler setting does not affect the package id.
        del self.settings.compiler
| [
"noreply@github.com"
] | CAMOBAP.noreply@github.com |
5ab4128daad6fc7ec81e3f308d5ded319af70f7c | 18219d0fc95936ded56fe44f9a65ecb27f015232 | /35 Laboratory Converter units.py | ed1ee2497ae870063545fbc0a031cc8248d0e0f6 | [] | no_license | JDavid121/Script-Curso-Cisco-Python | 20a61b91b09376dcaef54f8ae5f86fe252de5c33 | 6d68c17ff3c3826e9fc609d110ce9d0e6ebf718b | refs/heads/master | 2021-05-18T04:54:59.948970 | 2020-03-29T20:19:53 | 2020-03-29T20:19:53 | 251,120,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | """
Programa que convierte millas de kilometros y viceversa
"""
"""
1 milla = 1.61 km
"""
# Program that converts miles to kilometres (user-facing text is Spanish).
#******************************************************************
print("Programa que transforma millas a kilometros")
mile = input("Ingresar la distancia en millas\n\t")
mile=float(mile)
print("La distancia ingresada en millas es...",mile)
mile_to_kilometers = mile*1.61 # miles -> kilometres (1 mile = 1.61 km)
print(mile,"millas equivalen a",round(mile_to_kilometers,3),"kilometros")
print("Fin del programa")
print()
print("Programa que transforma kilometros a millas")
kilometer = input("Ingresar la distancia en kilometros\n\t")
kilometer=float(kilometer)
print("La distancia ingresada en kilometros es...",kilometer)
kilometers_to_mile = kilometer/1.61 # kilometres -> miles
print(kilometer,"kilómetros euivalen a",round(kilometers_to_mile,3),"millas")
print("Fin del programa")

# Program that converts degrees Fahrenheit to Celsius and back.
#******************************************************************
# formula: °C = (°F - 32°) * (5/9)
print("Programa que transforma los grados Farhenheit a Celsius")
farhenheit=input("Ingresar los grados Farenheit\n\t")
farhenheit=float(farhenheit)
farhenheit=round(farhenheit,3) # round the input to 3 decimals
print("\tLos grados farhenheit ingresados son",farhenheit,"°F")
celsius_to_farh=(farhenheit-32)*(5/9) # Fahrenheit -> Celsius conversion
celsius_to_farh=round(celsius_to_farh,3)
print("\t",farhenheit,"°F","equivalen a",celsius_to_farh,"°C")
print("Fin del programa")
print()
print("Programa que transforma los grados Celsius a Farhenheit")
celsius=input("Ingresar los grados Celsius\n\t")
celsius=float(celsius)
celsius=round(celsius,3) # round the input to 3 decimals
# NOTE(review): despite its name, this variable holds the Celsius ->
# Fahrenheit result (°F = 32 + (9/5)·°C).
farh_to_celsius=32+(9/5)*celsius
farh_to_celsius=round(farh_to_celsius,3)
print("\t",celsius,"°C equivalen a",farh_to_celsius,"°F")
print("Fin del programa")
"noreply@github.com"
] | JDavid121.noreply@github.com |
ba12f92e88b5290922b85f50d0b6bf924df5dbe7 | cd9f819b968def4f9b57448bdd926dc5ffa06671 | /B_輕鬆學python3_孫宏明_碁峰_2017/29-1/Graph/main.py | e65b7e9e5b5aa31d70a8bcc2c37da8a8c20e3540 | [] | no_license | AaronCHH/jb_pyoop | 06c67f3c17e722cf18147be4ae0fac81726e4cbc | 356baf0963cf216db5db7e11fb67234ff9b31b68 | refs/heads/main | 2023-04-02T05:55:27.477763 | 2021-04-07T01:48:04 | 2021-04-07T01:48:13 | 344,676,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | import shape as sh
# Create one object each of the Circle, Rectangle and Triangle classes.
c = sh.Circle(10)
r = sh.Rectangle(5, 2)
t = sh.Triangle(8, 3)
# Collect the objects into a tuple.
shapes = c, r, t
# Use a for loop to show each object's details and its area
# (the '面積:' label printed below means "area").
for s in shapes:
    s.show_shape_info()
    print('面積:' + str(s.get_area()))
| [
"aaronhsu219@gmail.com"
] | aaronhsu219@gmail.com |
e3fbb8c9d7fd01422301e9908dc1df4fb26ab066 | f714db4463dd37fc33382364dc4b1963a9053e49 | /tests/sentry_plugins/pivotal/test_pivotal_plugin.py | cf8294f3310c500617cfeb7ce3dbf7f8b51b6967 | [
"BUSL-1.1",
"Apache-2.0"
] | permissive | macher91/sentry | 92171c2ad23564bf52627fcd711855685b138cbd | dd94d574403c95eaea6d4ccf93526577f3d9261b | refs/heads/master | 2021-07-07T08:23:53.339912 | 2020-07-21T08:03:55 | 2020-07-21T08:03:55 | 140,079,930 | 0 | 0 | BSD-3-Clause | 2020-05-13T11:28:35 | 2018-07-07T11:50:48 | Python | UTF-8 | Python | false | false | 2,171 | py | from __future__ import absolute_import
from exam import fixture
from django.core.urlresolvers import reverse
from sentry.testutils import PluginTestCase
from sentry.utils import json
from sentry_plugins.pivotal.plugin import PivotalPlugin
class PivotalPluginTest(PluginTestCase):
    """Tests for the Pivotal Tracker plugin: metadata, issue label/URL
    formatting, configuration detection, and masking of the saved API token
    in the project-plugin-details endpoint."""
    @fixture
    def plugin(self):
        """Plugin instance under test (provided as an `exam` fixture attribute)."""
        return PivotalPlugin()
    def test_conf_key(self):
        assert self.plugin.conf_key == "pivotal"
    def test_entry_point(self):
        self.assertPluginInstalled("pivotal", self.plugin)
    def test_get_issue_label(self):
        # The label for a linked issue is simply '#<id>'.
        group = self.create_group(message="Hello world", culprit="foo.bar")
        assert self.plugin.get_issue_label(group, 1) == "#1"
    def test_get_issue_url(self):
        # Issue URLs point at the public Pivotal Tracker story page.
        group = self.create_group(message="Hello world", culprit="foo.bar")
        assert self.plugin.get_issue_url(group, 1) == "https://www.pivotaltracker.com/story/show/1"
    def test_is_configured(self):
        # The plugin counts as configured only once both token and project are set.
        assert self.plugin.is_configured(None, self.project) is False
        self.plugin.set_option("token", "1", self.project)
        self.plugin.set_option("project", "1", self.project)
        assert self.plugin.is_configured(None, self.project) is True
    def test_no_secrets(self):
        """The API must not expose the saved token value, only a masked
        marker: type 'secret', hasSavedValue, and a 4-character prefix."""
        self.user = self.create_user("foo@example.com")
        self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
        self.team = self.create_team(organization=self.org, name="Mariachi Band")
        self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
        self.login_as(self.user)
        self.plugin.set_option("token", "abcdef", self.project)
        url = reverse(
            "sentry-api-0-project-plugin-details",
            args=[self.org.slug, self.project.slug, "pivotal"],
        )
        res = self.client.get(url)
        config = json.loads(res.content)["config"]
        token_config = [item for item in config if item["name"] == "token"][0]
        assert token_config.get("type") == "secret"
        assert token_config.get("value") is None
        assert token_config.get("hasSavedValue") is True
        assert token_config.get("prefix") == "abcd"
| [
"noreply@github.com"
] | macher91.noreply@github.com |
c818fa2fbe0a931e015d9c72ca30b11428d45ae9 | 0983a837b8ca96c215a3bad0dfda0aba9b79b89f | /single_header/preprocess.py | 5170d044c37f03d160ccb053d2efc9947a1cb873 | [] | no_license | omardrwch/rlcpp | 53a950d3f3b4e75010c1acf7d047b3fe48c99720 | 5b12133c3f85cd2a7158915914beace31fdcd13e | refs/heads/master | 2020-08-13T15:38:28.366012 | 2020-03-07T20:40:09 | 2020-03-07T20:40:09 | 214,994,055 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | import os
import shutil
import os
# Resolve paths relative to this script: sources to flatten live in
# <parent>/rlcpp, the flat copies go into ./all_files next to this script.
dir_path = os.path.dirname(os.path.realpath(__file__))
project_path = os.path.dirname(dir_path)
dir_to_copy = os.path.join(project_path, 'rlcpp')
dir_destination = os.path.join(dir_path, 'all_files')
if not os.path.exists(dir_destination):
    os.makedirs(dir_destination)
# Copy all project files (.cpp and .h) from dir_to_copy to dir_destination
for root, dirs, files in os.walk(dir_to_copy):
    for file in files:
        shutil.copy2(os.path.join(root, file), dir_destination)
def _include_lines(keep):
    """Echo and collect '#include "name"' lines for every file under
    dir_destination whose name satisfies `keep`.
    (Replaces two copy-pasted os.walk loops in the original.)"""
    lines = ""
    for _root, _dirs, filenames in os.walk(dir_destination):
        for filename in filenames:
            if keep(filename):
                print(filename)
                lines += "#include " + "\"" + filename + "\"" + "\n"
    return lines
# Build the single header rlcpp.hpp: include guard, header files first
# (excluding the output file itself), then the implementation files.
header_contents = "#ifndef __RLCPP_H__ \n#define __RLCPP_H__ \n"
header_contents += _include_lines(lambda f: '.h' in f and f != "rlcpp.hpp")
header_contents += _include_lines(lambda f: '.cpp' in f)
header_contents += "#endif"
header_file = open(os.path.join(dir_destination, "rlcpp.hpp"), "w+")
header_file.write(header_contents)
header_file.close() | [
"omar.drwch@gmail.com"
] | omar.drwch@gmail.com |
b11f3da5b0ec58c42441646efad1247fda2d1a9e | 147715fa98fe40b919784ef703dcddb8e8ab37c4 | /indy_common/config_helper.py | 9250ac18b12a1047999603e56dc61347844a6a7a | [
"Apache-2.0"
] | permissive | RaghuVamz/indy-node | ae069bfe2766248937fb2662ec65736c305a5cd9 | 977249b9100de62290ed45e74f1df1c2a1c7afd9 | refs/heads/master | 2021-08-23T20:08:57.192581 | 2017-12-05T23:41:54 | 2017-12-05T23:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | import os
from plenum.common.config_helper import PConfigHelper
class ConfigHelper(PConfigHelper):
    """Resolves file-system locations (logs, genesis, keys, ledger data)
    from the config object, re-rooted under a chroot when one is set
    (via the inherited chroot_if_needed)."""
    @property
    def log_dir(self):
        """Network-specific log directory."""
        return self.chroot_if_needed(
            os.path.join(self.config.LOG_DIR, self.config.NETWORK_NAME))
    @property
    def genesis_dir(self):
        """Directory holding this network's genesis transaction files."""
        return self.chroot_if_needed(
            os.path.join(self.config.GENESIS_DIR, self.config.NETWORK_NAME))
    @property
    def keys_dir(self):
        """Directory holding this network's key material."""
        return self.chroot_if_needed(
            os.path.join(self.config.KEYS_DIR, self.config.NETWORK_NAME, 'keys'))
    @property
    def ledger_base_dir(self):
        """Root ledger directory (not network-specific)."""
        return self.chroot_if_needed(self.config.LEDGER_DIR)
    @property
    def ledger_data_dir(self):
        """Network-specific ledger data directory."""
        return self.chroot_if_needed(
            os.path.join(self.config.LEDGER_DIR, self.config.NETWORK_NAME, 'data'))
    @property
    def log_base_dir(self):
        """Root log directory (not network-specific)."""
        return self.chroot_if_needed(self.config.LOG_DIR)
class NodeConfigHelper(ConfigHelper):
    """ConfigHelper for a named node: adds the node's own ledger directory."""
    def __init__(self, name: str, config, *, chroot='/'):
        """
        :param name: node name; must not be None (used as a path component)
        :param config: config object providing the *_DIR / NETWORK_NAME settings
        :param chroot: optional root to re-base all returned paths under
        :raises ValueError: if name is None
        """
        # Explicit check instead of `assert`, which is stripped under `python -O`.
        if name is None:
            raise ValueError("name must not be None")
        super().__init__(config, chroot=chroot)
        self.name = name
    @property
    def ledger_dir(self):
        """This node's own ledger data directory."""
        return self.chroot_if_needed(
            os.path.join(self.config.LEDGER_DIR, self.config.NETWORK_NAME, 'data', self.name))
| [
"alexander.sherbakov@dsr-company.com"
] | alexander.sherbakov@dsr-company.com |
20eb845f27d8f3d0b45c4495eab5e80b15e6fcc1 | f0987e17aea6668158cd334c1fbacfe6286d3c77 | /NITA/tests/unit/security/appsecure/test_apbr_commands.py | 23646f813ef71033f238a57643ad7cc20473dad9 | [] | no_license | fengyun4623/file | 00bf21f952ea3f95ffc9fe18448b244b26b7fadb | 3966c63d48557b0b94303896eed7a767593a4832 | refs/heads/master | 2023-04-02T05:01:25.066052 | 2020-07-29T16:15:31 | 2020-07-29T16:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,672 | py | from mock import patch
import unittest2 as unittest
from mock import MagicMock
from jnpr.toby.security.appsecure import apbr_commands
from jnpr.toby.hldcl.juniper.security.srx import Srx
class Response:
    """Minimal fake of a device response object: wraps a payload string
    that the code under test retrieves via .response()."""
    def __init__(self, x=""):
        # Stored under .resp; response() hands it back unchanged.
        self.resp = x
    def response(self):
        """Return the wrapped payload."""
        return self.resp
class UnitTest(unittest.TestCase):
mocked_obj = MagicMock(spec=Srx)
mocked_obj.log = MagicMock()
def test_get_apbr_profile(self):
try:
apbr_commands.get_apbr_profile()
except Exception as err:
self.assertEqual(err.args[0], "Device handle is a mandatory argument")
dict_to_return = {'apbr-profiles':
{'apbr-profiles': {'pic': 0
}
}
}
self.mocked_obj.execute_as_rpc_command = MagicMock(return_value=dict_to_return)
self.assertEqual(apbr_commands.get_apbr_profile(device=self.mocked_obj), {})
dict_to_return = {'apbr-profiles':
{'apbr-profiles': {'profile-name': "abc",
'zone-name': "trust"
}
}
}
self.mocked_obj.execute_as_rpc_command.return_value = dict_to_return
self.assertEqual(apbr_commands.get_apbr_profile(device=self.mocked_obj), {"abc":"trust"})
dict_to_return = {'apbr-profiles':
{'apbr-profiles': {'profile-name': ["abc", "def"],
'zone-name': ["trust", "untrust"]
}
}
}
x = {"abc":"trust", "def":"untrust"}
self.mocked_obj.execute_as_rpc_command.return_value = dict_to_return
self.assertEqual(apbr_commands.get_apbr_profile(device=self.mocked_obj), x)
def test_verify_apbr_profile(self):
try:
apbr_commands.verify_apbr_profile()
except Exception as err:
self.assertEqual(err.args[0], "'device' is a mandatory argument")
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj)
except Exception as err:
self.assertEqual(err.args[0], "'profile_name' is a mandatory argument")
x = {"abc": "trust", "def": "untrust"}
self.assertEqual(apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict=x, profile_name="def", zone_name="untrust"), True)
self.assertEqual(apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict={}, no_profile=True),True)
p = patch("jnpr.toby.security.appsecure.apbr_commands.get_apbr_profile", new=MagicMock(return_value=x))
p.start()
self.assertEqual(apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_name="abc"),True)
p.stop()
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict={}, profile_name="abc", zone_name="untrust")
except Exception as err:
self.assertEqual(err.args[0], "No profiles configured")
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict=x, no_profile=True)
except Exception as err:
self.assertEqual(err.args[0], "Expected-NO profile, but some profile was found")
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict=x, profile_name="abc", zone_name="untrust")
except Exception as err:
self.assertEqual(err.args[0], "Zone name NOT matching")
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict=x, profile_name="abcd", zone_name="untrust")
except Exception as err:
self.assertEqual(err.args[0], "Profile name not found")
def test_get_apbr_stats(self):
try:
apbr_commands.get_apbr_stats()
except Exception as err:
self.assertEqual(err.args[0], "Device handle is a mandatory argument")
dict_to_return = {'apbr-statistics':
{'apbr-statistics': {'pic': 0
}
}
}
self.mocked_obj.execute_as_rpc_command = MagicMock(return_value=dict_to_return)
self.assertEqual(apbr_commands.get_apbr_stats(device=self.mocked_obj), {'pic':0})
def test_verify_apbr_stats(self):
try:
apbr_commands.verify_apbr_stats()
except Exception as err:
self.assertEqual(err.args[0], "Device handle is a mandatory argument")
try:
apbr_commands.verify_apbr_stats(device=self.mocked_obj)
except Exception as err:
self.assertEqual(err.args[0], "counter_values is None, it is mandatory argument")
x = {"a" : "1", "b":"2", "c":"3"}
p = patch("jnpr.toby.security.appsecure.apbr_commands.get_apbr_stats", new=MagicMock(return_value=x))
p.start()
self.assertEqual(apbr_commands.verify_apbr_stats(device=self.mocked_obj, counter_values={"b":2, "c":3}), True)
try:
apbr_commands.verify_apbr_stats(device=self.mocked_obj, counter_values={"b": 1, "c": 3})
except Exception as err:
self.assertEqual(err.args[0], "APBR statistics validation failed")
try:
apbr_commands.verify_apbr_stats(device=self.mocked_obj, counter_values={"d": 1, "c": 3})
except Exception as err:
self.assertEqual(err.args[0], "APBR statistics validation failed")
def test_clear_apbr_stats(self):
try:
apbr_commands.clear_apbr_stats()
except Exception as err:
self.assertEqual(err.args[0],"Device handle is a mandatory argument" )
self.mocked_obj.cli = MagicMock(return_value=Response(""))
try:
apbr_commands.clear_apbr_stats(device=self.mocked_obj)
except Exception as err:
self.assertEqual(err.args[0], "APBR stats couldn't be cleared")
self.mocked_obj.cli.return_value = Response("Advance-policy-based-routing statistics clear done")
self.assertEqual(apbr_commands.clear_apbr_stats(device=self.mocked_obj), True)
if __name__ == '__main__':
unittest.main() | [
"srigupta@juniper.net"
] | srigupta@juniper.net |
e1d5705fadca5206369d9fc28471dee0f1be801f | ba60d3ccf11157abaf6c7bcf3a81aace27c6af88 | /spoj/wtk.py | c87681f874913fd405f05378c83c0744d2e5991b | [] | no_license | eightnoteight/compro | 9a09628593cdd3201f4d3bcf271f1ca6a4e5efca | 1e5d32ee83e9d8f27623dee7262decad3d107bd5 | refs/heads/master | 2021-01-21T04:19:02.746824 | 2016-08-01T20:51:16 | 2016-08-01T20:51:16 | 44,669,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
from sys import stdin
# Python 2 aliases for the lazy builtins; on Python 3 the names
# xrange/raw_input do not exist, so NameError is the only failure expected
# here (the original bare `except:` would also have swallowed
# KeyboardInterrupt/SystemExit).
try:
    range = xrange
    input = raw_input
except NameError:
    pass
def wtk(n):
    """Survivor position (1-based) after folding the elimination
    recurrence from ring size 2 up to n."""
    pos = 1
    size = 2
    while size <= n:
        # Shift by (n - size), wrap into the current ring, re-base at 1.
        pos = (pos + n - size) % size + 1
        size += 1
    return pos
# stdin layout: first line is the number of test cases, then one n per line.
inp = stdin.readlines()
for _ in range(int(inp[0])):
    print(wtk(int(inp[_ + 1])))
| [
"mr.eightnoteight@gmail.com"
] | mr.eightnoteight@gmail.com |
5600718a422aecd517e3e3db0aa2ade322992a29 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/1657.py | b6882f38110c00e2f3f7f2637ba12cf028460824 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | import math
def ispalindrome(n): # ok, didn't really need this, but since i wrote it i might as well use it
    """Return 1 if n's decimal digits form a palindrome, else 0."""
    digits = [int(x) for x in str(n)]
    length = len(digits)
    # Compare the first half against the mirrored second half; the middle
    # digit of an odd-length number needs no check. `//` keeps the loop
    # bound an int on Python 3 (the original `length/2` is a float there
    # and breaks range()); on Python 2 `//` and `/` are identical for ints.
    for i in range(length // 2):
        if digits[i] != digits[length - i - 1]:
            return 0
    return 1
def getnextpal(n):
    """Return the smallest palindromic integer that is >= n."""
    digits = [int(x) for x in str(n)]
    length = len(digits)
    digits = [0] + digits  # extra digit in case of overflow
    # Walk matching digit pairs outside-in. `//` keeps the bound an int on
    # Python 3 (the original `length/2` breaks under true division); on
    # Python 2 the behavior is unchanged.
    for i in range(length // 2):
        x = i + 1
        y = length - i
        if digits[x] > digits[y]:
            # Mirroring the left digit up onto the right only increases n.
            digits[y] = digits[x]
        elif digits[x] < digits[y]:
            # Mirroring down would shrink the number, so first increment the
            # nearest incrementable (non-9) digit left of y; x itself cannot
            # be 9 here because digits[x] < digits[y].
            z = y - 1
            while digits[z] == 9:
                z -= 1
            digits[z] += 1
            # now y is free to mirror the left digit
            digits[y] = digits[x]
            # but we have to zero out the digits in between
            for k in range(z + 1, y):
                digits[k] = 0
        # else equal, in which case keep going inward
    return int("".join(str(x) for x in digits))
def fairsquare(A, B):
    """Count the squares in [A, B] whose root is a palindrome and which
    are palindromes themselves ("fair and square" numbers)."""
    count = 0
    # Smallest candidate root whose square can reach A, rounded up to the
    # next palindrome.
    root = getnextpal(int(math.ceil(math.sqrt(A))))
    while root * root <= B:
        if ispalindrome(root * root):
            count += 1
        root = getnextpal(root + 1)
    return count
f = open('C-large-1.in', 'r')
T = int(f.readline())
for i in range(T):
nums = f.readline().split(' ')
A = int(nums[0])
B = int(nums[1])
print "Case #{}: {}".format(i+1, fairsquare(A,B)) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
06cdb1171a3fca287acf3cd73cf81f7a7ac189a4 | 63bc95150f6af526199454602e5689bfadc882ba | /12/ex12-4.py | b134b84938d34ba5eb34a0dc964e2c228b3343a3 | [] | no_license | veenary/python-src | fd61d22a58d452ccb251402fecb0b7babd5372a7 | d61374bc32b8ebe3b2be366a6de259680821a4e1 | refs/heads/master | 2023-03-17T10:07:27.704611 | 2021-03-10T01:31:56 | 2021-03-10T01:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', family='Malgun Gothic')  # Korean-capable font so Hangul labels render
# Student names (x axis) and their scores in two subjects (Korean, English).
xdata = ['안지영', '홍지수', '황예린']
ydata1 = [90, 85, 88]
ydata2 = [83, 88, 91]
plt.plot(xdata, ydata1, label='국어')
plt.plot(xdata, ydata2, label='영어')
plt.legend(loc='upper center')
plt.title('세명 학생의 국어, 영어 성적')
plt.show() | [
"park.cheongu@gmail.com"
] | park.cheongu@gmail.com |
d6c67ffa4c48863a4c30baeec6a7167f27d0edd3 | 84868118de838472bca33c9f0455674e7e563d1b | /WLCG_GSoC_Task_Server/views.py | 5f7a847621fab880c815c0a05099b392a92a3702 | [
"Apache-2.0"
] | permissive | maany/MOOC-CA-Server | afa7b065fd5a059a94abec687236122ec6afd376 | 917109c7e5f37f3e7ee63ec0c5d1be3409b27f93 | refs/heads/master | 2022-12-12T12:03:52.430010 | 2019-03-12T13:46:06 | 2019-03-12T13:46:06 | 156,133,195 | 0 | 0 | Apache-2.0 | 2022-12-08T01:01:48 | 2018-11-04T22:56:11 | HTML | UTF-8 | Python | false | false | 392 | py | from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from evaluator.models import Task, Applicant
@login_required
def profile(request):
    """Render the logged-in applicant's profile page with their tasks.

    Tasks are looked up via the Applicant record attached to the auth user
    (request.user.applicant).
    """
    tasks = Task.objects.filter(applicant=request.user.applicant)
    context = {
        'tasks': tasks,
    }
    return render(request,'registration/profile.html', context)
| [
"imptodefeat@gmail.com"
] | imptodefeat@gmail.com |
6279b7c4ae80c6d7996d9e4072efb25f43a2d80b | 536538af28cfe40e10ff1ce469cd0f81e8b3a8fe | /majority_element_II.py | 02813649ad35d4fef5a7d02c179be39c5526676a | [] | no_license | ShunKaiZhang/LeetCode | 7e10bb4927ba8581a3a7dec39171eb821c258c34 | ede2a2e19f27ef4adf6e57d6692216b8990cf62b | refs/heads/master | 2021-09-01T07:41:03.255469 | 2017-12-25T19:22:18 | 2017-12-25T19:22:18 | 104,136,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # python3
# Given an integer array of size n, find all elements that appear more than ⌊ n/3 ⌋ times.
# The algorithm should run in linear time and in O(1) space.
# My solution
class Solution(object):
    def majorityElement(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        After sorting (in place, as before), any value occurring more than
        len(nums)//3 times must reappear exactly that many positions later,
        so pairing the list against itself shifted by that window finds all
        qualifying values.
        """
        nums.sort()
        window = len(nums) // 3
        return list({late for early, late in zip(nums, nums[window:]) if early == late})
| [
"noreply@github.com"
] | ShunKaiZhang.noreply@github.com |
a091cf762f8da9cecbb9794772a0d394ad9338f9 | 16fe74651e6692ea3d8d0302b40ac42f3d58e0ca | /Divide_Two_Integers.py | 331dae5f9352e2e8f00995e3f8aaf0d11552e651 | [
"MIT"
] | permissive | Ahmed--Mohsen/leetcode | 7574f71b10dfb9582f62e856bbc2559d3b21b2a1 | ad8967a5d85ac54f53b3fcce04df1b4bdec5fd9e | refs/heads/master | 2021-01-18T14:34:06.987665 | 2015-12-23T21:17:27 | 2015-12-23T21:17:27 | 33,744,104 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | """
Divide two integers without using multiplication, division and mod operator.
If it is overflow, return MAX_INT.
"""
class Solution:
    # @return an integer
    def divide(self, dividend, divisor):
        """Integer division truncating toward zero, using only shifts and
        subtraction. The result is clamped to the signed 32-bit range, so
        overflow (e.g. -2**31 / -1) returns MAX_INT as the problem's own
        docstring requires (the original returned 2**31 there).
        """
        p = abs(dividend)
        q = abs(divisor)
        ans = 0
        while p >= q:
            counter = 0
            while p >= (q << counter):  # find the largest q * 2**k still <= p
                counter += 1
            ans += 1 << (counter - 1)
            p -= q << (counter - 1)
        if (dividend < 0 and divisor > 0) or (dividend > 0 and divisor < 0):
            ans = -ans
        # Clamp into [-2**31, 2**31 - 1].
        ans = max(-(1 << 31), min((1 << 31) - 1, ans))
        return ans
return ans | [
"ahmed7890@gmail.com"
] | ahmed7890@gmail.com |
55040119c5bec7c52ab31cb072da66f9e8f23c54 | 1719920a92f7194766624474b98d59ef8d6eddaf | /models/search_result.py | 1e44051445d3173c464a9e9db35eddc2d60c9dd2 | [
"MIT"
] | permissive | MIchaelMainer/msgraph-v10-models-python | cfa5e3a65ba675383975a99779763211ed9fa0a9 | adad66363ebe151be2332f3ef74a664584385748 | refs/heads/master | 2020-03-19T12:51:06.370673 | 2018-06-08T00:16:12 | 2018-06-08T00:16:12 | 136,544,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..one_drive_object_base import OneDriveObjectBase
class SearchResult(OneDriveObjectBase):
    """Wrapper exposing a searchResult facet stored as a property dict."""
    def __init__(self, prop_dict=None):
        # Default is None, not {}: a mutable default dict is created once at
        # function definition and would be shared (and mutated through the
        # setter below) across every instance constructed without arguments.
        self._prop_dict = prop_dict if prop_dict is not None else {}
    @property
    def on_click_telemetry_url(self):
        """Gets and sets the onClickTelemetryUrl
        Returns:
            str:
                The onClickTelemetryUrl
        """
        if "onClickTelemetryUrl" in self._prop_dict:
            return self._prop_dict["onClickTelemetryUrl"]
        else:
            return None
    @on_click_telemetry_url.setter
    def on_click_telemetry_url(self, val):
        self._prop_dict["onClickTelemetryUrl"] = val
| [
"mmainer@microsoft.com"
] | mmainer@microsoft.com |
1098ae39b7e485b89db97e585ff61d78edb48860 | 1a66df726d2ecc20d6b5ff9d35dac4ea7ba5cb66 | /upseto/recursivegit.py | 2382959e2c66d5083c1f207a1d9b02424a21ec09 | [
"Apache-2.0"
] | permissive | shlomimatichin/upseto | 83c601d7a6d625d00fad3134fe9192dcfec73950 | 0fedc8b7d628b971d07b92b61c7a29431ad55d22 | refs/heads/master | 2021-01-15T11:06:41.285140 | 2015-03-05T13:22:38 | 2015-03-05T13:22:38 | 28,646,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | from upseto import gitwrapper
from upseto import traverse
import sys
class RecursiveGit:
    """Runs a git command in the current project and then in every
    dependency reachable from a manifest, echoing each repository's
    directory before its output."""
    def __init__(self, baseDir=".."):
        # baseDir is where sibling dependency checkouts are traversed from.
        self._traverse = traverse.Traverse(baseDir)
    def run(self, mani, commandLine):
        """Run `commandLine` in this repo and in each dependency of `mani`,
        writing '#upseto <dir>' followed by the command output to stdout."""
        git = gitwrapper.GitWrapper(".")
        sys.stdout.write('#upseto %s\n' % git.directory())
        sys.stdout.write(git.run(commandLine))
        for dependency in self._traverse.traverse(mani):
            git = gitwrapper.GitWrapper(dependency.projectDir)
            sys.stdout.write('#upseto %s\n' % git.directory())
            sys.stdout.write(git.run(commandLine))
| [
"shlomi@stratoscale.com"
] | shlomi@stratoscale.com |
442148600eefdf14a8d8ae73741d95f019dc6024 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/permutations_20200723154912.py | faaf21e3ad0dd8cb060bd06a3f837bc62ea900d1 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | def perm(arr):
# sort the array
if len(arr) == 0:
return 0
else:
arr.sort()
for i in range(len(arr)-1):
if arr[i] +1 != arr[i+1]:
return 0
return 1
print(perm([4,1,3,2])) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
fe0f7faff241dde12ec27f6c4fd6b1011ef5bf46 | 2bebb669112e2955de612e7d0532fe545b609733 | /goatools/semsim/termwise/wang.py | 0228ed10453e6dc1055ce126d7f3dbcb4c539a3a | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | rexdwyer/goatools | 008885ad534b73a1137fa59c9eb50e21b147f2ab | ef14f99844a2a42d36aa1ab2e40161b8bc7be78e | refs/heads/main | 2022-12-31T06:08:31.203278 | 2020-10-15T01:14:59 | 2020-10-15T01:14:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | """Wang's termwise semantic similarity for GO terms"""
__copyright__ = "Copyright (C) 2020-present, DV Klopfenstein. All rights reserved."
__author__ = "DV Klopfenstein"
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.semsim.termwise.dag_a import DagA
class SsWang:
    """Wang's termwise semantic similarity for GO terms"""
    def __init__(self, godag, relationships=None, rel2scf=None):
        self.godag = godag
        self.rels = relationships
        self.rel2scf = rel2scf
        # Edge-type weights (semantic contribution factors); defaults used
        # when rel2scf is None.
        self.w_e = self._init_edge_weight_factor(rel2scf)
        # GO ID -> DagA built over that term's sub-DAG (see add_goids).
        self.go2subdag = {}
    def add_goid(self, goid, prt=None):
        """Add a GO ID which will be compared using semantic similarity"""
        self.add_goids([goid], prt)
    def add_goids(self, goids, prt=None):
        """Add GO IDs which will be compared using semantic similarity"""
        # go2svalue = wang.get_go2svalue('go:0043231')
        s_godag = self.godag
        s_rels = self.rels
        s_go2subdag = self.go2subdag
        s_rel2scf = self.w_e
        # IDs missing from the GO DAG are silently skipped.
        for goid in goids:
            if goid in s_godag:
                gosubdag = GoSubDag([goid], s_godag, s_rels, prt=prt)
                dag = DagA(goid, gosubdag, s_rel2scf)
                s_go2subdag[goid] = dag
    def get_semsim(self, go_a, go_b):
        """Get Wang's semantic similarity between two GO terms"""
        if self._not_loaded(go_a, go_b):
            return None
        dag_a = self.go2subdag[go_a]
        dag_b = self.go2subdag[go_b]
        # Only terms present in both sub-DAGs contribute to the similarity.
        gos_ab = set(dag_a.go2svalue.keys()).intersection(dag_b.go2svalue.keys())
        s_a = dag_a.get_svalues(gos_ab)
        s_b = dag_b.get_svalues(gos_ab)
        s_ab = sum([a + b for a, b in zip(s_a, s_b)])
        return s_ab/(dag_a.get_sv() + dag_b.get_sv())
    def _not_loaded(self, go_a, go_b):
        """Check that both GO IDs are in the go2subdag dict"""
        if go_a not in self.go2subdag:
            print('**ERROR: {GO} NOT LOADED INTO SsWang'.format(GO=go_a))
            return True
        if go_b not in self.go2subdag:
            print('**ERROR: {GO} NOT LOADED INTO SsWang'.format(GO=go_b))
            return True
        return False
    @staticmethod
    def _init_edge_weight_factor(rel2scf):
        """Initialize semantic contribution factor (scf) for weights for edge types (w_e)"""
        if rel2scf is None:
            return {
                'is_a': 0.8,
                'part_of': 0.6,
            }
        return rel2scf
# Copyright (C) 2020-present DV Klopfenstein. All rights reserved.
| [
"dvklopfenstein@users.noreply.github.com"
] | dvklopfenstein@users.noreply.github.com |
88b31bcd1e68bb87db4eed1c8f9d1dc272541ee1 | 12e78946542250f64792bc6c1d8c8ff1ffecdaf7 | /Python/OOP/bikes.py | 3354bab3e9923ca80f29aa3f2dbd96f61c135e2b | [] | no_license | mkrabacher/CodingDojoAssignments | 0fde5adf7223a9eac07a4867499a243e230a300e | 4afef4aaf4f129fb56376e57d8be437d1f124521 | refs/heads/master | 2021-05-14T13:38:03.570533 | 2018-02-23T00:09:24 | 2018-02-23T00:09:24 | 113,722,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | class bike(object):
def __init__(self, price, max_speed):
bike.price = price
bike.max_speed = max_speed
bike.miles = 0
    def display_info(self):
        # Python 2 print statement; returns self so calls can be chained.
        print 'This bike costs {}, has a max speed of {}, and have been ridden {} miles total'.format(self.price, self.max_speed, self.miles)
        return self
    def ride(self):
        # Each ride adds 10 miles to the odometer; chainable.
        print 'ridin now boss.'
        self.miles += 10
        return self
    def reverse(self):
        # Reversing subtracts 5 miles (can drive the odometer negative); chainable.
        print 'reversin now boss'
        self.miles -= 5
        return self
print '--------------------------------------------------------------------------'
bike1 = bike(200, "25mph")
bike1.display_info().ride().reverse().ride().ride().reverse().display_info()
print '--------------------------------------------------------------------------'
bike2 = bike(100, "21mph")
bike2.display_info().ride().reverse().reverse().reverse().reverse().display_info()
print '--------------------------------------------------------------------------'
bike3 = bike(400, "254mph")
bike3.display_info().reverse().ride().ride().display_info()
print '--------------------------------------------------------------------------' | [
"matt.krabacher@gmail.com"
] | matt.krabacher@gmail.com |
45f245fa90ff36c4bd8ccba8af71faf3d28d06aa | 1025bc2aa5aaa40970ad1a51d8d0b1202a1ea11e | /StatTools/test/test_morph.py | 098bd9e136b860bf73ff5b65867dbd350b8bd07f | [] | no_license | uwcms/FinalStateAnalysis | f2be318546728621676a4b90ed2678b2560c94e6 | bcb164a8e27d459a9ac438780f6c8730d3e856bf | refs/heads/miniAOD_9_4_0 | 2022-11-09T01:28:52.199025 | 2019-03-15T19:25:10 | 2019-03-15T19:25:10 | 5,201,989 | 5 | 32 | null | 2020-11-19T17:02:32 | 2012-07-27T07:51:18 | Python | UTF-8 | Python | false | false | 646 | py | '''
Stupid tests of the th1fmorph tool
'''
from FinalStateAnalysis.StatTools.morph import morph
from rootpy.io import open, DoesNotExist
# NOTE(review): `open` here is rootpy's open (imported above), not the builtin.
file = open('$fsa/VHiggs/test/plotting/wh_shapes.root')
hist1 = file.get('mmt_mumu_final_140_MuTauMass/VH140')
hist2 = file.get('mmt_mumu_final_120_MuTauMass/VH120')
hist130true = file.get('mmt_mumu_final_130_MuTauMass/VH130')
print '140', hist1.Integral(), hist1.GetMean()
print '130 true', hist130true.Integral(), hist130true.GetMean()
print '120', hist2.Integral(), hist2.GetMean()
# Try to morph to 130
m130 = morph('130', '130', 130, hist1, 140, hist2, 120)
# Compare the interpolated 130 histogram against the true 130 shape printed above.
print m130.Integral(), m130.GetMean()
| [
"Silvia.Taroni@cern.ch"
] | Silvia.Taroni@cern.ch |
363cff7ddb7a55c545d517ea771c5a424f188146 | 7e9b45a66b3637cf571eb1e16c07dd888963d8ba | /ITcast/ITcast/spiders/itcast.py | a87e81bec5c50033e10d8550fb5f3e8c1c968117 | [] | no_license | tusonggao/scrapy-scripts | 60d4c7449819c6a2861c208c34f0fb8078ed94d4 | 2dd97c0a55e02c51a43c7a335c91ac64d8bbaf1b | refs/heads/master | 2020-04-08T18:01:21.351922 | 2018-12-02T01:57:03 | 2018-12-02T01:57:03 | 159,590,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | # -*- coding: utf-8 -*-
import scrapy
class ItcastSpider(scrapy.Spider):
    """Spider skeleton for itcast.cn; parse() is not implemented yet."""
    name = 'itcast'
    # Scrapy expects bare domain names here (no scheme); with a full URL the
    # offsite filter would never match any request.
    allowed_domains = ['itcast.cn']
    # Fixed: the generated value had a doubled scheme ('http://http://...'),
    # which is not a fetchable URL.
    start_urls = ['http://www.itcast.cn/']
    def parse(self, response):
        pass
| [
"tusonggao@163.com"
] | tusonggao@163.com |
6a2fa3268aaa3900c0d2c08b01d2ad48f9be95f7 | e5333b2e54f1adf2e5bc88a9a242234c5f15851a | /misoclib/com/liteeth/core/mac/core/crc.py | f08302b164d4733c2a5cc4d933ae70d2372c05e0 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hoangt/misoc | 1aaf850c18bab5b18db1fcc788feb96afbbc464e | 6c13879fb605a1ee2bd5a3b35669e093f9a4267b | refs/heads/master | 2021-01-21T02:55:59.398987 | 2015-07-13T15:00:03 | 2015-07-13T15:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,639 | py | from misoclib.com.liteeth.common import *
class LiteEthMACCRCEngine(Module):
"""Cyclic Redundancy Check Engine
Compute next CRC value from last CRC value and data input using
an optimized asynchronous LFSR.
Parameters
----------
data_width : int
Width of the data bus.
width : int
Width of the CRC.
polynom : int
Polynom of the CRC (ex: 0x04C11DB7 for IEEE 802.3 CRC)
Attributes
----------
data : in
Data input.
last : in
last CRC value.
next :
next CRC value.
"""
def __init__(self, data_width, width, polynom):
self.data = Signal(data_width)
self.last = Signal(width)
self.next = Signal(width)
# # #
def _optimize_eq(l):
"""
Replace even numbers of XORs in the equation
with an equivalent XOR
"""
d = OrderedDict()
for e in l:
if e in d:
d[e] += 1
else:
d[e] = 1
r = []
for key, value in d.items():
if value%2 != 0:
r.append(key)
return r
# compute and optimize CRC's LFSR
curval = [[("state", i)] for i in range(width)]
for i in range(data_width):
feedback = curval.pop() + [("din", i)]
for j in range(width-1):
if (polynom & (1<<(j+1))):
curval[j] += feedback
curval[j] = _optimize_eq(curval[j])
curval.insert(0, feedback)
# implement logic
for i in range(width):
xors = []
for t, n in curval[i]:
if t == "state":
xors += [self.last[n]]
elif t == "din":
xors += [self.data[n]]
self.comb += self.next[i].eq(optree("^", xors))
@DecorateModule(InsertReset)
@DecorateModule(InsertCE)
class LiteEthMACCRC32(Module):
"""IEEE 802.3 CRC
Implement an IEEE 802.3 CRC generator/checker.
Parameters
----------
data_width : int
Width of the data bus.
Attributes
----------
d : in
Data input.
value : out
CRC value (used for generator).
error : out
CRC error (used for checker).
"""
width = 32
polynom = 0x04C11DB7
init = 2**width-1
check = 0xC704DD7B
def __init__(self, data_width):
self.data = Signal(data_width)
self.value = Signal(self.width)
self.error = Signal()
# # #
self.submodules.engine = LiteEthMACCRCEngine(data_width, self.width, self.polynom)
reg = Signal(self.width, reset=self.init)
self.sync += reg.eq(self.engine.next)
self.comb += [
self.engine.data.eq(self.data),
self.engine.last.eq(reg),
self.value.eq(~reg[::-1]),
self.error.eq(self.engine.next != self.check)
]
class LiteEthMACCRCInserter(Module):
"""CRC Inserter
Append a CRC at the end of each packet.
Parameters
----------
description : description
description of the dataflow.
Attributes
----------
sink : in
Packets input without CRC.
source : out
Packets output with CRC.
"""
def __init__(self, crc_class, description):
self.sink = sink = Sink(description)
self.source = source = Source(description)
self.busy = Signal()
# # #
dw = flen(sink.data)
crc = crc_class(dw)
fsm = FSM(reset_state="IDLE")
self.submodules += crc, fsm
fsm.act("IDLE",
crc.reset.eq(1),
sink.ack.eq(1),
If(sink.stb & sink.sop,
sink.ack.eq(0),
NextState("COPY"),
)
)
fsm.act("COPY",
crc.ce.eq(sink.stb & source.ack),
crc.data.eq(sink.data),
Record.connect(sink, source),
source.eop.eq(0),
If(sink.stb & sink.eop & source.ack,
NextState("INSERT"),
)
)
ratio = crc.width//dw
if ratio > 1:
cnt = Signal(max=ratio, reset=ratio-1)
cnt_done = Signal()
fsm.act("INSERT",
source.stb.eq(1),
chooser(crc.value, cnt, source.data, reverse=True),
If(cnt_done,
source.eop.eq(1),
If(source.ack, NextState("IDLE"))
)
)
self.comb += cnt_done.eq(cnt == 0)
self.sync += \
If(fsm.ongoing("IDLE"),
cnt.eq(cnt.reset)
).Elif(fsm.ongoing("INSERT") & ~cnt_done,
cnt.eq(cnt - source.ack)
)
else:
fsm.act("INSERT",
source.stb.eq(1),
source.eop.eq(1),
source.data.eq(crc.value),
If(source.ack, NextState("IDLE"))
)
self.comb += self.busy.eq(~fsm.ongoing("IDLE"))
class LiteEthMACCRC32Inserter(LiteEthMACCRCInserter):
def __init__(self, description):
LiteEthMACCRCInserter.__init__(self, LiteEthMACCRC32, description)
class LiteEthMACCRCChecker(Module):
    """CRC Checker.

    Check CRC at the end of each packet.

    Parameters
    ----------
    crc_class : Module class
        CRC engine class (e.g. LiteEthMACCRC32) instantiated on the datapath width.
    description : description
        description of the dataflow.

    Attributes
    ----------
    sink : in
        Packets input with CRC.
    source : out
        Packets output without CRC and "error" set to 0
        on eop when CRC OK / set to 1 when CRC KO.
    """
    def __init__(self, crc_class, description):
        self.sink = sink = Sink(description)
        self.source = source = Source(description)
        self.busy = Signal()

        # # #

        dw = flen(sink.data)
        crc = crc_class(dw)
        self.submodules += crc
        # Number of datapath words occupied by the CRC at the end of a packet.
        ratio = crc.width//dw

        # NOTE(review): `error` is declared but never used in this block;
        # the source error is driven from sink.error | crc.error below.
        error = Signal()
        # FIFO of ratio+1 words delays the stream so the trailing CRC words
        # can be dropped from the output.
        fifo = InsertReset(SyncFIFO(description, ratio + 1))
        self.submodules += fifo

        fsm = FSM(reset_state="RESET")
        self.submodules += fsm

        fifo_in = Signal()
        fifo_out = Signal()
        fifo_full = Signal()

        self.comb += [
            fifo_full.eq(fifo.fifo.level == ratio),
            fifo_in.eq(sink.stb & (~fifo_full | fifo_out)),
            fifo_out.eq(source.stb & source.ack),

            Record.connect(sink, fifo.sink),
            fifo.sink.stb.eq(fifo_in),
            self.sink.ack.eq(fifo_in),

            # Output is valid only once the FIFO holds a full CRC's worth of
            # words; eop/error come from the live sink, payload from the FIFO.
            source.stb.eq(sink.stb & fifo_full),
            source.sop.eq(fifo.source.sop),
            source.eop.eq(sink.eop),
            fifo.source.ack.eq(fifo_out),
            source.payload.eq(fifo.source.payload),

            source.error.eq(sink.error | crc.error),
        ]
        # RESET: clear both the CRC engine and the FIFO between packets.
        fsm.act("RESET",
            crc.reset.eq(1),
            fifo.reset.eq(1),
            NextState("IDLE"),
        )
        self.comb += crc.data.eq(sink.data)
        # IDLE: start feeding the CRC on the first accepted word of a packet.
        fsm.act("IDLE",
            If(sink.stb & sink.sop & sink.ack,
                crc.ce.eq(1),
                NextState("COPY")
            )
        )
        # COPY: keep feeding the CRC until end of packet, then reset.
        fsm.act("COPY",
            If(sink.stb & sink.ack,
                crc.ce.eq(1),
                If(sink.eop,
                    NextState("RESET")
                )
            )
        )
        self.comb += self.busy.eq(~fsm.ongoing("IDLE"))
class LiteEthMACCRC32Checker(LiteEthMACCRCChecker):
    """Convenience specialization: a CRC checker wired for Ethernet CRC32."""
    def __init__(self, description):
        LiteEthMACCRCChecker.__init__(self, LiteEthMACCRC32, description)
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
7529c1864d46cd2e1e57b64e66c2b36754ac89dc | 1713334f9b68255f9adab70175c21f399d0460f3 | /python/090_Subsets_II.py | 520727cc72e1a7221eaa68275d4716961b9b0695 | [
"MIT"
] | permissive | coy0725/leetcode | 0a798b7adafe80f726e51c06c34835c4aa51b563 | 743a0bfa22402ec39858dc9c4c7dc531f825b953 | refs/heads/master | 2020-05-21T18:25:09.683714 | 2019-05-11T13:00:40 | 2019-05-11T13:00:40 | 186,132,894 | 2 | 0 | MIT | 2019-05-11T12:55:22 | 2019-05-11T12:55:21 | null | UTF-8 | Python | false | false | 1,369 | py | class Solution(object):
# def subsetsWithDup(self, nums):
# """
# :type nums: List[int]
# :rtype: List[List[int]]
# """
# nums.sort()
# res = []
# for i in range(1 << len(nums)):
# res.append(self.get_subsets(nums, i))
# # remove duplicate
# final_res = {}
# for subset in res:
# hash_key = ''.join([str(t) for t in subset])
# try:
# final_res[hash_key]
# except:
# final_res[hash_key] = subset
# return final_res.values()
#
# def get_subsets(self, nums, magic):
# res = []
# for i in range(len(nums)):
# if (1 << i) & magic != 0:
# res.append(nums[i])
# return res
def subsetsWithDup(self, nums):
nums.sort()
res = [[]]
begin = 0
for index in range(len(nums)):
if index == 0 or nums[index] != nums[index - 1]:
# generate all
begin = 0
size = len(res)
# use existing subsets to generate new subsets
for j in range(begin, size):
curr = list(res[j])
curr.append(nums[index])
res.append(curr)
# avoid duplicate subsets
begin = size
return res
| [
"qiyuangong@gmail.com"
] | qiyuangong@gmail.com |
ea825aa8dfb7fdbdf0f38041fed13b0c901d9a7f | 281c9bea63bf7d1188b40ae2cf3f2aa53f97a297 | /sections/views.py | 26113411fba416b064af202d600ebcb71728b3a6 | [] | no_license | pydatageek/fazla | 3ec13c5e8f4a621eb82e1d82e003e0e0e68f3657 | 0890de73f23e3f72b41095130d703a793745765e | refs/heads/master | 2023-02-14T01:11:17.477576 | 2021-01-05T10:15:58 | 2021-01-05T10:15:58 | 294,544,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import render
from django.urls import resolve, reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.defaults import server_error
from django.views.generic import (
DetailView, ListView, TemplateView)
from django.views.generic.edit import FormView
from core.choices import titles
from core.forms import ContactForm
def handler400(request, exception):
    """Render the custom 400 (bad request) error page."""
    template = 'lte/400.html'
    return render(request, template, status=400)
def handler403(request, exception):
    """Render the custom 403 (forbidden) error page."""
    template = 'lte/403.html'
    return render(request, template, status=403)
def handler404(request, exception):
    """Render the custom 404 (not found) error page."""
    template = 'lte/404.html'
    return render(request, template, status=404)
def handler500(request):
    """Delegate to Django's server_error view with the custom 500 template."""
    template = 'lte/500.html'
    return server_error(request, template)
class HomeView(TemplateView):
    """Home page view (template-only)."""
    # Fazla.net facts and stats
class SourceView(TemplateView):
    """Template view that adds the 'Sources' title to the context."""

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context.update(title=_('Sources'))
        return context
class AboutView(TemplateView):
    """Template view that adds the 'About' title to the context."""

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context.update(title=_('About'))
        return context
class ContactView(SuccessMessageMixin, FormView):
    """Contact form page: saves the submission and shows a success message."""
    form_class = ContactForm
    success_url = reverse_lazy('contact')
    success_message = _('Your form submission is successful, thank you.')

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context.update(title=_('Contact Us'))
        return context

    def form_valid(self, form):
        # Persist the submission before the mixin adds the success message.
        form.save()
        return super().form_valid(form)
| [
"pydatageek@gmail.com"
] | pydatageek@gmail.com |
241e3a94e942b64cd327a21199bf89a2fa868b9f | c522b0332ee42d01f1ee5bdd3cdd3d72eb9af24b | /venv/lib/python3.8/site-packages/lusid/models/transaction_set_configuration_data.py | 451ff60c8d6a19320df52968e69d9e43274314de | [] | no_license | Jeffkent01coder/trackphone | e5aad6f99efb0f0c11f260d1f2a0b232d5453dfe | 3570375938c7e947eb272d2cec1589202351141c | refs/heads/master | 2023-05-10T22:27:40.255686 | 2021-06-02T10:23:17 | 2021-06-02T10:23:17 | 373,125,235 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,941 | py | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2820
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TransactionSetConfigurationData(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Attribute name -> OpenAPI type; drives the recursion in to_dict().
    openapi_types = {
        'transaction_configs': 'list[TransactionConfigurationData]',
        'side_definitions': 'list[SideConfigurationData]',
        'links': 'list[Link]'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'transaction_configs': 'transactionConfigs',
        'side_definitions': 'sideDefinitions',
        'links': 'links'
    }

    # Attribute name -> 'required' | 'optional' per the API definition.
    required_map = {
        'transaction_configs': 'required',
        'side_definitions': 'optional',
        'links': 'optional'
    }

    def __init__(self, transaction_configs=None, side_definitions=None, links=None):  # noqa: E501
        """
        TransactionSetConfigurationData - a model defined in OpenAPI

        :param transaction_configs:  Collection of transaction type models (required)
        :type transaction_configs: list[lusid.TransactionConfigurationData]
        :param side_definitions:  Collection of side definitions
        :type side_definitions: list[lusid.SideConfigurationData]
        :param links:
        :type links: list[lusid.Link]
        """  # noqa: E501

        self._transaction_configs = None
        self._side_definitions = None
        self._links = None
        self.discriminator = None

        # transaction_configs is required: its setter raises on None.
        self.transaction_configs = transaction_configs
        self.side_definitions = side_definitions
        self.links = links

    @property
    def transaction_configs(self):
        """Gets the transaction_configs of this TransactionSetConfigurationData.  # noqa: E501

        Collection of transaction type models  # noqa: E501

        :return: The transaction_configs of this TransactionSetConfigurationData.  # noqa: E501
        :rtype: list[TransactionConfigurationData]
        """
        return self._transaction_configs

    @transaction_configs.setter
    def transaction_configs(self, transaction_configs):
        """Sets the transaction_configs of this TransactionSetConfigurationData.

        Collection of transaction type models  # noqa: E501

        :param transaction_configs: The transaction_configs of this TransactionSetConfigurationData.  # noqa: E501
        :type: list[TransactionConfigurationData]
        """
        # Required field: reject None explicitly.
        if transaction_configs is None:
            raise ValueError("Invalid value for `transaction_configs`, must not be `None`")  # noqa: E501

        self._transaction_configs = transaction_configs

    @property
    def side_definitions(self):
        """Gets the side_definitions of this TransactionSetConfigurationData.  # noqa: E501

        Collection of side definitions  # noqa: E501

        :return: The side_definitions of this TransactionSetConfigurationData.  # noqa: E501
        :rtype: list[SideConfigurationData]
        """
        return self._side_definitions

    @side_definitions.setter
    def side_definitions(self, side_definitions):
        """Sets the side_definitions of this TransactionSetConfigurationData.

        Collection of side definitions  # noqa: E501

        :param side_definitions: The side_definitions of this TransactionSetConfigurationData.  # noqa: E501
        :type: list[SideConfigurationData]
        """
        self._side_definitions = side_definitions

    @property
    def links(self):
        """Gets the links of this TransactionSetConfigurationData.  # noqa: E501

        :return: The links of this TransactionSetConfigurationData.  # noqa: E501
        :rtype: list[Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this TransactionSetConfigurationData.

        :param links: The links of this TransactionSetConfigurationData.  # noqa: E501
        :type: list[Link]
        """
        self._links = links

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models (anything with a to_dict()).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TransactionSetConfigurationData):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"geofreyerastus956@gmail.com"
] | geofreyerastus956@gmail.com |
665c35990e477086523bce03d9fee6d46495ae84 | e6f16fbba8fba750099252c3490f00079cb19101 | /算法/350_两个数组的交集Ⅱ.py | ad88f718a1d9fe87106cf709d3b5ade3707c38aa | [] | no_license | hookeyplayer/exercise.io | 0a36fbec9df6c24b60ff6f97de27d3d5ae7769d4 | 605c81cb44443efd974db9fa0a088ddcd5a96f0f | refs/heads/master | 2023-06-20T17:03:20.310816 | 2021-07-31T12:50:21 | 2021-07-31T12:50:21 | 277,175,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # 结果尊重实际的个数,含重复
from typing import List
class Solution:
    """LeetCode 350 - Intersection of Two Arrays II."""

    def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Return each common element as many times as it occurs in both lists.

        Sorts both inputs in place, then walks them with two pointers in
        lockstep; the returned list is therefore in ascending order.
        """
        nums1.sort()
        nums2.sort()
        result = []
        i = j = 0
        while i < len(nums1) and j < len(nums2):
            left, right = nums1[i], nums2[j]
            if left == right:
                result.append(left)
                i += 1
                j += 1
            elif left < right:
                i += 1
            else:
                j += 1
        return result
# Ad-hoc smoke check: runs at module execution time.
test = Solution()
nums1 = [1,2,2,1]
nums2 = [2,2]
print(test.intersect(nums1, nums2)) # [2, 2] | [
"noreply@github.com"
] | hookeyplayer.noreply@github.com |
fa3d62dc6ea23306d2e45604b8b2469de665a70d | 4ede96380f20c65e014f7e5748789c81a4700115 | /enums/enum_example_pb2.py | 89ed4506ac879d427eb539c122205ebaab314ea2 | [] | no_license | DavidWalshe93/Python_Protobuf | 1724689fc4d24c51d2bf40cb5ac2655355ed9aae | 1af5ecf7ac9fd479b7e283d9cb5ef1c5dd54a94a | refs/heads/master | 2022-04-20T08:04:38.660161 | 2020-04-18T00:26:51 | 2020-04-18T00:26:51 | 256,630,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,370 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: enum_example.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='enum_example.proto',
package='example.enumerations',
syntax='proto3',
serialized_options=b'Z\006enumpb',
serialized_pb=b'\n\x12\x65num_example.proto\x12\x14\x65xample.enumerations\"V\n\x0b\x45numMessage\x12\n\n\x02id\x18\x01 \x01(\x05\x12;\n\x0f\x64\x61y_of_the_week\x18\x02 \x01(\x0e\x32\".example.enumerations.DayOfTheWeek*w\n\x0c\x44\x61yOfTheWeek\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MONDAY\x10\x01\x12\x0b\n\x07TUESDAY\x10\x02\x12\r\n\tWEDNESDAY\x10\x03\x12\x0c\n\x08THURSDAY\x10\x04\x12\n\n\x06\x46RIDAY\x10\x05\x12\x0c\n\x08SATURDAY\x10\x06\x12\n\n\x06SUNDAY\x10\x07\x42\x08Z\x06\x65numpbb\x06proto3'
)
_DAYOFTHEWEEK = _descriptor.EnumDescriptor(
name='DayOfTheWeek',
full_name='example.enumerations.DayOfTheWeek',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MONDAY', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TUESDAY', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEDNESDAY', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='THURSDAY', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FRIDAY', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SATURDAY', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUNDAY', index=7, number=7,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=132,
serialized_end=251,
)
_sym_db.RegisterEnumDescriptor(_DAYOFTHEWEEK)

DayOfTheWeek = enum_type_wrapper.EnumTypeWrapper(_DAYOFTHEWEEK)
# Module-level aliases for the enum values (proto3: 0 is the default value).
UNKNOWN = 0
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
SUNDAY = 7
_ENUMMESSAGE = _descriptor.Descriptor(
name='EnumMessage',
full_name='example.enumerations.EnumMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='example.enumerations.EnumMessage.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='day_of_the_week', full_name='example.enumerations.EnumMessage.day_of_the_week', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=130,
)
_ENUMMESSAGE.fields_by_name['day_of_the_week'].enum_type = _DAYOFTHEWEEK
DESCRIPTOR.message_types_by_name['EnumMessage'] = _ENUMMESSAGE
DESCRIPTOR.enum_types_by_name['DayOfTheWeek'] = _DAYOFTHEWEEK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete EnumMessage class from its descriptor at import time.
EnumMessage = _reflection.GeneratedProtocolMessageType('EnumMessage', (_message.Message,), {
  'DESCRIPTOR' : _ENUMMESSAGE,
  '__module__' : 'enum_example_pb2'
  # @@protoc_insertion_point(class_scope:example.enumerations.EnumMessage)
  })
_sym_db.RegisterMessage(EnumMessage)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"david.walshe93@gmail.com"
] | david.walshe93@gmail.com |
b13c1da60cd7c88594500929261c5c9b25d14f78 | ffdd64174bee59665833535c9ae618abd6ddcc7a | /graphs_to_tensors.py | 1be5bca997a64a463d7dbcb57d8ec8dceb298fed | [] | no_license | pengyuan2020/ProPPR-python | dee9e8e2713169af8538ac2cbc0a1f29c93f0d9a | 2977e9e0801d94048d114df8349d13b52091a7a8 | refs/heads/master | 2022-04-10T06:29:45.588641 | 2017-12-21T00:50:53 | 2017-12-21T00:50:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,856 | py | import numpy as np
import networkx as nx
import sparse
import nltk
import cPickle as pickle
import os
from setting import *
from joblib import Parallel, delayed
def get_proof_graph(ppr_grounded_line, feature_vector_size):
    """Parse one space-separated grounded proof line into a directed graph.

    Line layout (fields split on ' '): query example, query node id,
    comma-separated positive node ids, comma-separated negative node ids,
    node count, edge count, label-dependency count, then edges of the form
    "src->dst:feat@weight,feat@weight,...".

    Each node gets a Label attribute: 1 = positive, -1 = negative,
    2 = query node, 0 = other. Each edge gets a dense 'feature_vector'
    list of length feature_vector_size.

    NOTE(review): under Python 3, map() returns a one-shot iterator, so the
    repeated `node in pos_nodes` membership tests below would misbehave;
    this module appears to target Python 2 (see the print statements in the
    script section of this file) — confirm before porting.
    """
    graph = nx.DiGraph()
    array = ppr_grounded_line.strip().split(' ')
    query_example = array[0]
    query_node = int(array[1])
    pos_nodes = map(int, array[2].split(','))
    neg_nodes = map(int, array[3].split(','))
    nodes_count = int(array[4])
    edges_count = int(array[5])
    label_dependencies_count = int(array[6])
    edges = array[7:]
    # Collect every node id that appears on either side of an edge token.
    nodes = []
    for e in ppr_grounded_line.strip().split(' '):
        if '->' in e:
            nodes.append(int(e.split('->')[0]))
            nodes.append(int(e.split('->')[1].split(':')[0]))
    nodes = list(set(nodes))
    for node in nodes:
        if node in pos_nodes:
            graph.add_node(node, Label=1)
        elif node in neg_nodes:
            graph.add_node(node, Label=-1)
        elif node == query_node:
            graph.add_node(node, Label=2)
        else:
            graph.add_node(node, Label=0)
    for edge in edges:
        # "src->dst:feats" -> "src:dst:feats" so one split yields all parts.
        source, target, feature_weights = edge.replace('->', ':').split(':')
        source = int(source)
        target = int(target)
        feature_weights = [
            feature.split('@') for feature in feature_weights.split(',')
        ]
        feature_weights = [(int(feature_weight[0]), float(feature_weight[1]))
                           for feature_weight in feature_weights]
        # Scatter the sparse (index, weight) pairs into a dense vector.
        vector = [0.0] * feature_vector_size
        for feature_weight in feature_weights:
            vector[feature_weight[0]] = feature_weight[1]
        # graph.add_edge( source, target, {'feature_vector': ",".join( map(str, vector) ) } )
        graph.add_edge(source, target, {'feature_vector': vector})
    # nx.write_graphml( graph, "graph.graphml" )
    return graph
def get_proof_graph_tensor(proof_graph, feature_vector_size):
    """Convert a proof graph into dense tensors for training.

    :param proof_graph: graph from get_proof_graph(); every node carries a
        "Label" attribute (1 = correct answer, -1 = incorrect answer,
        2 = query node, 0 = other) and every edge a 'feature_vector' list of
        length feature_vector_size.
    :param feature_vector_size: length of each edge feature vector.
    :return: [one_hot_query_vector (n,1), featured_adjacency_matrix (n,n,f),
              correct_answer_vector (n,1), incorrect_answer_vector (n,1)]
    """
    node_list = proof_graph.nodes()
    size = len(node_list)
    # O(1) node -> row lookups instead of repeated O(n) list.index() calls.
    node_index = {node: i for i, node in enumerate(node_list)}

    # Allocate the (n, n, f) edge-feature tensor directly in NumPy instead of
    # building nested Python lists first (the previous approach was O(n^2 * f)
    # in Python objects). The unused nx.adjacency_matrix() call was removed.
    featured_adjacency_matrix = np.zeros((size, size, feature_vector_size))
    for source, target in proof_graph.edges_iter():
        featured_adjacency_matrix[node_index[source], node_index[target]] = \
            proof_graph[source][target]['feature_vector']

    # One-hot indicator columns, filled in a single pass over the nodes.
    correct_answer_vector = np.zeros([size, 1], dtype=np.float32)
    incorrect_answer_vector = np.zeros([size, 1], dtype=np.float32)
    one_hot_query_vector = np.zeros([size, 1], dtype=np.float32)
    for i, node in enumerate(node_list):
        label = int(proof_graph.node[node]["Label"])
        if label == 1:
            correct_answer_vector[i] = 1.0
        elif label == -1:
            incorrect_answer_vector[i] = 1.0
        elif label == 2:
            one_hot_query_vector[i] = 1.0

    return [
        one_hot_query_vector, featured_adjacency_matrix, correct_answer_vector,
        incorrect_answer_vector
    ]
##### (END) Conversion of question objects/graphs to feature representations #####
def dump_graph_tensor(idx, tensors_dir, ppr_grounded_line):
    """Build the tensors for one grounded line and pickle them as sparse COO.

    Returns the feature vector size parsed from the line so the caller can
    record it.
    """
    # NOTE: the size field here is read from a tab-split, while
    # get_proof_graph splits on spaces — this mirrors the original layout.
    feature_vector_size = int(ppr_grounded_line.strip().split('\t')[6]) + 1
    graph = get_proof_graph(ppr_grounded_line, feature_vector_size)
    tensors = get_proof_graph_tensor(graph, feature_vector_size)
    sparse_tensors = [sparse.COO(tensor) for tensor in tensors]
    out_path = os.path.join(tensors_dir, '{}-sparse.pickle'.format(idx))
    with open(out_path, 'wb') as out_file:
        pickle.dump(sparse_tensors, out_file, protocol=pickle.HIGHEST_PROTOCOL)
    return feature_vector_size
# ppr_grounded_line = 'predict(train00004,X1). 1 6 5 6 13 42 6->6:9@1.0 6->1:2@1.0 5->5:9@1.0 5->1:2@1.0 4->6:10@0.6097,14@0.4334,13@0.6097,12@0.2572,11@0.051 4->1:2@1.0 3->5:8@0.6097,7@0.4334,6@0.2572,5@0.6097,4@0.051 3->1:2@1.0 2->3:3@1.0 2->4:3@1.0 2->1:2@1.0 1->2:1@1.0 1->1:2@1.0'
# Script section (Python 2 print statements): ground each train/test line
# into sparse tensors in parallel, then record the feature vector size.
processed_data_dir = os.path.join('ProcessedData', program_name)
set_names = ['train', 'test']
process_count = 4
for set_name in set_names:
    print 'In set {}'.format(set_name)
    sld_grounded_path = os.path.join(
        processed_data_dir, program_name + '-{}.grounded'.format(set_name))
    tensors_dir = os.path.join(processed_data_dir, 'Tensors', set_name)
    if not os.path.exists(tensors_dir):
        os.makedirs(tensors_dir)
    with open(sld_grounded_path) as f:
        ppr_grounded_lines = f.readlines()
    # One task per grounded line, fanned out over process_count workers.
    feature_vector_sizes = Parallel(n_jobs=process_count)(
        delayed(dump_graph_tensor)(idx, tensors_dir, ppr_grounded_line)
        for idx, ppr_grounded_line in enumerate(ppr_grounded_lines))
    # Assumes every line reports the same feature vector size — TODO confirm.
    feature_vector_size = feature_vector_sizes[0]
    print 'set {} processed'.format(set_name)
feature_vector_size_path = os.path.join(processed_data_dir, 'feat_size.txt')
with open(feature_vector_size_path, 'w') as f:
    f.write(str(feature_vector_size))
| [
"harshjtrivedi94@gmail.com"
] | harshjtrivedi94@gmail.com |
f0231d6bbd4a58f6b16cf5bba65790be6216608a | 0d0b8236ff06027037d2a8a724d13a1866a9999c | /0x11-python-network_1/5-hbtn_header.py | 34b55fdc04a4eacfad8cc0eaf47eda1070829434 | [] | no_license | Danucas/holbertonschool-higher_level_programming | 3f8e81a610bf80890280b764362b56ad8803e2df | b963d41af8bccf764dff67f80ea16f1184c0a96d | refs/heads/master | 2022-07-31T05:53:57.046789 | 2020-05-21T21:29:54 | 2020-05-21T21:29:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | #!/usr/bin/python3
"""
Python script to fetch an https request
"""
import requests
import sys
def main():
    """GET the URL given as argv[1] and print its X-Request-Id header.

    Prints nothing when the header is absent. (Replaces a bare
    `except: pass`, which also swallowed unrelated errors, with an
    explicit presence check.)
    """
    response = requests.get(sys.argv[1])
    request_id = response.headers.get('X-Request-Id')
    if request_id is not None:
        print(request_id)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"danrodcastillo1994@gmail.com"
] | danrodcastillo1994@gmail.com |
9e7d0dfdbc7930489b9ea969fab38d8a0d270139 | 46942a623716ef361d892274f3f47d499fe91a97 | /service/wikipedia_scrape_service/service.py | e4bb2d38d1c8ec127bcfd478b23537916a54b77b | [] | no_license | ramosjanoah/gegeelisa | 86ebac3bfd1f03f45b087f4b78e9a1a9da857924 | 4e782b247bdc024153bf9ff271368957ab95727a | refs/heads/master | 2020-11-27T10:50:35.631254 | 2019-12-21T12:39:12 | 2019-12-21T12:39:12 | 229,410,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | from urllib.request import urlopen
from functools import lru_cache
import repository
from repository import redis
from repository import beautiful_soup
from model import WikipediaPage, WikipediaPageComponent, PageStateEnum
from .scrape_unfounded_page import ScrapeUnfoundedPage
from .scrape_founded_page import ScrapeFoundedPage
from helper import *
class WikipediaScrapeService():
    """Thin facade over the page-scraping operations.

    Rebound to a singleton instance at the bottom of this module when the
    module is imported.
    """
    def __init__(self):
        pass

    def scrape_unfounded_page(self, page_id, **kwargs):
        # NOTE(review): kwargs is passed as a plain dict positional argument,
        # not expanded with ** — presumably ScrapeUnfoundedPage takes
        # (page_id, options_dict); confirm against its constructor.
        return ScrapeUnfoundedPage(page_id, kwargs).perform()

    def scrape_founded_page(self, page_id, **kwargs):
        # Same delegation pattern for pages that were already found.
        return ScrapeFoundedPage(page_id, kwargs).perform()
if __name__ == '__main__':
    pass
else:
    # On import, rebind the class name to a singleton instance; after this
    # line the module-level name refers to the instance, not the class.
    WikipediaScrapeService = WikipediaScrapeService()
| [
"email@example.com"
] | email@example.com |
6c0d12ea106d70215d25e9ad8be29acdaaef13c0 | 0d1576eea1733c04e227fda48b2fc1d13f5f37cb | /PyFBA/gapfill/limit_reactions.py | d460fbcfb12cc965ec07c19c71590b8997bd6773 | [
"MIT"
] | permissive | linsalrob/PyFBA | d207b976a7cc3a6dc682647b4a72396bde83a892 | 3723e95e6f96481c4fc470a004dc88da3617f3c3 | refs/heads/master | 2023-03-09T10:16:11.812589 | 2023-02-25T21:54:19 | 2023-02-25T21:54:19 | 46,241,465 | 27 | 15 | MIT | 2021-05-22T22:56:22 | 2015-11-15T23:51:34 | Python | UTF-8 | Python | false | false | 1,267 | py |
def limit_reactions_by_compound(reactions, reactions2run, suggestions, max_rcts=50):
    """
    Filter the suggested reactions by how connected their compounds are.

    A suggestion is kept only if at least one of its compounds appears in
    the reactions we already plan to run, and that compound is attached to
    fewer than max_rcts of those reactions. This avoids highly connected
    compounds (e.g. H2O) pulling in everything.

    :param reactions: The reactions dict
    :type reactions: dict
    :param reactions2run: our base set of reactions that we will run
    :type reactions2run: set
    :param suggestions: the reactions we are considering adding
    :type suggestions: set
    :param max_rcts: the maximum number of reactions per compound
    :type max_rcts: int
    :return: members of suggestions that meet the criteria (minus reactions2run)
    :rtype: set
    """
    # How many of the base reactions touch each compound (keyed by str()).
    compound_counts = {}
    for reaction_id in reactions2run:
        for compound in reactions[reaction_id].all_compounds():
            key = str(compound)
            compound_counts[key] = compound_counts.get(key, 0) + 1

    kept = {
        reaction_id
        for reaction_id in suggestions
        if any(
            str(compound) in compound_counts
            and compound_counts[str(compound)] < max_rcts
            for compound in reactions[reaction_id].all_compounds()
        )
    }
    return kept - reactions2run
| [
"raedwards@gmail.com"
] | raedwards@gmail.com |
ad29e7018315f14b50adfa5ff59da8fa74676902 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/benchmarks/src/benchmarks/simplejson-bench.py | a634f84c31fc67358e8e6a81d8aaa1487d1b1e20 | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | import sys, time, os
# Make the bundled simplejson package importable regardless of CWD.
simplejson_path = os.path.join(os.path.dirname(__file__), 'simplejson')
sys.path.append(simplejson_path)
from simplejson.encoder import JSONEncoder

# Shared encoder instance used by all benchmark functions below.
encoder = JSONEncoder()
def produceData():
    """Build the benchmark payload: the integers 0..2999 as a list."""
    # list(range(...)) replaces the redundant identity comprehension.
    return list(range(3000))


# Module-level payload shared by the encode benchmarks.
DATA = produceData()
def encodeList(n):
    """Encode the module-level DATA payload n times; return the last result."""
    for _ in range(n):
        encoded = encoder.refactored_encode(DATA)
    return encoded
def encodeObject():
    """Encode an object that customizes its JSON form via for_json()."""
    class Payload:
        def for_json(self):
            return {'a': 1, 'b': 2, 'c': list(range(3000))}
    return encoder.refactored_encode(Payload())
def measure():
    """Time `num` list-encode iterations and print the elapsed seconds."""
    print("Start timing...")
    begin = time.time()
    encodeList(num)
    elapsed = "%.3f\n" % (time.time() - begin)
    print("simplejson-encode: " + elapsed)
# Iteration count for the timed run, taken from the command line.
num = int(sys.argv[1]) # 10000
# Warm-up: exercise the encoder before timing so caches/JIT are primed.
for i in range(100):
    json = encodeList(100)
measure()
| [
"ndrzmansn@gmail.com"
] | ndrzmansn@gmail.com |
37353ff9bc0e9011bda19ecf7bd8839ecc8cfae9 | 6a990a3d549ca3d1c607b60b13b10f14c0e15787 | /game/tuitongzi/command/client/reconnection_cmd.py | 8b4f70d45f713635ef02826a32818bd913f407f4 | [] | no_license | qianyc1020/server | 217a766f7df5a0acfb983d3fc48317a932fe092e | 3c897e5d6ee453d0a2f3b371b5eda5af954b8d1a | refs/heads/master | 2020-06-10T08:55:31.996449 | 2019-06-24T06:26:43 | 2019-06-24T06:26:43 | 193,626,075 | 1 | 0 | null | 2019-06-25T03:12:39 | 2019-06-25T03:12:37 | null | UTF-8 | Python | false | false | 3,693 | py | # coding=utf-8
import traceback
import core.globalvar as gl
from game.tuitongzi.mode.game_status import GameStatus
from protocol.base.base_pb2 import REENTER_GAME, SELF_INFO, SELF_PLAYED
from protocol.base.game_base_pb2 import RecReEnterGame, RecUpdateGameUsers
from protocol.game.bairen_pb2 import BaiRenScore
def execute(userId, message, messageHandle):
    """Handle a client reconnection for game id 7 (tuitongzi).

    Looks up the player's room in redis, and — under the room lock — replays
    the state the reconnecting client needs: game info, scores, banker list,
    trends and any pending ask, for seated players and watchers alike.
    """
    redis = gl.get_v("redis")
    if redis.exists(str(userId) + "_room"):
        roomNo = redis.get(str(userId) + "_room")
        gameid = redis.get(str(roomNo) + "_gameId")
        # This handler only serves game id 7.
        if 7 != gameid:
            return
        redis.lock("lockroom_" + str(roomNo))
        try:
            room = redis.getobj("room_" + str(roomNo))
            seat = room.getWatchSeatByUserId(userId)
            if seat is not None:
                room.sendBetScore(messageHandle)
                room.save(redis)
                # Tell the client it re-entered an ongoing game.
                recReEnterGame = RecReEnterGame()
                recReEnterGame.gameState = room.gameStatus
                recReEnterGame.state = True
                recReEnterGame.curPlayCount = room.gameCount
                messageHandle.send_to_gateway(REENTER_GAME, recReEnterGame)
                room.recUpdateGameInfo(messageHandle)
                if room.getSeatByUserId(userId) is not None:
                    # Reconnecting player has a seat: send their own profile.
                    room.recUpdateScore(messageHandle, 0)
                    s = room.getSeatByUserId(userId)
                    userInfo = RecUpdateGameUsers.UserInfo()
                    userInfo.account = s.account
                    userInfo.playerId = s.userId
                    userInfo.headUrl = s.head
                    userInfo.createTime = s.createDate
                    userInfo.ip = s.ip
                    userInfo.online = s.online
                    userInfo.nick = s.nickname
                    userInfo.ready = s.ready
                    # Current score minus what is committed in play.
                    userInfo.score = s.score - s.playScore
                    userInfo.sex = s.sex
                    userInfo.totalCount = s.total_count
                    userInfo.loc = s.seatNo
                    userInfo.consumeVip = s.level
                    messageHandle.send_to_gateway(SELF_INFO, userInfo)
                    room.updateBankerList(messageHandle, userId)
                else:
                    # Watcher (no seat): resend room-level state.
                    room.recUpdateScore(messageHandle, userId)
                    room.updateBankerList(messageHandle, userId)
                    room.updateTrend(messageHandle, userId)
                    room.updateWatchSize(messageHandle, userId)
                    if room.gameStatus != GameStatus.WAITING:
                        room.recReEnterGameInfo(messageHandle, userId)
                        if seat.playScore > 0:
                            # Resend this user's per-position bets.
                            score = BaiRenScore()
                            for position in room.positions:
                                score.score.append(0 if userId not in position.playScores else position.playScores[userId])
                            messageHandle.send_to_gateway(SELF_PLAYED, score)
                        room.executeAsk(messageHandle, userId, 2)
                    else:
                        if room.started:
                            room.recReEnterGameInfo(messageHandle, userId)
                            if seat.playScore > 0:
                                score = BaiRenScore()
                                for position in room.positions:
                                    score.score.append(
                                        0 if userId not in position.playScores else position.playScores[userId])
                                messageHandle.send_to_gateway(SELF_PLAYED, score)
                            room.executeAsk(messageHandle, userId, 1)
        except:
            # NOTE(review): bare except; Python 2 print statement kept as-is.
            print traceback.print_exc()
        redis.unlock("lockroom_" + str(roomNo))
| [
"pengyi9627@gmail.com"
] | pengyi9627@gmail.com |
39430d9671b2910b0caaeea21a9fb271311ea5f1 | 753de2f926ba91986742a12904736443190766b6 | /personas/asgi.py | bc8be5ecff5241e6528591f0dccae80119bba61c | [] | no_license | nachovidondo/Documentacion_Personas | 236170274071c87bf9915614fac35798d916f1d0 | ab43e94e72cb5c31286dd7b32092498ff1496e0f | refs/heads/master | 2023-03-21T05:25:46.264032 | 2021-03-20T22:57:00 | 2021-03-20T22:57:00 | 349,852,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for personas project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'personas.settings')

# The ASGI callable used by servers such as Daphne or Uvicorn.
application = get_asgi_application()
| [
"ignaciovidondo@hotmail.com"
] | ignaciovidondo@hotmail.com |
5803be5af3a275fe6d0c28041530f9decf49b8fe | 377d86194fd6d23c8ef3df3e6f7d90092dd8f9b4 | /workout_tracker/tests/unit/muscles/test_urls.py | 45a1cf524438ed183d455dd3dba59ca62ea62734 | [
"MIT"
] | permissive | e-dang/Workout-Tracker | f20f44b012e895244bad413a46103415ffae5732 | 00a27597ea628cff62b320d616f56b2df4f344a0 | refs/heads/master | 2022-12-28T07:49:34.179307 | 2020-10-12T20:48:28 | 2020-10-12T20:48:28 | 293,937,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from rest_framework.reverse import reverse
from tests.utils import add_api_prefix
def test_muscles_list_url():
    """The muscle list route resolves under the API prefix."""
    expected = add_api_prefix('muscles/')
    assert reverse('muscle-list') == expected
def test_muscles_detail_url():
    """The muscle detail route embeds the pk under the API prefix."""
    muscle_id = 1
    expected = add_api_prefix(f'muscles/{muscle_id}/')
    assert reverse('muscle-detail', kwargs={'pk': muscle_id}) == expected
| [
"edang830@gmail.com"
] | edang830@gmail.com |
d1d93378ea4d333133881b4caa5a37b7c65de232 | 79ad169779336d346b58e9bd9652ce64e9e4856a | /dynamic_rest/fields/choice.py | 605be5cfe3669fa1f93ddad262ffad9a97a4dc00 | [
"MIT"
] | permissive | asaak-co/dynamic-rest | 0546444034926ff3b8b8e96834fbb6a1576aeaf6 | 09f510063a35898a871ca86b5a130595a046c7aa | refs/heads/master | 2023-08-09T02:42:28.577865 | 2023-07-28T14:00:38 | 2023-07-28T14:00:38 | 132,636,828 | 0 | 1 | MIT | 2018-08-09T18:05:29 | 2018-05-08T16:34:39 | Python | UTF-8 | Python | false | false | 434 | py | from .base import DynamicField
from rest_framework.serializers import ChoiceField
from dynamic_rest.meta import Meta
class DynamicChoiceField(
DynamicField,
ChoiceField,
):
def admin_render_value(self, value):
model = self.parent_model
source = self.source or self.field_name
choices = Meta(model).get_field(source).choices
choices = dict(choices).get(value, None)
return choices
| [
"alonetiev@gmail.com"
] | alonetiev@gmail.com |
248901b372b2f892552f9ecbc8f14fe246636431 | da0a7446122a44887fa2c4f391e9630ae033daa2 | /python/ray/serve/tests/test_deployment_graph_autoscaling.py | b390d9c5f360612ba766020637ad10aec2e5ea01 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | whiledoing/ray | d8d9ba09b7545e8fd00cca5cfad451278e61fffd | 9272bcbbcae1630c5bb2db08a8279f0401ce6f92 | refs/heads/master | 2023-03-06T16:23:18.006757 | 2022-07-22T02:06:47 | 2022-07-22T02:06:47 | 252,420,044 | 0 | 0 | Apache-2.0 | 2023-03-04T08:57:20 | 2020-04-02T10:07:23 | Python | UTF-8 | Python | false | false | 6,753 | py | import sys
import pytest
from python.ray.serve import constants as serve_constants
import ray
from ray import serve
from ray.serve.drivers import DAGDriver
from ray.dag.input_node import InputNode
from ray.serve.deployment_state import ReplicaState
from ray._private.test_utils import SignalActor, wait_for_condition
# Magic number to use for speed up scale from 0 replica
serve_constants.HANDLE_METRIC_PUSH_INTERVAL_S = 1
def get_num_running_replicas(controller, deployment_name):
replicas = ray.get(
controller._dump_replica_states_for_testing.remote(deployment_name)
)
running_replicas = replicas.get([ReplicaState.RUNNING])
return len(running_replicas)
def test_autoscaling_0_replica(serve_instance):
autoscaling_config = {
"metrics_interval_s": 0.1,
"min_replicas": 0,
"max_replicas": 2,
"look_back_period_s": 0.4,
"downscale_delay_s": 0,
"upscale_delay_s": 0,
}
@serve.deployment(
autoscaling_config=autoscaling_config,
)
class Model:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
return input + self.weight
with InputNode() as user_input:
model = Model.bind(1)
output = model.forward.bind(user_input)
serve_dag = DAGDriver.options(
route_prefix="/my-dag",
autoscaling_config=autoscaling_config,
).bind(output)
dag_handle = serve.run(serve_dag)
assert 2 == ray.get(dag_handle.predict.remote(1))
@pytest.mark.parametrize("min_replicas", [0, 1])
def test_autoscaling_with_chain_nodes(min_replicas, serve_instance):
signal = SignalActor.remote()
autoscaling_config = {
"metrics_interval_s": 0.1,
"min_replicas": min_replicas,
"max_replicas": 2,
"look_back_period_s": 0.4,
"downscale_delay_s": 30,
"upscale_delay_s": 0,
}
@serve.deployment(
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
)
class Model1:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
ray.get(signal.wait.remote())
return input + self.weight
@serve.deployment(
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
)
class Model2:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
return input + self.weight
with InputNode() as user_input:
model1 = Model1.bind(0)
model2 = Model2.bind(1)
output = model1.forward.bind(user_input)
output2 = model2.forward.bind(output)
serve_dag = DAGDriver.options(
route_prefix="/my-dag",
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
).bind(output2)
dag_handle = serve.run(serve_dag)
controller = serve_instance._controller
# upscaling
[dag_handle.predict.remote(0) for _ in range(10)]
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) >= 1
)
[dag_handle.predict.remote(0) for _ in range(10)]
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) >= 2
)
wait_for_condition(
lambda: get_num_running_replicas(controller, Model1.name) >= 1, timeout=40
)
wait_for_condition(
lambda: get_num_running_replicas(controller, Model1.name) >= 2, timeout=40
)
signal.send.remote()
wait_for_condition(
lambda: get_num_running_replicas(controller, Model2.name) >= 1, timeout=40
)
# downscaling
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) == min_replicas,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, Model1.name) == min_replicas,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, Model2.name) == min_replicas,
timeout=60,
)
def test_autoscaling_with_ensemble_nodes(serve_instance):
signal = SignalActor.remote()
autoscaling_config = {
"metrics_interval_s": 0.1,
"min_replicas": 0,
"max_replicas": 2,
"look_back_period_s": 0.4,
"downscale_delay_s": 30,
"upscale_delay_s": 0,
}
@serve.deployment(
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
)
class Model:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
return input + self.weight
@serve.deployment(
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
)
def combine(value_refs):
ray.get(signal.wait.remote())
return sum(ray.get(value_refs))
with InputNode() as user_input:
model1 = Model.bind(0)
model2 = Model.bind(1)
output1 = model1.forward.bind(user_input)
output2 = model2.forward.bind(user_input)
output = combine.bind([output1, output2])
serve_dag = DAGDriver.options(
route_prefix="/my-dag",
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
).bind(output)
dag_handle = serve.run(serve_dag)
controller = serve_instance._controller
assert get_num_running_replicas(controller, "Model") == 0
assert get_num_running_replicas(controller, "Model_1") == 0
assert get_num_running_replicas(controller, "combine") == 0
# upscaling
[dag_handle.predict.remote(0) for _ in range(10)]
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) >= 1
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "Model") >= 1, timeout=40
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "Model_1") >= 1, timeout=40
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "combine") >= 2, timeout=40
)
signal.send.remote()
# downscaling
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) == 0,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "Model") == 0,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "Model_1") == 0,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "combine") == 0, timeout=60
)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| [
"noreply@github.com"
] | whiledoing.noreply@github.com |
e0f32a920e709012ccfe001490c91293713eac63 | 67b4c76ba0a94c023e085706cc3e0760f700eecd | /prod_settings.py | 22087417dac1072182d4d1a36ff665e4adf163d4 | [] | no_license | rlr/sitesprint09 | 928ef7129f0b120a2a1d02226b154a28d462e0bd | 3f74631c6feb9dcab50dc263dba145755cefab6f | refs/heads/master | 2021-01-10T21:30:30.745816 | 2010-01-30T02:35:45 | 2010-01-30T02:35:45 | 388,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | # -*- coding: utf-8 -*-
LOCAL_DEV = False
DEBUG = False
TEMPLATE_DEBUG = DEBUG
#sorl-thumbnail
THUMBNAIL_DEBUG = False
#django-contact-form
DEFAULT_FROM_EMAIL = 'rickyrosario@gmail.com'
MANAGERS = (
('Ricky Rosario','rickyrosario@gmail.com'),
)
DATABASE_ENGINE = 'postgresql_psycopg2' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'db_X' # Or path to database file if using sqlite3.
DATABASE_USER = 'pg_X' # Not used with sqlite3.
DATABASE_PASSWORD = 'X' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'ABC'
EMAIL_HOST_PASSWORD = 'ABC'
EMAIL_USE_TLS = True
CACHE_BACKEND = 'memcached://127.0.0.1:11211'
CACHE_MIDDLEWARE_SECONDS = 60*5
CACHE_MIDDLEWARE_KEY_PREFIX = 'rr.'
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
### DEBUG-TOOLBAR SETTINGS
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
### django-markup
MARKUP_CHOICES = (
'none',
'markdown',
'textile',
) | [
"rickyrosario@gmail.com"
] | rickyrosario@gmail.com |
6b6f2a16f52435fd0b9bf3df97aef77d9bb2f821 | 22e9d7c194cf22513d68b61b97c49405a47e8708 | /Hashing/count_No_Teams.py | 2d1043accf6dbc6c8c2b8b3d941b2330ed460298 | [] | no_license | SandeepPadhi/Algorithmic_Database | 44c26f9300a99539781c5beb5587997b3ecadfe1 | ab8040a7dad94c84ec88f40e44b8520edcbe2443 | refs/heads/main | 2023-06-22T02:04:29.362315 | 2021-07-19T17:48:40 | 2021-07-19T17:48:40 | 338,329,340 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | """
Date:24/02/2021
1395. Count Number of Teams - Leetcode - Medium
The following program is using Hashing
"""
class Solution:
def numTeams(self, rating: List[int]) -> int:
highindex={}
lowindex={}
for i in range(len(rating)):
highindex[i]=[]
lowindex[i]=[]
for j in range(i+1,len(rating)):
if rating[j]>rating[i]:
highindex[i].append(j)
elif rating[j]<rating[i]:
lowindex[i].append(j)
count=0
for i in range(len(rating)):
for j in highindex[i]:
count+=len(highindex[j])
for j in lowindex[i]:
count+=len(lowindex[j])
return count
"""
Date:24/02/2021
The following program is solved using simple counting .
"""
"""
def numTeams(self, rating: List[int]) -> int:
Ans=0
for i in range(1,len(rating)-1):
leftlow,lefthigh,rightlow,righthigh=0,0,0,0
j=i-1
while(j>=0):
if rating[j]<rating[i]:
leftlow+=1
elif rating[j]>rating[i]:
lefthigh+=1
j-=1
j=i+1
while(j<len(rating)):
if rating[i]<rating[j]:
righthigh+=1
elif rating[i]>rating[j]:
rightlow+=1
j+=1
Ans+=(leftlow*righthigh + lefthigh*rightlow)
return Ans
""" | [
"padhisandeep96@gmail.com"
] | padhisandeep96@gmail.com |
4c08c95b73a2a6f9d94cb46833129c2508a1bf92 | c633bf9cbfa588ecd26d6daebb5434b08542bcb3 | /warehouse_by_active_user/models/warehouse.py | 1ffa053b1cf9a9fa92043664614a9bdc53ac7e66 | [] | no_license | rosalesdc/am_testing | 57c6afa0f6e028569c682d8bfff7d0e80d08c12d | b78be0ef4eb6a6ab916e4840d900a73cca427a0e | refs/heads/master | 2020-09-08T08:48:00.213951 | 2019-11-11T22:51:34 | 2019-11-11T22:51:34 | 221,083,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # -*- encoding: utf-8 -*-
#
# Module written to Odoo, Open Source Management Solution
#
# Copyright (c) 2017 Telematel - http://www.telematel.com/
# All Rights Reserved.
#
# Developer(s): Luis Ernesto Garcia Medina
# (ernesto.garcia@telematel.com)
#
########################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from odoo import api, models, fields
class StockWarehouse(models.Model):
_inherit = 'stock.warehouse'
user_ids = fields.One2many('res.users', 'warehouse_id')
| [
"rosales9146@gmail.com"
] | rosales9146@gmail.com |
37f3382aaf1b32e126dda9c8ba924399ca4eae1e | 783d136927a55eb83734bfd4eee4f4de06ababe2 | /DeepFM/DeepFM1.py | 04b1f6b1cb52f55d52af811bd940d31a38c169a1 | [] | no_license | robertaaa/code_study | 51bf9f87cf32c6c131c2711a0626fbd0e6ceec57 | 3fb264ed9fb36abe156c6663316b2e80169c26ac | refs/heads/master | 2023-09-05T21:58:20.055755 | 2021-11-08T05:07:49 | 2021-11-08T05:07:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,144 | py | import pandas as pd
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from deepctr.models import DeepFM
from deepctr.utils import SingleFeat
if __name__ == "__main__":
data = pd.read_csv('./criteo_sample.txt')
#拆分稀疏和稠密特征
sparse_features = ['C' + str(i) for i in range(1, 27)]
dense_features = ['I' + str(i) for i in range(1, 14)]
data[sparse_features] = data[sparse_features].fillna('-1', )
data[dense_features] = data[dense_features].fillna(0, )
target = ['label']
# 1.类别特征的编码与稠密特征做归一化
for feat in sparse_features:
lbe = LabelEncoder()
data[feat] = lbe.fit_transform(data[feat])
mms = MinMaxScaler(feature_range=(0, 1))
data[dense_features] = mms.fit_transform(data[dense_features])
# 2.统计稀疏特征类别特征个数,记录稠密特征类目
sparse_feature_list = [SingleFeat(feat, data[feat].nunique()) for feat in sparse_features]
dense_feature_list = [SingleFeat(feat, 0,) for feat in dense_features]
# 3.生成模型输入特征
train, test = train_test_split(data, test_size=0.2)
train_model_input = [train[feat.name].values for feat in sparse_feature_list] + \
[train[feat.name].values for feat in dense_feature_list]
test_model_input = [test[feat.name].values for feat in sparse_feature_list] + \
[test[feat.name].values for feat in dense_feature_list]
# 4.定义模型、预测、评估模型
model = DeepFM({"sparse": sparse_feature_list, "dense": dense_feature_list}, task='binary')
model.compile("adam", "binary_crossentropy", metrics=['binary_crossentropy'], )
history = model.fit(train_model_input, train[target].values, batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
pred_ans = model.predict(test_model_input, batch_size=256)
print("test LogLoss", round(log_loss(test[target].values, pred_ans), 4))
print("test AUC", round(roc_auc_score(test[target].values, pred_ans), 4))
| [
"jiangpeng.jiang@zhaopin.com.cn"
] | jiangpeng.jiang@zhaopin.com.cn |
c8a8925d67f8d6367f31e9f8874fb6c12a1cef90 | eb731db339fd09757cc816823aa875bcaf15abeb | /jobs/migrations/0024_auto_20150116_2026.py | 31697f0f928f47b0adcf929e74d0db4c10f09ed4 | [] | no_license | sokogfb/1source | a34c17b90e23744686d904d8ed195182ecdd5e1f | a866e16e52aa34800f0da60e615f47952e084735 | refs/heads/master | 2020-12-01T08:50:43.138251 | 2016-10-23T01:33:47 | 2016-10-23T01:33:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0023_application_hired_date'),
]
operations = [
migrations.AddField(
model_name='application',
name='interview_date',
field=models.DateField(blank=True, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='application',
name='interviewed',
field=models.BooleanField(default=False, help_text='Where they interviewed?'),
preserve_default=True,
),
]
| [
"devon.warren@gmail.com"
] | devon.warren@gmail.com |
ab3dd0815e9f713d51735f87335bdf9ccefb193a | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1484496_0/Python/Abraham/3.py | 3eb056330663b609738fc113d40d686cfd9d04a0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | import sys
infile=open(sys.argv[1])
#contents=infile.read()
#print contents
line=infile.readline()
numofcases=int(line)
print numofcases,"cases"
count=0
outfile=open("out.txt","w")
for i in range(numofcases):
count+=1
outfile.write("Case #"+str(count)+":\n")
line=infile.readline()
print line
numbers=[]
for n in line.split():
numbers.append(int(n))
numofnum=numbers.pop(0)
print numofnum
print numbers
sum={}
for n in numbers:
sum[n]=[n,]
found=False
for i in range(len(numbers)):
#for i in sum.keys():
# print i,sum[i]
for j in numbers:
if found:
break
newsum={}
for val in sum.keys():
if j not in sum[val]:
if j+val in sum.keys():
l1=sum[val][:]
l1.append(j)
if set(sum[j+val]) != set(l1):
str1=""
for i in l1:
str1+=str(i)+" "
str1+="\n"
str2=""
for i in sum[j+val]:
str2+=str(i)+" "
str2+="\n"
outfile.write(str1)
outfile.write(str2)
print l1
print sum[j+val]
found=True
break
else:
if j+val in newsum.keys():
l1=sum[val][:]
l1.append(j)
if set(newsum[j+val]) != set(l1):
print l1
print newsum[j+val]
str1=""
for i in l1:
str1+=str(i)+" "
str1+="\n"
str2=""
for i in newsum[j+val]:
str2+=str(i)+" "
str2+="\n"
outfile.write(str1)
outfile.write(str2)
found=True
break
else:
newsum[val+j]=sum[val][:]
newsum[val+j].append(j)
if found:
break
sum.update(newsum)
if not found:
outfile.write("Impossible"+"\n")
print "impossible"
outfile.close()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
e56fe243ff26e0c4ed95e36aed0397d5793f59fd | c64f2412f7a7ebc3f6bce2a2dcbb99f6b88a2268 | /users/arxiv/users/legacy/__init__.py | 09538c9dd112574852d0fa5619c20aa21da9e6c8 | [
"MIT"
] | permissive | Quang7hong81/arxiv-auth | 565614667f6900d0d9644d010b832acfee3ba983 | ca2b0091682248d84154cf0a0d70c28e2e4e9f91 | refs/heads/master | 2023-08-11T06:58:09.849172 | 2021-05-04T20:53:44 | 2021-05-04T20:53:44 | 415,073,254 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | """
Integrations with the legacy arXiv database for users and sessions.
This package provides integrations with legacy user and sessions data in the
classic DB. These components were pulled out as a separate package because
they are required by both the accounts service and the authn/z middlware,
and maintaining them in both places would create too much duplication.
"""
from . import sessions, exceptions, authenticate, models, accounts, util, \
endorsements
from .util import create_all, init_app, current_session, drop_all, \
is_configured, is_available, transaction
| [
"brp53@cornell.edu"
] | brp53@cornell.edu |
80b5a121e4ba0b0b9fa3de601259a3ed358a8f19 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations_async/_available_delegations_operations_async.py | 9bd5ba9d9b004892ecb52fedad1a086d3101ab89 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 5,085 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailableDelegationsOperations:
"""AvailableDelegationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["models.AvailableDelegationsResult"]:
"""Gets all of the available subnet delegations for this subscription in this region.
:param location: The location of the subnet.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableDelegationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.AvailableDelegationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailableDelegationsResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableDelegationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availableDelegations'} # type: ignore
| [
"noreply@github.com"
] | YijunXieMS.noreply@github.com |
a6b8938429590ad4f25219b94088b7a9e3c816ca | 4d21da5a3d07f4d05b997e80119cd79692ac0d25 | /Leetcode/301-400/326. Power of Three.py | 6dd5aa5bcea9575eb62407cbaf95c65965ef696e | [] | no_license | ErinC123/Algorithm | 92b2789ec3b36c49f9e65f2e7a702bb4b732e8ba | 4544fee91e811a6625000921c32ad054df550f1e | refs/heads/master | 2021-06-17T14:03:33.955233 | 2017-06-18T21:20:55 | 2017-06-18T21:20:55 | 75,894,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # Question: 326. Power of Three
# Difficulty: Easy
# Tags: Math
'''
Given an integer, write a function to determine if it is a power of three.
Follow up:
Could you do it without using any loop / recursion?
'''
class Solution(object):
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
if n == 0:
return False
if n == 1:
return True
while n%3 == 0:
n = n/3
if n == 1:
return True
return False
| [
"zhencao93@gmail.com"
] | zhencao93@gmail.com |
850b0fdbbdcd7b8c51b89ad013305f361b5402cd | b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb | /samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/paths/request_body_post_uniqueitems_false_validation_request_body/post.pyi | 8fd7d1c438a0e379c0e8540738e4ed3eb5e2bfdb | [
"Apache-2.0"
] | permissive | FallenRiteMonk/openapi-generator | f8b98940219eecf14dc76dced4b0fbd394522aa3 | b6576d11733ecad6fa4a0a616e1a06d502a771b7 | refs/heads/master | 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 | Apache-2.0 | 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null | UTF-8 | Python | false | false | 4,583 | pyi | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from unit_test_api.model.uniqueitems_false_validation import UniqueitemsFalseValidation
# body param
SchemaForRequestBodyApplicationJson = UniqueitemsFalseValidation
class BaseApi(api_client.Api):
def _post_uniqueitems_false_validation_request_body_oapg(
self: api_client.Api,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if body is schemas.unset:
raise exceptions.ApiValueError(
'The required body parameter has an invalid value of: unset. Set a valid value instead')
_fields = None
_body = None
serialized_data = request_body_body.serialize(body, content_type)
_headers.add('Content-Type', content_type)
if 'fields' in serialized_data:
_fields = serialized_data['fields']
elif 'body' in serialized_data:
_body = serialized_data['body']
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
fields=_fields,
body=_body,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostUniqueitemsFalseValidationRequestBody(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def post_uniqueitems_false_validation_request_body(
self: BaseApi,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_uniqueitems_false_validation_request_body_oapg(
body=body,
content_type=content_type,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def post(
self: BaseApi,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_uniqueitems_false_validation_request_body_oapg(
body=body,
content_type=content_type,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"noreply@github.com"
] | FallenRiteMonk.noreply@github.com |
a2271435a5491a60527f6283ff52ff6e8a33fd46 | b4bc1807fb084a83c02b2e39de690a570be64ebd | /events_site_example/apps/common_app/tests/helpers.py | f72cc39f95eac0b551c88db21280cf7ecbef4c68 | [] | no_license | pahaz/events-site-example | 81d7a088a36d0073b6c743d9bf417403ea945746 | f0d0ae18b137dd39afc6b09d01048bc4403dff92 | refs/heads/master | 2023-08-26T20:00:04.157919 | 2012-11-21T06:58:09 | 2012-11-21T06:58:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | # -*- coding: utf-8 -*-
from django.test import TestCase
class TestConditionBase(TestCase):
# model = ModelClass
# queryset_method_name = 'queryset_filter_method_name'
# instance_property_name = 'model_instance_property_name'
def setUp(self):
self.instance = self.create_instance()
# leave only 1 model instance
self.model.objects.all().exclude(pk=self.instance.pk).delete()
self.queryset_filter_method = getattr(self.model.objects, self.queryset_method_name)
def create_instance(self):
"""Creates instance of model"""
raise NotImplementedError('Subclasses must define this method.')
def assertInstanceMethodResponseReturns(self, response, msg):
self.assertEqual(getattr(self.instance, self.instance_property_name), response, msg=msg)
def assertFound(self, msg):
self.assertEqual(self.queryset_filter_method().count(), 1, msg=msg)
self.assertEqual(self.queryset_filter_method()[0], self.instance, msg=msg)
def assertNotFound(self, msg):
self.assertEqual(self.queryset_filter_method().count(), 0, msg=msg)
def assertConditionTrue(self, msg=None):
# test instance method
self.assertInstanceMethodResponseReturns(True, msg=msg)
# test QuerySet filter method
self.assertFound(msg=msg)
def assertConditionFalse(self, msg=None):
# test instance method
self.assertInstanceMethodResponseReturns(False, msg=msg)
# test QuerySet filter method
self.assertNotFound(msg=msg) | [
"web-chib@yandex-team.ru"
] | web-chib@yandex-team.ru |
8fbd777be6a4409caac091f3dd090dc2debebeef | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/297/67077/submittedfiles/testes.py | c7507d5c71e3b61038c611e9a7d783a2490240fe | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
idade= int(input("sua idade(i): "))
altura= float(input("sua altura(f): "))
print(idade)
print('sua idade inteira eh %d e a altura eh %.2f'%(idade,altura))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
78f46b6b21fb1b994bdb1e8203bf4bf6920782c9 | 1f7d287ef90041e20468513a26a39e1f3d221289 | /Level-4/s01/guvi-L4-s02-py02.py | 8b0e9652fdfd4d786ce1b1f5461d7d32068eb13d | [] | no_license | ksthacker/python | d787d69f954c0e9b59b0cc96a8b8fc5c0594d8a0 | 3a3775e1b9349e313f8c96ea11eade54a7e9bf54 | refs/heads/master | 2021-04-27T16:32:40.923316 | 2019-08-21T04:50:22 | 2019-08-21T04:50:22 | 122,303,461 | 0 | 17 | null | 2019-10-03T14:59:51 | 2018-02-21T07:09:32 | Python | UTF-8 | Python | false | false | 221 | py | import sys, string, math
n,k = input().split()
n,k = int(n), int(k)
L = [ int(x) for x in input().split()]
for i in range(0,k) :
a,b = input().split()
a,b = int(a), int(b)
print(sum(L[a-1:b]))
| [
"noreply@github.com"
] | ksthacker.noreply@github.com |
9a92894a5d92bfd4c100d639f10ae5a43bca57c9 | c66810fadc39a6ff020b8a04ec70903a5570b378 | /behavioral_patterns/state_pattern/state_exp1.py | e386e71ec9bb372658d21c61b142c70b9400b8dd | [] | no_license | MaxOvcharov/python_desing_patterns | 364c56c392bef3a882fd13fbd00a9c5d1241ccfd | eacc3f1483e40778178523d82adfb6c443dbf1ae | refs/heads/master | 2021-05-14T00:45:47.094302 | 2020-02-26T20:00:11 | 2020-02-26T20:00:11 | 116,548,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | #!/usr/bin/env python3
"""
EXAMPLE - https://sourcemaking.com/design_patterns/state.
Allow an object to alter its behavior when its internal state changes.
The object will appear to change its class.
"""
import abc
class Context:
"""
Define the interface of interest to clients.
Maintain an instance of a ConcreteState subclass that defines the
current state.
"""
def __init__(self, state):
self._state = state
def request(self):
self._state.handle()
class State(metaclass=abc.ABCMeta):
"""
Define an interface for encapsulating the behavior associated with a
particular state of the Context.
"""
@abc.abstractmethod
def handle(self):
pass
class ConcreteStateA(State):
"""
Implement a behavior associated with a state of the Context.
"""
def handle(self):
print(f'STATE: {self.__class__.__name__}')
class ConcreteStateB(State):
"""
Implement a behavior associated with a state of the Context.
"""
def handle(self):
print(f'STATE: {self.__class__.__name__}')
def main():
concrete_state_a = ConcreteStateA()
context = Context(concrete_state_a)
context.request()
if __name__ == "__main__":
main()
| [
"ovcharovmax@yandex.ru"
] | ovcharovmax@yandex.ru |
cc2c7d03ef3262a407a8d964d244a638cf7c6819 | ce71f783e00a82de64a0a80fe039beedc3ae4bab | /xing_ji/asgi.py | 6e85d1fe695c562046b21770cd85ee83bce7e9ac | [] | no_license | bayhax/xing_ji | d4105c55d87d8f885afe96fcff7aa905a91ca9bb | 239df41e130f84ee9c76b93fe56567ddbcc960b5 | refs/heads/master | 2023-02-07T20:30:35.923898 | 2020-12-30T07:19:28 | 2020-12-30T07:19:28 | 324,749,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for xing_ji project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xing_ji.settings')
application = get_asgi_application()
| [
"whlbayhax@163.com"
] | whlbayhax@163.com |
60f84921356470e29c35b00e56e242c0de1d90e8 | 1a6dd921861b56a0fb856856c6b318a24daf98d8 | /13 раздел/Задача N.py | 5b8663536888baef73200e665cd9ace5cdc83e17 | [
"WTFPL"
] | permissive | AlexeyZavar/informatics_solutions | cfd64529b1acd617d51315ba85b53c4d4f883471 | 66df4374b971b44a0165ad0c34b307a0b9f5fa15 | refs/heads/master | 2022-04-14T02:52:01.258939 | 2020-04-18T22:01:09 | 2020-04-18T22:01:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # Дана строка, возможно, содержащая пробелы. Определите количество слов в этой строке. Слово — это несколько подряд идущих букв латинского алфавита (как заглавных, так и строчных).
#
import re as regular_ex
def removeTrash(s):
l1 = s.count('-')
for i in range(l1):
del s[s.index('-')]
return s
s = input()
r = r"[A-Za-z]+"
t = removeTrash(regular_ex.findall(r, s))
l = len(t)
print(l)
| [
"alexeyzavar@gmail.com"
] | alexeyzavar@gmail.com |
6aca5bca8820467d965afb4f15a33b9baf041b4d | 12fc0aa145792bc0175cb369758111321706ea15 | /test/arthash/journals/integration_test.py | 26decd44e641d04c4815eb5779a809acd9e37d2f | [
"LicenseRef-scancode-free-unknown",
"Artistic-2.0"
] | permissive | arthash/arthash | 3d39f412e1fafcaccf31c62c658d542a03a4b0d0 | 8d59700308cedf3b62118ed021064b677408edeb | refs/heads/master | 2021-01-15T08:47:17.073225 | 2018-07-24T14:07:56 | 2018-07-24T14:24:50 | 59,588,339 | 2 | 1 | Artistic-2.0 | 2018-06-24T16:59:44 | 2016-05-24T16:06:17 | Python | UTF-8 | Python | false | false | 4,310 | py | import datetime, json, os, random, shutil, sys, tempfile, unittest, zipfile
from unittest.mock import patch
from pyfakefs.fake_filesystem_unittest import TestCase
from arthash.journals import keeper, organization
from arthash.util import hasher
# TODO: I'm disabling this test because it really only tests
# the most boring part of this - writing things in journals.
#
# My plan is to create a proper integration test which fires up a webserver,
# runs the client and does some (simulated) cryptography, and then tests the
# results - and then I'll reuse this code.
class IntegrationTest: # (unittest.TestCase): disabled
def do_test(self, i):
errors = list(Reader(i).test())
self.assertEqual(errors, [])
def test_0(self):
self.do_test(0)
def test_1(self):
self.do_test(1)
def test_2(self):
self.do_test(2)
def test_3(self):
self.do_test(3)
class IntegrationDesc:
TESTS = (
(18, 2, 2),
(360, 4, 3),
(180, 2, 4),
(1100, 256, 4),
)
BASE_DIR = os.path.dirname(__file__)
JOURNAL_DIR = os.path.join(BASE_DIR, 'journals', 'journal')
ZIP_FORMAT = JOURNAL_DIR + '-{count}-{org.page_size}-{org.levels}.zip'
TIME_DELTA = datetime.timedelta(seconds=2)
TIMESTAMP = datetime.datetime(2018, 7, 6)
HEX = '0123456789abcdef'
def __init__(self, count, page_size, levels):
self.temp_dir = tempfile.TemporaryDirectory()
self.directory = self.temp_dir.name
self.count = count
self.org = organization.Organization(page_size, levels)
self.time = self.TIMESTAMP
@property
def zipname(self):
return self.ZIP_FORMAT.format(**vars(self))
def add_hashes(self):
hf = keeper.Keeper(self.directory, self.org)
random.seed(0)
with patch('arthash.journals.keeper.timestamp') as timestamp:
timestamp.side_effect = self.timestamp
for i in range(int(self.count)):
hf.add_record(arthash=self.random_hash())
def random_hash(self):
return ''.join(self.HEX[random.randrange(16)] for i in range(64))
def timestamp(self):
self.time += self.TIME_DELTA
return self.time.isoformat()
class Writer(IntegrationDesc):
def write(self):
self.add_hashes()
zpf = zipfile.ZipFile(self.zipname, 'w', zipfile.ZIP_DEFLATED)
for rel_path in hasher.walk(self.directory):
abs_path = os.path.join(self.directory, rel_path)
zpf.write(abs_path, rel_path)
print('Wrote', self.count, 'hashes to', self.zipname)
@classmethod
def write_all(cls):
for test in cls.TESTS:
cls(*test).write()
class Reader(IntegrationDesc):
def __init__(self, i):
super().__init__(*self.TESTS[i])
def test(self):
# Yield a series of error messages.
self.add_hashes()
zpf = zipfile.ZipFile(self.zipname)
actual_names = set(hasher.walk(self.directory))
zip_names = set(zpf.namelist())
az, za = actual_names - zip_names, zip_names - actual_names
for name in actual_names - zip_names:
yield 'Name %s was unknown' % name
for name in zip_names - actual_names:
yield 'Name %s was missing' % name
for name in sorted(set(actual_names) & set(zip_names)):
expected = zpf.open(name).read().decode()
actual_name = os.path.join(self.directory, name)
actual = open(actual_name).read()
if actual != expected:
error = BAD_CONTENTS_ERROR.format(**locals())
print(error)
yield error
def write(self):
self.add_hashes()
zpf = zipfile.ZipFile(self.zipname, 'w', zipfile.ZIP_DEFLATED)
for rel_path in hasher.walk(self.directory):
abs_path = os.path.join(self.directory, rel_path)
zpf.write(abs_path, rel_path)
print('Wrote', self.count, 'hashes to', self.zipname)
@classmethod
def write_all(cls):
for test in cls.TESTS:
cls(*test).write()
BAD_CONTENTS_ERROR = """\
Contents differed for {name}:
Actual:
----
{actual}
----
Expected:
----
{expected}
"""
if __name__ == '__main__':
Writer.write_all()
| [
"tom@swirly.com"
] | tom@swirly.com |
8d75649c23883d73101fb513de73c707150077c3 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /Leg_102手舞足不蹈.py | 3faf9fd6df3bed1e64479965756f94af4c16b2b6 | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | # Write your code here :-)
from turtle import *
from math import *
from time import *
#============
def rot(x,y,a,b,L1,L2):
pu();goto(x,y+100); pd();
lt(a);fd(L1);lt(b);fd(L2);lt(b*0.7);fd(L2/4);pu();
#============
def main():
for k in range(10):
for g in range(-30,30,1):
a=g;b=1.5*g
clear();
#body:
pensize(38);pencolor(.5,0.2,0.8)
pu();goto(0,-20);pd();goto(0,100);pu()
#head:
pensize(28);pencolor(.5,0.1,0.3)
pu();goto(0,132);pd();goto(0,146);pu()
#arm:
pensize(8)
home();rot(18,0,a,b,80,70)
home();rot(-18,0,180-a,-b,80,70)
#leg:
pensize(18)
pu();goto(12,-30);pd();goto(38,-200);goto(58,-200);
pu();goto(-12,-30);pd();goto(-38,-200);goto(-58,-200);
sleep(0.01)
update()
#===========
tracer(False);ht();main();
| [
"noreply@github.com"
] | xiang-daode.noreply@github.com |
282729cc3d2bd08d97a5cf989ddb3de3ca1287c4 | 73346545e69194dc1cfd887314afe600076ff263 | /polling_stations/apps/addressbase/migrations/0004_onsad_ctry_flag.py | 83cfbf605be785239acb02402bf4599556ecd4fe | [] | permissive | chris48s/UK-Polling-Stations | c7a91f80c1ea423156ac75d88dfca31ca57473ff | 4742b527dae94f0276d35c80460837be743b7d17 | refs/heads/master | 2021-08-27T18:26:07.155592 | 2017-11-29T15:57:23 | 2017-11-29T15:57:23 | 50,743,117 | 1 | 0 | BSD-3-Clause | 2017-11-29T16:03:45 | 2016-01-30T20:20:50 | Python | UTF-8 | Python | false | false | 417 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('addressbase', '0003_auto_20170406_0954'),
]
operations = [
migrations.AddField(
model_name='onsad',
name='ctry_flag',
field=models.CharField(max_length=1, blank=True),
),
]
| [
"chris.shaw480@gmail.com"
] | chris.shaw480@gmail.com |
7229c4e10aaba2855bd989d9e61b8c8526911dee | a04aff1baf3dac3ad96fd78e90b0de357140db84 | /scientific_expedition/yaml_more_tips.py | 2e0b1f3dd0eb44ecee45739b74e097b9bd27ce38 | [
"MIT"
] | permissive | NigrumAquila/py_checkio | f4f66fe8e87ba88d4e9258a55521902541ca33ba | df437c2c3ad325d84714665000e3299a70e91f32 | refs/heads/master | 2022-07-30T04:33:42.107806 | 2020-05-10T09:57:58 | 2020-05-10T09:57:58 | 262,756,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | import re
def yaml(a):
yaml_dict = {}
for el in a.split('\n'):
if el != '':
key, value = el.split(':')
value = value.lstrip(' ')
if (value == '') or (value == None) or (value == 'null'):
yaml_dict[key] = None
elif (value.lower() in ['true', 'false']):
yaml_dict[key] = True if value.lower() == 'true' else False
elif (re.search(r'[a-zA-Z]+', value)):
value = re.sub(r'\\"', r'"', value)
try:
value = re.search(r'\"([\w\W]*)\"', value).group(1)
yaml_dict[key] = value
except AttributeError:
yaml_dict[key] = value
else:
yaml_dict[key] = int(value)
return yaml_dict | [
"kononenko-1999@bk.ru"
] | kononenko-1999@bk.ru |
c774212f3b6e029c47eb1f0663b0ee7c4e45dbcd | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/96/usersdata/184/55977/submittedfiles/estatistica.py | 36c2b98bc0c1c7e60882c5efda63b29526938142 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | # -*- coding: utf-8 -*-
n=int(input('digite n:'))
lista1=[]
lista2=[]
def media(lista):
soma1=0
for i in range(0,len(lista),1):
soma1=soma1+lista[i]
resultado=soma1/len(lista)
return (resultado)
def desviopadrao(lista):
soma2=0
for i in range(o,len(lista),1):
soma2=soma2+(lista[i]-media(lista))**(2)
desviopadrao=(soma2/(n-1))**(0.5)
return (desviopadrao)
for i in range(0,n,1):
numero=float(input('digite um numero:'))
lista1.append(numero)
for i in range(0,n,1):
numero=float(input('digite numero:'))
lista2.append(numero)
print('%.2f'%media(lista1))
print('%.2f'%desviopadrao(lista1))
print('%.2f'%media(lista2))
print('%.2f'%desviopadrao(lista2)) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
378c64df5fde3bb6b7506292d59d95fb8a8be1f2 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/kusto/azext_kusto/vendored_sdks/kusto/operations/__init__.py | 5750cdc82c59179c4778eb4c699f175f4439341b | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 1,235 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._cluster_operations import ClusterOperations
from ._cluster_principal_assignment_operations import ClusterPrincipalAssignmentOperations
from ._database_operations import DatabaseOperations
from ._database_principal_assignment_operations import DatabasePrincipalAssignmentOperations
from ._attached_database_configuration_operations import AttachedDatabaseConfigurationOperations
from ._data_connection_operations import DataConnectionOperations
from ._operation_operations import OperationOperations
__all__ = [
'ClusterOperations',
'ClusterPrincipalAssignmentOperations',
'DatabaseOperations',
'DatabasePrincipalAssignmentOperations',
'AttachedDatabaseConfigurationOperations',
'DataConnectionOperations',
'OperationOperations',
]
| [
"noreply@github.com"
] | ebencarek.noreply@github.com |
1996a1e9a7e0d18dab21229e6a431be83d1a7acc | 11852c0c085ad1f635f45aa085ece3987ae45620 | /chejian/pspnet_train.py | 0d5c14c97670c49a1cc4028912ff89aa9a13f7e1 | [
"BSD-3-Clause"
] | permissive | heixialeeLeon/segment_piwise | 3dc14be9605a652676a559ec7ab6764d23c67735 | 4eaeeb4aa36298c1140c9fb8a6adef7a3bcfb03f | refs/heads/master | 2020-04-19T10:31:05.538819 | 2019-01-29T11:26:42 | 2019-01-29T11:26:42 | 168,142,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,472 | py | import numpy as np
import torch
from PIL import Image
from argparse import ArgumentParser
from torch.optim import SGD, Adam
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, Normalize, Resize
from torchvision.transforms import ToTensor, ToPILImage
from piwise.dataset_chejian import ChejianDataSet
from model.pspnet import extractors
from model.pspnet.pspnet import *
from piwise.criterion import CrossEntropyLoss2d
from piwise.transform import Relabel, ToLabel, Colorize
from piwise.visualize import Dashboard
import torch.optim as optim
import argparse
NUM_CHANNELS = 3
NUM_CLASSES = 12
color_transform = Colorize()
image_transform = ToPILImage()
input_transform = Compose([
Resize(320),
ToTensor(),
Normalize([.485, .456, .406], [.229, .224, .225]),
])
target_transform = Compose([
Resize(320, interpolation=Image.NEAREST),
ToLabel(),
])
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
parser.add_argument('--datadir', required=False, default="/data_1/data/chejian/1207")
parser.add_argument('--epochs', default=50, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--save_interval', default=10, type=int, metavar='N',
help='number of epochs to save the model')
parser.add_argument('--batch_size', default=4, type=int,
help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
help='Checkpoint state_dict file to resume training from')
parser.add_argument('--cuda', action='store_true', default=True)
parser.add_argument('--num_workers', default=4, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
help='initial learning rate')
parser.add_argument('--lr_step', '--learning-rate step', default=10, type=int,
help='learning rate step')
parser.add_argument('--momentum', default=0.9, type=float,
help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
help='Gamma update for SGD')
parser.add_argument('--save_folder', default='weights',
help='Directory for saving checkpoint models')
parser.add_argument('--steps-loss', type=int, default=100)
args = parser.parse_args()
print(args)
def train():
#model = SegNet(3, NUM_CLASSES)
#model = FCN16(NUM_CLASSES)
model = PSPNet(n_classes= NUM_CLASSES, sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18')
if args.cuda:
model = model.cuda()
weight = torch.ones(12)
loader = DataLoader(ChejianDataSet(args.datadir, input_transform, target_transform),
num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True)
if args.cuda:
criterion = CrossEntropyLoss2d(weight.cuda())
else:
criterion = CrossEntropyLoss2d(weight)
#optimizer = Adam(model.parameters())
optimizer = SGD(model.parameters(), lr =args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [20, 30, 40, 50], gamma=0.1)
for epoch in range(0, args.epochs+1):
epoch_loss = []
scheduler.step(epoch)
for step, (images, labels) in enumerate(loader):
if args.cuda:
images = images.cuda()
labels = labels.cuda()
inputs = Variable(images)
targets = Variable(labels)
outputs = model(inputs)
optimizer.zero_grad()
targets = targets.squeeze(1)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
epoch_loss.append(loss.data[0])
if args.steps_loss > 0 and step % args.steps_loss == 0:
average = sum(epoch_loss) / len(epoch_loss)
print(f'loss: {average} (epoch: {epoch}, step: {step})')
if epoch % 2 == 0:
save_filename = "{}/model_{}.pth".format(args.save_folder,epoch)
torch.save(model.state_dict(), save_filename)
if __name__ == "__main__":
train() | [
"lipeizhao@em-data.com.cn"
] | lipeizhao@em-data.com.cn |
4f0245a842357c5dbe5e4147f485d8ff3038a40c | 8f0b0ec0a0a2db00e2134b62a1515f0777d69060 | /scripts/study_case/ID_5/matchzoo/datasets/snli/load_data.py | 06b9ec8754f14f19267ed9c9ad25faa384f88c44 | [
"Apache-2.0"
] | permissive | Liang813/GRIST | 2add5b4620c3d4207e7661eba20a79cfcb0022b5 | 544e843c5430abdd58138cdf1c79dcf240168a5f | refs/heads/main | 2023-06-09T19:07:03.995094 | 2021-06-30T05:12:19 | 2021-06-30T05:12:19 | 429,016,034 | 0 | 0 | Apache-2.0 | 2021-11-17T11:19:48 | 2021-11-17T11:19:47 | null | UTF-8 | Python | false | false | 3,224 | py | """SNLI data loader."""
import typing
from pathlib import Path
import pandas as pd
import scripts.study_case.ID_5.matchzoo as matchzoo
from scripts.study_case.ID_5.matchzoo.engine.base_task import BaseTask
_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
def load_data(
stage: str = 'train',
task: typing.Union[str, BaseTask] = 'classification',
target_label: str = 'entailment',
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load SNLI data.
:param stage: One of `train`, `dev`, and `test`. (default: `train`)
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance. (default: `classification`)
:param target_label: If `ranking`, chose one of `entailment`,
`contradiction`, `neutral`, and `-` as the positive label.
(default: `entailment`)
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
:return: A DataPack unless `task` is `classificiation` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f'snli_1.0_{stage}.txt')
data_pack = _read_data(file_path, task, target_label)
if task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
return data_pack
elif task == 'classification' or isinstance(
task, matchzoo.tasks.Classification):
classes = ['entailment', 'contradiction', 'neutral', '-']
if return_classes:
return data_pack, classes
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def _download_data():
ref_path = matchzoo.utils.get_file(
'snli', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='snli'
)
return Path(ref_path).parent.joinpath('snli_1.0')
def _read_data(path, task, target_label):
table = pd.read_csv(path, sep='\t')
df = pd.DataFrame({
'text_left': table['sentence1'],
'text_right': table['sentence2'],
'label': table['gold_label']
})
df = df.dropna(axis=0, how='any').reset_index(drop=True)
if task == 'ranking':
if target_label not in ['entailment', 'contradiction', 'neutral', '-']:
raise ValueError(f"{target_label} is not a valid target label."
f"Must be one of `entailment`, `contradiction`, "
f"`neutral` and `-`.")
df['label'] = (df['label'] == target_label)
elif task == 'classification':
classes = ['entailment', 'contradiction', 'neutral', '-']
df['label'] = df['label'].apply(classes.index)
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
return matchzoo.pack(df, task)
| [
"793679547@qq.com"
] | 793679547@qq.com |
04dcf815696115c267f19ffa51ed91d4902092d3 | 73143826c727eb012bff8c732ab5776c051f6c9b | /pytext/data/test/data_test.py | 128b7b8b815f9a089da1385a6d747c501e7b84d2 | [
"BSD-3-Clause"
] | permissive | wehaveone/pytext | f649fb81bb7bcf76c88ee81af5d204820011b1be | 41c0f46abf81c2d84ee02f6e9b91438e18adf47f | refs/heads/master | 2020-04-26T01:42:31.263801 | 2019-03-01T00:58:17 | 2019-03-01T01:03:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,965 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from pytext.common.constants import Stage
from pytext.data import Batcher, Data, types
from pytext.data.sources.data_source import SafeFileWrapper
from pytext.data.sources.tsv import TSVDataSource
from pytext.data.tensorizers import LabelTensorizer, WordTensorizer
from pytext.utils.test_utils import import_tests_module
tests_module = import_tests_module()
class DataTest(unittest.TestCase):
def setUp(self):
self.data_source = TSVDataSource(
SafeFileWrapper(tests_module.test_file("train_dense_features_tiny.tsv")),
SafeFileWrapper(tests_module.test_file("test_dense_features_tiny.tsv")),
eval_file=None,
field_names=["label", "slots", "text", "dense"],
schema={"text": types.Text, "label": types.Label},
)
self.tensorizers = {
"tokens": WordTensorizer(column="text"),
"labels": LabelTensorizer(column="label", allow_unknown=True),
}
def test_create_data_no_batcher_provided(self):
data = Data(self.data_source, self.tensorizers)
batches = list(data.batches(Stage.TRAIN))
# We should have made at least one non-empty batch
self.assertTrue(batches)
batch = next(iter(batches))
self.assertTrue(batch)
def test_create_batches(self):
data = Data(self.data_source, self.tensorizers, Batcher(batch_size=16))
batches = list(data.batches(Stage.TRAIN))
self.assertEqual(1, len(batches))
batch = next(iter(batches))
self.assertEqual(set(self.tensorizers), set(batch))
tokens, seq_lens = batch["tokens"]
self.assertEqual((10,), seq_lens.size())
self.assertEqual((10,), batch["labels"].size())
self.assertEqual({"tokens", "labels"}, set(batch))
self.assertEqual(10, len(tokens))
def test_create_batches_different_tensorizers(self):
tensorizers = {"tokens": WordTensorizer(column="text")}
data = Data(self.data_source, tensorizers, Batcher(batch_size=16))
batches = list(data.batches(Stage.TRAIN))
self.assertEqual(1, len(batches))
batch = next(iter(batches))
self.assertEqual({"tokens"}, set(batch))
tokens, seq_lens = batch["tokens"]
self.assertEqual((10,), seq_lens.size())
self.assertEqual(10, len(tokens))
def test_data_initializes_tensorsizers(self):
tensorizers = {
"tokens": WordTensorizer(column="text"),
"labels": LabelTensorizer(column="label"),
}
with self.assertRaises(AttributeError):
# verify WordTensorizer isn't in an initialized state yet
tensorizers["tokens"].vocab
Data(self.data_source, tensorizers)
# Tensorizers should have been initialized
self.assertEqual(49, len(tensorizers["tokens"].vocab))
self.assertEqual(7, len(tensorizers["labels"].labels))
def test_data_iterate_multiple_times(self):
data = Data(self.data_source, self.tensorizers)
batches = data.batches(Stage.TRAIN)
data1 = list(batches)
data2 = list(batches)
# We should have made at least one non-empty batch
self.assertTrue(data1)
self.assertTrue(data2)
batch1, _ = data1[0]
batch2, _ = data2[0]
# pytorch tensors don't have equals comparisons, so comparing the tensor
# dicts is non-trivial, but they should also be equal
self.assertEqual(batch1, batch2)
class BatcherTest(unittest.TestCase):
def test_batcher(self):
data = [{"a": i, "b": 10 + i, "c": 20 + i} for i in range(10)]
batcher = Batcher(batch_size=3)
batches = list(batcher.batchify(data))
self.assertEqual(len(batches), 4)
self.assertEqual(batches[1]["a"], [3, 4, 5])
self.assertEqual(batches[3]["b"], [19])
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
ad22536d51ac320742b2792288061eebc2f26c36 | 8f8498bb6f56b19d45a1989c8113a077348c0a02 | /SWEA/Level 4/장훈이의 높은 선반.py | 73f3564f795b4cc2858a65eb719e2a3098834f2d | [] | no_license | gjtjdtn201/practice | a09b437c892b0b601e156c09cb1f053b52fab11b | ea45582b2773616b2b8f350b927559210009d89f | refs/heads/master | 2021-01-01T13:29:46.640740 | 2020-11-28T00:55:37 | 2020-11-28T00:55:37 | 239,299,485 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | import sys
sys.stdin = open('장훈이의 높은 선반.txt', 'r')
def powerset(n, k, sum):
global ans
if sum >= B:
if ans > (sum - B):
ans = sum - B
return
if n == k:
return
else:
A[k] = 1
powerset(n, k + 1, sum + worker[k])
A[k] = 0
powerset(n, k + 1, sum)
T = int(input())
for test_case in range(1, T+1):
N, B = map(int, input().split())
worker = list(map(int, input().split()))
A = [0] * N
ans = 999999
powerset(N, 0, 0)
print('#{} {}'.format(test_case, ans)) | [
"gjtjdtn201@naver.com"
] | gjtjdtn201@naver.com |
9fd4f0ca59f4e0e97cf89be5690b1697e00b2d62 | f44e571fdac5e927235b434151dcc03f597a5e3d | /pyq/25_container/fast_data_set/py1.py | 0947f61fe0395ea1c028583f9d48c2eda7c11926 | [] | no_license | uni51/python_tutorial | 72f74e319cf4bc2662117ec0ad7b58e8a7d0e84b | 0a78111e3760ff4ff9957d67ced3bb0a83baaf9f | refs/heads/master | 2020-04-20T11:10:39.417725 | 2019-03-04T00:35:14 | 2019-03-04T00:35:14 | 168,808,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # 集合のメソッド(追加等)
items = {'art'}
print("items:", items) # items: {'art'}
# 1要素を削除
result = items.pop() # popでは、位置を指定して削除ができません
print("items.pop():", result) # items.pop(): art
print("items:", items) # items: set()
# リストを追加 複数の要素の追加は、extendではなくupdateです。
items.update(['egg', 'fog'])
print("items.update(['egg', 'fog'])") #items.update(['egg', 'fog'])
print("items:", items) # items: {'fog', 'egg'}
# 全削除
items.clear()
print("items.clear()") # items.clear()
print("items:", items) # items: set()
# 追加 要素の追加は、appendではなくaddです。
items.add('doll')
print("items.add('doll')") # items.add('doll')
print("items:", items) # items: {'doll'}
# 削除
items.remove('doll')
print("items.remove('doll')") # items.remove('doll')
print("items:", items) # items: set()
| [
"uni51@users.noreply.github.com"
] | uni51@users.noreply.github.com |
559ebd08ef157a69a6fc774088be4501fea9ab47 | b604219d67f011d44fdf79d0c88d546b3a8bd250 | /알고리즘/기타/진수계산.py | 64f802bbcddfdfcd010017dcfa2dd64407a49694 | [] | no_license | HwangToeMat/Algorithm | 197648d35e46ea1230291901de1caf2afc720f64 | ee2bb829cb71499c39f68d7691a1e84899135da5 | refs/heads/master | 2023-01-19T08:24:07.155305 | 2020-11-20T16:15:11 | 2020-11-20T16:15:11 | 296,235,075 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | """
def convert(n, base):
T = "0123456789ABCDEF"
q, r = divmod(n, base)
if q == 0:
return T[r]
else:
return convert(q, base) + T[r]
"""
def convert(n, base):
q, r = divmod(n, base)
if q == 0:
return [r]
else:
return convert(q, base) + [r]
| [
"46777602+HwangToeMat@users.noreply.github.com"
] | 46777602+HwangToeMat@users.noreply.github.com |
c1a6980787fb3929b0979d41514f2c9fce2e6a15 | 687928e5bc8d5cf68d543005bb24c862460edcfc | /nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnglobal_vpnurl_binding.py | e30bcf60328d3b370cfee3e566097acf59f29ff5 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | mbs91/nitro | c6c81665d6abd04de8b9f09554e5e8e541f4a2b8 | be74e1e177f5c205c16126bc9b023f2348788409 | refs/heads/master | 2021-05-29T19:24:04.520762 | 2015-06-26T02:03:09 | 2015-06-26T02:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,346 | py | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnglobal_vpnurl_binding(base_resource) :
    """ Binding class showing the vpnurl that can be bound to vpnglobal.

    Each instance represents one intranet vpnurl bound at VPN-global scope
    on a NetScaler appliance; the binding is addressed by its url name.
    (Auto-generated NITRO SDK style: every accessor is wrapped in
    try/except that simply re-raises.)
    """
    def __init__(self) :
        # Name of the bound intranet url; empty until set by the caller or
        # populated from a NITRO response.
        self._urlname = ""
        # Placeholder filled by the server for count()/count_filtered() calls.
        self.___count = 0

    @property
    def urlname(self) :
        """The intranet url.
        """
        try :
            return self._urlname
        except Exception as e:
            raise e

    @urlname.setter
    def urlname(self, urlname) :
        """The intranet url.
        """
        try :
            self._urlname = urlname
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(vpnglobal_vpnurl_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 means the session expired: drop it so the
                # next request re-authenticates.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                # NOTE(review): nesting reconstructed to match the upstream
                # NITRO SDK — raise for severity ERROR, or whenever no
                # severity was reported at all.
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.vpnglobal_vpnurl_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        # This binding has no object identifier, hence always None.
        try :
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        # Bind one vpnurl (single resource) or several (list) to vpnglobal;
        # bindings are created via update requests in the NITRO protocol.
        try :
            if resource and type(resource) is not list :
                updateresource = vpnglobal_vpnurl_binding()
                updateresource.urlname = resource.urlname
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [vpnglobal_vpnurl_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].urlname = resource[i].urlname
                    return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        # Unbind one vpnurl (single resource) or several (list) from vpnglobal.
        try :
            if resource and type(resource) is not list :
                deleteresource = vpnglobal_vpnurl_binding()
                deleteresource.urlname = resource.urlname
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [vpnglobal_vpnurl_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].urlname = resource[i].urlname
                    return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service) :
        """ Use this API to fetch a vpnglobal_vpnurl_binding resources.
        """
        try :
            obj = vpnglobal_vpnurl_binding()
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, filter_) :
        """ Use this API to fetch filtered set of vpnglobal_vpnurl_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = vpnglobal_vpnurl_binding()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service) :
        """ Use this API to count vpnglobal_vpnurl_binding resources configued on NetScaler.
        """
        try :
            obj = vpnglobal_vpnurl_binding()
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # The count is returned by the server inside the first element's
            # mangled ___count attribute.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, filter_) :
        """ Use this API to count the filtered set of vpnglobal_vpnurl_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = vpnglobal_vpnurl_binding()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
class vpnglobal_vpnurl_binding_response(base_response) :
    """Envelope for NITRO replies that carry vpnglobal_vpnurl_binding records."""

    def __init__(self, length=1) :
        # Status fields reported by the appliance alongside the payload.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one empty binding object per expected record so the
        # payload formatter can fill them in place.
        self.vpnglobal_vpnurl_binding = [vpnglobal_vpnurl_binding() for _ in range(length)]
| [
"bensassimaha@gmail.com"
] | bensassimaha@gmail.com |
338c0a147112cb40b6d8cbf00925df5f9410bf9a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/187/usersdata/354/65060/submittedfiles/al1.py | e2dd22058cfdabcf534c9ab2decc4d0b9297c6ca | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | # -*- coding: utf-8 -*-
#ENTRADA / PROCESSAMENTO / SAIDA
def celsius_para_fahrenheit(c):
    """Convert a temperature from degrees Celsius to Fahrenheit.

    Uses F = (9*C + 160) / 5, which is algebraically identical to the
    usual F = C*9/5 + 32. (Fixes the original `9c`, which is a syntax
    error in Python — multiplication must be explicit.)
    """
    return (9 * c + 160) / 5

def main():
    # Read the input, convert, and print the Fahrenheit value.
    c = float(input('Digite a temperatura em Celcius: '))
    print(celsius_para_fahrenheit(c))

if __name__ == '__main__':
    main()
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
25a145b4c8108a145d6d3399933dc550426d704e | a16feb303b7599afac19a89945fc2a9603ae2477 | /Simple_Python/standard/json/json_6.py | bd3667c3dd21fc36b25d45aa5824ede395a4ce01 | [] | no_license | yafeile/Simple_Study | d75874745ce388b3d0f9acfa9ebc5606a5745d78 | c3c554f14b378b487c632e11f22e5e3118be940c | refs/heads/master | 2021-01-10T22:08:34.636123 | 2015-06-10T11:58:59 | 2015-06-10T11:58:59 | 24,746,770 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #! /usr/bin/env/python
# -*- coding:utf-8 -*-
import json
data = [{'a':'张三','b':(2,4),'c':3.0,('d',):'D tuple'}]
print 'First attempt'
try:
print json.dumps(data)
except (TypeError,ValueError), err:
print 'ERROR:',err
print
print 'Second attempt'
print json.dumps(data,skipkeys=True) | [
"zhuzhulang@126.com"
] | zhuzhulang@126.com |
ca9ba61eb5cd74330a7ec90e9ff0add6ab1f6e29 | 913fb9ec1e709a5140676ba7b2371b1976afca72 | /alignSeqs/test.countContigEnd.py | 32ef21f447d6d5f9be0711c83cc931b70af24a2d | [] | no_license | cgreer/ResearchScripts | 171cfe9555ea06fdeb91084c12d07d1b45a2335c | 1107803bb1459d6b6e1dfb1a89679d2b6fd49062 | refs/heads/master | 2016-09-05T10:43:19.090247 | 2012-04-12T21:38:11 | 2012-04-12T21:38:11 | 1,673,080 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | import bioLibCG
import matplotlib.pyplot as plt
def countEnd(fN):
count3 = {'A': [], 'T': [], 'G': [], 'C': []}
count5 = {'A': [], 'T': [], 'G': [], 'C': []}
countT = {'A': [], 'T': [], 'G': [], 'C': []}
f = open(fN, 'r')
for line in f:
ls = line.strip().split('\t')
seq = ls[1]
letter0 = seq[-1]
count = 0
for i in reversed(seq):
if i == letter0:
count += 1
else:
break
count3[letter0].append(count)
letter = seq[0]
countAnother = 0
for i in seq:
if i == letter:
countAnother += 1
else:
break
count5[letter].append(countAnother)
if count > countAnother:
countT[letter0].append(count)
else:
countT[letter].append(countAnother)
plt.hist(countT['C'], 15, facecolor='r', label='C', alpha = 1.00)
plt.hist(countT['G'], 15, facecolor='y', label='G', alpha = .55)
plt.hist(countT['T'], 15, facecolor='g', label='T', alpha = .55)
plt.hist(countT['A'], 15, facecolor='b', label='A', alpha = .55)
plt.xlabel('Length of Longest Contiguos End Region')
plt.ylabel('Number of Origin RNAs')
plt.legend()
plt.show()
if __name__ == "__main__":
    import sys
    # bioLibCG.submitArgs presumably invokes countEnd with arguments taken
    # from sys.argv (project helper; behaviour not visible here — verify).
    bioLibCG.submitArgs(countEnd, sys.argv)
| [
"chrisgreer88@gmail.com"
] | chrisgreer88@gmail.com |
50736ba59f9669e816e9c0bd9a64716c3f93b03b | 79a484e91a8df432a0ded93806a1e8237df7c253 | /umibukela/migrations/0032_auto_20180511_1315.py | 0a99479bc8955d38f0508147fb04a451e1f73f09 | [
"MIT"
] | permissive | OpenUpSA/umibukela | 7ba14397ad543154d3a32ebfd84e89aa07f7011e | 34c1a29a429b88c2f574e9120cfe93ba524633da | refs/heads/master | 2023-07-26T19:45:12.531887 | 2023-07-10T15:53:07 | 2023-07-10T15:53:07 | 47,106,932 | 0 | 0 | MIT | 2023-02-02T01:36:59 | 2015-11-30T09:03:27 | Python | UTF-8 | Python | false | false | 1,702 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from umibukela.models import SurveyType
def poster_template(survey_type):
    """Return the poster template path for the given survey type.

    The template is chosen by case-insensitive keyword matching on the
    survey type's name; names that match no keyword fall back to the
    generic poster layout.
    """
    name = survey_type.name.lower()  # lower-case once instead of per branch
    template = 'print-materials/posters/'
    if 'paypoint' in name:
        template += 'paypoint_poster.html'
    elif 'health' in name:
        template += 'health_clinic_poster.html'
    elif 'service office' in name:
        template += 'service_office_poster.html'
    else:
        template += 'poster_layout.html'
    return template
def set_template_fields(apps, schema_editor):
    """Backfill poster_template/has_handout on pre-existing survey types.

    Uses the historical model from the migration app registry instead of
    importing SurveyType directly, so this migration keeps working even if
    the current model class diverges from the schema at this point in
    history (the documented RunPython practice).
    """
    SurveyType = apps.get_model('umibukela', 'SurveyType')
    for survey_type in SurveyType.objects.filter(id__lt=8):
        survey_type.poster_template = poster_template(survey_type)
        survey_type.has_handout = True
        survey_type.save()
class Migration(migrations.Migration):
    # Adds poster/handout metadata fields to SurveyType, tightens the
    # Survey.cycle relation, then backfills the new fields for existing rows.

    dependencies = [
        ('umibukela', '0031_remodel-cycle-survey-type-crs'),
    ]

    operations = [
        # New flag: whether this survey type has a printable handout.
        migrations.AddField(
            model_name='surveytype',
            name='has_handout',
            field=models.BooleanField(default=False),
        ),
        # New path to the poster template; blank means "no poster links".
        migrations.AddField(
            model_name='surveytype',
            name='poster_template',
            field=models.CharField(help_text=b"Path of template from the application root. If it's blank, poster links won't be generated for this survey type.", max_length=1000, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='survey',
            name='cycle',
            field=models.ForeignKey(related_name='surveys', to='umibukela.Cycle'),
        ),
        # Data migration: populate the two new fields for existing rows.
        migrations.RunPython(set_template_fields),
    ]
| [
"jbothma@gmail.com"
] | jbothma@gmail.com |
a308a1c8547c55fc2212b03afdbcc101c0af9b33 | 87220ff6f99aef088d121f3f9d81e36a35b7d112 | /pulp_rpm/src/pulp_rpm/extension/admin/structure.py | 80052e7a4898d821d8533eeab15b871d57ad54cb | [] | no_license | ehelms/pulp_rpm | 2905294287899c591e82b12fe3a71d7b98adf1c3 | cd9c9ae1a625072df82c054f3f198728b8770a7a | refs/heads/master | 2021-01-18T11:49:14.196118 | 2012-11-12T21:16:18 | 2012-11-12T21:16:18 | 6,671,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | # Copyright (c) 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
Contains methods related to the creation and navigation of the structure of the
Puppet branch of the CLI. This module should be used in place of the extensions
themselves creating or retrieving sections to centralize the organization of
the commands.
"""
from gettext import gettext as _
# -- constants ----------------------------------------------------------------
# Root section all RPM specific functionality will be located under
SECTION_ROOT = 'rpm'
SECTION_REPO = 'repo'
# Eventually there will be a consumer section
SECTION_COPY = 'copy'
SECTION_UPLOADS = 'uploads'
SECTION_REMOVE = 'remove'
SECTION_CONTENTS = 'content'
SECTION_SYNC = 'sync'
SECTION_SYNC_SCHEDULES = 'schedules'
SECTION_PUBLISH = 'publish'
SECTION_PUBLISH_SCHEDULES = 'schedules'
SECTION_EXPORT = 'export'
DESC_ROOT = _('manage RPM-related content and features')
DESC_REPO = _('repository lifecycle commands')
DESC_COPY = _('copies one or more content units between repositories')
DESC_UPLOADS = _('upload modules into a repository')
DESC_REMOVE = _('remove copied or uploaded modules from a repository')
DESC_CONTENTS = _('search the contents of a repository')
DESC_SYNC = _('run, schedule, or view the status of sync tasks')
DESC_SYNC_SCHEDULES = _('manage repository sync schedules')
DESC_PUBLISH = _('run, schedule, or view the status of publish tasks')
DESC_PUBLISH_SCHEDULES = _('manage repository publish schedules')
DESC_EXPORT = _('run or view the status of ISO export of a repository')
# -- creation -----------------------------------------------------------------
def ensure_root(cli):
    """
    Verifies that the root of RPM-related commands exists in the CLI,
    creating it from this module's constants when it does not.

    :param cli: CLI instance being configured
    :type  cli: pulp.client.extensions.core.PulpCli
    :return: the (possibly freshly created) root section
    """
    section = cli.find_section(SECTION_ROOT)
    if section is None:
        section = cli.create_section(SECTION_ROOT, DESC_ROOT)
    return section
def ensure_repo_structure(cli):
    """
    Makes sure the repository section and every one of its subsections are
    present in the CLI, creating any that are missing from this module's
    constants.

    :param cli: CLI instance being configured
    :type  cli: pulp.client.extensions.core.PulpCli
    :return: the repo section
    """
    root_section = ensure_root(cli)

    # The structure is entirely static, so an existing repo section implies
    # all of its subsections were created along with it; nothing more to do.
    repo_section = root_section.find_subsection(SECTION_REPO)
    if repo_section is not None:
        return repo_section

    repo_section = root_section.create_subsection(SECTION_REPO, DESC_REPO)

    # Second-tier sections directly under repo
    for name, description in (
        (SECTION_COPY, DESC_COPY),
        (SECTION_REMOVE, DESC_REMOVE),
        (SECTION_CONTENTS, DESC_CONTENTS),
        (SECTION_UPLOADS, DESC_UPLOADS),
        (SECTION_SYNC, DESC_SYNC),
        (SECTION_PUBLISH, DESC_PUBLISH),
        (SECTION_EXPORT, DESC_EXPORT),
    ):
        repo_section.create_subsection(name, description)

    # Third-tier schedule sections under sync and publish
    repo_sync_section(cli).create_subsection(SECTION_SYNC_SCHEDULES, DESC_SYNC_SCHEDULES)
    repo_publish_section(cli).create_subsection(SECTION_PUBLISH_SCHEDULES, DESC_PUBLISH_SCHEDULES)

    return repo_section
# -- section retrieval --------------------------------------------------------
def repo_section(cli):
    """Return the 'rpm repo' section of the CLI."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO)

def repo_copy_section(cli):
    """Return the 'rpm repo copy' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_COPY)

def repo_remove_section(cli):
    """Return the 'rpm repo remove' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_REMOVE)

def repo_uploads_section(cli):
    """Return the 'rpm repo uploads' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_UPLOADS)

def repo_contents_section(cli):
    """Return the 'rpm repo content' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_CONTENTS)

def repo_sync_section(cli):
    """Return the 'rpm repo sync' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_SYNC)

def repo_sync_schedules_section(cli):
    """Return the 'rpm repo sync schedules' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_SYNC, SECTION_SYNC_SCHEDULES)

def repo_publish_section(cli):
    """Return the 'rpm repo publish' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_PUBLISH)

def repo_export_section(cli):
    """Return the 'rpm repo export' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_EXPORT)

def repo_publish_schedules_section(cli):
    """Return the 'rpm repo publish schedules' section."""
    return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_PUBLISH, SECTION_PUBLISH_SCHEDULES)
# -- private ------------------------------------------------------------------
def _find_section(cli, *path):
    """
    Walks the nested sections of the CLI along the given path and returns
    the section it ends on.

    :param cli: CLI instance to search within
    :type  cli: pulp.client.extensions.core.PulpCli
    :param path: sequence of section names leading to the desired section
    :type  path: list of str
    :return: section instance that matches the path
    :rtype: pulp.client.extensions.core.PulpCliSection
    """
    current = cli.root_section
    for name in path:
        current = current.find_subsection(name)
    return current
| [
"mhrivnak@redhat.com"
] | mhrivnak@redhat.com |
6ded8a1c64af93c3e14df5f2f04e0f4cce78f83b | 3b4c2fa85a358648614c60d295cc9d77eb7e4c81 | /Chapter 6.py/pr no 1.py | 67d746383c61994e02b3ea7fb1e4dd0ed770ba7d | [] | no_license | abbhowmik/PYTHON-Course | 465e2e6c42b41f6dd731562c57d5c53c6ca2e711 | 223071569ce9b98d6725e2a33fb271ac0add6f49 | refs/heads/main | 2023-06-05T11:28:36.632080 | 2021-06-29T18:14:47 | 2021-06-29T18:14:47 | 381,456,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | num1 = int(input('Enter number 1: '))
num2 = int(input('Enter number 2: '))
num3 = int(input('Enter number 3: '))
num4 = int(input('Enter number 4: '))
if num1>num4:
f1 = num1
else:
f1 = num4
if num2>num3:
f2 = num2
else:
f2 = num3
if f1>f2:
print(f'The greatest number is {f1}')
else:
print(f'The greatest number is {f2}')
| [
"ashisbhowmikisp@gmail.com"
] | ashisbhowmikisp@gmail.com |
9fad1b4a730f3b06087dfddb4f8961f2ce8a302c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04029/s543214320.py | 9f67a34087f19acbf01862689423ec3c7abeb734 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from statistics import median
#import collections
#aa = collections.Counter(a) # list to list || .most_common(2)で最大の2個とりだせるお a[0][0]
from fractions import gcd
from itertools import combinations # (string,3) 3回
from collections import deque
from collections import defaultdict
import bisect
#
# d = m - k[i] - k[j]
# if kk[bisect.bisect_right(kk,d) - 1] == d:
#
#
#
# pythonで無理なときは、pypyでやると正解するかも!!
#
#
import sys
# Contest-template boilerplate: deep recursion limit and the common
# modulus constant; neither is actually used by this solution.
sys.setrecursionlimit(10000000)
mod = 10**9 + 7
def readInts():
    """Read one line of whitespace-separated integers from stdin."""
    return [int(token) for token in input().split()]
def main():
    """Read n and print 1 + 2 + ... + n (the n-th triangular number)."""
    n = int(input())
    answer = n * (n + 1) // 2
    print(answer)

if __name__ == '__main__':
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3991f67831bc1074ca35abcaf224771ba4a4e056 | fa79a9b5eaa8b7b3cef1941ad3700106c00e57b6 | /function/update/update_novel-info.py | 18e9956da3f52ac2b860e6fa0124e83592367a15 | [
"MIT"
] | permissive | txu2k8/novel-web-server | 2e3899cb77ca6ba5d4d4d65f767c4e0a6aa0df1a | 1b77d637a6229d916b724ca2735afc00c6175368 | refs/heads/main | 2023-01-13T13:30:36.249802 | 2020-11-26T08:35:41 | 2020-11-26T08:35:41 | 316,153,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,681 | py | '''
更新小说天涯网的所有小说基本信息,书名,类型,阅读数,作者等
'''
import datetime
import time

import pymysql
from pyquery import PyQuery as pq
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# NOTE(review): hard-coded Windows chromedriver path — adjust per machine.
browser = webdriver.Chrome(executable_path='D:\chromedriver_win32\chromedriver.exe')
wait = WebDriverWait(browser,30) # explicit wait, up to 30 seconds
# Pagination
def get_next_page(num):
    """Navigate the category listing to page ``num`` and return its HTML.

    Types the page number into the paginator input, submits it, waits for
    the matching page link to become highlighted, then returns the rendered
    page source. Returns None when the wait times out.
    """
    # Category listing to crawl; swap in one of the commented URLs below to
    # scrape a different category (counts are "pages done / total pages").
    url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=6'  # modern urban 31/37 (1)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=7'  # supernatural suspense 31/101 (2)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=1&cat_id=1'  # modern romance 31/118 (3)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=8'  # workplace/officialdom 28/27 (4) removed by the site
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=1&cat_id=5'  # romantic youth 28/74 (5)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=1&cat_id=2'  # historical romance 15/14 (6)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=1&cat_id=4'  # female suspense 6/5 (7)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=10'  # history/military 31/62 (8)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=9'  # fantasy 31/111 (9)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=24'  # wuxia/xianxia 31/63 (10)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=25'  # sci-fi 6/5 (11)
    browser.get(url)
    try:
        print('\n\n翻到第%d页' % num)
        # `box` rather than `input` — the original shadowed the builtin.
        box = wait.until(EC.presence_of_element_located((By.CLASS_NAME, "page")))
        box.clear()
        box.send_keys(num)
        # Original comment said "enter key" but ESCAPE is actually sent —
        # kept as-is since the site apparently accepts it; TODO confirm.
        box.send_keys(Keys.ESCAPE)
        # Wait until the requested page number is the highlighted one.
        wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, "span.TY_view_page > a.on"), str(num)))
        return browser.page_source
    except TimeoutException as e:
        # Fix: selenium waits raise TimeoutException, not the builtin
        # TimeoutError the original caught (which could never match).
        print(e)
# Parse one listing page into per-novel records
def parse_with_pq(html):
    """Extract per-novel records from one category listing page.

    Returns a list of dicts holding the novel name, author, read count,
    fixed type tag, status, book id and the site's last-update time.
    """
    doc = pq(html)
    records = []
    for item in doc.find('#list-books').children('.clearfix').items():
        # Field order matters: .remove('.gray') mutates the parsed DOM, so
        # the extraction sequence mirrors the original exactly.
        record = {
            'novel_name': item.find('.mbody .blue').text(),
            'author': item.find('.mhead').remove('.blue').remove('.gray').text(),
            'read_num': int(item.find('.clearfix').children().eq(1).remove('.gray').text()),
            'novel_type': str(1),
            'status': item.find('.clearfix').children().eq(5).remove('.gray').text(),
            'id': item.find('.hide .btn-r').attr('_bid'),
            'web_update_time': item.find('.clearfix').children().eq(4).remove('.gray').text(),
        }
        records.append(record)
    return records
# Stamp each record with the crawl time (spider_time)
def insert_data(datas):
    """Add a spider_time field to every record and return the same list."""
    for record in datas:
        record['spider_time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return datas
# Persist the records to the database
def save_to_MYSQL(datas):
    """Upsert the scraped novel records into the novel_info table.

    Each record is inserted; on duplicate key the mutable columns (read
    count, status, crawl/update times, type) are refreshed. Failed rows are
    rolled back individually so one bad record does not abort the batch.
    Connection details are hard-coded for the local scraping box.
    """
    db = pymysql.connect(host='localhost', user='root', password='test123456', port=3306, db='spiders')
    try:
        cursor = db.cursor()
        sql = "INSERT INTO novel_info(novel_name,author,read_num,novel_type,status,id,spider_time,web_update_time) " \
              "values(%s,%s,%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE read_num=values(read_num),status=values(status)," \
              "spider_time=values(spider_time),web_update_time=values(web_update_time),novel_type=values(novel_type)"
        for data in datas:
            try:
                cursor.execute(sql, (data['novel_name'],data['author'],data['read_num'],data['novel_type'],data['status'],data['id'],data['spider_time'],data['web_update_time']))
                db.commit()
                print('插入/更新数据成功',data['novel_name'])
            except Exception as e:
                print('插入数据失败!!',e)
                db.rollback()
    finally:
        # Fix: the connection was previously leaked on every call.
        db.close()
if __name__ == '__main__':
    # Crawl listing pages 1..30: fetch, parse, timestamp, persist.
    for page in range(1, 31):
        page_html = get_next_page(page)
        records = insert_data(parse_with_pq(page_html))
        save_to_MYSQL(records)
| [
"tao.xu2008@outlook.com"
] | tao.xu2008@outlook.com |
0fb6a382cc75c4ec7b64a0082fa4369bdb3697f2 | a8769709aeb7299fa3757f0e7bba5c617eb8cfe3 | /lesson-3/k8s/lib/python2.7/site-packages/kubernetes/client/models/v1beta1_http_ingress_rule_value.py | 50ef1b8838ade46c5a7092ec4b5f4fd5d04b3e71 | [
"Apache-2.0"
] | permissive | simox-83/workshop-k8s | 2ac5e8b282bb7c3337acc726a7d972717bf649cc | 04cb18e8b5925a3cfd84ca316952a6cb64960b31 | refs/heads/master | 2020-03-31T20:52:21.421995 | 2018-10-11T14:43:08 | 2018-10-11T14:43:08 | 152,558,678 | 0 | 0 | Apache-2.0 | 2018-10-11T08:37:20 | 2018-10-11T08:37:20 | null | UTF-8 | Python | false | false | 3,338 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1HTTPIngressRuleValue(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type, used by the payload (de)serializer.
    swagger_types = {
        'paths': 'list[V1beta1HTTPIngressPath]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'paths': 'paths'
    }

    def __init__(self, paths=None):
        """
        V1beta1HTTPIngressRuleValue - a model defined in Swagger
        """

        self._paths = None
        self.discriminator = None

        # Assign through the property so its required-field validation runs.
        self.paths = paths

    @property
    def paths(self):
        """
        Gets the paths of this V1beta1HTTPIngressRuleValue.
        A collection of paths that map requests to backends.

        :return: The paths of this V1beta1HTTPIngressRuleValue.
        :rtype: list[V1beta1HTTPIngressPath]
        """
        return self._paths

    @paths.setter
    def paths(self, paths):
        """
        Sets the paths of this V1beta1HTTPIngressRuleValue.
        A collection of paths that map requests to backends.

        :param paths: The paths of this V1beta1HTTPIngressRuleValue.
        :type: list[V1beta1HTTPIngressPath]
        """
        # paths is a required field in the API schema.
        if paths is None:
            raise ValueError("Invalid value for `paths`, must not be `None`")

        self._paths = paths

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested models, lists of models, and dicts of
        # models; plain values are copied through unchanged.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1beta1HTTPIngressRuleValue):
            return False

        # Attribute-wise comparison of the two models.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
"simone.dandreta@concur.com"
] | simone.dandreta@concur.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.