blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40984c2fb2d800dd58b439a634f44d0ceae530a0 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-eihealth/huaweicloudsdkeihealth/v1/model/list_message_statistics_response.py | 1e7816007f8524b86b1888cb87a5c5deb1613cd5 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,215 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListMessageStatisticsResponse(SdkResponse):
    """Response model for the ``ListMessageStatistics`` API (auto-generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'count': 'int'
    }
    attribute_map = {
        'count': 'count'
    }
    def __init__(self, count=None):
        """ListMessageStatisticsResponse

        The model defined in huaweicloud sdk

        :param count: Total number of all messages.
        :type count: int
        """
        super(ListMessageStatisticsResponse, self).__init__()
        self._count = None
        self.discriminator = None
        # Only assign when a value was supplied so the backing field
        # stays None (i.e. "unset") otherwise.
        if count is not None:
            self.count = count
    @property
    def count(self):
        """Gets the count of this ListMessageStatisticsResponse.

        Total number of all messages.

        :return: The count of this ListMessageStatisticsResponse.
        :rtype: int
        """
        return self._count
    @count.setter
    def count(self, count):
        """Sets the count of this ListMessageStatisticsResponse.

        Total number of all messages.

        :param count: The count of this ListMessageStatisticsResponse.
        :type count: int
        """
        self._count = count
    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models are serialized recursively through their own
        ``to_dict``; attributes listed in ``sensitive_list`` are masked
        as ``"****"``.
        """
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model (JSON)."""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 as the default encoding so the
            # JSON dump of non-ASCII text does not raise.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListMessageStatisticsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
d3f5f4ebf463dfdf6dea3516eec80760478a8772 | 5a2a4a4d9402b167039b3fc0c0ace186b50aa099 | /bonus.py | 03987745456039107f21411f95cf990ba6a032d1 | [] | no_license | uswdnh47/608-mod2 | d7b2bc6a98a823ce9e5f81b6c71f932f2d2777de | 35616e66d945c64d0295938b6a308fb5972abe4a | refs/heads/main | 2023-07-13T09:26:05.933462 | 2021-09-01T02:38:03 | 2021-09-01T02:38:03 | 401,530,012 | 1 | 0 | null | 2021-09-01T02:38:04 | 2021-08-31T01:00:28 | Python | UTF-8 | Python | false | false | 1,405 | py | import statistics as stats
# Claims summary statistics over a fixed set of loss amounts.
#
# Fixes over the original:
#  * the grand total was assigned to a variable named ``sum``, shadowing
#    the built-in ``sum()`` -- renamed to ``total``;
#  * redundant parentheses removed;
#  * typo "with and median of" corrected to "with a median of" in the
#    final summary line.
values = [2000001, 2002001,2004001,2006001,2008001,2010001,2014001,2014001,2016001,2018001,2020001,2022001,2024001,2026001,2028001,2030001,2032001,2034001,2036001,2038001,2040001,2042001,2044001,2046001,2048001,2050001,2052001,2054001,2056001,2058001,2060001,2062001,2064001,2066001,2068001,2070001,2072001,2074001,2076001,2078001,2080001,2082001,2084001,2086001,2088001,2090001,2092001,2094001,2096001,2098001,2100001,2102001,2104001,2106001,2108001,2110001,2112001,2114001,2116001,2118001,2120001,2122001,2124001,2126001,2128001,2130001,2132001,2134001,2136001,2138001,2140001,2142001,2144001,2146001,2148001,2150001,2152001,2154001,2156001,2158001,2160001,2162001,2164001,2166001,2168001,2170001,2172001,2174001,2176001,2178001,2180001,2182001,2184001,2186001,2188001,2190001,2192001,2194001,2196001,2198001,2200001,2202001,2204001,2206001,2208001,2210001,2212001,2214001,2216001,2218001,2220001,2222001,2224001,2226001,2228001,2230001,2232001,2234001,2236001,2238001,2240001,2242001,2244001,2246001,2248001]

count = len(values)            # number of claims
print(count)
total = sum(values)            # total severity (grand total of all losses)
print(total)
mean = stats.mean(values)      # average loss per claim
print(mean)
median = stats.median(values)
print(median)
mode = stats.mode(values)      # most frequent loss amount
print(mode)
print("The total claims count was", count, "with an average loss of", mean,
      ";the total severity was", total, "with a median of", median,
      "and mode", mode, "!")
| [
"noreply@github.com"
] | noreply@github.com |
cdc75150fd9e9b0bb84009d08bf0c00bb9a0f43b | 05ac6b13a380f1b0ed0676afaae9f8467b86b4a9 | /livegraph.py | d4bb9ed1fad2e85763f54554907e3f0591ba2853 | [
"MIT"
] | permissive | UncleEngineer/LiveGraph | fe6177473dca2bb16815dfb0f65dd3084b72c10e | 825dc11663fe3dbbfde6a722bf9ec35adac1c7f2 | refs/heads/main | 2023-02-13T09:19:44.307744 | 2021-01-25T16:28:16 | 2021-01-25T16:28:16 | 332,809,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | """
===============
Embedding in Tk
===============
"""
from tkinter import *
from tkinter import ttk
import random
import tkinter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
GUI = Tk()
GUI.geometry('600x700')
GUI.wm_title("AutoUpdate Graph")
# Container frame that hosts the embedded matplotlib canvas.
MF1 = Frame(GUI)
MF1.pack()
# toolbar = NavigationToolbar2Tk(canvas, GUI)
# toolbar.update()
# canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
#canvas.get_tk_widget().place(x=20,y=20)
#toolbar.pack_forget()
def UpdateData():
    """Redraw the embedded figure with 5 fresh random values.

    Destroys the previous canvas widget (if one exists), builds a new
    Figure/FigureCanvasTkAgg pair inside ``MF1`` and re-schedules itself
    every 5000 ms via ``Frame.after``, producing a live-updating graph.
    Uses module-level globals (``y``, ``canvas``, ``cv``) to keep the
    current plot state between calls.
    """
    global y
    global canvas
    global cv
    try:
        cv.destroy()
    except:
        # First invocation: ``cv`` is not defined yet.
        # NOTE(review): the bare except also hides every other error --
        # consider narrowing to ``except (NameError, tkinter.TclError)``.
        pass
    # remove line
    # create graph
    fig = Figure(figsize=(6, 5), dpi=100)
    t = [0,1,2,3,4]
    y = []
    for i in range(len(t)):
        d = random.randint(30,70)
        y.append(d)
    label = ['A','B','C','D','E']  # NOTE(review): currently unused
    graph = fig.add_subplot(111)
    graph.plot(t, y)
    graph.axis([None, None, 0, 100])  # fix the y-range at [0, 100]
    canvas = FigureCanvasTkAgg(fig, master=MF1)  # A tk.DrawingArea.
    canvas.draw()
    canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
    # NOTE(review): this packs the same widget a second time (get_tk_widget
    # returns the widget packed above); harmless but redundant.
    cv = canvas.get_tk_widget()
    cv.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
    MF1.after(5000,UpdateData)
#button = ttk.Button(master=GUI, text="Update Data", command=UpdateData)
#button.pack(ipadx=20 , ipady=10 ,pady=20)
# Kick off the first draw, then hand control to the Tk event loop.
UpdateData()
GUI.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
b664bb20187e7f0cdf540e04e82e5f1ca252f334 | 8e93a9c97b928937bd98f76eff45306bf7228e1a | /createWindows.py | eb2b267f273835937d2a6cc82cb3f646d837959e | [] | no_license | Krenil-Sapariya/Python-GUI-basics | 43de40dffb91ed31435b125637100026f6709713 | 6b14369c8b0574b4ad7e38162bbaf0df5be18cd7 | refs/heads/main | 2023-08-13T06:43:21.002947 | 2021-09-29T02:59:15 | 2021-09-29T02:59:15 | 410,449,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import tkinter
# Tkinter basics demo: one root window plus one Toplevel child window.
root = tkinter.Tk()
root.title('Window basics') # set the title-bar text
# set icon of window
# root.iconbitmap('filename.jpg')
root.geometry('400x400') # initial size of window (width x height)
root.resizable(0,0) # (0, 0) disables resizing in both directions
root.config(bg='blue') # set bg color
# Secondary window; a Toplevel must be created after the Tk root exists.
root1 = tkinter.Toplevel()
root1.title('another window')
root1.config(bg='red')
root1.geometry('200x200+500+50') # +500+50 places the window at x=500, y=50 on screen
# Run the root window's main event loop (blocks until the window closes).
root.mainloop()
"noreply@github.com"
] | noreply@github.com |
7083f94716d817a0f64bfe154b86ee5261c2109e | e17b0ad0ebeb361e5565eb3d12e717f296a7b878 | /SheetAPI/config_example.py | a3fa30cc58a3bb13f0e1eee83397cd254f4f0c2e | [] | no_license | easy-rpg/SheetAPI | 94ea732083c3a7a82577e59e3a882a878772d6eb | 5542197f8388eed761a15a79c6ccca4fd481ccba | refs/heads/master | 2022-12-11T17:01:16.130002 | 2018-07-05T00:26:48 | 2018-07-05T00:26:48 | 131,898,341 | 1 | 0 | null | 2022-11-22T02:30:09 | 2018-05-02T19:44:34 | Python | UTF-8 | Python | false | false | 231 | py | # DB Heroku
# Example settings: copy to a real config module and fill in credentials.
# Heroku alternative -- read the connection from DATABASE_URL instead:
# import dj_database_url
# DATABASES = {'default': dj_database_url.config(conn_max_age=600, ssl_require=True)}
# DB LOCAL
DB_HOST = "localhost"
DB_PORT = ""  # empty string -> client library's default port
DB_NAME = "DB_NAME"
DB_USER = "DB_USER"
DB_PASSWORD = ""  # never commit a real password to version control
| [
"rodrigondec@gmail.com"
] | rodrigondec@gmail.com |
aaa428f3cb53f1e23a659033c3fd740d63e515e0 | 50a956eef8afaab3dc9313f87968341942175f4b | /sdm/test/atest-sdm-lineuler-acousticvorticityentropypulse-2d.py | 6e7b04046a1dacbae99f7c3900a5ee441553f985 | [] | no_license | supermangithu/dcm | ad7a0abad7742f1fc6e93cfef5b1859e4bb632bd | 221f99ce52f9b2c822fe56c0557f366b4e16bc47 | refs/heads/master | 2022-06-02T15:45:38.706892 | 2014-02-23T11:54:11 | 2014-02-23T12:48:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,943 | py | import coolfluid as cf
import math
### Create new model specialized for SD (spectral difference).
### Script: 2-D linearized-Euler acoustic/vorticity/entropy pulse test case
### driven through the coolfluid (cf3) Python API.
model = cf.root.create_component('accousticpulse_2d','cf3.dcm.Model');
### Load the mesh
mesh = model.domain.load_mesh(file = cf.URI('../../../resources/square100-quad-p2-50x50.msh'), name = 'square')
model.build_faces();
### Add the Partial Differential Equations to solve
lineuler = model.add_pde(name='lineuler',type='cf3.dcm.equations.lineuler.LinEuler2D',
                         shape_function='cf3.dcm.core.LegendreGaussEndP3')
lineuler.gamma = 1.
# Uniform background state: mean velocity U0, density rho0, pressure p0.
U0 = [0.5,0]
rho0 = 1
p0 = 1
lineuler.add_term( name='rhs', type='cf3.sdm.br2.lineuler_RightHandSide2D' )
### Add BC
lineuler.add_bc( name='farfield',
                 type='cf3.dcm.equations.lineuler.BCFarfield2D',
                 regions=[ mesh.topology.left, mesh.topology.bottom, mesh.topology.top ] )
lineuler.add_bc( name='outlet',
                 type='cf3.dcm.equations.lineuler.BCExtrapolation2D',
                 regions=[ mesh.topology.right ] )
### Initialize the solution: a Gaussian acoustic pulse at the origin plus
### an entropy/vorticity pulse centered at x=67 (analytic expressions).
model.tools.init_field.init_field(
    field=lineuler.solution,
    functions=[
        ' exp( -log(2.)*((x)^2+y^2)/9. ) + 0.1*exp( -log(2.)*((x-67.)^2 + y^2)/25. )',
        ' 1.*0.04*y *exp( -log(2.)*((x-67.)^2+y^2)/25. )',
        '-1.*0.04*(x-67.)*exp( -log(2.)*((x-67.)^2+y^2)/25. )',
        '1.* exp( -log(2.)*((x)^2+y^2)/9. )' ] )
model.tools.init_field.init_field(
    field=lineuler.background,
    functions=[ str(rho0), str(U0[0]), str(U0[1]), str(p0) ] )
model.tools.init_field.init_field(
    field=lineuler.bdry_background,
    functions=[ str(rho0), str(U0[0]), str(U0[1]), str(p0) ] )
### Create the Solver for the Partial Differential Equations
solver = model.add_solver(pde=lineuler,name='optim_erk',solver='cf3.sdm.solver.optim_erkls.ERK_18_4')
solver.children.time_step_computer.cfl = 1.5*1.17418695241
### Time Stepping -- write the solution to a Gmsh file after every step.
model.time_stepping.end_time = 1 #90
model.time_stepping.time_step = 1 #10
while not model.time_stepping.properties.finished :
    model.time_stepping.do_step()
    mesh.write_mesh(file=cf.URI('solution'+str(model.time_stepping.step)+'.msh'),fields=[lineuler.solution.uri()])
## function describing entropy and vortex without acoustic pulse
#entropy_vortex = [
# '0.1*exp( -log(2.)*((x-67)^2 + y^2)/25. )',
# ' 0.04*y *exp( -log(2.)*((x-67)^2+y^2)/25. )',
# '-0.04*(x-67)*exp( -log(2.)*((x-67)^2+y^2)/25. )',
# '0'
#]
## function describing acoustic pulse only
#acoustic = [
# 'exp( -log(2.)*((x)^2+y^2)/9. )',
# ' 0',
# '-0',
# 'exp( -log(2.)*((x)^2+y^2)/9. )'
#]
#######################################
# POST-PROCESSING
#######################################
# Project the solution onto characteristic variables along normal (1, 0),
# using the background speed of sound c0 = sqrt(gamma*p0/rho0).
compute_char = model.tools.create_component('compute_characteristics','cf3.dcm.equations.lineuler.ComputeCharacteristicVariablesUniform2D')
compute_char.options().set('normal',[1.,0.])
compute_char.options().set('field',lineuler.solution)
compute_char.options().set('c0',math.sqrt(lineuler.gamma*p0/rho0))
compute_char.execute()
########################
# OUTPUT
########################
fields = [
    lineuler.fields.solution.uri(),
    lineuler.fields.char.uri(),
    lineuler.fields.gradn_char.uri(),
]
mesh.write_mesh(file=cf.URI('file:lineuler-acousticvorticity-2d.msh'),fields=fields)
# Tecplot
#########
# Tecplot cannot write high-order meshes. A finer P1 mesh is generated,
# and fields are interpolated to the P1-mesh. The mesh is finer to visualize
# the high-order solution better.
mesh_generator = model.tools.create_component("mesh_generator","cf3.mesh.SimpleMeshGenerator")
# Generate visualization mesh
visualization_mesh = model.domain.create_component('visualization_mesh','cf3.mesh.Mesh')
mesh_generator.options().set("mesh",visualization_mesh.uri())
mesh_generator.options().set("nb_cells",[400,400])
mesh_generator.options().set("lengths",[200,200])
mesh_generator.options().set("offsets",[-100,-100])
mesh_generator.execute()
# Interpolate fields using solution polynomial
visualization_mesh.geometry.create_field(name='solution', variables='rho[1],rho0U[2],p[1]')
#visualization_mesh.get_child('geometry').create_field(name='char', variables='S[1],Shear[1],Aplus[1],Amin[1],A[1],omega[1]')
interpolator = model.tools.create_component('interpolator','cf3.mesh.ShapeFunctionInterpolator')
interpolator.interpolate(source=lineuler.fields.solution.uri(),
                         target=visualization_mesh.geometry.solution.uri())
#interpolator.interpolate(source=mesh.access_component("solution_space/char").uri(),
#                         target=visualization_mesh.access_component("geometry/char").uri())
fields = [
    visualization_mesh.geometry.solution.uri(),
    #visualization_mesh.access_component('geometry/char').uri()
]
# Write visualization mesh
visualization_mesh.write_mesh(file=cf.URI('file:lineuler-acousticvorticity-2d.plt'),fields=fields)
#####################
# Probe line y=0
#####################
# Generate 1D line mesh, for now only y=0 can be probed as the line has 1D coordinates only
probe_mesh = model.domain.create_component('probe_mesh','cf3.mesh.Mesh')
mesh_generator.options().set("mesh",probe_mesh.uri())
mesh_generator.options().set("nb_cells",[1000])
mesh_generator.options().set("lengths",[200])
mesh_generator.options().set("offsets",[-100])
mesh_generator.execute()
# Interpolate fields
probe_mesh.get_child('geometry').create_field(name='solution', variables='rho[1],rho0U[2],p[1]')
#probe_mesh.get_child('geometry').create_field(name='char', variables='S[1],Shear[1],Aplus[1],Amin[1],A[1],omega[1]')
interpolator.interpolate(source=lineuler.fields.solution.uri(),
                         target=probe_mesh.geometry.solution.uri())
#interpolator.interpolate(source=mesh.access_component("solution_space/char").uri(),
#                         target=probe_mesh.access_component("geometry/char").uri())
fields = [
    probe_mesh.geometry.solution.uri(),
    #probe_mesh.access_component('geometry/char').uri()
]
# Write probe mesh
probe_mesh.write_mesh(file=cf.URI('file:probe_liney0.plt'),fields=fields)
| [
"wdeconinck@me.com"
] | wdeconinck@me.com |
3b20f19e109bf8ecb64d75d8dbf9d358e61b7f30 | 9d891ee19c0a6ebec7285b10f6ec3ff8695fef81 | /wonder_stats/tests/test_consumers.py | c1bae7b6e3c63fe07fe7d195a8958395efae1bde | [] | no_license | wojhan/wonder-stats | abf2c92a4efd9997032b2ad3f8891b407c8120c4 | 9af9a4a4220edac0b9f87bc7c9354253bf1ed870 | refs/heads/master | 2022-12-01T09:00:22.676741 | 2020-08-15T17:46:12 | 2020-08-15T17:46:51 | 287,737,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import pytest
from wonder_stats import consumers
@pytest.mark.parametrize('message_type, body', [
    ('test', {'k1': 'v1', 'k2': 'v2'}, )
])
def test_websocket_message_initialization(message_type, body):
    """Every keyword passed to WebSocketMessage becomes an attribute with that value."""
    message = consumers.WebSocketMessage(message_type, **body)
    for body_key, body_value in body.items():
        assert getattr(message, body_key) == body_value
| [
"w.haniewski@gmail.com"
] | w.haniewski@gmail.com |
f193f7ef7cff8bde1f26661a8767a73db6db85f4 | 3e05978cc48a9aaaa31fedbd182f8f55901a0f12 | /Data-Structures/Dynamic Programming/Patterns/Pattern-1__01Knapscak/min_subset_sum_diff.py | 8ce4620b3bd5ab011dcc030f9faae83af7d48d71 | [] | no_license | Akashtyagi/DataStructure | 86f88e13f3dceb7bc69bd703c78a8fc05b37de46 | 8af84bd07dc7e1d73ead813ad8632ed8112166cb | refs/heads/master | 2022-12-19T19:04:47.450299 | 2020-10-02T12:12:48 | 2020-10-02T12:12:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 7 00:34:21 2020
@author: AkashTyagi
"""
def min_subset_sum_diff(nums):
    """Return the minimum possible difference between the sums of the two
    halves of any partition of ``nums`` into two complementary subsets.

    Classic partition DP: find the largest achievable subset sum that does
    not exceed half of the total; the answer is ``total - 2 * best``.
    """
    total = sum(nums)
    half = total // 2
    # reachable[s] is True when some subset of the items seen so far sums to s.
    reachable = [False] * (half + 1)
    reachable[0] = True  # the empty subset
    for value in nums:
        # Iterate downwards so each item is used at most once.
        for s in range(half, value - 1, -1):
            if reachable[s - value]:
                reachable[s] = True
    best = max(s for s in range(half + 1) if reachable[s])
    return total - 2 * best
# Quick demo: the best split of [1, 11, 7] is {1, 7} vs {11} -> difference 3.
nums = [1,11,7]
print("Minimum difference between subset possible is: ",min_subset_sum_diff(nums))
| [
"akashtyagi@qainfotech.com"
] | akashtyagi@qainfotech.com |
4652f613145fb60655bd9d03b2e0216af7a37090 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/retail/v2beta/retail-v2beta-py/google/cloud/retail_v2beta/types/product.py | e4a0de410942b7658ddae38bbd0a119804277476 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,576 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.retail_v2beta.types import common
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
# proto-plus module descriptor: registers the messages listed in the
# manifest below under the 'google.cloud.retail.v2beta' proto package.
__protobuf__ = proto.module(
    package='google.cloud.retail.v2beta',
    manifest={
        'Product',
    },
)
class Product(proto.Message):
    r"""Product captures all metadata information of items to be
    recommended or searched.

    NOTE: machine-generated proto-plus model (googleapis-gen). The field
    numbers below are part of the wire format -- do not renumber them.

    Attributes:
        name (str):
            Immutable. Full resource name of the product, such as
            "projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id".
            The branch ID must be "default_branch".
        id (str):
            Immutable. [Product][google.cloud.retail.v2beta.Product]
            identifier, which is the final component of
            [name][google.cloud.retail.v2beta.Product.name]. For
            example, this field is "id_1", if
            [name][google.cloud.retail.v2beta.Product.name] is
            "projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/id_1".
            This field must be a UTF-8 encoded string with a length
            limit of 128 characters. Otherwise, an INVALID_ARGUMENT
            error is returned.
            Google Merchant Center property
            `id <https://support.google.com/merchants/answer/6324405>`__.
            Schema.org Property
            `Product.sku <https://schema.org/sku>`__.
        type_ (google.cloud.retail_v2beta.types.Product.Type):
            Immutable. The type of the product. This
            field is output-only.
        primary_product_id (str):
            Variant group identifier. Must be an
            [id][google.cloud.retail.v2beta.Product.id], with the same
            parent branch with this product. Otherwise, an error is
            thrown.
            For
            [Type.PRIMARY][google.cloud.retail.v2beta.Product.Type.PRIMARY]
            [Product][google.cloud.retail.v2beta.Product]s, this field
            can only be empty or set to the same value as
            [id][google.cloud.retail.v2beta.Product.id].
            For VARIANT [Product][google.cloud.retail.v2beta.Product]s,
            this field cannot be empty. A maximum of 2,000 products are
            allowed to share the same
            [Type.PRIMARY][google.cloud.retail.v2beta.Product.Type.PRIMARY]
            [Product][google.cloud.retail.v2beta.Product]. Otherwise, an
            INVALID_ARGUMENT error is returned.
            Google Merchant Center Property
            `item_group_id <https://support.google.com/merchants/answer/6324507>`__.
            Schema.org Property
            `Product.inProductGroupWithID <https://schema.org/inProductGroupWithID>`__.
            This field must be enabled before it can be used. `Learn
            more </recommendations-ai/docs/catalog#item-group-id>`__.
        categories (Sequence[str]):
            Product categories. This field is repeated for supporting
            one product belonging to several parallel categories.
            Strongly recommended using the full path for better search /
            recommendation quality.
            To represent full path of category, use '>' sign to separate
            different hierarchies. If '>' is part of the category name,
            please replace it with other character(s).
            For example, if a shoes product belongs to both ["Shoes &
            Accessories" -> "Shoes"] and ["Sports & Fitness" ->
            "Athletic Clothing" -> "Shoes"], it could be represented as:
            ::
               "categories": [
                 "Shoes & Accessories > Shoes",
                 "Sports & Fitness > Athletic Clothing > Shoes"
               ]
            Must be set for
            [Type.PRIMARY][google.cloud.retail.v2beta.Product.Type.PRIMARY]
            [Product][google.cloud.retail.v2beta.Product] otherwise an
            INVALID_ARGUMENT error is returned.
            At most 250 values are allowed per
            [Product][google.cloud.retail.v2beta.Product]. Empty values
            are not allowed. Each value must be a UTF-8 encoded string
            with a length limit of 5,000 characters. Otherwise, an
            INVALID_ARGUMENT error is returned.
            Google Merchant Center property
            `google_product_category <https://support.google.com/merchants/answer/6324436>`__.
            Schema.org property [Product.category]
            (https://schema.org/category).
        title (str):
            Required. Product title.
            This field must be a UTF-8 encoded string with a length
            limit of 128 characters. Otherwise, an INVALID_ARGUMENT
            error is returned.
            Google Merchant Center property
            `title <https://support.google.com/merchants/answer/6324415>`__.
            Schema.org property
            `Product.name <https://schema.org/name>`__.
        description (str):
            Product description.
            This field must be a UTF-8 encoded string with a length
            limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
            error is returned.
            Google Merchant Center property
            `description <https://support.google.com/merchants/answer/6324468>`__.
            schema.org property
            `Product.description <https://schema.org/description>`__.
        attributes (Sequence[google.cloud.retail_v2beta.types.Product.AttributesEntry]):
            Highly encouraged. Extra product attributes to be included.
            For example, for products, this could include the store
            name, vendor, style, color, etc. These are very strong
            signals for recommendation model, thus we highly recommend
            providing the attributes here.
            Features that can take on one of a limited number of
            possible values. Two types of features can be set are:
            Textual features. some examples would be the brand/maker of
            a product, or country of a customer. Numerical features.
            Some examples would be the height/weight of a product, or
            age of a customer.
            For example:
            ``{ "vendor": {"text": ["vendor123", "vendor456"]}, "lengths_cm": {"numbers":[2.3, 15.4]}, "heights_cm": {"numbers":[8.1, 6.4]} }``.
            A maximum of 150 attributes are allowed. Otherwise, an
            INVALID_ARGUMENT error is returned.
            The key must be a UTF-8 encoded string with a length limit
            of 5,000 characters. Otherwise, an INVALID_ARGUMENT error is
            returned.
        tags (Sequence[str]):
            Custom tags associated with the product.
            At most 250 values are allowed per
            [Product][google.cloud.retail.v2beta.Product]. This value
            must be a UTF-8 encoded string with a length limit of 1,000
            characters. Otherwise, an INVALID_ARGUMENT error is
            returned.
            This tag can be used for filtering recommendation results by
            passing the tag as part of the
            [PredictRequest.filter][google.cloud.retail.v2beta.PredictRequest.filter].
            Google Merchant Center property
            `custom_label_0–4 <https://support.google.com/merchants/answer/6324473>`__.
        price_info (google.cloud.retail_v2beta.types.PriceInfo):
            Product price and cost information.
            Google Merchant Center property
            `price <https://support.google.com/merchants/answer/6324371>`__.
        available_time (google.protobuf.timestamp_pb2.Timestamp):
            The timestamp when this
            [Product][google.cloud.retail.v2beta.Product] becomes
            available recommendation and search.
        availability (google.cloud.retail_v2beta.types.Product.Availability):
            The online availability of the
            [Product][google.cloud.retail.v2beta.Product]. Default to
            [Availability.IN_STOCK][google.cloud.retail.v2beta.Product.Availability.IN_STOCK].
            Google Merchant Center Property
            `availability <https://support.google.com/merchants/answer/6324448>`__.
            Schema.org Property
            `Offer.availability <https://schema.org/availability>`__.
        available_quantity (google.protobuf.wrappers_pb2.Int32Value):
            The available quantity of the item.
        uri (str):
            Canonical URL directly linking to the product detail page.
            This field must be a UTF-8 encoded string with a length
            limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
            error is returned.
            Google Merchant Center property
            `link <https://support.google.com/merchants/answer/6324416>`__.
            Schema.org property `Offer.url <https://schema.org/url>`__.
        images (Sequence[google.cloud.retail_v2beta.types.Image]):
            Product images for the product.
            A maximum of 300 images are allowed.
            Google Merchant Center property
            `image_link <https://support.google.com/merchants/answer/6324350>`__.
            Schema.org property
            `Product.image <https://schema.org/image>`__.
    """
    class Type(proto.Enum):
        r"""The type of this product."""
        TYPE_UNSPECIFIED = 0
        PRIMARY = 1
        VARIANT = 2
        COLLECTION = 3
    class Availability(proto.Enum):
        r"""Product availability. If this field is unspecified, the
        product is assumed to be in stock.
        """
        AVAILABILITY_UNSPECIFIED = 0
        IN_STOCK = 1
        OUT_OF_STOCK = 2
        PREORDER = 3
        BACKORDER = 4
    # Scalar and message fields; numbers mirror the Product proto schema
    # (gaps such as 5-6 and 15-17 are reserved/unused in the proto).
    name = proto.Field(proto.STRING, number=1)
    id = proto.Field(proto.STRING, number=2)
    type_ = proto.Field(proto.ENUM, number=3,
        enum=Type,
    )
    primary_product_id = proto.Field(proto.STRING, number=4)
    categories = proto.RepeatedField(proto.STRING, number=7)
    title = proto.Field(proto.STRING, number=8)
    description = proto.Field(proto.STRING, number=10)
    attributes = proto.MapField(proto.STRING, proto.MESSAGE, number=12,
        message=common.CustomAttribute,
    )
    tags = proto.RepeatedField(proto.STRING, number=13)
    price_info = proto.Field(proto.MESSAGE, number=14,
        message=common.PriceInfo,
    )
    available_time = proto.Field(proto.MESSAGE, number=18,
        message=timestamp.Timestamp,
    )
    availability = proto.Field(proto.ENUM, number=19,
        enum=Availability,
    )
    available_quantity = proto.Field(proto.MESSAGE, number=20,
        message=wrappers.Int32Value,
    )
    uri = proto.Field(proto.STRING, number=22)
    images = proto.RepeatedField(proto.MESSAGE, number=23,
        message=common.Image,
    )
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
cd4b5d06ac6645f6260588192fe3ce2be88410b7 | 59bd9c968a3a31a73d17f252fe716a3eacdf7f4f | /portfolio/Python/scrapy/seapets/ebay_spider.py | 60ab55266368702543f063870e4045f0adfb606e | [
"Apache-2.0"
] | permissive | 0--key/lib | 113ff1e9cf75e446fa50eb065bc3bc36c090d636 | a619938ea523e96ab9e676ace51f5a129e6612e6 | refs/heads/master | 2023-06-23T22:17:54.244257 | 2023-06-21T17:42:57 | 2023-06-21T17:42:57 | 23,730,551 | 3 | 5 | null | 2016-03-22T08:19:30 | 2014-09-06T08:46:41 | Python | UTF-8 | Python | false | false | 1,675 | py | import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
class EbaySpider(BaseSpider):
    """Scrape product name/url/price from the Nemo's Palace eBay UK store.

    Fixes over the original: the pagination variable ``next`` shadowed the
    built-in ``next()`` (renamed to ``next_page``), and a dead commented-out
    category-crawl ``parse`` implementation was removed.
    """
    name = 'seapets-ebay.co.uk'
    allowed_domains = ['ebay.co.uk']
    start_urls = ['http://stores.ebay.co.uk/Nemos-Palace']

    def parse(self, response):
        """Yield one Product item per listing grid cell, then follow the
        "next page" link (if present) to continue crawling."""
        hxs = HtmlXPathSelector(response)
        products = hxs.select('//table[@class="grid"]/tr/td')
        for product in products:
            loader = ProductLoader(item=Product(), selector=product)
            loader.add_xpath('name', 'table/tr/td/div[@class="ttl g-std"]/a/@title')
            loader.add_xpath('url', 'table/tr/td/div[@class="ttl g-std"]/a/@href')
            loader.add_xpath('price', 'table/tr/td/div/table/tr/td/span[@itemprop="price"]/text()')
            yield loader.load_item()
        # Pagination: request the next results page when the link exists.
        next_page = hxs.select('//td[@class="next"]/a/@href').extract()
        if next_page:
            url = urljoin_rfc(get_base_url(response), next_page[0])
            yield Request(url)
| [
"a.s.kosinov@gmail.com"
] | a.s.kosinov@gmail.com |
de259db007a1d40c2f20b2a0e71bb23c2f112f5d | a7e4e2e90ddfbf5f62bcdb6911297bcd36fd2869 | /Python/The while loop/The minimum divider.py | 1884a0c641314ace94a97ccd7bd005beb573b71b | [] | no_license | mottledZebra/Exercises | 613b1771987762103d6ff03c679bcef4f4146190 | a7680deff87a2f517b5739f77752be018261b028 | refs/heads/master | 2022-04-09T19:59:26.414248 | 2020-03-20T17:55:42 | 2020-03-20T17:55:42 | 48,891,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # Дано целое число, не меньшее 2.
# Выведите его наименьший натуральный делитель, отличный от 1.
# Given an integer not less than 2.
# Output his smallest natural divisor other than 1.
print('Дано целое число, не меньшее 2.')
print('Вывести его наименьший натуральный делитель, отличный от 1.')
print()
ans = 'y'
while ans == 'y':
n = int(input('N = '))
i = 2
while i <= n:
if n % i == 0:
print(i)
break
i += 1
ans = input('Еще раз? y/n ')
| [
"tolsen@inbox.ru"
] | tolsen@inbox.ru |
a63504979166b461e22dfaf6d6afce7a89d18161 | d408df0235927f9bce6004d67aa5e842a8eee1ff | /inference.py | c992d301b9da8384f5b79acb29815484dc5b7161 | [] | no_license | khangt1k25/Contrastive-Bottleneck-Segmentation | 9ca41e04af1a4af6dd7502df2771588243b898cd | c6b6611b228e5a8dbda69cec18dcf01f829522da | refs/heads/master | 2023-09-04T03:20:08.200640 | 2021-10-12T10:22:23 | 2021-10-12T10:22:23 | 368,530,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.serialization import load
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor, ToPILImage
from models import *
from utils import *
from dataset import *
from collate import collate_custom
from tqdm import tqdm
from PIL import Image
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dset = VOC(root='./PASCAL_VOC', split='trainaug', res=224, transform=True, download=False)
loader = DataLoader(dset, batch_size=5, shuffle=True, num_workers=2, collate_fn=collate_custom)
maskgenerator = MaskGenerator("voc2012", out_channels=1)
encoder = SupConResNet(name="resnet18", head="mlp", feat_dim=128)
try:
path = './dumps/new_model.pt'
checkpoint = torch.load(path, map_location=device)
# print(checkpoint['maskgenerator_state_dict'])
# encoder.load_state_dict(checkpoint['encoder_state_dict'])
maskgenerator.load_state_dict(checkpoint['maskgenerator_state_dict'])
print("Load successful")
except:
print("Load fail")
maskgenerator.eval()
with torch.no_grad():
for i, batch in tqdm(enumerate(loader), leave=False):
images_base = batch['base']
images_da = batch['aug']
labels = batch['label']
images = images_base.to(device)
mask = maskgenerator(images)
segmented = images*mask
print(images.shape)
print(segmented.shape)
#print(mask)
for k in range(0, 5):
# img = ToPILImage()(images[k].cpu().squeeze()).show()
img2 = ToPILImage()(segmented[k].cpu().squeeze())
# img.save('./pics/img_origin{}.png'.format(k))
img2.save('./pics/img_after{}.png'.format(k))
break
# load model
| [
"khangruni@gmail.com"
] | khangruni@gmail.com |
35fef84bc6b217a74e816bced89470089abcb367 | acd5b6c0641ac3581a8f86a42f7ebb753504ae30 | /liberty_bell/components/ssd1351_display_adapter.py | 73b44bae5b092f927d000c4a78f40638ce1283e2 | [] | no_license | mattgrogan/liberty_bell | c9aa150cd493d480be54f58b3f621c9f3b1e9319 | 35ef4d55155d7d60ab15113ff068276c29ace510 | refs/heads/master | 2020-05-21T17:51:28.599919 | 2017-10-21T15:34:48 | 2017-10-21T15:34:48 | 61,948,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,758 | py | """ This module holds code for all displays """
import time
from liberty_bell.components.ssd1351_display import SSD1351_Display
class SSD1351_Display_Adapter(object):
""" This wraps the SSD1351 """
def __init__(self, name, width, height, rst, dc, spi_port, spi_device):
""" Initialze the display """
self.width = width
self.height = height
self._oled = SSD1351_Display(
width, height, rst=rst, dc=dc, spi_port=spi_port, spi_device=spi_device)
self._oled.start_display()
self._oled.clear_buffer()
def display_image(self, image):
""" Load an image into the buffer """
self._oled.load_image(image)
self._oled.write_buffer()
def show_test_pattern(self):
""" Display the test bars """
from PIL import Image, ImageDraw
test_image = Image.new("RGB", (128, 128), "#000000")
draw = ImageDraw.Draw(test_image)
bar_colors = ["#FFFFFF", # white
"#FFFF00", # Yellow
"#00FFFF", # Cyan
"#00FF00", # Green
"#FF00FF", # Magenta
"#FF0000", # Red
"#000000", # Black
"#0000FF" # Blue
]
x_pos = 0
x_offset = 16
for color in bar_colors:
draw.rectangle([(x_pos, 0), (x_pos + x_offset, 128)],
outline=color, fill=color)
x_pos = x_pos + x_offset
self._oled.load_image(test_image)
self._oled.write_buffer()
def test(self):
""" Test the display """
self.show_test_pattern()
def clear(self):
""" Clear the display """
self._oled.clear_buffer()
self._oled.write_buffer()
def write_line(self, data):
""" Add row to the display """
color_data = []
for pixel in data:
r, g, b = pixel
color_data.append(color565(r, g, b))
self._oled.write_line(color_data)
#self._oled.write_line(data)
def color565(red, green=None, blue=None):
""" Define color in 16-bit RGB565. Red and blue
have five bits each and green has 6 (since the
eye is more sensitive to green).
Bit Format: RRRR RGGG GGGB BBBB
Usage:
color565(red=[0,255], green=[0,255], blue=[0,255])
color565(0xFFE92)
"""
if green is None and blue is None:
# We were passed the full value in the first argument
hexcolor = red
red = (hexcolor >> 16) & 0xFF
green = (hexcolor >> 8) & 0xFF
blue = hexcolor & 0xFF
# We have 8 bits coming in 0-255
# So we truncate the least significant bits
# until there's 5 bits for red and blue
# and six for green
red >>= 3
green >>= 2
blue >>= 3
# Now move them to the correct locations
red <<= 11
green <<= 5
# Then "or" them together
result = red | green | blue
return result
| [
"mvgnyc@gmail.com"
] | mvgnyc@gmail.com |
e93010ae26c2f452cbfb17ba59524682846ac2e7 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/asp.py | 5132eb9ef039bf880ace7b2535f8a47941dfbc54 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 162 | py | ii = [('WilkJMC3.py', 4), ('PettTHE.py', 11), ('WilkJMC2.py', 5), ('CoolWHM.py', 1), ('LyelCPG.py', 1), ('WestJIT2.py', 1), ('LandWPA2.py', 1), ('SomeMMH.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
66aa2e6b4bfaa38bb4426ce7d639aabfbde4bd97 | 0648aeda4e4c6da54428a040e2248a1efcb148ae | /code_initializer.py | 0602dd2455613a3264f40cf88b6bcbc63be260df | [] | no_license | ajaymaity/grid-search-with-keras | 3b86d30cba43eed752eddfa341cfb09b197c443c | 0bb3c29b4119183b28040371c69198ff1619a541 | refs/heads/master | 2020-05-21T12:13:41.240543 | 2019-05-11T15:45:33 | 2019-05-11T15:45:33 | 186,049,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | # Use scikit-learn to grid search the weight initialization
import numpy
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
# Function to create model, required for KerasClassifier
def create_model(init_mode='uniform'):
# create model
model = Sequential()
model.add(Dense(12, input_dim=8, kernel_initializer=init_mode, activation='relu'))
model.add(Dense(1, kernel_initializer=init_mode, activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# create model
model = KerasClassifier(build_fn=create_model, epochs=100, batch_size=10, verbose=0)
# define the grid search parameters
init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform']
param_grid = dict(init_mode=init_mode)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
grid_result = grid.fit(X, Y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
| [
"ajay_maity@optum.com"
] | ajay_maity@optum.com |
d506a23ac2b52d1efcdc9199a261eb8543b893c2 | 12cb93fbc6dc83f2574df4c5b612e8a701aed310 | /sprint12/2 Базовые структуры/122f_least_favorite_thing.py | 0835e8ef2b1261e09f79c3541ea9022ba2ee9981 | [] | no_license | dzanto/algorithmics | 85b871ea72069b42d2771b021d0ba02adbd2f021 | 462f4837818eb4f654228e051fcbff0902cad658 | refs/heads/master | 2023-01-09T04:58:06.280813 | 2020-11-11T10:41:38 | 2020-11-11T10:41:38 | 303,316,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | class Node:
def __init__(self, value, next_item=None):
self.value = value
self.next_item = next_item
def solution(node, idx):
if idx == 0:
head = node.next_item
return head
head = node
while idx-1:
node = node.next_item
idx -= 1
if node.next_item.next_item is None:
node.next_item = None
elif node.next_item.next_item is not None:
node.next_item = node.next_item.next_item
return head
| [
"dzanto@gmail.com"
] | dzanto@gmail.com |
1bccd7d930b5dec65eead7410ebb2acc4488735c | 66df0193ea0810a9ccde310b5ad1da305642747e | /8_1236.py | 50455fca59a7bf1feca42613971d6edd316b42a3 | [] | no_license | toriz7/solveAlogorithmProblem | 67facc71e8f53f60faedd588656fcaa95b7e9827 | 40d366e572a9be5fca2f159f53c8544448654dee | refs/heads/main | 2023-04-04T11:08:54.800565 | 2021-04-22T14:27:53 | 2021-04-22T14:27:53 | 305,416,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #8
# 스스로 풀지 못한 문제.
N,M= map(int, input().split())
Mat=[['.']*M for i in range(N)]
rowcan=0
colcan=0
#입력
for row in range(N):
input_data=input()
if 'X' not in input_data:# 해당 행에 경비원 없으면 행을 후보로 넣는다.
rowcan+=1
for col in range(M):
Mat[row][col] = input_data[col]
# 열 검사
for col in range(M):
check = False
for row in range(N):
if Mat[row][col] == 'X':
check=True
#마지막까지 없으면
if check==False:
colcan+=1
#후보군 정리 완료
#print(max(rowcan,colcan))
if rowcan > colcan:
print(rowcan)
else:
print(colcan)
| [
"noreply@github.com"
] | noreply@github.com |
d6b86b1af4acdf4dbdd08dc74083be5d5b602403 | cb5b17c50ac62bc5e0e5127a4bec9b47f6f7d646 | /lib/networks/losses.py | a8f8264d6d1e68e2f1819e317da9f10b9f5eb2c9 | [] | no_license | Regenerator/prns | 04a160ea706bac26460e97bf12d282fb94e00881 | cc99835519331c12dcba85bddbbdf6c0af7b3f80 | refs/heads/master | 2022-11-25T06:04:22.777959 | 2020-07-28T23:13:14 | 2020-07-28T23:13:14 | 198,115,777 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,636 | py | import numpy as np
import torch
import torch.nn as nn
class KLDivergence(nn.Module):
def __init__(self):
super(KLDivergence, self).__init__()
def forward(self, mu1, logvar1, mu2=None, logvar2=None):
if mu2 is None:
if mu1 is None:
loss = torch.zeros(1).cuda()
else:
loss = -0.5 * torch.mean(torch.sum(1 + logvar1 - (logvar1.exp() + mu1.pow(2)), dim=1))
else:
if mu1 is None:
loss = -0.5 * torch.mean(torch.sum(1 - logvar2 - ((1 + mu2.pow(2)) / logvar2.exp()), dim=1))
else:
loss = -0.5 * torch.mean(
torch.sum(1 + logvar1 - logvar2 - ((logvar1.exp() + (mu1 - mu2).pow(2)) / logvar2.exp()), dim=1)
)
return loss
class CrossEntropyLoss(nn.Module):
def __init__(self, **kwargs):
super(CrossEntropyLoss, self).__init__()
self.label_smoothing_mode = kwargs.get('label_smoothing_mode')
self.label_smoothing_rate = kwargs.get('label_smoothing_rate')
if self.label_smoothing_mode == 'None':
self.shift = 0.
else:
self.shift = torch.from_numpy(np.array([self.label_smoothing_rate / (1. - 2. * self.label_smoothing_rate)],
dtype=np.float32)).cuda()
def forward(self, logprobs, targets):
if self.shift > 0.:
if self.label_smoothing_mode == 'Const':
shifted_targets = torch.add(targets, self.shift)
elif self.label_smoothing_mode == 'Random':
shifted_targets = torch.add(targets, torch.abs(torch.randn_like(targets).mul(self.shift)))
shifted_targets = shifted_targets / shifted_targets.sum(dim=1, keepdim=True)
else:
shifted_targets = targets
return -torch.sum(shifted_targets * logprobs) / logprobs.size(0)
class MeanSquaredL2Norm(nn.Module):
def __init__(self):
super(MeanSquaredL2Norm, self).__init__()
def forward(self, input):
loss = torch.mean(torch.sum(input**2, 1))
return loss
class GNetLoss(nn.Module):
def __init__(self, **kwargs):
super(GNetLoss, self).__init__()
self.CEL = CrossEntropyLoss(**kwargs)
def forward(self, inputs, targets):
CEL = self.CEL(inputs['logprobs'], targets)
return CEL
class TLNLoss(nn.Module):
def __init__(self, **kwargs):
super(TLNLoss, self).__init__()
self.kl_weight = kwargs.get('kl_weight')
self.CEL = CrossEntropyLoss(**kwargs)
self.L2 = MeanSquaredL2Norm()
def forward(self, inputs, targets):
CEL = self.CEL(inputs['logprobs'], targets)
if inputs['img_prior_mus'] is None:
CL2 = self.L2(inputs['vox_posterior_mus'] - inputs['vox_prior_mus'])
else:
CL2 = self.L2(inputs['vox_posterior_mus'] - inputs['img_prior_mus'])
return CEL + self.kl_weight * CL2, CEL, CL2
class CVAELoss(nn.Module):
def __init__(self, **kwargs):
super(CVAELoss, self).__init__()
self.kl_weight = kwargs.get('kl_weight')
self.CEL = CrossEntropyLoss(**kwargs)
self.KLD = KLDivergence()
def forward(self, inputs, targets):
CEL = self.CEL(inputs['logprobs'], targets)
if inputs['img_prior_mus'] is None:
KLDI = self.KLD(inputs['vox_posterior_mus'], inputs['vox_posterior_logvars'],
mu2=inputs['vox_prior_mus'], logvar2=inputs['vox_prior_logvars'])
else:
KLDI = self.KLD(inputs['vox_posterior_mus'], inputs['vox_posterior_logvars'],
mu2=inputs['img_prior_mus'], logvar2=inputs['img_prior_logvars'])
return CEL + self.kl_weight * KLDI, CEL, KLDI
class DVAELoss(nn.Module):
def __init__(self, **kwargs):
super(DVAELoss, self).__init__()
self.kl_weight = kwargs.get('kl_weight')
self.kl_ratio = kwargs.get('kl_ratio')
self.CEL = CrossEntropyLoss(**kwargs)
self.KLD = KLDivergence()
def forward(self, inputs, targets):
CEL = self.CEL(inputs['logprobs'], targets)
KLDV = self.KLD(inputs['vox_posterior_mus'], inputs['vox_posterior_logvars'],
mu2=inputs['vox_prior_mus'], logvar2=inputs['vox_prior_logvars'])
KLDI = self.KLD(inputs['vox_posterior_mus'], inputs['vox_posterior_logvars'],
mu2=inputs['img_prior_mus'], logvar2=inputs['img_prior_logvars'])
return CEL + self.kl_weight * (self.kl_ratio * KLDV + (1.0 - self.kl_ratio) * KLDI), CEL, KLDV, KLDI
| [
"roman.klokov@inria.fr"
] | roman.klokov@inria.fr |
5b3c8eb87f14b56ba99b048c861e3741debcc9b0 | 5e10c81e138aa00778cf74f3412e3f8be6e16871 | /app/utils/detect.py | ad2e45e4e4e9517acd9a4eb1b555d81bebc3db7e | [] | no_license | Wanglingdu/ownModify | b92793e103d348dcbdc1249fccda094e4c60176a | 8b75f9ed8db67712183adb0fa0c4093294bbc6fd | refs/heads/master | 2022-11-07T00:32:44.425052 | 2018-11-19T05:58:41 | 2018-11-19T05:58:41 | 157,649,486 | 0 | 2 | null | 2022-10-10T11:36:50 | 2018-11-15T03:56:03 | Python | UTF-8 | Python | false | false | 5,582 | py | # coding:utf-8
from app import app
import time, pdb
import os
import re
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import datetime
import urllib
import json
import hashlib
def rop_detect(dest_dir, infos):
start_time = time.time()
img_name = set()
for filename in os.listdir(dest_dir):
if os.path.splitext(filename)[1].lower() == '.jpg' or \
os.path.splitext(filename)[1].lower() == '.png':
img_name.add(filename)
img_num = len(img_name)
all_imgs = [0] * img_num
size = (352, 264)
loc_x = 50
loc_y = 360
background = Image.open(app.config['BACKGROUND'])
num = 0
for i, filename in enumerate(img_name):
# im_np = cv2.imread(app.config['UPLOADED_PATH']+"/"+ filename)
# im_np = cv2.resize(im_np, (0,0), fx=0.2, fy=0.2)
# all_imgs[i] = np.copy(im_np)
# shutil.move(app.config['UPLOADED_PATH']+"/"+ filename, dest_dir+"/"+filename)
im = Image.open(dest_dir + "/" + filename)
# im1 = im.resize((320, 240), Image.ANTIALIAS)
# im_np = np.asarray(im1, dtype='float32')
# print(im_np.shape)
# Use newaxis object to create an axis of length one
# all_imgs[i] = np.copy(im_np)
im = im.resize(size, Image.ANTIALIAS)
# print(im)
# print(im)
if num < 9:
background.paste(im, (loc_x, loc_y, loc_x + size[0], loc_y + size[1]))
loc_x = loc_x + size[0] + 20
num += 1
if num % 3 == 0:
loc_x = 50
loc_y = loc_y + size[1] + 40
close_time = time.time()
print("img process before:" + str(close_time - start_time))
start_time = time.time()
request_url = app.config["ROP_SERVICE"]
msg_key = 'msg'
# input_list = [input_data.tolist() for input_data in all_imgs]
# input_list_json = json.dumps(input_list)
dest_dir = re.sub('/milab', '', dest_dir)
input_data = {'data_folder': dest_dir}
print('________________________dest_dir:' + dest_dir)
req = urllib.request.Request(url=request_url, data=urllib.parse.urlencode(input_data).encode("utf-8"))
res_data = urllib.request.urlopen(req)
close_time = time.time()
print("detect service:" + str(close_time - start_time))
start_time = time.time()
res_dict = eval(res_data.read())
print("____________________________________res_dict['code']:" + str(res_dict['code']))
if int(res_dict['code']) == 1:
if res_dict['diagnose'] == 'normal':
pred_result = "正常"
confidence_0 = res_dict['y_rop_normal'][0]
confidence_1 = res_dict['y_rop_normal'][1]
else:
if res_dict['diagnose'] == "stage2":
pred_result = "ROP 1/2期"
else:
pred_result = "ROP 3/4/5期"
confidence = res_dict['y_rop_normal'][0]
confidence_0 = res_dict['y_rop_normal'][1]
confidence_2 = res_dict['y_stage_2_3'][0]
confidence_3 = res_dict['y_stage_2_3'][1]
else:
pred_result = res_dict[msg_key]
# try:
# print( ImageFont.truetype("static/fonts/msyhLight_1.0.ttc",45));
# except:
# print( ImageFont.truetype("msyhLight_1.0.ttc",45));
ttfont = ImageFont.truetype("/usr/share/fonts/type2/wqy-microhei.ttc", 36)
# ttfont = ImageFont.truetype("uming.ttc",45)
# ttfont = None
draw = ImageDraw.Draw(background)
draw.text((50, 50), u'姓名: ' + infos['name'], fill=(0,0,0), font=ttfont)
if infos['date']:
draw.text((450, 50), u'检查日期: ' + infos['date'].strftime('%Y-%m-%d %H:%M:%S'), fill=(0, 0, 0), font=ttfont)
else:
draw.text((450, 50), u'检查日期:', fill=(0, 0, 0), font=ttfont)
draw.text((1000, 50), u'眼: ' + infos['RL'], fill=(0, 0, 0), font=ttfont)
draw.text((50, 1280), u'诊断意见 :' + pred_result, fill=(0, 0, 0), font=ttfont)
draw.text((100, 1370), u'类型', fill=(0, 0, 0), font=ttfont)
draw.text((100, 1500), u'置信度', fill=(0, 0, 0), font=ttfont)
if res_dict['diagnose'] == 'normal':
draw.text((300, 1370), u'正常', fill=(0, 0, 0), font=ttfont)
draw.text((500, 1370), u'ROP', fill=(0, 0, 0), font=ttfont)
draw.text((300, 1500), u'%.2f%%' % (confidence_0 * 100.), fill=(0, 0, 0), font=ttfont)
draw.text((500, 1500), u'%.2f%%' % (confidence_1 * 100.), fill=(0, 0, 0), font=ttfont)
else:
print(confidence, confidence_2, confidence_3, confidence_2 * confidence * 100., str(confidence),
str(confidence)[:6])
draw.text((300, 1370), u'正常', fill=(0, 0, 0), font=ttfont)
draw.text((500, 1370), u'ROP 1/2期', fill=(0, 0, 0), font=ttfont)
draw.text((750, 1370), u'ROP 3/4/5期', fill=(0, 0, 0), font=ttfont)
draw.text((300, 1500), u'%.2f%%' % (confidence * 100.), fill=(0, 0, 0), font=ttfont)
draw.text((500, 1500), u'%.2f%%' % (confidence_2 * confidence_0 * 100.), fill=(0, 0, 0), font=ttfont)
draw.text((750, 1500), u'%.2f%%' % (confidence_3 * confidence_0 * 100.), fill=(0, 0, 0), font=ttfont)
filename = hashlib.md5(str(time.time()).encode('utf-8')).hexdigest()[:20]
while(os.path.exists(os.path.join(app.config['REPORT'], filename + '.jpg'))):
filename = hashlib.md5(str(time.time()).encode('utf-8')).hexdigest()[:20]
background.save(app.config['REPORT'] + '/' + filename + '.jpg')
img_name.clear()
close_time = time.time()
print("draw result:" + str(close_time - start_time))
return pred_result, filename
| [
"Wanglingdu@outlook.com"
] | Wanglingdu@outlook.com |
3304675c9f32315e636e7eb7ddd6bae94047887a | 751accf6c36b26e5da87837773bc7403691baa25 | /car_with_trailers_sims/T_LQR/T_LQR_car_w_trailers_sims.py | 0ff694cb51f92d492a7a6a6fa1e62fce606ee0a2 | [] | no_license | karthikeyaparunandi/T_PFC_paper | 0913a71c664dda6476fed5b796a7b03d9ce288e4 | c1fe2dfd06cdac4a050069919aa04a92e34f3cc9 | refs/heads/master | 2022-08-20T09:18:16.537881 | 2019-06-06T21:56:09 | 2019-06-06T21:56:09 | 171,076,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | '''
copyright @ Karthikeya S Parunandi - karthikeyasharma91@gmail.com
python code for simulations on car-like robot using T-LQR method.
'''
#!/usr/bin/env python
from __future__ import division
import h5py
from casadi import *
from T_LQR_car_w_trailers import T_LQR_car_w_trailers
import matplotlib.pyplot as plt
import numpy as np
import car_with_trailers_sims.params as params
#Initial position
X_0 = DM([0, 0, 0, 0, 0, 0]) # Initial state
x_g = DM([5.0, 6.0, 0, 0, 0, 0]) # goal state
#state dimension
n_x = params.n_x
#control imension
n_u = params.n_u
#horizon
horizon = params.horizon
control_upper_bound = DM([params.r_u[0], params.r_w[0]])
control_lower_bound = DM([params.r_u[1], params.r_w[1]])
#use the T_LQR class
t_lqr = T_LQR_car_w_trailers(n_x, n_u, horizon, X_0, x_g, control_upper_bound, control_lower_bound, params.dt)
# execute the algorithm
t_lqr.run_t_lqr()
t_lqr.plot_position(t_lqr.X_o)
'''
#save the trajectory
f = open('TLQR_no_limit.txt','a')
for i in range(len(t_lqr.X_p)):
f.write(str(t_lqr.X_o[i][0][0])+ '\t'+ str(t_lqr.X_o[i][1][0]) + '\t' + str(t_lqr.X_o[i][2][0]) + '\t' + str(t_lqr.X_o[i][3][0])+'\t'+ str(t_lqr.U_o[i][0][0])+'\t'+ str(t_lqr.U_o[i][1][0])+'\n')
f.close()
'''
#initialize the scaling factor for noise
epsilon = 0
epsilon_max = 0.1
#delta - increment in epsilon for sims
delta = .005
#no. of sims per epsilon
n_sims = 100
#creating trajectory variables to store the entire trajectory
X_t, U_t = t_lqr.create_traj_variables_DM()
while epsilon <= epsilon_max:
cost_array = []
for times in range(0, n_sims):
for t in range(0, horizon):
#apply the controller
U_t[t] = t_lqr.U_o[t] + (0 if t==0 else 1) * mtimes(t_lqr.K_o[t-1], (X_t[t-1] - t_lqr.X_o[t-1]))
if t==0:
X_t[t] = t_lqr.car_w_trailers_dynamics_propagation_d_noisy(X_0, U_t[0], epsilon)
else:
X_t[t] = t_lqr.car_w_trailers_dynamics_propagation_d_noisy(X_t[t-1], U_t[t], epsilon)
cost = t_lqr.calculate_total_cost(X_0, X_t, U_t, horizon)
cost_array.append(cost)
with h5py.File('cost_data.hdf5','a') as f:
dataset = f.create_dataset("{}".format(epsilon), data=cost_array)
epsilon += delta
| [
"karthikeyasharma91@gmail.com"
] | karthikeyasharma91@gmail.com |
a93cc724adf2790b67ffc92d88c83b143f781755 | 04c1e447a513722378d66e3f4147b21cda136bd7 | /MM/3p/venv/lib/python3.5/site-packages/plot/apps/region/draw_one_region.py | 04ee60d5cc7f1dd176899d26f11ec8ca6905372c | [] | no_license | kozakjefferson/devw | 4f2283381b9cd4ec491d181fd6564f39caf0b1fa | 592cf26c4b06c3cdc5eb5640a5cb413870308484 | refs/heads/master | 2020-03-19T15:20:23.386233 | 2018-06-08T21:16:55 | 2018-06-08T21:16:55 | 136,667,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | """
Draw a single region
"""
from typing import Dict, Tuple, List, AnyStr
from numpy import ndarray
from ...tk.matplotlibTK.legend import format_legend_label
def draw_one_region(
obj_axis, # type: object
xy1y2, # type: List
p # type: Dict
):
# type: (...) -> Tuple
"""Draw a single region
Args:
obj_axis (object): matplotlib.axis.Axis object
xy1y2 (list): a list containing
p (dict): data parameters
Returns:
("legend", object, legend_label)
"""
x, y1, y2 = xy1y2
obj_edges = obj_axis.plot(
x, y1, x, y2,
color=p['region']['edge']['color'],
linewidth=p['region']['edge']['width'],
alpha=p['region']['edge']['opacity']
)
obj_axis.fill_between(
x, y1, y2,
where=y2 >= y1,
linewidth=p['region']['edge']['width'],
facecolor=p['region']['color']['positive'],
alpha=p['region']['opacity']['positive'],
interpolate=p['region']['interpolate']['positive']
)
obj_axis.fill_between(
x, y1, y2,
where=y2 <= y1,
linewidth=p['region']['edge']['width'],
facecolor=p['region']['color']['negative'],
alpha=p['region']['opacity']['negative'],
interpolate=p['region']['interpolate']['negative']
)
return ("legend",
obj_edges[0],
format_legend_label(p['legend']['content']))
| [
"jnkkozak@gmail.com"
] | jnkkozak@gmail.com |
d6f767ba29749e80af1081a9d56b603d236a7679 | e28f6905146318c055e5d7be4feb07a92f6c679f | /semana2/ex7s2.py | 7fd759403a234ff45aaf00d2d58da8796a57f00b | [] | no_license | fcoprata/AtividadePython | 5002d8b5b639e4b032e1241f1058ab92a3047f66 | 6bca22a76492893f6ab69cc48ea13e496023108c | refs/heads/master | 2023-08-20T12:30:15.038550 | 2021-09-28T00:46:08 | 2021-09-28T00:46:08 | 397,370,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py |
lista = []
while (1):
name = input("Digite o nome: ")
if name == "parar":
break
else:
lista.append(name)
print(sorted(lista))
| [
"fcoprata@alu.ufc.br"
] | fcoprata@alu.ufc.br |
62faad3705bda1478983f032850686bfdd348dbd | 91639fea573828d08e8642a9022fe2ec62319414 | /future/types/newstr.py | 6a01f83530985735559b1b493b0bf1432abb2a4b | [
"MIT"
] | permissive | agincel/AdamTestBot | 9787a22f25a3bfc2bbab0b6c6e66b857cb369f32 | fee093c3dd944881bd92c9180fbb3a13700673da | refs/heads/master | 2020-05-22T04:26:39.241479 | 2016-12-29T22:15:04 | 2016-12-29T22:15:04 | 44,931,116 | 0 | 8 | null | 2016-10-18T22:04:33 | 2015-10-25T21:40:35 | Python | UTF-8 | Python | false | false | 15,602 | py | """
This module redefines ``str`` on Python 2.x to be a subclass of the Py2
``unicode`` type that behaves like the Python 3.x ``str``.
The main differences between ``newstr`` and Python 2.x's ``unicode`` type are
the stricter type-checking and absence of a `u''` prefix in the representation.
It is designed to be used together with the ``unicode_literals`` import
as follows:
>>> from __future__ import unicode_literals
>>> from builtins import str, isinstance
On Python 3.x and normally on Python 2.x, these expressions hold
>>> str('blah') is 'blah'
True
>>> isinstance('blah', str)
True
However, on Python 2.x, with this import:
>>> from __future__ import unicode_literals
the same expressions are False:
>>> str('blah') is 'blah'
False
>>> isinstance('blah', str)
False
This module is designed to be imported together with ``unicode_literals`` on
Python 2 to bring the meaning of ``str`` back into alignment with unprefixed
string literals (i.e. ``unicode`` subclasses).
Note that ``str()`` (and ``print()``) would then normally call the
``__unicode__`` method on objects in Python 2. To define string
representations of your objects portably across Py3 and Py2, use the
:func:`python_2_unicode_compatible` decorator in :mod:`future.utils`.
"""
from collections import Iterable
from numbers import Number
from future.utils import PY3, istext, with_metaclass, isnewbytes
from future.types import no, issubset
from future.types.newobject import newobject
if PY3:
    # On Py3 the built-in str already is a unicode type; alias it so the
    # Py2-oriented code below still compiles. (newstr is not normally
    # needed on Py3 anyway.)
    unicode = str
class BaseNewStr(type):
    """
    Metaclass for :class:`newstr`.

    Makes ``isinstance(x, newstr)`` succeed for *any* unicode string
    (native or newstr), while genuine subclasses of ``newstr`` keep the
    ordinary subclass-based instance check.
    """
    def __instancecheck__(cls, instance):
        if cls is not newstr:
            return issubclass(instance.__class__, cls)
        return isinstance(instance, unicode)
class newstr(with_metaclass(BaseNewStr, unicode)):
    """
    A backport of the Python 3 str object to Py2.

    Subclasses Py2 ``unicode`` but enforces Py3-style strictness: no
    implicit mixing with ``newbytes``, no ``decode()`` method, and a repr
    without the ``u''`` prefix.
    """
    # Error template used by methods that reject implicit bytes arguments.
    no_convert_msg = "Can't convert '{0}' object to str implicitly"
    def __new__(cls, *args, **kwargs):
        """
        From the Py3 str docstring:

          str(object='') -> str
          str(bytes_or_buffer[, encoding[, errors]]) -> str

          Create a new string object from the given object. If encoding or
          errors is specified, then the object must expose a data buffer
          that will be decoded using the given encoding and error handler.
          Otherwise, returns the result of object.__str__() (if defined)
          or repr(object).
          encoding defaults to sys.getdefaultencoding().
          errors defaults to 'strict'.
        """
        if len(args) == 0:
            # str() with no arguments -> empty string.
            return super(newstr, cls).__new__(cls)
        # Special case: If someone requests str(str(u'abc')), return the same
        # object (same id) for consistency with Py3.3. This is not true for
        # other objects like list or dict.
        elif type(args[0]) == newstr and cls == newstr:
            return args[0]
        elif isinstance(args[0], unicode):
            # Already text: adopt the value as-is.
            value = args[0]
        elif isinstance(args[0], bytes):   # i.e. Py2 bytes or newbytes
            # NOTE: this branch must come after the unicode check, since on
            # Py2 native str is bytes. Decode only when the caller supplied
            # an encoding (or positional encoding/errors args).
            if 'encoding' in kwargs or len(args) > 1:
                value = args[0].decode(*args[1:], **kwargs)
            else:
                value = args[0].__str__()
        else:
            # Any other object: let the unicode constructor convert it below.
            value = args[0]
        return super(newstr, cls).__new__(cls, value)
def __repr__(self):
"""
Without the u prefix
"""
value = super(newstr, self).__repr__()
# assert value[0] == u'u'
return value[1:]
def __getitem__(self, y):
"""
Warning: Python <= 2.7.6 has a bug that causes this method never to be called
when y is a slice object. Therefore the type of newstr()[:2] is wrong
(unicode instead of newstr).
"""
return newstr(super(newstr, self).__getitem__(y))
def __contains__(self, key):
errmsg = "'in <string>' requires string as left operand, not {0}"
# Don't use isinstance() here because we only want to catch
# newstr, not Python 2 unicode:
if type(key) == newstr:
newkey = key
elif isinstance(key, unicode) or isinstance(key, bytes) and not isnewbytes(key):
newkey = newstr(key)
else:
raise TypeError(errmsg.format(type(key)))
return issubset(list(newkey), list(self))
@no('newbytes')
def __add__(self, other):
return newstr(super(newstr, self).__add__(other))
@no('newbytes')
def __radd__(self, left):
" left + self "
try:
return newstr(left) + self
except:
return NotImplemented
def __mul__(self, other):
return newstr(super(newstr, self).__mul__(other))
def __rmul__(self, other):
return newstr(super(newstr, self).__rmul__(other))
def join(self, iterable):
errmsg = 'sequence item {0}: expected unicode string, found bytes'
for i, item in enumerate(iterable):
# Here we use type() rather than isinstance() because
# __instancecheck__ is being overridden. E.g.
# isinstance(b'abc', newbytes) is True on Py2.
if isnewbytes(item):
raise TypeError(errmsg.format(i))
# Support use as a staticmethod: str.join('-', ['a', 'b'])
if type(self) == newstr:
return newstr(super(newstr, self).join(iterable))
else:
return newstr(super(newstr, newstr(self)).join(iterable))
@no('newbytes')
def find(self, sub, *args):
return super(newstr, self).find(sub, *args)
@no('newbytes')
def rfind(self, sub, *args):
return super(newstr, self).rfind(sub, *args)
@no('newbytes', (1, 2))
def replace(self, old, new, *args):
return newstr(super(newstr, self).replace(old, new, *args))
def decode(self, *args):
raise AttributeError("decode method has been disabled in newstr")
    def encode(self, encoding='utf-8', errors='strict'):
        """
        Returns bytes (a ``newbytes`` instance).

        Encode S using the codec registered for encoding. Default encoding
        is 'utf-8'. errors may be given to set a different error
        handling scheme. Default is 'strict' meaning that encoding errors raise
        a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
        'xmlcharrefreplace' as well as any other name registered with
        codecs.register_error that can handle UnicodeEncodeErrors.
        """
        from future.types.newbytes import newbytes
        # Py2 unicode.encode() takes encoding and errors as optional parameter,
        # not keyword arguments as in Python 3 str.
        # For the surrogateescape error handling mechanism, the
        # codecs.register_error() function seems to be inadequate for an
        # implementation of it when encoding. (Decoding seems fine, however.)
        # For example, in the case of
        #     u'\udcc3'.encode('ascii', 'surrogateescape_handler')
        # after registering the ``surrogateescape_handler`` function in
        # future.utils.surrogateescape, both Python 2.x and 3.x raise an
        # exception anyway after the function is called because the unicode
        # string it has to return isn't encodable strictly as ASCII.
        if errors == 'surrogateescape':
            if encoding == 'utf-16':
                # Known to fail here. See test_encoding_works_normally()
                raise NotImplementedError('FIXME: surrogateescape handling is '
                                          'not yet implemented properly')
            # Encode char by char, building up list of byte-strings
            mybytes = []
            for c in self:
                code = ord(c)
                if 0xD800 <= code <= 0xDCFF:
                    # Map a lone surrogate back to the original raw byte.
                    # NOTE(review): surrogateescape decoding produces only
                    # U+DC80..U+DCFF; code points below U+DC00 would yield a
                    # negative byte value here — confirm callers never pass
                    # other lone surrogates.
                    mybytes.append(newbytes([code - 0xDC00]))
                else:
                    mybytes.append(c.encode(encoding=encoding))
            return newbytes(b'').join(mybytes)
        return newbytes(super(newstr, self).encode(encoding, errors))
@no('newbytes', 1)
def startswith(self, prefix, *args):
if isinstance(prefix, Iterable):
for thing in prefix:
if isnewbytes(thing):
raise TypeError(self.no_convert_msg.format(type(thing)))
return super(newstr, self).startswith(prefix, *args)
@no('newbytes', 1)
def endswith(self, prefix, *args):
# Note we need the decorator above as well as the isnewbytes()
# check because prefix can be either a bytes object or e.g. a
# tuple of possible prefixes. (If it's a bytes object, each item
# in it is an int.)
if isinstance(prefix, Iterable):
for thing in prefix:
if isnewbytes(thing):
raise TypeError(self.no_convert_msg.format(type(thing)))
return super(newstr, self).endswith(prefix, *args)
@no('newbytes', 1)
def split(self, sep=None, maxsplit=-1):
# Py2 unicode.split() takes maxsplit as an optional parameter,
# not as a keyword argument as in Python 3 str.
parts = super(newstr, self).split(sep, maxsplit)
return [newstr(part) for part in parts]
@no('newbytes', 1)
def rsplit(self, sep=None, maxsplit=-1):
# Py2 unicode.rsplit() takes maxsplit as an optional parameter,
# not as a keyword argument as in Python 3 str.
parts = super(newstr, self).rsplit(sep, maxsplit)
return [newstr(part) for part in parts]
@no('newbytes', 1)
def partition(self, sep):
    """Partition like str.partition, wrapping all three parts in newstr."""
    before, found_sep, after = super(newstr, self).partition(sep)
    return (newstr(before), newstr(found_sep), newstr(after))
@no('newbytes', 1)
def rpartition(self, sep):
    """Right-partition like str.rpartition, wrapping all three parts in newstr."""
    before, found_sep, after = super(newstr, self).rpartition(sep)
    return (newstr(before), newstr(found_sep), newstr(after))
@no('newbytes', 1)
def index(self, sub, *args):
    """
    Like newstr.find() but raise ValueError when the substring is not
    found.
    """
    position = self.find(sub, *args)
    if position < 0:  # str.find returns -1 exactly when sub is absent
        raise ValueError('substring not found')
    return position
def splitlines(self, keepends=False):
    """
    S.splitlines(keepends=False) -> list of strings

    Return a list of the lines in S, breaking at line boundaries.
    Line breaks are included in the resulting list only when *keepends*
    is given and true.  Each line is wrapped in newstr.  Py2's
    unicode.splitlines() only accepts keepends positionally, so it is
    forwarded positionally.
    """
    return [newstr(line)
            for line in super(newstr, self).splitlines(keepends)]
def __eq__(self, other):
    # Equal only against real text, or native Py2 ``str`` byte-strings
    # that are not newbytes.  Operator precedence is intentional: this
    # parses as `isinstance(other, unicode) or (isinstance(other, bytes)
    # and not isnewbytes(other))`.
    # NOTE(review): returning False (instead of NotImplemented) prevents
    # the other operand's reflected __eq__ from running -- presumably a
    # deliberate compatibility trade-off; confirm before changing.
    if (isinstance(other, unicode) or
            isinstance(other, bytes) and not isnewbytes(other)):
        return super(newstr, self).__eq__(other)
    else:
        return False
def __ne__(self, other):
    # Mirror of __eq__: comparable types delegate to the underlying
    # unicode comparison; anything else is unconditionally unequal.
    # Precedence: `A or (B and not C)`, same as in __eq__.
    if (isinstance(other, unicode) or
            isinstance(other, bytes) and not isnewbytes(other)):
        return super(newstr, self).__ne__(other)
    else:
        return True
# Message template for ordering comparisons against non-text operands.
unorderable_err = 'unorderable types: str() and {0}'

def __lt__(self, other):
    # Py3 semantics on Py2: ordering against non-text raises TypeError
    # instead of falling back to Py2's arbitrary cross-type ordering.
    if not istext(other):
        raise TypeError(self.unorderable_err.format(type(other)))
    return super(newstr, self).__lt__(other)

def __le__(self, other):
    if not istext(other):
        raise TypeError(self.unorderable_err.format(type(other)))
    return super(newstr, self).__le__(other)

def __gt__(self, other):
    if not istext(other):
        raise TypeError(self.unorderable_err.format(type(other)))
    return super(newstr, self).__gt__(other)

def __ge__(self, other):
    if not istext(other):
        raise TypeError(self.unorderable_err.format(type(other)))
    return super(newstr, self).__ge__(other)
def __getattribute__(self, name):
    """
    A trick to cause the ``hasattr`` builtin-fn to return False for
    the 'decode' method on Py2.
    """
    # Py3's str has no .decode(); hiding the method inherited from Py2
    # unicode keeps feature detection (hasattr(s, 'decode')) consistent
    # across Python versions.
    if name in ['decode', u'decode']:
        raise AttributeError("decode method has been disabled in newstr")
    return super(newstr, self).__getattribute__(name)
def __native__(self):
    """
    A hook for the future.utils.native() function.
    """
    # Downcast to the platform-native text type (unicode on Py2, str on Py3).
    return unicode(self)
@staticmethod
def maketrans(x, y=None, z=None):
"""
Return a translation table usable for str.translate().
If there is only one argument, it must be a dictionary mapping Unicode
ordinals (integers) or characters to Unicode ordinals, strings or None.
Character keys will be then converted to ordinals.
If there are two arguments, they must be strings of equal length, and
in the resulting dictionary, each character in x will be mapped to the
character at the same position in y. If there is a third argument, it
must be a string, whose characters will be mapped to None in the result.
"""
if y is None:
assert z is None
if not isinstance(x, dict):
raise TypeError('if you give only one argument to maketrans it must be a dict')
result = {}
for (key, value) in x.items():
if len(key) > 1:
raise ValueError('keys in translate table must be strings or integers')
result[ord(key)] = value
else:
if not isinstance(x, unicode) and isinstance(y, unicode):
raise TypeError('x and y must be unicode strings')
if not len(x) == len(y):
raise ValueError('the first two maketrans arguments must have equal length')
result = {}
for (xi, yi) in zip(x, y):
if len(xi) > 1:
raise ValueError('keys in translate table must be strings or integers')
result[ord(xi)] = ord(yi)
if z is not None:
for char in z:
result[ord(char)] = None
return result
def translate(self, table):
    """
    S.translate(table) -> str

    Return a copy of the string S, where all characters have been mapped
    through the given translation table, which must be a mapping of
    Unicode ordinals to Unicode ordinals, strings, or None.
    Unmapped characters are left untouched. Characters mapped to None
    are deleted.
    """
    pieces = []
    for ch in self:
        code = ord(ch)
        if code not in table:
            pieces.append(ch)       # unmapped: keep as-is
            continue
        mapped = table[code]
        if mapped is None:
            continue                # mapped to None: delete
        if isinstance(mapped, unicode):
            pieces.append(mapped)   # mapped to a string
        else:
            pieces.append(chr(mapped))  # mapped to an ordinal
    return ''.join(pieces)
def isprintable(self):
    # Py3 str API stub: backport not implemented yet.
    raise NotImplementedError('fixme')
def isidentifier(self):
    # Py3 str API stub: backport not implemented yet.
    raise NotImplementedError('fixme')
def format_map(self):
    # Py3 str API stub: backport not implemented yet.
    # NOTE(review): Py3's signature is format_map(mapping); the missing
    # parameter should be added when this is implemented.
    raise NotImplementedError('fixme')
__all__ = ['newstr']
| [
"adam@DESKTOP-TA850RC.localdomain"
] | adam@DESKTOP-TA850RC.localdomain |
a33bf04505947fb836d3130ee4d89ef2351d3905 | f7b76613d8388a8725d8957450ea65c4d0d0ef3a | /Overlayi2cCap/i2cStreamManager.py | d1697aa9d0a4b87c3d766564bd6cf3cffeeefedb | [] | no_license | AbhikChowdhury6/OpenHDS | b193bbf41e48ffe3d75a6f20dd6bc9e04c610307 | b7b9f64b2acce072c9879e250e4148ef5f89335b | refs/heads/master | 2020-03-26T10:12:01.617836 | 2018-08-16T21:22:20 | 2018-08-16T21:22:20 | 144,786,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | # thisfile is to grab all of the data on the i2c bus based on the the configuration file
#write it to files
# keep an open socket and stream it (via jttp https later)
import time
import smbus
import os
#import all of the devices on the bus for now
# I2C bus handle (bus 1 is the standard header on a Raspberry Pi).
bus = smbus.SMBus(1)
# extanciate and configure all of the objects
# NOTE(review): the five assignments below have no right-hand side, so this
# module is a SyntaxError as-is -- the sensor driver objects still need to
# be constructed here.  Each object is expected to expose REFRESH_RATE and
# READ() (see the polling loop below).
ECG =
GSR =
Breath =
MPU6050 =
MPU9250 =
# Epoch milliseconds, embedded in the log file names.
millis = int(round(time.time() * 1000))
# open files to log
# NOTE(review): every filename starts with "MPU6050" -- presumably a
# copy/paste slip; each sensor likely wants its own prefix (ECG, GSR, ...).
ECGf = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
GSRf = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
Breathf = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
MPU6050f = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
MPU9250f = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
divider = 0   # 10 ms tick counter; drives per-sensor sampling cadence
second = 0    # seconds elapsed within the current minute
starttime = time.time()
# Main polling loop: one iteration per 10 ms slot.  A sensor is sampled
# whenever the tick counter is a multiple of its REFRESH_RATE.
while True:
    # a bunch of if statements for each sensor
    if (divider % ECG.REFRESH_RATE == 0):
        ECGf.write(ECG.READ())
    if (divider % GSR.REFRESH_RATE == 0):
        GSRf.write(GSR.READ())
    if (divider % Breath.REFRESH_RATE == 0):
        Breathf.write(Breath.READ())
    if (divider % MPU6050.REFRESH_RATE == 0):
        MPU6050f.write(MPU6050.READ())
    if (divider % MPU9250.REFRESH_RATE == 0):
        MPU9250f.write(MPU9250.READ())
    if (divider % 100 == 0):  # 100 ticks x 10 ms = one second
        second = second + 1
        if (second % 60 == 0):  # once a minute: rotate all log files
            second = 0
            millis = int(round(time.time() * 1000))
            # close all files
            ECGf.close()
            GSRf.close()
            Breathf.close()
            MPU6050f.close()
            MPU9250f.close()
            # Open new files to log
            # NOTE(review): same "MPU6050" naming issue as above.
            ECGf = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
            GSRf = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
            Breathf = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
            MPU6050f = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
            MPU9250f = open("MPU6050" + str(time.time()) + "." + str(millis), "a+")
    divider = divider + 1
    # Sleep the remainder of the current 10 ms slot to limit drift.
    time.sleep(0.01 - ((time.time() - starttime) % 0.01))
| [
"Abhik.ch6@gmail.com"
] | Abhik.ch6@gmail.com |
366becbf38b2cbd59b48a9d5a492a6b43b6d2a89 | 00f32f70e633334d0862cf39dbe854147c85eda1 | /src/boncUtils/KafkaUtils/KafkaUtilsLocal.py | 6a8186824a844c3fede198d58f7e7ec24d580cef | [] | no_license | wanglikang/RetardedSpider | b377561338ab036a6a6272c6023c38df5c515a22 | 2303135dab359cd3975e4df4ece5d8df4d8f1584 | refs/heads/master | 2022-12-12T17:23:22.857578 | 2018-12-08T14:26:09 | 2018-12-08T14:26:09 | 154,911,464 | 0 | 0 | null | 2022-11-22T02:36:36 | 2018-10-27T01:23:15 | Python | UTF-8 | Python | false | false | 2,344 | py | # -*- coding: utf-8 -*-
import time
import random
from kafka import KafkaProducer
import datetime
import random
# Broker address; switch to the commented value to target the remote cluster.
bootstrap_servers = 'localhost:9092'
# bootstrap_servers = '172.16.32.125:9092'
# Single module-level producer shared by every send helper below.
producer = KafkaProducer(bootstrap_servers=bootstrap_servers)
# Common prefix for the per-device topics built by the send helpers.
topicpre = 'test-topic-'
def sendAMessage2(topics):
    """Publish one timestamped random reading to each topic id in `topics`.

    The record format is "<topic>,抽汽压力,<epoch_ms>,<random int>" and the
    message key is the epoch-millisecond timestamp.
    """
    now = time.time()
    stamp_ms = int(now * 1000)
    key = str(stamp_ms)
    human_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now))
    for topic_id in topics:
        payload = topic_id + ",抽汽压力," + str(stamp_ms) + "," + str(random.randint(1000, 10000))
        print("{} :{}".format(human_time, payload))
        producer.send(topicpre + str(topic_id), key=str.encode(key), value=str.encode(payload))
def sendAMessage(topics, datas, valuestr, i):
    """Publish row *i* of column *valuestr* from *datas* to every topic.

    `datas` is expected to be a mapping of column name -> sequence, with a
    "时间" (time) column of "%Y-%m-%d %H:%M:%S" strings -- TODO confirm
    against the caller.
    """
    ts = datas["时间"][i]
    stamp = int(time.mktime(time.strptime(ts, "%Y-%m-%d %H:%M:%S")) * 1000)
    # Bug fix: `stamp` is an int, so the old `str.encode(stamp)` raised
    # TypeError; encode its string form instead.
    key = str(stamp).encode()
    for atop in topics:
        value1 = atop + "," + valuestr + "," + str(stamp) + "," + datas[valuestr][i]
        print("{} :{}".format(ts, value1))
        producer.send(topicpre + str(atop), key=key, value=str.encode(value1))
def sendMessage2(producer, topic, key, value):
    """Encode `key` and `value` as UTF-8 bytes and publish them to `topic`."""
    encoded_key = str.encode(key)
    encoded_value = str.encode(value)
    producer.send(topic, key=encoded_key, value=encoded_value)
def sendManyMessage(n, topics, data1, data2, data3):
    # Send three messages per round, n rounds, 15 s apart, then close the
    # shared producer.
    # NOTE(review): sendAMessage requires (topics, datas, valuestr, i) but
    # is called here with only `topics`, so this raises TypeError on the
    # first iteration; data1/data2/data3 are never used.  Probably either
    # sendAMessage2(topics) or passing the data arguments through was
    # intended -- confirm before fixing.
    i = 0
    while i < n:
        sendAMessage(topics)
        sendAMessage(topics)
        sendAMessage(topics)
        time.sleep(15)
        i += 1
    producer.close()
# 使用kafka每秒发送一批数据,发送一小时的
# totalcount = 3 * 60 * 60
#
# topics = []
# for i in range(99999200,99999270):
# topics.append(str(i))
#
# def inittopic():
# for i in range(99999500, 99999512):
# topics.append(str(i))
# print(topics)
# sendManyMessage(totalcount,topics)
# i = 0
# while i < n:
# sendAMessage(topics)
# time.sleep(15)
# i += 1
# producer.close()
# Demo loop: every 5 seconds publish one record of six space-separated
# random ints, keyed by the epoch-millisecond timestamp.  1000 iterations
# at 5 s each means this runs for roughly 83 minutes.
mtopic = "kafka-topic-1"
for i in range(1000):
    atime = time.time()
    stampstr = str(int(atime * 1000))
    valuestr = ""
    # NOTE(review): this inner loop reuses (shadows) the outer loop
    # variable `i`; harmless here, but rename if the outer index is needed.
    for i in range(5):
        valuestr += str(random.randint(0, 1000)) + " "
    valuestr += str(random.randint(0, 1000))
    sendvalu = stampstr + " " + valuestr
    producer.send(topic=mtopic, key=str.encode(stampstr), value=str.encode(sendvalu))
    print("send key:{} value:{}".format(stampstr, sendvalu))
    time.sleep(5)
producer.close()
| [
"1024196018@qq.com"
] | 1024196018@qq.com |
7f47c6d8cca379d6f4c70480f6f1cdd8bf139e4b | dbdfbdd487e6aff7c09e7b091f058acf65ef9251 | /Matplotlib/2 day.py | 1dbc704b85955bd82273bfcd5fe1a7fc0c758ea6 | [] | no_license | Kevin0918/DataAnalyze_tutorial | 12795e9b2f8c57161938e3eab872076502190a9e | 3b16cbb78c7d1e2ce830acd7bfe1a776ddd87ae2 | refs/heads/master | 2020-12-02T18:11:39.007480 | 2017-07-18T11:43:29 | 2017-07-18T11:43:29 | 96,492,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,658 | py | # encoding = utf-8
# Many types of figures
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Demo gallery: draw several matplotlib plot types on one figure.

    Panels: scatter, bar, pie, polar, heatmap, 3-D scatter, and a filled
    contour ("hot map") spanning the bottom row.
    """
    # --- scatter -----------------------------------------------------
    fig = plt.figure()
    ax = fig.add_subplot(3, 3, 1)
    n = 128
    X = np.random.normal(0, 1, n)
    Y = np.random.normal(0, 1, n)
    T = np.arctan2(Y, X)  # angle of each point, used as its colour
    ax.scatter(X, Y, s=75, c=T, alpha=.5)  # s = marker size, alpha = opacity
    plt.xlim(-1.5, 1.5), plt.xticks([])
    plt.ylim(-1.5, 1.5), plt.yticks([])
    plt.axis()
    plt.title("Scatter")
    plt.xlabel("x")
    plt.ylabel("y")
    # --- bar ---------------------------------------------------------
    ax = fig.add_subplot(332)
    n = 10
    X = np.arange(10)
    Y1 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)
    Y2 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)
    ax.bar(X, +Y1, facecolor='#9999ff', edgecolor='white')
    ax.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
    for x, y in zip(X, Y1):
        plt.text(x + 0.4, y + 0.05, '%.2f' % y, ha='center', va='top')
    for x, y in zip(X, Y2):
        plt.text(x + 0.4, -y - 0.05, '%.2f' % y, ha='center', va='top')
    # --- pie ---------------------------------------------------------
    fig.add_subplot(333)
    n = 20
    Z = np.ones(n)
    Z[-1] *= 2
    # explode = distance of each wedge from the centre
    plt.pie(Z, explode=Z * .05, colors=['%f' % (i / float(n)) for i in range(n)],
            labels=['%.2f' % (i / float(n)) for i in range(n)])
    plt.gca().set_aspect('equal')  # keep the pie circular
    plt.xticks([]), plt.yticks([])
    # --- polar -------------------------------------------------------
    fig.add_subplot(334, polar=True)
    n = 20
    theta = np.arange(0.0, 2 * np.pi, 2 * np.pi / n)
    radii = 10 * np.random.rand(n)
    plt.polar(theta, radii)
    # --- heatmap -----------------------------------------------------
    from matplotlib import cm  # colormaps
    fig.add_subplot(335)
    data = np.random.rand(3, 3)
    colormap = cm.Blues
    # Renamed from `map`, which shadowed the builtin.
    heat_img = plt.imshow(data, interpolation='nearest', cmap=colormap, aspect='auto', vmin=0,
                          vmax=1)
    # --- 3D scatter --------------------------------------------------
    from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
    # Bug fix: the keyword was misspelled `projecton`, so no 3-D axes was
    # ever created (add_subplot rejects the unknown keyword).
    ax = fig.add_subplot(336, projection="3d")
    ax.scatter(1, 1, 3, s=100)
    # --- filled contour ("hot map"), bottom row ----------------------
    fig.add_subplot(313)

    def f(x, y):
        return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)

    n = 256
    x = np.linspace(-3, 3, n)
    y = np.linspace(-3, 3, n)
    X, Y = np.meshgrid(x, y)
    plt.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=plt.cm.hot)
    plt.show()


if __name__ == '__main__':
    main()
| [
"2473998520@qq.com"
] | 2473998520@qq.com |
6874ab1561fcbefe2edd109a8e4cc9a0e70cb1dd | 8a6104c025242590ac659ae9dd7488bc9faac053 | /RutishauserLabtoNWB/events/newolddelay/python/analysis/behavior.py | 9e9ba6c5863076a4a27dcf860a95dda24f562574 | [
"BSD-3-Clause"
] | permissive | sumner15/recogmem-release-NWB | 23a40f1a75e62a47e6071bd6ac168f70dbb3256e | e21b8433622dfa04d46779287e39bd0ee5e6255b | refs/heads/master | 2022-12-15T10:46:15.019037 | 2020-05-29T01:29:02 | 2020-05-29T01:29:02 | 293,928,673 | 0 | 0 | NOASSERTION | 2020-09-08T21:12:54 | 2020-09-08T21:12:53 | null | UTF-8 | Python | false | false | 14,682 | py | import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper
import scipy.stats as stats
import logging
import matplotlib.pyplot as plt
import numpy as np
def plot_behavioral_graphs(*argv):
    """
    Input: Variable number of NWB sessions in the '~~~.nwb' format
    Output:
        Behavioral analysis, plotting six graphs:
        1. Probability of responses
        2. ROC curves for different sessions
        3. Overall performance
        4. Histogram of AUC
        5. Accuracy over confidence high low
        6. Confidence level over correctness of responses
    """
    # Assign filenames from argument(s)
    # NOTE(review): this keeps only the LAST positional argument as the
    # file list; multiple argv entries are silently dropped -- confirm
    # callers always pass a single list.
    for args in argv:
        filenames = args
    n = 0  # number of sessions that passed the length check
    # make the subplots: 2 x 3 grid, one panel per analysis
    fig, axs = plt.subplots(nrows=2, ncols=3, sharex=False, sharey=False, figsize=(20, 10))
    # Placeholders: per-session probability of each confidence response
    # (1-6), separately for old and new stimuli.
    response_1_old = []
    response_2_old = []
    response_3_old = []
    response_4_old = []
    response_5_old = []
    response_6_old = []
    response_1_new = []
    response_2_new = []
    response_3_new = []
    response_4_new = []
    response_5_new = []
    response_6_new = []
    # Placeholder for overall performance ([false-alarm rate, hit rate] per session)
    all_performances = []
    # Placeholder for aucs
    all_auc = []
    # Placeholders for accuracies at each confidence level
    accuracies_high = []
    accuracies_low = []
    accuracies_all = []
    # Placeholder for mean confidences over correctness ([correct, incorrect] per session)
    m_conf_all = []
    for filename in filenames:
        try:
            print('processing file: ', filename)
            nwbfile = helper.read(str(filename))
        except ValueError as e:
            print('Problem opening the file: ' + str(e))
            logging.warning('Error File: ' + filename + ':' + str(e))
            continue
        except OSError as e:
            print('Problem opening the file:' + str(e))
            logging.warning('Error File ' + filename + ':' + str(e))
            continue
        recog_response = helper.extract_recog_responses(nwbfile)
        ground_truth = helper.extract_new_old_label(nwbfile)
        if len(recog_response) != len(ground_truth):
            print('response length not equal to ground truth, skipped this session: {}'.format(filename))
            continue
        else:
            recog_response_old = recog_response[ground_truth == 1]
            n = n + 1
            # Calculate the percentage of each response (old stimuli)
            response_1_old.append(np.sum(recog_response_old == 1) / len(recog_response_old))
            response_2_old.append(np.sum(recog_response_old == 2) / len(recog_response_old))
            response_3_old.append(np.sum(recog_response_old == 3) / len(recog_response_old))
            response_4_old.append(np.sum(recog_response_old == 4) / len(recog_response_old))
            response_5_old.append(np.sum(recog_response_old == 5) / len(recog_response_old))
            response_6_old.append(np.sum(recog_response_old == 6) / len(recog_response_old))
            recog_response_new = recog_response[ground_truth == 0]
            response_1_new.append(np.sum(recog_response_new == 1) / len(recog_response_new))
            response_2_new.append(np.sum(recog_response_new == 2) / len(recog_response_new))
            response_3_new.append(np.sum(recog_response_new == 3) / len(recog_response_new))
            response_4_new.append(np.sum(recog_response_new == 4) / len(recog_response_new))
            response_5_new.append(np.sum(recog_response_new == 5) / len(recog_response_new))
            response_6_new.append(np.sum(recog_response_new == 6) / len(recog_response_new))
            # Calculate the cumulative d and plot the cumulative ROC curve
            stats_all = helper.cal_cumulative_d(nwbfile)
            x = stats_all[0:5, 4]  # false-alarm rates
            y = stats_all[0:5, 3]  # hit rates
            axs[0, 1].plot(x, y, marker='.', color='grey', alpha=0.5)
            axs[0, 1].set_ylim(0, 1)
            axs[0, 1].set_xlim(0, 1)
            # Get the overall performance
            all_performances.append([stats_all[2, 4], stats_all[2, 3]])
            # Calculate the auc
            auc = helper.cal_auc(stats_all)
            all_auc.append(auc)
            # Check if this session should be included in the accuracies over high low section
            is_included = helper.check_inclusion(recog_response, auc)
            # Calculate the accuracies for high low confidence
            if is_included:
                split_status, split_mode, ind_TP_high, ind_TP_low, ind_FP_high, ind_FP_low, ind_TN_high, \
                    ind_TN_low, ind_FN_high, ind_FN_low, n_response = helper.dynamic_split(recog_response, ground_truth)
                nr_TN_high = len(ind_TN_high[0])
                nr_TP_high = len(ind_TP_high[0])
                # NOTE(review): nr_TN_all is never used, and the next
                # assignment to nr_TN_low is immediately overwritten two
                # lines below -- it was presumably meant to be nr_TP_all.
                nr_TN_all = len(ind_TN_high[0]) + len(ind_TN_low[0])
                nr_TN_low = len(ind_TP_high[0]) + len(ind_TP_low[0])
                nr_TP_low = len(ind_TP_low[0])
                nr_TN_low = len(ind_TN_low[0])
                nr_high_response = len(ind_TN_high[0]) + len(ind_TP_high[0]) + len(ind_FN_high[0]) + len(ind_FP_high[0])
                nr_low_response = len(ind_TN_low[0]) + len(ind_TP_low[0]) + len(ind_FN_low[0]) + len(ind_FP_low[0])
                per_accuracy_high = (nr_TN_high + nr_TP_high) / nr_high_response
                per_accuracy_low = (nr_TN_low + nr_TP_low) / nr_low_response
                # NOTE(review): "all" accuracy mixes low-confidence TNs
                # with high-confidence TPs -- looks like it should be all
                # correct responses; confirm intended definition.
                per_accuracy_all = (nr_TN_low + nr_TP_high) / n_response
                accuracies_high.append(per_accuracy_high * 100)
                accuracies_low.append(per_accuracy_low * 100)
                accuracies_all.append(per_accuracy_all * 100)
            # get correct/incorrect indexes
            correct_inds, incorrect_inds = helper.correct_incorrect_indexes(recog_response, ground_truth)
            # remap response (1=high confidence ... 3=guess; see axis label below)
            remapped_response = helper.remap_response(recog_response)
            # Get the mean confidence for correctness
            m_conf_all.append([np.mean(remapped_response[correct_inds]), np.mean(remapped_response[incorrect_inds])])
    # Plot the percentage responses (panel [0, 0]); error bars are SEM.
    response_old = np.asarray([response_1_old, response_2_old, response_3_old, response_4_old,
                               response_5_old, response_6_old])
    response_new = np.asarray([response_1_new, response_2_new, response_3_new, response_4_new,
                               response_5_new, response_6_new])
    response_percentage_old = np.mean(response_old, axis=1)
    std_old = np.std(response_old, axis=1)
    se_old = std_old / np.sqrt(n)
    response_percentage_new = np.mean(response_new, axis=1)
    std_new = np.std(response_new, axis=1)
    se_new = std_new / np.sqrt(n)
    x = [i for i in range(1, 7, 1)]
    axs[0, 0].errorbar(x, response_percentage_old, yerr=se_old, color='blue', label='old stimuli')
    axs[0, 0].errorbar(x, response_percentage_new, yerr=se_new, color='red', label='new stimuli')
    axs[0, 0].legend()
    axs[0, 0].set_xlabel('Confidence')
    axs[0, 0].set_ylabel('Probability of Response')
    axs[0, 0].set_title('n=' + str(len(filenames)) + ' sessions')
    # Other settings for cumulative ROC (panel [0, 1])
    axs[0, 1].plot([0, 1], [0, 1], color='black', alpha=0.7)
    axs[0, 1].set_xlabel('false alarm rate')
    axs[0, 1].set_ylabel('hit rate')
    axs[0, 1].set_title('average roc')
    # Calculate the average and overall performance
    avg_performance = np.average(all_performances, axis=0)
    std_performance = np.std(all_performances, axis=0)
    # Plot the overall performance (panel [0, 2]); errorbar args are
    # positional (x, y, yerr, xerr).
    for performance in all_performances:
        axs[0, 2].plot(performance[0], performance[1], marker='.', color='grey', alpha=0.6)
    axs[0, 2].set_ylim(0, 1)
    axs[0, 2].set_xlim(0, 1)
    axs[0, 2].plot([0, 1], [0, 1], color='black', alpha=0.7)
    axs[0, 2].errorbar(avg_performance[0], avg_performance[1], std_performance[1], std_performance[0])
    axs[0, 2].set_xlabel('false alarm rate')
    axs[0, 2].set_ylabel('hit rate')
    axs[0, 2].set_title('Overall Performance mTP=' + str(avg_performance[0]) + ' mFP=' + str(avg_performance[1]))
    # Plot AUC histogram (panel [1, 0])
    m_auc = np.mean(all_auc)
    axs[1, 0].hist(all_auc, 15, histtype='bar')
    axs[1, 0].set_xlim(0.5, 1)
    axs[1, 0].set_xlabel('AUC')
    axs[1, 0].set_ylabel('nr of subjects')
    axs[1, 0].set_title('AUC m=' + str(m_auc))
    # Plot the accuracies per confidence level (panel [1, 1]);
    # p1/p2 are one-sample t-tests against 50% chance.
    p1 = stats.ttest_1samp(accuracies_high, 50)[1]
    p2 = stats.ttest_1samp(accuracies_low, 50)[1]
    x_axis_label_high = 'high p=' + str(p1)
    x_axis_label_low = 'low p=' + str(p2)
    x_axis = [x_axis_label_high, x_axis_label_low]
    for i in range(len(accuracies_high)):
        axs[1, 1].plot(x_axis, [accuracies_high[i], accuracies_low[i]], marker='o', alpha=0.5)
    axs[1, 1].plot(x_axis, [50, 50], color='black')
    axs[1, 1].set_ylim([0, 100])
    tstat, p_val = stats.ttest_ind(accuracies_high, accuracies_low, equal_var=False)
    axs[1, 1].set_title('p=' + str(p_val))
    axs[1, 1].set_xlabel('confidence p vs. 50%')
    axs[1, 1].set_ylabel('accuracy % correct')
    # Mean confidence by correctness (panel [1, 2]); Welch t-test between
    # correct and incorrect columns.  NOTE: `n` is reused here for the
    # number of sessions contributing confidence data.
    m_conf_all = np.asarray(m_conf_all)
    m_conf = np.mean(m_conf_all, axis=0)
    std_conf = np.std(m_conf_all, axis=0)
    n = m_conf_all.shape[0]
    se_conf = std_conf / np.sqrt(n)
    tstat, p_val = stats.ttest_ind(m_conf_all[:, 0], m_conf_all[:, 1], equal_var=False)
    axs[1, 2].bar(['correct', 'incorrect'], m_conf, yerr=se_conf)
    axs[1, 2].set_ylabel('confidence 1=high, 3=guess')
    axs[1, 2].set_title('pT2=' + str(p_val) + ' n=' + str(n))
    plt.show()
# Functions that plot the graphs seperately.
def plot_prob_response():
    """Plot the probability of each confidence response (1-6) for old vs.
    new stimuli, averaged over all sessions found under ../data."""
    filenames = helper.get_nwbfile_names("../data")
    confidence_levels = list(range(1, 7))
    mean_old, std_old, mean_new, std_new = helper.extract_probability_response(filenames)
    plt.errorbar(confidence_levels, mean_old, yerr=std_old, color='blue', label='old stimuli')
    plt.errorbar(confidence_levels, mean_new, yerr=std_new, color='red', label='new stimuli')
    plt.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
    plt.xlabel('Confidence')
    plt.ylabel('Probability of Response')
    plt.title('n=' + str(len(filenames)) + ' sessions')
    plt.show()
def plot_cumulative_roc():
    """
    Plot the cumulative ROC curve of every session found under ../data.
    """
    # Bug fix: the helpers are imported under the `helper` namespace, so
    # the previous bare calls (get_nwbfile_names, read, cal_cumulative_d)
    # raised NameError at runtime.
    filenames = helper.get_nwbfile_names("../data")
    for filename in filenames:
        nwbfile = helper.read(filename)
        stats_all = helper.cal_cumulative_d(nwbfile)
        x = stats_all[0:5, 4]  # false-alarm rates
        y = stats_all[0:5, 3]  # hit rates
        plt.plot(x, y, marker='.', color='grey', alpha=0.5)
    plt.ylim(0, 1)
    plt.xlim(0, 1)
    plt.plot([0, 1], [0, 1], color='black', alpha=0.7)  # chance diagonal
    plt.xlabel('false alarm rate')
    plt.ylabel('hit rate')
    plt.title('average roc')
    plt.show()
def plot_overall_performance():
    """Scatter each session's overall (false-alarm, hit) pair with the
    across-session mean +/- SD, for every session under ../data."""
    nwb_files = helper.get_nwbfile_names("../data")
    all_performances = []
    for nwb_file in nwb_files:
        session = helper.read(nwb_file)
        cumulative_stats = helper.cal_cumulative_d(session)
        all_performances.append([cumulative_stats[2, 4], cumulative_stats[2, 3]])
    avg_performance = np.average(all_performances, axis=0)
    std_performance = np.std(all_performances, axis=0)
    for fp_rate, hit_rate in all_performances:
        plt.plot(fp_rate, hit_rate, marker='.', color='grey', alpha=0.6)
    plt.ylim(0, 1)
    plt.xlim(0, 1)
    plt.plot([0, 1], [0, 1], color='black', alpha=0.7)  # chance diagonal
    # Positional errorbar args: (x, y, yerr, xerr).
    plt.errorbar(avg_performance[0], avg_performance[1], std_performance[1], std_performance[0])
    plt.xlabel('false alarm rate')
    plt.ylabel('hit rate')
    plt.title('Overall Performance mTP=' + str(avg_performance[0]) + ' mFP=' + str(avg_performance[1]))
    plt.show()
def plot_auc():
    """
    Plot a histogram of the per-session AUC values found under ../data.
    """
    # Bug fix: use the `helper` namespace (the bare names raised
    # NameError); also stop shadowing the `filenames` list with the
    # loop variable.
    filenames = helper.get_nwbfile_names("../data")
    all_auc = []
    for filename in filenames:
        nwbfile = helper.read(filename)
        stats_all = helper.cal_cumulative_d(nwbfile)
        auc = helper.cal_auc(stats_all)
        all_auc.append(auc)
    m_auc = np.mean(all_auc)
    plt.hist(all_auc, 15, histtype='bar')
    plt.xlim(0, 1)
    plt.xlabel('AUC')
    plt.ylabel('nr of subjects')
    plt.title('AUC m=' + str(m_auc))
    plt.show()
def plot_confidence_accuracy():
    """
    Plot accuracy split by high vs. low confidence, one line per session,
    with one-sample t-tests against 50% chance on each axis label.
    """
    # Bug fix: the helpers are imported under the `helper` namespace, so
    # the previous bare calls (get_nwbfile_names, read, ...) raised
    # NameError at runtime.
    filenames = helper.get_nwbfile_names("../data")
    accuracies_high = []
    accuracies_low = []
    accuracies_all = []
    for filename in filenames:
        nwbfile = helper.read(filename)
        recog_response = helper.extract_recog_responses(nwbfile)
        ground_truth = helper.extract_new_old_label(nwbfile)
        split_status, split_mode, ind_TP_high, ind_TP_low, ind_FP_high, ind_FP_low, ind_TN_high, \
            ind_TN_low, ind_FN_high, ind_FN_low, n_response = helper.dynamic_split(recog_response, ground_truth)
        # Correct-response counts per confidence level.  (Two dead
        # assignments removed: an unused nr_TN_all, and an nr_TN_low value
        # that was immediately overwritten.)
        nr_TN_high = len(ind_TN_high[0])
        nr_TP_high = len(ind_TP_high[0])
        nr_TP_low = len(ind_TP_low[0])
        nr_TN_low = len(ind_TN_low[0])
        nr_high_response = len(ind_TN_high[0]) + len(ind_TP_high[0]) + len(ind_FN_high[0]) + len(ind_FP_high[0])
        nr_low_response = len(ind_TN_low[0]) + len(ind_TP_low[0]) + len(ind_FN_low[0]) + len(ind_FP_low[0])
        per_accuracy_high = (nr_TN_high + nr_TP_high) / nr_high_response
        per_accuracy_low = (nr_TN_low + nr_TP_low) / nr_low_response
        # NOTE(review): "all" accuracy mixes low-confidence TNs with
        # high-confidence TPs -- looks like it should be all correct
        # responses; kept as-is pending confirmation.
        per_accuracy_all = (nr_TN_low + nr_TP_high) / n_response
        accuracies_high.append(per_accuracy_high * 100)
        accuracies_low.append(per_accuracy_low * 100)
        accuracies_all.append(per_accuracy_all * 100)
    p1 = stats.ttest_1samp(accuracies_high, 50)[1]
    p2 = stats.ttest_1samp(accuracies_low, 50)[1]
    x_axis_label_high = 'high p=' + str(p1)
    x_axis_label_low = 'low p=' + str(p2)
    x_axis = [x_axis_label_high, x_axis_label_low]
    for i in range(len(accuracies_high)):
        plt.plot(x_axis, [accuracies_high[i], accuracies_low[i]], marker='o')
    plt.plot(x_axis, [50, 50], color='black')  # chance level
    plt.ylim([0, 100])
    tstat, p_val = stats.ttest_ind(accuracies_high, accuracies_low, equal_var=False)
    plt.title('p=' + str(p_val))
    plt.xlabel('confidence p vs. 50%')
    plt.ylabel('accuracy % correct')
    plt.show()
| [
"31257907+nandchandravadia@users.noreply.github.com"
] | 31257907+nandchandravadia@users.noreply.github.com |
e3de59ab0a628f70e1187295bc11caee29962f62 | 308e318d1fd56520b1cfe093a5436043c72703db | /medicalcase/urls.py | 7fbff357f5082f6a7d17dabd49a02a808157e9fd | [] | no_license | NicholasTurner23/360MedNet-1 | b35e2b79712cd5568054e697298ad02c368f8853 | fb3939031c455c62c889383f73611b5b6845d8dd | refs/heads/master | 2021-06-18T09:57:32.656789 | 2017-06-17T22:33:32 | 2017-06-17T22:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | from django.conf.urls import url
from medicalcase import views as medicalcase_views
# Routes for the medical-case app: create, list, and per-object detail,
# all backed by class-based views in medicalcase.views.
urlpatterns = [
    # POST a new medical case.
    url(r'^post/medical_case/$', medicalcase_views.MedicalCaseCreate.as_view(), name='medical-case'),
    # List all medical cases.
    url(r'^medical_cases/$', medicalcase_views.MedicalCaseList.as_view(), name='medical_cases'),
    # Detail view for one medical case, selected by numeric primary key.
    url(r'^medical_case/(?P<pk>[0-9]+)/detail/$', medicalcase_views.MedicalCaseDetail.as_view(),
        name='medical_case-detail'),
]
| [
"faithnassiwa@gmail.com"
] | faithnassiwa@gmail.com |
4c6813a647ef95ad18309558db0dffdaeadc0528 | 4f25e4dec97be389529ddc56278703e0c1b6b6e0 | /books/migrations/0001_initial.py | 7bccea47de3a4f1546741ae330e066082f9f2893 | [] | no_license | yogae/django-test | a88621a8675abc70550d2a251d1ab7893de5ccae | 8238969f5bc8560849577b562ce6b4a1ea7edf45 | refs/heads/master | 2020-04-08T14:05:29.977001 | 2018-11-28T00:55:52 | 2018-11-28T00:55:52 | 159,421,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # Generated by Django 2.1.3 on 2018-11-22 23:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the books app: Author, Book, Publisher.

    Auto-generated by Django 2.1.3; evolve the schema with new migrations
    rather than editing these operations in place.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('salutation', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('publication_date', models.DateField()),
                # Many-to-many: a book can have several authors.
                ('authors', models.ManyToManyField(to='books.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Publisher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=100)),
                ('website', models.URLField()),
            ],
        ),
        # Added separately because Publisher is created after Book above.
        migrations.AddField(
            model_name='book',
            name='publisher',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.Publisher'),
        ),
    ]
| [
""
] | |
1f2f1d293faf734d8ce56d68a50cd959e318ddc9 | 60386b8c5513f2ba02e73b9db951e3eb636ea7c1 | /PythonExercicios/ex005.py | 13b615dbe187d55a3f34418345c0042fbfe79675 | [] | no_license | cesarsst/PythonProjects | 4cdc9c02ec034768d51def3a7cd2622d4456eede | c5cf38abe1542330a1ba364215945923b9993edd | refs/heads/master | 2020-04-12T10:26:22.750701 | 2019-04-25T00:08:00 | 2019-04-25T00:08:00 | 162,429,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | #Faça um programa que leia um numero e mostre na tela o seu sucessor e seu antecessor
n = int(input('Digite um numero: '))
ant = n-1
suc = n+1
print('O antecessor de {} é {} e o sucessor é {}'.format(n, ant, suc))
| [
"32494389+cesarsst@users.noreply.github.com"
] | 32494389+cesarsst@users.noreply.github.com |
768dcf4b3e0a31bf24e77e7aec2022d14e1cc3a0 | a213af790886bc804fa092bef63c6f4d90d37b6d | /luffycity后端/api/serializers/course.py | a6b696f8199808beff63c27859165c4b053596b3 | [] | no_license | naive9527/luffycity-1 | 47bfba70a7be33b76f05b4df14183efca5a885c2 | 730708d576949aed0ebce0dc202f5fd945b4fd67 | refs/heads/master | 2021-10-19T15:26:59.377464 | 2019-02-22T05:43:01 | 2019-02-22T05:43:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | #!/usr/bin/env python
# _*_ encoding:utf-8 _*_
__author__ = '于sir'
__date__ = '2019/2/18 14:17'
from rest_framework import serializers
from api import models
class CourseSerializer(serializers.ModelSerializer):
    """List serializer for Course: choice fields are rendered as their
    display labels, category fields as names, and the optional degree
    course as its name (or the raw null value when absent)."""
    # Human-readable labels for the model's choice fields.
    course_type = serializers.CharField(source='get_course_type_display')
    level = serializers.CharField(source='get_level_display')
    status = serializers.CharField(source='get_status_display')
    sub_category = serializers.CharField(source='sub_category.name')            # direct sub-category
    cour_category = serializers.CharField(source='sub_category.category.name')  # parent category
    degree_course = serializers.SerializerMethodField()

    class Meta:
        model = models.Course
        fields = [
            'id', 'name', 'course_img', 'cour_category', 'sub_category',
            'course_type', 'degree_course', 'brief', 'level', 'pub_date',
            'period', 'order', 'attachment_path', 'status', 'template_id',
        ]

    def get_degree_course(self, obj):
        """Name of the linked degree course, or the (null) FK value itself
        when no degree course is attached."""
        linked = obj.degree_course
        return linked.name if linked else linked
class CourseDetailSerializer(serializers.ModelSerializer):
    """Detail serializer for CourseDetail: related objects (teachers,
    outline, FAQ, chapters, sections, homework, prices) are flattened
    into plain lists of dicts."""
    course = serializers.CharField(source='course.name')
    recommend_courses = serializers.SerializerMethodField()
    teachers = serializers.SerializerMethodField()
    course_outline = serializers.SerializerMethodField()        # course outline entries
    often_asked_question = serializers.SerializerMethodField()  # FAQ entries
    course_chapters = serializers.SerializerMethodField()       # chapter metadata
    course_sections = serializers.SerializerMethodField()       # sections across all chapters
    homework = serializers.SerializerMethodField()              # homework across all chapters
    price_policy = serializers.SerializerMethodField()          # price options

    class Meta:
        model = models.CourseDetail
        fields = ['id', 'course', 'hours', 'course_slogan', 'video_brief_link', 'why_study',
                  'what_to_study_brief', 'career_improvement', 'prerequisite', 'recommend_courses',
                  'teachers', 'course_outline', 'often_asked_question', 'course_chapters',
                  'course_sections', 'homework', 'price_policy']

    def get_recommend_courses(self, obj):
        """id/name pairs for each recommended course."""
        return [{'id': course.id, 'name': course.name}
                for course in obj.recommend_courses.all()]

    def get_teachers(self, obj):
        """id/name pairs for each teacher of this course."""
        return [{'id': teacher.id, 'name': teacher.name}
                for teacher in obj.teachers.all()]

    def get_course_outline(self, obj):
        """Ordered outline entries attached to this course detail."""
        return [{'order': entry.order, 'title': entry.title, 'content': entry.content}
                for entry in obj.courseoutline_set.all()]

    def get_often_asked_question(self, obj):
        """Frequently asked questions attached to the course."""
        return [{'id': qa.id, 'question': qa.question, 'answer': qa.answer}
                for qa in obj.course.asked_question.all()]

    def get_course_chapters(self, obj):
        """Chapter metadata for the course."""
        return [{'chapter': chapter.chapter, 'name': chapter.name, 'summary': chapter.summary,
                 'pub_date': chapter.pub_date}
                for chapter in obj.course.coursechapters.all()]

    def get_course_sections(self, obj):
        """All sections of every chapter, flattened into one list."""
        return [{'name': section.name,
                 'order': section.order,
                 'section_type': section.get_section_type_display(),
                 'section_link': section.section_link,
                 'video_time': section.video_time,
                 'pub_date': section.pub_date,
                 'free_trail': section.free_trail}
                for chapter in obj.course.coursechapters.all()
                for section in chapter.coursesections.all()]

    def get_homework(self, obj):
        """All homework of every chapter, flattened into one list."""
        return [{'title': hw.title,
                 'order': hw.order,
                 'homework_type': hw.get_homework_type_display(),
                 'requirement': hw.requirement,
                 'threshold': hw.threshold,
                 'recommend_period': hw.recommend_period,
                 'scholarship_value': hw.scholarship_value,
                 'note': hw.note,
                 'enabled': hw.enabled}
                for chapter in obj.course.coursechapters.all()
                for hw in chapter.homework_set.all()]

    def get_price_policy(self, obj):
        """Price options (validity period + price) for the course."""
        return [{'valid_period': policy.valid_period, 'price': policy.price}
                for policy in obj.course.price_policy.all()]
"xinxinainixd@qq.com"
] | xinxinainixd@qq.com |
24b8b0d128b1755bfce972e35b56b2635439d049 | 927eb86f9d2b0466f580c08ec84e6a13604ba6f8 | /worldcupapp/views/media.py | 7d48a519f7bb1ae79aa49c2624f70fec9e7f0476 | [] | no_license | by-Exist/piku_backend_api | 61ee1aa0526d29d735f0fd8c0cf0a69d2a01abe4 | 5dfc4a3fc6cb842e2dc16d5af5b6fd7dea609b4f | refs/heads/main | 2023-06-11T21:10:51.652924 | 2021-07-07T14:35:33 | 2021-07-07T14:35:33 | 338,810,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,041 | py | from itertools import chain
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from worldcupapp.models.worldcup import Worldcup
from rest_framework import mixins, viewsets, response, status
from rest_framework.decorators import action
from drf_spectacular.utils import (
PolymorphicProxySerializer,
extend_schema_view,
extend_schema,
)
from drf_patchonly_mixin import mixins as dpm_mixins
from ..models import Media, TextMedia, ImageMedia, GifMedia, VideoMedia
from ..policys import MediaViewSetAccessPolicy
from ..serializers import (
GifMediaDetailSerializer,
GifMediaListSerializer,
ImageMediaDetailSerializer,
ImageMediaListSerializer,
TextMediaDetailSerializer,
TextMediaListSerializer,
VideoMediaDetailSerializer,
VideoMediaListSerializer,
MediaCountListSerializer,
)
class MediaViewSet(
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    dpm_mixins.PatchOnlyMixin,
    mixins.DestroyModelMixin,
    viewsets.GenericViewSet,
):
    """CRUD endpoints for the media items of one worldcup.

    The concrete model and serializer are chosen at runtime from the parent
    worldcup's ``media_type`` ("Text" | "Image" | "Gif" | "Video").
    """
    # Serializers used for detail-shaped responses (partial_update).
    detail_serializer_class = {
        "Text": TextMediaDetailSerializer,
        "Image": ImageMediaDetailSerializer,
        "Gif": GifMediaDetailSerializer,
        "Video": VideoMediaDetailSerializer,
    }
    # Serializers used for list/create.
    list_serializer_class = {
        "Text": TextMediaListSerializer,
        "Image": ImageMediaListSerializer,
        "Gif": GifMediaListSerializer,
        "Video": VideoMediaListSerializer,
    }
    permission_classes = [MediaViewSetAccessPolicy]
    @cached_property
    def parent_object(self):
        # Worldcup owning the media; cached so repeated access within one
        # request hits the database only once.
        return get_object_or_404(Worldcup, pk=self.kwargs["worldcup_pk"])
    def get_queryset(self):
        # Lazily build and memoize the queryset for the media subclass that
        # matches the parent worldcup's media_type.
        if self.queryset:
            return self.queryset
        media_type_model_mapping = {
            "Text": TextMedia,
            "Image": ImageMedia,
            "Gif": GifMedia,
            "Video": VideoMedia,
        }
        model_cls = media_type_model_mapping[self.parent_object.media_type]
        self.queryset = model_cls.objects.select_related("worldcup").filter(
            worldcup=self.parent_object
        )
        return self.queryset
    def get_serializer_class(self):
        # counts uses its own request schema; list/create use the list shape;
        # everything else uses the detail shape.
        if self.action == "counts":
            return MediaCountListSerializer
        if self.action in ("create", "list"):
            return self.list_serializer_class[self.parent_object.media_type]
        return self.detail_serializer_class[self.parent_object.media_type]
    @action(methods=["patch"], detail=False)
    def counts(self, request, *args, **kwargs):
        """Bulk-update win/view/choice counters when a game finishes.

        NOTE(review): zero values are skipped by the truthiness checks, and
        `medias.get(pk=...)` fetches fresh instances that are not the rows
        later passed to `bulk_update` — this assumes the `*_count_up` methods
        persist their changes themselves; confirm against the Media model.
        """
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            medias = self.get_queryset()
            for counts_data in serializer.validated_data["counts"]:
                media_id = counts_data["media_id"]
                if up_win_count := counts_data.get("up_win_count", None):
                    medias.get(pk=media_id).win_count_up(up_win_count)
                if up_view_count := counts_data.get("up_view_count", None):
                    medias.get(pk=media_id).view_count_up(up_view_count)
                if up_choice_count := counts_data.get("up_choice_count", None):
                    medias.get(pk=media_id).choice_count_up(up_choice_count)
            Media.objects.bulk_update(
                medias, ["win_count", "view_count", "choice_count"]
            )
            return response.Response(status=status.HTTP_204_NO_CONTENT)
        return response.Response(
            data=serializer.errors, status=status.HTTP_400_BAD_REQUEST
        )
# OpenAPI-only proxy merging the four per-media-type list serializers into a
# single polymorphic schema component (used for list/create documentation).
MediaListPolymorphicSerializer = PolymorphicProxySerializer(
    component_name="MediaListPolymorphic",
    serializers=[
        TextMediaListSerializer,
        ImageMediaListSerializer,
        GifMediaListSerializer,
        VideoMediaListSerializer,
    ],
    resource_type_field_name=None,
)
# OpenAPI-only proxy for the detail-shaped serializers (partial_update docs).
MediaDetailPolymorphicSerializer = PolymorphicProxySerializer(
    component_name="MediaDetailPolymorphic",
    serializers=[
        TextMediaDetailSerializer,
        ImageMediaDetailSerializer,
        GifMediaDetailSerializer,
        VideoMediaDetailSerializer,
    ],
    resource_type_field_name=None,
)
# Attach drf-spectacular OpenAPI documentation to every MediaViewSet action;
# the decorator is applied functionally and the class is rebound in place.
MediaViewSet = extend_schema_view(
    list=extend_schema(
        description="\n\n".join(
            [
                "## [ Description ]",
                "- Worldcup's Media List",
                "## [ Permission ]",
                "- AllowAny",
            ]
        ),
        responses=MediaListPolymorphicSerializer,
    ),
    create=extend_schema(
        description="\n\n".join(
            [
                "## [ Description ]",
                "- Worldcup's Media Create",
                "## [ Permission ]",
                "- IsWorldcupCreator",
            ]
        ),
        request=MediaListPolymorphicSerializer,
        responses=MediaListPolymorphicSerializer,
    ),
    partial_update=extend_schema(
        description="\n\n".join(
            [
                "## [ Description ]",
                "- Worldcup's Media Partial Update",
                "## [ Permission ]",
                "- IsWorldcupCreator",
            ]
        ),
        request=MediaDetailPolymorphicSerializer,
        responses=MediaDetailPolymorphicSerializer,
    ),
    destroy=extend_schema(
        description="\n\n".join(
            [
                "## [ Description ]",
                "- Worldcup's Media Destroy",
                "## [ Permission ]",
                "- IsWorldcupCreator",
            ]
        ),
    ),
    counts=extend_schema(
        description="\n\n".join(
            [
                "## [ Description ]",
                "- Media's counts Update",
                "- 게임이 종료될 때 사용된 미디어들의 정보 업데이트에 사용",
                "- media의 win_count, view_count, choice_count를 대상으로 함",
                "## [ Permission ]",
                "- AllowAny",
            ]
        ),
        responses={
            200: None,
            400: None,
        },
    ),
)(MediaViewSet)
| [
"bolk9652@naver.com"
] | bolk9652@naver.com |
0fb49387c4236353b3a28aca712ad1f5c471a393 | c9458b251f12bae2526217446eb228971b7c0dae | /scrapylogin/items.py | 9f0ece86728950d50c7d3105ca8ab83b5fe3576a | [] | no_license | batulu12/scrapylogin | f36c28be41609c0ad41ebd1fecf8893c017e8452 | dd71ac3f8134de22cdf09a04ecb591cba4b0ab2e | refs/heads/master | 2021-01-01T16:50:29.800333 | 2015-08-31T01:16:35 | 2015-08-31T01:16:35 | 26,672,014 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,645 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class GithubUserItem(Item):
    """Scrapy item: a scraped GitHub user profile and its counters."""
    _id=Field()
    url=Field()
    user_id = Field()
    username = Field()
    nickname = Field()
    type = Field()
    location = Field()
    update_time = Field()
    email=Field()
    website=Field()
    member_num=Field()
    company = Field()
    join_date = Field()
    followee_num = Field()
    follower_num = Field()
    star_num = Field()
    repo_num = Field()
    org_num = Field()
class GithubRepoItem(Item):
    """Scrapy item: a scraped GitHub repository and its statistics."""
    _id=Field()
    url=Field()
    username = Field()
    name = Field()
    description = Field()
    update_date = Field()
    star_num = Field()
    watch_num = Field()
    fork_num = Field()
    language = Field()
    type = Field() # one of: Mirrors / Forks / Sources
    commit_num = Field()
    branch_num = Field()
    tag_num = Field()
    pull_num = Field()
    issue_num = Field()
class ZhihuUserItem(Item):
    """Scrapy item: a Zhihu user profile with social and activity counters."""
    _id=Field()
    url=Field()
    img=Field()
    username = Field()
    nickname = Field()
    location = Field()
    industry = Field()
    sex = Field()
    jobs = Field()
    educations = Field()
    description = Field()
    sinaweibo = Field()
    tencentweibo = Field()
    followee_num = Field()
    follower_num = Field()
    ask_num = Field()
    answer_num = Field()
    post_num = Field()
    collection_num = Field()
    log_num = Field()
    agree_num = Field()
    thank_num = Field()
    fav_num = Field()
    share_num = Field()
    view_num = Field()
    update_time = Field()
class ZhihuAskItem(Item):
    """Scrapy item: a Zhihu question posted by a user."""
    _id=Field()
    username = Field()
    url=Field()
    view_num = Field()
    title= Field()
    answer_num= Field()
    follower_num= Field()
class ZhihuAnswerItem(Item):
    """Scrapy item: a Zhihu answer, linked back to its question."""
    _id=Field()
    username = Field()
    url=Field()
    ask_title = Field()
    ask_url = Field()
    agree_num = Field()
    summary = Field()
    content = Field()
    comment_num = Field()
class ZhihuFolloweesItem(Item):
    """Scrapy item: the list of accounts a Zhihu user follows."""
    _id=Field()
    username = Field()
    followees = Field()
class ZhihuFollowersItem(Item):
    """Scrapy item: the list of accounts following a Zhihu user."""
    _id=Field()
    username = Field()
    followers = Field()
class DoubanbookItem(Item):
    """Scrapy item: a Douban book search/listing entry."""
    title = Field()
    link = Field()
    desc = Field()
    num = Field()
class DoubanSubjectItem(Item):
    """Scrapy item: a Douban subject detail page (rating, intros, tags)."""
    title = Field()
    link = Field()
    info = Field()
    rate = Field()
    votes = Field()
    content_intro = Field()
    author_intro = Field()
    tags = Field()
| [
"batulu1987315@163.com"
] | batulu1987315@163.com |
ee1a58bcf366c8ace7692c83bec3ecbc8aba652c | 9a0109c2e6dc192068374a7497b305828b65afd3 | /read_statistics/__init__.py | edec89066d3c67f577ea517a90d1c27d87391b73 | [] | no_license | laughterGod/MyblogSystem | 03d015e3cc46d27fbe9f40413ca78441fbf76905 | 63f08287c367fdb1a3410c8ce534c8a1f826e1f7 | refs/heads/master | 2023-01-09T03:43:55.536147 | 2020-12-23T14:12:11 | 2020-12-23T14:12:11 | 201,200,945 | 1 | 0 | null | 2022-12-26T21:34:53 | 2019-08-08T07:15:39 | Python | UTF-8 | Python | false | false | 65 | py | default_app_config = 'read_statistics.apps.ReadStatisticsConfig'
| [
"hanziguo@kingsoft.com"
] | hanziguo@kingsoft.com |
71439de2a364c259cfed90fea9e2175609014d0c | a8b1b81a7bc584565c31280c8be858c597d3a3a1 | /workIndia/manage.py | 41fbc478b3ad4b0a6451e0a550c4682fd5c9f850 | [] | no_license | adityachoudary54/workindiaproject_password_manager | d434f797a1548392e52705619b9f33622f16b099 | b056a7b64a11b35adad8bda49ff284dccbd7aef3 | refs/heads/master | 2022-12-11T10:21:08.305343 | 2020-08-25T14:06:49 | 2020-08-25T14:06:49 | 290,221,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure the settings module, then hand control to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'workIndia.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as err:
        # Surface a friendlier hint when Django itself cannot be imported.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from err
    run_cli(sys.argv)
# Standard entry point when the script is executed directly.
if __name__ == '__main__':
    main()
| [
"33027155+adityachoudary54@users.noreply.github.com"
] | 33027155+adityachoudary54@users.noreply.github.com |
5eae1492af790922bb806b1d1c75466db26ca638 | 1d22e0cc8db1ddbdab6c06a049ccc15f35dfff99 | /hmm_class/hmm_classifier.py | ef78103fd4f6f7572a36a305ebe37019bd61ebd0 | [] | no_license | JiaxinYu/machine_learning_examples | 59f37335407d9b9523a6879602ad3d58eac7da77 | db49879ca5efd34e7d2ad6c3ddf1fb4854c24429 | refs/heads/master | 2020-06-11T07:24:29.871826 | 2016-11-27T17:54:19 | 2016-11-27T17:54:19 | 75,734,758 | 1 | 0 | null | 2016-12-06T13:39:27 | 2016-12-06T13:39:27 | null | UTF-8 | Python | false | false | 2,841 | py | # https://udemy.com/unsupervised-machine-learning-hidden-markov-models-in-python
# http://lazyprogrammer.me
# Demonstrate how HMMs can be used for classification.
import string
import numpy as np
import matplotlib.pyplot as plt
from hmmd_theano import HMM
from sklearn.utils import shuffle
from nltk import pos_tag, word_tokenize
class HMMClassifier:
    # Generative classifier: fits one discrete HMM per class and predicts
    # the class maximizing log-likelihood + log-prior.  (Python 2 code.)
    def __init__(self):
        pass
    def fit(self, X, Y, V):
        # X: list of observation sequences; Y: int labels; V: shared vocab size.
        K = len(set(Y)) # number of classes - assume 0..K-1
        self.models = []
        self.priors = []
        for k in xrange(K):
            # gather all the training data for this class
            thisX = [x for x, y in zip(X, Y) if y == k]
            C = len(thisX)
            # Unnormalized log-prior = log(class count); the shared
            # normalizer cancels inside the argmax in score().
            self.priors.append(np.log(C))
            hmm = HMM(5)
            hmm.fit(thisX, V=V, p_cost=0.1, print_period=1, learning_rate=10e-5, max_iter=100)
            self.models.append(hmm)
    def score(self, X, Y):
        # Fraction of sequences whose MAP class equals the true label.
        N = len(Y)
        correct = 0
        for x, y in zip(X, Y):
            lls = [hmm.log_likelihood(x) + prior for hmm, prior in zip(self.models, self.priors)]
            p = np.argmax(lls)
            if p == y:
                correct += 1
        return float(correct) / N
# def remove_punctuation(s):
# return s.translate(None, string.punctuation)
def get_tags(s):
    # Tokenize the sentence and keep only the POS tag of each token.
    return [tag for _, tag in pos_tag(word_tokenize(s))]
def get_data():
    # Build POS-tag id sequences from two poetry corpora.
    # Returns (X, Y, V): X = list of int arrays, Y = labels
    # (0 = Frost, 1 = Poe), V = vocabulary (tag) count.
    word2idx = {}
    current_idx = 0
    X = []
    Y = []
    for fn, label in zip(('robert_frost.txt', 'edgar_allan_poe.txt'), (0, 1)):
        count = 0
        for line in open(fn):
            line = line.rstrip()
            if line:
                print line
                # tokens = remove_punctuation(line.lower()).split()
                tokens = get_tags(line)
                if len(tokens) > 1:
                    # scan doesn't work nice here, technically could fix...
                    for token in tokens:
                        if token not in word2idx:
                            word2idx[token] = current_idx
                            current_idx += 1
                    sequence = np.array([word2idx[w] for w in tokens])
                    X.append(sequence)
                    Y.append(label)
                    count += 1
                    print count
                    # Cap at 50 usable lines per author to keep training fast.
                    if count >= 50:
                        break
    print "Vocabulary:", word2idx.keys()
    return X, Y, current_idx
def main():
    # Train one HMM per author, then report accuracy on a 20-sequence holdout.
    X, Y, V = get_data()
    # print "Finished loading data"
    print "len(X):", len(X)
    print "Vocabulary size:", V
    X, Y = shuffle(X, Y)
    N = 20 # number to test
    Xtrain, Ytrain = X[:-N], Y[:-N]
    Xtest, Ytest = X[-N:], Y[-N:]
    model = HMMClassifier()
    model.fit(Xtrain, Ytrain, V)
    print "Score:", model.score(Xtest, Ytest)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"sublime.balloon@gmail.com"
] | sublime.balloon@gmail.com |
a712979f0746ffdb9d01e4e7639de181f610ecfc | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/apimanagement/v20210101preview/list_delegation_setting_secrets.py | 7b3884925eda30d4b2d81d99584cc3666a53a128 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 2,469 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this (SDK-generated) module.
__all__ = [
    'ListDelegationSettingSecretsResult',
    'AwaitableListDelegationSettingSecretsResult',
    'list_delegation_setting_secrets',
]
@pulumi.output_type
class ListDelegationSettingSecretsResult:
    """
    Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.
    """
    def __init__(__self__, validation_key=None):
        # Defensive check: the invoke result must deserialize into a string.
        if validation_key and not isinstance(validation_key, str):
            raise TypeError("Expected argument 'validation_key' to be a str")
        pulumi.set(__self__, "validation_key", validation_key)

    @property
    @pulumi.getter(name="validationKey")
    def validation_key(self) -> Optional[str]:
        """
        This is secret value of the validation key in portal settings.
        """
        return pulumi.get(self, "validation_key")
class AwaitableListDelegationSettingSecretsResult(ListDelegationSettingSecretsResult):
    # Awaitable shim: lets the result be used with `await` while still being
    # a plain (already-resolved) result object in synchronous code.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListDelegationSettingSecretsResult(
            validation_key=self.validation_key)
def list_delegation_setting_secrets(resource_group_name: Optional[str] = None,
                                    service_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDelegationSettingSecretsResult:
    """
    Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.


    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # Wire-format argument names expected by the provider invoke.
    invoke_args = {
        'resourceGroupName': resource_group_name,
        'serviceName': service_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('azure-native:apimanagement/v20210101preview:listDelegationSettingSecrets', invoke_args, opts=opts, typ=ListDelegationSettingSecretsResult).value
    return AwaitableListDelegationSettingSecretsResult(
        validation_key=result.validation_key)
| [
"noreply@github.com"
] | noreply@github.com |
224733db7bbbe943a5cdd5d14513e71863001123 | 37879f158886946a3328cb7c938b774eef6b12f4 | /feature_engineering_pandas.py | 003cf38f3ec684d66b08086075a253ee2016ccec | [
"MIT"
] | permissive | beckernick/cml_rapids | 82f73bb4a7a12783967e1392ab5dba0d4ca01fde | da29a412418ac5c5be038f6c96af0b926c57c1ea | refs/heads/main | 2023-04-28T17:25:42.612687 | 2021-05-13T12:17:49 | 2021-05-13T12:17:49 | 367,154,418 | 0 | 0 | MIT | 2021-05-13T19:31:23 | 2021-05-13T19:31:23 | null | UTF-8 | Python | false | false | 3,622 | py | ## Feature Engineering using dask
import time
import pandas as dd
import pandas as pd
import numpy as np
from feature_engineering_2 import (
pos_cash, process_unified, process_bureau_and_balance,
process_previous_applications, installments_payments,
credit_card_balance
)
### Load Data
# Raw Home-Credit-style tables read from parquet; `dd` is pandas in this
# variant (alias presumably kept so the dask version can be swapped in).
bureau_balance = dd.read_parquet('raw_data/bureau_balance.parquet')
bureau = dd.read_parquet('raw_data/bureau.parquet')
# behaviour data linked to prev as well as current loan
cc_balance = dd.read_parquet('raw_data/cc_balance.parquet')
payments = dd.read_parquet('raw_data/payments.parquet')
pc_balance = dd.read_parquet('raw_data/pc_balance.parquet')
prev = dd.read_parquet('raw_data/prev.parquet')
train = dd.read_parquet('raw_data/train.parquet')
test = dd.read_parquet('raw_data/test.parquet')
# Remember the row split so train/test can be separated again after the
# features are built on the concatenated frame.
train_index = train.index
test_index = test.index
train_target = train['TARGET']
unified = dd.concat([train.drop('TARGET', axis=1), test])
# fix for the process functions not working with columns of type `category`
bureau_balance['STATUS'] = bureau_balance['STATUS'].astype('object')
bureau['CREDIT_ACTIVE'] = bureau['CREDIT_ACTIVE'].astype('object')
bureau['CREDIT_CURRENCY'] = bureau['CREDIT_CURRENCY'].astype('object')
prev['NAME_CONTRACT_STATUS'] = prev['NAME_CONTRACT_STATUS'].astype('object')
# need to split out the parquet writing
# also need to fix a UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
# Per-table aggregate feature frames, all keyed by SK_ID_CURR.
unified_feat = process_unified(unified, dd)
bureau_agg = process_bureau_and_balance(bureau, bureau_balance, dd)
prev_agg = process_previous_applications(prev, dd)
pos_agg = pos_cash(pc_balance, dd)
ins_agg = installments_payments(payments, dd)
cc_agg = credit_card_balance(cc_balance, dd)
# Left-join every aggregate onto the application-level features.
unified_feat = unified_feat.join(bureau_agg, how='left', on='SK_ID_CURR') \
    .join(prev_agg, how='left', on='SK_ID_CURR') \
    .join(pos_agg, how='left', on='SK_ID_CURR') \
    .join(ins_agg, how='left', on='SK_ID_CURR') \
    .join(cc_agg, how='left', on='SK_ID_CURR')
# we can't use bool column types in xgb later on
bool_columns = [col for col in unified_feat.columns if (unified_feat[col].dtype in ['bool']) ]
unified_feat[bool_columns] = unified_feat[bool_columns].astype('int64')
# We will label encode for xgb later on
from sklearn.preprocessing import LabelEncoder
# label encode cats
# One LabelEncoder per categorical column, kept so mappings could be inverted later.
label_encode_dict = {}
categorical = unified_feat.select_dtypes(include=pd.CategoricalDtype).columns
for column in categorical:
    label_encode_dict[column] = LabelEncoder()
    unified_feat[column] = label_encode_dict[column].fit_transform(unified_feat[column])
    unified_feat[column] = unified_feat[column].astype('int64')
### Fix for Int64D
# Nullable Int64 columns: fill missing with 0 and downcast to plain int64.
Int64D = unified_feat.select_dtypes(include=[pd.Int64Dtype]).columns
unified_feat[Int64D] = unified_feat[Int64D].fillna(0)
unified_feat[Int64D] = unified_feat[Int64D].astype('int64')
### fix unit8
uint8 = unified_feat.select_dtypes(include=['uint8']).columns
unified_feat[uint8] = unified_feat[uint8].astype('int64')
# Replace infinities FIRST, then find the columns that contain NaN: in the
# original order, a column holding inf but no NaN was missed by nan_columns,
# so the inf-derived NaNs were never filled and leaked into the model inputs.
unified_feat.replace([np.inf, -np.inf], np.nan, inplace=True)
nan_columns = unified_feat.columns[unified_feat.isna().any()].tolist()
unified_feat[nan_columns] = unified_feat[nan_columns].fillna(0)
# Split back into train/test on the remembered indexes; re-attach the target
# to the training features only, then persist both frames.
train_feats = unified_feat.loc[train_index].merge(train_target, how='left',
                            left_index=True, right_index=True)
test_feats = unified_feat.loc[test_index]
train_feats.to_parquet('data_eng/feats/train_feats.parquet')
test_feats.to_parquet('data_eng/feats/test_feats.parquet')
"bpl.law@gmail.com"
] | bpl.law@gmail.com |
f66cef6a68cdeac9b5ddd4269b3bf892b66c42f5 | 72cc8d87905918d9ce8eab22180cdbeb8f040276 | /Module2/wrangling-data.py | 9bf122173ded8e529ea4e764ad647758271aabd1 | [
"MIT"
] | permissive | bozhink/DAT210x | 2b6aca1fdae33ab87faa2dea45f6a319149a15f2 | 2ac5d717e8c671f4a3884f30406ad8fc3edc5961 | refs/heads/master | 2020-07-10T18:03:31.161560 | 2016-09-11T14:14:51 | 2016-09-11T14:14:51 | 67,400,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | # -*- coding: utf-8 -*-
import pandas as pd

# Cheat-sheet of pandas data-wrangling idioms (expressions are illustrative;
# most results are deliberately discarded).
df = pd.read_csv('', sep=',')  # NOTE(review): placeholder path — supply a real CSV
# Find nan
df.Feature.isnull()
df.Feature.notnull()
df.isnull()
df.notnull()
# Any time a nan is encountered, replace it with a scalar value:
df.my_feature.fillna( df.my_feature.mean() )
df.fillna(0)
# Forward/Backward fill
df.fillna(method='ffill') # fill the values forward
df.fillna(method='bfill') # fill the values in reverse
df.fillna(limit=5)  # NOTE(review): fillna needs a value or method; limit alone raises
df.fillna(method='ffill',limit=1) # fill the values forward
df.fillna(method='bfill',limit=1) # fill the values in reverse
# Interpolation
df.interpolate(method='polynomial', order=2)
# Remove nan
df = df.dropna(axis=0) # row
df = df.dropna(axis=1) # column
# Drop any row that has at least 4 NON-NaNs within it:
df = df.dropna(axis=0, thresh=4)
# Delete features
# Axis=1 for columns
df = df.drop(labels=['Features', 'To', 'Delete'], axis=1)
# Drop duplicates
df = df.drop_duplicates(subset=['Feature_1', 'Feature_2'])
df = df.reset_index(drop=True)
df = df.dropna(axis=0,
               thresh=2).drop(labels=['ColA'],
               axis=1).drop_duplicates(subset=['ColB', 'ColC']).reset_index()
# Type cast (errors='coerce' turns unparseable values into NaT/NaN)
df.Date = pd.to_datetime(df.Date, errors='coerce')
df.Height = pd.to_numeric(df.Height, errors='coerce')
df.Weight = pd.to_numeric(df.Weight, errors='coerce')
df.Age = pd.to_numeric(df.Age, errors='coerce')
df.Age.unique()
df.Age.value_counts()
| [
"bozhink@gmail.com"
] | bozhink@gmail.com |
734ded4d9ed0355de4c637681b1104410ac535e7 | a07fb7e4d7cb9a3b626769e0690d10e4bdbc8da1 | /models.py | 68cb42b762487786cb76041c64bc8018d03bc38f | [] | no_license | syrvanser/memory-efficient-distributed-representations | 23277cceeec0bd2ff914d31c9118f42797c176cf | 7a8dcf028d44660c496462585d52a4af2ada2da7 | refs/heads/master | 2023-07-09T02:45:48.832902 | 2021-08-17T22:27:25 | 2021-08-17T22:27:25 | 374,692,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,505 | py | import tensorflow as tf
import tensorflow_recommenders as tfrs
from dpq_embedding import DPQEmbedding, MGQEEmbedding, TripleMGQEEmbedding
from typing import Dict, Text
from utils import augment_data
class RankingModel(tf.keras.Model):
    """Baseline rating model: full user/movie embedding tables combined
    elementwise and scored by a sigmoid-terminated MLP."""
    def __init__(self, args, unique_user_ids, unique_movie_ids):
        super().__init__()
        # Compute embeddings for users.  (+1 row presumably reserves an index
        # for padding/out-of-vocabulary ids — TODO confirm the id encoding.)
        self.user_embeddings = tf.keras.layers.Embedding(len(unique_user_ids) + 1, args.embedding_dimensions, embeddings_regularizer=tf.keras.regularizers.l2(args.l2_norm))
        # Compute embeddings for movies.
        self.movie_embeddings = tf.keras.layers.Embedding(len(unique_movie_ids) + 1, args.embedding_dimensions, embeddings_regularizer=tf.keras.regularizers.l2(args.l2_norm))
        # self.dot = tf.keras.layers.Dot(axes=1)
        # Compute predictions.
        self.ratings = tf.keras.Sequential([
            # Learn multiple dense layers.
            tf.keras.layers.Dense(args.mlp_1, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(args.mlp_2, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(args.mlp_3, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            # Make rating predictions in the final layer.
            tf.keras.layers.Dense(1, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="sigmoid")
        ])
    def call(self, inputs):
        # inputs: (user_id batch, movie_id batch) -> predicted score in [0, 1].
        user_id, movie_id = inputs
        user_embedding = self.user_embeddings(user_id)
        movie_embedding = self.movie_embeddings(movie_id)
        return self.ratings(tf.keras.layers.Multiply()([user_embedding, movie_embedding]))
class MovielensModel(tfrs.models.Model):
    """TFRS wrapper training RankingModel with a pointwise binary-crossentropy ranking task."""
    def __init__(self, args, unique_user_ids, unique_movie_ids):
        super().__init__()
        self.args = args
        self.unique_user_ids = unique_user_ids
        self.unique_movie_ids = unique_movie_ids
        self.ranking_model: tf.keras.Model = RankingModel(args, unique_user_ids, unique_movie_ids)
        self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
            loss = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
        )
    def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
        # Some input pipelines yield (features, weights) tuples; keep the dict.
        if type(features) == tuple:
            features = features[0]
        rating_predictions = self.ranking_model(
            (features["user_id"], features["movie_id"]))
        # The task computes the loss and the metrics.
        return self.task(labels=features["user_rating"], predictions=rating_predictions)
    def call(self, inputs):
        # Accepts either a (user_id, movie_id) tuple or a feature dict.
        # NOTE(review): any other input type falls through and returns None.
        if type(inputs) == tuple:
            return self.ranking_model(inputs)
        elif type(inputs) == dict:
            return self.ranking_model((inputs["user_id"], inputs["movie_id"]))
class DPQRankingModel(tf.keras.Model):
    """RankingModel variant whose embedding tables are compressed with
    Differentiable Product Quantization (DPQ), k codes x d subspaces."""
    def __init__(self, args, unique_user_ids, unique_movie_ids):
        super().__init__()
        # Compute embeddings for users.
        self.user_embeddings = DPQEmbedding(args.k, args.d, len(unique_user_ids) + 1, args.embedding_dimensions, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), share_subspace=args.shared_centroids)
        # Compute embeddings for movies.
        self.movie_embeddings = DPQEmbedding(args.k, args.d, len(unique_movie_ids) + 1, args.embedding_dimensions, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), share_subspace=args.shared_centroids)
        # Compute predictions.
        self.ratings = tf.keras.Sequential([
            # Learn multiple dense layers.
            tf.keras.layers.Dense(args.mlp_1, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(args.mlp_2, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(args.mlp_3, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            # Make rating predictions in the final layer.
            tf.keras.layers.Dense(1, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="sigmoid")
        ])
    def call(self, inputs):
        # (user_id, movie_id) -> predicted score in [0, 1].
        user_id, movie_id = inputs
        user_embedding = self.user_embeddings(user_id)
        movie_embedding = self.movie_embeddings(movie_id)
        return self.ratings(tf.keras.layers.Multiply()([user_embedding, movie_embedding]))
class DPQMovielensModel(tfrs.models.Model):
    """TFRS wrapper for DPQRankingModel; same task setup as MovielensModel."""
    def __init__(self, args, unique_user_ids, unique_movie_ids):
        super().__init__()
        self.ranking_model: tf.keras.Model = DPQRankingModel(args, unique_user_ids, unique_movie_ids)
        self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
            loss = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE),
        )
    def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
        # Some input pipelines yield (features, weights) tuples; keep the dict.
        if type(features) == tuple:
            features = features[0]
        rating_predictions = self.ranking_model(
            (features["user_id"], features["movie_id"]))
        # The task computes the loss and the metrics.
        return self.task(labels=features["user_rating"], predictions=rating_predictions)
    def call(self, inputs):
        # Accepts either a (user_id, movie_id) tuple or a feature dict.
        if type(inputs) == tuple:
            return self.ranking_model(inputs)
        elif type(inputs) == dict:
            return self.ranking_model((inputs["user_id"], inputs["movie_id"]))
class MGQERankingModel(tf.keras.Model):
    """RankingModel variant using Multi-Granular Quantized Embeddings (MGQE):
    codebook capacity is allotted per entity by frequency; `args.partitions == 3`
    selects the three-tier variant."""
    def __init__(self, args, unique_user_ids, unique_movie_ids, user_freqs, movie_freqs):
        super().__init__()
        # Compute embeddings for users.
        if args.partitions == 3:
            # Compute embeddings for users.
            self.user_embeddings = TripleMGQEEmbedding(args.k, args.d, len(unique_user_ids) + 1, args.embedding_dimensions, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), frequencies=user_freqs, share_subspace=args.shared_centroids)
            # Compute embeddings for movies.
            self.movie_embeddings = TripleMGQEEmbedding(args.k, args.d, len(unique_movie_ids) + 1, args.embedding_dimensions, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), frequencies=movie_freqs, share_subspace=args.shared_centroids)
        else:
            # Compute embeddings for users.
            self.user_embeddings = MGQEEmbedding(args.k, args.d, len(unique_user_ids) + 1, args.embedding_dimensions, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), frequencies=user_freqs, share_subspace=args.shared_centroids)
            # Compute embeddings for movies.
            self.movie_embeddings = MGQEEmbedding(args.k, args.d, len(unique_movie_ids) + 1, args.embedding_dimensions, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), frequencies=movie_freqs, share_subspace=args.shared_centroids)
        # Compute predictions.
        self.ratings = tf.keras.Sequential([
            # Learn multiple dense layers.
            tf.keras.layers.Dense(args.mlp_1, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(args.mlp_2, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(args.mlp_3, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="relu"),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.BatchNormalization(),
            # Make rating predictions in the final layer.
            tf.keras.layers.Dense(1, activity_regularizer=tf.keras.regularizers.l2(args.l2_norm), activation="sigmoid")
        ])
    def call(self, inputs):
        # MGQE embedding lookups are done with int64 ids (hence the casts).
        user_id, movie_id = inputs
        user_embedding = self.user_embeddings(tf.cast(user_id, tf.int64))
        movie_embedding = self.movie_embeddings(tf.cast(movie_id, tf.int64))
        return self.ratings(tf.keras.layers.Multiply()([user_embedding, movie_embedding]))
class MGQEMovielensModel(tfrs.models.Model):
    """TFRS wrapper for MGQERankingModel; same task setup as MovielensModel."""
    def __init__(self, args, unique_user_ids, unique_movie_ids, user_freqs, movie_freqs):
        super().__init__()
        self.ranking_model: tf.keras.Model = MGQERankingModel(args, unique_user_ids, unique_movie_ids, user_freqs, movie_freqs)
        self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
            loss = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE),
        )
    def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
        # Some input pipelines yield (features, weights) tuples; keep the dict.
        if type(features) == tuple:
            features = features[0]
        rating_predictions = self.ranking_model(
            (features["user_id"], features["movie_id"]))
        # The task computes the loss and the metrics.
        return self.task(labels=features["user_rating"], predictions=rating_predictions)
    def call(self, inputs):
        # Accepts either a (user_id, movie_id) tuple or a feature dict.
        if type(inputs) == tuple:
            return self.ranking_model(inputs)
        elif type(inputs) == dict:
            return self.ranking_model((inputs["user_id"], inputs["movie_id"]))
class NeuMFRankingModel(tf.keras.Model):
    """Neural Matrix Factorization (NeuMF) rating head.

    A GMF branch (element-wise product of one embedding pair) and an MLP
    branch (a second embedding pair fed through dense layers) are
    concatenated and mapped to a sigmoid rating.
    """

    def __init__(self, args, unique_user_ids, unique_movie_ids):
        super().__init__()

        def l2():
            # One fresh regularizer instance per layer, as originally wired.
            return tf.keras.regularizers.l2(args.l2_norm)

        n_users = len(unique_user_ids) + 1
        n_movies = len(unique_movie_ids) + 1
        dim = args.embedding_dimensions

        # Separate embedding tables for the GMF branch (suffix 1) and the
        # MLP branch (suffix 2).
        self.user_embeddings1 = tf.keras.layers.Embedding(n_users, dim, embeddings_regularizer=l2())
        self.user_embeddings2 = tf.keras.layers.Embedding(n_users, dim, embeddings_regularizer=l2())
        self.movie_embeddings1 = tf.keras.layers.Embedding(n_movies, dim, embeddings_regularizer=l2())
        self.movie_embeddings2 = tf.keras.layers.Embedding(n_movies, dim, embeddings_regularizer=l2())

        # MLP tower: Dense -> Dropout -> BatchNorm, three times.
        tower = []
        for width in (args.mlp_1, args.mlp_2, args.mlp_3):
            tower.append(tf.keras.layers.Dense(width, activity_regularizer=l2(), activation="relu"))
            tower.append(tf.keras.layers.Dropout(0.3))
            tower.append(tf.keras.layers.BatchNormalization())
        self.mlp = tf.keras.Sequential(tower)

        self.concat = tf.keras.layers.Concatenate(axis=1)
        # Final sigmoid unit producing the predicted rating.
        self.out = tf.keras.layers.Dense(1, activity_regularizer=l2(), activation="sigmoid")

    def call(self, inputs):
        uid, mid = inputs
        # GMF branch: element-wise product of the first embedding pair.
        gmf = tf.keras.layers.Multiply()([self.user_embeddings1(uid), self.movie_embeddings1(mid)])
        # MLP branch: dense tower over the concatenated second pair.
        mlp_out = self.mlp(self.concat([self.user_embeddings2(uid), self.movie_embeddings2(mid)]))
        return self.out(self.concat([gmf, mlp_out]))
class NeuMFMovielensModel(tfrs.models.Model):
    """TFRS ranking model wrapping :class:`NeuMFRankingModel`.

    Uses a ranking task with per-example (unreduced) binary cross-entropy.
    """

    def __init__(self, args, unique_user_ids, unique_movie_ids):
        super().__init__()
        self.ranking_model: tf.keras.Model = NeuMFRankingModel(
            args, unique_user_ids, unique_movie_ids)
        self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
            loss=tf.keras.losses.BinaryCrossentropy(
                reduction=tf.keras.losses.Reduction.NONE)
        )

    def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
        # Some input pipelines yield (features, weights) tuples; unwrap.
        # isinstance() replaces the non-idiomatic `type(x) == tuple` check.
        if isinstance(features, tuple):
            features = features[0]
        rating_predictions = self.ranking_model(
            (features["user_id"], features["movie_id"]))
        # The task computes the loss and the metrics.
        return self.task(labels=features["user_rating"], predictions=rating_predictions)

    def call(self, inputs):
        # Accept a positional (user_id, movie_id) tuple or a feature dict.
        if isinstance(inputs, tuple):
            return self.ranking_model(inputs)
        elif isinstance(inputs, dict):
            return self.ranking_model((inputs["user_id"], inputs["movie_id"]))
        # Any other input type falls through and returns None (original
        # behavior preserved); consider raising TypeError instead.
"syrvanser@gmail.com"
] | syrvanser@gmail.com |
c882105ca46a3f7823f9788acf54067ac3aabd9b | 942352f51f61b1bd87d64fee12862ccede98a148 | /UI.py | 444bee867c8bea072211835849ba8b8af7afddfd | [] | no_license | andrrus2004/git-and-circles | b10f1732c6657d5eb65c861a6c795daaba421ed6 | 3dabf226dd07b5f20a1dbab387b8f2ac9e03d55f | refs/heads/master | 2023-02-16T18:30:25.590310 | 2021-01-12T17:21:01 | 2021-01-12T17:21:01 | 329,037,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Qt-Designer-style UI builder for the application's main window."""

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(500, 570)

        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")

        # Large button spanning the bottom edge of the 500x570 window.
        self.create_btn = QtWidgets.QPushButton(self.centralwidget)
        self.create_btn.setGeometry(QtCore.QRect(10, 510, 480, 50))
        btn_font = QtGui.QFont()
        btn_font.setFamily("Arial")
        btn_font.setPointSize(17)
        self.create_btn.setFont(btn_font)
        self.create_btn.setObjectName("create_btn")

        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply the translatable window title and button caption."""
        _tr = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_tr("MainWindow", "Git и случайные окружности"))
        self.create_btn.setText(_tr("MainWindow", "Создать окружность"))
| [
"rakhmanov.andrey@gmail.com"
] | rakhmanov.andrey@gmail.com |
e0216efaaea6832c178d2b5615a91514dc4d96bb | ab704aa624338918d53ece09e1bcbcc39cbdc3b7 | /print.py | 015062292dae3c8d3e3f1a8b25e886e16ac42b10 | [] | no_license | Omega-developrs/Simple-git | fceac72f4e4e0c188ce8a6768952561d0bfbe5aa | fcdadecf50459d2985328226fea941cd7f3490a2 | refs/heads/master | 2022-12-02T15:48:55.256447 | 2020-08-25T14:45:12 | 2020-08-25T14:45:12 | 290,234,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py |
# Minimal demo script: emit two fixed lines on stdout.
print("Let's fix the code")
print("Hello, World!")
| [
"ajaykrishnaanandhan@gmail.com"
] | ajaykrishnaanandhan@gmail.com |
2ce2737f7960c8ed5d5f3a9c942e1d8c09ac1592 | e53bc45497fc77cef79404c71760086bdcc43835 | /014-contexto-estatico/variables_de_clase.py | 46e59f57c799a761922eddd1fe7543b0dfb4aaf1 | [] | no_license | pacomgh/universidad-python | f6f33f859cb3dabc593c45ef85cb2a4ba0c7825c | e6b5d9ebdeefe7737013bb83ea87ced8c62ebc5c | refs/heads/master | 2023-04-25T01:42:38.900691 | 2021-05-04T22:56:24 | 2021-05-04T22:56:24 | 279,757,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | class MiClase:
#esta variable se asocia con la clase y no con el objeto
variableClase = "variable de clase"
#definimos un atributo de instancia, se asocia a objeto y no clase
    def __init__(self, nombre):
        # Instance attribute: bound to the object being created, unlike the
        # class variable declared at class level (outside any method).
        self.nombre = nombre
# Class variables can be read straight from the class; no instance needed.
print(MiClase.variableClase)
objeto1 = MiClase("variable de instancia")
# This binds an attribute named `nombre` on the CLASS itself; objeto1's own
# instance attribute (set in __init__) shadows it for that object.
MiClase.nombre = "modificando variable instancia"
# Reads the class-level attribute assigned just above.
print(MiClase.nombre)
# Reads objeto1's instance attribute, which shadows the class attribute.
print(objeto1.nombre)
# Class variables are reachable through instances as well.
print(objeto1.variableClase)
# ...and through the class name.
print(MiClase.variableClase)
# Assigning through the instance creates an INSTANCE attribute that shadows
# the class variable for objeto1 only.
objeto1.variableClase = "modificando variable de clase"
print(objeto1.variableClase)
# The class-level value is unchanged by the instance assignment.
print(MiClase.variableClase)
# New instances still see the class-level value.
objeto2 = MiClase("Nuevo valor de variable de instancia")
print(objeto2.variableClase)
objeto3 = MiClase("valor de tercer objeto")
# Rebinding on the class is visible to every instance that has not shadowed
# the name with its own instance attribute.
MiClase.variableClase = "Cambio desde la clase"
# objeto1 still sees its own shadowing instance attribute...
print(objeto1.variableClase)
# ...while objeto2 and objeto3 see the new class-level value.
print(objeto2.variableClase)
print(objeto3.variableClase)
"paco.mgh@gmail.com"
] | paco.mgh@gmail.com |
b0a5644af806359a75483523a5f4cbb90c1cc09b | 0d52bd60f06d6e5c7a783832febc33123e171625 | /visualizers/jbrowse/JBrowse-1.12.3_ct/tests/selenium_tests/welcome_page_test.py | 9a366a1b6c4318a1b6749b0e00be8ba0b3dded95 | [
"LGPL-2.1-or-later",
"Artistic-2.0",
"LGPL-2.1-only",
"Apache-2.0"
] | permissive | Multiscale-Genomics/VRE | 13fde91dd46425e5d202cded78970cf430575a71 | 8d946226e5b0818eb16b49cdce4ef0c1ea536d99 | refs/heads/master | 2021-01-22T20:41:24.784276 | 2019-07-22T18:00:57 | 2019-07-22T18:00:57 | 85,320,027 | 5 | 2 | Apache-2.0 | 2020-07-07T04:35:57 | 2017-03-17T14:23:11 | JavaScript | UTF-8 | Python | false | false | 247 | py | import unittest
from jbrowse_selenium import JBrowseTest
class WelcomePageTest( JBrowseTest, unittest.TestCase ):
    # Deliberately nonexistent data directory so JBrowse renders its
    # fatal-error page instead of a browser view.
    data_dir = 'nonexistent'

    def test_volvox( self ):
        # The fatal-error view shows an <h1> inside a div with class
        # "fatal_error"; assert that element is present.
        self.assert_element('//div[contains(@class,"fatal_error")]/h1')
| [
"root@multiscalegenomics.eu"
] | root@multiscalegenomics.eu |
ca06b826d8a2b9095d0b122e68a5c4e0da83821a | 3ec283fa90fcfec323ed1ee4e5e75fc61ffa4244 | /practice_exercises/CodeSignalArcade/IsLucky.py | a747640c8df725d13ab5c8ade8cd29b0638f3ab6 | [] | no_license | jmmiddour/CSPT19 | a252076d26d45120ee4650f1e5616923ec700892 | 9f424f4521aa709ed026b7b085922e719b48e29e | refs/heads/main | 2023-04-19T10:37:40.461375 | 2021-04-27T02:49:55 | 2021-04-27T02:49:55 | 346,905,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | """
Ticket numbers usually consist of an even number of digits.
A ticket number is considered lucky if the sum of the first half
of the digits is equal to the sum of the second half.
Given a ticket number `n`, determine if it's lucky or not.
Example:
For n = 1230, the output should be:
isLucky(n) = true;
For n = 239017, the output should be:
isLucky(n) = false.
Input/Output:
[execution time limit] 4 seconds (py3)
[input] integer n
A ticket number represented as a positive integer
with an even number of digits.
Guaranteed constraints:
10 ≤ n < 10^6.
[output] boolean
true if `n` is a lucky ticket number, false otherwise.
"""
"""
1. Split the given integer in half
2. Add all the digits on the left and on the right of the split
3. Return a bool if the left sum == the right sum
"""
def isLucky(n):
    """Return True if ticket number *n* is "lucky".

    A ticket with an even number of digits is lucky when the digit sum of
    its first half equals the digit sum of its second half.

    Simplified from the original, which built three intermediate lists and
    re-sliced them; behavior is unchanged.
    """
    digits = [int(d) for d in str(n)]
    half = len(digits) // 2
    return sum(digits[:half]) == sum(digits[half:])
| [
"64432505+jmmiddour@users.noreply.github.com"
] | 64432505+jmmiddour@users.noreply.github.com |
7c2ec9f77e57a62a2549d84905a21e7d3809c20d | be9a1d2b459f5b6b795dfdfbf6c5642b0c801320 | /youtube_dlc/postprocessor/sponskrub.py | 4e9bec2571d4e77ca7f7aa5732581f12cdf1eb21 | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | GRBaset/yt-dlp | 34b7880a8c59bbf3653f7c0066053e581046f3ff | 82e3f6ebda56c84166494e157e0f856467ca5581 | refs/heads/master | 2023-02-24T11:48:35.830262 | 2021-01-31T07:48:06 | 2021-01-31T15:22:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,805 | py | from __future__ import unicode_literals
import os
import subprocess
from .common import PostProcessor
from ..compat import compat_shlex_split
from ..utils import (
check_executable,
encodeArgument,
encodeFilename,
shell_quote,
str_or_none,
PostProcessingError,
prepend_extension,
)
class SponSkrubPP(PostProcessor):
    """Post-processor that invokes the external `sponskrub` binary to mark
    (as chapters) or cut out sponsor segments of YouTube videos, using data
    from the SponsorBlock database."""

    _temp_ext = 'spons'      # extension inserted for the temporary output file
    _exe_name = 'sponskrub'  # executable name searched for on disk

    def __init__(self, downloader, path='', args=None, ignoreerror=False, cut=False, force=False):
        PostProcessor.__init__(self, downloader)
        self.force = force
        self.cutout = cut
        self.args = str_or_none(args) or ''  # For backward compatibility
        self.path = self.get_exe(path)

        # Fail fast when the executable cannot be located, unless the caller
        # asked to ignore that condition.
        if not ignoreerror and self.path is None:
            if path:
                raise PostProcessingError('sponskrub not found in "%s"' % path)
            else:
                raise PostProcessingError('sponskrub not found. Please install or provide the path using --sponskrub-path.')

    def get_exe(self, path=''):
        # *path* may be the binary itself or a directory containing it.
        # Returns a runnable path, or None if nothing checks out.
        if not path or not check_executable(path, ['-h']):
            path = os.path.join(path, self._exe_name)
            if not check_executable(path, ['-h']):
                return None
        return path

    def run(self, information):
        """Run sponskrub over the downloaded file described by *information*.

        Returns ([], information) like other post-processors; raises
        PostProcessingError when sponskrub exits with an unexpected code.
        """
        if self.path is None:
            return [], information

        # SponsorBlock data only exists for YouTube videos.
        if information['extractor_key'].lower() != 'youtube':
            self.to_screen('Skipping sponskrub since it is not a YouTube video')
            return [], information
        if self.cutout and not self.force and not information.get('__real_download', False):
            self.report_warning(
                'Skipping sponskrub since the video was already downloaded. '
                'Use --sponskrub-force to run sponskrub anyway')
            return [], information

        self.to_screen('Trying to %s sponsor sections' % ('remove' if self.cutout else 'mark'))
        if self.cutout:
            self.report_warning('Cutting out sponsor segments will cause the subtitles to go out of sync.')
            if not information.get('__real_download', False):
                self.report_warning('If sponskrub is run multiple times, unintended parts of the video could be cut out.')

        # sponskrub writes to a temporary file that replaces the original
        # only on success; clear any stale leftover first.
        filename = information['filepath']
        temp_filename = prepend_extension(filename, self._temp_ext)
        if os.path.exists(encodeFilename(temp_filename)):
            os.remove(encodeFilename(temp_filename))

        cmd = [self.path]
        if not self.cutout:
            # Default mode: embed chapter markers instead of cutting.
            cmd += ['-chapter']
        cmd += compat_shlex_split(self.args)  # For backward compatibility
        cmd += self._configuration_args(exe=self._exe_name)
        # '--' ends option parsing so the video id cannot be read as a flag.
        cmd += ['--', information['id'], filename, temp_filename]
        cmd = [encodeArgument(i) for i in cmd]

        self.write_debug('sponskrub command line: %s' % shell_quote(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        stdout, stderr = p.communicate()

        if p.returncode == 0:
            # Success: replace the original file with the processed one.
            os.remove(encodeFilename(filename))
            os.rename(encodeFilename(temp_filename), encodeFilename(filename))
            self.to_screen('Sponsor sections have been %s' % ('removed' if self.cutout else 'marked'))
        elif p.returncode == 3:
            # Exit code 3: the video has no segments in the database.
            self.to_screen('No segments in the SponsorBlock database')
        else:
            # Surface the most relevant line of sponskrub's output: the
            # first line for "unrecognised ..." errors, otherwise the last.
            msg = stderr.decode('utf-8', 'replace').strip() or stdout.decode('utf-8', 'replace').strip()
            self.write_debug(msg, prefix=False)
            line = 0 if msg[:12].lower() == 'unrecognised' else -1
            msg = msg.split('\n')[line]
            raise PostProcessingError(msg if msg else 'sponskrub failed with error code %s' % p.returncode)

        return [], information
| [
"pukkandan@gmail.com"
] | pukkandan@gmail.com |
59c3bd06e2e52ff8c563ba694f192343d83d345f | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-SceneKit/PyObjCTest/test_scnmaterial.py | f2c6d6c21c5547c3bc9103160f5ceb299b9928c3 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | from PyObjCTools.TestSupport import *
import objc
import sys
if os_level_key(os_release()) < os_level_key("10.12") or sys.maxsize >= 2 ** 32:
import SceneKit
class TestSCNMaterial(TestCase):
    """Checks SceneKit SCNMaterial string/enum constant values and the
    Objective-C bridge metadata (BOOL results/arguments) of its methods."""

    def testConstants(self):
        # Lighting-model identifiers are string (NSString) constants.
        self.assertIsInstance(SceneKit.SCNLightingModelPhong, unicode)
        self.assertIsInstance(SceneKit.SCNLightingModelBlinn, unicode)
        self.assertIsInstance(SceneKit.SCNLightingModelLambert, unicode)
        self.assertIsInstance(SceneKit.SCNLightingModelConstant, unicode)

        # Fill and cull modes.
        self.assertEqual(SceneKit.SCNFillModeFill, 0)
        self.assertEqual(SceneKit.SCNFillModeLines, 1)

        self.assertEqual(SceneKit.SCNCullBack, 0)
        self.assertEqual(SceneKit.SCNCullFront, 1)

        # Transparency modes; the default aliases AOne.
        self.assertEqual(SceneKit.SCNTransparencyModeAOne, 0)
        self.assertEqual(SceneKit.SCNTransparencyModeRGBZero, 1)
        self.assertEqual(SceneKit.SCNTransparencyModeSingleLayer, 2)
        self.assertEqual(SceneKit.SCNTransparencyModeDualLayer, 3)
        self.assertEqual(
            SceneKit.SCNTransparencyModeDefault, SceneKit.SCNTransparencyModeAOne
        )

        # Blend modes.
        self.assertEqual(SceneKit.SCNBlendModeAlpha, 0)
        self.assertEqual(SceneKit.SCNBlendModeAdd, 1)
        self.assertEqual(SceneKit.SCNBlendModeSubtract, 2)
        self.assertEqual(SceneKit.SCNBlendModeMultiply, 3)
        self.assertEqual(SceneKit.SCNBlendModeScreen, 4)
        self.assertEqual(SceneKit.SCNBlendModeReplace, 5)
        self.assertEqual(SceneKit.SCNBlendModeMax, 6)

    @min_os_level("10.12")
    def testConstants10_12(self):
        self.assertIsInstance(SceneKit.SCNLightingModelPhysicallyBased, unicode)

    @min_os_level("10.15")
    def testConstants10_15(self):
        self.assertIsInstance(SceneKit.SCNLightingModelShadowOnly, unicode)

    def testMethods(self):
        # Boolean getters return BOOL; matching setters take BOOL at arg 0.
        self.assertResultIsBOOL(SceneKit.SCNMaterial.isLitPerPixel)
        self.assertArgIsBOOL(SceneKit.SCNMaterial.setLitPerPixel_, 0)

        self.assertResultIsBOOL(SceneKit.SCNMaterial.isDoubleSided)
        self.assertArgIsBOOL(SceneKit.SCNMaterial.setDoubleSided_, 0)

        self.assertResultIsBOOL(SceneKit.SCNMaterial.locksAmbientWithDiffuse)
        self.assertArgIsBOOL(SceneKit.SCNMaterial.setLocksAmbientWithDiffuse_, 0)

        self.assertResultIsBOOL(SceneKit.SCNMaterial.writesToDepthBuffer)
        self.assertArgIsBOOL(SceneKit.SCNMaterial.setWritesToDepthBuffer_, 0)

    @min_os_level("10.9")
    def testMethods10_9(self):
        self.assertResultIsBOOL(SceneKit.SCNMaterial.readsFromDepthBuffer)
        self.assertArgIsBOOL(SceneKit.SCNMaterial.setReadsFromDepthBuffer_, 0)
if __name__ == "__main__":
    # Run this module's tests directly via PyObjCTools.TestSupport.main().
    main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
8b7b56add43227c30c4f35edb28e313454c621e3 | 6d2af2903eec2c2afe0394d8d9053cee4aec40d5 | /src/main.py | ed4074cac2a8ebde32ac7e0bf75af0ef74f2e324 | [] | no_license | herewestand/ducking-bear | 6e0e264546552b5f8cb26b27969c2e06c5296b95 | d11b029276b3ba6d928701f36d64ce11da61b397 | refs/heads/master | 2021-01-13T02:14:27.778236 | 2014-01-19T21:52:17 | 2014-01-19T21:52:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | '''
Created on Jan 15, 2014
@author: Knightmare
'''
from circuit import NAND
from circuit import AND
from circuit import OR
from circuit import XOR
from circuit import XNOR
from circuit import NOR
if __name__ == '__main__':
    # Smoke test: feed the NAND(1, 1) output into an AND gate with a
    # constant 1 and print the result.
    # NOTE(review): with standard gate semantics NAND(1,1)=0 and AND(0,1)=0,
    # so this presumably prints 0 — confirm against the circuit module.
    p=NAND(input1=1,input2=1)
    x=AND(input1=p.getNANDcarry(), input2=1)
    print(x.getANDcarry())
pass | [
"ivanbravo714@gmail.com"
] | ivanbravo714@gmail.com |
a4ad6e33db53eed60982e79cb3e0d0d0155285eb | 63b285256f049d4e1defa2ef8c548e66d84ca63b | /venv/Lib/site-packages/sqlalchemy/testing/plugin/pytestplugin.py | a0729cc0c47511578e6d9b786ec475d13ff5d6af | [] | no_license | MaxOng94/Flask-tutorial | d5ea20fa370cdf563e5e0cc301faade94a4f1346 | e17dfab71fc9ddde452c6dfb758890d3b3f7d304 | refs/heads/master | 2023-04-03T03:42:05.283274 | 2021-04-12T00:18:52 | 2021-04-12T00:18:52 | 295,922,379 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,720 | py | try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import argparse
import collections
from functools import update_wrapper
import inspect
import itertools
import operator
import os
import re
import sys
import pytest
try:
import typing
except ImportError:
pass
else:
if typing.TYPE_CHECKING:
from typing import Sequence
try:
import xdist # noqa
has_xdist = True
except ImportError:
has_xdist = False
def pytest_addoption(parser):
    """Register SQLAlchemy's test-suite command line options with pytest."""
    group = parser.getgroup("sqlalchemy")

    def make_option(name, **kw):
        # Adapt the option-callback protocol used by plugin_base into
        # argparse Action subclasses that pytest's parser understands.
        callback_ = kw.pop("callback", None)
        if callback_:

            # Option that takes a value and forwards it to the callback.
            class CallableAction(argparse.Action):
                def __call__(
                    self, parser, namespace, values, option_string=None
                ):
                    callback_(option_string, values, parser)

            kw["action"] = CallableAction

        zeroarg_callback = kw.pop("zeroarg_callback", None)
        if zeroarg_callback:

            # Zero-argument (flag-style) option variant.
            class CallableAction(argparse.Action):
                def __init__(
                    self,
                    option_strings,
                    dest,
                    default=False,
                    required=False,
                    help=None,  # noqa
                ):
                    super(CallableAction, self).__init__(
                        option_strings=option_strings,
                        dest=dest,
                        nargs=0,
                        const=True,
                        default=default,
                        required=required,
                        help=help,
                    )

                def __call__(
                    self, parser, namespace, values, option_string=None
                ):
                    zeroarg_callback(option_string, values, parser)

            kw["action"] = CallableAction

        group.addoption(name, **kw)

    plugin_base.setup_options(make_option)
    plugin_base.read_config()
def pytest_configure(config):
    """Early configuration: assertion rewriting and xdist follower setup."""
    pytest.register_assert_rewrite("sqlalchemy.testing.assertions")

    if hasattr(config, "workerinput"):
        # Running as a pytest-xdist worker: adopt the configuration the
        # master memoized and switch to this worker's follower database.
        plugin_base.restore_important_follower_config(config.workerinput)
        plugin_base.configure_follower(config.workerinput["follower_ident"])
    else:
        # Master (or non-xdist) process: start with a clean idents file.
        if config.option.write_idents and os.path.exists(
            config.option.write_idents
        ):
            os.remove(config.option.write_idents)

    plugin_base.pre_begin(config.option)

    plugin_base.set_coverage_flag(
        bool(getattr(config.option, "cov_source", False))
    )

    plugin_base.set_fixture_functions(PytestFixtureFunctions)
def pytest_sessionstart(session):
    # Delegate post-argument-parsing startup to the plugin core.
    plugin_base.post_begin()
def pytest_sessionfinish(session):
    # Delegate end-of-session cleanup to the plugin core.
    plugin_base.final_process_cleanup()
if has_xdist:
    import uuid

    def pytest_configure_node(node):
        # the master for each node fills workerinput dictionary
        # which pytest-xdist will transfer to the subprocess
        plugin_base.memoize_important_follower_config(node.workerinput)

        # Give the worker a unique ident used to name its follower database.
        node.workerinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
        from sqlalchemy.testing import provision

        provision.create_follower_db(node.workerinput["follower_ident"])

    def pytest_testnodedown(node, error):
        # Drop the worker's follower database when the node shuts down.
        from sqlalchemy.testing import provision

        provision.drop_follower_db(node.workerinput["follower_ident"])
def pytest_collection_modifyitems(session, config, items):
    """Expand per-backend test classes into concrete per-database items."""
    # look for all those classes that specify __backend__ and
    # expand them out into per-database test cases.

    # this is much easier to do within pytest_pycollect_makeitem, however
    # pytest is iterating through cls.__dict__ as makeitem is
    # called which causes a "dictionary changed size" error on py3k.
    # I'd submit a pullreq for them to turn it into a list first, but
    # it's to suit the rather odd use case here which is that we are adding
    # new classes to a module on the fly.

    # Maps original test class -> {test name: [replacement items]}.
    rebuilt_items = collections.defaultdict(
        lambda: collections.defaultdict(list)
    )

    # Drop items not attached to a class instance, and anything on classes
    # whose names start with "_" (generated/abstract bases).
    items[:] = [
        item
        for item in items
        if isinstance(item.parent, pytest.Instance)
        and not item.parent.parent.name.startswith("_")
    ]

    test_classes = set(item.parent for item in items)
    for test_class in test_classes:
        for sub_cls in plugin_base.generate_sub_tests(
            test_class.cls, test_class.parent.module
        ):
            if sub_cls is not test_class.cls:
                per_cls_dict = rebuilt_items[test_class.cls]

                # in pytest 5.4.0
                # for inst in pytest.Class.from_parent(
                #     test_class.parent.parent, name=sub_cls.__name__
                # ).collect():
                for inst in pytest.Class(
                    sub_cls.__name__, parent=test_class.parent.parent
                ).collect():
                    for t in inst.collect():
                        per_cls_dict[t.name].append(t)

    # Substitute the generated items for their originals, in place.
    newitems = []
    for item in items:
        if item.parent.cls in rebuilt_items:
            newitems.extend(rebuilt_items[item.parent.cls][item.name])
        else:
            newitems.append(item)

    # seems like the functions attached to a test class aren't sorted already?
    # is that true and why's that? (when using unittest, they're sorted)
    items[:] = sorted(
        newitems,
        key=lambda item: (
            item.parent.parent.parent.name,
            item.parent.parent.name,
            item.name,
        ),
    )
def pytest_pycollect_makeitem(collector, name, obj):
    """Collect only the classes/methods the plugin core wants, expanding
    class-level parametrization through _parametrize_cls()."""
    if inspect.isclass(obj) and plugin_base.want_class(name, obj):
        # in pytest 5.4.0
        # return [
        #     pytest.Class.from_parent(collector,
        #     name=parametrize_cls.__name__)
        #     for parametrize_cls in _parametrize_cls(collector.module, obj)
        # ]
        return [
            pytest.Class(parametrize_cls.__name__, parent=collector)
            for parametrize_cls in _parametrize_cls(collector.module, obj)
        ]
    elif (
        inspect.isfunction(obj)
        and isinstance(collector, pytest.Instance)
        and plugin_base.want_method(collector.cls, obj)
    ):
        # None means, fall back to default logic, which includes
        # method-level parametrize
        return None
    else:
        # empty list means skip this item
        return []
# Test class currently set up at the class level; maintained by
# pytest_runtest_setup() and reset by its finalizer.
_current_class = None
def _parametrize_cls(module, cls):
    """implement a class-based version of pytest parametrize.

    For each combination in the cross-product of the recorded
    ``_sa_parametrize`` entries, generate a subclass of *cls* with the
    parameter values as class attributes and publish it on *module* so
    pytest can collect it.
    """

    if "_sa_parametrize" not in cls.__dict__:
        return [cls]

    _sa_parametrize = cls._sa_parametrize
    classes = []
    # Cartesian product across all recorded parameter lists.
    for full_param_set in itertools.product(
        *[params for argname, params in _sa_parametrize]
    ):
        cls_variables = {}

        # Bind each named argument as a class-level variable.
        for argname, param in zip(
            [_sa_param[0] for _sa_param in _sa_parametrize], full_param_set
        ):
            if not argname:
                raise TypeError("need argnames for class-based combinations")
            argname_split = re.split(r",\s*", argname)
            for arg, val in zip(argname_split, param.values):
                cls_variables[arg] = val
        parametrized_name = "_".join(
            # token is a string, but in py2k pytest is giving us a unicode,
            # so call str() on it.
            str(re.sub(r"\W", "", token))
            for param in full_param_set
            for token in param.id.split("-")
        )
        name = "%s_%s" % (cls.__name__, parametrized_name)
        # Create the subclass and publish it on the module for collection.
        newcls = type.__new__(type, name, (cls,), cls_variables)
        setattr(module, name, newcls)
        classes.append(newcls)
    return classes
def pytest_runtest_setup(item):
    """Per-test setup hook that also drives class-level setup/teardown."""
    # here we seem to get called only based on what we collected
    # in pytest_collection_modifyitems. So to do class-based stuff
    # we have to tear that out.
    global _current_class

    if not isinstance(item, pytest.Function):
        return

    # ... so we're doing a little dance here to figure it out...
    if _current_class is None:
        class_setup(item.parent.parent)
        _current_class = item.parent.parent

        # this is needed for the class-level, to ensure that the
        # teardown runs after the class is completed with its own
        # class-level teardown...
        def finalize():
            global _current_class
            class_teardown(item.parent.parent)
            _current_class = None

        item.parent.parent.addfinalizer(finalize)

    test_setup(item)
def pytest_runtest_teardown(item):
    """Per-test teardown hook."""
    # ...but this works better as the hook here rather than
    # using a finalizer, as the finalizer seems to get in the way
    # of the test reporting failures correctly (you get a bunch of
    # pytest assertion stuff instead)
    test_teardown(item)
def test_setup(item):
    # Notify the plugin core that a test is about to run.
    plugin_base.before_test(
        item, item.parent.module.__name__, item.parent.cls, item.name
    )
def test_teardown(item):
    # Notify the plugin core that a test has finished.
    plugin_base.after_test(item)
def class_setup(item):
    # Class-level setup delegated to the plugin core.
    plugin_base.start_test_class(item.cls)
def class_teardown(item):
    # Class-level teardown delegated to the plugin core.
    plugin_base.stop_test_class(item.cls)
def getargspec(fn):
    """Return *fn*'s argspec, portable across Python 2 and 3."""
    # inspect.getargspec was deprecated/removed on py3; use the full
    # variant there and fall back to the legacy call on py2.
    if sys.version_info.major == 3:
        return inspect.getfullargspec(fn)
    return inspect.getargspec(fn)
def _pytest_fn_decorator(target):
    """Port of langhelpers.decorator with pytest-specific tricks.

    Returns a ``decorate(fn, add_positional_parameters=())`` callable that
    wraps *fn* in a generated function whose signature mirrors *fn*'s
    (optionally extended with extra positional parameters) and which
    forwards to ``target(fn, ...)``.
    """
    from sqlalchemy.util.langhelpers import format_argspec_plus
    from sqlalchemy.util.compat import inspect_getfullargspec

    def _exec_code_in_env(code, env, fn_name):
        # Compile the generated wrapper source and pull the function out.
        exec(code, env)
        return env[fn_name]

    def decorate(fn, add_positional_parameters=()):
        spec = inspect_getfullargspec(fn)
        if add_positional_parameters:
            spec.args.extend(add_positional_parameters)

        # Build a wrapper whose textual signature matches fn's, so that
        # pytest's introspection sees the real argument names.
        metadata = dict(target="target", fn="fn", name=fn.__name__)
        metadata.update(format_argspec_plus(spec, grouped=False))
        code = (
            """\
def %(name)s(%(args)s):
    return %(target)s(%(fn)s, %(apply_kw)s)
"""
            % metadata
        )
        decorated = _exec_code_in_env(
            code, {"target": target, "fn": fn}, fn.__name__
        )
        if not add_positional_parameters:
            decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
            decorated.__wrapped__ = fn
            return update_wrapper(decorated, fn)
        else:
            # this is the pytest hacky part. don't do a full update wrapper
            # because pytest is really being sneaky about finding the args
            # for the wrapped function
            decorated.__module__ = fn.__module__
            decorated.__name__ = fn.__name__
            return decorated

    return decorate
class PytestFixtureFunctions(plugin_base.FixtureFunctions):
    """Pytest-backed implementation of the plugin core's fixture-function
    interface (skip exception, parameter combinations, fixtures, ids)."""

    def skip_test_exception(self, *arg, **kw):
        # Exception type raised to mark a test as skipped under pytest.
        return pytest.skip.Exception(*arg, **kw)

    # Maps id_ template characters to functions rendering one parameter
    # into its contribution to the generated test id.
    _combination_id_fns = {
        "i": lambda obj: obj,
        "r": repr,
        "s": str,
        "n": operator.attrgetter("__name__"),
    }

    def combinations(self, *arg_sets, **kw):
        """Facade for pytest.mark.parametrize.

        Automatically derives argument names from the callable which in our
        case is always a method on a class with positional arguments.

        ids for parameter sets are derived using an optional template.
        """
        from sqlalchemy.testing import exclusions

        # A single generator argument is expanded into its items.
        if sys.version_info.major == 3:
            if len(arg_sets) == 1 and hasattr(arg_sets[0], "__next__"):
                arg_sets = list(arg_sets[0])
        else:
            if len(arg_sets) == 1 and hasattr(arg_sets[0], "next"):
                arg_sets = list(arg_sets[0])

        argnames = kw.pop("argnames", None)

        def _filter_exclusions(args):
            # Split one parameter tuple into real parameters and any
            # exclusions.compound markers mixed in with them.
            result = []
            gathered_exclusions = []
            for a in args:
                if isinstance(a, exclusions.compound):
                    gathered_exclusions.append(a)
                else:
                    result.append(a)

            return result, gathered_exclusions

        id_ = kw.pop("id_", None)

        tobuild_pytest_params = []
        has_exclusions = False
        if id_:
            _combination_id_fns = self._combination_id_fns

            # because itemgetter is not consistent for one argument vs.
            # multiple, make it multiple in all cases and use a slice
            # to omit the first argument
            _arg_getter = operator.itemgetter(
                0,
                *[
                    idx
                    for idx, char in enumerate(id_)
                    if char in ("n", "r", "s", "a")
                ]
            )
            fns = [
                (operator.itemgetter(idx), _combination_id_fns[char])
                for idx, char in enumerate(id_)
                if char in _combination_id_fns
            ]

            for arg in arg_sets:
                if not isinstance(arg, tuple):
                    arg = (arg,)

                fn_params, param_exclusions = _filter_exclusions(arg)

                parameters = _arg_getter(fn_params)[1:]

                if param_exclusions:
                    has_exclusions = True

                tobuild_pytest_params.append(
                    (
                        parameters,
                        param_exclusions,
                        "-".join(
                            comb_fn(getter(arg)) for getter, comb_fn in fns
                        ),
                    )
                )

        else:

            for arg in arg_sets:
                if not isinstance(arg, tuple):
                    arg = (arg,)

                fn_params, param_exclusions = _filter_exclusions(arg)

                if param_exclusions:
                    has_exclusions = True

                tobuild_pytest_params.append(
                    (fn_params, param_exclusions, None)
                )

        pytest_params = []
        for parameters, param_exclusions, id_ in tobuild_pytest_params:
            # When any set carries exclusions, every set receives a trailing
            # exclusions element so the argument arity stays uniform.
            if has_exclusions:
                parameters += (param_exclusions,)

            param = pytest.param(*parameters, id=id_)
            pytest_params.append(param)

        def decorate(fn):
            if inspect.isclass(fn):
                if has_exclusions:
                    raise NotImplementedError(
                        "exclusions not supported for class level combinations"
                    )
                # Class-level combinations are recorded for later expansion
                # by _parametrize_cls() at collection time.
                if "_sa_parametrize" not in fn.__dict__:
                    fn._sa_parametrize = []
                fn._sa_parametrize.append((argnames, pytest_params))
                return fn
            else:
                if argnames is None:
                    # Skip "self"; the remaining positionals are the names.
                    _argnames = getargspec(fn).args[1:]  # type: Sequence(str)
                else:
                    _argnames = re.split(
                        r", *", argnames
                    )  # type: Sequence(str)

                if has_exclusions:
                    _argnames += ["_exclusions"]

                    @_pytest_fn_decorator
                    def check_exclusions(fn, *args, **kw):
                        # Apply any exclusions attached to this parameter
                        # set before invoking the real test function.
                        _exclusions = args[-1]
                        if _exclusions:
                            exlu = exclusions.compound().add(*_exclusions)
                            fn = exlu(fn)
                        return fn(*args[0:-1], **kw)

                    def process_metadata(spec):
                        # NOTE(review): defined but never referenced here.
                        spec.args.append("_exclusions")

                    fn = check_exclusions(
                        fn, add_positional_parameters=("_exclusions",)
                    )

                return pytest.mark.parametrize(_argnames, pytest_params)(fn)

        return decorate

    def param_ident(self, *parameters):
        # First element is the id; the remainder are the real parameters.
        ident = parameters[0]
        return pytest.param(*parameters[1:], id=ident)

    def fixture(self, *arg, **kw):
        return pytest.fixture(*arg, **kw)

    def get_current_test_name(self):
        return os.environ.get("PYTEST_CURRENT_TEST")
| [
"Max_Ong_@outlook.com"
] | Max_Ong_@outlook.com |
26e408ff7b8ea76f1a3851880d6af619017a2b21 | 57b5966315e50444087f0232f19a0f32e62c8dc5 | /src/main/generic_cpu/test5_1/build_program_options_links.py | 4364e4fd5a3fd4ad298feb718caa7ceffc59b301 | [
"BSL-1.0"
] | permissive | cicerone/kosim | b44b6e29ffddd69f18a1ad7463c2a9cba0b81a48 | a9f718a19019c11fd6e6c6fc0164d4d214bbb5e2 | refs/heads/master | 2021-01-22T06:36:56.915921 | 2011-09-10T05:38:19 | 2011-09-10T05:38:19 | 377,484 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | #!/usr/bin/env python
import os
current_path = os.getcwd()
cmd = 'ls'
print "==================="
print current_path
print "==================="
cmd = 'ln -s ' + current_path + '/program_options/options_builder.h ' + current_path + '/main/generic_cpu/test5_1/options_builder.h'
os.system(cmd)
cmd = 'ln -s ' + current_path + '/program_options/options_builder.cpp ' + current_path + '/main/generic_cpu/test5_1/options_builder.cpp'
os.system(cmd)
cmd = 'ln -s ' + current_path + '/program_options/program_options_base.h ' + current_path + '/main/generic_cpu/test5_1/program_options_base.h'
os.system(cmd)
cmd = 'ln -s ' + current_path + '/program_options/program_options_base.cpp ' + current_path + '/main/generic_cpu/test5_1/program_options_base.cpp'
os.system(cmd)
| [
"cicerone_mihalache@yahoo.com"
] | cicerone_mihalache@yahoo.com |
18ab42c276337f57636ec03c57500e23dd33eeda | d57f6c045c7b07dd53ee80982005beb33450b64b | /migrations/versions/b75221b7534f_.py | 80700eb2975f945de0b9aab80daaa6d3a076c042 | [] | no_license | gwynethbradbury/ouss_ball | 7df0ccafd42bd8d6fd22816c71fbe9a6a852351a | 1115fe316f7c1ee1407017a60a054b1f7291f331 | refs/heads/master | 2023-05-11T18:36:29.921936 | 2018-03-22T15:56:52 | 2018-03-22T15:56:52 | 122,100,136 | 1 | 0 | null | 2018-03-22T13:55:05 | 2018-02-19T17:58:55 | PHP | UTF-8 | Python | false | false | 641 | py | """empty message
Revision ID: b75221b7534f
Revises: 57bc3837370a
Create Date: 2016-01-11 19:56:43.653390
"""
# revision identifiers, used by Alembic.
revision = 'b75221b7534f'
down_revision = '57bc3837370a'

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql


def upgrade():
    """Add the non-nullable boolean ``paid`` column to ``postage``."""
    # NOTE(review): no server_default is supplied; adding a NOT NULL column
    # to a non-empty table may fail or rely on the backend's implicit
    # default -- confirm against the deployment database.
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('postage', sa.Column('paid', sa.Boolean(), nullable=False))
    ### end Alembic commands ###


def downgrade():
    """Revert :func:`upgrade` by dropping ``postage.paid``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('postage', 'paid')
    ### end Alembic commands ###
| [
"samuel.littley@toastwaffle.com"
] | samuel.littley@toastwaffle.com |
bd0fab02a5fbbadc2955432d86b4c0f514793a5d | 1817aca734cda258cbbfd9e13fbf040d76824621 | /aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/SetLogsDownloadStatusRequest.py | 3f5de92cf81226eceacc5ace8c2ca2a158173dc2 | [
"Apache-2.0"
] | permissive | sdk-team/aliyun-openapi-python-sdk | 4bd770718e70e31f19e1e322727c27ba74d9fb80 | 996cb07bfcf010fe3ab65daa73d26df2f3b6e97f | refs/heads/master | 2022-08-04T13:11:56.729215 | 2022-07-25T10:01:10 | 2022-07-25T10:01:10 | 183,356,741 | 0 | 0 | null | 2019-04-25T04:33:24 | 2019-04-25T04:33:24 | null | UTF-8 | Python | false | false | 2,308 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetLogsDownloadStatusRequest(RpcRequest):
    """RPC request for the SLB ``SetLogsDownloadStatus`` action (API 2014-05-15).

    Generated-style data holder in the usual aliyun-python-sdk shape: one
    getter/setter pair per query parameter, each backed by the request's
    query-parameter dict.
    """

    def __init__(self):
        # NOTE(review): the final positional argument ('asdfdsf') fills the
        # location-service-code slot of RpcRequest and looks like a
        # placeholder -- confirm against other generated SLB requests.
        RpcRequest.__init__(self, 'Slb', '2014-05-15', 'SetLogsDownloadStatus','asdfdsf')

    def get_access_key_id(self):
        return self.get_query_params().get('access_key_id')

    def set_access_key_id(self,access_key_id):
        self.add_query_param('access_key_id',access_key_id)

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self,OwnerAccount):
        self.add_query_param('OwnerAccount',OwnerAccount)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)

    def get_LogsDownloadStatus(self):
        return self.get_query_params().get('LogsDownloadStatus')

    def set_LogsDownloadStatus(self,LogsDownloadStatus):
        self.add_query_param('LogsDownloadStatus',LogsDownloadStatus)

    def get_Tags(self):
        return self.get_query_params().get('Tags')

    def set_Tags(self,Tags):
        self.add_query_param('Tags',Tags)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
0396db19461d11a0434ffe45dea96486a92f557d | c4ca9ceacbca44dae4616c5b21b29d033a199c54 | /Numerikal Methods/RCL.py | e5846cbe97f54eef5d15df0ba56f2ca0f3b2f0fe | [] | no_license | Fahmi-adzkar/Komputasi-Gelombang | a09f273e54daf913280a9dd6aa028b8b46fb1b73 | 74ebfea65bf2edf47148c8bbdc937cf9bb060a4e | refs/heads/master | 2022-10-21T06:48:17.676499 | 2020-06-14T13:01:33 | 2020-06-14T13:01:33 | 258,963,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | from matplotlib import pyplot as plt
import numpy as np

# Series RLC circuit driven by a sinusoidal source: computes the charge
# Q(t) as (homogeneous transient) + (particular steady-state) solution and
# plots the real part over t in [0, 20] s.

# Circuit parameters.
R = 10.0      # resistance [ohm]
L = 20.0      # inductance [H]
C = 2.0e-6    # capacitance [F]
w = 157.0     # drive angular frequency [rad/s]
V0 = 10.0     # drive amplitude [V]
N = 4000      # number of time samples
t = np.linspace(0, 20, N)
#print(C*C*R*R - 4*C*L)
q = np.zeros(N)
rq = np.zeros(N)
particular = np.zeros(N)
# Roots of the characteristic equation L*C*s^2 + R*C*s + 1 = 0; complex()
# keeps the sqrt valid when the discriminant is negative (underdamped case).
alpha1 = -0.5 * (C*R - np.sqrt(complex(C*C*R*R - 4*C*L))) / (C*L)
alpha2 = -0.5 * (C*R + np.sqrt(complex(C*C*R*R - 4*C*L))) / (C*L)
# Coefficients of the two transient modes and of the particular solution.
# NOTE(review): these closed-form expressions are taken as derived by the
# author and have not been re-verified symbolically.
numerator1 = C*L * (-R * np.sqrt(complex(C*C*R*R - 4*C*L)) + 2*C-4*L) * V0
denominator1 = 2*C*C*R*C*w*w*L*L - 8*w*w*L**2*C + C*C*R**4 - \
    6*R*R*C*L + 8*L*L - np.sqrt(complex(C*C*R*R - 4*C*L))*R**3*C + \
    4*R*np.sqrt(complex(C*C*R*R - 4*C*L))*L
factor1 = numerator1 / denominator1
numerator2 = C*L * (-R*np.sqrt(complex(C*C*R*R-4*C*L)) + R*R*C - 4*L) * V0
denominator2 = (R*R*C -4*L) * (R*np.sqrt(complex(C*C*R*R - 4*C*L)) + \
    R*R*C - 2*L + w*w*C*L*L)
factor2 = numerator2 / denominator2
numerator3 = C*V0
denominator3 = R*R*C*C*w*w + 1 - 2*C*w*w*C*L + w**4*C*C*L*L
factor3 = numerator3 / denominator3
# Steady-state response at the drive frequency.
particular = np.cos(w*t) - np.cos(w*t)*w*w*C*L + np.sin(w*t)*w*C*R
q = np.exp(alpha1*t) * factor1 + np.exp(alpha2*t)*factor2 + particular*factor3
# Only the real part of the complex-mode sum is physical.
rq = np.real(q)
plt.plot(t, rq, '-b')
plt.title("RCL circuit")
plt.xlabel("t [s]")
plt.ylabel("Q [C]")
plt.show()
"noreply@github.com"
] | noreply@github.com |
e2d2378456b3bdf8a5030177685e67b9cf7dc3ee | aed701e4aa8cac8084988ff59ac84954f617bff3 | /Data Science Brazil Marathon/Week_01/Python Boothcamp/soma dos elementos.py | 990696eed643e33f762c4bc0600920d8bd70fab2 | [] | no_license | lebaruch/Courses | 4f1e4fe69c14931eebcd12374fffc90af1283d33 | 446a6e53359f06840abc97770dbe535168e7d96c | refs/heads/master | 2020-04-12T21:10:44.670379 | 2019-02-06T16:17:11 | 2019-02-06T16:17:11 | 162,755,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | #Entrada de dados
# Read the element count (consumed but otherwise unused) and the
# space-separated integers, then print their sum.
n = int(input())
x = [int(token) for token in input().split()]

# Built-in sum() replaces the previous manual index-conversion loop and
# accumulator loop; printed output is identical.
print(sum(x))
"32648733+lebaruch@users.noreply.github.com"
] | 32648733+lebaruch@users.noreply.github.com |
88527c65331eeed22277879b45bee253d707fccd | 5d2d7b1664df2e2f3eb925e778df0eb28a4c9d31 | /abc181/c.py | 72ec609d2d8ee63a9278bd1a460f292ecc3135fd | [] | no_license | nsmr-sor/atcoder | 149bdff9f584b07eaac1024ccc0b80610baecdda | 103f6450d2f060b5d4acb69963a5c7035269faf5 | refs/heads/main | 2023-02-19T13:10:49.272947 | 2021-01-17T16:20:14 | 2021-01-17T16:20:14 | 327,569,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # ✅
from sys import stdin
import math
def main():
    """Read N points from stdin and report whether any three are collinear.

    Input: first line N, then N lines "x y". Prints "Yes" and exits as soon
    as a collinear triple is found, otherwise prints "No".
    """
    input = stdin.readline
    n = int(input())
    pts = [list(map(int, input().split())) for _ in range(n)]
    for i in range(n):
        for j in range(i):
            for k in range(j):
                # Translate so pts[k] is the origin; the triple is collinear
                # exactly when the 2-D cross product of the two vectors is 0.
                x1 = pts[i][0] - pts[k][0]
                y1 = pts[i][1] - pts[k][1]
                x2 = pts[j][0] - pts[k][0]
                y2 = pts[j][1] - pts[k][1]
                if x1 * y2 == x2 * y1:
                    print("Yes")
                    exit()
    # Removed unused locals (ans, d, d0) from the original.
    print("No")


if __name__=='__main__':
    main()
"nsmr.sor@gmail.com"
] | nsmr.sor@gmail.com |
8411f21b811eca560091444108d42f0dc1514fce | 951a3c8d6ec3d4e5f0718b8e6c92348196e5ebbf | /mysite/polls/migrations/0003_remove_question_question_prompt.py | e82e0fccbdf3a513a36859ca9de862621ece514d | [] | no_license | aspiringguru/learnDjango | 6f3b178381cd8037f9c954e7cc49f68d6a8b3b4c | 24ac82293b109ad36bb375e32983154b4de23470 | refs/heads/master | 2020-12-10T23:00:33.479558 | 2020-01-15T08:46:18 | 2020-01-15T08:46:18 | 233,736,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | # Generated by Django 2.2.9 on 2020-01-15 00:08
from django.db import migrations


class Migration(migrations.Migration):
    """Drop the ``question_prompt`` field added by migration 0002."""

    dependencies = [
        ('polls', '0002_question_question_prompt'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='question',
            name='question_prompt',
        ),
    ]
| [
"bmatthewtaylor@gmail.com"
] | bmatthewtaylor@gmail.com |
28260c0c3fdb9c630f17b72bb8b5f4bddf3d0d23 | 717a1d83b2a70987e09666a4f5e5b7b1a63e103e | /gen_cont.py | 6321748d61d8e8e94acbfb11d1cff4207ab98d7f | [] | no_license | angest1000/CBPythonPtzi | 041426ac6a3160242fda7c233e7d3f5e705cb562 | efe20c86bd1df653fc8509929f30481dfc13c83f | refs/heads/master | 2022-11-24T10:36:36.810243 | 2020-08-02T15:12:41 | 2020-08-02T15:12:41 | 284,171,017 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | import random
def generar_contrasena(longitud):
    """Return a random password of ``longitud`` characters.

    Characters are drawn uniformly (with repetition) from uppercase and
    lowercase letters (including N/n with tilde), digits, and a fixed set
    of punctuation symbols -- the same alphabet as before, now written as
    strings instead of four hand-typed lists.

    NOTE: this uses ``random``, which is not cryptographically secure;
    use the ``secrets`` module for security-sensitive passwords.
    """
    mayus = 'ABCDEFGHIJKLMNÑOPQRSTUVWXYZ'
    minus = 'abcdefghijklmnñopqrstuvwxyz'
    num = '0123456789'
    simbolos = ".,:;*{}[]()!#$%&/'=¿?"
    caracteres = mayus + minus + num + simbolos
    return ''.join(random.choice(caracteres) for _ in range(longitud))
def main():
    """Prompt for a length and print a freshly generated password."""
    longitud = int(input('De que longitud quieres que sea tu nueva contraseña: '))
    print('Tu nueva contraseña es: ' + generar_contrasena(longitud))


if __name__ == '__main__':
    main()
"angest1000@gmail.com"
] | angest1000@gmail.com |
4754f4b9498095bafa3fd040f74cfafe16bd2007 | 0033e322b2d0dee963ecb4ba624ba0c4a980b58d | /pythonBasics/ifThisThenThat.py | 1ff34c4a3ddf61efd0224de2c66523bfabc8256a | [
"MIT"
] | permissive | GavinThomas1192/pythonBasics | 77687fc0975e06c986db85e34bbe8d109b3fd01d | be968f75a823543d8cb39b477150c45f54e933fd | refs/heads/master | 2021-09-03T11:13:45.486975 | 2018-01-08T15:58:51 | 2018-01-08T15:58:51 | 111,608,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | age = 25 * 365
# Practice script for if/else branching; 'age' is defined above as
# 25 * 365, i.e. 25 years expressed in days (9125).
if age < 10000:
    print('Wow, you\'re young!', 'Age = {}'.format(age))
else:
    print('Wow, you\'re old!')

# Make admitted = true if age is 13
admitted = None
if age >= 13:
    admitted = True
else:
    print('Age isn\'t 13!')

days_open = ['Monday', 'Tuesday', 'Wednesday']
days_open_string = (', '.join(days_open))
today = 'Saturday'

# 'today' is hard-coded, so this always takes the "closed" branch.
if today in days_open:
    print('Come on In!')
else:
    print('Sorry we are closed {}'.format(today), 'But we are open {}'.format(days_open_string))
"gthomas1192@gmail.com"
] | gthomas1192@gmail.com |
27ed906ab1dac7b2975ec6cfaf6a6c99b4f6056f | cbb4d4b58bc382b9d0cf7ea4b2d7e767ac793283 | /study/bin/pip | 9e55dc4683809303f40f85bd1ef24ebc2e49a840 | [] | no_license | GDUT-Condi/django-model-test | 46166969dde8b9571e5b4dc6ced5b53e6ddb44eb | 1372f13419fadab0dfd97375b73abc055f329239 | refs/heads/master | 2021-05-15T09:55:58.830653 | 2017-10-25T09:34:29 | 2017-10-25T09:34:29 | 108,227,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | #!/home/condi/django-model-test/study/bin/python2.7
# -*- coding: utf-8 -*-

# Auto-generated console-script shim for pip inside this virtualenv:
# strips a trailing "-script.py(w)"/".exe" from argv[0] (Windows launcher
# artefacts) and hands control to pip's CLI entry point.
import re
import sys

from pip import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"465982240@qq.com"
] | 465982240@qq.com | |
3d9b92e848fdcae5c757c763e97c33e57548532f | 2a9c651a11ded845c0d6e662a1bdfeb8322ab8e4 | /code/src/sinogram.py | 7df277efb3eefbb5d7d618d11a082841ac9a2df2 | [
"MIT"
] | permissive | tomboulier/dcc-translation | 33a369b46b2ec185ca29e62fe75c6ac003954946 | 3463346918e07ffa15bb6f5a3e39a44b992da353 | refs/heads/main | 2023-04-08T01:43:16.155933 | 2022-03-27T14:02:01 | 2022-03-27T14:02:01 | 356,943,838 | 0 | 0 | null | 2021-04-12T20:09:32 | 2021-04-11T18:12:13 | TeX | UTF-8 | Python | false | false | 2,341 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import interpolate
class Results(object):
    """Container for everything computed by ``Simulator``.

    Holds the acquisition parameters plus the source and detector models,
    and exposes helpers to display the sinogram and to interpolate the
    projection operator T(alpha, t).
    """

    def __init__(self, params, source, detector):
        self.params = params
        self.source = source
        self.detector = detector
        # Filled in later by the simulation / post-processing steps.
        self.projections = None
        self.projections_interpolator = None
        self.DCC_function = None
        self.DCC_function_theo = None

    def plotSinogram(self, xunits='mm'):
        """Display the sinogram with gantry angle on the y-axis.

        Parameters
        ----------
        xunits : {'mm', 'degrees'}
            'mm' labels the x-axis with the distance on the detector;
            'degrees' with the beam direction angle phi.

        Raises
        ------
        ValueError
            If ``xunits`` is not one of the supported values.
        """
        # Axis limits derived from the acquisition geometry.
        imageSize = self.params.imageSize
        max_angle = self.params.get_max_angle()
        min_angle = self.params.get_min_angle()
        # Half fan angle (in degrees) subtended by the detector.
        phimax = np.arctan(.5 * imageSize / self.params.sdd) * 360 / (2 * np.pi)

        plt.figure()
        if xunits == 'mm':
            # The units here represent a distance (on the detector).
            plt.xlabel('Distance from detector center (in mm)', labelpad=20)
            extent = [-imageSize / 2, imageSize / 2, max_angle, min_angle]
            aspect = imageSize / (max_angle - min_angle)
        elif xunits == 'degrees':
            # The units here represent an angle ('phi' in T(x,phi)).
            plt.xlabel('Beam direction (in degrees)', labelpad=20)
            extent = [-phimax, phimax, max_angle, min_angle]
            aspect = 2 * phimax / (max_angle - min_angle)
        else:
            # Previously an unknown unit crashed later with a NameError on
            # 'extent'; fail fast with a clear message instead.
            raise ValueError("xunits must be 'mm' or 'degrees', got %r" % (xunits,))
        plt.imshow(self.projections, cmap=cm.Greys_r, extent=extent,
                   aspect=aspect / 2)
        plt.ylabel('Gantry angle (in degrees)', labelpad=20)
        plt.show()

    def interpolate_projection(self):
        """Build a linear 2-D interpolator of the operator T(alpha, t).

        Be careful: the angle alpha is the angle between the beam and the
        line joining the source to the center of the detector -- not to be
        confused with phi, the angle between the beam and the y-axis.
        """
        t = self.params.get_time_range()
        alpha = self.params.get_alpha_range()
        self.projections_interpolator = interpolate.interp2d(alpha, t,
                                                             self.projections,
                                                             kind='linear')
"boulier.thomas@gmail.com"
] | boulier.thomas@gmail.com |
1b436fbdb01b1c36642b2c16d65b37fe7528f948 | 951e8f73b3a7b160aaa7a9f63ef87b7af6dd367d | /utilities/url_utilities.py | 40b167f0f5a080dfe03339f03825c70a8d97e1c2 | [] | no_license | nickflanagan/PageSpider | adbbf3a11e677edf37d844662c428a638794140e | 7f06f29183ab8aee5039b181e5d24126e1b94f65 | refs/heads/master | 2020-05-09T19:14:53.696883 | 2019-04-17T03:02:04 | 2019-04-17T03:02:04 | 181,372,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | import re
import string
from urllib.request import urlopen
from bs4 import BeautifulSoup
def load_urls_from_files(file_path: str):
    """Return every line of ``file_path`` (newlines kept).

    Exits the program with status 2 if the file does not exist.
    """
    try:
        with open(file_path) as url_file:
            return url_file.readlines()
    except FileNotFoundError:
        print("the file {0} could not be found".format(file_path))
        exit(2)  # did not complete successfully
def load_page(url: str):
    """Fetch ``url`` and return its body decoded as UTF-8 text."""
    # Context manager closes the HTTP response; the original leaked it.
    with urlopen(url) as response:
        return response.read().decode('utf-8')
def scrape_page(page_contents: str):
    """Extract a list of cleaned, lowercase words from raw HTML.

    Strips <script>/<style> blocks, flattens the visible text to single
    spaces, drops non-printable characters, then keeps only words that
    have no punctuation, no digits, are 2-10 characters long, and consist
    purely of word characters.
    """
    soup = BeautifulSoup(page_contents, "html5lib")
    for tag in soup(["script", "style"]):
        tag.extract()

    text = soup.get_text()
    stripped_lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in stripped_lines for phrase in line.split(" "))
    text = ' '.join(chunk for chunk in chunks if chunk)
    plain_text = ''.join(filter(lambda x: x in string.printable, text))

    clean_words = []
    for word in plain_text.split(" "):
        has_punctuation = any(mark in word for mark in string.punctuation)
        has_digit = any(char.isdigit() for char in word)
        bad_length = len(word) < 2 or len(word) > 10
        if has_punctuation or has_digit or bad_length:
            continue
        if not re.match(r'^\w+$', word):
            continue
        try:
            clean_words.append(word.lower())
        except UnicodeEncodeError:
            print(".")
    return clean_words
| [
"flanaganna@gmail.com"
] | flanaganna@gmail.com |
9899e61e838316ce1b3b991f7e30f2cdf4ceda1b | 2f73618f69cf43eed83d3d2af52a5618bb87d74f | /Projects/TechRecommender/python_51job_8_12/data_save.py | bd7e3915fd553f8ac570553d43ccd026476f9866 | [] | no_license | JerryLiuLYU/PyLYU | a4c18309912d94f1fb8ff2acb21a031bfc1f0888 | 096586e49a42bf2e386f9c9d025271667d1449b3 | refs/heads/master | 2021-06-16T08:25:01.630744 | 2021-03-24T12:35:49 | 2021-03-24T12:35:49 | 165,650,036 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from python_51job_8_12.crawling import *
def save(sheet_tab, content):
    """Persist one scraped record by inserting it into ``sheet_tab``.

    ``sheet_tab`` is only required to expose an ``insert`` method; it looks
    like a pymongo collection -- NOTE(review): confirm against the crawler
    module this file star-imports from.
    """
    sheet_tab.insert(content)
| [
"xiatiandeyu1997@126.com"
] | xiatiandeyu1997@126.com |
929ee08aee8171b652e862af8479fa10eae5a457 | 6984724d0466d477635b23d073affa9b00f01f67 | /Tasks/Gerasimchik_Tasks/Task3/HomeWork3.py | b356f9898d1a93b9941f46ea4a68afa882cd1919 | [] | no_license | RomanPutsilouski/M-PT1-37-21 | 202414fac782e6c68f741e55f9b7697f0c974f45 | ceef9b4e6bcff2a9033615ec761f0e2e73c9467e | refs/heads/main | 2023-05-30T21:10:22.404817 | 2021-06-30T00:26:29 | 2021-06-30T00:26:29 | 348,462,785 | 1 | 0 | null | 2021-06-05T15:44:27 | 2021-03-16T19:06:57 | Python | UTF-8 | Python | false | false | 1,803 | py | import io
def format_text(page_width):
    """Fully justify text.txt to ``page_width`` columns into formatted_text.txt.

    Each paragraph line is re-wrapped greedily, then every wrapped line with
    more than one word is padded so inter-word gaps fill the width exactly.
    """
    with io.open('text.txt', 'r', encoding='utf-8') as text_file:
        raw_text = text_file.read()
    result_text = ''
    for line in raw_text.split('\n'):
        inter_list_1 = []   # words of the wrapped line being built
        inter_list_2 = []   # completed wrapped lines (lists of words)
        string_length = 0   # running length incl. one space per word
        for word in line.split():
            if string_length + len(word) <= page_width:
                inter_list_1.append(word)
                string_length += len(word) + 1
            else:
                inter_list_2.append(inter_list_1)
                inter_list_1 = [word]
                string_length = len(word) + 1
        inter_list_2.append(inter_list_1)
        for line_str in inter_list_2:
            if len(line_str) == 1:
                # Single word: nothing to justify.
                result_str = ''.join(line_str) + '\n'
            else:
                # a = base gap width, b = how many gaps get one extra space.
                a = (page_width - len(''.join(line_str))) // (len(line_str) - 1)
                b = (page_width - len(''.join(line_str))) % (len(line_str) - 1)
                result_str = (a * ' ').join(line_str)
                # Widen the first b gaps from a to a+1 spaces via a counted
                # replace. NOTE(review): if a == 0 the pattern is the empty
                # string and replace() inserts at every position -- verify
                # behaviour for lines whose words exactly fill the width.
                result_str = result_str.replace((a * ' '), ((a + 1) * ' '), b) + '\n'
            result_text += result_str
        result_text += '\n'
    # Drop the trailing blank line added after the final paragraph.
    result_text = result_text[:-2]
    with io.open('formatted_text.txt', 'w', encoding='utf-8') as text_file:
        text_file.write(result_text)
    print('Текст записан в файл formatted_text.txt')
# CLI loop: keep asking until the user supplies a numeric width > 15,
# then format the text once and stop.
active = True
while active:
    raw_width = input('Введите ширину страницы\n')
    if not raw_width.isnumeric():
        print('Введите число')
        continue
    if int(raw_width) <= 15:
        print('Ширина страницы должна быть больше 15')
        continue
    format_text(int(raw_width))
    active = False
| [
"gerasimchick@tut.by"
] | gerasimchick@tut.by |
ba9d90bc4d7be681860622542269363eb3dd8008 | 0228981cc246e8fdc4f04b8084a546efb4410009 | /app.py | 8173e682d0857cf509f4a7efc8edee59a6ea2b38 | [] | no_license | prates/amamentsp | 5545c3419cd7aa2e08718d00e41f249b8b7c0752 | d34a54cb4b168c5b00338796ad164a49a5b68d2f | refs/heads/master | 2020-04-10T23:19:48.032945 | 2018-12-11T14:56:28 | 2018-12-11T14:56:28 | 161,349,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,709 | py | import json
import logging
import os
import sys
import flask
from flask import Flask
from flask import request
from app.address_controller import Address
from app.auth import Auth
from app.controller_donation_user import DonationControllerUSer
from app.donation_institution import DonationInstitutionController
from app.donation_type_controller import DonationTypeController
from app.donation_type_details_controller import DonationTypeDetailsController
from app.institution_controller import CRUDInstitution
from app.promotion_controller import ControllerPromotion
from app.user_controller import UserController
from app.unit_controller import UnitController
from app.institution_stock import StockInstitution
application = Flask(__name__)

# Route application logs through gunicorn's error logger so messages show
# up in the server logs at gunicorn's configured level.
gunicorn_logger = logging.getLogger("gunicorn.error")
application.logger.handlers = gunicorn_logger.handlers
application.logger.setLevel(gunicorn_logger.level)
@application.route('/')
def index():
    """Root endpoint; simple liveness check."""
    print('Hit on /')
    return 'Hello World! 1234'
@application.route("/cities/", methods=["GET"])
def list_cities():
if request.method == "GET":
query = request.args.get("query", type=str)
state_id = request.args.get("state_id", type=str)
application.logger.info("/cities/ method: %s" % (request.method))
application.logger.info("headers")
application.logger.info(request.headers)
application.logger.info("params %s" % request.args)
addr = Address()
result = addr.query_cities(state_id=state_id, query=query)
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
if len(result) < 4:
return resp, 204
return resp
@application.route("/states/", methods=["GET"])
def list_states():
if request.method == "GET":
country_id = request.args.get("country_id", default=1, type=int)
query = request.args.get("query", type=str)
addr = Address()
result = addr.query_states(country_id=country_id, query=query)
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
if len(result) < 4:
return resp, 204
return resp
@application.route("/countries/", methods=["GET"])
def list_countries():
if request.method == "GET":
query = request.args.get("query", type=str)
addr = Address()
result = addr.query_countries(query=query)
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
if len(result) < 4:
return resp, 204
return resp
@application.route("/users/", methods=["POST", "GET", "PUT", "DELETE"])
def process_users():
usercontroller = UserController()
if request.method == "POST":
content = request.json
print(content)
#try:
user = usercontroller.create_user(city_id=content["city_id"],
email=content["email"],
password=content["password"],
name=content["name"],
birth_date=content["birth_date"],
phone = content["phone"],
role_id=content["role_id"],
nickname=content["nickname"],
gender=content["gender"],
street=content["street"],
number=content["number"],
complement=content["complement"],
district=content["district"],
postal_code=content["postal_code"])
print("usuario criado")
resp = flask.Response(user)
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers.add("Access-Control-Allow-Headers", "*")
resp.headers.add("Access-Control-Allow-Methods", "*")
response = json.loads(user)
if response.get("message"):
return resp, 200
else:
return resp, 201
#except Exception as ex:
# return "", 400
elif request.method == "GET":
email_query = request.args.get("email-query", type=str)
application.logger.info("method %s args = %s" % (request.method, request.args))
result = usercontroller.list_users(email_query)
application.logger.info("result : %s" % (result))
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
elif request.method == "DELETE":
content = request.json
result = usercontroller.delete_user(user_id=content["user_id"])
resp = flask.Response(json.dumps(result))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
elif request.method == "PUT":
content = request.json
print(content)
result = usercontroller.update_user(**content)
resp = flask.Response(json.dumps(result["result"]))
resp.headers['Access-Control-Allow-Origin'] = '*'
if result["erro"] == "OK":
return resp , 200
elif result["erro"] == "NOFOUND":
resp = flask.Response(json.dumps({"message": "user not found"}))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 205
@application.route("/roles/", methods=["GET"])
def list_roles():
if request.method == "GET":
usercontroller = UserController()
result = usercontroller.list_user_types()
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
@application.route("/phones/", methods=["POST", "PUT", "DELETE", "GET"])
def process_phone():
usercontroller = UserController()
content = request.json
if request.method == "POST":
try:
response = usercontroller.add_phone(user_id=content["user-id"],
phone_number=content["phone-number"])
return response, 201
except Exception as ex:
return ex, 400
elif request.method == "PUT":
#try:
usercontroller.update_phone(user_id=content["user-id"],
phone_id=content["phone-id"],
phone_number=content["phone-number"])
return "", 200
#except Exception as ex:
# return "", 400
elif request.method == "GET":
try:
result = usercontroller.list_user_phones(user_id=request.args.get("user-id", type=int))
return result
except Exception as ex:
print(ex, file=sys.stderr)
return "", 400
elif request.method == "DELETE":
try:
usercontroller.remove_phone(user_id=content["user-id"],
phone_id=content["phone-id"])
return "", 200
except Exception as ex:
return "", 400
@application.route("/login/", methods=["POST"])
def login():
content = request.json
auth = Auth()
result = auth.autenticate(email=content["email"], password=content["password"])
if result is None:
result = json.dumps({"message": "email ou password invalid"})
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
@application.route("/logout/", methods=["GET"])
def logout():
auth = Auth()
token = request.headers.get("token")
token = auth.logout(token=token)
result = json.dumps({"token": token, "message": "logout"})
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
@application.route("/institutions/", methods=["POST", "GET", "PUT", "DELETE"])
def institutions():
inst = CRUDInstitution()
if request.method == "POST":
content = request.json
#try:
result = inst.create_institution(city_id=content["city-id"],
institution_type_id=content["institution-type"],
name=content["name"],
email=content["email"],
site=content["site"],
street=content["street"],
number=content["number"],
complement=content["complement"],
district=content["district"],
phone=content["phone"],
postal_code=content["postal-code"]
)
#except KeyError as ex:
# return json.dumps({"message": "Field not found %s" % (str(ex))}), 401
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 201
elif request.method == "GET":
result = inst.list_institution(query=request.args.get("query", type=str))
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
elif request.method == "PUT":
content = request.json
result = inst.alter_instution(**content)
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
elif request.method == "DELETE":
content = request.json
try:
result = inst.delete_institution(institution_id=content["institution-id"])
resp = flask.Response(json.dumps(result))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
except Exception as ex:
return "", 400
@application.route("/institution-types/", methods=["POST", "GET", "PUT", "DELETE"])
def institution_type():
inst = CRUDInstitution()
if request.method == "POST":
content = request.json
result = inst.add_institution_type(content["description"])
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 201
elif request.method == "GET":
result = inst.list_institution_type()
resp = flask.Response(result)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
elif request.method == "PUT":
content = request.json
result = inst.update_institution_type(id=content["id"],
description=content["description"])
return "", 200
elif request.method == "DELETE":
content = request.json
inst.remove_institution_type(content["id"])
return "", 200
@application.route("/link-user-institutions/", methods=["GET", "POST", "DELETE"])
def linked_users():
users = UserController()
if request.method == "POST":
content = request.json
result = users.link_institution(user_id=content["user_id"],
institution_id=content["institution_id"])
return json.dumps(result), 200
elif request.method == "GET":
application.logger.info("/link-user-institutions/ - %s" % (request.method))
user_id = request.headers.get("user_id", type=int)
if user_id is None:
user_id = request.args.get("user_id", type=int)
application.logger.info("/link-user-institutions/ - args: %s" % (request.args))
application.logger.info("/link-user-institutions/ - headers: %s" % (request.headers))
application.logger.info("/link-user-institutions/ - user_id: %s" % (user_id) )
result = users.list_linked_institution(user_id)
result_json = json.dumps(result)
application.logger.info("/link-user-institutions/ - json_response %s " % (result_json))
resp = flask.Response(result_json)
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
@application.route("/link-user-institutions/delete/", methods=["POST"])
def delete_linked_users():
users = UserController()
if request.method == "POST":
content = request.json
result = users.unlink_institution(user_id=content["user_id"],
institution_id=content["institution_id"])
resp = flask.Response(json.dumps(result))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
@application.route("/link-institution-users/", methods=["GET", "POST", "DELETE"])
def linked_users_institution():
inst = CRUDInstitution()
if request.method == "GET":
inst_id = request.args.get("institution_id", type=int)
type = request.args.get("type", type=str)
application.logger.info("/link-institution-users/ method: %s" % (request.method))
application.logger.info("params: %s" % (request.args))
application.logger.info("headers")
application.logger.info(request.headers)
if type is None:
users = inst.list_linked_users(institution_id=inst_id)
else:
users = inst.list_linked_users(institution_id=inst_id, type=type)
application.logger.info("result: %s" % (users))
users_json = json.dumps(users)
resp = flask.Response(users_json)
resp.headers['Access-Control-Allow-Origin'] = '*'
#if len(users) > 0 :
return resp, 200
#else:
# return resp, 204
elif request.method == "POST":
content = request.json
user_id = content["user_id"]
institution_id = content["institution_id"]
application.logger.info("/link-institution-users/ method - %s" %(request.method))
application.logger.info("content %s" %(content))
result = inst.approve_user(institution_id=institution_id, user_id=user_id)
application.logger.info("result %s" % (result))
resp = flask.Response(json.dumps(result))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
@application.route("/link-institution-users/delete/", methods=["POST"])
def delete_linked_users_institution():
inst = CRUDInstitution()
if request.method == "POST":
content = request.json
application.logger.info("/link-institution-users/ method - %s" %(request.method))
application.logger.info("HEADERS ------ %s" % (request.headers))
application.logger.info("body %s" %(request.json))
result = inst.remove_user(institution_id=content["institution_id"], user_id=content["user_id"])
application.logger.info("result %s" % (result))
resp = flask.Response(json.dumps(result))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
@application.route("/status/", methods=["GET"])
def list_status():
if request.method == "GET":
status = ["PENDING", "APROVED", "DELETED", "MASTER", "MEMBER_PENDING", "MEMBER"]
resp = flask.Response(json.dumps(status))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
@application.route("/promotion/", methods=["GET", "PUT", "DELETE", "POST"])
def process_promotion():
promotion_ctrl = ControllerPromotion()
if request.method == "GET":
size = request.args.get("size", type=int)
promotion_result = promotion_ctrl.list_all_promotion(size)
resp = flask.Response(json.dumps(promotion_result))
if len(promotion_result) > 0:
return resp, 200
else:
return resp, 204
elif request.method == "POST":
content = request.json
institution_id = content["institution_id"]
del content["institution_id"]
data = promotion_ctrl.create_promotion(institution_id=institution_id, **content)
resp = flask.Response(json.dumps(data))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
elif request.method == "PUT":
content = request.json
promotion_id = content["promotion_id"]
del content["promotion_id"]
result = promotion_ctrl.alter_promotion(promotion_id=promotion_id, **content)
resp = flask.Response(json.dumps(result["result"]))
resp.headers['Access-Control-Allow-Origin'] = '*'
if result["status"] == "OK":
return resp, 200
else:
return resp, 404
elif request.method == "DELETE":
promotion_id = request.headers.get("promotion_id", type=int)
result = promotion_ctrl.delete_promotion(promotion_id=promotion_id)
if result:
msg = {"message": "promotion deleted"}
else:
msg = {"message": "promotion not deleted"}
resp = flask.Response(json.dumps(msg))
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp, 200
@application.route("/donations-types/", methods=["GET", "PUT", "DELETE", "POST"])
def process_donation_types():
    """CRUD endpoint for donation types; every reply carries a CORS header."""
    controller = DonationTypeController()

    def _reply(payload):
        # Wrap an already-serialized payload and attach the CORS header.
        response = flask.Response(payload)
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response, 200

    method = request.method
    if method == "GET":
        return _reply(controller.list_donation_types())
    if method == "POST":
        return _reply(controller.create_donation_types(**request.json))
    if method == "PUT":
        body = request.json
        type_id = body.pop("donation_type_id")
        altered = controller.alter_donation_types(donation_type_id=type_id, **body)
        if altered:
            # Echo the altered fields back (without the id, as before).
            payload = json.dumps(body)
        else:
            payload = json.dumps({"message": "donation type not altered"})
        return _reply(payload)
    if method == "DELETE":
        type_id = request.headers.get("donation_type_id")
        deleted = controller.delete_donation_types(donation_type_id=type_id)
        if deleted:
            payload = json.dumps({"message": "donation type deleted"})
        else:
            payload = json.dumps({"message": "donation type not deleted"})
        return _reply(payload)
@application.route("/donations-user/", methods=["GET", "DELETE", "POST"])
def process_donation_user():
    """List (GET), create (POST) or remove (DELETE) a user's donations.

    Fixes over the previous revision:
    * DELETE built its reply with ``flask.request(...)`` — the request proxy,
      which is not callable as a response factory — so the branch always
      crashed; it now uses ``flask.Response``.
    * The failure reply used the misspelled key ``"maessage"``.
    * The DELETE query parameter was only read under the misspelled name
      ``doantion_user_id``; the correct spelling is now accepted first, with
      the historical misspelling kept for backward compatibility.
    """
    donation = DonationControllerUSer()
    if request.method == "GET":
        user_id = request.args.get("user_id")
        result = donation.list_donations(user_id)
        resp = flask.Response(json.dumps(result))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp, 200
    elif request.method == "POST":
        application.logger.info("/donations-user/ %s" % (request.method))
        content = request.json
        application.logger.info(" request body - %s" % (content))
        user_id = content["user_id"]
        del content["user_id"]
        result = donation.create_donation(user_id=user_id, **content)
        application.logger.info("result %s" % (result))
        resp = flask.Response(json.dumps(result))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp, 200
    elif request.method == "DELETE":
        application.logger.info("/donations-user/ %s" % (request.method))
        # Prefer the correctly spelled parameter; fall back to the old typo.
        donation_user_id = request.args.get("donation_user_id", type=int)
        if donation_user_id is None:
            donation_user_id = request.args.get("doantion_user_id", type=int)
        result = donation.remove_donation(donation_user_id=donation_user_id)
        response = {}
        if result:
            response["message"] = "Doacao removida com sucesso."
        else:
            response["message"] = "Erro ao remover a doacao"
        # fix: was flask.request(...), which raised on every call.
        resp = flask.Response(json.dumps(response))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp, 200
@application.route("/donations-institution/", methods=["GET", "POST"])
def process_donation_institution():
    """List an institution's user donations (GET) or activate one (POST)."""
    controller = DonationInstitutionController()
    logger = application.logger
    # Both branches logged the same banner line; only GET/POST are routed.
    logger.info("/donations-institution/ %s" % (request.method))
    if request.method == "GET":
        institution_id = request.args.get("institution_id", type=int)
        logger.info("result params %s" % (institution_id))
        donations = controller.list_user_donation(institution_id=institution_id)
        logger.info("result %s" % (donations))
        reply = flask.Response(json.dumps(donations))
        reply.headers['Access-Control-Allow-Origin'] = '*'
        return reply, 200
    elif request.method == "POST":
        body = request.json
        logger.info("result params %s" % (body))
        # NB: the controller method really is named "ativate_donation".
        outcome = controller.ativate_donation(donation_user_id=body["donation_user_id"],
                                              institution_id=body["institution_id"])
        logger.info("result %s" % (outcome))
        reply = flask.Response(json.dumps(outcome))
        reply.headers['Access-Control-Allow-Origin'] = '*'
        return reply, 200
@application.route("/donations-institution/withdraw/", methods=["POST"])
def withdraw_donation():
    """Register the withdrawal of a user donation by an institution."""
    controller = DonationInstitutionController()
    if request.method == "POST":
        application.logger.info("/donations-institution/withdraw/ %s" % (request.method))
        body = request.json
        application.logger.info("result content %s" % (body))
        outcome = controller.withdraw_donation(
            institution_id=body["institution_id"],
            donation_user_id=body["donation_user_id"],
            date_withdraw=body["date_withdraw"],
        )
        application.logger.info("result %s" % (outcome))
        reply = flask.Response(json.dumps(outcome))
        reply.headers['Access-Control-Allow-Origin'] = '*'
        return reply, 200
@application.route("/stock/", methods=["GET", "POST"])
def process_stock():
    """Query an institution's stock balance (GET) or withdraw from it (POST)."""
    stock = StockInstitution()
    # Same banner was logged on both branches; only GET/POST are routed.
    application.logger.info("/stock/ %s" % (request.method))
    if request.method == "GET":
        institution_id = request.args.get("institution_id", type=int)
        donation_type_id = request.args.get("donation_type_id")
        balance = stock.get_balance(donation_type_id=donation_type_id,
                                    institution_id=institution_id)
        payload = json.dumps(balance)
    elif request.method == "POST":
        body = request.json
        outcome = stock.withdraw_stock(institution_balance_id=body["institution_balance_id"],
                                       date_out=body["date_out"],
                                       amount_out=body["amount_out"])
        payload = json.dumps(outcome)
    reply = flask.Response(payload)
    reply.headers['Access-Control-Allow-Origin'] = '*'
    return reply, 200
@application.route("/unit/", methods=["GET"])
def process_unit():
    """Return every measurement unit known to the system."""
    if request.method == "GET":
        application.logger.info("/unit/ - %s" % (request.method))
        units = UnitController().list_units()
        application.logger.info("result - %s" % (units))
        reply = flask.Response(json.dumps(units))
        reply.headers['Access-Control-Allow-Origin'] = '*'
        return reply, 200
@application.route("/donation-type/", methods=["GET"])
def process_donation_type():
    """Return the catalogue of donation types."""
    if request.method == "GET":
        application.logger.info("/donation-type/ - %s" % (request.method))
        donation_types = DonationTypeController().list_donation_types()
        application.logger.info("result - %s" % (donation_types))
        reply = flask.Response(json.dumps(donation_types))
        reply.headers['Access-Control-Allow-Origin'] = '*'
        return reply, 200
@application.route("/donation-type-details/", methods=["GET"])
def process_donation_type_details():
    """Return the detail records attached to each donation type."""
    if request.method == "GET":
        application.logger.info("/donation-type-details/ - %s" % (request.method))
        details = DonationTypeDetailsController().list_donation_type_details()
        application.logger.info("result %s" % (details))
        reply = flask.Response(json.dumps(details))
        reply.headers['Access-Control-Allow-Origin'] = '*'
        return reply, 200
if __name__ == '__main__':
    # NOTE(review): debug mode enables the interactive debugger and reloader;
    # confirm this entry point is only used for local development.
    application.debug = True
    # HOST_ADDRESS/HOST_PORT come from the environment; os.getenv returns a
    # string (or None when unset) — presumably Flask accepts a string port
    # here, but verify against the deployed Flask version.
    application.run(host=os.getenv("HOST_ADDRESS"), port=os.getenv("HOST_PORT"))
| [
"alexandre.b.prates@gmail.com"
] | alexandre.b.prates@gmail.com |
512c76ab159a877dea30fe399f3220371dd2baf0 | 51de6a2a2ce8882ee6462cd1076c7b9675830531 | /0x0F-python-object_relational_mapping/2-my_filter_states.py | 20f1742598a0848dd05b4b932cf3a0fffab10e70 | [] | no_license | anamariaroman/holbertonschool-higher_level_programming | 9b479c9b1484e4388ec0a4390cda81480626725a | 5d75ccc35dfc92887d0f9a9e0b0773ed741d179e | refs/heads/master | 2023-08-17T23:40:25.164128 | 2021-09-23T04:57:43 | 2021-09-23T04:57:43 | 361,869,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/python3
"""
takes in an argument and displays all values in the
states table of hbtn_0e_0_usa where name matches the argument.
"""
import MySQLdb
from sys import argv
if __name__ == "__main__":
    # Command line: argv[1]=mysql user, argv[2]=password, argv[3]=database,
    # argv[4]=state name to search for.
    db = MySQLdb.connect(host="localhost", port=3306, user=argv[1],
                         passwd=argv[2], db=argv[3], charset="utf8")
    cursor = db.cursor()
    # fix: the query used to interpolate argv[4] with str.format, which is
    # vulnerable to SQL injection; a DB-API parameterized query lets the
    # driver escape the user-supplied value.
    cursor.execute("SELECT * FROM states WHERE states.name = %s "
                   "ORDER BY states.id ASC", (argv[4],))
    r = cursor.fetchall()
    for row in r:
        # Kept: MySQL comparison may be case-insensitive depending on the
        # collation; this check preserves the original exact-match filtering.
        if row[1] == argv[4]:
            print(row)
    cursor.close()
    db.close()
| [
"2979@holbertonschool.com"
] | 2979@holbertonschool.com |
def classifica_idade(idade):
    """Classify an age in whole years into a life-stage label.

    Returns 'crianca' for ages up to 11, 'adolescente' for 12 through 17
    and 'adulto' for 18 or older. (Non-integer ages between the bands fall
    through and return None, as in the original.)
    """
    if idade <= 11:
        return ('crianca')
    if 12 <= idade <= 17:
        return ('adolescente')
    # fix: the original wrote the invalid operator '=>' (SyntaxError).
    if idade >= 18:
        return ('adulto')


a = 13
# fix: the original called the misspelled name 'classica_idade' (NameError).
b = classifica_idade(a)
print(b)
"you@example.com"
] | you@example.com |
31e70a733b33aad326a8f4db00164efbc50b2eab | 881e5b3529f6030db8f307df477a2b7e6e1966a9 | /ascii_reader.py | c6c3d124d9cc1c002a28ed4cc654c73c060cc27e | [] | no_license | hanjae1122/PSID | f92152705947b07da2a6da6705d868cc031f9a00 | 8383626cb45decb29c5d450cc4a8737703e374ed | refs/heads/master | 2020-03-11T11:30:20.367870 | 2018-04-26T23:37:13 | 2018-04-26T23:37:13 | 129,971,282 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,291 | py | import os
import re
import pickle
import pandas as pd
INDEX_TYPE = 'sps'
class ascii_:
    """Reader for PSID fixed-width ASCII data files.

    Pairs a raw ``<name>.txt`` data file with its ``<name>.sps`` (SPSS
    syntax) index file, which describes the fixed-width column boundaries,
    the numeric format of each column, and the human-readable variable
    labels. ``read_index_file`` parses the layout; ``read_data_file`` uses
    it to slice the raw file and export a CSV.
    """

    def __init__(self, f_path, f_name, is_fam):
        # f_path: directory containing the .txt/.sps files.
        # f_name: base file name without extension.
        # is_fam: True for family files (plain labels); False for individual
        #         files, whose labels carry a trailing two-digit year.
        self.file_path = f_path
        self.file_name = f_name
        self.is_fam = is_fam

    def _get_index_path(self):
        """Path of the SPS index file (extension from INDEX_TYPE)."""
        return os.path.join(self.file_path, self.file_name + '.' + INDEX_TYPE)

    def _get_data_path(self):
        """Path of the raw fixed-width data file."""
        return os.path.join(self.file_path, self.file_name + '.txt')

    def _get_csv_path(self):
        """Path where the parsed CSV export is written."""
        return os.path.join(self.file_path, self.file_name + '.csv')

    def _get_pickle_path(self):
        """Path where the label dictionaries are pickled."""
        return os.path.join(self.file_path, self.file_name + '.pickle')

    def _chunks(self, l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]

    def _export_pickle(self, a):
        """Pickle object ``a`` next to the data file."""
        with open(self._get_pickle_path(), 'wb') as f:
            pickle.dump(a, f)

    def read_index_file(self):
        """Reads SPS index file using indices stored in 'inds'.

        'inds' is used to parse the PSID raw ascii .txt file,
        which is given in fixed format.
        For more info on ascii file formats,
        refer to: http://wlm.userweb.mwn.de/SPSS/wlmsrrd.htm

        Returns ``(inds, headers, lab2format, lab2name, name2lab)`` where
        ``inds`` holds the cumulative column end offsets (starting at 0),
        ``headers`` the variable labels in file order, ``lab2format`` the
        SPSS format string per label, and ``lab2name``/``name2lab`` map
        label <-> descriptive name (paired with a year for non-family
        files). Also pickles ``(lab2name, name2lab)`` as a side effect.
        """
        with open(self._get_index_path(), 'r') as f:
            data = []
            raw_data = f.readlines()
            # get locations of 1) indices, 2) data format (int or float),
            # 3) variable labels
            for i, raw_line in enumerate(raw_data):
                line = re.sub('\s+', ' ', raw_line).strip()
                if re.match('^DATA LIST FILE = ', line):
                    i_index = i + 1
                if line == 'FORMATS':
                    i_format = i + 1
                if line == 'VARIABLE LABELS':
                    i_label = i + 1
                data.append(line)

            # create 'inds' variable and list of labels
            # Each record is 4 tokens: label, '-'?, start, end — only the
            # label (c[0]) and the end offset (c[3]) are used here.
            inds, headers = [0], []
            for j in range(i_index, i_format):
                line = data[j]
                if line == '.':
                    break
                s = line.split()
                for c in self._chunks(s, 4):
                    inds.append(int(c[3]))
                    headers.append(c[0])

            # create dictionaries for variable labels, formats and names
            lab2format, lab2name, name2lab = {}, {}, {}
            for j in range(i_format, i_label):
                line = data[j]
                if line == '.':
                    break
                s = line.split()
                for c in self._chunks(s, 2):
                    lab2format[c[0]] = c[1]

            if self.is_fam:
                # Family files: label followed by a free-text name.
                for j in range(i_label, len(data)):
                    line = data[j]
                    if line == '.':
                        break
                    s = line.replace('"', '')
                    s = re.sub('\s+', ' ', s).strip()
                    lab, *name = s.split()
                    name = ' '.join(name)
                    lab2name[lab] = name
                    name2lab[name] = lab
            else:
                # Individual files: names may end with a two-digit year,
                # which is split off and stored alongside the name.
                for j in range(i_label, len(data)):
                    line = data[j]
                    if line == '.':
                        break
                    s = line.replace('"', '')
                    s = re.sub('\s+', ' ', s).strip()
                    yr = re.findall(' [0-9]{2}$', s)
                    if not yr:
                        lab, *name = s.split()
                        yr = 'NA'
                    else:
                        s = re.sub(' [0-9]{2}$', '', s)
                        lab, *name = s.split()
                        yr = yr[-1].strip()
                    name = ' '.join(name)
                    lab2name[lab] = (name, yr)
                    name2lab[(name, yr)] = lab

            # Sanity check: the column list and the label list must agree
            # (relies on dict preserving insertion order).
            assert headers == list(lab2name.keys())
            print('Exporting pickle file...')
            self._export_pickle((lab2name, name2lab))
            return inds, headers, lab2format, lab2name, name2lab

    def read_data_file(self, inds, headers, lab2format):
        """Reads raw ascii .txt file and exports as csv.

        ``inds``/``headers``/``lab2format`` are the values returned by
        :meth:`read_index_file`. Blank fields become None; fields whose SPSS
        format contains '.' are parsed as float, everything else as int.
        Returns the path of the written CSV file.
        """
        with open(self._get_data_path(), 'r') as f:
            print('Opened ascii file and processing...')
            data_table = []
            for line in f:
                split_data = []
                # Slice the fixed-width record at the cumulative offsets.
                split_line = [
                    line[inds[i]:inds[i + 1]] for i in range(len(inds) - 1)
                ]
                for i, h in enumerate(headers):
                    if split_line[i].strip() == '':
                        split_data.append(None)
                    # checks if data is integer or float
                    elif h in lab2format and '.' in lab2format[h]:
                        split_data.append(float(split_line[i]))
                    else:
                        split_data.append(int(split_line[i]))
                data_table.append(split_data)

            print('Merging to dataframe...')
            df = pd.DataFrame(data_table, columns=headers)
            print('Dimensions of dataframe: {0}'.format(df.shape))
            print('Exporting as csv...')
            csv_path = self._get_csv_path()
            df.to_csv(csv_path)
            print('File available in {0}'.format(csv_path))
            return csv_path
| [
"han.jae1122@gmail.com"
] | han.jae1122@gmail.com |
96b772958a9c0a774904dcf77ee5a9f9143e17c7 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/2cb4a725b4cb9be160d194f7b47df6c98709ebfd-<create_connection_team_slave>-fix.py | d3c209e5c778414dddc980ca9daa3ffc050223ca | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | def create_connection_team_slave(self):
cmd = [self.nmcli_bin, 'connection', 'add', 'type', self.type, 'con-name']
if (self.conn_name is not None):
cmd.append(self.conn_name)
elif (self.ifname is not None):
cmd.append(self.ifname)
cmd.append('ifname')
if (self.ifname is not None):
cmd.append(self.ifname)
elif (self.conn_name is not None):
cmd.append(self.conn_name)
cmd.append('master')
if (self.conn_name is not None):
cmd.append(self.master)
return cmd | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
6403ad2b98e96221396b32ae056c4758dbbd2e87 | dcfc53351011ca78b5856716185531c23b159a5a | /scripts/pkgs/lxml.py | 5e632e0fa4a0e03dd7f729bcf0c13dcab8a5103b | [] | no_license | PERCE-NEIGE/pkg-sigil-pour-Linux | 91a2ac1af87c7e1c2c5fd4fb757707281543fec7 | 410f3ed0a648a5a21388b494a2367296dc62bb2f | refs/heads/master | 2020-05-29T23:18:31.519552 | 2018-01-27T21:36:05 | 2018-01-27T21:36:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
# Sigil adaptations made by Doug Massay 2017
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import shutil
import os
from .constants import PREFIX, PYTHON, build_dir, SW, BIN
from .utils import ModifiedEnv, python_build, run
def main(args):
    """Build lxml against the bundled libxml2 and tidy up the build tree."""
    # Expose the bundled tool binaries on PATH while compiling the extension.
    with ModifiedEnv(PATH='{}:{}'.format(BIN, os.environ['PATH'])):
        build_ext_args = (
            'setup.py build_ext -I {0}/include/libxml2 -L {0}/lib'
            .format(PREFIX).split()
        )
        run(PYTHON, *build_ext_args, library_path=True)
        python_build()
        # The install step nests the package under <build>/<SW>/<PREFIX>/lib;
        # hoist that "lib" directory to the build root and drop the
        # now-redundant scaffolding.
        installed_lib = os.path.join(
            build_dir(), os.path.basename(SW), os.path.basename(PREFIX), 'lib'
        )
        os.rename(installed_lib, os.path.join(build_dir(), 'lib'))
        shutil.rmtree(os.path.join(build_dir(), os.path.basename(SW)))
| [
"dougmassay@users.noreply.github.com"
] | dougmassay@users.noreply.github.com |
63a6fa0d3a6a84c3ab7e40fa5567d36680c4b923 | 20428460c043318f96a7bb977a695a7b716b26d9 | /Django/Users/apps/dojo_ninjas/migrations/0001_initial.py | e22173a1fbc57b7ab0a095b311cd900adf96599b | [] | no_license | sjamal2012/Python_apps | 4498027f63904cbb10e05f9dac532adbcb57b418 | d182af3e46bc9d495935f5da96eea2fe4ff5fd56 | refs/heads/master | 2020-03-09T15:50:12.417360 | 2018-04-10T03:39:47 | 2018-04-10T03:39:47 | 128,869,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-13 05:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.11): creates the ``dojos``
    # and ``ninjas`` tables, each ninja holding a foreign key to its dojo.
    # NOTE: migrations are generated code — once applied anywhere, prefer a
    # new migration over editing this one.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='dojos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=2)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='ninjas',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # Deleting a dojo cascades to its ninjas; reverse accessor
                # is ``dojo.ninjas``.
                ('dojo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ninjas', to='dojo_ninjas.dojos')),
            ],
        ),
    ]
| [
"sammyjamal12@gmail.com"
] | sammyjamal12@gmail.com |
872272668856c95b98a8b112c51f14e2082b0a8e | 3481a08fa87c8106448388558258ee91438a3db6 | /paramz/parameterized.py | 45729a18dfe37ffa46f31cc3584b3ff5ef040433 | [
"BSD-3-Clause"
] | permissive | AlexGrig/paramz | 2b96c727e3d5f0843badb114e9a76ff14d6504bf | b7b2253fc4af88e5fb0f87cd9248b9699adaed0e | refs/heads/master | 2020-12-11T02:04:46.168479 | 2016-03-08T15:52:52 | 2016-03-08T15:52:52 | 53,961,967 | 0 | 0 | null | 2016-03-15T16:48:03 | 2016-03-15T16:48:03 | null | UTF-8 | Python | false | false | 20,294 | py | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramax nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import six # For metaclass support in Python 2 and 3 simultaneously
import numpy; np = numpy
from re import compile, _pattern_type
from .param import ParamConcatenation
from .core.parameter_core import Parameterizable, adjust_name_for_printing
from .core import HierarchyError
import logging
from collections import OrderedDict
from functools import reduce
logger = logging.getLogger("parameters changed meta")
class ParametersChangedMeta(type):
    """Metaclass that finalizes a Parameterized instance after __init__.

    Once construction completes it wires up the parameter hierarchy and
    calls ``parameters_changed()`` exactly once, so subclasses must never
    call it themselves.
    """
    def __call__(self, *args, **kw):
        # Here ``self`` is the class being instantiated; it is rebound below
        # to the freshly constructed instance returned by type.__call__.
        # _in_init_ suppresses per-parameter hierarchy reconnects while the
        # constructor is still linking parameters (see link_parameter).
        self._in_init_ = True
        #import ipdb;ipdb.set_trace()
        self = super(ParametersChangedMeta, self).__call__(*args, **kw)
        #logger.debug("finished init")
        self._in_init_ = False
        #logger.debug("connecting parameters")
        # Connect once, from the root of the hierarchy, then notify.
        self._highest_parent_._connect_parameters()
        #self._highest_parent_._notify_parent_change()
        self._highest_parent_._connect_fixes()
        #logger.debug("calling parameters changed")
        self.parameters_changed()
        return self
@six.add_metaclass(ParametersChangedMeta)
class Parameterized(Parameterizable):
"""
Say m is a handle to a parameterized class.
Printing parameters::
- print m: prints a nice summary over all parameters
- print m.name: prints details for param with name 'name'
- print m[regexp]: prints details for all the parameters
which match (!) regexp
- print m['']: prints details for all parameters
Fields::
Name: The name of the param, can be renamed!
Value: Shape or value, if one-valued
Constrain: constraint of the param, curly "{c}" brackets indicate
some parameters are constrained by c. See detailed print
to get exact constraints.
Tied_to: which paramter it is tied to.
Getting and setting parameters::
- Set all values in param to one: m.name.to.param = 1
- Set all values in parameterized: m.name[:] = 1
- Set values to random values: m[:] = np.random.norm(m.size)
Handling of constraining, fixing and tieing parameters::
- You can constrain parameters by calling the constrain on the param itself, e.g:
- m.name[:,1].constrain_positive()
- m.name[0].tie_to(m.name[1])
- Fixing parameters will fix them to the value they are right now. If you change
the parameters value, the param will be fixed to the new value!
- If you want to operate on all parameters use m[''] to wildcard select all paramters
and concatenate them. Printing m[''] will result in printing of all parameters in detail.
"""
#===========================================================================
# Metaclass for parameters changed after init.
# This makes sure, that parameters changed will always be called after __init__
# **Never** call parameters_changed() yourself
#This is ignored in Python 3 -- you need to put the meta class in the function definition.
#__metaclass__ = ParametersChangedMeta
#The six module is used to support both Python 2 and 3 simultaneously
#===========================================================================
def __init__(self, name=None, parameters=[], *a, **kw):
super(Parameterized, self).__init__(name=name, *a, **kw)
self.size = sum(p.size for p in self.parameters)
self.add_observer(self, self._parameters_changed_notification, -100)
if not self._has_fixes():
self._fixes_ = None
self._param_slices_ = []
#self._connect_parameters()
self.link_parameters(*parameters)
def build_pydot(self, G=None):
import pydot # @UnresolvedImport
iamroot = False
if G is None:
G = pydot.Dot(graph_type='digraph', bgcolor=None)
iamroot=True
node = pydot.Node(id(self), shape='box', label=self.name)#, color='white')
G.add_node(node)
for child in self.parameters:
child_node = child.build_pydot(G)
G.add_edge(pydot.Edge(node, child_node))#, color='white'))
for _, o, _ in self.observers:
label = o.name if hasattr(o, 'name') else str(o)
observed_node = pydot.Node(id(o), label=label)
G.add_node(observed_node)
edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
G.add_edge(edge)
if iamroot:
return G
return node
#===========================================================================
# Add remove parameters:
#===========================================================================
def link_parameter(self, param, index=None):
"""
:param parameters: the parameters to add
:type parameters: list of or one :py:class:`paramz.param.Param`
:param [index]: index of where to put parameters
Add all parameters to this param class, you can insert parameters
at any given index using the :func:`list.insert` syntax
"""
if param in self.parameters and index is not None:
self.unlink_parameter(param)
self.link_parameter(param, index)
# elif param.has_parent():
# raise HierarchyError, "parameter {} already in another model ({}), create new object (or copy) for adding".format(param._short(), param._highest_parent_._short())
elif param not in self.parameters:
if param.has_parent():
def visit(parent, self):
if parent is self:
raise HierarchyError("You cannot add a parameter twice into the hierarchy")
param.traverse_parents(visit, self)
param._parent_.unlink_parameter(param)
# make sure the size is set
if index is None:
start = sum(p.size for p in self.parameters)
for name, iop in self._index_operations.items():
iop.shift_right(start, param.size)
iop.update(param._index_operations[name], self.size)
param._parent_ = self
param._parent_index_ = len(self.parameters)
self.parameters.append(param)
else:
start = sum(p.size for p in self.parameters[:index])
for name, iop in self._index_operations.items():
iop.shift_right(start, param.size)
iop.update(param._index_operations[name], start)
param._parent_ = self
param._parent_index_ = index if index>=0 else len(self.parameters[:index])
for p in self.parameters[index:]:
p._parent_index_ += 1
self.parameters.insert(index, param)
param.add_observer(self, self._pass_through_notify_observers, -np.inf)
parent = self
while parent is not None:
parent.size += param.size
parent = parent._parent_
self._notify_parent_change()
if not self._in_init_:
#self._connect_parameters()
#self._notify_parent_change()
self._highest_parent_._connect_parameters()
self._highest_parent_._notify_parent_change()
self._highest_parent_._connect_fixes()
else:
raise HierarchyError("""Parameter exists already, try making a copy""")
def link_parameters(self, *parameters):
"""
convenience method for adding several
parameters without gradient specification
"""
[self.link_parameter(p) for p in parameters]
def unlink_parameter(self, param):
"""
:param param: param object to remove from being a parameter of this parameterized object.
"""
if not param in self.parameters:
try:
raise HierarchyError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
except AttributeError:
raise HierarchyError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
start = sum([p.size for p in self.parameters[:param._parent_index_]])
self.size -= param.size
del self.parameters[param._parent_index_]
self._remove_parameter_name(param)
param._disconnect_parent()
param.remove_observer(self, self._pass_through_notify_observers)
for name, iop in self._index_operations.items():
iop.shift_left(start, param.size)
self._connect_parameters()
self._notify_parent_change()
parent = self._parent_
while parent is not None:
parent.size -= param.size
parent = parent._parent_
self._highest_parent_._connect_parameters()
self._highest_parent_._connect_fixes()
self._highest_parent_._notify_parent_change()
def _connect_parameters(self, ignore_added_names=False):
# connect parameterlist to this parameterized object
# This just sets up the right connection for the params objects
# to be used as parameters
# it also sets the constraints for each parameter to the constraints
# of their respective parents
if not hasattr(self, "parameters") or len(self.parameters) < 1:
# no parameters for this class
return
old_size = 0
self._param_slices_ = []
for i, p in enumerate(self.parameters):
if not p.param_array.flags['C_CONTIGUOUS']:# getattr(p, 'shape', None) != getattr(p, '_realshape_', None):
raise ValueError("""
Have you added an additional dimension to a Param object?
p[:,None], where p is of type Param does not work
and is expected to fail! Try increasing the
dimensionality of the param array before making
a Param out of it:
p = Param("<name>", array[:,None])
Otherwise this should not happen!
Please write an email to the developers with the code,
which reproduces this error.
All parameter arrays must be C_CONTIGUOUS
""")
p._parent_ = self
p._parent_index_ = i
pslice = slice(old_size, old_size + p.size)
# first connect all children
p._propagate_param_grad(self.param_array[pslice], self.gradient_full[pslice])
# then connect children to self
self.param_array[pslice] = p.param_array.flat # , requirements=['C', 'W']).ravel(order='C')
self.gradient_full[pslice] = p.gradient_full.flat # , requirements=['C', 'W']).ravel(order='C')
p.param_array.data = self.param_array[pslice].data
p.gradient_full.data = self.gradient_full[pslice].data
self._param_slices_.append(pslice)
self._add_parameter_name(p)
old_size += p.size
#===========================================================================
# Get/set parameters:
#===========================================================================
def grep_param_names(self, regexp):
"""
create a list of parameters, matching regular expression regexp
"""
if not isinstance(regexp, _pattern_type): regexp = compile(regexp)
found_params = []
def visit(innerself, regexp):
if (innerself is not self) and regexp.match(innerself.hierarchy_name().partition('.')[2]):
found_params.append(innerself)
self.traverse(visit, regexp)
return found_params
def __getitem__(self, name, paramlist=None):
if isinstance(name, (int, slice, tuple, np.ndarray)):
return self.param_array[name]
else:
if paramlist is None:
paramlist = self.grep_param_names(name)
if len(paramlist) < 1: raise AttributeError(name)
if len(paramlist) == 1:
#if isinstance(paramlist[-1], Parameterized) and paramlist[-1].size > 0:
# paramlist = paramlist[-1].flattened_parameters
# if len(paramlist) != 1:
# return ParamConcatenation(paramlist)
return paramlist[-1]
return ParamConcatenation(paramlist)
def __setitem__(self, name, value, paramlist=None):
if value is None:
return # nothing to do here
if isinstance(name, (slice, tuple, np.ndarray)):
try:
self.param_array[name] = value
except:
raise ValueError("Setting by slice or index only allowed with array-like")
self.trigger_update()
else:
param = self.__getitem__(name, paramlist)
param[:] = value
def __setattr__(self, name, val):
# override the default behaviour, if setting a param, so broadcasting can by used
if hasattr(self, "parameters"):
pnames = self.parameter_names(False, adjust_for_printing=True, recursive=False)
if name in pnames:
param = self.parameters[pnames.index(name)]
param[:] = val; return
return object.__setattr__(self, name, val);
#===========================================================================
# Pickling
#===========================================================================
def __setstate__(self, state):
super(Parameterized, self).__setstate__(state)
self._connect_parameters()
self._connect_fixes()
self._notify_parent_change()
self.parameters_changed()
def copy(self, memo=None):
if memo is None:
memo = {}
memo[id(self.optimizer_array)] = None # and param_array
memo[id(self.param_array)] = None # and param_array
copy = super(Parameterized, self).copy(memo)
copy._connect_parameters()
copy._connect_fixes()
copy._notify_parent_change()
return copy
#===========================================================================
# Printing:
#===========================================================================
def _short(self):
return self.hierarchy_name()
@property
def flattened_parameters(self):
return [xi for x in self.parameters for xi in x.flattened_parameters]
def get_property_string(self, propname):
props = []
for p in self.parameters:
props.extend(p.get_property_string(propname))
return props
@property
def _description_str(self):
return [xi for x in self.parameters for xi in x._description_str]
def _repr_html_(self, header=True):
"""Representation of the parameters in html for notebook display."""
name = adjust_name_for_printing(self.name) + "."
names = self.parameter_names()
desc = self._description_str
iops = OrderedDict()
for opname in self._index_operations:
iop = []
for p in self.parameters:
iop.extend(p.get_property_string(opname))
iops[opname] = iop
format_spec = self._format_spec(name, names, desc, iops, False)
to_print = []
if header:
to_print.append("<tr><th><b>" + '</b></th><th><b>'.join(format_spec).format(name=name, desc='value', **dict((name, name) for name in iops)) + "</b></th></tr>")
format_spec = "<tr><td class=tg-left>" + format_spec[0] + '</td><td class=tg-right>' + format_spec[1] + '</td><td class=tg-center>' + '</td><td class=tg-center>'.join(format_spec[2:]) + "</td></tr>"
for i in range(len(names)):
to_print.append(format_spec.format(name=names[i], desc=desc[i], **dict((name, iops[name][i]) for name in iops)))
style = """<style type="text/css">
.tg {font-family:"Courier New", Courier, monospace !important;padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-center{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:center;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""
return style + '\n' + '<table class="tg">' + '\n'.join(to_print) + '\n</table>'
def _format_spec(self, name, names, desc, iops, VT100=True):
    """Build the per-column format strings for the parameter table.

    Column widths are sized to the widest entry (headers included); with
    VT100=True the name column is wrapped in bold terminal escape codes.
    """
    name_width = max(len(str(entry)) for entry in names + [name])
    desc_width = max(len(str(entry)) for entry in desc + ["value"])
    if VT100:
        columns = [" \033[1m{{name!s:<{0}}}\033[0;0m".format(name_width),
                   "{{desc!s:>{0}}}".format(desc_width)]
    else:
        columns = [" {{name!s:<{0}}}".format(name_width),
                   "{{desc!s:>{0}}}".format(desc_width)]
    for opname in iops:
        # Each index-operation column is centred on its widest value.
        op_width = max([len(opname)] + [len(entry) for entry in iops[opname]])
        columns.append('{{{1}!s:^{0}}}'.format(op_width, opname))
    return columns
def __str__(self, header=True, VT100=True):
    """Plain-text table of the parameters, one row per parameter name."""
    title = adjust_name_for_printing(self.name) + "."
    row_names = self.parameter_names()
    row_descs = self._description_str
    iops = OrderedDict((opname, self.get_property_string(opname))
                       for opname in self._index_operations)
    row_template = ' | '.join(self._format_spec(title, row_names, row_descs, iops, VT100))
    rows = []
    if header:
        # Header row repeats each column's name in its own slot.
        rows.append(row_template.format(name=title, desc='value', **{col: col for col in iops}))
    for idx in range(len(row_names)):
        rows.append(row_template.format(name=row_names[idx], desc=row_descs[idx],
                                        **{col: iops[col][idx] for col in iops}))
    return '\n'.join(rows)
pass
| [
"ibinbei@gmail.com"
] | ibinbei@gmail.com |
b78e377d1debde7999508f7dd2c2807e6db66e1c | 3bfe20b4fb2f0a7c38d33b9928e730fddc436499 | /src/dispatch/plugins/kandbox_planner/rule/travel_time.py | ca670e7f2e5bb01709a5ff282609725588e1cc75 | [
"Apache-2.0"
] | permissive | ajunlonglive/easydispatch | 27cdffd9161ee49efff7e55e4aadee43b89650d7 | 2cf32a374d12c804ff396f90b789c2a838003c5d | refs/heads/main | 2023-06-17T11:13:11.224370 | 2021-07-05T11:45:24 | 2021-07-05T11:45:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,032 | py | import dispatch.plugins.kandbox_planner.util.kandbox_date_util as date_util
from dispatch.plugins.bases.kandbox_planner import KandboxRulePlugin
class KandboxRulePluginSufficientTravelTime(KandboxRulePlugin):
    """Rule: a job must leave enough travel time to its neighbouring jobs.

    Has the following members:
    rule_code = "sufficient_travel_time_previous_n_next"
    rule_name = "Job is not blocked by other jobs"
    message_template = (
        "Job ({}) to Job ({}) requires {} minutes, but there are only {} minutes in between"
    )
    success_message_template = (
        "Job ({}) to Job ({}) requires {} minutes, and there are now {} minutes."
    )
    """

    # Template for the evaluation result; a copy is made per evaluation.
    result = {
        "score": 0,
        "message": "",
        "prev_job_index": None,
        "prev_travel_time": 0,
    }

    title = "Enough Travel"
    slug = "kandbox_rule_sufficient_travel_time"
    author = "Kandbox"
    author_url = "https://github.com/alibaba/easydispatch"
    description = "Rule sufficient_travel_time for GYM for RL."
    version = "0.1.0"

    default_config = {
        "mininum_travel_minutes": 2,
    }

    # UI form schema for editing default_config.
    config_form_spec = {
        "type": "object",
        "properties": {
            "mininum_travel_minutes": {
                "type": "number",
                "title": "Number of minutes for mininum_travel_minutes",
            },
        },
    }

    def evalute_normal_single_worker_n_job(self, env=None, job=None):  # worker = None,
        """Score whether `job` fits between its neighbouring assigned jobs.

        Returns a copy of `result` with score 1 when there is enough travel
        time to both the previous and the next job of the scheduled worker,
        or score -1 (with a rejection message) otherwise.
        """
        # return score, violated_rules (negative values)
        # return self.weight * 1

        # Now check if this new job can fit into existing slots by checking travel time
        travel_time = 0
        prev_job = None
        next_job = None
        new_job_loc_i = 0
        worker_code = job["scheduled_primary_worker_id"]
        job_start_time = job["assigned_start_minutes"]
        # Walk the worker's assigned jobs (assumed ordered by start time -- TODO confirm)
        # to find the closest jobs before and after the candidate start time.
        for job_i in range(len(env.workers_dict[worker_code]["assigned_jobs"])):
            a_job = env.jobs[env.workers_dict[worker_code]["assigned_jobs"][job_i]["job_index"]]
            if a_job["assigned_start_minutes"] < job_start_time:
                prev_job = a_job
            if a_job["assigned_start_minutes"] > job_start_time:  # can not be equal
                next_job = a_job
                break
            new_job_loc_i += 1
        overall_message = ""
        res = self.result.copy()
        res["new_job_loc_i"] = new_job_loc_i
        if prev_job:
            # same job , one is virtual for checking.
            prev_travel_time = env._get_travel_time_2jobs(job["job_index"], prev_job["job_index"])
            # print( job['job_index'] , prev_job['job_index'])
            if job["job_index"] == prev_job["job_index"]:
                print("same:", job["job_index"], prev_job["job_index"])
                pass
            else:  # (job['job_index'] != prev_job['job_index']) :
                # no more room in this time slot
                res["prev_job_index"] = prev_job["job_index"]
                res["prev_travel_time"] = prev_travel_time
                # Reject when travel from the previous job would overlap its end.
                if (
                    job_start_time - prev_travel_time <
                    prev_job["assigned_start_minutes"] + prev_job["scheduled_duration_minutes"]
                ):
                    res["message"] = "Not enough travel time from prev_job: {}, rejected.".format(
                        prev_job["job_code"]
                    )
                    res["score"] = -1
                    # print( res['message'])
                    return res
                else:
                    overall_message += "(Prev_job={}, travel_time={}) ".format(
                        prev_job["job_code"], int(prev_travel_time)
                    )
        else:
            res["prev_job_index"] = None
            res["prev_travel_time"] = 0
        if next_job:
            next_travel_time = env._get_travel_time_2jobs(job["job_index"], next_job["job_index"])
            res["next_job_index"] = next_job["job_index"]
            res["next_travel_time"] = next_travel_time
            # Reject when travel to the next job exceeds the gap after this job ends.
            if (
                next_travel_time >
                next_job["assigned_start_minutes"] -
                job_start_time -
                job["scheduled_duration_minutes"]
            ):
                # no more room in this time slot
                res["message"] = "Not enough travel time from next_job: {}, rejected.".format(
                    next_job["job_code"]
                )
                res["score"] = -1
                # print( res['message'])
                return res
            else:
                overall_message += "(Next_job={}, travel_time={}) ".format(
                    next_job["job_code"], int(next_travel_time)
                )
        res["message"] = "Got enough travel minutes.".format() + overall_message
        res["score"] = 1
        return res

    """
    def evalute_action_normal(self, env=None, action = None, job_i=None):
        a_job = self.generate_virtual_job_from_action(env = env, action = action, job_i=job_i)
        worker = env.workers_dict[a_job['scheduled_primary_worker_id']]
        return self.evalute_normal_single_worker_n_job(env, worker, a_job)
    """
| [
"qiyang.duan@alibaba-inc.com"
] | qiyang.duan@alibaba-inc.com |
317f64cccda875dd79f73e6462bd48afe8bf589d | 235f96d4b2e0fb672f20be75b114375744efc9c3 | /Unindo Dicionários e Listas.py | edbea666c95d0fa593348a6d4203548d82c2808b | [] | no_license | anderportela/Learning--Python3--Curso_em_Video | f13e1c47a628aa6dee2ba883a82c36adf751d6f7 | 3b0abe6367d04b5043207ddacf29a4af623ebadf | refs/heads/master | 2020-06-29T03:34:48.422798 | 2019-09-15T00:37:50 | 2019-09-15T00:37:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | print('-='*50)
# Console registration form: collects name/sex/age until the user stops,
# then prints summary statistics.
print(f'\033[1;31m{"CADASTRO DE PESSOAS":^100}\033[m')  # ANSI bold-red centred banner
print('-='*50)
pessoas = []   # registered people (dict copies)
pessoa = {}    # working dict, cleared and reused each iteration
si = mi = 0    # si: running sum of ages, mi: mean age
while True:
    pessoa.clear()
    pessoa['nome'] = str(input('Nome: '))
    # Re-ask until sex is a valid 'M' or 'F' (only the first letter counts).
    while True:
        pessoa['sexo'] = str(input('Sexo [F/M]: ')).upper()[0]
        if pessoa['sexo'] in 'MF':
            break
        print('Opção Inválida. Digite apenas M ou F!')
    pessoa['idade'] = int(input('Idade: '))
    si += pessoa['idade']
    # Store a copy: `pessoa` itself is cleared on the next loop pass.
    pessoas.append(pessoa.copy())
    # Re-ask until the continue answer is a valid 'S' or 'N'.
    while True:
        r = str(input('Deseja continuar? [S/N]: ')).upper()[0]
        if r in 'NS':
            break
        print('Opção Inválida. Digite apenas S ou N!')
    if r == 'N':
        break
# Summary: head count, mean age, women, and above-average ages.
mi = si/len(pessoas)
print('*'*100)
print(f'Foram cadastradas {len(pessoas)} pessoas. ')
print(f'\nA média de idade das pessoas cadastradas é de {mi:.2f} anos')
print('\nAs mulheres cadastradas são: ', end='')
for p in pessoas:
    if p['sexo'] == 'F':
        print(f'{p["nome"]}')
print('\n\nAs pessoas que têm idade acima da média são: ', end='')
for p in pessoas:
    if p['idade'] > mi:
        print(f'{p["nome"]} com {p["idade"]} anos')
| [
"noreply@github.com"
] | noreply@github.com |
1c32f269b6561e49a363802f8ff3ae5b4377c9b3 | 37ab6b7a61e8e50064db046dce7863d02c969d0a | /Assignment 3/part1.2(2*2over).py | 5bf43b4d3a5872ffdaed1f3441d46b43e1c95962 | [] | no_license | jamesfrj/ECE-448 | 9dc31f732ebb0b98887efdfeb91424f9d6404c30 | d9493ef0ec4409c1eb76b5f40e0e321037a2c47b | refs/heads/master | 2021-09-05T09:14:26.522583 | 2018-01-26T00:10:53 | 2018-01-26T00:10:53 | 112,629,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,949 | py | import math
import time
# Dataset dimensions used throughout the classifier.
NUM_TRAINING = 5000  # images in the training set
NUM_TESTING = 1000   # images in the test set
NUM_DIGIT = 10       # digit classes 0-9
NUM_PIXEL = 784  # 28*28
IMG = 28  # Dimension of images (28x28 characters per image)
#initiate a dict with size of 729 = 27^2
def init_likelihood():
    """Fresh counts table: a zeroed per-digit list for each of the 27x27 feature positions."""
    return {(row, col): [0] * NUM_DIGIT
            for row in range(27) for col in range(27)}
#update likelihood for every digit class for every pixel location
def likelihoods(image, label, freq):
    """Likelihood tables P(feature = v | digit) for the 16 2x2 patterns, Laplace-smoothed."""
    tables = [init_likelihood() for _ in range(16)]
    # Count how often each 2x2 pattern occurs at each position, per digit class.
    for img_idx in range(NUM_TRAINING):
        top_row = IMG * img_idx  # first text row of the current image
        for row in range(27):
            for col in range(27):
                pattern = get_type(image, top_row, row, col)
                tables[pattern][row, col][label[img_idx]] += 1
    # Laplace smoothing: k pseudo-counts over v = 16 possible feature values.
    k = 0.1
    v = 16
    for row in range(27):
        for col in range(27):
            for digit in range(NUM_DIGIT):
                for pattern in range(16):
                    tables[pattern][row, col][digit] = (tables[pattern][row, col][digit] + k) / float(freq[digit] + k * v)
    return tables
def get_type(image, curr, line, pixel):
    """Code (0-15) of the 2x2 ink pattern at (line, pixel) in the image whose first row is `curr`.

    A cell is 'on' when it holds '+' or '#', 'off' when it holds ' '.
    The codes match the original comparison chain, bits listed as
    (top-left, top-right, bottom-left, bottom-right):
    0:0000 1:1000 2:0100 3:0010 4:0001 5:1100 6:1010 7:1001
    8:0110 9:0101 10:0011 11:1110 12:1101 13:1011 14:0111 15:1111
    Returns None when any cell holds another character (no branch matched).
    """
    cells = (image[curr + line][pixel], image[curr + line][pixel + 1],
             image[curr + line + 1][pixel], image[curr + line + 1][pixel + 1])
    mask = 0
    for cell in cells:
        if cell == '+' or cell == '#':
            mask = (mask << 1) | 1
        elif cell == ' ':
            mask = mask << 1
        else:
            return None  # unknown character: the original if-chain fell through
    # Map the tl/tr/bl/br bitmask onto the original pattern codes.
    codes = (0, 4, 3, 10, 2, 9, 8, 14, 1, 7, 6, 13, 5, 12, 11, 15)
    return codes[mask]
#number of occurance
def frequency(label, num_classes=None):
    """Count occurrences of each class value in `label`.

    :param label: sequence of integer class labels.
    :param num_classes: number of classes counted (0 .. num_classes-1);
        defaults to the module-level NUM_DIGIT, preserving the old call form.
    :return: list where entry i is the number of labels equal to i.
    """
    if num_classes is None:
        num_classes = NUM_DIGIT
    return [label.count(i) for i in range(num_classes)]
#Maximum a posterior classification
def MAP(likelihood, prior, image):
    """Maximum a posteriori digit for each test image (log-space to avoid underflow)."""
    predictions = []  # final classification of each image
    for img_idx in range(NUM_TESTING):
        top_row = IMG * img_idx
        log_posteriors = []
        for digit in range(NUM_DIGIT):
            log_post = math.log(prior[digit])
            for row in range(27):
                for col in range(27):
                    pattern = get_type(image, top_row, row, col)
                    log_post += math.log(likelihood[pattern][row, col][digit])
            log_posteriors.append(log_post)
        # argmax; ties resolve to the smallest digit, same as list.index(max(...)).
        predictions.append(log_posteriors.index(max(log_posteriors)))
    return predictions
#NAC
def naive_bayes_classifier():
    """Train the naive Bayes digit classifier, classify the test set and print accuracy.

    Reads the module-level globals training/train_label/testing/test_label.
    (Python 2 source: uses print statements and time.clock.)
    """
    freq = frequency(train_label)  # frequency of occurrence of training labels
    prior = []  # P(class): empirical frequency of each class
    test_freq = frequency(test_label)  # NOTE(review): computed but never used
    total_num_correct = 0  # count of correct test classifications
    ################ Training #####################
    t = time.clock()
    # P(F | class): likelihood for every pixel location for every digit class
    likelihood = likelihoods(training, train_label, freq)
    for i in range(NUM_DIGIT):
        prior.append(freq[i]/float(NUM_TRAINING))
    print time.clock() - t
    ################ Testing #####################
    t = time.clock()
    result= MAP(likelihood, prior, testing)
    print time.clock() - t
    ################ Evaluation #####################
    for i in range(NUM_TESTING):
        if result[i] == test_label[i]:
            total_num_correct +=1
    ################ Results #####################
    # Basic statistics.
    print "Total Classification Rate: ", total_num_correct/float(NUM_TESTING)*100, \
        "%. Out of 1000 images. "
################ Data ######################
# The four input files share two layouts, so the repeated read logic is
# factored into two helpers (also closes the file handles via `with`).

def _read_image_file(filename):
    """Read an ASCII digit-image file into a list of per-line character lists."""
    with open(filename, 'r') as f:
        # rstrip instead of list.remove('\n'): same result, and it does not
        # crash if the final line lacks a trailing newline.
        return [list(line.rstrip('\n')) for line in f]


def _read_label_file(filename):
    """Read a label file: one digit per line (only the first character is used)."""
    with open(filename, 'r') as f:
        return [int(line[0]) for line in f]


# training: text rows of the concatenated training images
training = _read_image_file('trainingimages')
# train_label: digit class of each training image
train_label = _read_label_file('traininglabels')
# testing / test_label: same layout for the evaluation set
testing = _read_image_file('testimages')
test_label = _read_label_file('testlabels')
naive_bayes_classifier() #main function | [
"noreply@github.com"
] | noreply@github.com |
47a8b2b70036aab2a700572c2e5189691c8c06be | 3a31ca2fa0d24c2a9931ccc6bf516075bc091237 | /settings.py | 3bc0eb4067828adbe0f825ac6ab9aa7ced8cc448 | [] | no_license | TSLEFK/times4blog | 1f8b15b6530c5d813397055ec623e97f1d020efe | f72c95860009c7557f4671782a44292790857616 | refs/heads/master | 2020-04-10T17:06:19.657374 | 2018-12-10T12:09:27 | 2018-12-10T12:09:27 | 161,165,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import os
from os.path import join, dirname, abspath
from dotenv import load_dotenv

# Load environment variables from the .env file next to this module.
# dotenv_path = join(dirname(dirname(abspath(__file__))), '.env')
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)

# Slack credentials: the webhook URL is stored as the path suffix after the fixed host.
slack_incoming_webhook_pass = os.environ.get("SLACK_WEBHOOK_URL")
# NOTE(review): this concatenation raises TypeError when SLACK_WEBHOOK_URL is
# unset (os.environ.get returns None) — confirm the variable is always provided.
SLACK_INCOMING_WEBHOOK_URL = "https://hooks.slack.com/services/" + slack_incoming_webhook_pass
slack_api_token = os.environ.get("SLACK_API_TOKEN")
slack_legacy_api_token = os.environ.get("LEGACY_SLACK_API_TOKEN")
slack_channel_id = os.environ.get("SLACK_CHANNEL_ID")
slack_channel_name = os.environ.get("SLACK_CHANNEL_NAME")

# Hatena credentials (OAuth consumer/access keys plus account login).
hatena_consumer_key = os.environ.get("Consumer_Key")
hatena_consumer_secret = os.environ.get("Consumer_Secret")
hatena_access_token = os.environ.get("Access_Token")
hatena_access_token_secret = os.environ.get("Access_Token_Secret")
hatena_id = os.environ.get("HATENA_ID")
hatena_password = os.environ.get("HATENA_PASSWORD")
"satarn.sherlock@gmail.com"
] | satarn.sherlock@gmail.com |
ede6c7d72c0c171aa6623fb47d892a6213d9c836 | f4b273a9bf90c22f4df92b444cc04a7a451564b0 | /23_climbing_Stairs/climb.py | 3db0700064c184b4a74f8b9f55bd08b22aab7e3d | [] | no_license | RawandKurdy/snippets | ac7324b8720ffd618605d18359b35493f50655cf | 935da24ee15d68ab3a4e512f88f6a0418f25bba3 | refs/heads/master | 2020-07-29T11:21:47.206388 | 2020-07-05T09:35:59 | 2020-07-05T09:35:59 | 209,779,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # Climbing Stairs - Interview Question -
# Asked By Apple and Adobe
# in Python
def climbingStairs(n):
    """Return the number of distinct ways to climb `n` stairs.

    Each move climbs 1 or 2 steps, so the counts follow the Fibonacci
    recurrence ways(n) = ways(n-1) + ways(n-2) with ways(0)=ways(1)=1.

    :param n: number of stairs (non-negative int).
    :return: count of distinct step sequences that reach stair n.
    """
    base = (1, 1, 2)  # ways(0), ways(1), ways(2)
    if n <= 2:
        return base[n]
    # Iterate with O(1) memory instead of materialising the whole DP table.
    prev, curr = 1, 2  # ways(step-2), ways(step-1)
    for _ in range(3, n + 1):
        prev, curr = curr, prev + curr
    return curr
# Demo: count the ways to climb a 26-step staircase.
n = 26  # Steps
ways = climbingStairs(n)
print(ways)  # 196418
| [
"rawand.farhad@gmail.com"
] | rawand.farhad@gmail.com |
d00b4da42d9cff46a10c86af99671a754526a5f8 | 90fb6f665e6c8c83f1c8207e0a2e793833fc3c6d | /ml_intro/ch10/__init__.py | b2079b2214f6ff5b5f2948dfb7641c56e3fc30c9 | [] | no_license | hello-wangjj/Introduction-to-Programming-Using-Python | 8698ced3067714ab639328f5e3e78ccbbb4905c4 | ff24118d1779d589e37aa6ab88002176e8f475c3 | refs/heads/master | 2021-09-13T22:29:00.355794 | 2018-05-05T07:28:29 | 2018-05-05T07:28:29 | 57,047,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | __author__ = 'wangj'
__date__ = '2018/01/20 00:01'


def main():
    """No-op placeholder entry point."""
    pass


if __name__ == '__main__':
    main()
"wangjj886688@qq.com"
] | wangjj886688@qq.com |
d8f0a62231d4650d4d4bde11c4def29c92a68bb4 | bf46a059530990eb84b737ec737d3c7f6627407b | /mitosCalsification/plot.py | 404a77cd7505cbd2b2354065530d7a1fe0773e94 | [] | no_license | Claudio-Tapia/Mitos | 867fd93e8f193e4421f79ae3c168d39cd71db60a | d2b932021748b508b2fdff55f3183d7e5949f6bf | refs/heads/master | 2020-04-18T17:20:39.481771 | 2019-02-04T17:18:20 | 2019-02-04T17:18:20 | 167,652,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,114 | py | import numpy as np
import os
from sys import platform
from sklearn.metrics import roc_curve
from keras.utils.np_utils import to_categorical
from sklearn.metrics.ranking import roc_auc_score, precision_recall_curve, average_precision_score
def print_plots(metrics_names, train_metrics, val_metrics=None, test_metrics=None):
    """Plot training/validation/test metric curves and save them as PNG files.

    train.png      : metric 0 as-is, plus `1 - x` "error" curves for the rest.
    validation.png : written only when `val_metrics` is given; the best
                     (argmax of metric 1) iteration is marked with a red dot.
    test.png       : written only when `test_metrics` is given; marks the max.
    NOTE(review): assumes each metrics row is a numpy array (relies on
    `1 - row` broadcasting) with one value per iteration — confirm callers.
    """
    import matplotlib.pyplot as plt
    if platform == 'linux':
        # Headless environments: use the non-interactive Agg backend.
        plt.switch_backend('agg')
    x_axis = np.arange(1, len(train_metrics[0]) + 1, dtype=int)
    plt.plot(x_axis, train_metrics[0], label=metrics_names[0])
    i = 1
    style = ['--', '-.', ':']  # line styles for the error curves
    while i < len(train_metrics):
        plt.plot(x_axis, 1 - train_metrics[i], style[min(2, i)], label='error_' + metrics_names[i])
        i += 1
    plt.xlabel('iteración')
    plt.legend()
    plt.savefig('train.png')
    if val_metrics is not None:
        plt.figure()
        # Index of the best validation score (despite the name, this is an argmax).
        min_index = np.argmax(val_metrics[1])
        max_fscore = val_metrics[1][min_index]
        min_index += 1  # 1-based for display
        plt.plot(x_axis, val_metrics[0], label='val_' + metrics_names[0])
        plt.plot(x_axis, 1 - val_metrics[1], '--', label='val_error_' + metrics_names[1])
        print_text = '({}, {:.2f})'.format(min_index, 1 - max_fscore)
        plt.plot(min_index, 1 - max_fscore, 'ro')  # mark the minimum error point
        plt.text(min_index, 1 - max_fscore, print_text)
        plt.legend()
        plt.xlabel('iteración')
        plt.savefig('validation.png')
    if test_metrics is not None:
        plt.figure()
        plt.plot(x_axis, test_metrics, label='fscore')
        max_index = np.argmax(test_metrics)
        max_fscore = test_metrics[max_index]
        max_index += 1  # 1-based for display
        plt.plot(max_index, max_fscore, 'ro')  # mark the best test fscore
        print_text = '({}, {:.3f})'.format(max_index, max_fscore)
        plt.text(max_index, max_fscore, print_text)
        plt.legend()
        plt.xlabel('iteración')
        plt.savefig('test.png')
    # plt.show()
def dump_metrics_2_file(train_metrics, val_metrics=None, test_metrics=None):
    """Persist the metric arrays as CSV files in the working directory.

    train.csv is always written; validation.csv / test.csv only when the
    corresponding argument is provided.
    """
    np.savetxt('train.csv', train_metrics, delimiter=',')
    for path, data in (('validation.csv', val_metrics), ('test.csv', test_metrics)):
        if data is not None:
            np.savetxt(path, data, delimiter=',')
def plot_from_file():
    """Reload the CSV metric dumps from the working directory and re-plot them."""
    def _maybe_load(path):
        # Optional inputs: return None when the file is absent.
        return np.loadtxt(path, delimiter=',') if os.path.isfile(path) else None

    metric_labels = ['loss', 'mitos_fscore', 'binary_accuracy']
    training = np.loadtxt('train.csv', delimiter=',')
    validation = _maybe_load('validation.csv')
    testing = _maybe_load('test.csv')
    print_plots(metric_labels, training, validation, testing)
def plot_roc(y_true, y_pred):
    """Plot ROC curves for the classifier and two constant baselines; save to roc.png."""
    from matplotlib import pyplot as plt
    if platform == 'linux':
        plt.switch_backend('agg')
    # Classifier scores first, then the all-zeros / all-ones baselines.
    curves = [
        (y_pred, 'Binary_roc. auc: {:.3f}'),
        (np.zeros(len(y_pred)), 'Todo mitosis. auc: {:.3f}'),
        (np.ones(len(y_pred)), 'Todo no-mitosis. auc: {:.3f}'),
    ]
    for scores, label_fmt in curves:
        fpr, tpr, _ = roc_curve(y_true, scores)
        auc = roc_auc_score(y_true, scores)
        plt.plot(fpr, tpr, label=label_fmt.format(auc), lw=1)
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend()
    plt.savefig('roc.png')
def plot_precision_recall(y_true, y_pred):
    """Plot the precision-recall curve with its average-precision score.

    :param y_true: ground-truth binary labels.
    :param y_pred: predicted scores for the positive class.
    Saves the figure to 'prec_rec.png' (Agg backend on Linux, headless).
    """
    from matplotlib import pyplot as plt
    if platform == 'linux':
        plt.switch_backend('agg')
    # sklearn returns (precision, recall, thresholds).
    precision, recall, _ = precision_recall_curve(y_true, y_pred)
    score = average_precision_score(y_true, y_pred)
    # Bug fix: the original plotted precision on the x axis while labelling
    # it 'Recall'; plot recall on x / precision on y to match the axis labels.
    plt.plot(recall, precision,
             label='curva. Area: {:.3f}'.format(score), lw=1)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend()
    plt.savefig('prec_rec.png')
if __name__ == '__main__':
    # Standalone use: regenerate the plots from previously dumped CSV metrics.
    plot_from_file()
| [
"claudio.t@outlook.cl"
] | claudio.t@outlook.cl |
c2ec9b6fb96c92ea4ea41a5c668b29324b7a0789 | b7fad8c328d8b55eb51d14252f050190fa06e10d | /app/models.py | 80b6ad8a5d58da55e1588e460304141ee10a6c37 | [] | no_license | hail-ans/demo-for-valiance | de9a95a4e8811d50e9128936b3d9d790b5444781 | 8573a0fc60f03913e368bb21b3363a87b4147c67 | refs/heads/master | 2023-03-12T21:18:04.060797 | 2021-02-28T09:48:32 | 2021-02-28T09:48:32 | 343,067,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | # app/models.py
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
class Person(UserMixin, db.Model):
    """
    Create a Person table

    Login-capable user model: UserMixin supplies the Flask-Login interface,
    and only a werkzeug hash of the password is ever stored.
    """

    # Ensures table will be named in plural and not in singular
    # as is the name of the model
    __tablename__ = 'persons'

    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(60), index=True, unique=True)
    username = db.Column(db.String(60), index=True, unique=True)
    first_name = db.Column(db.String(60), index=True)
    last_name = db.Column(db.String(60), index=True)
    password_hash = db.Column(db.String(128))  # werkzeug hash, never the raw password
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    is_admin = db.Column(db.Boolean, default=False)

    @property
    def password(self):
        """
        Prevent password from being accessed
        """
        raise AttributeError('password is not a readable attribute.')

    @password.setter
    def password(self, password):
        """
        Set password to a hashed password
        """
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """
        Check if hashed password matches actual password
        """
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return '<Person: {}>'.format(self.username)
# Set up user_loader
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: map a stored session id back to a Person row."""
    return Person.query.get(int(user_id))
class Role(db.Model):
"""
Create a Role table
"""
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), unique=True)
description = db.Column(db.String(200))
person = db.relationship('Person', backref='role',
lazy='dynamic')
def __repr__(self):
return '<Role: {}>'.format(self.name) | [
"72516466+hail-ans@users.noreply.github.com"
] | 72516466+hail-ans@users.noreply.github.com |
609e68b30e1727e410a4945deb16ae799e18594c | e336bbe9ea2ad378b7cf801384109e1d1fdab6c5 | /DeepDream/vggClassifier.py | 405834dcafddb2dfa8a211051c0baac22df1124d | [] | no_license | viktor-ktorvi/Deep_Dream_PSIML6 | 531b0f0baaa8f43000c0f3ccc4a3d0cfccb2a03d | 1bb8e2dbd1b959c1721041751e9c82dfcc1e8258 | refs/heads/master | 2022-11-28T17:24:42.956558 | 2020-08-12T15:47:19 | 2020-08-12T15:47:19 | 285,327,037 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | import torch
import os
import numpy as np
import cv2
from PIL import Image
import torchvision.models as models
import requests
from matplotlib import pyplot as plt
import torchvision.transforms as transforms
from scipy.special import softmax
from utils import *
def predictVGG16(filename, topN=10):
    """Classify an image with the pretrained VGG16 model and print the top classes.

    :param filename: path to the input image.
    :param topN: number of highest-scoring ImageNet classes to print.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = load_model(device)

    # Human-readable ImageNet class names, fetched once per call.
    LABELS_URL = 'https://s3.amazonaws.com/outcome-blog/imagenet/labels.json'
    response = requests.get(LABELS_URL)
    labels = {int(key): value for key, value in response.json().items()}

    image = load_image(filename)
    image_tensor = preprocess(image, device)
    with torch.no_grad():
        prediction = model(image_tensor)
    prediction = prediction.cpu().numpy()

    soft_val = softmax(prediction[0])
    indexes = np.argsort(prediction)  # ascending, so -i indexes the i-th best class

    print("N\t", "Score\t\t", "Class\n")
    # Bug fix: range(1, topN) printed only topN - 1 rows.
    for i in range(1, topN + 1):
        print(i, "\t", soft_val[indexes[0, -i]], "\t", labels[indexes[0, -i]])
if __name__ == "__main__":
    # Demo run: print the top-10 predictions for a sample image.
    # (A hand-rolled copy of the inference steps used to live here as
    # commented-out code; it duplicated predictVGG16 and was removed.)
    predictVGG16('data/input_images/Maskenbal2018.jpg', 10)
| [
"noreply@github.com"
] | noreply@github.com |
7b858252e74e2b4db0213fec0841d531fde9f5a3 | a46e5e4d4ed79f540e7709c7cbf4f9cd5ebbad7d | /price-watcher/main.py | 9e2f3c6451c54fb698387893d46f3f3fff54a98e | [
"Apache-2.0"
] | permissive | Ashton-Sidhu/prefect-home-automation | 7da2583a86f9cc5e7c75b1061824ff36532fde5e | 9ed1235c896e40f462f4f90f45cace86018c58ae | refs/heads/master | 2023-04-18T17:08:19.539830 | 2021-05-02T20:03:16 | 2021-05-02T20:03:16 | 295,056,123 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | import requests
from bs4 import BeautifulSoup
from prefect import task, Flow, Parameter
from prefect.tasks.notifications.email_task import EmailTask
from prefect.tasks.control_flow import case
@task
def get_price(url):
"""
Get price of product from a URL.
TODO: Change this to get the price of your product!
"""
html = requests.get(url).text
soup = BeautifulSoup(html, features="html.parser")
price_tag = soup.find("span", attrs={"class": "value"})
return float(price_tag.attrs["content"])
@task
def create_message(price: float) -> str:
    """Build the notification email body. HTML is supported as per Prefect docs
    https://docs.prefect.io/api/latest/tasks/notifications.html#emailtask"""
    template = """
Hi,
\n<br>
Your item has a new price of : {}
\n<br>
"""
    return template.format(price)
@task
def is_different(price_point: int, price: float) -> bool:
    """Return True when the scraped price differs from the watched price point."""
    watched = float(price_point)
    return watched != price
# One reusable email task; the subject line is fixed for every notification.
email_task = EmailTask(subject="Price Check")

# Schedule is set via UI
# Can't schedule jobs with required parameters programmatically.
with Flow("Check Price", schedule=None) as flow:
    # These are set via the Prefect UI under the flow settings
    email = Parameter("email", required=True)
    price_point = Parameter("price_point", required=True)
    url = Parameter("url", required=True)

    # Get the price from the site
    price = get_price(url)

    # Is the price different from the watched price point?
    cond = is_different(price_point, price)

    # Only when the price changed: build and send the notification email.
    with case(cond, True):
        msg = create_message(price)
        email_task(msg=msg, email_to=email)

flow.register(project_name="Price-Checker")
| [
"ashton.sidhu1994@gmail.com"
] | ashton.sidhu1994@gmail.com |
58b1208a3d6f193b5184242a3e0822f89b75ca2a | 87657ddcc5df51e0f8a4f96857eecbf7c742864c | /methods/matlab/qda.py | 58691fa029ad397a5ede566aa93d6dc5e56d1a6a | [] | no_license | settur1409/benchmarks | f50fb3785d6f0caf4d0ea5bb915455aca3b065fc | cfd8a139a09b65b9468af30a643ae136ae8c813c | refs/heads/master | 2020-06-29T21:48:11.382866 | 2019-02-21T18:50:25 | 2019-02-21T18:50:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,400 | py | '''
@file qda.py
Class to benchmark the matlab qda method.
'''
import os
import sys
import inspect
# Import the util path, this method even works if the path contains symlinks to
# modules.
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(
os.path.split(inspect.getfile(inspect.currentframe()))[0], "../../util")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
#Import the metrics definitions path.
metrics_folder = os.path.realpath(os.path.abspath(os.path.join(
os.path.split(inspect.getfile(inspect.currentframe()))[0], "../metrics")))
if metrics_folder not in sys.path:
sys.path.insert(0, metrics_folder)
from log import *
from profiler import *
from definitions import *
from misc import *
import shlex
import subprocess
import re
import collections
'''
This class implements the QDA benchmark.
'''
class QDA(object):
'''
Create the QDA benchmark instance.
@param dataset - Input dataset to perform QDA on.
@param timeout - The time until the timeout. Default no timeout.
@param path - Path to the matlab binary.
@param verbose - Display informational messages.
'''
def __init__(self, dataset, timeout=0, path=os.environ["MATLAB_BIN"],
    verbose=True):
    # NOTE(review): the default for `path` reads MATLAB_BIN when the module is
    # imported, so importing fails if the variable is unset — confirm intended.
    self.verbose = verbose    # display informational messages
    self.dataset = dataset    # benchmark files; RunMetrics uses [0]/[1] as -t/-T args and [2] as true labels
    self.path = path          # location of the matlab binary
    self.timeout = timeout    # seconds before the external run is aborted (0 = no timeout)
    self.opts = {}            # extra options (unused by the methods shown here)
'''
Destructor to clean up at the end. Use this method to remove created files.
'''
def __del__(self):
    """Remove the files the benchmark created once the instance is discarded."""
    Log.Info("Clean up.", self.verbose)
    for leftover in ["predictions.csv"]:
        if os.path.isfile(leftover):
            os.remove(leftover)
'''
LDA. If the method has been successfully completed return
the elapsed time in seconds.
@param options - Extra options for the method.
@return - Elapsed time in seconds or a negative value if the method was not
successful.
'''
def RunMetrics(self, options):
Log.Info("Perform QDA.", self.verbose)
# No options accepted for this task.
if len(options) > 0:
Log.Fatal("Unknown parameters: " + str(options))
raise Exception("unknown parameters")
inputCmd = "-t " + self.dataset[0] + " -T " + self.dataset[1]
# Split the command using shell-like syntax.
cmd = shlex.split(self.path + "matlab -nodisplay -nosplash -r \"try, QDA('"
+ inputCmd + "'), catch, exit(1), end, exit(0)\"")
# Run command with the nessecary arguments and return its output as a byte
# string. We have untrusted input so we disable all shell based features.
try:
s = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=False,
timeout=self.timeout)
except subprocess.TimeoutExpired as e:
Log.Warn(str(e))
return -2
except Exception as e:
Log.Fatal("Could not execute command: " + str(cmd))
return -1
# Datastructure to store the results.
metrics = {}
# Parse data: runtime.
timer = self.parseTimer(s)
if timer != -1:
predictions = np.genfromtxt("predictions.csv", delimiter = ',')
truelabels = np.genfromtxt(self.dataset[2], delimiter = ',')
metrics['Runtime'] = timer.total_time
confusionMatrix = Metrics.ConfusionMatrix(truelabels, predictions)
metrics['ACC'] = Metrics.AverageAccuracy(confusionMatrix)
metrics['MCC'] = Metrics.MCCMultiClass(confusionMatrix)
metrics['Precision'] = Metrics.AvgPrecision(confusionMatrix)
metrics['Recall'] = Metrics.AvgRecall(confusionMatrix)
metrics['MSE'] = Metrics.SimpleMeanSquaredError(truelabels, predictions)
Log.Info(("total time: %fs" % (metrics['Runtime'])), self.verbose)
return metrics
'''
Parse the timer data form a given string.
@param data - String to parse timer data from.
@return - Namedtuple that contains the timer data or -1 in case of an error.
'''
def parseTimer(self, data):
# Compile the regular expression pattern into a regular expression object to
# parse the timer data.
pattern = re.compile(br"""
.*?total_time: (?P<total_time>.*?)s.*?
""", re.VERBOSE|re.MULTILINE|re.DOTALL)
match = pattern.match(data)
if not match:
Log.Fatal("Can't parse the data: wrong format")
return -1
else:
# Create a namedtuple and return the timer data.
timer = collections.namedtuple("timer", ["total_time"])
return timer(float(match.group("total_time")))
| [
"dewangsultania@gmail.com"
] | dewangsultania@gmail.com |
2c6fb144b59995510b219aca23920c4346ded296 | 77e29df803e7a7e0cf2c8edf1a734e2723f79f37 | /buildmenuviews/menus2.py | c94fcd4815ac9b1498460350601b5eec38dee15d | [
"MIT"
] | permissive | acadianshadow237/BA_MDI1 | b6db7633fa8dfddd3141cb331899344d762d1498 | 73e0e87c15ff083ce860f7a09fa2de3a3c71c215 | refs/heads/master | 2023-08-28T04:54:21.497942 | 2021-10-18T18:42:19 | 2021-10-18T18:42:19 | 396,855,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,798 | py | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'menus2.ui'
##
## Created by: Qt User Interface Compiler version 6.1.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
class Ui_MainWindow(object):
    """Designer-generated UI scaffold for the PVMT_SNAP editor main window.

    Auto-generated from menus2.ui by the Qt User Interface Compiler; edits
    here are lost on regeneration (see the file header warning).
    """

    def setupUi(self, MainWindow):
        # Build all widgets, actions, menus and the toolbar on MainWindow.
        if not MainWindow.objectName():
            MainWindow.setObjectName(u"MainWindow")
        MainWindow.resize(1253, 866)
        # --- Actions -------------------------------------------------------
        self.actionExit = QAction(MainWindow)
        self.actionExit.setObjectName(u"actionExit")
        self.actionLogout = QAction(MainWindow)
        self.actionLogout.setObjectName(u"actionLogout")
        self.actionAnalysis_Sections = QAction(MainWindow)
        self.actionAnalysis_Sections.setObjectName(u"actionAnalysis_Sections")
        self.actionConstruction_Rehab_History = QAction(MainWindow)
        self.actionConstruction_Rehab_History.setObjectName(u"actionConstruction_Rehab_History")
        self.actionConst_Rehab_Layer_Detail = QAction(MainWindow)
        self.actionConst_Rehab_Layer_Detail.setObjectName(u"actionConst_Rehab_Layer_Detail")
        self.actionProject = QAction(MainWindow)
        self.actionProject.setObjectName(u"actionProject")
        self.actionAbout = QAction(MainWindow)
        self.actionAbout.setObjectName(u"actionAbout")
        self.actionAdd = QAction(MainWindow)
        self.actionAdd.setObjectName(u"actionAdd")
        self.actionUpdate = QAction(MainWindow)
        self.actionUpdate.setObjectName(u"actionUpdate")
        self.actionDelete = QAction(MainWindow)
        self.actionDelete.setObjectName(u"actionDelete")
        self.actionsplit = QAction(MainWindow)
        self.actionsplit.setObjectName(u"actionsplit")
        self.actionShift = QAction(MainWindow)
        self.actionShift.setObjectName(u"actionShift")
        self.actionMerge = QAction(MainWindow)
        self.actionMerge.setObjectName(u"actionMerge")
        self.actionAdjust = QAction(MainWindow)
        self.actionAdjust.setObjectName(u"actionAdjust")
        self.actionCopy = QAction(MainWindow)
        self.actionCopy.setObjectName(u"actionCopy")
        # --- Central widget, menu bar, toolbar, status bar -----------------
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName(u"centralwidget")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QMenuBar(MainWindow)
        self.menubar.setObjectName(u"menubar")
        self.menubar.setGeometry(QRect(0, 0, 1253, 22))
        self.menuFile = QMenu(self.menubar)
        self.menuFile.setObjectName(u"menuFile")
        self.menuView = QMenu(self.menubar)
        self.menuView.setObjectName(u"menuView")
        self.menuHelp = QMenu(self.menubar)
        self.menuHelp.setObjectName(u"menuHelp")
        self.menuEdit = QMenu(self.menubar)
        self.menuEdit.setObjectName(u"menuEdit")
        MainWindow.setMenuBar(self.menubar)
        # NOTE(review): TBAS is presumably a QToolBar subclass promoted in
        # Designer -- confirm it is importable wherever this class is used.
        self.toolBar = TBAS(MainWindow)
        self.toolBar.setObjectName(u"toolBar")
        MainWindow.addToolBar(Qt.TopToolBarArea, self.toolBar)
        self.statusBar = QStatusBar(MainWindow)
        self.statusBar.setObjectName(u"statusBar")
        MainWindow.setStatusBar(self.statusBar)
        # --- Menu population ----------------------------------------------
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuView.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.menuFile.addAction(self.actionLogout)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExit)
        self.menuView.addAction(self.actionAnalysis_Sections)
        self.menuView.addAction(self.actionConstruction_Rehab_History)
        self.menuView.addAction(self.actionConst_Rehab_Layer_Detail)
        self.menuView.addAction(self.actionProject)
        self.menuHelp.addAction(self.actionAbout)
        self.menuEdit.addAction(self.actionAdd)
        self.menuEdit.addAction(self.actionUpdate)
        self.menuEdit.addAction(self.actionCopy)
        self.menuEdit.addAction(self.actionDelete)
        self.menuEdit.addSeparator()
        self.menuEdit.addAction(self.actionsplit)
        self.menuEdit.addAction(self.actionShift)
        self.menuEdit.addAction(self.actionMerge)
        self.menuEdit.addAction(self.actionAdjust)

        self.retranslateUi(MainWindow)
        # Exit action closes the window directly.
        self.actionExit.triggered.connect(MainWindow.close)

        QMetaObject.connectSlotsByName(MainWindow)
    # setupUi

    def retranslateUi(self, MainWindow):
        # Install all user-visible (translatable) strings.
        MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"PVMT_SNAP editor", None))
        self.actionExit.setText(QCoreApplication.translate("MainWindow", u"Exit", None))
        self.actionLogout.setText(QCoreApplication.translate("MainWindow", u"Logout", None))
        self.actionAnalysis_Sections.setText(QCoreApplication.translate("MainWindow", u"Analysis Sections", None))
        self.actionConstruction_Rehab_History.setText(QCoreApplication.translate("MainWindow", u"Construction Rehab History", None))
        self.actionConst_Rehab_Layer_Detail.setText(QCoreApplication.translate("MainWindow", u"Const Rehab Layer Detail", None))
        self.actionProject.setText(QCoreApplication.translate("MainWindow", u"Project", None))
        self.actionAbout.setText(QCoreApplication.translate("MainWindow", u"About", None))
        self.actionAdd.setText(QCoreApplication.translate("MainWindow", u"Add", None))
        self.actionUpdate.setText(QCoreApplication.translate("MainWindow", u"Update", None))
        self.actionDelete.setText(QCoreApplication.translate("MainWindow", u"Delete", None))
        self.actionsplit.setText(QCoreApplication.translate("MainWindow", u"Split", None))
        self.actionShift.setText(QCoreApplication.translate("MainWindow", u"Shift", None))
        self.actionMerge.setText(QCoreApplication.translate("MainWindow", u"Merge", None))
        self.actionAdjust.setText(QCoreApplication.translate("MainWindow", u"Adjust", None))
        self.actionCopy.setText(QCoreApplication.translate("MainWindow", u"Copy", None))
        self.menuFile.setTitle(QCoreApplication.translate("MainWindow", u"File", None))
        self.menuView.setTitle(QCoreApplication.translate("MainWindow", u"View", None))
        self.menuHelp.setTitle(QCoreApplication.translate("MainWindow", u"Help", None))
        self.menuEdit.setTitle(QCoreApplication.translate("MainWindow", u"Edit", None))
        self.toolBar.setWindowTitle(QCoreApplication.translate("MainWindow", u"toolBar", None))
    # retranslateUi
| [
"acadianshadow237@gmail.com"
] | acadianshadow237@gmail.com |
e1f6740a875c434bf2e70839f5493f69bb4e96d7 | 64b6364b2cea4e49cc1768e159ceb3fb438fc096 | /src/metric_runner.py | dc4d64f00f77ed24aac17d9f471364a1a419b32d | [] | no_license | nkartashov/4genome_tester | 902828f2a4373df9888788d4cb98398700259e7b | 547446b9f38ee69177d8a12bb171c1d2ae993cad | refs/heads/master | 2016-09-06T01:08:24.565208 | 2015-06-04T22:55:17 | 2015-06-04T22:55:17 | 34,047,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | __author__ = 'nikita_kartashov'
from src.graph.statistics import get_distribution_metric, \
get_simple_paths_metric, \
get_bp_distance_metric, \
get_dcj_distance_metric, \
get_ca_metric, \
get_mca_metric, \
get_cumulative_metric_batch
from .metrics.metrics import Metrics
# (metric function, short label) pairs, each evaluated one topology at a time.
# The commented-out entries are candidate metrics kept for reference.
ANNOTATED_SINGLE_METRICS = (
    # (get_distribution_metric, 'D'), # Distribution
    # (get_simple_paths_metric, 'SP'), # Simple Paths
    # (get_bp_distance_metric, 'S_BP'),
    # (get_dcj_distance_metric, 'S_DCJ'),
    (get_ca_metric, 'CA'),
    (get_mca_metric, 'MCA'),
)

# Metrics that score all topologies in one batch call.
ANNOTATED_BATCH_METRICS = ((get_cumulative_metric_batch, 'MCA+'),)

METRICS = Metrics(ANNOTATED_SINGLE_METRICS, ANNOTATED_BATCH_METRICS)

# The three possible unrooted quartet splits over four genomes A, B, C, D.
A, B, C, D = 'A', 'B', 'C', 'D'
TOPOLOGIES = [((A, B), (C, D)),
              ((A, C), (B, D)),
              ((A, D), (C, B))]

# If we have m methods and n trees then function returns score matrix of m lines and n columns
# def run_metrics(breakpoint_graph):
#     return (((metric(breakpoint_graph, topology), topology) for topology in TOPOLOGIES) for metric in METRICS)
def compare_metric_results(breakpoint_graph, right_tree):
    """For each metric, yield 1 when its unique best-scoring topology equals
    right_tree, otherwise 0."""
    metric_results = METRICS.run_metrics(breakpoint_graph, TOPOLOGIES)

    def decide_if_right(scored_trees):
        # Materialise once: the scored pairs are traversed twice (min + filter).
        scored_trees = list(scored_trees)
        best_score = min(scored_trees)[0]
        winners = [tree for score, tree in scored_trees if score == best_score]
        # Correct only when exactly one topology attains the best score and it
        # matches the known true tree.
        return int(len(winners) == 1 and winners[0] == right_tree)
return (decide_if_right(score_tuple) for score_tuple in metric_results) | [
"snailandmail@gmail.com"
] | snailandmail@gmail.com |
65ef39321430c083fac304d0ea35f332bcf18662 | f8c164a1e0bab9d2816beb6fe2a7dd36759d1815 | /Python_Sintaxis/Operadores_de_asignacion.py | 6355f921595b162ff44ecbac067985c0ead9947c | [] | no_license | Solbanc/Programacion_python | e66daaf3dc7641f3934287b8090098889c6a2ac8 | 75e2d348d690ae659584cdec40841fd83722ae5e | refs/heads/main | 2023-08-14T18:03:29.111564 | 2021-09-30T05:07:37 | 2021-09-30T05:07:37 | 409,835,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | nombre = "Hola "
# Append the user's name to the greeting (prompt: "Write your name").
nombre+= input("Escribe tu nombre: ")
print(nombre," Esto es le incremento y decremento de una variable \n")

# Increment: Python has no ++ operator, so x += 1 is applied repeatedly.
print("Incremento o Decremento ")
x = 1
print("El valor inicial de x es: ",x)
x += 1
x += 1
x += 1
x += 1
print("El valor final de x es de: ", x ,"\n")

# Decrement: x -= 1 lowers the value by one each time.
print("Decremento o disminucio: ")
print("El valor inicial de x es: ",x)
x -= 1
x -= 1
x -= 1
x -= 1
print("El valor final de x es de: ", x)
| [
"sormack.javi@gmail.com"
] | sormack.javi@gmail.com |
36a11457b2ad103a18565c44f60b426d4dc20b3e | 99d436394e47571160340c95d527ecadaae83541 | /algorithms_questions/ch18_graph_theory/q45_1.py | 0053a3fd5f07e6c424f2a633246622ae14a46a7f | [] | no_license | LeeSeok-Jun/Algorithms | b47ba4de5580302e9e2399bcf85d245ebeb1b93d | 0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8 | refs/heads/main | 2023-03-02T06:47:20.939235 | 2021-02-08T05:18:24 | 2021-02-08T05:18:24 | 299,840,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,090 | py | """
Final Ranking - 2nd attempt (topological sort over pairwise rankings)
"""
# Solving time limit: 60 minutes
# 2020/12/31 11:10 ~ 11:31
# Failed - mistakes: misused the data (needed data[i]/data[j]) and did not
# seed the queue with the initial zero-indegree nodes.
from collections import deque

# Dead code kept below as a string: union-find helpers; the original note says
# "not needed for the topological sort algorithm".
"""
# 위상 정렬 알고리즘에서는 사용할 필요가 없다.
def find_parent(parent, x):
    if parent[x] != x:
        parent[x] = find_parent(parent, parent[x])
    return parent[x]

def union_parent(parent, a, b):
    a = find_parent(parent, a)
    b = find_parent(parent, b)
    if a < b:
        parent[b] = a
    else:
        parent[a] = b
"""

for tc in range(int(input())):
    n = int(input())

    # Left-over union-find bookkeeping; parent is initialised but never used.
    parent = [0] * (n + 1)
    for i in range(1, n+1):
        parent[i] = i

    indegree = [0] * (n+1)
    data = list(map(int, input().split()))
    graph = [[] for _ in range(n+1)]

    # Must use data[i] and data[j] (the team ids), not the raw indices!
    # Edge data[j] -> data[i] means data[j] ranked below data[i] last year.
    for i in range(n):
        for j in range(i+1, n):
            graph[data[j]].append(data[i])
            indegree[data[i]] += 1

    # m swapped pairs: flip the direction of each affected edge.
    m = int(input())
    for _ in range(m):
        a, b = map(int, input().split())
        if b not in graph[a]:
            graph[b].remove(a)
            indegree[a] -= 1
            graph[a].append(b)
            indegree[b] += 1
        else:
            graph[a].remove(b)
            indegree[b] -= 1
            graph[b].append(a)
            indegree[a] += 1

    cycle = False       # contradictory data -> no valid ranking
    certain = True      # more than one candidate at any step -> ambiguous

    q = deque()
    result = []

    # Previously missed: seed the queue with every zero-indegree node first.
    for i in range(1, n+1):
        if indegree[i] == 0:
            q.append(i)

    # Standard Kahn's algorithm; exactly one node must be available per step.
    for _ in range(n):
        if len(q) == 0:
            cycle = True
            break
        if len(q) >= 2:
            certain = False
            break
        now = q.popleft()
        result.append(now)
        for i in graph[now]:
            indegree[i] -= 1
            if indegree[i] == 0:
                q.append(i)

    if cycle:
        print("IMPOSSIBLE")
    elif not certain:
        print("?")
    else:
        # Edges point from worse to better, so the order is reversed for output.
        for i in reversed(result):
            print(i, end=" ")
        print()
| [
"seok9376@gmail.com"
] | seok9376@gmail.com |
b5faf82ad73aadaff1bd1970efa1a7fe32bb250f | bf15a97a377bc49495a8c278cd247387a08361fd | /intersight/models/hcl_exempted_catalog.py | 03d80031ad546ce01d077c8a759720408006b260 | [
"Apache-2.0"
] | permissive | movinalot/intersight-python | ffcb434e5fdf3f6e857dd967c794a64b2d2e05de | cdc3b082d75eac93b74029ab610e16d3008fdd8c | refs/heads/master | 2020-12-18T15:46:06.780834 | 2019-10-29T00:39:49 | 2019-10-29T00:39:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,346 | py | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-961
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HclExemptedCatalog(object):
    """HCL (hardware compatibility list) exemption catalog entry.

    A managed object recording a server/OS/adapter combination exempted from
    HCL validation, plus the standard Intersight MO bookkeeping fields.

    Originally produced by the swagger code generator; the 22 identical
    hand-written property/setter pairs are now generated in a loop below,
    and the unnecessary ``six`` dependency is dropped (``dict.items`` behaves
    the same for this iteration on both Python 2 and 3).

    Attribute meanings (types in ``swagger_types``, wire names in
    ``attribute_map``):

    * account_moid -- the Account ID for this managed object.
    * ancestors -- MO references of the ancestors in the containment hierarchy.
    * create_time / mod_time -- creation / last-modification timestamps.
    * domain_group_moid -- the DomainGroup ID for this managed object.
    * moid -- the unique identifier of this Managed Object instance.
    * object_type -- the fully-qualified type (class name) of this MO.
    * owners -- owners which represent effective ownership of this object.
    * parent -- the direct ancestor MO in the containment hierarchy.
    * shared_scope -- scope (global or license tier) to which a shared MO belongs.
    * tags -- key/value meta-data tags.
    * version_context -- the versioning info for this managed object.
    * comments -- reason for the exemption.
    * name -- a unique descriptive name of the exemption.
    * os_vendor / os_version -- vendor and version of the Operating System.
    * processor_name -- name of the processor supported for the server.
    * product_models / product_type -- models and type of the product/adapter.
    * server_pid -- three part ID of the server model (UCSM/CIMC XML APIs).
    * ucs_version / version_type -- UCS software version, and whether it is a
      UCSM release version or an IMC release.
    """

    # Attribute name -> swagger type string.
    swagger_types = {
        'account_moid': 'str',
        'ancestors': 'list[MoBaseMoRef]',
        'create_time': 'datetime',
        'domain_group_moid': 'str',
        'mod_time': 'datetime',
        'moid': 'str',
        'object_type': 'str',
        'owners': 'list[str]',
        'parent': 'MoBaseMoRef',
        'shared_scope': 'str',
        'tags': 'list[MoTag]',
        'version_context': 'MoVersionContext',
        'comments': 'str',
        'name': 'str',
        'os_vendor': 'str',
        'os_version': 'str',
        'processor_name': 'str',
        'product_models': 'list[str]',
        'product_type': 'str',
        'server_pid': 'str',
        'ucs_version': 'str',
        'version_type': 'str'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'account_moid': 'AccountMoid',
        'ancestors': 'Ancestors',
        'create_time': 'CreateTime',
        'domain_group_moid': 'DomainGroupMoid',
        'mod_time': 'ModTime',
        'moid': 'Moid',
        'object_type': 'ObjectType',
        'owners': 'Owners',
        'parent': 'Parent',
        'shared_scope': 'SharedScope',
        'tags': 'Tags',
        'version_context': 'VersionContext',
        'comments': 'Comments',
        'name': 'Name',
        'os_vendor': 'OsVendor',
        'os_version': 'OsVersion',
        'processor_name': 'ProcessorName',
        'product_models': 'ProductModels',
        'product_type': 'ProductType',
        'server_pid': 'ServerPid',
        'ucs_version': 'UcsVersion',
        'version_type': 'VersionType'
    }

    def __init__(self, account_moid=None, ancestors=None, create_time=None,
                 domain_group_moid=None, mod_time=None, moid=None,
                 object_type=None, owners=None, parent=None, shared_scope=None,
                 tags=None, version_context=None, comments=None, name=None,
                 os_vendor=None, os_version=None, processor_name=None,
                 product_models=None, product_type=None, server_pid=None,
                 ucs_version=None, version_type=None):
        """Create a HclExemptedCatalog; only non-None arguments are stored."""
        supplied = locals()
        for attr in self.swagger_types:
            # Back every property with a '_<attr>' slot defaulting to None,
            # exactly as the generated code did.
            setattr(self, '_' + attr, None)
            if supplied[attr] is not None:
                setattr(self, attr, supplied[attr])

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two catalog entries are equal when all stored fields match."""
        if not isinstance(other, HclExemptedCatalog):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other


def _hcl_exempted_catalog_property(attr):
    """Build the plain get/set property backing HclExemptedCatalog.<attr>."""
    def fget(self):
        return getattr(self, '_' + attr)

    def fset(self, value):
        setattr(self, '_' + attr, value)

    return property(fget, fset, doc="Value of the '%s' field." % attr)


# Attach one pass-through property per declared attribute; this replaces the
# 22 identical property/setter pairs emitted by the code generator while
# keeping the public attribute interface byte-for-byte compatible.
for _attr in HclExemptedCatalog.swagger_types:
    setattr(HclExemptedCatalog, _attr, _hcl_exempted_catalog_property(_attr))
del _attr
| [
"ategaw@cisco.com"
] | ategaw@cisco.com |
de8de4a17ab7c78b43d4dc1dd862aaa4d5ba5ef9 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/DestroyVirtualClusterRequest.py | f34fbf62e2477c455d21adcac88c4659473afa70 | [
"Apache-2.0"
] | permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 1,456 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkopenanalytics_open.endpoint import endpoint_data
class DestroyVirtualClusterRequest(RpcRequest):
    """RPC request envelope for the OpenAnalytics DestroyVirtualCluster action."""

    def __init__(self):
        """Initialise product/version/action metadata and endpoint data."""
        RpcRequest.__init__(
            self, 'openanalytics-open', '2018-06-19',
            'DestroyVirtualCluster', 'openanalytics')
        self.set_method('POST')
        # Endpoint resolution tables are attached only when the base class
        # exposes the corresponding attributes.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_Name(self):
        """Return the 'Name' body parameter, or None when it has not been set."""
        return self.get_body_params().get('Name')

    def set_Name(self, Name):
        """Set the 'Name' body parameter identifying the cluster to destroy."""
        self.add_body_params('Name', Name)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
2418a512b3ff7dfd65b3964bc9f6b7e1a524eccc | 418e26d0a5db209cff7516c692195d83a6125460 | /day_12/tast_02.py | 81e1a87326847ce5482032e4c32f6e8293c9b449 | [] | no_license | Rosayme/Python_class | 6b7a728d82bbdfe7cd3e8e4a556afaf291b5a832 | 06448b8d091e7784c03f5efc13d51fa6cc5ad628 | refs/heads/master | 2020-03-21T09:16:52.417842 | 2018-06-23T10:33:43 | 2018-06-23T10:33:43 | 138,391,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | import time
# 引入时间(防止报告被覆盖)
now = time.strftime('%Y-%m-%d_%H_%M_%S') #获取当前时间
file_path = 'test'+now+'.html'
print(now)
# 断言 测试用例的判断
# assertEqual(a,b) a==b
# assertNotEqual(a,b) a!=b
# assertTrue(x) bool(x) is true
# assertFalse(x) bool(x) is false
# assertIs(a,b) a is b
# assertIsNot(a,b) a is not b
# assertIsNone(x) x is None
# assertIsNotNone(x) x is not None
# assertIn(a,b) a in b # 成员运算符
# assertNotIn(a,b) a not in b
# assertIsInstance(a,b) isinstance(a,b)
# assertNotIsInstance(a,b) not isinstance(a,b) | [
"noreply@github.com"
] | noreply@github.com |
aa246a141acec672c194e15bd3b7d965d9edefad | 1f5553dbea14aae5040f1cb21f24a3f9052ec38f | /api_v1/middleware/__init__.py | 2499d14d58a4f386cbac3ce65e7c20c3dbb37589 | [] | no_license | Igorxp5/applada-api | ce79639a4fbbbbca9441994424c48ecbaa62379c | 93bf089412ebf17fb4ffec0186e5ea488abc6df2 | refs/heads/master | 2022-05-16T23:53:56.392365 | 2020-02-24T13:20:41 | 2020-02-24T13:37:36 | 231,856,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | from rest_framework import status
from django.utils.deprecation import MiddlewareMixin
from django.http import JsonResponse
from api_v1.core import not_found_json
class NotFoundMiddleware(MiddlewareMixin):
    """Rewrite any non-JSON 404 response into the project's standard JSON body."""

    def process_response(self, request, response):
        """Return the original response, unless it is a 404 whose body is not JSON."""
        is_not_found = response.status_code == status.HTTP_404_NOT_FOUND
        already_json = response.get('Content-Type') == 'application/json'
        if is_not_found and not already_json:
            return JsonResponse(not_found_json(), status=status.HTTP_404_NOT_FOUND)
        return response
"rogixp5@gmail.com"
] | rogixp5@gmail.com |
01f1a0827812125e6f431ac5d30b4e0d93d110aa | 1488596157b920b47daeba65bb7461b45d1e1b99 | /NotepadSI.py | c4818e72d3c16067ca3ef7d32b738536c2287860 | [] | no_license | Elliot-G-jackson/Simple-GUI | 9e1a64d8e467c8d189c854ab2c0dfe01466a0d39 | cd34c41138cd5dfcbf011bff231e9426b653b763 | refs/heads/main | 2023-08-18T10:59:36.898479 | 2021-10-09T14:17:30 | 2021-10-09T14:17:30 | 415,328,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | from tkinter import *
from tkinter import ttk
import tkinter as tk
from tkinter.filedialog import asksaveasfile
from tkinter.filedialog import askopenfilename, asksaveasfilename
#Main window
window = tk.Tk()
window.title("Text Editor Application")
#Save file system
def save_file():
    """Prompt for a destination path and write the editor contents to it."""
    target = asksaveasfilename(
        defaultextension="txt",
        filetypes=[("Text Files", "*.txt"), ("All Files", "*.*")],
    )
    # The dialog returns an empty string when cancelled: nothing to save.
    if not target:
        return
    with open(target, "w") as handle:
        handle.write(txt_edit.get(1.0, tk.END))
    window.title(f"Text Editor Application - {target}")
#Open file system
def open_file():
    """Ask for a file and load it into the editor, replacing the current text."""
    source = askopenfilename(
        filetypes=[("Text Files", "*.txt"), ("All Files", "*.*")]
    )
    # Dialog cancelled -> empty string; leave the editor unchanged.
    if not source:
        return
    txt_edit.delete(1.0, tk.END)
    with open(source, "r") as handle:
        txt_edit.insert(tk.END, handle.read())
    window.title(f"Text Editor Application - {source}")
# set the row and column configurations.
# Row 0 / column 1 hold the text area; weight=1 makes them absorb resizes.
window.rowconfigure(0, minsize=900, weight=1)
window.columnconfigure(1, minsize=900, weight=1)
# widgets for text box, frame and open and save.
txt_edit = tk.Text(window)
fr_buttons = tk.Frame(window)
btn_open = ttk.Button(fr_buttons, text="Open", command=lambda:open_file())
btn_save = ttk.Button(fr_buttons, text="Save As...", command=lambda:save_file())
btn_close = ttk.Button(fr_buttons, text="Close", command=window.destroy)
# Button locations: the three buttons stack vertically in the left-hand frame,
# stretching horizontally ("ew"); the frame hugs the left edge ("ns") and the
# text widget fills the remaining space ("nsew").
btn_open.grid(row=0, column=0, sticky="ew")
btn_save.grid(row=1, column=0, sticky="ew")
btn_close.grid(row=2, column=0, sticky="ew")
fr_buttons.grid(row=0, column=0, sticky="ns")
txt_edit.grid(row=0, column=1, sticky="nsew")
# Blocks here until the window is closed.
window.mainloop()
"noreply@github.com"
] | noreply@github.com |
2fe994984c180f8dbc2d5226e736a78e48b6e485 | 9880e22384803f7e575eb1b5b79be9945a7820bb | /main.py | 6bc164e65f68418d7be3ea4acab574a5476c875f | [] | no_license | Requinard/journalert | 5020c22b03aa8f723ea249e52eca97d96cc388f1 | 23f326f7a83bb31f3aab6f9d099906356fd176e3 | refs/heads/master | 2021-01-25T09:31:45.348900 | 2017-06-09T11:01:03 | 2017-06-09T11:01:03 | 93,845,243 | 0 | 0 | null | 2019-10-21T15:00:38 | 2017-06-09T09:58:22 | Python | UTF-8 | Python | false | false | 1,890 | py | import select
import json
import telepot
import os
from systemd import journal
priority = [
'Emergency',
'Alert',
'Critical',
'Error',
'Warning',
'Notice',
'Informational',
'Debug'
]
class TelegramBackend:
    """Delivers notification text to every configured Telegram recipient."""

    def __init__(self, config):
        """Build the bot from the 'telegram' section of the application config."""
        self.config = config['telegram']
        self.bot = telepot.Bot(self.config['token'])

    def send(self, message):
        """Send *message* to each chat id listed under config['recipients']."""
        for chat_id in self.config['recipients']:
            print("sending message to {0}".format(chat_id))
            self.bot.sendMessage(chat_id, message)
def create_poll(journal):
    """Create a select.poll object registered on the journal reader's events.

    NOTE(review): the parameter is named ``journal`` but is never used — the
    poll object is registered against the module-level reader ``j`` instead,
    and the caller actually passes the ``systemd.journal`` *module* here.
    Confirm whether the intent was to register the object passed in.
    """
    p=select.poll()
    p.register(j, j.get_events())
    return p
def create_journal_reader():
    """Return a journal Reader for this boot/machine, seeked past existing entries."""
    reader = journal.Reader()
    # Restrict the stream to entries from the current boot on this machine.
    reader.this_boot()
    reader.this_machine()
    # Start at the end of the queue so only newly appended entries are seen.
    reader.seek_tail()
    return reader
def apply_config_to_journal(j, config):
    """Install one _SYSTEMD_UNIT match per configured matcher on reader *j*.

    Re-seeks to the tail afterwards and returns the same reader.
    """
    unit_names = [entry['unit'] for entry in config['matchers']]
    for unit in unit_names:
        j.add_match(_SYSTEMD_UNIT=unit)
    j.seek_tail()
    return j
def parse_message(message):
    """Render a journal entry dict as a human-readable notification string.

    Uses the detailed format when the unit and priority fields are present;
    falls back to a shorter format (service 'Unknown') on KeyError.
    """
    detailed = "System: {2}\nPriority: {3}\n\nService: {0}\n\nMessage: {1}"
    fallback = "System: {2}\n\nService: {0}\n\nMessage: {1}"
    try:
        text = detailed.format(
            message['_SYSTEMD_UNIT'],
            message['MESSAGE'],
            message['_HOSTNAME'],
            priority[message['PRIORITY']],
        )
    except KeyError:
        text = fallback.format('Unknown', message['MESSAGE'], message['_HOSTNAME'])
    return text.strip()
def get_config():
    """Load and return the parsed ``matchers.json`` next to this module.

    Returns the configuration dict, e.g. ``{"matchers": [{"unit": ...}, ...]}``.
    Raises FileNotFoundError when the config file is absent.
    """
    path = os.path.abspath(os.path.dirname(__file__))
    # Bug fix: the original opened the file in 'r+' and never closed it,
    # leaking the handle (and needlessly requiring write access).
    with open(os.path.join(path, 'matchers.json'), 'r') as config_file:
        return json.load(config_file)
if __name__ == '__main__':
    # Load matcher config, build a filtered journal reader, and forward every
    # newly appended matching entry to Telegram, polling indefinitely.
    config = get_config()
    print(config)
    j = apply_config_to_journal(create_journal_reader(), config)
    poll = create_poll(journal)
    telegram = TelegramBackend(config)
    while True:
        # 250 ms poll timeout; an empty poll result just loops again.
        if poll.poll(250):
            # APPEND means new entries were added since the last process() call.
            if j.process() == journal.APPEND:
                for entry in j:
                    telegram.send(parse_message(entry))
| [
"d.diks94@gmail.com"
] | d.diks94@gmail.com |
a8ff70e74c31d7dfdefc8f66262b1bad05c5a1c2 | 7b98faf4dfff3efeb3138deeb1c99f1f85385c4f | /Dj/asgi.py | ea5739a210de96570868d042a3acbbaf56a43dae | [] | no_license | BBFallen20/Django_ItBooker | c120edca4324301c0a973cef472e6b0d9bbbb61f | bb0935233c0b239362530c1f917b79d887f7171b | refs/heads/master | 2023-05-09T00:40:12.317544 | 2021-06-02T16:36:31 | 2021-06-02T16:36:31 | 293,116,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | """
ASGI config for Dj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Dj.settings')
application = get_asgi_application()
| [
"Danila.333"
] | Danila.333 |
c069e09415f1defdc8d01514c6fd17aa75e89705 | 2e3c34fb789df9b221afdf11d3c71ee63ef255e8 | /python/python的类方法使用.py | d9e6089883bf316c4dc4b26e4418f7f198b3b0ae | [] | no_license | alexshenyuefei/python- | 9ada18993590fd1c167313b8d04a8f944f3369cc | e95ccd123554a8cd91ab2b985cf090d792fcefde | refs/heads/master | 2021-09-08T08:07:15.754648 | 2018-03-08T13:01:42 | 2018-03-08T13:01:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # python的类充当js里对象,是基本的数据结构,可以存储属性.
class calculator(object):
    """Tiny demo class: class attributes act as shared state for class methods."""

    # Class-level operands shared by every caller of add().
    operand1 = 1
    operand2 = 2

    @classmethod
    def add(cls):
        """Store ``operand1 + operand2`` on the class as ``result``.

        ``cls`` is supplied by the interpreter, which is how the method body
        reaches the attributes defined outside the function scope.
        """
        cls.result = cls.operand1 + cls.operand2
calculator.add()
print(calculator.result) | [
"906634214@qq.com"
] | 906634214@qq.com |
5241cc7c3a50e8e6b00d938dec0ec8ed871222fd | f0270ae7c1c35bd42a1bec0f63919d5aad015470 | /main_app/__init__.py | 54b0baeae4bcc64e485eb57282437b48c02d60de | [] | no_license | NBsyxx/Software_engineering | 5b8497c3c5f00c7c39bd823ddd1a1c75ecda437c | e6a96cf72a751d89735a0900be9a54c661d1f5c6 | refs/heads/master | 2020-08-14T16:37:34.737900 | 2019-10-10T01:41:40 | 2019-10-10T01:41:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | import os
from flask import Flask
def create_app(test_config=None):
    """Application factory: build and configure the Flask app.

    test_config: optional mapping that overrides the on-disk configuration
    (used by the test suite); when None, instance/config.py is loaded instead.
    Returns the configured Flask application.
    """
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'main_app.sqlite'),
    )
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)
    # ensure the instance folder exists (OSError is raised when it already does)
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    @app.route('/welcome')
    def welcome():
        return "Welcome to our ERH system."
    # Wire up the database helpers and the feature blueprints.
    from . import database
    database.init_app(app)
    from . import authentication
    app.register_blueprint(authentication.auth_bp)
    from . import admin
    app.register_blueprint(admin.admin_bp)
    return app
| [
"yx1215@nyu.edu"
] | yx1215@nyu.edu |
17fcaffaf0ef060efc8efdd572e9c98802867ee9 | 7bc3c786950a5a246dae8fb4e9ae5d87a45dacc8 | /prim_dijkstra.py | 583efe5348c084d33838deccf7d2d3698d25b0e2 | [] | no_license | dsabljak/Infmre | 0d76bf8fc063b878845695bb2367c0f473fadb98 | 0505c7e4c8c714d0ae1400a09e9203d9c3d3d557 | refs/heads/master | 2023-01-31T00:03:50.001178 | 2020-12-12T09:59:16 | 2020-12-12T09:59:16 | 302,101,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,959 | py | class FileReader:
def __init__(self, path):
self.file = open(path, 'r')
self.nodes = set()
self.edges = []
self.edge_cost = dict()
self.parse()
"""
Data is written in .txt file like:
begin_node, end_node, cost, direction;
This method parses all rows of file and saves data
"""
def parse(self):
data = self.file.readlines()
for row in data:
begin_node = row.split(',')[0]
end_node = row.split(', ')[1]
edge = begin_node + end_node
cost = int(row.split(', ')[2])
direct = row.split(', ')[3].split(';')[0]
self.nodes.add(begin_node)
self.nodes.add(end_node)
self.edges.append(edge)
# If cost is 0, ignore it
# Maybe not the best way?
if cost != 0:
self.edge_cost[edge] = cost
# If direction is not directed, then create simetric edge
# For example AB -> BA
if direct == 'n':
self.edge_cost[edge[::-1]] = cost
print(self.nodes)
print(self.edges)
print(self.edge_cost)
# Function for getting all possible combinations for edges by nodes
# For example [a, b] returns set {bb, aa, ab, ba} (using set to avoid redundancy)
def get_all(nodes):
    """Return every ordered two-node concatenation over *nodes*.

    Includes self-pairs, e.g. ['a', 'b'] -> {'aa', 'ab', 'ba', 'bb'}.
    """
    # Both orientations are generated; the set union removes duplicates.
    forward = {first + second for first in nodes for second in nodes}
    backward = {second + first for first in nodes for second in nodes}
    return forward | backward
# Interactive Prim's-algorithm walk-through: greedily grows a spanning tree
# from an arbitrary start node, tracing every step to stdout (Croatian).
path = input("Insert path to file with data:")
data = FileReader(path)
nodes = data.nodes
edges = data.edges
edge_cost = data.edge_cost
used_nodes = []
used_edges = []
# Arbitrary start node (set.pop order is unspecified).
current_node = nodes.pop()
used_nodes.append(current_node)
print(f"Odabrani početak: {current_node}")
total_cost = 0
temp_edge_cost = dict() # candidate frontier: the "dist" column from class
while len(nodes) != 0:
    print(f"Neobiđeni vrhovi: {nodes}")
    # Add every edge leaving the current node towards an unvisited node
    # to the candidate frontier.
    for edge in edge_cost.keys():
        if current_node == edge[0]:
            if edge[1] not in used_nodes: # target already visited -> skip
                print(f"Rub koji razmatram: {edge}")
                print(f"Vrh koji razmatram: {edge[1]}")
                print(f"Obiđeni vrhovi: {used_nodes}")
                temp_edge_cost[edge] = edge_cost[edge]
    print(f"Ovo su privremeni vrhovi i udaljenosti: {temp_edge_cost}")
    # Greedy choice: cheapest edge currently on the frontier.
    min_edge = min(temp_edge_cost, key=temp_edge_cost.get)
    print(f"Minimalnu udaljenost ima: {min_edge} s udaljenosti {edge_cost[min_edge]}")
    print(f"Ovo su iskorišteni bridovi: {used_edges}")
    if min_edge not in used_edges or min_edge[::-1] not in used_edges: # add the edge unless it (or its mirror) was already taken
        print(f"{min_edge} i {min_edge[::-1]} nije u {used_edges} pa ga dodajem")
        used_edges.append(min_edge)
        print(f"Sada iskoristeni bridovi zgledaju ovako: {used_edges}")
        print(f"Nema smisla cuvati {min_edge} i {min_edge[::-1]} u privremenim udaljenostima: {temp_edge_cost} pa ga izbacujem")
        temp_edge_cost.pop(min_edge) # drop the chosen edge from the frontier so it cannot be picked again
        # The mirror edge may not exist for directed rows -> tolerate KeyError.
        try:
            temp_edge_cost.pop(min_edge[::-1])
        except:
            pass
        print(f"Sada izgledaju ovako: {temp_edge_cost}")
    total_cost += int(edge_cost[min_edge])
    current_node = min_edge[1]
    print(f"Trenutni vrh je: {current_node}")
    used_nodes.append(current_node)
    print(f"Iskorišteni vrhovi su sada: {used_nodes}")
    # Every edge between two already-visited nodes must leave the frontier,
    # otherwise it could be re-selected and form a cycle.
    edges_for_deletion = set(get_all(list(used_nodes)))
    print(f"Treba pobrisati: {edges_for_deletion}")
    for edge_for_deletion in edges_for_deletion:
        try:
            temp_edge_cost.pop(edge_for_deletion)
        except:
            pass
    print(f"Nakon brisanja: {temp_edge_cost}")
    nodes.remove(current_node)
print(used_edges)
print(total_cost)
| [
"dsabljak98@gmail.com"
] | dsabljak98@gmail.com |
7d455fe1a5c7e3d0f7e8897c95d81e07fe685f8e | 6c78029ef09dc24e3985afcb44e272194c8e244c | /System.py | d0337115a9e7529b9a4b571af7181bc87f5cf202 | [
"Apache-2.0"
] | permissive | Shimeshu/Login-system | a70d61af6598339bff91f6586b729e1b783c21fd | 71c47869615a31e0ea0dd019d8d778d9b2278cf3 | refs/heads/master | 2023-02-12T23:29:21.960373 | 2021-01-07T06:13:15 | 2021-01-07T06:13:15 | 327,310,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from Portals.Login import Login
from Portals.Register import Register
print('Welcome to the Login System!!')
# Single-letter menu: any input other than l/r (case-insensitive) is ignored.
user_input = input('Want to [L]ogin or [R]egister? -> ')
choice = user_input.lower()
if choice == 'l':
    Login()
elif choice == 'r':
    Register()
| [
"shimeshu12345@gmail.com"
] | shimeshu12345@gmail.com |
96cdda7deaa7720cd3559f3d0b3e5accb90e9308 | 6c597d56ab500f8d0788b803fdfb9ab4dbb37a90 | /openregistry/assets/claimrights/tests/transferring.py | 29487148a1b9b1a3825f6e85e4ebbe8f092f72a2 | [
"Apache-2.0"
] | permissive | openprocurement/openregistry.assets.claimrights | 1671e55313aa69b073d1662a0fe16a8bd604f4fd | 8f8d59760da3b647730da9d56e656a6ef4d12302 | refs/heads/master | 2021-05-14T23:59:00.664485 | 2019-03-27T15:33:44 | 2019-03-27T15:33:44 | 104,233,542 | 0 | 10 | Apache-2.0 | 2019-02-06T11:28:28 | 2017-09-20T15:27:44 | Python | UTF-8 | Python | false | false | 549 | py | # -*- coding: utf-8 -*-
import unittest
from openregistry.assets.claimrights.tests.base import AssetTransferWebTest
from openregistry.assets.core.tests.plugins.transferring.mixins import AssetOwnershipChangeTestCaseMixin
class AssetOwnershipChangeTest(AssetTransferWebTest,
                               AssetOwnershipChangeTestCaseMixin):
    """Ownership-change tests: all cases come from the mixin; the web-test
    base class supplies the asset fixtures. No extra behaviour is needed."""
    pass
def suite():
    """Build a TestSuite containing every AssetOwnershipChangeTest case."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(AssetOwnershipChangeTest))
    return tests
if __name__ == "__main__":
unittest.main(defaultTest="suite")
| [
"leitsius@gmail.com"
] | leitsius@gmail.com |
cc0fb0e1305f44163f8acfc4e0a97aa557db9619 | 68b3d6369e895aadff8be90a662b9763348264e2 | /grep/work.py | 2c63dfe6d8c520fd4b85c71298be1d6fc1dca757 | [] | no_license | vidu120/myideas | 5af515eb595ef33fecb17fedbc71c001a05ef382 | c4fc02a60942651a16f8c6580c61cb47e1492f82 | refs/heads/master | 2022-12-19T12:28:53.042282 | 2020-09-17T18:47:06 | 2020-09-17T18:47:06 | 287,235,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | #!/usr/bin/env python3
# Echo the file "mine" line by line, stripping only the trailing newline.
with open("mine") as handle:
    for raw_line in handle:
        print(raw_line.rstrip("\n"))
| [
"vidhangoyal10@gmail.com"
] | vidhangoyal10@gmail.com |
7144a6ff54454b0dd9e092387cc4f7c4aea0c587 | b9c942beef6fbb70fd5ce221e8401a992b2ad202 | /Heroku Model Deployment/server/project.py | 3458ccc459feffe0e1a091190173d551d2f456d5 | [] | no_license | dmanwill/Hand-Gesture-Recognition-Dataset-and-CNN-Model | 0dd68f899778b9e96dac5fb6ef2e02c2655a523b | fce4e825e5e87a82f8910dc3603bbf2313983842 | refs/heads/master | 2023-03-30T02:00:08.015228 | 2021-04-11T00:12:11 | 2021-04-11T00:12:11 | 354,685,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,074 | py | import os
# GUI Packages.
import matplotlib.pyplot as plt
import ipywidgets as widgets
import time
# AWS Packages.
import boto3
# AWS Variables.
# Credentials are read from the environment; a missing variable raises
# KeyError at import time (fail-fast rather than running unauthenticated).
accessKeyID = os.environ["AWS_ACCESS_KEY_ID"]
secretAccessKey = os.environ["AWS_SECRET_ACCESS_KEY"]
# Fixed S3 bucket / Lambda names and the well-known file names exchanged
# between this notebook and the Lambda function.
s3BucketName = "heroku-deployment"
lambda_function_name = "heroku_deployment"
inputImageFileName = "digit.jpg"
resultsDataFileName = "results.txt"
def parseAndShowResults(resultsDataFileName):
    """Print the raw results file and redisplay the uploaded image.

    NOTE(review): the file is opened twice — the ``with`` handle is never read
    and the inner ``open(...).read()`` handle is never closed. The commented
    parsing (highest-probability prediction) is not implemented; the raw text
    is printed as-is. Also relies on the global ``imageBytesData`` set by
    selectimage2upload — confirm call order.
    """
    with open(resultsDataFileName, "r") as results:
        # Extract prediction results.
        # Find the prediction value with the highest prediction value.
        print(open(resultsDataFileName).read())
    # Display predicted value, prediction probability, and image of the hand-writtent digit that was classified.
    display(widgets.Image(value=imageBytesData))
    pass
## AWS Image Upload callback function and button ##
# Upload digit.png to S3 to produce the results.txt using lambda.
def awsImageUpload(data):
    """Upload the saved digit image to S3, trigger Lambda, and poll for results.

    NOTE(review): ``NoCredentialsError`` is never imported in this file — if it
    fires, the except clause itself raises NameError (it normally comes from
    ``botocore.exceptions``). The polling loop below also spins forever if the
    Lambda never writes results.txt.
    """
    client = boto3.client(
        's3',
        aws_access_key_id=accessKeyID,
        aws_secret_access_key=secretAccessKey
    )
    # Upload digit.png to S3.
    try:
        client.upload_file(inputImageFileName, s3BucketName, inputImageFileName)
        print("Upload Successful")
    except FileNotFoundError:
        print("The file was not found")
        return False
    except NoCredentialsError:
        print("Credentials not available")
        return False
    # Fire-and-forget Lambda invocation (InvocationType='Event' is async).
    try:
        lambda_client = boto3.client('lambda', region_name='us-east-1')
        lambda_client.invoke(FunctionName=lambda_function_name, InvocationType='Event')
        print("AWS Processing...")
    except:
        print("Couldn't properly call AWS Lambda function")
    # Waiting and checking to see if the results.txt has been produced and placed in S3 from Lambda.
    # The sleep interval comes from the GUI slider (0..1 seconds).
    time.sleep(awsProgressRefreshRateSlider.value)
    fount_text = False
    while(not fount_text):
        time.sleep(awsProgressRefreshRateSlider.value)
        try:
            client.download_file(s3BucketName, resultsDataFileName, resultsDataFileName)
            fount_text = True
        except:
            print("waiting for result")
    # Removing input digit.jpg and output results.txt from S3.
    client.delete_object(Bucket=s3BucketName, Key = inputImageFileName)
    client.delete_object(Bucket=s3BucketName, Key = resultsDataFileName)
    # Display Results
    parseAndShowResults(resultsDataFileName)
## Image upload callback function and button ##
def selectimage2upload(imageData):
    """Handle a FileUpload change event: save the image and reveal AWS controls.

    ``imageData`` appears to be an ipywidgets observe payload whose "new" value
    maps filename -> {"content": bytes, ...} — TODO confirm against the
    ipywidgets FileUpload version in use.
    """
    # Due to the file structure, image file name needs to be
    # extracted to access the bytes data of the image.
    imageFileName = list(imageData["new"].keys())[0]
    # Image bytes data, stashed in a module global for parseAndShowResults.
    global imageBytesData
    imageBytesData = imageData["new"][imageFileName]["content"]
    # Writing image file to current directory with "inputImageFileName".
    with open(inputImageFileName, "wb") as imageFile:
        imageFile.write(imageBytesData)
    # Displaying uploaded image in GUI.
    display(widgets.Image(value=imageBytesData))
    # Showing AWS GUI Components after image is uploaded.
    display(awsProgressRefreshRateSlider)
    display(awsUploadButton)
    awsUploadButton.on_click(awsImageUpload)
def createDashBoard():
    """Build the notebook dashboard widgets and wire the upload callback."""
    # Allows the buttons to be accessed globally: Necessary
    # since some callback functions are dependent on these
    # widgets.
    global awsUploadButton
    global awsProgressRefreshRateSlider
    global image_upload_button
    awsUploadButton = widgets.Button(description='Upload to AWS')
    # AWS Image Upload Button.
    image_upload_button = widgets.FileUpload()
    # AWS Progress Refresh Rate Selector.
    awsProgressRefreshRateSlider = widgets.FloatSlider(max = 1.0)
    # Display GUI. Only the upload control is shown initially; the AWS
    # controls appear after an image is chosen (see selectimage2upload).
    display(image_upload_button)
    time.sleep(0.1)
    def when_loaded(change):
        selectimage2upload(change)
    image_upload_button.observe(when_loaded, names='value')
"Daniel.Manwiller@kla-tencor.com"
] | Daniel.Manwiller@kla-tencor.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.