blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2
values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684
values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22
values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147
values | src_encoding stringclasses 25
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 128 12.7k | extension stringclasses 142
values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7303cff5ec99102bf82cefbe7dbba1e098b81285 | 9088d49a7716bdfc9b5770e8e54ebf7be6958fcf | /Tests/Aula_20d.py | 0b3ae15da823725ffb438bbc6b939143e05b6448 | [
"MIT"
] | permissive | o-Ian/Practice-Python | 579e8ff5a63a2e7efa7388bf2d866bb1b11bdfe2 | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | refs/heads/main | 2023-05-02T02:21:48.459725 | 2021-05-18T18:46:06 | 2021-05-18T18:46:06 | 360,925,568 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def count(*num):
print(f'Recebi ao todos os números {num}. Há {len(num)} números.')
count(12, 34, 2, 1, 4)
count(4, 3, 1, 7, 10)
count(1, 2)
| [
"ianstigli@hotmail.com"
] | ianstigli@hotmail.com |
f4386254b812a61719e4b1a2931b317d490bcc62 | 65f3ada144f45bd5dbaf3d37ca9366ff54796f0c | /month7/findLadders.py | a417abd90108d7af8b53f6a6496eb2eadb19d4eb | [] | no_license | BruceHi/leetcode | 43977db045d9b78bef3062b16d04ae3999fe9ba7 | 0324d247a5567745cc1a48b215066d4aa796abd8 | refs/heads/master | 2022-09-22T14:18:41.022161 | 2022-09-11T23:45:21 | 2022-09-11T23:45:21 | 248,240,171 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,477 | py | # 单词接龙
from typing import List
from collections import deque
from collections import defaultdict
from string import ascii_lowercase
class Solution:
    """LeetCode 126 "Word Ladder II": return *all* shortest transformation
    sequences from ``beginWord`` to ``endWord``, where each step changes
    exactly one letter and every intermediate word must be in ``wordList``.

    Strategy: level-by-level BFS over a wildcard adjacency index
    (``"h*t" -> ["hot", "hat", ...]``).  Words discovered during a level are
    only merged into ``visited`` after the whole level has been expanded, so
    several equal-length paths may reach the same word (needed to enumerate
    every shortest ladder) while any longer path into it is pruned.
    """
    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        if endWord not in wordList:
            return []
        n = len(beginWord)
        # Bucket words by their one-letter wildcard patterns; two words are
        # adjacent iff they share a bucket.
        buckets = defaultdict(list)
        for word in wordList:
            for i in range(n):
                buckets[word[:i] + '*' + word[i + 1:]].append(word)
        res = []
        visited = {beginWord}
        queue = deque([(beginWord, [beginWord])])
        while queue and not res:
            level_seen = set()    # words first reached on this BFS level
            next_queue = deque()
            while queue:
                word, path = queue.popleft()
                if word == endWord:
                    # Found a shortest ladder; keep collecting others on
                    # this same level, but do not expand past the end word.
                    res.append(path)
                    continue
                for i in range(n):
                    for neighbor in buckets[word[:i] + '*' + word[i + 1:]]:
                        if neighbor not in visited:
                            level_seen.add(neighbor)
                            next_queue.append((neighbor, path + [neighbor]))
            # Merge only after the level is fully expanded: words already in
            # this level's queue must not be re-queued with longer paths,
            # but multiple same-length parents may still reach them.
            visited |= level_seen
            queue = next_queue
        return res
# Ad-hoc smoke test: print all shortest ladders for two sample inputs
# (the second has an unreachable end word, so it prints []).
solver = Solution()
cases = [
    ("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]),
    ("hit", "cog", ["hot", "dot", "dog", "lot", "log"]),
]
for begin_word, end_word, word_list in cases:
    print(solver.findLadders(begin_word, end_word, word_list))
| [
"BruceHi@users.github.com"
] | BruceHi@users.github.com |
df7913dd36c7238962153d6e680c9a6e7ad9d375 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/13164201.py | 05dfc374b8b17fa012a7aa7f0a1397e0cc3ab6c7 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/13164201.py generated: Fri, 27 Mar 2015 15:48:02
#
# Event Type: 13164201
#
# ASCII decay Descriptor: {[[B_s0]nos -> (D*(2007)~0 -> (D~0 -> K+ pi-) gamma ) (phi(1020) -> K+ K-) ]cc, [[B_s0]os -> (D*(2007)0 -> (D0 -> K- pi+) gamma ) (phi(1020) -> K- K+) ]cc}
#
# NOTE(review): auto-generated LHCb Gauss/DecFiles job-options file for
# event type 13164201 -- prefer regenerating over hand-editing the values.
from Configurables import Generation
# Signal generation uses the SignalRepeatedHadronization sample tool with
# Pythia production; the signal particle is the B_s0 (PID +/-531).
Generation().EventType = 13164201
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# EvtGen user decay file implementing the descriptor above
# (DecProdCut acceptance requirement, HELAMP001 helicity amplitudes).
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_Dst0phi,D0gamma,Kpi=DecProdCut,HELAMP001.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
# Alternative production mode: fire the B_s0 directly from a particle gun,
# decayed with EvtGen and filtered by the same DaughtersInLHCb cut.
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
# Gun kinematics sampled from a (pt, eta) spectrum histogram for PID 531.
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
# Smear the gun production vertex according to the beam-spot profile.
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13164201
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
32aae12aa67d9697a2cbea89e1ab6142f273cd3b | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/healthcareapis/azure-mgmt-healthcareapis/azure/mgmt/healthcareapis/models/__init__.py | cb27d5affb6e885dc7d62df53c2daa9ddf2efef1 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 2,970 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# The generated models exist in two flavors: ``_models_py3`` uses Python 3
# only syntax, while ``_models`` is the Python 2 compatible fallback.
# Importing the py3 module under Python 2 raises SyntaxError, hence the
# try/except on (SyntaxError, ImportError) below.
try:
    from ._models_py3 import CheckNameAvailabilityParameters
    from ._models_py3 import ErrorDetails, ErrorDetailsException
    from ._models_py3 import ErrorDetailsInternal
    from ._models_py3 import Operation
    from ._models_py3 import OperationDisplay
    from ._models_py3 import OperationResultsDescription
    from ._models_py3 import Resource
    from ._models_py3 import ServiceAccessPolicyEntry
    from ._models_py3 import ServiceAuthenticationConfigurationInfo
    from ._models_py3 import ServiceCorsConfigurationInfo
    from ._models_py3 import ServiceCosmosDbConfigurationInfo
    from ._models_py3 import ServicesDescription
    from ._models_py3 import ServicesNameAvailabilityInfo
    from ._models_py3 import ServicesPatchDescription
    from ._models_py3 import ServicesProperties
except (SyntaxError, ImportError):
    from ._models import CheckNameAvailabilityParameters
    from ._models import ErrorDetails, ErrorDetailsException
    from ._models import ErrorDetailsInternal
    from ._models import Operation
    from ._models import OperationDisplay
    from ._models import OperationResultsDescription
    from ._models import Resource
    from ._models import ServiceAccessPolicyEntry
    from ._models import ServiceAuthenticationConfigurationInfo
    from ._models import ServiceCorsConfigurationInfo
    from ._models import ServiceCosmosDbConfigurationInfo
    from ._models import ServicesDescription
    from ._models import ServicesNameAvailabilityInfo
    from ._models import ServicesPatchDescription
    from ._models import ServicesProperties
# Paged collections and enums have a single implementation.
from ._paged_models import OperationPaged
from ._paged_models import ServicesDescriptionPaged
from ._healthcare_apis_management_client_enums import (
    ProvisioningState,
    Kind,
    ServiceNameUnavailabilityReason,
    OperationResultStatus,
)

# Public API of this package: every model, paged collection and enum above.
__all__ = [
    'CheckNameAvailabilityParameters',
    'ErrorDetails', 'ErrorDetailsException',
    'ErrorDetailsInternal',
    'Operation',
    'OperationDisplay',
    'OperationResultsDescription',
    'Resource',
    'ServiceAccessPolicyEntry',
    'ServiceAuthenticationConfigurationInfo',
    'ServiceCorsConfigurationInfo',
    'ServiceCosmosDbConfigurationInfo',
    'ServicesDescription',
    'ServicesNameAvailabilityInfo',
    'ServicesPatchDescription',
    'ServicesProperties',
    'ServicesDescriptionPaged',
    'OperationPaged',
    'ProvisioningState',
    'Kind',
    'ServiceNameUnavailabilityReason',
    'OperationResultStatus',
]
| [
"zikalino@microsoft.com"
] | zikalino@microsoft.com |
935577a52f81fd6a39af6a8ab69bbb45ab1ed8b6 | 4809471274d6e136ac66d1998de5acb185d1164e | /pypureclient/flasharray/FA_2_5/models/software_installation_step.py | 4d482e18e00abb8dc317c2cd8d70662c91a5fdfc | [
"BSD-2-Clause"
] | permissive | astrojuanlu/py-pure-client | 053fef697ad03b37ba7ae21a0bbb466abf978827 | 6fa605079950765c316eb21c3924e8329d5e3e8a | refs/heads/master | 2023-06-05T20:23:36.946023 | 2021-06-28T23:44:24 | 2021-06-28T23:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,629 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class SoftwareInstallationStep(object):
    """Swagger-generated model for one step of a FlashArray software
    installation (upgrade).

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'str',
        'name': 'str',
        'start_time': 'int',
        'end_time': 'int',
        'checks': 'list[SoftwareInstallationStepsChecks]',
        'description': 'str',
        'details': 'str',
        'hop_version': 'str',
        'installation': 'Reference',
        'status': 'str'
    }
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'start_time': 'start_time',
        'end_time': 'end_time',
        'checks': 'checks',
        'description': 'description',
        'details': 'details',
        'hop_version': 'hop_version',
        'installation': 'installation',
        'status': 'status'
    }
    # No constructor argument is mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        id=None,  # type: str
        name=None,  # type: str
        start_time=None,  # type: int
        end_time=None,  # type: int
        checks=None,  # type: List[models.SoftwareInstallationStepsChecks]
        description=None,  # type: str
        details=None,  # type: str
        hop_version=None,  # type: str
        installation=None,  # type: models.Reference
        status=None,  # type: str
    ):
        """
        Keyword args:
            id (str): A globally unique, system-generated ID. The ID cannot be modified.
            name (str): Name of the resource. The name cannot be modified.
            start_time (int): Start time in milliseconds since the UNIX epoch.
            end_time (int): End time in milliseconds since the UNIX epoch.
            checks (list[SoftwareInstallationStepsChecks]): A list of checks in this upgrade step.
            description (str): Detailed description of the step.
            details (str): Detailed result of the step used to diagnose step failures.
            hop_version (str): The version to which the current hop is upgrading.
            installation (Reference): Referenced `software-installation` to which the step belongs.
            status (str): Status of the step. Valid values are `running` and `finished`. A status of `running` indicates that the step has not finished. A status of `finished` indicates that the check has finished.
        """
        # Only assign attributes that were explicitly provided; to_dict()
        # relies on hasattr() to decide which fields to serialize.
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if start_time is not None:
            self.start_time = start_time
        if end_time is not None:
            self.end_time = end_time
        if checks is not None:
            self.checks = checks
        if description is not None:
            self.description = description
        if details is not None:
            self.details = details
        if hop_version is not None:
            self.hop_version = hop_version
        if installation is not None:
            self.installation = installation
        if status is not None:
            self.status = status
    def __setattr__(self, key, value):
        # Reject any attribute not declared in the Swagger definition.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SoftwareInstallationStep`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Unset lazy `Property` placeholders behave as missing attributes
        # so that hasattr()/getattr() report them as absent.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # dict.items() replaces the former six.iteritems() call; it works on
        # both Python 2 and 3 without the py2-compat shim.
        for attr, _ in self.swagger_types.items():
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize model items inside lists.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize model values inside dicts.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated guard for dict-subclass models; a no-op for this class.
        if issubclass(SoftwareInstallationStep, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SoftwareInstallationStep):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hubert.chan@purestorage.com"
] | hubert.chan@purestorage.com |
f31541e6d2ecf93b94e193eee325b50654e6e8d5 | d2bc6792983724b22175c9d42a5b5a0fa174d576 | /Trakttv.bundle/Contents/Code/pts/session_manager.py | d335e61d9ad5ddf1cfb39c79ce1629805df7ee6b | [] | no_license | frentrop/Plex-Trakt-Scrobbler | f8e70bc4d1cf82545f675447bd0237a6436f41f5 | 70a59f62eb3812f9dba36a45697a4123b8c89dd9 | refs/heads/master | 2021-01-17T22:13:52.603253 | 2015-02-18T11:12:16 | 2015-02-18T11:12:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | from core.helpers import total_seconds
from core.logger import Logger
from data.watch_session import WatchSession
from pts.scrobbler import ScrobblerMethod
from datetime import datetime
from threading import Thread
import traceback
import time
log = Logger('pts.session_manager')
class SessionManager(Thread):
    """Background thread that maintains `WatchSession` records.

    Every 5 seconds it scans all stored sessions and deactivates any
    session that has been paused for longer than 15 seconds.  `cleanup()`
    prunes stale or invalid sessions when the manager starts.

    NOTE(review): this is Python 2 code (`except Exception, ex`) from a
    Plex plugin; `Datetime` in check_paused() is presumably the Plex
    framework API (distinct from the stdlib `datetime` imported above) --
    confirm it is injected by the plugin runtime.
    """
    def __init__(self):
        # Polled by run(); stop() clears it to end the loop.
        self.active = True
        super(SessionManager, self).__init__()
    def run(self):
        # Main loop: check sessions every 5 seconds until stop() is called.
        # Exceptions are logged and swallowed so one bad pass cannot kill
        # the thread.
        while self.active:
            try:
                self.check_sessions()
            except Exception, ex:
                log.error('Exception raised in session manager: %s', ex, exc_info=True)
            time.sleep(5)
    def check_sessions(self):
        # Run the pause check over every stored watch session.
        sessions = WatchSession.all()
        if not len(sessions):
            return
        for key, ws in sessions:
            self.check_paused(ws)
    def check_paused(self, ws):
        # Deactivate a session that has stayed paused for over 15 seconds
        # and notify the scrobbler with a 'pause' action.
        if not ws or ws.cur_state != 'paused' or not ws.paused_since:
            return
        if ws.active and Datetime.Now() > ws.paused_since + Datetime.Delta(seconds=15):
            log.debug("%s paused for 15s, watching status cancelled" % ws.title)
            ws.active = False
            ws.save()
            if not self.send_action(ws, 'pause'):
                log.info('Failed to send "pause" action for watch session')
    def start(self):
        # Cleanup sessions
        self.cleanup()
        # Start thread
        super(SessionManager, self).start()
    def stop(self):
        # Signal run() to exit after its current sleep interval.
        self.active = False
    @staticmethod
    def send_action(ws, action):
        # Forward an action to the scrobbler; returns True on success.
        # NOTE(review): a truthy handle_action() result is treated as
        # failure here -- verify against ScrobblerMethod's contract.
        if not ws.type:
            return False
        if ScrobblerMethod.handle_action(ws, action):
            return False
        return True
    @staticmethod
    def cleanup():
        # Delete sessions that are invalid or older than 24 hours, and flag
        # the surviving ones for a refresh.
        log.debug('Cleaning up stale or invalid sessions')
        sessions = WatchSession.all()
        if not len(sessions):
            return
        for key, ws in sessions:
            delete = False
            # Destroy invalid sessions
            if ws is None:
                delete = True
            elif not ws.last_updated or type(ws.last_updated) is not datetime:
                delete = True
            elif total_seconds(datetime.now() - ws.last_updated) / 60 / 60 > 24:
                # Destroy sessions last updated over 24 hours ago
                log.debug('Session %s was last updated over 24 hours ago, queued for deletion', key)
                delete = True
            # Delete session or flag for update
            if delete:
                log.info('Session %s looks stale or invalid, deleting it now', key)
                WatchSession.delete(key)
            elif not ws.update_required:
                log.info('Queueing session %s for update', key)
                ws.update_required = True
                ws.save()
        log.debug('Finished cleaning up')
| [
"gardiner91@gmail.com"
] | gardiner91@gmail.com |
d07ea0e9e3e5000189867d9bf01d01e77c958188 | efd471380d976614667e56c92f0aed671371fc63 | /All Programs/Day 2 - Operators.py | 7e26ea13a61812b56bc8a246512592504dfe4556 | [] | no_license | anshumanairy/Hacker-Rank | 39af46e76182d34637340d1755aff4afd7820083 | 6fef4c6a415422d9379232932358e4ee7430a6af | refs/heads/master | 2021-07-04T07:41:37.769152 | 2020-10-12T05:49:24 | 2020-10-12T05:49:24 | 181,359,750 | 2 | 2 | null | 2020-10-12T05:49:25 | 2019-04-14T19:38:18 | Python | UTF-8 | Python | false | false | 287 | py | #!/usr/bin/env python
# coding: utf-8
# In[7]:
def func():
    """Read a meal cost, tip percent and tax percent from stdin, then
    print the total cost rounded to the nearest integer (HackerRank
    "Day 2: Operators")."""
    meal_cost = float(input())
    tip_percent = int(input())
    tax_percent = int(input())
    # Tip and tax are both percentages of the base meal cost.
    tip = meal_cost * tip_percent / 100
    tax = meal_cost * tax_percent / 100
    print(round(meal_cost + tip + tax))


func()
# In[ ]:
| [
"anshuman.airy04@gmail.com"
] | anshuman.airy04@gmail.com |
e48f1cd2379953a3e76ed6637485ba7475088ac1 | e90a772733e73e45b4cdbb5f240ef3b4a9e71de1 | /251. Flatten 2D Vector.py | bda19790c5d3ad2a9bc44c05e8ad35702b52bffd | [] | no_license | jiewu-stanford/leetcode | 102829fcbcace17909e4de49c01c3d705b6e6e3a | cbd47f713d3307f900daf55c8f27301c70542fc4 | refs/heads/master | 2022-05-28T18:25:00.885047 | 2022-05-18T05:16:22 | 2022-05-18T05:16:22 | 214,486,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | '''
Title : 251. Flatten 2D Vector ($$$)
Problem : https://leetcode.com/problems/flatten-2d-vector/
: https://www.lintcode.com/problem/flatten-2d-vector/description
'''
''' Reference: https://www.cnblogs.com/lightwindy/p/8577871.html '''
class Vector2D(object):
    """Flattening iterator over a jagged 2-D list (LeetCode 251).

    Exposes the classic ``hasNext()`` / ``next()`` protocol; callers are
    expected to call ``hasNext()`` before each ``next()``.
    """

    def __init__(self, vec2d):
        # (row, col) is a cursor pointing at the next element to emit.
        self.row = 0
        self.col = 0
        self.vec2d = vec2d

    def next(self):
        """Return the element under the cursor and advance one column."""
        value = self.vec2d[self.row][self.col]
        self.col += 1
        return value

    def hasNext(self):
        """Skip exhausted or empty rows; report whether elements remain."""
        while self.row < len(self.vec2d) and self.col == len(self.vec2d[self.row]):
            self.row += 1
            self.col = 0
        return self.row < len(self.vec2d)
# Your Vector2D object will be instantiated and called as such:
# i, v = Vector2D(vec2d), []
# while i.hasNext(): v.append(i.next()) | [
"bayernscience@hotmail.com"
] | bayernscience@hotmail.com |
b07be83a3998ff9cfe606edc6c01aa9efbd148ac | a904e99110721719d9ca493fdb91679d09577b8d | /month01/all_code/day05/homework/exercise05.py | 0f12f974bdd279f020581ab649d02e68d2cbf968 | [
"Apache-2.0"
] | permissive | chaofan-zheng/tedu-python-demo | 7c7c64a355e5380d1f8b6464affeddfde0d27be7 | abe983ddc52690f4726cf42cc6390cba815026d8 | refs/heads/main | 2023-03-12T05:17:34.596664 | 2021-02-27T08:33:31 | 2021-02-27T08:33:31 | 323,350,480 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | """
练习:根据下列文字,提取变量,使用字符串格式化打印信息
湖北确诊67802人,治愈63326人,治愈率0.99
70秒是01分零10秒
"""
# Outbreak statistics for one region (Hubei): confirmed cases, cured
# cases, and the cure rate.
region = "湖北"
confirmed = 67802
cure = 63326
cure_rate = 0.9912345
# f-string formatting: ":.2f" renders the rate with two decimal places.
print(f"{region}确诊{confirmed}人,治愈{cure}人,治愈率{cure_rate:.2f}")

# Express a duration in minutes and seconds; ":02" zero-pads to 2 digits.
total_second = 70
print(f"{total_second}秒是{total_second // 60:02}分零{total_second % 60:02}秒")
| [
"chaofan1@berkeley.edu"
] | chaofan1@berkeley.edu |
08d43bcd8b9340063e0076a14ee544d6aa0c45fc | a29c6e83ae4f9010941d15c8fd4cfc67680bb054 | /keras/keras43_boston_3_lstm.py | cfebcb0b1b796a4fd2b370ae022e7dd61d10d643 | [] | no_license | ym0179/bit_seoul | f1ff5faf4ae20fbc8c0e2ed10a005f8bd4b2c2b8 | 14d1fb2752312790c39898fc53a45c1cf427a4d1 | refs/heads/master | 2023-02-27T19:52:23.577540 | 2021-02-08T00:30:16 | 2021-02-08T00:30:16 | 311,265,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,874 | py | #Day7
#2020-11-17
# Boston house-price prediction: a 1978 dataset of 13 factors influencing
# home prices in the Boston area; here regressed with an LSTM model.
from sklearn.datasets import load_boston
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
dataset = load_boston()
x = dataset.data
y = dataset.target
# print(x)
# print(x.shape, y.shape) #(506, 13) (506,)
#1. Preprocessing
#train-test split (80/20, then 80/20 again for a validation split)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8)
x_train ,x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.8)
#scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train) # fit the scaler on the training data only
x_train = scaler.transform(x_train)
x_val = scaler.transform(x_val)
x_test = scaler.transform(x_test)
#reshape: (samples, 13) -> (samples, 13, 1) as LSTM expects 3-D input
x_train = x_train.reshape(x_train.shape[0],13,1)
x_val = x_val.reshape(x_val.shape[0],13,1)
x_test = x_test.reshape(x_test.shape[0],13,1)
# keep 10 samples aside for a readable prediction printout
x_pred = x_test[:10]
y_pred = y_test[:10]
#2. Modeling
#input shape
# expected input rank per layer family: DNN - 1D, RNN - 2D, LSTM - 2D
model = Sequential()
# input_shape=(13, 1): 13 timesteps with 1 feature each, i.e. the 13
# columns are fed to the LSTM one at a time
model.add(LSTM(32, activation='relu',input_shape=(13,1)))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(8, activation='relu'))
model.add(Dense(1))
# model.summary()
#3. Compile & train (early stopping on validation loss, patience 10)
model.compile(loss="mse", optimizer="adam", metrics=["mae"])
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='val_loss',patience=10,mode='auto')
model.fit(x_train,y_train,epochs=300,batch_size=1,verbose=2,callbacks=[es],
    validation_data=(x_val,y_val))
#4. Evaluate
loss,mae = model.evaluate(x_test,y_test,batch_size=1)
print("loss : ",loss)
print("mae : ",mae)
#5. Predict
result = model.predict(x_pred)
print("예측값 : ", result.T.reshape(10,)) # reshape for readability
print("실제값 : ", y_pred)
y_predicted = model.predict(x_test) # use x_test for RMSE/R2 below, since x_pred has only 10 samples
#RMSE
#R2
import numpy as np
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predicted):
    # Root mean squared error between targets and predictions.
    return np.sqrt(mean_squared_error(y_test,y_predicted))
print("RMSE : ", RMSE(y_test, y_predicted))
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_predicted)
print("R2 : ",r2) # max value: 1
'''
loss :  12.263466835021973
mae :  2.7167487144470215
예측값 :  [25.90948    6.2764387 20.263472  17.902828  13.495611  26.259878
 19.45948   22.261282  23.709982  23.103811 ]
실제값 :  [23.1 10.4 17.4 20.5 13.  20.5 21.8 21.2 21.8 23.1]
RMSE :  3.5019234178103877
R2 :  0.8028192283008149
'''
"ym4766@gmail.com"
] | ym4766@gmail.com |
db05fb0ae0a739328e4c844cf59e78fe6fca7fd6 | 0cd64f3f67c6a3b130a788906da84ffc3d15396a | /Library/lib/python3.9/site-packages/sympy/physics/quantum/qft.py | 5d35d22e1f0985800bcb28b49c256ecca1930a4d | [
"MIT",
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"Python-2.0"
] | permissive | Ryorama/codeapp | 32ef44a3e8058da9858924df211bf82f5f5018f1 | cf7f5753c6c4c3431d8209cbaacf5208c3c664fa | refs/heads/main | 2023-06-26T09:24:13.724462 | 2021-07-27T17:54:25 | 2021-07-27T17:54:25 | 388,520,626 | 0 | 0 | MIT | 2021-07-22T16:01:32 | 2021-07-22T16:01:32 | null | UTF-8 | Python | false | false | 6,199 | py | """An implementation of qubits and gates acting on them.
Todo:
* Update docstrings.
* Update tests.
* Implement apply using decompose.
* Implement represent using decompose or something smarter. For this to
work we first have to implement represent for SWAP.
* Decide if we want upper index to be inclusive in the constructor.
* Fix the printing of Rk gates in plotting.
"""
from sympy import Expr, Matrix, exp, I, pi, Integer, Symbol
from sympy.functions import sqrt
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qexpr import QuantumError, QExpr
from sympy.matrices import eye
from sympy.physics.quantum.tensorproduct import matrix_tensor_product
from sympy.physics.quantum.gate import (
Gate, HadamardGate, SwapGate, OneQubitGate, CGate, PhaseGate, TGate, ZGate
)
__all__ = [
'QFT',
'IQFT',
'RkGate',
'Rk'
]
#-----------------------------------------------------------------------------
# Fourier stuff
#-----------------------------------------------------------------------------
class RkGate(OneQubitGate):
    """This is the R_k gate of the QFT.

    ``RkGate(target, k)`` is the single-qubit controlled-phase rotation
    used by the quantum Fourier transform; its target matrix is
    diag(1, exp(2*pi*I/2**k)).  For k in {1, 2, 3} construction returns
    the equivalent named gate (Z, Phase/S, T) instead of an RkGate.
    """
    gate_name = 'Rk'
    gate_name_latex = 'R'
    def __new__(cls, *args):
        # args must be exactly (target, k).
        if len(args) != 2:
            raise QuantumError(
                'Rk gates only take two arguments, got: %r' % args
            )
        # For small k, Rk gates simplify to other gates, using these
        # substitutions give us familiar results for the QFT for small numbers
        # of qubits.
        target = args[0]
        k = args[1]
        if k == 1:
            return ZGate(target)
        elif k == 2:
            return PhaseGate(target)
        elif k == 3:
            return TGate(target)
        args = cls._eval_args(args)
        inst = Expr.__new__(cls, *args)
        inst.hilbert_space = cls._eval_hilbert_space(args)
        return inst
    @classmethod
    def _eval_args(cls, args):
        # Fall back to this, because Gate._eval_args assumes that args is
        # all targets and can't contain duplicates.
        return QExpr._eval_args(args)
    @property
    def k(self):
        # Second label entry: the rotation index k.
        return self.label[1]
    @property
    def targets(self):
        # Only the first label entry is a target qubit; k is excluded.
        return self.label[:1]
    @property
    def gate_name_plot(self):
        # LaTeX name with k as a subscript, e.g. $R_4$, for circuit plots.
        return r'$%s_%s$' % (self.gate_name_latex, str(self.k))
    def get_target_matrix(self, format='sympy'):
        # 2x2 matrix diag(1, exp(2*pi*I/2**k)) acting on the target qubit.
        if format == 'sympy':
            return Matrix([[1, 0], [0, exp(Integer(2)*pi*I/(Integer(2)**self.k))]])
        raise NotImplementedError(
            'Invalid format for the R_k gate: %r' % format)
# Short alias, matching the module's __all__.
Rk = RkGate
class Fourier(Gate):
    """Superclass of Quantum Fourier and Inverse Quantum Fourier Gates.

    The gate label is ``(start, finish)``: the transform acts on qubits
    ``start`` through ``finish - 1`` (finish is exclusive here, per the
    module's TODO about the upper index).  Subclasses supply ``omega``.
    """
    @classmethod
    def _eval_args(self, args):
        # Validate the (start, finish) label before Gate processes it.
        if len(args) != 2:
            raise QuantumError(
                'QFT/IQFT only takes two arguments, got: %r' % args
            )
        if args[0] >= args[1]:
            raise QuantumError("Start must be smaller than finish")
        return Gate._eval_args(args)
    def _represent_default_basis(self, **options):
        # The computational (Z) basis is the default representation.
        return self._represent_ZGate(None, **options)
    def _represent_ZGate(self, basis, **options):
        """
        Represents the (I)QFT In the Z Basis

        Requires ``nqubits`` in options; builds the size x size DFT matrix
        with entries omega**(i*j)/sqrt(size) and embeds it with identities
        for any qubits outside the gate's range.
        """
        nqubits = options.get('nqubits', 0)
        if nqubits == 0:
            raise QuantumError(
                'The number of qubits must be given as nqubits.')
        if nqubits < self.min_qubits:
            raise QuantumError(
                'The number of qubits %r is too small for the gate.' % nqubits
            )
        size = self.size
        omega = self.omega
        #Make a matrix that has the basic Fourier Transform Matrix
        arrayFT = [[omega**(
            i*j % size)/sqrt(size) for i in range(size)] for j in range(size)]
        matrixFT = Matrix(arrayFT)
        #Embed the FT Matrix in a higher space, if necessary
        if self.label[0] != 0:
            matrixFT = matrix_tensor_product(eye(2**self.label[0]), matrixFT)
        if self.min_qubits < nqubits:
            matrixFT = matrix_tensor_product(
                matrixFT, eye(2**(nqubits - self.min_qubits)))
        return matrixFT
    @property
    def targets(self):
        # Qubits the transform acts on: start..finish-1.
        return range(self.label[0], self.label[1])
    @property
    def min_qubits(self):
        # Smallest register size that can hold this gate.
        return self.label[1]
    @property
    def size(self):
        """Size is the size of the QFT matrix"""
        return 2**(self.label[1] - self.label[0])
    @property
    def omega(self):
        # Abstract root of unity; QFT/IQFT override with concrete values.
        return Symbol('omega')
class QFT(Fourier):
    """The forward quantum Fourier transform."""
    gate_name = 'QFT'
    gate_name_latex = 'QFT'
    def decompose(self):
        """Decomposes QFT into elementary gates.

        Standard circuit: for each qubit (highest to lowest) a Hadamard
        followed by controlled R_k rotations, then swaps to reverse the
        qubit order.  Gates are accumulated by left-multiplication, so the
        rightmost factor is applied first.
        """
        start = self.label[0]
        finish = self.label[1]
        circuit = 1
        for level in reversed(range(start, finish)):
            circuit = HadamardGate(level)*circuit
            for i in range(level - start):
                circuit = CGate(level - i - 1, RkGate(level, i + 2))*circuit
        for i in range((finish - start)//2):
            circuit = SwapGate(i + start, finish - i - 1)*circuit
        return circuit
    def _apply_operator_Qubit(self, qubits, **options):
        # Apply by expanding into the elementary-gate circuit.
        return qapply(self.decompose()*qubits)
    def _eval_inverse(self):
        # The inverse of the forward transform is the IQFT on the same range.
        return IQFT(*self.args)
    @property
    def omega(self):
        # Primitive size-th root of unity for the forward transform.
        return exp(2*pi*I/self.size)
class IQFT(Fourier):
    """The inverse quantum Fourier transform."""
    gate_name = 'IQFT'
    gate_name_latex = '{QFT^{-1}}'
    def decompose(self):
        """Decomposes IQFT into elementary gates.

        Mirror image of QFT.decompose(): swaps first, then for each qubit
        (lowest to highest) the controlled rotations with negated k
        followed by a Hadamard, undoing the forward circuit step by step.
        """
        start = self.args[0]
        finish = self.args[1]
        circuit = 1
        for i in range((finish - start)//2):
            circuit = SwapGate(i + start, finish - i - 1)*circuit
        for level in range(start, finish):
            for i in reversed(range(level - start)):
                circuit = CGate(level - i - 1, RkGate(level, -i - 2))*circuit
            circuit = HadamardGate(level)*circuit
        return circuit
    def _eval_inverse(self):
        # The inverse of the inverse transform is the forward QFT.
        return QFT(*self.args)
    @property
    def omega(self):
        # Conjugate root of unity (negative exponent) for the inverse.
        return exp(-2*pi*I/self.size)
| [
"ken.chung@thebaselab.com"
] | ken.chung@thebaselab.com |
fd289b356f9617c2ebaaa01d271f3e18fca8ee97 | f2a2f41641eb56a17009294ff100dc9b39cb774b | /old_session/session_1/_144/_144_binary_tree_preorder_traversal.py | 09b613446a5be0cf7e9539edd9c8b63a2327f9da | [] | no_license | YJL33/LeetCode | 0e837a419d11d44239d1a692140a1468f6a7d9bf | b4da922c4e8406c486760639b71e3ec50283ca43 | refs/heads/master | 2022-08-13T01:46:14.976758 | 2022-07-24T03:59:52 | 2022-07-24T04:11:32 | 52,939,733 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | """
144. Binary Tree Preorder Traversal
Given a binary tree, return the preorder traversal of its nodes' values.
For example:
Given binary tree {1,#,2,3},
1
\
2
/
3
return [1,2,3].
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
# Preorder: Root => Left => Right
def helper(root, res):
res += root.val,
if root.left != None:
helper(root.left, res)
if root.right != None:
helper(root.right, res)
return
if root is None:
return []
res = []
helper(root, res)
return res | [
"yunjun.l33@gmail.com"
] | yunjun.l33@gmail.com |
b843458b7624d7a008fa6052f1a9a98ce728076f | 0396bc649c5b2ddb21a6b629e3daf1501dafd13f | /evalml/pipelines/components/ensemble/stacked_ensemble_base.py | 025bc52dafc00ab673641bf89fa6d07c059915c1 | [
"BSD-3-Clause"
] | permissive | Sandy4321/evalml | 3324fe6501091c51b67b4a573214ad4c6103c5a5 | 32f9be24d9d8479cf1a4d7a261c17fde213c50d1 | refs/heads/main | 2023-02-09T04:38:53.077488 | 2021-01-01T03:59:18 | 2021-01-01T03:59:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,329 | py | from evalml.exceptions import EnsembleMissingPipelinesError
from evalml.model_family import ModelFamily
from evalml.pipelines.components import Estimator
from evalml.pipelines.components.utils import scikit_learn_wrapped_estimator
from evalml.utils import classproperty
_nonstackable_model_families = [ModelFamily.BASELINE, ModelFamily.NONE]
class StackedEnsembleBase(Estimator):
"""Stacked Ensemble Base Class."""
model_family = ModelFamily.ENSEMBLE
_stacking_estimator_class = None
_default_final_estimator = None
_default_cv = None
def __init__(self, input_pipelines=None, final_estimator=None, cv=None, n_jobs=None, random_state=0, **kwargs):
"""Stacked ensemble base class.
Arguments:
input_pipelines (list(PipelineBase or subclass obj)): List of pipeline instances to use as the base estimators.
This must not be None or an empty list or else EnsembleMissingPipelinesError will be raised.
final_estimator (Estimator or subclass): The estimator used to combine the base estimators.
cv (int, cross-validation generator or an iterable): Determines the cross-validation splitting strategy used to train final_estimator.
For int/None inputs, if the estimator is a classifier and y is either binary or multiclass, StratifiedKFold is used. In all other cases, KFold is used.
Possible inputs for cv are:
- None: 5-fold cross validation
- int: the number of folds in a (Stratified) KFold
- An scikit-learn cross-validation generator object
- An iterable yielding (train, test) splits
n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Defaults to None.
- Note: there could be some multi-process errors thrown for values of `n_jobs != 1`. If this is the case, please use `n_jobs = 1`.
random_state (int, np.random.RandomState): seed for the random number generator
"""
if not input_pipelines:
raise EnsembleMissingPipelinesError("`input_pipelines` must not be None or an empty list.")
if [pipeline for pipeline in input_pipelines if pipeline.model_family in _nonstackable_model_families]:
raise ValueError("Pipelines with any of the following model families cannot be used as base pipelines: {}".format(_nonstackable_model_families))
parameters = {
"input_pipelines": input_pipelines,
"final_estimator": final_estimator,
"cv": cv,
"n_jobs": n_jobs
}
parameters.update(kwargs)
if len(set([pipeline.problem_type for pipeline in input_pipelines])) > 1:
raise ValueError("All pipelines must have the same problem type.")
cv = cv or self._default_cv(n_splits=3, random_state=random_state)
estimators = [scikit_learn_wrapped_estimator(pipeline) for pipeline in input_pipelines]
final_estimator = scikit_learn_wrapped_estimator(final_estimator or self._default_final_estimator())
sklearn_parameters = {
"estimators": [(f"({idx})", estimator) for idx, estimator in enumerate(estimators)],
"final_estimator": final_estimator,
"cv": cv,
"n_jobs": n_jobs
}
sklearn_parameters.update(kwargs)
super().__init__(parameters=parameters,
component_obj=self._stacking_estimator_class(**sklearn_parameters),
random_state=random_state)
@property
def feature_importance(self):
"""Not implemented for StackedEnsembleClassifier and StackedEnsembleRegressor"""
raise NotImplementedError("feature_importance is not implemented for StackedEnsembleClassifier and StackedEnsembleRegressor")
@classproperty
def default_parameters(cls):
"""Returns the default parameters for stacked ensemble classes.
Returns:
dict: default parameters for this component.
"""
return {
'final_estimator': None,
'cv': None,
'n_jobs': 1,
}
| [
"noreply@github.com"
] | Sandy4321.noreply@github.com |
435d64183cddf28cb10b05c638226a18cfe23383 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03042/s072464251.py | 0667e7e766e9808fb1edf62e69f58f1a05ba3c1c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | s = input()
def YYMM(s):
p = int(s[2:])
if 1 <= p <= 12:
return True
return False
def MMYY(s):
p = int(s[:2])
if 1 <= p <= 12:
return True
return False
if YYMM(s) and MMYY(s):
print('AMBIGUOUS')
elif YYMM(s):
print('YYMM')
elif MMYY(s):
print('MMYY')
else:
print('NA') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7915777eddf122fb1204b725cd49b39bf2fa5c7b | 45c0651d7785025f0e7a137d8abac0e66092a659 | /roles/lib_zabbix/library/zbx_user.py | c916fa96a61d1acba94daf38922e1d8ef0f4ede5 | [
"Apache-2.0"
] | permissive | pkdevbox/openshift-ansible | df3f0d75032b5dee4f962852e265437ba2656925 | 318ac6b9b65f42f032382114f35d3c9fa7f5610b | refs/heads/master | 2021-01-21T01:39:10.120698 | 2015-09-04T20:29:58 | 2015-09-04T20:29:58 | 41,973,441 | 1 | 0 | null | 2015-09-05T19:45:46 | 2015-09-05T19:45:46 | null | UTF-8 | Python | false | false | 6,779 | py | #!/usr/bin/env python
'''
ansible module for zabbix users
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix user ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
''' Check if key exists in content or the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def get_usergroups(zapi, usergroups):
''' Get usergroups
'''
ugroups = []
for ugr in usergroups:
content = zapi.get_content('usergroup',
'get',
{'search': {'name': ugr},
#'selectUsers': 'userid',
#'getRights': 'extend'
})
if content['result']:
ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']})
return ugroups or None
def get_passwd(passwd):
'''Determine if password is set, if not, return 'zabbix'
'''
if passwd:
return passwd
return 'zabbix'
def get_usertype(user_type):
'''
Determine zabbix user account type
'''
if not user_type:
return None
utype = 1
if 'super' in user_type:
utype = 3
elif 'admin' in user_type or user_type == 'admin':
utype = 2
return utype
def main():
'''
ansible zabbix module for users
'''
##def user(self, name, state='present', params=None):
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
login=dict(default=None, type='str'),
first_name=dict(default=None, type='str'),
last_name=dict(default=None, type='str'),
user_type=dict(default=None, type='str'),
password=dict(default=None, type='str'),
update_password=dict(default=False, type='bool'),
user_groups=dict(default=[], type='list'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
## before we can create a user media and users with media types we need media
zbx_class_name = 'user'
idname = "userid"
state = module.params['state']
content = zapi.get_content(zbx_class_name,
'get',
{'output': 'extend',
'search': {'alias': module.params['login']},
"selectUsrgrps": 'usergrpid',
})
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
if state == 'absent':
if not exists(content) or len(content['result']) == 0:
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
if state == 'present':
params = {'alias': module.params['login'],
'passwd': get_passwd(module.params['password']),
'usrgrps': get_usergroups(zapi, module.params['user_groups']),
'name': module.params['first_name'],
'surname': module.params['last_name'],
'type': get_usertype(module.params['user_type']),
}
# Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
if content.has_key('Error'):
module.exit_json(failed=True, changed=False, results=content, state='present')
module.exit_json(changed=True, results=content['result'], state='present')
# already exists, we need to update it
# let's compare properties
differences = {}
# Update password
if not module.params['update_password']:
params.pop('passwd', None)
zab_results = content['result'][0]
for key, value in params.items():
if key == 'usrgrps':
# this must be done as a list of ordered dictionaries fails comparison
if not all([True for _ in zab_results[key][0] if _ in value[0]]):
differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| [
"kwoodson@redhat.com"
] | kwoodson@redhat.com |
5d851d8cfe60de655c230608e5c5bb4c09078032 | e5a044708032b853f1cdf8906da63502716fd410 | /openapi_client/models/post_auth_transaction_all_of.py | 3108af8ded3b97784e38a869b08eb2ce2181a632 | [] | no_license | GBSEcom/Python | 4b93bab80476051fc99f379f018ac9fa109a8a6a | 5fa37dba8d0c3853686fdc726f863743376060c9 | refs/heads/master | 2021-12-04T12:55:29.605843 | 2021-11-19T22:01:03 | 2021-11-19T22:01:03 | 136,058,345 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,222 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.5.0.20211029.001
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PostAuthTransactionAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'transaction_amount': 'Amount',
'transaction_origin': 'TransactionOrigin',
'split_shipment': 'SplitShipment',
'soft_descriptor': 'SoftDescriptor'
}
attribute_map = {
'transaction_amount': 'transactionAmount',
'transaction_origin': 'transactionOrigin',
'split_shipment': 'splitShipment',
'soft_descriptor': 'softDescriptor'
}
def __init__(self, transaction_amount=None, transaction_origin=None, split_shipment=None, soft_descriptor=None): # noqa: E501
"""PostAuthTransactionAllOf - a model defined in OpenAPI""" # noqa: E501
self._transaction_amount = None
self._transaction_origin = None
self._split_shipment = None
self._soft_descriptor = None
self.discriminator = None
self.transaction_amount = transaction_amount
if transaction_origin is not None:
self.transaction_origin = transaction_origin
if split_shipment is not None:
self.split_shipment = split_shipment
if soft_descriptor is not None:
self.soft_descriptor = soft_descriptor
@property
def transaction_amount(self):
"""Gets the transaction_amount of this PostAuthTransactionAllOf. # noqa: E501
:return: The transaction_amount of this PostAuthTransactionAllOf. # noqa: E501
:rtype: Amount
"""
return self._transaction_amount
@transaction_amount.setter
def transaction_amount(self, transaction_amount):
"""Sets the transaction_amount of this PostAuthTransactionAllOf.
:param transaction_amount: The transaction_amount of this PostAuthTransactionAllOf. # noqa: E501
:type: Amount
"""
if transaction_amount is None:
raise ValueError("Invalid value for `transaction_amount`, must not be `None`") # noqa: E501
self._transaction_amount = transaction_amount
@property
def transaction_origin(self):
"""Gets the transaction_origin of this PostAuthTransactionAllOf. # noqa: E501
:return: The transaction_origin of this PostAuthTransactionAllOf. # noqa: E501
:rtype: TransactionOrigin
"""
return self._transaction_origin
@transaction_origin.setter
def transaction_origin(self, transaction_origin):
"""Sets the transaction_origin of this PostAuthTransactionAllOf.
:param transaction_origin: The transaction_origin of this PostAuthTransactionAllOf. # noqa: E501
:type: TransactionOrigin
"""
self._transaction_origin = transaction_origin
@property
def split_shipment(self):
"""Gets the split_shipment of this PostAuthTransactionAllOf. # noqa: E501
:return: The split_shipment of this PostAuthTransactionAllOf. # noqa: E501
:rtype: SplitShipment
"""
return self._split_shipment
@split_shipment.setter
def split_shipment(self, split_shipment):
"""Sets the split_shipment of this PostAuthTransactionAllOf.
:param split_shipment: The split_shipment of this PostAuthTransactionAllOf. # noqa: E501
:type: SplitShipment
"""
self._split_shipment = split_shipment
@property
def soft_descriptor(self):
"""Gets the soft_descriptor of this PostAuthTransactionAllOf. # noqa: E501
:return: The soft_descriptor of this PostAuthTransactionAllOf. # noqa: E501
:rtype: SoftDescriptor
"""
return self._soft_descriptor
@soft_descriptor.setter
def soft_descriptor(self, soft_descriptor):
"""Sets the soft_descriptor of this PostAuthTransactionAllOf.
:param soft_descriptor: The soft_descriptor of this PostAuthTransactionAllOf. # noqa: E501
:type: SoftDescriptor
"""
self._soft_descriptor = soft_descriptor
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PostAuthTransactionAllOf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"emargules@bluepay.com"
] | emargules@bluepay.com |
002a0a8ed95336f133ab86237afdd69b34f56e73 | 1da61f69428c4318e6bb43b96b9a72e65d6b1a59 | /arche_papergirl/models/tests/test_newsletter.py | 5b8dc42452b1611b1eb3e6d89bd5e539e51fba31 | [] | no_license | ArcheProject/arche_papergirl | ea16e453af248ca1ab571297559d8ebd8b6770b5 | e69a4c3ddd0c7e0d27f45f354f4c96807509de1a | refs/heads/master | 2020-04-10T01:44:21.189511 | 2018-07-13T12:33:42 | 2018-07-13T12:33:42 | 68,215,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
from pyramid import testing
from zope.interface.verify import verifyClass, verifyObject
from arche_papergirl.exceptions import AlreadyInQueueError
from arche_papergirl.interfaces import INewsletter
class NewsletterTests(TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
@property
def _cut(self):
from arche_papergirl.models.newsletter import Newsletter
return Newsletter
def test_verify_class(self):
self.failUnless(verifyClass(INewsletter, self._cut))
def test_verify_obj(self):
self.failUnless(verifyObject(INewsletter, self._cut()))
def test_add_queue(self):
obj = self._cut()
obj.add_queue('subscriber_uid', 'list_uid')
self.assertEqual(obj._queue[1], ('subscriber_uid', 'list_uid'))
self.assertEqual(obj._uid_to_status['subscriber_uid'][1][0:2], (1, 'list_uid'))
self.assertRaises(AlreadyInQueueError, obj.add_queue, 'subscriber_uid', 'list_uid')
def test_queue_len(self):
obj = self._cut()
self.assertEqual(obj.queue_len, 0)
obj.add_queue('subscriber_uid', 'list_uid')
self.assertEqual(obj.queue_len, 1)
def test_pop_next(self):
obj = self._cut()
obj.add_queue('subscriber_uid1', 'list_uid')
obj.add_queue('subscriber_uid2', 'list_uid')
obj.add_queue('subscriber_uid3', 'list_uid')
self.assertEqual(obj.pop_next(), ('subscriber_uid1', 'list_uid'))
self.assertEqual(obj.get_uid_status('subscriber_uid1')[0:2], (0, 'list_uid'))
def test_pop_next_empty(self):
obj = self._cut()
self.assertEqual(obj.pop_next(), (None, None, None))
| [
"robin@betahaus.net"
] | robin@betahaus.net |
1fd8eaaf0397d32379f65f494be6bf3ed513ef53 | 828115da62a687e1e9ea96bd5072f8b148e873a3 | /Segmenter.py | a4368dcd77129a6ead937c22f4282ef74099b30a | [] | no_license | VitalyRomanov/scidoc | cafc86f0f83c1ebd7ce17c8420cb158ab0844b89 | 2029abfff4eee8b919cc9bca7251d2edc7fd005f | refs/heads/master | 2021-02-16T11:44:02.495918 | 2020-03-05T07:33:08 | 2020-03-05T07:33:08 | 245,001,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from LanguageTools.nltk_wrapper import NltkWrapper
from nltk.classify.textcat import TextCat
class Segmenter:
def __init__(self):
self.tc = TextCat()
self.nlp_en = NltkWrapper("en")
self.nlp_ru = NltkWrapper("ru")
def __call__(self, full_text, segment_len=5, segment_overlap=2):
full_text = " ".join(full_text.split("\n"))
lang_guess = self.tc.guess_language(full_text[:200])
if lang_guess == "eng":
nlp = self.nlp_en
elif lang_guess == "rus":
nlp = self.nlp_ru
else:
nlp = None
if nlp is None:
return iter([])
sentences = nlp(full_text, tagger=False)
for ind in range(0, len(sentences) - segment_overlap, segment_len - segment_overlap):
segment_id = f"{ind}/{len(sentences)}_{segment_len}"
yield segment_id, sentences[ind:ind + segment_len]
| [
"mortiv16@gmail.com"
] | mortiv16@gmail.com |
87d56f0c0931ae0db0ef2c5b9d39c0a2dd09901b | cb2e2d84d970894bb6d1b414e91551118fcfc209 | /src/hal_configurator/ui/gen/testingflow.py | 11071babd0b3f997183097c0394d1f6135f98d6c | [] | no_license | codechem/hal_automator | 0cb54f1d086e831208533f9b934806045700e1e8 | e9404f33ee34a9068293daff33434d2c80d23865 | refs/heads/master | 2021-01-13T13:58:58.552727 | 2016-06-21T17:26:34 | 2016-06-21T17:26:34 | 72,894,281 | 1 | 0 | null | 2016-11-05T00:01:32 | 2016-11-05T00:01:32 | null | UTF-8 | Python | false | false | 679 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/halicea/projects/hal_automator/utils/qtUi/testingflow.ui'
#
# Created: Sun Nov 1 19:09:21 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(400, 300)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
| [
"costa@halicea.com"
] | costa@halicea.com |
9d4663d936f5bd08c8a3aee4b917a9858f6eace8 | 170864b6ec66be48138f231fe8ac3381481b8c9d | /python/BOJ_2563.py | ece6f7af4088ada9440550e4b7600777dadef860 | [] | no_license | hyesungoh/AA_Algorithm | 5da3d8312d035d324dfaa31eef73f01a238231f3 | d68f52eaa29cfc4656a8b5623359166779ded06e | refs/heads/master | 2023-06-09T14:49:01.402456 | 2021-06-28T10:10:09 | 2021-06-28T10:10:09 | 272,701,231 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | import sys
input = sys.stdin.readline
n = int(input())
paper = [[0 for _ in range(100)] for _ in range(100)]
ans = 0
for _ in range(n):
y, x = map(int, input().split())
for i in range(y, y+10):
for j in range(x, x+10):
if not paper[i][j]:
paper[i][j] = 1
ans += 1
print(ans)
| [
"haesungoh414@gmail.com"
] | haesungoh414@gmail.com |
03097fb708d399c95e2aaad8f59df7478613eea5 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_258/ch81_2019_06_05_12_38_09_879499.py | cab21705c88ed0161cb1240956590b1c9a7fc3c7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | def interseccao_valores(dic1, dic2):
lista_dic1 = []
lista_dic2 = []
for e in dic1.values():
lista_dic1.append(e)
for k in dic2.values():
lista_dic2.append(k)
lista_interseccao = []
for m in range(0, len(lista_dic1)):
for n in range(0, len(lista_dic2)):
if lista_dic1[m] == lista_dic2[n]:
lista_interseccao.append(lista_dic1[m])
return lista_interseccao | [
"you@example.com"
] | you@example.com |
b84e96d2e5c7ab1fd2060b7a26ec821333dca8bc | 767c3ca94095db80c7c23577a93f85cf27dd0579 | /testcases/test_register.py | e83ce01d24f95e007513e92b9fb8c763ef8cdabe | [] | no_license | zhang0123456789/interface_test | 2e95c532c0fc5662f8ecba6e74c905b8374c2034 | d77b4e3c6deace6ae0a87893d62a5fa40cdca462 | refs/heads/master | 2022-12-12T13:45:24.194832 | 2019-01-05T01:31:29 | 2019-01-05T01:31:29 | 164,169,447 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | #!/usr/bin/env python
# -*- coding:utf-8-*-
#@author:蜜蜜
#@file: test_register.py
#@time: 2018/12/31
#@email:1402686685@qq.com
import json
import unittest
from ddt import ddt, data
from common import contants
from common.do_execl import DoExecl
from common.mysql_util import MysqlUtil
from common.request import Request
from common.logger2 import MyLog
do_excel = DoExecl(contants.cases_file)
cases = do_excel.get_cases('register')
@ddt
class TestRegister(unittest.TestCase):
@classmethod
def setUpClass(cls):
global mysql
mysql = MysqlUtil()
sql = 'select mobilephone from future.member where ' \
' mobilephone != ""order by mobilephone desc limit 1 '
global max_phone
max_phone = mysql.fetch_one(sql)['mobilephone']
# def setUp(self):
# # 查询最大手机号码
# self.mysql = MysqlUtil() #
# self.sql = 'select mobilephone from future.member where ' \
# ' mobilephone != "" order by mobilephone desc limit 1 '
#
# self.max_phone = self.mysql.fetch_one(self.sql)['mobilephone']
@data(*cases)
def test_register(self, case):
data = json.loads(case.data) # 将字符串序列化为字典
if data['mobilephone'] == '${register}': # 判断是否是需要进行参数化
data['mobilephone'] = int(max_phone) + 1 # 取到数据库里面最大的手机号码进行加1
MyLog.info('测试用例名称:{0}'.format(case.title))
MyLog.info('测试用例数据:{0}'.format(case.data))
MyLog.error('测试用例数据error')
resp = Request(method=case.method, url=case.url, data=data) # 通过封装的Request类来完成接口的调用
MyLog.debug('status_code:{0}'.format(resp.get_status_code()))
resp_dict = resp.get_json() # 获取请求响应,字典
self.assertEqual(case.expected, resp.get_text())
if resp_dict['code'] == 20110: # 注册成功的数据校验,判断数据库有这条数据
sql = 'select * from future.member where mobilephone = "{0}"'.format(max_phone)
expected = int(self.max_phone) + 1
member = self.mysql.fetch_one(sql)
if member is not None: # 正常注册成功就不应该返回None
self.assertEqual(expected,member['mobilephone'])
else:# 返回None则代表注册成功之后但是数据库里面没有插入数据
MyLog.error('注册失败')
raise AssertionError
# else:# 注册失败的数据校验,判断数据库没有这条数据,自己写
# def tearDown(self):
# self.mysql.close()
@classmethod
def tearDownClass(cls):
mysql.close()
| [
"1402686685@qq.com"
] | 1402686685@qq.com |
eb06ee00f22ecf3f7526bbf89d4810daa1b97b13 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/delegatednetwork/get_delegated_subnet_service_details.py | 1cea4ab12edb53966c5bbea1377cd526ee76b4bb | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 6,281 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDelegatedSubnetServiceDetailsResult',
'AwaitableGetDelegatedSubnetServiceDetailsResult',
'get_delegated_subnet_service_details',
]
@pulumi.output_type
class GetDelegatedSubnetServiceDetailsResult:
"""
Represents an instance of a orchestrator.
"""
def __init__(__self__, controller_details=None, id=None, location=None, name=None, provisioning_state=None, resource_guid=None, subnet_details=None, tags=None, type=None):
if controller_details and not isinstance(controller_details, dict):
raise TypeError("Expected argument 'controller_details' to be a dict")
pulumi.set(__self__, "controller_details", controller_details)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if subnet_details and not isinstance(subnet_details, dict):
raise TypeError("Expected argument 'subnet_details' to be a dict")
pulumi.set(__self__, "subnet_details", subnet_details)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="controllerDetails")
def controller_details(self) -> Optional['outputs.ControllerDetailsResponse']:
"""
Properties of the controller.
"""
return pulumi.get(self, "controller_details")
@property
@pulumi.getter
def id(self) -> str:
"""
An identifier that represents the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The current state of dnc delegated subnet resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
Resource guid.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="subnetDetails")
def subnet_details(self) -> Optional['outputs.SubnetDetailsResponse']:
"""
subnet details
"""
return pulumi.get(self, "subnet_details")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of resource.
"""
return pulumi.get(self, "type")
class AwaitableGetDelegatedSubnetServiceDetailsResult(GetDelegatedSubnetServiceDetailsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDelegatedSubnetServiceDetailsResult(
controller_details=self.controller_details,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
subnet_details=self.subnet_details,
tags=self.tags,
type=self.type)
def get_delegated_subnet_service_details(resource_group_name: Optional[str] = None,
                                         resource_name: Optional[str] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDelegatedSubnetServiceDetailsResult:
    """
    Represents an instance of a orchestrator.
    API Version: 2021-03-15.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the resource. It must be a minimum of 3 characters, and a maximum of 63.
    """
    # Build the raw argument dict expected by the RPC layer (camelCase keys).
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; `.value` is the typed result object.
    __ret__ = pulumi.runtime.invoke('azure-native:delegatednetwork:getDelegatedSubnetServiceDetails', __args__, opts=opts, typ=GetDelegatedSubnetServiceDetailsResult).value
    return AwaitableGetDelegatedSubnetServiceDetailsResult(
        controller_details=__ret__.controller_details,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        resource_guid=__ret__.resource_guid,
        subnet_details=__ret__.subnet_details,
        tags=__ret__.tags,
        type=__ret__.type)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
1d205dda89d185af991d054d0eca492e10d11142 | 5c94e032b2d43ac347f6383d0a8f0c03ec3a0485 | /Alesis_V/__init__.py | a0c19a33939f7a8259468648ba791b0f6717a22d | [] | no_license | Elton47/Ableton-MRS-10.1.13 | 997f99a51157bd2a2bd1d2dc303e76b45b1eb93d | 54bb64ba5e6be52dd6b9f87678ee3462cc224c8a | refs/heads/master | 2022-07-04T01:35:27.447979 | 2020-05-14T19:02:09 | 2020-05-14T19:02:09 | 263,990,585 | 0 | 0 | null | 2020-05-14T18:12:04 | 2020-05-14T18:12:03 | null | UTF-8 | Python | false | false | 966 | py | # uncompyle6 version 3.6.7
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.17 (default, Dec 23 2019, 21:25:33)
# [GCC 4.2.1 Compatible Apple LLVM 11.0.0 (clang-1100.0.33.16)]
# Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Alesis_V/__init__.py
# Compiled at: 2020-01-09 15:21:34
from __future__ import absolute_import, print_function, unicode_literals
from .Alesis_V import Alesis_V
from _Framework.Capabilities import controller_id, inport, outport, CONTROLLER_ID_KEY, PORTS_KEY, NOTES_CC, SCRIPT, REMOTE
def get_capabilities():
return {CONTROLLER_ID_KEY: controller_id(vendor_id=5042, product_ids=[
134, 135, 136], model_name=[
'V25', 'V49', 'V61']),
PORTS_KEY: [
inport(props=[NOTES_CC, SCRIPT, REMOTE]), outport(props=[SCRIPT])]}
def create_instance(c_instance):
return Alesis_V(c_instance) | [
"ahmed.emerah@icloud.com"
] | ahmed.emerah@icloud.com |
d3e388b971965bb7667505ef54d6f50b4e5d98c5 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ton_425/sdB_ton_425_coadd.py | f3713f8fcfe32e4dcaaa4e8452709e8086361d7c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[142.527417,31.716667], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_ton_425/sdB_ton_425_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_ton_425/sdB_ton_425_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
9b12d02cc2a2150cdd9759b246560d369ebebc9a | 8fc999f5262b5a2dadc830f1cc345f51b6dde862 | /samples/conceptual_samples/exceptional_handling/exceptional_handling.py | c0f88a8c6cdb9961b2c83bc310c86e153c0ed4de | [] | no_license | pandiyan07/python_2.x_tutorial_for_beginners_and_intermediate | 5ca5cb5fcfe7ce08d109fb32cdf8138176ac357a | a4c14deaa518fea1f8e95c2cc98783c8ca3bd4ae | refs/heads/master | 2022-04-09T20:33:28.527653 | 2020-03-27T06:35:50 | 2020-03-27T06:35:50 | 250,226,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # this is a sample python script program which is created to demonstrate the exceptional handling concept in the python
def get_number():
"the function returns a float number"
number=float(input("enter a float number:\n"))
return number
exit(0)
while True:
try:
print get_number()
break
except:
print"\nYou have entered a wrong value."
print"\nPlease enter a value that is integer or a float value"
else:
print"there is a error over here, better be carefully about executing it..!!"
# this is the end of the program file. happy coding..!! | [
"becool.pandiyan@gmail.com"
] | becool.pandiyan@gmail.com |
0dd328e28c261b6378ae5bb07c13860fccdbabd7 | b6a84594f8c29d968014faaddd49abeb7537a5fc | /python/443.string-compression.py | 3159cf2b852f7a4b099661f5428f9a01ceb3108e | [] | no_license | nickyfoto/lc | 8a6af3df114e693e265d0ede03f4d4e1283e010e | 3633b4df3e24968057c7d684689b931c5a8032d3 | refs/heads/master | 2020-09-16T19:23:07.765917 | 2020-06-07T17:18:06 | 2020-06-07T17:18:06 | 223,866,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | #
# @lc app=leetcode id=443 lang=python3
#
# [443] String Compression
#
# https://leetcode.com/problems/string-compression/description/
#
# algorithms
# Easy (37.79%)
# Total Accepted: 56.7K
# Total Submissions: 149.2K
# Testcase Example: '["a","a","b","b","c","c","c"]'
#
# Given an array of characters, compress it in-place.
#
# The length after compression must always be smaller than or equal to the
# original array.
#
# Every element of the array should be a character (not int) of length 1.
#
# After you are done modifying the input array in-place, return the new length
# of the array.
#
#
# Follow up:
# Could you solve it using only O(1) extra space?
#
#
# Example 1:
#
#
# Input:
# ["a","a","b","b","c","c","c"]
#
# Output:
# Return 6, and the first 6 characters of the input array should be:
# ["a","2","b","2","c","3"]
#
# Explanation:
# "aa" is replaced by "a2". "bb" is replaced by "b2". "ccc" is replaced by
# "c3".
#
#
#
#
# Example 2:
#
#
# Input:
# ["a"]
#
# Output:
# Return 1, and the first 1 characters of the input array should be: ["a"]
#
# Explanation:
# Nothing is replaced.
#
#
#
#
# Example 3:
#
#
# Input:
# ["a","b","b","b","b","b","b","b","b","b","b","b","b"]
#
# Output:
# Return 4, and the first 4 characters of the input array should be:
# ["a","b","1","2"].
#
# Explanation:
# Since the character "a" does not repeat, it is not compressed. "bbbbbbbbbbbb"
# is replaced by "b12".
# Notice each digit has it's own entry in the array.
#
#
#
#
# Note:
#
#
# All characters have an ASCII value in [35, 126].
# 1 <= len(chars) <= 1000.
#
#
#
class Solution:
    # def compress(self, chars: List[str]) -> int:
    def compress(self, chars):
        """LeetCode 443: run-length compress `chars` in place, return new length.

        Each run of a repeated character is replaced by the character followed
        by the digits of the run length (runs of length 1 get no digits).
        `chars` is rewritten to exactly the compressed content.

        Fixes vs. the original:
        - an empty input no longer raises IndexError (returns 0);
        - the quadratic pop()/insert() shuffling is replaced by one O(n)
          pass whose result is written back with a slice assignment.
        """
        if not chars:
            return 0
        compressed = []
        run_start = 0
        for i in range(1, len(chars) + 1):
            # A run ends at the end of the list or when the character changes.
            if i == len(chars) or chars[i] != chars[run_start]:
                compressed.append(chars[run_start])
                run_len = i - run_start
                if run_len > 1:
                    # Multi-digit counts become one list element per digit.
                    compressed.extend(str(run_len))
                run_start = i
        chars[:] = compressed  # in place, so the caller's list is updated
        return len(chars)
# s = Solution()
# chars = ["a","a","b","b","c","c","c"]
# print(s.compress(chars))
# chars = ["a","b","b","b","b","b","b","b","b","b","b","b","b"]
# print(s.compress(chars))
# chars = ['a']
# print(s.compress(chars))
| [
"nickyfoto@gmail.com"
] | nickyfoto@gmail.com |
9aeca146b15c38122ca0078e969386954136da3f | 2cb6294bc2a92b082edb34110937902bf5227303 | /6/CalThreeKingdomV1.py | b218d853adc19d8edb2ecf544703849e77f8dbe7 | [] | no_license | arnoqlk/icourse163-Python | f1e08a43a833f278c64fa79d57e0b6a261895b0b | 2766e500151fc7990617a9e3f9df3af5e259f960 | refs/heads/master | 2020-04-15T22:45:38.695397 | 2019-01-11T03:13:28 | 2019-01-11T03:13:28 | 165,085,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # 三国演义人物出场统计排序
import jieba
txt = open(r"D:\PyCode\6\threekingdoms.txt", "r", encoding="utf-8").read()
# jieba库错误分析为人名的词语
excludes = {"将军","却说","荆州","二人","不可","不能","如此"}
words = jieba.lcut(txt)
counts = {}
for word in words:
if len(word) == 1:
continue
elif word == "诸葛亮" or word == "孔明曰":
rword = "孔明"
elif word == "关公" or word == "云长":
rword = "关羽"
elif word == "玄德" or word == "玄德曰":
rword = "刘备"
elif word == "孟德" or word == "丞相":
rword = "曹操"
else:
rword = word
counts[rword] = counts.get(rword, 0) + 1
for word in excludes:
del counts[word]
items = list(counts.items())
items.sort(key=lambda x:x[1], reverse=True)
for i in range(10):
word, count = items[i]
print("{0:<10}{1:>5}".format(word, count))
| [
"123@gmail.com"
] | 123@gmail.com |
bdd48947c531b535b04e28f94a17564d959c22fe | fa2ffc5487bef8240a1a5c7cfb893c234df21ee0 | /modelformproject/modelformproject/settings.py | c9a0a0c311cb15f9ef257ce997acb17937e0fc42 | [] | no_license | sandipdeshmukh77/django-practice-projects | cfd4d8f29aa74832ed3dc5501a651cba3f201251 | 78f9bd9f0fac4aaeecce4a94e88c6880e004b873 | refs/heads/main | 2023-02-11T05:59:28.029867 | 2020-12-29T22:12:52 | 2020-12-29T22:12:52 | 325,446,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,252 | py | """
Django settings for modelformproject project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATES_DIR=os.path.join(BASE_DIR,'templates')
STATIC_DIR=os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e4@wu))+2^3^8xpw^)dag3fsx*jwv)7bcq$+5pyoev(tp*kto!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'modelformproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'modelformproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[STATIC_DIR,]
| [
"sandipdeshmukh77@gmail.com"
] | sandipdeshmukh77@gmail.com |
b53c0419d05dd7cd3d70cc10ab5ff7381ba63d1d | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007/applications/network/wengophone/actions.py | ce51c4bbb7335818be785452d31ef95f3c286ab3 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import scons
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
def build():
scons.make("mode=release-symbols \
enable-shared-portaudio=no enable-shared-webcam=no \
enable-shared-wengocurl=no enable-shared-phapi=no \
softphone-runtime softphone")
def install():
scons.install("prefix=%s/usr mode=release-symbols softphone-install" % get.installDIR())
pisitools.dosed("%s/usr/bin/wengophone" % get.installDIR(), get.installDIR(), "")
shelltools.chmod("%s/usr/bin/wengophone" % get.installDIR())
pisitools.insinto("/usr/share/pixmaps", "wengophone.png")
pisitools.insinto("/usr/share/applications", "wengophone.desktop")
pisitools.dodoc("COPYING", "TODO", "README*")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
d42b0dfc0e08ccb4a075e3b8c1c12a1368c26efc | b149a744da7b512d9ec2bfc3c0d855638d23d7fb | /docs/conf.py | d24946fe260123e404a39a2deb04ff9c0a4c97b0 | [
"Apache-2.0"
] | permissive | codeprimate123/imaps | 250c8f7c6b71fd1725c676a70b2f3b171a1e75e2 | 241bcd586013c43db8aa4dcb2d42058ac9d142f8 | refs/heads/master | 2020-09-01T13:58:44.124344 | 2019-12-09T16:48:05 | 2019-12-09T16:48:05 | 218,973,850 | 0 | 0 | Apache-2.0 | 2019-11-01T11:36:12 | 2019-11-01T11:36:12 | null | UTF-8 | Python | false | false | 1,890 | py | import imp
import os
import shlex
import sys
import sphinx_rtd_theme
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# Get package metadata from 'imaps/__about__.py' file
about = {}
with open(os.path.join(base_dir, 'imaps', '__about__.py')) as f:
exec(f.read(), about)
# -- General configuration ------------------------------------------------
# The extension modules to enable.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx_rtd_theme',
]
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = about['__summary__']
version = about['__version__']
release = version
author = about['__author__']
copyright = about['__copyright__']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = 'imapsdoc'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"zmrzlikar.jure@gmail.com"
] | zmrzlikar.jure@gmail.com |
a7f4b5315497455f122da51f24d9c8695537822c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02722/s256113021.py | df12675f2b68c6a34a4ddfe3d05d28fbbdf84c3c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | import sys, math
from itertools import permutations, combinations
from collections import defaultdict, Counter, deque
from math import factorial#, gcd
from bisect import bisect_left #bisect_left(list, value)
sys.setrecursionlimit(10**7)
enu = enumerate
MOD = 10**9+7
def input(): return sys.stdin.readline()[:-1]
def pri(x): print('\n'.join(map(str, x)))
def prime_decomposition(n):
i = 2
table = []
while i*i <= n:
while n%i == 0:
n //= i
table.append(i)
i += 1
if n > 1:
table.append(n)
return table
def prime_decomposition2(n):
i = 2
table = defaultdict(int)
while i*i <= n:
while n%i == 0:
n //= i
table[i] += 1
i += 1
if n > 1:
table[n] += 1
return table
def make_divisor(n):
divisors = []
for i in range(1, int(n**0.5)+1):
if n%i == 0:
divisors.append(i)
if i != n//i:
divisors.append(n//i)
return divisors
N = int(input())
list_pd1 = make_divisor(N)
list_pd1.sort()
dict_pd2 = prime_decomposition2(N-1)
#print(N, ':', list_pd1)
#print(N-1, ':', dict_pd2)
cnt = 1
# -1 nohou
for val in dict_pd2.values():
cnt *= (val+1)
cnt -= 1
#print(cnt)
for k in list_pd1[1:]:
#print('k:', k)
sN = N
while sN >= k:
if sN%k==0:
sN //= k
else:
sN %= k
if sN == 1:
cnt += 1
print(cnt)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d91558179efd30f1c3e5e855ef97de00d8721ad3 | 58cfad962e57b935e7782bb214a2008d689751d6 | /xero_python/payrolluk/models/employee_leave_type_object.py | 352b44d1e9d4189a25aaae9039a94a68a273e287 | [
"MIT"
] | permissive | XeroAPI/xero-python | ce43c060c216a42efd5f47159987468deb0e4622 | 07efa3bfc87a3bd08ba217dd2b642f6a3515ddff | refs/heads/master | 2023-07-21T04:01:27.461727 | 2023-07-11T02:35:44 | 2023-07-11T02:35:44 | 240,158,613 | 109 | 42 | MIT | 2023-07-11T02:35:45 | 2020-02-13T02:17:05 | Python | UTF-8 | Python | false | false | 3,178 | py | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class EmployeeLeaveTypeObject(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"pagination": "Pagination",
"problem": "Problem",
"leave_type": "EmployeeLeaveType",
}
attribute_map = {
"pagination": "pagination",
"problem": "problem",
"leave_type": "leaveType",
}
def __init__(self, pagination=None, problem=None, leave_type=None): # noqa: E501
"""EmployeeLeaveTypeObject - a model defined in OpenAPI""" # noqa: E501
self._pagination = None
self._problem = None
self._leave_type = None
self.discriminator = None
if pagination is not None:
self.pagination = pagination
if problem is not None:
self.problem = problem
if leave_type is not None:
self.leave_type = leave_type
@property
def pagination(self):
"""Gets the pagination of this EmployeeLeaveTypeObject. # noqa: E501
:return: The pagination of this EmployeeLeaveTypeObject. # noqa: E501
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""Sets the pagination of this EmployeeLeaveTypeObject.
:param pagination: The pagination of this EmployeeLeaveTypeObject. # noqa: E501
:type: Pagination
"""
self._pagination = pagination
@property
def problem(self):
"""Gets the problem of this EmployeeLeaveTypeObject. # noqa: E501
:return: The problem of this EmployeeLeaveTypeObject. # noqa: E501
:rtype: Problem
"""
return self._problem
@problem.setter
def problem(self, problem):
"""Sets the problem of this EmployeeLeaveTypeObject.
:param problem: The problem of this EmployeeLeaveTypeObject. # noqa: E501
:type: Problem
"""
self._problem = problem
@property
def leave_type(self):
"""Gets the leave_type of this EmployeeLeaveTypeObject. # noqa: E501
:return: The leave_type of this EmployeeLeaveTypeObject. # noqa: E501
:rtype: EmployeeLeaveType
"""
return self._leave_type
@leave_type.setter
def leave_type(self, leave_type):
"""Sets the leave_type of this EmployeeLeaveTypeObject.
:param leave_type: The leave_type of this EmployeeLeaveTypeObject. # noqa: E501
:type: EmployeeLeaveType
"""
self._leave_type = leave_type
| [
"sid.maestre@gmail.com"
] | sid.maestre@gmail.com |
0ccc0e47066590b574b24615b6b772781536b4e1 | bfdde1d758e9b366f0eee27599e56322340e93e0 | /version1/feature4StructuralVAr/f4StrucVarTesting/Bayesian_Classifier.py | 3c1caaed13808eaf1002f0a67a910966d26125de | [] | no_license | A-Infinite/Sarcasm-Detection | cb22db5c38705a5d1c3ed10bbdbf1ba4d9a16227 | fd42ece85a9412b0e6b377874c82fe9544a19701 | refs/heads/master | 2020-03-08T14:13:02.085085 | 2018-06-05T07:39:03 | 2018-06-05T07:39:03 | 128,179,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | import numpy as np
import csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.naive_bayes import GaussianNB
x = []
y = []
with open('feature4StrucVar.csv') as csvfile:
reader = csv.reader(csvfile, delimiter = ' ')
for row in reader:
x.append(row[0: (len(row))])
for i in x:
i[0] = i[0].split(',')
y.append(i[0][-1])
del i[0][-1]
X = []
for i in x:
X.append(i[0])
Y = []
for i in y:
Y.append(i)
#print(str(x[0]) + "\n")
#print(str(x[0]) + " " + str(y[4000]) + "\n")
#X = np.asarray(X)
#Y = np.asarray(Y)
x = []
y = []
for i in X:
temp = []
for j in i:
temp.append(float(j))
x.append(temp)
for i in Y:
temp = []
for j in i:
temp.append(float(j))
y.append(temp)
#print(y[0])
x = np.asarray(x)
y = np.asarray(y)
#print(x[0])
#Naive Bayes Classifier
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.1, random_state = 42)
clfnb = GaussianNB()
clfnb.fit(x_train, y_train)
print("Naive Bayes classifier : ")
print(clfnb.score(x_test, y_test))
print("\n")
#****************************************************************************************** | [
"arushi131.bhatt@gmail.com"
] | arushi131.bhatt@gmail.com |
9a10bb495cf4fc83b00c8d9c97248edd59a5dfc9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/323/usersdata/284/89341/submittedfiles/mdc.py | d1667dac5fa29ea45712274a24489656ba05b237 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # -*- coding: utf-8 -*-
import math
def divisor(x):
x=int(input('digite o valor:'))
for n in range(1,x+1,1):
if(x%n==0):
return(n)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
74d26786f508654108a2147365f55668187dad26 | 9044b440bed2b8407ed9e04f7fb9d3d2a7593136 | /recommendation/knet/krecall/ops/openblas_top_k/test.py | 942125ff70e3d537f3ca6765d04febc83c0cce72 | [] | no_license | xuzhezhaozhao/ai | d4264f5d15cc5fa514e81adb06eb83731a0ca818 | 925cbd31ad79f8827e2c3c706f4b51910f9f85d1 | refs/heads/master | 2022-01-22T07:04:29.082590 | 2022-01-17T06:49:39 | 2022-01-17T06:49:39 | 136,691,051 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | #! /usr/bin/env python
# -*- coding=utf8 -*-
import tensorflow as tf
import numpy as np
import struct
openblas_top_k_ops = tf.load_op_library('openblas_top_k_ops.so')
openblas_top_k = openblas_top_k_ops.openblas_top_k
WEIGHTS_PATH = 'weights.bin'
BIASES_PATH = 'biases.bin'
weights = np.arange(100).reshape([20, 5]).astype(np.float)
biases = np.array([0.1]*20)
def save_numpy_float_array(array, filename):
with open(filename, 'wb') as f:
for d in array.shape:
f.write(struct.pack('<q', d))
fl = array.flat
for v in fl:
f.write(struct.pack('<f', v))
save_numpy_float_array(weights, WEIGHTS_PATH)
save_numpy_float_array(biases, BIASES_PATH)
sess = tf.Session()
user_vector = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
values, indices = openblas_top_k(input=user_vector, k=5,
weights_path=WEIGHTS_PATH,
biases_path=BIASES_PATH)
values = sess.run(values)
indices = sess.run(indices)
print(values)
print(indices)
| [
"zhezhaoxu@tencent.com"
] | zhezhaoxu@tencent.com |
9a337d7549581b57b42242cdd52a155c5dcec46e | 55cf061ccf7cff9e02190ea8dec10a3fc5e82729 | /tutorials/3.CodeOrganization/Person.py | 8b427a49315da90747d8f4f3b9f8e0a3baab66e0 | [
"MIT"
] | permissive | Jess3Jane/pyforms | a9e491310590f510ece910beabb2ea291273cfa1 | f34816db018f05c581ede42804771faa39e78824 | refs/heads/master | 2021-04-26T23:53:08.877082 | 2018-03-05T06:36:07 | 2018-03-05T06:36:07 | 123,875,438 | 0 | 0 | MIT | 2018-03-05T06:33:19 | 2018-03-05T06:33:19 | null | UTF-8 | Python | false | false | 289 | py |
class Person(object):
    """Simple holder for the three parts of a person's name."""

    def __init__(self, firstName, middleName, lastName):
        # Parts are kept private; fullName derives from them on demand.
        self._firstName = firstName
        self._middleName = middleName
        self._lastName = lastName

    @property
    def fullName(self):
        """The first, middle and last names joined by single spaces."""
        parts = (self._firstName, self._middleName, self._lastName)
        return "{0} {1} {2}".format(*parts)
"ricardojvr@gmail.com"
] | ricardojvr@gmail.com |
884c23b9dde9349f41a3614ef6a4675579014561 | 5b4c803f68e52849a1c1093aac503efc423ad132 | /UnPyc/tests/tests/CFG/2/pass/pass_while+else_try+except+else+finally_.py | 08b949ad1aa4d769c103db68c1a684ce80ac8cfa | [] | no_license | Prashant-Jonny/UnPyc | 9ce5d63b1e0d2ec19c1faa48d932cc3f71f8599c | 4b9d4ab96dfc53a0b4e06972443e1402e9dc034f | refs/heads/master | 2021-01-17T12:03:17.314248 | 2013-02-22T07:22:35 | 2013-02-22T07:22:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | try:
while 1:
pass
else:
pass
except:
while 1:
pass
else:
pass
else:
while 1:
pass
else:
pass
finally:
while 1:
pass
else:
pass
| [
"d.v.kornev@gmail.com"
] | d.v.kornev@gmail.com |
ebb1eba75e644fcc50606ed8fd173e72a66784ad | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2788/60617/264414.py | 2b2b3b9b9de2320c65e7ff5f40cb8ce89ca0f4d8 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | def dance():
n=int(input())
boys=list(map(int, input().split(" ")))
m=int(input())
girls=list(map(int, input().split(" ")))
pairs=0
boys.sort()
girls.sort()
if n<=m:
for ele in boys:
if ele-1 in girls:
pairs+=1
girls.remove(ele-1)
elif ele in girls:
pairs+=1
girls.remove(ele)
elif ele+1 in girls:
pairs+=1
girls.remove(ele+1)
else:
for ele in girls:
if ele-1 in boys:
pairs+=1
boys.remove(ele-1)
elif ele in boys:
pairs+=1
boys.remove(ele)
elif ele+1 in boys:
pairs+=1
boys.remove(ele+1)
print(pairs)
if __name__=='__main__':
dance() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
d458f4e83c13f6a8060121c872a13308240f3fc4 | 2fe58e7f6bfc3efdb78ca56f72a4e2a75a24c270 | /eric/eric6/Plugins/UiExtensionPlugins/Translator/ConfigurationPage/__init__.py | c589525eb78455712c21edf6336a189a21bfd13e | [] | no_license | testerclub/eric6-20.3 | 3053e0e6962060b213f5df329ee331a4893d18e6 | bba0b9f13fa3eb84938422732d751219bc3e29e2 | refs/heads/master | 2023-03-18T08:24:03.472297 | 2020-03-14T06:44:14 | 2020-03-14T06:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2014 - 2020 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Package implementing the Translator page of the configuration dialog.
"""
| [
"skeptycal@gmail.com"
] | skeptycal@gmail.com |
ac73581c07d933e9d8e3d52a3f3a553ed7d1a77b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03680/s252443262.py | 3378adf8c331e679f71f21f1cc7f8e53ad921529 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import sys
from sys import exit
from collections import deque
from bisect import bisect_left, bisect_right, insort_left, insort_right #func(リスト,値)
from heapq import heapify, heappop, heappush
sys.setrecursionlimit(10**6)
INF = 10**20
def mint():
return map(int,input().split())
def lint():
return map(int,input().split())
N = int(input())
a = [int(input()) for _ in range(N)]
tmp = 1
for i in range(1,N+1):
tmp = a[tmp-1]
if tmp==2:
print(i)
exit()
print(-1) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
46fe99471204a18d79b8e197d11e569f943d6c86 | 07e2e27f20531452bb070597803a52f1e4f4e4a0 | /average.py | b4d6974dfd0ee869169925f5589f308fc315d0d5 | [] | no_license | ashantisease19/lab-08-loops | de413028d7ede6aee0cf98def3aa63b4bc3ba066 | 0117f23dd1371b01bc31fdb3f8aa952bf1a28516 | refs/heads/master | 2023-04-07T08:02:27.834787 | 2021-04-11T03:16:33 | 2021-04-11T03:16:33 | 354,944,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | average = 0
sum = 0
for i in range (0,4,1):
userinput = input("Just give me a number.")
usernum = int(userinput, 10)
sum = sum + usernum
print("So you put the number " + str(usernum) + " and the current sum is " + str(sum))
average = sum / 4
print("Okay, bro, so the average is " + str(average))
| [
"you@example.com"
] | you@example.com |
eb1c2d999d1c52084a21a951371b6816ed211083 | 1d1c1dce863a4e8b6c9987e9c50fa46908aa0ff6 | /pipeline/feature-extraction/seri/extraction_lbp_top_codebook_final.py | c8d81fa0b1fc3d0357cfff5d8307e440ad06ff20 | [] | no_license | I2Cvb/lemaitre-2016-apr | 4692f64b365c3e8095c96944431e8e03bc9ecc7e | 266dc93026fa70c83a34790c1bd9ae14a23492ba | refs/heads/master | 2021-01-18T19:30:26.845275 | 2016-03-19T21:11:11 | 2016-03-19T21:11:11 | 54,284,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,067 | py | #title :extraction_codebook.py
#description :This will create a header for a python script.
#author :Guillaume Lemaitre
#date :2015/06/07
#version :0.1
#notes :
#python_version :2.7.6
#==============================================================================
# Import the needed libraries
# Numpy library
import numpy as np
# Panda library
import pandas as pd
# OS library
import os
from os.path import join
# SYS library
import sys
# Joblib library
### Module to performed parallel processing
from joblib import Parallel, delayed
# Multiprocessing library
import multiprocessing
from protoclass.extraction.codebook import *
#########################################################################
### Definition of the parallel codebook
def CBComputation(idx_test, (pat_test_norm, pat_test_dme),
filename_normal, filename_dme, nw):
pat_train_norm = np.delete(filename_normal, idx_test)
pat_train_dme = np.delete(filename_dme, idx_test)
# Open the current training data
training_data = np.concatenate((np.concatenate([get_lbp_data(f) for f in pat_train_norm],
axis=0),
np.concatenate([get_lbp_data(f) for f in pat_train_dme],
axis=0)),
axis=0)
print 'The size of the training dataset is {}'.format(training_data.shape)
# Create the codebook using the training data
num_cores = 8
cbook = [CodeBook(n_words=w, init='k-means++', n_jobs=num_cores, n_init=5)
for w in nw]
# Fit each code book for the data currently open
for idx_cb, c in enumerate(cbook):
print 'Fitting for dictionary with {} words'.format(nw[idx_cb])
c.fit(training_data)
return cbook
################################################################################################
################################################################################################
# Define the number of words
nb_words = [int(sys.argv[3])]
################################################################################################
# Read the csv file with the ground truth
gt_csv_filename = '/work/le2i/gu5306le/retinopathy/OCT/SERI/data.csv'
gt_csv = pd.read_csv(gt_csv_filename)
gt = gt_csv.values
data_filename = gt[:, 0]
# Get the good extension
radius = sys.argv[1]
data_filename = np.array([f + '_nlm_flatten_lbp_' + str(radius) + '_hist.npz' for f in data_filename])
label = gt[:, 1]
label = ((label + 1.) / 2.).astype(int)
from collections import Counter
count_gt = Counter(label)
if (count_gt[0] != count_gt[1]):
raise ValueError('Not balanced data.')
else:
# Split data into positive and negative
# TODO TACKLE USING PERMUTATION OF ELEMENTS
filename_normal = data_filename[label == 0]
filename_dme = data_filename[label == 1]
# Get the input folder where the information are located
input_folder = sys.argv[2]
# Build the data folder from the radius given
data_folder = join(input_folder, 'r_' + str(radius) + '_hist_npz')
# Open the data
### Features
get_lbp_data = lambda f: np.load(join(data_folder, f))['vol_lbp_top_hist']
# Compute a codebook for each fold
codebook_list = []
for idx_test, (pat_test_norm, pat_test_dme) in enumerate(zip(filename_normal, filename_dme)):
codebook_list.append(CBComputation(idx_test, (pat_test_norm, pat_test_dme),
filename_normal, filename_dme, nb_words))
# We have to store the final codebook
# Give the location of the random codebook previously generated
codebook_type = 'codebook_final'
codebook_path = join(data_folder, codebook_type)
codebook_filename = join(codebook_path, 'codebook.pkl')
if not os.path.exists(codebook_path):
os.makedirs(codebook_path)
from sklearn.externals import joblib
joblib.dump(codebook_list, codebook_filename)
| [
"glemaitre@visor.udg.edu"
] | glemaitre@visor.udg.edu |
1d5ecb9898306e73daa11e2c834b4fa76e3d4b76 | 7826681647933249c8949c00238392a0128b4a18 | /cosypose/utils/resources.py | 0d0042740659a09b95b1032cea0c91c7fe8516b9 | [
"MIT"
] | permissive | imankgoyal/cosypose | b35678a32a6491bb15d645bc867f4b2e49bee6d2 | fa494447d72777f1d3bd5bd134d79e5db0526009 | refs/heads/master | 2022-12-09T11:18:23.188868 | 2020-08-31T15:34:02 | 2020-08-31T15:34:02 | 291,834,596 | 2 | 0 | MIT | 2020-08-31T22:06:12 | 2020-08-31T22:06:11 | null | UTF-8 | Python | false | false | 517 | py | import os
import psutil
from shutil import which
def is_egl_available():
return is_gpu_available and 'EGL_VISIBLE_DEVICES' in os.environ
def is_gpu_available():
return which('nvidia-smi') is not None
def is_slurm_available():
return which('sinfo') is not None
def get_total_memory():
current_process = psutil.Process(os.getpid())
mem = current_process.memory_info().rss
for child in current_process.children(recursive=True):
mem += child.memory_info().rss
return mem / 1e9
| [
"labbe.yann1994@gmail.com"
] | labbe.yann1994@gmail.com |
4985a0fa1bd9664fb90cd7db6fe4ebf87eb5bf80 | 7ef39106ff091002c3a22781628fe3076a0941b0 | /bindings/python/pyiree/rt/vm_test.py | ed7e66f7b3ab0b3c19c8bde23f8fab9ccec738e7 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | YashRajSingh-4799/iree | 798c01b44696f1360014075f4eca275c7a4dc87f | ace19e886efe3702bfe7b17185a5daaa20808e82 | refs/heads/master | 2022-04-20T23:52:58.303107 | 2020-04-23T01:39:08 | 2020-04-23T02:47:41 | 258,150,320 | 2 | 0 | Apache-2.0 | 2020-04-23T09:09:26 | 2020-04-23T09:09:26 | null | UTF-8 | Python | false | false | 5,127 | py | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-variable
from absl.testing import absltest
import numpy as np
from pyiree import compiler
from pyiree import rt
def create_simple_static_mul_module():
ctx = compiler.Context()
input_module = ctx.parse_asm("""
func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
attributes { iree.module.export } {
%0 = "xla_hlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
return %0 : tensor<4xf32>
}
""")
binary = input_module.compile()
m = rt.VmModule.from_flatbuffer(binary)
return m
def create_simple_dynamic_abs_module():
ctx = compiler.Context()
# TODO(laurenzo): Compile for more backends as dynamic shapes come online.
target_backends = ["vmla"]
input_module = ctx.parse_asm("""
func @simple_mul(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32>
attributes { iree.module.export } {
%0 = "xla_hlo.abs"(%arg0) : (tensor<?x?xf32>) -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
""")
binary = input_module.compile(target_backends=target_backends)
m = rt.VmModule.from_flatbuffer(binary)
return m
class VmTest(absltest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
driver_names = rt.HalDriver.query()
print("DRIVER_NAMES =", driver_names)
cls.driver = rt.HalDriver.create("vmla")
cls.device = cls.driver.create_default_device()
cls.hal_module = rt.create_hal_module(cls.device)
cls.htf = rt.HostTypeFactory.get_numpy()
def test_variant_list(self):
l = rt.VmVariantList(5)
print(l)
self.assertEqual(l.size, 0)
def test_context_id(self):
instance = rt.VmInstance()
context1 = rt.VmContext(instance)
context2 = rt.VmContext(instance)
self.assertGreater(context2.context_id, context1.context_id)
def test_module_basics(self):
m = create_simple_static_mul_module()
f = m.lookup_function("simple_mul")
self.assertGreater(f.ordinal, 0)
notfound = m.lookup_function("notfound")
self.assertIs(notfound, None)
def test_dynamic_module_context(self):
instance = rt.VmInstance()
context = rt.VmContext(instance)
m = create_simple_static_mul_module()
context.register_modules([self.hal_module, m])
def test_static_module_context(self):
m = create_simple_static_mul_module()
print(m)
instance = rt.VmInstance()
print(instance)
context = rt.VmContext(instance, modules=[self.hal_module, m])
print(context)
def test_dynamic_shape_compile(self):
m = create_simple_dynamic_abs_module()
print(m)
instance = rt.VmInstance()
print(instance)
context = rt.VmContext(instance, modules=[self.hal_module, m])
print(context)
def test_synchronous_dynamic_shape_invoke_function(self):
m = create_simple_dynamic_abs_module()
instance = rt.VmInstance()
context = rt.VmContext(instance, modules=[self.hal_module, m])
f = m.lookup_function("simple_mul")
abi = context.create_function_abi(self.device, self.htf, f)
print("INVOKING:", abi)
arg0 = np.array([[-1., 2.], [3., -4.]], dtype=np.float32)
inputs = abi.raw_pack_inputs((arg0,))
print("INPUTS:", inputs)
allocated_results = abi.allocate_results(inputs, static_alloc=False)
print("ALLOCATED RESULTS:", allocated_results)
print("--- INVOKE:")
context.invoke(f, inputs, allocated_results)
print("--- DONE.")
results = abi.raw_unpack_results(allocated_results)
print("RESULTS:", results)
np.testing.assert_allclose(results[0], [[1., 2.], [3., 4.]])
def test_synchronous_invoke_function(self):
m = create_simple_static_mul_module()
instance = rt.VmInstance()
context = rt.VmContext(instance, modules=[self.hal_module, m])
f = m.lookup_function("simple_mul")
abi = context.create_function_abi(self.device, self.htf, f)
print("INVOKING:", abi)
arg0 = np.array([1., 2., 3., 4.], dtype=np.float32)
arg1 = np.array([4., 5., 6., 7.], dtype=np.float32)
inputs = abi.raw_pack_inputs((arg0, arg1))
print("INPUTS:", inputs)
allocated_results = abi.allocate_results(inputs, static_alloc=False)
print("ALLOCATED RESULTS:", allocated_results)
print("--- INVOKE:")
context.invoke(f, inputs, allocated_results)
print("--- DONE.")
results = abi.raw_unpack_results(allocated_results)
print("RESULTS:", results)
np.testing.assert_allclose(results[0], [4., 10., 18., 28.])
if __name__ == "__main__":
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
98fd540f8660d0e5851214ffcfc28a448989e90e | 06a863150a7a3a7bfc0c341b9c3f267727606464 | /lib/gii/core/AssetUtils.py | 260e5649c1ed35c15c0ad503ee6b87826f8902a3 | [
"MIT"
] | permissive | brucelevis/gii | c843dc738a958b4a2ffe42178cff0dd04da44071 | 03624a57cf74a07e38bfdc7f53c50bd926b7b5a7 | refs/heads/master | 2020-10-02T00:41:02.723597 | 2016-04-08T07:44:45 | 2016-04-08T07:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | import os
import os.path
import subprocess
import platform
import ctypes
##TOOL Functions
def openFileInOS(path):
sysName=platform.system()
if sysName=='Darwin':
subprocess.call(["open", path])
elif sysName == 'Windows':
os.startfile( os.path.normpath(path) )
#TODO:linux?
def showFileInBrowser(path):
sysName=platform.system()
if sysName=='Darwin':
subprocess.call(["open", "--reveal", path])
elif sysName == 'Windows':
ctypes.windll.shell32.ShellExecuteW(None, u'open', u'explorer.exe', u'/n,/select, ' + os.path.normpath(path), None, 1)
#TODO:linux? | [
"tommo.zhou@gmail.com"
] | tommo.zhou@gmail.com |
5d29772916a157b070f30c565edce75aee066945 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /120/120.triangle.249953022.Runtime-Error.leetcode.py | bb813fc6a70d765796b9ff45665c1b95a1f683dc | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | class Solution(object):
def minimumTotal(self, triangle):
for row in triangle[::-1]:
for col in range(len(triangle[row])):
triangle[row][col] += min(triangle[row + 1][col], triangle[row + 1][col + 1])
return triangle[0][0]
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
3282e7d18794deb4d673a620e223b8ab2c976279 | 3d90d6753cbb48c74dc8e72f4a886635a706ee16 | /day20-django之数据库外键多表,ajax/application/application/settings.py | 07dcebd87c9f8e60a649fde1b389c928c01bb465 | [] | no_license | shun-zi/python | 01354dfc23e470c67ae6adc323b7b23c446faf95 | 9b9851a608cfa18392464b7d887659ced8eb58a6 | refs/heads/master | 2021-09-12T23:15:35.586858 | 2018-04-22T12:32:41 | 2018-04-22T12:32:41 | 113,460,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,322 | py | """
Django settings for application project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yuqr0@1y0kt_)oib%&o2b_=q=78d4=c^q4cr7=-o%(l5nlwid^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'application_host',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'application.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'application.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'application',
'USER': 'root',
'PASSWORD': 'z960520@',
"HOST": "localhost",
"port": '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/statics/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'statics'),
)
| [
"mac@macdeMacBook-Pro.local"
] | mac@macdeMacBook-Pro.local |
b7bf9d90e7b82119aa39546ac07392e2794321d0 | d916d9f15b039abe4f824d58714e0c3277939711 | /Encapsulation/Encapsulation-Lab/01_person.py | 2924a2a7ca413bcab6ea528fd27ca351de366977 | [] | no_license | jesalshah14/SoftUni-Python-OOP-February-2021 | a641e31e7144785cd05b0b6324348570ff90d7d7 | 45a584316951bca4d1bcfe35861f140d9fedf62a | refs/heads/main | 2023-04-09T20:15:32.764633 | 2021-04-18T15:29:57 | 2021-04-18T15:29:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | class Person:
def __init__(self, name, age):
self.__name = name
self.__age = age
def get_name(self):
return self.__name
def get_age(self):
return self.__age
# class Person:
# def __init__(self, name, age):
# self.__name = name
# self.__age = age
#
# @property
# def name(self):
# return self.__name
#
# @name.setter
# def name(self, value):
# self.__name = value
#
# @property
# def age(self):
# return self.__age
#
# @age.setter
# def age(self, value):
# self.__age = value
# class Person:
# def __init__(self, name, age):
# self.name = name
# self.age = age
#
# @property
# def name(self):
# return self.__name
#
# @name.setter
# def name(self, value):
# if not value or not isinstance(value, str):
# raise ValueError("Name must be a non-empty string")
# self.__name = value
person = Person("George", 32)
print(person.get_name())
print(person.get_age())
# person = Person("George", 32)
# print(person.name)
# print(person.age)
| [
"eng.antonov@gmail.com"
] | eng.antonov@gmail.com |
bc6f06c449429d99388dfabc101bd41903a224ec | 3479fca8dd50fb0f27a981cca2e4d1cd9a34d36b | /post/permissions.py | f61e1d34af829fedff5583d7f68a9fae3a0e4672 | [] | no_license | Harshvartak/blogproj | bdea67e935789ba2bacd29ec91d070b0650f73da | 4fd23d3664218bfb0c0f6817995b9595c7aa08f2 | refs/heads/master | 2020-11-25T23:24:47.459955 | 2019-12-23T20:27:13 | 2019-12-23T20:27:13 | 228,888,052 | 0 | 0 | null | 2019-12-18T19:01:54 | 2019-12-18T17:16:57 | Python | UTF-8 | Python | false | false | 465 | py | from rest_framework import permissions
'''
class BasePermission(object):
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
return True
'''
class IsAuthorOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.author == request.user
| [
"vartak.harsh@gmail.com"
] | vartak.harsh@gmail.com |
fe10ec94891a3d476f0c90796f87d44a9790613f | a0127e596323a7016b1662d57cedea7bae3f3588 | /calendly/common/logger.py | 8c1c938878c5238c4ba3c5ce9761f3901ccab360 | [] | no_license | cpj2195/calendly | 042710b959b3c4e390b9044927e92bbe7a384908 | 157907ddaf0c4330a03c8acf407239955b056d70 | refs/heads/master | 2022-06-13T16:17:43.936713 | 2020-04-06T12:52:07 | 2020-04-06T12:52:07 | 252,102,917 | 1 | 0 | null | 2022-05-25T03:25:24 | 2020-04-01T07:27:03 | Python | UTF-8 | Python | false | false | 371 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import traceback
def log_to_cloudwatch(log_marker, message):
'''
This functions is used to print the log messages so that they can be logged
to cloudwatch.
PARAMETERS
----------
message : str
message to be logged
'''
traceback.print_exc()
print(log_marker)
print(message)
| [
"you@example.com"
] | you@example.com |
03d30a5a7bb912b677db9b56561c7e7e5fb31035 | 18dc0f38e3d43a4b457c626c20fa16da715d1773 | /qa/Test_dm_sip_idb.py | 19f3a61475ee919eec91948be065d7a53c3791e2 | [
"MIT"
] | permissive | KDahlgren/orik | 424f6495effe0113ca56db3954453e708aa857a5 | 4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f | refs/heads/master | 2021-01-01T15:38:24.385363 | 2018-06-23T01:24:55 | 2018-06-23T01:24:55 | 97,662,025 | 2 | 2 | MIT | 2018-09-24T20:57:27 | 2017-07-19T02:04:15 | Python | UTF-8 | Python | false | false | 7,064 | py | #!/usr/bin/env python
'''
Test_dm_sip_idb.py
'''
#############
# IMPORTS #
#############
# standard python packages
import inspect, logging, os, re, string, sqlite3, sys, unittest
# ------------------------------------------------------ #
# import sibling packages HERE!!!
if not os.path.abspath( __file__ + "/../../src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../src" ) )
from derivation import FactNode, GoalNode, Node, ProvTree, RuleNode
if not os.path.abspath( __file__ + "/../../lib/iapyx/src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../lib/iapyx/src" ) )
from dedt import dedt, dedalusParser, clockRelation, dedalusRewriter
from utils import dumpers, globalCounters, tools
from evaluators import c4_evaluator
# ------------------------------------------------------ #
#####################
# TEST DM SIP IDB #
#####################
class Test_dm_sip_idb( unittest.TestCase ) :
logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.DEBUG )
#logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.INFO )
#logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.WARNING )
PRINT_STOP = False
#############
# SIMPLOG #
#############
#@unittest.skip( "works." )
def test_simplog( self ) :
test_id = "simplog"
test_file_name = "simplog_driver"
print " >>> RUNNING " + test_id + " <<<"
test_id = "dm_sip_idb_" + test_id
serial_nodes_path = "./testFiles/" + test_id + "_expected_nodes.txt"
serial_edges_path = "./testFiles/" + test_id + "_expected_edges.txt"
input_file = "./dedalus_drivers/" + test_file_name + ".ded"
argDict = self.getArgDict( input_file )
argDict[ 'data_save_path' ] = "./data/" + test_id + "/"
argDict[ 'EOT' ] = 6
argDict[ 'nodes' ] = [ "a", "b", "c" ]
cursor = self.set_up_test( test_id, argDict )
provTree = self.get_prov_tree( serial_nodes_path, \
serial_edges_path, \
argDict, \
cursor )
provTree.create_pydot_graph( 0, 0, test_id )
###############
# PATH LINK #
###############
#@unittest.skip( "works." )
def test_path_link( self ) :
test_id = "path_link"
test_file_name = "path_link"
print " >>> RUNNING " + test_id + " <<<"
test_id = "dm_sip_idb_" + test_id
serial_nodes_path = "./testFiles/" + test_id + "_expected_nodes.txt"
serial_edges_path = "./testFiles/" + test_id + "_expected_edges.txt"
input_file = "./testFiles/" + test_file_name + ".ded"
argDict = self.getArgDict( input_file )
argDict[ 'data_save_path' ] = "./data/" + test_id + "/"
argDict[ 'EOT' ] = 1
argDict[ 'nodes' ] = [ "a" ]
cursor = self.set_up_test( test_id, argDict )
provTree = self.get_prov_tree( serial_nodes_path, \
serial_edges_path, \
argDict, \
cursor )
provTree.create_pydot_graph( 0, 0, test_id )
###################
# GET PROV TREE #
###################
def get_prov_tree( self, serial_nodes_path, serial_edges_path, argDict, cursor ) :
if not os.path.exists( argDict[ "data_save_path" ] ) :
os.system( "mkdir " + argDict[ "data_save_path" ] )
# --------------------------------------------------------------- #
# convert dedalus into c4 datalog and evaluate
parsedResults = self.get_program_results( argDict, cursor )
# --------------------------------------------------------------- #
# build provenance tree
provTree = ProvTree.ProvTree( rootname = "FinalState", \
parsedResults = parsedResults, \
cursor = cursor, \
treeType = "goal", \
isNeg = False, \
eot = argDict[ "EOT" ], \
prev_prov_recs = {}, \
argDict = argDict )
# get actual serialized graph
if serial_nodes_path :
actual_serial_nodes = provTree.nodeset_pydot_str
if serial_edges_path :
actual_serial_edges = provTree.edgeset_pydot_str
if self.PRINT_STOP :
if serial_nodes_path :
for n in actual_serial_nodes :
logging.debug( " n = " + n.rstrip() )
if serial_nodes_path :
for e in actual_serial_edges :
logging.debug( " e = " + e.rstrip() )
tools.bp( __name__, inspect.stack()[0][3], "print stop." )
return provTree
#########################
# GET PROGRAM RESULTS #
#########################
# convert the input dedalus program into c4 datalog and evaluate.
# return evaluation results dictionary.
def get_program_results( self, argDict, cursor ) :
# convert dedalus into c4 datalog
allProgramData = dedt.translateDedalus( argDict, cursor )
# run c4 evaluation
results_array = c4_evaluator.runC4_wrapper( allProgramData[0], argDict )
parsedResults = tools.getEvalResults_dict_c4( results_array )
return parsedResults
#################
# SET UP TEST #
#################
def set_up_test( self, test_id, argDict ) :
if os.path.exists( "./IR_" + test_id + ".db*" ) :
os.remove( "./IR*.db*" )
testDB = "./IR_" + test_id + ".db"
IRDB = sqlite3.connect( testDB )
cursor = IRDB.cursor()
dedt.createDedalusIRTables(cursor)
dedt.globalCounterReset()
return cursor
##################
# GET ARG DICT #
##################
# specify the default test arguments.
# return dictionary.
def getArgDict( self, inputfile ) :
# initialize
argDict = {}
# populate with unit test defaults
argDict[ 'prov_diagrams' ] = False
argDict[ 'use_symmetry' ] = False
argDict[ 'crashes' ] = 0
argDict[ 'solver' ] = None
argDict[ 'disable_dot_rendering' ] = False
argDict[ 'settings' ] = "./settings_dm_sip_idb.ini"
argDict[ 'negative_support' ] = False
argDict[ 'strategy' ] = None
argDict[ 'file' ] = inputfile
argDict[ 'EOT' ] = 4
argDict[ 'find_all_counterexamples' ] = False
argDict[ 'nodes' ] = [ "a", "b", "c" ]
argDict[ 'evaluator' ] = "c4"
argDict[ 'EFF' ] = 2
argDict[ 'data_save_path' ] = "./data/"
argDict[ 'neg_writes' ] = "dm"
return argDict
##############################
# MAIN THREAD OF EXECUTION #
##############################
if __name__ == "__main__":
unittest.main()
#########
# EOF #
#########
| [
"kdahlgren15@gmail.com"
] | kdahlgren15@gmail.com |
eea9054193fcde002fa2322da0daf6e6b6bbd769 | d561fab22864cec1301393d38d627726671db0b2 | /python/helpers/typeshed/third_party/3.6/click/decorators.pyi | a3dcdddeb065b9be1908801b10384ecdca051c3b | [
"Apache-2.0",
"MIT"
] | permissive | Vedenin/intellij-community | 724dcd8b3e7c026936eed895cf964bb80574689a | 74a89fa7083dedc6455a16e10cf779d191d79633 | refs/heads/master | 2021-01-25T00:47:43.514138 | 2017-03-27T15:48:36 | 2017-03-27T15:54:02 | 86,361,176 | 1 | 1 | null | 2017-03-27T16:54:23 | 2017-03-27T16:54:23 | null | UTF-8 | Python | false | false | 5,494 | pyi | from typing import Any, Callable, Dict, List, TypeVar, Union
from click.core import Command, Group, Argument, Option, Parameter, Context
from click.types import ParamType
T = TypeVar('T')
Decorator = Callable[[T], T]
def pass_context(T) -> T:
...
def pass_obj(T) -> T:
...
def make_pass_decorator(
object_type: type, ensure: bool = False
) -> Callable[[T], T]:
...
# NOTE: Decorators below have **attrs converted to concrete constructor
# arguments from core.pyi to help with type checking.
def command(
name: str = None,
cls: type = Command,
# Command
help: str = None,
epilog: str = None,
short_help: str = None,
options_metavar: str = '[OPTIONS]',
add_help_option: bool = True,
) -> Decorator:
...
# This inherits attrs from Group, MultiCommand and Command.
def group(
name: str = None,
cls: type = Group,
# Group
commands: Dict[str, Command] = None,
# MultiCommand
invoke_without_command: bool = False,
no_args_is_help: bool = None,
subcommand_metavar: str = None,
chain: bool = False,
result_callback: Callable = None,
# Command
help: str = None,
epilog: str = None,
short_help: str = None,
options_metavar: str = '[OPTIONS]',
add_help_option: bool = True,
) -> Decorator:
...
def argument(
*param_decls: str,
cls: type = Argument,
# Argument
required: bool = None,
# Parameter
type: Union[type, ParamType] = None,
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
def option(
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: bool = False,
confirmation_prompt: bool = False,
hide_input: bool = False,
is_flag: bool = None,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = None,
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
# Defaults copied from the decorator body.
def confirmation_option(
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: str = 'Do you want to continue?',
confirmation_prompt: bool = False,
hide_input: bool = False,
is_flag: bool = True,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = 'Confirm the action without prompting.',
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = False,
is_eager: bool = False,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
# Defaults copied from the decorator body.
def password_option(
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: bool = True,
confirmation_prompt: bool = True,
hide_input: bool = True,
is_flag: bool = None,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = None,
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
# Defaults copied from the decorator body.
def version_option(
version: str = None,
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: bool = False,
confirmation_prompt: bool = False,
hide_input: bool = False,
is_flag: bool = True,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = 'Show the version and exit.',
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = False,
is_eager: bool = True,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
# Defaults copied from the decorator body.
def help_option(
*param_decls: str,
cls: type = Option,
# Option
show_default: bool = False,
prompt: bool = False,
confirmation_prompt: bool = False,
hide_input: bool = False,
is_flag: bool = True,
flag_value: Any = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: Union[type, ParamType] = None,
help: str = 'Show this message and exit.',
# Parameter
default: Any = None,
callback: Callable[[Context, Parameter, str], Any] = None,
nargs: int = None,
metavar: str = None,
expose_value: bool = False,
is_eager: bool = True,
envvar: Union[str, List[str]] = None
) -> Decorator:
...
| [
"andrey.vlasovskikh@gmail.com"
] | andrey.vlasovskikh@gmail.com |
f17014c2e1af3c37315c054d5633d98ac328b1c3 | 9784a90cac667e8e0aaba0ca599b4255b215ec67 | /gluon/datasets/librispeech_asr_dataset.py | 9726c17ef5fa85b5af7e5f85752be4319238b0ff | [
"MIT"
] | permissive | osmr/imgclsmob | d2f48f01ca541b20119871393eca383001a96019 | f2993d3ce73a2f7ddba05da3891defb08547d504 | refs/heads/master | 2022-07-09T14:24:37.591824 | 2021-12-14T10:15:31 | 2021-12-14T10:15:31 | 140,285,687 | 3,017 | 624 | MIT | 2022-07-04T15:18:37 | 2018-07-09T12:57:46 | Python | UTF-8 | Python | false | false | 5,226 | py | """
LibriSpeech ASR dataset.
"""
__all__ = ['LibriSpeech', 'LibriSpeechMetaInfo']
import os
import numpy as np
from .dataset_metainfo import DatasetMetaInfo
from .asr_dataset import AsrDataset, asr_test_transform
class LibriSpeech(AsrDataset):
"""
LibriSpeech dataset for Automatic Speech Recognition (ASR).
Parameters:
----------
root : str
Path to folder storing the dataset.
mode : str, default 'test'
'train', 'val', 'test', or 'demo'.
subset : str, default 'dev-clean'
Data subset.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="test",
subset="dev-clean",
transform=None):
super(LibriSpeech, self).__init__(
root=root,
mode=mode,
transform=transform)
self.vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)}
import soundfile
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
data_dir_path = os.path.join(root_dir_path, subset)
assert os.path.exists(data_dir_path)
for speaker_id in os.listdir(data_dir_path):
speaker_dir_path = os.path.join(data_dir_path, speaker_id)
for chapter_id in os.listdir(speaker_dir_path):
chapter_dir_path = os.path.join(speaker_dir_path, chapter_id)
transcript_file_path = os.path.join(chapter_dir_path, "{}-{}.trans.txt".format(speaker_id, chapter_id))
with open(transcript_file_path, "r") as f:
transcripts = dict(x.split(" ", maxsplit=1) for x in f.readlines())
for flac_file_name in os.listdir(chapter_dir_path):
if flac_file_name.endswith(".flac"):
wav_file_name = flac_file_name.replace(".flac", ".wav")
wav_file_path = os.path.join(chapter_dir_path, wav_file_name)
if not os.path.exists(wav_file_path):
flac_file_path = os.path.join(chapter_dir_path, flac_file_name)
pcm, sample_rate = soundfile.read(flac_file_path)
soundfile.write(wav_file_path, pcm, sample_rate)
text = transcripts[wav_file_name.replace(".wav", "")]
text = text.strip("\n ").lower()
text = np.array([vocabulary_dict[c] for c in text], dtype=np.long)
self.data.append((wav_file_path, text))
class LibriSpeechMetaInfo(DatasetMetaInfo):
    """Dataset meta-information for LibriSpeech (ASR, WER metric)."""

    def __init__(self):
        super(LibriSpeechMetaInfo, self).__init__()
        self.label = "LibriSpeech"
        self.short_label = "ls"
        self.root_dir_name = "LibriSpeech"
        self.dataset_class = LibriSpeech
        self.dataset_class_extra_kwargs = {"subset": "dev-clean"}
        self.ml_type = "asr"
        # 28 vocabulary symbols + 1 blank; refined in update_from_dataset().
        self.num_classes = 29
        # Metric kwargs get the real vocabulary filled in later.
        self.val_metric_extra_kwargs = [{"vocabulary": None}]
        self.val_metric_capts = ["Val.WER"]
        self.val_metric_names = ["WER"]
        self.test_metric_extra_kwargs = [{"vocabulary": None}]
        self.test_metric_capts = ["Test.WER"]
        self.test_metric_names = ["WER"]
        self.val_transform = asr_test_transform
        self.test_transform = asr_test_transform
        self.test_net_extra_kwargs = {"from_audio": True}
        self.allow_hybridize = False
        self.saver_acc_ind = 0

    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters (for dataset specific metainfo).

        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        super(LibriSpeechMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--subset",
            type=str,
            default="dev-clean",
            help="data subset")

    def update(self,
               args):
        """
        Update dataset metainfo after user customizing.

        Parameters:
        ----------
        args : Namespace
            Parsed main script arguments.
        """
        super(LibriSpeechMetaInfo, self).update(args)
        self.dataset_class_extra_kwargs["subset"] = args.subset

    def update_from_dataset(self,
                            dataset):
        """
        Update dataset metainfo after a dataset class instance creation.

        Parameters:
        ----------
        dataset : obj
            A dataset class instance.
        """
        # +1 accounts for the CTC blank symbol.
        vocabulary = dataset._data.vocabulary
        self.num_classes = len(vocabulary) + 1
        self.val_metric_extra_kwargs[0]["vocabulary"] = vocabulary
        self.test_metric_extra_kwargs[0]["vocabulary"] = vocabulary
| [
"osemery@gmail.com"
] | osemery@gmail.com |
cf20a934ec81a718afd3d4f520e7a181de4d4ea6 | 479117fe710b1cadf9252f08769d8fd1476934e4 | /flask_ipywidgets/__init__.py | ead135d9ab9e51af7a15b2604dd43d767070b29b | [
"BSD-3-Clause"
] | permissive | jf---/flask-ipywidgets | 3f1b310c81015c3b4fbc66ee24d356987998613e | 3c0d7356c7185cb59e9dfa0f13e702273bbd7696 | refs/heads/master | 2020-03-15T12:32:05.166269 | 2018-07-30T10:06:15 | 2018-07-30T10:06:15 | 132,146,254 | 0 | 0 | null | 2018-05-04T13:51:06 | 2018-05-04T13:51:06 | null | UTF-8 | Python | false | false | 3,088 | py | from .kernel import *
from flask_sockets import Sockets
_kernel_spec = {
"display_name": "flask_kernel",
"language": "python",
"argv": ["python", "doesnotworkthisway"],
"env": {
},
"display_name": "Flask kernel",
"language": "python",
"interrupt_mode": "signal",
"metadata": {},
}
from flask import Flask, Blueprint
http = Blueprint('jupyter', __name__)
websocket = Blueprint('jupyter', __name__)
@http.route('/api/kernelspecs')
def kernelspecs(name=None):
    # Minimal Jupyter REST endpoint: advertise a single kernel spec so the
    # front end treats this Flask app as a Jupyter server.
    # NOTE(review): `jsonify` is not imported in this file directly --
    # presumably re-exported by `from .kernel import *`; confirm.
    return jsonify({
        'default': 'flask_kernel',
        'kernelspecs': {
            'flask_kernel': {
                'name': 'flask_kernel',
                'resources': {},
                'spec': _kernel_spec
            }
        }
    })
@http.route('/api/kernels', methods=['GET', 'POST'])
def kernels_normal():
    # Stub kernel-listing/creation endpoint: returns a hard-coded kernel
    # descriptor (fixed id and timestamp) with HTTP 201 so the Jupyter
    # front end proceeds to open a websocket for it.
    data = {
        "id": "4a8a8c6c-188c-40aa-8bab-3c79500a4b26",
        "name":
        "flask_kernel",
        "last_activity": "2018-01-30T19:32:04.563616Z",
        "execution_state":
        "starting",
        "connections": 0
    }
    return jsonify(data), 201
@websocket.route('/api/kernels/<id>/<name>')
def kernels(ws, id, name):
    # Websocket endpoint bridging Jupyter protocol messages to the kernel:
    # each incoming JSON message is re-serialized via the kernel session and
    # dispatched. Only the 'shell' channel is handled.
    print(id, name)
    kernel = FlaskKernel.instance()
    #kernel.stream.last_ws = ws
    while not ws.closed:
        message = ws.receive()
        if message is not None:
            msg = json.loads(message)
            msg_serialized = kernel.session.serialize(msg)
            # print("msg from front end", msg)
            # print(kernel.comm_manager.comms)
            msg_id = msg['header']['msg_id']
            # Remember which websocket to answer on for this message id.
            kernel.session.websockets[msg_id] = ws
            if msg['channel'] == 'shell':
                kernel.dispatch_shell(WebsocketStreamWrapper(ws, msg['channel']), [
                    BytesWrap(k) for k in msg_serialized])
            else:
                print('unknown channel', msg['channel'])
def app(prefix='/jupyter'):
    """Build a Flask app exposing the Jupyter endpoints plus two template
    filters for embedding ipywidgets in rendered pages."""
    kernel = FlaskKernel.instance()
    app = Flask(__name__)

    @app.template_filter()
    def ipywidget_view(widget):
        # Emit the front-end "widget view" JSON spec inside a script tag.
        from jinja2 import Markup, escape
        import json
        return Markup("""<script type="application/vnd.jupyter.widget-view+json">%s</script>""" % json.dumps(widget.get_view_spec()))

    @app.template_filter()
    def ipywidget_state(widgets):
        # Serialize the full widget dependency state (defaults dropped) into
        # the ipywidgets embed snippet.
        from jinja2 import Markup, escape
        from ipywidgets import embed as wembed
        drop_defaults = True
        state = wembed.dependency_state(widgets, drop_defaults=drop_defaults)
        from ipywidgets import Widget
        json_data = Widget.get_manager_state(widgets=[])
        json_data['state'] = state
        # NOTE(review): `json` is not imported in this function -- relies on a
        # module-level name, presumably from `from .kernel import *`; confirm.
        json_data_str = json.dumps(json_data, indent=' ')
        snippet = wembed.snippet_template.format(
            load='', widget_views='', json_data=json_data_str)
        return Markup(snippet)

    # Mount the HTTP and websocket blueprints under the Jupyter prefix.
    sockets = Sockets(app)
    app.register_blueprint(http, url_prefix=prefix)
    sockets.register_blueprint(websocket, url_prefix=prefix)
    return app
def init(app):
    """Attach Jupyter kernel support to an existing Flask *app*.

    NOTE(review): unlike app(), this does not register the http/websocket
    blueprints and the `sockets` object is unused -- looks incomplete;
    confirm intended behavior.
    """
    kernel = FlaskKernel.instance()
    sockets = Sockets(app)
| [
"maartenbreddels@gmail.com"
] | maartenbreddels@gmail.com |
c0639249f7c07c28cd08a1583e8193dcd657342f | e23881d9b059f3fbe3f75a7c8c53737ed0f53545 | /Django_two_factor_auth/manage.py | 842f7f604e05ff01e4c5825614bc8edaf8790bb5 | [] | no_license | GK-SVG/Django_Boy | b1fbf9c2b3d35e38bcd2da54956476aad0f2310d | 27121c1dc70b44065cd2c5fe854335cd5d1214c5 | refs/heads/master | 2023-05-07T22:47:02.414738 | 2021-06-03T17:03:43 | 2021-06-03T17:03:43 | 308,200,818 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Django_two_factor_auth.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"gk32239@gmail.com"
] | gk32239@gmail.com |
85052a32ac62faefb4696f65719c5b84466465aa | facf7941a8ef5b1f3eceac59b390ef78ea18c6b8 | /EmoEstimator/utils/evaluate.py | f9134555f6cb0e1a9f4e8b46ce9fae19ab3e90b7 | [] | no_license | augustdemi/demi | efd29caa3fcccbd92b3ac4e9ba39ed910c3a75ef | 059a1bc93f9597b4db98e2c8e8c6f60d180d4fc3 | refs/heads/master | 2020-03-18T17:05:25.398744 | 2019-03-13T18:55:28 | 2019-03-13T18:55:28 | 135,005,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,475 | py | import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '%.2f' % x)
def _process(y_hat, y_lab, fun):
'''
- split y_true and y_pred in lists
- removes frames where labels are unknown (-1)
- returns list of predictions
'''
y1 = [x for x in y_hat.T]
y2 = [x for x in y_lab.T]
out = []
for i, [_y1, _y2] in enumerate(zip(y1, y2)):
idx = _y2!=-1
_y1 = _y1[idx]
_y2 = _y2[idx]
if np.all(_y2==-1):
out.append(np.nan)
else:
out.append(fun(_y1,_y2))
return np.array(out)
def _acc(y_hat, y_lab):
    """Column-wise accuracy after rounding both predictions and labels."""
    def per_column(pred, lab):
        return np.mean(np.round(pred) == np.round(lab))
    return _process(y_hat, y_lab, per_column)
def _mae(y_hat, y_lab):
    """Column-wise mean absolute error (computed in float32)."""
    def per_column(pred, lab):
        return np.mean(np.abs(np.float32(pred) - np.float32(lab)))
    return _process(y_hat, y_lab, per_column)
def _mse(y_hat, y_lab):
    """Column-wise mean squared error (computed in float32)."""
    def per_column(pred, lab):
        diff = np.float32(pred) - np.float32(lab)
        return np.mean(diff ** 2)
    return _process(y_hat, y_lab, per_column)
def _rmse(y_hat, y_lab):
    """Column-wise root mean squared error (computed in float32)."""
    def per_column(pred, lab):
        diff = np.float32(pred) - np.float32(lab)
        return np.sqrt(np.mean(diff ** 2))
    return _process(y_hat, y_lab, per_column)
def _f1(y_hat, y_lab, threshold=1):
    """Column-wise binary F1 score after thresholding at *threshold*.

    Returns 0 for a column with no true positives.
    """
    def per_column(pred, lab):
        pred = pred >= threshold
        lab = lab >= threshold
        tp = np.sum(pred & lab)
        fp = np.sum(pred & ~lab)
        fn = np.sum(~pred & lab)
        if tp == 0:
            return 0
        return (2 * tp) / float(2 * tp + fp + fn)
    return _process(y_hat, y_lab, per_column)
def _icc(y_hat, y_lab, cas=3, typ=1):
    """Column-wise intraclass correlation coefficient ICC(cas, typ),
    treating prediction and label as two raters. Returns 0 where the
    coefficient is NaN/inf (e.g. zero variance)."""
    def fun(y_hat,y_lab):
        # Stack the two "raters" into Y of shape (2, 1, n_targets).
        y_hat = y_hat[None,:]
        y_lab = y_lab[None,:]
        Y = np.array((y_lab, y_hat))
        # number of targets
        n = Y.shape[2]
        # mean per target
        mpt = np.mean(Y, 0)
        # print mpt.eval()
        mpr = np.mean(Y, 2)
        # print mpr.eval()
        tm = np.mean(mpt, 1)
        # within target sum sqrs
        WSS = np.sum((Y[0]-mpt)**2 + (Y[1]-mpt)**2, 1)
        # within mean sqrs
        WMS = WSS/n
        # between rater sum sqrs
        RSS = np.sum((mpr - tm)**2, 0) * n
        # between rater mean sqrs
        RMS = RSS
        # between target sum sqrs
        TM = np.tile(tm, (y_hat.shape[1], 1)).T
        BSS = np.sum((mpt - TM)**2, 1) * 2
        # between targets mean squares
        BMS = BSS / (n - 1)
        # residual sum of squares
        ESS = WSS - RSS
        # residual mean sqrs
        EMS = ESS / (n - 1)
        # Select the ICC case/type formula (Shrout & Fleiss convention).
        if cas == 1:
            if typ == 1:
                res = (BMS - WMS) / (BMS + WMS)
            if typ == 2:
                res = (BMS - WMS) / BMS
        if cas == 2:
            if typ == 1:
                res = (BMS - EMS) / (BMS + EMS + 2 * (RMS - EMS) / n)
            if typ == 2:
                res = (BMS - EMS) / (BMS + (RMS - EMS) / n)
        if cas == 3:
            if typ == 1:
                res = (BMS - EMS) / (BMS + EMS)
            if typ == 2:
                res = (BMS - EMS) / BMS
        res = res[0]
        # Guard against degenerate columns (constant values).
        if np.isnan(res) or np.isinf(res):
            return 0
        else:
            return res
    return _process(y_hat, y_lab, fun)
def _pcc(y_hat, y_lab):
    """Column-wise Pearson correlation coefficient (0 when undefined)."""
    def per_column(pred, lab):
        r = np.corrcoef(pred, lab)[0, 1]
        return 0 if (np.isnan(r) or np.isinf(r)) else r
    return _process(y_hat, y_lab, per_column)
def print_summary(y_hat, y_lab, log_dir=None, verbose=1, mode='max'):
    """
    Compute ICC/PCC/RMSE/MAE/ACC/F1 per output and print them as a table.

    y_hat, y_lab : arrays of identical shape. 3-D inputs are collapsed to
        2-D either by expectation over the last axis (mode='exp') or by
        argmax (mode='max').
    log_dir : optional file path; the table is also written there.
    Returns a dict with 'index', 'columns' and the raw metric 'data'.
    """
    assert(y_hat.shape==y_lab.shape)
    # remove frames whose labels are entirely unknown (all -1)
    idx = y_lab.reshape(y_lab.shape[0],-1).max(-1)>=0
    y_lab = y_lab[idx]
    y_hat = y_hat[idx]
    if y_hat.ndim==3:
        if mode=='exp':
            # Expected value over the class axis (sum of class_index * prob).
            tmp = np.zeros(y_hat.shape[:2])
            for i in range(y_hat.shape[2]):
                tmp+=y_hat[:,:,i]*i
            y_hat = tmp
            tmp = np.zeros(y_lab.shape[:2])
            for i in range(y_lab.shape[2]):
                tmp+=y_lab[:,:,i]*i
            y_lab = tmp
        if mode=='max':
            y_hat = y_hat.argmax(2)
            y_lab = y_lab.argmax(2)
    # One row per metric, one column per output dimension.
    data = []
    data.append(_icc(y_hat, y_lab))
    data.append(_pcc(y_hat, y_lab))
    data.append(_rmse(y_hat, y_lab))
    data.append(_mae(y_hat, y_lab))
    data.append(_acc(y_hat, y_lab))
    data.append(_f1(y_hat, y_lab))
    data = np.vstack(data)
    # Append a row-mean column labelled 'avr.'.
    columns = [str(i) for i in np.arange(data.shape[1])]+['avr.']
    table = np.hstack((data,data.mean(1)[:,None]))
    index = ['ICC','PCC','RMSE','MAE','ACC','F1-b']
    t = pd.DataFrame(np.abs(table), index=index, columns = columns)
    out = {
        'index':index,
        'columns':columns,
        'data':data
    }
    if verbose:
        print(t)
        print()
    if log_dir:
        f = open(log_dir, 'w')
        print(t, file=f)
        f.close()
    return out
# Smoke test: random predictions/labels, with column 0 made identical
# (metric extremes) and label column 3 set to -1 (fully unlabeled -> NaN).
if __name__ == "__main__":
    import numpy as np
    y1 = np.random.randint(0,5,[100,4])
    y2 = np.random.randint(0,5,[100,4])
    y1[:,0] = y2[:,0]
    # Half of prediction column 2 set to -1 (labels are what get filtered).
    y1[:50,2]=-1
    y2[:,3]=-1
    print(_acc(y1,y2))
    print(_mae(y1,y2))
    print(_rmse(y1,y2))
    print(_icc(y1,y2))
    print(_pcc(y1,y2))
    print(_f1(y1,y2))
| [
"augustdemi@gmail.com"
] | augustdemi@gmail.com |
4f285ca0d361ca2986f77184b1364e48262952d5 | 6bb99b53ae72f03e4ebce2c80c3be1c13871e46f | /pyweb/web_11_framework_v3/test_case/test_bid.py | 09aadaa2667f5e987cb84621e5717cf50f64b8d1 | [] | no_license | change1q2/Learn | b9ac7085ae476f92fbf04043bda74605b723abf0 | 28e93c56c0a3aaf72006614a565fb7fff267b893 | refs/heads/master | 2021-03-15T16:31:51.641845 | 2020-04-10T14:19:23 | 2020-04-10T14:19:23 | 246,864,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
# email: wagyu2016@163.com
# wechat: shoubian01
# author: 王雨泽
import time
import unittest
from selenium import webdriver
from data.login_data import login_data_success
from pages.index_page import IndexPage
from pages.login_page import LoginPage
class TestBid(unittest.TestCase):
    """Investment ("bid") flow UI tests, driven through the page objects."""

    def setUp(self) -> None:
        """
        Preconditions:
        1. Log in.
        :return:
        """
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(20)
        # Initialise the page objects.
        self.login_page = LoginPage(self.driver)
        self.index_page = IndexPage(self.driver)
        # Log in with the first set of valid credentials.
        login_data = login_data_success[0]
        self.login_page.login(login_data['mobile'], login_data['pwd'])

    def tearDown(self) -> None:
        pass

    def test_bid_error(self):
        "Investment fails"
        time.sleep(1)
        self.index_page.get()
        # If we do not wait for the new page before locating elements, we may
        # find elements belonging to the previous page instead.
        self.index_page.get_element_bid().click()
        print('hello')

    # def test_bid_success(self):
    #     """Investment succeeds"""
    #     pass
| [
"274882401@qq.com"
] | 274882401@qq.com |
4db4270f3fe0a646bfc6601b1a0ca5d44b124cad | 0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a | /python3/13_OOP/f_Advanced/02_abc_classes.py | 698048fec9be8984002dbb3ddec5ae264a1514ff | [] | no_license | udhayprakash/PythonMaterial | 3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156 | e72f44e147141ebc9bf9ec126b70a5fcdbfbd076 | refs/heads/develop | 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 952 | py | #!/usr/bin/python
"""
Purpose: Abstract Base classes
"""
from abc import ABC, abstractmethod, abstractproperty
class BasicCar(ABC):
    """Abstract car: subclasses must implement get_chasis_number()."""

    # Class-level placeholder; plain ABCs cannot enforce attributes.
    modal_name: str = NotImplemented

    @abstractmethod
    def get_chasis_number(self):
        """Return the chassis number (must be overridden)."""

    def get_car_model(self):
        """Concrete no-op hook; subclasses may override."""
# Solution
class RolsRoys(BasicCar):
    # Implementing the abstract method satisfies the ABC contract.
    def get_chasis_number(self):
        pass


# Instantiation now succeeds because no abstract methods remain.
car_r = RolsRoys()
# NOTE: We cant enforce variables to be defined.
# for that we need to use property
# ----------------------------------------
class BasicCar(ABC):
    """Abstract car that additionally *requires* a `modal_name` property."""

    @abstractmethod
    def get_chasis_number(self):
        """Return the chassis number (must be overridden)."""

    def get_car_model(self):
        """Concrete no-op hook; subclasses may override."""

    @property
    @abstractmethod
    def modal_name(self):
        """Abstract read-only attribute; stacking @property under
        @abstractmethod forces subclasses to provide it."""
# NOTE: Earlier asbtractproperty is used, but deprecated in Python 3.8
# Solution
class RolsRoys(BasicCar):
    def get_chasis_number(self):
        pass

    # Overriding the abstract property with a concrete one fulfils the
    # "required attribute" contract.
    @property
    def modal_name(self):
        pass


# Both abstract members are implemented, so instantiation succeeds.
car_r = RolsRoys()
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
5e2198bbbaad10200ebe8913df6a6cce46ac2e95 | ddda55fcfc84ac5cd78cfc5c336a3df0b9096157 | /scripts/linux-menuconfig/menuconfig.py | d3b29c6414cf8265292e0e7ae04ab48eb6d085c9 | [
"Apache-2.0"
] | permissive | liu-delong/lu_xing_xiang_one_os | 701b74fceb82dbb2806518bfb07eb85415fab43a | 0c659cb811792f2e190d5a004a531bab4a9427ad | refs/heads/master | 2023-06-17T03:02:13.426431 | 2021-06-28T08:12:41 | 2021-06-28T08:12:41 | 379,661,507 | 2 | 2 | Apache-2.0 | 2021-06-28T10:08:10 | 2021-06-23T16:11:54 | C | UTF-8 | Python | false | false | 1,160 | py | # -*- coding:utf-8 -*-
#
# File : menuconfig.py
# This file is part of OneOS RTOS
#
import os
import sys
import argparse
import platform
import cmd_menuconfig
__version__ = 'OneOS packages v1.1.0'
def main():
    """Entry point: validate the working directory, then run menuconfig."""
    bsp_root = os.getcwd()
    os_root = os.path.join(bsp_root, "../..")
    script_root = os.path.split(os.path.realpath(__file__))[0]
    sys.path = sys.path + [os.path.join(script_root)]
    # The tooling cannot handle non-ASCII paths; bail out early with a
    # bilingual warning if the current directory is not pure ASCII.
    try:
        bsp_root.encode().decode("ascii")
    except Exception as e:
        if platform.system() == "Windows":
            # Switch the console code page to UTF-8 so the message renders.
            os.system('chcp 65001 > nul')
        print ("\n\033[1;31;40m警告:\033[0m")
        print ("\033[1;31;40m当前路径不支持非英文字符,请修改当前路径为纯英文路径。\033[0m")
        print ("\033[1;31;40mThe current path does not support non-English characters.\033[0m")
        print ("\033[1;31;40mPlease modify the current path to a pure English path.\033[0m")
        print(bsp_root)
        if platform.system() == "Windows":
            # Restore the default (437) code page before exiting.
            os.system('chcp 437 > nul')
        return False
    cmd_menuconfig.cmd()


if __name__ == '__main__':
    main()
| [
"cmcc_oneos@cmiot.chinamobile.com"
] | cmcc_oneos@cmiot.chinamobile.com |
79b15adf19e99c7c49e5040691a05f0842aedc20 | 09ac5476e94122bf8ccdb0b404175dff0820c8a7 | /283 移动零.py | de5fd9bb96328ff663044152764e0112c737d6ec | [] | no_license | wxke/LeetCode-python | df27c456ad0c7042e3bfcf2a697e3958d3b85f1f | 37a66e426e9c7e279928d2f6fcdecb9641f4121c | refs/heads/master | 2020-04-29T14:03:22.554357 | 2020-03-10T12:38:24 | 2020-03-10T12:38:24 | 176,185,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | 移动零
class Solution:
    """LeetCode 283 - Move Zeroes."""

    def moveZeroes(self, nums):
        """
        Move every 0 to the end of *nums* in place, preserving the relative
        order of the non-zero elements.

        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Single O(n) pass instead of the original count/remove/append
        # approach, which was O(n * zeros) since list.remove is O(n).
        write = 0
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        # Back-fill the tail with the zeros that were skipped.
        for i in range(write, len(nums)):
            nums[i] = 0
| [
"noreply@github.com"
] | wxke.noreply@github.com |
0556703511c977fa66d8a51c13ed395a5a309986 | 0da6e1000e071d97822ffe5d84efa7f998d72ae8 | /2021-03-08-Introduction-to-Python/examples/14-matmul.py | 6b0863741e3b60985b2abf63859843e460fa769c | [
"BSD-3-Clause"
] | permissive | s3rvac/talks | 1c3dfec03d1f798125a50438b26aa8daf1f86b65 | 5e76250ee98424c090fdfbf3c1a2a92f36ccaca6 | refs/heads/master | 2023-05-12T01:13:03.365621 | 2023-05-05T17:32:27 | 2023-05-05T17:32:27 | 84,107,862 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | # @ is an operator for matrix multiplication since Python 3.5 (__matmul__).
#
# Requires numpy (http://www.numpy.org/).
import numpy as np

# np.matrix is deprecated; plain 2-D arrays support @ just as well and
# print identically for this example.
A = np.array([[4, 1], [9, 3]])
B = np.array([[5, 1], [3, 8]])

# Prints
#
#   [[23 12]
#    [54 33]]
#
print(A @ B)
| [
"s3rvac@gmail.com"
] | s3rvac@gmail.com |
7fa6c925a941e1607fa873947b412cbf1688cefe | f8f8651ab604acc4937f8725caadaca1fb97a5e8 | /src/lightning_app/cli/commands/app_commands.py | 0b08538e76ba6d2c9bab478aa86190a00f8b3954 | [
"Apache-2.0"
] | permissive | neptune-ai/pytorch-lightning | ac59e746a486e07e21abae426b28e5d72812ac98 | 702014418e2ec0437e67d8bf97809edef686a02c | refs/heads/master | 2022-09-28T09:34:07.653729 | 2022-09-12T11:13:48 | 2022-09-12T11:13:48 | 229,063,811 | 1 | 1 | Apache-2.0 | 2022-09-26T03:29:49 | 2019-12-19T13:48:16 | Python | UTF-8 | Python | false | false | 3,430 | py | import os
import sys
from typing import Dict, Optional
import requests
from lightning_app.cli.commands.connection import _resolve_command_path
from lightning_app.utilities.cli_helpers import _retrieve_application_url_and_available_commands
from lightning_app.utilities.commands.base import _download_command
from lightning_app.utilities.enum import OpenAPITags
def _run_app_command(app_name: str, app_id: Optional[str]):
    """Execute a function in a running App from its name."""
    # 1: Collect the url and comments from the running application
    url, api_commands, _ = _retrieve_application_url_and_available_commands(app_id)
    if url is None or api_commands is None:
        raise Exception("We couldn't find any matching running App.")
    if not api_commands:
        raise Exception("This application doesn't expose any commands yet.")
    # Match the requested command by searching the joined argv words.
    full_command = "_".join(sys.argv)
    has_found = False
    for command in list(api_commands):
        if command in full_command:
            has_found = True
            break
    if not has_found:
        raise Exception(f"The provided command isn't available in {list(api_commands)}")
    # 2: Send the command from the user
    metadata = api_commands[command]
    # 3: Execute the command, either server-side or via a downloaded client.
    if metadata["tag"] == OpenAPITags.APP_COMMAND:
        _handle_command_without_client(command, metadata, url)
    else:
        _handle_command_with_client(command, metadata, app_name, app_id, url)
    if sys.argv[-1] != "--help":
        print("Your command execution was successful.")
def _handle_command_without_client(command: str, metadata: Dict, url: str) -> None:
    """Execute a server-side command by POSTing its --x=y arguments as a
    query string to the running app. Prints usage when argv ends in --help."""
    supported_params = list(metadata["parameters"])
    if "--help" == sys.argv[-1]:
        print(f"Usage: lightning {command} [ARGS]...")
        print(" ")
        print("Options")
        for param in supported_params:
            print(f"  {param}: Add description")
        return
    # Everything after the command words is treated as a --key=value pair.
    provided_params = [param.replace("--", "") for param in sys.argv[1 + len(command.split("_")) :]]
    # TODO: Add support for more argument types.
    if any("=" not in param for param in provided_params):
        raise Exception("Please, use --x=y syntax when providing the command arguments.")
    if any(param.split("=")[0] not in supported_params for param in provided_params):
        raise Exception(f"Some arguments need to be provided. The keys are {supported_params}.")
    # TODO: Encode the parameters and validate their type.
    query_parameters = "&".join(provided_params)
    resp = requests.post(url + f"/command/{command}?{query_parameters}")
    assert resp.status_code == 200, resp.json()
def _handle_command_with_client(command: str, metadata: Dict, app_name: str, app_id: Optional[str], url: str):
    """Download, set up and run the client-side command implementation.

    The command file comes from the app metadata for local ("localhost")
    apps, otherwise from the local command cache.
    """
    debug_mode = bool(int(os.getenv("DEBUG", "0")))
    if app_name == "localhost":
        target_file = metadata["cls_path"]
    else:
        # Both branches of the original conditional called the same function,
        # so the debug_mode check here was redundant.
        target_file = _resolve_command_path(command)
    if debug_mode:
        print(target_file)
    client_command = _download_command(
        command,
        metadata["cls_path"],
        metadata["cls_name"],
        app_id,
        debug_mode=debug_mode,
        # NOTE(review): for a non-debug "localhost" app this discards the
        # metadata cls_path computed above in favour of the cache path --
        # kept as-is to preserve existing behaviour; confirm intent.
        target_file=target_file if debug_mode else _resolve_command_path(command),
    )
    client_command._setup(command_name=command, app_url=url)
    # Strip the command words so the client command sees only its own args.
    sys.argv = sys.argv[len(command.split("_")) :]
    client_command.run()
| [
"noreply@github.com"
] | neptune-ai.noreply@github.com |
9c08d98c2e1c10b1d3156cdc716e1f61bdac4ecd | b0549c720ffc7222c1b159db601d083f4422232f | /aib/init/tables/dir_companies.py | 5b540cdf12644b9121d40b33a113ed766f872a24 | [
"MIT"
] | permissive | FrankMillman/AccInABox | e7f6fd84caca27e3c4871b23b104cfd9de2150b3 | 3f2fc881cc9ee3e9e27022d90c90a7141fc59588 | refs/heads/develop | 2023-06-26T08:32:48.319840 | 2023-06-18T07:14:10 | 2023-06-18T07:14:10 | 23,425,845 | 3 | 1 | NOASSERTION | 2020-01-03T07:12:47 | 2014-08-28T11:43:13 | Python | UTF-8 | Python | false | false | 3,758 | py | # table definition
# Table definition: the 'dir_companies' directory table (module 'dir').
table = {
    'table_name'    : 'dir_companies',
    'module_id'     : 'dir',
    'short_descr'   : 'Companies',
    'long_descr'    : 'Directory of companies',
    'sub_types'     : None,
    'sub_trans'     : None,
    'sequence'      : None,
    'tree_params'   : None,
    'roll_params'   : None,
    'indexes'       : None,
    'ledger_col'    : None,
    'defn_company'  : None,
    'data_company'  : None,
    'read_only'     : False,
    }
# column definitions
cols = []
# Surrogate auto-increment primary key.
cols.append ({
    'col_name'   : 'row_id',
    'data_type'  : 'AUTO',
    'short_descr': 'Row id',
    'long_descr' : 'Row id',
    'col_head'   : 'Row',
    'key_field'  : 'Y',
    'data_source': 'gen',
    'condition'  : None,
    'allow_null' : False,
    'allow_amend': False,
    'max_len'    : 0,
    'db_scale'   : 0,
    'scale_ptr'  : None,
    'dflt_val'   : None,
    'dflt_rule'  : None,
    'col_checks' : None,
    'fkey'       : None,
    'choices'    : None,
    })
# Audit link: row id of the creating transaction (0 = none).
cols.append ({
    'col_name'   : 'created_id',
    'data_type'  : 'INT',
    'short_descr': 'Created id',
    'long_descr' : 'Created row id',
    'col_head'   : 'Created',
    'key_field'  : 'N',
    'data_source': 'gen',
    'condition'  : None,
    'allow_null' : False,
    'allow_amend': False,
    'max_len'    : 0,
    'db_scale'   : 0,
    'scale_ptr'  : None,
    'dflt_val'   : '0',
    'dflt_rule'  : None,
    'col_checks' : None,
    'fkey'       : None,
    'choices'    : None,
    })
# Audit link: row id of the deleting transaction (0 = not deleted).
cols.append ({
    'col_name'   : 'deleted_id',
    'data_type'  : 'INT',
    'short_descr': 'Deleted id',
    'long_descr' : 'Deleted row id',
    'col_head'   : 'Deleted',
    'key_field'  : 'N',
    'data_source': 'gen',
    'condition'  : None,
    'allow_null' : False,
    'allow_amend': False,
    'max_len'    : 0,
    'db_scale'   : 0,
    'scale_ptr'  : None,
    'dflt_val'   : '0',
    'dflt_rule'  : None,
    'col_checks' : None,
    'fkey'       : None,
    'choices'    : None,
    })
# Natural key: user-entered company identifier (alternate key 'A').
cols.append ({
    'col_name'   : 'company_id',
    'data_type'  : 'TEXT',
    'short_descr': 'Company id',
    'long_descr' : 'Company id',
    'col_head'   : 'Company',
    'key_field'  : 'A',
    'data_source': 'input',
    'condition'  : None,
    'allow_null' : False,
    'allow_amend': False,
    'max_len'    : 15,
    'db_scale'   : 0,
    'scale_ptr'  : None,
    'dflt_val'   : None,
    'dflt_rule'  : None,
    'col_checks' : None,
    'fkey'       : None,
    'choices'    : None,
    })
# Display name; the only amendable column.
cols.append ({
    'col_name'   : 'company_name',
    'data_type'  : 'TEXT',
    'short_descr': 'Company name',
    'long_descr' : 'Company name',
    'col_head'   : 'Name',
    'key_field'  : 'N',
    'data_source': 'input',
    'condition'  : None,
    'allow_null' : False,
    'allow_amend': True,
    'max_len'    : 30,
    'db_scale'   : 0,
    'scale_ptr'  : None,
    'dflt_val'   : None,
    'dflt_rule'  : None,
    'col_checks' : None,
    'fkey'       : None,
    'choices'    : None,
    })
# virtual column definitions (none for this table)
virt = []

# cursor definitions
cursors = []
# Maintenance cursor: list companies ordered by id; name is editable.
cursors.append({
    'cursor_name': 'companies',
    'title': 'Maintain companies',
    'columns': [
        ['company_id', 100, False, False],
        ['company_name', 260, True, False],
        ],
    'filter': [],
    'sequence': [['company_id', False]],
    })

# actions
actions = []
# Deletion guard: the built-in '_sys' company may never be deleted.
actions.append([
    'del_checks',
    [
        [
            'not_sys',
            'Cannot delete _sys',
            [
                ['check', '', 'company_id', '!=', "'_sys'", ''],
                ],
            ],
        ],
    ])
# After a row is inserted, create the company's database objects.
actions.append([
    'after_insert', '<create_company/>'
    ])
# After commit, refresh the in-memory company cache.
actions.append([
    'after_commit', '<pyfunc name="db.cache.company_changed"/>'
    ])
| [
"frank@chagford.com"
] | frank@chagford.com |
18cedc9c8cb002b7f8892e0fcdfd09d244337590 | ee974d693ca4c4156121f8cb385328b52eaac07c | /env/share/doc/networkx-2.3/examples/drawing/plot_house_with_colors.py | 68cb5d35963bc4da741f8e85b2e81230e2bb2533 | [
"BSD-3-Clause"
] | permissive | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1063c25d17b4cbafe673c0f1acef5d9c1c6b29ef79501dfe0f10cec8d2767104
size 649
| [
"Nqk180998!"
] | Nqk180998! |
461b635b955e33ca50eb9eb4f5cd167f0ab81b3a | 368be25e37bafa8cc795f7c9f34e4585e017091f | /.history/app_fav_books/views_20201115192231.py | f4f483905ce72aa6aed71af7ca372af90704fdb2 | [] | no_license | steven-halla/fav_books_proj | ebcfbfda0e7f3cdc49d592c86c633b1d331da513 | 512005deb84ac906c9f24d4ab0939bd0db096716 | refs/heads/master | 2023-03-30T09:37:38.016063 | 2021-04-02T20:27:22 | 2021-04-02T20:27:22 | 354,125,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,180 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
# contains user signup + login form
def view_index(request):
    """Landing page with the signup + login forms.

    Already-authenticated users are redirected straight to /books, the same
    place login/registration sends them.
    """
    if 'user_id' not in request.session:
        return render(request, "index.html")
    return redirect("/books")
# user signup form will post to a url (/register) which maps to this function
def register_new_user(request):
    """Handle the signup POST: validate, create the user, log them in."""
    # returns a dictionary of errors.
    # e.g. errors['first_name'] = 'letters only'
    errors = User.objects.user_registration_validator(request.POST)
    # Flash one "key - message" error per failed field, then bounce back to
    # the signup/login page.
    if len(errors) > 0:
        for key, value in errors.items():
            error_msg = key + ' - ' + value
            messages.error(request, error_msg)
        return redirect("/")
    else:
        first_name_from_post = request.POST['first_name']
        last_name_from_post = request.POST['last_name']
        email_from_post = request.POST['email']
        password_from_post = request.POST['password']
        # SECURITY(review): the password is stored in plaintext -- it should
        # be hashed (e.g. bcrypt / Django's auth hashers) before storage.
        new_user = User.objects.create(
            first_name=first_name_from_post,
            last_name=last_name_from_post,
            email=email_from_post,
            password=password_from_post
        )
        print(new_user.id)
        # Storing the id in the session marks the user as logged in.
        request.session['user_id'] = new_user.id
        return redirect('/books')
def login(request):
    """Handle the login POST: look up by email, compare password, start session."""
    # user did provide email/password, now lets check database
    email_from_post = request.POST['email']
    password_from_post = request.POST['password']
    # this will return all users that have the email_from_post
    # in future we should require email to be unique
    users = User.objects.filter(email=email_from_post)
    if len(users) == 0:
        messages.error(request, "email/password does not exist")
        return redirect("/")
    user = users[0]
    print(user)
    # SECURITY(review): plaintext password comparison; should use a salted
    # hash check instead.
    if (user.password != password_from_post):
        messages.error(request, "email/password does not exist")
        return redirect("/")
    # we store the logged in user's id in the session variable,
    # so that we can quickly get the current logged in user's id any time we need it in back end functions.
    # e.g. view_books when we look up the user by: User.objects.get(id=request.session['user_id'])
    # session variables are shared across all of my requests
    request.session['user_id'] = user.id
    return redirect("/books")
def logout(request):
    """Log the user out by clearing the whole session, then go home."""
    request.session.clear()
    return redirect("/")
# this will render view_books.html page.
# this page will show a list of all the books and the current logged in user.
def view_books(request):
    """Render the book list page for the logged-in user (login required)."""
    if 'user_id' not in request.session:
        return redirect("/")
    user = User.objects.get(id=request.session['user_id'])
    all_books_from_db = Books.objects.all()
    context = {
        "user": user,
        "all_books": all_books_from_db
    }
    return render(request, "view_books.html", context)
# this will render view_book.html page.
# this page will show a single book and the current logged in user.
def view_book(request, book_id):
    """Render the detail page for one book plus the logged-in user."""
    if 'user_id' not in request.session:
        return redirect("/")
    viewer = User.objects.get(id=request.session['user_id'])
    book = Books.objects.get(id=book_id)
    print(book.id)
    return render(request, "view_book.html", {"user": viewer, "book": book})
# adds new book to database that you like
def add_book(request):
    """Create a new book for the logged-in user and auto-favorite it."""
    if 'user_id' not in request.session:
        return redirect("/")
    errors = Books.objects.add_book_validator(request.POST)
    print(errors)
    # Flash validation errors and return to the list page.
    if len(errors) > 0:
        for key, value in errors.items():
            error_msg = key + ' - ' + value
            messages.error(request, error_msg)
        return redirect("/books")
    # current logged in user
    current_user = User.objects.get(id=request.session['user_id'])
    title_from_post = request.POST['title']
    description_from_post = request.POST['desc']
    book = Books.objects.create(
        title=title_from_post,
        desc=description_from_post,
        uploaded_by_id=current_user.id,
    )
    print(book)
    # The uploader automatically favorites their own book.
    book.users_who_favorite.add(current_user)
    return redirect("/books")
# favorite a book that you did not upload
def favorite_book(request, book_id):
    """Add the logged-in user to a book's favorites, then show the book."""
    if 'user_id' not in request.session:
        return redirect("/")
    book_from_db = Books.objects.get(id=book_id)
    user_from_db = User.objects.get(id=request.session['user_id'])
    # TODO if user has already added book as favorite, just return, don't re-add
    # (ManyToMany add() is idempotent, so re-adding is harmless in practice.)
    book_from_db.users_who_favorite.add(user_from_db)
    book_from_db.save()
    return redirect("/books/" + str(book_id))
#this will edit the description of the book and redirect back to book page
def edit_book(request, book_id):
    """Update a book's title/description from the edit form POST.

    NOTE(review): unlike the other views, there is no login/ownership check
    here -- confirm whether that is intentional.
    """
    errors = Books.objects.add_book_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect("/books/" + str(book_id) + "/edit")
    book_to_update = Books.objects.get(id=book_id)
    book_to_update.title = request.POST['title']
    book_to_update.desc = request.POST['desc']
    book_to_update.save()
    return redirect("/books/" + str(book_id))
#delete a book from the db but only if you uploaded it
def delete_book(request, book_id):
    """Delete a book and return to the list.

    NOTE(review): the surrounding comment says "only if you uploaded it",
    but no ownership (or even login) check is performed here -- confirm.
    """
    this_book = Books.objects.get(id=book_id)
    this_book.delete()
    return redirect("/books")
#removes a book from the favorite list of the user
def unfav_book(request, book_id):
    """Remove the logged-in user's favorite mark from a book.

    The original body contained a stray bare `this` statement (a guaranteed
    NameError at runtime) and set `uploaded_by = False` without saving; it is
    rewritten to mirror favorite_book() by removing the user from the book's
    users_who_favorite relation.
    """
    if 'user_id' not in request.session:
        return redirect("/")
    this_book = Books.objects.get(id=book_id)
    current_user = User.objects.get(id=request.session['user_id'])
    this_book.users_who_favorite.remove(current_user)
    this_book.save()
    return redirect("/books/" + str(book_id))
| [
"69405488+steven-halla@users.noreply.github.com"
] | 69405488+steven-halla@users.noreply.github.com |
2afb20a8d6138518efc06a6055d56149e339e7ab | 7d44745a63b5f470e718be3b02b08a2e4c90ff45 | /205IsomorphicStrings.py | d249d951cd37eef228fdd2d9612887bd285c7a92 | [] | no_license | SixingYan/algorithm | 20895471baca1b77d3dbe4a3310cc3789dc10c78 | 25b20d03b5613b731ac07baad1073daa3955113b | refs/heads/master | 2020-03-25T01:00:42.908903 | 2019-12-14T02:04:51 | 2019-12-14T02:04:51 | 143,217,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | """
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character but a character may map to itself.
Example 1:
Input: s = "egg", t = "add"
Output: true
Example 2:
Input: s = "foo", t = "bar"
Output: false
Example 3:
Input: s = "paper", t = "title"
Output: true
Note:
You may assume both s and t have the same length.
"""
"""
Comments
"""
"""
My
"""
class Solution(object):
    """Isomorphism check via a canonical first-occurrence encoding:
    two strings are isomorphic iff their encodings are equal."""

    def isIsomorphic(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool
        """
        return self.analysis(list(s)) == self.analysis(list(t))

    def analysis(self, s):
        """Map each character to the index of its first occurrence."""
        first_seen = {}
        encoded = []
        for ch in s:
            if ch not in first_seen:
                first_seen[ch] = len(first_seen)
            encoded.append(first_seen[ch])
        return encoded
"""
Fast
"""
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
s_to_t = {}
length = len(s)
dict_values = {}
for i in range(length):
if s[i] in s_to_t:
if s_to_t[s[i]] != t[i]:
return False
else:
if t[i] in dict_values:
if s[i] != dict_values[t[i]]:
return False
s_to_t[s[i]] = t[i]
dict_values[t[i]] = s[i]
return True | [
"plutoyem@outlook.com"
] | plutoyem@outlook.com |
03c2543b84cafbe50d743af624d68e6d7e91f476 | 33421188df7d7dcf2ee9be0771b0f2fe1ffad4f5 | /2012/gul-uc3m/bbdd-clave-valor/ejemplos/hash.py | 1dc822a5ca1e0c28e50659742f6cf7f6b5e48b81 | [
"CC-BY-4.0"
] | permissive | Gustavo17/ponencias | c0482fc7a72d7d4d829a54b94775e77c81ca5d97 | effb002b0300fe57d26776654b61a2396010da40 | refs/heads/master | 2021-01-13T09:18:13.837313 | 2014-11-21T04:58:11 | 2014-11-21T04:58:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from kyotocabinet import *
import time
import random
db = DB()
db.open("db.kch", DB.OCREATE|DB.OWRITER)
pre_time = time.time()
# 1 Million loop
for x in range(1,1000000):
db.add(x,x+x)
post_time = time.time()
print "Escribir 1M de registros: %.4f segundos" % (post_time-pre_time)
keys = [random.randint(1, 1000000) for x in range(1,10000)]
pre_time = time.time()
for x in keys:
db.get(x)
post_time = time.time()
print "Leer 10K registros aleatorios: %.4f segundos" % (post_time-pre_time)
cur = db.cursor()
pre_time = time.time()
cur.jump(10000)
for x in range(1,10000):
cur.step()
post_time = time.time()
print "Leer 10K registros consecutivos: %.4f segundos" % (post_time-pre_time)
db.close()
| [
"jesus.espino@kaleidos.net"
] | jesus.espino@kaleidos.net |
0d0af063bede796cb727ece6c2cdda4f9bf71a6a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_conforming.py | 7156a408292dbb1e044fcef4ea5ca44b272f2a6a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _CONFORMING():
def __init__(self,):
self.name = "CONFORMING"
self.definitions = conform
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['conform']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d52d63f378c671167dc83b8eb693e2317905e11a | 16ba38ef11b82e93d3b581bbff2c21e099e014c4 | /haohaninfo/Python_Stock_Sample/Python股票技巧範例/實單範例/92.py | 50bbeb402fdbf215e5b4078a807ba1937d5bdafb | [] | no_license | penguinwang96825/Auto-Trading | cb7a5addfec71f611bdd82534b90e5219d0602dd | a031a921dbc036681c5054f2c035f94499b95d2e | refs/heads/master | 2022-12-24T21:25:34.835436 | 2020-09-22T09:59:56 | 2020-09-22T09:59:56 | 292,052,986 | 2 | 5 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # -*- coding: UTF-8 -*-
# 載入相關套件
import datetime,function,indicator
import talib,numpy
import sys
# 取得當天日期
Date=datetime.datetime.now().strftime("%Y%m%d")
# 測試股票下單
Sid=sys.argv[1]
# 趨勢判斷
Trend=0
TrendEndTime=datetime.datetime.strptime(Date+'09:30:00','%Y%m%d%H:%M:%S')
BSPower2= indicator.BSPower2()
for i in function.getSIDMatch(Date,Sid):
time=datetime.datetime.strptime(Date+i[0],'%Y%m%d%H:%M:%S.%f')
price=float(i[2])
qty=int(i[3])
ask=float(i[5])
bid=float(i[6])
BSPower2.Add(price,qty,ask,bid)
if time > TrendEndTime:
sig = BSPower2.Get()
if sig[0] > sig[1]:
print('當日只做多單')
Trend=1
break
elif sig[0] < sig[1]:
print('當日只做空單')
Trend=-1
break
else:
print('當日趨勢不明')
break
| [
"penguinwang@smail.nchu.edu.tw"
] | penguinwang@smail.nchu.edu.tw |
6d88d996e37554efc91b4639f8e20013073ea73d | c55996cce55db9e15f679f2358f6782754cd7013 | /Chips/Or4Way.py | f5668ccfe102eb722df4a89c2402a0ce45333d49 | [
"MIT"
] | permissive | AdilRas/Nand2TetrisCaseGenerator | efc1e0d7900593f6ba47d99a43dfa1647bbc35ec | db82e6988d03d64884e4ac0cf02cecb78e275bc5 | refs/heads/master | 2021-01-15T02:58:23.552602 | 2020-02-26T19:14:39 | 2020-02-26T19:14:39 | 242,856,705 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | from src.TestCaseGenerator import *
input_variables = [Var("a", 4, "B"), Var("b", 4, "B")]
output_variables = [Var("out", 1, "B")]
# args = [in, sel]
def or4way_logic(args):
a = args[0]
out = []
if a > 0:
out.append(1)
else:
out.append(0)
return out
generate(name="Or4Way", numCases=10, inVars=input_variables, outVars=output_variables, function=or4way_logic)
| [
"="
] | = |
937bcf121e3fd1140f67af8b53050b8cfd8c62b3 | 6f8113e7a06699e8448dcbeb7c329be11c5926a5 | /apps/facebook/tests/urls.py | d86907b4cb8b7bf2049a0dc4ec2fde70619b8531 | [
"MIT",
"BSD-3-Clause"
] | permissive | groovecoder/affiliates | b906a565f3f035da09e22b15e9843b1aed7b313d | 7d22304ada7ffdb893fe8305630ec11eb84cfab5 | refs/heads/master | 2020-04-06T04:36:09.980997 | 2014-01-10T15:07:16 | 2014-01-10T15:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import os
from django.conf.urls.defaults import include, patterns, url
from django.http import HttpResponse
from django.template import RequestContext
from funfactory.manage import ROOT
from jingo import env
def base_template_view(request):
template = env.from_string("""
{% extends 'facebook/base.html' %}
{% block content %}test{% endblock %}
""")
return HttpResponse(template.render(RequestContext(request)))
urlpatterns = patterns('',
# Include base urls to avoid NoReverseMatch errors.
(r'', include('%s.urls' % os.path.basename(ROOT))),
url('^fb/test$', base_template_view, name='facebook.base_test'),
)
| [
"mkelly@mozilla.com"
] | mkelly@mozilla.com |
4ca89110e5367a6b1a354bffa4531ba99188ae58 | 1866d40b66fe6b0291f96a3c5eec1fbd9e1aee88 | /tests/_testsite/apps/forum_conversation/migrations/0011_topic_dummy.py | 0461c28d62f0db1a5b5769bb4f9088406f135caa | [
"BSD-3-Clause"
] | permissive | ellmetha/django-machina | f612ea0d1191001f8188fe868ddec69ec530b4d7 | 6586d2608bbffc31911ea6f9a15c570580116238 | refs/heads/main | 2023-07-31T18:11:28.819165 | 2023-05-28T01:19:33 | 2023-05-28T01:19:33 | 14,761,593 | 640 | 174 | BSD-3-Clause | 2023-07-25T23:20:46 | 2013-11-27T23:12:41 | Python | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('forum_conversation', '0010_auto_20170120_0224'),
]
operations = [
migrations.AddField(
model_name='topic',
name='dummy',
field=models.CharField(max_length=128, null=True, blank=True),
preserve_default=True,
),
]
| [
"morgan.aubert@zoho.com"
] | morgan.aubert@zoho.com |
28c1ab842c0df1278993d803b17abeb1dccb5a46 | c7295c1ffd8ad82c273524eab1a42d3a22741ba9 | /figures/third/trajectories.py | f4fa229790815bcdc4bc3dab6420f4162a41887e | [] | no_license | FedeClaudi/EscapePathSelection | 629d3ea6f5c14af144bdda16a899b3fb86340169 | 1bbdd95384e1c343495fcf33fc0c46b21110fe91 | refs/heads/master | 2023-04-18T18:16:27.153848 | 2022-04-25T12:20:40 | 2022-04-25T12:20:40 | 247,850,434 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,430 | py |
# %%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from loguru import logger
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from fcutils.plot.figure import clean_axes
from fcutils.plot.elements import plot_mean_and_error
from fcutils.maths import rolling_mean
import sys
from pathlib import Path
import os
module_path = Path(os.path.abspath(os.path.join("."))).parent.parent
sys.path.append(str(module_path))
sys.path.append('./')
from figures.third import MODELS_COLORS, MODELS, MAZES, fig_3_path
from figures.settings import dpi
from figures.third import PsychometricM1, PsychometricM6, QTableModel, DynaQModel, InfluenceZones, Status, QTableTracking, DynaQTracking, InfluenceZonesTracking
from figures.third.settings import TRAINING_SETTINGS, RANDOM_INIT_POS, REWARDS
from figures.settings import dpi
'''
Plot the escape trajectories of trained agents
'''
# %%
logger.remove()
logger.add(sys.stdout, level='INFO')
# -------------------------------- parameters -------------------------------- #
# change training settings to reflect parametsr
TRAINING_SETTINGS['episodes'] = 250
TRAINING_SETTINGS['max_n_steps'] = 500
# def plot_maze(states_counts, name, exploration):
# norm=mpl.colors.LogNorm(vmin=0, vmax=500)
# f, ax = plt.subplots()
# ax.scatter(
# [k[0] for k,v in states_counts.items() if v>0],
# [k[1] for k,v in states_counts.items() if v>0],
# c=[v for v in states_counts.values() if v>0],
# vmin=1, vmax=500, cmap='bwr', lw=1, edgecolors=['k'], marker='s', s=65, norm=norm,
# )
# ax.set(ylim=[50, 0], title=name + ' ' + exploration)
# ax.axis('equal')
# ax.axis('off')
# divider = make_axes_locatable(ax)
# cax = divider.append_axes('right', size='5%', pad=0.1)
# cmap = mpl.cm.bwr
# # norm = mpl.colors.Normalize(vmin=1, vmax=500)
# f.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
# cax=cax, orientation='vertical', label='# visits')
# f.savefig(fig_3_path / f'{name}_{exploration}_exploration_occupancy.eps', format='eps', dpi=dpi)
# %%
# ---------------------------------------------------------------------------- #
# FREE EXPL #
# ---------------------------------------------------------------------------- #
def plot(agent, trajectories, name, exploration):
f, ax = plt.subplots(figsize=(9, 9))
for traj in trajectories:
ax.plot([s[0] for s in traj], [s[1] for s in traj], color='w', lw=8, zorder=1)
ax.plot([s[0] for s in traj], [s[1] for s in traj], color='r', lw=6, zorder=2)
# draw maze
x, y = np.where(agent.environment.maze == 0)[::-1]
ax.scatter(
x,
y,
color=[.8, .8, .8],
lw=1, edgecolors=['k'], marker='s', s=250, zorder=-1
)
ax.set(ylim=[50, 0], title=name)
ax.axis('equal')
ax.axis('off')
f.savefig(fig_3_path / f'{name}_{exploration}_escape_trajectories.eps', format='eps', dpi=dpi)
agents = {
# 'QTable':QTableModel,
# 'DynaQ_20': DynaQModel,
'InfluenceZonesNoSheltVec':InfluenceZones,
}
agent_kwargs = {
'QTable':dict(learning_rate=.9, penalty_move = 1e-8),
'DynaQ_20':dict(n_planning_steps=20),
'InfluenceZonesNoSheltVec':dict(predict_with_shelter_vector=False, learning_rate=.2, discount=.8),
}
maze = PsychometricM1
for n, (name, model) in enumerate(agents.items()):
trajectories = []
for i in range(3):
logger.info(f' training agent: {name} ')
# remove duplicate parameters
settings = TRAINING_SETTINGS.copy()
rewards = REWARDS.copy()
for param in agent_kwargs[name].keys():
if param in settings.keys():
# print(f'[dim]Overring default settings value for {param}')
del settings[param]
# adjust rewards per model
if param in rewards.keys():
# print(f'[dim]Overring default reward value for {param}')
rewards[param] = agent_kwargs[name][param]
# create an instance
_maze = maze(rewards)
_maze.build_graph()
_maze.shelter_found = False
agent = model(_maze, name=_maze.name, **settings, **agent_kwargs[name])
# train
agent.train(random_start=RANDOM_INIT_POS, episodes=TRAINING_SETTINGS['episodes'], test_performance=True)
# test
status, play_steps, play_reward, escape_arm, states = _maze.play(agent, start_cell=_maze.START)
trajectories.append(states)
# draw trajectories
plot(agent, trajectories, name, 'free')
# %%
# %%
# ---------------------------------------------------------------------------- #
# GUIDED EXPL #
# ---------------------------------------------------------------------------- #
sessions = [36, 24, 25]
agents = {
'QTable':QTableTracking,
'DynaQ_20':DynaQTracking,
'InfluenceZonesNoSheltVec':InfluenceZonesTracking,
}
agent_kwargs = {
'QTable':dict(learning_rate=.9),
'DynaQ_20':dict(n_planning_steps=20),
'InfluenceZonesNoSheltVec':dict(predict_with_shelter_vector=False, learning_rate=.2, discount=.8),
}
# iterate over mazes and models
for name, model in agents.items():
# agent specific settings
agent_settings = TRAINING_SETTINGS.copy()
agent_rewards = REWARDS.copy()
for param in agent_kwargs[name].keys():
if param in agent_settings.keys():
del agent_settings[param]
# adjust rewards per model
if param in agent_rewards.keys():
agent_rewards[param] = agent_kwargs[name][param]
# iterate over trials
trajectories = []
for session_number in sessions:
# instantiate model and maze
_maze = maze(agent_rewards)
_model = model(
_maze,
'M1',
take_all_actions=False,
trial_number=session_number,
name=_maze.name,
**agent_settings, **agent_kwargs[name])
# train
_model.train(film=False)
# test
status, play_steps, play_reward, escape_arm, states = _maze.play(_model, start_cell=_maze.START)
trajectories.append(states)
plot(_model, trajectories, name, 'guided')
# %%
| [
"federicoclaudi@protonmail.com"
] | federicoclaudi@protonmail.com |
8ed056e6cedb9357097a5fd5b51a47b4610ce6ae | 8f6cc0e8bd15067f1d9161a4b178383e62377bc7 | /kaggle_song_git/code_box/VALIDATION_fake_feature_insert_V1001/report/0.687954one_train_V1003.py | 5e8ec3aab8083acb97a86c9b41a530c4981ac756 | [] | no_license | humorbeing/python_github | 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | e4b4b49bee7e7e3843c6874717779ce8d619bd02 | refs/heads/master | 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 | Python | UTF-8 | Python | false | false | 8,077 | py | import numpy as np
import pandas as pd
import lightgbm as lgb
import datetime
import math
import gc
import time
import pickle
from sklearn.model_selection import train_test_split
since = time.time()
data_dir = '../data/'
save_dir = '../saves/'
load_name = 'train_set'
dt = pickle.load(open(save_dir+load_name+'_dict.save', "rb"))
df = pd.read_csv(save_dir+load_name+".csv", dtype=dt)
del dt
# barebone = True
barebone = False
if barebone:
ccc = [i for i in df.columns]
ccc.remove('target')
df.drop(ccc, axis=1, inplace=True)
# must be a fake feature
inner = [
'FAKE_[]_0.6788_Light_gbdt_1512883008.csv'
]
inner = False
def insert_this(on):
global df
on = on[:-4]
df1 = pd.read_csv('../saves/feature/'+on+'.csv')
df1.drop('id', axis=1, inplace=True)
on = on[-10:]
df1.rename(columns={'target': 'FAKE_'+on}, inplace=True)
df = df.join(df1)
del df1
cc = df.drop('target', axis=1)
# print(cc.dtypes)
cols = cc.columns
del cc
counter = {}
def get_count(x):
try:
return counter[x]
except KeyError:
return 0
def add_this_counter_column(on_in):
global counter, df
read_from = '../fake/saves/'
counter = pickle.load(open(read_from+'counter/'+'ITC_'+on_in+'_dict.save', "rb"))
df['ITC_'+on_in] = df[on_in].apply(get_count).astype(np.int64)
# counter = pickle.load(open(read_from + 'counter/' + 'CC11_' + on_in + '_dict.save', "rb"))
# df['CC11_' + on_in] = df[on_in].apply(get_count).astype(np.int64)
# df.drop(on_in, axis=1, inplace=True)
for col in cols:
print("'{}',".format(col))
# add_this_counter_column(col)
cols = ['song_id', 'msno']
for col in cols:
# print("'{}',".format(col))
add_this_counter_column(col)
def log10me(x):
return np.log10(x)
def log10me1(x):
return np.log10(x+1)
def xxx(x):
d = x / (x + 1)
return x
for col in cols:
colc = 'ITC_'+col
# df[colc + '_log10'] = df[colc].apply(log10me).astype(np.float64)
df[colc + '_log10_1'] = df[colc].apply(log10me1).astype(np.float64)
# df[colc + '_x_1'] = df[colc].apply(xxx).astype(np.float64)
# col1 = 'CC11_'+col
# df['OinC_'+col] = df[col1]/df[colc]
# df.drop(colc, axis=1, inplace=True)
# load_name = 'train_set'
# read_from = '../saves01/'
# dt = pickle.load(open(read_from+load_name+'_dict.save', "rb"))
# train = pd.read_csv(read_from+load_name+".csv", dtype=dt)
# del dt
#
# train.drop(
# [
# 'target',
# ],
# axis=1,
# inplace=True
# )
#
# df = df.join(train)
# del train
if inner:
for i in inner:
insert_this(i)
print('What we got:')
print(df.dtypes)
print('number of rows:', len(df))
print('number of columns:', len(df.columns))
num_boost_round = 5
early_stopping_rounds = 50
verbose_eval = 10
boosting = 'gbdt'
learning_rate = 0.02
num_leaves = 511
max_depth = -1
max_bin = 255
lambda_l1 = 0.2
lambda_l2 = 0
bagging_fraction = 0.9
bagging_freq = 2
bagging_seed = 2
feature_fraction = 0.9
feature_fraction_seed = 2
params = {
'boosting': boosting,
'learning_rate': learning_rate,
'num_leaves': num_leaves,
'max_depth': max_depth,
'lambda_l1': lambda_l1,
'lambda_l2': lambda_l2,
'max_bin': max_bin,
'bagging_fraction': bagging_fraction,
'bagging_freq': bagging_freq,
'bagging_seed': bagging_seed,
'feature_fraction': feature_fraction,
'feature_fraction_seed': feature_fraction_seed,
}
# on = [
# 'msno',
# 'song_id',
# 'target',
# 'source_system_tab',
# 'source_screen_name',
# 'source_type',
# 'language',
# 'artist_name',
# 'song_count',
# 'member_count',
# 'song_year',
# ]
# df = df[on]
fixed = [
'target',
'msno',
'song_id',
'source_system_tab',
'source_screen_name',
'source_type',
'artist_name',
# 'composer',
# 'lyricist',
'song_year',
# 'language',
# 'top3_in_song',
# 'rc',
'ITC_song_id_log10_1',
'ITC_msno_log10_1',
# 'ITC_source_system_tab_log10_1',
# 'ITC_source_screen_name_log10_1',
# 'ITC_source_type_log10_1',
# 'ITC_artist_name_log10_1',
# 'FAKE_1512883008',
]
result = {}
for w in df.columns:
print("'{}',".format(w))
work_on = [
'top3_in_song',
# 'ITC_composer_log10_1',
# 'ITC_lyricist_log10_1',
# 'ITC_language_log10_1',
# 'ITC_song_year_log10_1',
# 'ITC_song_country_log10_1',
# 'ITC_rc_log10_1',
]
for w in work_on:
if w in fixed:
pass
else:
print('working on:', w)
toto = [i for i in fixed]
toto.append(w)
df_on = df[toto]
for col in df_on.columns:
if df_on[col].dtype == object:
df_on[col] = df_on[col].astype('category')
print()
print('Our guest selection:')
print(df_on.dtypes)
print('number of columns:', len(df_on.columns))
print()
# save_me = True
save_me = False
if save_me:
print(' SAVE ' * 5)
print(' SAVE ' * 5)
print(' SAVE ' * 5)
print('creating train set.')
save_name = 'train'
vers = '_me2'
d = df_on.dtypes.to_dict()
# print(d)
print('dtypes of df:')
print('>' * 20)
print(df_on.dtypes)
print('number of columns:', len(df_on.columns))
print('number of data:', len(df_on))
print('<' * 20)
df_on.to_csv(save_dir + save_name + vers + '.csv', index=False)
pickle.dump(d, open(save_dir + save_name + vers + '_dict.save', "wb"))
print('done.')
length = len(df_on)
train_size = 0.76
train_set = df_on.head(int(length*train_size))
val_set = df_on.drop(train_set.index)
del df_on
train_set = train_set.sample(frac=1)
X_tr = train_set.drop(['target'], axis=1)
Y_tr = train_set['target'].values
X_val = val_set.drop(['target'], axis=1)
Y_val = val_set['target'].values
del train_set, val_set
t = len(Y_tr)
t1 = sum(Y_tr)
t0 = t - t1
print('train size:', t, 'number of 1:', t1, 'number of 0:', t0)
print('train: 1 in all:', t1/t, '0 in all:', t0/t, '1/0:', t1/t0)
t = len(Y_val)
t1 = sum(Y_val)
t0 = t - t1
print('val size:', t, 'number of 1:', t1, 'number of 0:', t0)
print('val: 1 in all:', t1/t, '0 in all:', t0/t, '1/0:', t1/t0)
print()
print()
train_set = lgb.Dataset(
X_tr, Y_tr,
# weight=[0.1, 1]
)
# train_set.max_bin = max_bin
val_set = lgb.Dataset(
X_val, Y_val,
# weight=[0.1, 1]
)
train_set.max_bin = max_bin
val_set.max_bin = max_bin
del X_tr, Y_tr, X_val, Y_val
params['metric'] = 'auc'
params['verbose'] = -1
params['objective'] = 'binary'
print('Training...')
model = lgb.train(params,
train_set,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
valid_sets=[train_set, val_set],
verbose_eval=verbose_eval,
)
print('best score:', model.best_score['valid_1']['auc'])
print('best iteration:', model.best_iteration)
del train_set, val_set
print('complete on:', w)
result[w] = model.best_score['valid_1']['auc']
print()
print(model.feature_name())
print(model.feature_importance())
import operator
sorted_x = sorted(result.items(), key=operator.itemgetter(1))
# reversed(sorted_x)
# print(sorted_x)
for i in sorted_x:
name = i[0] + ': '
name = name.rjust(40)
name = name + str(i[1])
print(name)
print()
time_elapsed = time.time() - since
print('[timer]: complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
'''1,2, artist name'''
| [
"geemguang@gmail.com"
] | geemguang@gmail.com |
1bb914908b9a0071556fbc47a8343135d70438a3 | da7d1008d925872317bcbe8b5f1e4f00a79d0936 | /cloudywatch/manage.py | d1e93db31df166038f0669f71b3b47f7bbf152c7 | [] | no_license | gregdingle/cloudywatch | b08b32ca50bb958bc30796c4d1482755c46978b2 | 009651f6302f7bb12b5cd46bcb7161b278d7dfbb | refs/heads/master | 2021-01-01T15:35:53.760805 | 2013-11-20T12:28:21 | 2013-11-20T12:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
# Django should be at least 1.5
import django
assert django.VERSION[0:2] == (1, 5)
if __name__ == "__main__":
execute_manager(settings)
| [
"ppr.vitaly@gmail.com"
] | ppr.vitaly@gmail.com |
f291c903c72585e9ca8089e497ba10907c373013 | 67af9dc77608a6cd83fdf219b3b76000634c0634 | /pixelcnn/layers.py | dab5d229536d0e0e042a5d70968fb13802ee7c03 | [
"Apache-2.0"
] | permissive | kngwyu/pytorch-pixelcnn | 7b4d0bb0e8662ce976b5faede41249b94d81e03c | e59585d5d533de77c7b51a8e822da0264f2b56e5 | refs/heads/master | 2020-06-01T00:23:54.716580 | 2019-08-12T05:44:29 | 2019-08-12T05:44:29 | 190,556,770 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,522 | py | import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torch.nn.utils import weight_norm
from typing import Callable, Optional, Tuple
from .utils import down_cut, right_cut
class ConcatELU(nn.Module):
__constants__ = ['alpha']
def __init__(self, alpha: float = 1.) -> None:
super().__init__()
self.alpha = alpha
def forward(self, x: Tensor) -> Tensor:
return F.elu(torch.cat((x, -x), dim=1), self.alpha, inplace=True)
def extra_repr(self):
return 'alpha={}'.format(self.alpha)
class DownShiftedConv2d(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel: Tuple[int, int] = (2, 3),
stride: int = 1,
right_shift: bool = False,
) -> None:
super().__init__()
kh, kw = kernel
# pad: (Left, Right, Top, Bottom)
pad = (kw - 1, 0, kh - 1, 0) if right_shift else ((kw - 1) // 2, (kw - 1) // 2, kh - 1, 0)
self.pad = nn.ZeroPad2d(pad)
self.conv = weight_norm(nn.Conv2d(in_channel, out_channel, kernel, stride))
def forward(self, x: Tensor) -> Tensor:
x = self.pad(x)
x = self.conv(x)
return x
class DownShiftedDeconv2d(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel: Tuple[int, int] = (2, 3),
stride: int = 1,
right_shift: bool = False,
) -> None:
super().__init__()
if stride != 1 and stride != 2:
raise ValueError('Only 1 or 2 is allowed as stride size for DownShiftedDeconv2d')
pad = 0 if stride == 1 else 1
deconv = nn.ConvTranspose2d(in_channel, out_channel, kernel, stride, output_padding=pad)
self.deconv = weight_norm(deconv)
self.kernel = kernel
self.scaler = right_cut if right_shift else down_cut
def forward(self, x: Tensor) -> Tensor:
x = self.deconv(x)
return self.scaler(x, *self.kernel)
class Conv1x1(nn.Module):
def __init__(self, in_channel: int, out_channel: int) -> None:
super().__init__()
self.conv = weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size=1))
def forward(self, x: Tensor) -> Tensor:
return self.conv(x)
class GatedResNet(nn.Module):
def __init__(
self,
in_channel: int,
conv: Callable[[int, int], nn.Module],
nonlinearity: nn.Module = ConcatELU(),
aux_enlargement: int = 0,
) -> None:
super().__init__()
nl_enlargement = 2 if isinstance(nonlinearity, ConcatELU) else 1
self.conv1 = conv(in_channel * nl_enlargement, in_channel)
if aux_enlargement == 0:
self.skip_op = None
else:
self.skip_op = Conv1x1(nl_enlargement * aux_enlargement * in_channel, in_channel)
self.nonlinearity = nonlinearity
self.dropout = nn.Dropout2d(0.5)
self.conv2 = conv(nl_enlargement * in_channel, nl_enlargement * in_channel)
def forward(self, x_orig: Tensor, aux: Optional[Tensor] = None) -> Tensor:
x = self.conv1(self.nonlinearity(x_orig))
if aux is not None and self.skip_op is not None:
x += self.skip_op(self.nonlinearity(aux))
x = self.nonlinearity(x)
x = self.dropout(x)
x = self.conv2(x)
x1, x2 = torch.chunk(x, 2, dim=1)
c3 = x1 * torch.sigmoid(x2)
return x_orig + c3
| [
"yuji.kngw.80s.revive@gmail.com"
] | yuji.kngw.80s.revive@gmail.com |
329a20b4c7110842b2129cc25616775c00bf8168 | d22a2fbb9adb82644c5665242661bad172550552 | /venv/ex44.py | b5c74a3b546e9fe734219d94756137000ecbe908 | [] | no_license | felipemanfrin/Python-Zero-ao-Zeno | e98ba3e4b974e88801b8bc947f461b125bc665b8 | d6d08aa17071f77170bbd105452b0d05586131c8 | refs/heads/master | 2022-07-29T19:38:41.729178 | 2020-05-25T01:02:18 | 2020-05-25T01:02:18 | 265,356,280 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | preco = float(input('Insira o valor do produto a ser pago : '))
print('''Escolha o metodo de pagamento:
[1] A vista ou cheque
[2] a vista cartão
[3] 2x no cartao
[4] 3x ou mais no cartão''')
opcao = int(input('Digite a opção : '))
if opcao == 1 :
final = preco*0.90
print('O valor a se pagar nesses metodos é de {} '.format(final))
elif opcao == 2 :
final = preco*0.95
print('O valor a se pagar é de {}'.format(final))
elif opcao == 3 :
final = preco
print('o valor a se pagar é de {} '.format(final))
else :
final = preco *1.20
print('O valor a se pagar é de {} '.format(final))
| [
"felipemanfrin@gmail.com"
] | felipemanfrin@gmail.com |
b1ef8a76cff5ccdd0bd13d20890f8c2df9f25e16 | 92578e316b0d1b760db85c449898c1560433a4bb | /backend/notes/urls.py | 66f4181f4a81f45ec0f6d6b4f4f1882089f05ed7 | [] | no_license | turamant/dj_plus_vue | 461dc6b7165ab7ecdf8eb3206ca9047b6db6c920 | 2d7b5ef897b418e5269b7b01c1fd6207ec5be4b8 | refs/heads/main | 2023-03-29T15:43:49.311175 | 2021-04-12T11:43:36 | 2021-04-12T11:43:36 | 357,166,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py |
from rest_framework import routers
from .views import NoteViewSet
# Создаем router и регистрируем наш ViewSet
router = routers.DefaultRouter()
router.register('notes', NoteViewSet)
# URLs настраиваются автоматически роутером
urlpatterns = router.urls | [
"tur1amant@gmail.com"
] | tur1amant@gmail.com |
95bc2c171d57e18811811934b2b5b0cc902e8cc5 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_137/544.py | d93a3ffd436e017136abd200f2f8988eb7da45e9 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | import cPickle as pickle
def main():
d = pickle.load(open('c.pickle', 'rb'))
num_of_tests = int(raw_input())
for test_i in range(num_of_tests):
n, m, k = map(int, raw_input().split())
ans = d['%s-%s-%s' % (n, m, k)]
if k == n * m - 1:
ans = 'c' + '*' * (m - 1) + '\n'
for i in range(n - 1):
ans += '*' * m + '\n'
print "Case #%d:" % (test_i + 1)
if ans[-1] == '\n':
ans = ans[:-1]
print ans
if __name__ == "__main__":
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
61ef2940a1d29bb6e311bce2f96ea1678f79606b | f11ecb59dab63af605c6e5f256ee59e00447ecc1 | /763-partition-labels.py | f873de4ea39b18121ba64673b1decc40b496d1ef | [] | no_license | floydchenchen/leetcode | 626d55f72ec914764385ce82b0f3c57f5a7e9de8 | 9d9e0c08992ef7dbd9ac517821faa9de17f49b0e | refs/heads/master | 2022-10-07T20:33:55.728141 | 2020-06-08T16:09:17 | 2020-06-08T16:09:17 | 269,525,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | # 763. Partition Labels
# A string S of lowercase letters is given. We want to partition this string into as many parts as possible so that each letter appears in at most one part,
# and return a list of integers representing the size of these parts.
# Example 1:
# Input: S = "ababcbacadefegdehijhklij"
# Output: [9,7,8]
# Explanation:
# The partition is "ababcbaca", "defegde", "hijhklij".
# This is a partition so that each letter appears in at most one part.
# A partition like "ababcbacadefegde", "hijhklij" is incorrect, because it splits S into less parts.
from collections import defaultdict
class Solution:
# sliding window
def partitionLabels(self, S: str) -> List[int]:
# a map to store a char's last occurring location
pos = defaultdict(int)
for i, char in enumerate(S):
pos[char] = i
partition = []
l, r = 0, 0
for i, char in enumerate(S):
# update the right index
r = max(r, pos[char])
if i == r:
partition.append(r - l + 1)
l = r + 1
return partition
| [
"chen2918@umn.edu"
] | chen2918@umn.edu |
207c5d7f1c7e8039b7aad55a5d63284589af7e80 | 8ee5dfd87ce637a46c496853f55d32f226b238f8 | /backend/Experiments/Data/PosControl/Plotter.py | 06270322e2630227c68dd802b20334c0aec05b2d | [] | no_license | cholazzzb/react-parrotar2-swarm | 71eb6be8682e00015103af3df69a6cc01f7a919f | dccdfa841184af6ec62910f50c3335b812cd0201 | refs/heads/main | 2023-06-16T01:24:57.169242 | 2021-07-08T03:54:08 | 2021-07-08T03:54:08 | 354,490,913 | 0 | 0 | null | 2021-07-08T03:54:08 | 2021-04-04T08:15:37 | JavaScript | UTF-8 | Python | false | false | 1,245 | py | import matplotlib.pyplot as plt
# from tum_PSO import tum_PSO
# from custom_PSO import custom_PSO
from custom_PSO_z import custom_PSO_z
from custom_PSO_z_lama import custom_PSO_z_lama
from fine_tuning import fine_tuning
from fine_tuning_disturbance import fine_tuning_disturbance
time = fine_tuning_disturbance["time"]
xPos = fine_tuning_disturbance["xPos"]
yPos = fine_tuning_disturbance["yPos"]
zPos = fine_tuning_disturbance["zPos"]
# range(170) for POS AND TIME
# for i in range(350):
# time.pop()
# xPos.pop()
# yPos.pop()
# zPos.pop()
xPosTarget = []
yPosTarget = []
for data in yPos:
xPosTarget.append(1.95)
yPosTarget.append(1.27)
# ----- POS AND TIME -----
plt.plot(time, xPos, label="Marvelmind Koordinat X")
plt.plot(time, xPosTarget, label="Setpoint X")
plt.plot(time, yPos, label="Marvelmind Koordinat Y")
plt.plot(time, yPosTarget, label="Setpoint Y")
plt.title("Kontrol Posisi")
plt.xlabel('Waktu (detik)')
plt.ylabel('Koordinat (meter)')
# plt.ylim(0.5, 2.5)
plt.legend(loc="lower right")
# ----- MAP -----
# plt.scatter(xPos, yPos)
# plt.title("Posisi X dan Y")
# plt.xlabel('Koordinat x (meter)')
# plt.ylabel('Koordinat y (meter)')
# plt.ylim(0.85, 2.163)
plt.show()
# Set point: X =1.95, Y = 1.27 | [
"nicsphehehe@gmail.com"
] | nicsphehehe@gmail.com |
3257ffae5f57a16cff15a802d965f1ae58e0f0e7 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/contrib/graph_editor/subgraph.py | caf690f68dcdece5c40a3526673d99e70d7d5a26 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a739031b57fa33ef7882c98b7eefeea5d588318fc9ef255db3f17b609ce8653c
size 25666
| [
"github@cuba12345"
] | github@cuba12345 |
e04235d8de2b8dbad77d7369cee71f1946da3910 | eabc8c12802053683ab0b42d95135c0b777945a1 | /rooms/room-h/main.py | 09fffc0f9de7cd8371014ddd2af950e79eb386fa | [] | no_license | Hacker0x01/44con-ctf-2019 | e569fb3722520411f9928c9b0d5d21d91c2ee8ca | 4a86c14d488dd1515615d702942172fa5e7c5fc2 | refs/heads/master | 2023-08-08T19:34:11.081687 | 2019-09-13T22:22:30 | 2019-09-13T22:22:30 | 208,354,485 | 18 | 7 | null | 2023-07-22T16:09:35 | 2019-09-13T22:12:05 | Python | UTF-8 | Python | false | false | 1,960 | py | from flask import Flask, request
import hashlib, json, os, sys
from socket import *
import sqlite3
def query(sql, commit=False):
    """Execute *sql* against the shared in-memory database.

    When *commit* is True the transaction is committed (and None is
    returned); otherwise all fetched rows are returned.
    """
    cursor = conn.cursor()
    # '%' is doubled so the statement survives any later %-style formatting.
    cursor.execute(sql.replace('%', '%%'))
    if not commit:
        return cursor.fetchall()
    conn.commit()
def setup():
    """Create the in-memory SQLite database and seed the users/flag tables."""
    global conn
    conn = sqlite3.connect(':memory:')
    def sha1(data):
        # SQL-callable helper: hex SHA-1 digest of its argument.
        return hashlib.sha1(data).hexdigest()
    # Expose sha1() inside SQL so passwords can be hashed in-statement.
    conn.create_function('sha1', 1, sha1)
    query('''
    CREATE TABLE users (username text, password text)
    ''', commit=True)
    query('''
    INSERT INTO users (username, password) VALUES ('eldon', sha1('chess'))
    ''', commit=True)
    query('''
    CREATE TABLE flag (value text)
    ''', commit=True)
    query('''
    INSERT INTO flag (value) VALUES ('I had in mind 5ome7hing.a.little m0re~radical.')
    ''', commit=True)
app = Flask(__name__)
# Landing page: a plain login form posting to /login.
home = '''
<!doctype html>
<html>
<body>
<form action="/login" method="POST">
USERNAME: <input type="text" name="username"><br>
PASSWORD: <input type="password" name="password"><br>
<input type="submit" value="LOG IN">
</form>
</body>
</html>
'''
# Result-page template. NOTE: this name is shadowed by the login() view
# function defined below, so the template is effectively unused.
login = '''
<!doctype html>
<html>
<body>
<b>%s</b>
</body>
</html>
'''
@app.route('/')
def hello():
    # Serve the static login form.
    return home
@app.route('/login', methods=['POST'])
def login():
    """Check the posted credentials against the users table.

    NOTE(review): only the username is backslash-escaped; the password is
    interpolated into the SQL unescaped. This injection appears to be the
    intended vulnerability of this CTF room, so it is deliberately left
    in place.
    """
    try:
        username, password = request.form['username'], request.form['password']
        data = query('SELECT username FROM users WHERE username=\'%s\' AND password=sha1(\'%s\')' % (
            username.replace('\\', '\\\\').replace('\'', '\\\''),
            password
        ))
        if len(data) == 0:
            return '<b>INVALID CREDENTIALS</b>'
        else:
            return '<b>INSUFFICIENT ACCESS FOR USER %s</b>' % data[0][0]
    except:
        # Any failure (malformed SQL, missing form fields) yields a generic
        # error page rather than a traceback.
        return '<b>ERROR</b>'
if __name__ == "__main__":
    # Parent process: poll until the daemonized server accepts connections
    # on port 80, then exit so the launcher knows the service is up.
    if os.fork() > 0:
        while True:
            try:
                sock = socket(AF_INET, SOCK_STREAM)
                sock.connect(('127.0.0.1', 80))
                break
            except:
                pass
        sys.exit(0)
    # Child: classic double-fork daemonization -- new session, cleared
    # umask, and a second fork so the server is not a session leader.
    os.setsid()
    os.umask(0)
    if os.fork() > 0:
        sys.exit(0)
    setup()
    app.run(host='127.0.0.1', port=80, threaded=False, processes=1)
| [
"cody.brocious@gmail.com"
] | cody.brocious@gmail.com |
ac5e879ff023cf1256f3cf20aecf9aed14119993 | 3df1bdc21727f40ef0ee296bf137bf3190ad00ec | /astrodynamics/Rocket_Seminar_Series/projectile_newtonian_gravity.py | 5b9b13fb403c9a761d3080689f4c4aae63aefae2 | [] | no_license | tin97-bb/Python | c2b30829fd4e14c50963dc4074e87ef06869cebe | 3cedc7d5bef2af46b6618026cd7a41e61700ce62 | refs/heads/master | 2023-07-29T07:06:15.116923 | 2021-09-13T17:04:48 | 2021-09-13T17:04:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 4 21:56:24 2021
@author: carlos
"""
####Import all the modules we need
import numpy as np ###numeric python
import matplotlib.pyplot as plt ###matlab style plotting
import scipy.integrate as sci ##integration toolbox
plt.close("all")  # start from a clean figure state
##DEFINE SOME CONSTANT PARAMETERS
G = 6.6742*10**-11; #%%Gravitational constant (SI Unit)
###PLANET
###EARTH
Rplanet = 6357000.0 #meters
mplanet = 5.972e24 #kg
###KERBIN
#Rplanet = 600000 #meters
#mplanet = 5.2915158*10**22 #
##ROCKET
mass = 640.0/1000.0 ##kg  (640 g projectile)
def gravity(z):
    """Newtonian gravitational acceleration at radial coordinate *z*.

    Returns 0.0 inside the planet so the integrator stops pulling the
    projectile once it is below the surface.
    """
    global Rplanet, mplanet
    r = np.sqrt(z**2)  # radial distance (magnitude of z)
    if r < Rplanet:
        return 0.0
    return G*mplanet/(r**3)*r
###Equations of Motion
###F = m*a = m*zddot
## z is the altitude of the surface
## this is in meter
## zdot is the velocity
## zddot is the acceleration
###Second Order Differential Equation
def Derivatives(state, t):
    """Right-hand side of the 1-D equations of motion, for scipy's odeint.

    state = [z, zdot]; returns [zdot, zddot] with zddot = F_total / mass.
    """
    global mass
    z, velz = state[0], state[1]
    # Total force: gravity (toward the planet) plus aerodynamics and thrust,
    # both of which are zero for this ballistic projectile.
    total_force = -gravity(z)*mass + 0.0 + 0.0
    # Kinematics: dz/dt = velz; dynamics: d(velz)/dt = F/m.
    return np.asarray([velz, total_force/mass])
###########EVERYTHING BELOW HERE IS THE MAIN SCRIPT###
###Test Surface Gravity
print('Surface Gravity (m/s^2) = ',gravity(Rplanet))
###Initial Conditions
z0 = Rplanet ##m  (launch from the surface)
velz0 = 25*331.0 #m/s  (25 x ~speed of sound at sea level)
stateinitial = np.asarray([z0,velz0])
##Time window
tout = np.linspace(0,345,1000)
###Numerical Integration Call
stateout = sci.odeint(Derivatives,stateinitial,tout)
###REname variables
zout = stateout[:,0]
altitude = zout - Rplanet  # height above the surface
velzout = stateout[:,1]
###Plot
###ALTITUDE
plt.plot(tout,altitude)
plt.xlabel('Time (sec)')
plt.ylabel('Altitude (m)')
plt.grid()
###VELOCITY
plt.figure()
plt.plot(tout,velzout)
plt.xlabel('Time (sec)')
plt.ylabel('Normal Speed (m/s)')
plt.grid()
"cmontalvo@southalabama.edu"
] | cmontalvo@southalabama.edu |
22b4ec30dc144453b9f66f4de4f090bee518ffaf | ecf89d17601ee16ef176a9fc0ce497e4e685cc21 | /python/112.py | f74559351f115ee03b2fd2f5a3f7192f843447a5 | [] | no_license | kartikeya-shandilya/project-euler | daa984cda1a476a6f29b80d4b86ca03f9292d910 | 6265db7c5a8fedc3ded627829ce6040e8c8542d4 | refs/heads/master | 2021-01-21T04:27:36.649729 | 2020-10-13T00:25:52 | 2020-10-13T00:25:52 | 26,404,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | #!/usr/bin/python
# Project Euler 112: find the least number at which the proportion of
# "bouncy" numbers (digits neither non-decreasing nor non-increasing)
# first exceeds 99%.  (Python 2 syntax.)
cnt=0
for i in range(1,2500000):
    j=str(i)
    tag1="incr"
    # Any descending adjacent pair rules out "increasing".
    for k in range(0,len(j)-1):
        if j[k]>j[k+1]:
            tag1="bouncy"
            break
    tag2="decr"
    # Any ascending adjacent pair rules out "decreasing".
    for k in range(0,len(j)-1):
        if j[k]<j[k+1]:
            tag2="bouncy"
            break
    # Bouncy = neither increasing nor decreasing.
    if tag1=="bouncy" and tag2=="bouncy":
        cnt+=1
    # print i,tag1,tag2
    if cnt/(1.0*i)>0.99:
        print "yes",i,cnt
        break
#print "no",i,cnt
| [
"kartikeya.shandilya@gmail.com"
] | kartikeya.shandilya@gmail.com |
4d9725de9f5d6415c6a02d66311fb489c305c554 | a81a1efe1a93d5af0ef3f6403862a1544befd6cf | /HashTable/387_FirstUniqueCharacterInAString.py | b2a92fa927e8f971aa0b15b6b3070f9234e20fc0 | [] | no_license | fishleongxhh/LeetCode | 89da4ae3ca1715b1909c350437c0ba79eb2a8349 | d0352fecc61396fc460e1350572189b175a13f61 | refs/heads/master | 2020-04-05T17:14:27.976946 | 2018-12-16T14:10:54 | 2018-12-16T14:10:54 | 157,050,997 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | # -*- coding: utf-8 -*-
# Author: Xu Hanhui
# 此程序用来求解LeetCode387: First Unique Character in a String问题
def firstUniqChar(s):
    """Return the index of the first non-repeating character in s, or -1."""
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    # The earliest position whose character occurs exactly once wins.
    for idx, ch in enumerate(s):
        if counts[ch] == 1:
            return idx
    return -1
if __name__ == "__main__":
    # Quick manual check: every character of 'huhuihui' repeats, so -1.
    s = 'huhuihui'
    print(s)
    print(firstUniqChar(s))
| [
"xhh1120132805@163.com"
] | xhh1120132805@163.com |
148ee2aa423bec48565132b2a7ea8db0853d712d | be6b8e5f65ab1e86e72d4a70a6bcfe0891458df1 | /sigfig.py | 3bdf3596edba9f1336625991197baeb64b8616c7 | [] | no_license | annayqho/papers | c406185485ef447b4aebf0d37011e79ef4f872fb | 527bab334f118e77eeb028ed1a787be3b887724c | refs/heads/master | 2021-07-10T05:18:47.134685 | 2017-10-12T02:38:32 | 2017-10-12T02:38:32 | 100,283,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import numpy as np
from math import log10, floor
def round_sig(x, sig=2):
    """Round x to *sig* significant figures; 0 is returned unchanged."""
    if x == 0:
        return 0
    sign = -1 if x < 0 else 1
    mag = abs(x)
    # Decimal places that keep exactly `sig` significant digits.
    ndigits = sig - int(floor(log10(mag))) - 1
    return sign * round(mag, ndigits)
def ndec(num):
    """Return the number of characters after the last '.' in str(num).

    For a float this is its count of printed decimal places.  An int has
    no '.', so its full digit string is counted (kept for compatibility
    with the original behavior).
    """
    return len(str(num).rsplit('.', 1)[-1])
def format_val(val, sig):
    """Format a measurement as a LaTeX 'value +/- error' string.

    *val* is rounded to 2 significant figures and *sig* (the uncertainty)
    to the same number of decimal places.  Negative values produce an
    empty string.
    """
    rounded_val = round_sig(val, 2)
    rounded_sig = np.round(sig, ndec(rounded_val))
    if val < 0:
        # Negative measurements are suppressed entirely.
        return ""
    return str(rounded_val) + "$\pm$" + str(rounded_sig)
| [
"annayqho@gmail.com"
] | annayqho@gmail.com |
96a9a0075629c44cfce0d3f9471369f36b35ffd9 | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/twisted/pair/_version.py | 6203753e169cead8711dc820254d774becccc3be | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 271 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# This is an auto-generated file. Do not edit it.
"""
Provides Twisted version information.
"""
from twisted.python import versions
version = versions.Version('twisted.pair', 14, 0, 0)
| [
"l”ibaojunqd@foxmail.com“"
] | l”ibaojunqd@foxmail.com“ |
232499e5789d32aef85d5d3d8bef8407bdaa9cb7 | ae672becf06e084728388e2ca1fb72ca786336d2 | /chapter_08/exercise_8_4.py | 5c3ca3fe1e715841932ad35f802661267a72ad24 | [] | no_license | kexiaojiu/python_based_programming | d9631ba3aa0636d9b01020a7711834ba15d4843c | bd497479037856de6ef5852902e3352afb5c7cc9 | refs/heads/master | 2018-10-04T22:37:11.908957 | 2018-08-07T13:54:44 | 2018-08-07T13:54:44 | 116,146,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #!/usr/bin/env python3
def make_shirt(size, text='I love Python'):
print("The size of the shirt is " + str(size) + " ,and it's text is " +
text + "." )
# Demo: two shirts with the default slogan, one with a custom slogan.
make_shirt('big')
make_shirt('middle')
make_shirt('small', 'I love China')
| [
"kexiaojiu@163.com"
] | kexiaojiu@163.com |
7cf38f77e6652d444241e8aa1a1c3c5a15945497 | 1872b89ba17a08db60d58551f073a6b4e0d31a50 | /instrument_server/commands/command_parser.py | ea64dcc7971e724d84b863439d90b5abc4ba6f33 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Terrabits/instrument-server | f6e4c40d8e9835ada150dc0a8245024505d0a7d7 | a4950099410ac81b37516aeefcc6072d9865ba4e | refs/heads/master | 2022-05-28T01:27:25.051729 | 2022-04-29T22:53:39 | 2022-04-29T22:53:39 | 189,175,077 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | from .mixins import RaiseErrorMixin
import re
class CommandParser(RaiseErrorMixin):
    """Matches a textual command against raw bytes and parses its arguments.

    *command* is the command keyword (may end in '?'); *args* maps argument
    names to converter callables, or to a falsy value to keep the argument
    as a decoded string.
    """

    def __init__(self, command, args=None):
        RaiseErrorMixin.__init__(self)
        self.command = command
        # Fix: the previous default of ``args={}`` stored the shared mutable
        # default on the instance, so mutating one parser's ``args`` could
        # leak into every parser created without arguments.
        self.args = {} if args is None else args

    def is_match(self, command_bytes):
        """Return True if *command_bytes* begins with this parser's command.

        Only '?' is escaped before the regex is built; other regex
        metacharacters in ``self.command`` are assumed absent.
        NOTE(review): confirm commands never contain '.', '*', etc.
        """
        regex = f'^{self.command}\\s*'.replace('?', r'\?').encode()
        return re.match(regex, command_bytes) is not None

    def parse_args(self, command_bytes):
        """Split *command_bytes* after the keyword and convert each argument.

        Reports arity mismatches and failed conversions via
        ``self.raise_error``; returns a dict of converted (or decoded)
        values keyed by argument name.
        """
        values = command_bytes.strip().split()[1:]
        if len(values) < len(self.args):
            self.raise_error('too few arguments')
        if len(values) > len(self.args):
            self.raise_error('too many arguments')
        args = {}
        # ``converter`` replaces the old name ``type``, which shadowed the
        # builtin; iterating items() pairs each spec with its value directly.
        for (name, converter), value in zip(self.args.items(), values):
            if converter:
                try:
                    args[name] = converter(value)
                except Exception:
                    self.raise_error(f"'{value}' could not be converted to {converter}")
            else:
                # No converter given: decode the raw bytes to str.
                args[name] = value.decode()
        return args
| [
"nick.lalic@gmail.com"
] | nick.lalic@gmail.com |
9d06c4edefa4ba7412c5c85dd29d8babd72b8034 | d3750f32f8bc8a961de778f313a547e8636621e3 | /docs/conf.py | 02524b2634ac9ea5185b817381ebece9804fe68f | [
"BSD-3-Clause"
] | permissive | diamond0411/ndexncipidloader | 506e26b9ea8c716fb08dcaf72b138e557ce5b227 | cf1519bd7e9ada30e00df56011180a9069e4e967 | refs/heads/master | 2020-06-13T19:09:19.466283 | 2019-07-16T16:30:23 | 2019-07-16T16:30:23 | 194,761,167 | 1 | 1 | NOASSERTION | 2019-07-02T00:40:05 | 2019-07-02T00:40:05 | null | UTF-8 | Python | false | false | 4,970 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ndexncipidloader documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import ndexncipidloader
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NDEx NCI-PID content loader'
copyright = u"2019, Chris Churas"
author = u"Chris Churas"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ndexncipidloader.__version__
# The full version, including alpha/beta/rc tags.
release = ndexncipidloader.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ndexncipidloaderdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ndexncipidloader.tex',
u'NDEx NCI-PID content loader Documentation',
u'Chris Churas', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ndexncipidloader',
u'NDEx NCI-PID content loader Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ndexncipidloader',
u'NDEx NCI-PID content loader Documentation',
author,
'ndexncipidloader',
'One line description of project.',
'Miscellaneous'),
]
| [
"churas.camera@gmail.com"
] | churas.camera@gmail.com |
f12bc2202d1d785c55b97d61b4bdb753af43f43f | 1e84a9fec36deaf9a55a2734749ea035f72ac869 | /KAKAO BLIND RECRUITMENT/2018/1차/오픈채팅방/다시풀기.py | d45e1623d4e94692ba8b281ad8cadaaccec56d79 | [] | no_license | mgh3326/programmers_algorithm | aa3afc91231550e1fec2d72d90e85b140f79d677 | b62f08ccccbdcac71e484d508985a5a9ce5f2434 | refs/heads/master | 2022-08-31T04:19:15.728666 | 2022-07-31T14:02:26 | 2022-07-31T14:02:26 | 201,747,526 | 0 | 0 | null | 2022-07-23T10:19:13 | 2019-08-11T10:02:15 | Python | UTF-8 | Python | false | false | 965 | py | def solution(record):
    """Render the chat enter/leave feed using each user's final nickname."""
    answer = []
    user_name_dict = {}  # uid -> most recent nickname
    saved_list = []      # ordered (uid, action) events; Enter/Leave only
    for record_value in record:
        split = record_value.split()
        if split[0] == "Enter":
            user_name_dict[split[1]] = split[2]
            saved_list.append([split[1], split[0]])
        elif split[0] == "Leave":
            saved_list.append([split[1], split[0]])
        elif split[0] == "Change":
            # Change only updates the nickname; it produces no feed line.
            user_name_dict[split[1]] = split[2]
    # Second pass: every event is rendered with the *final* nickname.
    for saved in saved_list:
        out_str = ""
        user_id, enter_or_leave = saved
        user_name = user_name_dict[user_id]
        out_str += user_name
        if enter_or_leave == "Enter":
            out_str += "님이 들어왔습니다."
        else:
            out_str += "님이 나갔습니다."
        answer.append(out_str)
    return answer
# Sample from the problem statement; the expected output uses the final
# nicknames (Prodo, Ryan).
print(
    solution(
        ["Enter uid1234 Muzi", "Enter uid4567 Prodo", "Leave uid1234", "Enter uid1234 Prodo", "Change uid4567 Ryan"]
    )
)
| [
"mgh3326@naver.com"
] | mgh3326@naver.com |
4985819f4fdae6c3a772431253a034ccb65fd50d | e7be538e812d499fd41e483313c486581ac8995c | /scripts/curate_cerebra_labels.py | 2674e048eeed30c7b6f930f5f872efb99271aefc | [
"LicenseRef-scancode-other-permissive"
] | permissive | templateflow/tpl-MNI152NLin2009cSym | 7f11bbd4cdc365c7fc1e6158a6df9c3ee4d52b1e | 69a5e68d2b276b1e46f701892ac630397f56a741 | refs/heads/master | 2022-01-24T16:36:35.462848 | 2022-01-04T19:34:59 | 2022-01-04T19:34:59 | 253,858,504 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | import pandas as pd
# Split the single CerebrA label sheet into per-hemisphere tables, then
# stack them into one curated TSV with explicit L/R rows.
data = pd.read_csv('CerebrA_LabelDetails.csv')
right = data.copy()
left = data.copy()
# Add hemisphere column
right['hemi'] = 'R'
left['hemi'] = 'L'
# Reassign headers, drop opposite hemi column ('drop' marks the column
# belonging to the other hemisphere)
right = right.rename(columns={'Mindboggle ID': 'name', 'Label Name': 'label', 'RH Label': 'drop', 'LH LabelsNotes': 'notes', 'Dice Kappa': 'dice/kappa'})
left = left.rename(columns={'Mindboggle ID': 'name', 'Label Name': 'drop', 'RH Label': 'label', 'LH LabelsNotes': 'notes', 'Dice Kappa': 'dice/kappa'})
right = right.drop(columns=['drop'])
left = left.drop(columns=['drop'])
# Drop index: the row index becomes the 'mindboggle mapping' column
left.index.name = 'mindboggle mapping'
right.index.name = 'mindboggle mapping'
left = left.reset_index()
right = right.reset_index()
# Merge L/R tables, paired per mapping with R before L within each id
curated = pd.concat((right, left)).sort_values(by=['mindboggle mapping', 'hemi'])
curated[['label', 'name', 'hemi', 'mindboggle mapping', 'dice/kappa', 'notes']].to_csv('tpl-MNI152NLin2009cSym_atlas-CerebA_dseg.tsv', sep='\t', na_rep='n/a', header=True, index=False)
| [
"code@oscaresteban.es"
] | code@oscaresteban.es |
aada5ffab5800dd2c9361210ed80a88ae3ae8493 | 85e08aa6dcc83ecd33512ba453634b4eb8909638 | /tools/new-file.py | f75d2681fdf30bd02aa32477c92e635046159e55 | [
"MIT"
] | permissive | full-stack-hero/snippet | f6f3f1c6e0a95398a4cfe088821b186512d5940e | 9600c856c171d1296a151b4d654af0808980f939 | refs/heads/master | 2020-04-25T15:39:57.786782 | 2019-03-05T14:19:01 | 2019-03-05T14:19:01 | 172,886,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | import os
import datetime
title = input('Title: ')
# Timestamp-prefixed snippet filename, e.g. "202401011200-my-title.py".
filename = datetime.datetime.now().strftime("%Y%m%d%H%M-") + title + '.py'
# Fix: the computed filename was never interpolated into the URL or the
# output path (both held a literal placeholder), so every run pointed at
# the same bogus location.
url = f'https://github.com/full-stack-hero/snippet/blob/master/snippet/snippets/{filename}'
print('Create new file', filename)
with open(f'snippets/{filename}', 'w') as f:
    f.write(f'# :autor: @full.stack.hero\n')
    f.write(f'# :url: {url}\n\n')
| [
"axel.juraske@short-report.de"
] | axel.juraske@short-report.de |
7baad3a25f85335f80301e2a2cf89fbb9dbe4349 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayEcoCityserviceCityserviceEnergySendModel.py | f8a6a8917786fd6a4f675b9c04236c7cbd3cb520 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,425 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.EnergyExtRequest import EnergyExtRequest
class AlipayEcoCityserviceCityserviceEnergySendModel(object):
    """Request model for alipay.eco.cityservice.cityservice.energy.send.

    Generated-SDK style: each field is a property pair plus dict <-> model
    conversion helpers (to_alipay_dict / from_alipay_dict).
    """

    def __init__(self):
        self._ext_info = None   # list of EnergyExtRequest
        self._outer_no = None
        self._scene = None

    @property
    def ext_info(self):
        return self._ext_info
    @ext_info.setter
    def ext_info(self, value):
        # Accepts a list of EnergyExtRequest objects or plain dicts;
        # dicts are converted via EnergyExtRequest.from_alipay_dict.
        # Non-list values are silently ignored.
        if isinstance(value, list):
            self._ext_info = list()
            for i in value:
                if isinstance(i, EnergyExtRequest):
                    self._ext_info.append(i)
                else:
                    self._ext_info.append(EnergyExtRequest.from_alipay_dict(i))
    @property
    def outer_no(self):
        return self._outer_no
    @outer_no.setter
    def outer_no(self, value):
        self._outer_no = value
    @property
    def scene(self):
        return self._scene
    @scene.setter
    def scene(self, value):
        self._scene = value

    def to_alipay_dict(self):
        """Serialize to a plain dict; nested models are serialized in place."""
        params = dict()
        if self.ext_info:
            if isinstance(self.ext_info, list):
                for i in range(0, len(self.ext_info)):
                    element = self.ext_info[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.ext_info[i] = element.to_alipay_dict()
            if hasattr(self.ext_info, 'to_alipay_dict'):
                params['ext_info'] = self.ext_info.to_alipay_dict()
            else:
                params['ext_info'] = self.ext_info
        if self.outer_no:
            if hasattr(self.outer_no, 'to_alipay_dict'):
                params['outer_no'] = self.outer_no.to_alipay_dict()
            else:
                params['outer_no'] = self.outer_no
        if self.scene:
            if hasattr(self.scene, 'to_alipay_dict'):
                params['scene'] = self.scene.to_alipay_dict()
            else:
                params['scene'] = self.scene
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayEcoCityserviceCityserviceEnergySendModel()
        if 'ext_info' in d:
            o.ext_info = d['ext_info']
        if 'outer_no' in d:
            o.outer_no = d['outer_no']
        if 'scene' in d:
            o.scene = d['scene']
        return o
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
b2dba4a290d39607a7b51c5620c166530d270fad | 22b348a0d10519cb1f1da5e886fdf2d3c167cf5a | /myweb/test/coroutine_/client.py | ce807f0eede3f72de22276beaab46a1d5b077f7d | [] | no_license | liuluyang/openstack_mogan_study | dab0a8f918ffd17e0a747715998e81304672b75b | 8624f765da7f5aa0c210f0fa945fc50cf8a67b9e | refs/heads/master | 2021-01-19T17:03:15.370323 | 2018-04-12T09:50:38 | 2018-04-12T09:50:38 | 101,040,396 | 1 | 1 | null | 2017-11-01T02:17:31 | 2017-08-22T08:30:22 | Python | UTF-8 | Python | false | false | 336 | py | # -*- coding:utf-8 -*-
from socket import *
# Simple line-based TCP client (Python 2): read a command from stdin,
# send it, and echo the server's reply.
ADDR, PORT = 'localhost', 8001
client = socket(AF_INET,SOCK_STREAM)
client.connect((ADDR, PORT))
while 1:
    cmd = raw_input('>>:').strip()
    if len(cmd) == 0: continue  # ignore blank input lines
    client.send(cmd)
    data = client.recv(1024)
    print data
    #print('Received', repr(data))
#client.close()
"1120773382@qq.com"
] | 1120773382@qq.com |
dc42d8e89404cf9af6ce1eb141a5c7628715b53b | 8195f2c3a3b46a3b01571bcbc33960290fce3f32 | /biz/errors.py | 6511688a69bf2bd05d4f59a6e320e8e807cc7c69 | [] | no_license | adoggie/camel | e391e5544a1602ab43257b255dd6558bcc0ee3b1 | 24b4e9b397ca68b8d4d21be7c372b8163a6ca678 | refs/heads/master | 2021-01-18T20:27:55.172148 | 2017-05-22T14:19:57 | 2017-05-22T14:19:57 | 86,969,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,050 | py | #coding:utf-8
from camel.fundamental.errors import hash_object,ErrorEntry
class ErrorDefs:
    """Root error table; its codes carry no offset (__BASE__ == 0)."""
    __ver__ =u'1.0'
    __BASE__ = 0
    SUCC = ErrorEntry(0, u'成功')
class ErrorDefsDispatcher:
__ver__ = u'1.0'
__BASE__ = 10000
OK = ErrorEntry(1000, u'success成功')
DB_ERROR = ErrorEntry(1001, u'数据库错误')
LOGIN_FAIL = ErrorEntry(1002, u'登录失败')
REPEAT_TIC = ErrorEntry(1003, u'')
SERVER_ERR = ErrorEntry(1004, u'服务器错误')
REFRESH = ErrorEntry(1005, u'')
TRANS_OK = ErrorEntry(1006, u'')
TRUCK_IS_ONWAY = ErrorEntry(1007, u'车辆正在运输中')
UNKNOW_DEVICE = ErrorEntry(1008, u'未知设备,仅支持web登录')
AUTH_FAIL = ErrorEntry(1100, u'鉴权失败')
NO_TOKEN = ErrorEntry(1101, u'没有TOKEN')
NO_USER = ErrorEntry(1102, u'没有用户')
TOKEN_INVALID = ErrorEntry(1103, u'token已失效')
NO_VOUCHER = ErrorEntry(1104, u'票据获取失败')
USER_NOT_ACTIVE = ErrorEntry(1201, u'用户未激活')
RESET_PWD_FAIL = ErrorEntry(1202, u'重置密码错误')
USER_UP_ERR = ErrorEntry(1203, u'用户更新错误')
USER_NO_AUTHORIZATION = ErrorEntry(1204, u'用户无权限')
NO_DRIVER = ErrorEntry(1300, u'司机用户不存在')
NO_GROUP_STATUS = ErrorEntry(1301, u'不存在此状态组')
NO_TNUMBER = ErrorEntry(1302, u'无此订单')
INVALID_STATUS = ErrorEntry(1303, u'运单状态无效')
VALUE_ERROR = ErrorEntry(1304, u'参数值错误')
TRANS_DATA_CHANGED = ErrorEntry(1305, u'运单状态被改变')
TRANS_STATUS_CHANGE_FAIL = ErrorEntry(1306, u'运单状态改变失败')
LOCATION_NOT_IN_TRANS = ErrorEntry(1307, u'该地点不存在运单中')
TRANS_FINISHED = ErrorEntry(1308, u'运单已完成,异常上报失败')
TRANS_IS_EXIST = ErrorEntry(1309, u'运单号已存在')
PLATE_NO_TRANS = ErrorEntry(1310, u'此车牌无正在进行中的运单')
NO_LOCATION = ErrorEntry(1401, u'没有此location编码')
NOTE_EXIST = ErrorEntry(1402, u'重复添加note')
RECORD_EXIST = ErrorEntry(1403, u'重复补录操作')
TRANS_STATUS_ERROR = ErrorEntry(1404, u'运单状态错误')
DRIVER_QR_RELA_FAILED = ErrorEntry(1501, u'车牌已有司机绑定')
DRIVER_QR_CODE_INVALID = ErrorEntry(1502, u'无效的司机二维码')
DRIVER_QR_CODE_EXPIRED = ErrorEntry(1503, u'司机二维码已过期')
NO_TRUCK = ErrorEntry(1601, u'车辆不存在')
NO_QR_RS = ErrorEntry(1701, u'车辆未绑定司机')
NO_LINE = ErrorEntry(1702, u'线路不存在')
DRIVER_HAS_BOUND_PLATE = ErrorEntry(1703, u'司机已绑定车牌')
NO_CQ = ErrorEntry(1704, u'未获取到车签号')
CQ_IS_EXIST = ErrorEntry(1801, u'车签已存在')
PLATE_NO_SAME = ErrorEntry(1802, u'建立关联关系的两个运单车牌不一致')
TRANS_HAVE_LINKED = ErrorEntry(1803, u'运单已经被关联')
TIME_MATCH_ERROR = ErrorEntry(1804, u'客户端时间与服务器时间不匹配')
class ErrorDefsDriver:
__ver__ =u'1.0'
__BASE__ = 20000
OK = ErrorEntry( 1000 ,u'success成功')
DB_ERROR = ErrorEntry( 1001 ,u'服务器打了个盹')
SERVER_ERR = ErrorEntry( 1004 ,u'服务器开小差啦')
REFRESH = ErrorEntry( 1005 ,u'刷新回调')
NO_PERMIT = ErrorEntry( 1008 ,u'未获取到运单信息')
AUTH_FAIL = ErrorEntry( 1100 ,u'密码输入错误,请重新输入')
TOKEN_INVALID = ErrorEntry( 1101 ,u'您的帐号登录已过期失效,请重新登录')
NO_USER = TOKEN_INVALID
NO_DRIVER = TOKEN_INVALID
NO_USER_EXIST = ErrorEntry( 1102 ,u'该手机号未注册')
NO_DRIVER_EXIST = NO_USER_EXIST
USER_OUT = ErrorEntry( 1103 ,u'您的帐号已在其他手机登录')
USER_EXIST = ErrorEntry( 1104 ,u'该手机号已被注册')
REGISTER_ERR = ErrorEntry( 1105 ,u'网络连接失败,请检查网络')
NOT_DRIVER = ErrorEntry( 1106 ,u'请使用司机端APP注册')
PASSWD_ERR = ErrorEntry( 1107 ,u'原密码输入错误,请重新输入')
USER_NOT_ACTIVE = ErrorEntry( 1201 ,u'请修改初始密码')
NO_TNUMBER = ErrorEntry( 1302 ,u'运单不存在')
SMS_EXPIRE = ErrorEntry( 1303 ,u'验证码已过期, 请重新获取')
PARAMS_ERROR = ErrorEntry( 1304 ,u'参数类型错误')
SMS_ERROR = ErrorEntry( 1305 ,u'验证码错误,请重新输入')
SMS_SENDED = ErrorEntry( 1306 ,u'验证码已发送,请稍后再试')
TRANS_FINISHED = ErrorEntry( 1308 ,u'运单已完成,无法进行异常上报')
NO_LOCATION = ErrorEntry( 1401 ,u'没有此location编码')
DRIVER_QR_RELA_FAILED = ErrorEntry( 1501 ,u'绑定失败')
NO_TRUCK = ErrorEntry( 1601 ,u'未找到对应的车辆信息')
NO_QR_RS = ErrorEntry( 1701 ,u'未绑定车辆')
EXCEPTION_EXIST = ErrorEntry(1805, u'重复上报异常')
class ErrorDefsCarrier:
__ver__ =u'1.0'
__BASE__ = 30000
OK = ErrorEntry(1800,u'success')
SERVER_ERR = ErrorEntry(1801,u'server err!')
LOGIN_FAIL = ErrorEntry(1817,u'login fail!')
NOT_ALLOW = ErrorEntry(1803,u'not allow!')
COMMITED = ErrorEntry(1804,u'commited')
REGISTERED = ErrorEntry(1805,u'registered')
NO_USER = ErrorEntry(1806,u'no user')
METHOD_ERR = ErrorEntry(1807,u'method err!')
NO_DATA = ErrorEntry(1808,u'no data')
TEMP_TOKEN = ErrorEntry(1809,u'tmp token')
PASSWD_EXPIRE = ErrorEntry(1810,u'token expire')
DB_ERROR = ErrorEntry(1811,u'db err')
CHECKED = ErrorEntry(1812,u'已审核')
ADMIN_USER = ErrorEntry(1813,u'admin user')
NO_TOKEN = ErrorEntry(1814,u'NO TOKEN')
PASSWD_ERR = ErrorEntry(1816,u'passwd error!')
TOKEN_EXPIRE = ErrorEntry(1802,u'token expire!')
PARAMS_ERR = ErrorEntry(1818,u'params_err!')
NO_SHIPPER = ErrorEntry(1819,u'no shipper')
NO_MATCH_DATA = ErrorEntry(1820,u'no match data')
SHIPPER_NO_COMMIT = ErrorEntry(1821,u'shpper have no committed')
TRUCK_EXISTS = ErrorEntry(1822,u'truck exists')
errordefs = (ErrorDefsDispatcher,ErrorDefsCarrier,ErrorDefsDriver)
def reIndex():
for defs in errordefs:
kvs = hash_object( defs)
for k,v in kvs.items():
v.value+= defs.__BASE__
print defs,':',k,'=',v.value,v.comment
| [
"24509826@qq.com"
] | 24509826@qq.com |
4b63255fa149486950f043c7f04558b67ca41f7f | 0c53c0a5dcd5b4a6e237fb034a9e9f544fdc7d20 | /pdkb/planner.py | e1b0c18f8f7b96f634343efd4bab04a60aa3ffcc | [
"MIT"
] | permissive | javiermtorres/pdkb-planning | 4b379776ba42ac907c246d21a93e186c54926005 | 61a96c006b606aa051b2c7c9b5bfc9b6473d2a4d | refs/heads/master | 2022-11-08T12:14:06.781734 | 2020-06-23T01:40:27 | 2020-06-23T01:40:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py |
import hashlib
import os
import pickle
import sys
import time

from .actions import *
from .problems import *
def cleanup():
    """Remove the scratch files produced by a planning run."""
    scratch_files = (
        'pdkb-domain.pddl',
        'pdkb-problem.pddl',
        'pdkb-plan.txt',
        'pdkb-plan.out',
        'pdkb-plan.out.err',
        'execution.details',
    )
    for leftover in scratch_files:
        os.system('rm -f %s' % leftover)
def solve(pdkbddl_file, old_planner=False):
    """Parse, preprocess (with on-disk caching) and solve a PDKBDDL problem.

    Preprocessed problems are cached in .problem-cache, keyed by a digest
    of the pickled parse; the cache is bypassed when a .nocache file exists.
    """
    print()
    if not os.path.isdir('.problem-cache'):
        os.mkdir('.problem-cache')

    t_start = time.time()

    print("Parsing problem...", end=' ')
    sys.stdout.flush()
    problem = parse_pdkbddl(pdkbddl_file)
    print("done!")

    print("Preprocessing problem...", end=' ')
    sys.stdout.flush()
    # Fix: hash() of bytes is salted per process on Python 3, so the old
    # hash(pickle.dumps(...)) key changed on every run and the cache never
    # hit.  A SHA-1 digest is stable across processes.
    prob_hash = hashlib.sha1(pickle.dumps(problem)).hexdigest()
    fname = ".problem-cache/%s" % str(prob_hash)
    if os.path.isfile(fname) and not os.path.isfile('.nocache'):
        # Fix: pickles are written in binary ('wb') but were re-read in
        # text mode ('r'), which fails under Python 3 -- read as binary
        # and close the handle deterministically.
        with open(fname, 'rb') as f:
            problem = pickle.load(f)
        print("done! (from cache)")
    else:
        problem.preprocess()
        with open(fname, 'wb') as f:
            pickle.dump(problem, f, 2)
        print("done!")

    print("Solving problem...", end=' ')
    sys.stdout.flush()
    problem.solve(old_planner)
    print("done!")

    print("\nTime: %f s" % (time.time() - t_start))
    problem.output_solution()
    print()
if __name__ == '__main__':
    # CLI entry point: first argument is the problem file; optional flags
    # select the old planner and/or keep the scratch files afterwards.
    if len(sys.argv) < 2:
        print("\nUsage: python planner.py <pdkbddl file> [--keep-files] [--old-planner]\n")
        sys.exit(1)
    solve(sys.argv[1], old_planner=('--old-planner' in sys.argv))
    # --keep-files must be the second argument to suppress cleanup.
    if len(sys.argv) < 3 or '--keep-files' != sys.argv[2]:
        cleanup()
| [
"christian.muise@gmail.com"
] | christian.muise@gmail.com |
bf0ab1c5d71cbb173fe840a0b6c59b8c19cfc5e2 | ad0e853db635edc578d58891b90f8e45a72a724f | /doc/source/data/doc_code/batch_formats.py | 2099f70e9bb80050091134df790645f849ed25d4 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | ericl/ray | 8c93fc713af3b753215d4fe6221278700936e2db | e9a1c6d814fb1a81033809f56695030d651388f5 | refs/heads/master | 2023-08-31T11:53:23.584855 | 2023-06-07T21:04:28 | 2023-06-07T21:04:28 | 91,077,004 | 2 | 4 | Apache-2.0 | 2023-01-11T17:19:10 | 2017-05-12T09:51:04 | Python | UTF-8 | Python | false | false | 2,059 | py | # flake8: noqa
# isort: skip_file
# fmt: off
# __simple_map_function_start__
import ray
ds = ray.data.read_csv("example://iris.csv")
def map_function(data):
return data[data["sepal.length"] < 5]
batch = ds.take_batch(10, batch_format="pandas")
mapped_batch = map_function(batch)
transformed = ds.map_batches(map_function, batch_format="pandas", batch_size=10)
# __simple_map_function_end__
# __simple_pandas_start__
import ray
import pandas as pd
ds = ray.data.read_csv("example://iris.csv")
ds.show(1)
# -> {'sepal.length': 5.1, ..., 'petal.width': 0.2, 'variety': 'Setosa'}
def transform_pandas(df_batch: pd.DataFrame) -> pd.DataFrame:
df_batch = df_batch[df_batch["variety"] == "Versicolor"]
df_batch.loc[:, "normalized.sepal.length"] = df_batch["sepal.length"] / df_batch["sepal.length"].max()
df_batch = df_batch.drop(columns=["sepal.length"])
return df_batch
ds.map_batches(transform_pandas, batch_format="pandas").show(1)
# -> {..., 'variety': 'Versicolor', 'normalized.sepal.length': 1.0}
# __simple_pandas_end__
# __simple_numpy_start__
from typing import Dict
import ray
import numpy as np
from typing import Dict
ds = ray.data.range_tensor(1000, shape=(2, 2))
def transform_numpy(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
arr["data"] = arr["data"] * 2
return arr
# test map function on a batch
batch = ds.take_batch(1)
mapped_batch = transform_numpy(batch)
ds.map_batches(transform_numpy)
# __simple_numpy_end__
# __simple_pyarrow_start__
import ray
import pyarrow as pa
import pyarrow.compute as pac
ds = ray.data.read_csv("example://iris.csv")
def transform_pyarrow(batch: pa.Table) -> pa.Table:
batch = batch.filter(pac.equal(batch["variety"], "Versicolor"))
return batch.drop(["sepal.length"])
# test map function on a batch
batch = ds.take_batch(1, batch_format="pyarrow")
mapped_batch = transform_pyarrow(batch)
ds.map_batches(transform_pyarrow, batch_format="pyarrow").show(1)
# -> {'sepal.width': 3.2, ..., 'variety': 'Versicolor'}
# __simple_pyarrow_end__
# fmt: on
| [
"noreply@github.com"
] | ericl.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.